/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
 * Copyright (c) 2016 Rubicon Communications, LLC (Netgate)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * TI Common Platform Ethernet Switch (CPSW) Driver
 * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs.
 *
 * This controller is documented in the AM335x Technical Reference
 * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM
 * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM.
 *
 * It is basically a single Ethernet port (port 0) wired internally to
 * a 3-port store-and-forward switch connected to two independent
 * "sliver" controllers (port 1 and port 2).  You can operate the
 * controller in a variety of different ways by suitably configuring
 * the slivers and the Address Lookup Engine (ALE) that routes packets
 * between the ports.
 *
 * This code was developed and tested on a BeagleBone with
 * an AM335x SoC.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cpsw.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <dev/extres/syscon/syscon.h>
#include "syscon_if.h"
#include <arm/ti/am335x/am335x_scm.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/fdt/fdt_common.h>

#ifdef CPSW_ETHERSWITCH
#include <dev/etherswitch/etherswitch.h>
#include "etherswitch_if.h"
#endif

#include "if_cpswreg.h"
#include "if_cpswvar.h"

#include "miibus_if.h"
/* Device probe/attach/detach. */
static int cpsw_probe(device_t);
static int cpsw_attach(device_t);
static int cpsw_detach(device_t);
static int cpswp_probe(device_t);
static int cpswp_attach(device_t);
static int cpswp_detach(device_t);

static phandle_t cpsw_get_node(device_t, device_t);

/* Device Init/shutdown. */
static int cpsw_shutdown(device_t);
static void cpswp_init(void *);
static void cpswp_init_locked(void *);
static void cpswp_stop_locked(struct cpswp_softc *);

/* Device Suspend/Resume. */
static int cpsw_suspend(device_t);
static int cpsw_resume(device_t);

/* Ioctl. */
static int cpswp_ioctl(if_t, u_long command, caddr_t data);

static int cpswp_miibus_readreg(device_t, int phy, int reg);
static int cpswp_miibus_writereg(device_t, int phy, int reg, int value);
static void cpswp_miibus_statchg(device_t);

/* Send/Receive packets. */
static void cpsw_intr_rx(void *arg);
static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *);
static void cpsw_rx_enqueue(struct cpsw_softc *);
static void cpswp_start(if_t);
static void cpsw_intr_tx(void *);
static void cpswp_tx_enqueue(struct cpswp_softc *);
static int cpsw_tx_dequeue(struct cpsw_softc *);

/* Misc interrupts and watchdog. */
static void cpsw_intr_rx_thresh(void *);
static void cpsw_intr_misc(void *);
static void cpswp_tick(void *);
static void cpswp_ifmedia_sts(if_t, struct ifmediareq *);
static int cpswp_ifmedia_upd(if_t);
static void cpsw_tx_watchdog(void *);

/* ALE support */
static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *);
static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *);
static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *);
static void cpsw_ale_dump_table(struct cpsw_softc *);
static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int,
	int);
static int cpswp_ale_update_addresses(struct cpswp_softc *, int);

/* Statistics and sysctls. */
static void cpsw_add_sysctls(struct cpsw_softc *);
static void cpsw_stats_collect(struct cpsw_softc *);
static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS);

#ifdef CPSW_ETHERSWITCH
static etherswitch_info_t *cpsw_getinfo(device_t);
static int cpsw_getport(device_t, etherswitch_port_t *);
static int cpsw_setport(device_t, etherswitch_port_t *);
static int cpsw_getconf(device_t, etherswitch_conf_t *);
static int cpsw_getvgroup(device_t, etherswitch_vlangroup_t *);
static int cpsw_setvgroup(device_t, etherswitch_vlangroup_t *);
static int cpsw_readreg(device_t, int);
static int cpsw_writereg(device_t, int, int);
static int cpsw_readphy(device_t, int, int);
static int cpsw_writephy(device_t, int, int, int);
#endif

/*
 * Arbitrary limit on number of segments in an mbuf to be transmitted.
 * Packets with more segments than this will be defragmented before
 * they are queued.
 */
#define	CPSW_TXFRAGS		16
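/*
 * Note that CPSW_TXFRAGS is also used as the nsegments limit of the mbuf
 * DMA tag created in cpsw_attach(), so a packet with more segments first
 * gets m_defrag()'ed in cpswp_tx_enqueue() and is only dropped if that
 * defragmentation fails.
 */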
/* Shared resources. */
static device_method_t cpsw_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cpsw_probe),
	DEVMETHOD(device_attach,	cpsw_attach),
	DEVMETHOD(device_detach,	cpsw_detach),
	DEVMETHOD(device_shutdown,	cpsw_shutdown),
	DEVMETHOD(device_suspend,	cpsw_suspend),
	DEVMETHOD(device_resume,	cpsw_resume),
	/* Bus interface */
	DEVMETHOD(bus_add_child,	device_add_child_ordered),
	/* OFW methods */
	DEVMETHOD(ofw_bus_get_node,	cpsw_get_node),
#ifdef CPSW_ETHERSWITCH
	/* etherswitch interface */
	DEVMETHOD(etherswitch_getinfo,	cpsw_getinfo),
	DEVMETHOD(etherswitch_readreg,	cpsw_readreg),
	DEVMETHOD(etherswitch_writereg,	cpsw_writereg),
	DEVMETHOD(etherswitch_readphyreg,	cpsw_readphy),
	DEVMETHOD(etherswitch_writephyreg,	cpsw_writephy),
	DEVMETHOD(etherswitch_getport,	cpsw_getport),
	DEVMETHOD(etherswitch_setport,	cpsw_setport),
	DEVMETHOD(etherswitch_getvgroup,	cpsw_getvgroup),
	DEVMETHOD(etherswitch_setvgroup,	cpsw_setvgroup),
	DEVMETHOD(etherswitch_getconf,	cpsw_getconf),
#endif
	DEVMETHOD_END
};

static driver_t cpsw_driver = {
	"cpswss",
	cpsw_methods,
	sizeof(struct cpsw_softc),
};

DRIVER_MODULE(cpswss, simplebus, cpsw_driver, 0, 0);

/* Port/Slave resources. */
static device_method_t cpswp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cpswp_probe),
	DEVMETHOD(device_attach,	cpswp_attach),
	DEVMETHOD(device_detach,	cpswp_detach),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	cpswp_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cpswp_miibus_writereg),
	DEVMETHOD(miibus_statchg,	cpswp_miibus_statchg),
	DEVMETHOD_END
};

static driver_t cpswp_driver = {
	"cpsw",
	cpswp_methods,
	sizeof(struct cpswp_softc),
};

#ifdef CPSW_ETHERSWITCH
DRIVER_MODULE(etherswitch, cpswss, etherswitch_driver, 0, 0);
MODULE_DEPEND(cpswss, etherswitch, 1, 1, 1);
#endif

DRIVER_MODULE(cpsw, cpswss, cpswp_driver, 0, 0);
DRIVER_MODULE(miibus, cpsw, miibus_driver, 0, 0);
MODULE_DEPEND(cpsw, ether, 1, 1, 1);
MODULE_DEPEND(cpsw, miibus, 1, 1, 1);

#ifdef CPSW_ETHERSWITCH
static struct cpsw_vlangroups cpsw_vgroups[CPSW_VLANS];
#endif

static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 };

static struct resource_spec irq_res_spec[] = {
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

static struct {
	void (*cb)(void *);
} cpsw_intr_cb[] = {
	{ cpsw_intr_rx_thresh },
	{ cpsw_intr_rx },
	{ cpsw_intr_tx },
	{ cpsw_intr_misc },
};
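/*
 * The callbacks above are attached 1:1, in order, to the four IRQ
 * resources in irq_res_spec by cpsw_intr_attach(): RX threshold, RX,
 * TX and misc.
 */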
/*
 * Number of entries here must match size of stats
 * array in struct cpswp_softc.
 */
static struct cpsw_stat {
	int	reg;
	char *oid;
} cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = {
	{0x00, "GoodRxFrames"},
	{0x04, "BroadcastRxFrames"},
	{0x08, "MulticastRxFrames"},
	{0x0C, "PauseRxFrames"},
	{0x10, "RxCrcErrors"},
	{0x14, "RxAlignErrors"},
	{0x18, "OversizeRxFrames"},
	{0x1c, "RxJabbers"},
	{0x20, "ShortRxFrames"},
	{0x24, "RxFragments"},
	{0x30, "RxOctets"},
	{0x34, "GoodTxFrames"},
	{0x38, "BroadcastTxFrames"},
	{0x3c, "MulticastTxFrames"},
	{0x40, "PauseTxFrames"},
	{0x44, "DeferredTxFrames"},
	{0x48, "CollisionsTxFrames"},
	{0x4c, "SingleCollisionTxFrames"},
	{0x50, "MultipleCollisionTxFrames"},
	{0x54, "ExcessiveCollisions"},
	{0x58, "LateCollisions"},
	{0x5c, "TxUnderrun"},
	{0x60, "CarrierSenseErrors"},
	{0x64, "TxOctets"},
	{0x68, "RxTx64OctetFrames"},
	{0x6c, "RxTx65to127OctetFrames"},
	{0x70, "RxTx128to255OctetFrames"},
	{0x74, "RxTx256to511OctetFrames"},
	{0x78, "RxTx512to1024OctetFrames"},
	{0x7c, "RxTx1024upOctetFrames"},
	{0x80, "NetOctets"},
	{0x84, "RxStartOfFrameOverruns"},
	{0x88, "RxMiddleOfFrameOverruns"},
	{0x8c, "RxDmaOverruns"}
};

/*
 * Basic debug support.
 */

static void
cpsw_debugf_head(const char *funcname)
{
	int t = (int)(time_second % (24 * 60 * 60));

	printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60,
	    funcname);
}

static void
cpsw_debugf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

#define	CPSW_DEBUGF(_sc, a) do {					\
	if ((_sc)->debug) {						\
		cpsw_debugf_head(__func__);				\
		cpsw_debugf a;						\
	}								\
} while (0)

/*
 * Locking macros
 */
#define	CPSW_TX_LOCK(sc) do {						\
	mtx_assert(&(sc)->rx.lock, MA_NOTOWNED);			\
	mtx_lock(&(sc)->tx.lock);					\
} while (0)

#define	CPSW_TX_UNLOCK(sc)	mtx_unlock(&(sc)->tx.lock)
#define	CPSW_TX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->tx.lock, MA_OWNED)

#define	CPSW_RX_LOCK(sc) do {						\
	mtx_assert(&(sc)->tx.lock, MA_NOTOWNED);			\
	mtx_lock(&(sc)->rx.lock);					\
} while (0)

#define	CPSW_RX_UNLOCK(sc)	mtx_unlock(&(sc)->rx.lock)
#define	CPSW_RX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->rx.lock, MA_OWNED)

#define	CPSW_PORT_LOCK(_sc) do {					\
	mtx_assert(&(_sc)->lock, MA_NOTOWNED);				\
	mtx_lock(&(_sc)->lock);						\
} while (0)

#define	CPSW_PORT_UNLOCK(_sc)	mtx_unlock(&(_sc)->lock)
#define	CPSW_PORT_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->lock, MA_OWNED)

/*
 * Read/Write macros
 */
#define	cpsw_read_4(_sc, _reg)		bus_read_4((_sc)->mem_res, (_reg))
#define	cpsw_write_4(_sc, _reg, _val)					\
	bus_write_4((_sc)->mem_res, (_reg), (_val))
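/*
 * The CPDMA buffer descriptors live in the CPPI RAM inside the switch
 * subsystem (hence the BUS_SPACE_PHYSADDR/bus_read accessors below rather
 * than plain pointers) and are 16 bytes each.  The byte layout assumed by
 * these macros, matching the field order of struct cpsw_cpdma_bd:
 *
 *	bytes  0- 3: next	phys addr of next descriptor (0 ends queue)
 *	bytes  4- 7: bufptr	phys addr of the data buffer
 *	bytes  8- 9: bufoff	offset of valid data within the buffer
 *	bytes 10-11: buflen	length of valid data
 *	bytes 12-13: pktlen	total packet length (SOP descriptors only)
 *	bytes 14-15: flags	CPDMA_BD_SOP/EOP/OWNER/EOQ/... bits
 *
 * which is why cpsw_cpdma_write_bd_flags() writes two bytes at offset 14.
 */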
#define	cpsw_cpdma_bd_offset(i)	(CPSW_CPPI_RAM_OFFSET + ((i)*16))

#define	cpsw_cpdma_bd_paddr(sc, slot)					\
	BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset)
#define	cpsw_cpdma_read_bd(sc, slot, val)				\
	bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
#define	cpsw_cpdma_write_bd(sc, slot, val)				\
	bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
#define	cpsw_cpdma_write_bd_next(sc, slot, next_slot)			\
	cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot))
#define	cpsw_cpdma_write_bd_flags(sc, slot, val)			\
	bus_write_2(sc->mem_res, slot->bd_offset + 14, val)
#define	cpsw_cpdma_read_bd_flags(sc, slot)				\
	bus_read_2(sc->mem_res, slot->bd_offset + 14)
#define	cpsw_write_hdp_slot(sc, queue, slot)				\
	cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot))
#define	CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0))
#define	cpsw_read_cp(sc, queue)						\
	cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET)
#define	cpsw_write_cp(sc, queue, val)					\
	cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val))
#define	cpsw_write_cp_slot(sc, queue, slot)				\
	cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot))

#if 0
/* XXX temporary function versions for debugging. */
static void
cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue,
    struct cpsw_slot *slot)
{
	uint32_t reg = queue->hdp_offset;
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg)));
	cpsw_write_4(sc, reg, v);
}

static void
cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue,
    struct cpsw_slot *slot)
{
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v,
	    cpsw_read_cp(sc, queue)));
	cpsw_write_cp(sc, queue, v);
}
#endif

/*
 * Expanded dump routines for verbose debugging.
 */
static void
cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ",
	    "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun",
	    "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1",
	    "Port0"};
	struct cpsw_cpdma_bd bd;
	const char *sep;
	int i;

	cpsw_cpdma_read_bd(sc, slot, &bd);
	printf("BD Addr : 0x%08x   Next  : 0x%08x\n",
	    cpsw_cpdma_bd_paddr(sc, slot), bd.next);
	printf("  BufPtr: 0x%08x   BufLen: 0x%08x\n", bd.bufptr, bd.buflen);
	printf("  BufOff: 0x%08x   PktLen: 0x%08x\n", bd.bufoff, bd.pktlen);
	printf("  Flags: ");
	sep = "";
	for (i = 0; i < 16; ++i) {
		if (bd.flags & (1 << (15 - i))) {
			printf("%s%s", sep, flags[i]);
			sep = ",";
		}
	}
	printf("\n");
	if (slot->mbuf) {
		printf("  Ether:  %14D\n",
		    (char *)(slot->mbuf->m_data), " ");
		printf("  Packet: %16D\n",
		    (char *)(slot->mbuf->m_data) + 14, " ");
	}
}

#define	CPSW_DUMP_SLOT(cs, slot) do {					\
	IF_DEBUG(sc) {							\
		cpsw_dump_slot(sc, slot);				\
	}								\
} while (0)

static void
cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q)
{
	struct cpsw_slot *slot;
	int i = 0;
	int others = 0;

	STAILQ_FOREACH(slot, q, next) {
		if (i > CPSW_TXFRAGS)
			++others;
		else
			cpsw_dump_slot(sc, slot);
		++i;
	}
	if (others)
		printf(" ... and %d more.\n", others);
	printf("\n");
}

#define	CPSW_DUMP_QUEUE(sc, q) do {					\
	IF_DEBUG(sc) {							\
		cpsw_dump_queue(sc, q);					\
	}								\
} while (0)
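/*
 * Slot management, in brief: cpsw_init_slots() below seeds the global
 * sc->avail list with one slot per buffer descriptor in CPPI RAM.
 * cpsw_add_slots() then moves slots onto a queue's avail list, and the
 * TX/RX enqueue and dequeue routines cycle them between each queue's
 * avail and active lists as buffers are handed to and reclaimed from the
 * hardware.
 */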
static void
cpsw_init_slots(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot;
	int i;

	STAILQ_INIT(&sc->avail);

	/* Put the slot descriptors onto the global avail list. */
	for (i = 0; i < nitems(sc->_slots); i++) {
		slot = &sc->_slots[i];
		slot->bd_offset = cpsw_cpdma_bd_offset(i);
		STAILQ_INSERT_TAIL(&sc->avail, slot, next);
	}
}

static int
cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested)
{
	const int max_slots = nitems(sc->_slots);
	struct cpsw_slot *slot;
	int i;

	if (requested < 0)
		requested = max_slots;

	for (i = 0; i < requested; ++i) {
		slot = STAILQ_FIRST(&sc->avail);
		if (slot == NULL)
			return (0);
		if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
			device_printf(sc->dev, "failed to create dmamap\n");
			return (ENOMEM);
		}
		STAILQ_REMOVE_HEAD(&sc->avail, next);
		STAILQ_INSERT_TAIL(&queue->avail, slot, next);
		++queue->avail_queue_len;
		++queue->queue_slots;
	}
	return (0);
}

static void
cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	int error __diagused;

	if (slot->dmamap) {
		if (slot->mbuf)
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
		error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap);
		KASSERT(error == 0, ("Mapping still active"));
		slot->dmamap = NULL;
	}
	if (slot->mbuf) {
		m_freem(slot->mbuf);
		slot->mbuf = NULL;
	}
}

static void
cpsw_reset(struct cpsw_softc *sc)
{
	int i;

	callout_stop(&sc->watchdog.callout);

	/* Reset RMII/RGMII wrapper. */
	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
		;

	/* Disable TX and RX interrupts for all cores. */
	for (i = 0; i < 3; ++i) {
		cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00);
	}

	/* Reset CPSW subsystem. */
	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
		;

	/* Reset Sliver port 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Reset */
		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
			;
	}

	/* Reset DMA controller. */
	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
		;

	/* Disable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0);

	/* Clear all queues. */
	for (i = 0; i < 8; i++) {
		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
	}

	/* Clear all interrupt Masks */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
}
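/*
 * Note that the SOFT_RESET bits polled in cpsw_reset() above are
 * self-clearing: writing 1 starts the reset and the hardware clears the
 * bit when it finishes, so each wait loop normally terminates after a
 * few reads.
 */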
static void
cpsw_init(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot;
	uint32_t reg;

	/* Disable the interrupt pacing. */
	reg = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
	reg &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
	cpsw_write_4(sc, CPSW_WR_INT_CONTROL, reg);

	/* Clear ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL);

	/* Enable ALE */
	reg = CPSW_ALE_CTL_ENABLE;
	if (sc->dualemac)
		reg |= CPSW_ALE_CTL_VLAN_AWARE;
	cpsw_write_4(sc, CPSW_ALE_CONTROL, reg);

	/* Set Host Port Mapping. */
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);

	/* Initialize ALE: set host port to forwarding(3). */
	cpsw_write_4(sc, CPSW_ALE_PORTCTL(0),
	    ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD);

	cpsw_write_4(sc, CPSW_SS_PTYPE, 0);

	/* Enable statistics for ports 0, 1 and 2 */
	cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);

	/* Turn off flow control. */
	cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);

	/* Keep the IP header 4-byte aligned. */
	cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2);

	/* Initialize RX Buffer Descriptors */
	cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);

	/* Enable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);

	/* Enable Interrupts for core 0 */
	cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);

	/* Enable host Error Interrupt */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3);

	/* Enable interrupts for RX and TX on Channel 0 */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET,
	    CPSW_CPDMA_RX_INT(0) | CPSW_CPDMA_RX_INT_THRESH(0));
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1);

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff);

	/* Select MII in GMII_SEL, Internal Delay mode */
	//ti_scm_reg_write_4(0x650, 0);

	/* Initialize active queues. */
	slot = STAILQ_FIRST(&sc->tx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->tx, slot);
	slot = STAILQ_FIRST(&sc->rx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->rx, slot);
	cpsw_rx_enqueue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), sc->rx.active_queue_len);
	cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), CPSW_TXFRAGS);

	/* Activate network interface. */
	sc->rx.running = 1;
	sc->tx.running = 1;
	sc->watchdog.timer = 0;
	callout_init(&sc->watchdog.callout, 0);
	callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
}
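/*
 * A sketch of the CLKDIV calculation the TODO in cpsw_init() above refers
 * to.  MDCLK is derived as CLK/(CLKDIV+1); the 125 MHz reference clock
 * and the 1 MHz target used in the example are illustrative assumptions,
 * not values taken from the TRM or the device tree.
 */
#if 0
static uint32_t
cpsw_mdio_clkdiv(uint32_t clk_hz, uint32_t mdclk_hz)
{

	/* MDCLK = CLK / (CLKDIV + 1)  =>  CLKDIV = CLK / MDCLK - 1. */
	return (howmany(clk_hz, mdclk_hz) - 1);
}

/* e.g. cpsw_mdio_clkdiv(125000000, 1000000) == 124 (0x7c). */
#endif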
/*
 *
 * Device Probe, Attach, Detach.
 *
 */

static int
cpsw_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
		return (ENXIO);

	device_set_desc(dev, "3-port Switch Ethernet Subsystem");
	return (BUS_PROBE_DEFAULT);
}

static int
cpsw_intr_attach(struct cpsw_softc *sc)
{
	int i;

	for (i = 0; i < CPSW_INTR_COUNT; i++) {
		if (bus_setup_intr(sc->dev, sc->irq_res[i],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    cpsw_intr_cb[i].cb, sc, &sc->ih_cookie[i]) != 0) {
			return (-1);
		}
	}

	return (0);
}

static void
cpsw_intr_detach(struct cpsw_softc *sc)
{
	int i;

	for (i = 0; i < CPSW_INTR_COUNT; i++) {
		if (sc->ih_cookie[i]) {
			bus_teardown_intr(sc->dev, sc->irq_res[i],
			    sc->ih_cookie[i]);
		}
	}
}

static int
cpsw_get_fdt_data(struct cpsw_softc *sc, int port)
{
	char *name;
	int len, phy, vlan;
	pcell_t phy_id[3], vlan_id;
	phandle_t child;
	unsigned long mdio_child_addr;

	/* Find any slave with phy-handle/phy_id */
	phy = -1;
	vlan = -1;
	for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) {
		if (OF_getprop_alloc(child, "name", (void **)&name) < 0)
			continue;
		if (sscanf(name, "slave@%lx", &mdio_child_addr) != 1) {
			OF_prop_free(name);
			continue;
		}
		OF_prop_free(name);

		if (mdio_child_addr != slave_mdio_addr[port] &&
		    mdio_child_addr != (slave_mdio_addr[port] & 0xFFF))
			continue;

		if (fdt_get_phyaddr(child, NULL, &phy, NULL) != 0) {
			/* Users with old DTB will have phy_id instead */
			phy = -1;
			len = OF_getproplen(child, "phy_id");
			if (len / sizeof(pcell_t) == 2) {
				/* Get phy address from fdt */
				if (OF_getencprop(child, "phy_id", phy_id,
				    len) > 0)
					phy = phy_id[1];
			}
		}

		len = OF_getproplen(child, "dual_emac_res_vlan");
		if (len / sizeof(pcell_t) == 1) {
			/* Get VLAN id from fdt */
			if (OF_getencprop(child, "dual_emac_res_vlan",
			    &vlan_id, len) > 0) {
				vlan = vlan_id;
			}
		}

		break;
	}
	if (phy == -1)
		return (ENXIO);
	sc->port[port].phy = phy;
	sc->port[port].vlan = vlan;

	return (0);
}
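/*
 * For reference, a sketch of the FDT fragment cpsw_get_fdt_data() above
 * walks; the node name and values are illustrative only, not copied from
 * a real DTB:
 *
 *	slave@4a100200 {			// matched against slave_mdio_addr[]
 *		phy-handle = <&ethphy0>;	// preferred; via fdt_get_phyaddr()
 *		// old DTBs instead carry:  phy_id = <&davinci_mdio 0>;
 *		dual_emac_res_vlan = <1>;	// port VLAN for dual-EMAC mode
 *	};
 */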
static int
cpsw_attach(device_t dev)
{
	int error, i;
	struct cpsw_softc *sc;
	uint32_t reg;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);
	getbinuptime(&sc->attach_uptime);

	if (OF_getencprop(sc->node, "active_slave", &sc->active_slave,
	    sizeof(sc->active_slave)) <= 0) {
		sc->active_slave = 0;
	}
	if (sc->active_slave > 1)
		sc->active_slave = 1;

	if (OF_hasprop(sc->node, "dual_emac"))
		sc->dualemac = 1;

	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		if (cpsw_get_fdt_data(sc, i) != 0) {
			device_printf(dev,
			    "failed to get PHY address from FDT\n");
			return (ENXIO);
		}
	}

	/* Initialize mutexes */
	mtx_init(&sc->tx.lock, device_get_nameunit(dev),
	    "cpsw TX lock", MTX_DEF);
	mtx_init(&sc->rx.lock, device_get_nameunit(dev),
	    "cpsw RX lock", MTX_DEF);

	/* Allocate IRQ resources */
	error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res);
	if (error) {
		device_printf(dev, "could not allocate IRQ resources\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

	sc->mem_rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->mem_rid, RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(sc->dev, "failed to allocate memory resource\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

	reg = cpsw_read_4(sc, CPSW_SS_IDVER);
	device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7),
	    reg & 0xFF, (reg >> 11) & 0x1F);

	cpsw_add_sysctls(sc);

	/* Allocate a busdma tag and DMA safe memory for mbufs. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES, CPSW_TXFRAGS,	/* maxsize, nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->mbuf_dtag);		/* dmatag */
	if (error) {
		device_printf(dev, "bus_dma_tag_create failed\n");
		cpsw_detach(dev);
		return (error);
	}

	/* Allocate a NULL buffer for padding. */
	sc->nullpad = malloc(ETHER_MIN_LEN, M_DEVBUF, M_WAITOK | M_ZERO);

	cpsw_init_slots(sc);

	/* Allocate slots to TX and RX queues. */
	STAILQ_INIT(&sc->rx.avail);
	STAILQ_INIT(&sc->rx.active);
	STAILQ_INIT(&sc->tx.avail);
	STAILQ_INIT(&sc->tx.active);
	// For now:  128 slots to TX, rest to RX.
	// XXX TODO: start with 32/64 and grow dynamically based on demand.
	if (cpsw_add_slots(sc, &sc->tx, 128) ||
	    cpsw_add_slots(sc, &sc->rx, -1)) {
		device_printf(dev, "failed to allocate dmamaps\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}
	device_printf(dev, "Initial queue size TX=%d RX=%d\n",
	    sc->tx.queue_slots, sc->rx.queue_slots);

	sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0);
	sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0);

	if (cpsw_intr_attach(sc) == -1) {
		device_printf(dev, "failed to setup interrupts\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

#ifdef CPSW_ETHERSWITCH
	for (i = 0; i < CPSW_VLANS; i++)
		cpsw_vgroups[i].vid = -1;
#endif

	/* Reset the controller. */
	cpsw_reset(sc);
	cpsw_init(sc);

	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		sc->port[i].dev = device_add_child(dev, "cpsw", i);
		if (sc->port[i].dev == NULL) {
			cpsw_detach(dev);
			return (ENXIO);
		}
	}
	bus_generic_probe(dev);
	bus_generic_attach(dev);

	return (0);
}
static int
cpsw_detach(device_t dev)
{
	struct cpsw_softc *sc;
	int error, i;

	bus_generic_detach(dev);
	sc = device_get_softc(dev);

	for (i = 0; i < CPSW_PORTS; i++) {
		if (sc->port[i].dev)
			device_delete_child(dev, sc->port[i].dev);
	}

	if (device_is_attached(dev)) {
		callout_stop(&sc->watchdog.callout);
		callout_drain(&sc->watchdog.callout);
	}

	/* Stop and release all interrupts */
	cpsw_intr_detach(sc);

	/* Free dmamaps and mbufs */
	for (i = 0; i < nitems(sc->_slots); ++i)
		cpsw_free_slot(sc, &sc->_slots[i]);

	/* Free null padding buffer. */
	if (sc->nullpad)
		free(sc->nullpad, M_DEVBUF);

	/* Free DMA tag */
	if (sc->mbuf_dtag) {
		error = bus_dma_tag_destroy(sc->mbuf_dtag);
		KASSERT(error == 0, ("Unable to destroy DMA tag"));
	}

	/* Free IO memory handler */
	if (sc->mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem_res);
	bus_release_resources(dev, irq_res_spec, sc->irq_res);

	/* Destroy mutexes */
	mtx_destroy(&sc->rx.lock);
	mtx_destroy(&sc->tx.lock);

	/* Detach the switch device, if present. */
	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	return (device_delete_children(dev));
}

static phandle_t
cpsw_get_node(device_t bus, device_t dev)
{

	/* Share controller node with port device. */
	return (ofw_bus_get_node(bus));
}

static int
cpswp_probe(device_t dev)
{

	if (device_get_unit(dev) > 1) {
		device_printf(dev, "Only two ports are supported.\n");
		return (ENXIO);
	}
	device_set_desc(dev, "Ethernet Switch Port");

	return (BUS_PROBE_DEFAULT);
}

static int
cpswp_attach(device_t dev)
{
	int error;
	if_t ifp;
	struct cpswp_softc *sc;
	uint32_t reg;
	uint8_t mac_addr[ETHER_ADDR_LEN];
	phandle_t opp_table;
	struct syscon *syscon;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->pdev = device_get_parent(dev);
	sc->swsc = device_get_softc(sc->pdev);
	sc->unit = device_get_unit(dev);
	sc->phy = sc->swsc->port[sc->unit].phy;
	sc->vlan = sc->swsc->port[sc->unit].vlan;
	if (sc->swsc->dualemac && sc->vlan == -1)
		sc->vlan = sc->unit + 1;

	if (sc->unit == 0) {
		sc->physel = MDIOUSERPHYSEL0;
		sc->phyaccess = MDIOUSERACCESS0;
	} else {
		sc->physel = MDIOUSERPHYSEL1;
		sc->phyaccess = MDIOUSERACCESS1;
	}

	mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock",
	    MTX_DEF);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		cpswp_detach(dev);
		return (ENXIO);
	}

	if_initname(ifp, device_get_name(sc->dev), sc->unit);
	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);

	if_setcapenable(ifp, if_getcapabilities(ifp));

	if_setinitfn(ifp, cpswp_init);
	if_setstartfn(ifp, cpswp_start);
	if_setioctlfn(ifp, cpswp_ioctl);

	if_setsendqlen(ifp, sc->swsc->tx.queue_slots);
	if_setsendqready(ifp);
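	/*
	 * The MAC addresses for both slave ports live in the control
	 * module (SCM), not in the CPSW register space, which is why the
	 * code below reaches for a syscon handle: mac_id0_hi/lo and
	 * mac_id1_hi/lo are read at SCM_MAC_ID0_HI/LO plus an 8-byte
	 * stride per unit.  (That these are the factory-programmed
	 * addresses is the usual AM335x arrangement, not something this
	 * driver verifies.)
	 */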
	/* FIXME: For now; Go and kidnap syscon from opp-table */
	/* ti,cpsw actually have an optional syscon reference but only for am33xx?? */
	opp_table = OF_finddevice("/opp-table");
	if (opp_table == -1) {
		device_printf(dev, "Can't find /opp-table\n");
		cpswp_detach(dev);
		return (ENXIO);
	}
	if (!OF_hasprop(opp_table, "syscon")) {
		device_printf(dev,
		    "/opp-table doesn't have the required syscon property\n");
		cpswp_detach(dev);
		return (ENXIO);
	}
	if (syscon_get_by_ofw_property(dev, opp_table, "syscon", &syscon) != 0) {
		device_printf(dev, "Failed to get syscon\n");
		cpswp_detach(dev);
		return (ENXIO);
	}

	/* Get high part of MAC address from control module (mac_id[0|1]_hi) */
	reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_HI + sc->unit * 8);
	mac_addr[0] = reg & 0xFF;
	mac_addr[1] = (reg >>  8) & 0xFF;
	mac_addr[2] = (reg >> 16) & 0xFF;
	mac_addr[3] = (reg >> 24) & 0xFF;

	/* Get low part of MAC address from control module (mac_id[0|1]_lo) */
	reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_LO + sc->unit * 8);
	mac_addr[4] = reg & 0xFF;
	mac_addr[5] = (reg >>  8) & 0xFF;

	error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd,
	    cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		cpswp_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Select PHY and enable interrupts */
	cpsw_write_4(sc->swsc, sc->physel,
	    MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F));

	ether_ifattach(sc->ifp, mac_addr);
	callout_init(&sc->mii_callout, 0);

	return (0);
}

static int
cpswp_detach(device_t dev)
{
	struct cpswp_softc *sc;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc->swsc, (""));
	if (device_is_attached(dev)) {
		ether_ifdetach(sc->ifp);
		CPSW_PORT_LOCK(sc);
		cpswp_stop_locked(sc);
		CPSW_PORT_UNLOCK(sc);
		callout_drain(&sc->mii_callout);
	}

	bus_generic_detach(dev);

	if_free(sc->ifp);
	mtx_destroy(&sc->lock);

	return (0);
}

/*
 *
 * Init/Shutdown.
 *
 */

static int
cpsw_ports_down(struct cpsw_softc *sc)
{
	struct cpswp_softc *psc;
	if_t ifp1, ifp2;

	if (!sc->dualemac)
		return (1);
	psc = device_get_softc(sc->port[0].dev);
	ifp1 = psc->ifp;
	psc = device_get_softc(sc->port[1].dev);
	ifp2 = psc->ifp;
	if ((if_getflags(ifp1) & IFF_UP) == 0 &&
	    (if_getflags(ifp2) & IFF_UP) == 0)
		return (1);

	return (0);
}

static void
cpswp_init(void *arg)
{
	struct cpswp_softc *sc = arg;

	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK(sc);
	cpswp_init_locked(arg);
	CPSW_PORT_UNLOCK(sc);
}
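/*
 * In dual-EMAC mode, cpswp_init_locked() below gives each slave port a
 * private port VLAN (dual_emac_res_vlan from the FDT, or unit + 1 as the
 * fallback set in cpswp_attach()) whose ALE member list holds only that
 * port and the host port; this is what keeps the two MACs from bridging
 * traffic to each other through the switch fabric.
 */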
static void
cpswp_init_locked(void *arg)
{
#ifdef CPSW_ETHERSWITCH
	int i;
#endif
	struct cpswp_softc *sc = arg;
	if_t ifp;
	uint32_t reg;

	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK_ASSERT(sc);
	ifp = sc->ifp;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	getbinuptime(&sc->init_uptime);

	if (!sc->swsc->rx.running && !sc->swsc->tx.running) {
		/* Reset the controller. */
		cpsw_reset(sc->swsc);
		cpsw_init(sc->swsc);
	}

	/* Set Slave Mapping. */
	cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210);
	cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1),
	    0x33221100);
	cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2);
	/* Enable MAC RX/TX modules. */
	/* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */
	/* Huh?  Docs call bit 0 "Loopback" some places, "FullDuplex" others. */
	reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
	reg |= CPSW_SL_MACTL_GMII_ENABLE;
	cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);

	/* Initialize ALE: set port to forwarding, initialize addrs */
	cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1),
	    ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD);
	cpswp_ale_update_addresses(sc, 1);

	if (sc->swsc->dualemac) {
		/* Set Port VID. */
		cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1),
		    sc->vlan & 0xfff);
		cpsw_ale_update_vlan_table(sc->swsc, sc->vlan,
		    (1 << (sc->unit + 1)) | (1 << 0), /* Member list */
		    (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */
		    (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */
#ifdef CPSW_ETHERSWITCH
		for (i = 0; i < CPSW_VLANS; i++) {
			if (cpsw_vgroups[i].vid != -1)
				continue;
			cpsw_vgroups[i].vid = sc->vlan;
			break;
		}
#endif
	}

	mii_mediachg(sc->mii);
	callout_reset(&sc->mii_callout, hz, cpswp_tick, sc);
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
}

static int
cpsw_shutdown(device_t dev)
{
	struct cpsw_softc *sc;
	struct cpswp_softc *psc;
	int i;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc, (""));
	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		psc = device_get_softc(sc->port[i].dev);
		CPSW_PORT_LOCK(psc);
		cpswp_stop_locked(psc);
		CPSW_PORT_UNLOCK(psc);
	}

	return (0);
}

static void
cpsw_rx_teardown(struct cpsw_softc *sc)
{
	int i = 0;

	CPSW_RX_LOCK(sc);
	CPSW_DEBUGF(sc, ("starting RX teardown"));
	sc->rx.teardown = 1;
	cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
	CPSW_RX_UNLOCK(sc);
	while (sc->rx.running) {
		if (++i > 10) {
			device_printf(sc->dev,
			    "Unable to cleanly shutdown receiver\n");
			return;
		}
		DELAY(200);
	}
	if (!sc->rx.running)
		CPSW_DEBUGF(sc, ("finished RX teardown (%d retries)", i));
}
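/*
 * The teardown handshake, as implemented here: writing 0 to the
 * RX/TX_TEARDOWN register asks the CPDMA engine to stop channel 0; the
 * engine then hands back a descriptor flagged CPDMA_BD_TDOWNCMPLT, which
 * the dequeue paths treat as the signal to clear the running flag, and
 * the interrupt handlers acknowledge by writing the magic 0xfffffffc
 * completion value back to the channel's CP register.
 */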
static void
cpsw_tx_teardown(struct cpsw_softc *sc)
{
	int i = 0;

	CPSW_TX_LOCK(sc);
	CPSW_DEBUGF(sc, ("starting TX teardown"));
	/* Start the TX queue teardown if queue is not empty. */
	if (STAILQ_FIRST(&sc->tx.active) != NULL)
		cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
	else
		sc->tx.teardown = 1;
	cpsw_tx_dequeue(sc);
	while (sc->tx.running && ++i < 10) {
		DELAY(200);
		cpsw_tx_dequeue(sc);
	}
	if (sc->tx.running) {
		device_printf(sc->dev,
		    "Unable to cleanly shutdown transmitter\n");
	}
	CPSW_DEBUGF(sc,
	    ("finished TX teardown (%d retries, %d idle buffers)", i,
	    sc->tx.active_queue_len));
	CPSW_TX_UNLOCK(sc);
}

static void
cpswp_stop_locked(struct cpswp_softc *sc)
{
	if_t ifp;
	uint32_t reg;

	ifp = sc->ifp;
	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK_ASSERT(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	/* Disable interface */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);

	/* Stop ticker */
	callout_stop(&sc->mii_callout);

	/* Tear down the RX/TX queues. */
	if (cpsw_ports_down(sc->swsc)) {
		cpsw_rx_teardown(sc->swsc);
		cpsw_tx_teardown(sc->swsc);
	}

	/* Stop MAC RX/TX modules. */
	reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
	reg &= ~CPSW_SL_MACTL_GMII_ENABLE;
	cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);

	if (cpsw_ports_down(sc->swsc)) {
		/* Capture stats before we reset controller. */
		cpsw_stats_collect(sc->swsc);

		cpsw_reset(sc->swsc);
		cpsw_init(sc->swsc);
	}
}

/*
 * Suspend/Resume.
 */

static int
cpsw_suspend(device_t dev)
{
	struct cpsw_softc *sc;
	struct cpswp_softc *psc;
	int i;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc, (""));
	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		psc = device_get_softc(sc->port[i].dev);
		CPSW_PORT_LOCK(psc);
		cpswp_stop_locked(psc);
		CPSW_PORT_UNLOCK(psc);
	}

	return (0);
}

static int
cpsw_resume(device_t dev)
{
	struct cpsw_softc *sc;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc, ("UNIMPLEMENTED"));

	return (0);
}

/*
 *
 * IOCTL
 *
 */
1384 */ 1385 reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL); 1386 reg &= ~CPSW_ALE_CTL_BYPASS; 1387 if (set) 1388 reg |= CPSW_ALE_CTL_BYPASS; 1389 cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg); 1390 } 1391 1392 static void 1393 cpsw_set_allmulti(struct cpswp_softc *sc, int set) 1394 { 1395 if (set) { 1396 printf("All-multicast mode unimplemented\n"); 1397 } 1398 } 1399 1400 static int 1401 cpswp_ioctl(if_t ifp, u_long command, caddr_t data) 1402 { 1403 struct cpswp_softc *sc; 1404 struct ifreq *ifr; 1405 int error; 1406 uint32_t changed; 1407 1408 error = 0; 1409 sc = if_getsoftc(ifp); 1410 ifr = (struct ifreq *)data; 1411 1412 switch (command) { 1413 case SIOCSIFCAP: 1414 changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap; 1415 if (changed & IFCAP_HWCSUM) { 1416 if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) 1417 if_setcapenablebit(ifp, IFCAP_HWCSUM, 0); 1418 else 1419 if_setcapenablebit(ifp, 0, IFCAP_HWCSUM); 1420 } 1421 error = 0; 1422 break; 1423 case SIOCSIFFLAGS: 1424 CPSW_PORT_LOCK(sc); 1425 if (if_getflags(ifp) & IFF_UP) { 1426 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1427 changed = if_getflags(ifp) ^ sc->if_flags; 1428 CPSW_DEBUGF(sc->swsc, 1429 ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", 1430 changed)); 1431 if (changed & IFF_PROMISC) 1432 cpsw_set_promisc(sc, 1433 if_getflags(ifp) & IFF_PROMISC); 1434 if (changed & IFF_ALLMULTI) 1435 cpsw_set_allmulti(sc, 1436 if_getflags(ifp) & IFF_ALLMULTI); 1437 } else { 1438 CPSW_DEBUGF(sc->swsc, 1439 ("SIOCSIFFLAGS: starting up")); 1440 cpswp_init_locked(sc); 1441 } 1442 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1443 CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: shutting down")); 1444 cpswp_stop_locked(sc); 1445 } 1446 1447 sc->if_flags = if_getflags(ifp); 1448 CPSW_PORT_UNLOCK(sc); 1449 break; 1450 case SIOCADDMULTI: 1451 cpswp_ale_update_addresses(sc, 0); 1452 break; 1453 case SIOCDELMULTI: 1454 /* Ugh. DELMULTI doesn't provide the specific address 1455 being removed, so the best we can do is remove 1456 everything and rebuild it all. 
		cpswp_ale_update_addresses(sc, 1);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

/*
 *
 * MIIBUS
 *
 */
static int
cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg)
{
	uint32_t r, retries = CPSW_MIIBUS_RETRIES;

	while (--retries) {
		r = cpsw_read_4(sc, reg);
		if ((r & MDIO_PHYACCESS_GO) == 0)
			return (1);
		DELAY(CPSW_MIIBUS_DELAY);
	}

	return (0);
}

static int
cpswp_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cpswp_softc *sc;
	uint32_t cmd, r;

	sc = device_get_softc(dev);
	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
		device_printf(dev, "MDIO not ready to read\n");
		return (0);
	}

	/* Set GO, reg, phy */
	cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
	cpsw_write_4(sc->swsc, sc->phyaccess, cmd);

	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
		device_printf(dev, "MDIO timed out during read\n");
		return (0);
	}

	r = cpsw_read_4(sc->swsc, sc->phyaccess);
	if ((r & MDIO_PHYACCESS_ACK) == 0) {
		device_printf(dev, "Failed to read from PHY.\n");
		r = 0;
	}
	return (r & 0xFFFF);
}

static int
cpswp_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct cpswp_softc *sc;
	uint32_t cmd;

	sc = device_get_softc(dev);
	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
		device_printf(dev, "MDIO not ready to write\n");
		return (0);
	}

	/* Set GO, WRITE, reg, phy, and value */
	cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE |
	    (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF);
	cpsw_write_4(sc->swsc, sc->phyaccess, cmd);

	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
		device_printf(dev, "MDIO timed out during write\n");
		return (0);
	}

	return (0);
}

static void
cpswp_miibus_statchg(device_t dev)
{
	struct cpswp_softc *sc;
	uint32_t mac_control, reg;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc->swsc, (""));

	reg = CPSW_SL_MACCONTROL(sc->unit);
	mac_control = cpsw_read_4(sc->swsc, reg);
	mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A |
	    CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX);

	switch(IFM_SUBTYPE(sc->mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		mac_control |= CPSW_SL_MACTL_GIG;
		break;

	case IFM_100_TX:
		mac_control |= CPSW_SL_MACTL_IFCTL_A;
		break;
	}
	if (sc->mii->mii_media_active & IFM_FDX)
		mac_control |= CPSW_SL_MACTL_FULLDUPLEX;

	cpsw_write_4(sc->swsc, reg, mac_control);
}
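/*
 * For reference, the MDIOUSERACCESSn layout the accessors above assume.
 * The REGADR/PHYADR/DATA shifts are visible in the code; the GO, WRITE
 * and ACK bit positions come from the MDIO_PHYACCESS_* constants in
 * if_cpswreg.h:
 *
 *	bit 31: GO	self-clearing; set to start, clear means idle
 *	bit 30: WRITE	1 = write transaction
 *	bit 29: ACK	set on a successful read
 *	bits 25-21: REGADR, bits 20-16: PHYADR, bits 15-0: DATA
 */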
/*
 *
 * Transmit/Receive Packets.
 *
 */
static void
cpsw_intr_rx(void *arg)
{
	struct cpsw_softc *sc;
	if_t ifp;
	struct mbuf *received, *next;

	sc = (struct cpsw_softc *)arg;
	CPSW_RX_LOCK(sc);
	if (sc->rx.teardown) {
		sc->rx.running = 0;
		sc->rx.teardown = 0;
		cpsw_write_cp(sc, &sc->rx, 0xfffffffc);
	}
	received = cpsw_rx_dequeue(sc);
	cpsw_rx_enqueue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
	CPSW_RX_UNLOCK(sc);

	while (received != NULL) {
		next = received->m_nextpkt;
		received->m_nextpkt = NULL;
		ifp = received->m_pkthdr.rcvif;
		if_input(ifp, received);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		received = next;
	}
}

static struct mbuf *
cpsw_rx_dequeue(struct cpsw_softc *sc)
{
	int nsegs, port, removed;
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *last, *slot;
	struct cpswp_softc *psc;
	struct mbuf *m, *m0, *mb_head, *mb_tail;
	uint16_t m0_flags;

	nsegs = 0;
	m0 = NULL;
	last = NULL;
	mb_head = NULL;
	mb_tail = NULL;
	removed = 0;

	/* Pull completed packets off hardware RX queue. */
	while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
		cpsw_cpdma_read_bd(sc, slot, &bd);

		/*
		 * Stop on packets still in use by hardware, but do not stop
		 * on packets with the teardown complete flag, they will be
		 * discarded later.
		 */
		if ((bd.flags & (CPDMA_BD_OWNER | CPDMA_BD_TDOWNCMPLT)) ==
		    CPDMA_BD_OWNER)
			break;

		last = slot;
		++removed;
		STAILQ_REMOVE_HEAD(&sc->rx.active, next);
		STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next);

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);

		m = slot->mbuf;
		slot->mbuf = NULL;

		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			CPSW_DEBUGF(sc, ("RX teardown is complete"));
			m_freem(m);
			sc->rx.running = 0;
			sc->rx.teardown = 0;
			break;
		}

		port = (bd.flags & CPDMA_BD_PORT_MASK) - 1;
		KASSERT(port >= 0 && port <= 1,
		    ("packet received with invalid port: %d", port));
		psc = device_get_softc(sc->port[port].dev);

		/* Set up mbuf */
		m->m_data += bd.bufoff;
		m->m_len = bd.buflen;
		if (bd.flags & CPDMA_BD_SOP) {
			m->m_pkthdr.len = bd.pktlen;
			m->m_pkthdr.rcvif = psc->ifp;
			m->m_flags |= M_PKTHDR;
			m0_flags = bd.flags;
			m0 = m;
		}
		nsegs++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		if (bd.flags & CPDMA_BD_EOP && m0 != NULL) {
			if (m0_flags & CPDMA_BD_PASS_CRC)
				m_adj(m0, -ETHER_CRC_LEN);
			m0_flags = 0;
			m0 = NULL;
			if (nsegs > sc->rx.longest_chain)
				sc->rx.longest_chain = nsegs;
			nsegs = 0;
		}

		if ((if_getcapenable(psc->ifp) & IFCAP_RXCSUM) != 0) {
			/* check for valid CRC by looking into pkt_err[5:4] */
			if ((bd.flags &
			    (CPDMA_BD_SOP | CPDMA_BD_PKT_ERR_MASK)) ==
			    CPDMA_BD_SOP) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		if (STAILQ_FIRST(&sc->rx.active) != NULL &&
		    (bd.flags & (CPDMA_BD_EOP | CPDMA_BD_EOQ)) ==
		    (CPDMA_BD_EOP | CPDMA_BD_EOQ)) {
			cpsw_write_hdp_slot(sc, &sc->rx,
			    STAILQ_FIRST(&sc->rx.active));
			sc->rx.queue_restart++;
		}
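		/*
		 * A note on the HDP rewrite just above: the CPDMA engine
		 * parks when it consumes a descriptor with EOQ set, so if
		 * new buffers were chained behind that descriptor after
		 * the hardware had already read its next pointer, the
		 * queue must be restarted by writing the head descriptor
		 * pointer again.
		 */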
		/* Add mbuf to packet list to be returned. */
		if (mb_tail != NULL && (bd.flags & CPDMA_BD_SOP)) {
			mb_tail->m_nextpkt = m;
		} else if (mb_tail != NULL) {
			mb_tail->m_next = m;
		} else if (mb_tail == NULL && (bd.flags & CPDMA_BD_SOP) == 0) {
			if (bootverbose)
				printf(
				    "%s: %s: discarding fragment packet w/o header\n",
				    __func__, if_name(psc->ifp));
			m_freem(m);
			continue;
		} else {
			mb_head = m;
		}
		mb_tail = m;
	}

	if (removed != 0) {
		cpsw_write_cp_slot(sc, &sc->rx, last);
		sc->rx.queue_removes += removed;
		sc->rx.avail_queue_len += removed;
		sc->rx.active_queue_len -= removed;
		if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len)
			sc->rx.max_avail_queue_len = sc->rx.avail_queue_len;
		CPSW_DEBUGF(sc,
		    ("Removed %d received packet(s) from RX queue", removed));
	}

	return (mb_head);
}

static void
cpsw_rx_enqueue(struct cpsw_softc *sc)
{
	bus_dma_segment_t seg[1];
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *first_new_slot, *last_old_slot, *next, *slot;
	int error, nsegs, added = 0;

	/* Register new mbufs with hardware. */
	first_new_slot = NULL;
	last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next);
	while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) {
		if (first_new_slot == NULL)
			first_new_slot = slot;
		if (slot->mbuf == NULL) {
			slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
			if (slot->mbuf == NULL) {
				device_printf(sc->dev,
				    "Unable to fill RX queue\n");
				break;
			}
			slot->mbuf->m_len =
			    slot->mbuf->m_pkthdr.len =
			    slot->mbuf->m_ext.ext_size;
		}

		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
		    slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);

		KASSERT(nsegs == 1,
		    ("More than one segment (nsegs=%d)", nsegs));
		KASSERT(error == 0, ("DMA error (error=%d)", error));
		if (error != 0 || nsegs != 1) {
			device_printf(sc->dev,
			    "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
			    __func__, nsegs, error);
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap,
		    BUS_DMASYNC_PREREAD);

		/* Create and submit new rx descriptor. */
		if ((next = STAILQ_NEXT(slot, next)) != NULL)
			bd.next = cpsw_cpdma_bd_paddr(sc, next);
		else
			bd.next = 0;
		bd.bufptr = seg->ds_addr;
		bd.bufoff = 0;
		bd.buflen = MCLBYTES - 1;
		bd.pktlen = bd.buflen;
		bd.flags = CPDMA_BD_OWNER;
		cpsw_cpdma_write_bd(sc, slot, &bd);
		++added;

		STAILQ_REMOVE_HEAD(&sc->rx.avail, next);
		STAILQ_INSERT_TAIL(&sc->rx.active, slot, next);
	}

	if (added == 0 || first_new_slot == NULL)
		return;

	CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added));

	/* Link new entries to hardware RX queue. */
	if (last_old_slot == NULL) {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
	} else {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
	}
	sc->rx.queue_adds += added;
	sc->rx.avail_queue_len -= added;
	sc->rx.active_queue_len += added;
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), added);
	if (sc->rx.active_queue_len > sc->rx.max_active_queue_len)
		sc->rx.max_active_queue_len = sc->rx.active_queue_len;
}

static void
cpswp_start(if_t ifp)
{
	struct cpswp_softc *sc;

	sc = if_getsoftc(ifp);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
	    sc->swsc->tx.running == 0) {
		return;
	}
	CPSW_TX_LOCK(sc->swsc);
	cpswp_tx_enqueue(sc);
	cpsw_tx_dequeue(sc->swsc);
	CPSW_TX_UNLOCK(sc->swsc);
}

static void
cpsw_intr_tx(void *arg)
{
	struct cpsw_softc *sc;

	sc = (struct cpsw_softc *)arg;
	CPSW_TX_LOCK(sc);
	if (cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)) == 0xfffffffc)
		cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
	cpsw_tx_dequeue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 2);
	CPSW_TX_UNLOCK(sc);
}

static void
cpswp_tx_enqueue(struct cpswp_softc *sc)
{
	bus_dma_segment_t segs[CPSW_TXFRAGS];
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *first_new_slot, *last, *last_old_slot, *next, *slot;
	struct mbuf *m0;
	int error, nsegs, seg, added = 0, padlen;

	/* Pull pending packets from IF queue and prep them for DMA. */
	last = NULL;
	first_new_slot = NULL;
	last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next);
	while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) {
		m0 = if_dequeue(sc->ifp);
		if (m0 == NULL)
			break;

		slot->mbuf = m0;
		padlen = ETHER_MIN_LEN - ETHER_CRC_LEN - m0->m_pkthdr.len;
		if (padlen < 0)
			padlen = 0;
		else if (padlen > 0)
			m_append(slot->mbuf, padlen, sc->swsc->nullpad);

		/* Create mapping in DMA memory */
		error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag,
		    slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
		/* If the packet is too fragmented, try to simplify. */
		if (error == EFBIG ||
		    (error == 0 && nsegs > sc->swsc->tx.avail_queue_len)) {
			bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
			m0 = m_defrag(slot->mbuf, M_NOWAIT);
			if (m0 == NULL) {
				device_printf(sc->dev,
				    "Can't defragment packet; dropping\n");
				m_freem(slot->mbuf);
			} else {
				CPSW_DEBUGF(sc->swsc,
				    ("Requeueing defragmented packet"));
				if_sendq_prepend(sc->ifp, m0);
			}
			slot->mbuf = NULL;
			continue;
		}
		if (error != 0) {
			device_printf(sc->dev,
			    "%s: Can't setup DMA (error=%d), dropping packet\n",
			    __func__, error);
			bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap,
		    BUS_DMASYNC_PREWRITE);

		CPSW_DEBUGF(sc->swsc,
		    ("Queueing TX packet: %d segments + %d pad bytes",
		    nsegs, padlen));

		if (first_new_slot == NULL)
			first_new_slot = slot;

		/* Link from the previous descriptor. */
		if (last != NULL)
			cpsw_cpdma_write_bd_next(sc->swsc, last, slot);

		slot->ifp = sc->ifp;

		/*
		 * If there is only one segment, the for() loop
		 * gets skipped and the single buffer gets set up
		 * as both SOP and EOP.
		 */
		if (nsegs > 1) {
			next = STAILQ_NEXT(slot, next);
			bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next);
		} else
			bd.next = 0;
		/* Start by setting up the first buffer. */
		bd.bufptr = segs[0].ds_addr;
		bd.bufoff = 0;
		bd.buflen = segs[0].ds_len;
		bd.pktlen = m_length(slot->mbuf, NULL);
		bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER;
		if (sc->swsc->dualemac) {
			bd.flags |= CPDMA_BD_TO_PORT;
			bd.flags |= ((sc->unit + 1) & CPDMA_BD_PORT_MASK);
		}
		for (seg = 1; seg < nsegs; ++seg) {
			/* Save the previous buffer (which isn't EOP) */
			cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
			STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
			STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next);
			slot = STAILQ_FIRST(&sc->swsc->tx.avail);

			/* Setup next buffer (which isn't SOP) */
			if (nsegs > seg + 1) {
				next = STAILQ_NEXT(slot, next);
				bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next);
			} else
				bd.next = 0;
			bd.bufptr = segs[seg].ds_addr;
			bd.bufoff = 0;
			bd.buflen = segs[seg].ds_len;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_OWNER;
		}

		/* Save the final buffer. */
		bd.flags |= CPDMA_BD_EOP;
		cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
		STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
		STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next);

		last = slot;
		added += nsegs;
		if (nsegs > sc->swsc->tx.longest_chain)
			sc->swsc->tx.longest_chain = nsegs;

		BPF_MTAP(sc->ifp, m0);
	}

	if (first_new_slot == NULL)
		return;

	/* Attach the list of new buffers to the hardware TX queue. */
	if (last_old_slot != NULL &&
	    (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) &
	    CPDMA_BD_EOQ) == 0) {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot,
		    first_new_slot);
	} else {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot);
	}
	sc->swsc->tx.queue_adds += added;
	sc->swsc->tx.avail_queue_len -= added;
	sc->swsc->tx.active_queue_len += added;
	if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) {
		sc->swsc->tx.max_active_queue_len =
		    sc->swsc->tx.active_queue_len;
	}
	CPSW_DEBUGF(sc->swsc, ("Queued %d TX packet(s)", added));
}

static int
cpsw_tx_dequeue(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot, *last_removed_slot = NULL;
	struct cpsw_cpdma_bd bd;
	uint32_t flags, removed = 0;

	/* Pull completed buffers off the hardware TX queue. */
	slot = STAILQ_FIRST(&sc->tx.active);
	while (slot != NULL) {
		flags = cpsw_cpdma_read_bd_flags(sc, slot);

		/* TearDown complete is only marked on the SOP for the packet. */
		if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) ==
		    (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) {
			sc->tx.teardown = 1;
		}

		if ((flags & (CPDMA_BD_SOP | CPDMA_BD_OWNER)) ==
		    (CPDMA_BD_SOP | CPDMA_BD_OWNER) && sc->tx.teardown == 0)
			break; /* Hardware is still using this packet. */
		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
		m_freem(slot->mbuf);
		slot->mbuf = NULL;

		if (slot->ifp) {
			if (sc->tx.teardown == 0)
				if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1);
			else
				if_inc_counter(slot->ifp, IFCOUNTER_OQDROPS, 1);
		}

		/* Dequeue any additional buffers used by this packet. */
		while (slot != NULL && slot->mbuf == NULL) {
			STAILQ_REMOVE_HEAD(&sc->tx.active, next);
			STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next);
			++removed;
			last_removed_slot = slot;
			slot = STAILQ_FIRST(&sc->tx.active);
		}

		cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot);

		/* Restart the TX queue if necessary. */
		cpsw_cpdma_read_bd(sc, last_removed_slot, &bd);
		if (slot != NULL && bd.next != 0 && (bd.flags &
		    (CPDMA_BD_EOP | CPDMA_BD_OWNER | CPDMA_BD_EOQ)) ==
		    (CPDMA_BD_EOP | CPDMA_BD_EOQ)) {
			cpsw_write_hdp_slot(sc, &sc->tx, slot);
			sc->tx.queue_restart++;
			break;
		}
	}

	if (removed != 0) {
		sc->tx.queue_removes += removed;
		sc->tx.active_queue_len -= removed;
		sc->tx.avail_queue_len += removed;
		if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len)
			sc->tx.max_avail_queue_len = sc->tx.avail_queue_len;
		CPSW_DEBUGF(sc, ("TX removed %d completed packet(s)", removed));
	}

	if (sc->tx.teardown && STAILQ_EMPTY(&sc->tx.active)) {
		CPSW_DEBUGF(sc, ("TX teardown is complete"));
		sc->tx.teardown = 0;
		sc->tx.running = 0;
	}

	return (removed);
}

/*
 *
 * Miscellaneous interrupts.
 *
 */

static void
cpsw_intr_rx_thresh(void *arg)
{
	struct cpsw_softc *sc;
	if_t ifp;
	struct mbuf *received, *next;

	sc = (struct cpsw_softc *)arg;
	CPSW_RX_LOCK(sc);
	received = cpsw_rx_dequeue(sc);
	cpsw_rx_enqueue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
	CPSW_RX_UNLOCK(sc);

	while (received != NULL) {
		next = received->m_nextpkt;
		received->m_nextpkt = NULL;
		ifp = received->m_pkthdr.rcvif;
		if_input(ifp, received);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		received = next;
	}
}

static void
cpsw_intr_misc_host_error(struct cpsw_softc *sc)
{
	uint32_t intstat;
	uint32_t dmastat;
	int txerr, rxerr, txchan, rxchan;

	printf("\n\n");
	device_printf(sc->dev,
	    "HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n");
	printf("\n\n");
	intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
	device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat);
	dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
	device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat);

	txerr = (dmastat >> 20) & 15;
	txchan = (dmastat >> 16) & 7;
	rxerr = (dmastat >> 12) & 15;
	rxchan = (dmastat >> 8) & 7;
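
	/*
	 * The shifts above decode the host-error fields of
	 * CPSW_CPDMA_DMASTATUS: the TX error code is in bits 23:20, the
	 * offending TX channel in bits 18:16, the RX error code in bits
	 * 15:12 and the RX channel in bits 10:8.
	 */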
on TX channel %d\n", txchan); 2131 break; 2132 default: printf("Unknown error on TX channel %d\n", txchan); 2133 break; 2134 } 2135 2136 if (txerr != 0) { 2137 printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 2138 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan))); 2139 printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 2140 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan))); 2141 cpsw_dump_queue(sc, &sc->tx.active); 2142 } 2143 2144 switch (rxerr) { 2145 case 0: break; 2146 case 2: printf("Ownership bit not set on RX channel %d\n", rxchan); 2147 break; 2148 case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan); 2149 break; 2150 case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan); 2151 break; 2152 case 6: printf("Buffer offset too big on RX channel %d\n", rxchan); 2153 break; 2154 default: printf("Unknown RX error on RX channel %d\n", rxchan); 2155 break; 2156 } 2157 2158 if (rxerr != 0) { 2159 printf("CPSW_CPDMA_RX%d_HDP=0x%x\n", 2160 rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan))); 2161 printf("CPSW_CPDMA_RX%d_CP=0x%x\n", 2162 rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan))); 2163 cpsw_dump_queue(sc, &sc->rx.active); 2164 } 2165 2166 printf("\nALE Table\n"); 2167 cpsw_ale_dump_table(sc); 2168 2169 // XXX do something useful here?? 2170 panic("CPSW HOST ERROR INTERRUPT"); 2171 2172 // Suppress this interrupt in the future. 2173 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat); 2174 printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n"); 2175 // The watchdog will probably reset the controller 2176 // in a little while. It will probably fail again. 2177 } 2178 2179 static void 2180 cpsw_intr_misc(void *arg) 2181 { 2182 struct cpsw_softc *sc = arg; 2183 uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0)); 2184 2185 if (stat & CPSW_WR_C_MISC_EVNT_PEND) 2186 CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented")); 2187 if (stat & CPSW_WR_C_MISC_STAT_PEND) 2188 cpsw_stats_collect(sc); 2189 if (stat & CPSW_WR_C_MISC_HOST_PEND) 2190 cpsw_intr_misc_host_error(sc); 2191 if (stat & CPSW_WR_C_MISC_MDIOLINK) { 2192 cpsw_write_4(sc, MDIOLINKINTMASKED, 2193 cpsw_read_4(sc, MDIOLINKINTMASKED)); 2194 } 2195 if (stat & CPSW_WR_C_MISC_MDIOUSER) { 2196 CPSW_DEBUGF(sc, 2197 ("MDIO operation completed interrupt unimplemented")); 2198 } 2199 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3); 2200 } 2201 2202 /* 2203 * 2204 * Periodic Checks and Watchdog. 
2205 * 2206 */ 2207 2208 static void 2209 cpswp_tick(void *msc) 2210 { 2211 struct cpswp_softc *sc = msc; 2212 2213 /* Check for media type change */ 2214 mii_tick(sc->mii); 2215 if (sc->media_status != sc->mii->mii_media.ifm_media) { 2216 printf("%s: media type changed (ifm_media=%x)\n", __func__, 2217 sc->mii->mii_media.ifm_media); 2218 cpswp_ifmedia_upd(sc->ifp); 2219 } 2220 2221 /* Schedule another timeout one second from now */ 2222 callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); 2223 } 2224 2225 static void 2226 cpswp_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) 2227 { 2228 struct cpswp_softc *sc; 2229 struct mii_data *mii; 2230 2231 sc = if_getsoftc(ifp); 2232 CPSW_DEBUGF(sc->swsc, ("")); 2233 CPSW_PORT_LOCK(sc); 2234 2235 mii = sc->mii; 2236 mii_pollstat(mii); 2237 2238 ifmr->ifm_active = mii->mii_media_active; 2239 ifmr->ifm_status = mii->mii_media_status; 2240 CPSW_PORT_UNLOCK(sc); 2241 } 2242 2243 static int 2244 cpswp_ifmedia_upd(if_t ifp) 2245 { 2246 struct cpswp_softc *sc; 2247 2248 sc = if_getsoftc(ifp); 2249 CPSW_DEBUGF(sc->swsc, ("")); 2250 CPSW_PORT_LOCK(sc); 2251 mii_mediachg(sc->mii); 2252 sc->media_status = sc->mii->mii_media.ifm_media; 2253 CPSW_PORT_UNLOCK(sc); 2254 2255 return (0); 2256 } 2257 2258 static void 2259 cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc) 2260 { 2261 struct cpswp_softc *psc; 2262 int i; 2263 2264 cpsw_debugf_head("CPSW watchdog"); 2265 device_printf(sc->dev, "watchdog timeout\n"); 2266 printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 0, 2267 cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0))); 2268 printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 0, 2269 cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0))); 2270 cpsw_dump_queue(sc, &sc->tx.active); 2271 for (i = 0; i < CPSW_PORTS; i++) { 2272 if (!sc->dualemac && i != sc->active_slave) 2273 continue; 2274 psc = device_get_softc(sc->port[i].dev); 2275 CPSW_PORT_LOCK(psc); 2276 cpswp_stop_locked(psc); 2277 CPSW_PORT_UNLOCK(psc); 2278 } 2279 } 2280 2281 static void 2282 cpsw_tx_watchdog(void *msc) 2283 { 2284 struct cpsw_softc *sc; 2285 2286 sc = msc; 2287 CPSW_TX_LOCK(sc); 2288 if (sc->tx.active_queue_len == 0 || !sc->tx.running) { 2289 sc->watchdog.timer = 0; /* Nothing to do. */ 2290 } else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) { 2291 sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */ 2292 } else if (cpsw_tx_dequeue(sc) > 0) { 2293 sc->watchdog.timer = 0; /* We just did something. */ 2294 } else { 2295 /* There was something to do but it didn't get done. */ 2296 ++sc->watchdog.timer; 2297 if (sc->watchdog.timer > 5) { 2298 sc->watchdog.timer = 0; 2299 ++sc->watchdog.resets; 2300 cpsw_tx_watchdog_full_reset(sc); 2301 } 2302 } 2303 sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes; 2304 CPSW_TX_UNLOCK(sc); 2305 2306 /* Schedule another timeout one second from now */ 2307 callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); 2308 } 2309 2310 /* 2311 * 2312 * ALE support routines. 
2313 * 2314 */ 2315 2316 static void 2317 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) 2318 { 2319 cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023); 2320 ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0); 2321 ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1); 2322 ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2); 2323 } 2324 2325 static void 2326 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) 2327 { 2328 cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]); 2329 cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]); 2330 cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]); 2331 cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023)); 2332 } 2333 2334 static void 2335 cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc) 2336 { 2337 int i; 2338 uint32_t ale_entry[3]; 2339 2340 /* First four entries are link address and broadcast. */ 2341 for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { 2342 cpsw_ale_read_entry(sc, i, ale_entry); 2343 if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR || 2344 ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) && 2345 ALE_MCAST(ale_entry) == 1) { /* MCast link addr */ 2346 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; 2347 cpsw_ale_write_entry(sc, i, ale_entry); 2348 } 2349 } 2350 } 2351 2352 static int 2353 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan, 2354 uint8_t *mac) 2355 { 2356 int free_index = -1, matching_index = -1, i; 2357 uint32_t ale_entry[3], ale_type; 2358 2359 /* Find a matching entry or a free entry. */ 2360 for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { 2361 cpsw_ale_read_entry(sc, i, ale_entry); 2362 2363 /* Entry Type[61:60] is 0 for free entry */ 2364 if (free_index < 0 && ALE_TYPE(ale_entry) == 0) 2365 free_index = i; 2366 2367 if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) && 2368 (((ale_entry[1] >> 0) & 0xFF) == mac[1]) && 2369 (((ale_entry[0] >>24) & 0xFF) == mac[2]) && 2370 (((ale_entry[0] >>16) & 0xFF) == mac[3]) && 2371 (((ale_entry[0] >> 8) & 0xFF) == mac[4]) && 2372 (((ale_entry[0] >> 0) & 0xFF) == mac[5])) { 2373 matching_index = i; 2374 break; 2375 } 2376 } 2377 2378 if (matching_index < 0) { 2379 if (free_index < 0) 2380 return (ENOMEM); 2381 i = free_index; 2382 } 2383 2384 if (vlan != -1) 2385 ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16; 2386 else 2387 ale_type = ALE_TYPE_ADDR << 28; 2388 2389 /* Set MAC address */ 2390 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 2391 ale_entry[1] = mac[0] << 8 | mac[1]; 2392 2393 /* Entry type[61:60] and Mcast fwd state[63:62] is fw(3). 
	ale_entry[1] |= ALE_MCAST_FWD | ale_type;

	/* Set portmask [68:66] */
	ale_entry[2] = (portmap & 7) << 2;

	cpsw_ale_write_entry(sc, i, ale_entry);

	return (0);
}

static void
cpsw_ale_dump_table(struct cpsw_softc *sc)
{
	int i;
	uint32_t ale_entry[3];

	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);
		switch (ALE_TYPE(ale_entry)) {
		case ALE_TYPE_VLAN:
			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
			    ale_entry[1], ale_entry[0]);
			printf("type: %u ", ALE_TYPE(ale_entry));
			printf("vlan: %u ", ALE_VLAN(ale_entry));
			printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry));
			printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry));
			printf("unreg flood: %u ",
			    ALE_VLAN_UNREGFLOOD(ale_entry));
			printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry));
			printf("\n");
			break;
		case ALE_TYPE_ADDR:
		case ALE_TYPE_VLAN_ADDR:
			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
			    ale_entry[1], ale_entry[0]);
			printf("type: %u ", ALE_TYPE(ale_entry));
			printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ",
			    (ale_entry[1] >> 8) & 0xFF,
			    (ale_entry[1] >> 0) & 0xFF,
			    (ale_entry[0] >> 24) & 0xFF,
			    (ale_entry[0] >> 16) & 0xFF,
			    (ale_entry[0] >> 8) & 0xFF,
			    (ale_entry[0] >> 0) & 0xFF);
			printf(ALE_MCAST(ale_entry) ? "mcast " : "ucast ");
			if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR)
				printf("vlan: %u ", ALE_VLAN(ale_entry));
			printf("port: %u ", ALE_PORTS(ale_entry));
			printf("\n");
			break;
		}
	}
	printf("\n");
}

static u_int
cpswp_set_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct cpswp_softc *sc = arg;
	uint32_t portmask;

	if (sc->swsc->dualemac)
		portmask = 1 << (sc->unit + 1) | 1 << 0;
	else
		portmask = 7;

	cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan, LLADDR(sdl));

	return (1);
}

static int
cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge)
{
	uint8_t *mac;
	uint32_t ale_entry[3], ale_type, portmask;

	if (sc->swsc->dualemac) {
		ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16;
		portmask = 1 << (sc->unit + 1) | 1 << 0;
	} else {
		ale_type = ALE_TYPE_ADDR << 28;
		portmask = 7;
	}

	/*
	 * Route incoming packets for our MAC address to Port 0 (host).
	 * For simplicity, keep this entry at table index 0 for port 1 and
	 * at index 2 for port 2 in the ALE.
	 */
	mac = LLADDR((struct sockaddr_dl *)if_getifaddr(sc->ifp)->ifa_addr);
	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
	ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */
	ale_entry[2] = 0; /* port = 0 */
	cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry);

	/* Set outgoing MAC Address for slave port. */
	cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1),
	    mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
	cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1),
	    mac[5] << 8 | mac[4]);

	/* Keep the broadcast address at table entry 1 (or 3). */
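	/*
	 * (Port 1 thus owns ALE indices 0 and 1, port 2 indices 2 and 3;
	 * cpsw_ale_mc_entry_set() allocates dynamic multicast entries
	 * starting at index 10, above all of the fixed entries.)
	 */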
	ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */
	/* ALE_MCAST_FWD, Addr type, upper 16 bits of Mac */
	ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff;
	ale_entry[2] = portmask << 2;
	cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry);

	/*
	 * SIOCDELMULTI doesn't specify the particular address
	 * being removed, so we have to remove all and rebuild.
	 */
	if (purge)
		cpsw_ale_remove_all_mc_entries(sc->swsc);

	/* Set other multicast addrs desired. */
	if_foreach_llmaddr(sc->ifp, cpswp_set_maddr, sc);

	return (0);
}

static int
cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports,
    int untag, int mcregflood, int mcunregflood)
{
	int free_index, i, matching_index;
	uint32_t ale_entry[3];

	free_index = matching_index = -1;
	/* Find a matching entry or a free entry. */
	for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);

		/* Entry Type[61:60] is 0 for free entry */
		if (free_index < 0 && ALE_TYPE(ale_entry) == 0)
			free_index = i;

		if (ALE_VLAN(ale_entry) == vlan) {
			matching_index = i;
			break;
		}
	}

	if (matching_index < 0) {
		if (free_index < 0)
			return (-1);
		i = free_index;
	}

	ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 |
	    (mcunregflood & 7) << 8 | (ports & 7);
	ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16;
	ale_entry[2] = 0;
	cpsw_ale_write_entry(sc, i, ale_entry);

	return (0);
}

/*
 *
 * Statistics and Sysctls.
 *
 */

#if 0
static void
cpsw_stats_dump(struct cpsw_softc *sc)
{
	int i;
	uint32_t r;

	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
		    cpsw_stat_sysctls[i].reg);
		CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid,
		    (intmax_t)sc->shadow_stats[i], r,
		    (intmax_t)sc->shadow_stats[i] + r));
	}
}
#endif

static void
cpsw_stats_collect(struct cpsw_softc *sc)
{
	int i;
	uint32_t r;

	CPSW_DEBUGF(sc, ("Controller shadow statistics updated."));

	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
		    cpsw_stat_sysctls[i].reg);
		sc->shadow_stats[i] += r;
		cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg,
		    r);
	}
}

static int
cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct cpsw_stat *stat;
	uint64_t result;

	sc = (struct cpsw_softc *)arg1;
	stat = &cpsw_stat_sysctls[oidp->oid_number];
	result = sc->shadow_stats[oidp->oid_number];
	result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg);
	return (sysctl_handle_64(oidp, &result, 0, req));
}

static int
cpsw_stat_attached(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct bintime t;
	unsigned result;

	sc = (struct cpsw_softc *)arg1;
	getbinuptime(&t);
	bintime_sub(&t, &sc->attach_uptime);
	result = t.sec;
	return (sysctl_handle_int(oidp, &result, 0, req));
}

static int
cpsw_intr_coalesce(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct cpsw_softc *sc;
	uint32_t ctrl, intr_per_ms;

	sc = (struct cpsw_softc *)arg1;
	error = sysctl_handle_int(oidp, &sc->coal_us, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
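
	/*
	 * coal_us is the requested minimum interval between interrupts
	 * in microseconds.  The hardware instead counts interrupts per
	 * millisecond, so the "1000 / coal_us" below converts one into
	 * the other; e.g. coal_us = 500 allows at most 2 interrupts/ms.
	 */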

	ctrl = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
	ctrl &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
	if (sc->coal_us == 0) {
		/* Disable the interrupt pace hardware. */
		cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);
		cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), 0);
		cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), 0);
		return (0);
	}

	if (sc->coal_us > CPSW_WR_C_IMAX_US_MAX)
		sc->coal_us = CPSW_WR_C_IMAX_US_MAX;
	if (sc->coal_us < CPSW_WR_C_IMAX_US_MIN)
		sc->coal_us = CPSW_WR_C_IMAX_US_MIN;
	intr_per_ms = 1000 / sc->coal_us;
	/* Just to make sure... */
	if (intr_per_ms > CPSW_WR_C_IMAX_MAX)
		intr_per_ms = CPSW_WR_C_IMAX_MAX;
	if (intr_per_ms < CPSW_WR_C_IMAX_MIN)
		intr_per_ms = CPSW_WR_C_IMAX_MIN;

	/* Set the prescale to produce 4us pulses from the 125 Mhz clock. */
	ctrl |= (125 * 4) & CPSW_WR_INT_PRESCALE_MASK;

	/* Enable the interrupt pace hardware. */
	cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), intr_per_ms);
	cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), intr_per_ms);
	ctrl |= CPSW_WR_INT_C0_RX_PULSE | CPSW_WR_INT_C0_TX_PULSE;
	cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);

	return (0);
}

static int
cpsw_stat_uptime(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *swsc;
	struct cpswp_softc *sc;
	struct bintime t;
	unsigned result;

	swsc = arg1;
	sc = device_get_softc(swsc->port[arg2].dev);
	if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
		getbinuptime(&t);
		bintime_sub(&t, &sc->init_uptime);
		result = t.sec;
	} else
		result = 0;
	return (sysctl_handle_int(oidp, &result, 0, req));
}

static void
cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
    struct cpsw_queue *queue)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_CHILDREN(node);
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers",
	    CTLFLAG_RD, &queue->queue_slots, 0,
	    "Total buffers currently assigned to this queue");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers",
	    CTLFLAG_RD, &queue->active_queue_len, 0,
	    "Buffers currently registered with hardware controller");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers",
	    CTLFLAG_RD, &queue->max_active_queue_len, 0,
	    "Max value of activeBuffers since last driver reset");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers",
	    CTLFLAG_RD, &queue->avail_queue_len, 0,
	    "Buffers allocated to this queue but not currently "
	    "registered with hardware controller");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers",
	    CTLFLAG_RD, &queue->max_avail_queue_len, 0,
	    "Max value of availBuffers since last driver reset");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued",
	    CTLFLAG_RD, &queue->queue_adds, 0,
	    "Total buffers added to queue");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued",
	    CTLFLAG_RD, &queue->queue_removes, 0,
	    "Total buffers removed from queue");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "queueRestart",
	    CTLFLAG_RD, &queue->queue_restart, 0,
	    "Total times the queue has been restarted");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain",
	    CTLFLAG_RD, &queue->longest_chain, 0,
	    "Max buffers used for a single packet");
}
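
/*
 * Usage sketch (node names hypothetical; the actual prefix depends on
 * the driver name and unit the switch attaches as, e.g. dev.cpswss.0):
 *
 *   # sysctl dev.cpswss.0.queue.tx.longestChain
 *   # sysctl dev.cpswss.0.intr_coalesce_us=500
 */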

static void
cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
    struct cpsw_softc *sc)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_CHILDREN(node);
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets",
	    CTLFLAG_RD, &sc->watchdog.resets, 0,
	    "Total number of watchdog resets");
}

static void
cpsw_add_sysctls(struct cpsw_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *stats_node, *queue_node, *node;
	struct sysctl_oid_list *parent, *stats_parent, *queue_parent;
	struct sysctl_oid_list *ports_parent, *port_parent;
	char port[16];
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs",
	    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    sc, 0, cpsw_stat_attached, "IU",
	    "Time since driver attach");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "intr_coalesce_us",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, cpsw_intr_coalesce, "IU",
	    "minimum time between interrupts");

	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Ports Statistics");
	ports_parent = SYSCTL_CHILDREN(node);
	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		port[0] = '0' + i;
		port[1] = '\0';
		node = SYSCTL_ADD_NODE(ctx, ports_parent, OID_AUTO,
		    port, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "CPSW Port Statistics");
		port_parent = SYSCTL_CHILDREN(node);
		SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime",
		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, i,
		    cpsw_stat_uptime, "IU", "Seconds since driver init");
	}

	stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Statistics");
	stats_parent = SYSCTL_CHILDREN(stats_node);
	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		SYSCTL_ADD_PROC(ctx, stats_parent, i,
		    cpsw_stat_sysctls[i].oid,
		    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
		    sc, 0, cpsw_stats_sysctl, "IU",
		    cpsw_stat_sysctls[i].oid);
	}

	queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Queue Statistics");
	queue_parent = SYSCTL_CHILDREN(queue_node);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->tx);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->rx);

	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Watchdog Statistics");
	cpsw_add_watchdog_sysctls(ctx, node, sc);
}

#ifdef CPSW_ETHERSWITCH
static etherswitch_info_t etherswitch_info = {
	.es_nports = CPSW_PORTS + 1,
	.es_nvlangroups = CPSW_VLANS,
	.es_name = "TI Common Platform Ethernet Switch (CPSW)",
	.es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q,
};

static etherswitch_info_t *
cpsw_getinfo(device_t dev)
{
	return (&etherswitch_info);
}
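
/*
 * etherswitch(4) port numbering as used below: port 0 is the CPU/host
 * port and ports 1..CPSW_PORTS map to the slave ports, so slave
 * lookups index sc->port[] with p->es_port - 1.
 */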

static int
cpsw_getport(device_t dev, etherswitch_port_t *p)
{
	int err;
	struct cpsw_softc *sc;
	struct cpswp_softc *psc;
	struct ifmediareq *ifmr;
	uint32_t reg;

	if (p->es_port < 0 || p->es_port > CPSW_PORTS)
		return (ENXIO);

	err = 0;
	sc = device_get_softc(dev);
	if (p->es_port == CPSW_CPU_PORT) {
		p->es_flags |= ETHERSWITCH_PORT_CPU;
		ifmr = &p->es_ifmr;
		ifmr->ifm_current = ifmr->ifm_active =
		    IFM_ETHER | IFM_1000_T | IFM_FDX;
		ifmr->ifm_mask = 0;
		ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
		ifmr->ifm_count = 0;
	} else {
		psc = device_get_softc(sc->port[p->es_port - 1].dev);
		err = ifmedia_ioctl(psc->ifp, &p->es_ifr,
		    &psc->mii->mii_media, SIOCGIFMEDIA);
	}
	reg = cpsw_read_4(sc, CPSW_PORT_P_VLAN(p->es_port));
	p->es_pvid = reg & ETHERSWITCH_VID_MASK;

	reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port));
	if (reg & ALE_PORTCTL_DROP_UNTAGGED)
		p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED;
	if (reg & ALE_PORTCTL_INGRESS)
		p->es_flags |= ETHERSWITCH_PORT_INGRESS;

	return (err);
}

static int
cpsw_setport(device_t dev, etherswitch_port_t *p)
{
	struct cpsw_softc *sc;
	struct cpswp_softc *psc;
	struct ifmedia *ifm;
	uint32_t reg;

	if (p->es_port < 0 || p->es_port > CPSW_PORTS)
		return (ENXIO);

	sc = device_get_softc(dev);
	if (p->es_pvid != 0) {
		cpsw_write_4(sc, CPSW_PORT_P_VLAN(p->es_port),
		    p->es_pvid & ETHERSWITCH_VID_MASK);
	}

	reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port));
	if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED)
		reg |= ALE_PORTCTL_DROP_UNTAGGED;
	else
		reg &= ~ALE_PORTCTL_DROP_UNTAGGED;
	if (p->es_flags & ETHERSWITCH_PORT_INGRESS)
		reg |= ALE_PORTCTL_INGRESS;
	else
		reg &= ~ALE_PORTCTL_INGRESS;
	cpsw_write_4(sc, CPSW_ALE_PORTCTL(p->es_port), reg);

	/* CPU port does not allow media settings. */
	if (p->es_port == CPSW_CPU_PORT)
		return (0);

	psc = device_get_softc(sc->port[p->es_port - 1].dev);
	ifm = &psc->mii->mii_media;

	return (ifmedia_ioctl(psc->ifp, &p->es_ifr, ifm, SIOCSIFMEDIA));
}

static int
cpsw_getconf(device_t dev, etherswitch_conf_t *conf)
{

	/* Return the VLAN mode. */
	conf->cmd = ETHERSWITCH_CONF_VLAN_MODE;
	conf->vlan_mode = ETHERSWITCH_VLAN_DOT1Q;

	return (0);
}

static int
cpsw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
	int i, vid;
	uint32_t ale_entry[3];
	struct cpsw_softc *sc;

	sc = device_get_softc(dev);

	if (vg->es_vlangroup >= CPSW_VLANS)
		return (EINVAL);

	vg->es_vid = 0;
	vid = cpsw_vgroups[vg->es_vlangroup].vid;
	if (vid == -1)
		return (0);

	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);
		if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN)
			continue;
		if (vid != ALE_VLAN(ale_entry))
			continue;

		vg->es_fid = 0;
		vg->es_vid = ALE_VLAN(ale_entry) | ETHERSWITCH_VID_VALID;
		vg->es_member_ports = ALE_VLAN_MEMBERS(ale_entry);
		vg->es_untagged_ports = ALE_VLAN_UNTAG(ale_entry);
	}

	return (0);
}

static void
cpsw_remove_vlan(struct cpsw_softc *sc, int vlan)
{
	int i;
	uint32_t ale_entry[3];

	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);
		if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN)
			continue;
		if (vlan != ALE_VLAN(ale_entry))
			continue;
		ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
		cpsw_ale_write_entry(sc, i, ale_entry);
		break;
	}
}

static int
cpsw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
	int i;
	struct cpsw_softc *sc;

	sc = device_get_softc(dev);

	for (i = 0; i < CPSW_VLANS; i++) {
		/* Is this VLAN ID in use by another vlangroup? */
		if (vg->es_vlangroup != i && cpsw_vgroups[i].vid == vg->es_vid)
			return (EINVAL);
	}

	if (vg->es_vid == 0) {
		if (cpsw_vgroups[vg->es_vlangroup].vid == -1)
			return (0);
		cpsw_remove_vlan(sc, cpsw_vgroups[vg->es_vlangroup].vid);
		cpsw_vgroups[vg->es_vlangroup].vid = -1;
		vg->es_untagged_ports = 0;
		vg->es_member_ports = 0;
		vg->es_vid = 0;
		return (0);
	}

	vg->es_vid &= ETHERSWITCH_VID_MASK;
	vg->es_member_ports &= CPSW_PORTS_MASK;
	vg->es_untagged_ports &= CPSW_PORTS_MASK;

	if (cpsw_vgroups[vg->es_vlangroup].vid != -1 &&
	    cpsw_vgroups[vg->es_vlangroup].vid != vg->es_vid)
		return (EINVAL);

	cpsw_vgroups[vg->es_vlangroup].vid = vg->es_vid;
	cpsw_ale_update_vlan_table(sc, vg->es_vid, vg->es_member_ports,
	    vg->es_untagged_ports, vg->es_member_ports, 0);

	return (0);
}

static int
cpsw_readreg(device_t dev, int addr)
{

	/* Not supported. */
	return (0);
}

static int
cpsw_writereg(device_t dev, int addr, int value)
{

	/* Not supported. */
	return (0);
}

static int
cpsw_readphy(device_t dev, int phy, int reg)
{

	/* Not supported. */
	return (0);
}

static int
cpsw_writephy(device_t dev, int phy, int reg, int data)
{

	/* Not supported. */
	return (0);
}
#endif
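
/*
 * With CPSW_ETHERSWITCH enabled, the methods above are reachable from
 * userland through etherswitch(4); a hypothetical session (device path
 * and VLAN values illustrative only):
 *
 *   # etherswitchcfg -f /dev/etherswitch0
 *   # etherswitchcfg -f /dev/etherswitch0 vlangroup1 vlan 100 members 0,1
 */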