1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org> 5 * Copyright (c) 2016 Rubicon Communications, LLC (Netgate) 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 /* 31 * TI Common Platform Ethernet Switch (CPSW) Driver 32 * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs. 33 * 34 * This controller is documented in the AM335x Technical Reference 35 * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM 36 * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM. 37 * 38 * It is basically a single Ethernet port (port 0) wired internally to 39 * a 3-port store-and-forward switch connected to two independent 40 * "sliver" controllers (port 1 and port 2). You can operate the 41 * controller in a variety of different ways by suitably configuring 42 * the slivers and the Address Lookup Engine (ALE) that routes packets 43 * between the ports. 44 * 45 * This code was developed and tested on a BeagleBone with 46 * an AM335x SoC. 47 */ 48 49 #include <sys/cdefs.h> 50 #include "opt_cpsw.h" 51 52 #include <sys/param.h> 53 #include <sys/bus.h> 54 #include <sys/kernel.h> 55 #include <sys/lock.h> 56 #include <sys/mbuf.h> 57 #include <sys/module.h> 58 #include <sys/mutex.h> 59 #include <sys/rman.h> 60 #include <sys/socket.h> 61 #include <sys/sockio.h> 62 #include <sys/sysctl.h> 63 64 #include <machine/bus.h> 65 #include <machine/resource.h> 66 #include <machine/stdarg.h> 67 68 #include <net/ethernet.h> 69 #include <net/bpf.h> 70 #include <net/if.h> 71 #include <net/if_dl.h> 72 #include <net/if_media.h> 73 #include <net/if_types.h> 74 75 #include <dev/extres/syscon/syscon.h> 76 #include "syscon_if.h" 77 #include <arm/ti/am335x/am335x_scm.h> 78 79 #include <dev/mii/mii.h> 80 #include <dev/mii/miivar.h> 81 82 #include <dev/ofw/ofw_bus.h> 83 #include <dev/ofw/ofw_bus_subr.h> 84 85 #include <dev/fdt/fdt_common.h> 86 87 #ifdef CPSW_ETHERSWITCH 88 #include <dev/etherswitch/etherswitch.h> 89 #include "etherswitch_if.h" 90 #endif 91 92 #include "if_cpswreg.h" 93 #include "if_cpswvar.h" 94 95 #include "miibus_if.h" 96 97 /* Device probe/attach/detach. */ 98 static int cpsw_probe(device_t); 99 static int cpsw_attach(device_t); 100 static int cpsw_detach(device_t); 101 static int cpswp_probe(device_t); 102 static int cpswp_attach(device_t); 103 static int cpswp_detach(device_t); 104 105 static phandle_t cpsw_get_node(device_t, device_t); 106 107 /* Device Init/shutdown. */ 108 static int cpsw_shutdown(device_t); 109 static void cpswp_init(void *); 110 static void cpswp_init_locked(void *); 111 static void cpswp_stop_locked(struct cpswp_softc *); 112 113 /* Device Suspend/Resume. */ 114 static int cpsw_suspend(device_t); 115 static int cpsw_resume(device_t); 116 117 /* Ioctl. */ 118 static int cpswp_ioctl(if_t, u_long command, caddr_t data); 119 120 static int cpswp_miibus_readreg(device_t, int phy, int reg); 121 static int cpswp_miibus_writereg(device_t, int phy, int reg, int value); 122 static void cpswp_miibus_statchg(device_t); 123 124 /* Send/Receive packets. */ 125 static void cpsw_intr_rx(void *arg); 126 static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *); 127 static void cpsw_rx_enqueue(struct cpsw_softc *); 128 static void cpswp_start(if_t); 129 static void cpsw_intr_tx(void *); 130 static void cpswp_tx_enqueue(struct cpswp_softc *); 131 static int cpsw_tx_dequeue(struct cpsw_softc *); 132 133 /* Misc interrupts and watchdog. */ 134 static void cpsw_intr_rx_thresh(void *); 135 static void cpsw_intr_misc(void *); 136 static void cpswp_tick(void *); 137 static void cpswp_ifmedia_sts(if_t, struct ifmediareq *); 138 static int cpswp_ifmedia_upd(if_t); 139 static void cpsw_tx_watchdog(void *); 140 141 /* ALE support */ 142 static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *); 143 static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *); 144 static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *); 145 static void cpsw_ale_dump_table(struct cpsw_softc *); 146 static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int, 147 int); 148 static int cpswp_ale_update_addresses(struct cpswp_softc *, int); 149 150 /* Statistics and sysctls. */ 151 static void cpsw_add_sysctls(struct cpsw_softc *); 152 static void cpsw_stats_collect(struct cpsw_softc *); 153 static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS); 154 155 #ifdef CPSW_ETHERSWITCH 156 static etherswitch_info_t *cpsw_getinfo(device_t); 157 static int cpsw_getport(device_t, etherswitch_port_t *); 158 static int cpsw_setport(device_t, etherswitch_port_t *); 159 static int cpsw_getconf(device_t, etherswitch_conf_t *); 160 static int cpsw_getvgroup(device_t, etherswitch_vlangroup_t *); 161 static int cpsw_setvgroup(device_t, etherswitch_vlangroup_t *); 162 static int cpsw_readreg(device_t, int); 163 static int cpsw_writereg(device_t, int, int); 164 static int cpsw_readphy(device_t, int, int); 165 static int cpsw_writephy(device_t, int, int, int); 166 #endif 167 168 /* 169 * Arbitrary limit on number of segments in an mbuf to be transmitted. 170 * Packets with more segments than this will be defragmented before 171 * they are queued. 172 */ 173 #define CPSW_TXFRAGS 16 174 175 /* Shared resources. */ 176 static device_method_t cpsw_methods[] = { 177 /* Device interface */ 178 DEVMETHOD(device_probe, cpsw_probe), 179 DEVMETHOD(device_attach, cpsw_attach), 180 DEVMETHOD(device_detach, cpsw_detach), 181 DEVMETHOD(device_shutdown, cpsw_shutdown), 182 DEVMETHOD(device_suspend, cpsw_suspend), 183 DEVMETHOD(device_resume, cpsw_resume), 184 /* Bus interface */ 185 DEVMETHOD(bus_add_child, device_add_child_ordered), 186 /* OFW methods */ 187 DEVMETHOD(ofw_bus_get_node, cpsw_get_node), 188 #ifdef CPSW_ETHERSWITCH 189 /* etherswitch interface */ 190 DEVMETHOD(etherswitch_getinfo, cpsw_getinfo), 191 DEVMETHOD(etherswitch_readreg, cpsw_readreg), 192 DEVMETHOD(etherswitch_writereg, cpsw_writereg), 193 DEVMETHOD(etherswitch_readphyreg, cpsw_readphy), 194 DEVMETHOD(etherswitch_writephyreg, cpsw_writephy), 195 DEVMETHOD(etherswitch_getport, cpsw_getport), 196 DEVMETHOD(etherswitch_setport, cpsw_setport), 197 DEVMETHOD(etherswitch_getvgroup, cpsw_getvgroup), 198 DEVMETHOD(etherswitch_setvgroup, cpsw_setvgroup), 199 DEVMETHOD(etherswitch_getconf, cpsw_getconf), 200 #endif 201 DEVMETHOD_END 202 }; 203 204 static driver_t cpsw_driver = { 205 "cpswss", 206 cpsw_methods, 207 sizeof(struct cpsw_softc), 208 }; 209 210 DRIVER_MODULE(cpswss, simplebus, cpsw_driver, 0, 0); 211 212 /* Port/Slave resources. */ 213 static device_method_t cpswp_methods[] = { 214 /* Device interface */ 215 DEVMETHOD(device_probe, cpswp_probe), 216 DEVMETHOD(device_attach, cpswp_attach), 217 DEVMETHOD(device_detach, cpswp_detach), 218 /* MII interface */ 219 DEVMETHOD(miibus_readreg, cpswp_miibus_readreg), 220 DEVMETHOD(miibus_writereg, cpswp_miibus_writereg), 221 DEVMETHOD(miibus_statchg, cpswp_miibus_statchg), 222 DEVMETHOD_END 223 }; 224 225 static driver_t cpswp_driver = { 226 "cpsw", 227 cpswp_methods, 228 sizeof(struct cpswp_softc), 229 }; 230 231 #ifdef CPSW_ETHERSWITCH 232 DRIVER_MODULE(etherswitch, cpswss, etherswitch_driver, 0, 0); 233 MODULE_DEPEND(cpswss, etherswitch, 1, 1, 1); 234 #endif 235 236 DRIVER_MODULE(cpsw, cpswss, cpswp_driver, 0, 0); 237 DRIVER_MODULE(miibus, cpsw, miibus_driver, 0, 0); 238 MODULE_DEPEND(cpsw, ether, 1, 1, 1); 239 MODULE_DEPEND(cpsw, miibus, 1, 1, 1); 240 241 #ifdef CPSW_ETHERSWITCH 242 static struct cpsw_vlangroups cpsw_vgroups[CPSW_VLANS]; 243 #endif 244 245 static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 }; 246 247 static struct resource_spec irq_res_spec[] = { 248 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 249 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE }, 250 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE }, 251 { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE }, 252 { -1, 0 } 253 }; 254 255 static struct { 256 void (*cb)(void *); 257 } cpsw_intr_cb[] = { 258 { cpsw_intr_rx_thresh }, 259 { cpsw_intr_rx }, 260 { cpsw_intr_tx }, 261 { cpsw_intr_misc }, 262 }; 263 264 /* Number of entries here must match size of stats 265 * array in struct cpswp_softc. */ 266 static struct cpsw_stat { 267 int reg; 268 char *oid; 269 } cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = { 270 {0x00, "GoodRxFrames"}, 271 {0x04, "BroadcastRxFrames"}, 272 {0x08, "MulticastRxFrames"}, 273 {0x0C, "PauseRxFrames"}, 274 {0x10, "RxCrcErrors"}, 275 {0x14, "RxAlignErrors"}, 276 {0x18, "OversizeRxFrames"}, 277 {0x1c, "RxJabbers"}, 278 {0x20, "ShortRxFrames"}, 279 {0x24, "RxFragments"}, 280 {0x30, "RxOctets"}, 281 {0x34, "GoodTxFrames"}, 282 {0x38, "BroadcastTxFrames"}, 283 {0x3c, "MulticastTxFrames"}, 284 {0x40, "PauseTxFrames"}, 285 {0x44, "DeferredTxFrames"}, 286 {0x48, "CollisionsTxFrames"}, 287 {0x4c, "SingleCollisionTxFrames"}, 288 {0x50, "MultipleCollisionTxFrames"}, 289 {0x54, "ExcessiveCollisions"}, 290 {0x58, "LateCollisions"}, 291 {0x5c, "TxUnderrun"}, 292 {0x60, "CarrierSenseErrors"}, 293 {0x64, "TxOctets"}, 294 {0x68, "RxTx64OctetFrames"}, 295 {0x6c, "RxTx65to127OctetFrames"}, 296 {0x70, "RxTx128to255OctetFrames"}, 297 {0x74, "RxTx256to511OctetFrames"}, 298 {0x78, "RxTx512to1024OctetFrames"}, 299 {0x7c, "RxTx1024upOctetFrames"}, 300 {0x80, "NetOctets"}, 301 {0x84, "RxStartOfFrameOverruns"}, 302 {0x88, "RxMiddleOfFrameOverruns"}, 303 {0x8c, "RxDmaOverruns"} 304 }; 305 306 /* 307 * Basic debug support. 308 */ 309 310 static void 311 cpsw_debugf_head(const char *funcname) 312 { 313 int t = (int)(time_second % (24 * 60 * 60)); 314 315 printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname); 316 } 317 318 static void 319 cpsw_debugf(const char *fmt, ...) 320 { 321 va_list ap; 322 323 va_start(ap, fmt); 324 vprintf(fmt, ap); 325 va_end(ap); 326 printf("\n"); 327 328 } 329 330 #define CPSW_DEBUGF(_sc, a) do { \ 331 if ((_sc)->debug) { \ 332 cpsw_debugf_head(__func__); \ 333 cpsw_debugf a; \ 334 } \ 335 } while (0) 336 337 /* 338 * Locking macros 339 */ 340 #define CPSW_TX_LOCK(sc) do { \ 341 mtx_assert(&(sc)->rx.lock, MA_NOTOWNED); \ 342 mtx_lock(&(sc)->tx.lock); \ 343 } while (0) 344 345 #define CPSW_TX_UNLOCK(sc) mtx_unlock(&(sc)->tx.lock) 346 #define CPSW_TX_LOCK_ASSERT(sc) mtx_assert(&(sc)->tx.lock, MA_OWNED) 347 348 #define CPSW_RX_LOCK(sc) do { \ 349 mtx_assert(&(sc)->tx.lock, MA_NOTOWNED); \ 350 mtx_lock(&(sc)->rx.lock); \ 351 } while (0) 352 353 #define CPSW_RX_UNLOCK(sc) mtx_unlock(&(sc)->rx.lock) 354 #define CPSW_RX_LOCK_ASSERT(sc) mtx_assert(&(sc)->rx.lock, MA_OWNED) 355 356 #define CPSW_PORT_LOCK(_sc) do { \ 357 mtx_assert(&(_sc)->lock, MA_NOTOWNED); \ 358 mtx_lock(&(_sc)->lock); \ 359 } while (0) 360 361 #define CPSW_PORT_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) 362 #define CPSW_PORT_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->lock, MA_OWNED) 363 364 /* 365 * Read/Write macros 366 */ 367 #define cpsw_read_4(_sc, _reg) bus_read_4((_sc)->mem_res, (_reg)) 368 #define cpsw_write_4(_sc, _reg, _val) \ 369 bus_write_4((_sc)->mem_res, (_reg), (_val)) 370 371 #define cpsw_cpdma_bd_offset(i) (CPSW_CPPI_RAM_OFFSET + ((i)*16)) 372 373 #define cpsw_cpdma_bd_paddr(sc, slot) \ 374 BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset) 375 #define cpsw_cpdma_read_bd(sc, slot, val) \ 376 bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) 377 #define cpsw_cpdma_write_bd(sc, slot, val) \ 378 bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) 379 #define cpsw_cpdma_write_bd_next(sc, slot, next_slot) \ 380 cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot)) 381 #define cpsw_cpdma_write_bd_flags(sc, slot, val) \ 382 bus_write_2(sc->mem_res, slot->bd_offset + 14, val) 383 #define cpsw_cpdma_read_bd_flags(sc, slot) \ 384 bus_read_2(sc->mem_res, slot->bd_offset + 14) 385 #define cpsw_write_hdp_slot(sc, queue, slot) \ 386 cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot)) 387 #define CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0)) 388 #define cpsw_read_cp(sc, queue) \ 389 cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET) 390 #define cpsw_write_cp(sc, queue, val) \ 391 cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val)) 392 #define cpsw_write_cp_slot(sc, queue, slot) \ 393 cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot)) 394 395 #if 0 396 /* XXX temporary function versions for debugging. */ 397 static void 398 cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) 399 { 400 uint32_t reg = queue->hdp_offset; 401 uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); 402 CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg))); 403 cpsw_write_4(sc, reg, v); 404 } 405 406 static void 407 cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) 408 { 409 uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); 410 CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue))); 411 cpsw_write_cp(sc, queue, v); 412 } 413 #endif 414 415 /* 416 * Expanded dump routines for verbose debugging. 417 */ 418 static void 419 cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) 420 { 421 static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ", 422 "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun", 423 "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1", 424 "Port0"}; 425 struct cpsw_cpdma_bd bd; 426 const char *sep; 427 int i; 428 429 cpsw_cpdma_read_bd(sc, slot, &bd); 430 printf("BD Addr : 0x%08x Next : 0x%08x\n", 431 cpsw_cpdma_bd_paddr(sc, slot), bd.next); 432 printf(" BufPtr: 0x%08x BufLen: 0x%08x\n", bd.bufptr, bd.buflen); 433 printf(" BufOff: 0x%08x PktLen: 0x%08x\n", bd.bufoff, bd.pktlen); 434 printf(" Flags: "); 435 sep = ""; 436 for (i = 0; i < 16; ++i) { 437 if (bd.flags & (1 << (15 - i))) { 438 printf("%s%s", sep, flags[i]); 439 sep = ","; 440 } 441 } 442 printf("\n"); 443 if (slot->mbuf) { 444 printf(" Ether: %14D\n", 445 (char *)(slot->mbuf->m_data), " "); 446 printf(" Packet: %16D\n", 447 (char *)(slot->mbuf->m_data) + 14, " "); 448 } 449 } 450 451 #define CPSW_DUMP_SLOT(cs, slot) do { \ 452 IF_DEBUG(sc) { \ 453 cpsw_dump_slot(sc, slot); \ 454 } \ 455 } while (0) 456 457 static void 458 cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q) 459 { 460 struct cpsw_slot *slot; 461 int i = 0; 462 int others = 0; 463 464 STAILQ_FOREACH(slot, q, next) { 465 if (i > CPSW_TXFRAGS) 466 ++others; 467 else 468 cpsw_dump_slot(sc, slot); 469 ++i; 470 } 471 if (others) 472 printf(" ... and %d more.\n", others); 473 printf("\n"); 474 } 475 476 #define CPSW_DUMP_QUEUE(sc, q) do { \ 477 IF_DEBUG(sc) { \ 478 cpsw_dump_queue(sc, q); \ 479 } \ 480 } while (0) 481 482 static void 483 cpsw_init_slots(struct cpsw_softc *sc) 484 { 485 struct cpsw_slot *slot; 486 int i; 487 488 STAILQ_INIT(&sc->avail); 489 490 /* Put the slot descriptors onto the global avail list. */ 491 for (i = 0; i < nitems(sc->_slots); i++) { 492 slot = &sc->_slots[i]; 493 slot->bd_offset = cpsw_cpdma_bd_offset(i); 494 STAILQ_INSERT_TAIL(&sc->avail, slot, next); 495 } 496 } 497 498 static int 499 cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested) 500 { 501 const int max_slots = nitems(sc->_slots); 502 struct cpsw_slot *slot; 503 int i; 504 505 if (requested < 0) 506 requested = max_slots; 507 508 for (i = 0; i < requested; ++i) { 509 slot = STAILQ_FIRST(&sc->avail); 510 if (slot == NULL) 511 return (0); 512 if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) { 513 device_printf(sc->dev, "failed to create dmamap\n"); 514 return (ENOMEM); 515 } 516 STAILQ_REMOVE_HEAD(&sc->avail, next); 517 STAILQ_INSERT_TAIL(&queue->avail, slot, next); 518 ++queue->avail_queue_len; 519 ++queue->queue_slots; 520 } 521 return (0); 522 } 523 524 static void 525 cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) 526 { 527 int error __diagused; 528 529 if (slot->dmamap) { 530 if (slot->mbuf) 531 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 532 error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap); 533 KASSERT(error == 0, ("Mapping still active")); 534 slot->dmamap = NULL; 535 } 536 if (slot->mbuf) { 537 m_freem(slot->mbuf); 538 slot->mbuf = NULL; 539 } 540 } 541 542 static void 543 cpsw_reset(struct cpsw_softc *sc) 544 { 545 int i; 546 547 callout_stop(&sc->watchdog.callout); 548 549 /* Reset RMII/RGMII wrapper. */ 550 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1); 551 while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1) 552 ; 553 554 /* Disable TX and RX interrupts for all cores. */ 555 for (i = 0; i < 3; ++i) { 556 cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00); 557 cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00); 558 cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00); 559 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00); 560 } 561 562 /* Reset CPSW subsystem. */ 563 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1); 564 while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1) 565 ; 566 567 /* Reset Sliver port 1 and 2 */ 568 for (i = 0; i < 2; i++) { 569 /* Reset */ 570 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1); 571 while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1) 572 ; 573 } 574 575 /* Reset DMA controller. */ 576 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1); 577 while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1) 578 ; 579 580 /* Disable TX & RX DMA */ 581 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0); 582 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0); 583 584 /* Clear all queues. */ 585 for (i = 0; i < 8; i++) { 586 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0); 587 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0); 588 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0); 589 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0); 590 } 591 592 /* Clear all interrupt Masks */ 593 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF); 594 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF); 595 } 596 597 static void 598 cpsw_init(struct cpsw_softc *sc) 599 { 600 struct cpsw_slot *slot; 601 uint32_t reg; 602 603 /* Disable the interrupt pacing. */ 604 reg = cpsw_read_4(sc, CPSW_WR_INT_CONTROL); 605 reg &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK); 606 cpsw_write_4(sc, CPSW_WR_INT_CONTROL, reg); 607 608 /* Clear ALE */ 609 cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL); 610 611 /* Enable ALE */ 612 reg = CPSW_ALE_CTL_ENABLE; 613 if (sc->dualemac) 614 reg |= CPSW_ALE_CTL_VLAN_AWARE; 615 cpsw_write_4(sc, CPSW_ALE_CONTROL, reg); 616 617 /* Set Host Port Mapping. */ 618 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210); 619 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0); 620 621 /* Initialize ALE: set host port to forwarding(3). */ 622 cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 623 ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD); 624 625 cpsw_write_4(sc, CPSW_SS_PTYPE, 0); 626 627 /* Enable statistics for ports 0, 1 and 2 */ 628 cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7); 629 630 /* Turn off flow control. */ 631 cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0); 632 633 /* Make IP hdr aligned with 4 */ 634 cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2); 635 636 /* Initialize RX Buffer Descriptors */ 637 cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), 0); 638 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0); 639 640 /* Enable TX & RX DMA */ 641 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1); 642 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1); 643 644 /* Enable Interrupts for core 0 */ 645 cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF); 646 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF); 647 cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0xFF); 648 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F); 649 650 /* Enable host Error Interrupt */ 651 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3); 652 653 /* Enable interrupts for RX and TX on Channel 0 */ 654 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 655 CPSW_CPDMA_RX_INT(0) | CPSW_CPDMA_RX_INT_THRESH(0)); 656 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1); 657 658 /* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */ 659 /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */ 660 cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff); 661 662 /* Select MII in GMII_SEL, Internal Delay mode */ 663 //ti_scm_reg_write_4(0x650, 0); 664 665 /* Initialize active queues. */ 666 slot = STAILQ_FIRST(&sc->tx.active); 667 if (slot != NULL) 668 cpsw_write_hdp_slot(sc, &sc->tx, slot); 669 slot = STAILQ_FIRST(&sc->rx.active); 670 if (slot != NULL) 671 cpsw_write_hdp_slot(sc, &sc->rx, slot); 672 cpsw_rx_enqueue(sc); 673 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), sc->rx.active_queue_len); 674 cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), CPSW_TXFRAGS); 675 676 /* Activate network interface. */ 677 sc->rx.running = 1; 678 sc->tx.running = 1; 679 sc->watchdog.timer = 0; 680 callout_init(&sc->watchdog.callout, 0); 681 callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); 682 } 683 684 /* 685 * 686 * Device Probe, Attach, Detach. 687 * 688 */ 689 690 static int 691 cpsw_probe(device_t dev) 692 { 693 694 if (!ofw_bus_status_okay(dev)) 695 return (ENXIO); 696 697 if (!ofw_bus_is_compatible(dev, "ti,cpsw")) 698 return (ENXIO); 699 700 device_set_desc(dev, "3-port Switch Ethernet Subsystem"); 701 return (BUS_PROBE_DEFAULT); 702 } 703 704 static int 705 cpsw_intr_attach(struct cpsw_softc *sc) 706 { 707 int i; 708 709 for (i = 0; i < CPSW_INTR_COUNT; i++) { 710 if (bus_setup_intr(sc->dev, sc->irq_res[i], 711 INTR_TYPE_NET | INTR_MPSAFE, NULL, 712 cpsw_intr_cb[i].cb, sc, &sc->ih_cookie[i]) != 0) { 713 return (-1); 714 } 715 } 716 717 return (0); 718 } 719 720 static void 721 cpsw_intr_detach(struct cpsw_softc *sc) 722 { 723 int i; 724 725 for (i = 0; i < CPSW_INTR_COUNT; i++) { 726 if (sc->ih_cookie[i]) { 727 bus_teardown_intr(sc->dev, sc->irq_res[i], 728 sc->ih_cookie[i]); 729 } 730 } 731 } 732 733 static int 734 cpsw_get_fdt_data(struct cpsw_softc *sc, int port) 735 { 736 char *name; 737 int len, phy, vlan; 738 pcell_t phy_id[3], vlan_id; 739 phandle_t child; 740 unsigned long mdio_child_addr; 741 742 /* Find any slave with phy-handle/phy_id */ 743 phy = -1; 744 vlan = -1; 745 for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) { 746 if (OF_getprop_alloc(child, "name", (void **)&name) < 0) 747 continue; 748 if (sscanf(name, "slave@%lx", &mdio_child_addr) != 1) { 749 OF_prop_free(name); 750 continue; 751 } 752 OF_prop_free(name); 753 754 if (mdio_child_addr != slave_mdio_addr[port] && 755 mdio_child_addr != (slave_mdio_addr[port] & 0xFFF)) 756 continue; 757 758 if (fdt_get_phyaddr(child, NULL, &phy, NULL) != 0){ 759 /* Users with old DTB will have phy_id instead */ 760 phy = -1; 761 len = OF_getproplen(child, "phy_id"); 762 if (len / sizeof(pcell_t) == 2) { 763 /* Get phy address from fdt */ 764 if (OF_getencprop(child, "phy_id", phy_id, len) > 0) 765 phy = phy_id[1]; 766 } 767 } 768 769 len = OF_getproplen(child, "dual_emac_res_vlan"); 770 if (len / sizeof(pcell_t) == 1) { 771 /* Get phy address from fdt */ 772 if (OF_getencprop(child, "dual_emac_res_vlan", 773 &vlan_id, len) > 0) { 774 vlan = vlan_id; 775 } 776 } 777 778 break; 779 } 780 if (phy == -1) 781 return (ENXIO); 782 sc->port[port].phy = phy; 783 sc->port[port].vlan = vlan; 784 785 return (0); 786 } 787 788 static int 789 cpsw_attach(device_t dev) 790 { 791 int error, i; 792 struct cpsw_softc *sc; 793 uint32_t reg; 794 795 sc = device_get_softc(dev); 796 sc->dev = dev; 797 sc->node = ofw_bus_get_node(dev); 798 getbinuptime(&sc->attach_uptime); 799 800 if (OF_getencprop(sc->node, "active_slave", &sc->active_slave, 801 sizeof(sc->active_slave)) <= 0) { 802 sc->active_slave = 0; 803 } 804 if (sc->active_slave > 1) 805 sc->active_slave = 1; 806 807 if (OF_hasprop(sc->node, "dual_emac")) 808 sc->dualemac = 1; 809 810 for (i = 0; i < CPSW_PORTS; i++) { 811 if (!sc->dualemac && i != sc->active_slave) 812 continue; 813 if (cpsw_get_fdt_data(sc, i) != 0) { 814 device_printf(dev, 815 "failed to get PHY address from FDT\n"); 816 return (ENXIO); 817 } 818 } 819 820 /* Initialize mutexes */ 821 mtx_init(&sc->tx.lock, device_get_nameunit(dev), 822 "cpsw TX lock", MTX_DEF); 823 mtx_init(&sc->rx.lock, device_get_nameunit(dev), 824 "cpsw RX lock", MTX_DEF); 825 826 /* Allocate IRQ resources */ 827 error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res); 828 if (error) { 829 device_printf(dev, "could not allocate IRQ resources\n"); 830 cpsw_detach(dev); 831 return (ENXIO); 832 } 833 834 sc->mem_rid = 0; 835 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 836 &sc->mem_rid, RF_ACTIVE); 837 if (sc->mem_res == NULL) { 838 device_printf(sc->dev, "failed to allocate memory resource\n"); 839 cpsw_detach(dev); 840 return (ENXIO); 841 } 842 843 reg = cpsw_read_4(sc, CPSW_SS_IDVER); 844 device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7), 845 reg & 0xFF, (reg >> 11) & 0x1F); 846 847 cpsw_add_sysctls(sc); 848 849 /* Allocate a busdma tag and DMA safe memory for mbufs. */ 850 error = bus_dma_tag_create( 851 bus_get_dma_tag(sc->dev), /* parent */ 852 1, 0, /* alignment, boundary */ 853 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 854 BUS_SPACE_MAXADDR, /* highaddr */ 855 NULL, NULL, /* filtfunc, filtfuncarg */ 856 MCLBYTES, CPSW_TXFRAGS, /* maxsize, nsegments */ 857 MCLBYTES, 0, /* maxsegsz, flags */ 858 NULL, NULL, /* lockfunc, lockfuncarg */ 859 &sc->mbuf_dtag); /* dmatag */ 860 if (error) { 861 device_printf(dev, "bus_dma_tag_create failed\n"); 862 cpsw_detach(dev); 863 return (error); 864 } 865 866 /* Allocate a NULL buffer for padding. */ 867 sc->nullpad = malloc(ETHER_MIN_LEN, M_DEVBUF, M_WAITOK | M_ZERO); 868 869 cpsw_init_slots(sc); 870 871 /* Allocate slots to TX and RX queues. */ 872 STAILQ_INIT(&sc->rx.avail); 873 STAILQ_INIT(&sc->rx.active); 874 STAILQ_INIT(&sc->tx.avail); 875 STAILQ_INIT(&sc->tx.active); 876 // For now: 128 slots to TX, rest to RX. 877 // XXX TODO: start with 32/64 and grow dynamically based on demand. 878 if (cpsw_add_slots(sc, &sc->tx, 128) || 879 cpsw_add_slots(sc, &sc->rx, -1)) { 880 device_printf(dev, "failed to allocate dmamaps\n"); 881 cpsw_detach(dev); 882 return (ENOMEM); 883 } 884 device_printf(dev, "Initial queue size TX=%d RX=%d\n", 885 sc->tx.queue_slots, sc->rx.queue_slots); 886 887 sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0); 888 sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0); 889 890 if (cpsw_intr_attach(sc) == -1) { 891 device_printf(dev, "failed to setup interrupts\n"); 892 cpsw_detach(dev); 893 return (ENXIO); 894 } 895 896 #ifdef CPSW_ETHERSWITCH 897 for (i = 0; i < CPSW_VLANS; i++) 898 cpsw_vgroups[i].vid = -1; 899 #endif 900 901 /* Reset the controller. */ 902 cpsw_reset(sc); 903 cpsw_init(sc); 904 905 for (i = 0; i < CPSW_PORTS; i++) { 906 if (!sc->dualemac && i != sc->active_slave) 907 continue; 908 sc->port[i].dev = device_add_child(dev, "cpsw", i); 909 if (sc->port[i].dev == NULL) { 910 cpsw_detach(dev); 911 return (ENXIO); 912 } 913 } 914 bus_generic_probe(dev); 915 bus_generic_attach(dev); 916 917 return (0); 918 } 919 920 static int 921 cpsw_detach(device_t dev) 922 { 923 struct cpsw_softc *sc; 924 int error, i; 925 926 bus_generic_detach(dev); 927 sc = device_get_softc(dev); 928 929 for (i = 0; i < CPSW_PORTS; i++) { 930 if (sc->port[i].dev) 931 device_delete_child(dev, sc->port[i].dev); 932 } 933 934 if (device_is_attached(dev)) { 935 callout_stop(&sc->watchdog.callout); 936 callout_drain(&sc->watchdog.callout); 937 } 938 939 /* Stop and release all interrupts */ 940 cpsw_intr_detach(sc); 941 942 /* Free dmamaps and mbufs */ 943 for (i = 0; i < nitems(sc->_slots); ++i) 944 cpsw_free_slot(sc, &sc->_slots[i]); 945 946 /* Free null padding buffer. */ 947 if (sc->nullpad) 948 free(sc->nullpad, M_DEVBUF); 949 950 /* Free DMA tag */ 951 if (sc->mbuf_dtag) { 952 error = bus_dma_tag_destroy(sc->mbuf_dtag); 953 KASSERT(error == 0, ("Unable to destroy DMA tag")); 954 } 955 956 /* Free IO memory handler */ 957 if (sc->mem_res != NULL) 958 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); 959 bus_release_resources(dev, irq_res_spec, sc->irq_res); 960 961 /* Destroy mutexes */ 962 mtx_destroy(&sc->rx.lock); 963 mtx_destroy(&sc->tx.lock); 964 965 /* Detach the switch device, if present. */ 966 error = bus_generic_detach(dev); 967 if (error != 0) 968 return (error); 969 970 return (device_delete_children(dev)); 971 } 972 973 static phandle_t 974 cpsw_get_node(device_t bus, device_t dev) 975 { 976 977 /* Share controller node with port device. */ 978 return (ofw_bus_get_node(bus)); 979 } 980 981 static int 982 cpswp_probe(device_t dev) 983 { 984 985 if (device_get_unit(dev) > 1) { 986 device_printf(dev, "Only two ports are supported.\n"); 987 return (ENXIO); 988 } 989 device_set_desc(dev, "Ethernet Switch Port"); 990 991 return (BUS_PROBE_DEFAULT); 992 } 993 994 static int 995 cpswp_attach(device_t dev) 996 { 997 int error; 998 if_t ifp; 999 struct cpswp_softc *sc; 1000 uint32_t reg; 1001 uint8_t mac_addr[ETHER_ADDR_LEN]; 1002 phandle_t opp_table; 1003 struct syscon *syscon; 1004 1005 sc = device_get_softc(dev); 1006 sc->dev = dev; 1007 sc->pdev = device_get_parent(dev); 1008 sc->swsc = device_get_softc(sc->pdev); 1009 sc->unit = device_get_unit(dev); 1010 sc->phy = sc->swsc->port[sc->unit].phy; 1011 sc->vlan = sc->swsc->port[sc->unit].vlan; 1012 if (sc->swsc->dualemac && sc->vlan == -1) 1013 sc->vlan = sc->unit + 1; 1014 1015 if (sc->unit == 0) { 1016 sc->physel = MDIOUSERPHYSEL0; 1017 sc->phyaccess = MDIOUSERACCESS0; 1018 } else { 1019 sc->physel = MDIOUSERPHYSEL1; 1020 sc->phyaccess = MDIOUSERACCESS1; 1021 } 1022 1023 mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock", 1024 MTX_DEF); 1025 1026 /* Allocate network interface */ 1027 ifp = sc->ifp = if_alloc(IFT_ETHER); 1028 if (ifp == NULL) { 1029 cpswp_detach(dev); 1030 return (ENXIO); 1031 } 1032 1033 if_initname(ifp, device_get_name(sc->dev), sc->unit); 1034 if_setsoftc(ifp, sc); 1035 if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST); 1036 1037 if_setcapenable(ifp, if_getcapabilities(ifp)); 1038 1039 if_setinitfn(ifp, cpswp_init); 1040 if_setstartfn(ifp, cpswp_start); 1041 if_setioctlfn(ifp, cpswp_ioctl); 1042 1043 if_setsendqlen(ifp, sc->swsc->tx.queue_slots); 1044 if_setsendqready(ifp); 1045 1046 /* FIXME: For now; Go and kidnap syscon from opp-table */ 1047 /* ti,cpsw actually have an optional syscon reference but only for am33xx?? */ 1048 opp_table = OF_finddevice("/opp-table"); 1049 if (opp_table == -1) { 1050 device_printf(dev, "Cant find /opp-table\n"); 1051 cpswp_detach(dev); 1052 return (ENXIO); 1053 } 1054 if (!OF_hasprop(opp_table, "syscon")) { 1055 device_printf(dev, "/opp-table doesnt have required syscon property\n"); 1056 cpswp_detach(dev); 1057 return (ENXIO); 1058 } 1059 if (syscon_get_by_ofw_property(dev, opp_table, "syscon", &syscon) != 0) { 1060 device_printf(dev, "Failed to get syscon\n"); 1061 cpswp_detach(dev); 1062 return (ENXIO); 1063 } 1064 1065 /* Get high part of MAC address from control module (mac_id[0|1]_hi) */ 1066 reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_HI + sc->unit * 8); 1067 mac_addr[0] = reg & 0xFF; 1068 mac_addr[1] = (reg >> 8) & 0xFF; 1069 mac_addr[2] = (reg >> 16) & 0xFF; 1070 mac_addr[3] = (reg >> 24) & 0xFF; 1071 1072 /* Get low part of MAC address from control module (mac_id[0|1]_lo) */ 1073 reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_LO + sc->unit * 8); 1074 mac_addr[4] = reg & 0xFF; 1075 mac_addr[5] = (reg >> 8) & 0xFF; 1076 1077 error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd, 1078 cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0); 1079 if (error) { 1080 device_printf(dev, "attaching PHYs failed\n"); 1081 cpswp_detach(dev); 1082 return (error); 1083 } 1084 sc->mii = device_get_softc(sc->miibus); 1085 1086 /* Select PHY and enable interrupts */ 1087 cpsw_write_4(sc->swsc, sc->physel, 1088 MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F)); 1089 1090 ether_ifattach(sc->ifp, mac_addr); 1091 callout_init(&sc->mii_callout, 0); 1092 1093 return (0); 1094 } 1095 1096 static int 1097 cpswp_detach(device_t dev) 1098 { 1099 struct cpswp_softc *sc; 1100 1101 sc = device_get_softc(dev); 1102 CPSW_DEBUGF(sc->swsc, ("")); 1103 if (device_is_attached(dev)) { 1104 ether_ifdetach(sc->ifp); 1105 CPSW_PORT_LOCK(sc); 1106 cpswp_stop_locked(sc); 1107 CPSW_PORT_UNLOCK(sc); 1108 callout_drain(&sc->mii_callout); 1109 } 1110 1111 bus_generic_detach(dev); 1112 1113 if_free(sc->ifp); 1114 mtx_destroy(&sc->lock); 1115 1116 return (0); 1117 } 1118 1119 /* 1120 * 1121 * Init/Shutdown. 1122 * 1123 */ 1124 1125 static int 1126 cpsw_ports_down(struct cpsw_softc *sc) 1127 { 1128 struct cpswp_softc *psc; 1129 if_t ifp1, ifp2; 1130 1131 if (!sc->dualemac) 1132 return (1); 1133 psc = device_get_softc(sc->port[0].dev); 1134 ifp1 = psc->ifp; 1135 psc = device_get_softc(sc->port[1].dev); 1136 ifp2 = psc->ifp; 1137 if ((if_getflags(ifp1) & IFF_UP) == 0 && (if_getflags(ifp2) & IFF_UP) == 0) 1138 return (1); 1139 1140 return (0); 1141 } 1142 1143 static void 1144 cpswp_init(void *arg) 1145 { 1146 struct cpswp_softc *sc = arg; 1147 1148 CPSW_DEBUGF(sc->swsc, ("")); 1149 CPSW_PORT_LOCK(sc); 1150 cpswp_init_locked(arg); 1151 CPSW_PORT_UNLOCK(sc); 1152 } 1153 1154 static void 1155 cpswp_init_locked(void *arg) 1156 { 1157 #ifdef CPSW_ETHERSWITCH 1158 int i; 1159 #endif 1160 struct cpswp_softc *sc = arg; 1161 if_t ifp; 1162 uint32_t reg; 1163 1164 CPSW_DEBUGF(sc->swsc, ("")); 1165 CPSW_PORT_LOCK_ASSERT(sc); 1166 ifp = sc->ifp; 1167 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 1168 return; 1169 1170 getbinuptime(&sc->init_uptime); 1171 1172 if (!sc->swsc->rx.running && !sc->swsc->tx.running) { 1173 /* Reset the controller. */ 1174 cpsw_reset(sc->swsc); 1175 cpsw_init(sc->swsc); 1176 } 1177 1178 /* Set Slave Mapping. */ 1179 cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210); 1180 cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1), 1181 0x33221100); 1182 cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2); 1183 /* Enable MAC RX/TX modules. */ 1184 /* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */ 1185 /* Huh? Docs call bit 0 "Loopback" some places, "FullDuplex" others. */ 1186 reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); 1187 reg |= CPSW_SL_MACTL_GMII_ENABLE; 1188 cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); 1189 1190 /* Initialize ALE: set port to forwarding, initialize addrs */ 1191 cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1), 1192 ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD); 1193 cpswp_ale_update_addresses(sc, 1); 1194 1195 if (sc->swsc->dualemac) { 1196 /* Set Port VID. */ 1197 cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1), 1198 sc->vlan & 0xfff); 1199 cpsw_ale_update_vlan_table(sc->swsc, sc->vlan, 1200 (1 << (sc->unit + 1)) | (1 << 0), /* Member list */ 1201 (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */ 1202 (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */ 1203 #ifdef CPSW_ETHERSWITCH 1204 for (i = 0; i < CPSW_VLANS; i++) { 1205 if (cpsw_vgroups[i].vid != -1) 1206 continue; 1207 cpsw_vgroups[i].vid = sc->vlan; 1208 break; 1209 } 1210 #endif 1211 } 1212 1213 mii_mediachg(sc->mii); 1214 callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); 1215 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 1216 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1217 } 1218 1219 static int 1220 cpsw_shutdown(device_t dev) 1221 { 1222 struct cpsw_softc *sc; 1223 struct cpswp_softc *psc; 1224 int i; 1225 1226 sc = device_get_softc(dev); 1227 CPSW_DEBUGF(sc, ("")); 1228 for (i = 0; i < CPSW_PORTS; i++) { 1229 if (!sc->dualemac && i != sc->active_slave) 1230 continue; 1231 psc = device_get_softc(sc->port[i].dev); 1232 CPSW_PORT_LOCK(psc); 1233 cpswp_stop_locked(psc); 1234 CPSW_PORT_UNLOCK(psc); 1235 } 1236 1237 return (0); 1238 } 1239 1240 static void 1241 cpsw_rx_teardown(struct cpsw_softc *sc) 1242 { 1243 int i = 0; 1244 1245 CPSW_RX_LOCK(sc); 1246 CPSW_DEBUGF(sc, ("starting RX teardown")); 1247 sc->rx.teardown = 1; 1248 cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0); 1249 CPSW_RX_UNLOCK(sc); 1250 while (sc->rx.running) { 1251 if (++i > 10) { 1252 device_printf(sc->dev, 1253 "Unable to cleanly shutdown receiver\n"); 1254 return; 1255 } 1256 DELAY(200); 1257 } 1258 if (!sc->rx.running) 1259 CPSW_DEBUGF(sc, ("finished RX teardown (%d retries)", i)); 1260 } 1261 1262 static void 1263 cpsw_tx_teardown(struct cpsw_softc *sc) 1264 { 1265 int i = 0; 1266 1267 CPSW_TX_LOCK(sc); 1268 CPSW_DEBUGF(sc, ("starting TX teardown")); 1269 /* Start the TX queue teardown if queue is not empty. */ 1270 if (STAILQ_FIRST(&sc->tx.active) != NULL) 1271 cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0); 1272 else 1273 sc->tx.teardown = 1; 1274 cpsw_tx_dequeue(sc); 1275 while (sc->tx.running && ++i < 10) { 1276 DELAY(200); 1277 cpsw_tx_dequeue(sc); 1278 } 1279 if (sc->tx.running) { 1280 device_printf(sc->dev, 1281 "Unable to cleanly shutdown transmitter\n"); 1282 } 1283 CPSW_DEBUGF(sc, 1284 ("finished TX teardown (%d retries, %d idle buffers)", i, 1285 sc->tx.active_queue_len)); 1286 CPSW_TX_UNLOCK(sc); 1287 } 1288 1289 static void 1290 cpswp_stop_locked(struct cpswp_softc *sc) 1291 { 1292 if_t ifp; 1293 uint32_t reg; 1294 1295 ifp = sc->ifp; 1296 CPSW_DEBUGF(sc->swsc, ("")); 1297 CPSW_PORT_LOCK_ASSERT(sc); 1298 1299 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) 1300 return; 1301 1302 /* Disable interface */ 1303 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1304 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1305 1306 /* Stop ticker */ 1307 callout_stop(&sc->mii_callout); 1308 1309 /* Tear down the RX/TX queues. */ 1310 if (cpsw_ports_down(sc->swsc)) { 1311 cpsw_rx_teardown(sc->swsc); 1312 cpsw_tx_teardown(sc->swsc); 1313 } 1314 1315 /* Stop MAC RX/TX modules. */ 1316 reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); 1317 reg &= ~CPSW_SL_MACTL_GMII_ENABLE; 1318 cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); 1319 1320 if (cpsw_ports_down(sc->swsc)) { 1321 /* Capture stats before we reset controller. */ 1322 cpsw_stats_collect(sc->swsc); 1323 1324 cpsw_reset(sc->swsc); 1325 cpsw_init(sc->swsc); 1326 } 1327 } 1328 1329 /* 1330 * Suspend/Resume. 1331 */ 1332 1333 static int 1334 cpsw_suspend(device_t dev) 1335 { 1336 struct cpsw_softc *sc; 1337 struct cpswp_softc *psc; 1338 int i; 1339 1340 sc = device_get_softc(dev); 1341 CPSW_DEBUGF(sc, ("")); 1342 for (i = 0; i < CPSW_PORTS; i++) { 1343 if (!sc->dualemac && i != sc->active_slave) 1344 continue; 1345 psc = device_get_softc(sc->port[i].dev); 1346 CPSW_PORT_LOCK(psc); 1347 cpswp_stop_locked(psc); 1348 CPSW_PORT_UNLOCK(psc); 1349 } 1350 1351 return (0); 1352 } 1353 1354 static int 1355 cpsw_resume(device_t dev) 1356 { 1357 struct cpsw_softc *sc; 1358 1359 sc = device_get_softc(dev); 1360 CPSW_DEBUGF(sc, ("UNIMPLEMENTED")); 1361 1362 return (0); 1363 } 1364 1365 /* 1366 * 1367 * IOCTL 1368 * 1369 */ 1370 1371 static void 1372 cpsw_set_promisc(struct cpswp_softc *sc, int set) 1373 { 1374 uint32_t reg; 1375 1376 /* 1377 * Enabling promiscuous mode requires ALE_BYPASS to be enabled. 1378 * That disables the ALE forwarding logic and causes every 1379 * packet to be sent only to the host port. In bypass mode, 1380 * the ALE processes host port transmit packets the same as in 1381 * normal mode. 1382 */ 1383 reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL); 1384 reg &= ~CPSW_ALE_CTL_BYPASS; 1385 if (set) 1386 reg |= CPSW_ALE_CTL_BYPASS; 1387 cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg); 1388 } 1389 1390 static void 1391 cpsw_set_allmulti(struct cpswp_softc *sc, int set) 1392 { 1393 if (set) { 1394 printf("All-multicast mode unimplemented\n"); 1395 } 1396 } 1397 1398 static int 1399 cpswp_ioctl(if_t ifp, u_long command, caddr_t data) 1400 { 1401 struct cpswp_softc *sc; 1402 struct ifreq *ifr; 1403 int error; 1404 uint32_t changed; 1405 1406 error = 0; 1407 sc = if_getsoftc(ifp); 1408 ifr = (struct ifreq *)data; 1409 1410 switch (command) { 1411 case SIOCSIFCAP: 1412 changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap; 1413 if (changed & IFCAP_HWCSUM) { 1414 if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) 1415 if_setcapenablebit(ifp, IFCAP_HWCSUM, 0); 1416 else 1417 if_setcapenablebit(ifp, 0, IFCAP_HWCSUM); 1418 } 1419 error = 0; 1420 break; 1421 case SIOCSIFFLAGS: 1422 CPSW_PORT_LOCK(sc); 1423 if (if_getflags(ifp) & IFF_UP) { 1424 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1425 changed = if_getflags(ifp) ^ sc->if_flags; 1426 CPSW_DEBUGF(sc->swsc, 1427 ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", 1428 changed)); 1429 if (changed & IFF_PROMISC) 1430 cpsw_set_promisc(sc, 1431 if_getflags(ifp) & IFF_PROMISC); 1432 if (changed & IFF_ALLMULTI) 1433 cpsw_set_allmulti(sc, 1434 if_getflags(ifp) & IFF_ALLMULTI); 1435 } else { 1436 CPSW_DEBUGF(sc->swsc, 1437 ("SIOCSIFFLAGS: starting up")); 1438 cpswp_init_locked(sc); 1439 } 1440 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1441 CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: shutting down")); 1442 cpswp_stop_locked(sc); 1443 } 1444 1445 sc->if_flags = if_getflags(ifp); 1446 CPSW_PORT_UNLOCK(sc); 1447 break; 1448 case SIOCADDMULTI: 1449 cpswp_ale_update_addresses(sc, 0); 1450 break; 1451 case SIOCDELMULTI: 1452 /* Ugh. DELMULTI doesn't provide the specific address 1453 being removed, so the best we can do is remove 1454 everything and rebuild it all. */ 1455 cpswp_ale_update_addresses(sc, 1); 1456 break; 1457 case SIOCGIFMEDIA: 1458 case SIOCSIFMEDIA: 1459 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command); 1460 break; 1461 default: 1462 error = ether_ioctl(ifp, command, data); 1463 } 1464 return (error); 1465 } 1466 1467 /* 1468 * 1469 * MIIBUS 1470 * 1471 */ 1472 static int 1473 cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg) 1474 { 1475 uint32_t r, retries = CPSW_MIIBUS_RETRIES; 1476 1477 while (--retries) { 1478 r = cpsw_read_4(sc, reg); 1479 if ((r & MDIO_PHYACCESS_GO) == 0) 1480 return (1); 1481 DELAY(CPSW_MIIBUS_DELAY); 1482 } 1483 1484 return (0); 1485 } 1486 1487 static int 1488 cpswp_miibus_readreg(device_t dev, int phy, int reg) 1489 { 1490 struct cpswp_softc *sc; 1491 uint32_t cmd, r; 1492 1493 sc = device_get_softc(dev); 1494 if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { 1495 device_printf(dev, "MDIO not ready to read\n"); 1496 return (0); 1497 } 1498 1499 /* Set GO, reg, phy */ 1500 cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16; 1501 cpsw_write_4(sc->swsc, sc->phyaccess, cmd); 1502 1503 if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { 1504 device_printf(dev, "MDIO timed out during read\n"); 1505 return (0); 1506 } 1507 1508 r = cpsw_read_4(sc->swsc, sc->phyaccess); 1509 if ((r & MDIO_PHYACCESS_ACK) == 0) { 1510 device_printf(dev, "Failed to read from PHY.\n"); 1511 r = 0; 1512 } 1513 return (r & 0xFFFF); 1514 } 1515 1516 static int 1517 cpswp_miibus_writereg(device_t dev, int phy, int reg, int value) 1518 { 1519 struct cpswp_softc *sc; 1520 uint32_t cmd; 1521 1522 sc = device_get_softc(dev); 1523 if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { 1524 device_printf(dev, "MDIO not ready to write\n"); 1525 return (0); 1526 } 1527 1528 /* Set GO, WRITE, reg, phy, and value */ 1529 cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE | 1530 (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF); 1531 cpsw_write_4(sc->swsc, sc->phyaccess, cmd); 1532 1533 if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { 1534 device_printf(dev, "MDIO timed out during write\n"); 1535 return (0); 1536 } 1537 1538 return (0); 1539 } 1540 1541 static void 1542 cpswp_miibus_statchg(device_t dev) 1543 { 1544 struct cpswp_softc *sc; 1545 uint32_t mac_control, reg; 1546 1547 sc = device_get_softc(dev); 1548 CPSW_DEBUGF(sc->swsc, ("")); 1549 1550 reg = CPSW_SL_MACCONTROL(sc->unit); 1551 mac_control = cpsw_read_4(sc->swsc, reg); 1552 mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A | 1553 CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX); 1554 1555 switch(IFM_SUBTYPE(sc->mii->mii_media_active)) { 1556 case IFM_1000_SX: 1557 case IFM_1000_LX: 1558 case IFM_1000_CX: 1559 case IFM_1000_T: 1560 mac_control |= CPSW_SL_MACTL_GIG; 1561 break; 1562 1563 case IFM_100_TX: 1564 mac_control |= CPSW_SL_MACTL_IFCTL_A; 1565 break; 1566 } 1567 if (sc->mii->mii_media_active & IFM_FDX) 1568 mac_control |= CPSW_SL_MACTL_FULLDUPLEX; 1569 1570 cpsw_write_4(sc->swsc, reg, mac_control); 1571 } 1572 1573 /* 1574 * 1575 * Transmit/Receive Packets. 1576 * 1577 */ 1578 static void 1579 cpsw_intr_rx(void *arg) 1580 { 1581 struct cpsw_softc *sc; 1582 if_t ifp; 1583 struct mbuf *received, *next; 1584 1585 sc = (struct cpsw_softc *)arg; 1586 CPSW_RX_LOCK(sc); 1587 if (sc->rx.teardown) { 1588 sc->rx.running = 0; 1589 sc->rx.teardown = 0; 1590 cpsw_write_cp(sc, &sc->rx, 0xfffffffc); 1591 } 1592 received = cpsw_rx_dequeue(sc); 1593 cpsw_rx_enqueue(sc); 1594 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1); 1595 CPSW_RX_UNLOCK(sc); 1596 1597 while (received != NULL) { 1598 next = received->m_nextpkt; 1599 received->m_nextpkt = NULL; 1600 ifp = received->m_pkthdr.rcvif; 1601 if_input(ifp, received); 1602 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1603 received = next; 1604 } 1605 } 1606 1607 static struct mbuf * 1608 cpsw_rx_dequeue(struct cpsw_softc *sc) 1609 { 1610 int nsegs, port, removed; 1611 struct cpsw_cpdma_bd bd; 1612 struct cpsw_slot *last, *slot; 1613 struct cpswp_softc *psc; 1614 struct mbuf *m, *m0, *mb_head, *mb_tail; 1615 uint16_t m0_flags; 1616 1617 nsegs = 0; 1618 m0 = NULL; 1619 last = NULL; 1620 mb_head = NULL; 1621 mb_tail = NULL; 1622 removed = 0; 1623 1624 /* Pull completed packets off hardware RX queue. */ 1625 while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) { 1626 cpsw_cpdma_read_bd(sc, slot, &bd); 1627 1628 /* 1629 * Stop on packets still in use by hardware, but do not stop 1630 * on packets with the teardown complete flag, they will be 1631 * discarded later. 1632 */ 1633 if ((bd.flags & (CPDMA_BD_OWNER | CPDMA_BD_TDOWNCMPLT)) == 1634 CPDMA_BD_OWNER) 1635 break; 1636 1637 last = slot; 1638 ++removed; 1639 STAILQ_REMOVE_HEAD(&sc->rx.active, next); 1640 STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next); 1641 1642 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD); 1643 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1644 1645 m = slot->mbuf; 1646 slot->mbuf = NULL; 1647 1648 if (bd.flags & CPDMA_BD_TDOWNCMPLT) { 1649 CPSW_DEBUGF(sc, ("RX teardown is complete")); 1650 m_freem(m); 1651 sc->rx.running = 0; 1652 sc->rx.teardown = 0; 1653 break; 1654 } 1655 1656 port = (bd.flags & CPDMA_BD_PORT_MASK) - 1; 1657 KASSERT(port >= 0 && port <= 1, 1658 ("patcket received with invalid port: %d", port)); 1659 psc = device_get_softc(sc->port[port].dev); 1660 1661 /* Set up mbuf */ 1662 m->m_data += bd.bufoff; 1663 m->m_len = bd.buflen; 1664 if (bd.flags & CPDMA_BD_SOP) { 1665 m->m_pkthdr.len = bd.pktlen; 1666 m->m_pkthdr.rcvif = psc->ifp; 1667 m->m_flags |= M_PKTHDR; 1668 m0_flags = bd.flags; 1669 m0 = m; 1670 } 1671 nsegs++; 1672 m->m_next = NULL; 1673 m->m_nextpkt = NULL; 1674 if (bd.flags & CPDMA_BD_EOP && m0 != NULL) { 1675 if (m0_flags & CPDMA_BD_PASS_CRC) 1676 m_adj(m0, -ETHER_CRC_LEN); 1677 m0_flags = 0; 1678 m0 = NULL; 1679 if (nsegs > sc->rx.longest_chain) 1680 sc->rx.longest_chain = nsegs; 1681 nsegs = 0; 1682 } 1683 1684 if ((if_getcapenable(psc->ifp) & IFCAP_RXCSUM) != 0) { 1685 /* check for valid CRC by looking into pkt_err[5:4] */ 1686 if ((bd.flags & 1687 (CPDMA_BD_SOP | CPDMA_BD_PKT_ERR_MASK)) == 1688 CPDMA_BD_SOP) { 1689 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1690 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1691 m->m_pkthdr.csum_data = 0xffff; 1692 } 1693 } 1694 1695 if (STAILQ_FIRST(&sc->rx.active) != NULL && 1696 (bd.flags & (CPDMA_BD_EOP | CPDMA_BD_EOQ)) == 1697 (CPDMA_BD_EOP | CPDMA_BD_EOQ)) { 1698 cpsw_write_hdp_slot(sc, &sc->rx, 1699 STAILQ_FIRST(&sc->rx.active)); 1700 sc->rx.queue_restart++; 1701 } 1702 1703 /* Add mbuf to packet list to be returned. */ 1704 if (mb_tail != NULL && (bd.flags & CPDMA_BD_SOP)) { 1705 mb_tail->m_nextpkt = m; 1706 } else if (mb_tail != NULL) { 1707 mb_tail->m_next = m; 1708 } else if (mb_tail == NULL && (bd.flags & CPDMA_BD_SOP) == 0) { 1709 if (bootverbose) 1710 printf( 1711 "%s: %s: discanding fragment packet w/o header\n", 1712 __func__, if_name(psc->ifp)); 1713 m_freem(m); 1714 continue; 1715 } else { 1716 mb_head = m; 1717 } 1718 mb_tail = m; 1719 } 1720 1721 if (removed != 0) { 1722 cpsw_write_cp_slot(sc, &sc->rx, last); 1723 sc->rx.queue_removes += removed; 1724 sc->rx.avail_queue_len += removed; 1725 sc->rx.active_queue_len -= removed; 1726 if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len) 1727 sc->rx.max_avail_queue_len = sc->rx.avail_queue_len; 1728 CPSW_DEBUGF(sc, ("Removed %d received packet(s) from RX queue", removed)); 1729 } 1730 1731 return (mb_head); 1732 } 1733 1734 static void 1735 cpsw_rx_enqueue(struct cpsw_softc *sc) 1736 { 1737 bus_dma_segment_t seg[1]; 1738 struct cpsw_cpdma_bd bd; 1739 struct cpsw_slot *first_new_slot, *last_old_slot, *next, *slot; 1740 int error, nsegs, added = 0; 1741 1742 /* Register new mbufs with hardware. */ 1743 first_new_slot = NULL; 1744 last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next); 1745 while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) { 1746 if (first_new_slot == NULL) 1747 first_new_slot = slot; 1748 if (slot->mbuf == NULL) { 1749 slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1750 if (slot->mbuf == NULL) { 1751 device_printf(sc->dev, 1752 "Unable to fill RX queue\n"); 1753 break; 1754 } 1755 slot->mbuf->m_len = 1756 slot->mbuf->m_pkthdr.len = 1757 slot->mbuf->m_ext.ext_size; 1758 } 1759 1760 error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap, 1761 slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT); 1762 1763 KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs)); 1764 KASSERT(error == 0, ("DMA error (error=%d)", error)); 1765 if (error != 0 || nsegs != 1) { 1766 device_printf(sc->dev, 1767 "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n", 1768 __func__, nsegs, error); 1769 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1770 m_freem(slot->mbuf); 1771 slot->mbuf = NULL; 1772 break; 1773 } 1774 1775 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD); 1776 1777 /* Create and submit new rx descriptor. */ 1778 if ((next = STAILQ_NEXT(slot, next)) != NULL) 1779 bd.next = cpsw_cpdma_bd_paddr(sc, next); 1780 else 1781 bd.next = 0; 1782 bd.bufptr = seg->ds_addr; 1783 bd.bufoff = 0; 1784 bd.buflen = MCLBYTES - 1; 1785 bd.pktlen = bd.buflen; 1786 bd.flags = CPDMA_BD_OWNER; 1787 cpsw_cpdma_write_bd(sc, slot, &bd); 1788 ++added; 1789 1790 STAILQ_REMOVE_HEAD(&sc->rx.avail, next); 1791 STAILQ_INSERT_TAIL(&sc->rx.active, slot, next); 1792 } 1793 1794 if (added == 0 || first_new_slot == NULL) 1795 return; 1796 1797 CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added)); 1798 1799 /* Link new entries to hardware RX queue. */ 1800 if (last_old_slot == NULL) { 1801 /* Start a fresh queue. */ 1802 cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); 1803 } else { 1804 /* Add buffers to end of current queue. */ 1805 cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot); 1806 } 1807 sc->rx.queue_adds += added; 1808 sc->rx.avail_queue_len -= added; 1809 sc->rx.active_queue_len += added; 1810 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), added); 1811 if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) 1812 sc->rx.max_active_queue_len = sc->rx.active_queue_len; 1813 } 1814 1815 static void 1816 cpswp_start(if_t ifp) 1817 { 1818 struct cpswp_softc *sc; 1819 1820 sc = if_getsoftc(ifp); 1821 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 || 1822 sc->swsc->tx.running == 0) { 1823 return; 1824 } 1825 CPSW_TX_LOCK(sc->swsc); 1826 cpswp_tx_enqueue(sc); 1827 cpsw_tx_dequeue(sc->swsc); 1828 CPSW_TX_UNLOCK(sc->swsc); 1829 } 1830 1831 static void 1832 cpsw_intr_tx(void *arg) 1833 { 1834 struct cpsw_softc *sc; 1835 1836 sc = (struct cpsw_softc *)arg; 1837 CPSW_TX_LOCK(sc); 1838 if (cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)) == 0xfffffffc) 1839 cpsw_write_cp(sc, &sc->tx, 0xfffffffc); 1840 cpsw_tx_dequeue(sc); 1841 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 2); 1842 CPSW_TX_UNLOCK(sc); 1843 } 1844 1845 static void 1846 cpswp_tx_enqueue(struct cpswp_softc *sc) 1847 { 1848 bus_dma_segment_t segs[CPSW_TXFRAGS]; 1849 struct cpsw_cpdma_bd bd; 1850 struct cpsw_slot *first_new_slot, *last, *last_old_slot, *next, *slot; 1851 struct mbuf *m0; 1852 int error, nsegs, seg, added = 0, padlen; 1853 1854 /* Pull pending packets from IF queue and prep them for DMA. */ 1855 last = NULL; 1856 first_new_slot = NULL; 1857 last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next); 1858 while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) { 1859 m0 = if_dequeue(sc->ifp); 1860 if (m0 == NULL) 1861 break; 1862 1863 slot->mbuf = m0; 1864 padlen = ETHER_MIN_LEN - ETHER_CRC_LEN - m0->m_pkthdr.len; 1865 if (padlen < 0) 1866 padlen = 0; 1867 else if (padlen > 0) 1868 m_append(slot->mbuf, padlen, sc->swsc->nullpad); 1869 1870 /* Create mapping in DMA memory */ 1871 error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag, 1872 slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); 1873 /* If the packet is too fragmented, try to simplify. */ 1874 if (error == EFBIG || 1875 (error == 0 && nsegs > sc->swsc->tx.avail_queue_len)) { 1876 bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); 1877 m0 = m_defrag(slot->mbuf, M_NOWAIT); 1878 if (m0 == NULL) { 1879 device_printf(sc->dev, 1880 "Can't defragment packet; dropping\n"); 1881 m_freem(slot->mbuf); 1882 } else { 1883 CPSW_DEBUGF(sc->swsc, 1884 ("Requeueing defragmented packet")); 1885 if_sendq_prepend(sc->ifp, m0); 1886 } 1887 slot->mbuf = NULL; 1888 continue; 1889 } 1890 if (error != 0) { 1891 device_printf(sc->dev, 1892 "%s: Can't setup DMA (error=%d), dropping packet\n", 1893 __func__, error); 1894 bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); 1895 m_freem(slot->mbuf); 1896 slot->mbuf = NULL; 1897 break; 1898 } 1899 1900 bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap, 1901 BUS_DMASYNC_PREWRITE); 1902 1903 CPSW_DEBUGF(sc->swsc, 1904 ("Queueing TX packet: %d segments + %d pad bytes", 1905 nsegs, padlen)); 1906 1907 if (first_new_slot == NULL) 1908 first_new_slot = slot; 1909 1910 /* Link from the previous descriptor. */ 1911 if (last != NULL) 1912 cpsw_cpdma_write_bd_next(sc->swsc, last, slot); 1913 1914 slot->ifp = sc->ifp; 1915 1916 /* If there is only one segment, the for() loop 1917 * gets skipped and the single buffer gets set up 1918 * as both SOP and EOP. */ 1919 if (nsegs > 1) { 1920 next = STAILQ_NEXT(slot, next); 1921 bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); 1922 } else 1923 bd.next = 0; 1924 /* Start by setting up the first buffer. */ 1925 bd.bufptr = segs[0].ds_addr; 1926 bd.bufoff = 0; 1927 bd.buflen = segs[0].ds_len; 1928 bd.pktlen = m_length(slot->mbuf, NULL); 1929 bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER; 1930 if (sc->swsc->dualemac) { 1931 bd.flags |= CPDMA_BD_TO_PORT; 1932 bd.flags |= ((sc->unit + 1) & CPDMA_BD_PORT_MASK); 1933 } 1934 for (seg = 1; seg < nsegs; ++seg) { 1935 /* Save the previous buffer (which isn't EOP) */ 1936 cpsw_cpdma_write_bd(sc->swsc, slot, &bd); 1937 STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); 1938 STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); 1939 slot = STAILQ_FIRST(&sc->swsc->tx.avail); 1940 1941 /* Setup next buffer (which isn't SOP) */ 1942 if (nsegs > seg + 1) { 1943 next = STAILQ_NEXT(slot, next); 1944 bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); 1945 } else 1946 bd.next = 0; 1947 bd.bufptr = segs[seg].ds_addr; 1948 bd.bufoff = 0; 1949 bd.buflen = segs[seg].ds_len; 1950 bd.pktlen = 0; 1951 bd.flags = CPDMA_BD_OWNER; 1952 } 1953 1954 /* Save the final buffer. */ 1955 bd.flags |= CPDMA_BD_EOP; 1956 cpsw_cpdma_write_bd(sc->swsc, slot, &bd); 1957 STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); 1958 STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); 1959 1960 last = slot; 1961 added += nsegs; 1962 if (nsegs > sc->swsc->tx.longest_chain) 1963 sc->swsc->tx.longest_chain = nsegs; 1964 1965 BPF_MTAP(sc->ifp, m0); 1966 } 1967 1968 if (first_new_slot == NULL) 1969 return; 1970 1971 /* Attach the list of new buffers to the hardware TX queue. */ 1972 if (last_old_slot != NULL && 1973 (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) & 1974 CPDMA_BD_EOQ) == 0) { 1975 /* Add buffers to end of current queue. */ 1976 cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot, 1977 first_new_slot); 1978 } else { 1979 /* Start a fresh queue. */ 1980 cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot); 1981 } 1982 sc->swsc->tx.queue_adds += added; 1983 sc->swsc->tx.avail_queue_len -= added; 1984 sc->swsc->tx.active_queue_len += added; 1985 if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) { 1986 sc->swsc->tx.max_active_queue_len = sc->swsc->tx.active_queue_len; 1987 } 1988 CPSW_DEBUGF(sc->swsc, ("Queued %d TX packet(s)", added)); 1989 } 1990 1991 static int 1992 cpsw_tx_dequeue(struct cpsw_softc *sc) 1993 { 1994 struct cpsw_slot *slot, *last_removed_slot = NULL; 1995 struct cpsw_cpdma_bd bd; 1996 uint32_t flags, removed = 0; 1997 1998 /* Pull completed buffers off the hardware TX queue. */ 1999 slot = STAILQ_FIRST(&sc->tx.active); 2000 while (slot != NULL) { 2001 flags = cpsw_cpdma_read_bd_flags(sc, slot); 2002 2003 /* TearDown complete is only marked on the SOP for the packet. */ 2004 if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) == 2005 (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) { 2006 sc->tx.teardown = 1; 2007 } 2008 2009 if ((flags & (CPDMA_BD_SOP | CPDMA_BD_OWNER)) == 2010 (CPDMA_BD_SOP | CPDMA_BD_OWNER) && sc->tx.teardown == 0) 2011 break; /* Hardware is still using this packet. */ 2012 2013 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE); 2014 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 2015 m_freem(slot->mbuf); 2016 slot->mbuf = NULL; 2017 2018 if (slot->ifp) { 2019 if (sc->tx.teardown == 0) 2020 if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1); 2021 else 2022 if_inc_counter(slot->ifp, IFCOUNTER_OQDROPS, 1); 2023 } 2024 2025 /* Dequeue any additional buffers used by this packet. */ 2026 while (slot != NULL && slot->mbuf == NULL) { 2027 STAILQ_REMOVE_HEAD(&sc->tx.active, next); 2028 STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next); 2029 ++removed; 2030 last_removed_slot = slot; 2031 slot = STAILQ_FIRST(&sc->tx.active); 2032 } 2033 2034 cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot); 2035 2036 /* Restart the TX queue if necessary. */ 2037 cpsw_cpdma_read_bd(sc, last_removed_slot, &bd); 2038 if (slot != NULL && bd.next != 0 && (bd.flags & 2039 (CPDMA_BD_EOP | CPDMA_BD_OWNER | CPDMA_BD_EOQ)) == 2040 (CPDMA_BD_EOP | CPDMA_BD_EOQ)) { 2041 cpsw_write_hdp_slot(sc, &sc->tx, slot); 2042 sc->tx.queue_restart++; 2043 break; 2044 } 2045 } 2046 2047 if (removed != 0) { 2048 sc->tx.queue_removes += removed; 2049 sc->tx.active_queue_len -= removed; 2050 sc->tx.avail_queue_len += removed; 2051 if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len) 2052 sc->tx.max_avail_queue_len = sc->tx.avail_queue_len; 2053 CPSW_DEBUGF(sc, ("TX removed %d completed packet(s)", removed)); 2054 } 2055 2056 if (sc->tx.teardown && STAILQ_EMPTY(&sc->tx.active)) { 2057 CPSW_DEBUGF(sc, ("TX teardown is complete")); 2058 sc->tx.teardown = 0; 2059 sc->tx.running = 0; 2060 } 2061 2062 return (removed); 2063 } 2064 2065 /* 2066 * 2067 * Miscellaneous interrupts. 2068 * 2069 */ 2070 2071 static void 2072 cpsw_intr_rx_thresh(void *arg) 2073 { 2074 struct cpsw_softc *sc; 2075 if_t ifp; 2076 struct mbuf *received, *next; 2077 2078 sc = (struct cpsw_softc *)arg; 2079 CPSW_RX_LOCK(sc); 2080 received = cpsw_rx_dequeue(sc); 2081 cpsw_rx_enqueue(sc); 2082 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0); 2083 CPSW_RX_UNLOCK(sc); 2084 2085 while (received != NULL) { 2086 next = received->m_nextpkt; 2087 received->m_nextpkt = NULL; 2088 ifp = received->m_pkthdr.rcvif; 2089 if_input(ifp, received); 2090 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 2091 received = next; 2092 } 2093 } 2094 2095 static void 2096 cpsw_intr_misc_host_error(struct cpsw_softc *sc) 2097 { 2098 uint32_t intstat; 2099 uint32_t dmastat; 2100 int txerr, rxerr, txchan, rxchan; 2101 2102 printf("\n\n"); 2103 device_printf(sc->dev, 2104 "HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n"); 2105 printf("\n\n"); 2106 intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED); 2107 device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat); 2108 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS); 2109 device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat); 2110 2111 txerr = (dmastat >> 20) & 15; 2112 txchan = (dmastat >> 16) & 7; 2113 rxerr = (dmastat >> 12) & 15; 2114 rxchan = (dmastat >> 8) & 7; 2115 2116 switch (txerr) { 2117 case 0: break; 2118 case 1: printf("SOP error on TX channel %d\n", txchan); 2119 break; 2120 case 2: printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan); 2121 break; 2122 case 3: printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan); 2123 break; 2124 case 4: printf("Zero Buffer Pointer on TX channel %d\n", txchan); 2125 break; 2126 case 5: printf("Zero Buffer Length on TX channel %d\n", txchan); 2127 break; 2128 case 6: printf("Packet length error on TX channel %d\n", txchan); 2129 break; 2130 default: printf("Unknown error on TX channel %d\n", txchan); 2131 break; 2132 } 2133 2134 if (txerr != 0) { 2135 printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 2136 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan))); 2137 printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 2138 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan))); 2139 cpsw_dump_queue(sc, &sc->tx.active); 2140 } 2141 2142 switch (rxerr) { 2143 case 0: break; 2144 case 2: printf("Ownership bit not set on RX channel %d\n", rxchan); 2145 break; 2146 case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan); 2147 break; 2148 case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan); 2149 break; 2150 case 6: printf("Buffer offset too big on RX channel %d\n", rxchan); 2151 break; 2152 default: printf("Unknown RX error on RX channel %d\n", rxchan); 2153 break; 2154 } 2155 2156 if (rxerr != 0) { 2157 printf("CPSW_CPDMA_RX%d_HDP=0x%x\n", 2158 rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan))); 2159 printf("CPSW_CPDMA_RX%d_CP=0x%x\n", 2160 rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan))); 2161 cpsw_dump_queue(sc, &sc->rx.active); 2162 } 2163 2164 printf("\nALE Table\n"); 2165 cpsw_ale_dump_table(sc); 2166 2167 // XXX do something useful here?? 2168 panic("CPSW HOST ERROR INTERRUPT"); 2169 2170 // Suppress this interrupt in the future. 2171 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat); 2172 printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n"); 2173 // The watchdog will probably reset the controller 2174 // in a little while. It will probably fail again. 2175 } 2176 2177 static void 2178 cpsw_intr_misc(void *arg) 2179 { 2180 struct cpsw_softc *sc = arg; 2181 uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0)); 2182 2183 if (stat & CPSW_WR_C_MISC_EVNT_PEND) 2184 CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented")); 2185 if (stat & CPSW_WR_C_MISC_STAT_PEND) 2186 cpsw_stats_collect(sc); 2187 if (stat & CPSW_WR_C_MISC_HOST_PEND) 2188 cpsw_intr_misc_host_error(sc); 2189 if (stat & CPSW_WR_C_MISC_MDIOLINK) { 2190 cpsw_write_4(sc, MDIOLINKINTMASKED, 2191 cpsw_read_4(sc, MDIOLINKINTMASKED)); 2192 } 2193 if (stat & CPSW_WR_C_MISC_MDIOUSER) { 2194 CPSW_DEBUGF(sc, 2195 ("MDIO operation completed interrupt unimplemented")); 2196 } 2197 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3); 2198 } 2199 2200 /* 2201 * 2202 * Periodic Checks and Watchdog. 2203 * 2204 */ 2205 2206 static void 2207 cpswp_tick(void *msc) 2208 { 2209 struct cpswp_softc *sc = msc; 2210 2211 /* Check for media type change */ 2212 mii_tick(sc->mii); 2213 if (sc->media_status != sc->mii->mii_media.ifm_media) { 2214 printf("%s: media type changed (ifm_media=%x)\n", __func__, 2215 sc->mii->mii_media.ifm_media); 2216 cpswp_ifmedia_upd(sc->ifp); 2217 } 2218 2219 /* Schedule another timeout one second from now */ 2220 callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); 2221 } 2222 2223 static void 2224 cpswp_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) 2225 { 2226 struct cpswp_softc *sc; 2227 struct mii_data *mii; 2228 2229 sc = if_getsoftc(ifp); 2230 CPSW_DEBUGF(sc->swsc, ("")); 2231 CPSW_PORT_LOCK(sc); 2232 2233 mii = sc->mii; 2234 mii_pollstat(mii); 2235 2236 ifmr->ifm_active = mii->mii_media_active; 2237 ifmr->ifm_status = mii->mii_media_status; 2238 CPSW_PORT_UNLOCK(sc); 2239 } 2240 2241 static int 2242 cpswp_ifmedia_upd(if_t ifp) 2243 { 2244 struct cpswp_softc *sc; 2245 2246 sc = if_getsoftc(ifp); 2247 CPSW_DEBUGF(sc->swsc, ("")); 2248 CPSW_PORT_LOCK(sc); 2249 mii_mediachg(sc->mii); 2250 sc->media_status = sc->mii->mii_media.ifm_media; 2251 CPSW_PORT_UNLOCK(sc); 2252 2253 return (0); 2254 } 2255 2256 static void 2257 cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc) 2258 { 2259 struct cpswp_softc *psc; 2260 int i; 2261 2262 cpsw_debugf_head("CPSW watchdog"); 2263 device_printf(sc->dev, "watchdog timeout\n"); 2264 printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 0, 2265 cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0))); 2266 printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 0, 2267 cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0))); 2268 cpsw_dump_queue(sc, &sc->tx.active); 2269 for (i = 0; i < CPSW_PORTS; i++) { 2270 if (!sc->dualemac && i != sc->active_slave) 2271 continue; 2272 psc = device_get_softc(sc->port[i].dev); 2273 CPSW_PORT_LOCK(psc); 2274 cpswp_stop_locked(psc); 2275 CPSW_PORT_UNLOCK(psc); 2276 } 2277 } 2278 2279 static void 2280 cpsw_tx_watchdog(void *msc) 2281 { 2282 struct cpsw_softc *sc; 2283 2284 sc = msc; 2285 CPSW_TX_LOCK(sc); 2286 if (sc->tx.active_queue_len == 0 || !sc->tx.running) { 2287 sc->watchdog.timer = 0; /* Nothing to do. */ 2288 } else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) { 2289 sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */ 2290 } else if (cpsw_tx_dequeue(sc) > 0) { 2291 sc->watchdog.timer = 0; /* We just did something. */ 2292 } else { 2293 /* There was something to do but it didn't get done. */ 2294 ++sc->watchdog.timer; 2295 if (sc->watchdog.timer > 5) { 2296 sc->watchdog.timer = 0; 2297 ++sc->watchdog.resets; 2298 cpsw_tx_watchdog_full_reset(sc); 2299 } 2300 } 2301 sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes; 2302 CPSW_TX_UNLOCK(sc); 2303 2304 /* Schedule another timeout one second from now */ 2305 callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); 2306 } 2307 2308 /* 2309 * 2310 * ALE support routines. 2311 * 2312 */ 2313 2314 static void 2315 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) 2316 { 2317 cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023); 2318 ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0); 2319 ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1); 2320 ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2); 2321 } 2322 2323 static void 2324 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) 2325 { 2326 cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]); 2327 cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]); 2328 cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]); 2329 cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023)); 2330 } 2331 2332 static void 2333 cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc) 2334 { 2335 int i; 2336 uint32_t ale_entry[3]; 2337 2338 /* First four entries are link address and broadcast. */ 2339 for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { 2340 cpsw_ale_read_entry(sc, i, ale_entry); 2341 if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR || 2342 ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) && 2343 ALE_MCAST(ale_entry) == 1) { /* MCast link addr */ 2344 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; 2345 cpsw_ale_write_entry(sc, i, ale_entry); 2346 } 2347 } 2348 } 2349 2350 static int 2351 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan, 2352 uint8_t *mac) 2353 { 2354 int free_index = -1, matching_index = -1, i; 2355 uint32_t ale_entry[3], ale_type; 2356 2357 /* Find a matching entry or a free entry. */ 2358 for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { 2359 cpsw_ale_read_entry(sc, i, ale_entry); 2360 2361 /* Entry Type[61:60] is 0 for free entry */ 2362 if (free_index < 0 && ALE_TYPE(ale_entry) == 0) 2363 free_index = i; 2364 2365 if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) && 2366 (((ale_entry[1] >> 0) & 0xFF) == mac[1]) && 2367 (((ale_entry[0] >>24) & 0xFF) == mac[2]) && 2368 (((ale_entry[0] >>16) & 0xFF) == mac[3]) && 2369 (((ale_entry[0] >> 8) & 0xFF) == mac[4]) && 2370 (((ale_entry[0] >> 0) & 0xFF) == mac[5])) { 2371 matching_index = i; 2372 break; 2373 } 2374 } 2375 2376 if (matching_index < 0) { 2377 if (free_index < 0) 2378 return (ENOMEM); 2379 i = free_index; 2380 } 2381 2382 if (vlan != -1) 2383 ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16; 2384 else 2385 ale_type = ALE_TYPE_ADDR << 28; 2386 2387 /* Set MAC address */ 2388 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 2389 ale_entry[1] = mac[0] << 8 | mac[1]; 2390 2391 /* Entry type[61:60] and Mcast fwd state[63:62] is fw(3). */ 2392 ale_entry[1] |= ALE_MCAST_FWD | ale_type; 2393 2394 /* Set portmask [68:66] */ 2395 ale_entry[2] = (portmap & 7) << 2; 2396 2397 cpsw_ale_write_entry(sc, i, ale_entry); 2398 2399 return 0; 2400 } 2401 2402 static void 2403 cpsw_ale_dump_table(struct cpsw_softc *sc) { 2404 int i; 2405 uint32_t ale_entry[3]; 2406 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { 2407 cpsw_ale_read_entry(sc, i, ale_entry); 2408 switch (ALE_TYPE(ale_entry)) { 2409 case ALE_TYPE_VLAN: 2410 printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], 2411 ale_entry[1], ale_entry[0]); 2412 printf("type: %u ", ALE_TYPE(ale_entry)); 2413 printf("vlan: %u ", ALE_VLAN(ale_entry)); 2414 printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry)); 2415 printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry)); 2416 printf("unreg flood: %u ", ALE_VLAN_UNREGFLOOD(ale_entry)); 2417 printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry)); 2418 printf("\n"); 2419 break; 2420 case ALE_TYPE_ADDR: 2421 case ALE_TYPE_VLAN_ADDR: 2422 printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], 2423 ale_entry[1], ale_entry[0]); 2424 printf("type: %u ", ALE_TYPE(ale_entry)); 2425 printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ", 2426 (ale_entry[1] >> 8) & 0xFF, 2427 (ale_entry[1] >> 0) & 0xFF, 2428 (ale_entry[0] >>24) & 0xFF, 2429 (ale_entry[0] >>16) & 0xFF, 2430 (ale_entry[0] >> 8) & 0xFF, 2431 (ale_entry[0] >> 0) & 0xFF); 2432 printf(ALE_MCAST(ale_entry) ? "mcast " : "ucast "); 2433 if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) 2434 printf("vlan: %u ", ALE_VLAN(ale_entry)); 2435 printf("port: %u ", ALE_PORTS(ale_entry)); 2436 printf("\n"); 2437 break; 2438 } 2439 } 2440 printf("\n"); 2441 } 2442 2443 static u_int 2444 cpswp_set_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 2445 { 2446 struct cpswp_softc *sc = arg; 2447 uint32_t portmask; 2448 2449 if (sc->swsc->dualemac) 2450 portmask = 1 << (sc->unit + 1) | 1 << 0; 2451 else 2452 portmask = 7; 2453 2454 cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan, LLADDR(sdl)); 2455 2456 return (1); 2457 } 2458 2459 static int 2460 cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge) 2461 { 2462 uint8_t *mac; 2463 uint32_t ale_entry[3], ale_type, portmask; 2464 2465 if (sc->swsc->dualemac) { 2466 ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16; 2467 portmask = 1 << (sc->unit + 1) | 1 << 0; 2468 } else { 2469 ale_type = ALE_TYPE_ADDR << 28; 2470 portmask = 7; 2471 } 2472 2473 /* 2474 * Route incoming packets for our MAC address to Port 0 (host). 2475 * For simplicity, keep this entry at table index 0 for port 1 and 2476 * at index 2 for port 2 in the ALE. 2477 */ 2478 mac = LLADDR((struct sockaddr_dl *)if_getifaddr(sc->ifp)->ifa_addr); 2479 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 2480 ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */ 2481 ale_entry[2] = 0; /* port = 0 */ 2482 cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry); 2483 2484 /* Set outgoing MAC Address for slave port. */ 2485 cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1), 2486 mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]); 2487 cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1), 2488 mac[5] << 8 | mac[4]); 2489 2490 /* Keep the broadcast address at table entry 1 (or 3). */ 2491 ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */ 2492 /* ALE_MCAST_FWD, Addr type, upper 16 bits of Mac */ 2493 ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff; 2494 ale_entry[2] = portmask << 2; 2495 cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry); 2496 2497 /* SIOCDELMULTI doesn't specify the particular address 2498 being removed, so we have to remove all and rebuild. */ 2499 if (purge) 2500 cpsw_ale_remove_all_mc_entries(sc->swsc); 2501 2502 /* Set other multicast addrs desired. */ 2503 if_foreach_llmaddr(sc->ifp, cpswp_set_maddr, sc); 2504 2505 return (0); 2506 } 2507 2508 static int 2509 cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports, 2510 int untag, int mcregflood, int mcunregflood) 2511 { 2512 int free_index, i, matching_index; 2513 uint32_t ale_entry[3]; 2514 2515 free_index = matching_index = -1; 2516 /* Find a matching entry or a free entry. */ 2517 for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) { 2518 cpsw_ale_read_entry(sc, i, ale_entry); 2519 2520 /* Entry Type[61:60] is 0 for free entry */ 2521 if (free_index < 0 && ALE_TYPE(ale_entry) == 0) 2522 free_index = i; 2523 2524 if (ALE_VLAN(ale_entry) == vlan) { 2525 matching_index = i; 2526 break; 2527 } 2528 } 2529 2530 if (matching_index < 0) { 2531 if (free_index < 0) 2532 return (-1); 2533 i = free_index; 2534 } 2535 2536 ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 | 2537 (mcunregflood & 7) << 8 | (ports & 7); 2538 ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16; 2539 ale_entry[2] = 0; 2540 cpsw_ale_write_entry(sc, i, ale_entry); 2541 2542 return (0); 2543 } 2544 2545 /* 2546 * 2547 * Statistics and Sysctls. 2548 * 2549 */ 2550 2551 #if 0 2552 static void 2553 cpsw_stats_dump(struct cpsw_softc *sc) 2554 { 2555 int i; 2556 uint32_t r; 2557 2558 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2559 r = cpsw_read_4(sc, CPSW_STATS_OFFSET + 2560 cpsw_stat_sysctls[i].reg); 2561 CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid, 2562 (intmax_t)sc->shadow_stats[i], r, 2563 (intmax_t)sc->shadow_stats[i] + r)); 2564 } 2565 } 2566 #endif 2567 2568 static void 2569 cpsw_stats_collect(struct cpsw_softc *sc) 2570 { 2571 int i; 2572 uint32_t r; 2573 2574 CPSW_DEBUGF(sc, ("Controller shadow statistics updated.")); 2575 2576 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2577 r = cpsw_read_4(sc, CPSW_STATS_OFFSET + 2578 cpsw_stat_sysctls[i].reg); 2579 sc->shadow_stats[i] += r; 2580 cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg, 2581 r); 2582 } 2583 } 2584 2585 static int 2586 cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS) 2587 { 2588 struct cpsw_softc *sc; 2589 struct cpsw_stat *stat; 2590 uint64_t result; 2591 2592 sc = (struct cpsw_softc *)arg1; 2593 stat = &cpsw_stat_sysctls[oidp->oid_number]; 2594 result = sc->shadow_stats[oidp->oid_number]; 2595 result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg); 2596 return (sysctl_handle_64(oidp, &result, 0, req)); 2597 } 2598 2599 static int 2600 cpsw_stat_attached(SYSCTL_HANDLER_ARGS) 2601 { 2602 struct cpsw_softc *sc; 2603 struct bintime t; 2604 unsigned result; 2605 2606 sc = (struct cpsw_softc *)arg1; 2607 getbinuptime(&t); 2608 bintime_sub(&t, &sc->attach_uptime); 2609 result = t.sec; 2610 return (sysctl_handle_int(oidp, &result, 0, req)); 2611 } 2612 2613 static int 2614 cpsw_intr_coalesce(SYSCTL_HANDLER_ARGS) 2615 { 2616 int error; 2617 struct cpsw_softc *sc; 2618 uint32_t ctrl, intr_per_ms; 2619 2620 sc = (struct cpsw_softc *)arg1; 2621 error = sysctl_handle_int(oidp, &sc->coal_us, 0, req); 2622 if (error != 0 || req->newptr == NULL) 2623 return (error); 2624 2625 ctrl = cpsw_read_4(sc, CPSW_WR_INT_CONTROL); 2626 ctrl &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK); 2627 if (sc->coal_us == 0) { 2628 /* Disable the interrupt pace hardware. */ 2629 cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl); 2630 cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), 0); 2631 cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), 0); 2632 return (0); 2633 } 2634 2635 if (sc->coal_us > CPSW_WR_C_IMAX_US_MAX) 2636 sc->coal_us = CPSW_WR_C_IMAX_US_MAX; 2637 if (sc->coal_us < CPSW_WR_C_IMAX_US_MIN) 2638 sc->coal_us = CPSW_WR_C_IMAX_US_MIN; 2639 intr_per_ms = 1000 / sc->coal_us; 2640 /* Just to make sure... */ 2641 if (intr_per_ms > CPSW_WR_C_IMAX_MAX) 2642 intr_per_ms = CPSW_WR_C_IMAX_MAX; 2643 if (intr_per_ms < CPSW_WR_C_IMAX_MIN) 2644 intr_per_ms = CPSW_WR_C_IMAX_MIN; 2645 2646 /* Set the prescale to produce 4us pulses from the 125 Mhz clock. */ 2647 ctrl |= (125 * 4) & CPSW_WR_INT_PRESCALE_MASK; 2648 2649 /* Enable the interrupt pace hardware. */ 2650 cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), intr_per_ms); 2651 cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), intr_per_ms); 2652 ctrl |= CPSW_WR_INT_C0_RX_PULSE | CPSW_WR_INT_C0_TX_PULSE; 2653 cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl); 2654 2655 return (0); 2656 } 2657 2658 static int 2659 cpsw_stat_uptime(SYSCTL_HANDLER_ARGS) 2660 { 2661 struct cpsw_softc *swsc; 2662 struct cpswp_softc *sc; 2663 struct bintime t; 2664 unsigned result; 2665 2666 swsc = arg1; 2667 sc = device_get_softc(swsc->port[arg2].dev); 2668 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { 2669 getbinuptime(&t); 2670 bintime_sub(&t, &sc->init_uptime); 2671 result = t.sec; 2672 } else 2673 result = 0; 2674 return (sysctl_handle_int(oidp, &result, 0, req)); 2675 } 2676 2677 static void 2678 cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, 2679 struct cpsw_queue *queue) 2680 { 2681 struct sysctl_oid_list *parent; 2682 2683 parent = SYSCTL_CHILDREN(node); 2684 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers", 2685 CTLFLAG_RD, &queue->queue_slots, 0, 2686 "Total buffers currently assigned to this queue"); 2687 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers", 2688 CTLFLAG_RD, &queue->active_queue_len, 0, 2689 "Buffers currently registered with hardware controller"); 2690 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers", 2691 CTLFLAG_RD, &queue->max_active_queue_len, 0, 2692 "Max value of activeBuffers since last driver reset"); 2693 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers", 2694 CTLFLAG_RD, &queue->avail_queue_len, 0, 2695 "Buffers allocated to this queue but not currently " 2696 "registered with hardware controller"); 2697 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers", 2698 CTLFLAG_RD, &queue->max_avail_queue_len, 0, 2699 "Max value of availBuffers since last driver reset"); 2700 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued", 2701 CTLFLAG_RD, &queue->queue_adds, 0, 2702 "Total buffers added to queue"); 2703 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued", 2704 CTLFLAG_RD, &queue->queue_removes, 0, 2705 "Total buffers removed from queue"); 2706 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "queueRestart", 2707 CTLFLAG_RD, &queue->queue_restart, 0, 2708 "Total times the queue has been restarted"); 2709 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain", 2710 CTLFLAG_RD, &queue->longest_chain, 0, 2711 "Max buffers used for a single packet"); 2712 } 2713 2714 static void 2715 cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, 2716 struct cpsw_softc *sc) 2717 { 2718 struct sysctl_oid_list *parent; 2719 2720 parent = SYSCTL_CHILDREN(node); 2721 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets", 2722 CTLFLAG_RD, &sc->watchdog.resets, 0, 2723 "Total number of watchdog resets"); 2724 } 2725 2726 static void 2727 cpsw_add_sysctls(struct cpsw_softc *sc) 2728 { 2729 struct sysctl_ctx_list *ctx; 2730 struct sysctl_oid *stats_node, *queue_node, *node; 2731 struct sysctl_oid_list *parent, *stats_parent, *queue_parent; 2732 struct sysctl_oid_list *ports_parent, *port_parent; 2733 char port[16]; 2734 int i; 2735 2736 ctx = device_get_sysctl_ctx(sc->dev); 2737 parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 2738 2739 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug", 2740 CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages"); 2741 2742 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs", 2743 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2744 sc, 0, cpsw_stat_attached, "IU", 2745 "Time since driver attach"); 2746 2747 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "intr_coalesce_us", 2748 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2749 sc, 0, cpsw_intr_coalesce, "IU", 2750 "minimum time between interrupts"); 2751 2752 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports", 2753 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Ports Statistics"); 2754 ports_parent = SYSCTL_CHILDREN(node); 2755 for (i = 0; i < CPSW_PORTS; i++) { 2756 if (!sc->dualemac && i != sc->active_slave) 2757 continue; 2758 port[0] = '0' + i; 2759 port[1] = '\0'; 2760 node = SYSCTL_ADD_NODE(ctx, ports_parent, OID_AUTO, 2761 port, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2762 "CPSW Port Statistics"); 2763 port_parent = SYSCTL_CHILDREN(node); 2764 SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime", 2765 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, i, 2766 cpsw_stat_uptime, "IU", "Seconds since driver init"); 2767 } 2768 2769 stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", 2770 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Statistics"); 2771 stats_parent = SYSCTL_CHILDREN(stats_node); 2772 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2773 SYSCTL_ADD_PROC(ctx, stats_parent, i, 2774 cpsw_stat_sysctls[i].oid, 2775 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2776 sc, 0, cpsw_stats_sysctl, "IU", 2777 cpsw_stat_sysctls[i].oid); 2778 } 2779 2780 queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue", 2781 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Queue Statistics"); 2782 queue_parent = SYSCTL_CHILDREN(queue_node); 2783 2784 node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx", 2785 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Statistics"); 2786 cpsw_add_queue_sysctls(ctx, node, &sc->tx); 2787 2788 node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx", 2789 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Statistics"); 2790 cpsw_add_queue_sysctls(ctx, node, &sc->rx); 2791 2792 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog", 2793 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Watchdog Statistics"); 2794 cpsw_add_watchdog_sysctls(ctx, node, sc); 2795 } 2796 2797 #ifdef CPSW_ETHERSWITCH 2798 static etherswitch_info_t etherswitch_info = { 2799 .es_nports = CPSW_PORTS + 1, 2800 .es_nvlangroups = CPSW_VLANS, 2801 .es_name = "TI Common Platform Ethernet Switch (CPSW)", 2802 .es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q, 2803 }; 2804 2805 static etherswitch_info_t * 2806 cpsw_getinfo(device_t dev) 2807 { 2808 return (ðerswitch_info); 2809 } 2810 2811 static int 2812 cpsw_getport(device_t dev, etherswitch_port_t *p) 2813 { 2814 int err; 2815 struct cpsw_softc *sc; 2816 struct cpswp_softc *psc; 2817 struct ifmediareq *ifmr; 2818 uint32_t reg; 2819 2820 if (p->es_port < 0 || p->es_port > CPSW_PORTS) 2821 return (ENXIO); 2822 2823 err = 0; 2824 sc = device_get_softc(dev); 2825 if (p->es_port == CPSW_CPU_PORT) { 2826 p->es_flags |= ETHERSWITCH_PORT_CPU; 2827 ifmr = &p->es_ifmr; 2828 ifmr->ifm_current = ifmr->ifm_active = 2829 IFM_ETHER | IFM_1000_T | IFM_FDX; 2830 ifmr->ifm_mask = 0; 2831 ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; 2832 ifmr->ifm_count = 0; 2833 } else { 2834 psc = device_get_softc(sc->port[p->es_port - 1].dev); 2835 err = ifmedia_ioctl(psc->ifp, &p->es_ifr, 2836 &psc->mii->mii_media, SIOCGIFMEDIA); 2837 } 2838 reg = cpsw_read_4(sc, CPSW_PORT_P_VLAN(p->es_port)); 2839 p->es_pvid = reg & ETHERSWITCH_VID_MASK; 2840 2841 reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port)); 2842 if (reg & ALE_PORTCTL_DROP_UNTAGGED) 2843 p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED; 2844 if (reg & ALE_PORTCTL_INGRESS) 2845 p->es_flags |= ETHERSWITCH_PORT_INGRESS; 2846 2847 return (err); 2848 } 2849 2850 static int 2851 cpsw_setport(device_t dev, etherswitch_port_t *p) 2852 { 2853 struct cpsw_softc *sc; 2854 struct cpswp_softc *psc; 2855 struct ifmedia *ifm; 2856 uint32_t reg; 2857 2858 if (p->es_port < 0 || p->es_port > CPSW_PORTS) 2859 return (ENXIO); 2860 2861 sc = device_get_softc(dev); 2862 if (p->es_pvid != 0) { 2863 cpsw_write_4(sc, CPSW_PORT_P_VLAN(p->es_port), 2864 p->es_pvid & ETHERSWITCH_VID_MASK); 2865 } 2866 2867 reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port)); 2868 if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED) 2869 reg |= ALE_PORTCTL_DROP_UNTAGGED; 2870 else 2871 reg &= ~ALE_PORTCTL_DROP_UNTAGGED; 2872 if (p->es_flags & ETHERSWITCH_PORT_INGRESS) 2873 reg |= ALE_PORTCTL_INGRESS; 2874 else 2875 reg &= ~ALE_PORTCTL_INGRESS; 2876 cpsw_write_4(sc, CPSW_ALE_PORTCTL(p->es_port), reg); 2877 2878 /* CPU port does not allow media settings. */ 2879 if (p->es_port == CPSW_CPU_PORT) 2880 return (0); 2881 2882 psc = device_get_softc(sc->port[p->es_port - 1].dev); 2883 ifm = &psc->mii->mii_media; 2884 2885 return (ifmedia_ioctl(psc->ifp, &p->es_ifr, ifm, SIOCSIFMEDIA)); 2886 } 2887 2888 static int 2889 cpsw_getconf(device_t dev, etherswitch_conf_t *conf) 2890 { 2891 2892 /* Return the VLAN mode. */ 2893 conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; 2894 conf->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; 2895 2896 return (0); 2897 } 2898 2899 static int 2900 cpsw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) 2901 { 2902 int i, vid; 2903 uint32_t ale_entry[3]; 2904 struct cpsw_softc *sc; 2905 2906 sc = device_get_softc(dev); 2907 2908 if (vg->es_vlangroup >= CPSW_VLANS) 2909 return (EINVAL); 2910 2911 vg->es_vid = 0; 2912 vid = cpsw_vgroups[vg->es_vlangroup].vid; 2913 if (vid == -1) 2914 return (0); 2915 2916 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { 2917 cpsw_ale_read_entry(sc, i, ale_entry); 2918 if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN) 2919 continue; 2920 if (vid != ALE_VLAN(ale_entry)) 2921 continue; 2922 2923 vg->es_fid = 0; 2924 vg->es_vid = ALE_VLAN(ale_entry) | ETHERSWITCH_VID_VALID; 2925 vg->es_member_ports = ALE_VLAN_MEMBERS(ale_entry); 2926 vg->es_untagged_ports = ALE_VLAN_UNTAG(ale_entry); 2927 } 2928 2929 return (0); 2930 } 2931 2932 static void 2933 cpsw_remove_vlan(struct cpsw_softc *sc, int vlan) 2934 { 2935 int i; 2936 uint32_t ale_entry[3]; 2937 2938 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { 2939 cpsw_ale_read_entry(sc, i, ale_entry); 2940 if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN) 2941 continue; 2942 if (vlan != ALE_VLAN(ale_entry)) 2943 continue; 2944 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; 2945 cpsw_ale_write_entry(sc, i, ale_entry); 2946 break; 2947 } 2948 } 2949 2950 static int 2951 cpsw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) 2952 { 2953 int i; 2954 struct cpsw_softc *sc; 2955 2956 sc = device_get_softc(dev); 2957 2958 for (i = 0; i < CPSW_VLANS; i++) { 2959 /* Is this Vlan ID in use by another vlangroup ? */ 2960 if (vg->es_vlangroup != i && cpsw_vgroups[i].vid == vg->es_vid) 2961 return (EINVAL); 2962 } 2963 2964 if (vg->es_vid == 0) { 2965 if (cpsw_vgroups[vg->es_vlangroup].vid == -1) 2966 return (0); 2967 cpsw_remove_vlan(sc, cpsw_vgroups[vg->es_vlangroup].vid); 2968 cpsw_vgroups[vg->es_vlangroup].vid = -1; 2969 vg->es_untagged_ports = 0; 2970 vg->es_member_ports = 0; 2971 vg->es_vid = 0; 2972 return (0); 2973 } 2974 2975 vg->es_vid &= ETHERSWITCH_VID_MASK; 2976 vg->es_member_ports &= CPSW_PORTS_MASK; 2977 vg->es_untagged_ports &= CPSW_PORTS_MASK; 2978 2979 if (cpsw_vgroups[vg->es_vlangroup].vid != -1 && 2980 cpsw_vgroups[vg->es_vlangroup].vid != vg->es_vid) 2981 return (EINVAL); 2982 2983 cpsw_vgroups[vg->es_vlangroup].vid = vg->es_vid; 2984 cpsw_ale_update_vlan_table(sc, vg->es_vid, vg->es_member_ports, 2985 vg->es_untagged_ports, vg->es_member_ports, 0); 2986 2987 return (0); 2988 } 2989 2990 static int 2991 cpsw_readreg(device_t dev, int addr) 2992 { 2993 2994 /* Not supported. */ 2995 return (0); 2996 } 2997 2998 static int 2999 cpsw_writereg(device_t dev, int addr, int value) 3000 { 3001 3002 /* Not supported. */ 3003 return (0); 3004 } 3005 3006 static int 3007 cpsw_readphy(device_t dev, int phy, int reg) 3008 { 3009 3010 /* Not supported. */ 3011 return (0); 3012 } 3013 3014 static int 3015 cpsw_writephy(device_t dev, int phy, int reg, int data) 3016 { 3017 3018 /* Not supported. */ 3019 return (0); 3020 } 3021 #endif 3022