xref: /freebsd/sys/arm/ti/cpsw/if_cpsw.c (revision 1834282de6b9f6fd30291bfe1cc9c3ecf5547c40)
1 /*-
2  * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
3  * Copyright (c) 2016 Rubicon Communications, LLC (Netgate)
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 /*
29  * TI Common Platform Ethernet Switch (CPSW) Driver
30  * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs.
31  *
32  * This controller is documented in the AM335x Technical Reference
33  * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM
34  * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM.
35  *
36  * It is basically a single Ethernet port (port 0) wired internally to
37  * a 3-port store-and-forward switch connected to two independent
38  * "sliver" controllers (port 1 and port 2).  You can operate the
39  * controller in a variety of ways by suitably configuring
40  * the slivers and the Address Lookup Engine (ALE) that routes packets
41  * between the ports.
42  *
43  * This code was developed and tested on a BeagleBone with
44  * an AM335x SoC.
45  */
46 
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
49 
50 #include <sys/param.h>
51 #include <sys/bus.h>
52 #include <sys/kernel.h>
53 #include <sys/lock.h>
54 #include <sys/mbuf.h>
55 #include <sys/module.h>
56 #include <sys/mutex.h>
57 #include <sys/rman.h>
58 #include <sys/socket.h>
59 #include <sys/sockio.h>
60 #include <sys/sysctl.h>
61 
62 #include <machine/bus.h>
63 #include <machine/resource.h>
64 #include <machine/stdarg.h>
65 
66 #include <net/ethernet.h>
67 #include <net/bpf.h>
68 #include <net/if.h>
69 #include <net/if_dl.h>
70 #include <net/if_media.h>
71 #include <net/if_types.h>
72 
73 #include <arm/ti/ti_scm.h>
74 #include <arm/ti/am335x/am335x_scm.h>
75 
76 #include <dev/mii/mii.h>
77 #include <dev/mii/miivar.h>
78 
79 #include <dev/fdt/fdt_common.h>
80 #include <dev/ofw/ofw_bus.h>
81 #include <dev/ofw/ofw_bus_subr.h>
82 
83 #include "if_cpswreg.h"
84 #include "if_cpswvar.h"
85 
86 #include "miibus_if.h"
87 
88 /* Device probe/attach/detach. */
89 static int cpsw_probe(device_t);
90 static int cpsw_attach(device_t);
91 static int cpsw_detach(device_t);
92 static int cpswp_probe(device_t);
93 static int cpswp_attach(device_t);
94 static int cpswp_detach(device_t);
95 
96 static phandle_t cpsw_get_node(device_t, device_t);
97 
98 /* Device Init/shutdown. */
99 static int cpsw_shutdown(device_t);
100 static void cpswp_init(void *);
101 static void cpswp_init_locked(void *);
102 static void cpswp_stop_locked(struct cpswp_softc *);
103 
104 /* Device Suspend/Resume. */
105 static int cpsw_suspend(device_t);
106 static int cpsw_resume(device_t);
107 
108 /* Ioctl. */
109 static int cpswp_ioctl(struct ifnet *, u_long, caddr_t);
110 
111 static int cpswp_miibus_readreg(device_t, int, int);
112 static int cpswp_miibus_writereg(device_t, int, int, int);
113 static void cpswp_miibus_statchg(device_t);
114 
115 /* Send/Receive packets. */
116 static void cpsw_intr_rx(void *arg);
117 static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *);
118 static void cpsw_rx_enqueue(struct cpsw_softc *);
119 static void cpswp_start(struct ifnet *);
120 static void cpsw_intr_tx(void *);
121 static void cpswp_tx_enqueue(struct cpswp_softc *);
122 static int cpsw_tx_dequeue(struct cpsw_softc *);
123 
124 /* Misc interrupts and watchdog. */
125 static void cpsw_intr_rx_thresh(void *);
126 static void cpsw_intr_misc(void *);
127 static void cpswp_tick(void *);
128 static void cpswp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
129 static int cpswp_ifmedia_upd(struct ifnet *);
130 static void cpsw_tx_watchdog(void *);
131 
132 /* ALE support */
133 static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *);
134 static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *);
135 static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *);
136 static void cpsw_ale_dump_table(struct cpsw_softc *);
137 static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int,
138 	int);
139 static int cpswp_ale_update_addresses(struct cpswp_softc *, int);
140 
141 /* Statistics and sysctls. */
142 static void cpsw_add_sysctls(struct cpsw_softc *);
143 static void cpsw_stats_collect(struct cpsw_softc *);
144 static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS);
145 
146 /*
147  * Arbitrary limit on number of segments in an mbuf to be transmitted.
148  * Packets with more segments than this will be defragmented before
149  * they are queued.
150  */
151 #define	CPSW_TXFRAGS		16
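
/*
 * CPSW_TXFRAGS is also the nsegments limit handed to bus_dma_tag_create()
 * in cpsw_attach(); when bus_dmamap_load_mbuf_sg() returns EFBIG for an
 * overly fragmented chain, cpswp_tx_enqueue() falls back to m_defrag()
 * and requeues the packet.
 */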
152 
153 /* Shared resources. */
154 static device_method_t cpsw_methods[] = {
155 	/* Device interface */
156 	DEVMETHOD(device_probe,		cpsw_probe),
157 	DEVMETHOD(device_attach,	cpsw_attach),
158 	DEVMETHOD(device_detach,	cpsw_detach),
159 	DEVMETHOD(device_shutdown,	cpsw_shutdown),
160 	DEVMETHOD(device_suspend,	cpsw_suspend),
161 	DEVMETHOD(device_resume,	cpsw_resume),
162 	/* OFW methods */
163 	DEVMETHOD(ofw_bus_get_node,	cpsw_get_node),
164 	DEVMETHOD_END
165 };
166 
167 static driver_t cpsw_driver = {
168 	"cpswss",
169 	cpsw_methods,
170 	sizeof(struct cpsw_softc),
171 };
172 
173 static devclass_t cpsw_devclass;
174 
175 DRIVER_MODULE(cpswss, simplebus, cpsw_driver, cpsw_devclass, 0, 0);
176 
177 /* Port/Slave resources. */
178 static device_method_t cpswp_methods[] = {
179 	/* Device interface */
180 	DEVMETHOD(device_probe,		cpswp_probe),
181 	DEVMETHOD(device_attach,	cpswp_attach),
182 	DEVMETHOD(device_detach,	cpswp_detach),
183 	/* MII interface */
184 	DEVMETHOD(miibus_readreg,	cpswp_miibus_readreg),
185 	DEVMETHOD(miibus_writereg,	cpswp_miibus_writereg),
186 	DEVMETHOD(miibus_statchg,	cpswp_miibus_statchg),
187 	DEVMETHOD_END
188 };
189 
190 static driver_t cpswp_driver = {
191 	"cpsw",
192 	cpswp_methods,
193 	sizeof(struct cpswp_softc),
194 };
195 
196 static devclass_t cpswp_devclass;
197 
198 DRIVER_MODULE(cpsw, cpswss, cpswp_driver, cpswp_devclass, 0, 0);
199 DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0);
200 MODULE_DEPEND(cpsw, ether, 1, 1, 1);
201 MODULE_DEPEND(cpsw, miibus, 1, 1, 1);
202 
203 static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 };
204 
205 static struct resource_spec irq_res_spec[] = {
206 	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
207 	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
208 	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
209 	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
210 	{ -1, 0 }
211 };
212 
213 static struct {
214 	void (*cb)(void *);
215 } cpsw_intr_cb[] = {
216 	{ cpsw_intr_rx_thresh },
217 	{ cpsw_intr_rx },
218 	{ cpsw_intr_tx },
219 	{ cpsw_intr_misc },
220 };
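
/*
 * The handlers above must stay in the same order as irq_res_spec:
 * cpsw_intr_attach() pairs sc->irq_res[i] with cpsw_intr_cb[i].cb by
 * index, so IRQ 0 is the RX threshold interrupt, 1 is RX, 2 is TX and
 * 3 is the misc interrupt.
 */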
221 
222 /* Number of entries here must match size of stats
223  * array in struct cpswp_softc. */
224 static struct cpsw_stat {
225 	int	reg;
226 	char *oid;
227 } cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = {
228 	{0x00, "GoodRxFrames"},
229 	{0x04, "BroadcastRxFrames"},
230 	{0x08, "MulticastRxFrames"},
231 	{0x0C, "PauseRxFrames"},
232 	{0x10, "RxCrcErrors"},
233 	{0x14, "RxAlignErrors"},
234 	{0x18, "OversizeRxFrames"},
235 	{0x1c, "RxJabbers"},
236 	{0x20, "ShortRxFrames"},
237 	{0x24, "RxFragments"},
238 	{0x30, "RxOctets"},
239 	{0x34, "GoodTxFrames"},
240 	{0x38, "BroadcastTxFrames"},
241 	{0x3c, "MulticastTxFrames"},
242 	{0x40, "PauseTxFrames"},
243 	{0x44, "DeferredTxFrames"},
244 	{0x48, "CollisionsTxFrames"},
245 	{0x4c, "SingleCollisionTxFrames"},
246 	{0x50, "MultipleCollisionTxFrames"},
247 	{0x54, "ExcessiveCollisions"},
248 	{0x58, "LateCollisions"},
249 	{0x5c, "TxUnderrun"},
250 	{0x60, "CarrierSenseErrors"},
251 	{0x64, "TxOctets"},
252 	{0x68, "RxTx64OctetFrames"},
253 	{0x6c, "RxTx65to127OctetFrames"},
254 	{0x70, "RxTx128to255OctetFrames"},
255 	{0x74, "RxTx256to511OctetFrames"},
256 	{0x78, "RxTx512to1024OctetFrames"},
257 	{0x7c, "RxTx1024upOctetFrames"},
258 	{0x80, "NetOctets"},
259 	{0x84, "RxStartOfFrameOverruns"},
260 	{0x88, "RxMiddleOfFrameOverruns"},
261 	{0x8c, "RxDmaOverruns"}
262 };
263 
264 /*
265  * Basic debug support.
266  */
267 
268 static void
269 cpsw_debugf_head(const char *funcname)
270 {
271 	int t = (int)(time_second % (24 * 60 * 60));
272 
273 	printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname);
274 }
275 
276 static void
277 cpsw_debugf(const char *fmt, ...)
278 {
279 	va_list ap;
280 
281 	va_start(ap, fmt);
282 	vprintf(fmt, ap);
283 	va_end(ap);
284 	printf("\n");
285 
286 }
287 
288 #define	CPSW_DEBUGF(_sc, a) do {					\
289 	if ((_sc)->debug) {						\
290 		cpsw_debugf_head(__func__);				\
291 		cpsw_debugf a;						\
292 	}								\
293 } while (0)
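
/*
 * Illustrative use (a sketch, not compiled): the parenthesized second
 * argument lets callers pass a full printf-style argument list through
 * the macro, e.g.
 *
 *	CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added));
 *
 * which costs only a branch when sc->debug is clear.
 */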
294 
295 /*
296  * Locking macros
297  */
298 #define	CPSW_TX_LOCK(sc) do {						\
299 		mtx_assert(&(sc)->rx.lock, MA_NOTOWNED);		\
300 		mtx_lock(&(sc)->tx.lock);				\
301 } while (0)
302 
303 #define	CPSW_TX_UNLOCK(sc)	mtx_unlock(&(sc)->tx.lock)
304 #define	CPSW_TX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->tx.lock, MA_OWNED)
305 
306 #define	CPSW_RX_LOCK(sc) do {						\
307 		mtx_assert(&(sc)->tx.lock, MA_NOTOWNED);		\
308 		mtx_lock(&(sc)->rx.lock);				\
309 } while (0)
310 
311 #define	CPSW_RX_UNLOCK(sc)		mtx_unlock(&(sc)->rx.lock)
312 #define	CPSW_RX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->rx.lock, MA_OWNED)
313 
314 #define CPSW_PORT_LOCK(_sc) do {					\
315 		mtx_assert(&(_sc)->lock, MA_NOTOWNED);			\
316 		mtx_lock(&(_sc)->lock);					\
317 } while (0)
318 
319 #define	CPSW_PORT_UNLOCK(_sc)	mtx_unlock(&(_sc)->lock)
320 #define	CPSW_PORT_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->lock, MA_OWNED)
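
/*
 * A usage sketch, assuming the pattern followed throughout this file:
 * the MA_NOTOWNED assertions above encode the rule that the TX and RX
 * queue locks are never nested, so a caller takes exactly one of them
 * around any queue manipulation:
 *
 *	CPSW_RX_LOCK(sc);
 *	received = cpsw_rx_dequeue(sc);
 *	cpsw_rx_enqueue(sc);
 *	CPSW_RX_UNLOCK(sc);
 */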
321 
322 /*
323  * Read/Write macros
324  */
325 #define	cpsw_read_4(_sc, _reg)		bus_read_4((_sc)->mem_res, (_reg))
326 #define	cpsw_write_4(_sc, _reg, _val)					\
327 	bus_write_4((_sc)->mem_res, (_reg), (_val))
328 
329 #define	cpsw_cpdma_bd_offset(i)	(CPSW_CPPI_RAM_OFFSET + ((i)*16))
330 
331 #define	cpsw_cpdma_bd_paddr(sc, slot)					\
332 	BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset)
333 #define	cpsw_cpdma_read_bd(sc, slot, val)				\
334 	bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
335 #define	cpsw_cpdma_write_bd(sc, slot, val)				\
336 	bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
337 #define	cpsw_cpdma_write_bd_next(sc, slot, next_slot)			\
338 	cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot))
339 #define	cpsw_cpdma_read_bd_flags(sc, slot)				\
340 	bus_read_2(sc->mem_res, slot->bd_offset + 14)
341 #define	cpsw_write_hdp_slot(sc, queue, slot)				\
342 	cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot))
343 #define	CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0))
344 #define	cpsw_read_cp(sc, queue)						\
345 	cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET)
346 #define	cpsw_write_cp(sc, queue, val)					\
347 	cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val))
348 #define	cpsw_write_cp_slot(sc, queue, slot)				\
349 	cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot))
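
/*
 * Layout note: each buffer descriptor occupies 16 bytes of CPPI RAM
 * (hence the "(i)*16" above): a next-descriptor pointer, a buffer
 * pointer, buffer offset/length, and flags/packet length.  The
 * bus_read_region_4()/bus_write_region_4() wrappers move a whole
 * descriptor as four 32-bit words to and from the in-memory
 * struct cpsw_cpdma_bd, and the flags sit in the upper half of the
 * last word (offset 14, read with a 16-bit access above).
 */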
350 
351 #if 0
352 /* XXX temporary function versions for debugging. */
353 static void
354 cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
355 {
356 	uint32_t reg = queue->hdp_offset;
357 	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
358 	CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg)));
359 	cpsw_write_4(sc, reg, v);
360 }
361 
362 static void
363 cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
364 {
365 	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
366 	CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue)));
367 	cpsw_write_cp(sc, queue, v);
368 }
369 #endif
370 
371 /*
372  * Expanded dump routines for verbose debugging.
373  */
374 static void
375 cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
376 {
377 	static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ",
378 	    "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun",
379 	    "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1",
380 	    "Port0"};
381 	struct cpsw_cpdma_bd bd;
382 	const char *sep;
383 	int i;
384 
385 	cpsw_cpdma_read_bd(sc, slot, &bd);
386 	printf("BD Addr: 0x%08x   Next: 0x%08x\n", cpsw_cpdma_bd_paddr(sc, slot), bd.next);
387 	printf("  BufPtr: 0x%08x   BufLen: 0x%08x\n", bd.bufptr, bd.buflen);
388 	printf("  BufOff: 0x%08x   PktLen: 0x%08x\n", bd.bufoff, bd.pktlen);
389 	printf("  Flags: ");
390 	sep = "";
391 	for (i = 0; i < 16; ++i) {
392 		if (bd.flags & (1 << (15 - i))) {
393 			printf("%s%s", sep, flags[i]);
394 			sep = ",";
395 		}
396 	}
397 	printf("\n");
398 	if (slot->mbuf) {
399 		printf("  Ether:  %14D\n",
400 		    (char *)(slot->mbuf->m_data), " ");
401 		printf("  Packet: %16D\n",
402 		    (char *)(slot->mbuf->m_data) + 14, " ");
403 	}
404 }
405 
406 #define	CPSW_DUMP_SLOT(cs, slot) do {				\
407 	IF_DEBUG(sc) {						\
408 		cpsw_dump_slot(sc, slot);			\
409 	}							\
410 } while (0)
411 
412 static void
413 cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q)
414 {
415 	struct cpsw_slot *slot;
416 	int i = 0;
417 	int others = 0;
418 
419 	STAILQ_FOREACH(slot, q, next) {
420 		if (i > 4)
421 			++others;
422 		else
423 			cpsw_dump_slot(sc, slot);
424 		++i;
425 	}
426 	if (others)
427 		printf(" ... and %d more.\n", others);
428 	printf("\n");
429 }
430 
431 #define CPSW_DUMP_QUEUE(sc, q) do {				\
432 	IF_DEBUG(sc) {						\
433 		cpsw_dump_queue(sc, q);				\
434 	}							\
435 } while (0)
436 
437 static void
438 cpsw_init_slots(struct cpsw_softc *sc)
439 {
440 	struct cpsw_slot *slot;
441 	int i;
442 
443 	STAILQ_INIT(&sc->avail);
444 
445 	/* Put the slot descriptors onto the global avail list. */
446 	for (i = 0; i < nitems(sc->_slots); i++) {
447 		slot = &sc->_slots[i];
448 		slot->bd_offset = cpsw_cpdma_bd_offset(i);
449 		STAILQ_INSERT_TAIL(&sc->avail, slot, next);
450 	}
451 }
452 
453 static int
454 cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested)
455 {
456 	const int max_slots = nitems(sc->_slots);
457 	struct cpsw_slot *slot;
458 	int i;
459 
460 	if (requested < 0)
461 		requested = max_slots;
462 
463 	for (i = 0; i < requested; ++i) {
464 		slot = STAILQ_FIRST(&sc->avail);
465 		if (slot == NULL)
466 			return (0);
467 		if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
468 			device_printf(sc->dev, "failed to create dmamap\n");
469 			return (ENOMEM);
470 		}
471 		STAILQ_REMOVE_HEAD(&sc->avail, next);
472 		STAILQ_INSERT_TAIL(&queue->avail, slot, next);
473 		++queue->avail_queue_len;
474 		++queue->queue_slots;
475 	}
476 	return (0);
477 }
478 
479 static void
480 cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
481 {
482 	int error;
483 
484 	if (slot->dmamap) {
485 		if (slot->mbuf)
486 			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
487 		error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap);
488 		KASSERT(error == 0, ("Mapping still active"));
489 		slot->dmamap = NULL;
490 	}
491 	if (slot->mbuf) {
492 		m_freem(slot->mbuf);
493 		slot->mbuf = NULL;
494 	}
495 }
496 
497 static void
498 cpsw_reset(struct cpsw_softc *sc)
499 {
500 	int i;
501 
502 	callout_stop(&sc->watchdog.callout);
503 
504 	/* Reset RMII/RGMII wrapper. */
505 	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
506 	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
507 		;
508 
509 	/* Disable TX and RX interrupts for all cores. */
510 	for (i = 0; i < 3; ++i) {
511 		cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00);
512 		cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00);
513 		cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00);
514 		cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00);
515 	}
516 
517 	/* Reset CPSW subsystem. */
518 	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
519 	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
520 		;
521 
522 	/* Reset Sliver port 1 and 2 */
523 	for (i = 0; i < 2; i++) {
524 		/* Reset */
525 		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
526 		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
527 			;
528 	}
529 
530 	/* Reset DMA controller. */
531 	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
532 	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
533 		;
534 
535 	/* Disable TX & RX DMA */
536 	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0);
537 	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0);
538 
539 	/* Clear all queues. */
540 	for (i = 0; i < 8; i++) {
541 		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
542 		cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
543 		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
544 		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
545 	}
546 
547 	/* Clear all interrupt Masks */
548 	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
549 	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
550 }
551 
552 static void
553 cpsw_init(struct cpsw_softc *sc)
554 {
555 	struct cpsw_slot *slot;
556 	uint32_t reg;
557 
558 	/* Disable the interrupt pacing. */
559 	reg = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
560 	reg &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
561 	cpsw_write_4(sc, CPSW_WR_INT_CONTROL, reg);
562 
563 	/* Clear ALE */
564 	cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL);
565 
566 	/* Enable ALE */
567 	reg = CPSW_ALE_CTL_ENABLE;
568 	if (sc->dualemac)
569 		reg |= CPSW_ALE_CTL_VLAN_AWARE;
570 	cpsw_write_4(sc, CPSW_ALE_CONTROL, reg);
571 
572 	/* Set Host Port Mapping. */
573 	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
574 	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);
575 
576 	/* Initialize ALE: set host port to forwarding(3). */
577 	cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 3);
578 
579 	cpsw_write_4(sc, CPSW_SS_PTYPE, 0);
580 
581 	/* Enable statistics for ports 0, 1 and 2 */
582 	cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);
583 
584 	/* Experiment: turn off flow control.  This seems to fix the
585 	   watchdog resets that have plagued earlier versions of this
586 	   driver; it is not yet clear whether there are any negative
587 	   side effects. */
588 	cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);
589 
590 	/* Offset RX buffers by 2 bytes so the IP header is 4-byte aligned. */
591 	cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2);
592 
593 	/* Initialize RX Buffer Descriptors */
594 	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);
595 
596 	/* Enable TX & RX DMA */
597 	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
598 	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);
599 
600 	/* Enable Interrupts for core 0 */
601 	cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF);
602 	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF);
603 	cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0xFF);
604 	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);
605 
606 	/* Enable host Error Interrupt */
607 	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3);
608 
609 	/* Enable interrupts for RX and TX on Channel 0 */
610 	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);
611 	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1);
612 
613 	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
614 	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
615 	cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff);
616 
617 	/* Select MII in GMII_SEL, Internal Delay mode */
618 	//ti_scm_reg_write_4(0x650, 0);
619 
620 	/* Initialize active queues. */
621 	slot = STAILQ_FIRST(&sc->tx.active);
622 	if (slot != NULL)
623 		cpsw_write_hdp_slot(sc, &sc->tx, slot);
624 	slot = STAILQ_FIRST(&sc->rx.active);
625 	if (slot != NULL)
626 		cpsw_write_hdp_slot(sc, &sc->rx, slot);
627 	cpsw_rx_enqueue(sc);
628 
629 	/* Activate network interface. */
630 	sc->rx.running = 1;
631 	sc->tx.running = 1;
632 	sc->watchdog.timer = 0;
633 	callout_init(&sc->watchdog.callout, 0);
634 	callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
635 }
636 
637 /*
638  *
639  * Device Probe, Attach, Detach.
640  *
641  */
642 
643 static int
644 cpsw_probe(device_t dev)
645 {
646 
647 	if (!ofw_bus_status_okay(dev))
648 		return (ENXIO);
649 
650 	if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
651 		return (ENXIO);
652 
653 	device_set_desc(dev, "3-port Switch Ethernet Subsystem");
654 	return (BUS_PROBE_DEFAULT);
655 }
656 
657 static int
658 cpsw_intr_attach(struct cpsw_softc *sc)
659 {
660 	int i;
661 
662 	for (i = 0; i < CPSW_INTR_COUNT; i++) {
663 		if (bus_setup_intr(sc->dev, sc->irq_res[i],
664 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
665 		    cpsw_intr_cb[i].cb, sc, &sc->ih_cookie[i]) != 0) {
666 			return (-1);
667 		}
668 	}
669 
670 	return (0);
671 }
672 
673 static void
674 cpsw_intr_detach(struct cpsw_softc *sc)
675 {
676 	int i;
677 
678 	for (i = 0; i < CPSW_INTR_COUNT; i++) {
679 		if (sc->ih_cookie[i]) {
680 			bus_teardown_intr(sc->dev, sc->irq_res[i],
681 			    sc->ih_cookie[i]);
682 		}
683 	}
684 }
685 
686 static int
687 cpsw_get_fdt_data(struct cpsw_softc *sc, int port)
688 {
689 	char *name;
690 	int len, phy, vlan;
691 	pcell_t phy_id[3], vlan_id;
692 	phandle_t child;
693 	unsigned long mdio_child_addr;
694 
695 	/* Find any slave with phy_id */
696 	phy = -1;
697 	vlan = -1;
698 	for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) {
699 		if (OF_getprop_alloc(child, "name", 1, (void **)&name) < 0)
700 			continue;
701 		if (sscanf(name, "slave@%lx", &mdio_child_addr) != 1) {
702 			OF_prop_free(name);
703 			continue;
704 		}
705 		OF_prop_free(name);
706 		if (mdio_child_addr != slave_mdio_addr[port])
707 			continue;
708 
709 		len = OF_getproplen(child, "phy_id");
710 		if (len / sizeof(pcell_t) == 2) {
711 			/* Get phy address from fdt */
712 			if (OF_getencprop(child, "phy_id", phy_id, len) > 0)
713 				phy = phy_id[1];
714 		}
715 
716 		len = OF_getproplen(child, "dual_emac_res_vlan");
717 		if (len / sizeof(pcell_t) == 1) {
718 			/* Get VLAN id from fdt */
719 			if (OF_getencprop(child, "dual_emac_res_vlan",
720 			    &vlan_id, len) > 0) {
721 				vlan = vlan_id;
722 			}
723 		}
724 
725 		break;
726 	}
727 	if (phy == -1)
728 		return (ENXIO);
729 	sc->port[port].phy = phy;
730 	sc->port[port].vlan = vlan;
731 
732 	return (0);
733 }
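
/*
 * For reference, a device tree fragment of the shape this parser
 * expects (node name and cell values are illustrative, following the
 * Linux am33xx bindings):
 *
 *	slave@4a100200 {
 *		phy_id = <&davinci_mdio 0>;
 *		dual_emac_res_vlan = <1>;
 *	};
 *
 * The unit address selects the slave (cf. slave_mdio_addr[]), the
 * second phy_id cell is the PHY address and dual_emac_res_vlan is the
 * VLAN reserved for this port in dual EMAC mode.
 */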
734 
735 static int
736 cpsw_attach(device_t dev)
737 {
738 	bus_dma_segment_t segs[1];
739 	int error, i, nsegs;
740 	struct cpsw_softc *sc;
741 	uint32_t reg;
742 
743 	sc = device_get_softc(dev);
744 	sc->dev = dev;
745 	sc->node = ofw_bus_get_node(dev);
746 	getbinuptime(&sc->attach_uptime);
747 
748 	if (OF_getencprop(sc->node, "active_slave", &sc->active_slave,
749 	    sizeof(sc->active_slave)) <= 0) {
750 		sc->active_slave = 0;
751 	}
752 	if (sc->active_slave > 1)
753 		sc->active_slave = 1;
754 
755 	if (OF_hasprop(sc->node, "dual_emac"))
756 		sc->dualemac = 1;
757 
758 	for (i = 0; i < CPSW_PORTS; i++) {
759 		if (!sc->dualemac && i != sc->active_slave)
760 			continue;
761 		if (cpsw_get_fdt_data(sc, i) != 0) {
762 			device_printf(dev,
763 			    "failed to get PHY address from FDT\n");
764 			return (ENXIO);
765 		}
766 	}
767 
768 	/* Initialize mutexes */
769 	mtx_init(&sc->tx.lock, device_get_nameunit(dev),
770 	    "cpsw TX lock", MTX_DEF);
771 	mtx_init(&sc->rx.lock, device_get_nameunit(dev),
772 	    "cpsw RX lock", MTX_DEF);
773 
774 	/* Allocate IRQ resources */
775 	error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res);
776 	if (error) {
777 		device_printf(dev, "could not allocate IRQ resources\n");
778 		cpsw_detach(dev);
779 		return (ENXIO);
780 	}
781 
782 	sc->mem_rid = 0;
783 	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
784 	    &sc->mem_rid, RF_ACTIVE);
785 	if (sc->mem_res == NULL) {
786 		device_printf(sc->dev, "failed to allocate memory resource\n");
787 		cpsw_detach(dev);
788 		return (ENXIO);
789 	}
790 
791 	reg = cpsw_read_4(sc, CPSW_SS_IDVER);
792 	device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7),
793 		reg & 0xFF, (reg >> 11) & 0x1F);
794 
795 	cpsw_add_sysctls(sc);
796 
797 	/* Allocate a busdma tag and DMA safe memory for mbufs. */
798 	error = bus_dma_tag_create(
799 		bus_get_dma_tag(sc->dev),	/* parent */
800 		1, 0,				/* alignment, boundary */
801 		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
802 		BUS_SPACE_MAXADDR,		/* highaddr */
803 		NULL, NULL,			/* filtfunc, filtfuncarg */
804 		MCLBYTES, CPSW_TXFRAGS,		/* maxsize, nsegments */
805 		MCLBYTES, 0,			/* maxsegsz, flags */
806 		NULL, NULL,			/* lockfunc, lockfuncarg */
807 		&sc->mbuf_dtag);		/* dmatag */
808 	if (error) {
809 		device_printf(dev, "bus_dma_tag_create failed\n");
810 		cpsw_detach(dev);
811 		return (error);
812 	}
813 
814 	/* Allocate the null mbuf and pre-sync it. */
815 	sc->null_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (sc->null_mbuf == NULL) {
		device_printf(dev, "failed to allocate null mbuf\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}
816 	memset(sc->null_mbuf->m_data, 0, sc->null_mbuf->m_ext.ext_size);
817 	bus_dmamap_create(sc->mbuf_dtag, 0, &sc->null_mbuf_dmamap);
818 	bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->null_mbuf_dmamap,
819 	    sc->null_mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
820 	bus_dmamap_sync(sc->mbuf_dtag, sc->null_mbuf_dmamap,
821 	    BUS_DMASYNC_PREWRITE);
822 	sc->null_mbuf_paddr = segs[0].ds_addr;
823 
824 	cpsw_init_slots(sc);
825 
826 	/* Allocate slots to TX and RX queues. */
827 	STAILQ_INIT(&sc->rx.avail);
828 	STAILQ_INIT(&sc->rx.active);
829 	STAILQ_INIT(&sc->tx.avail);
830 	STAILQ_INIT(&sc->tx.active);
831 	/* For now: 128 slots to TX, rest to RX. */
832 	/* XXX TODO: start with 32/64 and grow dynamically based on demand. */
833 	if (cpsw_add_slots(sc, &sc->tx, 128) ||
834 	    cpsw_add_slots(sc, &sc->rx, -1)) {
835 		device_printf(dev, "failed to allocate dmamaps\n");
836 		cpsw_detach(dev);
837 		return (ENOMEM);
838 	}
839 	device_printf(dev, "Initial queue size TX=%d RX=%d\n",
840 	    sc->tx.queue_slots, sc->rx.queue_slots);
841 
842 	sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0);
843 	sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0);
844 
845 	if (cpsw_intr_attach(sc) == -1) {
846 		device_printf(dev, "failed to setup interrupts\n");
847 		cpsw_detach(dev);
848 		return (ENXIO);
849 	}
850 
851 	/* Reset the controller. */
852 	cpsw_reset(sc);
853 	cpsw_init(sc);
854 
855 	for (i = 0; i < CPSW_PORTS; i++) {
856 		if (!sc->dualemac && i != sc->active_slave)
857 			continue;
858 		sc->port[i].dev = device_add_child(dev, "cpsw", i);
859 		if (sc->port[i].dev == NULL) {
860 			cpsw_detach(dev);
861 			return (ENXIO);
862 		}
863 	}
864 	bus_generic_attach(dev);
865 
866 	return (0);
867 }
868 
869 static int
870 cpsw_detach(device_t dev)
871 {
872 	struct cpsw_softc *sc;
873 	int error, i;
874 
875 	bus_generic_detach(dev);
876  	sc = device_get_softc(dev);
877 
878 	for (i = 0; i < CPSW_PORTS; i++) {
879 		if (sc->port[i].dev)
880 			device_delete_child(dev, sc->port[i].dev);
881 	}
882 
883 	if (device_is_attached(dev)) {
884 		callout_stop(&sc->watchdog.callout);
885 		callout_drain(&sc->watchdog.callout);
886 	}
887 
888 	/* Stop and release all interrupts */
889 	cpsw_intr_detach(sc);
890 
891 	/* Free dmamaps and mbufs */
892 	for (i = 0; i < nitems(sc->_slots); ++i)
893 		cpsw_free_slot(sc, &sc->_slots[i]);
894 
895 	/* Free null mbuf. */
896 	if (sc->null_mbuf_dmamap) {
897 		bus_dmamap_unload(sc->mbuf_dtag, sc->null_mbuf_dmamap);
898 		error = bus_dmamap_destroy(sc->mbuf_dtag, sc->null_mbuf_dmamap);
899 		KASSERT(error == 0, ("Mapping still active"));
900 		m_freem(sc->null_mbuf);
901 	}
902 
903 	/* Free DMA tag */
904 	if (sc->mbuf_dtag) {
905 		error = bus_dma_tag_destroy(sc->mbuf_dtag);
906 		KASSERT(error == 0, ("Unable to destroy DMA tag"));
907 	}
908 
909 	/* Free IO memory handler */
910 	if (sc->mem_res != NULL)
911 		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res);
912 	bus_release_resources(dev, irq_res_spec, sc->irq_res);
913 
914 	/* Destroy mutexes */
915 	mtx_destroy(&sc->rx.lock);
916 	mtx_destroy(&sc->tx.lock);
917 
918 	return (0);
919 }
920 
921 static phandle_t
922 cpsw_get_node(device_t bus, device_t dev)
923 {
924 
925 	/* Share controller node with port device. */
926 	return (ofw_bus_get_node(bus));
927 }
928 
929 static int
930 cpswp_probe(device_t dev)
931 {
932 
933 	if (device_get_unit(dev) > 1) {
934 		device_printf(dev, "Only two ports are supported.\n");
935 		return (ENXIO);
936 	}
937 	device_set_desc(dev, "Ethernet Switch Port");
938 
939 	return (BUS_PROBE_DEFAULT);
940 }
941 
942 static int
943 cpswp_attach(device_t dev)
944 {
945 	int error;
946 	struct ifnet *ifp;
947 	struct cpswp_softc *sc;
948 	uint32_t reg;
949 	uint8_t mac_addr[ETHER_ADDR_LEN];
950 
951 	sc = device_get_softc(dev);
952 	sc->dev = dev;
953 	sc->pdev = device_get_parent(dev);
954 	sc->swsc = device_get_softc(sc->pdev);
955 	sc->unit = device_get_unit(dev);
956 	sc->phy = sc->swsc->port[sc->unit].phy;
957 	sc->vlan = sc->swsc->port[sc->unit].vlan;
958 	if (sc->swsc->dualemac && sc->vlan == -1)
959 		sc->vlan = sc->unit + 1;
960 
961 	if (sc->unit == 0) {
962 		sc->physel = MDIOUSERPHYSEL0;
963 		sc->phyaccess = MDIOUSERACCESS0;
964 	} else {
965 		sc->physel = MDIOUSERPHYSEL1;
966 		sc->phyaccess = MDIOUSERACCESS1;
967 	}
968 
969 	mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock",
970 	    MTX_DEF);
971 
972 	/* Allocate network interface */
973 	ifp = sc->ifp = if_alloc(IFT_ETHER);
974 	if (ifp == NULL) {
975 		cpswp_detach(dev);
976 		return (ENXIO);
977 	}
978 
979 	if_initname(ifp, device_get_name(sc->dev), sc->unit);
980 	ifp->if_softc = sc;
981 	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
982 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; /* FIXME: VLAN? */
983 	ifp->if_capenable = ifp->if_capabilities;
984 
985 	ifp->if_init = cpswp_init;
986 	ifp->if_start = cpswp_start;
987 	ifp->if_ioctl = cpswp_ioctl;
988 
989 	ifp->if_snd.ifq_drv_maxlen = sc->swsc->tx.queue_slots;
990 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
991 	IFQ_SET_READY(&ifp->if_snd);
992 
993 	/* Get high part of MAC address from control module (mac_id[0|1]_hi) */
994 	ti_scm_reg_read_4(SCM_MAC_ID0_HI + sc->unit * 8, &reg);
995 	mac_addr[0] = reg & 0xFF;
996 	mac_addr[1] = (reg >>  8) & 0xFF;
997 	mac_addr[2] = (reg >> 16) & 0xFF;
998 	mac_addr[3] = (reg >> 24) & 0xFF;
999 
1000 	/* Get low part of MAC address from control module (mac_id[0|1]_lo) */
1001 	ti_scm_reg_read_4(SCM_MAC_ID0_LO + sc->unit * 8, &reg);
1002 	mac_addr[4] = reg & 0xFF;
1003 	mac_addr[5] = (reg >>  8) & 0xFF;
1004 
1005 	error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd,
1006 	    cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0);
1007 	if (error) {
1008 		device_printf(dev, "attaching PHYs failed\n");
1009 		cpswp_detach(dev);
1010 		return (error);
1011 	}
1012 	sc->mii = device_get_softc(sc->miibus);
1013 
1014 	/* Select PHY and enable interrupts */
1015 	cpsw_write_4(sc->swsc, sc->physel,
1016 	    MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F));
1017 
1018 	ether_ifattach(sc->ifp, mac_addr);
1019 	callout_init(&sc->mii_callout, 0);
1020 
1021 	return (0);
1022 }
1023 
1024 static int
1025 cpswp_detach(device_t dev)
1026 {
1027 	struct cpswp_softc *sc;
1028 
1029 	sc = device_get_softc(dev);
1030 	CPSW_DEBUGF(sc->swsc, (""));
1031 	if (device_is_attached(dev)) {
1032 		ether_ifdetach(sc->ifp);
1033 		CPSW_PORT_LOCK(sc);
1034 		cpswp_stop_locked(sc);
1035 		CPSW_PORT_UNLOCK(sc);
1036 		callout_drain(&sc->mii_callout);
1037 	}
1038 
1039 	bus_generic_detach(dev);
1040 
1041 	if_free(sc->ifp);
1042 	mtx_destroy(&sc->lock);
1043 
1044 	return (0);
1045 }
1046 
1047 /*
1048  *
1049  * Init/Shutdown.
1050  *
1051  */
1052 
1053 static int
1054 cpsw_ports_down(struct cpsw_softc *sc)
1055 {
1056 	struct cpswp_softc *psc;
1057 	struct ifnet *ifp1, *ifp2;
1058 
1059 	if (!sc->dualemac)
1060 		return (1);
1061 	psc = device_get_softc(sc->port[0].dev);
1062 	ifp1 = psc->ifp;
1063 	psc = device_get_softc(sc->port[1].dev);
1064 	ifp2 = psc->ifp;
1065 	if ((ifp1->if_flags & IFF_UP) == 0 && (ifp2->if_flags & IFF_UP) == 0)
1066 		return (1);
1067 
1068 	return (0);
1069 }
1070 
1071 static void
1072 cpswp_init(void *arg)
1073 {
1074 	struct cpswp_softc *sc = arg;
1075 
1076 	CPSW_DEBUGF(sc->swsc, (""));
1077 	CPSW_PORT_LOCK(sc);
1078 	cpswp_init_locked(arg);
1079 	CPSW_PORT_UNLOCK(sc);
1080 }
1081 
1082 static void
1083 cpswp_init_locked(void *arg)
1084 {
1085 	struct cpswp_softc *sc = arg;
1086 	struct ifnet *ifp;
1087 	uint32_t reg;
1088 
1089 	CPSW_DEBUGF(sc->swsc, (""));
1090 	CPSW_PORT_LOCK_ASSERT(sc);
1091 	ifp = sc->ifp;
1092 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1093 		return;
1094 
1095 	getbinuptime(&sc->init_uptime);
1096 
1097 	if (!sc->swsc->rx.running && !sc->swsc->tx.running) {
1098 		/* Reset the controller. */
1099 		cpsw_reset(sc->swsc);
1100 		cpsw_init(sc->swsc);
1101 	}
1102 
1103 	/* Set Slave Mapping. */
1104 	cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210);
1105 	cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1),
1106 	    0x33221100);
1107 	cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2);
1108 	/* Enable MAC RX/TX modules. */
1109 	/* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */
1110 	/* Huh?  Docs call bit 0 "Loopback" some places, "FullDuplex" others. */
1111 	reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
1112 	reg |= CPSW_SL_MACTL_GMII_ENABLE;
1113 	cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);
1114 
1115 	/* Initialize ALE: set port to forwarding(3), initialize addrs */
1116 	cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1), 3);
1117 	cpswp_ale_update_addresses(sc, 1);
1118 
1119 	if (sc->swsc->dualemac) {
1120 		/* Set Port VID. */
1121 		cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1),
1122 		    sc->vlan & 0xfff);
1123 		cpsw_ale_update_vlan_table(sc->swsc, sc->vlan,
1124 		    (1 << (sc->unit + 1)) | (1 << 0), /* Member list */
1125 		    (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */
1126 		    (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */
1127 	}
1128 
1129 	mii_mediachg(sc->mii);
1130 	callout_reset(&sc->mii_callout, hz, cpswp_tick, sc);
1131 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1132 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1133 }
1134 
1135 static int
1136 cpsw_shutdown(device_t dev)
1137 {
1138 	struct cpsw_softc *sc;
1139 	struct cpswp_softc *psc;
1140 	int i;
1141 
1142  	sc = device_get_softc(dev);
1143 	CPSW_DEBUGF(sc, (""));
1144 	for (i = 0; i < CPSW_PORTS; i++) {
1145 		if (!sc->dualemac && i != sc->active_slave)
1146 			continue;
1147 		psc = device_get_softc(sc->port[i].dev);
1148 		CPSW_PORT_LOCK(psc);
1149 		cpswp_stop_locked(psc);
1150 		CPSW_PORT_UNLOCK(psc);
1151 	}
1152 
1153 	return (0);
1154 }
1155 
1156 static void
1157 cpsw_rx_teardown_locked(struct cpsw_softc *sc)
1158 {
1159 	struct ifnet *ifp;
1160 	struct mbuf *received, *next;
1161 	int i = 0;
1162 
1163 	CPSW_DEBUGF(sc, ("starting RX teardown"));
1164 	cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
1165 	for (;;) {
1166 		received = cpsw_rx_dequeue(sc);
1167 		CPSW_RX_UNLOCK(sc);
1168 		while (received != NULL) {
1169 			next = received->m_nextpkt;
1170 			received->m_nextpkt = NULL;
1171 			ifp = received->m_pkthdr.rcvif;
1172 			(*ifp->if_input)(ifp, received);
1173 			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1174 			received = next;
1175 		}
1176 		CPSW_RX_LOCK(sc);
1177 		if (!sc->rx.running) {
1178 			CPSW_DEBUGF(sc,
1179 			    ("finished RX teardown (%d retries)", i));
1180 			return;
1181 		}
1182 		if (++i > 10) {
1183 			device_printf(sc->dev,
1184 			    "Unable to cleanly shutdown receiver\n");
1185 			return;
1186 		}
1187 		DELAY(10);
1188 	}
1189 }
1190 
1191 static void
1192 cpsw_tx_teardown_locked(struct cpsw_softc *sc)
1193 {
1194 	int i = 0;
1195 
1196 	CPSW_DEBUGF(sc, ("starting TX teardown"));
1197 	cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
1198 	cpsw_tx_dequeue(sc);
1199 	while (sc->tx.running && ++i < 10) {
1200 		DELAY(10);
1201 		cpsw_tx_dequeue(sc);
1202 	}
1203 	if (sc->tx.running) {
1204 		device_printf(sc->dev,
1205 		    "Unable to cleanly shutdown transmitter\n");
1206 	}
1207 	CPSW_DEBUGF(sc, ("finished TX teardown (%d retries, %d idle buffers)",
1208 	    i, sc->tx.active_queue_len));
1209 }
1210 
1211 static void
1212 cpswp_stop_locked(struct cpswp_softc *sc)
1213 {
1214 	struct ifnet *ifp;
1215 	uint32_t reg;
1216 
1217 	ifp = sc->ifp;
1218 	CPSW_DEBUGF(sc->swsc, (""));
1219 	CPSW_PORT_LOCK_ASSERT(sc);
1220 
1221 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1222 		return;
1223 
1224 	/* Disable interface */
1225 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1226 	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1227 
1228 	/* Stop ticker */
1229 	callout_stop(&sc->mii_callout);
1230 
1231 	/* Tear down the RX/TX queues. */
1232 	if (cpsw_ports_down(sc->swsc)) {
1233 		CPSW_RX_LOCK(sc->swsc);
1234 		cpsw_rx_teardown_locked(sc->swsc);
1235 		CPSW_RX_UNLOCK(sc->swsc);
1236 		CPSW_TX_LOCK(sc->swsc);
1237 		cpsw_tx_teardown_locked(sc->swsc);
1238 		CPSW_TX_UNLOCK(sc->swsc);
1239 	}
1240 
1241 	/* Stop MAC RX/TX modules. */
1242 	reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
1243 	reg &= ~CPSW_SL_MACTL_GMII_ENABLE;
1244 	cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);
1245 
1246 	if (cpsw_ports_down(sc->swsc)) {
1247 		/* Capture stats before we reset controller. */
1248 		cpsw_stats_collect(sc->swsc);
1249 
1250 		cpsw_reset(sc->swsc);
1251 		cpsw_init(sc->swsc);
1252 	}
1253 }
1254 
1255 /*
1256  *  Suspend/Resume.
1257  */
1258 
1259 static int
1260 cpsw_suspend(device_t dev)
1261 {
1262 	struct cpsw_softc *sc;
1263 	struct cpswp_softc *psc;
1264 	int i;
1265 
1266 	sc = device_get_softc(dev);
1267 	CPSW_DEBUGF(sc, (""));
1268 	for (i = 0; i < CPSW_PORTS; i++) {
1269 		if (!sc->dualemac && i != sc->active_slave)
1270 			continue;
1271 		psc = device_get_softc(sc->port[i].dev);
1272 		CPSW_PORT_LOCK(psc);
1273 		cpswp_stop_locked(psc);
1274 		CPSW_PORT_UNLOCK(psc);
1275 	}
1276 
1277 	return (0);
1278 }
1279 
1280 static int
1281 cpsw_resume(device_t dev)
1282 {
1283 	struct cpsw_softc *sc;
1284 
1285 	sc  = device_get_softc(dev);
1286 	CPSW_DEBUGF(sc, ("UNIMPLEMENTED"));
1287 
1288 	return (0);
1289 }
1290 
1291 /*
1292  *
1293  *  IOCTL
1294  *
1295  */
1296 
1297 static void
1298 cpsw_set_promisc(struct cpswp_softc *sc, int set)
1299 {
1300 	uint32_t reg;
1301 
1302 	/*
1303 	 * Enabling promiscuous mode requires ALE_BYPASS to be enabled.
1304 	 * That disables the ALE forwarding logic and causes every
1305 	 * packet to be sent only to the host port.  In bypass mode,
1306 	 * the ALE processes host port transmit packets the same as in
1307 	 * normal mode.
1308 	 */
1309 	reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL);
1310 	reg &= ~CPSW_ALE_CTL_BYPASS;
1311 	if (set)
1312 		reg |= CPSW_ALE_CTL_BYPASS;
1313 	cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg);
1314 }
1315 
1316 static void
1317 cpsw_set_allmulti(struct cpswp_softc *sc, int set)
1318 {
1319 	if (set) {
1320 		printf("All-multicast mode unimplemented\n");
1321 	}
1322 }
1323 
1324 static int
1325 cpswp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1326 {
1327 	struct cpswp_softc *sc;
1328 	struct ifreq *ifr;
1329 	int error;
1330 	uint32_t changed;
1331 
1332 	error = 0;
1333 	sc = ifp->if_softc;
1334 	ifr = (struct ifreq *)data;
1335 
1336 	switch (command) {
1337 	case SIOCSIFFLAGS:
1338 		CPSW_PORT_LOCK(sc);
1339 		if (ifp->if_flags & IFF_UP) {
1340 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1341 				changed = ifp->if_flags ^ sc->if_flags;
1342 				CPSW_DEBUGF(sc->swsc,
1343 				    ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)",
1344 				    changed));
1345 				if (changed & IFF_PROMISC)
1346 					cpsw_set_promisc(sc,
1347 					    ifp->if_flags & IFF_PROMISC);
1348 				if (changed & IFF_ALLMULTI)
1349 					cpsw_set_allmulti(sc,
1350 					    ifp->if_flags & IFF_ALLMULTI);
1351 			} else {
1352 				CPSW_DEBUGF(sc->swsc,
1353 				    ("SIOCSIFFLAGS: UP but not RUNNING; starting up"));
1354 				cpswp_init_locked(sc);
1355 			}
1356 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1357 			CPSW_DEBUGF(sc->swsc,
1358 			    ("SIOCSIFFLAGS: not UP but RUNNING; shutting down"));
1359 			cpswp_stop_locked(sc);
1360 		}
1361 
1362 		sc->if_flags = ifp->if_flags;
1363 		CPSW_PORT_UNLOCK(sc);
1364 		break;
1365 	case SIOCADDMULTI:
1366 		cpswp_ale_update_addresses(sc, 0);
1367 		break;
1368 	case SIOCDELMULTI:
1369 		/* Ugh.  DELMULTI doesn't provide the specific address
1370 		   being removed, so the best we can do is remove
1371 		   everything and rebuild it all. */
1372 		cpswp_ale_update_addresses(sc, 1);
1373 		break;
1374 	case SIOCGIFMEDIA:
1375 	case SIOCSIFMEDIA:
1376 		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1377 		break;
1378 	default:
1379 		error = ether_ioctl(ifp, command, data);
1380 	}
1381 	return (error);
1382 }
1383 
1384 /*
1385  *
1386  * MIIBUS
1387  *
1388  */
1389 static int
1390 cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg)
1391 {
1392 	uint32_t r, retries = CPSW_MIIBUS_RETRIES;
1393 
1394 	while (--retries) {
1395 		r = cpsw_read_4(sc, reg);
1396 		if ((r & MDIO_PHYACCESS_GO) == 0)
1397 			return (1);
1398 		DELAY(CPSW_MIIBUS_DELAY);
1399 	}
1400 
1401 	return (0);
1402 }
1403 
1404 static int
1405 cpswp_miibus_readreg(device_t dev, int phy, int reg)
1406 {
1407 	struct cpswp_softc *sc;
1408 	uint32_t cmd, r;
1409 
1410 	sc = device_get_softc(dev);
1411 	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
1412 		device_printf(dev, "MDIO not ready to read\n");
1413 		return (0);
1414 	}
1415 
1416 	/* Set GO, reg, phy */
1417 	cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
1418 	cpsw_write_4(sc->swsc, sc->phyaccess, cmd);
1419 
1420 	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
1421 		device_printf(dev, "MDIO timed out during read\n");
1422 		return (0);
1423 	}
1424 
1425 	r = cpsw_read_4(sc->swsc, sc->phyaccess);
1426 	if ((r & MDIO_PHYACCESS_ACK) == 0) {
1427 		device_printf(dev, "Failed to read from PHY.\n");
1428 		r = 0;
1429 	}
1430 	return (r & 0xFFFF);
1431 }
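
/*
 * MDIOUSERACCESS bit layout assumed by the read/write routines here
 * (per the AM335x TRM): bit 31 GO, bit 30 WRITE, bit 29 ACK, bits
 * 25-21 the register address, bits 20-16 the PHY address and bits
 * 15-0 the data, which is what the shift-and-mask encoding of "cmd"
 * constructs.
 */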
1432 
1433 static int
1434 cpswp_miibus_writereg(device_t dev, int phy, int reg, int value)
1435 {
1436 	struct cpswp_softc *sc;
1437 	uint32_t cmd;
1438 
1439 	sc = device_get_softc(dev);
1440 	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
1441 		device_printf(dev, "MDIO not ready to write\n");
1442 		return (0);
1443 	}
1444 
1445 	/* Set GO, WRITE, reg, phy, and value */
1446 	cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE |
1447 	    (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF);
1448 	cpsw_write_4(sc->swsc, sc->phyaccess, cmd);
1449 
1450 	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
1451 		device_printf(dev, "MDIO timed out during write\n");
1452 		return (0);
1453 	}
1454 
1455 	if ((cpsw_read_4(sc->swsc, sc->phyaccess) & MDIO_PHYACCESS_ACK) == 0)
1456 		device_printf(dev, "Failed to write to PHY.\n");
1457 
1458 	return (0);
1459 }
1460 
1461 static void
1462 cpswp_miibus_statchg(device_t dev)
1463 {
1464 	struct cpswp_softc *sc;
1465 	uint32_t mac_control, reg;
1466 
1467 	sc = device_get_softc(dev);
1468 	CPSW_DEBUGF(sc->swsc, (""));
1469 
1470 	reg = CPSW_SL_MACCONTROL(sc->unit);
1471 	mac_control = cpsw_read_4(sc->swsc, reg);
1472 	mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A |
1473 	    CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX);
1474 
1475 	switch(IFM_SUBTYPE(sc->mii->mii_media_active)) {
1476 	case IFM_1000_SX:
1477 	case IFM_1000_LX:
1478 	case IFM_1000_CX:
1479 	case IFM_1000_T:
1480 		mac_control |= CPSW_SL_MACTL_GIG;
1481 		break;
1482 
1483 	case IFM_100_TX:
1484 		mac_control |= CPSW_SL_MACTL_IFCTL_A;
1485 		break;
1486 	}
1487 	if (sc->mii->mii_media_active & IFM_FDX)
1488 		mac_control |= CPSW_SL_MACTL_FULLDUPLEX;
1489 
1490 	cpsw_write_4(sc->swsc, reg, mac_control);
1491 }
1492 
1493 /*
1494  *
1495  * Transmit/Receive Packets.
1496  *
1497  */
1498 static void
1499 cpsw_intr_rx(void *arg)
1500 {
1501 	struct cpsw_softc *sc = arg;
1502 	struct ifnet *ifp;
1503 	struct mbuf *received, *next;
1504 
1505 	CPSW_RX_LOCK(sc);
1506 	received = cpsw_rx_dequeue(sc);
1507 	cpsw_rx_enqueue(sc);
1508 	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
1509 	CPSW_RX_UNLOCK(sc);
1510 
1511 	while (received != NULL) {
1512 		next = received->m_nextpkt;
1513 		received->m_nextpkt = NULL;
1514 		ifp = received->m_pkthdr.rcvif;
1515 		(*ifp->if_input)(ifp, received);
1516 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1517 		received = next;
1518 	}
1519 }
1520 
1521 static struct mbuf *
1522 cpsw_rx_dequeue(struct cpsw_softc *sc)
1523 {
1524 	struct cpsw_cpdma_bd bd;
1525 	struct cpsw_slot *slot;
1526 	struct cpswp_softc *psc;
1527 	struct mbuf *mb_head, *mb_tail;
1528 	int port, removed = 0;
1529 
1530 	mb_head = mb_tail = NULL;
1531 
1532 	/* Pull completed packets off hardware RX queue. */
1533 	while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
1534 		cpsw_cpdma_read_bd(sc, slot, &bd);
1535 		if (bd.flags & CPDMA_BD_OWNER)
1536 			break; /* Still in use by hardware */
1537 
1538 		CPSW_DEBUGF(sc, ("Removing received packet from RX queue"));
1539 		++removed;
1540 		STAILQ_REMOVE_HEAD(&sc->rx.active, next);
1541 		STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next);
1542 
1543 		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD);
1544 		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
1545 
1546 		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
1547 			CPSW_DEBUGF(sc, ("RX teardown in progress"));
1548 			m_freem(slot->mbuf);
1549 			slot->mbuf = NULL;
1550 			cpsw_write_cp(sc, &sc->rx, 0xfffffffc);
1551 			sc->rx.running = 0;
1552 			break;
1553 		}
1554 
1555 		cpsw_write_cp_slot(sc, &sc->rx, slot);
1556 
1557 		port = (bd.flags & CPDMA_BD_PORT_MASK) - 1;
1558 		KASSERT(port >= 0 && port <= 1,
1559 		    ("packet received with invalid port: %d", port));
1560 		psc = device_get_softc(sc->port[port].dev);
1561 
1562 		/* Set up mbuf */
1563 		/* TODO: track SOP/EOP bits to assemble a full mbuf
1564 		   out of received fragments. */
1565 		slot->mbuf->m_data += bd.bufoff;
1566 		slot->mbuf->m_len = bd.pktlen - 4;
1567 		slot->mbuf->m_pkthdr.len = bd.pktlen - 4;
1568 		slot->mbuf->m_flags |= M_PKTHDR;
1569 		slot->mbuf->m_pkthdr.rcvif = psc->ifp;
1570 		slot->mbuf->m_nextpkt = NULL;
1571 
1572 		if ((psc->ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1573 			/* check for valid CRC by looking into pkt_err[5:4] */
1574 			if ((bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0) {
1575 				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1576 				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1577 				slot->mbuf->m_pkthdr.csum_data = 0xffff;
1578 			}
1579 		}
1580 
1581 		/* Add mbuf to packet list to be returned. */
1582 		if (mb_tail) {
1583 			mb_tail->m_nextpkt = slot->mbuf;
1584 		} else {
1585 			mb_head = slot->mbuf;
1586 		}
1587 		mb_tail = slot->mbuf;
1588 		slot->mbuf = NULL;
1589 	}
1590 
1591 	if (removed != 0) {
1592 		sc->rx.queue_removes += removed;
1593 		sc->rx.active_queue_len -= removed;
1594 		sc->rx.avail_queue_len += removed;
1595 		if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len)
1596 			sc->rx.max_avail_queue_len = sc->rx.avail_queue_len;
1597 	}
1598 	return (mb_head);
1599 }
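
/*
 * A note on the completion pointer (CP) writes above: the CPDMA engine
 * keeps the channel interrupt asserted until software acknowledges the
 * last processed descriptor by writing its physical address back to the
 * CP register, and a completed teardown is acknowledged with the
 * reserved value 0xfffffffc.
 */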
1600 
1601 static void
1602 cpsw_rx_enqueue(struct cpsw_softc *sc)
1603 {
1604 	bus_dma_segment_t seg[1];
1605 	struct cpsw_cpdma_bd bd;
1606 	struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
1607 	struct cpsw_slot *slot, *prev_slot = NULL;
1608 	struct cpsw_slot *last_old_slot, *first_new_slot;
1609 	int error, nsegs, added = 0;
1610 
1611 	/* Register new mbufs with hardware. */
1612 	while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) {
1613 		if (slot->mbuf == NULL) {
1614 			slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1615 			if (slot->mbuf == NULL) {
1616 				device_printf(sc->dev,
1617 				    "Unable to fill RX queue\n");
1618 				break;
1619 			}
1620 			slot->mbuf->m_len =
1621 			    slot->mbuf->m_pkthdr.len =
1622 			    slot->mbuf->m_ext.ext_size;
1623 		}
1624 
1625 		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
1626 		    slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);
1627 
1628 		KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
1629 		KASSERT(error == 0, ("DMA error (error=%d)", error));
1630 		if (error != 0 || nsegs != 1) {
1631 			device_printf(sc->dev,
1632 			    "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
1633 			    __func__, nsegs, error);
1634 			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
1635 			m_freem(slot->mbuf);
1636 			slot->mbuf = NULL;
1637 			break;
1638 		}
1639 
1640 		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD);
1641 
1642 		/* Create and submit new RX descriptor. */
1643 		bd.next = 0;
1644 		bd.bufptr = seg->ds_addr;
1645 		bd.bufoff = 0;
1646 		bd.buflen = MCLBYTES - 1;
1647 		bd.pktlen = bd.buflen;
1648 		bd.flags = CPDMA_BD_OWNER;
1649 		cpsw_cpdma_write_bd(sc, slot, &bd);
1650 		++added;
1651 
1652 		if (prev_slot != NULL)
1653 			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
1654 		prev_slot = slot;
1655 		STAILQ_REMOVE_HEAD(&sc->rx.avail, next);
1656 		sc->rx.avail_queue_len--;
1657 		STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
1658 	}
1659 
1660 	if (added == 0)
1661 		return;
1662 
1663 	CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added));
1664 
1665 	/* Link new entries to hardware RX queue. */
1666 	last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next);
1667 	first_new_slot = STAILQ_FIRST(&tmpqueue);
1668 	STAILQ_CONCAT(&sc->rx.active, &tmpqueue);
1669 	if (first_new_slot == NULL) {
1670 		return;
1671 	} else if (last_old_slot == NULL) {
1672 		/* Start a fresh queue. */
1673 		cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
1674 	} else {
1675 		/* Add buffers to end of current queue. */
1676 		cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
1677 		/* If underrun, restart queue. */
1678 		if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) {
1679 			cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
1680 		}
1681 	}
1682 	sc->rx.queue_adds += added;
1683 	sc->rx.active_queue_len += added;
1684 	if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) {
1685 		sc->rx.max_active_queue_len = sc->rx.active_queue_len;
1686 	}
1687 }
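
/*
 * The EOQ test above guards against the classic CPDMA "misqueue" race:
 * if the engine ran off the end of the old chain before the new next
 * pointer was linked in, it set EOQ in the last old descriptor and
 * stopped, so the head descriptor pointer must be rewritten to restart
 * the queue.  cpswp_tx_enqueue() below performs the same dance for the
 * TX channel.
 */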
1688 
1689 static void
1690 cpswp_start(struct ifnet *ifp)
1691 {
1692 	struct cpswp_softc *sc = ifp->if_softc;
1693 
1694 	CPSW_TX_LOCK(sc->swsc);
1695 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && sc->swsc->tx.running) {
1696 		cpswp_tx_enqueue(sc);
1697 		cpsw_tx_dequeue(sc->swsc);
1698 	}
1699 	CPSW_TX_UNLOCK(sc->swsc);
1700 }
1701 
1702 static void
1703 cpsw_intr_tx(void *arg)
1704 {
1705 	struct cpsw_softc *sc;
1706 
1707 	sc = (struct cpsw_softc *)arg;
1708 	CPSW_TX_LOCK(sc);
1709 	cpsw_tx_dequeue(sc);
1710 	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 2);
1711 	CPSW_TX_UNLOCK(sc);
1712 }
1713 
1714 static void
1715 cpswp_tx_enqueue(struct cpswp_softc *sc)
1716 {
1717 	bus_dma_segment_t segs[CPSW_TXFRAGS];
1718 	struct cpsw_cpdma_bd bd;
1719 	struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
1720 	struct cpsw_slot *slot, *prev_slot = NULL;
1721 	struct cpsw_slot *last_old_slot, *first_new_slot;
1722 	struct mbuf *m0;
1723 	int error, flags, nsegs, seg, added = 0, padlen;
1724 
1725 	flags = 0;
1726 	if (sc->swsc->dualemac) {
1727 		flags = CPDMA_BD_TO_PORT |
1728 		    ((sc->unit + 1) & CPDMA_BD_PORT_MASK);
1729 	}
1730 	/* Pull pending packets from IF queue and prep them for DMA. */
1731 	while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) {
1732 		IF_DEQUEUE(&sc->ifp->if_snd, m0);
1733 		if (m0 == NULL)
1734 			break;
1735 
1736 		slot->mbuf = m0;
1737 		padlen = ETHER_MIN_LEN - slot->mbuf->m_pkthdr.len;
1738 		if (padlen < 0)
1739 			padlen = 0;
1740 
1741 		/* Create mapping in DMA memory */
1742 		error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag,
1743 		    slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
1744 		/* If the packet is too fragmented, try to simplify. */
1745 		if (error == EFBIG ||
1746 		    (error == 0 &&
1747 		    nsegs + (padlen > 0 ? 1 : 0) > sc->swsc->tx.avail_queue_len)) {
1748 			bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
1749 			if (padlen > 0) /* May as well add padding. */
1750 				m_append(slot->mbuf, padlen,
1751 				    sc->swsc->null_mbuf->m_data);
1752 			m0 = m_defrag(slot->mbuf, M_NOWAIT);
1753 			if (m0 == NULL) {
1754 				device_printf(sc->dev,
1755 				    "Can't defragment packet; dropping\n");
1756 				m_freem(slot->mbuf);
1757 			} else {
1758 				CPSW_DEBUGF(sc->swsc,
1759 				    ("Requeueing defragmented packet"));
1760 				IF_PREPEND(&sc->ifp->if_snd, m0);
1761 			}
1762 			slot->mbuf = NULL;
1763 			continue;
1764 		}
1765 		if (error != 0) {
1766 			device_printf(sc->dev,
1767 			    "%s: Can't setup DMA (error=%d), dropping packet\n",
1768 			    __func__, error);
1769 			bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
1770 			m_freem(slot->mbuf);
1771 			slot->mbuf = NULL;
1772 			break;
1773 		}
1774 
1775 		bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap,
1776 				BUS_DMASYNC_PREWRITE);
1777 
1778 		CPSW_DEBUGF(sc->swsc,
1779 		    ("Queueing TX packet: %d segments + %d pad bytes",
1780 		    nsegs, padlen));
1781 
1782 		slot->ifp = sc->ifp;
1783 		/* If there is only one segment, the for() loop
1784 		 * gets skipped and the single buffer gets set up
1785 		 * as both SOP and EOP. */
1786 		/* Start by setting up the first buffer */
1787 		bd.next = 0;
1788 		bd.bufptr = segs[0].ds_addr;
1789 		bd.bufoff = 0;
1790 		bd.buflen = segs[0].ds_len;
1791 		bd.pktlen = m_length(slot->mbuf, NULL) + padlen;
1792 		bd.flags =  CPDMA_BD_SOP | CPDMA_BD_OWNER | flags;
1793 		for (seg = 1; seg < nsegs; ++seg) {
1794 			/* Save the previous buffer (which isn't EOP) */
1795 			cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
1796 			if (prev_slot != NULL) {
1797 				cpsw_cpdma_write_bd_next(sc->swsc, prev_slot,
1798 				    slot);
1799 			}
1800 			prev_slot = slot;
1801 			STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
1802 			sc->swsc->tx.avail_queue_len--;
1803 			STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
1804 			++added;
1805 			slot = STAILQ_FIRST(&sc->swsc->tx.avail);
1806 
1807 			/* Setup next buffer (which isn't SOP) */
1808 			bd.next = 0;
1809 			bd.bufptr = segs[seg].ds_addr;
1810 			bd.bufoff = 0;
1811 			bd.buflen = segs[seg].ds_len;
1812 			bd.pktlen = 0;
1813 			bd.flags = CPDMA_BD_OWNER | flags;
1814 		}
1815 		/* Save the final buffer. */
1816 		if (padlen <= 0)
1817 			bd.flags |= CPDMA_BD_EOP;
1818 		cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
1819 		if (prev_slot != NULL)
1820 			cpsw_cpdma_write_bd_next(sc->swsc, prev_slot, slot);
1821 		prev_slot = slot;
1822 		STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
1823 		sc->swsc->tx.avail_queue_len--;
1824 		STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
1825 		++added;
1826 
1827 		if (padlen > 0) {
1828 			slot = STAILQ_FIRST(&sc->swsc->tx.avail);
1829 			STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
1830 			sc->swsc->tx.avail_queue_len--;
1831 			STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
1832 			++added;
1833 
1834 			/* Setup buffer of null pad bytes (definitely EOP) */
1835 			cpsw_cpdma_write_bd_next(sc->swsc, prev_slot, slot);
1836 			prev_slot = slot;
1837 			bd.next = 0;
1838 			bd.bufptr = sc->swsc->null_mbuf_paddr;
1839 			bd.bufoff = 0;
1840 			bd.buflen = padlen;
1841 			bd.pktlen = 0;
1842 			bd.flags = CPDMA_BD_EOP | CPDMA_BD_OWNER | flags;
1843 			cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
1844 			++nsegs;
1845 		}
1846 
1847 		if (nsegs > sc->swsc->tx.longest_chain)
1848 			sc->swsc->tx.longest_chain = nsegs;
1849 
1850 		/* TODO: Should we defer the BPF tap until
1851 		 * after all packets are queued? */
1852 		BPF_MTAP(sc->ifp, m0);
1853 	}
1854 
1855 	/* Attach the list of new buffers to the hardware TX queue. */
1856 	last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next);
1857 	first_new_slot = STAILQ_FIRST(&tmpqueue);
1858 	STAILQ_CONCAT(&sc->swsc->tx.active, &tmpqueue);
1859 	if (first_new_slot == NULL) {
1860 		return;
1861 	} else if (last_old_slot == NULL) {
1862 		/* Start a fresh queue. */
1863 		sc->swsc->last_hdp = cpsw_cpdma_bd_paddr(sc->swsc, first_new_slot);
1864 		cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot);
1865 	} else {
1866 		/* Add buffers to end of current queue. */
1867 		cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot,
1868 		    first_new_slot);
1869 		/* If underrun, restart queue. */
1870 		if (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) &
1871 		    CPDMA_BD_EOQ) {
1872 			sc->swsc->last_hdp = cpsw_cpdma_bd_paddr(sc->swsc, first_new_slot);
1873 			cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx,
1874 			    first_new_slot);
1875 		}
1876 	}
1877 	sc->swsc->tx.queue_adds += added;
1878 	sc->swsc->tx.active_queue_len += added;
1879 	if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) {
1880 		sc->swsc->tx.max_active_queue_len = sc->swsc->tx.active_queue_len;
1881 	}
1882 }
1883 
1884 static int
1885 cpsw_tx_dequeue(struct cpsw_softc *sc)
1886 {
1887 	struct cpsw_slot *slot, *last_removed_slot = NULL;
1888 	struct cpsw_cpdma_bd bd;
1889 	uint32_t flags, removed = 0;
1890 
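	/*
	 * A completion pointer (CP) value of 0xfffffffc is the CPDMA
	 * teardown marker: reading it back means the channel has been
	 * torn down, and writing the same value acknowledges the
	 * teardown interrupt.
	 */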
1891 	slot = STAILQ_FIRST(&sc->tx.active);
1892 	if (slot == NULL && cpsw_read_cp(sc, &sc->tx) == 0xfffffffc) {
1893 		CPSW_DEBUGF(sc, ("TX teardown of an empty queue"));
1894 		cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
1895 		sc->tx.running = 0;
1896 		return (0);
1897 	}
1898 
1899 	/* Pull completed buffers off the hardware TX queue. */
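	/*
	 * The hardware clears CPDMA_BD_OWNER in a packet's SOP
	 * descriptor once the packet has been fully transmitted, so
	 * ownership is tested on the SOP; the packet's remaining
	 * descriptors are then reclaimed along with it below.
	 */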
1900 	while (slot != NULL) {
1901 		flags = cpsw_cpdma_read_bd_flags(sc, slot);
1902 		if (flags & CPDMA_BD_OWNER)
1903 			break; /* Hardware is still using this packet. */
1904 
1905 		CPSW_DEBUGF(sc, ("TX removing completed packet"));
1906 		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE);
1907 		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
1908 		m_freem(slot->mbuf);
1909 		slot->mbuf = NULL;
1910 		if (slot->ifp)
1911 			if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1);
1912 
1913 		/* Dequeue any additional buffers used by this packet. */
1914 		while (slot != NULL && slot->mbuf == NULL) {
1915 			STAILQ_REMOVE_HEAD(&sc->tx.active, next);
1916 			STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next);
1917 			++removed;
1918 			last_removed_slot = slot;
1919 			slot = STAILQ_FIRST(&sc->tx.active);
1920 		}
1921 
1922 		/* TearDown complete is only marked on the SOP for the packet. */
1923 		if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) ==
1924 		    (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) {
1925 			CPSW_DEBUGF(sc, ("TX teardown in progress"));
1926 			cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
			/* TODO: Increment a count of dropped TX packets. */
1928 			sc->tx.running = 0;
1929 			break;
1930 		}
1931 
1932 		if ((flags & CPDMA_BD_EOP) == 0)
1933 			flags = cpsw_cpdma_read_bd_flags(sc, last_removed_slot);
1934 		if ((flags & (CPDMA_BD_EOP | CPDMA_BD_EOQ)) ==
1935 		    (CPDMA_BD_EOP | CPDMA_BD_EOQ)) {
1936 			cpsw_cpdma_read_bd(sc, last_removed_slot, &bd);
1937 			if (bd.next != 0 && bd.next != sc->last_hdp) {
1938 				/* Restart the queue. */
1939 				sc->last_hdp = bd.next;
1940 				cpsw_write_4(sc, sc->tx.hdp_offset, bd.next);
1941 			}
1942 		}
1943 	}
1944 
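	/*
	 * Writing the last reclaimed descriptor to the completion
	 * pointer acknowledges the TX interrupt for everything
	 * reclaimed so far.
	 */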
1945 	if (removed != 0) {
1946 		cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot);
1947 		sc->tx.queue_removes += removed;
1948 		sc->tx.active_queue_len -= removed;
1949 		sc->tx.avail_queue_len += removed;
1950 		if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len)
1951 			sc->tx.max_avail_queue_len = sc->tx.avail_queue_len;
1952 	}
1953 	return (removed);
1954 }
1955 
1956 /*
1957  *
1958  * Miscellaneous interrupts.
1959  *
1960  */
1961 
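/*
 * The RX threshold interrupt is not used for anything beyond
 * acknowledgement: writing 0 to the EOI vector re-arms core
 * interrupt line 0 (RX threshold).
 */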
1962 static void
1963 cpsw_intr_rx_thresh(void *arg)
1964 {
1965 	struct cpsw_softc *sc = arg;
1966 	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_RX_THRESH_STAT(0));
1967 
1968 	CPSW_DEBUGF(sc, ("stat=%x", stat));
1969 	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
1970 }
1971 
1972 static void
1973 cpsw_intr_misc_host_error(struct cpsw_softc *sc)
1974 {
1975 	uint32_t intstat;
1976 	uint32_t dmastat;
1977 	int txerr, rxerr, txchan, rxchan;
1978 
1979 	printf("\n\n");
1980 	device_printf(sc->dev,
1981 	    "HOST ERROR:  PROGRAMMING ERROR DETECTED BY HARDWARE\n");
1982 	printf("\n\n");
1983 	intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
1984 	device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat);
1985 	dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
1986 	device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat);
1987 
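	/*
	 * Decode the DMASTATUS host error fields: TX error code in bits
	 * 23:20, TX channel in bits 18:16, RX error code in bits 15:12
	 * and RX channel in bits 10:8.
	 */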
1988 	txerr = (dmastat >> 20) & 15;
1989 	txchan = (dmastat >> 16) & 7;
1990 	rxerr = (dmastat >> 12) & 15;
1991 	rxchan = (dmastat >> 8) & 7;
1992 
1993 	switch (txerr) {
1994 	case 0: break;
1995 	case 1:	printf("SOP error on TX channel %d\n", txchan);
1996 		break;
1997 	case 2:	printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan);
1998 		break;
1999 	case 3:	printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan);
2000 		break;
2001 	case 4:	printf("Zero Buffer Pointer on TX channel %d\n", txchan);
2002 		break;
2003 	case 5:	printf("Zero Buffer Length on TX channel %d\n", txchan);
2004 		break;
2005 	case 6:	printf("Packet length error on TX channel %d\n", txchan);
2006 		break;
2007 	default: printf("Unknown error on TX channel %d\n", txchan);
2008 		break;
2009 	}
2010 
2011 	if (txerr != 0) {
2012 		printf("CPSW_CPDMA_TX%d_HDP=0x%x\n",
2013 		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan)));
2014 		printf("CPSW_CPDMA_TX%d_CP=0x%x\n",
2015 		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan)));
2016 		cpsw_dump_queue(sc, &sc->tx.active);
2017 	}
2018 
2019 	switch (rxerr) {
2020 	case 0: break;
2021 	case 2:	printf("Ownership bit not set on RX channel %d\n", rxchan);
2022 		break;
2023 	case 4:	printf("Zero Buffer Pointer on RX channel %d\n", rxchan);
2024 		break;
2025 	case 5:	printf("Zero Buffer Length on RX channel %d\n", rxchan);
2026 		break;
2027 	case 6:	printf("Buffer offset too big on RX channel %d\n", rxchan);
2028 		break;
2029 	default: printf("Unknown RX error on RX channel %d\n", rxchan);
2030 		break;
2031 	}
2032 
2033 	if (rxerr != 0) {
2034 		printf("CPSW_CPDMA_RX%d_HDP=0x%x\n",
		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_HDP(rxchan)));
2036 		printf("CPSW_CPDMA_RX%d_CP=0x%x\n",
2037 		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan)));
2038 		cpsw_dump_queue(sc, &sc->rx.active);
2039 	}
2040 
2041 	printf("\nALE Table\n");
2042 	cpsw_ale_dump_table(sc);
2043 
	/* XXX: No recovery is implemented, so stop with the state visible. */
	panic("CPSW HOST ERROR INTERRUPT");

	/*
	 * NOTREACHED.  If the panic above is ever removed, suppress this
	 * interrupt in the future so it does not fire again; the watchdog
	 * will probably reset the controller in a little while, and it
	 * will probably fail again.
	 */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat);
	printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n");
2052 }
2053 
2054 static void
2055 cpsw_intr_misc(void *arg)
2056 {
2057 	struct cpsw_softc *sc = arg;
2058 	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));
2059 
2060 	if (stat & CPSW_WR_C_MISC_EVNT_PEND)
2061 		CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented"));
2062 	if (stat & CPSW_WR_C_MISC_STAT_PEND)
2063 		cpsw_stats_collect(sc);
2064 	if (stat & CPSW_WR_C_MISC_HOST_PEND)
2065 		cpsw_intr_misc_host_error(sc);
2066 	if (stat & CPSW_WR_C_MISC_MDIOLINK) {
2067 		cpsw_write_4(sc, MDIOLINKINTMASKED,
2068 		    cpsw_read_4(sc, MDIOLINKINTMASKED));
2069 	}
2070 	if (stat & CPSW_WR_C_MISC_MDIOUSER) {
2071 		CPSW_DEBUGF(sc,
2072 		    ("MDIO operation completed interrupt unimplemented"));
2073 	}
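	/* Writing 3 to the EOI vector re-arms core interrupt line 3 (misc). */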
2074 	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
2075 }
2076 
2077 /*
2078  *
2079  * Periodic Checks and Watchdog.
2080  *
2081  */
2082 
2083 static void
2084 cpswp_tick(void *msc)
2085 {
2086 	struct cpswp_softc *sc = msc;
2087 
2088 	/* Check for media type change */
2089 	mii_tick(sc->mii);
2090 	if (sc->media_status != sc->mii->mii_media.ifm_media) {
2091 		printf("%s: media type changed (ifm_media=%x)\n", __func__,
2092 			sc->mii->mii_media.ifm_media);
2093 		cpswp_ifmedia_upd(sc->ifp);
2094 	}
2095 
2096 	/* Schedule another timeout one second from now */
2097 	callout_reset(&sc->mii_callout, hz, cpswp_tick, sc);
2098 }
2099 
2100 static void
2101 cpswp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2102 {
2103 	struct cpswp_softc *sc;
2104 	struct mii_data *mii;
2105 
2106 	sc = ifp->if_softc;
2107 	CPSW_DEBUGF(sc->swsc, (""));
2108 	CPSW_PORT_LOCK(sc);
2109 
2110 	mii = sc->mii;
2111 	mii_pollstat(mii);
2112 
2113 	ifmr->ifm_active = mii->mii_media_active;
2114 	ifmr->ifm_status = mii->mii_media_status;
2115 	CPSW_PORT_UNLOCK(sc);
2116 }
2117 
2118 static int
2119 cpswp_ifmedia_upd(struct ifnet *ifp)
2120 {
2121 	struct cpswp_softc *sc;
2122 
2123 	sc = ifp->if_softc;
2124 	CPSW_DEBUGF(sc->swsc, (""));
2125 	CPSW_PORT_LOCK(sc);
2126 	mii_mediachg(sc->mii);
2127 	sc->media_status = sc->mii->mii_media.ifm_media;
2128 	CPSW_PORT_UNLOCK(sc);
2129 
2130 	return (0);
2131 }
2132 
2133 static void
2134 cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc)
2135 {
2136 	struct cpswp_softc *psc;
2137 	int i;
2138 
2139 	cpsw_debugf_head("CPSW watchdog");
2140 	device_printf(sc->dev, "watchdog timeout\n");
2141 	for (i = 0; i < CPSW_PORTS; i++) {
2142 		if (!sc->dualemac && i != sc->active_slave)
2143 			continue;
2144 		psc = device_get_softc(sc->port[i].dev);
2145 		CPSW_PORT_LOCK(psc);
2146 		cpswp_stop_locked(psc);
2147 		CPSW_PORT_UNLOCK(psc);
2148 	}
2149 }
2150 
2151 static void
2152 cpsw_tx_watchdog(void *msc)
2153 {
2154 	struct cpsw_softc *sc;
2155 
2156 	sc = msc;
2157 	CPSW_TX_LOCK(sc);
2158 	if (sc->tx.active_queue_len == 0 || !sc->tx.running) {
2159 		sc->watchdog.timer = 0; /* Nothing to do. */
2160 	} else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) {
2161 		sc->watchdog.timer = 0;  /* Stuff done while we weren't looking. */
2162 	} else if (cpsw_tx_dequeue(sc) > 0) {
2163 		sc->watchdog.timer = 0;  /* We just did something. */
2164 	} else {
2165 		/* There was something to do but it didn't get done. */
2166 		++sc->watchdog.timer;
2167 		if (sc->watchdog.timer > 5) {
2168 			sc->watchdog.timer = 0;
2169 			++sc->watchdog.resets;
2170 			cpsw_tx_watchdog_full_reset(sc);
2171 		}
2172 	}
2173 	sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes;
2174 	CPSW_TX_UNLOCK(sc);
2175 
2176 	/* Schedule another timeout one second from now */
2177 	callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
2178 }
2179 
2180 /*
2181  *
2182  * ALE support routines.
2183  *
2184  */
2185 
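/*
 * The ALE table is accessed indirectly: writing an index to TBLCTL
 * latches that 68-bit entry into the three TBLW word registers for
 * reading, while setting the write bit (bit 31) in TBLCTL stores the
 * TBLW contents back into the entry.
 */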
2186 static void
2187 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
2188 {
2189 	cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
2190 	ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
2191 	ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
2192 	ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
2193 }
2194 
2195 static void
2196 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
2197 {
2198 	cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
2199 	cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
2200 	cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
	cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1U << 31 | (idx & 1023));
2202 }
2203 
2204 static void
2205 cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
2206 {
2207 	int i;
2208 	uint32_t ale_entry[3];
2209 
	/*
	 * Skip the low entries, which are reserved for the per-port
	 * unicast, broadcast and VLAN entries written at fixed indices.
	 */
	for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) {
2212 		cpsw_ale_read_entry(sc, i, ale_entry);
2213 		if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR ||
2214 		    ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) &&
		    ALE_MCAST(ale_entry) == 1) { /* Mcast link addr */
2216 			ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
2217 			cpsw_ale_write_entry(sc, i, ale_entry);
2218 		}
2219 	}
2220 }
2221 
2222 static int
2223 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan,
2224 	uint8_t *mac)
2225 {
2226 	int free_index = -1, matching_index = -1, i;
2227 	uint32_t ale_entry[3], ale_type;
2228 
2229 	/* Find a matching entry or a free entry. */
2230 	for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) {
2231 		cpsw_ale_read_entry(sc, i, ale_entry);
2232 
2233 		/* Entry Type[61:60] is 0 for free entry */
2234 		if (free_index < 0 && ALE_TYPE(ale_entry) == 0)
2235 			free_index = i;
2236 
2237 		if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) &&
2238 		    (((ale_entry[1] >> 0) & 0xFF) == mac[1]) &&
		    (((ale_entry[0] >> 24) & 0xFF) == mac[2]) &&
		    (((ale_entry[0] >> 16) & 0xFF) == mac[3]) &&
2241 		    (((ale_entry[0] >> 8) & 0xFF) == mac[4]) &&
2242 		    (((ale_entry[0] >> 0) & 0xFF) == mac[5])) {
2243 			matching_index = i;
2244 			break;
2245 		}
2246 	}
2247 
2248 	if (matching_index < 0) {
2249 		if (free_index < 0)
2250 			return (ENOMEM);
2251 		i = free_index;
2252 	}
2253 
2254 	if (vlan != -1)
2255 		ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16;
2256 	else
2257 		ale_type = ALE_TYPE_ADDR << 28;
2258 
2259 	/* Set MAC address */
2260 	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
2261 	ale_entry[1] = mac[0] << 8 | mac[1];
2262 
	/* Entry type[61:60] and mcast fwd state[63:62]; state is Forward (3). */
2264 	ale_entry[1] |= ALE_MCAST_FWD | ale_type;
2265 
2266 	/* Set portmask [68:66] */
2267 	ale_entry[2] = (portmap & 7) << 2;
2268 
2269 	cpsw_ale_write_entry(sc, i, ale_entry);
2270 
	return (0);
2272 }
2273 
2274 static void
cpsw_ale_dump_table(struct cpsw_softc *sc)
{
	int i;
	uint32_t ale_entry[3];

	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
2279 		cpsw_ale_read_entry(sc, i, ale_entry);
2280 		switch (ALE_TYPE(ale_entry)) {
2281 		case ALE_TYPE_VLAN:
2282 			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
2283 				ale_entry[1], ale_entry[0]);
2284 			printf("type: %u ", ALE_TYPE(ale_entry));
2285 			printf("vlan: %u ", ALE_VLAN(ale_entry));
2286 			printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry));
2287 			printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry));
2288 			printf("unreg flood: %u ", ALE_VLAN_UNREGFLOOD(ale_entry));
2289 			printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry));
2290 			printf("\n");
2291 			break;
2292 		case ALE_TYPE_ADDR:
2293 		case ALE_TYPE_VLAN_ADDR:
2294 			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
2295 				ale_entry[1], ale_entry[0]);
2296 			printf("type: %u ", ALE_TYPE(ale_entry));
2297 			printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ",
2298 				(ale_entry[1] >> 8) & 0xFF,
2299 				(ale_entry[1] >> 0) & 0xFF,
				(ale_entry[0] >> 24) & 0xFF,
				(ale_entry[0] >> 16) & 0xFF,
2302 				(ale_entry[0] >> 8) & 0xFF,
2303 				(ale_entry[0] >> 0) & 0xFF);
2304 			printf(ALE_MCAST(ale_entry) ? "mcast " : "ucast ");
2305 			if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR)
2306 				printf("vlan: %u ", ALE_VLAN(ale_entry));
2307 			printf("port: %u ", ALE_PORTS(ale_entry));
2308 			printf("\n");
2309 			break;
2310 		}
2311 	}
2312 	printf("\n");
2313 }
2314 
2315 static int
2316 cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge)
2317 {
2318 	uint8_t *mac;
2319 	uint32_t ale_entry[3], ale_type, portmask;
2320 	struct ifmultiaddr *ifma;
2321 
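	/*
	 * In dual EMAC mode each slave forwards only between itself and
	 * the host (port 0) on its own VLAN; in switch mode all three
	 * ports (mask 7) participate.
	 */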
2322 	if (sc->swsc->dualemac) {
2323 		ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16;
2324 		portmask = 1 << (sc->unit + 1) | 1 << 0;
2325 	} else {
2326 		ale_type = ALE_TYPE_ADDR << 28;
2327 		portmask = 7;
2328 	}
2329 
2330 	/*
2331 	 * Route incoming packets for our MAC address to Port 0 (host).
2332 	 * For simplicity, keep this entry at table index 0 for port 1 and
2333 	 * at index 2 for port 2 in the ALE.
2334 	 */
	if_addr_rlock(sc->ifp);
2336 	mac = LLADDR((struct sockaddr_dl *)sc->ifp->if_addr->ifa_addr);
2337 	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
2338 	ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */
2339 	ale_entry[2] = 0; /* port = 0 */
2340 	cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry);
2341 
2342 	/* Set outgoing MAC Address for slave port. */
2343 	cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1),
2344 	    mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
2345 	cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1),
2346 	    mac[5] << 8 | mac[4]);
	if_addr_runlock(sc->ifp);
2348 
2349 	/* Keep the broadcast address at table entry 1 (or 3). */
2350 	ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */
2351 	/* ALE_MCAST_FWD, Addr type, upper 16 bits of Mac */
2352 	ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff;
2353 	ale_entry[2] = portmask << 2;
2354 	cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry);
2355 
	/*
	 * SIOCDELMULTI doesn't specify the particular address
	 * being removed, so we have to remove all and rebuild.
	 */
2358 	if (purge)
2359 		cpsw_ale_remove_all_mc_entries(sc->swsc);
2360 
	/* Set other multicast addrs desired. */
	if_maddr_rlock(sc->ifp);
	TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if_maddr_runlock(sc->ifp);
2370 
2371 	return (0);
2372 }
2373 
2374 static int
2375 cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports,
2376 	int untag, int mcregflood, int mcunregflood)
2377 {
2378 	int free_index, i, matching_index;
2379 	uint32_t ale_entry[3];
2380 
2381 	free_index = matching_index = -1;
2382 	/* Find a matching entry or a free entry. */
2383 	for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) {
2384 		cpsw_ale_read_entry(sc, i, ale_entry);
2385 
2386 		/* Entry Type[61:60] is 0 for free entry */
2387 		if (free_index < 0 && ALE_TYPE(ale_entry) == 0)
2388 			free_index = i;
2389 
2390 		if (ALE_VLAN(ale_entry) == vlan) {
2391 			matching_index = i;
2392 			break;
2393 		}
2394 	}
2395 
2396 	if (matching_index < 0) {
2397 		if (free_index < 0)
2398 			return (-1);
2399 		i = free_index;
2400 	}
2401 
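	/*
	 * VLAN entry layout in word 0: untagged-egress port mask in bits
	 * 26:24, registered multicast flood mask in bits 18:16,
	 * unregistered multicast flood mask in bits 10:8 and the member
	 * port mask in bits 2:0.
	 */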
2402 	ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 |
2403 	    (mcunregflood & 7) << 8 | (ports & 7);
2404 	ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16;
2405 	ale_entry[2] = 0;
2406 	cpsw_ale_write_entry(sc, i, ale_entry);
2407 
2408 	return (0);
2409 }
2410 
2411 /*
2412  *
2413  * Statistics and Sysctls.
2414  *
2415  */
2416 
2417 #if 0
2418 static void
2419 cpsw_stats_dump(struct cpsw_softc *sc)
2420 {
2421 	int i;
2422 	uint32_t r;
2423 
2424 	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
2425 		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
2426 		    cpsw_stat_sysctls[i].reg);
2427 		CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid,
2428 		    (intmax_t)sc->shadow_stats[i], r,
2429 		    (intmax_t)sc->shadow_stats[i] + r));
2430 	}
2431 }
2432 #endif
2433 
2434 static void
2435 cpsw_stats_collect(struct cpsw_softc *sc)
2436 {
2437 	int i;
2438 	uint32_t r;
2439 
2440 	CPSW_DEBUGF(sc, ("Controller shadow statistics updated."));
2441 
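	/*
	 * The statistics registers decrement by the value written to
	 * them, so reading a counter and writing the value back folds it
	 * into the shadow copy without losing increments that arrive in
	 * between.
	 */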
2442 	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
2443 		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
2444 		    cpsw_stat_sysctls[i].reg);
2445 		sc->shadow_stats[i] += r;
2446 		cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg,
2447 		    r);
2448 	}
2449 }
2450 
2451 static int
2452 cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS)
2453 {
2454 	struct cpsw_softc *sc;
2455 	struct cpsw_stat *stat;
2456 	uint64_t result;
2457 
2458 	sc = (struct cpsw_softc *)arg1;
2459 	stat = &cpsw_stat_sysctls[oidp->oid_number];
2460 	result = sc->shadow_stats[oidp->oid_number];
2461 	result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg);
2462 	return (sysctl_handle_64(oidp, &result, 0, req));
2463 }
2464 
2465 static int
2466 cpsw_stat_attached(SYSCTL_HANDLER_ARGS)
2467 {
2468 	struct cpsw_softc *sc;
2469 	struct bintime t;
2470 	unsigned result;
2471 
2472 	sc = (struct cpsw_softc *)arg1;
2473 	getbinuptime(&t);
2474 	bintime_sub(&t, &sc->attach_uptime);
2475 	result = t.sec;
2476 	return (sysctl_handle_int(oidp, &result, 0, req));
2477 }
2478 
2479 static int
2480 cpsw_intr_coalesce(SYSCTL_HANDLER_ARGS)
2481 {
2482 	int error;
2483 	struct cpsw_softc *sc;
2484 	uint32_t ctrl, intr_per_ms;
2485 
2486 	sc = (struct cpsw_softc *)arg1;
2487 	error = sysctl_handle_int(oidp, &sc->coal_us, 0, req);
2488 	if (error != 0 || req->newptr == NULL)
2489 		return (error);
2490 
2491 	ctrl = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
2492 	ctrl &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
2493 	if (sc->coal_us == 0) {
2494 		/* Disable the interrupt pace hardware. */
2495 		cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);
2496 		cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), 0);
2497 		cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), 0);
2498 		return (0);
2499 	}
2500 
2501 	if (sc->coal_us > CPSW_WR_C_IMAX_US_MAX)
2502 		sc->coal_us = CPSW_WR_C_IMAX_US_MAX;
2503 	if (sc->coal_us < CPSW_WR_C_IMAX_US_MIN)
2504 		sc->coal_us = CPSW_WR_C_IMAX_US_MIN;
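	/*
	 * The IMAX registers cap interrupts per millisecond, so the
	 * requested microseconds between interrupts converts as
	 * 1000 / coal_us.
	 */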
2505 	intr_per_ms = 1000 / sc->coal_us;
2506 	/* Just to make sure... */
2507 	if (intr_per_ms > CPSW_WR_C_IMAX_MAX)
2508 		intr_per_ms = CPSW_WR_C_IMAX_MAX;
2509 	if (intr_per_ms < CPSW_WR_C_IMAX_MIN)
2510 		intr_per_ms = CPSW_WR_C_IMAX_MIN;
2511 
	/* Set the prescale to produce 4us pulses from the 125 MHz clock. */
2513 	ctrl |= (125 * 4) & CPSW_WR_INT_PRESCALE_MASK;
2514 
2515 	/* Enable the interrupt pace hardware. */
2516 	cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), intr_per_ms);
2517 	cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), intr_per_ms);
2518 	ctrl |= CPSW_WR_INT_C0_RX_PULSE | CPSW_WR_INT_C0_TX_PULSE;
2519 	cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);
2520 
2521 	return (0);
2522 }
2523 
2524 static int
2525 cpsw_stat_uptime(SYSCTL_HANDLER_ARGS)
2526 {
2527 	struct cpsw_softc *swsc;
2528 	struct cpswp_softc *sc;
2529 	struct bintime t;
2530 	unsigned result;
2531 
2532 	swsc = arg1;
2533 	sc = device_get_softc(swsc->port[arg2].dev);
2534 	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
2535 		getbinuptime(&t);
2536 		bintime_sub(&t, &sc->init_uptime);
2537 		result = t.sec;
2538 	} else
2539 		result = 0;
2540 	return (sysctl_handle_int(oidp, &result, 0, req));
2541 }
2542 
2543 static void
2544 cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
2545 	struct cpsw_queue *queue)
2546 {
2547 	struct sysctl_oid_list *parent;
2548 
2549 	parent = SYSCTL_CHILDREN(node);
2550 	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers",
2551 	    CTLFLAG_RD, &queue->queue_slots, 0,
2552 	    "Total buffers currently assigned to this queue");
2553 	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers",
2554 	    CTLFLAG_RD, &queue->active_queue_len, 0,
2555 	    "Buffers currently registered with hardware controller");
2556 	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers",
2557 	    CTLFLAG_RD, &queue->max_active_queue_len, 0,
2558 	    "Max value of activeBuffers since last driver reset");
2559 	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers",
2560 	    CTLFLAG_RD, &queue->avail_queue_len, 0,
2561 	    "Buffers allocated to this queue but not currently "
2562 	    "registered with hardware controller");
2563 	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers",
2564 	    CTLFLAG_RD, &queue->max_avail_queue_len, 0,
2565 	    "Max value of availBuffers since last driver reset");
2566 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued",
2567 	    CTLFLAG_RD, &queue->queue_adds, 0,
2568 	    "Total buffers added to queue");
2569 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued",
2570 	    CTLFLAG_RD, &queue->queue_removes, 0,
2571 	    "Total buffers removed from queue");
2572 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain",
2573 	    CTLFLAG_RD, &queue->longest_chain, 0,
2574 	    "Max buffers used for a single packet");
2575 }
2576 
2577 static void
2578 cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
2579 	struct cpsw_softc *sc)
2580 {
2581 	struct sysctl_oid_list *parent;
2582 
2583 	parent = SYSCTL_CHILDREN(node);
2584 	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets",
2585 	    CTLFLAG_RD, &sc->watchdog.resets, 0,
2586 	    "Total number of watchdog resets");
2587 }
2588 
2589 static void
2590 cpsw_add_sysctls(struct cpsw_softc *sc)
2591 {
2592 	struct sysctl_ctx_list *ctx;
2593 	struct sysctl_oid *stats_node, *queue_node, *node;
2594 	struct sysctl_oid_list *parent, *stats_parent, *queue_parent;
2595 	struct sysctl_oid_list *ports_parent, *port_parent;
2596 	char port[16];
2597 	int i;
2598 
2599 	ctx = device_get_sysctl_ctx(sc->dev);
2600 	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2601 
2602 	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug",
2603 	    CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages");
2604 
2605 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs",
2606 	    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU",
2607 	    "Time since driver attach");
2608 
2609 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "intr_coalesce_us",
2610 	    CTLTYPE_UINT | CTLFLAG_RW, sc, 0, cpsw_intr_coalesce, "IU",
	    "Minimum time between interrupts, in microseconds");
2612 
2613 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports",
2614 	    CTLFLAG_RD, NULL, "CPSW Ports Statistics");
2615 	ports_parent = SYSCTL_CHILDREN(node);
2616 	for (i = 0; i < CPSW_PORTS; i++) {
2617 		if (!sc->dualemac && i != sc->active_slave)
2618 			continue;
2619 		port[0] = '0' + i;
2620 		port[1] = '\0';
2621 		node = SYSCTL_ADD_NODE(ctx, ports_parent, OID_AUTO,
2622 		    port, CTLFLAG_RD, NULL, "CPSW Port Statistics");
2623 		port_parent = SYSCTL_CHILDREN(node);
2624 		SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime",
2625 		    CTLTYPE_UINT | CTLFLAG_RD, sc, i,
2626 		    cpsw_stat_uptime, "IU", "Seconds since driver init");
2627 	}
2628 
2629 	stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
2630 				     CTLFLAG_RD, NULL, "CPSW Statistics");
2631 	stats_parent = SYSCTL_CHILDREN(stats_node);
2632 	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
2633 		SYSCTL_ADD_PROC(ctx, stats_parent, i,
2634 				cpsw_stat_sysctls[i].oid,
2635 				CTLTYPE_U64 | CTLFLAG_RD, sc, 0,
2636 				cpsw_stats_sysctl, "IU",
2637 				cpsw_stat_sysctls[i].oid);
2638 	}
2639 
2640 	queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue",
2641 	    CTLFLAG_RD, NULL, "CPSW Queue Statistics");
2642 	queue_parent = SYSCTL_CHILDREN(queue_node);
2643 
2644 	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx",
2645 	    CTLFLAG_RD, NULL, "TX Queue Statistics");
2646 	cpsw_add_queue_sysctls(ctx, node, &sc->tx);
2647 
2648 	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx",
2649 	    CTLFLAG_RD, NULL, "RX Queue Statistics");
2650 	cpsw_add_queue_sysctls(ctx, node, &sc->rx);
2651 
2652 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog",
2653 	    CTLFLAG_RD, NULL, "Watchdog Statistics");
2654 	cpsw_add_watchdog_sysctls(ctx, node, sc);
2655 }
2656