xref: /freebsd/sys/arm/ti/cpsw/if_cpsw.c (revision 1c05a6ea6b849ff95e539c31adea887c644a6a01)
1 /*-
2  * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
3  * Copyright (c) 2016 Rubicon Communications, LLC (Netgate)
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 /*
29  * TI Common Platform Ethernet Switch (CPSW) Driver
30  * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs.
31  *
32  * This controller is documented in the AM335x Technical Reference
33  * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM
34  * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM.
35  *
36  * It is basically a single Ethernet port (port 0) wired internally to
37  * a 3-port store-and-forward switch connected to two independent
38  * "sliver" controllers (port 1 and port 2).  You can operate the
39  * controller in a variety of different ways by suitably configuring
40  * the slivers and the Address Lookup Engine (ALE) that routes packets
41  * between the ports.
42  *
43  * This code was developed and tested on a BeagleBone with
44  * an AM335x SoC.
45  */
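
/*
 * Rough sketch of the topology described above:
 *
 *              +----------------------------------+
 *  CPDMA <---> | port 0 (host)                    |
 *              |      3-port switch + ALE         |
 *              | port 1 (sliver)  port 2 (sliver) |
 *              +----------------------------------+
 *                     |                 |
 *                external PHY      external PHY
 */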
46 
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
49 
50 #include "opt_cpsw.h"
51 
52 #include <sys/param.h>
53 #include <sys/bus.h>
54 #include <sys/kernel.h>
55 #include <sys/lock.h>
56 #include <sys/mbuf.h>
57 #include <sys/module.h>
58 #include <sys/mutex.h>
59 #include <sys/rman.h>
60 #include <sys/socket.h>
61 #include <sys/sockio.h>
62 #include <sys/sysctl.h>
63 
64 #include <machine/bus.h>
65 #include <machine/resource.h>
66 #include <machine/stdarg.h>
67 
68 #include <net/ethernet.h>
69 #include <net/bpf.h>
70 #include <net/if.h>
71 #include <net/if_dl.h>
72 #include <net/if_media.h>
73 #include <net/if_types.h>
74 
75 #include <arm/ti/ti_scm.h>
76 #include <arm/ti/am335x/am335x_scm.h>
77 
78 #include <dev/mii/mii.h>
79 #include <dev/mii/miivar.h>
80 
81 #include <dev/ofw/ofw_bus.h>
82 #include <dev/ofw/ofw_bus_subr.h>
83 
84 #ifdef CPSW_ETHERSWITCH
85 #include <dev/etherswitch/etherswitch.h>
86 #include "etherswitch_if.h"
87 #endif
88 
89 #include "if_cpswreg.h"
90 #include "if_cpswvar.h"
91 
92 #include "miibus_if.h"
93 
94 /* Device probe/attach/detach. */
95 static int cpsw_probe(device_t);
96 static int cpsw_attach(device_t);
97 static int cpsw_detach(device_t);
98 static int cpswp_probe(device_t);
99 static int cpswp_attach(device_t);
100 static int cpswp_detach(device_t);
101 
102 static phandle_t cpsw_get_node(device_t, device_t);
103 
104 /* Device Init/shutdown. */
105 static int cpsw_shutdown(device_t);
106 static void cpswp_init(void *);
107 static void cpswp_init_locked(void *);
108 static void cpswp_stop_locked(struct cpswp_softc *);
109 
110 /* Device Suspend/Resume. */
111 static int cpsw_suspend(device_t);
112 static int cpsw_resume(device_t);
113 
114 /* Ioctl. */
115 static int cpswp_ioctl(struct ifnet *, u_long command, caddr_t data);
116 
117 static int cpswp_miibus_readreg(device_t, int phy, int reg);
118 static int cpswp_miibus_writereg(device_t, int phy, int reg, int value);
119 static void cpswp_miibus_statchg(device_t);
120 
121 /* Send/Receive packets. */
122 static void cpsw_intr_rx(void *arg);
123 static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *);
124 static void cpsw_rx_enqueue(struct cpsw_softc *);
125 static void cpswp_start(struct ifnet *);
126 static void cpsw_intr_tx(void *);
127 static void cpswp_tx_enqueue(struct cpswp_softc *);
128 static int cpsw_tx_dequeue(struct cpsw_softc *);
129 
130 /* Misc interrupts and watchdog. */
131 static void cpsw_intr_rx_thresh(void *);
132 static void cpsw_intr_misc(void *);
133 static void cpswp_tick(void *);
134 static void cpswp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
135 static int cpswp_ifmedia_upd(struct ifnet *);
136 static void cpsw_tx_watchdog(void *);
137 
138 /* ALE support */
139 static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *);
140 static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *);
141 static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *);
142 static void cpsw_ale_dump_table(struct cpsw_softc *);
143 static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int,
144 	int);
145 static int cpswp_ale_update_addresses(struct cpswp_softc *, int);
146 
147 /* Statistics and sysctls. */
148 static void cpsw_add_sysctls(struct cpsw_softc *);
149 static void cpsw_stats_collect(struct cpsw_softc *);
150 static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS);
151 
152 #ifdef CPSW_ETHERSWITCH
153 static etherswitch_info_t *cpsw_getinfo(device_t);
154 static int cpsw_getport(device_t, etherswitch_port_t *);
155 static int cpsw_setport(device_t, etherswitch_port_t *);
156 static int cpsw_getconf(device_t, etherswitch_conf_t *);
157 static int cpsw_getvgroup(device_t, etherswitch_vlangroup_t *);
158 static int cpsw_setvgroup(device_t, etherswitch_vlangroup_t *);
159 static int cpsw_readreg(device_t, int);
160 static int cpsw_writereg(device_t, int, int);
161 static int cpsw_readphy(device_t, int, int);
162 static int cpsw_writephy(device_t, int, int, int);
163 #endif
164 
165 /*
166  * Arbitrary limit on number of segments in an mbuf to be transmitted.
167  * Packets with more segments than this will be defragmented before
168  * they are queued.
169  */
170 #define	CPSW_TXFRAGS		16
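
/*
 * Note: CPSW_TXFRAGS also bounds the DMA tag's nsegments in
 * cpsw_attach() and doubles as the RX pending threshold programmed
 * in cpsw_init().
 */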
171 
172 /* Shared resources. */
173 static device_method_t cpsw_methods[] = {
174 	/* Device interface */
175 	DEVMETHOD(device_probe,		cpsw_probe),
176 	DEVMETHOD(device_attach,	cpsw_attach),
177 	DEVMETHOD(device_detach,	cpsw_detach),
178 	DEVMETHOD(device_shutdown,	cpsw_shutdown),
179 	DEVMETHOD(device_suspend,	cpsw_suspend),
180 	DEVMETHOD(device_resume,	cpsw_resume),
181 	/* Bus interface */
182 	DEVMETHOD(bus_add_child,	device_add_child_ordered),
183 	/* OFW methods */
184 	DEVMETHOD(ofw_bus_get_node,	cpsw_get_node),
185 #ifdef CPSW_ETHERSWITCH
186 	/* etherswitch interface */
187 	DEVMETHOD(etherswitch_getinfo,	cpsw_getinfo),
188 	DEVMETHOD(etherswitch_readreg,	cpsw_readreg),
189 	DEVMETHOD(etherswitch_writereg,	cpsw_writereg),
190 	DEVMETHOD(etherswitch_readphyreg,	cpsw_readphy),
191 	DEVMETHOD(etherswitch_writephyreg,	cpsw_writephy),
192 	DEVMETHOD(etherswitch_getport,	cpsw_getport),
193 	DEVMETHOD(etherswitch_setport,	cpsw_setport),
194 	DEVMETHOD(etherswitch_getvgroup,	cpsw_getvgroup),
195 	DEVMETHOD(etherswitch_setvgroup,	cpsw_setvgroup),
196 	DEVMETHOD(etherswitch_getconf,	cpsw_getconf),
197 #endif
198 	DEVMETHOD_END
199 };
200 
201 static driver_t cpsw_driver = {
202 	"cpswss",
203 	cpsw_methods,
204 	sizeof(struct cpsw_softc),
205 };
206 
207 static devclass_t cpsw_devclass;
208 
209 DRIVER_MODULE(cpswss, simplebus, cpsw_driver, cpsw_devclass, 0, 0);
210 
211 /* Port/Slave resources. */
212 static device_method_t cpswp_methods[] = {
213 	/* Device interface */
214 	DEVMETHOD(device_probe,		cpswp_probe),
215 	DEVMETHOD(device_attach,	cpswp_attach),
216 	DEVMETHOD(device_detach,	cpswp_detach),
217 	/* MII interface */
218 	DEVMETHOD(miibus_readreg,	cpswp_miibus_readreg),
219 	DEVMETHOD(miibus_writereg,	cpswp_miibus_writereg),
220 	DEVMETHOD(miibus_statchg,	cpswp_miibus_statchg),
221 	DEVMETHOD_END
222 };
223 
224 static driver_t cpswp_driver = {
225 	"cpsw",
226 	cpswp_methods,
227 	sizeof(struct cpswp_softc),
228 };
229 
230 static devclass_t cpswp_devclass;
231 
232 #ifdef CPSW_ETHERSWITCH
233 DRIVER_MODULE(etherswitch, cpswss, etherswitch_driver, etherswitch_devclass, 0, 0);
234 MODULE_DEPEND(cpswss, etherswitch, 1, 1, 1);
235 #endif
236 
237 DRIVER_MODULE(cpsw, cpswss, cpswp_driver, cpswp_devclass, 0, 0);
238 DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0);
239 MODULE_DEPEND(cpsw, ether, 1, 1, 1);
240 MODULE_DEPEND(cpsw, miibus, 1, 1, 1);
241 
242 #ifdef CPSW_ETHERSWITCH
243 static struct cpsw_vlangroups cpsw_vgroups[CPSW_VLANS];
244 #endif
245 
246 static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 };
247 
248 static struct resource_spec irq_res_spec[] = {
249 	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
250 	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
251 	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
252 	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
253 	{ -1, 0 }
254 };
255 
256 static struct {
257 	void (*cb)(void *);
258 } cpsw_intr_cb[] = {
259 	{ cpsw_intr_rx_thresh },
260 	{ cpsw_intr_rx },
261 	{ cpsw_intr_tx },
262 	{ cpsw_intr_misc },
263 };
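
/*
 * The handler order above must match the interrupt order in
 * irq_res_spec: cpsw_intr_attach() wires cpsw_intr_cb[i] to
 * sc->irq_res[i].
 */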
264 
265 /* Number of entries here must match size of stats
266  * array in struct cpswp_softc. */
267 static struct cpsw_stat {
268 	int	reg;
269 	char *oid;
270 } cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = {
271 	{0x00, "GoodRxFrames"},
272 	{0x04, "BroadcastRxFrames"},
273 	{0x08, "MulticastRxFrames"},
274 	{0x0C, "PauseRxFrames"},
275 	{0x10, "RxCrcErrors"},
276 	{0x14, "RxAlignErrors"},
277 	{0x18, "OversizeRxFrames"},
278 	{0x1c, "RxJabbers"},
279 	{0x20, "ShortRxFrames"},
280 	{0x24, "RxFragments"},
281 	{0x30, "RxOctets"},
282 	{0x34, "GoodTxFrames"},
283 	{0x38, "BroadcastTxFrames"},
284 	{0x3c, "MulticastTxFrames"},
285 	{0x40, "PauseTxFrames"},
286 	{0x44, "DeferredTxFrames"},
287 	{0x48, "CollisionsTxFrames"},
288 	{0x4c, "SingleCollisionTxFrames"},
289 	{0x50, "MultipleCollisionTxFrames"},
290 	{0x54, "ExcessiveCollisions"},
291 	{0x58, "LateCollisions"},
292 	{0x5c, "TxUnderrun"},
293 	{0x60, "CarrierSenseErrors"},
294 	{0x64, "TxOctets"},
295 	{0x68, "RxTx64OctetFrames"},
296 	{0x6c, "RxTx65to127OctetFrames"},
297 	{0x70, "RxTx128to255OctetFrames"},
298 	{0x74, "RxTx256to511OctetFrames"},
299 	{0x78, "RxTx512to1024OctetFrames"},
300 	{0x7c, "RxTx1024upOctetFrames"},
301 	{0x80, "NetOctets"},
302 	{0x84, "RxStartOfFrameOverruns"},
303 	{0x88, "RxMiddleOfFrameOverruns"},
304 	{0x8c, "RxDmaOverruns"}
305 };
306 
307 /*
308  * Basic debug support.
309  */
310 
311 static void
312 cpsw_debugf_head(const char *funcname)
313 {
314 	int t = (int)(time_second % (24 * 60 * 60));
315 
316 	printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname);
317 }
318 
319 static void
320 cpsw_debugf(const char *fmt, ...)
321 {
322 	va_list ap;
323 
324 	va_start(ap, fmt);
325 	vprintf(fmt, ap);
326 	va_end(ap);
327 	printf("\n");
328 
329 }
330 
331 #define	CPSW_DEBUGF(_sc, a) do {					\
332 	if ((_sc)->debug) {						\
333 		cpsw_debugf_head(__func__);				\
334 		cpsw_debugf a;						\
335 	}								\
336 } while (0)
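
/*
 * The doubled parentheses at call sites let the macro forward a full
 * printf-style argument list, e.g.:
 *
 *	CPSW_DEBUGF(sc, ("Removed %d received packet(s) from RX queue",
 *	    removed));
 */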
337 
338 /*
339  * Locking macros
340  */
341 #define	CPSW_TX_LOCK(sc) do {						\
342 		mtx_assert(&(sc)->rx.lock, MA_NOTOWNED);		\
343 		mtx_lock(&(sc)->tx.lock);				\
344 } while (0)
345 
346 #define	CPSW_TX_UNLOCK(sc)	mtx_unlock(&(sc)->tx.lock)
347 #define	CPSW_TX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->tx.lock, MA_OWNED)
348 
349 #define	CPSW_RX_LOCK(sc) do {						\
350 		mtx_assert(&(sc)->tx.lock, MA_NOTOWNED);		\
351 		mtx_lock(&(sc)->rx.lock);				\
352 } while (0)
353 
354 #define	CPSW_RX_UNLOCK(sc)		mtx_unlock(&(sc)->rx.lock)
355 #define	CPSW_RX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->rx.lock, MA_OWNED)
356 
357 #define CPSW_PORT_LOCK(_sc) do {					\
358 		mtx_assert(&(_sc)->lock, MA_NOTOWNED);			\
359 		mtx_lock(&(_sc)->lock);					\
360 } while (0)
361 
362 #define	CPSW_PORT_UNLOCK(_sc)	mtx_unlock(&(_sc)->lock)
363 #define	CPSW_PORT_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->lock, MA_OWNED)
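
/*
 * The MA_NOTOWNED assertions above encode the locking discipline:
 * the TX and RX queue locks are never held at the same time, and
 * none of the locks is ever taken recursively.
 */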
364 
365 /*
366  * Read/Write macros
367  */
368 #define	cpsw_read_4(_sc, _reg)		bus_read_4((_sc)->mem_res, (_reg))
369 #define	cpsw_write_4(_sc, _reg, _val)					\
370 	bus_write_4((_sc)->mem_res, (_reg), (_val))
371 
372 #define	cpsw_cpdma_bd_offset(i)	(CPSW_CPPI_RAM_OFFSET + ((i)*16))
373 
374 #define	cpsw_cpdma_bd_paddr(sc, slot)					\
375 	BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset)
376 #define	cpsw_cpdma_read_bd(sc, slot, val)				\
377 	bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
378 #define	cpsw_cpdma_write_bd(sc, slot, val)				\
379 	bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
380 #define	cpsw_cpdma_write_bd_next(sc, slot, next_slot)			\
381 	cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot))
382 #define	cpsw_cpdma_write_bd_flags(sc, slot, val)			\
383 	bus_write_2(sc->mem_res, slot->bd_offset + 14, val)
384 #define	cpsw_cpdma_read_bd_flags(sc, slot)				\
385 	bus_read_2(sc->mem_res, slot->bd_offset + 14)
386 #define	cpsw_write_hdp_slot(sc, queue, slot)				\
387 	cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot))
388 #define	CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0))
389 #define	cpsw_read_cp(sc, queue)						\
390 	cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET)
391 #define	cpsw_write_cp(sc, queue, val)					\
392 	cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val))
393 #define	cpsw_write_cp_slot(sc, queue, slot)				\
394 	cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot))
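
/*
 * Buffer descriptor layout implied by the macros above (16 bytes per
 * descriptor, matching struct cpsw_cpdma_bd in if_cpswvar.h on this
 * little-endian SoC):
 *
 *	offset  0: next descriptor (physical address)
 *	offset  4: buffer pointer (physical address)
 *	offset  8: buffer length (16 bits), buffer offset (16 bits)
 *	offset 12: packet length (16 bits), flags (16 bits, at byte 14)
 */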
395 
396 #if 0
397 /* XXX temporary function versions for debugging. */
398 static void
399 cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
400 {
401 	uint32_t reg = queue->hdp_offset;
402 	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
403 	CPSW_DEBUGF(sc, ("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg)));
404 	cpsw_write_4(sc, reg, v);
405 }
406 
407 static void
408 cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
409 {
410 	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
411 	CPSW_DEBUGF(sc, ("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue)));
412 	cpsw_write_cp(sc, queue, v);
413 }
414 #endif
415 
416 /*
417  * Expanded dump routines for verbose debugging.
418  */
419 static void
420 cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
421 {
422 	static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ",
423 	    "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun",
424 	    "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1",
425 	    "Port0"};
426 	struct cpsw_cpdma_bd bd;
427 	const char *sep;
428 	int i;
429 
430 	cpsw_cpdma_read_bd(sc, slot, &bd);
431 	printf("BD Addr : 0x%08x   Next  : 0x%08x\n",
432 	    cpsw_cpdma_bd_paddr(sc, slot), bd.next);
433 	printf("  BufPtr: 0x%08x   BufLen: 0x%08x\n", bd.bufptr, bd.buflen);
434 	printf("  BufOff: 0x%08x   PktLen: 0x%08x\n", bd.bufoff, bd.pktlen);
435 	printf("  Flags: ");
436 	sep = "";
437 	for (i = 0; i < 16; ++i) {
438 		if (bd.flags & (1 << (15 - i))) {
439 			printf("%s%s", sep, flags[i]);
440 			sep = ",";
441 		}
442 	}
443 	printf("\n");
444 	if (slot->mbuf) {
445 		printf("  Ether:  %14D\n",
446 		    (char *)(slot->mbuf->m_data), " ");
447 		printf("  Packet: %16D\n",
448 		    (char *)(slot->mbuf->m_data) + 14, " ");
449 	}
450 }
451 
452 #define	CPSW_DUMP_SLOT(cs, slot) do {				\
453 	IF_DEBUG(sc) {						\
454 		cpsw_dump_slot(sc, slot);			\
455 	}							\
456 } while (0)
457 
458 static void
459 cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q)
460 {
461 	struct cpsw_slot *slot;
462 	int i = 0;
463 	int others = 0;
464 
465 	STAILQ_FOREACH(slot, q, next) {
466 		if (i > CPSW_TXFRAGS)
467 			++others;
468 		else
469 			cpsw_dump_slot(sc, slot);
470 		++i;
471 	}
472 	if (others)
473 		printf(" ... and %d more.\n", others);
474 	printf("\n");
475 }
476 
477 #define CPSW_DUMP_QUEUE(sc, q) do {				\
478 	IF_DEBUG(sc) {						\
479 		cpsw_dump_queue(sc, q);				\
480 	}							\
481 } while (0)
482 
483 static void
484 cpsw_init_slots(struct cpsw_softc *sc)
485 {
486 	struct cpsw_slot *slot;
487 	int i;
488 
489 	STAILQ_INIT(&sc->avail);
490 
491 	/* Put the slot descriptors onto the global avail list. */
492 	for (i = 0; i < nitems(sc->_slots); i++) {
493 		slot = &sc->_slots[i];
494 		slot->bd_offset = cpsw_cpdma_bd_offset(i);
495 		STAILQ_INSERT_TAIL(&sc->avail, slot, next);
496 	}
497 }
498 
499 static int
500 cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested)
501 {
502 	const int max_slots = nitems(sc->_slots);
503 	struct cpsw_slot *slot;
504 	int i;
505 
506 	if (requested < 0)
507 		requested = max_slots;
508 
509 	for (i = 0; i < requested; ++i) {
510 		slot = STAILQ_FIRST(&sc->avail);
511 		if (slot == NULL)
512 			return (0);
513 		if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
514 			device_printf(sc->dev, "failed to create dmamap\n");
515 			return (ENOMEM);
516 		}
517 		STAILQ_REMOVE_HEAD(&sc->avail, next);
518 		STAILQ_INSERT_TAIL(&queue->avail, slot, next);
519 		++queue->avail_queue_len;
520 		++queue->queue_slots;
521 	}
522 	return (0);
523 }
524 
525 static void
526 cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
527 {
528 	int error;
529 
530 	if (slot->dmamap) {
531 		if (slot->mbuf)
532 			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
533 		error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap);
534 		KASSERT(error == 0, ("Mapping still active"));
535 		slot->dmamap = NULL;
536 	}
537 	if (slot->mbuf) {
538 		m_freem(slot->mbuf);
539 		slot->mbuf = NULL;
540 	}
541 }
542 
543 static void
544 cpsw_reset(struct cpsw_softc *sc)
545 {
546 	int i;
547 
548 	callout_stop(&sc->watchdog.callout);
549 
550 	/* Reset RMII/RGMII wrapper. */
551 	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
552 	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
553 		;
554 
555 	/* Disable TX and RX interrupts for all cores. */
556 	for (i = 0; i < 3; ++i) {
557 		cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00);
558 		cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00);
559 		cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00);
560 		cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00);
561 	}
562 
563 	/* Reset CPSW subsystem. */
564 	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
565 	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
566 		;
567 
568 	/* Reset Sliver port 1 and 2 */
569 	for (i = 0; i < 2; i++) {
570 		/* Reset */
571 		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
572 		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
573 			;
574 	}
575 
576 	/* Reset DMA controller. */
577 	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
578 	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
579 		;
580 
581 	/* Disable TX & RX DMA */
582 	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0);
583 	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0);
584 
585 	/* Clear all queues. */
586 	for (i = 0; i < 8; i++) {
587 		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
588 		cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
589 		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
590 		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
591 	}
592 
593 	/* Clear all interrupt Masks */
594 	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
595 	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
596 }
597 
598 static void
599 cpsw_init(struct cpsw_softc *sc)
600 {
601 	struct cpsw_slot *slot;
602 	uint32_t reg;
603 
604 	/* Disable the interrupt pacing. */
605 	reg = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
606 	reg &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
607 	cpsw_write_4(sc, CPSW_WR_INT_CONTROL, reg);
608 
609 	/* Clear ALE */
610 	cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL);
611 
612 	/* Enable ALE */
613 	reg = CPSW_ALE_CTL_ENABLE;
614 	if (sc->dualemac)
615 		reg |= CPSW_ALE_CTL_VLAN_AWARE;
616 	cpsw_write_4(sc, CPSW_ALE_CONTROL, reg);
617 
618 	/* Set Host Port Mapping. */
619 	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
620 	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);
621 
622 	/* Initialize ALE: set host port to forwarding (3). */
623 	cpsw_write_4(sc, CPSW_ALE_PORTCTL(0),
624 	    ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD);
625 
626 	cpsw_write_4(sc, CPSW_SS_PTYPE, 0);
627 
628 	/* Enable statistics for ports 0, 1 and 2 */
629 	cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);
630 
631 	/* Turn off flow control. */
632 	cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);
633 
634 	/* Align the IP header on a 4-byte boundary (2-byte RX buffer offset). */
635 	cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2);
636 
637 	/* Initialize RX Buffer Descriptors */
638 	cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), 0);
639 	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);
640 
641 	/* Enable TX & RX DMA */
642 	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
643 	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);
644 
645 	/* Enable Interrupts for core 0 */
646 	cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF);
647 	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF);
648 	cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0xFF);
649 	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);
650 
651 	/* Enable host Error Interrupt */
652 	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3);
653 
654 	/* Enable interrupts for RX and TX on Channel 0 */
655 	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET,
656 	    CPSW_CPDMA_RX_INT(0) | CPSW_CPDMA_RX_INT_THRESH(0));
657 	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1);
658 
659 	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
660 	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
661 	cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff);
662 
663 	/* Select MII in GMII_SEL, Internal Delay mode */
664 	//ti_scm_reg_write_4(0x650, 0);
665 
666 	/* Initialize active queues. */
667 	slot = STAILQ_FIRST(&sc->tx.active);
668 	if (slot != NULL)
669 		cpsw_write_hdp_slot(sc, &sc->tx, slot);
670 	slot = STAILQ_FIRST(&sc->rx.active);
671 	if (slot != NULL)
672 		cpsw_write_hdp_slot(sc, &sc->rx, slot);
673 	cpsw_rx_enqueue(sc);
674 	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), sc->rx.active_queue_len);
675 	cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), CPSW_TXFRAGS);
676 
677 	/* Activate network interface. */
678 	sc->rx.running = 1;
679 	sc->tx.running = 1;
680 	sc->watchdog.timer = 0;
681 	callout_init(&sc->watchdog.callout, 0);
682 	callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
683 }
684 
685 /*
686  *
687  * Device Probe, Attach, Detach.
688  *
689  */
690 
691 static int
692 cpsw_probe(device_t dev)
693 {
694 
695 	if (!ofw_bus_status_okay(dev))
696 		return (ENXIO);
697 
698 	if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
699 		return (ENXIO);
700 
701 	device_set_desc(dev, "3-port Switch Ethernet Subsystem");
702 	return (BUS_PROBE_DEFAULT);
703 }
704 
705 static int
706 cpsw_intr_attach(struct cpsw_softc *sc)
707 {
708 	int i;
709 
710 	for (i = 0; i < CPSW_INTR_COUNT; i++) {
711 		if (bus_setup_intr(sc->dev, sc->irq_res[i],
712 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
713 		    cpsw_intr_cb[i].cb, sc, &sc->ih_cookie[i]) != 0) {
714 			return (-1);
715 		}
716 	}
717 
718 	return (0);
719 }
720 
721 static void
722 cpsw_intr_detach(struct cpsw_softc *sc)
723 {
724 	int i;
725 
726 	for (i = 0; i < CPSW_INTR_COUNT; i++) {
727 		if (sc->ih_cookie[i]) {
728 			bus_teardown_intr(sc->dev, sc->irq_res[i],
729 			    sc->ih_cookie[i]);
730 		}
731 	}
732 }
733 
734 static int
735 cpsw_get_fdt_data(struct cpsw_softc *sc, int port)
736 {
737 	char *name;
738 	int len, phy, vlan;
739 	pcell_t phy_id[3], vlan_id;
740 	phandle_t child;
741 	unsigned long mdio_child_addr;
742 
743 	/* Find any slave with phy_id */
744 	phy = -1;
745 	vlan = -1;
746 	for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) {
747 		if (OF_getprop_alloc(child, "name", 1, (void **)&name) < 0)
748 			continue;
749 		if (sscanf(name, "slave@%lx", &mdio_child_addr) != 1) {
750 			OF_prop_free(name);
751 			continue;
752 		}
753 		OF_prop_free(name);
754 		if (mdio_child_addr != slave_mdio_addr[port])
755 			continue;
756 
757 		len = OF_getproplen(child, "phy_id");
758 		if (len / sizeof(pcell_t) == 2) {
759 			/* Get phy address from fdt */
760 			if (OF_getencprop(child, "phy_id", phy_id, len) > 0)
761 				phy = phy_id[1];
762 		}
763 
764 		len = OF_getproplen(child, "dual_emac_res_vlan");
765 		if (len / sizeof(pcell_t) == 1) {
766 			/* Get VLAN id from fdt */
767 			if (OF_getencprop(child, "dual_emac_res_vlan",
768 			    &vlan_id, len) > 0) {
769 				vlan = vlan_id;
770 			}
771 		}
772 
773 		break;
774 	}
775 	if (phy == -1)
776 		return (ENXIO);
777 	sc->port[port].phy = phy;
778 	sc->port[port].vlan = vlan;
779 
780 	return (0);
781 }
782 
783 static int
784 cpsw_attach(device_t dev)
785 {
786 	int error, i;
787 	struct cpsw_softc *sc;
788 	uint32_t reg;
789 
790 	sc = device_get_softc(dev);
791 	sc->dev = dev;
792 	sc->node = ofw_bus_get_node(dev);
793 	getbinuptime(&sc->attach_uptime);
794 
795 	if (OF_getencprop(sc->node, "active_slave", &sc->active_slave,
796 	    sizeof(sc->active_slave)) <= 0) {
797 		sc->active_slave = 0;
798 	}
799 	if (sc->active_slave > 1)
800 		sc->active_slave = 1;
801 
802 	if (OF_hasprop(sc->node, "dual_emac"))
803 		sc->dualemac = 1;
804 
805 	for (i = 0; i < CPSW_PORTS; i++) {
806 		if (!sc->dualemac && i != sc->active_slave)
807 			continue;
808 		if (cpsw_get_fdt_data(sc, i) != 0) {
809 			device_printf(dev,
810 			    "failed to get PHY address from FDT\n");
811 			return (ENXIO);
812 		}
813 	}
814 
815 	/* Initialize mutexes */
816 	mtx_init(&sc->tx.lock, device_get_nameunit(dev),
817 	    "cpsw TX lock", MTX_DEF);
818 	mtx_init(&sc->rx.lock, device_get_nameunit(dev),
819 	    "cpsw RX lock", MTX_DEF);
820 
821 	/* Allocate IRQ resources */
822 	error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res);
823 	if (error) {
824 		device_printf(dev, "could not allocate IRQ resources\n");
825 		cpsw_detach(dev);
826 		return (ENXIO);
827 	}
828 
829 	sc->mem_rid = 0;
830 	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
831 	    &sc->mem_rid, RF_ACTIVE);
832 	if (sc->mem_res == NULL) {
833 		device_printf(sc->dev, "failed to allocate memory resource\n");
834 		cpsw_detach(dev);
835 		return (ENXIO);
836 	}
837 
838 	reg = cpsw_read_4(sc, CPSW_SS_IDVER);
839 	device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7),
840 		reg & 0xFF, (reg >> 11) & 0x1F);
841 
842 	cpsw_add_sysctls(sc);
843 
844 	/* Allocate a busdma tag and DMA safe memory for mbufs. */
845 	error = bus_dma_tag_create(
846 		bus_get_dma_tag(sc->dev),	/* parent */
847 		1, 0,				/* alignment, boundary */
848 		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
849 		BUS_SPACE_MAXADDR,		/* highaddr */
850 		NULL, NULL,			/* filtfunc, filtfuncarg */
851 		MCLBYTES, CPSW_TXFRAGS,		/* maxsize, nsegments */
852 		MCLBYTES, 0,			/* maxsegsz, flags */
853 		NULL, NULL,			/* lockfunc, lockfuncarg */
854 		&sc->mbuf_dtag);		/* dmatag */
855 	if (error) {
856 		device_printf(dev, "bus_dma_tag_create failed\n");
857 		cpsw_detach(dev);
858 		return (error);
859 	}
860 
861 	/* Allocate a zero-filled buffer used to pad short TX frames. */
862 	sc->nullpad = malloc(ETHER_MIN_LEN, M_DEVBUF, M_WAITOK | M_ZERO);
863 
864 	cpsw_init_slots(sc);
865 
866 	/* Allocate slots to TX and RX queues. */
867 	STAILQ_INIT(&sc->rx.avail);
868 	STAILQ_INIT(&sc->rx.active);
869 	STAILQ_INIT(&sc->tx.avail);
870 	STAILQ_INIT(&sc->tx.active);
871 	/* For now: 128 slots to TX, rest to RX. */
872 	/* XXX TODO: start with 32/64 and grow dynamically based on demand. */
873 	if (cpsw_add_slots(sc, &sc->tx, 128) ||
874 	    cpsw_add_slots(sc, &sc->rx, -1)) {
875 		device_printf(dev, "failed to allocate dmamaps\n");
876 		cpsw_detach(dev);
877 		return (ENOMEM);
878 	}
879 	device_printf(dev, "Initial queue size TX=%d RX=%d\n",
880 	    sc->tx.queue_slots, sc->rx.queue_slots);
881 
882 	sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0);
883 	sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0);
884 
885 	if (cpsw_intr_attach(sc) == -1) {
886 		device_printf(dev, "failed to setup interrupts\n");
887 		cpsw_detach(dev);
888 		return (ENXIO);
889 	}
890 
891 #ifdef CPSW_ETHERSWITCH
892 	for (i = 0; i < CPSW_VLANS; i++)
893 		cpsw_vgroups[i].vid = -1;
894 #endif
895 
896 	/* Reset the controller. */
897 	cpsw_reset(sc);
898 	cpsw_init(sc);
899 
900 	for (i = 0; i < CPSW_PORTS; i++) {
901 		if (!sc->dualemac && i != sc->active_slave)
902 			continue;
903 		sc->port[i].dev = device_add_child(dev, "cpsw", i);
904 		if (sc->port[i].dev == NULL) {
905 			cpsw_detach(dev);
906 			return (ENXIO);
907 		}
908 	}
909 	bus_generic_probe(dev);
910 	bus_generic_attach(dev);
911 
912 	return (0);
913 }
914 
915 static int
916 cpsw_detach(device_t dev)
917 {
918 	struct cpsw_softc *sc;
919 	int error, i;
920 
921 	bus_generic_detach(dev);
922 	sc = device_get_softc(dev);
923 
924 	for (i = 0; i < CPSW_PORTS; i++) {
925 		if (sc->port[i].dev)
926 			device_delete_child(dev, sc->port[i].dev);
927 	}
928 
929 	if (device_is_attached(dev)) {
930 		callout_stop(&sc->watchdog.callout);
931 		callout_drain(&sc->watchdog.callout);
932 	}
933 
934 	/* Stop and release all interrupts */
935 	cpsw_intr_detach(sc);
936 
937 	/* Free dmamaps and mbufs */
938 	for (i = 0; i < nitems(sc->_slots); ++i)
939 		cpsw_free_slot(sc, &sc->_slots[i]);
940 
941 	/* Free null padding buffer. */
942 	if (sc->nullpad)
943 		free(sc->nullpad, M_DEVBUF);
944 
945 	/* Free DMA tag */
946 	if (sc->mbuf_dtag) {
947 		error = bus_dma_tag_destroy(sc->mbuf_dtag);
948 		KASSERT(error == 0, ("Unable to destroy DMA tag"));
949 	}
950 
951 	/* Free IO memory handler */
952 	if (sc->mem_res != NULL)
953 		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res);
954 	bus_release_resources(dev, irq_res_spec, sc->irq_res);
955 
956 	/* Destroy mutexes */
957 	mtx_destroy(&sc->rx.lock);
958 	mtx_destroy(&sc->tx.lock);
959 
960 	/* Detach the switch device, if present. */
961 	error = bus_generic_detach(dev);
962 	if (error != 0)
963 		return (error);
964 
965 	return (device_delete_children(dev));
966 }
967 
968 static phandle_t
969 cpsw_get_node(device_t bus, device_t dev)
970 {
971 
972 	/* Share controller node with port device. */
973 	return (ofw_bus_get_node(bus));
974 }
975 
976 static int
977 cpswp_probe(device_t dev)
978 {
979 
980 	if (device_get_unit(dev) > 1) {
981 		device_printf(dev, "Only two ports are supported.\n");
982 		return (ENXIO);
983 	}
984 	device_set_desc(dev, "Ethernet Switch Port");
985 
986 	return (BUS_PROBE_DEFAULT);
987 }
988 
989 static int
990 cpswp_attach(device_t dev)
991 {
992 	int error;
993 	struct ifnet *ifp;
994 	struct cpswp_softc *sc;
995 	uint32_t reg;
996 	uint8_t mac_addr[ETHER_ADDR_LEN];
997 
998 	sc = device_get_softc(dev);
999 	sc->dev = dev;
1000 	sc->pdev = device_get_parent(dev);
1001 	sc->swsc = device_get_softc(sc->pdev);
1002 	sc->unit = device_get_unit(dev);
1003 	sc->phy = sc->swsc->port[sc->unit].phy;
1004 	sc->vlan = sc->swsc->port[sc->unit].vlan;
1005 	if (sc->swsc->dualemac && sc->vlan == -1)
1006 		sc->vlan = sc->unit + 1;
1007 
1008 	if (sc->unit == 0) {
1009 		sc->physel = MDIOUSERPHYSEL0;
1010 		sc->phyaccess = MDIOUSERACCESS0;
1011 	} else {
1012 		sc->physel = MDIOUSERPHYSEL1;
1013 		sc->phyaccess = MDIOUSERACCESS1;
1014 	}
1015 
1016 	mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock",
1017 	    MTX_DEF);
1018 
1019 	/* Allocate network interface */
1020 	ifp = sc->ifp = if_alloc(IFT_ETHER);
1021 	if (ifp == NULL) {
1022 		cpswp_detach(dev);
1023 		return (ENXIO);
1024 	}
1025 
1026 	if_initname(ifp, device_get_name(sc->dev), sc->unit);
1027 	ifp->if_softc = sc;
1028 	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
1029 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; /* FIXME VLAN? */
1030 	ifp->if_capenable = ifp->if_capabilities;
1031 
1032 	ifp->if_init = cpswp_init;
1033 	ifp->if_start = cpswp_start;
1034 	ifp->if_ioctl = cpswp_ioctl;
1035 
1036 	ifp->if_snd.ifq_drv_maxlen = sc->swsc->tx.queue_slots;
1037 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1038 	IFQ_SET_READY(&ifp->if_snd);
1039 
1040 	/* Get high part of MAC address from control module (mac_id[0|1]_hi) */
1041 	ti_scm_reg_read_4(SCM_MAC_ID0_HI + sc->unit * 8, &reg);
1042 	mac_addr[0] = reg & 0xFF;
1043 	mac_addr[1] = (reg >>  8) & 0xFF;
1044 	mac_addr[2] = (reg >> 16) & 0xFF;
1045 	mac_addr[3] = (reg >> 24) & 0xFF;
1046 
1047 	/* Get low part of MAC address from control module (mac_id[0|1]_lo) */
1048 	ti_scm_reg_read_4(SCM_MAC_ID0_LO + sc->unit * 8, &reg);
1049 	mac_addr[4] = reg & 0xFF;
1050 	mac_addr[5] = (reg >>  8) & 0xFF;
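
	/*
	 * Worked example with hypothetical register contents: if
	 * mac_id0_hi = 0x78563412 and mac_id0_lo = 0x0000bc9a, the
	 * bytes above assemble to the MAC address 12:34:56:78:9a:bc.
	 */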
1051 
1052 	error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd,
1053 	    cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0);
1054 	if (error) {
1055 		device_printf(dev, "attaching PHYs failed\n");
1056 		cpswp_detach(dev);
1057 		return (error);
1058 	}
1059 	sc->mii = device_get_softc(sc->miibus);
1060 
1061 	/* Select PHY and enable interrupts */
1062 	cpsw_write_4(sc->swsc, sc->physel,
1063 	    MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F));
1064 
1065 	ether_ifattach(sc->ifp, mac_addr);
1066 	callout_init(&sc->mii_callout, 0);
1067 
1068 	return (0);
1069 }
1070 
1071 static int
1072 cpswp_detach(device_t dev)
1073 {
1074 	struct cpswp_softc *sc;
1075 
1076 	sc = device_get_softc(dev);
1077 	CPSW_DEBUGF(sc->swsc, (""));
1078 	if (device_is_attached(dev)) {
1079 		ether_ifdetach(sc->ifp);
1080 		CPSW_PORT_LOCK(sc);
1081 		cpswp_stop_locked(sc);
1082 		CPSW_PORT_UNLOCK(sc);
1083 		callout_drain(&sc->mii_callout);
1084 	}
1085 
1086 	bus_generic_detach(dev);
1087 
1088 	if_free(sc->ifp);
1089 	mtx_destroy(&sc->lock);
1090 
1091 	return (0);
1092 }
1093 
1094 /*
1095  *
1096  * Init/Shutdown.
1097  *
1098  */
1099 
1100 static int
1101 cpsw_ports_down(struct cpsw_softc *sc)
1102 {
1103 	struct cpswp_softc *psc;
1104 	struct ifnet *ifp1, *ifp2;
1105 
1106 	if (!sc->dualemac)
1107 		return (1);
1108 	psc = device_get_softc(sc->port[0].dev);
1109 	ifp1 = psc->ifp;
1110 	psc = device_get_softc(sc->port[1].dev);
1111 	ifp2 = psc->ifp;
1112 	if ((ifp1->if_flags & IFF_UP) == 0 && (ifp2->if_flags & IFF_UP) == 0)
1113 		return (1);
1114 
1115 	return (0);
1116 }
1117 
1118 static void
1119 cpswp_init(void *arg)
1120 {
1121 	struct cpswp_softc *sc = arg;
1122 
1123 	CPSW_DEBUGF(sc->swsc, (""));
1124 	CPSW_PORT_LOCK(sc);
1125 	cpswp_init_locked(arg);
1126 	CPSW_PORT_UNLOCK(sc);
1127 }
1128 
1129 static void
1130 cpswp_init_locked(void *arg)
1131 {
1132 #ifdef CPSW_ETHERSWITCH
1133 	int i;
1134 #endif
1135 	struct cpswp_softc *sc = arg;
1136 	struct ifnet *ifp;
1137 	uint32_t reg;
1138 
1139 	CPSW_DEBUGF(sc->swsc, (""));
1140 	CPSW_PORT_LOCK_ASSERT(sc);
1141 	ifp = sc->ifp;
1142 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1143 		return;
1144 
1145 	getbinuptime(&sc->init_uptime);
1146 
1147 	if (!sc->swsc->rx.running && !sc->swsc->tx.running) {
1148 		/* Reset the controller. */
1149 		cpsw_reset(sc->swsc);
1150 		cpsw_init(sc->swsc);
1151 	}
1152 
1153 	/* Set Slave Mapping. */
1154 	cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210);
1155 	cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1),
1156 	    0x33221100);
1157 	cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2);
1158 	/* Enable MAC RX/TX modules. */
1159 	/* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */
1160 	/* Huh?  Docs call bit 0 "Loopback" some places, "FullDuplex" others. */
1161 	reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
1162 	reg |= CPSW_SL_MACTL_GMII_ENABLE;
1163 	cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);
1164 
1165 	/* Initialize ALE: set port to forwarding, initialize addrs */
1166 	cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1),
1167 	    ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD);
1168 	cpswp_ale_update_addresses(sc, 1);
1169 
1170 	if (sc->swsc->dualemac) {
1171 		/* Set Port VID. */
1172 		cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1),
1173 		    sc->vlan & 0xfff);
1174 		cpsw_ale_update_vlan_table(sc->swsc, sc->vlan,
1175 		    (1 << (sc->unit + 1)) | (1 << 0), /* Member list */
1176 		    (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */
1177 		    (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */
1178 #ifdef CPSW_ETHERSWITCH
1179 		for (i = 0; i < CPSW_VLANS; i++) {
1180 			if (cpsw_vgroups[i].vid != -1)
1181 				continue;
1182 			cpsw_vgroups[i].vid = sc->vlan;
1183 			break;
1184 		}
1185 #endif
1186 	}
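
	/*
	 * With the defaults above (vlan = unit + 1 when the FDT does not
	 * provide one), port 1 lands in VLAN 1 and port 2 in VLAN 2, each
	 * sharing membership only with host port 0; this is what keeps
	 * the two EMACs isolated in dual_emac mode.
	 */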
1187 
1188 	mii_mediachg(sc->mii);
1189 	callout_reset(&sc->mii_callout, hz, cpswp_tick, sc);
1190 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1191 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1192 }
1193 
1194 static int
1195 cpsw_shutdown(device_t dev)
1196 {
1197 	struct cpsw_softc *sc;
1198 	struct cpswp_softc *psc;
1199 	int i;
1200 
1201 	sc = device_get_softc(dev);
1202 	CPSW_DEBUGF(sc, (""));
1203 	for (i = 0; i < CPSW_PORTS; i++) {
1204 		if (!sc->dualemac && i != sc->active_slave)
1205 			continue;
1206 		psc = device_get_softc(sc->port[i].dev);
1207 		CPSW_PORT_LOCK(psc);
1208 		cpswp_stop_locked(psc);
1209 		CPSW_PORT_UNLOCK(psc);
1210 	}
1211 
1212 	return (0);
1213 }
1214 
1215 static void
1216 cpsw_rx_teardown(struct cpsw_softc *sc)
1217 {
1218 	int i = 0;
1219 
1220 	CPSW_RX_LOCK(sc);
1221 	CPSW_DEBUGF(sc, ("starting RX teardown"));
1222 	sc->rx.teardown = 1;
1223 	cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
1224 	CPSW_RX_UNLOCK(sc);
1225 	while (sc->rx.running) {
1226 		if (++i > 10) {
1227 			device_printf(sc->dev,
1228 			    "Unable to cleanly shut down receiver\n");
1229 			return;
1230 		}
1231 		DELAY(200);
1232 	}
1233 	if (!sc->rx.running)
1234 		CPSW_DEBUGF(sc, ("finished RX teardown (%d retries)", i));
1235 }
1236 
1237 static void
1238 cpsw_tx_teardown(struct cpsw_softc *sc)
1239 {
1240 	int i = 0;
1241 
1242 	CPSW_TX_LOCK(sc);
1243 	CPSW_DEBUGF(sc, ("starting TX teardown"));
1244 	/* Start the TX queue teardown if queue is not empty. */
1245 	if (STAILQ_FIRST(&sc->tx.active) != NULL)
1246 		cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
1247 	else
1248 		sc->tx.teardown = 1;
1249 	cpsw_tx_dequeue(sc);
1250 	while (sc->tx.running && ++i < 10) {
1251 		DELAY(200);
1252 		cpsw_tx_dequeue(sc);
1253 	}
1254 	if (sc->tx.running) {
1255 		device_printf(sc->dev,
1256 		    "Unable to cleanly shut down transmitter\n");
1257 	}
1258 	CPSW_DEBUGF(sc,
1259 	    ("finished TX teardown (%d retries, %d idle buffers)", i,
1260 	     sc->tx.active_queue_len));
1261 	CPSW_TX_UNLOCK(sc);
1262 }
1263 
1264 static void
1265 cpswp_stop_locked(struct cpswp_softc *sc)
1266 {
1267 	struct ifnet *ifp;
1268 	uint32_t reg;
1269 
1270 	ifp = sc->ifp;
1271 	CPSW_DEBUGF(sc->swsc, (""));
1272 	CPSW_PORT_LOCK_ASSERT(sc);
1273 
1274 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1275 		return;
1276 
1277 	/* Disable interface */
1278 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1279 	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1280 
1281 	/* Stop ticker */
1282 	callout_stop(&sc->mii_callout);
1283 
1284 	/* Tear down the RX/TX queues. */
1285 	if (cpsw_ports_down(sc->swsc)) {
1286 		cpsw_rx_teardown(sc->swsc);
1287 		cpsw_tx_teardown(sc->swsc);
1288 	}
1289 
1290 	/* Stop MAC RX/TX modules. */
1291 	reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
1292 	reg &= ~CPSW_SL_MACTL_GMII_ENABLE;
1293 	cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);
1294 
1295 	if (cpsw_ports_down(sc->swsc)) {
1296 		/* Capture stats before we reset controller. */
1297 		cpsw_stats_collect(sc->swsc);
1298 
1299 		cpsw_reset(sc->swsc);
1300 		cpsw_init(sc->swsc);
1301 	}
1302 }
1303 
1304 /*
1305  *  Suspend/Resume.
1306  */
1307 
1308 static int
1309 cpsw_suspend(device_t dev)
1310 {
1311 	struct cpsw_softc *sc;
1312 	struct cpswp_softc *psc;
1313 	int i;
1314 
1315 	sc = device_get_softc(dev);
1316 	CPSW_DEBUGF(sc, (""));
1317 	for (i = 0; i < CPSW_PORTS; i++) {
1318 		if (!sc->dualemac && i != sc->active_slave)
1319 			continue;
1320 		psc = device_get_softc(sc->port[i].dev);
1321 		CPSW_PORT_LOCK(psc);
1322 		cpswp_stop_locked(psc);
1323 		CPSW_PORT_UNLOCK(psc);
1324 	}
1325 
1326 	return (0);
1327 }
1328 
1329 static int
1330 cpsw_resume(device_t dev)
1331 {
1332 	struct cpsw_softc *sc;
1333 
1334 	sc = device_get_softc(dev);
1335 	CPSW_DEBUGF(sc, ("UNIMPLEMENTED"));
1336 
1337 	return (0);
1338 }
1339 
1340 /*
1341  *
1342  *  IOCTL
1343  *
1344  */
1345 
1346 static void
1347 cpsw_set_promisc(struct cpswp_softc *sc, int set)
1348 {
1349 	uint32_t reg;
1350 
1351 	/*
1352 	 * Enabling promiscuous mode requires ALE_BYPASS to be enabled.
1353 	 * That disables the ALE forwarding logic and causes every
1354 	 * packet to be sent only to the host port.  In bypass mode,
1355 	 * the ALE processes host port transmit packets the same as in
1356 	 * normal mode.
1357 	 */
1358 	reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL);
1359 	reg &= ~CPSW_ALE_CTL_BYPASS;
1360 	if (set)
1361 		reg |= CPSW_ALE_CTL_BYPASS;
1362 	cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg);
1363 }
1364 
1365 static void
1366 cpsw_set_allmulti(struct cpswp_softc *sc, int set)
1367 {
1368 	if (set) {
1369 		printf("All-multicast mode unimplemented\n");
1370 	}
1371 }
1372 
1373 static int
1374 cpswp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1375 {
1376 	struct cpswp_softc *sc;
1377 	struct ifreq *ifr;
1378 	int error;
1379 	uint32_t changed;
1380 
1381 	error = 0;
1382 	sc = ifp->if_softc;
1383 	ifr = (struct ifreq *)data;
1384 
1385 	switch (command) {
1386 	case SIOCSIFCAP:
1387 		changed = ifp->if_capenable ^ ifr->ifr_reqcap;
1388 		if (changed & IFCAP_HWCSUM) {
1389 			if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM)
1390 				ifp->if_capenable |= IFCAP_HWCSUM;
1391 			else
1392 				ifp->if_capenable &= ~IFCAP_HWCSUM;
1393 		}
1394 		error = 0;
1395 		break;
1396 	case SIOCSIFFLAGS:
1397 		CPSW_PORT_LOCK(sc);
1398 		if (ifp->if_flags & IFF_UP) {
1399 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1400 				changed = ifp->if_flags ^ sc->if_flags;
1401 				CPSW_DEBUGF(sc->swsc,
1402 				    ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)",
1403 				    changed));
1404 				if (changed & IFF_PROMISC)
1405 					cpsw_set_promisc(sc,
1406 					    ifp->if_flags & IFF_PROMISC);
1407 				if (changed & IFF_ALLMULTI)
1408 					cpsw_set_allmulti(sc,
1409 					    ifp->if_flags & IFF_ALLMULTI);
1410 			} else {
1411 				CPSW_DEBUGF(sc->swsc,
1412 				    ("SIOCSIFFLAGS: starting up"));
1413 				cpswp_init_locked(sc);
1414 			}
1415 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1416 			CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: shutting down"));
1417 			cpswp_stop_locked(sc);
1418 		}
1419 
1420 		sc->if_flags = ifp->if_flags;
1421 		CPSW_PORT_UNLOCK(sc);
1422 		break;
1423 	case SIOCADDMULTI:
1424 		cpswp_ale_update_addresses(sc, 0);
1425 		break;
1426 	case SIOCDELMULTI:
1427 		/* Ugh.  DELMULTI doesn't provide the specific address
1428 		   being removed, so the best we can do is remove
1429 		   everything and rebuild it all. */
1430 		cpswp_ale_update_addresses(sc, 1);
1431 		break;
1432 	case SIOCGIFMEDIA:
1433 	case SIOCSIFMEDIA:
1434 		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1435 		break;
1436 	default:
1437 		error = ether_ioctl(ifp, command, data);
1438 	}
1439 	return (error);
1440 }
1441 
1442 /*
1443  *
1444  * MIIBUS
1445  *
1446  */
1447 static int
1448 cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg)
1449 {
1450 	uint32_t r, retries = CPSW_MIIBUS_RETRIES;
1451 
1452 	while (--retries) {
1453 		r = cpsw_read_4(sc, reg);
1454 		if ((r & MDIO_PHYACCESS_GO) == 0)
1455 			return (1);
1456 		DELAY(CPSW_MIIBUS_DELAY);
1457 	}
1458 
1459 	return (0);
1460 }
1461 
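/*
 * MDIOUSERACCESS command layout used below (per the MDIO_PHYACCESS_*
 * definitions in if_cpswreg.h): GO (bit 31) starts a transfer and
 * clears on completion, WRITE (bit 30) selects the direction, bits
 * 25:21 carry the register number, bits 20:16 the PHY address and
 * bits 15:0 the data; ACK (bit 29) reports a successful read.
 */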
1462 static int
1463 cpswp_miibus_readreg(device_t dev, int phy, int reg)
1464 {
1465 	struct cpswp_softc *sc;
1466 	uint32_t cmd, r;
1467 
1468 	sc = device_get_softc(dev);
1469 	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
1470 		device_printf(dev, "MDIO not ready to read\n");
1471 		return (0);
1472 	}
1473 
1474 	/* Set GO, reg, phy */
1475 	cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
1476 	cpsw_write_4(sc->swsc, sc->phyaccess, cmd);
1477 
1478 	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
1479 		device_printf(dev, "MDIO timed out during read\n");
1480 		return (0);
1481 	}
1482 
1483 	r = cpsw_read_4(sc->swsc, sc->phyaccess);
1484 	if ((r & MDIO_PHYACCESS_ACK) == 0) {
1485 		device_printf(dev, "Failed to read from PHY.\n");
1486 		r = 0;
1487 	}
1488 	return (r & 0xFFFF);
1489 }
1490 
1491 static int
1492 cpswp_miibus_writereg(device_t dev, int phy, int reg, int value)
1493 {
1494 	struct cpswp_softc *sc;
1495 	uint32_t cmd;
1496 
1497 	sc = device_get_softc(dev);
1498 	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
1499 		device_printf(dev, "MDIO not ready to write\n");
1500 		return (0);
1501 	}
1502 
1503 	/* Set GO, WRITE, reg, phy, and value */
1504 	cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE |
1505 	    (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF);
1506 	cpsw_write_4(sc->swsc, sc->phyaccess, cmd);
1507 
1508 	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
1509 		device_printf(dev, "MDIO timed out during write\n");
1510 		return (0);
1511 	}
1512 
1513 	return (0);
1514 }
1515 
1516 static void
1517 cpswp_miibus_statchg(device_t dev)
1518 {
1519 	struct cpswp_softc *sc;
1520 	uint32_t mac_control, reg;
1521 
1522 	sc = device_get_softc(dev);
1523 	CPSW_DEBUGF(sc->swsc, (""));
1524 
1525 	reg = CPSW_SL_MACCONTROL(sc->unit);
1526 	mac_control = cpsw_read_4(sc->swsc, reg);
1527 	mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A |
1528 	    CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX);
1529 
1530 	switch(IFM_SUBTYPE(sc->mii->mii_media_active)) {
1531 	case IFM_1000_SX:
1532 	case IFM_1000_LX:
1533 	case IFM_1000_CX:
1534 	case IFM_1000_T:
1535 		mac_control |= CPSW_SL_MACTL_GIG;
1536 		break;
1537 
1538 	case IFM_100_TX:
1539 		mac_control |= CPSW_SL_MACTL_IFCTL_A;
1540 		break;
1541 	}
1542 	if (sc->mii->mii_media_active & IFM_FDX)
1543 		mac_control |= CPSW_SL_MACTL_FULLDUPLEX;
1544 
1545 	cpsw_write_4(sc->swsc, reg, mac_control);
1546 }
1547 
1548 /*
1549  *
1550  * Transmit/Receive Packets.
1551  *
1552  */
1553 static void
1554 cpsw_intr_rx(void *arg)
1555 {
1556 	struct cpsw_softc *sc;
1557 	struct ifnet *ifp;
1558 	struct mbuf *received, *next;
1559 
1560 	sc = (struct cpsw_softc *)arg;
1561 	CPSW_RX_LOCK(sc);
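	/*
	 * A pending teardown is acknowledged by writing the CPDMA
	 * teardown completion value (0xfffffffc) to the completion
	 * pointer; RX is then marked down until the next init.
	 */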
1562 	if (sc->rx.teardown) {
1563 		sc->rx.running = 0;
1564 		sc->rx.teardown = 0;
1565 		cpsw_write_cp(sc, &sc->rx, 0xfffffffc);
1566 	}
1567 	received = cpsw_rx_dequeue(sc);
1568 	cpsw_rx_enqueue(sc);
1569 	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
1570 	CPSW_RX_UNLOCK(sc);
1571 
1572 	while (received != NULL) {
1573 		next = received->m_nextpkt;
1574 		received->m_nextpkt = NULL;
1575 		ifp = received->m_pkthdr.rcvif;
1576 		(*ifp->if_input)(ifp, received);
1577 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1578 		received = next;
1579 	}
1580 }
1581 
1582 static struct mbuf *
1583 cpsw_rx_dequeue(struct cpsw_softc *sc)
1584 {
1585 	int nsegs, port, removed;
1586 	struct cpsw_cpdma_bd bd;
1587 	struct cpsw_slot *last, *slot;
1588 	struct cpswp_softc *psc;
1589 	struct mbuf *m, *m0, *mb_head, *mb_tail;
1590 	uint16_t m0_flags;
1591 
1592 	nsegs = 0;
1593 	m0 = NULL;
1594 	last = NULL;
1595 	mb_head = NULL;
1596 	mb_tail = NULL;
1597 	removed = 0;
1598 
1599 	/* Pull completed packets off hardware RX queue. */
1600 	while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
1601 		cpsw_cpdma_read_bd(sc, slot, &bd);
1602 
1603 		/*
1604 		 * Stop on packets still in use by hardware, but do not stop
1605 		 * on packets with the teardown complete flag, they will be
1606 		 * discarded later.
1607 		 */
1608 		if ((bd.flags & (CPDMA_BD_OWNER | CPDMA_BD_TDOWNCMPLT)) ==
1609 		    CPDMA_BD_OWNER)
1610 			break;
1611 
1612 		last = slot;
1613 		++removed;
1614 		STAILQ_REMOVE_HEAD(&sc->rx.active, next);
1615 		STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next);
1616 
1617 		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD);
1618 		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
1619 
1620 		m = slot->mbuf;
1621 		slot->mbuf = NULL;
1622 
1623 		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
1624 			CPSW_DEBUGF(sc, ("RX teardown is complete"));
1625 			m_freem(m);
1626 			sc->rx.running = 0;
1627 			sc->rx.teardown = 0;
1628 			break;
1629 		}
1630 
1631 		port = (bd.flags & CPDMA_BD_PORT_MASK) - 1;
1632 		KASSERT(port >= 0 && port <= 1,
1633 		    ("packet received with invalid port: %d", port));
1634 		psc = device_get_softc(sc->port[port].dev);
1635 
1636 		/* Set up mbuf */
1637 		m->m_data += bd.bufoff;
1638 		m->m_len = bd.buflen;
1639 		if (bd.flags & CPDMA_BD_SOP) {
1640 			m->m_pkthdr.len = bd.pktlen;
1641 			m->m_pkthdr.rcvif = psc->ifp;
1642 			m->m_flags |= M_PKTHDR;
1643 			m0_flags = bd.flags;
1644 			m0 = m;
1645 		}
1646 		nsegs++;
1647 		m->m_next = NULL;
1648 		m->m_nextpkt = NULL;
1649 		if (bd.flags & CPDMA_BD_EOP && m0 != NULL) {
1650 			if (m0_flags & CPDMA_BD_PASS_CRC)
1651 				m_adj(m0, -ETHER_CRC_LEN);
1652 			m0_flags = 0;
1653 			m0 = NULL;
1654 			if (nsegs > sc->rx.longest_chain)
1655 				sc->rx.longest_chain = nsegs;
1656 			nsegs = 0;
1657 		}
1658 
1659 		if ((psc->ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1660 			/* check for valid CRC by looking into pkt_err[5:4] */
1661 			if ((bd.flags &
1662 			    (CPDMA_BD_SOP | CPDMA_BD_PKT_ERR_MASK)) ==
1663 			    CPDMA_BD_SOP) {
1664 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1665 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1666 				m->m_pkthdr.csum_data = 0xffff;
1667 			}
1668 		}
1669 
1670 		if (STAILQ_FIRST(&sc->rx.active) != NULL &&
1671 		    (bd.flags & (CPDMA_BD_EOP | CPDMA_BD_EOQ)) ==
1672 		    (CPDMA_BD_EOP | CPDMA_BD_EOQ)) {
1673 			cpsw_write_hdp_slot(sc, &sc->rx,
1674 			    STAILQ_FIRST(&sc->rx.active));
1675 			sc->rx.queue_restart++;
1676 		}
1677 
1678 		/* Add mbuf to packet list to be returned. */
1679 		if (mb_tail != NULL && (bd.flags & CPDMA_BD_SOP)) {
1680 			mb_tail->m_nextpkt = m;
1681 		} else if (mb_tail != NULL) {
1682 			mb_tail->m_next = m;
1683 		} else if (mb_tail == NULL && (bd.flags & CPDMA_BD_SOP) == 0) {
1684 			if (bootverbose)
1685 				printf(
1686 				    "%s: %s: discarding fragment packet w/o header\n",
1687 				    __func__, psc->ifp->if_xname);
1688 			m_freem(m);
1689 			continue;
1690 		} else {
1691 			mb_head = m;
1692 		}
1693 		mb_tail = m;
1694 	}
1695 
1696 	if (removed != 0) {
1697 		cpsw_write_cp_slot(sc, &sc->rx, last);
1698 		sc->rx.queue_removes += removed;
1699 		sc->rx.avail_queue_len += removed;
1700 		sc->rx.active_queue_len -= removed;
1701 		if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len)
1702 			sc->rx.max_avail_queue_len = sc->rx.avail_queue_len;
1703 		CPSW_DEBUGF(sc, ("Removed %d received packet(s) from RX queue", removed));
1704 	}
1705 
1706 	return (mb_head);
1707 }
1708 
1709 static void
1710 cpsw_rx_enqueue(struct cpsw_softc *sc)
1711 {
1712 	bus_dma_segment_t seg[1];
1713 	struct cpsw_cpdma_bd bd;
1714 	struct cpsw_slot *first_new_slot, *last_old_slot, *next, *slot;
1715 	int error, nsegs, added = 0;
1716 
1717 	/* Register new mbufs with hardware. */
1718 	first_new_slot = NULL;
1719 	last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next);
1720 	while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) {
1721 		if (first_new_slot == NULL)
1722 			first_new_slot = slot;
1723 		if (slot->mbuf == NULL) {
1724 			slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1725 			if (slot->mbuf == NULL) {
1726 				device_printf(sc->dev,
1727 				    "Unable to fill RX queue\n");
1728 				break;
1729 			}
1730 			slot->mbuf->m_len =
1731 			    slot->mbuf->m_pkthdr.len =
1732 			    slot->mbuf->m_ext.ext_size;
1733 		}
1734 
1735 		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
1736 		    slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);
1737 
1738 		KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
1739 		KASSERT(error == 0, ("DMA error (error=%d)", error));
1740 		if (error != 0 || nsegs != 1) {
1741 			device_printf(sc->dev,
1742 			    "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
1743 			    __func__, nsegs, error);
1744 			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
1745 			m_freem(slot->mbuf);
1746 			slot->mbuf = NULL;
1747 			break;
1748 		}
1749 
1750 		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD);
1751 
1752 		/* Create and submit new rx descriptor. */
1753 		if ((next = STAILQ_NEXT(slot, next)) != NULL)
1754 			bd.next = cpsw_cpdma_bd_paddr(sc, next);
1755 		else
1756 			bd.next = 0;
1757 		bd.bufptr = seg->ds_addr;
1758 		bd.bufoff = 0;
1759 		bd.buflen = MCLBYTES - 1;
1760 		bd.pktlen = bd.buflen;
1761 		bd.flags = CPDMA_BD_OWNER;
1762 		cpsw_cpdma_write_bd(sc, slot, &bd);
1763 		++added;
1764 
1765 		STAILQ_REMOVE_HEAD(&sc->rx.avail, next);
1766 		STAILQ_INSERT_TAIL(&sc->rx.active, slot, next);
1767 	}
1768 
1769 	if (added == 0 || first_new_slot == NULL)
1770 		return;
1771 
1772 	CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added));
1773 
1774 	/* Link new entries to hardware RX queue. */
1775 	if (last_old_slot == NULL) {
1776 		/* Start a fresh queue. */
1777 		cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
1778 	} else {
1779 		/* Add buffers to end of current queue. */
1780 		cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
1781 	}
1782 	sc->rx.queue_adds += added;
1783 	sc->rx.avail_queue_len -= added;
1784 	sc->rx.active_queue_len += added;
1785 	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), added);
1786 	if (sc->rx.active_queue_len > sc->rx.max_active_queue_len)
1787 		sc->rx.max_active_queue_len = sc->rx.active_queue_len;
1788 }
1789 
1790 static void
1791 cpswp_start(struct ifnet *ifp)
1792 {
1793 	struct cpswp_softc *sc;
1794 
1795 	sc = ifp->if_softc;
1796 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
1797 	    sc->swsc->tx.running == 0) {
1798 		return;
1799 	}
1800 	CPSW_TX_LOCK(sc->swsc);
1801 	cpswp_tx_enqueue(sc);
1802 	cpsw_tx_dequeue(sc->swsc);
1803 	CPSW_TX_UNLOCK(sc->swsc);
1804 }
1805 
1806 static void
1807 cpsw_intr_tx(void *arg)
1808 {
1809 	struct cpsw_softc *sc;
1810 
1811 	sc = (struct cpsw_softc *)arg;
1812 	CPSW_TX_LOCK(sc);
1813 	if (cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)) == 0xfffffffc)
1814 		cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
1815 	cpsw_tx_dequeue(sc);
1816 	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 2);
1817 	CPSW_TX_UNLOCK(sc);
1818 }
1819 
1820 static void
1821 cpswp_tx_enqueue(struct cpswp_softc *sc)
1822 {
1823 	bus_dma_segment_t segs[CPSW_TXFRAGS];
1824 	struct cpsw_cpdma_bd bd;
1825 	struct cpsw_slot *first_new_slot, *last, *last_old_slot, *next, *slot;
1826 	struct mbuf *m0;
1827 	int error, nsegs, seg, added = 0, padlen;
1828 
1829 	/* Pull pending packets from IF queue and prep them for DMA. */
1830 	last = NULL;
1831 	first_new_slot = NULL;
1832 	last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next);
1833 	while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) {
1834 		IF_DEQUEUE(&sc->ifp->if_snd, m0);
1835 		if (m0 == NULL)
1836 			break;
1837 
1838 		slot->mbuf = m0;
1839 		padlen = ETHER_MIN_LEN - ETHER_CRC_LEN - m0->m_pkthdr.len;
1840 		if (padlen < 0)
1841 			padlen = 0;
1842 		else if (padlen > 0)
1843 			m_append(slot->mbuf, padlen, sc->swsc->nullpad);
1844 
1845 		/* Create mapping in DMA memory */
1846 		error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag,
1847 		    slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
1848 		/* If the packet is too fragmented, try to simplify. */
1849 		if (error == EFBIG ||
1850 		    (error == 0 && nsegs > sc->swsc->tx.avail_queue_len)) {
1851 			bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
1852 			m0 = m_defrag(slot->mbuf, M_NOWAIT);
1853 			if (m0 == NULL) {
1854 				device_printf(sc->dev,
1855 				    "Can't defragment packet; dropping\n");
1856 				m_freem(slot->mbuf);
1857 			} else {
1858 				CPSW_DEBUGF(sc->swsc,
1859 				    ("Requeueing defragmented packet"));
1860 				IF_PREPEND(&sc->ifp->if_snd, m0);
1861 			}
1862 			slot->mbuf = NULL;
1863 			continue;
1864 		}
1865 		if (error != 0) {
1866 			device_printf(sc->dev,
1867 			    "%s: Can't setup DMA (error=%d), dropping packet\n",
1868 			    __func__, error);
1869 			bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
1870 			m_freem(slot->mbuf);
1871 			slot->mbuf = NULL;
1872 			break;
1873 		}
1874 
1875 		bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap,
1876 				BUS_DMASYNC_PREWRITE);
1877 
1878 		CPSW_DEBUGF(sc->swsc,
1879 		    ("Queueing TX packet: %d segments + %d pad bytes",
1880 		    nsegs, padlen));
1881 
1882 		if (first_new_slot == NULL)
1883 			first_new_slot = slot;
1884 
1885 		/* Link from the previous descriptor. */
1886 		if (last != NULL)
1887 			cpsw_cpdma_write_bd_next(sc->swsc, last, slot);
1888 
1889 		slot->ifp = sc->ifp;
1890 
1891 		/* If there is only one segment, the for() loop
1892 		 * gets skipped and the single buffer gets set up
1893 		 * as both SOP and EOP. */
1894 		if (nsegs > 1) {
1895 			next = STAILQ_NEXT(slot, next);
1896 			bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next);
1897 		} else
1898 			bd.next = 0;
1899 		/* Start by setting up the first buffer. */
1900 		bd.bufptr = segs[0].ds_addr;
1901 		bd.bufoff = 0;
1902 		bd.buflen = segs[0].ds_len;
1903 		bd.pktlen = m_length(slot->mbuf, NULL);
1904 		bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER;
1905 		if (sc->swsc->dualemac) {
1906 			bd.flags |= CPDMA_BD_TO_PORT;
1907 			bd.flags |= ((sc->unit + 1) & CPDMA_BD_PORT_MASK);
1908 		}
1909 		for (seg = 1; seg < nsegs; ++seg) {
1910 			/* Save the previous buffer (which isn't EOP) */
1911 			cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
1912 			STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
1913 			STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next);
1914 			slot = STAILQ_FIRST(&sc->swsc->tx.avail);
1915 
1916 			/* Setup next buffer (which isn't SOP) */
1917 			if (nsegs > seg + 1) {
1918 				next = STAILQ_NEXT(slot, next);
1919 				bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next);
1920 			} else
1921 				bd.next = 0;
1922 			bd.bufptr = segs[seg].ds_addr;
1923 			bd.bufoff = 0;
1924 			bd.buflen = segs[seg].ds_len;
1925 			bd.pktlen = 0;
1926 			bd.flags = CPDMA_BD_OWNER;
1927 		}
1928 
1929 		/* Save the final buffer. */
1930 		bd.flags |= CPDMA_BD_EOP;
1931 		cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
1932 		STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
1933 		STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next);
1934 
1935 		last = slot;
1936 		added += nsegs;
1937 		if (nsegs > sc->swsc->tx.longest_chain)
1938 			sc->swsc->tx.longest_chain = nsegs;
1939 
1940 		BPF_MTAP(sc->ifp, m0);
1941 	}
1942 
1943 	if (first_new_slot == NULL)
1944 		return;
1945 
1946 	/* Attach the list of new buffers to the hardware TX queue. */
1947 	if (last_old_slot != NULL &&
1948 	    (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) &
1949 	     CPDMA_BD_EOQ) == 0) {
1950 		/* Add buffers to end of current queue. */
1951 		cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot,
1952 		    first_new_slot);
1953 	} else {
1954 		/* Start a fresh queue. */
1955 		cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot);
1956 	}
1957 	sc->swsc->tx.queue_adds += added;
1958 	sc->swsc->tx.avail_queue_len -= added;
1959 	sc->swsc->tx.active_queue_len += added;
1960 	if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) {
1961 		sc->swsc->tx.max_active_queue_len = sc->swsc->tx.active_queue_len;
1962 	}
1963 	CPSW_DEBUGF(sc->swsc, ("Queued %d TX packet(s)", added));
1964 }
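
/*
 * Editor's sketch (not driver code): how the loop above lays a
 * multi-segment packet out as a chain of buffer descriptors -- every
 * descriptor carries OWNER, the first adds SOP and the total packet
 * length, and the last adds EOP with a terminated next pointer.  The
 * structure and flag values are simplified stand-ins, not the
 * hardware CPDMA layout.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define BD_SOP		0x8000
#define BD_EOP		0x4000
#define BD_OWNER	0x2000

struct soft_bd {
	int		next;		/* index of next BD, -1 terminates */
	uint32_t	buflen;
	uint32_t	pktlen;		/* only meaningful on the SOP */
	uint16_t	flags;
};

static void
fill_chain(struct soft_bd *bd, const uint32_t *seglen, int nsegs)
{
	uint32_t pktlen;
	int i;

	pktlen = 0;
	for (i = 0; i < nsegs; i++)
		pktlen += seglen[i];
	for (i = 0; i < nsegs; i++) {
		bd[i].next = (i + 1 < nsegs) ? i + 1 : -1;
		bd[i].buflen = seglen[i];
		bd[i].pktlen = (i == 0) ? pktlen : 0;
		bd[i].flags = BD_OWNER;
		if (i == 0)
			bd[i].flags |= BD_SOP;
		if (i == nsegs - 1)
			bd[i].flags |= BD_EOP;
	}
}

int
main(void)
{
	static const uint32_t seglen[3] = { 14, 20, 1000 };
	struct soft_bd bd[3];
	int i;

	fill_chain(bd, seglen, 3);
	for (i = 0; i < 3; i++)
		printf("bd %d: next %d len %u pktlen %u flags 0x%04x\n",
		    i, bd[i].next, bd[i].buflen, bd[i].pktlen,
		    (unsigned)bd[i].flags);
	return (0);
}
#endif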
1965 
1966 static int
1967 cpsw_tx_dequeue(struct cpsw_softc *sc)
1968 {
1969 	struct cpsw_slot *slot, *last_removed_slot = NULL;
1970 	struct cpsw_cpdma_bd bd;
1971 	uint32_t flags, removed = 0;
1972 
1973 	/* Pull completed buffers off the hardware TX queue. */
1974 	slot = STAILQ_FIRST(&sc->tx.active);
1975 	while (slot != NULL) {
1976 		flags = cpsw_cpdma_read_bd_flags(sc, slot);
1977 
1978 		/* Teardown completion is only marked on the packet's SOP descriptor. */
1979 		if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) ==
1980 		    (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) {
1981 			sc->tx.teardown = 1;
1982 		}
1983 
1984 		if ((flags & (CPDMA_BD_SOP | CPDMA_BD_OWNER)) ==
1985 		    (CPDMA_BD_SOP | CPDMA_BD_OWNER) && sc->tx.teardown == 0)
1986 			break; /* Hardware is still using this packet. */
1987 
1988 		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE);
1989 		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
1990 		m_freem(slot->mbuf);
1991 		slot->mbuf = NULL;
1992 
1993 		if (slot->ifp) {
1994 			if (sc->tx.teardown == 0)
1995 				if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1);
1996 			else
1997 				if_inc_counter(slot->ifp, IFCOUNTER_OQDROPS, 1);
1998 		}
1999 
2000 		/* Dequeue any additional buffers used by this packet. */
2001 		while (slot != NULL && slot->mbuf == NULL) {
2002 			STAILQ_REMOVE_HEAD(&sc->tx.active, next);
2003 			STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next);
2004 			++removed;
2005 			last_removed_slot = slot;
2006 			slot = STAILQ_FIRST(&sc->tx.active);
2007 		}
2008 
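		/*
		 * Tell the hardware how far software has processed by
		 * writing the last retired descriptor to the completion
		 * pointer; this is also what acknowledges the interrupt.
		 */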
2009 		cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot);
2010 
2011 		/*
		 * Restart the TX queue if necessary.  The hardware sets
		 * EOQ on the completed descriptor when it runs off the
		 * end of the chain; if new descriptors were appended
		 * after that point, the head pointer must be rewritten
		 * to resume DMA.
		 */
2012 		cpsw_cpdma_read_bd(sc, last_removed_slot, &bd);
2013 		if (slot != NULL && bd.next != 0 && (bd.flags &
2014 		    (CPDMA_BD_EOP | CPDMA_BD_OWNER | CPDMA_BD_EOQ)) ==
2015 		    (CPDMA_BD_EOP | CPDMA_BD_EOQ)) {
2016 			cpsw_write_hdp_slot(sc, &sc->tx, slot);
2017 			sc->tx.queue_restart++;
2018 			break;
2019 		}
2020 	}
2021 
2022 	if (removed != 0) {
2023 		sc->tx.queue_removes += removed;
2024 		sc->tx.active_queue_len -= removed;
2025 		sc->tx.avail_queue_len += removed;
2026 		if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len)
2027 			sc->tx.max_avail_queue_len = sc->tx.avail_queue_len;
2028 		CPSW_DEBUGF(sc, ("TX removed %d completed packet(s)", removed));
2029 	}
2030 
2031 	if (sc->tx.teardown && STAILQ_EMPTY(&sc->tx.active)) {
2032 		CPSW_DEBUGF(sc, ("TX teardown is complete"));
2033 		sc->tx.teardown = 0;
2034 		sc->tx.running = 0;
2035 	}
2036 
2037 	return (removed);
2038 }
2039 
2040 /*
2041  *
2042  * Miscellaneous interrupts.
2043  *
2044  */
2045 
2046 static void
2047 cpsw_intr_rx_thresh(void *arg)
2048 {
2049 	struct cpsw_softc *sc;
2050 	struct ifnet *ifp;
2051 	struct mbuf *received, *next;
2052 
2053 	sc = (struct cpsw_softc *)arg;
2054 	CPSW_RX_LOCK(sc);
2055 	received = cpsw_rx_dequeue(sc);
2056 	cpsw_rx_enqueue(sc);
2057 	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
2058 	CPSW_RX_UNLOCK(sc);
2059 
2060 	while (received != NULL) {
2061 		next = received->m_nextpkt;
2062 		received->m_nextpkt = NULL;
2063 		ifp = received->m_pkthdr.rcvif;
2064 		(*ifp->if_input)(ifp, received);
2065 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2066 		received = next;
2067 	}
2068 }
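
/*
 * Editor's sketch (not driver code): the hand-off pattern above --
 * detach a whole packet list while holding the RX lock, then walk the
 * m_nextpkt chain after unlocking so if_input() runs without the lock
 * held.  Modelled with a plain singly linked list; all names are
 * illustrative.
 */
#if 0
#include <stddef.h>
#include <stdio.h>

struct pkt {
	int id;
	struct pkt *nextpkt;
};

static void
deliver_all(struct pkt *received)
{
	struct pkt *next;

	while (received != NULL) {
		next = received->nextpkt;
		received->nextpkt = NULL;	/* detach before hand-off */
		printf("input packet %d\n", received->id);
		received = next;
	}
}

int
main(void)
{
	struct pkt c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	deliver_all(&a);
	return (0);
}
#endif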
2069 
2070 static void
2071 cpsw_intr_misc_host_error(struct cpsw_softc *sc)
2072 {
2073 	uint32_t intstat;
2074 	uint32_t dmastat;
2075 	int txerr, rxerr, txchan, rxchan;
2076 
2077 	printf("\n\n");
2078 	device_printf(sc->dev,
2079 	    "HOST ERROR:  PROGRAMMING ERROR DETECTED BY HARDWARE\n");
2080 	printf("\n\n");
2081 	intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
2082 	device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat);
2083 	dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
2084 	device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat);
2085 
2086 	txerr = (dmastat >> 20) & 15;
2087 	txchan = (dmastat >> 16) & 7;
2088 	rxerr = (dmastat >> 12) & 15;
2089 	rxchan = (dmastat >> 8) & 7;
2090 
2091 	switch (txerr) {
2092 	case 0: break;
2093 	case 1:	printf("SOP error on TX channel %d\n", txchan);
2094 		break;
2095 	case 2:	printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan);
2096 		break;
2097 	case 3:	printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan);
2098 		break;
2099 	case 4:	printf("Zero Buffer Pointer on TX channel %d\n", txchan);
2100 		break;
2101 	case 5:	printf("Zero Buffer Length on TX channel %d\n", txchan);
2102 		break;
2103 	case 6:	printf("Packet length error on TX channel %d\n", txchan);
2104 		break;
2105 	default: printf("Unknown error on TX channel %d\n", txchan);
2106 		break;
2107 	}
2108 
2109 	if (txerr != 0) {
2110 		printf("CPSW_CPDMA_TX%d_HDP=0x%x\n",
2111 		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan)));
2112 		printf("CPSW_CPDMA_TX%d_CP=0x%x\n",
2113 		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan)));
2114 		cpsw_dump_queue(sc, &sc->tx.active);
2115 	}
2116 
2117 	switch (rxerr) {
2118 	case 0: break;
2119 	case 2:	printf("Ownership bit not set on RX channel %d\n", rxchan);
2120 		break;
2121 	case 4:	printf("Zero Buffer Pointer on RX channel %d\n", rxchan);
2122 		break;
2123 	case 5:	printf("Zero Buffer Length on RX channel %d\n", rxchan);
2124 		break;
2125 	case 6:	printf("Buffer offset too big on RX channel %d\n", rxchan);
2126 		break;
2127 	default: printf("Unknown RX error on RX channel %d\n", rxchan);
2128 		break;
2129 	}
2130 
2131 	if (rxerr != 0) {
2132 		printf("CPSW_CPDMA_RX%d_HDP=0x%x\n",
2133 		    rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan)));
2134 		printf("CPSW_CPDMA_RX%d_CP=0x%x\n",
2135 		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan)));
2136 		cpsw_dump_queue(sc, &sc->rx.active);
2137 	}
2138 
2139 	printf("\nALE Table\n");
2140 	cpsw_ale_dump_table(sc);
2141 
2142 	/* XXX do something useful here?? */
2143 	panic("CPSW HOST ERROR INTERRUPT");
2144 
2145 	/* NOTREACHED: suppress this interrupt in the future. */
2146 	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat);
2147 	printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n");
2148 	/* The watchdog will probably reset the controller
2149 	 * in a little while.  It will probably fail again. */
2150 }
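
/*
 * Editor's sketch (not driver code): the DMASTATUS field extraction
 * used above, applied to a made-up status word.  The bit positions
 * mirror the shifts in cpsw_intr_misc_host_error(); the sample value
 * is hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t dmastat = 0x00230000;	/* hypothetical register value */

	printf("txerr %u txchan %u rxerr %u rxchan %u\n",
	    (dmastat >> 20) & 15, (dmastat >> 16) & 7,
	    (dmastat >> 12) & 15, (dmastat >> 8) & 7);
	return (0);
}
#endif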
2151 
2152 static void
2153 cpsw_intr_misc(void *arg)
2154 {
2155 	struct cpsw_softc *sc = arg;
2156 	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));
2157 
2158 	if (stat & CPSW_WR_C_MISC_EVNT_PEND)
2159 		CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented"));
2160 	if (stat & CPSW_WR_C_MISC_STAT_PEND)
2161 		cpsw_stats_collect(sc);
2162 	if (stat & CPSW_WR_C_MISC_HOST_PEND)
2163 		cpsw_intr_misc_host_error(sc);
2164 	if (stat & CPSW_WR_C_MISC_MDIOLINK) {
2165 		cpsw_write_4(sc, MDIOLINKINTMASKED,
2166 		    cpsw_read_4(sc, MDIOLINKINTMASKED));
2167 	}
2168 	if (stat & CPSW_WR_C_MISC_MDIOUSER) {
2169 		CPSW_DEBUGF(sc,
2170 		    ("MDIO operation completed interrupt unimplemented"));
2171 	}
2172 	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
2173 }
2174 
2175 /*
2176  *
2177  * Periodic Checks and Watchdog.
2178  *
2179  */
2180 
2181 static void
2182 cpswp_tick(void *msc)
2183 {
2184 	struct cpswp_softc *sc = msc;
2185 
2186 	/* Check for media type change */
2187 	mii_tick(sc->mii);
2188 	if (sc->media_status != sc->mii->mii_media.ifm_media) {
2189 		printf("%s: media type changed (ifm_media=%x)\n", __func__,
2190 			sc->mii->mii_media.ifm_media);
2191 		cpswp_ifmedia_upd(sc->ifp);
2192 	}
2193 
2194 	/* Schedule another timeout one second from now */
2195 	callout_reset(&sc->mii_callout, hz, cpswp_tick, sc);
2196 }
2197 
2198 static void
2199 cpswp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2200 {
2201 	struct cpswp_softc *sc;
2202 	struct mii_data *mii;
2203 
2204 	sc = ifp->if_softc;
2205 	CPSW_DEBUGF(sc->swsc, (""));
2206 	CPSW_PORT_LOCK(sc);
2207 
2208 	mii = sc->mii;
2209 	mii_pollstat(mii);
2210 
2211 	ifmr->ifm_active = mii->mii_media_active;
2212 	ifmr->ifm_status = mii->mii_media_status;
2213 	CPSW_PORT_UNLOCK(sc);
2214 }
2215 
2216 static int
2217 cpswp_ifmedia_upd(struct ifnet *ifp)
2218 {
2219 	struct cpswp_softc *sc;
2220 
2221 	sc = ifp->if_softc;
2222 	CPSW_DEBUGF(sc->swsc, (""));
2223 	CPSW_PORT_LOCK(sc);
2224 	mii_mediachg(sc->mii);
2225 	sc->media_status = sc->mii->mii_media.ifm_media;
2226 	CPSW_PORT_UNLOCK(sc);
2227 
2228 	return (0);
2229 }
2230 
2231 static void
2232 cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc)
2233 {
2234 	struct cpswp_softc *psc;
2235 	int i;
2236 
2237 	cpsw_debugf_head("CPSW watchdog");
2238 	device_printf(sc->dev, "watchdog timeout\n");
2239 	printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 0,
2240 	    cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)));
2241 	printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 0,
2242 	    cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)));
2243 	cpsw_dump_queue(sc, &sc->tx.active);
2244 	for (i = 0; i < CPSW_PORTS; i++) {
2245 		if (!sc->dualemac && i != sc->active_slave)
2246 			continue;
2247 		psc = device_get_softc(sc->port[i].dev);
2248 		CPSW_PORT_LOCK(psc);
2249 		cpswp_stop_locked(psc);
2250 		CPSW_PORT_UNLOCK(psc);
2251 	}
2252 }
2253 
2254 static void
2255 cpsw_tx_watchdog(void *msc)
2256 {
2257 	struct cpsw_softc *sc;
2258 
2259 	sc = msc;
2260 	CPSW_TX_LOCK(sc);
2261 	if (sc->tx.active_queue_len == 0 || !sc->tx.running) {
2262 		sc->watchdog.timer = 0; /* Nothing to do. */
2263 	} else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) {
2264 		sc->watchdog.timer = 0;  /* Stuff done while we weren't looking. */
2265 	} else if (cpsw_tx_dequeue(sc) > 0) {
2266 		sc->watchdog.timer = 0;  /* We just did something. */
2267 	} else {
2268 		/* There was something to do but it didn't get done. */
2269 		++sc->watchdog.timer;
2270 		if (sc->watchdog.timer > 5) {
2271 			sc->watchdog.timer = 0;
2272 			++sc->watchdog.resets;
2273 			cpsw_tx_watchdog_full_reset(sc);
2274 		}
2275 	}
2276 	sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes;
2277 	CPSW_TX_UNLOCK(sc);
2278 
2279 	/* Schedule another timeout one second from now */
2280 	callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
2281 }
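
/*
 * Editor's sketch (not driver code): the progress-detection idea in
 * cpsw_tx_watchdog() above.  The timer only advances on ticks where
 * work was pending but no completions were observed; more than five
 * such ticks in a row trigger a reset.  All names here are
 * hypothetical.
 */
#if 0
#include <stdio.h>

struct wd_state {
	unsigned queue_removes;		/* completions so far */
	unsigned removes_at_last_tick;
	unsigned pending;		/* packets still queued */
	unsigned timer;
	unsigned resets;
};

static void
wd_tick(struct wd_state *wd)
{
	if (wd->pending == 0 ||
	    wd->queue_removes > wd->removes_at_last_tick) {
		wd->timer = 0;		/* idle, or progress was made */
	} else if (++wd->timer > 5) {
		wd->timer = 0;
		wd->resets++;
		printf("watchdog reset #%u\n", wd->resets);
	}
	wd->removes_at_last_tick = wd->queue_removes;
}

int
main(void)
{
	struct wd_state wd = { 0, 0, 1, 0, 0 };
	int i;

	for (i = 0; i < 12; i++)	/* a stalled queue: resets fire */
		wd_tick(&wd);
	return (0);
}
#endif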
2282 
2283 /*
2284  *
2285  * ALE support routines.
2286  *
2287  */
2288 
2289 static void
2290 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
2291 {
2292 	cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
2293 	ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
2294 	ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
2295 	ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
2296 }
2297 
2298 static void
2299 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
2300 {
2301 	cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
2302 	cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
2303 	cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
2304 	cpsw_write_4(sc, CPSW_ALE_TBLCTL, (1u << 31) | (idx & 1023));
2305 }
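
/*
 * Editor's sketch (not driver code): the indirect table-access pattern
 * used by the two helpers above, modelled with an array standing in
 * for the ALE storage.  Writing an index to the control word loads the
 * three data words; setting the commit bit stores them back.  Names
 * and widths here are illustrative.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TBL_ENTRIES	1024
#define TBLCTL_WRITE	(1u << 31)

static uint32_t table[TBL_ENTRIES][3];	/* fake ALE storage */
static uint32_t tblw[3];		/* fake TBLW0..TBLW2 */

static void
tblctl(uint32_t v)
{
	uint32_t idx = v & (TBL_ENTRIES - 1);

	if (v & TBLCTL_WRITE)
		memcpy(table[idx], tblw, sizeof(tblw));	/* commit entry */
	else
		memcpy(tblw, table[idx], sizeof(tblw));	/* load entry */
}

int
main(void)
{
	tblw[0] = 0xdeadbeef;
	tblw[1] = 0x12345678;
	tblw[2] = 0x4;
	tblctl(TBLCTL_WRITE | 7);	/* write entry 7 */

	memset(tblw, 0, sizeof(tblw));
	tblctl(7);			/* read it back */
	printf("%08x %08x %08x\n", tblw[0], tblw[1], tblw[2]);
	return (0);
}
#endif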
2306 
2307 static void
2308 cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
2309 {
2310 	int i;
2311 	uint32_t ale_entry[3];
2312 
2313 	/* Skip the first ten entries, reserved for addresses and VLANs. */
2314 	for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) {
2315 		cpsw_ale_read_entry(sc, i, ale_entry);
2316 		if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR ||
2317 		    ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) &&
2318 		    ALE_MCAST(ale_entry) == 1) { /* MCast link addr */
2319 			ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
2320 			cpsw_ale_write_entry(sc, i, ale_entry);
2321 		}
2322 	}
2323 }
2324 
2325 static int
2326 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan,
2327 	uint8_t *mac)
2328 {
2329 	int free_index = -1, matching_index = -1, i;
2330 	uint32_t ale_entry[3], ale_type;
2331 
2332 	/* Find a matching entry or a free entry. */
2333 	for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) {
2334 		cpsw_ale_read_entry(sc, i, ale_entry);
2335 
2336 		/* Entry Type[61:60] is 0 for free entry */
2337 		if (free_index < 0 && ALE_TYPE(ale_entry) == 0)
2338 			free_index = i;
2339 
2340 		if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) &&
2341 		    (((ale_entry[1] >> 0) & 0xFF) == mac[1]) &&
2342 		    (((ale_entry[0] >> 24) & 0xFF) == mac[2]) &&
2343 		    (((ale_entry[0] >> 16) & 0xFF) == mac[3]) &&
2344 		    (((ale_entry[0] >> 8) & 0xFF) == mac[4]) &&
2345 		    (((ale_entry[0] >> 0) & 0xFF) == mac[5])) {
2346 			matching_index = i;
2347 			break;
2348 		}
2349 	}
2350 
2351 	if (matching_index < 0) {
2352 		if (free_index < 0)
2353 			return (ENOMEM);
2354 		i = free_index;
2355 	}
2356 
2357 	if (vlan != -1)
2358 		ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16;
2359 	else
2360 		ale_type = ALE_TYPE_ADDR << 28;
2361 
2362 	/* Set MAC address */
2363 	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
2364 	ale_entry[1] = mac[0] << 8 | mac[1];
2365 
2366 	/* Entry type[61:60] and mcast fwd state[63:62] are set to forward (3). */
2367 	ale_entry[1] |= ALE_MCAST_FWD | ale_type;
2368 
2369 	/* Set portmask [68:66] */
2370 	ale_entry[2] = (portmap & 7) << 2;
2371 
2372 	cpsw_ale_write_entry(sc, i, ale_entry);
2373 
2374 	return (0);
2375 }
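
/*
 * Editor's sketch (not driver code): round-trip of the MAC address
 * packing used above -- mac[2..5] in the low word, mac[0..1] in the
 * low 16 bits of the second word -- matching the unpack order in the
 * comparison loop of cpsw_ale_mc_entry_set().  The address is made up.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1e, 0xc0, 0x12, 0x34, 0x56 };
	uint32_t w0, w1;

	w0 = (uint32_t)mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
	w1 = (uint32_t)mac[0] << 8 | mac[1];

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	    (w1 >> 8) & 0xFF, (w1 >> 0) & 0xFF,
	    (w0 >> 24) & 0xFF, (w0 >> 16) & 0xFF,
	    (w0 >> 8) & 0xFF, (w0 >> 0) & 0xFF);
	return (0);
}
#endif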
2376 
2377 static void
2378 cpsw_ale_dump_table(struct cpsw_softc *sc)
{
	int i;
	uint32_t ale_entry[3];

2381 	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
2382 		cpsw_ale_read_entry(sc, i, ale_entry);
2383 		switch (ALE_TYPE(ale_entry)) {
2384 		case ALE_TYPE_VLAN:
2385 			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
2386 				ale_entry[1], ale_entry[0]);
2387 			printf("type: %u ", ALE_TYPE(ale_entry));
2388 			printf("vlan: %u ", ALE_VLAN(ale_entry));
2389 			printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry));
2390 			printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry));
2391 			printf("unreg flood: %u ", ALE_VLAN_UNREGFLOOD(ale_entry));
2392 			printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry));
2393 			printf("\n");
2394 			break;
2395 		case ALE_TYPE_ADDR:
2396 		case ALE_TYPE_VLAN_ADDR:
2397 			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
2398 				ale_entry[1], ale_entry[0]);
2399 			printf("type: %u ", ALE_TYPE(ale_entry));
2400 			printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ",
2401 				(ale_entry[1] >> 8) & 0xFF,
2402 				(ale_entry[1] >> 0) & 0xFF,
2403 				(ale_entry[0] >> 24) & 0xFF,
2404 				(ale_entry[0] >> 16) & 0xFF,
2405 				(ale_entry[0] >> 8) & 0xFF,
2406 				(ale_entry[0] >> 0) & 0xFF);
2407 			printf(ALE_MCAST(ale_entry) ? "mcast " : "ucast ");
2408 			if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR)
2409 				printf("vlan: %u ", ALE_VLAN(ale_entry));
2410 			printf("port: %u ", ALE_PORTS(ale_entry));
2411 			printf("\n");
2412 			break;
2413 		}
2414 	}
2415 	printf("\n");
2416 }
2417 
2418 static int
2419 cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge)
2420 {
2421 	uint8_t *mac;
2422 	uint32_t ale_entry[3], ale_type, portmask;
2423 	struct ifmultiaddr *ifma;
2424 
2425 	if (sc->swsc->dualemac) {
2426 		ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16;
2427 		portmask = 1 << (sc->unit + 1) | 1 << 0;
2428 	} else {
2429 		ale_type = ALE_TYPE_ADDR << 28;
2430 		portmask = 7;
2431 	}
2432 
2433 	/*
2434 	 * Route incoming packets for our MAC address to Port 0 (host).
2435 	 * For simplicity, keep this entry at table index 0 for port 1 and
2436 	 * at index 2 for port 2 in the ALE.
2437 	 */
2438 	if_addr_rlock(sc->ifp);
2439 	mac = LLADDR((struct sockaddr_dl *)sc->ifp->if_addr->ifa_addr);
2440 	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
2441 	ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */
2442 	ale_entry[2] = 0; /* port = 0 */
2443 	cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry);
2444 
2445 	/* Set outgoing MAC Address for slave port. */
2446 	cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1),
2447 	    mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
2448 	cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1),
2449 	    mac[5] << 8 | mac[4]);
2450 	if_addr_runlock(sc->ifp);
2451 
2452 	/* Keep the broadcast address at table entry 1 (or 3). */
2453 	ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */
2454 	/* ALE_MCAST_FWD, Addr type, upper 16 bits of Mac */
2455 	ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff;
2456 	ale_entry[2] = portmask << 2;
2457 	cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry);
2458 
2459 	/* SIOCDELMULTI doesn't specify the particular address
2460 	 * being removed, so we have to remove all and rebuild. */
2461 	if (purge)
2462 		cpsw_ale_remove_all_mc_entries(sc->swsc);
2463 
2464 	/* Set other multicast addrs desired. */
2465 	if_maddr_rlock(sc->ifp);
2466 	TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) {
2467 		if (ifma->ifma_addr->sa_family != AF_LINK)
2468 			continue;
2469 		cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan,
2470 		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
2471 	}
2472 	if_maddr_runlock(sc->ifp);
2473 
2474 	return (0);
2475 }
2476 
2477 static int
2478 cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports,
2479 	int untag, int mcregflood, int mcunregflood)
2480 {
2481 	int free_index, i, matching_index;
2482 	uint32_t ale_entry[3];
2483 
2484 	free_index = matching_index = -1;
2485 	/* Find a matching entry or a free entry. */
2486 	for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) {
2487 		cpsw_ale_read_entry(sc, i, ale_entry);
2488 
2489 		/* Entry Type[61:60] is 0 for free entry */
2490 		if (free_index < 0 && ALE_TYPE(ale_entry) == 0)
2491 			free_index = i;
2492 
2493 		if (ALE_VLAN(ale_entry) == vlan) {
2494 			matching_index = i;
2495 			break;
2496 		}
2497 	}
2498 
2499 	if (matching_index < 0) {
2500 		if (free_index < 0)
2501 			return (-1);
2502 		i = free_index;
2503 	}
2504 
2505 	ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 |
2506 	    (mcunregflood & 7) << 8 | (ports & 7);
2507 	ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16;
2508 	ale_entry[2] = 0;
2509 	cpsw_ale_write_entry(sc, i, ale_entry);
2510 
2511 	return (0);
2512 }
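
/*
 * Editor's sketch (not driver code): decode of the VLAN entry words
 * packed above.  Field positions mirror the (x & 7) shifts in
 * cpsw_ale_update_vlan_table(); the entry-type value 2 is assumed here
 * for illustration.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int vlan = 100, ports = 7, untag = 1;
	int mcregflood = 7, mcunregflood = 0;
	uint32_t w0, w1;

	w0 = (untag & 7) << 24 | (mcregflood & 7) << 16 |
	    (mcunregflood & 7) << 8 | (ports & 7);
	w1 = 2u << 28 | vlan << 16;	/* assumed ALE_TYPE_VLAN == 2 */

	printf("vlan %u members %u untag %u regflood %u unregflood %u\n",
	    (w1 >> 16) & 0xfff, w0 & 7, (w0 >> 24) & 7,
	    (w0 >> 16) & 7, (w0 >> 8) & 7);
	return (0);
}
#endif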
2513 
2514 /*
2515  *
2516  * Statistics and Sysctls.
2517  *
2518  */
2519 
2520 #if 0
2521 static void
2522 cpsw_stats_dump(struct cpsw_softc *sc)
2523 {
2524 	int i;
2525 	uint32_t r;
2526 
2527 	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
2528 		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
2529 		    cpsw_stat_sysctls[i].reg);
2530 		CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid,
2531 		    (uintmax_t)sc->shadow_stats[i], r,
2532 		    (uintmax_t)sc->shadow_stats[i] + r));
2533 	}
2534 }
2535 #endif
2536 
2537 static void
2538 cpsw_stats_collect(struct cpsw_softc *sc)
2539 {
2540 	int i;
2541 	uint32_t r;
2542 
2543 	CPSW_DEBUGF(sc, ("Controller shadow statistics updated."));
2544 
2545 	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
2546 		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
2547 		    cpsw_stat_sysctls[i].reg);
2548 		sc->shadow_stats[i] += r;
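		/*
		 * The hardware statistics registers decrement by the
		 * value written, so writing back the value just read
		 * clears the register without losing new increments.
		 */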
2549 		cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg,
2550 		    r);
2551 	}
2552 }
2553 
2554 static int
2555 cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS)
2556 {
2557 	struct cpsw_softc *sc;
2558 	struct cpsw_stat *stat;
2559 	uint64_t result;
2560 
2561 	sc = (struct cpsw_softc *)arg1;
2562 	stat = &cpsw_stat_sysctls[oidp->oid_number];
2563 	result = sc->shadow_stats[oidp->oid_number];
2564 	result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg);
2565 	return (sysctl_handle_64(oidp, &result, 0, req));
2566 }
2567 
2568 static int
2569 cpsw_stat_attached(SYSCTL_HANDLER_ARGS)
2570 {
2571 	struct cpsw_softc *sc;
2572 	struct bintime t;
2573 	unsigned result;
2574 
2575 	sc = (struct cpsw_softc *)arg1;
2576 	getbinuptime(&t);
2577 	bintime_sub(&t, &sc->attach_uptime);
2578 	result = t.sec;
2579 	return (sysctl_handle_int(oidp, &result, 0, req));
2580 }
2581 
2582 static int
2583 cpsw_intr_coalesce(SYSCTL_HANDLER_ARGS)
2584 {
2585 	int error;
2586 	struct cpsw_softc *sc;
2587 	uint32_t ctrl, intr_per_ms;
2588 
2589 	sc = (struct cpsw_softc *)arg1;
2590 	error = sysctl_handle_int(oidp, &sc->coal_us, 0, req);
2591 	if (error != 0 || req->newptr == NULL)
2592 		return (error);
2593 
2594 	ctrl = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
2595 	ctrl &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
2596 	if (sc->coal_us == 0) {
2597 		/* Disable the interrupt pace hardware. */
2598 		cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);
2599 		cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), 0);
2600 		cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), 0);
2601 		return (0);
2602 	}
2603 
2604 	if (sc->coal_us > CPSW_WR_C_IMAX_US_MAX)
2605 		sc->coal_us = CPSW_WR_C_IMAX_US_MAX;
2606 	if (sc->coal_us < CPSW_WR_C_IMAX_US_MIN)
2607 		sc->coal_us = CPSW_WR_C_IMAX_US_MIN;
2608 	intr_per_ms = 1000 / sc->coal_us;
2609 	/* Just to make sure... */
2610 	if (intr_per_ms > CPSW_WR_C_IMAX_MAX)
2611 		intr_per_ms = CPSW_WR_C_IMAX_MAX;
2612 	if (intr_per_ms < CPSW_WR_C_IMAX_MIN)
2613 		intr_per_ms = CPSW_WR_C_IMAX_MIN;
2614 
2615 	/* Set the prescale to produce 4 us pulses from the 125 MHz clock. */
2616 	ctrl |= (125 * 4) & CPSW_WR_INT_PRESCALE_MASK;
2617 
2618 	/* Enable the interrupt pace hardware. */
2619 	cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), intr_per_ms);
2620 	cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), intr_per_ms);
2621 	ctrl |= CPSW_WR_INT_C0_RX_PULSE | CPSW_WR_INT_C0_TX_PULSE;
2622 	cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);
2623 
2624 	return (0);
2625 }
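
/*
 * Editor's sketch (not driver code): the pacing arithmetic used in
 * cpsw_intr_coalesce() above -- coal_us microseconds between
 * interrupts becomes an interrupts-per-millisecond limit, and the
 * prescaler derives the pacer's 4 us tick from the 125 MHz clock.
 * The clamping limits are omitted here.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned coal_us, intr_per_ms, prescale;

	prescale = 125 * 4;	/* 500 clocks == 4 us at 125 MHz */
	for (coal_us = 10; coal_us <= 1000; coal_us *= 10) {
		intr_per_ms = 1000 / coal_us;
		printf("%u us between interrupts -> IMAX %u (prescale %u)\n",
		    coal_us, intr_per_ms, prescale);
	}
	return (0);
}
#endif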
2626 
2627 static int
2628 cpsw_stat_uptime(SYSCTL_HANDLER_ARGS)
2629 {
2630 	struct cpsw_softc *swsc;
2631 	struct cpswp_softc *sc;
2632 	struct bintime t;
2633 	unsigned result;
2634 
2635 	swsc = arg1;
2636 	sc = device_get_softc(swsc->port[arg2].dev);
2637 	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
2638 		getbinuptime(&t);
2639 		bintime_sub(&t, &sc->init_uptime);
2640 		result = t.sec;
2641 	} else
2642 		result = 0;
2643 	return (sysctl_handle_int(oidp, &result, 0, req));
2644 }
2645 
2646 static void
2647 cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
2648 	struct cpsw_queue *queue)
2649 {
2650 	struct sysctl_oid_list *parent;
2651 
2652 	parent = SYSCTL_CHILDREN(node);
2653 	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers",
2654 	    CTLFLAG_RD, &queue->queue_slots, 0,
2655 	    "Total buffers currently assigned to this queue");
2656 	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers",
2657 	    CTLFLAG_RD, &queue->active_queue_len, 0,
2658 	    "Buffers currently registered with hardware controller");
2659 	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers",
2660 	    CTLFLAG_RD, &queue->max_active_queue_len, 0,
2661 	    "Max value of activeBuffers since last driver reset");
2662 	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers",
2663 	    CTLFLAG_RD, &queue->avail_queue_len, 0,
2664 	    "Buffers allocated to this queue but not currently "
2665 	    "registered with hardware controller");
2666 	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers",
2667 	    CTLFLAG_RD, &queue->max_avail_queue_len, 0,
2668 	    "Max value of availBuffers since last driver reset");
2669 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued",
2670 	    CTLFLAG_RD, &queue->queue_adds, 0,
2671 	    "Total buffers added to queue");
2672 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued",
2673 	    CTLFLAG_RD, &queue->queue_removes, 0,
2674 	    "Total buffers removed from queue");
2675 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "queueRestart",
2676 	    CTLFLAG_RD, &queue->queue_restart, 0,
2677 	    "Total times the queue has been restarted");
2678 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain",
2679 	    CTLFLAG_RD, &queue->longest_chain, 0,
2680 	    "Max buffers used for a single packet");
2681 }
2682 
2683 static void
2684 cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
2685 	struct cpsw_softc *sc)
2686 {
2687 	struct sysctl_oid_list *parent;
2688 
2689 	parent = SYSCTL_CHILDREN(node);
2690 	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets",
2691 	    CTLFLAG_RD, &sc->watchdog.resets, 0,
2692 	    "Total number of watchdog resets");
2693 }
2694 
2695 static void
2696 cpsw_add_sysctls(struct cpsw_softc *sc)
2697 {
2698 	struct sysctl_ctx_list *ctx;
2699 	struct sysctl_oid *stats_node, *queue_node, *node;
2700 	struct sysctl_oid_list *parent, *stats_parent, *queue_parent;
2701 	struct sysctl_oid_list *ports_parent, *port_parent;
2702 	char port[16];
2703 	int i;
2704 
2705 	ctx = device_get_sysctl_ctx(sc->dev);
2706 	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2707 
2708 	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug",
2709 	    CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages");
2710 
2711 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs",
2712 	    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU",
2713 	    "Time since driver attach");
2714 
2715 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "intr_coalesce_us",
2716 	    CTLTYPE_UINT | CTLFLAG_RW, sc, 0, cpsw_intr_coalesce, "IU",
2717 	    "minimum time between interrupts");
2718 
2719 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports",
2720 	    CTLFLAG_RD, NULL, "CPSW Ports Statistics");
2721 	ports_parent = SYSCTL_CHILDREN(node);
2722 	for (i = 0; i < CPSW_PORTS; i++) {
2723 		if (!sc->dualemac && i != sc->active_slave)
2724 			continue;
2725 		port[0] = '0' + i;
2726 		port[1] = '\0';
2727 		node = SYSCTL_ADD_NODE(ctx, ports_parent, OID_AUTO,
2728 		    port, CTLFLAG_RD, NULL, "CPSW Port Statistics");
2729 		port_parent = SYSCTL_CHILDREN(node);
2730 		SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime",
2731 		    CTLTYPE_UINT | CTLFLAG_RD, sc, i,
2732 		    cpsw_stat_uptime, "IU", "Seconds since driver init");
2733 	}
2734 
2735 	stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
2736 				     CTLFLAG_RD, NULL, "CPSW Statistics");
2737 	stats_parent = SYSCTL_CHILDREN(stats_node);
2738 	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
2739 		SYSCTL_ADD_PROC(ctx, stats_parent, i,
2740 				cpsw_stat_sysctls[i].oid,
2741 				CTLTYPE_U64 | CTLFLAG_RD, sc, 0,
2742 				cpsw_stats_sysctl, "IU",
2743 				cpsw_stat_sysctls[i].oid);
2744 	}
2745 
2746 	queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue",
2747 	    CTLFLAG_RD, NULL, "CPSW Queue Statistics");
2748 	queue_parent = SYSCTL_CHILDREN(queue_node);
2749 
2750 	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx",
2751 	    CTLFLAG_RD, NULL, "TX Queue Statistics");
2752 	cpsw_add_queue_sysctls(ctx, node, &sc->tx);
2753 
2754 	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx",
2755 	    CTLFLAG_RD, NULL, "RX Queue Statistics");
2756 	cpsw_add_queue_sysctls(ctx, node, &sc->rx);
2757 
2758 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog",
2759 	    CTLFLAG_RD, NULL, "Watchdog Statistics");
2760 	cpsw_add_watchdog_sysctls(ctx, node, sc);
2761 }
2762 
2763 #ifdef CPSW_ETHERSWITCH
2764 static etherswitch_info_t etherswitch_info = {
2765 	.es_nports =		CPSW_PORTS + 1,
2766 	.es_nvlangroups =	CPSW_VLANS,
2767 	.es_name =		"TI Common Platform Ethernet Switch (CPSW)",
2768 	.es_vlan_caps =		ETHERSWITCH_VLAN_DOT1Q,
2769 };
2770 
2771 static etherswitch_info_t *
2772 cpsw_getinfo(device_t dev)
2773 {
2774 	return (&etherswitch_info);
2775 }
2776 
2777 static int
2778 cpsw_getport(device_t dev, etherswitch_port_t *p)
2779 {
2780 	int err;
2781 	struct cpsw_softc *sc;
2782 	struct cpswp_softc *psc;
2783 	struct ifmediareq *ifmr;
2784 	uint32_t reg;
2785 
2786 	if (p->es_port < 0 || p->es_port > CPSW_PORTS)
2787 		return (ENXIO);
2788 
2789 	err = 0;
2790 	sc = device_get_softc(dev);
2791 	if (p->es_port == CPSW_CPU_PORT) {
2792 		p->es_flags |= ETHERSWITCH_PORT_CPU;
2793 		ifmr = &p->es_ifmr;
2794 		ifmr->ifm_current = ifmr->ifm_active =
2795 		    IFM_ETHER | IFM_1000_T | IFM_FDX;
2796 		ifmr->ifm_mask = 0;
2797 		ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
2798 		ifmr->ifm_count = 0;
2799 	} else {
2800 		psc = device_get_softc(sc->port[p->es_port - 1].dev);
2801 		err = ifmedia_ioctl(psc->ifp, &p->es_ifr,
2802 		    &psc->mii->mii_media, SIOCGIFMEDIA);
2803 	}
2804 	reg = cpsw_read_4(sc, CPSW_PORT_P_VLAN(p->es_port));
2805 	p->es_pvid = reg & ETHERSWITCH_VID_MASK;
2806 
2807 	reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port));
2808 	if (reg & ALE_PORTCTL_DROP_UNTAGGED)
2809 		p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED;
2810 	if (reg & ALE_PORTCTL_INGRESS)
2811 		p->es_flags |= ETHERSWITCH_PORT_INGRESS;
2812 
2813 	return (err);
2814 }
2815 
2816 static int
2817 cpsw_setport(device_t dev, etherswitch_port_t *p)
2818 {
2819 	struct cpsw_softc *sc;
2820 	struct cpswp_softc *psc;
2821 	struct ifmedia *ifm;
2822 	uint32_t reg;
2823 
2824 	if (p->es_port < 0 || p->es_port > CPSW_PORTS)
2825 		return (ENXIO);
2826 
2827 	sc = device_get_softc(dev);
2828 	if (p->es_pvid != 0) {
2829 		cpsw_write_4(sc, CPSW_PORT_P_VLAN(p->es_port),
2830 		    p->es_pvid & ETHERSWITCH_VID_MASK);
2831 	}
2832 
2833 	reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port));
2834 	if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED)
2835 		reg |= ALE_PORTCTL_DROP_UNTAGGED;
2836 	else
2837 		reg &= ~ALE_PORTCTL_DROP_UNTAGGED;
2838 	if (p->es_flags & ETHERSWITCH_PORT_INGRESS)
2839 		reg |= ALE_PORTCTL_INGRESS;
2840 	else
2841 		reg &= ~ALE_PORTCTL_INGRESS;
2842 	cpsw_write_4(sc, CPSW_ALE_PORTCTL(p->es_port), reg);
2843 
2844 	/* CPU port does not allow media settings. */
2845 	if (p->es_port == CPSW_CPU_PORT)
2846 		return (0);
2847 
2848 	psc = device_get_softc(sc->port[p->es_port - 1].dev);
2849 	ifm = &psc->mii->mii_media;
2850 
2851 	return (ifmedia_ioctl(psc->ifp, &p->es_ifr, ifm, SIOCSIFMEDIA));
2852 }
2853 
2854 static int
2855 cpsw_getconf(device_t dev, etherswitch_conf_t *conf)
2856 {
2857 
2858 	/* Return the VLAN mode. */
2859 	conf->cmd = ETHERSWITCH_CONF_VLAN_MODE;
2860 	conf->vlan_mode = ETHERSWITCH_VLAN_DOT1Q;
2861 
2862 	return (0);
2863 }
2864 
2865 static int
2866 cpsw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg)
2867 {
2868 	int i, vid;
2869 	uint32_t ale_entry[3];
2870 	struct cpsw_softc *sc;
2871 
2872 	sc = device_get_softc(dev);
2873 
2874 	if (vg->es_vlangroup >= CPSW_VLANS)
2875 		return (EINVAL);
2876 
2877 	vg->es_vid = 0;
2878 	vid = cpsw_vgroups[vg->es_vlangroup].vid;
2879 	if (vid == -1)
2880 		return (0);
2881 
2882 	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
2883 		cpsw_ale_read_entry(sc, i, ale_entry);
2884 		if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN)
2885 			continue;
2886 		if (vid != ALE_VLAN(ale_entry))
2887 			continue;
2888 
2889 		vg->es_fid = 0;
2890 		vg->es_vid = ALE_VLAN(ale_entry) | ETHERSWITCH_VID_VALID;
2891 		vg->es_member_ports = ALE_VLAN_MEMBERS(ale_entry);
2892 		vg->es_untagged_ports = ALE_VLAN_UNTAG(ale_entry);
2893 	}
2894 
2895 	return (0);
2896 }
2897 
2898 static void
2899 cpsw_remove_vlan(struct cpsw_softc *sc, int vlan)
2900 {
2901 	int i;
2902 	uint32_t ale_entry[3];
2903 
2904 	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
2905 		cpsw_ale_read_entry(sc, i, ale_entry);
2906 		if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN)
2907 			continue;
2908 		if (vlan != ALE_VLAN(ale_entry))
2909 			continue;
2910 		ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
2911 		cpsw_ale_write_entry(sc, i, ale_entry);
2912 		break;
2913 	}
2914 }
2915 
2916 static int
2917 cpsw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg)
2918 {
2919 	int i;
2920 	struct cpsw_softc *sc;
2921 
2922 	sc = device_get_softc(dev);
2923 
2924 	for (i = 0; i < CPSW_VLANS; i++) {
2925 		/* Is this VLAN ID in use by another vlangroup? */
2926 		if (vg->es_vlangroup != i && cpsw_vgroups[i].vid == vg->es_vid)
2927 			return (EINVAL);
2928 	}
2929 
2930 	if (vg->es_vid == 0) {
2931 		if (cpsw_vgroups[vg->es_vlangroup].vid == -1)
2932 			return (0);
2933 		cpsw_remove_vlan(sc, cpsw_vgroups[vg->es_vlangroup].vid);
2934 		cpsw_vgroups[vg->es_vlangroup].vid = -1;
2935 		vg->es_untagged_ports = 0;
2936 		vg->es_member_ports = 0;
2937 		vg->es_vid = 0;
2938 		return (0);
2939 	}
2940 
2941 	vg->es_vid &= ETHERSWITCH_VID_MASK;
2942 	vg->es_member_ports &= CPSW_PORTS_MASK;
2943 	vg->es_untagged_ports &= CPSW_PORTS_MASK;
2944 
2945 	if (cpsw_vgroups[vg->es_vlangroup].vid != -1 &&
2946 	    cpsw_vgroups[vg->es_vlangroup].vid != vg->es_vid)
2947 		return (EINVAL);
2948 
2949 	cpsw_vgroups[vg->es_vlangroup].vid = vg->es_vid;
2950 	cpsw_ale_update_vlan_table(sc, vg->es_vid, vg->es_member_ports,
2951 	    vg->es_untagged_ports, vg->es_member_ports, 0);
2952 
2953 	return (0);
2954 }
2955 
2956 static int
2957 cpsw_readreg(device_t dev, int addr)
2958 {
2959 
2960 	/* Not supported. */
2961 	return (0);
2962 }
2963 
2964 static int
2965 cpsw_writereg(device_t dev, int addr, int value)
2966 {
2967 
2968 	/* Not supported. */
2969 	return (0);
2970 }
2971 
2972 static int
2973 cpsw_readphy(device_t dev, int phy, int reg)
2974 {
2975 
2976 	/* Not supported. */
2977 	return (0);
2978 }
2979 
2980 static int
2981 cpsw_writephy(device_t dev, int phy, int reg, int data)
2982 {
2983 
2984 	/* Not supported. */
2985 	return (0);
2986 }
2987 #endif
2988