xref: /freebsd/sys/arm/ti/cpsw/if_cpsw.c (revision 955c8cbb4960e6cf3602de144b1b9154a5092968)
1 /*-
2  * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /*
28  * TI 3 Port Switch Ethernet (CPSW) Driver
29  * Found in TI8148, AM335x SoCs
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/endian.h>
38 #include <sys/mbuf.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/kernel.h>
42 #include <sys/module.h>
43 #include <sys/socket.h>
44 #include <sys/sysctl.h>
45 
46 #include <net/ethernet.h>
47 #include <net/bpf.h>
48 #include <net/if.h>
49 #include <net/if_arp.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 #include <net/if_types.h>
53 #include <net/if_vlan_var.h>
54 
55 #include <netinet/in_systm.h>
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 
59 #include <sys/sockio.h>
60 #include <sys/bus.h>
61 #include <machine/bus.h>
62 #include <sys/rman.h>
63 #include <machine/resource.h>
64 
65 #include <dev/mii/mii.h>
66 #include <dev/mii/miivar.h>
67 
68 #include <dev/fdt/fdt_common.h>
69 #include <dev/ofw/ofw_bus.h>
70 #include <dev/ofw/ofw_bus_subr.h>
71 
72 #include "if_cpswreg.h"
73 #include "if_cpswvar.h"
74 
75 #include <arm/ti/ti_scm.h>
76 
77 #include "miibus_if.h"
78 
79 static int cpsw_probe(device_t dev);
80 static int cpsw_attach(device_t dev);
81 static int cpsw_detach(device_t dev);
82 static int cpsw_shutdown(device_t dev);
83 static int cpsw_suspend(device_t dev);
84 static int cpsw_resume(device_t dev);
85 
86 static int cpsw_miibus_readreg(device_t dev, int phy, int reg);
87 static int cpsw_miibus_writereg(device_t dev, int phy, int reg, int value);
88 
89 static int cpsw_ifmedia_upd(struct ifnet *ifp);
90 static void cpsw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
91 
92 static void cpsw_init(void *arg);
93 static void cpsw_init_locked(void *arg);
94 static void cpsw_start(struct ifnet *ifp);
95 static void cpsw_start_locked(struct ifnet *ifp);
96 static void cpsw_stop_locked(struct cpsw_softc *sc);
97 static int cpsw_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
98 static int cpsw_init_slot_lists(struct cpsw_softc *sc);
99 static void cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot);
100 static void cpsw_fill_rx_queue_locked(struct cpsw_softc *sc);
101 static void cpsw_tx_watchdog(struct cpsw_softc *sc);
102 
103 static void cpsw_intr_rx_thresh(void *arg);
104 static void cpsw_intr_rx(void *arg);
105 static void cpsw_intr_rx_locked(void *arg);
106 static void cpsw_intr_tx(void *arg);
107 static void cpsw_intr_tx_locked(void *arg);
108 static void cpsw_intr_misc(void *arg);
109 
110 static void cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry);
111 static void cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry);
112 static int cpsw_ale_uc_entry_set(struct cpsw_softc *sc, uint8_t port, uint8_t *mac);
113 static int cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, uint8_t *mac);
114 #ifdef CPSW_DEBUG
115 static void cpsw_ale_dump_table(struct cpsw_softc *sc);
116 #endif
117 
/* Newbus method table: device lifecycle plus the MII register accessors. */
static device_method_t cpsw_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cpsw_probe),
	DEVMETHOD(device_attach,	cpsw_attach),
	DEVMETHOD(device_detach,	cpsw_detach),
	DEVMETHOD(device_shutdown,	cpsw_shutdown),
	DEVMETHOD(device_suspend,	cpsw_suspend),
	DEVMETHOD(device_resume,	cpsw_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	cpsw_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cpsw_miibus_writereg),
	{ 0, 0 }
};
131 
/*
 * Driver glue: "cpsw" attaches under simplebus (FDT), and a miibus child
 * is hung off it for the PHY.  The module depends on ether and miibus.
 */
static driver_t cpsw_driver = {
	"cpsw",
	cpsw_methods,
	sizeof(struct cpsw_softc),
};

static devclass_t cpsw_devclass;


DRIVER_MODULE(cpsw, simplebus, cpsw_driver, cpsw_devclass, 0, 0);
DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(cpsw, ether, 1, 1, 1);
MODULE_DEPEND(cpsw, miibus, 1, 1, 1);
145 
/*
 * Bus resources: one register window plus four IRQs.  The IRQ order here
 * matches the handler order in cpsw_intrs[] (rx_thresh, rx, tx, misc).
 */
static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};
154 
/*
 * Handler/description pairs for the four IRQ resources, in resource order.
 * The array has one extra zero-filled terminator entry.
 */
static struct {
	driver_intr_t *handler;
	char * description;
} cpsw_intrs[CPSW_INTR_COUNT + 1] = {
	{ cpsw_intr_rx_thresh, "CPSW RX threshold interrupt" },
	{ cpsw_intr_rx,	"CPSW RX interrupt" },
	{ cpsw_intr_tx,	"CPSW TX interrupt" },
	{ cpsw_intr_misc, "CPSW misc interrupt" },
};
164 
/* Locking macros */
/*
 * TX and RX paths each have their own mutex.  A single-side lock asserts
 * that the *other* side's lock is not held, so the only way to hold both
 * is CPSW_GLOBAL_LOCK, which always takes TX first, then RX.
 */
#define CPSW_TX_LOCK(sc) do {					\
		mtx_assert(&(sc)->rx_lock, MA_NOTOWNED);		\
		mtx_lock(&(sc)->tx_lock);				\
} while (0)

#define CPSW_TX_UNLOCK(sc)	mtx_unlock(&(sc)->tx_lock)
#define CPSW_TX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->tx_lock, MA_OWNED)

#define CPSW_RX_LOCK(sc) do {					\
		mtx_assert(&(sc)->tx_lock, MA_NOTOWNED);		\
		mtx_lock(&(sc)->rx_lock);				\
} while (0)

#define CPSW_RX_UNLOCK(sc)		mtx_unlock(&(sc)->rx_lock)
#define CPSW_RX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->rx_lock, MA_OWNED)

/*
 * Take both locks in TX-then-RX order.  Panics if exactly one of the two
 * is already held, since locking the other one here would violate the
 * lock order established above.
 */
#define CPSW_GLOBAL_LOCK(sc) do {					\
		if ((mtx_owned(&(sc)->tx_lock) ? 1 : 0) !=	\
		    (mtx_owned(&(sc)->rx_lock) ? 1 : 0)) {		\
			panic("cpsw deadlock possibility detection!");	\
		}							\
		mtx_lock(&(sc)->tx_lock);				\
		mtx_lock(&(sc)->rx_lock);				\
} while (0)

/* Release both locks in the reverse of the acquisition order. */
#define CPSW_GLOBAL_UNLOCK(sc) do {					\
		CPSW_RX_UNLOCK(sc);				\
		CPSW_TX_UNLOCK(sc);				\
} while (0)

#define CPSW_GLOBAL_LOCK_ASSERT(sc) do {				\
		CPSW_TX_LOCK_ASSERT(sc);				\
		CPSW_RX_LOCK_ASSERT(sc);				\
} while (0)
201 
202 #include <machine/stdarg.h>
203 static void
204 cpsw_debugf_head(const char *funcname)
205 {
206 	int t = (int)(time_second % (24 * 60 * 60));
207 
208 	printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname);
209 }
210 
211 static void
212 cpsw_debugf(const char *fmt, ...)
213 {
214 	va_list ap;
215 
216 	va_start(ap, fmt);
217 	vprintf(fmt, ap);
218 	va_end(ap);
219 	printf("\n");
220 
221 }
222 
/*
 * Conditional debug printf: emits output only when the interface has
 * IFF_DEBUG set.  Relies on a variable named "sc" being in scope at the
 * call site; "a" is a parenthesized argument list passed to cpsw_debugf().
 */
#define CPSW_DEBUGF(a) do {						\
		if (sc->cpsw_if_flags & IFF_DEBUG) {			\
			cpsw_debugf_head(__func__);			\
			cpsw_debugf a;					\
		}							\
} while (0)
229 
230 static int
231 cpsw_probe(device_t dev)
232 {
233 
234 	if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
235 		return (ENXIO);
236 
237 	device_set_desc(dev, "3-port Switch Ethernet Subsystem");
238 	return (BUS_PROBE_DEFAULT);
239 }
240 
241 static int
242 cpsw_attach(device_t dev)
243 {
244 	struct cpsw_softc *sc = device_get_softc(dev);
245 	struct mii_softc *miisc;
246 	struct ifnet *ifp;
247 	void *phy_sc;
248 	int i, error, phy;
249 	uint32_t reg;
250 
251 	CPSW_DEBUGF((""));
252 
253 	sc->dev = dev;
254 	sc->node = ofw_bus_get_node(dev);
255 
256 	/* Get phy address from fdt */
257 	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, &phy_sc) != 0) {
258 		device_printf(dev, "failed to get PHY address from FDT\n");
259 		return (ENXIO);
260 	}
261 	/* Initialize mutexes */
262 	mtx_init(&sc->tx_lock, device_get_nameunit(dev),
263 		"cpsw TX lock", MTX_DEF);
264 	mtx_init(&sc->rx_lock, device_get_nameunit(dev),
265 		"cpsw RX lock", MTX_DEF);
266 
267 	/* Allocate IO and IRQ resources */
268 	error = bus_alloc_resources(dev, res_spec, sc->res);
269 	if (error) {
270 		device_printf(dev, "could not allocate resources\n");
271 		cpsw_detach(dev);
272 		return (ENXIO);
273 	}
274 
275 	reg = cpsw_read_4(CPSW_SS_IDVER);
276 	device_printf(dev, "Version %d.%d (%d)\n", (reg >> 8 & 0x7),
277 		reg & 0xFF, (reg >> 11) & 0x1F);
278 
279 	//cpsw_add_sysctls(sc); TODO
280 
281 	/* Allocate a busdma tag and DMA safe memory for mbufs. */
282 	error = bus_dma_tag_create(
283 		bus_get_dma_tag(sc->dev),	/* parent */
284 		1, 0,				/* alignment, boundary */
285 		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
286 		BUS_SPACE_MAXADDR,		/* highaddr */
287 		NULL, NULL,			/* filtfunc, filtfuncarg */
288 		MCLBYTES, 1,			/* maxsize, nsegments */
289 		MCLBYTES, 0,			/* maxsegsz, flags */
290 		NULL, NULL,			/* lockfunc, lockfuncarg */
291 		&sc->mbuf_dtag);		/* dmatag */
292 	if (error) {
293 		device_printf(dev, "bus_dma_tag_create failed\n");
294 		cpsw_detach(dev);
295 		return (ENOMEM);
296 	}
297 
298 	/* Initialize the tx_avail and rx_avail lists. */
299 	error = cpsw_init_slot_lists(sc);
300 	if (error) {
301 		device_printf(dev, "failed to allocate dmamaps\n");
302 		cpsw_detach(dev);
303 		return (ENOMEM);
304 	}
305 
306 	/* Allocate network interface */
307 	ifp = sc->ifp = if_alloc(IFT_ETHER);
308 	if (ifp == NULL) {
309 		device_printf(dev, "if_alloc() failed\n");
310 		cpsw_detach(dev);
311 		return (ENOMEM);
312 	}
313 
314 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
315 	ifp->if_softc = sc;
316 	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
317 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN?
318 	ifp->if_capenable = ifp->if_capabilities;
319 
320 	ifp->if_init = cpsw_init;
321 	ifp->if_start = cpsw_start;
322 	ifp->if_ioctl = cpsw_ioctl;
323 
324 	ifp->if_snd.ifq_drv_maxlen = CPSW_MAX_TX_BUFFERS - 1;
325 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
326 	IFQ_SET_READY(&ifp->if_snd);
327 
328 	/* Get high part of MAC address from control module (mac_id0_hi) */
329 	ti_scm_reg_read_4(0x634, &reg);
330 	sc->mac_addr[0] = reg & 0xFF;
331 	sc->mac_addr[1] = (reg >>  8) & 0xFF;
332 	sc->mac_addr[2] = (reg >> 16) & 0xFF;
333 	sc->mac_addr[3] = (reg >> 24) & 0xFF;
334 
335 	/* Get low part of MAC address from control module (mac_id0_lo) */
336 	ti_scm_reg_read_4(0x630, &reg);
337 	sc->mac_addr[4] = reg & 0xFF;
338 	sc->mac_addr[5] = (reg >>  8) & 0xFF;
339 
340 	ether_ifattach(ifp, sc->mac_addr);
341 	callout_init(&sc->wd_callout, 0);
342 
343 	/* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
344 	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
345 	cpsw_write_4(MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF);
346 
347 	/* Attach PHY(s) */
348 	error = mii_attach(dev, &sc->miibus, ifp, cpsw_ifmedia_upd,
349 	    cpsw_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
350 	if (error) {
351 		device_printf(dev, "attaching PHYs failed\n");
352 		cpsw_detach(dev);
353 		return (error);
354 	}
355 	sc->mii = device_get_softc(sc->miibus);
356 
357 	/* Tell the MAC where to find the PHY so autoneg works */
358 	miisc = LIST_FIRST(&sc->mii->mii_phys);
359 
360 	/* Select PHY and enable interrupts */
361 	cpsw_write_4(MDIOUSERPHYSEL0, 1 << 6 | (miisc->mii_phy & 0x1F));
362 
363 	/* Attach interrupt handlers */
364 	for (i = 1; i <= CPSW_INTR_COUNT; ++i) {
365 		error = bus_setup_intr(dev, sc->res[i],
366 		    INTR_TYPE_NET | INTR_MPSAFE,
367 		    NULL, *cpsw_intrs[i - 1].handler,
368 		    sc, &sc->ih_cookie[i - 1]);
369 		if (error) {
370 			device_printf(dev, "could not setup %s\n",
371 			    cpsw_intrs[i].description);
372 			cpsw_detach(dev);
373 			return (error);
374 		}
375 	}
376 
377 	return (0);
378 }
379 
380 static int
381 cpsw_detach(device_t dev)
382 {
383 	struct cpsw_softc *sc = device_get_softc(dev);
384 	int error, i;
385 
386 	CPSW_DEBUGF((""));
387 
388 	/* Stop controller and free TX queue */
389 	if (device_is_attached(dev)) {
390 		ether_ifdetach(sc->ifp);
391 		CPSW_GLOBAL_LOCK(sc);
392 		cpsw_stop_locked(sc);
393 		CPSW_GLOBAL_UNLOCK(sc);
394 		callout_drain(&sc->wd_callout);
395 	}
396 
397 	bus_generic_detach(dev);
398 	device_delete_child(dev, sc->miibus);
399 
400 	/* Stop and release all interrupts */
401 	for (i = 0; i < CPSW_INTR_COUNT; ++i) {
402 		if (!sc->ih_cookie[i])
403 			continue;
404 
405 		error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
406 		if (error)
407 			device_printf(dev, "could not release %s\n",
408 			    cpsw_intrs[i + 1].description);
409 	}
410 
411 	/* Free dmamaps and mbufs */
412 	for (i = 0; i < CPSW_MAX_TX_BUFFERS; i++) {
413 		cpsw_free_slot(sc, &sc->_tx_slots[i]);
414 	}
415 	for (i = 0; i < CPSW_MAX_RX_BUFFERS; i++) {
416 		cpsw_free_slot(sc, &sc->_rx_slots[i]);
417 	}
418 
419 	/* Free DMA tag */
420 	error = bus_dma_tag_destroy(sc->mbuf_dtag);
421 	KASSERT(error == 0, ("Unable to destroy DMA tag"));
422 
423 	/* Free IO memory handler */
424 	bus_release_resources(dev, res_spec, sc->res);
425 
426 	/* Destroy mutexes */
427 	mtx_destroy(&sc->rx_lock);
428 	mtx_destroy(&sc->tx_lock);
429 
430 	return (0);
431 }
432 
/* Suspend: take both locks and bring the controller to a full stop. */
static int
cpsw_suspend(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_stop_locked(sc);
	CPSW_GLOBAL_UNLOCK(sc);
	return (0);
}
444 
445 static int
446 cpsw_resume(device_t dev)
447 {
448 	/* XXX TODO XXX */
449 	device_printf(dev, "%s\n", __FUNCTION__);
450 	return (0);
451 }
452 
/* Shutdown: same as suspend -- stop the controller under both locks. */
static int
cpsw_shutdown(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_stop_locked(sc);
	CPSW_GLOBAL_UNLOCK(sc);
	return (0);
}
464 
465 static int
466 cpsw_miibus_ready(struct cpsw_softc *sc)
467 {
468 	uint32_t r, retries = CPSW_MIIBUS_RETRIES;
469 
470 	while (--retries) {
471 		r = cpsw_read_4(MDIOUSERACCESS0);
472 		if ((r & 1 << 31) == 0)
473 			return 1;
474 		DELAY(CPSW_MIIBUS_DELAY);
475 	}
476 	return 0;
477 }
478 
479 static int
480 cpsw_miibus_readreg(device_t dev, int phy, int reg)
481 {
482 	struct cpsw_softc *sc = device_get_softc(dev);
483 	uint32_t cmd, r;
484 
485 	if (!cpsw_miibus_ready(sc)) {
486 		device_printf(dev, "MDIO not ready to read\n");
487 		return 0;
488 	}
489 
490 	/* Set GO, reg, phy */
491 	cmd = 1 << 31 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
492 	cpsw_write_4(MDIOUSERACCESS0, cmd);
493 
494 	if (!cpsw_miibus_ready(sc)) {
495 		device_printf(dev, "MDIO timed out during read\n");
496 		return 0;
497 	}
498 
499 	r = cpsw_read_4(MDIOUSERACCESS0);
500 	if((r & 1 << 29) == 0) {
501 		device_printf(dev, "Failed to read from PHY.\n");
502 		r = 0;
503 	}
504 	return (r & 0xFFFF);
505 }
506 
507 static int
508 cpsw_miibus_writereg(device_t dev, int phy, int reg, int value)
509 {
510 	struct cpsw_softc *sc = device_get_softc(dev);
511 	uint32_t cmd;
512 
513 	if (!cpsw_miibus_ready(sc)) {
514 		device_printf(dev, "MDIO not ready to write\n");
515 		return 0;
516 	}
517 
518 	/* Set GO, WRITE, reg, phy, and value */
519 	cmd = 3 << 30 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16
520 	    | (value & 0xFFFF);
521 	cpsw_write_4(MDIOUSERACCESS0, cmd);
522 
523 	if (!cpsw_miibus_ready(sc)) {
524 		device_printf(dev, "MDIO timed out during write\n");
525 		return 0;
526 	}
527 
528 	if((cpsw_read_4(MDIOUSERACCESS0) & (1 << 29)) == 0)
529 		device_printf(dev, "Failed to write to PHY.\n");
530 
531 	return 0;
532 }
533 
534 static int
535 cpsw_init_slot_lists(struct cpsw_softc *sc)
536 {
537 	int i;
538 
539 	STAILQ_INIT(&sc->rx_active);
540 	STAILQ_INIT(&sc->rx_avail);
541 	STAILQ_INIT(&sc->tx_active);
542 	STAILQ_INIT(&sc->tx_avail);
543 
544 	/* Put the slot descriptors onto the avail lists. */
545 	for (i = 0; i < CPSW_MAX_TX_BUFFERS; i++) {
546 		struct cpsw_slot *slot = &sc->_tx_slots[i];
547 		slot->index = i;
548 		/* XXX TODO: Remove this from here; allocate dmamaps lazily
549 		   in the encap routine to reduce memory usage. */
550 		if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
551 			if_printf(sc->ifp, "failed to create dmamap for tx mbuf\n");
552 			return (ENOMEM);
553 		}
554 		STAILQ_INSERT_TAIL(&sc->tx_avail, slot, next);
555 	}
556 
557 	for (i = 0; i < CPSW_MAX_RX_BUFFERS; i++) {
558 		struct cpsw_slot *slot = &sc->_rx_slots[i];
559 		slot->index = i;
560 		if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
561 			if_printf(sc->ifp, "failed to create dmamap for rx mbuf\n");
562 			return (ENOMEM);
563 		}
564 		STAILQ_INSERT_TAIL(&sc->rx_avail, slot, next);
565 	}
566 
567 	return (0);
568 }
569 
570 static void
571 cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
572 {
573 	int error;
574 
575 	if (slot->dmamap) {
576 		error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap);
577 		KASSERT(error == 0, ("Mapping still active"));
578 		slot->dmamap = NULL;
579 	}
580 	if (slot->mbuf) {
581 		m_freem(slot->mbuf);
582 		slot->mbuf = NULL;
583 	}
584 }
585 
586 /*
587  * Pad the packet to the minimum length for Ethernet.
588  * (CPSW hardware doesn't do this for us.)
589  */
590 static int
591 cpsw_pad(struct mbuf *m)
592 {
593 	int padlen = ETHER_MIN_LEN - m->m_pkthdr.len;
594 	struct mbuf *last, *n;
595 
596 	if (padlen <= 0)
597 		return (0);
598 
599 	/* If there's only the packet-header and we can pad there, use it. */
600 	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
601 	    M_TRAILINGSPACE(m) >= padlen) {
602 		last = m;
603 	} else {
604 		/*
605 		 * Walk packet chain to find last mbuf. We will either
606 		 * pad there, or append a new mbuf and pad it.
607 		 */
608 		for (last = m; last->m_next != NULL; last = last->m_next)
609 			;
610 		if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
611 			/* Allocate new empty mbuf, pad it. Compact later. */
612 			MGET(n, M_NOWAIT, MT_DATA);
613 			if (n == NULL)
614 				return (ENOBUFS);
615 			n->m_len = 0;
616 			last->m_next = n;
617 			last = n;
618 		}
619 	}
620 
621 	/* Now zero the pad area. */
622 	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
623 	last->m_len += padlen;
624 	m->m_pkthdr.len += padlen;
625 
626 	return (0);
627 }
628 
/* if_start entry point: serialize on the TX lock and enqueue packets. */
static void
cpsw_start(struct ifnet *ifp)
{
	struct cpsw_softc *sc = ifp->if_softc;

	CPSW_TX_LOCK(sc);
	cpsw_start_locked(ifp);
	CPSW_TX_UNLOCK(sc);
}
638 
/*
 * Dequeue packets from the interface send queue, map them for DMA, build
 * TX buffer descriptors for them, and link the new descriptors onto the
 * hardware TX queue.  Called with the TX lock held.
 */
static void
cpsw_start_locked(struct ifnet *ifp)
{
	bus_dma_segment_t seg[1];
	struct cpsw_cpdma_bd bd;
	struct cpsw_softc *sc = ifp->if_softc;
	struct cpsw_queue newslots = STAILQ_HEAD_INITIALIZER(newslots);
	struct cpsw_slot *slot, *prev_slot = NULL, *first_new_slot;
	struct mbuf *m0, *mtmp;
	int error, nsegs, enqueued = 0;

	CPSW_TX_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	/* Pull pending packets from IF queue and prep them for DMA. */
	for (;;) {
		slot = STAILQ_FIRST(&sc->tx_avail);
		if (slot == NULL) {
			/* Out of TX slots: tell the stack to stop feeding us. */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if ((error = cpsw_pad(m0))) {
			if_printf(ifp,
			    "%s: Dropping packet; could not pad\n", __func__);
			m_freem(m0);
			continue;
		}

		/* TODO: don't defragment here, queue each
		   packet fragment as a separate entry. */
		mtmp = m_defrag(m0, M_NOWAIT);
		if (mtmp)
			m0 = mtmp;

		slot->mbuf = m0;
		/* Create mapping in DMA memory */
		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
		    m0, seg, &nsegs, BUS_DMA_NOWAIT);
		KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
		KASSERT(error == 0, ("DMA error (error=%d)", error));
		if (error != 0 || nsegs != 1) {
			if_printf(ifp,
			    "%s: Can't load packet for DMA (nsegs=%d, error=%d), dropping packet\n",
			    __func__, nsegs, error);
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			m_freem(m0);
			break;
		}
		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap,
				BUS_DMASYNC_PREWRITE);

		/* Chain the new descriptor after the previous one we built. */
		if (prev_slot != NULL)
			cpsw_cpdma_write_txbd_next(prev_slot->index,
			    cpsw_cpdma_txbd_paddr(slot->index));
		bd.next = 0;
		bd.bufptr = seg->ds_addr;
		bd.bufoff = 0;
		bd.buflen = seg->ds_len;
		bd.pktlen = seg->ds_len;
		bd.flags = 7 << 13;	/* Set OWNERSHIP, SOP, EOP */
		cpsw_cpdma_write_txbd(slot->index, &bd);
		++enqueued;

		prev_slot = slot;
		STAILQ_REMOVE_HEAD(&sc->tx_avail, next);
		STAILQ_INSERT_TAIL(&newslots, slot, next);
		BPF_MTAP(ifp, m0);
	}

	if (STAILQ_EMPTY(&newslots))
		return;

	/* Attach the list of new buffers to the hardware TX queue. */
	prev_slot = STAILQ_LAST(&sc->tx_active, cpsw_slot, next);
	first_new_slot = STAILQ_FIRST(&newslots);
	STAILQ_CONCAT(&sc->tx_active, &newslots);
	if (prev_slot == NULL) {
		/* Start the TX queue fresh. */
		cpsw_write_4(CPSW_CPDMA_TX_HDP(0),
		    cpsw_cpdma_txbd_paddr(first_new_slot->index));
	} else {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_txbd_next(prev_slot->index,
		   cpsw_cpdma_txbd_paddr(first_new_slot->index));
		/* If underrun, restart queue. */
		if (cpsw_cpdma_read_txbd_flags(prev_slot->index) & CPDMA_BD_EOQ)
			cpsw_write_4(CPSW_CPDMA_TX_HDP(0),
			    cpsw_cpdma_txbd_paddr(first_new_slot->index));
	}
	/* Update queue-depth statistics. */
	sc->tx_enqueues += enqueued;
	sc->tx_queued += enqueued;
	if (sc->tx_queued > sc->tx_max_queued) {
		sc->tx_max_queued = sc->tx_queued;
		CPSW_DEBUGF(("New TX high water mark %d", sc->tx_queued));
	}
}
743 
/*
 * Bring the controller to a full stop: tear down the RX and TX DMA
 * channels, reclaim all buffer slots, soft-reset every hardware block and
 * mask all interrupts.  Entered with both locks held; temporarily drops
 * them while waiting for the teardown interrupts to be processed.
 */
static void
cpsw_stop_locked(struct cpsw_softc *sc)
{
	struct ifnet *ifp;
	int i;

	CPSW_DEBUGF((""));

	CPSW_GLOBAL_LOCK_ASSERT(sc);

	ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Disable interface */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Stop tick engine */
	callout_stop(&sc->wd_callout);

	/* Wait for hardware to clear pending ops. */
	/* Locks are dropped here because the interrupt handlers we call
	 * below take them themselves. */
	CPSW_GLOBAL_UNLOCK(sc);
	CPSW_DEBUGF(("starting RX and TX teardown"));
	cpsw_write_4(CPSW_CPDMA_RX_TEARDOWN, 0);
	cpsw_write_4(CPSW_CPDMA_TX_TEARDOWN, 0);
	i = 0;
	cpsw_intr_rx(sc); // Try clearing without delay.
	cpsw_intr_tx(sc);
	/* NOTE(review): this wait has no upper bound; if the hardware never
	 * completes the teardown this loop spins forever. */
	while (sc->rx_running || sc->tx_running) {
		DELAY(10);
		cpsw_intr_rx(sc);
		cpsw_intr_tx(sc);
		++i;
	}
	CPSW_DEBUGF(("finished RX and TX teardown (%d tries)", i));
	CPSW_GLOBAL_LOCK(sc);

	/* All slots are now available */
	STAILQ_CONCAT(&sc->rx_avail, &sc->rx_active);
	STAILQ_CONCAT(&sc->tx_avail, &sc->tx_active);
	CPSW_DEBUGF(("%d buffers dropped at TX reset", sc->tx_queued));
	sc->tx_queued = 0;

	/* Reset writer */
	cpsw_write_4(CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(CPSW_WR_SOFT_RESET) & 1)
		;

	/* Reset SS */
	cpsw_write_4(CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(CPSW_SS_SOFT_RESET) & 1)
		;

	/* Reset Sliver port 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Reset */
		cpsw_write_4(CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(CPSW_SL_SOFT_RESET(i)) & 1)
			;
	}

	/* Reset CPDMA */
	cpsw_write_4(CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(CPSW_CPDMA_SOFT_RESET) & 1)
		;

	/* Disable TX & RX DMA */
	cpsw_write_4(CPSW_CPDMA_TX_CONTROL, 0);
	cpsw_write_4(CPSW_CPDMA_RX_CONTROL, 0);

	/* Disable TX and RX interrupts for all cores. */
	for (i = 0; i < 3; ++i) {
		cpsw_write_4(CPSW_WR_C_TX_EN(i), 0x00);
		cpsw_write_4(CPSW_WR_C_RX_EN(i), 0x00);
		cpsw_write_4(CPSW_WR_C_MISC_EN(i), 0x00);
	}

	/* Clear all interrupt Masks */
	cpsw_write_4(CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
	cpsw_write_4(CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
}
826 
/* Enable/disable promiscuous mode; enabling is not implemented yet. */
static void
cpsw_set_promisc(struct cpsw_softc *sc, int set)
{

	if (!set)
		return;
	printf("Promiscuous mode unimplemented\n");
}
834 
/* Enable/disable all-multicast mode; enabling is not implemented yet. */
static void
cpsw_set_allmulti(struct cpsw_softc *sc, int set)
{

	if (!set)
		return;
	printf("All-multicast mode unimplemented\n");
}
842 
/*
 * Ioctl handler.  SIOCSIFFLAGS drives init/stop and promisc/allmulti
 * transitions; media ioctls are forwarded to mii; everything else falls
 * through to ether_ioctl().
 */
static int
cpsw_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct cpsw_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;
	uint32_t changed;

	CPSW_DEBUGF(("command=0x%lx", command));
	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		CPSW_GLOBAL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Already running: react only to flags that
				 * changed since the last SIOCSIFFLAGS. */
				changed = ifp->if_flags ^ sc->cpsw_if_flags;
				CPSW_DEBUGF(("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", changed));
				if (changed & IFF_PROMISC)
					cpsw_set_promisc(sc,
					    ifp->if_flags & IFF_PROMISC);
				if (changed & IFF_ALLMULTI)
					cpsw_set_allmulti(sc,
					    ifp->if_flags & IFF_ALLMULTI);
			} else {
				CPSW_DEBUGF(("SIOCSIFFLAGS: UP but not RUNNING"));
				cpsw_init_locked(sc);
			}
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			CPSW_DEBUGF(("SIOCSIFFLAGS: not UP but RUNNING"));
			cpsw_stop_locked(sc);
		}

		/* Remember flags so the next call can compute "changed". */
		sc->cpsw_if_flags = ifp->if_flags;
		CPSW_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
		CPSW_DEBUGF(("SIOCADDMULTI unimplemented"));
		break;
	case SIOCDELMULTI:
		CPSW_DEBUGF(("SIOCDELMULTI unimplemented"));
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		CPSW_DEBUGF(("ether ioctl"));
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}
895 
896 static void
897 cpsw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
898 {
899 	struct cpsw_softc *sc = ifp->if_softc;
900 	struct mii_data *mii;
901 
902 	CPSW_DEBUGF((""));
903 	CPSW_TX_LOCK(sc);
904 
905 	mii = sc->mii;
906 	mii_pollstat(mii);
907 
908 	ifmr->ifm_active = mii->mii_media_active;
909 	ifmr->ifm_status = mii->mii_media_status;
910 
911 	CPSW_TX_UNLOCK(sc);
912 }
913 
914 
915 static int
916 cpsw_ifmedia_upd(struct ifnet *ifp)
917 {
918 	struct cpsw_softc *sc = ifp->if_softc;
919 
920 	CPSW_DEBUGF((""));
921 	if (ifp->if_flags & IFF_UP) {
922 		CPSW_GLOBAL_LOCK(sc);
923 		sc->cpsw_media_status = sc->mii->mii_media.ifm_media;
924 		mii_mediachg(sc->mii);
925 		cpsw_init_locked(sc);
926 		CPSW_GLOBAL_UNLOCK(sc);
927 	}
928 
929 	return (0);
930 }
931 
/* RX threshold interrupt: unused for now; only logs under IFF_DEBUG. */
static void
cpsw_intr_rx_thresh(void *arg)
{
	struct cpsw_softc *sc = arg;
	CPSW_DEBUGF((""));
}
938 
/*
 * RX interrupt handler: run the locked RX processing loop, then write the
 * CPDMA end-of-interrupt vector (1 = RX) to re-arm the interrupt.
 */
static void
cpsw_intr_rx(void *arg)
{
	struct cpsw_softc *sc = arg;

	CPSW_RX_LOCK(sc);
	cpsw_intr_rx_locked(arg);
	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
	CPSW_RX_UNLOCK(sc);
}
949 
/*
 * Pull completed receive descriptors off the hardware RX queue, hand each
 * packet up the stack, recycle the slots and refill the queue.  Called
 * with the RX lock held; the lock is dropped around if_input().
 */
static void
cpsw_intr_rx_locked(void *arg)
{
	struct cpsw_softc *sc = arg;
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *slot, *last_slot = NULL;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if (!sc->rx_running)
		return;

	/* Pull completed packets off hardware RX queue. */
	slot = STAILQ_FIRST(&sc->rx_active);
	while (slot != NULL) {
		cpsw_cpdma_read_rxbd(slot->index, &bd);
		if (bd.flags & CPDMA_BD_OWNER)
			break; /* Still in use by hardware */

		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			CPSW_DEBUGF(("RX teardown in progress"));
			/* Acknowledge teardown completion to the hardware. */
			cpsw_write_4(CPSW_CPDMA_RX_CP(0), 0xfffffffc);
			sc->rx_running = 0;
			return;
		}

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);

		/* Fill mbuf */
		/* TODO: track SOP/EOP bits to assemble a full mbuf
		   out of received fragments. */
		slot->mbuf->m_hdr.mh_data += bd.bufoff;
		/* pktlen - 4: presumably strips a 4-byte trailing FCS --
		 * TODO confirm against the CPSW documentation. */
		slot->mbuf->m_hdr.mh_len = bd.pktlen - 4;
		slot->mbuf->m_pkthdr.len = bd.pktlen - 4;
		slot->mbuf->m_flags |= M_PKTHDR;
		slot->mbuf->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* check for valid CRC by looking into pkt_err[5:4] */
			if ((bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0) {
				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				slot->mbuf->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Handover packet */
		/* Drop the RX lock across if_input(); the stack may take
		 * its own locks while processing the packet. */
		CPSW_RX_UNLOCK(sc);
		(*ifp->if_input)(ifp, slot->mbuf);
		slot->mbuf = NULL;
		CPSW_RX_LOCK(sc);

		last_slot = slot;
		STAILQ_REMOVE_HEAD(&sc->rx_active, next);
		STAILQ_INSERT_TAIL(&sc->rx_avail, slot, next);
		slot = STAILQ_FIRST(&sc->rx_active);
	}

	/* Tell hardware last slot we processed. */
	if (last_slot)
		cpsw_write_4(CPSW_CPDMA_RX_CP(0),
		    cpsw_cpdma_rxbd_paddr(last_slot->index));

	/* Repopulate hardware RX queue. */
	cpsw_fill_rx_queue_locked(sc);
}
1017 
/*
 * Refill the hardware RX queue: allocate cluster mbufs for idle slots on
 * rx_avail, map them for DMA, build RX descriptors, and link the new
 * descriptors onto the tail of the hardware RX queue (restarting the
 * queue if it had been started fresh or had underrun).
 */
static void
cpsw_fill_rx_queue_locked(struct cpsw_softc *sc)
{
	bus_dma_segment_t seg[1];
	struct cpsw_queue tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *slot, *prev_slot, *next_slot;
	int error, nsegs;

	/* Try to allocate new mbufs. */
	STAILQ_FOREACH(slot, &sc->rx_avail, next) {
		if (slot->mbuf != NULL)
			continue;
		slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (slot->mbuf == NULL) {
			if_printf(sc->ifp, "Unable to fill RX queue\n");
			break;
		}
		slot->mbuf->m_len = slot->mbuf->m_pkthdr.len = slot->mbuf->m_ext.ext_size;
	}

	/* Register new mbufs with hardware. */
	prev_slot = NULL;
	while (!STAILQ_EMPTY(&sc->rx_avail)) {
		slot = STAILQ_FIRST(&sc->rx_avail);
		/* Slots without an mbuf (allocation failed above) stay on
		 * rx_avail for a later retry. */
		if (slot->mbuf == NULL)
			break;

		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
		    slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);

		KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
		KASSERT(error == 0, ("DMA error (error=%d)", error));
		if (nsegs != 1 || error) {
			if_printf(sc->ifp,
			    "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
			    __func__, nsegs, error);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD);

		/* Create and submit new rx descriptor*/
		bd.next = 0;
		bd.bufptr = seg->ds_addr;
		bd.buflen = MCLBYTES-1;
		bd.bufoff = 2; /* make IP hdr aligned with 4 */
		bd.pktlen = 0;
		bd.flags = CPDMA_BD_OWNER;
		cpsw_cpdma_write_rxbd(slot->index, &bd);

		/* Chain this descriptor after the previous new one. */
		if (prev_slot) {
			cpsw_cpdma_write_rxbd_next(prev_slot->index,
			    cpsw_cpdma_rxbd_paddr(slot->index));
		}
		prev_slot = slot;
		STAILQ_REMOVE_HEAD(&sc->rx_avail, next);
		STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
	}

	/* Link new entries to hardware RX queue. */
	prev_slot = STAILQ_LAST(&sc->rx_active, cpsw_slot, next);
	next_slot = STAILQ_FIRST(&tmpqueue);
	if (next_slot == NULL) {
		return;
	} else if (prev_slot == NULL) {
		/* Start a fresh RX queue. */
		cpsw_write_4(CPSW_CPDMA_RX_HDP(0),
		    cpsw_cpdma_rxbd_paddr(next_slot->index));
	} else {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_rxbd_next(prev_slot->index,
		    cpsw_cpdma_rxbd_paddr(next_slot->index));
		/* If underrun, restart queue. */
		if (cpsw_cpdma_read_rxbd_flags(prev_slot->index) & CPDMA_BD_EOQ) {
			cpsw_write_4(CPSW_CPDMA_RX_HDP(0),
			    cpsw_cpdma_rxbd_paddr(next_slot->index));
		}
	}
	STAILQ_CONCAT(&sc->rx_active, &tmpqueue);
}
1101 
1102 static void
1103 cpsw_intr_tx(void *arg)
1104 {
1105 	struct cpsw_softc *sc = arg;
1106 	CPSW_TX_LOCK(sc);
1107 	cpsw_intr_tx_locked(arg);
1108 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 2);
1109 	CPSW_TX_UNLOCK(sc);
1110 }
1111 
/*
 * Reap completed TX buffer descriptors: free their mbufs, move the
 * slots back to tx_avail, and acknowledge the last completed
 * descriptor to the hardware.  Also detects and completes a TX
 * teardown in progress.  Expected to run with the TX lock held (per
 * the _locked naming convention).
 */
static void
cpsw_intr_tx_locked(void *arg)
{
	struct cpsw_softc *sc = arg;
	struct cpsw_slot *slot, *last_slot = NULL;
	/* last_flags is recorded but currently unused past the loop. */
	uint32_t flags, last_flags = 0, retires = 0;

	if (!sc->tx_running)
		return;

	slot = STAILQ_FIRST(&sc->tx_active);
	/* 0xfffffffc in TX_CP is the teardown-complete marker. */
	if (slot == NULL &&
	    cpsw_read_4(CPSW_CPDMA_TX_CP(0)) == 0xfffffffc) {
		CPSW_DEBUGF(("TX teardown of an empty queue"));
		cpsw_write_4(CPSW_CPDMA_TX_CP(0), 0xfffffffc);
		sc->tx_running = 0;
		return;
	}

	/* Pull completed segments off the hardware TX queue. */
	while (slot != NULL) {
		flags = cpsw_cpdma_read_txbd_flags(slot->index);
		if (flags & CPDMA_BD_OWNER)
			break; /* Hardware is still using this. */

		/* Teardown completion descriptor: ack it and stop TX. */
		if (flags & CPDMA_BD_TDOWNCMPLT) {
			CPSW_DEBUGF(("TX teardown in progress"));
			cpsw_write_4(CPSW_CPDMA_TX_CP(0), 0xfffffffc);
			sc->tx_running = 0;
			return;
		}

		/* Release dmamap, free mbuf. */
		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
		m_freem(slot->mbuf);
		slot->mbuf = NULL;

		/* Recycle the slot and let the stack queue more frames. */
		STAILQ_REMOVE_HEAD(&sc->tx_active, next);
		STAILQ_INSERT_TAIL(&sc->tx_avail, slot, next);
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		last_slot = slot;
		last_flags = flags;
		++retires;
		slot = STAILQ_FIRST(&sc->tx_active);
	}

	if (retires != 0) {
		/* Tell hardware the last item we dequeued. */
		cpsw_write_4(CPSW_CPDMA_TX_CP(0),
		     cpsw_cpdma_txbd_paddr(last_slot->index));
		sc->tx_retires += retires;
		sc->tx_queued -= retires;
	}
}
1169 
1170 static void
1171 cpsw_intr_misc(void *arg)
1172 {
1173 	struct cpsw_softc *sc = arg;
1174 	uint32_t stat = cpsw_read_4(CPSW_WR_C_MISC_STAT(0));
1175 
1176 	CPSW_DEBUGF(("stat=%x", stat));
1177 	/* EOI_RX_PULSE */
1178 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
1179 }
1180 
1181 static void
1182 cpsw_tick(void *msc)
1183 {
1184 	struct cpsw_softc *sc = msc;
1185 
1186 	/* Check for TX timeout */
1187 	cpsw_tx_watchdog(sc);
1188 
1189 	mii_tick(sc->mii);
1190 
1191 	/* Check for media type change */
1192 	if(sc->cpsw_media_status != sc->mii->mii_media.ifm_media) {
1193 		printf("%s: media type changed (ifm_media=%x)\n", __func__,
1194 			sc->mii->mii_media.ifm_media);
1195 		cpsw_ifmedia_upd(sc->ifp);
1196 	}
1197 
1198 	/* Schedule another timeout one second from now */
1199 	callout_reset(&sc->wd_callout, hz, cpsw_tick, sc);
1200 }
1201 
1202 static void
1203 cpsw_tx_watchdog(struct cpsw_softc *sc)
1204 {
1205 	struct ifnet *ifp = sc->ifp;
1206 
1207 	CPSW_GLOBAL_LOCK(sc);
1208 	if (sc->tx_retires > sc->tx_retires_at_last_tick) {
1209 		sc->tx_wd_timer = 0;  /* Stuff got sent. */
1210 	} else if (sc->tx_queued == 0) {
1211 		sc->tx_wd_timer = 0; /* Nothing to send. */
1212 	} else {
1213 		/* There was something to send but we didn't. */
1214 		++sc->tx_wd_timer;
1215 		if (sc->tx_wd_timer > 3) {
1216 			sc->tx_wd_timer = 0;
1217 			ifp->if_oerrors++;
1218 			if_printf(ifp, "watchdog timeout\n");
1219 			cpsw_stop_locked(sc);
1220 			cpsw_init_locked(sc);
1221 			CPSW_DEBUGF(("watchdog reset completed\n"));
1222 		}
1223 	}
1224 	sc->tx_retires_at_last_tick = sc->tx_retires;
1225 	CPSW_GLOBAL_UNLOCK(sc);
1226 }
1227 
/* if_init handler: run the locked init routine under the global lock. */
static void
cpsw_init(void *arg)
{
	struct cpsw_softc *sc = arg;

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_init_locked(sc);
	CPSW_GLOBAL_UNLOCK(sc);
}
1238 
/*
 * NOTE(review): one-shot flag gating the ALE table clear in
 * cpsw_init_locked().  It has external linkage and is shared across
 * all instances; it should probably be 'static' (or per-softc state).
 * Marked FIXME where it is consumed.
 */
int once = 1;
1240 
/*
 * Bring the interface up: reset the wrapper, switch subsystem, sliver
 * ports and CPDMA; program MAC addresses and ALE entries; prime the
 * RX descriptor ring; and enable DMA plus interrupt delivery.  The
 * register write order follows the hardware bring-up sequence and
 * must not be rearranged.  Expected to run with the global lock held.
 */
static void
cpsw_init_locked(void *arg)
{
	struct ifnet *ifp;
	struct cpsw_softc *sc = arg;
	uint8_t  broadcast_address[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	uint32_t i;

	CPSW_DEBUGF((""));
	ifp = sc->ifp;
	/* Nothing to do if we are already running. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/* Reset writer */
	cpsw_write_4(CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(CPSW_WR_SOFT_RESET) & 1)
		;

	/* Reset SS */
	cpsw_write_4(CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(CPSW_SS_SOFT_RESET) & 1)
		;

	/* Clear table (30) and enable ALE(31) */
	if (once)
		cpsw_write_4(CPSW_ALE_CONTROL, 3 << 30);
	else
		cpsw_write_4(CPSW_ALE_CONTROL, 1 << 31);
	once = 0; // FIXME: global one-shot flag; should be per-softc state

	/* Reset and init Sliver port 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Reset */
		cpsw_write_4(CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(CPSW_SL_SOFT_RESET(i)) & 1)
			;
		/* Set Slave Mapping */
		cpsw_write_4(CPSW_SL_RX_PRI_MAP(i), 0x76543210);
		cpsw_write_4(CPSW_PORT_P_TX_PRI_MAP(i + 1), 0x33221100);
		cpsw_write_4(CPSW_SL_RX_MAXLEN(i), 0x5f2);
		/* Set MAC Address (bytes 0-3 in SA_HI, 4-5 in SA_LO) */
		cpsw_write_4(CPSW_PORT_P_SA_HI(i + 1),
			sc->mac_addr[3] << 24 |
			sc->mac_addr[2] << 16 |
			sc->mac_addr[1] << 8 |
			sc->mac_addr[0]);
		cpsw_write_4(CPSW_PORT_P_SA_LO(i+1),
			sc->mac_addr[5] << 8 |
			sc->mac_addr[4]);

		/* Set MACCONTROL for ports 0,1: FULLDUPLEX(1), GMII_EN(5),
		   IFCTL_A(15), IFCTL_B(16) FIXME */
		cpsw_write_4(CPSW_SL_MACCONTROL(i), 1 << 15 | 1 << 5 | 1);

		/* Set ALE port to forwarding(3) */
		cpsw_write_4(CPSW_ALE_PORTCTL(i + 1), 3);
	}

	/* Set Host Port Mapping */
	cpsw_write_4(CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
	cpsw_write_4(CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);

	/* Set ALE port to forwarding(3)*/
	cpsw_write_4(CPSW_ALE_PORTCTL(0), 3);

	/* Add own MAC address and broadcast to ALE */
	cpsw_ale_uc_entry_set(sc, 0, sc->mac_addr);
	cpsw_ale_mc_entry_set(sc, 7, broadcast_address);

	cpsw_write_4(CPSW_SS_PTYPE, 0);
	/* Enable statistics for ports 0, 1 and 2 */
	cpsw_write_4(CPSW_SS_STAT_PORT_EN, 7);

	/* Reset CPDMA */
	cpsw_write_4(CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(CPSW_CPDMA_SOFT_RESET) & 1)
		;

	/* Make IP hdr aligned with 4 */
	cpsw_write_4(CPSW_CPDMA_RX_BUFFER_OFFSET, 2);

	/* Clear all 8 TX/RX channel head/completion pointers. */
	for (i = 0; i < 8; i++) {
		cpsw_write_4(CPSW_CPDMA_TX_HDP(i), 0);
		cpsw_write_4(CPSW_CPDMA_RX_HDP(i), 0);
		cpsw_write_4(CPSW_CPDMA_TX_CP(i), 0);
		cpsw_write_4(CPSW_CPDMA_RX_CP(i), 0);
	}

	/* Initialize RX Buffer Descriptors */
	cpsw_write_4(CPSW_CPDMA_RX_FREEBUFFER(0), 0);
	cpsw_fill_rx_queue_locked(sc);

	/* Clear all interrupt Masks */
	cpsw_write_4(CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
	cpsw_write_4(CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);

	/* Enable TX & RX DMA */
	cpsw_write_4(CPSW_CPDMA_TX_CONTROL, 1);
	cpsw_write_4(CPSW_CPDMA_RX_CONTROL, 1);

	/* Enable TX and RX interrupt receive for core 0 */
	cpsw_write_4(CPSW_WR_C_TX_EN(0), 0xFF);
	cpsw_write_4(CPSW_WR_C_RX_EN(0), 0xFF);
	//cpsw_write_4(CPSW_WR_C_MISC_EN(0), 0x3F);

	/* Enable host Error Interrupt */
	cpsw_write_4(CPSW_CPDMA_DMA_INTMASK_SET, 1);

	/* Enable interrupts for TX and RX Channel 0 */
	cpsw_write_4(CPSW_CPDMA_TX_INTMASK_SET, 1);
	cpsw_write_4(CPSW_CPDMA_RX_INTMASK_SET, 1);

	/* Ack stalled irqs */
	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 2);
	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);

	/* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF);

	/* Select MII in GMII_SEL, Internal Delay mode */
	//ti_scm_reg_write_4(0x650, 0);

	/* Activate network interface */
	sc->rx_running = 1;
	sc->tx_running = 1;
	sc->tx_wd_timer = 0;
	callout_reset(&sc->wd_callout, hz, cpsw_tick, sc);
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
1374 
/*
 * Read ALE table entry 'idx' into ale_entry[0..2].  Writing the index
 * to TBLCTL (write bit clear) loads the entry into the TBLW registers,
 * which are then read back.  The write must precede the reads.
 */
static void
cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
	cpsw_write_4(CPSW_ALE_TBLCTL, idx & 1023);
	ale_entry[0] = cpsw_read_4(CPSW_ALE_TBLW0);
	ale_entry[1] = cpsw_read_4(CPSW_ALE_TBLW1);
	ale_entry[2] = cpsw_read_4(CPSW_ALE_TBLW2);
}
1383 
1384 static void
1385 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
1386 {
1387 	cpsw_write_4(CPSW_ALE_TBLW0, ale_entry[0]);
1388 	cpsw_write_4(CPSW_ALE_TBLW1, ale_entry[1]);
1389 	cpsw_write_4(CPSW_ALE_TBLW2, ale_entry[2]);
1390 	cpsw_write_4(CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023));
1391 }
1392 
1393 static int
1394 cpsw_ale_find_entry_by_mac(struct cpsw_softc *sc, uint8_t *mac)
1395 {
1396 	int i;
1397 	uint32_t ale_entry[3];
1398 	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
1399 		cpsw_ale_read_entry(sc, i, ale_entry);
1400 		if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) &&
1401 		    (((ale_entry[1] >> 0) & 0xFF) == mac[1]) &&
1402 		    (((ale_entry[0] >>24) & 0xFF) == mac[2]) &&
1403 		    (((ale_entry[0] >>16) & 0xFF) == mac[3]) &&
1404 		    (((ale_entry[0] >> 8) & 0xFF) == mac[4]) &&
1405 		    (((ale_entry[0] >> 0) & 0xFF) == mac[5])) {
1406 			return (i);
1407 		}
1408 	}
1409 	return CPSW_MAX_ALE_ENTRIES;
1410 }
1411 
1412 static int
1413 cpsw_ale_find_free_entry(struct cpsw_softc *sc)
1414 {
1415 	int i;
1416 	uint32_t ale_entry[3];
1417 	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
1418 		cpsw_ale_read_entry(sc, i, ale_entry);
1419 		/* Entry Type[61:60] is 0 for free entry */
1420 		if (((ale_entry[1] >> 28) & 3) == 0) {
1421 			return i;
1422 		}
1423 	}
1424 	return CPSW_MAX_ALE_ENTRIES;
1425 }
1426 
1427 
1428 static int
1429 cpsw_ale_uc_entry_set(struct cpsw_softc *sc, uint8_t port, uint8_t *mac)
1430 {
1431 	int i;
1432 	uint32_t ale_entry[3];
1433 
1434 	if ((i = cpsw_ale_find_entry_by_mac(sc, mac)) == CPSW_MAX_ALE_ENTRIES) {
1435 		i = cpsw_ale_find_free_entry(sc);
1436 	}
1437 
1438 	if (i == CPSW_MAX_ALE_ENTRIES)
1439 		return (ENOMEM);
1440 
1441 	/* Set MAC address */
1442 	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
1443 	ale_entry[1] = mac[0] << 8 | mac[1];
1444 
1445 	/* Entry type[61:60] is addr entry(1) */
1446 	ale_entry[1] |= 0x10 << 24;
1447 
1448 	/* Set portmask [67:66] */
1449 	ale_entry[2] = (port & 3) << 2;
1450 
1451 	cpsw_ale_write_entry(sc, i, ale_entry);
1452 
1453 	return 0;
1454 }
1455 
1456 static int
1457 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, uint8_t *mac)
1458 {
1459 	int i;
1460 	uint32_t ale_entry[3];
1461 
1462 	if ((i = cpsw_ale_find_entry_by_mac(sc, mac)) == CPSW_MAX_ALE_ENTRIES) {
1463 		i = cpsw_ale_find_free_entry(sc);
1464 	}
1465 
1466 	if (i == CPSW_MAX_ALE_ENTRIES)
1467 		return (ENOMEM);
1468 
1469 	/* Set MAC address */
1470 	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
1471 	ale_entry[1] = mac[0] << 8 | mac[1];
1472 
1473 	/* Entry type[61:60] is addr entry(1), Mcast fwd state[63:62] is fw(3)*/
1474 	ale_entry[1] |= 0xd0 << 24;
1475 
1476 	/* Set portmask [68:66] */
1477 	ale_entry[2] = (portmap & 7) << 2;
1478 
1479 	cpsw_ale_write_entry(sc, i, ale_entry);
1480 
1481 	return 0;
1482 }
1483 
1484 #ifdef CPSW_DEBUG
1485 static void
1486 cpsw_ale_dump_table(struct cpsw_softc *sc) {
1487 	int i;
1488 	uint32_t ale_entry[3];
1489 	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
1490 		cpsw_ale_read_entry(sc, i, ale_entry);
1491 		if (ale_entry[0] || ale_entry[1] || ale_entry[2]) {
1492 			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[0],
1493 				ale_entry[1], ale_entry[2]);
1494 			printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ",
1495 				(ale_entry[1] >> 8) & 0xFF,
1496 				(ale_entry[1] >> 0) & 0xFF,
1497 				(ale_entry[0] >>24) & 0xFF,
1498 				(ale_entry[0] >>16) & 0xFF,
1499 				(ale_entry[0] >> 8) & 0xFF,
1500 				(ale_entry[0] >> 0) & 0xFF);
1501 			printf(((ale_entry[1] >> 8) & 1) ? "mcast " : "ucast ");
1502 			printf("type: %u ", (ale_entry[1] >> 28) & 3);
1503 			printf("port: %u ", (ale_entry[2] >> 2) & 7);
1504 			printf("\n");
1505 		}
1506 	}
1507 }
1508 #endif
1509