xref: /freebsd/sys/arm/ti/cpsw/if_cpsw.c (revision 9a41df2a0e6408e9b329bbd8b9e37c2b44461a1b)
1 /*-
2  * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /*
28  * TI 3 Port Switch Ethernet (CPSW) Driver
29  * Found in TI8148, AM335x SoCs
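 *
 * The switch has one host (CPDMA) port and two sliver (MAC) ports; this
 * driver currently exposes it as a single Ethernet interface and uses
 * CPDMA channel 0 for both the RX and TX descriptor rings.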
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/endian.h>
38 #include <sys/mbuf.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/kernel.h>
42 #include <sys/module.h>
43 #include <sys/socket.h>
44 #include <sys/sysctl.h>
45 
46 #include <net/ethernet.h>
47 #include <net/bpf.h>
48 #include <net/if.h>
49 #include <net/if_arp.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 #include <net/if_types.h>
53 #include <net/if_vlan_var.h>
54 
55 #include <netinet/in_systm.h>
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 
59 #include <sys/sockio.h>
60 #include <sys/bus.h>
61 #include <machine/bus.h>
62 #include <sys/rman.h>
63 #include <machine/resource.h>
64 
65 #include <dev/mii/mii.h>
66 #include <dev/mii/miivar.h>
67 
68 #include <dev/fdt/fdt_common.h>
69 #include <dev/ofw/ofw_bus.h>
70 #include <dev/ofw/ofw_bus_subr.h>
71 
72 #include <arm/ti/cpsw/if_cpswreg.h>
73 #include <arm/ti/cpsw/if_cpswvar.h>
74 
75 #include <arm/ti/ti_scm.h>
76 
77 #include "miibus_if.h"
78 
79 static int cpsw_probe(device_t dev);
80 static int cpsw_attach(device_t dev);
81 static int cpsw_detach(device_t dev);
82 static int cpsw_shutdown(device_t dev);
83 static int cpsw_suspend(device_t dev);
84 static int cpsw_resume(device_t dev);
85 
86 static int cpsw_miibus_readreg(device_t dev, int phy, int reg);
87 static int cpsw_miibus_writereg(device_t dev, int phy, int reg, int value);
88 
89 static int cpsw_ifmedia_upd(struct ifnet *ifp);
90 static void cpsw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
91 
92 static void cpsw_init(void *arg);
93 static void cpsw_init_locked(void *arg);
94 static void cpsw_start(struct ifnet *ifp);
95 static void cpsw_start_locked(struct ifnet *ifp);
96 static void cpsw_stop_locked(struct cpsw_softc *sc);
97 static int cpsw_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
98 static int cpsw_allocate_dma(struct cpsw_softc *sc);
99 static int cpsw_free_dma(struct cpsw_softc *sc);
100 static int cpsw_new_rxbuf(struct cpsw_softc *sc, uint32_t i, uint32_t next);
101 static void cpsw_watchdog(struct cpsw_softc *sc);
102 
103 static void cpsw_intr_rx_thresh(void *arg);
104 static void cpsw_intr_rx(void *arg);
105 static void cpsw_intr_rx_locked(void *arg);
106 static void cpsw_intr_tx(void *arg);
107 static void cpsw_intr_tx_locked(void *arg);
108 static void cpsw_intr_misc(void *arg);
109 
110 static void cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry);
111 static void cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry);
112 static int cpsw_ale_uc_entry_set(struct cpsw_softc *sc, uint8_t port, uint8_t *mac);
113 static int cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, uint8_t *mac);
114 #ifdef CPSW_DEBUG
115 static void cpsw_ale_dump_table(struct cpsw_softc *sc);
116 #endif
117 
118 static device_method_t cpsw_methods[] = {
119 	/* Device interface */
120 	DEVMETHOD(device_probe,		cpsw_probe),
121 	DEVMETHOD(device_attach,	cpsw_attach),
122 	DEVMETHOD(device_detach,	cpsw_detach),
123 	DEVMETHOD(device_shutdown,	cpsw_shutdown),
124 	DEVMETHOD(device_suspend,	cpsw_suspend),
125 	DEVMETHOD(device_resume,	cpsw_resume),
126 	/* MII interface */
127 	DEVMETHOD(miibus_readreg,	cpsw_miibus_readreg),
128 	DEVMETHOD(miibus_writereg,	cpsw_miibus_writereg),
129 	{ 0, 0 }
130 };
131 
132 static driver_t cpsw_driver = {
133 	"cpsw",
134 	cpsw_methods,
135 	sizeof(struct cpsw_softc),
136 };
137 
138 static devclass_t cpsw_devclass;
139 
140 
141 DRIVER_MODULE(cpsw, simplebus, cpsw_driver, cpsw_devclass, 0, 0);
142 DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0);
143 MODULE_DEPEND(cpsw, ether, 1, 1, 1);
144 MODULE_DEPEND(cpsw, miibus, 1, 1, 1);
145 
146 static struct resource_spec res_spec[] = {
147 	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
148 	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
149 	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
150 	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
151 	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
152 	{ -1, 0 }
153 };
154 
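/*
 * res_spec[0] is the register window; the four IRQ resources that follow are
 * hooked up, in order, to the handlers in the cpsw_intrs table below
 * (see cpsw_attach()).
 */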
155 static struct {
156 	driver_intr_t *handler;
157 	char *description;
158 } cpsw_intrs[CPSW_INTR_COUNT + 1] = {
159 	{ cpsw_intr_rx_thresh,	"CPSW RX threshold interrupt" },
160 	{ cpsw_intr_rx,		"CPSW RX interrupt" },
161 	{ cpsw_intr_tx,		"CPSW TX interrupt" },
162 	{ cpsw_intr_misc,	"CPSW misc interrupt" },
163 };
164 
165 /* Locking macros */
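/*
 * Lock order is TX before RX: CPSW_GLOBAL_LOCK() takes tx_lock and then
 * rx_lock, and the single-lock macros assert that the other lock is not
 * already held.
 */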
166 #define CPSW_TX_LOCK(sc) do {					\
167 		mtx_assert(&(sc)->rx_lock, MA_NOTOWNED);		\
168 		mtx_lock(&(sc)->tx_lock);				\
169 } while (0)
170 
171 #define CPSW_TX_UNLOCK(sc)	mtx_unlock(&(sc)->tx_lock)
172 #define CPSW_TX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->tx_lock, MA_OWNED)
173 
174 #define CPSW_RX_LOCK(sc) do {					\
175 		mtx_assert(&(sc)->tx_lock, MA_NOTOWNED);		\
176 		mtx_lock(&(sc)->rx_lock);				\
177 } while (0)
178 
179 #define CPSW_RX_UNLOCK(sc)		mtx_unlock(&(sc)->rx_lock)
180 #define CPSW_RX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->rx_lock, MA_OWNED)
181 
182 #define CPSW_GLOBAL_LOCK(sc) do {					\
183 		if ((mtx_owned(&(sc)->tx_lock) ? 1 : 0) !=	\
184 		    (mtx_owned(&(sc)->rx_lock) ? 1 : 0)) {		\
185 			panic("cpsw: inconsistent lock state, possible deadlock");	\
186 		}							\
187 		mtx_lock(&(sc)->tx_lock);				\
188 		mtx_lock(&(sc)->rx_lock);				\
189 } while (0)
190 
191 #define CPSW_GLOBAL_UNLOCK(sc) do {					\
192 		CPSW_RX_UNLOCK(sc);				\
193 		CPSW_TX_UNLOCK(sc);				\
194 } while (0)
195 
196 #define CPSW_GLOBAL_LOCK_ASSERT(sc) do {				\
197 		CPSW_TX_LOCK_ASSERT(sc);				\
198 		CPSW_RX_LOCK_ASSERT(sc);				\
199 } while (0)
200 
201 
202 static int
203 cpsw_probe(device_t dev)
204 {
205 
206 	if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
207 		return (ENXIO);
208 
209 	device_set_desc(dev, "3-port Switch Ethernet Subsystem");
210 	return (BUS_PROBE_DEFAULT);
211 }
212 
213 static int
214 cpsw_attach(device_t dev)
215 {
216 	struct cpsw_softc *sc;
217 	struct mii_softc *miisc;
218 	struct ifnet *ifp;
219 	int i, error, phy;
220 	uint32_t reg;
221 
222 	sc = device_get_softc(dev);
223 	sc->dev = dev;
224 	sc->node = ofw_bus_get_node(dev);
225 
226 	/* Get phy address from fdt */
227 	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) != 0) {
228 		device_printf(dev, "failed to get PHY address from FDT\n");
229 		return (ENXIO);
230 	}
231 	/* Initialize mutexes */
232 	mtx_init(&sc->tx_lock, device_get_nameunit(dev),
233 		"cpsw TX lock", MTX_DEF);
234 	mtx_init(&sc->rx_lock, device_get_nameunit(dev),
235 		"cpsw RX lock", MTX_DEF);
236 
237 	/* Allocate IO and IRQ resources */
238 	error = bus_alloc_resources(dev, res_spec, sc->res);
239 	if (error) {
240 		device_printf(dev, "could not allocate resources\n");
241 		cpsw_detach(dev);
242 		return (ENXIO);
243 	}
244 
245 	reg = cpsw_read_4(CPSW_SS_IDVER);
246 	device_printf(dev, "Version %d.%d (%d)\n", (reg >> 8 & 0x7),
247 		reg & 0xFF, (reg >> 11) & 0x1F);
248 
249 	/* Allocate DMA, buffers, buffer descriptors */
250 	error = cpsw_allocate_dma(sc);
251 	if (error) {
252 		cpsw_detach(dev);
253 		return (ENXIO);
254 	}
255 
256 	//cpsw_add_sysctls(sc); TODO
257 
258 	/* Allocate network interface */
259 	ifp = sc->ifp = if_alloc(IFT_ETHER);
260 	if (ifp == NULL) {
261 		device_printf(dev, "if_alloc() failed\n");
262 		cpsw_detach(dev);
263 		return (ENOMEM);
264 	}
265 
266 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
267 	ifp->if_softc = sc;
268 	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
269 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN?
270 	ifp->if_capenable = ifp->if_capabilities;
271 
272 	ifp->if_init = cpsw_init;
273 	ifp->if_start = cpsw_start;
274 	ifp->if_ioctl = cpsw_ioctl;
275 
276 	ifp->if_snd.ifq_drv_maxlen = CPSW_MAX_TX_BUFFERS - 1;
277 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
278 	IFQ_SET_READY(&ifp->if_snd);
279 
280 	/* Get high part of MAC address from control module (mac_id0_hi) */
281 	ti_scm_reg_read_4(0x634, &reg);
282 	sc->mac_addr[0] = reg & 0xFF;
283 	sc->mac_addr[1] = (reg >>  8) & 0xFF;
284 	sc->mac_addr[2] = (reg >> 16) & 0xFF;
285 	sc->mac_addr[3] = (reg >> 24) & 0xFF;
286 
287 	/* Get low part of MAC address from control module (mac_id0_lo) */
288 	ti_scm_reg_read_4(0x630, &reg);
289 	sc->mac_addr[4] = reg & 0xFF;
290 	sc->mac_addr[5] = (reg >>  8) & 0xFF;
291 
292 	ether_ifattach(ifp, sc->mac_addr);
293 	callout_init(&sc->wd_callout, 0);
294 
295 	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
296 	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
297 	cpsw_write_4(MDIOCONTROL, (1<<30) | (1<<18) | 0xFF);
298 
299 	/* Attach PHY(s) */
300 	error = mii_attach(dev, &sc->miibus, ifp, cpsw_ifmedia_upd,
301 	    cpsw_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
302 	if (error) {
303 		device_printf(dev, "attaching PHYs failed\n");
304 		cpsw_detach(dev);
305 		return (error);
306 	}
307 	sc->mii = device_get_softc(sc->miibus);
308 
309 	/* Tell the MAC where to find the PHY so autoneg works */
310 	miisc = LIST_FIRST(&sc->mii->mii_phys);
311 
312 	/* Select PHY and enable interrupts */
313 	cpsw_write_4(MDIOUSERPHYSEL0, (1 << 6) | (miisc->mii_phy & 0x1F));
314 
315 	/* Attach interrupt handlers */
316 	for (i = 1; i <= CPSW_INTR_COUNT; ++i) {
317 		error = bus_setup_intr(dev, sc->res[i],
318 		    INTR_TYPE_NET | INTR_MPSAFE,
319 		    NULL, *cpsw_intrs[i - 1].handler,
320 		    sc, &sc->ih_cookie[i - 1]);
321 		if (error) {
322 			device_printf(dev, "could not setup %s\n",
323 			    cpsw_intrs[i - 1].description);
324 			cpsw_detach(dev);
325 			return (error);
326 		}
327 	}
328 
329 	return (0);
330 }
331 
332 static int
333 cpsw_detach(device_t dev)
334 {
335 	struct cpsw_softc *sc;
336 	int error,i;
337 
338 	sc = device_get_softc(dev);
339 
340 	/* Stop controller and free TX queue */
341 	if (sc->ifp)
342 		cpsw_shutdown(dev);
343 
344 	/* Wait for the tick callout to finish */
345 	callout_drain(&sc->wd_callout);
346 
347 	/* Stop and release all interrupts */
348 	for (i = 0; i < CPSW_INTR_COUNT; ++i) {
349 		if (!sc->ih_cookie[i])
350 			continue;
351 
352 		error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
353 		if (error)
354 			device_printf(dev, "could not release %s\n",
355 			    cpsw_intrs[i].description);
356 	}
357 
358 	/* Detach network interface */
359 	if (sc->ifp) {
360 		ether_ifdetach(sc->ifp);
361 		if_free(sc->ifp);
362 	}
363 
364 	/* Free DMA resources */
365 	cpsw_free_dma(sc);
366 
367 	/* Free IO memory handler */
368 	bus_release_resources(dev, res_spec, sc->res);
369 
370 	/* Destroy mutexes */
371 	mtx_destroy(&sc->rx_lock);
372 	mtx_destroy(&sc->tx_lock);
373 
374 	return (0);
375 }
376 
377 static int
378 cpsw_suspend(device_t dev)
379 {
380 
381 	device_printf(dev, "%s\n", __FUNCTION__);
382 	return (0);
383 }
384 
385 static int
386 cpsw_resume(device_t dev)
387 {
388 
389 	device_printf(dev, "%s\n", __FUNCTION__);
390 	return (0);
391 }
392 
393 static int
394 cpsw_shutdown(device_t dev)
395 {
396 	struct cpsw_softc *sc = device_get_softc(dev);
397 
398 	CPSW_GLOBAL_LOCK(sc);
399 
400 	cpsw_stop_locked(sc);
401 
402 	CPSW_GLOBAL_UNLOCK(sc);
403 
404 	return (0);
405 }
406 
407 static int
408 cpsw_miibus_readreg(device_t dev, int phy, int reg)
409 {
410 	struct cpsw_softc *sc;
411 	uint32_t r;
412 	uint32_t retries = CPSW_MIIBUS_RETRIES;
413 
414 	sc = device_get_softc(dev);
415 
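	/*
	 * MDIOUSERACCESS0 layout (per the AM335x TRM): bit 31 GO, bit 30
	 * WRITE, bit 29 ACK, bits 25:21 register address, bits 20:16 PHY
	 * address, bits 15:0 data.
	 */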
416 	/* Wait until interface is ready by watching GO bit */
417 	while(--retries && (cpsw_read_4(MDIOUSERACCESS0) & (1 << 31)) )
418 		DELAY(CPSW_MIIBUS_DELAY);
419 	if (!retries)
420 		device_printf(dev, "Timeout while waiting for MDIO.\n");
421 
422 	/* Set GO, phy and reg */
423 	cpsw_write_4(MDIOUSERACCESS0, (1 << 31) |
424 		((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));
425 
426 	while(--retries && (cpsw_read_4(MDIOUSERACCESS0) & (1 << 31)) )
427 		DELAY(CPSW_MIIBUS_DELAY);
428 	if (!retries)
429 		device_printf(dev, "Timeout while waiting for MDIO.\n");
430 
431 	r = cpsw_read_4(MDIOUSERACCESS0);
432 	/* Check for ACK */
433 	if(r & (1<<29)) {
434 		return (r & 0xFFFF);
435 	}
436 	device_printf(dev, "Failed to read from PHY.\n");
437 	return 0;
438 }
439 
440 static int
441 cpsw_miibus_writereg(device_t dev, int phy, int reg, int value)
442 {
443 	struct cpsw_softc *sc;
444 	uint32_t retries = CPSW_MIIBUS_RETRIES;
445 
446 	sc = device_get_softc(dev);
447 
448 	/* Wait until interface is ready by watching GO bit */
449 	while(--retries && (cpsw_read_4(MDIOUSERACCESS0) & (1 << 31)) )
450 		DELAY(CPSW_MIIBUS_DELAY);
451 	if (!retries)
452 		device_printf(dev, "Timeout while waiting for MDIO.\n");
453 
454 	/* Set GO, WRITE, phy, reg and value */
455 	cpsw_write_4(MDIOUSERACCESS0, (value & 0xFFFF) | (3 << 30) |
456 		((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));
457 
458 	while(--retries && (cpsw_read_4(MDIOUSERACCESS0) & (1 << 31)) )
459 		DELAY(CPSW_MIIBUS_DELAY);
460 	if (!retries)
461 		device_printf(dev, "Timeout while waiting for MDIO.\n");
462 
463 	/* Check for ACK */
464 	if(cpsw_read_4(MDIOUSERACCESS0) & (1<<29)) {
465 		return 0;
466 	}
467 	device_printf(dev, "Failed to write to PHY.\n");
468 
469 	return 0;
470 }
471 
472 static int
473 cpsw_allocate_dma(struct cpsw_softc *sc)
474 {
475 	int err;
476 	int i;
477 
478 	/* Allocate a busdma tag and DMA safe memory for tx mbufs. */
479 	err = bus_dma_tag_create(
480 		bus_get_dma_tag(sc->dev),	/* parent */
481 		1, 0,				/* alignment, boundary */
482 		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
483 		BUS_SPACE_MAXADDR,		/* highaddr */
484 		NULL, NULL,			/* filtfunc, filtfuncarg */
485 		MCLBYTES, 1,			/* maxsize, nsegments */
486 		MCLBYTES, 0,			/* maxsegsz, flags */
487 		NULL, NULL,			/* lockfunc, lockfuncarg */
488 		&sc->mbuf_dtag);		/* dmatag */
489 
490 	if (err)
491 		return (ENOMEM);
492 	for (i = 0; i < CPSW_MAX_TX_BUFFERS; i++) {
493 		if (bus_dmamap_create(sc->mbuf_dtag, 0, &sc->tx_dmamap[i])) {
494 			if_printf(sc->ifp, "failed to create dmamap for tx mbuf\n");
495 			return (ENOMEM);
496 		}
497 	}
498 
499 	for (i = 0; i < CPSW_MAX_RX_BUFFERS; i++) {
500 		if (bus_dmamap_create(sc->mbuf_dtag, 0, &sc->rx_dmamap[i])) {
501 			if_printf(sc->ifp, "failed to create dmamap for rx mbuf\n");
502 			return (ENOMEM);
503 		}
504 	}
505 
506 	return (0);
507 }
508 
509 static int
510 cpsw_free_dma(struct cpsw_softc *sc)
511 {
512 	(void)sc; /* UNUSED */
513 	// TODO
514 	return 0;
515 }
516 
517 static int
518 cpsw_new_rxbuf(struct cpsw_softc *sc, uint32_t i, uint32_t next)
519 {
520 	bus_dma_segment_t seg[1];
521 	struct cpsw_cpdma_bd bd;
522 	int error;
523 	int nsegs;
524 
525 	sc->rx_mbuf[i] = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
526 	if (sc->rx_mbuf[i] == NULL)
527 		return (ENOBUFS);
528 
529 	sc->rx_mbuf[i]->m_len = sc->rx_mbuf[i]->m_pkthdr.len = sc->rx_mbuf[i]->m_ext.ext_size;
530 
531 	error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->rx_dmamap[i],
532 		sc->rx_mbuf[i], seg, &nsegs, BUS_DMA_NOWAIT);
533 
534 	KASSERT(nsegs == 1, ("Too many segments returned!"));
535 	if (nsegs != 1 || error)
536 		panic("%s: nsegs(%d), error(%d)",__func__, nsegs, error);
537 
538 	bus_dmamap_sync(sc->mbuf_dtag, sc->rx_dmamap[i], BUS_DMASYNC_PREREAD);
539 
540 	/* Create and submit a new RX descriptor */
541 	bd.next = next;
542 	bd.bufptr = seg->ds_addr;
543 	bd.buflen = MCLBYTES-1;
544 	bd.bufoff = 2; /* align the IP header on a 4-byte boundary */
545 	bd.pktlen = 0;
546 	bd.flags = CPDMA_BD_OWNER;
547 	cpsw_cpdma_write_rxbd(i, &bd);
548 
549 	return (0);
550 }
551 
552 
553 static int
554 cpsw_encap(struct cpsw_softc *sc, struct mbuf *m0)
555 {
556 	bus_dma_segment_t seg[1];
557 	struct cpsw_cpdma_bd bd;
558 	int error;
559 	int nsegs;
560 	int idx;
561 
562 	if (sc->txbd_queue_size == CPSW_MAX_TX_BUFFERS)
563 		return (ENOBUFS);
564 
565 	idx = sc->txbd_head + sc->txbd_queue_size;
566 
567 	if (idx >= (CPSW_MAX_TX_BUFFERS) )
568 		idx -= CPSW_MAX_TX_BUFFERS;
569 
570 	/* Create mapping in DMA memory */
571 	error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->tx_dmamap[idx], m0, seg, &nsegs,
572 	    BUS_DMA_NOWAIT);
573 	sc->tc[idx]++;
574 	if (error != 0 || nsegs != 1 ) {
575 		bus_dmamap_unload(sc->mbuf_dtag, sc->tx_dmamap[idx]);
576 		return ((error != 0) ? error : -1);
577 	}
578 	bus_dmamap_sync(sc->mbuf_dtag, sc->tx_dmamap[idx], BUS_DMASYNC_PREWRITE);
579 
580 	/* Fill descriptor data */
581 	bd.next = 0;
582 	bd.bufptr = seg->ds_addr;
583 	bd.bufoff = 0;
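	/*
	 * Short frames are padded up to 64 bytes here, presumably so the
	 * hardware always transmits a minimum-length Ethernet frame.
	 */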
584 	bd.buflen = (seg->ds_len < 64 ? 64 : seg->ds_len);
585 	bd.pktlen = (seg->ds_len < 64 ? 64 : seg->ds_len);
586 	/* Set OWNERSHIP, SOP, EOP */
587 	bd.flags = (7<<13);
588 
589 	/* Write descriptor */
590 	cpsw_cpdma_write_txbd(idx, &bd);
591 	sc->tx_mbuf[idx] = m0;
592 
593 	/* Previous descriptor should point to us */
594 	cpsw_cpdma_write_txbd_next(((idx-1<0)?(CPSW_MAX_TX_BUFFERS-1):(idx-1)),
595 		cpsw_cpdma_txbd_paddr(idx));
596 
597 	sc->txbd_queue_size++;
598 
599 	return (0);
600 }
601 
602 static void
603 cpsw_start(struct ifnet *ifp)
604 {
605 	struct cpsw_softc *sc = ifp->if_softc;
606 
607 	CPSW_TX_LOCK(sc);
608 	cpsw_start_locked(ifp);
609 	CPSW_TX_UNLOCK(sc);
610 }
611 
612 static void
613 cpsw_start_locked(struct ifnet *ifp)
614 {
615 	struct cpsw_softc *sc = ifp->if_softc;
616 	struct mbuf *m0, *mtmp;
617 	uint32_t queued = 0;
618 
619 	CPSW_TX_LOCK_ASSERT(sc);
620 
621 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
622 	    IFF_DRV_RUNNING)
623 		return;
624 
625 	for (;;) {
626 		/* Get packet from the queue */
627 		IF_DEQUEUE(&ifp->if_snd, m0);
628 		if (m0 == NULL)
629 			break;
630 
631 		mtmp = m_defrag(m0, M_DONTWAIT);
632 		if (mtmp)
633 			m0 = mtmp;
634 
635 		if (cpsw_encap(sc, m0)) {
636 			IF_PREPEND(&ifp->if_snd, m0);
637 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
638 			break;
639 		}
640 		queued++;
641 		BPF_MTAP(ifp, m0);
642 	}
643 
644 	if (!queued)
645 		return;
646 
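	/*
	 * If the transmitter previously reached end-of-queue (EOQ), restart
	 * it by writing the channel 0 TX head descriptor pointer.
	 */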
647 	if (sc->eoq) {
648 		cpsw_write_4(CPSW_CPDMA_TX_HDP(0), cpsw_cpdma_txbd_paddr(sc->txbd_head));
649 		sc->eoq = 0;
650 	}
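	/* Arm the TX watchdog; cpsw_tick()/cpsw_watchdog() count it down once per second. */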
651 	sc->wd_timer = 5;
652 }
653 
654 static void
655 cpsw_stop_locked(struct cpsw_softc *sc)
656 {
657 	struct ifnet *ifp;
658 
659 	CPSW_GLOBAL_LOCK_ASSERT(sc);
660 
661 	ifp = sc->ifp;
662 
663 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
664 		return;
665 
666 	/* Stop tick engine */
667 	callout_stop(&sc->wd_callout);
668 
669 	/* Disable interface */
670 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
671 	sc->wd_timer = 0;
672 
673 	/* Disable interrupts  TODO */
674 
675 }
676 
677 static int
678 cpsw_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
679 {
680 	struct cpsw_softc *sc = ifp->if_softc;
681 	struct ifreq *ifr = (struct ifreq *)data;
682 	int error;
683 	uint32_t flags;
684 
685 	error = 0;
686 
687 	// FIXME
688 	switch (command) {
689 	case SIOCSIFFLAGS:
690 		CPSW_GLOBAL_LOCK(sc);
691 		if (ifp->if_flags & IFF_UP) {
692 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
693 				flags = ifp->if_flags ^ sc->cpsw_if_flags;
694 				if (flags & IFF_PROMISC)
695 					printf("%s: SIOCSIFFLAGS "
696 						"IFF_PROMISC unimplemented\n",
697 						__func__);
698 
699 				if (flags & IFF_ALLMULTI)
700 					printf("%s: SIOCSIFFLAGS "
701 						"IFF_ALLMULTI unimplemented\n",
702 						__func__);
703 			} else {
704 				printf("%s: SIOCSIFFLAGS cpsw_init_locked\n", __func__);
705 				//cpsw_init_locked(sc);
706 			}
707 		}
708 		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
709 			cpsw_stop_locked(sc);
710 
711 		sc->cpsw_if_flags = ifp->if_flags;
712 		CPSW_GLOBAL_UNLOCK(sc);
713 		break;
714 	case SIOCADDMULTI:
715 		printf("%s: SIOCADDMULTI\n",__func__);
716 		break;
717 	case SIOCDELMULTI:
718 		printf("%s: SIOCDELMULTI\n",__func__);
719 		break;
720 	case SIOCSIFCAP:
721 		printf("%s: SIOCSIFCAP\n",__func__);
722 		break;
723 	case SIOCGIFMEDIA:
724 		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
725 		break;
726 	case SIOCSIFMEDIA:
727 		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
728 		break;
729 	default:
730 		error = ether_ioctl(ifp, command, data);
731 	}
732 	return (error);
733 }
734 
735 static void
736 cpsw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
737 {
738 	struct cpsw_softc *sc = ifp->if_softc;
739 	struct mii_data *mii;
740 
741 	CPSW_TX_LOCK(sc);
742 
743 	mii = sc->mii;
744 	mii_pollstat(mii);
745 
746 	ifmr->ifm_active = mii->mii_media_active;
747 	ifmr->ifm_status = mii->mii_media_status;
748 
749 	CPSW_TX_UNLOCK(sc);
750 }
751 
752 
753 static int
754 cpsw_ifmedia_upd(struct ifnet *ifp)
755 {
756 	struct cpsw_softc *sc = ifp->if_softc;
757 
758 	if (ifp->if_flags & IFF_UP) {
759 		CPSW_GLOBAL_LOCK(sc);
760 
761 		sc->cpsw_media_status = sc->mii->mii_media.ifm_media;
762 		mii_mediachg(sc->mii);
763 		cpsw_init_locked(sc);
764 
765 		CPSW_GLOBAL_UNLOCK(sc);
766 	}
767 
768 	return (0);
769 }
770 
771 static void
772 cpsw_intr_rx_thresh(void *arg)
773 {
774 	(void)arg; /* UNUSED */
775 }
776 
777 static void
778 cpsw_intr_rx(void *arg)
779 {
780 	struct cpsw_softc *sc = arg;
781 	CPSW_RX_LOCK(sc);
782 	cpsw_intr_rx_locked(arg);
783 	CPSW_RX_UNLOCK(sc);
784 }
785 
786 static void
787 cpsw_intr_rx_locked(void *arg)
788 {
789 	struct cpsw_softc *sc = arg;
790 	struct cpsw_cpdma_bd bd;
791 	struct ifnet *ifp;
792 	int i;
793 
794 	ifp = sc->ifp;
795 
796 	i = sc->rxbd_head;
797 	cpsw_cpdma_read_rxbd(i, &bd);
798 
799 	while (bd.flags & CPDMA_BD_SOP) {
800 		cpsw_write_4(CPSW_CPDMA_RX_CP(0), cpsw_cpdma_rxbd_paddr(i));
801 
802 		bus_dmamap_sync(sc->mbuf_dtag, sc->rx_dmamap[i], BUS_DMASYNC_POSTREAD);
803 
804 		/* Fill mbuf */
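		/*
		 * bd.pktlen appears to include the 4-byte FCS appended by the
		 * MAC, so it is dropped from the mbuf length here.
		 */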
805 		sc->rx_mbuf[i]->m_hdr.mh_data += bd.bufoff;
806 		sc->rx_mbuf[i]->m_hdr.mh_len = bd.pktlen - 4;
807 		sc->rx_mbuf[i]->m_pkthdr.len = bd.pktlen - 4;
808 		sc->rx_mbuf[i]->m_flags |= M_PKTHDR;
809 		sc->rx_mbuf[i]->m_pkthdr.rcvif = ifp;
810 
811 		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
812 			/* check for valid CRC by looking into pkt_err[5:4] */
813 			if ( (bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0  ) {
814 				sc->rx_mbuf[i]->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
815 				sc->rx_mbuf[i]->m_pkthdr.csum_flags |= CSUM_IP_VALID;
816 				sc->rx_mbuf[i]->m_pkthdr.csum_data = 0xffff;
817 			}
818 		}
819 
820 		bus_dmamap_sync(sc->mbuf_dtag,
821 		    sc->rx_dmamap[i],
822 		    BUS_DMASYNC_POSTREAD);
823 		bus_dmamap_unload(sc->mbuf_dtag, sc->rx_dmamap[i]);
824 
825 		/* Hand the packet over to the network stack */
826 		CPSW_RX_UNLOCK(sc);
827 		(*ifp->if_input)(ifp, sc->rx_mbuf[i]);
828 		sc->rx_mbuf[i] = NULL;
829 		CPSW_RX_LOCK(sc);
830 
831 		/* Allocate new buffer for current descriptor */
832 		cpsw_new_rxbuf(sc, i, 0);
833 
834 		/* We are not at the tail, so the old tail BD should point to the new one */
835 		cpsw_cpdma_write_rxbd_next(sc->rxbd_tail,
836 			cpsw_cpdma_rxbd_paddr(i));
837 
838 		/* Check if EOQ is reached */
839 		if (cpsw_cpdma_read_rxbd_flags(sc->rxbd_tail) & CPDMA_BD_EOQ) {
840 			cpsw_write_4(CPSW_CPDMA_RX_HDP(0), cpsw_cpdma_rxbd_paddr(i));
841 		}
842 		sc->rxbd_tail = i;
843 
844 		/* read next descriptor */
845 		if (++i == CPSW_MAX_RX_BUFFERS)
846 			i = 0;
847 		cpsw_cpdma_read_rxbd(i, &bd);
848 		sc->rxbd_head = i;
849 	}
850 
851 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
852 }
853 
854 static void
855 cpsw_intr_tx(void *arg)
856 {
857 	struct cpsw_softc *sc = arg;
858 	CPSW_TX_LOCK(sc);
859 	cpsw_intr_tx_locked(arg);
860 	CPSW_TX_UNLOCK(sc);
861 }
862 
863 static void
864 cpsw_intr_tx_locked(void *arg)
865 {
866 	struct cpsw_softc *sc = arg;
867 	uint32_t flags;
868 
869 	if (sc->txbd_head == -1)
870 		return;
871 
872 	if (sc->txbd_queue_size < 1) {
873 		/* In some cases this interrupt fires even when there is no
874 		   data in the transmit queue. */
875 		return;
876 	}
877 
878 	/* Disable watchdog */
879 	sc->wd_timer = 0;
880 
881 	flags = cpsw_cpdma_read_txbd_flags(sc->txbd_head);
882 
883 	/* Once the BD has been transmitted, the CPDMA clears the OWNER bit */
884 	if (flags & CPDMA_BD_OWNER)
885 		return;
886 
887 	if (flags & CPDMA_BD_EOQ)
888 		sc->eoq = 1;
889 
890 	/* release dmamap and mbuf */
891 	bus_dmamap_sync(sc->mbuf_dtag, sc->tx_dmamap[sc->txbd_head],
892 	    BUS_DMASYNC_POSTWRITE);
893 	bus_dmamap_unload(sc->mbuf_dtag, sc->tx_dmamap[sc->txbd_head]);
894 	m_freem(sc->tx_mbuf[sc->txbd_head]);
895 	sc->tx_mbuf[sc->txbd_head] = NULL;
896 
897 	cpsw_write_4(CPSW_CPDMA_TX_CP(0), cpsw_cpdma_txbd_paddr(sc->txbd_head));
898 
899 	if (++sc->txbd_head == CPSW_MAX_TX_BUFFERS)
900 		sc->txbd_head = 0;
901 
902 	--sc->txbd_queue_size;
903 
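	/* Acknowledge the interrupt pulses (EOI vector 2 = TX pulse, 1 = RX pulse). */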
904 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 2);
905 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
906 }
907 
908 static void
909 cpsw_intr_misc(void *arg)
910 {
911 	struct cpsw_softc *sc = arg;
912 	uint32_t stat = cpsw_read_4(CPSW_WR_C_MISC_STAT(0));
913 	printf("%s: stat=%x\n",__func__,stat);
914 	/* Acknowledge the misc interrupt pulse (EOI vector 3) */
915 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
916 }
917 
918 static void
919 cpsw_tick(void *msc)
920 {
921 	struct cpsw_softc *sc = msc;
922 
923 	/* Check for TX timeout */
924 	cpsw_watchdog(sc);
925 
926 	mii_tick(sc->mii);
927 
928 	/* Check for media type change */
929 	if(sc->cpsw_media_status != sc->mii->mii_media.ifm_media) {
930 		printf("%s: media type changed (ifm_media=%x)\n",__func__,
931 			sc->mii->mii_media.ifm_media);
932 		cpsw_ifmedia_upd(sc->ifp);
933 	}
934 
935 	/* Schedule another timeout one second from now */
936 	callout_reset(&sc->wd_callout, hz, cpsw_tick, sc);
937 }
938 
939 static void
940 cpsw_watchdog(struct cpsw_softc *sc)
941 {
942 	struct ifnet *ifp;
943 
944 	ifp = sc->ifp;
945 
946 	CPSW_GLOBAL_LOCK(sc);
947 
948 	if (sc->wd_timer == 0 || --sc->wd_timer) {
949 		CPSW_GLOBAL_UNLOCK(sc);
950 		return;
951 	}
952 
953 	ifp->if_oerrors++;
954 	if_printf(ifp, "watchdog timeout\n");
955 
956 	cpsw_stop_locked(sc);
957 	cpsw_init_locked(sc);
958 
959 	CPSW_GLOBAL_UNLOCK(sc);
960 }
961 
962 static void
963 cpsw_init(void *arg)
964 {
965 	struct cpsw_softc *sc = arg;
966 	CPSW_GLOBAL_LOCK(sc);
967 	cpsw_init_locked(arg);
968 	CPSW_GLOBAL_UNLOCK(sc);
969 }
970 
971 static int once = 1;
972 
973 static void
974 cpsw_init_locked(void *arg)
975 {
976 	struct ifnet *ifp;
977 	struct cpsw_softc *sc = arg;
978 	uint8_t  broadcast_address[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
979 	uint32_t next_bdp;
980 	uint32_t i;
981 
982 	ifp = sc->ifp;
983 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
984 		return;
985 
986 	printf("%s: start\n",__func__);
987 
988 	/* Reset writer */
989 	cpsw_write_4(CPSW_WR_SOFT_RESET, 1);
990 	while(cpsw_read_4(CPSW_WR_SOFT_RESET) & 1);
991 
992 	/* Reset SS */
993 	cpsw_write_4(CPSW_SS_SOFT_RESET, 1);
994 	while(cpsw_read_4(CPSW_SS_SOFT_RESET) & 1);
995 
996 	/* Clear table (30) and enable ALE(31) */
997 	if (once)
998 		cpsw_write_4(CPSW_ALE_CONTROL, (3 << 30));
999 	else
1000 		cpsw_write_4(CPSW_ALE_CONTROL, (1 << 31));
1001 	once = 0; // FIXME
1002 
1003 	/* Reset and init Sliver port 1 and 2 */
1004 	for(i=0;i<2;i++) {
1005 		/* Reset */
1006 		cpsw_write_4(CPSW_SL_SOFT_RESET(i), 1);
1007 		while(cpsw_read_4(CPSW_SL_SOFT_RESET(i)) & 1);
1008 		/* Set Slave Mapping */
1009 		cpsw_write_4(CPSW_SL_RX_PRI_MAP(i),0x76543210);
1010 		cpsw_write_4(CPSW_PORT_P_TX_PRI_MAP(i+1),0x33221100);
1011 		cpsw_write_4(CPSW_SL_RX_MAXLEN(i),0x5f2);
1012 		/* Set MAC Address */
1013 		cpsw_write_4(CPSW_PORT_P_SA_HI(i+1), sc->mac_addr[0] |
1014 			(sc->mac_addr[1] <<  8) |
1015 			(sc->mac_addr[2] << 16) |
1016 			(sc->mac_addr[3] << 24));
1017 		cpsw_write_4(CPSW_PORT_P_SA_LO(i+1), sc->mac_addr[4] |
1018 			(sc->mac_addr[5] <<  8));
1019 
1020 		/* Set MACCONTROL for ports 0,1: FULLDUPLEX(1), GMII_EN(5),
1021 		   IFCTL_A(15), IFCTL_B(16) FIXME */
1022 		cpsw_write_4(CPSW_SL_MACCONTROL(i), 1 | (1<<5) | (1<<15));
1023 
1024 		/* Set ALE port to forwarding(3) */
1025 		cpsw_write_4(CPSW_ALE_PORTCTL(i+1), 3);
1026 	}
1027 
1028 	/* Set Host Port Mapping */
1029 	cpsw_write_4(CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
1030 	cpsw_write_4(CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);
1031 
1032 	/* Set ALE port to forwarding(3)*/
1033 	cpsw_write_4(CPSW_ALE_PORTCTL(0), 3);
1034 
1035 	/* Add own MAC address and broadcast to ALE */
1036 	cpsw_ale_uc_entry_set(sc, 0, sc->mac_addr);
1037 	cpsw_ale_mc_entry_set(sc, 7, broadcast_address);
1038 
1039 	cpsw_write_4(CPSW_SS_PTYPE, 0);
1040 	/* Enable statistics for ports 0, 1 and 2 */
1041 	cpsw_write_4(CPSW_SS_STAT_PORT_EN, 7);
1042 
1043 	/* Reset CPDMA */
1044 	cpsw_write_4(CPSW_CPDMA_SOFT_RESET, 1);
1045 	while(cpsw_read_4(CPSW_CPDMA_SOFT_RESET) & 1);
1046 
1047 	for (i = 0; i < 8; i++) {
1048 		cpsw_write_4(CPSW_CPDMA_TX_HDP(i), 0);
1049 		cpsw_write_4(CPSW_CPDMA_RX_HDP(i), 0);
1050 		cpsw_write_4(CPSW_CPDMA_TX_CP(i), 0);
1051 		cpsw_write_4(CPSW_CPDMA_RX_CP(i), 0);
1052 	}
1053 
1054 	cpsw_write_4(CPSW_CPDMA_RX_FREEBUFFER(0), 0);
1055 
1056 	/* Initialize RX Buffer Descriptors */
1057 	i = CPSW_MAX_RX_BUFFERS;
1058 	next_bdp = 0;
1059 	while (i--) {
1060 		cpsw_new_rxbuf(sc, i, next_bdp);
1061 		/* Increment number of free RX buffers */
1062 		//cpsw_write_4(CPSW_CPDMA_RX_FREEBUFFER(0), 1);
1063 		next_bdp = cpsw_cpdma_rxbd_paddr(i);
1064 	}
1065 
1066 	sc->rxbd_head = 0;
1067 	sc->rxbd_tail = CPSW_MAX_RX_BUFFERS-1;
1068 	sc->txbd_head = 0;
1069 	sc->eoq = 1;
1070 	sc->txbd_queue_size = 0;
1071 
1072 	/* Align the IP header on a 4-byte boundary */
1073 	cpsw_write_4(CPSW_CPDMA_RX_BUFFER_OFFSET, 2);
1074 	/* Write channel 0 RX HDP */
1075 	cpsw_write_4(CPSW_CPDMA_RX_HDP(0), cpsw_cpdma_rxbd_paddr(0));
1076 
1077 	/* Clear all interrupt Masks */
1078 	cpsw_write_4(CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
1079 	cpsw_write_4(CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
1080 
1081 	/* Enable TX & RX DMA */
1082 	cpsw_write_4(CPSW_CPDMA_TX_CONTROL, 1);
1083 	cpsw_write_4(CPSW_CPDMA_RX_CONTROL, 1);
1084 
1085 	/* Enable TX and RX interrupt receive for core 0 */
1086 	cpsw_write_4(CPSW_WR_C_TX_EN(0), 0xFF);
1087 	cpsw_write_4(CPSW_WR_C_RX_EN(0), 0xFF);
1088 	//cpsw_write_4(CPSW_WR_C_MISC_EN(0), 0x3F);
1089 
1090 	/* Enable host Error Interrupt */
1091 	cpsw_write_4(CPSW_CPDMA_DMA_INTMASK_SET, 1);
1092 
1093 	/* Enable interrupts for TX and RX Channel 0 */
1094 	cpsw_write_4(CPSW_CPDMA_TX_INTMASK_SET, 1);
1095 	cpsw_write_4(CPSW_CPDMA_RX_INTMASK_SET, 1);
1096 
1097 	/* Ack stalled irqs */
1098 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
1099 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
1100 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 2);
1101 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
1102 
1103 	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
1104 	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
1105 	cpsw_write_4(MDIOCONTROL, (1<<30) | (1<<18) | 0xFF);
1106 
1107 	/* Select MII in GMII_SEL, Internal Delay mode */
1108 	//ti_scm_reg_write_4(0x650, 0);
1109 
1110 	/* Activate network interface */
1111 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
1112 	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1113 	sc->wd_timer = 0;
1114 	callout_reset(&sc->wd_callout, hz, cpsw_tick, sc);
1115 }
1116 
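/*
 * ALE table accessors.  Per the TRM, each ALE entry is 68 bits wide and is
 * accessed through the three TBLW0-2 windows; TBLCTL selects the entry index,
 * and setting bit 31 commits the windows back to the table.
 */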
1117 static void
1118 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
1119 {
1120 	cpsw_write_4(CPSW_ALE_TBLCTL, idx & 1023);
1121 	ale_entry[0] = cpsw_read_4(CPSW_ALE_TBLW0);
1122 	ale_entry[1] = cpsw_read_4(CPSW_ALE_TBLW1);
1123 	ale_entry[2] = cpsw_read_4(CPSW_ALE_TBLW2);
1124 }
1125 
1126 static void
1127 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
1128 {
1129 	cpsw_write_4(CPSW_ALE_TBLW0, ale_entry[0]);
1130 	cpsw_write_4(CPSW_ALE_TBLW1, ale_entry[1]);
1131 	cpsw_write_4(CPSW_ALE_TBLW2, ale_entry[2]);
1132 	cpsw_write_4(CPSW_ALE_TBLCTL, (idx & 1023) | (1 << 31));
1133 }
1134 
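/*
 * Return the index of the ALE entry matching the given MAC address, or
 * CPSW_MAX_ALE_ENTRIES if no entry matches.
 */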
1135 static int
1136 cpsw_ale_find_entry_by_mac(struct cpsw_softc *sc, uint8_t *mac)
1137 {
1138 	int i;
1139 	uint32_t ale_entry[3];
1140 	for(i=0; i< CPSW_MAX_ALE_ENTRIES; i++) {
1141 		cpsw_ale_read_entry(sc, i, ale_entry);
1142 		if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) &&
1143 		    (((ale_entry[1] >> 0) & 0xFF) == mac[1]) &&
1144 		    (((ale_entry[0] >>24) & 0xFF) == mac[2]) &&
1145 		    (((ale_entry[0] >>16) & 0xFF) == mac[3]) &&
1146 		    (((ale_entry[0] >> 8) & 0xFF) == mac[4]) &&
1147 		    (((ale_entry[0] >> 0) & 0xFF) == mac[5])) {
1148 			return (i);
1149 		}
1150 	}
1151 	return CPSW_MAX_ALE_ENTRIES;
1152 }
1153 
1154 static int
1155 cpsw_ale_find_free_entry(struct cpsw_softc *sc)
1156 {
1157 	int i;
1158 	uint32_t ale_entry[3];
1159 	for(i=0; i< CPSW_MAX_ALE_ENTRIES; i++) {
1160 		cpsw_ale_read_entry(sc, i, ale_entry);
1161 		/* Entry Type[61:60] is 0 for free entry */
1162 		if (((ale_entry[1] >> 28) & 3) == 0) {
1163 			return i;
1164 		}
1165 	}
1166 	return CPSW_MAX_ALE_ENTRIES;
1167 }
1168 
1169 
1170 static int
1171 cpsw_ale_uc_entry_set(struct cpsw_softc *sc, uint8_t port, uint8_t *mac)
1172 {
1173 	int i;
1174 	uint32_t ale_entry[3];
1175 
1176 	if ((i = cpsw_ale_find_entry_by_mac(sc, mac)) == CPSW_MAX_ALE_ENTRIES) {
1177 		i = cpsw_ale_find_free_entry(sc);
1178 	}
1179 
1180 	if (i == CPSW_MAX_ALE_ENTRIES)
1181 		return (ENOMEM);
1182 
1183 	/* Set MAC address */
1184 	ale_entry[0] = mac[2]<<24 | mac[3]<<16 | mac[4]<<8 | mac[5];
1185 	ale_entry[1] = mac[0]<<8 | mac[1];
1186 
1187 	/* Entry type[61:60] is addr entry(1) */
1188 	ale_entry[1] |= 0x10<<24;
1189 
1190 	/* Set the port number [67:66] */
1191 	ale_entry[2] = (port & 3) << 2;
1192 
1193 	cpsw_ale_write_entry(sc, i, ale_entry);
1194 
1195 	return 0;
1196 }
1197 
1198 static int
1199 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, uint8_t *mac)
1200 {
1201 	int i;
1202 	uint32_t ale_entry[3];
1203 
1204 	if ((i = cpsw_ale_find_entry_by_mac(sc, mac)) == CPSW_MAX_ALE_ENTRIES) {
1205 		i = cpsw_ale_find_free_entry(sc);
1206 	}
1207 
1208 	if (i == CPSW_MAX_ALE_ENTRIES)
1209 		return (ENOMEM);
1210 
1211 	/* Set MAC address */
1212 	ale_entry[0] = mac[2]<<24 | mac[3]<<16 | mac[4]<<8 | mac[5];
1213 	ale_entry[1] = mac[0]<<8 | mac[1];
1214 
1215 	/* Entry type[61:60] is addr entry(1), Mcast fwd state[63:62] is fw(3)*/
1216 	ale_entry[1] |= 0xd0<<24;
1217 
1218 	/* Set portmask [68:66] */
1219 	ale_entry[2] = (portmap & 7) << 2;
1220 
1221 	cpsw_ale_write_entry(sc, i, ale_entry);
1222 
1223 	return 0;
1224 }
1225 
1226 #ifdef CPSW_DEBUG
1227 static void
1228 cpsw_ale_dump_table(struct cpsw_softc *sc) {
1229 	int i;
1230 	uint32_t ale_entry[3];
1231 	for(i=0; i< CPSW_MAX_ALE_ENTRIES; i++) {
1232 		cpsw_ale_read_entry(sc, i, ale_entry);
1233 		if (ale_entry[0] || ale_entry[1] || ale_entry[2]) {
1234 			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[0],
1235 				ale_entry[1],ale_entry[2]);
1236 			printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ",
1237 				(ale_entry[1] >> 8) & 0xFF,
1238 				(ale_entry[1] >> 0) & 0xFF,
1239 				(ale_entry[0] >>24) & 0xFF,
1240 				(ale_entry[0] >>16) & 0xFF,
1241 				(ale_entry[0] >> 8) & 0xFF,
1242 				(ale_entry[0] >> 0) & 0xFF);
1243 			printf( ((ale_entry[1]>>8)&1) ? "mcast " : "ucast ");
1244 			printf("type: %u ", (ale_entry[1]>>28)&3);
1245 			printf("port: %u ", (ale_entry[2]>>2)&7);
1246 			printf("\n");
1247 		}
1248 	}
1249 }
1250 #endif
1251