xref: /freebsd/sys/arm/ti/cpsw/if_cpsw.c (revision 724b4bfdf1306e4f2c451b6d146fe0fe0353b2c8)
1 /*-
2  * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /*
28  * TI 3 Port Switch Ethernet (CPSW) Driver
29  * Found in TI8148, AM335x SoCs
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/endian.h>
38 #include <sys/mbuf.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/kernel.h>
42 #include <sys/module.h>
43 #include <sys/socket.h>
44 #include <sys/sysctl.h>
45 
46 #include <net/ethernet.h>
47 #include <net/bpf.h>
48 #include <net/if.h>
49 #include <net/if_arp.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 #include <net/if_types.h>
53 #include <net/if_vlan_var.h>
54 
55 #include <netinet/in_systm.h>
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 
59 #include <sys/sockio.h>
60 #include <sys/bus.h>
61 #include <machine/bus.h>
62 #include <sys/rman.h>
63 #include <machine/resource.h>
64 
65 #include <dev/mii/mii.h>
66 #include <dev/mii/miivar.h>
67 
68 #include <dev/fdt/fdt_common.h>
69 #include <dev/ofw/ofw_bus.h>
70 #include <dev/ofw/ofw_bus_subr.h>
71 
72 #include "if_cpswreg.h"
73 #include "if_cpswvar.h"
74 
75 #include <arm/ti/ti_scm.h>
76 
77 #include "miibus_if.h"
78 
79 static int cpsw_probe(device_t dev);
80 static int cpsw_attach(device_t dev);
81 static int cpsw_detach(device_t dev);
82 static int cpsw_shutdown(device_t dev);
83 static int cpsw_suspend(device_t dev);
84 static int cpsw_resume(device_t dev);
85 
86 static int cpsw_miibus_readreg(device_t dev, int phy, int reg);
87 static int cpsw_miibus_writereg(device_t dev, int phy, int reg, int value);
88 
89 static int cpsw_ifmedia_upd(struct ifnet *ifp);
90 static void cpsw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
91 
92 static void cpsw_init(void *arg);
93 static void cpsw_init_locked(void *arg);
94 static void cpsw_start(struct ifnet *ifp);
95 static void cpsw_start_locked(struct ifnet *ifp);
96 static void cpsw_stop_locked(struct cpsw_softc *sc);
97 static int cpsw_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
98 static int cpsw_allocate_dma(struct cpsw_softc *sc);
99 static int cpsw_free_dma(struct cpsw_softc *sc);
100 static int cpsw_new_rxbuf(struct cpsw_softc *sc, uint32_t i, uint32_t next);
101 static void cpsw_watchdog(struct cpsw_softc *sc);
102 
103 static void cpsw_intr_rx_thresh(void *arg);
104 static void cpsw_intr_rx(void *arg);
105 static void cpsw_intr_rx_locked(void *arg);
106 static void cpsw_intr_tx(void *arg);
107 static void cpsw_intr_tx_locked(void *arg);
108 static void cpsw_intr_misc(void *arg);
109 
110 static void cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry);
111 static void cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry);
112 static int cpsw_ale_uc_entry_set(struct cpsw_softc *sc, uint8_t port, uint8_t *mac);
113 static int cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, uint8_t *mac);
114 #ifdef CPSW_DEBUG
115 static void cpsw_ale_dump_table(struct cpsw_softc *sc);
116 #endif
117 
118 static device_method_t cpsw_methods[] = {
119 	/* Device interface */
120 	DEVMETHOD(device_probe,		cpsw_probe),
121 	DEVMETHOD(device_attach,	cpsw_attach),
122 	DEVMETHOD(device_detach,	cpsw_detach),
123 	DEVMETHOD(device_shutdown,	cpsw_shutdown),
124 	DEVMETHOD(device_suspend,	cpsw_suspend),
125 	DEVMETHOD(device_resume,	cpsw_resume),
126 	/* MII interface */
127 	DEVMETHOD(miibus_readreg,	cpsw_miibus_readreg),
128 	DEVMETHOD(miibus_writereg,	cpsw_miibus_writereg),
129 	{ 0, 0 }
130 };
131 
132 static driver_t cpsw_driver = {
133 	"cpsw",
134 	cpsw_methods,
135 	sizeof(struct cpsw_softc),
136 };
137 
138 static devclass_t cpsw_devclass;
139 
140 
141 DRIVER_MODULE(cpsw, simplebus, cpsw_driver, cpsw_devclass, 0, 0);
142 DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0);
143 MODULE_DEPEND(cpsw, ether, 1, 1, 1);
144 MODULE_DEPEND(cpsw, miibus, 1, 1, 1);
145 
146 static struct resource_spec res_spec[] = {
147 	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
148 	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
149 	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
150 	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
151 	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
152 	{ -1, 0 }
153 };
154 
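/*
 * Per-IRQ handlers, listed in the same order as the SYS_RES_IRQ entries
 * in res_spec above (RX threshold, RX, TX, misc).
 */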
155 static struct {
156 	driver_intr_t *handler;
157 	char *description;
158 } cpsw_intrs[CPSW_INTR_COUNT + 1] = {
159 	{ cpsw_intr_rx_thresh,	"CPSW RX threshold interrupt" },
160 	{ cpsw_intr_rx,		"CPSW RX interrupt" },
161 	{ cpsw_intr_tx,		"CPSW TX interrupt" },
162 	{ cpsw_intr_misc,	"CPSW misc interrupt" },
163 };
164 
165 /* Locking macros */
166 #define CPSW_TX_LOCK(sc) do {					\
167 		mtx_assert(&(sc)->rx_lock, MA_NOTOWNED);		\
168 		mtx_lock(&(sc)->tx_lock);				\
169 } while (0)
170 
171 #define CPSW_TX_UNLOCK(sc)	mtx_unlock(&(sc)->tx_lock)
172 #define CPSW_TX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->tx_lock, MA_OWNED)
173 
174 #define CPSW_RX_LOCK(sc) do {					\
175 		mtx_assert(&(sc)->tx_lock, MA_NOTOWNED);		\
176 		mtx_lock(&(sc)->rx_lock);				\
177 } while (0)
178 
179 #define CPSW_RX_UNLOCK(sc)		mtx_unlock(&(sc)->rx_lock)
180 #define CPSW_RX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->rx_lock, MA_OWNED)
181 
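/*
 * The global lock always takes tx_lock before rx_lock; acquiring both in a
 * fixed order (and panicking when exactly one of them is already held)
 * avoids lock-order reversals between the TX and RX paths.
 */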
182 #define CPSW_GLOBAL_LOCK(sc) do {					\
183 		if ((mtx_owned(&(sc)->tx_lock) ? 1 : 0) !=	\
184 		    (mtx_owned(&(sc)->rx_lock) ? 1 : 0)) {		\
185 			panic("cpsw: possible deadlock detected!");	\
186 		}							\
187 		mtx_lock(&(sc)->tx_lock);				\
188 		mtx_lock(&(sc)->rx_lock);				\
189 } while (0)
190 
191 #define CPSW_GLOBAL_UNLOCK(sc) do {					\
192 		CPSW_RX_UNLOCK(sc);				\
193 		CPSW_TX_UNLOCK(sc);				\
194 } while (0)
195 
196 #define CPSW_GLOBAL_LOCK_ASSERT(sc) do {				\
197 		CPSW_TX_LOCK_ASSERT(sc);				\
198 		CPSW_RX_LOCK_ASSERT(sc);				\
199 } while (0)
200 
201 
202 static int
203 cpsw_probe(device_t dev)
204 {
205 
206 	if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
207 		return (ENXIO);
208 
209 	device_set_desc(dev, "3-port Switch Ethernet Subsystem");
210 	return (BUS_PROBE_DEFAULT);
211 }
212 
213 static int
214 cpsw_attach(device_t dev)
215 {
216 	struct cpsw_softc *sc;
217 	struct mii_softc *miisc;
218 	struct ifnet *ifp;
219 	int i, error, phy;
220 	uint32_t reg;
221 
222 	sc = device_get_softc(dev);
223 	sc->dev = dev;
224 	sc->node = ofw_bus_get_node(dev);
225 
226 	/* Get phy address from fdt */
227 	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) != 0) {
228 		device_printf(dev, "failed to get PHY address from FDT\n");
229 		return (ENXIO);
230 	}
231 	/* Initialize mutexes */
232 	mtx_init(&sc->tx_lock, device_get_nameunit(dev),
233 		"cpsw TX lock", MTX_DEF);
234 	mtx_init(&sc->rx_lock, device_get_nameunit(dev),
235 		"cpsw RX lock", MTX_DEF);
236 
237 	/* Allocate IO and IRQ resources */
238 	error = bus_alloc_resources(dev, res_spec, sc->res);
239 	if (error) {
240 		device_printf(dev, "could not allocate resources\n");
241 		cpsw_detach(dev);
242 		return (ENXIO);
243 	}
244 
245 	reg = cpsw_read_4(CPSW_SS_IDVER);
246 	device_printf(dev, "Version %d.%d (%d)\n", (reg >> 8 & 0x7),
247 		reg & 0xFF, (reg >> 11) & 0x1F);
248 
249 	/* Allocate DMA, buffers, buffer descriptors */
250 	error = cpsw_allocate_dma(sc);
251 	if (error) {
252 		cpsw_detach(dev);
253 		return (ENXIO);
254 	}
255 
256 	//cpsw_add_sysctls(sc); TODO
257 
258 	/* Allocate network interface */
259 	ifp = sc->ifp = if_alloc(IFT_ETHER);
260 	if (ifp == NULL) {
261 		device_printf(dev, "if_alloc() failed\n");
262 		cpsw_detach(dev);
263 		return (ENOMEM);
264 	}
265 
266 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
267 	ifp->if_softc = sc;
268 	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
269 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN?
270 	ifp->if_capenable = ifp->if_capabilities;
271 
272 	ifp->if_init = cpsw_init;
273 	ifp->if_start = cpsw_start;
274 	ifp->if_ioctl = cpsw_ioctl;
275 
276 	ifp->if_snd.ifq_drv_maxlen = CPSW_MAX_TX_BUFFERS - 1;
277 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
278 	IFQ_SET_READY(&ifp->if_snd);
279 
280 	/* Get high part of MAC address from control module (mac_id0_hi) */
281 	ti_scm_reg_read_4(0x634, &reg);
282 	sc->mac_addr[0] = reg & 0xFF;
283 	sc->mac_addr[1] = (reg >>  8) & 0xFF;
284 	sc->mac_addr[2] = (reg >> 16) & 0xFF;
285 	sc->mac_addr[3] = (reg >> 24) & 0xFF;
286 
287 	/* Get low part of MAC address from control module (mac_id0_lo) */
288 	ti_scm_reg_read_4(0x630, &reg);
289 	sc->mac_addr[4] = reg & 0xFF;
290 	sc->mac_addr[5] = (reg >>  8) & 0xFF;
291 
292 	ether_ifattach(ifp, sc->mac_addr);
293 	callout_init(&sc->wd_callout, 0);
294 
295 	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
296 	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
297 	cpsw_write_4(MDIOCONTROL, (1<<30) | (1<<18) | 0xFF);
298 
299 	/* Attach PHY(s) */
300 	error = mii_attach(dev, &sc->miibus, ifp, cpsw_ifmedia_upd,
301 	    cpsw_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
302 	if (error) {
303 		device_printf(dev, "attaching PHYs failed\n");
304 		cpsw_detach(dev);
305 		return (error);
306 	}
307 	sc->mii = device_get_softc(sc->miibus);
308 
309 	/* Tell the MAC where to find the PHY so autoneg works */
310 	miisc = LIST_FIRST(&sc->mii->mii_phys);
311 
312 	/* Select PHY and enable interrupts */
313 	cpsw_write_4(MDIOUSERPHYSEL0, (1 << 6) | (miisc->mii_phy & 0x1F));
314 
315 	/* Attach interrupt handlers */
316 	for (i = 1; i <= CPSW_INTR_COUNT; ++i) {
317 		error = bus_setup_intr(dev, sc->res[i],
318 		    INTR_TYPE_NET | INTR_MPSAFE,
319 		    NULL, *cpsw_intrs[i - 1].handler,
320 		    sc, &sc->ih_cookie[i - 1]);
321 		if (error) {
322 			device_printf(dev, "could not setup %s\n",
323 			    cpsw_intrs[i - 1].description);
324 			cpsw_detach(dev);
325 			return (error);
326 		}
327 	}
328 
329 	return (0);
330 }
331 
332 static int
333 cpsw_detach(device_t dev)
334 {
335 	struct cpsw_softc *sc;
336 	int error,i;
337 
338 	sc = device_get_softc(dev);
339 
340 	/* Stop controller and free TX queue */
341 	if (sc->ifp)
342 		cpsw_shutdown(dev);
343 
344 	/* Wait for the tick callout to finish */
345 	callout_drain(&sc->wd_callout);
346 
347 	/* Stop and release all interrupts */
348 	for (i = 0; i < CPSW_INTR_COUNT; ++i) {
349 		if (!sc->ih_cookie[i])
350 			continue;
351 
352 		error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
353 		if (error)
354 			device_printf(dev, "could not release %s\n",
355 			    cpsw_intrs[i].description);
356 	}
357 
358 	/* Detach network interface */
359 	if (sc->ifp) {
360 		ether_ifdetach(sc->ifp);
361 		if_free(sc->ifp);
362 	}
363 
364 	/* Free DMA resources */
365 	cpsw_free_dma(sc);
366 
367 	/* Free IO memory handler */
368 	bus_release_resources(dev, res_spec, sc->res);
369 
370 	/* Destroy mutexes */
371 	mtx_destroy(&sc->rx_lock);
372 	mtx_destroy(&sc->tx_lock);
373 
374 	return (0);
375 }
376 
377 static int
378 cpsw_suspend(device_t dev)
379 {
380 
381 	device_printf(dev, "%s\n", __func__);
382 	return (0);
383 }
384 
385 static int
386 cpsw_resume(device_t dev)
387 {
388 
389 	device_printf(dev, "%s\n", __func__);
390 	return (0);
391 }
392 
393 static int
394 cpsw_shutdown(device_t dev)
395 {
396 	struct cpsw_softc *sc = device_get_softc(dev);
397 
398 	CPSW_GLOBAL_LOCK(sc);
399 
400 	cpsw_stop_locked(sc);
401 
402 	CPSW_GLOBAL_UNLOCK(sc);
403 
404 	return (0);
405 }
406 
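/*
 * MDIOUSERACCESS0 layout (per the AM335x TRM): bit 31 GO, bit 30 WRITE,
 * bit 29 ACK, bits 25:21 REGADR, bits 20:16 PHYADR, bits 15:0 DATA.
 */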
407 static int
408 cpsw_miibus_readreg(device_t dev, int phy, int reg)
409 {
410 	struct cpsw_softc *sc;
411 	uint32_t r;
412 	uint32_t retries = CPSW_MIIBUS_RETRIES;
413 
414 	sc = device_get_softc(dev);
415 
416 	/* Wait until interface is ready by watching GO bit */
417 	while(--retries && (cpsw_read_4(MDIOUSERACCESS0) & (1 << 31)) )
418 		DELAY(CPSW_MIIBUS_DELAY);
419 	if (!retries)
420 		device_printf(dev, "Timeout while waiting for MDIO.\n");
421 
422 	/* Set GO, phy and reg */
423 	cpsw_write_4(MDIOUSERACCESS0, (1 << 31) |
424 		((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));
425 
426 	while(--retries && (cpsw_read_4(MDIOUSERACCESS0) & (1 << 31)) )
427 		DELAY(CPSW_MIIBUS_DELAY);
428 	if (!retries)
429 		device_printf(dev, "Timeout while waiting for MDIO.\n");
430 
431 	r = cpsw_read_4(MDIOUSERACCESS0);
432 	/* Check for ACK */
433 	if(r & (1<<29)) {
434 		return (r & 0xFFFF);
435 	}
436 	device_printf(dev, "Failed to read from PHY.\n");
437 	return 0;
438 }
439 
440 static int
441 cpsw_miibus_writereg(device_t dev, int phy, int reg, int value)
442 {
443 	struct cpsw_softc *sc;
444 	uint32_t retries = CPSW_MIIBUS_RETRIES;
445 
446 	sc = device_get_softc(dev);
447 
448 	/* Wait until interface is ready by watching GO bit */
449 	while(--retries && (cpsw_read_4(MDIOUSERACCESS0) & (1 << 31)) )
450 		DELAY(CPSW_MIIBUS_DELAY);
451 	if (!retries)
452 		device_printf(dev, "Timeout while waiting for MDIO.\n");
453 
454 	/* Set GO, WRITE, phy, reg and value */
455 	cpsw_write_4(MDIOUSERACCESS0, (value & 0xFFFF) | (3 << 30) |
456 		((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));
457 
458 	while(--retries && (cpsw_read_4(MDIOUSERACCESS0) & (1 << 31)) )
459 		DELAY(CPSW_MIIBUS_DELAY);
460 	if (!retries)
461 		device_printf(dev, "Timeout while waiting for MDIO.\n");
462 
463 	/* Check for ACK */
464 	if(cpsw_read_4(MDIOUSERACCESS0) & (1<<29)) {
465 		return 0;
466 	}
467 	device_printf(dev, "Failed to write to PHY.\n");
468 
469 	return 0;
470 }
471 
472 static int
473 cpsw_allocate_dma(struct cpsw_softc *sc)
474 {
475 	int err;
476 	int i;
477 
478 	/* Allocate a busdma tag and maps for TX and RX mbufs. */
479 	err = bus_dma_tag_create(
480 		bus_get_dma_tag(sc->dev),	/* parent */
481 		1, 0,				/* alignment, boundary */
482 		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
483 		BUS_SPACE_MAXADDR,		/* highaddr */
484 		NULL, NULL,			/* filtfunc, filtfuncarg */
485 		MCLBYTES, 1,			/* maxsize, nsegments */
486 		MCLBYTES, 0,			/* maxsegsz, flags */
487 		NULL, NULL,			/* lockfunc, lockfuncarg */
488 		&sc->mbuf_dtag);		/* dmatag */
489 
490 	if (err)
491 		return (ENOMEM);
492 	for (i = 0; i < CPSW_MAX_TX_BUFFERS; i++) {
493 		if (bus_dmamap_create(sc->mbuf_dtag, 0, &sc->tx_dmamap[i])) {
494 			if_printf(sc->ifp, "failed to create dmamap for tx mbuf\n");
495 			return (ENOMEM);
496 		}
497 	}
498 
499 	for (i = 0; i < CPSW_MAX_RX_BUFFERS; i++) {
500 		if (bus_dmamap_create(sc->mbuf_dtag, 0, &sc->rx_dmamap[i])) {
501 			if_printf(sc->ifp, "failed to create dmamap for rx mbuf\n");
502 			return (ENOMEM);
503 		}
504 	}
505 
506 	return (0);
507 }
508 
509 static int
510 cpsw_free_dma(struct cpsw_softc *sc)
511 {
512 	(void)sc; /* UNUSED */
513 	// TODO
514 	return 0;
515 }
516 
517 static int
518 cpsw_new_rxbuf(struct cpsw_softc *sc, uint32_t i, uint32_t next)
519 {
520 	bus_dma_segment_t seg[1];
521 	struct cpsw_cpdma_bd bd;
522 	int error;
523 	int nsegs;
524 
525 	sc->rx_mbuf[i] = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
526 	if (sc->rx_mbuf[i] == NULL)
527 		return (ENOBUFS);
528 
529 	sc->rx_mbuf[i]->m_len = sc->rx_mbuf[i]->m_pkthdr.len = sc->rx_mbuf[i]->m_ext.ext_size;
530 
531 	error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->rx_dmamap[i],
532 		sc->rx_mbuf[i], seg, &nsegs, BUS_DMA_NOWAIT);
533 
534 	KASSERT(nsegs == 1, ("Too many segments returned!"));
535 	if (nsegs != 1 || error)
536 		panic("%s: nsegs(%d), error(%d)",__func__, nsegs, error);
537 
538 	bus_dmamap_sync(sc->mbuf_dtag, sc->rx_dmamap[i], BUS_DMASYNC_PREREAD);
539 
540 	/* Create and submit a new RX descriptor */
541 	bd.next = next;
542 	bd.bufptr = seg->ds_addr;
543 	bd.buflen = MCLBYTES-1;
544 	bd.bufoff = 2; /* align the IP header on a 4-byte boundary */
545 	bd.pktlen = 0;
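	/* OWNER hands the descriptor to the hardware; CPDMA clears it on completion */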
546 	bd.flags = CPDMA_BD_OWNER;
547 	cpsw_cpdma_write_rxbd(i, &bd);
548 
549 	return (0);
550 }
551 
552 
553 static int
554 cpsw_encap(struct cpsw_softc *sc, struct mbuf *m0)
555 {
556 	bus_dma_segment_t seg[1];
557 	struct cpsw_cpdma_bd bd;
558 	int error;
559 	int nsegs;
560 	int idx;
561 
562 	if (sc->txbd_queue_size == CPSW_MAX_TX_BUFFERS)
563 		return (ENOBUFS);
564 
565 	idx = sc->txbd_head + sc->txbd_queue_size;
566 
567 	if (idx >= (CPSW_MAX_TX_BUFFERS) )
568 		idx -= CPSW_MAX_TX_BUFFERS;
569 
570 	/* Create mapping in DMA memory */
571 	error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->tx_dmamap[idx], m0, seg, &nsegs,
572 	    BUS_DMA_NOWAIT);
573 	sc->tc[idx]++;
574 	if (error != 0 || nsegs != 1 ) {
575 		bus_dmamap_unload(sc->mbuf_dtag, sc->tx_dmamap[idx]);
576 		return ((error != 0) ? error : -1);
577 	}
578 	bus_dmamap_sync(sc->mbuf_dtag, sc->tx_dmamap[idx], BUS_DMASYNC_PREWRITE);
579 
580 	/* Fill descriptor data */
581 	bd.next = 0;
582 	bd.bufptr = seg->ds_addr;
583 	bd.bufoff = 0;
584 	bd.buflen = seg->ds_len;
585 	bd.pktlen = seg->ds_len;
586 	/* Set SOP (bit 15), EOP (bit 14) and OWNER (bit 13) */
587 	bd.flags = (7<<13);
588 
589 	/* Write descriptor */
590 	cpsw_cpdma_write_txbd(idx, &bd);
591 	sc->tx_mbuf[idx] = m0;
592 
593 	/* Previous descriptor should point to us */
594 	cpsw_cpdma_write_txbd_next(((idx-1<0)?(CPSW_MAX_TX_BUFFERS-1):(idx-1)),
595 		cpsw_cpdma_txbd_paddr(idx));
596 
597 	sc->txbd_queue_size++;
598 
599 	return (0);
600 }
601 
602 /*
603  * Pad the packet to the minimum length for Ethernet.
604  * (CPSW hardware doesn't do this for us.)
605  */
606 static int
607 cpsw_pad(struct mbuf *m)
608 {
609 	int padlen = ETHER_MIN_LEN - m->m_pkthdr.len;
610 	struct mbuf *last, *n;
611 
612 	if (padlen <= 0)
613 		return (0);
614 
615 	/* If there's only the packet-header and we can pad there, use it. */
616 	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
617 	    M_TRAILINGSPACE(m) >= padlen) {
618 		last = m;
619 	} else {
620 		/*
621 		 * Walk packet chain to find last mbuf. We will either
622 		 * pad there, or append a new mbuf and pad it.
623 		 */
624 		for (last = m; last->m_next != NULL; last = last->m_next)
625 			;
626 		if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
627 			/* Allocate new empty mbuf, pad it. Compact later. */
628 			MGET(n, M_DONTWAIT, MT_DATA);
629 			if (n == NULL)
630 				return (ENOBUFS);
631 			n->m_len = 0;
632 			last->m_next = n;
633 			last = n;
634 		}
635 	}
636 
637 	/* Now zero the pad area. */
638 	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
639 	last->m_len += padlen;
640 	m->m_pkthdr.len += padlen;
641 
642 	return (0);
643 }
644 
645 static void
646 cpsw_start(struct ifnet *ifp)
647 {
648 	struct cpsw_softc *sc = ifp->if_softc;
649 
650 	CPSW_TX_LOCK(sc);
651 	cpsw_start_locked(ifp);
652 	CPSW_TX_UNLOCK(sc);
653 }
654 
655 static void
656 cpsw_start_locked(struct ifnet *ifp)
657 {
658 	struct cpsw_softc *sc = ifp->if_softc;
659 	struct mbuf *m0, *mtmp;
660 	uint32_t queued = 0;
661 	int error;
662 
663 	CPSW_TX_LOCK_ASSERT(sc);
664 
665 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
666 	    IFF_DRV_RUNNING)
667 		return;
668 
669 	for (;;) {
670 		/* Get packet from the queue */
671 		IF_DEQUEUE(&ifp->if_snd, m0);
672 		if (m0 == NULL)
673 			break;
674 
675 		if ((error = cpsw_pad(m0))) {
676 			m_freem(m0);
677 			continue;
678 		}
679 
680 		mtmp = m_defrag(m0, M_NOWAIT);
681 		if (mtmp)
682 			m0 = mtmp;
683 
684 		if (cpsw_encap(sc, m0)) {
685 			IF_PREPEND(&ifp->if_snd, m0);
686 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
687 			break;
688 		}
689 		queued++;
690 		BPF_MTAP(ifp, m0);
691 	}
692 
693 	if (!queued)
694 		return;
695 
696 	if (sc->eoq) {
697 		cpsw_write_4(CPSW_CPDMA_TX_HDP(0), cpsw_cpdma_txbd_paddr(sc->txbd_head));
698 		sc->eoq = 0;
699 	}
700 	sc->wd_timer = 5;
701 }
702 
703 static void
704 cpsw_stop_locked(struct cpsw_softc *sc)
705 {
706 	struct ifnet *ifp;
707 
708 	CPSW_GLOBAL_LOCK_ASSERT(sc);
709 
710 	ifp = sc->ifp;
711 
712 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
713 		return;
714 
715 	/* Stop tick engine */
716 	callout_stop(&sc->wd_callout);
717 
718 	/* Disable interface */
719 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
720 	sc->wd_timer = 0;
721 
722 	/* Disable interrupts  TODO */
723 
724 }
725 
726 static int
727 cpsw_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
728 {
729 	struct cpsw_softc *sc = ifp->if_softc;
730 	struct ifreq *ifr = (struct ifreq *)data;
731 	int error;
732 	uint32_t flags;
733 
734 	error = 0;
735 
736 	// FIXME
737 	switch (command) {
738 	case SIOCSIFFLAGS:
739 		CPSW_GLOBAL_LOCK(sc);
740 		if (ifp->if_flags & IFF_UP) {
741 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
742 				flags = ifp->if_flags ^ sc->cpsw_if_flags;
743 				if (flags & IFF_PROMISC)
744 					printf("%s: SIOCSIFFLAGS "
745 						"IFF_PROMISC unimplemented\n",
746 						__func__);
747 
748 				if (flags & IFF_ALLMULTI)
749 					printf("%s: SIOCSIFFLAGS "
750 						"IFF_ALLMULTI unimplemented\n",
751 						__func__);
752 			} else {
753 				printf("%s: SIOCSIFFLAGS cpsw_init_locked\n", __func__);
754 				//cpsw_init_locked(sc);
755 			}
756 		}
757 		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
758 			cpsw_stop_locked(sc);
759 
760 		sc->cpsw_if_flags = ifp->if_flags;
761 		CPSW_GLOBAL_UNLOCK(sc);
762 		break;
763 	case SIOCADDMULTI:
764 		printf("%s: SIOCADDMULTI\n",__func__);
765 		break;
766 	case SIOCDELMULTI:
767 		printf("%s: SIOCDELMULTI\n",__func__);
768 		break;
769 	case SIOCSIFCAP:
770 		printf("%s: SIOCSIFCAP\n",__func__);
771 		break;
772 	case SIOCGIFMEDIA:
773 		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
774 		break;
775 	case SIOCSIFMEDIA:
776 		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
777 		break;
778 	default:
779 		error = ether_ioctl(ifp, command, data);
780 	}
781 	return (error);
782 }
783 
784 static void
785 cpsw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
786 {
787 	struct cpsw_softc *sc = ifp->if_softc;
788 	struct mii_data *mii;
789 
790 	CPSW_TX_LOCK(sc);
791 
792 	mii = sc->mii;
793 	mii_pollstat(mii);
794 
795 	ifmr->ifm_active = mii->mii_media_active;
796 	ifmr->ifm_status = mii->mii_media_status;
797 
798 	CPSW_TX_UNLOCK(sc);
799 }
800 
801 
802 static int
803 cpsw_ifmedia_upd(struct ifnet *ifp)
804 {
805 	struct cpsw_softc *sc = ifp->if_softc;
806 
807 	if (ifp->if_flags & IFF_UP) {
808 		CPSW_GLOBAL_LOCK(sc);
809 
810 		sc->cpsw_media_status = sc->mii->mii_media.ifm_media;
811 		mii_mediachg(sc->mii);
812 		cpsw_init_locked(sc);
813 
814 		CPSW_GLOBAL_UNLOCK(sc);
815 	}
816 
817 	return (0);
818 }
819 
820 static void
821 cpsw_intr_rx_thresh(void *arg)
822 {
823 	(void)arg; /* UNUSED */
824 }
825 
826 static void
827 cpsw_intr_rx(void *arg)
828 {
829 	struct cpsw_softc *sc = arg;
830 	CPSW_RX_LOCK(sc);
831 	cpsw_intr_rx_locked(arg);
832 	CPSW_RX_UNLOCK(sc);
833 }
834 
835 static void
836 cpsw_intr_rx_locked(void *arg)
837 {
838 	struct cpsw_softc *sc = arg;
839 	struct cpsw_cpdma_bd bd;
840 	struct ifnet *ifp;
841 	int i;
842 
843 	ifp = sc->ifp;
844 
845 	i = sc->rxbd_head;
846 	cpsw_cpdma_read_rxbd(i, &bd);
847 
848 	while (bd.flags & CPDMA_BD_SOP) {
849 		cpsw_write_4(CPSW_CPDMA_RX_CP(0), cpsw_cpdma_rxbd_paddr(i));
850 
851 		bus_dmamap_sync(sc->mbuf_dtag, sc->rx_dmamap[i], BUS_DMASYNC_POSTREAD);
852 		bus_dmamap_unload(sc->mbuf_dtag, sc->rx_dmamap[i]);
853 
854 		/* Fill mbuf */
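		/* pktlen includes the trailing 4-byte FCS, which is stripped here */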
855 		sc->rx_mbuf[i]->m_hdr.mh_data += bd.bufoff;
856 		sc->rx_mbuf[i]->m_hdr.mh_len = bd.pktlen - 4;
857 		sc->rx_mbuf[i]->m_pkthdr.len = bd.pktlen - 4;
858 		sc->rx_mbuf[i]->m_flags |= M_PKTHDR;
859 		sc->rx_mbuf[i]->m_pkthdr.rcvif = ifp;
860 
861 		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
862 			/* check for valid CRC by looking into pkt_err[5:4] */
863 			if ( (bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0  ) {
864 				sc->rx_mbuf[i]->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
865 				sc->rx_mbuf[i]->m_pkthdr.csum_flags |= CSUM_IP_VALID;
866 				sc->rx_mbuf[i]->m_pkthdr.csum_data = 0xffff;
867 			}
868 		}
869 
870 		/* Handover packet */
871 		CPSW_RX_UNLOCK(sc);
872 		(*ifp->if_input)(ifp, sc->rx_mbuf[i]);
873 		sc->rx_mbuf[i] = NULL;
874 		CPSW_RX_LOCK(sc);
875 
876 		/* Allocate new buffer for current descriptor */
877 		cpsw_new_rxbuf(sc, i, 0);
878 
879 		/* We are not at the tail, so the old tail BD should point to the new one */
880 		cpsw_cpdma_write_rxbd_next(sc->rxbd_tail,
881 			cpsw_cpdma_rxbd_paddr(i));
882 
883 		/* Check if EOQ is reached */
884 		if (cpsw_cpdma_read_rxbd_flags(sc->rxbd_tail) & CPDMA_BD_EOQ) {
885 			cpsw_write_4(CPSW_CPDMA_RX_HDP(0), cpsw_cpdma_rxbd_paddr(i));
886 		}
887 		sc->rxbd_tail = i;
888 
889 		/* read next descriptor */
890 		if (++i == CPSW_MAX_RX_BUFFERS)
891 			i = 0;
892 		cpsw_cpdma_read_rxbd(i, &bd);
893 		sc->rxbd_head = i;
894 	}
895 
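	/* Acknowledge the interrupt; CPDMA EOI keys are 0=RX_THRESH, 1=RX, 2=TX, 3=MISC */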
896 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
897 }
898 
899 static void
900 cpsw_intr_tx(void *arg)
901 {
902 	struct cpsw_softc *sc = arg;
903 	CPSW_TX_LOCK(sc);
904 	cpsw_intr_tx_locked(arg);
905 	CPSW_TX_UNLOCK(sc);
906 }
907 
908 static void
909 cpsw_intr_tx_locked(void *arg)
910 {
911 	struct cpsw_softc *sc = arg;
912 	uint32_t flags;
913 
914 	if(sc->txbd_head == -1)
915 		return;
916 
917 	if(sc->txbd_queue_size<1) {
918 		/* In some cases the interrupt fires even when there is no
919 		   data in the transmit queue */
920 		return;
921 	}
922 
923 	/* Disable watchdog */
924 	sc->wd_timer = 0;
925 
926 	flags = cpsw_cpdma_read_txbd_flags(sc->txbd_head);
927 
928 	/* After BD is transmitted CPDMA will set OWNER to 0 */
929 	if (flags & CPDMA_BD_OWNER)
930 		return;
931 
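	/* EOQ means the hardware drained the queue; cpsw_start_locked() must re-arm TX_HDP */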
932 	if(flags & CPDMA_BD_EOQ)
933 		sc->eoq=1;
934 
935 	/* release dmamap and mbuf */
936 	bus_dmamap_sync(sc->mbuf_dtag, sc->tx_dmamap[sc->txbd_head],
937 	    BUS_DMASYNC_POSTWRITE);
938 	bus_dmamap_unload(sc->mbuf_dtag, sc->tx_dmamap[sc->txbd_head]);
939 	m_freem(sc->tx_mbuf[sc->txbd_head]);
940 	sc->tx_mbuf[sc->txbd_head] = NULL;
941 
942 	cpsw_write_4(CPSW_CPDMA_TX_CP(0), cpsw_cpdma_txbd_paddr(sc->txbd_head));
943 
944 	if (++sc->txbd_head == CPSW_MAX_TX_BUFFERS)
945 		sc->txbd_head = 0;
946 
947 	--sc->txbd_queue_size;
948 
949 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 2);
950 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
951 }
952 
953 static void
954 cpsw_intr_misc(void *arg)
955 {
956 	struct cpsw_softc *sc = arg;
957 	uint32_t stat = cpsw_read_4(CPSW_WR_C_MISC_STAT(0));
958 	printf("%s: stat=%x\n",__func__,stat);
959 	/* Acknowledge the misc interrupt (EOI key 3) */
960 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
961 }
962 
963 static void
964 cpsw_tick(void *msc)
965 {
966 	struct cpsw_softc *sc = msc;
967 
968 	/* Check for TX timeout */
969 	cpsw_watchdog(sc);
970 
971 	mii_tick(sc->mii);
972 
973 	/* Check for media type change */
974 	if(sc->cpsw_media_status != sc->mii->mii_media.ifm_media) {
975 		printf("%s: media type changed (ifm_media=%x)\n",__func__,
976 			sc->mii->mii_media.ifm_media);
977 		cpsw_ifmedia_upd(sc->ifp);
978 	}
979 
980 	/* Schedule another timeout one second from now */
981 	callout_reset(&sc->wd_callout, hz, cpsw_tick, sc);
982 }
983 
984 static void
985 cpsw_watchdog(struct cpsw_softc *sc)
986 {
987 	struct ifnet *ifp;
988 
989 	ifp = sc->ifp;
990 
991 	CPSW_GLOBAL_LOCK(sc);
992 
993 	if (sc->wd_timer == 0 || --sc->wd_timer) {
994 		CPSW_GLOBAL_UNLOCK(sc);
995 		return;
996 	}
997 
998 	ifp->if_oerrors++;
999 	if_printf(ifp, "watchdog timeout\n");
1000 
1001 	cpsw_stop_locked(sc);
1002 	cpsw_init_locked(sc);
1003 
1004 	CPSW_GLOBAL_UNLOCK(sc);
1005 }
1006 
1007 static void
1008 cpsw_init(void *arg)
1009 {
1010 	struct cpsw_softc *sc = arg;
1011 	CPSW_GLOBAL_LOCK(sc);
1012 	cpsw_init_locked(arg);
1013 	CPSW_GLOBAL_UNLOCK(sc);
1014 }
1015 
1016 static int once = 1;
1017 
1018 static void
1019 cpsw_init_locked(void *arg)
1020 {
1021 	struct ifnet *ifp;
1022 	struct cpsw_softc *sc = arg;
1023 	uint8_t  broadcast_address[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1024 	uint32_t next_bdp;
1025 	uint32_t i;
1026 
1027 	ifp = sc->ifp;
1028 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1029 		return;
1030 
1031 	printf("%s: start\n",__func__);
1032 
1033 	/* Reset writer */
1034 	cpsw_write_4(CPSW_WR_SOFT_RESET, 1);
1035 	while(cpsw_read_4(CPSW_WR_SOFT_RESET) & 1);
1036 
1037 	/* Reset SS */
1038 	cpsw_write_4(CPSW_SS_SOFT_RESET, 1);
1039 	while(cpsw_read_4(CPSW_SS_SOFT_RESET) & 1);
1040 
1041 	/* Clear the ALE table (bit 30) and enable the ALE (bit 31) */
1042 	if (once)
1043 		cpsw_write_4(CPSW_ALE_CONTROL, (3 << 30));
1044 	else
1045 		cpsw_write_4(CPSW_ALE_CONTROL, (1 << 31));
1046 	once = 0; // FIXME
1047 
1048 	/* Reset and initialize sliver ports 1 and 2 */
1049 	for(i=0;i<2;i++) {
1050 		/* Reset */
1051 		cpsw_write_4(CPSW_SL_SOFT_RESET(i), 1);
1052 		while(cpsw_read_4(CPSW_SL_SOFT_RESET(i)) & 1);
1053 		/* Set Slave Mapping */
1054 		cpsw_write_4(CPSW_SL_RX_PRI_MAP(i),0x76543210);
1055 		cpsw_write_4(CPSW_PORT_P_TX_PRI_MAP(i+1),0x33221100);
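		/* RX_MAXLEN 0x5f2 = 1522 bytes, the maximum VLAN-tagged frame size */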
1056 		cpsw_write_4(CPSW_SL_RX_MAXLEN(i),0x5f2);
1057 		/* Set MAC Address */
1058 		cpsw_write_4(CPSW_PORT_P_SA_HI(i+1), sc->mac_addr[0] |
1059 			(sc->mac_addr[1] <<  8) |
1060 			(sc->mac_addr[2] << 16) |
1061 			(sc->mac_addr[3] << 24));
1062 		cpsw_write_4(CPSW_PORT_P_SA_LO(i+1), sc->mac_addr[4] |
1063 			(sc->mac_addr[5] <<  8));
1064 
1065 		/* Set MACCONTROL for ports 0,1: FULLDUPLEX(1), GMII_EN(5),
1066 		   IFCTL_A(15), IFCTL_B(16) FIXME */
1067 		cpsw_write_4(CPSW_SL_MACCONTROL(i), 1 | (1<<5) | (1<<15));
1068 
1069 		/* Set ALE port to forwarding(3) */
1070 		cpsw_write_4(CPSW_ALE_PORTCTL(i+1), 3);
1071 	}
1072 
1073 	/* Set Host Port Mapping */
1074 	cpsw_write_4(CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
1075 	cpsw_write_4(CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);
1076 
1077 	/* Set ALE port to forwarding (3) */
1078 	cpsw_write_4(CPSW_ALE_PORTCTL(0), 3);
1079 
1080 	/* Add own MAC address and broadcast to ALE */
1081 	cpsw_ale_uc_entry_set(sc, 0, sc->mac_addr);
1082 	cpsw_ale_mc_entry_set(sc, 7, broadcast_address);
1083 
1084 	cpsw_write_4(CPSW_SS_PTYPE, 0);
1085 	/* Enable statistics for ports 0, 1 and 2 */
1086 	cpsw_write_4(CPSW_SS_STAT_PORT_EN, 7);
1087 
1088 	/* Reset CPDMA */
1089 	cpsw_write_4(CPSW_CPDMA_SOFT_RESET, 1);
1090 	while(cpsw_read_4(CPSW_CPDMA_SOFT_RESET) & 1);
1091 
1092 	for (i = 0; i < 8; i++) {
1093 		cpsw_write_4(CPSW_CPDMA_TX_HDP(i), 0);
1094 		cpsw_write_4(CPSW_CPDMA_RX_HDP(i), 0);
1095 		cpsw_write_4(CPSW_CPDMA_TX_CP(i), 0);
1096 		cpsw_write_4(CPSW_CPDMA_RX_CP(i), 0);
1097 	}
1098 
1099 	cpsw_write_4(CPSW_CPDMA_RX_FREEBUFFER(0), 0);
1100 
1101 	/* Initialize RX Buffer Descriptors */
1102 	i = CPSW_MAX_RX_BUFFERS;
1103 	next_bdp = 0;
1104 	while (i--) {
1105 		cpsw_new_rxbuf(sc, i, next_bdp);
1106 		/* Increment number of free RX buffers */
1107 		//cpsw_write_4(CPSW_CPDMA_RX_FREEBUFFER(0), 1);
1108 		next_bdp = cpsw_cpdma_rxbd_paddr(i);
1109 	}
1110 
1111 	sc->rxbd_head = 0;
1112 	sc->rxbd_tail = CPSW_MAX_RX_BUFFERS-1;
1113 	sc->txbd_head = 0;
1114 	sc->eoq = 1;
1115 	sc->txbd_queue_size = 0;
1116 
1117 	/* Align the IP header on a 4-byte boundary */
1118 	cpsw_write_4(CPSW_CPDMA_RX_BUFFER_OFFSET, 2);
1119 	/* Write channel 0 RX HDP */
1120 	cpsw_write_4(CPSW_CPDMA_RX_HDP(0), cpsw_cpdma_rxbd_paddr(0));
1121 
1122 	/* Clear all interrupt Masks */
1123 	cpsw_write_4(CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
1124 	cpsw_write_4(CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
1125 
1126 	/* Enable TX & RX DMA */
1127 	cpsw_write_4(CPSW_CPDMA_TX_CONTROL, 1);
1128 	cpsw_write_4(CPSW_CPDMA_RX_CONTROL, 1);
1129 
1130 	/* Enable TX and RX interrupt receive for core 0 */
1131 	cpsw_write_4(CPSW_WR_C_TX_EN(0), 0xFF);
1132 	cpsw_write_4(CPSW_WR_C_RX_EN(0), 0xFF);
1133 	//cpsw_write_4(CPSW_WR_C_MISC_EN(0), 0x3F);
1134 
1135 	/* Enable host Error Interrupt */
1136 	cpsw_write_4(CPSW_CPDMA_DMA_INTMASK_SET, 1);
1137 
1138 	/* Enable interrupts for TX and RX Channel 0 */
1139 	cpsw_write_4(CPSW_CPDMA_TX_INTMASK_SET, 1);
1140 	cpsw_write_4(CPSW_CPDMA_RX_INTMASK_SET, 1);
1141 
1142 	/* Ack stalled irqs */
1143 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
1144 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
1145 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 2);
1146 	cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
1147 
1148 	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
1149 	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
1150 	cpsw_write_4(MDIOCONTROL, (1<<30) | (1<<18) | 0xFF);
1151 
1152 	/* Select MII in GMII_SEL, Internal Delay mode */
1153 	//ti_scm_reg_write_4(0x650, 0);
1154 
1155 	/* Activate network interface */
1156 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
1157 	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1158 	sc->wd_timer = 0;
1159 	callout_reset(&sc->wd_callout, hz, cpsw_tick, sc);
1160 }
1161 
1162 static void
1163 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
1164 {
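	/* Writing the index with bit 31 clear loads that entry into TBLW0..TBLW2 */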
1165 	cpsw_write_4(CPSW_ALE_TBLCTL, idx & 1023);
1166 	ale_entry[0] = cpsw_read_4(CPSW_ALE_TBLW0);
1167 	ale_entry[1] = cpsw_read_4(CPSW_ALE_TBLW1);
1168 	ale_entry[2] = cpsw_read_4(CPSW_ALE_TBLW2);
1169 }
1170 
1171 static void
1172 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
1173 {
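	/* Load the three entry words, then commit them with the write bit (31) of TBLCTL */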
1174 	cpsw_write_4(CPSW_ALE_TBLW0, ale_entry[0]);
1175 	cpsw_write_4(CPSW_ALE_TBLW1, ale_entry[1]);
1176 	cpsw_write_4(CPSW_ALE_TBLW2, ale_entry[2]);
1177 	cpsw_write_4(CPSW_ALE_TBLCTL, (idx & 1023) | (1 << 31));
1178 }
1179 
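/*
 * ALE entries are 96 bits wide and accessed as three 32-bit words: word 0
 * holds MAC bytes 2-5, word 1 bits 15:0 hold MAC bytes 0-1, and word 1
 * bits 31:28 carry the mcast forward state and entry type (entry bits 63:60).
 */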
1180 static int
1181 cpsw_ale_find_entry_by_mac(struct cpsw_softc *sc, uint8_t *mac)
1182 {
1183 	int i;
1184 	uint32_t ale_entry[3];
1185 	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
1186 		cpsw_ale_read_entry(sc, i, ale_entry);
1187 		if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) &&
1188 		    (((ale_entry[1] >> 0) & 0xFF) == mac[1]) &&
1189 		    (((ale_entry[0] >>24) & 0xFF) == mac[2]) &&
1190 		    (((ale_entry[0] >>16) & 0xFF) == mac[3]) &&
1191 		    (((ale_entry[0] >> 8) & 0xFF) == mac[4]) &&
1192 		    (((ale_entry[0] >> 0) & 0xFF) == mac[5])) {
1193 			return (i);
1194 		}
1195 	}
1196 	return CPSW_MAX_ALE_ENTRIES;
1197 }
1198 
1199 static int
1200 cpsw_ale_find_free_entry(struct cpsw_softc *sc)
1201 {
1202 	int i;
1203 	uint32_t ale_entry[3];
1204 	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
1205 		cpsw_ale_read_entry(sc, i, ale_entry);
1206 		/* Entry Type[61:60] is 0 for free entry */
1207 		if (((ale_entry[1] >> 28) & 3) == 0) {
1208 			return i;
1209 		}
1210 	}
1211 	return CPSW_MAX_ALE_ENTRIES;
1212 }
1213 
1214 
1215 static int
1216 cpsw_ale_uc_entry_set(struct cpsw_softc *sc, uint8_t port, uint8_t *mac)
1217 {
1218 	int i;
1219 	uint32_t ale_entry[3];
1220 
1221 	if ((i = cpsw_ale_find_entry_by_mac(sc, mac)) == CPSW_MAX_ALE_ENTRIES) {
1222 		i = cpsw_ale_find_free_entry(sc);
1223 	}
1224 
1225 	if (i == CPSW_MAX_ALE_ENTRIES)
1226 		return (ENOMEM);
1227 
1228 	/* Set MAC address */
1229 	ale_entry[0] = mac[2]<<24 | mac[3]<<16 | mac[4]<<8 | mac[5];
1230 	ale_entry[1] = mac[0]<<8 | mac[1];
1231 
1232 	/* Entry type[61:60] is addr entry(1) */
1233 	ale_entry[1] |= 0x10<<24;
1234 
1235 	/* Set portmask [67:66] */
1236 	/* Set port number [67:66] */
1237 
1238 	cpsw_ale_write_entry(sc, i, ale_entry);
1239 
1240 	return 0;
1241 }
1242 
1243 static int
1244 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, uint8_t *mac)
1245 {
1246 	int i;
1247 	uint32_t ale_entry[3];
1248 
1249 	if ((i = cpsw_ale_find_entry_by_mac(sc, mac)) == CPSW_MAX_ALE_ENTRIES) {
1250 		i = cpsw_ale_find_free_entry(sc);
1251 	}
1252 
1253 	if (i == CPSW_MAX_ALE_ENTRIES)
1254 		return (ENOMEM);
1255 
1256 	/* Set MAC address */
1257 	ale_entry[0] = mac[2]<<24 | mac[3]<<16 | mac[4]<<8 | mac[5];
1258 	ale_entry[1] = mac[0]<<8 | mac[1];
1259 
1260 	/* Entry type[61:60] is addr entry(1), Mcast fwd state[63:62] is fw(3)*/
1261 	ale_entry[1] |= 0xd0<<24;
1262 
1263 	/* Set portmask [68:66] */
1264 	ale_entry[2] = (portmap & 7) << 2;
1265 
1266 	cpsw_ale_write_entry(sc, i, ale_entry);
1267 
1268 	return 0;
1269 }
1270 
1271 #ifdef CPSW_DEBUG
1272 static void
1273 cpsw_ale_dump_table(struct cpsw_softc *sc) {
1274 	int i;
1275 	uint32_t ale_entry[3];
1276 	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
1277 		cpsw_ale_read_entry(sc, i, ale_entry);
1278 		if (ale_entry[0] || ale_entry[1] || ale_entry[2]) {
1279 			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[0],
1280 				ale_entry[1],ale_entry[2]);
1281 			printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ",
1282 				(ale_entry[1] >> 8) & 0xFF,
1283 				(ale_entry[1] >> 0) & 0xFF,
1284 				(ale_entry[0] >>24) & 0xFF,
1285 				(ale_entry[0] >>16) & 0xFF,
1286 				(ale_entry[0] >> 8) & 0xFF,
1287 				(ale_entry[0] >> 0) & 0xFF);
1288 			printf( ((ale_entry[1]>>8)&1) ? "mcast " : "ucast ");
1289 			printf("type: %u ", (ale_entry[1]>>28)&3);
1290 			printf("port: %u ", (ale_entry[2]>>2)&7);
1291 			printf("\n");
1292 		}
1293 	}
1294 }
1295 #endif
1296