xref: /freebsd/sys/dev/mge/if_mge.c (revision fcb560670601b2a4d87bb31d7531c8dcc37ee71b)
1 /*-
2  * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
3  * All rights reserved.
4  *
5  * Developed by Semihalf.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of MARVELL nor the names of contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #ifdef HAVE_KERNEL_OPTION_HEADERS
33 #include "opt_device_polling.h"
34 #endif
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/endian.h>
42 #include <sys/mbuf.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/socket.h>
48 #include <sys/sysctl.h>
49 
50 #include <net/ethernet.h>
51 #include <net/bpf.h>
52 #include <net/if.h>
53 #include <net/if_var.h>
54 #include <net/if_arp.h>
55 #include <net/if_dl.h>
56 #include <net/if_media.h>
57 #include <net/if_types.h>
58 #include <net/if_vlan_var.h>
59 
60 #include <netinet/in_systm.h>
61 #include <netinet/in.h>
62 #include <netinet/ip.h>
63 
64 #include <sys/sockio.h>
65 #include <sys/bus.h>
66 #include <machine/bus.h>
67 #include <sys/rman.h>
68 #include <machine/resource.h>
69 
70 #include <dev/mii/mii.h>
71 #include <dev/mii/miivar.h>
72 
73 #include <dev/fdt/fdt_common.h>
74 #include <dev/ofw/ofw_bus.h>
75 #include <dev/ofw/ofw_bus_subr.h>
76 
77 #include <dev/mge/if_mgevar.h>
78 #include <arm/mv/mvreg.h>
79 #include <arm/mv/mvvar.h>
80 
81 #include "miibus_if.h"
82 
83 static int mge_probe(device_t dev);
84 static int mge_attach(device_t dev);
85 static int mge_detach(device_t dev);
86 static int mge_shutdown(device_t dev);
87 static int mge_suspend(device_t dev);
88 static int mge_resume(device_t dev);
89 
90 static int mge_miibus_readreg(device_t dev, int phy, int reg);
91 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
92 
93 static int mge_ifmedia_upd(struct ifnet *ifp);
94 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
95 
96 static void mge_init(void *arg);
97 static void mge_init_locked(void *arg);
98 static void mge_start(struct ifnet *ifp);
99 static void mge_start_locked(struct ifnet *ifp);
100 static void mge_watchdog(struct mge_softc *sc);
101 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
102 
103 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
104 static uint32_t mge_rx_ipg(uint32_t val, int ver);
105 static void mge_ver_params(struct mge_softc *sc);
106 
107 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
108 static void mge_intr_rxtx(void *arg);
109 static void mge_intr_rx(void *arg);
110 static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
111     uint32_t int_cause_ext);
112 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
113 static void mge_intr_tx(void *arg);
114 static void mge_intr_tx_locked(struct mge_softc *sc);
115 static void mge_intr_misc(void *arg);
116 static void mge_intr_sum(void *arg);
117 static void mge_intr_err(void *arg);
118 static void mge_stop(struct mge_softc *sc);
119 static void mge_tick(void *msc);
120 static uint32_t mge_set_port_serial_control(uint32_t media);
121 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
122 static void mge_set_mac_address(struct mge_softc *sc);
123 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
124     uint8_t queue);
125 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
126 static int mge_allocate_dma(struct mge_softc *sc);
127 static int mge_alloc_desc_dma(struct mge_softc *sc,
128     struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
129 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
130     struct mbuf **mbufp, bus_addr_t *paddr);
131 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error);
132 static void mge_free_dma(struct mge_softc *sc);
133 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
134     bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
135 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
136     uint32_t status, uint16_t bufsize);
137 static void mge_offload_setup_descriptor(struct mge_softc *sc,
138     struct mge_desc_wrapper *dw);
139 static uint8_t mge_crc8(uint8_t *data, int size);
140 static void mge_setup_multicast(struct mge_softc *sc);
141 static void mge_set_rxic(struct mge_softc *sc);
142 static void mge_set_txic(struct mge_softc *sc);
143 static void mge_add_sysctls(struct mge_softc *sc);
144 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
145 
146 static device_method_t mge_methods[] = {
147 	/* Device interface */
148 	DEVMETHOD(device_probe,		mge_probe),
149 	DEVMETHOD(device_attach,	mge_attach),
150 	DEVMETHOD(device_detach,	mge_detach),
151 	DEVMETHOD(device_shutdown,	mge_shutdown),
152 	DEVMETHOD(device_suspend,	mge_suspend),
153 	DEVMETHOD(device_resume,	mge_resume),
154 	/* MII interface */
155 	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
156 	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
157 	{ 0, 0 }
158 };
159 
160 static driver_t mge_driver = {
161 	"mge",
162 	mge_methods,
163 	sizeof(struct mge_softc),
164 };
165 
166 static devclass_t mge_devclass;
167 
168 DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
169 DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
170 MODULE_DEPEND(mge, ether, 1, 1, 1);
171 MODULE_DEPEND(mge, miibus, 1, 1, 1);
172 
173 static struct resource_spec res_spec[] = {
174 	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
175 	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
176 	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
177 	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
178 	{ -1, 0 }
179 };
180 
181 static struct {
182 	driver_intr_t *handler;
183 	char * description;
184 } mge_intrs[MGE_INTR_COUNT + 1] = {
185 	{ mge_intr_rxtx,"GbE aggregated interrupt" },
186 	{ mge_intr_rx,	"GbE receive interrupt" },
187 	{ mge_intr_tx,	"GbE transmit interrupt" },
188 	{ mge_intr_misc,"GbE misc interrupt" },
189 	{ mge_intr_sum,	"GbE summary interrupt" },
190 	{ mge_intr_err,	"GbE error interrupt" },
191 };
192 
193 static void
194 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
195 {
196 	uint32_t mac_l, mac_h;
197 	uint8_t lmac[6];
198 	int i, valid;
199 
200 	/*
201 	 * Retrieve hw address from the device tree.
202 	 */
203 	i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
204 	if (i == 6) {
205 		valid = 0;
206 		for (i = 0; i < 6; i++)
207 			if (lmac[i] != 0) {
208 				valid = 1;
209 				break;
210 			}
211 
212 		if (valid) {
213 			bcopy(lmac, addr, 6);
214 			return;
215 		}
216 	}
217 
218 	/*
219 	 * Fall back -- use the currently programmed address.
220 	 */
221 	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
222 	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
223 
224 	addr[0] = (mac_h & 0xff000000) >> 24;
225 	addr[1] = (mac_h & 0x00ff0000) >> 16;
226 	addr[2] = (mac_h & 0x0000ff00) >> 8;
227 	addr[3] = (mac_h & 0x000000ff);
228 	addr[4] = (mac_l & 0x0000ff00) >> 8;
229 	addr[5] = (mac_l & 0x000000ff);
230 }
231 
232 static uint32_t
233 mge_tfut_ipg(uint32_t val, int ver)
234 {
235 
236 	switch (ver) {
237 	case 1:
238 		return ((val & 0x3fff) << 4);
239 	case 2:
240 	default:
241 		return ((val & 0xffff) << 4);
242 	}
243 }
244 
245 static uint32_t
246 mge_rx_ipg(uint32_t val, int ver)
247 {
248 
249 	switch (ver) {
250 	case 1:
251 		return ((val & 0x3fff) << 8);
252 	case 2:
253 	default:
254 		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
255 	}
256 }
257 
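/*
 * Select version-dependent controller parameters: interrupt coalescing
 * limits, the MTU register offset, TX token/arbiter defaults, the number
 * of interrupt resources and whether hardware checksumming is usable.
 */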
258 static void
259 mge_ver_params(struct mge_softc *sc)
260 {
261 	uint32_t d, r;
262 
263 	soc_id(&d, &r);
264 	if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
265 	    d == MV_DEV_88F6282 ||
266 	    d == MV_DEV_MV78100 ||
267 	    d == MV_DEV_MV78100_Z0 ||
268 	    (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
269 		sc->mge_ver = 2;
270 		sc->mge_mtu = 0x4e8;
271 		sc->mge_tfut_ipg_max = 0xFFFF;
272 		sc->mge_rx_ipg_max = 0xFFFF;
273 		sc->mge_tx_arb_cfg = 0xFC0000FF;
274 		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
275 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
276 	} else {
277 		sc->mge_ver = 1;
278 		sc->mge_mtu = 0x458;
279 		sc->mge_tfut_ipg_max = 0x3FFF;
280 		sc->mge_rx_ipg_max = 0x3FFF;
281 		sc->mge_tx_arb_cfg = 0x000000FF;
282 		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
283 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
284 	}
285 	if (d == MV_DEV_88RC8180)
286 		sc->mge_intr_cnt = 1;
287 	else
288 		sc->mge_intr_cnt = 2;
289 
290 	if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
291 		sc->mge_hw_csum = 0;
292 	else
293 		sc->mge_hw_csum = 1;
294 }
295 
296 static void
297 mge_set_mac_address(struct mge_softc *sc)
298 {
299 	uint8_t *if_mac;
300 	uint32_t mac_l, mac_h;
301 
302 	MGE_GLOBAL_LOCK_ASSERT(sc);
303 
304 	if_mac = (uint8_t *)IF_LLADDR(sc->ifp);
305 
306 	mac_l = (if_mac[4] << 8) | (if_mac[5]);
307 	mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
308 	    (if_mac[2] << 8) | (if_mac[3] << 0);
309 
310 	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
311 	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
312 
313 	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
314 }
315 
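/*
 * Program the unicast DA filter table: the low nibble of the last address
 * byte selects the entry that gets the "pass to queue" value; every other
 * entry is cleared.
 */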
316 static void
317 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
318 {
319 	uint32_t reg_idx, reg_off, reg_val, i;
320 
321 	last_byte &= 0xf;
322 	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
323 	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
324 	reg_val = (1 | (queue << 1)) << reg_off;
325 
326 	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
327 		if (i == reg_idx)
328 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
329 		else
330 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
331 	}
332 }
333 
334 static void
335 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
336 {
337 	uint32_t port_config;
338 	uint32_t reg_val, i;
339 
340 	/* Enable or disable promiscuous mode as needed */
341 	if (sc->ifp->if_flags & IFF_PROMISC) {
342 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
343 		port_config |= PORT_CONFIG_UPM;
344 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
345 
346 		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
347 		   (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
348 
349 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
350 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
351 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
352 		}
353 
354 		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
355 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
356 
357 	} else {
358 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
359 		port_config &= ~PORT_CONFIG_UPM;
360 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
361 
362 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
363 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
364 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
365 		}
366 
367 		mge_set_mac_address(sc);
368 	}
369 }
370 
371 static void
372 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
373 {
374 	u_int32_t *paddr;
375 
376 	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
377 	paddr = arg;
378 
379 	*paddr = segs->ds_addr;
380 }
381 
382 static int
383 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
384     bus_addr_t *paddr)
385 {
386 	struct mbuf *new_mbuf;
387 	bus_dma_segment_t seg[1];
388 	int error;
389 	int nsegs;
390 
391 	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
392 
393 	new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
394 	if (new_mbuf == NULL)
395 		return (ENOBUFS);
396 	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
397 
398 	if (*mbufp) {
399 		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
400 		bus_dmamap_unload(tag, map);
401 	}
402 
403 	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
404 	    BUS_DMA_NOWAIT);
405 	KASSERT(nsegs == 1, ("Too many segments returned!"));
406 	if (nsegs != 1 || error)
407 		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
408 
409 	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
410 
411 	(*mbufp) = new_mbuf;
412 	(*paddr) = seg->ds_addr;
413 	return (0);
414 }
415 
416 static int
417 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
418     uint32_t size, bus_dma_tag_t *buffer_tag)
419 {
420 	struct mge_desc_wrapper *dw;
421 	bus_addr_t desc_paddr;
422 	int i, error;
423 
424 	desc_paddr = 0;
425 	for (i = size - 1; i >= 0; i--) {
426 		dw = &(tab[i]);
427 		error = bus_dmamem_alloc(sc->mge_desc_dtag,
428 		    (void**)&(dw->mge_desc),
429 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
430 		    &(dw->desc_dmap));
431 
432 		if (error) {
433 			if_printf(sc->ifp, "failed to allocate DMA memory\n");
434 			dw->mge_desc = NULL;
435 			return (ENXIO);
436 		}
437 
438 		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
439 		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
440 		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
441 
442 		if (error) {
443 			if_printf(sc->ifp, "can't load descriptor\n");
444 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
445 			    dw->desc_dmap);
446 			dw->mge_desc = NULL;
447 			return (ENXIO);
448 		}
449 
450 		/* Chain descriptors */
451 		dw->mge_desc->next_desc = desc_paddr;
452 		desc_paddr = dw->mge_desc_paddr;
453 	}
454 	tab[size - 1].mge_desc->next_desc = desc_paddr;
455 
456 	/* Allocate a busdma tag for mbufs. */
457 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
458 	    1, 0,				/* alignment, boundary */
459 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
460 	    BUS_SPACE_MAXADDR,			/* highaddr */
461 	    NULL, NULL,				/* filtfunc, filtfuncarg */
462 	    MCLBYTES, 1,			/* maxsize, nsegments */
463 	    MCLBYTES, 0,			/* maxsegsz, flags */
464 	    NULL, NULL,				/* lockfunc, lockfuncarg */
465 	    buffer_tag);			/* dmat */
466 	if (error) {
467 		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
468 		return (ENXIO);
469 	}
470 
471 	/* Create TX busdma maps */
472 	for (i = 0; i < size; i++) {
473 		dw = &(tab[i]);
474 		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
475 		if (error) {
476 			if_printf(sc->ifp, "failed to create map for mbuf\n");
477 			return (ENXIO);
478 		}
479 
480 		dw->buffer = (struct mbuf*)NULL;
481 		dw->mge_desc->buffer = (bus_addr_t)NULL;
482 	}
483 
484 	return (0);
485 }
486 
487 static int
488 mge_allocate_dma(struct mge_softc *sc)
489 {
490 	int error;
491 	struct mge_desc_wrapper *dw;
492 	int i;
493 
494 	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
495 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
496 	    16, 0,				/* alignment, boundary */
497 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
498 	    BUS_SPACE_MAXADDR,			/* highaddr */
499 	    NULL, NULL,				/* filtfunc, filtfuncarg */
500 	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
501 	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
502 	    NULL, NULL,				/* lockfunc, lockfuncarg */
503 	    &sc->mge_desc_dtag);		/* dmat */
	if (error) {
		device_printf(sc->dev, "failed to create descriptor DMA tag\n");
		return (ENXIO);
	}
504 
506 	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
507 	    &sc->mge_tx_dtag);
508 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
509 	    &sc->mge_rx_dtag);
510 
511 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
512 		dw = &(sc->mge_rx_desc[i]);
513 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
514 		    &dw->mge_desc->buffer);
515 	}
516 
517 	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
518 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
519 
520 	return (0);
521 }
522 
523 static void
524 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
525     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
526 {
527 	struct mge_desc_wrapper *dw;
528 	int i;
529 
530 	for (i = 0; i < size; i++) {
531 		/* Free RX mbuf */
532 		dw = &(tab[i]);
533 
534 		if (dw->buffer_dmap) {
535 			if (free_mbufs) {
536 				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
537 				    BUS_DMASYNC_POSTREAD);
538 				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
539 			}
540 			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
541 			if (free_mbufs)
542 				m_freem(dw->buffer);
543 		}
544 		/* Free RX descriptors */
545 		if (dw->desc_dmap) {
546 			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
547 			    BUS_DMASYNC_POSTREAD);
548 			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
549 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
550 			    dw->desc_dmap);
551 		}
552 	}
553 }
554 
555 static void
556 mge_free_dma(struct mge_softc *sc)
557 {
558 	/* Free descriptors and mbufs */
559 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
560 	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
561 
562 	/* Destroy mbuf dma tag */
563 	bus_dma_tag_destroy(sc->mge_tx_dtag);
564 	bus_dma_tag_destroy(sc->mge_rx_dtag);
565 	/* Destroy descriptors tag */
566 	bus_dma_tag_destroy(sc->mge_desc_dtag);
567 }
568 
569 static void
570 mge_reinit_rx(struct mge_softc *sc)
571 {
572 	struct mge_desc_wrapper *dw;
573 	int i;
574 
575 	MGE_RECEIVE_LOCK_ASSERT(sc);
576 
577 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
578 
579 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
580 	    &sc->mge_rx_dtag);
581 
582 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
583 		dw = &(sc->mge_rx_desc[i]);
584 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
585 		    &dw->mge_desc->buffer);
586 	}
587 
588 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
589 	sc->rx_desc_curr = 0;
590 
591 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
592 	    sc->rx_desc_start);
593 
594 	/* Enable RX queue */
595 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
596 }
597 
598 #ifdef DEVICE_POLLING
599 static poll_handler_t mge_poll;
600 
601 static int
602 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
603 {
604 	struct mge_softc *sc = ifp->if_softc;
605 	uint32_t int_cause, int_cause_ext;
606 	int rx_npkts = 0;
607 
608 	MGE_GLOBAL_LOCK(sc);
609 
610 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
611 		MGE_GLOBAL_UNLOCK(sc);
612 		return (rx_npkts);
613 	}
614 
615 	if (cmd == POLL_AND_CHECK_STATUS) {
616 		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
617 		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
618 
619 		/* Check for resource error */
620 		if (int_cause & MGE_PORT_INT_RXERRQ0)
621 			mge_reinit_rx(sc);
622 
623 		if (int_cause || int_cause_ext) {
624 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
625 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
626 		}
627 	}
628 
629 	mge_intr_tx_locked(sc);
630 	rx_npkts = mge_intr_rx_locked(sc, count);
631 
632 	MGE_GLOBAL_UNLOCK(sc);
633 	return (rx_npkts);
634 }
635 #endif /* DEVICE_POLLING */
636 
637 static int
638 mge_attach(device_t dev)
639 {
640 	struct mge_softc *sc;
641 	struct mii_softc *miisc;
642 	struct ifnet *ifp;
643 	uint8_t hwaddr[ETHER_ADDR_LEN];
644 	int i, error, phy;
645 
646 	sc = device_get_softc(dev);
647 	sc->dev = dev;
648 	sc->node = ofw_bus_get_node(dev);
649 
650 	/* Set chip version-dependent parameters */
651 	mge_ver_params(sc);
652 
653 	/* Get the PHY address and the softc used to access it from the FDT */
654 	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) != 0)
655 		return (ENXIO);
656 
657 	/* Initialize mutexes */
658 	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
659 	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);
660 
661 	/* Allocate IO and IRQ resources */
662 	error = bus_alloc_resources(dev, res_spec, sc->res);
663 	if (error) {
664 		device_printf(dev, "could not allocate resources\n");
665 		mge_detach(dev);
666 		return (ENXIO);
667 	}
668 
669 	/* Allocate DMA, buffers, buffer descriptors */
670 	error = mge_allocate_dma(sc);
671 	if (error) {
672 		mge_detach(dev);
673 		return (ENXIO);
674 	}
675 
676 	sc->tx_desc_curr = 0;
677 	sc->rx_desc_curr = 0;
678 	sc->tx_desc_used_idx = 0;
679 	sc->tx_desc_used_count = 0;
680 
681 	/* Configure defaults for interrupt coalescing */
682 	sc->rx_ic_time = 768;
683 	sc->tx_ic_time = 768;
684 	mge_add_sysctls(sc);
685 
686 	/* Allocate network interface */
687 	ifp = sc->ifp = if_alloc(IFT_ETHER);
688 	if (ifp == NULL) {
689 		device_printf(dev, "if_alloc() failed\n");
690 		mge_detach(dev);
691 		return (ENOMEM);
692 	}
693 
694 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
695 	ifp->if_softc = sc;
696 	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
697 	ifp->if_capabilities = IFCAP_VLAN_MTU;
698 	if (sc->mge_hw_csum) {
699 		ifp->if_capabilities |= IFCAP_HWCSUM;
700 		ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
701 	}
702 	ifp->if_capenable = ifp->if_capabilities;
703 
704 #ifdef DEVICE_POLLING
705 	/* Advertise that polling is supported */
706 	ifp->if_capabilities |= IFCAP_POLLING;
707 #endif
708 
709 	ifp->if_init = mge_init;
710 	ifp->if_start = mge_start;
711 	ifp->if_ioctl = mge_ioctl;
712 
713 	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
714 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
715 	IFQ_SET_READY(&ifp->if_snd);
716 
717 	mge_get_mac_address(sc, hwaddr);
718 	ether_ifattach(ifp, hwaddr);
719 	callout_init(&sc->wd_callout, 0);
720 
721 	/* Attach PHY(s) */
722 	error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
723 	    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
724 	if (error) {
725 		device_printf(dev, "attaching PHYs failed\n");
726 		mge_detach(dev);
727 		return (error);
728 	}
729 	sc->mii = device_get_softc(sc->miibus);
730 
731 	/* Tell the MAC where to find the PHY so autoneg works */
732 	miisc = LIST_FIRST(&sc->mii->mii_phys);
733 	MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
734 
735 	/* Attach interrupt handlers */
736 	/* TODO: review flags; in particular, mark RX as INTR_ENTROPY? */
737 	for (i = 1; i <= sc->mge_intr_cnt; ++i) {
738 		error = bus_setup_intr(dev, sc->res[i],
739 		    INTR_TYPE_NET | INTR_MPSAFE,
740 		    NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
741 		    sc, &sc->ih_cookie[i - 1]);
742 		if (error) {
743 			device_printf(dev, "could not setup %s\n",
744 			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
745 			mge_detach(dev);
746 			return (error);
747 		}
748 	}
749 
750 	return (0);
751 }
752 
753 static int
754 mge_detach(device_t dev)
755 {
756 	struct mge_softc *sc;
757 	int error, i;
758 
759 	sc = device_get_softc(dev);
760 
761 	/* Stop controller and free TX queue */
762 	if (sc->ifp)
763 		mge_shutdown(dev);
764 
765 	/* Wait for the tick callout to finish */
766 	callout_drain(&sc->wd_callout);
767 
768 	/* Stop and release all interrupts */
769 	for (i = 0; i < sc->mge_intr_cnt; ++i) {
770 		if (!sc->ih_cookie[i])
771 			continue;
772 
773 		error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
774 		if (error)
775 			device_printf(dev, "could not release %s\n",
776 			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
777 	}
778 
779 	/* Detach network interface */
780 	if (sc->ifp) {
781 		ether_ifdetach(sc->ifp);
782 		if_free(sc->ifp);
783 	}
784 
785 	/* Free DMA resources */
786 	mge_free_dma(sc);
787 
788 	/* Release IO and IRQ resources */
789 	bus_release_resources(dev, res_spec, sc->res);
790 
791 	/* Destroy mutexes */
792 	mtx_destroy(&sc->receive_lock);
793 	mtx_destroy(&sc->transmit_lock);
794 
795 	return (0);
796 }
797 
798 static void
799 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
800 {
801 	struct mge_softc *sc = ifp->if_softc;
802 	struct mii_data *mii;
803 
804 	MGE_TRANSMIT_LOCK(sc);
805 
806 	mii = sc->mii;
807 	mii_pollstat(mii);
808 
809 	ifmr->ifm_active = mii->mii_media_active;
810 	ifmr->ifm_status = mii->mii_media_status;
811 
812 	MGE_TRANSMIT_UNLOCK(sc);
813 }
814 
815 static uint32_t
816 mge_set_port_serial_control(uint32_t media)
817 {
818 	uint32_t port_config;
819 
820 	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
821 	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
822 
823 	if (IFM_TYPE(media) == IFM_ETHER) {
824 		switch(IFM_SUBTYPE(media)) {
825 			case IFM_AUTO:
826 				break;
827 			case IFM_1000_T:
828 				port_config  |= (PORT_SERIAL_GMII_SPEED_1000 |
829 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
830 				    PORT_SERIAL_SPEED_AUTONEG);
831 				break;
832 			case IFM_100_TX:
833 				port_config  |= (PORT_SERIAL_MII_SPEED_100 |
834 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
835 				    PORT_SERIAL_SPEED_AUTONEG);
836 				break;
837 			case IFM_10_T:
838 				port_config  |= (PORT_SERIAL_AUTONEG |
839 				    PORT_SERIAL_AUTONEG_FC |
840 				    PORT_SERIAL_SPEED_AUTONEG);
841 				break;
842 		}
843 		if (media & IFM_FDX)
844 			port_config |= PORT_SERIAL_FULL_DUPLEX;
845 	}
846 	return (port_config);
847 }
848 
849 static int
850 mge_ifmedia_upd(struct ifnet *ifp)
851 {
852 	struct mge_softc *sc = ifp->if_softc;
853 
854 	if (ifp->if_flags & IFF_UP) {
855 		MGE_GLOBAL_LOCK(sc);
856 
857 		sc->mge_media_status = sc->mii->mii_media.ifm_media;
858 		mii_mediachg(sc->mii);
859 		mge_init_locked(sc);
860 
861 		MGE_GLOBAL_UNLOCK(sc);
862 	}
863 
864 	return (0);
865 }
866 
867 static void
868 mge_init(void *arg)
869 {
870 	struct mge_softc *sc = arg;
871 
872 	MGE_GLOBAL_LOCK(sc);
873 
874 	mge_init_locked(arg);
875 
876 	MGE_GLOBAL_UNLOCK(sc);
877 }
878 
879 static void
880 mge_init_locked(void *arg)
881 {
882 	struct mge_softc *sc = arg;
883 	struct mge_desc_wrapper *dw;
884 	volatile uint32_t reg_val;
885 	int i, count;
886 
888 	MGE_GLOBAL_LOCK_ASSERT(sc);
889 
890 	/* Stop interface */
891 	mge_stop(sc);
892 
893 	/* Disable interrupts */
894 	mge_intrs_ctrl(sc, 0);
895 
896 	/* Set MAC address */
897 	mge_set_mac_address(sc);
898 
899 	/* Setup multicast filters */
900 	mge_setup_multicast(sc);
901 
902 	if (sc->mge_ver == 2) {
903 		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
904 		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
905 	}
906 
907 	/* Initialize TX queue configuration registers */
908 	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
909 	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
910 	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
911 
912 	/* Clear TX queue configuration registers for unused queues */
913 	for (i = 1; i < 7; i++) {
914 		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
915 		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
916 		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
917 	}
918 
919 	/* Set default MTU (sc->mge_mtu is the version-dependent register offset) */
920 	MGE_WRITE(sc, sc->mge_mtu, 0);
921 
922 	/* Port configuration */
923 	MGE_WRITE(sc, MGE_PORT_CONFIG,
924 	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
925 	    PORT_CONFIG_ARO_RXQ(0));
926 	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);
927 
928 	/* Setup port configuration */
929 	reg_val = mge_set_port_serial_control(sc->mge_media_status);
930 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
931 
932 	/* Setup SDMA configuration */
933 	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
934 	    MGE_SDMA_TX_BYTE_SWAP |
935 	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
936 	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
937 
938 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
939 
940 	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
941 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
942 	    sc->rx_desc_start);
943 
944 	/* Reset descriptor indexes */
945 	sc->tx_desc_curr = 0;
946 	sc->rx_desc_curr = 0;
947 	sc->tx_desc_used_idx = 0;
948 	sc->tx_desc_used_count = 0;
949 
950 	/* Enable RX descriptors */
951 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
952 		dw = &sc->mge_rx_desc[i];
953 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
954 		dw->mge_desc->buff_size = MCLBYTES;
955 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
956 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
957 	}
958 
959 	/* Enable RX queue */
960 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
961 
962 	/* Enable port */
963 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
964 	reg_val |= PORT_SERIAL_ENABLE;
965 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
966 	count = 0x100000;
967 	for (;;) {
968 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
969 		if (reg_val & MGE_STATUS_LINKUP)
970 			break;
971 		DELAY(100);
972 		if (--count == 0) {
973 			if_printf(sc->ifp, "Timeout on link-up\n");
974 			break;
975 		}
976 	}
977 
978 	/* Setup interrupt coalescing */
979 	mge_set_rxic(sc);
980 	mge_set_txic(sc);
981 
982 	/* Enable interrupts */
983 #ifdef DEVICE_POLLING
984 	/*
985 	 * ...only if polling is not turned on. Disable interrupts explicitly
986 	 * if polling is enabled.
987 	 */
988 	if (sc->ifp->if_capenable & IFCAP_POLLING)
989 		mge_intrs_ctrl(sc, 0);
990 	else
991 #endif /* DEVICE_POLLING */
992 	mge_intrs_ctrl(sc, 1);
993 
994 	/* Activate network interface */
995 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
996 	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
997 	sc->wd_timer = 0;
998 
999 	/* Schedule watchdog timeout */
1000 	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1001 }
1002 
1003 static void
1004 mge_intr_rxtx(void *arg)
1005 {
1006 	struct mge_softc *sc = arg;
1007 	uint32_t int_cause, int_cause_ext;
1008 
1009 	MGE_GLOBAL_LOCK(sc);
1010 
1011 #ifdef DEVICE_POLLING
1012 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1013 		MGE_GLOBAL_UNLOCK(sc);
1014 		return;
1015 	}
1016 #endif
1017 
1018 	/* Get interrupt cause */
1019 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1020 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1021 
1022 	/* Check for Transmit interrupt */
1023 	if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
1024 	    MGE_PORT_INT_EXT_TXUR)) {
1025 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1026 		    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1027 		mge_intr_tx_locked(sc);
1028 	}
1029 
1030 	MGE_TRANSMIT_UNLOCK(sc);
1031 
1032 	/* Check for Receive interrupt */
1033 	mge_intr_rx_check(sc, int_cause, int_cause_ext);
1034 
1035 	MGE_RECEIVE_UNLOCK(sc);
1036 }
1037 
1038 static void
1039 mge_intr_err(void *arg)
1040 {
1041 	struct mge_softc *sc = arg;
1042 	struct ifnet *ifp;
1043 
1044 	ifp = sc->ifp;
1045 	if_printf(ifp, "%s\n", __FUNCTION__);
1046 }
1047 
1048 static void
1049 mge_intr_misc(void *arg)
1050 {
1051 	struct mge_softc *sc = arg;
1052 	struct ifnet *ifp;
1053 
1054 	ifp = sc->ifp;
1055 	if_printf(ifp, "%s\n", __FUNCTION__);
1056 }
1057 
1058 static void
1059 mge_intr_rx(void *arg)
{
1060 	struct mge_softc *sc = arg;
1061 	uint32_t int_cause, int_cause_ext;
1062 
1063 	MGE_RECEIVE_LOCK(sc);
1064 
1065 #ifdef DEVICE_POLLING
1066 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1067 		MGE_RECEIVE_UNLOCK(sc);
1068 		return;
1069 	}
1070 #endif
1071 
1072 	/* Get interrupt cause */
1073 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1074 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1075 
1076 	mge_intr_rx_check(sc, int_cause, int_cause_ext);
1077 
1078 	MGE_RECEIVE_UNLOCK(sc);
1079 }
1080 
1081 static void
1082 mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
1083     uint32_t int_cause_ext)
1084 {
1085 	/* Check for resource error */
1086 	if (int_cause & MGE_PORT_INT_RXERRQ0) {
1087 		mge_reinit_rx(sc);
1088 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1089 		    ~(int_cause & MGE_PORT_INT_RXERRQ0));
1090 	}
1091 
1092 	int_cause &= MGE_PORT_INT_RXQ0;
1093 	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1094 
1095 	if (int_cause || int_cause_ext) {
1096 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1097 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1098 		mge_intr_rx_locked(sc, -1);
1099 	}
1100 }
1101 
1102 static int
1103 mge_intr_rx_locked(struct mge_softc *sc, int count)
1104 {
1105 	struct ifnet *ifp = sc->ifp;
1106 	uint32_t status;
1107 	uint16_t bufsize;
1108 	struct mge_desc_wrapper* dw;
1109 	struct mbuf *mb;
1110 	int rx_npkts = 0;
1111 
1112 	MGE_RECEIVE_LOCK_ASSERT(sc);
1113 
1114 	while (count != 0) {
1115 		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1116 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1117 		    BUS_DMASYNC_POSTREAD);
1118 
1119 		/* Get status */
1120 		status = dw->mge_desc->cmd_status;
1121 		bufsize = dw->mge_desc->buff_size;
1122 		if ((status & MGE_DMA_OWNED) != 0)
1123 			break;
1124 
1125 		if (dw->mge_desc->byte_count &&
1126 		    (status & MGE_ERR_SUMMARY) == 0) {
1127 
1128 			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1129 			    BUS_DMASYNC_POSTREAD);
1130 
1131 			mb = m_devget(dw->buffer->m_data,
1132 			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
1133 			    0, ifp, NULL);
1134 
1135 			if (mb == NULL)
1136 				/* Give up if no mbufs */
1137 				break;
1138 
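			/*
			 * Strip the two leading padding bytes the controller
			 * inserts in front of the frame (likely for IP header
			 * alignment).
			 */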
1139 			mb->m_len -= 2;
1140 			mb->m_pkthdr.len -= 2;
1141 			mb->m_data += 2;
1142 
1143 			mb->m_pkthdr.rcvif = ifp;
1144 
1145 			mge_offload_process_frame(ifp, mb, status,
1146 			    bufsize);
1147 
1148 			MGE_RECEIVE_UNLOCK(sc);
1149 			(*ifp->if_input)(ifp, mb);
1150 			MGE_RECEIVE_LOCK(sc);
1151 			rx_npkts++;
1152 		}
1153 
1154 		dw->mge_desc->byte_count = 0;
1155 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1156 		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
1157 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1158 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1159 
1160 		if (count > 0)
1161 			count -= 1;
1162 	}
1163 
1164 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_npkts);
1165 
1166 	return (rx_npkts);
1167 }
1168 
1169 static void
1170 mge_intr_sum(void *arg)
1171 {
1172 	struct mge_softc *sc = arg;
1173 	struct ifnet *ifp;
1174 
1175 	ifp = sc->ifp;
1176 	if_printf(ifp, "%s\n", __FUNCTION__);
1177 }
1178 
1179 static void
1180 mge_intr_tx(void *arg)
1181 {
1182 	struct mge_softc *sc = arg;
1183 	uint32_t int_cause_ext;
1184 
1185 	MGE_TRANSMIT_LOCK(sc);
1186 
1187 #ifdef DEVICE_POLLING
1188 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1189 		MGE_TRANSMIT_UNLOCK(sc);
1190 		return;
1191 	}
1192 #endif
1193 
1194 	/* Ack the interrupt */
1195 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1196 	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1197 	    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1198 
1199 	mge_intr_tx_locked(sc);
1200 
1201 	MGE_TRANSMIT_UNLOCK(sc);
1202 }
1203 
1205 static void
1206 mge_intr_tx_locked(struct mge_softc *sc)
1207 {
1208 	struct ifnet *ifp = sc->ifp;
1209 	struct mge_desc_wrapper *dw;
1210 	struct mge_desc *desc;
1211 	uint32_t status;
1212 	int send = 0;
1213 
1214 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1215 
1216 	/* Disable watchdog */
1217 	sc->wd_timer = 0;
1218 
1219 	while (sc->tx_desc_used_count) {
1220 		/* Get the descriptor */
1221 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1222 		desc = dw->mge_desc;
1223 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1224 		    BUS_DMASYNC_POSTREAD);
1225 
1226 		/* Get descriptor status */
1227 		status = desc->cmd_status;
1228 
1229 		if (status & MGE_DMA_OWNED)
1230 			break;
1231 
1232 		sc->tx_desc_used_idx =
1233 		    (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
1234 		sc->tx_desc_used_count--;
1235 
1236 		/* Update collision statistics */
1237 		if (status & MGE_ERR_SUMMARY) {
1238 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1239 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1240 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1241 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16);
1242 		}
1243 
1244 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1245 		    BUS_DMASYNC_POSTWRITE);
1246 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1247 		m_freem(dw->buffer);
1248 		dw->buffer = (struct mbuf*)NULL;
1249 		send++;
1250 
1251 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1252 	}
1253 
1254 	if (send) {
1255 		/* Now send anything that was pending */
1256 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1257 		mge_start_locked(ifp);
1258 	}
1259 }
1260 
1261 static int
1262 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1263 {
1264 	struct mge_softc *sc = ifp->if_softc;
1265 	struct ifreq *ifr = (struct ifreq *)data;
1266 	int mask, error;
1267 	uint32_t flags;
1268 
1269 	error = 0;
1270 
1271 	switch (command) {
1272 	case SIOCSIFFLAGS:
1273 		MGE_GLOBAL_LOCK(sc);
1274 
1275 		if (ifp->if_flags & IFF_UP) {
1276 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1277 				flags = ifp->if_flags ^ sc->mge_if_flags;
1278 				if (flags & IFF_PROMISC)
1279 					mge_set_prom_mode(sc,
1280 					    MGE_RX_DEFAULT_QUEUE);
1281 
1282 				if (flags & IFF_ALLMULTI)
1283 					mge_setup_multicast(sc);
1284 			} else
1285 				mge_init_locked(sc);
1286 		}
1287 		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1288 			mge_stop(sc);
1289 
1290 		sc->mge_if_flags = ifp->if_flags;
1291 		MGE_GLOBAL_UNLOCK(sc);
1292 		break;
1293 	case SIOCADDMULTI:
1294 	case SIOCDELMULTI:
1295 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1296 			MGE_GLOBAL_LOCK(sc);
1297 			mge_setup_multicast(sc);
1298 			MGE_GLOBAL_UNLOCK(sc);
1299 		}
1300 		break;
1301 	case SIOCSIFCAP:
1302 		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1303 		if (mask & IFCAP_HWCSUM) {
1304 			ifp->if_capenable &= ~IFCAP_HWCSUM;
1305 			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
1306 			if (ifp->if_capenable & IFCAP_TXCSUM)
1307 				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
1308 			else
1309 				ifp->if_hwassist = 0;
1310 		}
1311 #ifdef DEVICE_POLLING
1312 		if (mask & IFCAP_POLLING) {
1313 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1314 				error = ether_poll_register(mge_poll, ifp);
1315 				if (error)
1316 					return (error);
1317 
1318 				MGE_GLOBAL_LOCK(sc);
1319 				mge_intrs_ctrl(sc, 0);
1320 				ifp->if_capenable |= IFCAP_POLLING;
1321 				MGE_GLOBAL_UNLOCK(sc);
1322 			} else {
1323 				error = ether_poll_deregister(ifp);
1324 				MGE_GLOBAL_LOCK(sc);
1325 				mge_intrs_ctrl(sc, 1);
1326 				ifp->if_capenable &= ~IFCAP_POLLING;
1327 				MGE_GLOBAL_UNLOCK(sc);
1328 			}
1329 		}
1330 #endif
1331 		break;
1332 	case SIOCGIFMEDIA: /* fall through */
1333 	case SIOCSIFMEDIA:
1334 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1335 		    && !(ifr->ifr_media & IFM_FDX)) {
1336 			device_printf(sc->dev,
1337 			    "1000baseTX half-duplex unsupported\n");
1338 			return (0);
1339 		}
1340 		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1341 		break;
1342 	default:
1343 		error = ether_ioctl(ifp, command, data);
1344 	}
1345 	return (error);
1346 }
1347 
1348 static int
1349 mge_miibus_readreg(device_t dev, int phy, int reg)
1350 {
1351 	struct mge_softc *sc;
1352 	uint32_t retries;
1353 
1354 	sc = device_get_softc(dev);
1355 
1356 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
1357 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
1358 
1359 	retries = MGE_SMI_READ_RETRIES;
1360 	while (--retries &&
1361 	    !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
1362 		DELAY(MGE_SMI_READ_DELAY);
1363 
1364 	if (retries == 0)
1365 		device_printf(dev, "Timeout while reading from PHY\n");
1366 
1367 	return (MGE_READ(sc->phy_sc, MGE_REG_SMI) & 0xffff);
1368 }
1369 
1370 static int
1371 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1372 {
1373 	struct mge_softc *sc;
1374 	uint32_t retries;
1375 
1376 	sc = device_get_softc(dev);
1377 
1378 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
1379 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));
1380 
1381 	retries = MGE_SMI_WRITE_RETRIES;
1382 	while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
1383 		DELAY(MGE_SMI_WRITE_DELAY);
1384 
1385 	if (retries == 0)
1386 		device_printf(dev, "Timeout while writing to PHY\n");
1387 	return (0);
1388 }
1389 
1390 static int
1391 mge_probe(device_t dev)
1392 {
1393 
1394 	if (!ofw_bus_status_okay(dev))
1395 		return (ENXIO);
1396 
1397 	if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
1398 		return (ENXIO);
1399 
1400 	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1401 	return (BUS_PROBE_DEFAULT);
1402 }
1403 
1404 static int
1405 mge_resume(device_t dev)
1406 {
1407 
1408 	device_printf(dev, "%s\n", __FUNCTION__);
1409 	return (0);
1410 }
1411 
1412 static int
1413 mge_shutdown(device_t dev)
1414 {
1415 	struct mge_softc *sc = device_get_softc(dev);
1416 
1417 	MGE_GLOBAL_LOCK(sc);
1418 
1419 #ifdef DEVICE_POLLING
1420         if (sc->ifp->if_capenable & IFCAP_POLLING)
1421 		ether_poll_deregister(sc->ifp);
1422 #endif
1423 
1424 	mge_stop(sc);
1425 
1426 	MGE_GLOBAL_UNLOCK(sc);
1427 
1428 	return (0);
1429 }
1430 
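/*
 * Map an mbuf for transmission and fill in the next free TX descriptor.
 * Only a single DMA segment is supported; callers defragment chains first.
 */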
1431 static int
1432 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1433 {
1434 	struct mge_desc_wrapper *dw = NULL;
1435 	struct ifnet *ifp;
1436 	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1437 	bus_dmamap_t mapp;
1438 	int error;
1439 	int seg, nsegs;
1440 	int desc_no;
1441 
1442 	ifp = sc->ifp;
1443 
1444 	/* Fetch unused map */
1445 	desc_no = sc->tx_desc_curr;
1446 	dw = &sc->mge_tx_desc[desc_no];
1447 	mapp = dw->buffer_dmap;
1448 
1449 	/* Create mapping in DMA memory */
1450 	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1451 	    BUS_DMA_NOWAIT);
1452 	if (error != 0) {
1453 		m_freem(m0);
1454 		return (error);
1455 	}
1456 
1457 	/* Only one segment is supported. */
1458 	if (nsegs != 1) {
1459 		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1460 		m_freem(m0);
1461 		return (-1);
1462 	}
1463 
1464 	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1465 
1466 	/* Everything is ok, now we can send buffers */
1467 	for (seg = 0; seg < nsegs; seg++) {
1468 		dw->mge_desc->byte_count = segs[seg].ds_len;
1469 		dw->mge_desc->buffer = segs[seg].ds_addr;
1470 		dw->buffer = m0;
1471 		dw->mge_desc->cmd_status = 0;
1472 		if (seg == 0)
1473 			mge_offload_setup_descriptor(sc, dw);
1474 		dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
1475 		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1476 		    MGE_DMA_OWNED;
1477 	}
1478 
1479 	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1480 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1481 
1482 	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
1483 	sc->tx_desc_used_count++;
1484 	return (0);
1485 }
1486 
1487 static void
1488 mge_tick(void *msc)
1489 {
1490 	struct mge_softc *sc = msc;
1491 
1492 	/* Check for TX timeout */
1493 	mge_watchdog(sc);
1494 
1495 	mii_tick(sc->mii);
1496 
1497 	/* Check for media type change */
1498 	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
1499 		mge_ifmedia_upd(sc->ifp);
1500 
1501 	/* Schedule another timeout one second from now */
1502 	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1503 }
1504 
1505 static void
1506 mge_watchdog(struct mge_softc *sc)
1507 {
1508 	struct ifnet *ifp;
1509 
1510 	ifp = sc->ifp;
1511 
1512 	MGE_GLOBAL_LOCK(sc);
1513 
1514 	if (sc->wd_timer == 0 || --sc->wd_timer) {
1515 		MGE_GLOBAL_UNLOCK(sc);
1516 		return;
1517 	}
1518 
1519 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1520 	if_printf(ifp, "watchdog timeout\n");
1521 
1522 	mge_stop(sc);
1523 	mge_init_locked(sc);
1524 
1525 	MGE_GLOBAL_UNLOCK(sc);
1526 }
1527 
1528 static void
1529 mge_start(struct ifnet *ifp)
1530 {
1531 	struct mge_softc *sc = ifp->if_softc;
1532 
1533 	MGE_TRANSMIT_LOCK(sc);
1534 
1535 	mge_start_locked(ifp);
1536 
1537 	MGE_TRANSMIT_UNLOCK(sc);
1538 }
1539 
1540 static void
1541 mge_start_locked(struct ifnet *ifp)
1542 {
1543 	struct mge_softc *sc;
1544 	struct mbuf *m0, *mtmp;
1545 	uint32_t reg_val, queued = 0;
1546 
1547 	sc = ifp->if_softc;
1548 
1549 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1550 
1551 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1552 	    IFF_DRV_RUNNING)
1553 		return;
1554 
1555 	for (;;) {
1556 		/* Get packet from the queue */
1557 		IF_DEQUEUE(&ifp->if_snd, m0);
1558 		if (m0 == NULL)
1559 			break;
1560 
1561 		if (m0->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP) ||
1562 		    m0->m_flags & M_VLANTAG) {
1563 			if (M_WRITABLE(m0) == 0) {
1564 				mtmp = m_dup(m0, M_NOWAIT);
1565 				m_freem(m0);
1566 				if (mtmp == NULL)
1567 					continue;
1568 				m0 = mtmp;
1569 			}
1570 		}
1571 		/* The driver supports only one DMA fragment. */
1572 		if (m0->m_next != NULL) {
1573 			mtmp = m_defrag(m0, M_NOWAIT);
1574 			if (mtmp)
1575 				m0 = mtmp;
1576 		}
1577 
1578 		/* Check for free descriptors */
1579 		if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1580 			IF_PREPEND(&ifp->if_snd, m0);
1581 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1582 			break;
1583 		}
1584 
1585 		if (mge_encap(sc, m0) != 0)
1586 			break;
1587 
1588 		queued++;
1589 		BPF_MTAP(ifp, m0);
1590 	}
1591 
1592 	if (queued) {
1593 		/* Enable transmitter and watchdog timer */
1594 		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1595 		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
1596 		sc->wd_timer = 5;
1597 	}
1598 }
1599 
1600 static void
1601 mge_stop(struct mge_softc *sc)
1602 {
1603 	struct ifnet *ifp;
1604 	volatile uint32_t reg_val, status;
1605 	struct mge_desc_wrapper *dw;
1606 	struct mge_desc *desc;
1607 	int count;
1608 
1609 	ifp = sc->ifp;
1610 
1611 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1612 		return;
1613 
1614 	/* Stop tick engine */
1615 	callout_stop(&sc->wd_callout);
1616 
1617 	/* Disable interface */
1618 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1619 	sc->wd_timer = 0;
1620 
1621 	/* Disable interrupts */
1622 	mge_intrs_ctrl(sc, 0);
1623 
1624 	/* Disable Rx and Tx */
1625 	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1626 	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1627 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1628 
1629 	/* Remove pending data from TX queue */
1630 	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1631 	    sc->tx_desc_used_count) {
1632 		/* Get the descriptor */
1633 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1634 		desc = dw->mge_desc;
1635 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1636 		    BUS_DMASYNC_POSTREAD);
1637 
1638 		/* Get descriptor status */
1639 		status = desc->cmd_status;
1640 
1641 		if (status & MGE_DMA_OWNED)
1642 			break;
1643 
1644 		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
1645 		    MGE_TX_DESC_NUM;
1646 		sc->tx_desc_used_count--;
1647 
1648 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1649 		    BUS_DMASYNC_POSTWRITE);
1650 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1651 
1652 		m_freem(dw->buffer);
1653 		dw->buffer = (struct mbuf*)NULL;
1654 	}
1655 
1656 	/* Wait for end of transmission */
1657 	count = 0x100000;
1658 	while (count--) {
1659 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1660 		if ( !(reg_val & MGE_STATUS_TX_IN_PROG) &&
1661 		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1662 			break;
1663 		DELAY(100);
1664 	}
1665 
1666 	if (!count)
1667 		if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
1668 		    __FUNCTION__);
1669 
1670 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1671 	reg_val &= ~(PORT_SERIAL_ENABLE);
1672 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1673 }
1674 
1675 static int
1676 mge_suspend(device_t dev)
1677 {
1678 
1679 	device_printf(dev, "%s\n", __FUNCTION__);
1680 	return (0);
1681 }
1682 
1683 static void
1684 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1685     uint32_t status, uint16_t bufsize)
1686 {
1687 	int csum_flags = 0;
1688 
1689 	if (ifp->if_capenable & IFCAP_RXCSUM) {
1690 		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1691 			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
1692 
1693 		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1694 		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1695 		    (status & MGE_RX_L4_CSUM_OK)) {
1696 			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1697 			frame->m_pkthdr.csum_data = 0xFFFF;
1698 		}
1699 
1700 		frame->m_pkthdr.csum_flags = csum_flags;
1701 	}
1702 }
1703 
1704 static void
1705 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1706 {
1707 	struct mbuf *m0 = dw->buffer;
1708 	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1709 	int csum_flags = m0->m_pkthdr.csum_flags;
1710 	int cmd_status = 0;
1711 	struct ip *ip;
1712 	int ehlen, etype;
1713 
1714 	if (csum_flags) {
1715 		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1716 			etype = ntohs(eh->evl_proto);
1717 			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1718 			cmd_status |= MGE_TX_VLAN_TAGGED;
1719 		} else {
1720 			etype = ntohs(eh->evl_encap_proto);
1721 			ehlen = ETHER_HDR_LEN;
1722 		}
1723 
1724 		if (etype != ETHERTYPE_IP) {
1725 			if_printf(sc->ifp,
1726 			    "TCP/IP Offload enabled for unsupported "
1727 			    "protocol!\n");
1728 			return;
1729 		}
1730 
1731 		ip = (struct ip *)(m0->m_data + ehlen);
1732 		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1733 		cmd_status |= MGE_TX_NOT_FRAGMENT;
1734 	}
1735 
1736 	if (csum_flags & CSUM_IP)
1737 		cmd_status |= MGE_TX_GEN_IP_CSUM;
1738 
1739 	if (csum_flags & CSUM_TCP)
1740 		cmd_status |= MGE_TX_GEN_L4_CSUM;
1741 
1742 	if (csum_flags & CSUM_UDP)
1743 		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1744 
1745 	dw->mge_desc->cmd_status |= cmd_status;
1746 }
1747 
1748 static void
1749 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1750 {
1751 
1752 	if (enable) {
1753 		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
1754 		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1755 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
1756 		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1757 		    MGE_PORT_INT_EXT_TXBUF0);
1758 	} else {
1759 		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1760 		MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1761 
1762 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1763 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1764 
1765 		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1766 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
1767 	}
1768 }
1769 
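/*
 * Byte-wide CRC-8 (polynomial 0x07, MSB first) used to hash station
 * addresses into the other-multicast filter table.
 */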
1770 static uint8_t
1771 mge_crc8(uint8_t *data, int size)
1772 {
1773 	uint8_t crc = 0;
1774 	static const uint8_t ct[256] = {
1775 		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1776 		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1777 		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1778 		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1779 		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1780 		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1781 		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
1782 		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
1783 		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
1784 		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
1785 		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
1786 		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
1787 		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
1788 		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
1789 		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
1790 		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
1791 		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
1792 		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
1793 		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
1794 		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
1795 		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
1796 		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
1797 		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
1798 		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
1799 		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
1800 		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
1801 		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
1802 		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
1803 		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
1804 		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
1805 		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
1806 		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
1807 	};
1808 
1809 	while (size--)
1810 		crc = ct[crc ^ *(data++)];
1811 
1812 	return (crc);
1813 }
1814 
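/*
 * Rebuild the special (01:00:5e:00:00:xx) and other multicast filter
 * tables from the interface multicast list, or open them up entirely
 * when IFF_ALLMULTI is set.
 */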
1815 static void
1816 mge_setup_multicast(struct mge_softc *sc)
1817 {
1818 	uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
1819 	uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
1820 	uint32_t smt[MGE_MCAST_REG_NUMBER];
1821 	uint32_t omt[MGE_MCAST_REG_NUMBER];
1822 	struct ifnet *ifp = sc->ifp;
1823 	struct ifmultiaddr *ifma;
1824 	uint8_t *mac;
1825 	int i;
1826 
1827 	if (ifp->if_flags & IFF_ALLMULTI) {
1828 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
1829 			smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
1830 	} else {
1831 		memset(smt, 0, sizeof(smt));
1832 		memset(omt, 0, sizeof(omt));
1833 
1834 		if_maddr_rlock(ifp);
1835 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1836 			if (ifma->ifma_addr->sa_family != AF_LINK)
1837 				continue;
1838 
1839 			mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1840 			if (memcmp(mac, special, sizeof(special)) == 0) {
1841 				i = mac[5];
1842 				smt[i >> 2] |= v << ((i & 0x03) << 3);
1843 			} else {
1844 				i = mge_crc8(mac, ETHER_ADDR_LEN);
1845 				omt[i >> 2] |= v << ((i & 0x03) << 3);
1846 			}
1847 		}
1848 		if_maddr_runlock(ifp);
1849 	}
1850 
1851 	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
1852 		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
1853 		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
1854 	}
1855 }
1856 
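/*
 * Program the RX interrupt coalescing delay into the SDMA configuration
 * register, clamped to the version-dependent maximum.
 */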
1857 static void
1858 mge_set_rxic(struct mge_softc *sc)
1859 {
1860 	uint32_t reg;
1861 
1862 	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
1863 		sc->rx_ic_time = sc->mge_rx_ipg_max;
1864 
1865 	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
1866 	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
1867 	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
1868 	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
1869 }
1870 
1871 static void
1872 mge_set_txic(struct mge_softc *sc)
1873 {
1874 	uint32_t reg;
1875 
1876 	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
1877 		sc->tx_ic_time = sc->mge_tfut_ipg_max;
1878 
1879 	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
1880 	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
1881 	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
1882 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
1883 }
1884 
1885 static int
1886 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
1887 {
1888 	struct mge_softc *sc = (struct mge_softc *)arg1;
1889 	uint32_t time;
1890 	int error;
1891 
1892 	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
1893 	error = sysctl_handle_int(oidp, &time, 0, req);
1894 	if (error != 0)
1895 		return (error);
1896 
1897 	MGE_GLOBAL_LOCK(sc);
1898 	if (arg2 == MGE_IC_RX) {
1899 		sc->rx_ic_time = time;
1900 		mge_set_rxic(sc);
1901 	} else {
1902 		sc->tx_ic_time = time;
1903 		mge_set_txic(sc);
1904 	}
1905 	MGE_GLOBAL_UNLOCK(sc);
1906 
1907 	return (0);
1908 }
1909 
1910 static void
1911 mge_add_sysctls(struct mge_softc *sc)
1912 {
1913 	struct sysctl_ctx_list *ctx;
1914 	struct sysctl_oid_list *children;
1915 	struct sysctl_oid *tree;
1916 
1917 	ctx = device_get_sysctl_ctx(sc->dev);
1918 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1919 	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
1920 	    CTLFLAG_RD, 0, "MGE Interrupts coalescing");
1921 	children = SYSCTL_CHILDREN(tree);
1922 
1923 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
1924 	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
1925 	    "I", "IC RX time threshold");
1926 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
1927 	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
1928 	    "I", "IC TX time threshold");
1929 }
1930