xref: /freebsd/sys/dev/mge/if_mge.c (revision ca50c4b8716572429a4d313a45e179d9e8c4f51c)
1 /*-
2  * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
3  * All rights reserved.
4  *
5  * Developed by Semihalf.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of MARVELL nor the names of contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #ifdef HAVE_KERNEL_OPTION_HEADERS
33 #include "opt_device_polling.h"
34 #endif
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/endian.h>
42 #include <sys/mbuf.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/socket.h>
48 #include <sys/sysctl.h>
49 
50 #include <net/ethernet.h>
51 #include <net/bpf.h>
52 #include <net/if.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_types.h>
57 #include <net/if_vlan_var.h>
58 
59 #include <netinet/in_systm.h>
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
62 
63 #include <sys/sockio.h>
64 #include <sys/bus.h>
65 #include <machine/bus.h>
66 #include <sys/rman.h>
67 #include <machine/resource.h>
68 
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
71 
72 #include <dev/fdt/fdt_common.h>
73 #include <dev/ofw/ofw_bus.h>
74 #include <dev/ofw/ofw_bus_subr.h>
75 
76 #include <dev/mge/if_mgevar.h>
77 #include <arm/mv/mvreg.h>
78 #include <arm/mv/mvvar.h>
79 
80 #include "miibus_if.h"
81 
82 static int mge_probe(device_t dev);
83 static int mge_attach(device_t dev);
84 static int mge_detach(device_t dev);
85 static int mge_shutdown(device_t dev);
86 static int mge_suspend(device_t dev);
87 static int mge_resume(device_t dev);
88 
89 static int mge_miibus_readreg(device_t dev, int phy, int reg);
90 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
91 
92 static int mge_ifmedia_upd(struct ifnet *ifp);
93 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
94 
95 static void mge_init(void *arg);
96 static void mge_init_locked(void *arg);
97 static void mge_start(struct ifnet *ifp);
98 static void mge_start_locked(struct ifnet *ifp);
99 static void mge_watchdog(struct mge_softc *sc);
100 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
101 
102 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
103 static uint32_t mge_rx_ipg(uint32_t val, int ver);
104 static void mge_ver_params(struct mge_softc *sc);
105 
106 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
107 static void mge_intr_rx(void *arg);
108 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
109 static void mge_intr_tx(void *arg);
110 static void mge_intr_tx_locked(struct mge_softc *sc);
111 static void mge_intr_misc(void *arg);
112 static void mge_intr_sum(void *arg);
113 static void mge_intr_err(void *arg);
114 static void mge_stop(struct mge_softc *sc);
115 static void mge_tick(void *msc);
116 static uint32_t mge_set_port_serial_control(uint32_t media);
117 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
118 static void mge_set_mac_address(struct mge_softc *sc);
119 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
120     uint8_t queue);
121 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
122 static int mge_allocate_dma(struct mge_softc *sc);
123 static int mge_alloc_desc_dma(struct mge_softc *sc,
124     struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
125 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
126     struct mbuf **mbufp, bus_addr_t *paddr);
127 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error);
128 static void mge_free_dma(struct mge_softc *sc);
129 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
130     bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
131 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
132     uint32_t status, uint16_t bufsize);
133 static void mge_offload_setup_descriptor(struct mge_softc *sc,
134     struct mge_desc_wrapper *dw);
135 static uint8_t mge_crc8(uint8_t *data, int size);
136 static void mge_setup_multicast(struct mge_softc *sc);
137 static void mge_set_rxic(struct mge_softc *sc);
138 static void mge_set_txic(struct mge_softc *sc);
139 static void mge_add_sysctls(struct mge_softc *sc);
140 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
141 
142 static device_method_t mge_methods[] = {
143 	/* Device interface */
144 	DEVMETHOD(device_probe,		mge_probe),
145 	DEVMETHOD(device_attach,	mge_attach),
146 	DEVMETHOD(device_detach,	mge_detach),
147 	DEVMETHOD(device_shutdown,	mge_shutdown),
148 	DEVMETHOD(device_suspend,	mge_suspend),
149 	DEVMETHOD(device_resume,	mge_resume),
150 	/* MII interface */
151 	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
152 	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
153 	{ 0, 0 }
154 };
155 
156 static driver_t mge_driver = {
157 	"mge",
158 	mge_methods,
159 	sizeof(struct mge_softc),
160 };
161 
162 static devclass_t mge_devclass;
163 
164 DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
165 DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
166 MODULE_DEPEND(mge, ether, 1, 1, 1);
167 MODULE_DEPEND(mge, miibus, 1, 1, 1);
168 
169 static struct resource_spec res_spec[] = {
170 	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
171 	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
172 	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
173 	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
174 	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
175 	{ SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
176 	{ -1, 0 }
177 };
178 
179 static struct {
180 	driver_intr_t *handler;
181 	char *description;
182 } mge_intrs[MGE_INTR_COUNT] = {
183 	{ mge_intr_rx,	"GbE receive interrupt" },
184 	{ mge_intr_tx,	"GbE transmit interrupt" },
185 	{ mge_intr_misc, "GbE misc interrupt" },
186 	{ mge_intr_sum,	"GbE summary interrupt" },
187 	{ mge_intr_err,	"GbE error interrupt" },
188 };
189 
190 static void
191 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
192 {
193 	uint32_t mac_l, mac_h;
194 	uint8_t lmac[6];
195 	int i, valid;
196 
197 	/*
198 	 * Retrieve hw address from the device tree.
199 	 */
200 	i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
201 	if (i == 6) {
202 		valid = 0;
203 		for (i = 0; i < 6; i++)
204 			if (lmac[i] != 0) {
205 				valid = 1;
206 				break;
207 			}
208 
209 		if (valid) {
210 			bcopy(lmac, addr, 6);
211 			return;
212 		}
213 	}
214 
215 	/*
216 	 * Fall back -- use the currently programmed address.
217 	 */
218 	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
219 	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
220 
221 	addr[0] = (mac_h & 0xff000000) >> 24;
222 	addr[1] = (mac_h & 0x00ff0000) >> 16;
223 	addr[2] = (mac_h & 0x0000ff00) >> 8;
224 	addr[3] = (mac_h & 0x000000ff);
225 	addr[4] = (mac_l & 0x0000ff00) >> 8;
226 	addr[5] = (mac_l & 0x000000ff);
227 }
228 
229 static uint32_t
230 mge_tfut_ipg(uint32_t val, int ver)
231 {
232 
233 	switch (ver) {
234 	case 1:
235 		return ((val & 0x3fff) << 4);
236 	case 2:
237 	default:
238 		return ((val & 0xffff) << 4);
239 	}
240 }
241 
242 static uint32_t
243 mge_rx_ipg(uint32_t val, int ver)
244 {
245 
246 	switch (ver) {
247 	case 1:
248 		return ((val & 0x3fff) << 8);
249 	case 2:
250 	default:
251 		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
252 	}
253 }
254 
255 static void
256 mge_ver_params(struct mge_softc *sc)
257 {
258 	uint32_t d, r;
259 
260 	soc_id(&d, &r);
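	/*
	 * Kirkwood (88F628x) and Discovery (MV78100) class SoCs use the
	 * newer (version 2) register layout with wider IPG fields.
	 */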
261 	if (d == MV_DEV_88F6281 ||
262 	    d == MV_DEV_88F6282 ||
263 	    d == MV_DEV_MV78100 ||
264 	    d == MV_DEV_MV78100_Z0) {
265 		sc->mge_ver = 2;
266 		sc->mge_mtu = 0x4e8;
267 		sc->mge_tfut_ipg_max = 0xFFFF;
268 		sc->mge_rx_ipg_max = 0xFFFF;
269 		sc->mge_tx_arb_cfg = 0xFC0000FF;
270 		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
271 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
272 	} else {
273 		sc->mge_ver = 1;
274 		sc->mge_mtu = 0x458;
275 		sc->mge_tfut_ipg_max = 0x3FFF;
276 		sc->mge_rx_ipg_max = 0x3FFF;
277 		sc->mge_tx_arb_cfg = 0x000000FF;
278 		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
279 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
280 	}
281 }
282 
283 static void
284 mge_set_mac_address(struct mge_softc *sc)
285 {
286 	char *if_mac;
287 	uint32_t mac_l, mac_h;
288 
289 	MGE_GLOBAL_LOCK_ASSERT(sc);
290 
291 	if_mac = (char *)IF_LLADDR(sc->ifp);
292 
293 	mac_l = (if_mac[4] << 8) | (if_mac[5]);
294 	mac_h = (if_mac[0] << 24) | (if_mac[1] << 16) |
295 	    (if_mac[2] << 8) | (if_mac[3] << 0);
296 
297 	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
298 	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
299 
300 	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
301 }
302 
303 static void
304 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
305 {
306 	uint32_t reg_idx, reg_off, reg_val, i;
307 
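	/*
	 * The unicast DA filter holds one byte-wide entry for each value of
	 * the low nibble of the last address byte: bit 0 enables the entry
	 * and bits 3:1 select the RX queue frames are steered to.
	 */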
308 	last_byte &= 0xf;
309 	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
310 	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
311 	reg_val = (1 | (queue << 1)) << reg_off;
312 
313 	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
314 		if (i == reg_idx)
315 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
316 		else
317 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
318 	}
319 }
320 
321 static void
322 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
323 {
324 	uint32_t port_config;
325 	uint32_t reg_val, i;
326 
327 	/* Enable or disable promiscuous mode as needed */
328 	if (sc->ifp->if_flags & IFF_PROMISC) {
329 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
330 		port_config |= PORT_CONFIG_UPM;
331 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
332 
333 		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
334 		   (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
335 
336 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
337 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
338 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
339 		}
340 
341 		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
342 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
343 
344 	} else {
345 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
346 		port_config &= ~PORT_CONFIG_UPM;
347 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
348 
349 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
350 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
351 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
352 		}
353 
354 		mge_set_mac_address(sc);
355 	}
356 }
357 
358 static void
359 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
360 {
361 	u_int32_t *paddr;
362 
363 	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
364 	paddr = arg;
365 
366 	*paddr = segs->ds_addr;
367 }
368 
369 static int
370 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
371     bus_addr_t *paddr)
372 {
373 	struct mbuf *new_mbuf;
374 	bus_dma_segment_t seg[1];
375 	int error;
376 	int nsegs;
377 
378 	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
379 
380 	new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
381 	if (new_mbuf == NULL)
382 		return (ENOBUFS);
383 	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
384 
385 	if (*mbufp) {
386 		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
387 		bus_dmamap_unload(tag, map);
388 	}
389 
390 	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
391 	    BUS_DMA_NOWAIT);
392 	KASSERT(nsegs == 1, ("Too many segments returned!"));
393 	if (nsegs != 1 || error)
394 		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
395 
396 	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
397 
398 	(*mbufp) = new_mbuf;
399 	(*paddr) = seg->ds_addr;
400 	return (0);
401 }
402 
403 static int
404 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
405     uint32_t size, bus_dma_tag_t *buffer_tag)
406 {
407 	struct mge_desc_wrapper *dw;
408 	bus_addr_t desc_paddr;
409 	int i, error;
410 
411 	desc_paddr = 0;
412 	for (i = size - 1; i >= 0; i--) {
413 		dw = &(tab[i]);
414 		error = bus_dmamem_alloc(sc->mge_desc_dtag,
415 		    (void**)&(dw->mge_desc),
416 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
417 		    &(dw->desc_dmap));
418 
419 		if (error) {
420 			if_printf(sc->ifp, "failed to allocate DMA memory\n");
421 			dw->mge_desc = NULL;
422 			return (ENXIO);
423 		}
424 
425 		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
426 		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
427 		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
428 
429 		if (error) {
430 			if_printf(sc->ifp, "can't load descriptor\n");
431 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
432 			    dw->desc_dmap);
433 			dw->mge_desc = NULL;
434 			return (ENXIO);
435 		}
436 
437 		/* Chain descriptors */
438 		dw->mge_desc->next_desc = desc_paddr;
439 		desc_paddr = dw->mge_desc_paddr;
440 	}
441 	tab[size - 1].mge_desc->next_desc = desc_paddr;
442 
443 	/* Allocate a busdma tag for mbufs. */
444 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
445 	    8, 0,				/* alignment, boundary */
446 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
447 	    BUS_SPACE_MAXADDR,			/* highaddr */
448 	    NULL, NULL,				/* filtfunc, filtfuncarg */
449 	    MCLBYTES, 1,			/* maxsize, nsegments */
450 	    MCLBYTES, 0,			/* maxsegsz, flags */
451 	    NULL, NULL,				/* lockfunc, lockfuncarg */
452 	    buffer_tag);			/* dmat */
453 	if (error) {
454 		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
455 		return (ENXIO);
456 	}
457 
458 	/* Create TX busdma maps */
459 	for (i = 0; i < size; i++) {
460 		dw = &(tab[i]);
461 		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
462 		if (error) {
463 			if_printf(sc->ifp, "failed to create map for mbuf\n");
464 			return (ENXIO);
465 		}
466 
467 		dw->buffer = (struct mbuf*)NULL;
468 		dw->mge_desc->buffer = (bus_addr_t)NULL;
469 	}
470 
471 	return (0);
472 }
473 
474 static int
475 mge_allocate_dma(struct mge_softc *sc)
476 {
477 	int error;
478 	struct mge_desc_wrapper *dw;
479 	int i;
480 
481 	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
482 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
483 	    16, 0,				/* alignment, boundary */
484 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
485 	    BUS_SPACE_MAXADDR,			/* highaddr */
486 	    NULL, NULL,				/* filtfunc, filtfuncarg */
487 	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
488 	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
489 	    NULL, NULL,				/* lockfunc, lockfuncarg */
490 	    &sc->mge_desc_dtag);		/* dmat */
491 	if (error) {
		device_printf(sc->dev,
		    "failed to create DMA tag for descriptors\n");
		return (ENXIO);
	}
492 
493 	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
494 	    &sc->mge_tx_dtag);
495 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
496 	    &sc->mge_rx_dtag);
497 
498 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
499 		dw = &(sc->mge_rx_desc[i]);
500 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
501 		    &dw->mge_desc->buffer);
502 	}
503 
504 	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
505 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
506 
507 	return (0);
508 }
509 
510 static void
511 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
512     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
513 {
514 	struct mge_desc_wrapper *dw;
515 	int i;
516 
517 	for (i = 0; i < size; i++) {
518 		/* Free RX mbuf */
519 		dw = &(tab[i]);
520 
521 		if (dw->buffer_dmap) {
522 			if (free_mbufs) {
523 				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
524 				    BUS_DMASYNC_POSTREAD);
525 				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
526 			}
527 			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
528 			if (free_mbufs)
529 				m_freem(dw->buffer);
530 		}
531 		/* Free RX descriptors */
532 		if (dw->desc_dmap) {
533 			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
534 			    BUS_DMASYNC_POSTREAD);
535 			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
536 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
537 			    dw->desc_dmap);
538 		}
539 	}
540 }
541 
542 static void
543 mge_free_dma(struct mge_softc *sc)
544 {
545 	/* Free descriptors and mbufs */
546 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
547 	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
548 
549 	/* Destroy mbuf dma tag */
550 	bus_dma_tag_destroy(sc->mge_tx_dtag);
551 	bus_dma_tag_destroy(sc->mge_rx_dtag);
552 	/* Destroy descriptors tag */
553 	bus_dma_tag_destroy(sc->mge_desc_dtag);
554 }
555 
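/*
 * Rebuild the RX descriptor ring from scratch and re-enable the RX queue;
 * called when the controller reports an RX resource error.
 */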
556 static void
557 mge_reinit_rx(struct mge_softc *sc)
558 {
559 	struct mge_desc_wrapper *dw;
560 	int i;
561 
562 	MGE_RECEIVE_LOCK_ASSERT(sc);
563 
564 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
565 
566 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
567 	    &sc->mge_rx_dtag);
568 
569 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
570 		dw = &(sc->mge_rx_desc[i]);
571 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
572 		&dw->mge_desc->buffer);
573 	}
574 
575 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
576 	sc->rx_desc_curr = 0;
577 
578 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
579 	    sc->rx_desc_start);
580 
581 	/* Enable RX queue */
582 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
583 }
584 
585 #ifdef DEVICE_POLLING
586 static poll_handler_t mge_poll;
587 
588 static int
589 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
590 {
591 	struct mge_softc *sc = ifp->if_softc;
592 	uint32_t int_cause, int_cause_ext;
593 	int rx_npkts = 0;
594 
595 	MGE_GLOBAL_LOCK(sc);
596 
597 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
598 		MGE_GLOBAL_UNLOCK(sc);
599 		return (rx_npkts);
600 	}
601 
602 	if (cmd == POLL_AND_CHECK_STATUS) {
603 		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
604 		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
605 
606 		/* Check for resource error */
607 		if (int_cause & MGE_PORT_INT_RXERRQ0)
608 			mge_reinit_rx(sc);
609 
610 		if (int_cause || int_cause_ext) {
611 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
612 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
613 		}
614 	}
615 
616 	mge_intr_tx_locked(sc);
617 	rx_npkts = mge_intr_rx_locked(sc, count);
618 
619 	MGE_GLOBAL_UNLOCK(sc);
620 	return (rx_npkts);
621 }
622 #endif /* DEVICE_POLLING */
623 
624 static int
625 mge_attach(device_t dev)
626 {
627 	struct mge_softc *sc;
628 	struct mii_softc *miisc;
629 	struct ifnet *ifp;
630 	uint8_t hwaddr[ETHER_ADDR_LEN];
631 	int i, error, phy;
632 
633 	sc = device_get_softc(dev);
634 	sc->dev = dev;
635 	sc->node = ofw_bus_get_node(dev);
636 
637 	/* Set chip version-dependent parameters */
638 	mge_ver_params(sc);
639 
640 	/* Get phy address and used softc from fdt */
641 	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) != 0)
642 		return (ENXIO);
643 
644 	/* Initialize mutexes */
645 	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
646 	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);
647 
648 	/* Allocate IO and IRQ resources */
649 	error = bus_alloc_resources(dev, res_spec, sc->res);
650 	if (error) {
651 		device_printf(dev, "could not allocate resources\n");
652 		mge_detach(dev);
653 		return (ENXIO);
654 	}
655 
656 	/* Allocate DMA, buffers, buffer descriptors */
657 	error = mge_allocate_dma(sc);
658 	if (error) {
659 		mge_detach(dev);
660 		return (ENXIO);
661 	}
662 
663 	sc->tx_desc_curr = 0;
664 	sc->rx_desc_curr = 0;
665 	sc->tx_desc_used_idx = 0;
666 	sc->tx_desc_used_count = 0;
667 
668 	/* Configure defaults for interrupts coalescing */
669 	sc->rx_ic_time = 768;
670 	sc->tx_ic_time = 768;
671 	mge_add_sysctls(sc);
672 
673 	/* Allocate network interface */
674 	ifp = sc->ifp = if_alloc(IFT_ETHER);
675 	if (ifp == NULL) {
676 		device_printf(dev, "if_alloc() failed\n");
677 		mge_detach(dev);
678 		return (ENOMEM);
679 	}
680 
681 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
682 	ifp->if_softc = sc;
683 	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
684 	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
685 	ifp->if_capenable = ifp->if_capabilities;
686 	ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
687 
688 #ifdef DEVICE_POLLING
689 	/* Advertise that polling is supported */
690 	ifp->if_capabilities |= IFCAP_POLLING;
691 #endif
692 
693 	ifp->if_init = mge_init;
694 	ifp->if_start = mge_start;
695 	ifp->if_ioctl = mge_ioctl;
696 
697 	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
698 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
699 	IFQ_SET_READY(&ifp->if_snd);
700 
701 	mge_get_mac_address(sc, hwaddr);
702 	ether_ifattach(ifp, hwaddr);
703 	callout_init(&sc->wd_callout, 0);
704 
705 	/* Attach PHY(s) */
706 	error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
707 	    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
708 	if (error) {
709 		device_printf(dev, "attaching PHYs failed\n");
710 		mge_detach(dev);
711 		return (error);
712 	}
713 	sc->mii = device_get_softc(sc->miibus);
714 
715 	/* Tell the MAC where to find the PHY so autoneg works */
716 	miisc = LIST_FIRST(&sc->mii->mii_phys);
717 	MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
718 
719 	/* Attach interrupt handlers; only the RX and TX interrupts are used */
720 	for (i = 0; i < 2; ++i) {
721 		error = bus_setup_intr(dev, sc->res[1 + i],
722 		    INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
723 		    sc, &sc->ih_cookie[i]);
724 		if (error) {
725 			device_printf(dev, "could not setup %s\n",
726 			    mge_intrs[i].description);
727 			mge_detach(dev);
728 			return (error);
729 		}
730 	}
731 
732 	return (0);
733 }
734 
735 static int
736 mge_detach(device_t dev)
737 {
738 	struct mge_softc *sc;
739 	int error, i;
740 
741 	sc = device_get_softc(dev);
742 
743 	/* Stop controller and free TX queue */
744 	if (sc->ifp)
745 		mge_shutdown(dev);
746 
747 	/* Wait for the tick callout to finish */
748 	callout_drain(&sc->wd_callout);
749 
750 	/* Stop and release all interrupts */
751 	for (i = 0; i < 2; ++i) {
752 		if (!sc->ih_cookie[i])
753 			continue;
754 
755 		error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
756 		if (error)
757 			device_printf(dev, "could not release %s\n",
758 			    mge_intrs[i].description);
759 	}
760 
761 	/* Detach network interface */
762 	if (sc->ifp) {
763 		ether_ifdetach(sc->ifp);
764 		if_free(sc->ifp);
765 	}
766 
767 	/* Free DMA resources */
768 	mge_free_dma(sc);
769 
770 	/* Free IO memory handler */
771 	bus_release_resources(dev, res_spec, sc->res);
772 
773 	/* Destroy mutexes */
774 	mtx_destroy(&sc->receive_lock);
775 	mtx_destroy(&sc->transmit_lock);
776 
777 	return (0);
778 }
779 
780 static void
781 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
782 {
783 	struct mge_softc *sc = ifp->if_softc;
784 	struct mii_data *mii;
785 
786 	MGE_TRANSMIT_LOCK(sc);
787 
788 	mii = sc->mii;
789 	mii_pollstat(mii);
790 
791 	ifmr->ifm_active = mii->mii_media_active;
792 	ifmr->ifm_status = mii->mii_media_status;
793 
794 	MGE_TRANSMIT_UNLOCK(sc);
795 }
796 
797 static uint32_t
798 mge_set_port_serial_control(uint32_t media)
799 {
800 	uint32_t port_config;
801 
802 	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
803 	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
804 
805 	if (IFM_TYPE(media) == IFM_ETHER) {
806 		switch(IFM_SUBTYPE(media)) {
807 			case IFM_AUTO:
808 				break;
809 			case IFM_1000_T:
810 				port_config  |= (PORT_SERIAL_GMII_SPEED_1000 |
811 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
812 				    PORT_SERIAL_SPEED_AUTONEG);
813 				break;
814 			case IFM_100_TX:
815 				port_config  |= (PORT_SERIAL_MII_SPEED_100 |
816 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
817 				    PORT_SERIAL_SPEED_AUTONEG);
818 				break;
819 			case IFM_10_T:
820 				port_config  |= (PORT_SERIAL_AUTONEG |
821 				    PORT_SERIAL_AUTONEG_FC |
822 				    PORT_SERIAL_SPEED_AUTONEG);
823 				break;
824 		}
825 		if (media & IFM_FDX)
826 			port_config |= PORT_SERIAL_FULL_DUPLEX;
827 	}
828 	return (port_config);
829 }
830 
831 static int
832 mge_ifmedia_upd(struct ifnet *ifp)
833 {
834 	struct mge_softc *sc = ifp->if_softc;
835 
836 	if (ifp->if_flags & IFF_UP) {
837 		MGE_GLOBAL_LOCK(sc);
838 
839 		sc->mge_media_status = sc->mii->mii_media.ifm_media;
840 		mii_mediachg(sc->mii);
841 		mge_init_locked(sc);
842 
843 		MGE_GLOBAL_UNLOCK(sc);
844 	}
845 
846 	return (0);
847 }
848 
849 static void
850 mge_init(void *arg)
851 {
852 	struct mge_softc *sc = arg;
853 
854 	MGE_GLOBAL_LOCK(sc);
855 
856 	mge_init_locked(arg);
857 
858 	MGE_GLOBAL_UNLOCK(sc);
859 }
860 
861 static void
862 mge_init_locked(void *arg)
863 {
864 	struct mge_softc *sc = arg;
865 	struct mge_desc_wrapper *dw;
866 	volatile uint32_t reg_val;
867 	int i, count;
868 
869 
870 	MGE_GLOBAL_LOCK_ASSERT(sc);
871 
872 	/* Stop interface */
873 	mge_stop(sc);
874 
875 	/* Disable interrupts */
876 	mge_intrs_ctrl(sc, 0);
877 
878 	/* Set MAC address */
879 	mge_set_mac_address(sc);
880 
881 	/* Setup multicast filters */
882 	mge_setup_multicast(sc);
883 
884 	if (sc->mge_ver == 2) {
885 		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
886 		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
887 	}
888 
889 	/* Initialize TX queue configuration registers */
890 	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
891 	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
892 	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
893 
894 	/* Clear TX queue configuration registers for unused queues */
895 	for (i = 1; i < 7; i++) {
896 		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
897 		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
898 		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
899 	}
900 
901 	/* Set default MTU; sc->mge_mtu holds the per-version register offset */
902 	MGE_WRITE(sc, sc->mge_mtu, 0);
903 
904 	/* Port configuration */
905 	MGE_WRITE(sc, MGE_PORT_CONFIG,
906 	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
907 	    PORT_CONFIG_ARO_RXQ(0));
908 	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);
909 
910 	/* Setup port configuration */
911 	reg_val = mge_set_port_serial_control(sc->mge_media_status);
912 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
913 
914 	/* Setup SDMA configuration */
915 	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
916 	    MGE_SDMA_TX_BYTE_SWAP |
917 	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
918 	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
919 
920 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
921 
922 	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
923 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
924 	    sc->rx_desc_start);
925 
926 	/* Reset descriptor indexes */
927 	sc->tx_desc_curr = 0;
928 	sc->rx_desc_curr = 0;
929 	sc->tx_desc_used_idx = 0;
930 	sc->tx_desc_used_count = 0;
931 
932 	/* Enable RX descriptors */
933 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
934 		dw = &sc->mge_rx_desc[i];
935 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
936 		dw->mge_desc->buff_size = MCLBYTES;
937 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
938 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
939 	}
940 
941 	/* Enable RX queue */
942 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
943 
944 	/* Enable port */
945 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
946 	reg_val |= PORT_SERIAL_ENABLE;
947 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
948 	count = 0x100000;
949 	for (;;) {
950 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
951 		if (reg_val & MGE_STATUS_LINKUP)
952 			break;
953 		DELAY(100);
954 		if (--count == 0) {
955 			if_printf(sc->ifp, "Timeout on link-up\n");
956 			break;
957 		}
958 	}
959 
960 	/* Setup interrupts coalescing */
961 	mge_set_rxic(sc);
962 	mge_set_txic(sc);
963 
964 	/* Enable interrupts */
965 #ifdef DEVICE_POLLING
966 	/*
967 	 * ...only if polling is not turned on. Disable interrupts explicitly
968 	 * if polling is enabled.
969 	 */
970 	if (sc->ifp->if_capenable & IFCAP_POLLING)
971 		mge_intrs_ctrl(sc, 0);
972 	else
973 #endif /* DEVICE_POLLING */
974 	mge_intrs_ctrl(sc, 1);
975 
976 	/* Activate network interface */
977 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
978 	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
979 	sc->wd_timer = 0;
980 
981 	/* Schedule watchdog timeout */
982 	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
983 }
984 
985 static void
986 mge_intr_err(void *arg)
987 {
988 	struct mge_softc *sc = arg;
989 	struct ifnet *ifp;
990 
991 	ifp = sc->ifp;
992 	if_printf(ifp, "%s\n", __FUNCTION__);
993 }
994 
995 static void
996 mge_intr_misc(void *arg)
997 {
998 	struct mge_softc *sc = arg;
999 	struct ifnet *ifp;
1000 
1001 	ifp = sc->ifp;
1002 	if_printf(ifp, "%s\n", __FUNCTION__);
1003 }
1004 
1005 static void
1006 mge_intr_rx(void *arg)
{
1007 	struct mge_softc *sc = arg;
1008 	uint32_t int_cause, int_cause_ext;
1009 
1010 	MGE_RECEIVE_LOCK(sc);
1011 
1012 #ifdef DEVICE_POLLING
1013 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1014 		MGE_RECEIVE_UNLOCK(sc);
1015 		return;
1016 	}
1017 #endif
1018 
1019 	/* Get interrupt cause */
1020 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1021 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1022 
1023 	/* Check for resource error */
1024 	if (int_cause & MGE_PORT_INT_RXERRQ0) {
1025 		mge_reinit_rx(sc);
1026 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1027 		    int_cause & ~MGE_PORT_INT_RXERRQ0);
1028 	}
1029 
1030 	int_cause &= MGE_PORT_INT_RXQ0;
1031 	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1032 
1033 	if (int_cause || int_cause_ext) {
1034 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1035 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1036 		mge_intr_rx_locked(sc, -1);
1037 	}
1038 
1039 	MGE_RECEIVE_UNLOCK(sc);
1040 }
1041 
1042 
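/*
 * Drain up to 'count' received frames from the RX ring (a negative count
 * means no limit) and pass them to the network stack; returns the number of
 * frames processed.
 */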
1043 static int
1044 mge_intr_rx_locked(struct mge_softc *sc, int count)
1045 {
1046 	struct ifnet *ifp = sc->ifp;
1047 	uint32_t status;
1048 	uint16_t bufsize;
1049 	struct mge_desc_wrapper* dw;
1050 	struct mbuf *mb;
1051 	int rx_npkts = 0;
1052 
1053 	MGE_RECEIVE_LOCK_ASSERT(sc);
1054 
1055 	while (count != 0) {
1056 		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1057 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1058 		    BUS_DMASYNC_POSTREAD);
1059 
1060 		/* Get status */
1061 		status = dw->mge_desc->cmd_status;
1062 		bufsize = dw->mge_desc->buff_size;
1063 		if ((status & MGE_DMA_OWNED) != 0)
1064 			break;
1065 
1066 		if (dw->mge_desc->byte_count &&
1067 		    !(status & MGE_ERR_SUMMARY)) {
1068 
1069 			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1070 			    BUS_DMASYNC_POSTREAD);
1071 
1072 			mb = m_devget(dw->buffer->m_data,
1073 			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
1074 			    0, ifp, NULL);
1075 
1076 			if (mb == NULL)
1077 				/* Give up if no mbufs */
1078 				break;
1079 
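			/*
			 * The controller prepends two bytes of padding to
			 * each received frame; strip them here.
			 */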
1080 			mb->m_len -= 2;
1081 			mb->m_pkthdr.len -= 2;
1082 			mb->m_data += 2;
1083 
1084 			mge_offload_process_frame(ifp, mb, status,
1085 			    bufsize);
1086 
1087 			MGE_RECEIVE_UNLOCK(sc);
1088 			(*ifp->if_input)(ifp, mb);
1089 			MGE_RECEIVE_LOCK(sc);
1090 			rx_npkts++;
1091 		}
1092 
1093 		dw->mge_desc->byte_count = 0;
1094 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1095 		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
1096 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1097 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1098 
1099 		if (count > 0)
1100 			count -= 1;
1101 	}
1102 
1103 	return (rx_npkts);
1104 }
1105 
1106 static void
1107 mge_intr_sum(void *arg)
1108 {
1109 	struct mge_softc *sc = arg;
1110 	struct ifnet *ifp;
1111 
1112 	ifp = sc->ifp;
1113 	if_printf(ifp, "%s\n", __FUNCTION__);
1114 }
1115 
1116 static void
1117 mge_intr_tx(void *arg)
1118 {
1119 	struct mge_softc *sc = arg;
1120 	uint32_t int_cause_ext;
1121 
1122 	MGE_TRANSMIT_LOCK(sc);
1123 
1124 #ifdef DEVICE_POLLING
1125 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1126 		MGE_TRANSMIT_UNLOCK(sc);
1127 		return;
1128 	}
1129 #endif
1130 
1131 	/* Ack the interrupt */
1132 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1133 	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
1134 	    int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);
1135 
1136 	mge_intr_tx_locked(sc);
1137 
1138 	MGE_TRANSMIT_UNLOCK(sc);
1139 }
1140 
1141 
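/*
 * Reclaim TX descriptors that the DMA engine has completed, update the
 * collision statistics and, if any were freed, restart transmission.
 */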
1142 static void
1143 mge_intr_tx_locked(struct mge_softc *sc)
1144 {
1145 	struct ifnet *ifp = sc->ifp;
1146 	struct mge_desc_wrapper *dw;
1147 	struct mge_desc *desc;
1148 	uint32_t status;
1149 	int send = 0;
1150 
1151 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1152 
1153 	/* Disable watchdog */
1154 	sc->wd_timer = 0;
1155 
1156 	while (sc->tx_desc_used_count) {
1157 		/* Get the descriptor */
1158 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1159 		desc = dw->mge_desc;
1160 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1161 		    BUS_DMASYNC_POSTREAD);
1162 
1163 		/* Get descriptor status */
1164 		status = desc->cmd_status;
1165 
1166 		if (status & MGE_DMA_OWNED)
1167 			break;
1168 
1169 		sc->tx_desc_used_idx =
1170 		    (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
1171 		sc->tx_desc_used_count--;
1172 
1173 		/* Update collision statistics */
1174 		if (status & MGE_ERR_SUMMARY) {
1175 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1176 				ifp->if_collisions++;
1177 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1178 				ifp->if_collisions += 16;
1179 		}
1180 
1181 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1182 		    BUS_DMASYNC_POSTWRITE);
1183 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1184 		m_freem(dw->buffer);
1185 		dw->buffer = (struct mbuf*)NULL;
1186 		send++;
1187 
1188 		ifp->if_opackets++;
1189 	}
1190 
1191 	if (send) {
1192 		/* Now send anything that was pending */
1193 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1194 		mge_start_locked(ifp);
1195 	}
1196 }
1197 
1198 static int
1199 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1200 {
1201 	struct mge_softc *sc = ifp->if_softc;
1202 	struct ifreq *ifr = (struct ifreq *)data;
1203 	int mask, error;
1204 	uint32_t flags;
1205 
1206 	error = 0;
1207 
1208 	switch (command) {
1209 	case SIOCSIFFLAGS:
1210 		MGE_GLOBAL_LOCK(sc);
1211 
1212 		if (ifp->if_flags & IFF_UP) {
1213 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1214 				flags = ifp->if_flags ^ sc->mge_if_flags;
1215 				if (flags & IFF_PROMISC)
1216 					mge_set_prom_mode(sc,
1217 					    MGE_RX_DEFAULT_QUEUE);
1218 
1219 				if (flags & IFF_ALLMULTI)
1220 					mge_setup_multicast(sc);
1221 			} else
1222 				mge_init_locked(sc);
1223 		}
1224 		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1225 			mge_stop(sc);
1226 
1227 		sc->mge_if_flags = ifp->if_flags;
1228 		MGE_GLOBAL_UNLOCK(sc);
1229 		break;
1230 	case SIOCADDMULTI:
1231 	case SIOCDELMULTI:
1232 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1233 			MGE_GLOBAL_LOCK(sc);
1234 			mge_setup_multicast(sc);
1235 			MGE_GLOBAL_UNLOCK(sc);
1236 		}
1237 		break;
1238 	case SIOCSIFCAP:
1239 		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1240 		if (mask & IFCAP_HWCSUM) {
1241 			ifp->if_capenable &= ~IFCAP_HWCSUM;
1242 			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
1243 			if (ifp->if_capenable & IFCAP_TXCSUM)
1244 				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
1245 			else
1246 				ifp->if_hwassist = 0;
1247 		}
1248 #ifdef DEVICE_POLLING
1249 		if (mask & IFCAP_POLLING) {
1250 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1251 				error = ether_poll_register(mge_poll, ifp);
1252 				if (error)
1253 					return(error);
1254 
1255 				MGE_GLOBAL_LOCK(sc);
1256 				mge_intrs_ctrl(sc, 0);
1257 				ifp->if_capenable |= IFCAP_POLLING;
1258 				MGE_GLOBAL_UNLOCK(sc);
1259 			} else {
1260 				error = ether_poll_deregister(ifp);
1261 				MGE_GLOBAL_LOCK(sc);
1262 				mge_intrs_ctrl(sc, 1);
1263 				ifp->if_capenable &= ~IFCAP_POLLING;
1264 				MGE_GLOBAL_UNLOCK(sc);
1265 			}
1266 		}
1267 #endif
1268 		break;
1269 	case SIOCGIFMEDIA: /* fall through */
1270 	case SIOCSIFMEDIA:
1271 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T &&
1272 		    !(ifr->ifr_media & IFM_FDX)) {
1273 			device_printf(sc->dev,
1274 			    "1000baseT half-duplex unsupported\n");
1275 			return (0);
1276 		}
1277 		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1278 		break;
1279 	default:
1280 		error = ether_ioctl(ifp, command, data);
1281 	}
1282 	return (error);
1283 }
1284 
1285 static int
1286 mge_miibus_readreg(device_t dev, int phy, int reg)
1287 {
1288 	struct mge_softc *sc;
1289 	uint32_t retries;
1290 
1291 	sc = device_get_softc(dev);
1292 
1293 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
1294 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
1295 
1296 	retries = MGE_SMI_READ_RETRIES;
1297 	while (--retries &&
1298 	    !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
1299 		DELAY(MGE_SMI_READ_DELAY);
1300 
1301 	if (retries == 0)
1302 		device_printf(dev, "Timeout while reading from PHY\n");
1303 
1304 	return (MGE_READ(sc->phy_sc, MGE_REG_SMI) & 0xffff);
1305 }
1306 
1307 static int
1308 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1309 {
1310 	struct mge_softc *sc;
1311 	uint32_t retries;
1312 
1313 	sc = device_get_softc(dev);
1314 
1315 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
1316 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));
1317 
1318 	retries = MGE_SMI_WRITE_RETRIES;
1319 	while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
1320 		DELAY(MGE_SMI_WRITE_DELAY);
1321 
1322 	if (retries == 0)
1323 		device_printf(dev, "Timeout while writing to PHY\n");
1324 	return (0);
1325 }
1326 
1327 static int
1328 mge_probe(device_t dev)
1329 {
1330 
1331 	if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
1332 		return (ENXIO);
1333 
1334 	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1335 	return (BUS_PROBE_DEFAULT);
1336 }
1337 
1338 static int
1339 mge_resume(device_t dev)
1340 {
1341 
1342 	device_printf(dev, "%s\n", __FUNCTION__);
1343 	return (0);
1344 }
1345 
1346 static int
1347 mge_shutdown(device_t dev)
1348 {
1349 	struct mge_softc *sc = device_get_softc(dev);
1350 
1351 	MGE_GLOBAL_LOCK(sc);
1352 
1353 #ifdef DEVICE_POLLING
1354 	if (sc->ifp->if_capenable & IFCAP_POLLING)
1355 		ether_poll_deregister(sc->ifp);
1356 #endif
1357 
1358 	mge_stop(sc);
1359 
1360 	MGE_GLOBAL_UNLOCK(sc);
1361 
1362 	return (0);
1363 }
1364 
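/*
 * Map a single (caller-defragmented) mbuf onto the next free TX descriptor
 * and hand it over to the DMA engine.
 */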
1365 static int
1366 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1367 {
1368 	struct mge_desc_wrapper *dw = NULL;
1369 	struct ifnet *ifp;
1370 	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1371 	bus_dmamap_t mapp;
1372 	int error;
1373 	int seg, nsegs;
1374 	int desc_no;
1375 
1376 	ifp = sc->ifp;
1377 
1378 	/* Check for free descriptors */
1379 	if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1380 		/* No free descriptors */
1381 		return (-1);
1382 	}
1383 
1384 	/* Fetch unused map */
1385 	desc_no = sc->tx_desc_curr;
1386 	dw = &sc->mge_tx_desc[desc_no];
1387 	mapp = dw->buffer_dmap;
1388 
1389 	/* Create mapping in DMA memory */
1390 	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1391 	    BUS_DMA_NOWAIT);
1392 	if (error != 0 || nsegs != 1) {
1393 		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1394 		return ((error != 0) ? error : -1);
1395 	}
1396 
1397 	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1398 
1399 	/* Everything is ok, now we can send buffers */
1400 	for (seg = 0; seg < nsegs; seg++) {
1401 		dw->mge_desc->byte_count = segs[seg].ds_len;
1402 		dw->mge_desc->buffer = segs[seg].ds_addr;
1403 		dw->buffer = m0;
1404 		dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
1405 		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1406 		    MGE_DMA_OWNED;
1407 
1408 		if (seg == 0)
1409 			mge_offload_setup_descriptor(sc, dw);
1410 	}
1411 
1412 	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1413 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1414 
1415 	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
1416 	sc->tx_desc_used_count++;
1417 	return (0);
1418 }
1419 
1420 static void
1421 mge_tick(void *msc)
1422 {
1423 	struct mge_softc *sc = msc;
1424 
1425 	/* Check for TX timeout */
1426 	mge_watchdog(sc);
1427 
1428 	mii_tick(sc->mii);
1429 
1430 	/* Check for media type change */
1431 	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
1432 		mge_ifmedia_upd(sc->ifp);
1433 
1434 	/* Schedule another timeout one second from now */
1435 	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1436 }
1437 
1438 static void
1439 mge_watchdog(struct mge_softc *sc)
1440 {
1441 	struct ifnet *ifp;
1442 
1443 	ifp = sc->ifp;
1444 
1445 	MGE_GLOBAL_LOCK(sc);
1446 
1447 	if (sc->wd_timer == 0 || --sc->wd_timer) {
1448 		MGE_GLOBAL_UNLOCK(sc);
1449 		return;
1450 	}
1451 
1452 	ifp->if_oerrors++;
1453 	if_printf(ifp, "watchdog timeout\n");
1454 
1455 	mge_stop(sc);
1456 	mge_init_locked(sc);
1457 
1458 	MGE_GLOBAL_UNLOCK(sc);
1459 }
1460 
1461 static void
1462 mge_start(struct ifnet *ifp)
1463 {
1464 	struct mge_softc *sc = ifp->if_softc;
1465 
1466 	MGE_TRANSMIT_LOCK(sc);
1467 
1468 	mge_start_locked(ifp);
1469 
1470 	MGE_TRANSMIT_UNLOCK(sc);
1471 }
1472 
1473 static void
1474 mge_start_locked(struct ifnet *ifp)
1475 {
1476 	struct mge_softc *sc;
1477 	struct mbuf *m0, *mtmp;
1478 	uint32_t reg_val, queued = 0;
1479 
1480 	sc = ifp->if_softc;
1481 
1482 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1483 
1484 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1485 	    IFF_DRV_RUNNING)
1486 		return;
1487 
1488 	for (;;) {
1489 		/* Get packet from the queue */
1490 		IF_DEQUEUE(&ifp->if_snd, m0);
1491 		if (m0 == NULL)
1492 			break;
1493 
1494 		mtmp = m_defrag(m0, M_DONTWAIT);
1495 		if (mtmp)
1496 			m0 = mtmp;
1497 
1498 		if (mge_encap(sc, m0)) {
1499 			IF_PREPEND(&ifp->if_snd, m0);
1500 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1501 			break;
1502 		}
1503 		queued++;
1504 		BPF_MTAP(ifp, m0);
1505 	}
1506 
1507 	if (queued) {
1508 		/* Enable transmitter and watchdog timer */
1509 		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1510 		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
1511 		sc->wd_timer = 5;
1512 	}
1513 }
1514 
1515 static void
1516 mge_stop(struct mge_softc *sc)
1517 {
1518 	struct ifnet *ifp;
1519 	volatile uint32_t reg_val, status;
1520 	struct mge_desc_wrapper *dw;
1521 	struct mge_desc *desc;
1522 	int count;
1523 
1524 	ifp = sc->ifp;
1525 
1526 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1527 		return;
1528 
1529 	/* Stop tick engine */
1530 	callout_stop(&sc->wd_callout);
1531 
1532 	/* Disable interface */
1533 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1534 	sc->wd_timer = 0;
1535 
1536 	/* Disable interrupts */
1537 	mge_intrs_ctrl(sc, 0);
1538 
1539 	/* Disable Rx and Tx */
1540 	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1541 	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1542 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1543 
1544 	/* Remove pending data from TX queue */
1545 	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1546 	    sc->tx_desc_used_count) {
1547 		/* Get the descriptor */
1548 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1549 		desc = dw->mge_desc;
1550 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1551 		    BUS_DMASYNC_POSTREAD);
1552 
1553 		/* Get descriptor status */
1554 		status = desc->cmd_status;
1555 
1556 		if (status & MGE_DMA_OWNED)
1557 			break;
1558 
1559 		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
1560 		    MGE_TX_DESC_NUM;
1561 		sc->tx_desc_used_count--;
1562 
1563 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1564 		    BUS_DMASYNC_POSTWRITE);
1565 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1566 
1567 		m_freem(dw->buffer);
1568 		dw->buffer = (struct mbuf*)NULL;
1569 	}
1570 
1571 	/* Wait for end of transmission */
1572 	count = 0x100000;
1573 	while (count--) {
1574 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1575 		if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
1576 		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1577 			break;
1578 		DELAY(100);
1579 	}
1580 
1581 	if (!count)
1582 		if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
1583 		    __FUNCTION__);
1584 
1585 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1586 	reg_val &= ~(PORT_SERIAL_ENABLE);
1587 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1588 }
1589 
1590 static int
1591 mge_suspend(device_t dev)
1592 {
1593 
1594 	device_printf(dev, "%s\n", __FUNCTION__);
1595 	return (0);
1596 }
1597 
1598 static void
1599 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1600     uint32_t status, uint16_t bufsize)
1601 {
1602 	int csum_flags = 0;
1603 
1604 	if (ifp->if_capenable & IFCAP_RXCSUM) {
1605 		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1606 			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
1607 
1608 		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1609 		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1610 		    (status & MGE_RX_L4_CSUM_OK)) {
1611 			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1612 			frame->m_pkthdr.csum_data = 0xFFFF;
1613 		}
1614 
1615 		frame->m_pkthdr.csum_flags = csum_flags;
1616 	}
1617 }
1618 
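/*
 * Translate mbuf checksum offload requests (CSUM_IP/CSUM_TCP/CSUM_UDP) into
 * the corresponding TX descriptor command bits; only IPv4 frames are handled.
 */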
1619 static void
1620 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1621 {
1622 	struct mbuf *m0 = dw->buffer;
1623 	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1624 	int csum_flags = m0->m_pkthdr.csum_flags;
1625 	int cmd_status = 0;
1626 	struct ip *ip;
1627 	int ehlen, etype;
1628 
1629 	if (csum_flags) {
1630 		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1631 			etype = ntohs(eh->evl_proto);
1632 			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1633 			csum_flags |= MGE_TX_VLAN_TAGGED;
1634 		} else {
1635 			etype = ntohs(eh->evl_encap_proto);
1636 			ehlen = ETHER_HDR_LEN;
1637 		}
1638 
1639 		if (etype != ETHERTYPE_IP) {
1640 			if_printf(sc->ifp,
1641 			    "TCP/IP Offload enabled for unsupported "
1642 			    "protocol!\n");
1643 			return;
1644 		}
1645 
1646 		ip = (struct ip *)(m0->m_data + ehlen);
1647 		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1648 
1649 		if ((m0->m_flags & M_FRAG) == 0)
1650 			cmd_status |= MGE_TX_NOT_FRAGMENT;
1651 	}
1652 
1653 	if (csum_flags & CSUM_IP)
1654 		cmd_status |= MGE_TX_GEN_IP_CSUM;
1655 
1656 	if (csum_flags & CSUM_TCP)
1657 		cmd_status |= MGE_TX_GEN_L4_CSUM;
1658 
1659 	if (csum_flags & CSUM_UDP)
1660 		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1661 
1662 	dw->mge_desc->cmd_status |= cmd_status;
1663 }
1664 
1665 static void
1666 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1667 {
1668 
1669 	if (enable) {
1670 		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
1671 		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1672 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
1673 		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1674 		    MGE_PORT_INT_EXT_TXBUF0);
1675 	} else {
1676 		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1677 		MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1678 
1679 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1680 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1681 
1682 		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1683 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
1684 	}
1685 }
1686 
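/*
 * Table-driven CRC-8 (polynomial x^8 + x^2 + x + 1) used to hash multicast
 * addresses into the "other multicast" filter table.
 */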
1687 static uint8_t
1688 mge_crc8(uint8_t *data, int size)
1689 {
1690 	uint8_t crc = 0;
1691 	static const uint8_t ct[256] = {
1692 		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1693 		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1694 		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1695 		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1696 		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1697 		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1698 		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
1699 		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
1700 		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
1701 		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
1702 		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
1703 		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
1704 		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
1705 		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
1706 		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
1707 		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
1708 		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
1709 		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
1710 		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
1711 		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
1712 		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
1713 		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
1714 		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
1715 		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
1716 		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
1717 		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
1718 		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
1719 		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
1720 		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
1721 		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
1722 		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
1723 		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
1724 	};
1725 
1726 	while(size--)
1727 		crc = ct[crc ^ *(data++)];
1728 
1729 	return(crc);
1730 }
1731 
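/*
 * Program the multicast filters: addresses with the IANA IPv4 multicast
 * prefix 01:00:5e:00:00:xx go into the "special" table indexed by their
 * last byte, while all other multicast addresses are hashed with CRC-8
 * into the "other" table.
 */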
1732 static void
1733 mge_setup_multicast(struct mge_softc *sc)
1734 {
1735 	uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
1736 	uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
1737 	uint32_t smt[MGE_MCAST_REG_NUMBER];
1738 	uint32_t omt[MGE_MCAST_REG_NUMBER];
1739 	struct ifnet *ifp = sc->ifp;
1740 	struct ifmultiaddr *ifma;
1741 	uint8_t *mac;
1742 	int i;
1743 
1744 	if (ifp->if_flags & IFF_ALLMULTI) {
1745 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
1746 			smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
1747 	} else {
1748 		memset(smt, 0, sizeof(smt));
1749 		memset(omt, 0, sizeof(omt));
1750 
1751 		if_maddr_rlock(ifp);
1752 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1753 			if (ifma->ifma_addr->sa_family != AF_LINK)
1754 				continue;
1755 
1756 			mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1757 			if (memcmp(mac, special, sizeof(special)) == 0) {
1758 				i = mac[5];
1759 				smt[i >> 2] |= v << ((i & 0x03) << 3);
1760 			} else {
1761 				i = mge_crc8(mac, ETHER_ADDR_LEN);
1762 				omt[i >> 2] |= v << ((i & 0x03) << 3);
1763 			}
1764 		}
1765 		if_maddr_runlock(ifp);
1766 	}
1767 
1768 	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
1769 		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
1770 		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
1771 	}
1772 }
1773 
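/*
 * Program the RX interrupt coalescing time into the SDMA configuration
 * register, clamping the request to the controller's maximum.
 */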
1774 static void
1775 mge_set_rxic(struct mge_softc *sc)
1776 {
1777 	uint32_t reg;
1778 
1779 	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
1780 		sc->rx_ic_time = sc->mge_rx_ipg_max;
1781 
1782 	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
1783 	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
1784 	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
1785 	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
1786 }
1787 
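/*
 * Program the TX interrupt coalescing time into the TX FIFO urgent threshold
 * register, clamping the request to the controller's maximum.
 */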
1788 static void
1789 mge_set_txic(struct mge_softc *sc)
1790 {
1791 	uint32_t reg;
1792 
1793 	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
1794 		sc->tx_ic_time = sc->mge_tfut_ipg_max;
1795 
1796 	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
1797 	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
1798 	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
1799 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
1800 }
1801 
1802 static int
1803 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
1804 {
1805 	struct mge_softc *sc = (struct mge_softc *)arg1;
1806 	uint32_t time;
1807 	int error;
1808 
1809 	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
1810 	error = sysctl_handle_int(oidp, &time, 0, req);
1811 	if (error != 0)
1812 		return(error);
1813 
1814 	MGE_GLOBAL_LOCK(sc);
1815 	if (arg2 == MGE_IC_RX) {
1816 		sc->rx_ic_time = time;
1817 		mge_set_rxic(sc);
1818 	} else {
1819 		sc->tx_ic_time = time;
1820 		mge_set_txic(sc);
1821 	}
1822 	MGE_GLOBAL_UNLOCK(sc);
1823 
1824 	return(0);
1825 }
1826 
1827 static void
1828 mge_add_sysctls(struct mge_softc *sc)
1829 {
1830 	struct sysctl_ctx_list *ctx;
1831 	struct sysctl_oid_list *children;
1832 	struct sysctl_oid *tree;
1833 
1834 	ctx = device_get_sysctl_ctx(sc->dev);
1835 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1836 	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
1837 	    CTLFLAG_RD, 0, "MGE Interrupts coalescing");
1838 	children = SYSCTL_CHILDREN(tree);
1839 
1840 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
1841 	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
1842 	    "I", "IC RX time threshold");
1843 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
1844 	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
1845 	    "I", "IC TX time threshold");
1846 }
1847