/* xref: /freebsd/sys/dev/mge/if_mge.c (revision b2db760808f74bb53c232900091c9da801ebbfcc) */
/*-
 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mge/if_mgevar.h>
#include <arm/mv/mvreg.h>
#include <arm/mv/mvvar.h>

#include "miibus_if.h"

/* PHY registers are in the address space of the first mge unit */
static struct mge_softc *sc_mge0 = NULL;

static int mge_probe(device_t dev);
static int mge_attach(device_t dev);
static int mge_detach(device_t dev);
static int mge_shutdown(device_t dev);
static int mge_suspend(device_t dev);
static int mge_resume(device_t dev);

static int mge_miibus_readreg(device_t dev, int phy, int reg);
static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);

static int mge_ifmedia_upd(struct ifnet *ifp);
static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

static void mge_init(void *arg);
static void mge_init_locked(void *arg);
static void mge_start(struct ifnet *ifp);
static void mge_start_locked(struct ifnet *ifp);
static void mge_watchdog(struct mge_softc *sc);
static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);

static uint32_t mge_tfut_ipg(uint32_t val, int ver);
static uint32_t mge_rx_ipg(uint32_t val, int ver);
static void mge_ver_params(struct mge_softc *sc);

static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
static void mge_intr_rx(void *arg);
static int mge_intr_rx_locked(struct mge_softc *sc, int count);
static void mge_intr_tx(void *arg);
static void mge_intr_tx_locked(struct mge_softc *sc);
static void mge_intr_misc(void *arg);
static void mge_intr_sum(void *arg);
static void mge_intr_err(void *arg);
static void mge_stop(struct mge_softc *sc);
static void mge_tick(void *msc);
static uint32_t mge_set_port_serial_control(uint32_t media);
static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
static void mge_set_mac_address(struct mge_softc *sc);
static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
    uint8_t queue);
static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
static int mge_allocate_dma(struct mge_softc *sc);
static int mge_alloc_desc_dma(struct mge_softc *sc,
    struct mge_desc_wrapper* desc_tab, uint32_t size,
    bus_dma_tag_t *buffer_tag);
static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, bus_addr_t *paddr);
static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void mge_free_dma(struct mge_softc *sc);
static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize);
static void mge_offload_setup_descriptor(struct mge_softc *sc,
    struct mge_desc_wrapper *dw);
static uint8_t mge_crc8(uint8_t *data, int size);
static void mge_setup_multicast(struct mge_softc *sc);
static void mge_set_rxic(struct mge_softc *sc);
static void mge_set_txic(struct mge_softc *sc);
static void mge_add_sysctls(struct mge_softc *sc);
static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);

static device_method_t mge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mge_probe),
	DEVMETHOD(device_attach,	mge_attach),
	DEVMETHOD(device_detach,	mge_detach),
	DEVMETHOD(device_shutdown,	mge_shutdown),
	DEVMETHOD(device_suspend,	mge_suspend),
	DEVMETHOD(device_resume,	mge_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
	{ 0, 0 }
};

static driver_t mge_driver = {
	"mge",
	mge_methods,
	sizeof(struct mge_softc),
};

static devclass_t mge_devclass;

DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(mge, ether, 1, 1, 1);
MODULE_DEPEND(mge, miibus, 1, 1, 1);

static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

static struct {
	driver_intr_t *handler;
	char *description;
} mge_intrs[MGE_INTR_COUNT] = {
	{ mge_intr_rx,	"GbE receive interrupt" },
	{ mge_intr_tx,	"GbE transmit interrupt" },
	{ mge_intr_misc, "GbE misc interrupt" },
	{ mge_intr_sum,	"GbE summary interrupt" },
	{ mge_intr_err,	"GbE error interrupt" },
};
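
/*
 * The five IRQ resources in res_spec map one-to-one onto the mge_intrs[]
 * table above (RX, TX, misc, summary, error); the attach path wires
 * res[1 + i] to mge_intrs[i].
 */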

static void
mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
{
	uint32_t mac_l, mac_h;
	uint8_t lmac[6];
	int i, valid;

	/*
	 * Retrieve hw address from the device tree.
	 */
	i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
	if (i == 6) {
		valid = 0;
		for (i = 0; i < 6; i++)
			if (lmac[i] != 0) {
				valid = 1;
				break;
			}

		if (valid) {
			bcopy(lmac, addr, 6);
			return;
		}
	}

	/*
	 * Fall back -- use the currently programmed address.
	 */
	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);

	addr[0] = (mac_h & 0xff000000) >> 24;
	addr[1] = (mac_h & 0x00ff0000) >> 16;
	addr[2] = (mac_h & 0x0000ff00) >> 8;
	addr[3] = (mac_h & 0x000000ff);
	addr[4] = (mac_l & 0x0000ff00) >> 8;
	addr[5] = (mac_l & 0x000000ff);
}

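/*
 * The interrupt-coalescing (IPG) fields live at different offsets and have
 * different widths depending on the controller version, so the encoding
 * helpers below take the chip version established by mge_ver_params().
 */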
static uint32_t
mge_tfut_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 4);
	case 2:
	default:
		return ((val & 0xffff) << 4);
	}
}

static uint32_t
mge_rx_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 8);
	case 2:
	default:
		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
	}
}

static void
mge_ver_params(struct mge_softc *sc)
{
	uint32_t d, r;

	soc_id(&d, &r);
	if (d == MV_DEV_88F6281 || d == MV_DEV_MV78100 ||
	    d == MV_DEV_MV78100_Z0) {
		sc->mge_ver = 2;
		sc->mge_mtu = 0x4e8;
		sc->mge_tfut_ipg_max = 0xFFFF;
		sc->mge_rx_ipg_max = 0xFFFF;
		sc->mge_tx_arb_cfg = 0xFC0000FF;
		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	} else {
		sc->mge_ver = 1;
		sc->mge_mtu = 0x458;
		sc->mge_tfut_ipg_max = 0x3FFF;
		sc->mge_rx_ipg_max = 0x3FFF;
		sc->mge_tx_arb_cfg = 0x000000FF;
		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	}
}

static void
mge_set_mac_address(struct mge_softc *sc)
{
	char *if_mac;
	uint32_t mac_l, mac_h;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	if_mac = (char *)IF_LLADDR(sc->ifp);

	mac_l = (if_mac[4] << 8) | (if_mac[5]);
	mac_h = (if_mac[0] << 24) | (if_mac[1] << 16) |
	    (if_mac[2] << 8) | (if_mac[3] << 0);

	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);

	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
}

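/*
 * The unicast filter is a table of MGE_UCAST_REG_NUMBER 32-bit registers,
 * four table entries per register (one byte each), indexed by the low
 * nibble of the last MAC address byte. Each entry byte holds a pass bit
 * and the RX queue number, which is what (1 | (queue << 1)) encodes below.
 */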
static void
mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
{
	uint32_t reg_idx, reg_off, reg_val, i;

	last_byte &= 0xf;
	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
	reg_val = (1 | (queue << 1)) << reg_off;

	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
		if (i == reg_idx)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
		else
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
	}
}

static void
mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
{
	uint32_t port_config;
	uint32_t reg_val, i;

	/* Enable or disable promiscuous mode as needed */
	if (sc->ifp->if_flags & IFF_PROMISC) {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config |= PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
		    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
		}

		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);

	} else {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config &= ~PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
		}

		mge_set_mac_address(sc);
	}
}

static void
mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;

	*paddr = segs->ds_addr;
}

static int
mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    bus_addr_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error;
	int nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (nsegs != 1 || error)
		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}

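/*
 * The descriptor ring is built back-to-front: each descriptor's next_desc
 * is set to the physical address of the previously allocated (i.e. the
 * following) descriptor, and the final assignment after the loop links
 * the last descriptor back to the first, closing the ring.
 */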
static int
mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t *buffer_tag)
{
	struct mge_desc_wrapper *dw;
	bus_addr_t desc_paddr;
	int i, error;

	desc_paddr = 0;
	for (i = size - 1; i >= 0; i--) {
		dw = &(tab[i]);
		error = bus_dmamem_alloc(sc->mge_desc_dtag,
		    (void**)&(dw->mge_desc),
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &(dw->desc_dmap));

		if (error) {
			if_printf(sc->ifp, "failed to allocate DMA memory\n");
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);

		if (error) {
			if_printf(sc->ifp, "can't load descriptor\n");
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		/* Chain descriptors */
		dw->mge_desc->next_desc = desc_paddr;
		desc_paddr = dw->mge_desc_paddr;
	}
	tab[size - 1].mge_desc->next_desc = desc_paddr;

	/* Allocate a busdma tag for mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    8, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    buffer_tag);			/* dmat */
	if (error) {
		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	/* Create TX busdma maps */
	for (i = 0; i < size; i++) {
		dw = &(tab[i]);
		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
		if (error) {
			if_printf(sc->ifp, "failed to create map for mbuf\n");
			return (ENXIO);
		}

		dw->buffer = (struct mbuf*)NULL;
		dw->mge_desc->buffer = (bus_addr_t)NULL;
	}

	return (0);
}

static int
mge_allocate_dma(struct mge_softc *sc)
{
	int error;
	struct mge_desc_wrapper *dw;
	int i;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    16, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->mge_desc_dtag);		/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create busdma tag for descriptors\n");
		return (ENXIO);
	}

	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
	    &sc->mge_tx_dtag);
	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;

	return (0);
}

static void
mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
{
	struct mge_desc_wrapper *dw;
	int i;

	for (i = 0; i < size; i++) {
		/* Free RX mbuf */
		dw = &(tab[i]);

		if (dw->buffer_dmap) {
			if (free_mbufs) {
				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
			}
			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
			if (free_mbufs)
				m_freem(dw->buffer);
		}
		/* Free RX descriptors */
		if (dw->desc_dmap) {
			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
		}
	}
}

static void
mge_free_dma(struct mge_softc *sc)
{
	/* Free descriptors and mbufs */
	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);

	/* Destroy mbuf dma tag */
	bus_dma_tag_destroy(sc->mge_tx_dtag);
	bus_dma_tag_destroy(sc->mge_rx_dtag);
	/* Destroy descriptors tag */
	bus_dma_tag_destroy(sc->mge_desc_dtag);
}

static void
mge_reinit_rx(struct mge_softc *sc)
{
	struct mge_desc_wrapper *dw;
	int i;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);

	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
	sc->rx_desc_curr = 0;

	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
}

#ifdef DEVICE_POLLING
static poll_handler_t mge_poll;

static int
mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct mge_softc *sc = ifp->if_softc;
	uint32_t int_cause, int_cause_ext;
	int rx_npkts = 0;

	MGE_GLOBAL_LOCK(sc);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		MGE_GLOBAL_UNLOCK(sc);
		return (rx_npkts);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

		/* Check for resource error */
		if (int_cause & MGE_PORT_INT_RXERRQ0)
			mge_reinit_rx(sc);

		if (int_cause || int_cause_ext) {
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		}
	}

	mge_intr_tx_locked(sc);
	rx_npkts = mge_intr_rx_locked(sc, count);

	MGE_GLOBAL_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static int
mge_attach(device_t dev)
{
	struct mge_softc *sc;
	struct mii_softc *miisc;
	struct ifnet *ifp;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	int i, error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);

	if (device_get_unit(dev) == 0)
		sc_mge0 = sc;

	/* Set chip version-dependent parameters */
	mge_ver_params(sc);

	/* Get PHY address from FDT */
	if (fdt_get_phyaddr(sc->node, &sc->phyaddr) != 0)
		return (ENXIO);

	/* Initialize mutexes */
	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
	    MTX_DEF);
	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
	    MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		mge_detach(dev);
		return (ENXIO);
	}

	/* Allocate DMA, buffers, buffer descriptors */
	error = mge_allocate_dma(sc);
	if (error) {
		mge_detach(dev);
		return (ENXIO);
	}

	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Configure defaults for interrupt coalescing */
	sc->rx_ic_time = 768;
	sc->tx_ic_time = 768;
	mge_add_sysctls(sc);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		mge_detach(dev);
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = MGE_CHECKSUM_FEATURES;

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ifp->if_init = mge_init;
	ifp->if_start = mge_start;
	ifp->if_ioctl = mge_ioctl;

	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	mge_get_mac_address(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);
	callout_init(&sc->wd_callout, 0);

	/* Probe PHY(s) */
	error = mii_phy_probe(dev, &sc->miibus, mge_ifmedia_upd,
	    mge_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII failed to find PHY\n");
		mge_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Tell the MAC where to find the PHY so autoneg works */
	miisc = LIST_FIRST(&sc->mii->mii_phys);
	MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);

	/* Attach interrupt handlers (only the RX and TX ones are hooked up) */
	for (i = 0; i < 2; ++i) {
		error = bus_setup_intr(dev, sc->res[1 + i],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
		    sc, &sc->ih_cookie[i]);
		if (error) {
			device_printf(dev, "could not setup %s\n",
			    mge_intrs[i].description);
			mge_detach(dev);
			return (error);
		}
	}

	return (0);
}

static int
mge_detach(device_t dev)
{
	struct mge_softc *sc;
	int error, i;

	sc = device_get_softc(dev);

	/* Stop controller and free TX queue */
	if (sc->ifp)
		mge_shutdown(dev);

	/* Wait for the watchdog callout to finish */
	callout_drain(&sc->wd_callout);

	/* Stop and release all interrupts */
	for (i = 0; i < 2; ++i) {
		if (!sc->ih_cookie[i])
			continue;

		error = bus_teardown_intr(dev, sc->res[1 + i],
		    sc->ih_cookie[i]);
		if (error)
			device_printf(dev, "could not release %s\n",
			    mge_intrs[i].description);
	}

	/* Detach network interface */
	if (sc->ifp) {
		ether_ifdetach(sc->ifp);
		if_free(sc->ifp);
	}

	/* Free DMA resources */
	mge_free_dma(sc);

	/* Free IO memory handler */
	bus_release_resources(dev, res_spec, sc->res);

	/* Destroy mutexes */
	mtx_destroy(&sc->receive_lock);
	mtx_destroy(&sc->transmit_lock);

	return (0);
}

static void
mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mge_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	MGE_TRANSMIT_LOCK(sc);

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	MGE_TRANSMIT_UNLOCK(sc);
}

static uint32_t
mge_set_port_serial_control(uint32_t media)
{
	uint32_t port_config;

	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);

	if (IFM_TYPE(media) == IFM_ETHER) {
		switch (IFM_SUBTYPE(media)) {
		case IFM_AUTO:
			break;
		case IFM_1000_T:
			port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
			    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		case IFM_100_TX:
			port_config |= (PORT_SERIAL_MII_SPEED_100 |
			    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		case IFM_10_T:
			port_config |= (PORT_SERIAL_AUTONEG |
			    PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		}
		if (media & IFM_FDX)
			port_config |= PORT_SERIAL_FULL_DUPLEX;
	}
	return (port_config);
}

static int
mge_ifmedia_upd(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP) {
		MGE_GLOBAL_LOCK(sc);

		sc->mge_media_status = sc->mii->mii_media.ifm_media;
		mii_mediachg(sc->mii);
		mge_init_locked(sc);

		MGE_GLOBAL_UNLOCK(sc);
	}

	return (0);
}

static void
mge_init(void *arg)
{
	struct mge_softc *sc = arg;

	MGE_GLOBAL_LOCK(sc);

	mge_init_locked(arg);

	MGE_GLOBAL_UNLOCK(sc);
}

static void
mge_init_locked(void *arg)
{
	struct mge_softc *sc = arg;
	struct mge_desc_wrapper *dw;
	volatile uint32_t reg_val;
	int i, count;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	/* Stop interface */
	mge_stop(sc);

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Set MAC address */
	mge_set_mac_address(sc);

	/* Setup multicast filters */
	mge_setup_multicast(sc);

	if (sc->mge_ver == 2) {
		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
	}

	/* Initialize TX queue configuration registers */
	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);

	/* Clear TX queue configuration registers for unused queues */
	for (i = 1; i < 7; i++) {
		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
	}

	/* Set default MTU */
	MGE_WRITE(sc, sc->mge_mtu, 0);

	/* Port configuration */
	MGE_WRITE(sc, MGE_PORT_CONFIG,
	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
	    PORT_CONFIG_ARO_RXQ(0));
	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);

	/* Setup port configuration */
	reg_val = mge_set_port_serial_control(sc->mge_media_status);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);

	/* Setup SDMA configuration */
	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
	    MGE_SDMA_TX_BYTE_SWAP |
	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));

	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);

	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Reset descriptor indexes */
	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Enable RX descriptors */
	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &sc->mge_rx_desc[i];
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		dw->mge_desc->buff_size = MCLBYTES;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

	/* Enable port */
	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val |= PORT_SERIAL_ENABLE;
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
	count = 0x100000;
	for (;;) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (reg_val & MGE_STATUS_LINKUP)
			break;
		DELAY(100);
		if (--count == 0) {
			if_printf(sc->ifp, "Timeout on link-up\n");
			break;
		}
	}

	/* Set up interrupt coalescing */
	mge_set_rxic(sc);
	mge_set_txic(sc);

	/* Enable interrupts */
#ifdef DEVICE_POLLING
	/*
	 * ...only if polling is not turned on. Disable interrupts explicitly
	 * if polling is enabled.
	 */
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		mge_intrs_ctrl(sc, 0);
	else
#endif /* DEVICE_POLLING */
		mge_intrs_ctrl(sc, 1);

	/* Activate network interface */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->wd_timer = 0;

	/* Schedule watchdog timeout */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_intr_err(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_misc(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_rx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause, int_cause_ext;

	MGE_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_RECEIVE_UNLOCK(sc);
		return;
	}
#endif

	/* Get interrupt cause */
	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	/* Check for resource error */
	if (int_cause & MGE_PORT_INT_RXERRQ0) {
		mge_reinit_rx(sc);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
		    int_cause & ~MGE_PORT_INT_RXERRQ0);
	}

	int_cause &= MGE_PORT_INT_RXQ0;
	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;

	if (int_cause || int_cause_ext) {
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		mge_intr_rx_locked(sc, -1);
	}

	MGE_RECEIVE_UNLOCK(sc);
}

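/*
 * Process received frames. A negative count (interrupt context) means no
 * limit; a positive count (polling path) bounds the number of frames
 * handled in one call. Returns the number of packets passed up the stack.
 */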
static int
mge_intr_rx_locked(struct mge_softc *sc, int count)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t status;
	uint16_t bufsize;
	struct mge_desc_wrapper* dw;
	struct mbuf *mb;
	int rx_npkts = 0;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	while (count != 0) {
		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get status */
		status = dw->mge_desc->cmd_status;
		bufsize = dw->mge_desc->buff_size;
		if ((status & MGE_DMA_OWNED) != 0)
			break;

		if (dw->mge_desc->byte_count &&
		    (status & MGE_ERR_SUMMARY) == 0) {
			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
			    BUS_DMASYNC_POSTREAD);

			mb = m_devget(dw->buffer->m_data,
			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
			    0, ifp, NULL);

			if (mb == NULL)
				/* Give up if no mbufs */
				break;

			/*
			 * Strip the two leading padding bytes the controller
			 * places in front of the Ethernet header.
			 */
			mb->m_len -= 2;
			mb->m_pkthdr.len -= 2;
			mb->m_data += 2;

			mge_offload_process_frame(ifp, mb, status,
			    bufsize);

			MGE_RECEIVE_UNLOCK(sc);
			(*ifp->if_input)(ifp, mb);
			MGE_RECEIVE_LOCK(sc);
			rx_npkts++;
		}

		dw->mge_desc->byte_count = 0;
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (count > 0)
			count -= 1;
	}

	return (rx_npkts);
}

static void
mge_intr_sum(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_tx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause_ext;

	MGE_TRANSMIT_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_TRANSMIT_UNLOCK(sc);
		return;
	}
#endif

	/* Ack the interrupt */
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
	    int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);

	mge_intr_tx_locked(sc);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_intr_tx_locked(struct mge_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	uint32_t status;
	int send = 0;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	/* Disable watchdog */
	sc->wd_timer = 0;

	while (sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx =
		    (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		/* Update collision statistics */
		if (status & MGE_ERR_SUMMARY) {
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
				ifp->if_collisions++;
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
				ifp->if_collisions += 16;
		}

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
		send++;

		ifp->if_opackets++;
	}

	if (send) {
		/* Now send anything that was pending */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		mge_start_locked(ifp);
	}
}

static int
mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error;
	uint32_t flags;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		MGE_GLOBAL_LOCK(sc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = ifp->if_flags ^ sc->mge_if_flags;
				if (flags & IFF_PROMISC)
					mge_set_prom_mode(sc,
					    MGE_RX_DEFAULT_QUEUE);

				if (flags & IFF_ALLMULTI)
					mge_setup_multicast(sc);
			} else
				mge_init_locked(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			mge_stop(sc);

		sc->mge_if_flags = ifp->if_flags;
		MGE_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			MGE_GLOBAL_LOCK(sc);
			mge_setup_multicast(sc);
			MGE_GLOBAL_UNLOCK(sc);
		}
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(mge_poll, ifp);
				if (error)
					return (error);

				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 1);
				ifp->if_capenable &= ~IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;
	case SIOCGIFMEDIA: /* fall through */
	case SIOCSIFMEDIA:
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T &&
		    !(ifr->ifr_media & IFM_FDX)) {
			device_printf(sc->dev,
			    "1000baseTX half-duplex unsupported\n");
			return (0);
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

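/*
 * SMI (MDIO) access: the SMI management register is only reachable through
 * the first controller instance, so both helpers below always go through
 * sc_mge0 regardless of which mge unit owns the PHY.
 */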
static int
mge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct mge_softc *sc;
	uint32_t retries;

	sc = device_get_softc(dev);

	if (sc->phyaddr != phy)
		return (0);

	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));

	retries = MGE_SMI_READ_RETRIES;
	while (--retries &&
	    !(MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_READVALID))
		DELAY(MGE_SMI_READ_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while reading from PHY\n");

	return (MGE_READ(sc_mge0, MGE_REG_SMI) & 0xffff);
}

static int
mge_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct mge_softc *sc;
	uint32_t retries;

	sc = device_get_softc(dev);

	if (sc->phyaddr != phy)
		return (0);

	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));

	retries = MGE_SMI_WRITE_RETRIES;
	while (--retries && (MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_BUSY))
		DELAY(MGE_SMI_WRITE_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while writing to PHY\n");
	return (0);
}

static int
mge_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
		return (ENXIO);

	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
	return (BUS_PROBE_DEFAULT);
}

static int
mge_resume(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}

static int
mge_shutdown(device_t dev)
{
	struct mge_softc *sc = device_get_softc(dev);

	MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(sc->ifp);
#endif

	mge_stop(sc);

	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

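/*
 * Enqueue a single mbuf for transmission. The TX buffer dma tag is created
 * with nsegments = 1, so callers are expected to flatten chains first
 * (mge_start_locked() runs m_defrag() before calling here); a load that
 * still yields more than one segment is treated as a failure.
 */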
static int
mge_encap(struct mge_softc *sc, struct mbuf *m0)
{
	struct mge_desc_wrapper *dw = NULL;
	struct ifnet *ifp;
	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
	bus_dmamap_t mapp;
	int error;
	int seg, nsegs;
	int desc_no;

	ifp = sc->ifp;

	/* Check for free descriptors */
	if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
		/* No free descriptors */
		return (-1);
	}

	/* Fetch unused map */
	desc_no = sc->tx_desc_curr;
	dw = &sc->mge_tx_desc[desc_no];
	mapp = dw->buffer_dmap;

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0 || nsegs != 1) {
		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
		return ((error != 0) ? error : -1);
	}

	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);

	/* Everything is ok, now we can send buffers */
	for (seg = 0; seg < nsegs; seg++) {
		dw->mge_desc->byte_count = segs[seg].ds_len;
		dw->mge_desc->buffer = segs[seg].ds_addr;
		dw->buffer = m0;
		dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
		    MGE_DMA_OWNED;

		if (seg == 0)
			mge_offload_setup_descriptor(sc, dw);
	}

	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
	sc->tx_desc_used_count++;
	return (0);
}

static void
mge_tick(void *msc)
{
	struct mge_softc *sc = msc;

	/* Check for TX timeout */
	mge_watchdog(sc);

	mii_tick(sc->mii);

	/* Check for media type change */
	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
		mge_ifmedia_upd(sc->ifp);

	/* Schedule another timeout one second from now */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_watchdog(struct mge_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;

	MGE_GLOBAL_LOCK(sc);

	if (sc->wd_timer == 0 || --sc->wd_timer) {
		MGE_GLOBAL_UNLOCK(sc);
		return;
	}

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	mge_stop(sc);
	mge_init_locked(sc);

	MGE_GLOBAL_UNLOCK(sc);
}

static void
mge_start(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK(sc);

	mge_start_locked(ifp);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_start_locked(struct ifnet *ifp)
{
	struct mge_softc *sc;
	struct mbuf *m0, *mtmp;
	uint32_t reg_val, queued = 0;

	sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (;;) {
		/* Get packet from the queue */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/* Flatten the chain: the TX dma tag allows one segment only */
		mtmp = m_defrag(m0, M_DONTWAIT);
		if (mtmp)
			m0 = mtmp;

		if (mge_encap(sc, m0)) {
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		BPF_MTAP(ifp, m0);
	}

	if (queued) {
		/* Enable transmitter and watchdog timer */
		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
		sc->wd_timer = 5;
	}
}

static void
mge_stop(struct mge_softc *sc)
{
	struct ifnet *ifp;
	volatile uint32_t reg_val, status;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	int count;

	ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Stop tick engine */
	callout_stop(&sc->wd_callout);

	/* Disable interface */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->wd_timer = 0;

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Disable Rx and Tx */
	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);

	/* Remove pending data from TX queue */
	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
	    sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
		    MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);

		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
	}

	/* Wait for end of transmission */
	count = 0x100000;
	while (count--) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
			break;
		DELAY(100);
	}

	if (!count)
		if_printf(ifp,
		    "%s: timeout while waiting for end of transmission\n",
		    __FUNCTION__);

	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val &= ~(PORT_SERIAL_ENABLE);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
}

static int
mge_suspend(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}

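/*
 * Translate the RX descriptor status into mbuf checksum flags: a valid IP
 * checksum marks CSUM_IP_CHECKED/CSUM_IP_VALID, and a good L4 checksum on
 * a non-fragmented TCP/UDP frame sets CSUM_DATA_VALID with a pseudo-header
 * csum_data of 0xFFFF.
 */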
static void
mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize)
{
	int csum_flags = 0;

	if (ifp->if_capenable & IFCAP_RXCSUM) {
		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
		    (status & MGE_RX_L4_CSUM_OK)) {
			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			frame->m_pkthdr.csum_data = 0xFFFF;
		}

		frame->m_pkthdr.csum_flags = csum_flags;
	}
}

static void
mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
{
	struct mbuf *m0 = dw->buffer;
	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
	int csum_flags = m0->m_pkthdr.csum_flags;
	int cmd_status = 0;
	struct ip *ip;
	int ehlen, etype;

	if (csum_flags) {
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			etype = ntohs(eh->evl_proto);
			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			cmd_status |= MGE_TX_VLAN_TAGGED;
		} else {
			etype = ntohs(eh->evl_encap_proto);
			ehlen = ETHER_HDR_LEN;
		}

		if (etype != ETHERTYPE_IP) {
			if_printf(sc->ifp,
			    "TCP/IP Offload enabled for unsupported "
			    "protocol!\n");
			return;
		}

		ip = (struct ip *)(m0->m_data + ehlen);
		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);

		if ((m0->m_flags & M_FRAG) == 0)
			cmd_status |= MGE_TX_NOT_FRAGMENT;
	}

	if (csum_flags & CSUM_IP)
		cmd_status |= MGE_TX_GEN_IP_CSUM;

	if (csum_flags & CSUM_TCP)
		cmd_status |= MGE_TX_GEN_L4_CSUM;

	if (csum_flags & CSUM_UDP)
		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;

	dw->mge_desc->cmd_status |= cmd_status;
}

static void
mge_intrs_ctrl(struct mge_softc *sc, int enable)
{

	if (enable) {
		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
		    MGE_PORT_INT_EXT_TXBUF0);
	} else {
		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_INT_MASK, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
	}
}

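/*
 * Table-driven CRC-8 (polynomial x^8 + x^2 + x + 1, i.e. 0x07, zero initial
 * value), used to hash multicast addresses into the "other" multicast
 * filter table.
 */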
static uint8_t
mge_crc8(uint8_t *data, int size)
{
	uint8_t crc = 0;
	static const uint8_t ct[256] = {
		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
	};

	while (size--)
		crc = ct[crc ^ *(data++)];

	return (crc);
}

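/*
 * Program the two multicast filter tables: addresses matching the
 * 01:00:5E:00:00:xx prefix (the IPv4 local-network control block) are
 * filtered by their last byte through the "special" table, everything else
 * is hashed with mge_crc8() into the "other" table. Each table entry byte
 * again encodes the pass bit and RX queue number.
 */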
static void
mge_setup_multicast(struct mge_softc *sc)
{
	uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
	uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
	uint32_t smt[MGE_MCAST_REG_NUMBER];
	uint32_t omt[MGE_MCAST_REG_NUMBER];
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	uint8_t *mac;
	int i;

	if (ifp->if_flags & IFF_ALLMULTI) {
		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
			smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
	} else {
		memset(smt, 0, sizeof(smt));
		memset(omt, 0, sizeof(omt));

		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;

			mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			if (memcmp(mac, special, sizeof(special)) == 0) {
				i = mac[5];
				smt[i >> 2] |= v << ((i & 0x03) << 3);
			} else {
				i = mge_crc8(mac, ETHER_ADDR_LEN);
				omt[i >> 2] |= v << ((i & 0x03) << 3);
			}
		}
		if_maddr_runlock(ifp);
	}

	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
	}
}

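/*
 * Clamp and program the interrupt-coalescing time thresholds: the encoded
 * field is first cleared with the version-dependent mask helper, then the
 * new value is written back.
 */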
static void
mge_set_rxic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
		sc->rx_ic_time = sc->mge_rx_ipg_max;

	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
}

static void
mge_set_txic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
		sc->tx_ic_time = sc->mge_tfut_ipg_max;

	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
}

static int
mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
{
	struct mge_softc *sc = (struct mge_softc *)arg1;
	uint32_t time;
	int error;

	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
	error = sysctl_handle_int(oidp, &time, 0, req);
	if (error != 0)
		return (error);

	MGE_GLOBAL_LOCK(sc);
	if (arg2 == MGE_IC_RX) {
		sc->rx_ic_time = time;
		mge_set_rxic(sc);
	} else {
		sc->tx_ic_time = time;
		mge_set_txic(sc);
	}
	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

static void
mge_add_sysctls(struct mge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD, 0, "MGE interrupt coalescing");
	children = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
	    "I", "IC RX time threshold");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
	    "I", "IC TX time threshold");
}
1855