/*-
 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#ifndef MII_ADDR_BASE
#define MII_ADDR_BASE 8
#endif

#include <dev/mge/if_mgevar.h>
#include <arm/mv/mvreg.h>
#include <arm/mv/mvvar.h>

#include "miibus_if.h"

/* PHY registers are in the address space of the first mge unit */
static struct mge_softc *sc_mge0 = NULL;

static int mge_probe(device_t dev);
static int mge_attach(device_t dev);
static int mge_detach(device_t dev);
static int mge_shutdown(device_t dev);
static int mge_suspend(device_t dev);
static int mge_resume(device_t dev);

static int mge_miibus_readreg(device_t dev, int phy, int reg);
static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);

static int mge_ifmedia_upd(struct ifnet *ifp);
static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

static void mge_init(void *arg);
static void mge_init_locked(void *arg);
static void mge_start(struct ifnet *ifp);
static void mge_start_locked(struct ifnet *ifp);
static void mge_watchdog(struct mge_softc *sc);
static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);

static uint32_t mge_tfut_ipg(uint32_t val, int ver);
static uint32_t mge_rx_ipg(uint32_t val, int ver);
static void mge_ver_params(struct mge_softc *sc);

static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
static void mge_intr_rx(void *arg);
static int mge_intr_rx_locked(struct mge_softc *sc, int count);
static void mge_intr_tx(void *arg);
static void mge_intr_tx_locked(struct mge_softc *sc);
static void mge_intr_misc(void *arg);
static void mge_intr_sum(void *arg);
static void mge_intr_err(void *arg);
static void mge_stop(struct mge_softc *sc);
static void mge_tick(void *msc);
static uint32_t mge_set_port_serial_control(uint32_t media);
static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
static void mge_set_mac_address(struct mge_softc *sc);
static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
    uint8_t queue);
static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
static int mge_allocate_dma(struct mge_softc *sc);
static int mge_alloc_desc_dma(struct mge_softc *sc,
    struct mge_desc_wrapper* desc_tab, uint32_t size,
    bus_dma_tag_t *buffer_tag);
static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, bus_addr_t *paddr);
static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void mge_free_dma(struct mge_softc *sc);
static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize);
static void mge_offload_setup_descriptor(struct mge_softc *sc,
    struct mge_desc_wrapper *dw);
static uint8_t mge_crc8(uint8_t *data, int size);
static void mge_setup_multicast(struct mge_softc *sc);
static void mge_set_rxic(struct mge_softc *sc);
static void mge_set_txic(struct mge_softc *sc);
static void mge_add_sysctls(struct mge_softc *sc);
static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);

static device_method_t mge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mge_probe),
	DEVMETHOD(device_attach,	mge_attach),
	DEVMETHOD(device_detach,	mge_detach),
	DEVMETHOD(device_shutdown,	mge_shutdown),
	DEVMETHOD(device_suspend,	mge_suspend),
	DEVMETHOD(device_resume,	mge_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
	{ 0, 0 }
};

static driver_t mge_driver = {
	"mge",
	mge_methods,
	sizeof(struct mge_softc),
};

static devclass_t mge_devclass;

DRIVER_MODULE(mge, mbus, mge_driver, mge_devclass, 0, 0);
DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(mge, ether, 1, 1, 1);
MODULE_DEPEND(mge, miibus, 1, 1, 1);

static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

static struct {
	driver_intr_t *handler;
	char *description;
} mge_intrs[MGE_INTR_COUNT] = {
	{ mge_intr_rx,	"GbE receive interrupt" },
	{ mge_intr_tx,	"GbE transmit interrupt" },
	{ mge_intr_misc, "GbE misc interrupt" },
	{ mge_intr_sum,	"GbE summary interrupt" },
	{ mge_intr_err,	"GbE error interrupt" },
};

static void
mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
{
	uint32_t mac_l, mac_h;

	/* XXX use currently programmed MAC address; eventually this info
	 * will be provided by the loader */

	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);

	addr[0] = (mac_h & 0xff000000) >> 24;
	addr[1] = (mac_h & 0x00ff0000) >> 16;
	addr[2] = (mac_h & 0x0000ff00) >> 8;
	addr[3] = (mac_h & 0x000000ff);
	addr[4] = (mac_l & 0x0000ff00) >> 8;
	addr[5] = (mac_l & 0x000000ff);
}

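/*
 * Helpers encoding the version-dependent interrupt-coalescing fields.
 * The masks and shifts mirror how the values are used below: for v1 the
 * TX FIFO urgent threshold IPG occupies bits [17:4] and the RX IPG bits
 * [21:8]; for v2 the TX field widens to bits [19:4], and the RX value is
 * split with its top bit at bit 25 and the low 15 bits at [21:7].
 */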
static uint32_t
mge_tfut_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 4);
	case 2:
	default:
		return ((val & 0xffff) << 4);
	}
}

static uint32_t
mge_rx_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 8);
	case 2:
	default:
		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
	}
}

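/*
 * Tell the controller generations apart by SoC ID: 88F6281 (Kirkwood) and
 * MV78100 (Discovery) carry the second-generation GbE block with wider
 * coalescing fields and different TX arbiter defaults.  Note that
 * sc->mge_mtu holds the version-specific *register offset* of the MTU
 * register, not an MTU value; see mge_init_locked().
 */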
static void
mge_ver_params(struct mge_softc *sc)
{
	uint32_t d, r;

	soc_id(&d, &r);
	if (d == MV_DEV_88F6281 || d == MV_DEV_MV78100 ||
	    d == MV_DEV_MV78100_Z0) {
		sc->mge_ver = 2;
		sc->mge_mtu = 0x4e8;
		sc->mge_tfut_ipg_max = 0xFFFF;
		sc->mge_rx_ipg_max = 0xFFFF;
		sc->mge_tx_arb_cfg = 0xFC0000FF;
		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	} else {
		sc->mge_ver = 1;
		sc->mge_mtu = 0x458;
		sc->mge_tfut_ipg_max = 0x3FFF;
		sc->mge_rx_ipg_max = 0x3FFF;
		sc->mge_tx_arb_cfg = 0x000000FF;
		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	}
}

static void
mge_set_mac_address(struct mge_softc *sc)
{
	char *if_mac;
	uint32_t mac_l, mac_h;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	if_mac = (char *)IF_LLADDR(sc->ifp);

	mac_l = (if_mac[4] << 8) | (if_mac[5]);
	mac_h = (if_mac[0] << 24) | (if_mac[1] << 16) |
	    (if_mac[2] << 8) | (if_mac[3] << 0);

	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);

	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
}

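/*
 * Program the unicast DA filter.  The table holds one byte-wide entry per
 * value of the low nibble of the last MAC address byte (MGE_UCAST_REG_NUMBER
 * registers, four entries each); an entry enables reception (bit 0) and
 * selects the destination RX queue in the bits above it.  All other entries
 * are cleared so only the programmed station address passes.
 */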
static void
mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
{
	uint32_t reg_idx, reg_off, reg_val, i;

	last_byte &= 0xf;
	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
	reg_val = (1 | (queue << 1)) << reg_off;

	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
		if (i == reg_idx)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
		else
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
	}
}

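/*
 * Enter or leave promiscuous mode, depending on IFF_PROMISC.  Entering sets
 * the unicast promiscuous bit and opens every entry of the special/other
 * multicast and unicast DA filter tables towards the given queue; leaving
 * clears the tables and reprograms the station address.
 */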
static void
mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
{
	uint32_t port_config;
	uint32_t reg_val, i;

	/* Enable or disable promiscuous mode as needed */
	if (sc->ifp->if_flags & IFF_PROMISC) {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config |= PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
		    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
		}

		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);

	} else {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config &= ~PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
		}

		mge_set_mac_address(sc);
	}
}

static void
mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;

	*paddr = segs->ds_addr;
}

static int
mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    bus_addr_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error;
	int nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0 || nsegs != 1)
		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}

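/*
 * Allocate and chain the descriptors of one ring.  The loop walks the table
 * backwards so that each descriptor can point at its (already known)
 * successor; the assignment after the loop then closes the ring by linking
 * the last descriptor back to the first.  A per-ring busdma tag and a map
 * for every buffer are created as well.
 */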
static int
mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t *buffer_tag)
{
	struct mge_desc_wrapper *dw;
	bus_addr_t desc_paddr;
	int i, error;

	desc_paddr = 0;
	for (i = size - 1; i >= 0; i--) {
		dw = &(tab[i]);
		error = bus_dmamem_alloc(sc->mge_desc_dtag,
		    (void**)&(dw->mge_desc),
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &(dw->desc_dmap));

		if (error) {
			device_printf(sc->dev,
			    "failed to allocate DMA memory\n");
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);

		if (error) {
			device_printf(sc->dev, "can't load descriptor\n");
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		/* Chain descriptors */
		dw->mge_desc->next_desc = desc_paddr;
		desc_paddr = dw->mge_desc_paddr;
	}
	tab[size - 1].mge_desc->next_desc = desc_paddr;

	/* Allocate a busdma tag for mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    8, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    buffer_tag);			/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	/* Create busdma maps for the ring's buffers */
	for (i = 0; i < size; i++) {
		dw = &(tab[i]);
		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
		if (error) {
			device_printf(sc->dev,
			    "failed to create map for mbuf\n");
			return (ENXIO);
		}

		dw->buffer = (struct mbuf*)NULL;
		dw->mge_desc->buffer = (bus_addr_t)NULL;
	}

	return (0);
}

static int
mge_allocate_dma(struct mge_softc *sc)
{
	struct mge_desc_wrapper *dw;
	int error, i;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    16, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->mge_desc_dtag);		/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create busdma tag for descriptors\n");
		return (ENXIO);
	}

	error = mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
	    &sc->mge_tx_dtag);
	if (error)
		return (error);
	error = mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);
	if (error)
		return (error);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;

	return (0);
}

static void
mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
{
	struct mge_desc_wrapper *dw;
	int i;

	for (i = 0; i < size; i++) {
		/* Free RX mbuf */
		dw = &(tab[i]);

		if (dw->buffer_dmap) {
			if (free_mbufs) {
				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
			}
			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
			if (free_mbufs)
				m_freem(dw->buffer);
		}
		/* Free RX descriptors */
		if (dw->desc_dmap) {
			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
		}
	}
}

static void
mge_free_dma(struct mge_softc *sc)
{
	/* Free descriptors and mbufs */
	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);

	/* Destroy mbuf dma tag */
	bus_dma_tag_destroy(sc->mge_tx_dtag);
	bus_dma_tag_destroy(sc->mge_rx_dtag);
	/* Destroy descriptors tag */
	bus_dma_tag_destroy(sc->mge_desc_dtag);
}

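/*
 * Rebuild the RX ring from scratch and re-enable the queue.  This is the
 * recovery path for an RX resource error (MGE_PORT_INT_RXERRQ0), i.e. the
 * controller ran out of usable RX descriptors.
 */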
static void
mge_reinit_rx(struct mge_softc *sc)
{
	struct mge_desc_wrapper *dw;
	int i;

	MGE_RECEIVE_LOCK(sc);

	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);

	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
	sc->rx_desc_curr = 0;

	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

	MGE_RECEIVE_UNLOCK(sc);
}

#ifdef DEVICE_POLLING
static poll_handler_t mge_poll;

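/*
 * DEVICE_POLLING handler; returns the number of frames received.  With
 * POLL_AND_CHECK_STATUS the interrupt cause registers are serviced here as
 * well, since the interrupts themselves stay masked while polling.
 */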
static int
mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct mge_softc *sc = ifp->if_softc;
	uint32_t int_cause, int_cause_ext;
	int rx_npkts = 0;

	MGE_GLOBAL_LOCK(sc);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		MGE_GLOBAL_UNLOCK(sc);
		return (rx_npkts);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

		/* Check for resource error */
		if (int_cause & MGE_PORT_INT_RXERRQ0)
			mge_reinit_rx(sc);

		if (int_cause || int_cause_ext) {
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		}
	}

	mge_intr_tx_locked(sc);
	rx_npkts = mge_intr_rx_locked(sc, count);

	MGE_GLOBAL_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static int
mge_attach(device_t dev)
{
	struct mge_softc *sc;
	struct mii_softc *miisc;
	struct ifnet *ifp;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	int i, error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	if (device_get_unit(dev) == 0)
		sc_mge0 = sc;

	/* Set chip version-dependent parameters */
	mge_ver_params(sc);

	/* Initialize mutexes */
	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
	    MTX_DEF);
	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
	    MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		mge_detach(dev);
		return (ENXIO);
	}

	/* Allocate DMA, buffers, buffer descriptors */
	error = mge_allocate_dma(sc);
	if (error) {
		mge_detach(dev);
		return (ENXIO);
	}

	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Configure defaults for interrupts coalescing */
	sc->rx_ic_time = 768;
	sc->tx_ic_time = 768;
	mge_add_sysctls(sc);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		mge_detach(dev);
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = MGE_CHECKSUM_FEATURES;

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ifp->if_init = mge_init;
	ifp->if_start = mge_start;
	ifp->if_ioctl = mge_ioctl;

	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	mge_get_mac_address(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);
	callout_init(&sc->wd_callout, 0);

	/* Probe PHY(s) */
	error = mii_phy_probe(dev, &sc->miibus, mge_ifmedia_upd,
	    mge_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII failed to find PHY\n");
		mge_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Tell the MAC where to find the PHY so autoneg works */
	miisc = LIST_FIRST(&sc->mii->mii_phys);
	MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);

	/* Attach interrupt handlers */
	for (i = 0; i < 2; ++i) {
		error = bus_setup_intr(dev, sc->res[1 + i],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
		    sc, &sc->ih_cookie[i]);
		if (error) {
			device_printf(dev, "could not setup %s\n",
			    mge_intrs[i].description);
			mge_detach(dev);
			return (error);
		}
	}

	return (0);
}

static int
mge_detach(device_t dev)
{
	struct mge_softc *sc;
	int error, i;

	sc = device_get_softc(dev);

	/* Stop controller and free TX queue */
	if (sc->ifp)
		mge_shutdown(dev);

	/* Wait for the tick callout to finish */
	callout_drain(&sc->wd_callout);

	/* Stop and release all interrupts */
	for (i = 0; i < 2; ++i) {
		if (!sc->ih_cookie[i])
			continue;

		error = bus_teardown_intr(dev, sc->res[1 + i],
		    sc->ih_cookie[i]);
		if (error)
			device_printf(dev, "could not release %s\n",
			    mge_intrs[i].description);
	}

	/* Detach network interface */
	if (sc->ifp) {
		ether_ifdetach(sc->ifp);
		if_free(sc->ifp);
	}

	/* Free DMA resources */
	mge_free_dma(sc);

	/* Free IO memory handler */
	bus_release_resources(dev, res_spec, sc->res);

	/* Destroy mutexes */
	mtx_destroy(&sc->receive_lock);
	mtx_destroy(&sc->transmit_lock);

	return (0);
}

static void
mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mge_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	MGE_TRANSMIT_LOCK(sc);

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	MGE_TRANSMIT_UNLOCK(sc);
}

static uint32_t
mge_set_port_serial_control(uint32_t media)
{
	uint32_t port_config;

	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);

	if (IFM_TYPE(media) == IFM_ETHER) {
		switch (IFM_SUBTYPE(media)) {
		case IFM_AUTO:
			break;
		case IFM_1000_T:
			port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
			    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		case IFM_100_TX:
			port_config |= (PORT_SERIAL_MII_SPEED_100 |
			    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		case IFM_10_T:
			port_config |= (PORT_SERIAL_AUTONEG |
			    PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		}
		if (media & IFM_FDX)
			port_config |= PORT_SERIAL_FULL_DUPLEX;
	}
	return (port_config);
}

static int
mge_ifmedia_upd(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP) {
		MGE_GLOBAL_LOCK(sc);

		sc->mge_media_status = sc->mii->mii_media.ifm_media;
		mii_mediachg(sc->mii);
		mge_init_locked(sc);

		MGE_GLOBAL_UNLOCK(sc);
	}

	return (0);
}

static void
mge_init(void *arg)
{
	struct mge_softc *sc = arg;

	MGE_GLOBAL_LOCK(sc);

	mge_init_locked(arg);

	MGE_GLOBAL_UNLOCK(sc);
}

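/*
 * Bring the interface up with the locks held: quiesce the hardware and mask
 * interrupts, program the MAC address and filters, configure the TX/RX
 * queues, port and SDMA settings, hand the RX descriptors to the DMA engine,
 * and finally enable the port and (unless polling) the interrupts.
 */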
static void
mge_init_locked(void *arg)
{
	struct mge_softc *sc = arg;
	struct mge_desc_wrapper *dw;
	volatile uint32_t reg_val;
	int i, count;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	/* Stop interface */
	mge_stop(sc);

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Set MAC address */
	mge_set_mac_address(sc);

	/* Setup multicast filters */
	mge_setup_multicast(sc);

	if (sc->mge_ver == 2) {
		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
	}

	/* Initialize TX queue configuration registers */
	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);

	/* Clear TX queue configuration registers for unused queues */
	for (i = 1; i < 7; i++) {
		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
	}

	/* Set default MTU (sc->mge_mtu is the MTU register offset) */
	MGE_WRITE(sc, sc->mge_mtu, 0);

	/* Port configuration */
	MGE_WRITE(sc, MGE_PORT_CONFIG,
	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
	    PORT_CONFIG_ARO_RXQ(0));
	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);

	/* Setup port configuration */
	reg_val = mge_set_port_serial_control(sc->mge_media_status);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);

	/* Setup SDMA configuration */
	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
	    MGE_SDMA_TX_BYTE_SWAP |
	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));

	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);

	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Reset descriptor indexes */
	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Enable RX descriptors */
	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &sc->mge_rx_desc[i];
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		dw->mge_desc->buff_size = MCLBYTES;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

	/* Enable port */
	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val |= PORT_SERIAL_ENABLE;
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
	count = 0x100000;
	for (;;) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (reg_val & MGE_STATUS_LINKUP)
			break;
		DELAY(100);
		if (--count == 0) {
			if_printf(sc->ifp, "Timeout on link-up\n");
			break;
		}
	}

	/* Setup interrupts coalescing */
	mge_set_rxic(sc);
	mge_set_txic(sc);

	/* Enable interrupts */
#ifdef DEVICE_POLLING
	/*
	 * ...only if polling is not turned on.  Disable interrupts explicitly
	 * if polling is enabled.
	 */
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		mge_intrs_ctrl(sc, 0);
	else
#endif /* DEVICE_POLLING */
	mge_intrs_ctrl(sc, 1);

	/* Activate network interface */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->wd_timer = 0;

	/* Schedule watchdog timeout */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_intr_err(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_misc(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_rx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause, int_cause_ext;

	MGE_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_RECEIVE_UNLOCK(sc);
		return;
	}
#endif

	/* Get interrupt cause */
	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	/* Check for resource error */
	if (int_cause & MGE_PORT_INT_RXERRQ0) {
		mge_reinit_rx(sc);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
		    int_cause & ~MGE_PORT_INT_RXERRQ0);
	}

	int_cause &= MGE_PORT_INT_RXQ0;
	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;

	if (int_cause || int_cause_ext) {
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		mge_intr_rx_locked(sc, -1);
	}

	MGE_RECEIVE_UNLOCK(sc);
}

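/*
 * Receive up to 'count' frames (count < 0 means no limit) from the RX ring.
 * Frames are copied out with m_devget() and the descriptor is immediately
 * recycled.  The two bytes skipped at the head of each frame are the
 * alignment padding the controller prepends so that the IP header ends up
 * 32-bit aligned.
 */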
static int
mge_intr_rx_locked(struct mge_softc *sc, int count)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t status;
	uint16_t bufsize;
	struct mge_desc_wrapper* dw;
	struct mbuf *mb;
	int rx_npkts = 0;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	while (count != 0) {
		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get status */
		status = dw->mge_desc->cmd_status;
		bufsize = dw->mge_desc->buff_size;
		if ((status & MGE_DMA_OWNED) != 0)
			break;

		if (dw->mge_desc->byte_count &&
		    !(status & MGE_ERR_SUMMARY)) {
			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
			    BUS_DMASYNC_POSTREAD);

			mb = m_devget(dw->buffer->m_data,
			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
			    0, ifp, NULL);

			if (mb == NULL)
				/* Give up if no mbufs */
				break;

			mb->m_len -= 2;
			mb->m_pkthdr.len -= 2;
			mb->m_data += 2;

			mge_offload_process_frame(ifp, mb, status,
			    bufsize);

			MGE_RECEIVE_UNLOCK(sc);
			(*ifp->if_input)(ifp, mb);
			MGE_RECEIVE_LOCK(sc);
			rx_npkts++;
		}

		dw->mge_desc->byte_count = 0;
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (count > 0)
			count -= 1;
	}

	return (rx_npkts);
}

static void
mge_intr_sum(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_tx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause_ext;

	MGE_TRANSMIT_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_TRANSMIT_UNLOCK(sc);
		return;
	}
#endif

	/* Ack the interrupt */
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
	    int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);

	mge_intr_tx_locked(sc);

	MGE_TRANSMIT_UNLOCK(sc);
}

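/*
 * Reclaim completed TX descriptors: walk the used list until a descriptor
 * still owned by DMA is found, accounting collisions from the error bits,
 * unloading and freeing the transmitted mbufs, and restarting transmission
 * if anything was queued while the ring was full.
 */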
static void
mge_intr_tx_locked(struct mge_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	uint32_t status;
	int send = 0;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	/* Disable watchdog */
	sc->wd_timer = 0;

	while (sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
		    MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		/* Update collision statistics */
		if (status & MGE_ERR_SUMMARY) {
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
				ifp->if_collisions++;
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
				ifp->if_collisions += 16;
		}

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
		send++;

		ifp->if_opackets++;
	}

	if (send) {
		/* Now send anything that was pending */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		mge_start_locked(ifp);
	}
}

static int
mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error;
	uint32_t flags;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		MGE_GLOBAL_LOCK(sc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = ifp->if_flags ^ sc->mge_if_flags;
				if (flags & IFF_PROMISC)
					mge_set_prom_mode(sc,
					    MGE_RX_DEFAULT_QUEUE);

				if (flags & IFF_ALLMULTI)
					mge_setup_multicast(sc);
			} else
				mge_init_locked(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			mge_stop(sc);

		sc->mge_if_flags = ifp->if_flags;
		MGE_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			MGE_GLOBAL_LOCK(sc);
			mge_setup_multicast(sc);
			MGE_GLOBAL_UNLOCK(sc);
		}
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(mge_poll, ifp);
				if (error)
					return (error);

				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 1);
				ifp->if_capenable &= ~IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;
	case SIOCGIFMEDIA: /* fall through */
	case SIOCSIFMEDIA:
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T &&
		    !(ifr->ifr_media & IFM_FDX)) {
			device_printf(sc->dev,
			    "1000baseT half-duplex unsupported\n");
			return (0);
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

static int
mge_miibus_readreg(device_t dev, int phy, int reg)
{
	uint32_t retries;

	/*
	 * We assume a static PHY address <=> device unit mapping:
	 * PHY address = MII_ADDR_BASE + device unit.
	 * This is true for most Marvell boards.
	 *
	 * The code below ensures proper PHY detection on each
	 * device unit.
	 */
	if ((MII_ADDR_BASE + device_get_unit(dev)) != phy)
		return (0);

	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));

	retries = MGE_SMI_READ_RETRIES;
	while (--retries &&
	    !(MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_READVALID))
		DELAY(MGE_SMI_READ_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while reading from PHY\n");

	return (MGE_READ(sc_mge0, MGE_REG_SMI) & 0xffff);
}

static int
mge_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	uint32_t retries;

	if ((MII_ADDR_BASE + device_get_unit(dev)) != phy)
		return (0);

	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));

	retries = MGE_SMI_WRITE_RETRIES;
	while (--retries && MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_BUSY)
		DELAY(MGE_SMI_WRITE_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while writing to PHY\n");
	return (0);
}

static int
mge_probe(device_t dev)
{

	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
	return (BUS_PROBE_DEFAULT);
}

static int
mge_resume(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}

static int
mge_shutdown(device_t dev)
{
	struct mge_softc *sc = device_get_softc(dev);

	MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(sc->ifp);
#endif

	mge_stop(sc);

	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

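/*
 * Stuff one mbuf chain into the TX ring.  The mbuf tag allows only a single
 * segment, so callers defragment first (see mge_start_locked()); the
 * descriptor is filled as first+last fragment with CRC generation, padding
 * and a completion interrupt, and then handed to the DMA engine.
 */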
static int
mge_encap(struct mge_softc *sc, struct mbuf *m0)
{
	struct mge_desc_wrapper *dw = NULL;
	struct ifnet *ifp;
	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
	bus_dmamap_t mapp;
	int error;
	int seg, nsegs;
	int desc_no;

	ifp = sc->ifp;

	/* Check for free descriptors */
	if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
		/* No free descriptors */
		return (-1);
	}

	/* Fetch unused map */
	desc_no = sc->tx_desc_curr;
	dw = &sc->mge_tx_desc[desc_no];
	mapp = dw->buffer_dmap;

	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error != 0 || nsegs != 1) {
		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
		return ((error != 0) ? error : -1);
	}

	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);

	/* Everything is ok, now we can send buffers */
	for (seg = 0; seg < nsegs; seg++) {
		dw->mge_desc->byte_count = segs[seg].ds_len;
		dw->mge_desc->buffer = segs[seg].ds_addr;
		dw->buffer = m0;
		dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
		    MGE_DMA_OWNED;

		if (seg == 0)
			mge_offload_setup_descriptor(sc, dw);
	}

	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
	sc->tx_desc_used_count++;
	return (0);
}

static void
mge_tick(void *msc)
{
	struct mge_softc *sc = msc;

	/* Check for TX timeout */
	mge_watchdog(sc);

	mii_tick(sc->mii);

	/* Check for media type change */
	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
		mge_ifmedia_upd(sc->ifp);

	/* Schedule another timeout one second from now */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_watchdog(struct mge_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;

	MGE_GLOBAL_LOCK(sc);

	if (sc->wd_timer == 0 || --sc->wd_timer) {
		MGE_GLOBAL_UNLOCK(sc);
		return;
	}

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	mge_stop(sc);
	mge_init_locked(sc);

	MGE_GLOBAL_UNLOCK(sc);
}

static void
mge_start(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK(sc);

	mge_start_locked(ifp);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_start_locked(struct ifnet *ifp)
{
	struct mge_softc *sc;
	struct mbuf *m0, *mtmp;
	uint32_t reg_val, queued = 0;

	sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (;;) {
		/* Get packet from the queue */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		mtmp = m_defrag(m0, M_DONTWAIT);
		if (mtmp)
			m0 = mtmp;

		if (mge_encap(sc, m0)) {
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		BPF_MTAP(ifp, m0);
	}

	if (queued) {
		/* Enable transmitter and watchdog timer */
		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
		sc->wd_timer = 5;
	}
}

static void
mge_stop(struct mge_softc *sc)
{
	struct ifnet *ifp;
	volatile uint32_t reg_val, status;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	int count;

	ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Stop tick engine */
	callout_stop(&sc->wd_callout);

	/* Disable interface */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->wd_timer = 0;

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Disable Rx and Tx */
	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);

	/* Remove pending data from TX queue */
	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
	    sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
		    MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);

		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
	}

	/* Wait for end of transmission */
	count = 0x100000;
	while (count--) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
			break;
		DELAY(100);
	}

	/* count is -1 if the loop above ran out of iterations */
	if (count < 0)
		if_printf(ifp,
		    "%s: timeout while waiting for end of transmission\n",
		    __FUNCTION__);

	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val &= ~(PORT_SERIAL_ENABLE);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
}

static int
mge_suspend(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}

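/*
 * Translate the RX descriptor's checksum-offload status bits into mbuf
 * csum_flags: a valid IPv4 header checksum marks CSUM_IP_CHECKED/VALID, and
 * a hardware-verified TCP/UDP checksum on a non-fragment is reported as
 * CSUM_DATA_VALID with a pseudo-header value of 0xFFFF.
 */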
static void
mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize)
{
	int csum_flags = 0;

	if (ifp->if_capenable & IFCAP_RXCSUM) {
		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
		    (status & MGE_RX_L4_CSUM_OK)) {
			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			frame->m_pkthdr.csum_data = 0xFFFF;
		}

		frame->m_pkthdr.csum_flags = csum_flags;
	}
}

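/*
 * Fill in the TX descriptor's checksum-offload bits from the mbuf's
 * csum_flags: record the IP header size (and VLAN encapsulation, if any),
 * and request IP and/or L4 checksum generation.  Only IPv4 frames are
 * handled; anything else is sent without offload.
 */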
static void
mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
{
	struct mbuf *m0 = dw->buffer;
	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
	int csum_flags = m0->m_pkthdr.csum_flags;
	int cmd_status = 0;
	struct ip *ip;
	int ehlen, etype;

	if (csum_flags) {
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			etype = ntohs(eh->evl_proto);
			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			cmd_status |= MGE_TX_VLAN_TAGGED;
		} else {
			etype = ntohs(eh->evl_encap_proto);
			ehlen = ETHER_HDR_LEN;
		}

		if (etype != ETHERTYPE_IP) {
			if_printf(sc->ifp,
			    "TCP/IP offload enabled for unsupported "
			    "protocol!\n");
			return;
		}

		ip = (struct ip *)(m0->m_data + ehlen);
		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);

		if ((m0->m_flags & M_FRAG) == 0)
			cmd_status |= MGE_TX_NOT_FRAGMENT;
	}

	if (csum_flags & CSUM_IP)
		cmd_status |= MGE_TX_GEN_IP_CSUM;

	if (csum_flags & CSUM_TCP)
		cmd_status |= MGE_TX_GEN_L4_CSUM;

	if (csum_flags & CSUM_UDP)
		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;

	dw->mge_desc->cmd_status |= cmd_status;
}

static void
mge_intrs_ctrl(struct mge_softc *sc, int enable)
{

	if (enable) {
		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
		    MGE_PORT_INT_EXT_TXBUF0);
	} else {
		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_INT_MASK, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
	}
}

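/*
 * Table-driven CRC-8 over the MAC address, used to index the "other"
 * multicast filter table.  The table corresponds to the polynomial 0x07
 * (x^8 + x^2 + x + 1) with a zero initial value.
 */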
static uint8_t
mge_crc8(uint8_t *data, int size)
{
	uint8_t crc = 0;
	static const uint8_t ct[256] = {
		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
	};

	while (size--)
		crc = ct[crc ^ *(data++)];

	return (crc);
}

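/*
 * Program the two multicast filter tables.  Addresses of the form
 * 01:00:5e:00:00:xx index the "special" table directly by their last byte;
 * everything else is hashed with mge_crc8() into the "other" table.  Each
 * byte-wide entry enables reception and selects the default RX queue.
 */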
static void
mge_setup_multicast(struct mge_softc *sc)
{
	uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
	uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
	uint32_t smt[MGE_MCAST_REG_NUMBER];
	uint32_t omt[MGE_MCAST_REG_NUMBER];
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	uint8_t *mac;
	int i;

	if (ifp->if_flags & IFF_ALLMULTI) {
		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
			smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
	} else {
		memset(smt, 0, sizeof(smt));
		memset(omt, 0, sizeof(omt));

		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;

			mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			if (memcmp(mac, special, sizeof(special)) == 0) {
				i = mac[5];
				smt[i >> 2] |= v << ((i & 0x03) << 3);
			} else {
				i = mge_crc8(mac, ETHER_ADDR_LEN);
				omt[i >> 2] |= v << ((i & 0x03) << 3);
			}
		}
		if_maddr_runlock(ifp);
	}

	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
	}
}

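/*
 * Update the RX/TX interrupt-coalescing thresholds.  The current field is
 * cleared by encoding the all-ones maximum through the same helper that
 * encodes the new value, which yields exactly the field's mask.
 */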
static void
mge_set_rxic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
		sc->rx_ic_time = sc->mge_rx_ipg_max;

	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
}

static void
mge_set_txic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
		sc->tx_ic_time = sc->mge_tfut_ipg_max;

	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
}

static int
mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
{
	struct mge_softc *sc = (struct mge_softc *)arg1;
	uint32_t time;
	int error;

	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
	error = sysctl_handle_int(oidp, &time, 0, req);
	if (error != 0)
		return (error);

	MGE_GLOBAL_LOCK(sc);
	if (arg2 == MGE_IC_RX) {
		sc->rx_ic_time = time;
		mge_set_rxic(sc);
	} else {
		sc->tx_ic_time = time;
		mge_set_txic(sc);
	}
	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

static void
mge_add_sysctls(struct mge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD, 0, "MGE Interrupts coalescing");
	children = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
	    "I", "IC RX time threshold");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
	    "I", "IC TX time threshold");
}
1839