/*-
 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#define	MV_PHY_ADDR_BASE	8

#include <dev/mge/if_mgevar.h>
#include <arm/mv/mvreg.h>
#include <arm/mv/mvvar.h>

#include "miibus_if.h"

/* PHY registers are in the address space of the first mge unit */
static struct mge_softc *sc_mge0 = NULL;

static int mge_probe(device_t dev);
static int mge_attach(device_t dev);
static int mge_detach(device_t dev);
static int mge_shutdown(device_t dev);
static int mge_suspend(device_t dev);
static int mge_resume(device_t dev);

static int mge_miibus_readreg(device_t dev, int phy, int reg);
static void mge_miibus_writereg(device_t dev, int phy, int reg, int value);

static int mge_ifmedia_upd(struct ifnet *ifp);
static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

static void mge_init(void *arg);
static void mge_init_locked(void *arg);
static void mge_start(struct ifnet *ifp);
static void mge_start_locked(struct ifnet *ifp);
static void mge_watchdog(struct mge_softc *sc);
static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);

static uint32_t mge_tfut_ipg(uint32_t val, int ver);
static uint32_t mge_rx_ipg(uint32_t val, int ver);
static void mge_ver_params(struct mge_softc *sc);

static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
static void mge_intr_rx(void *arg);
static void mge_intr_rx_locked(struct mge_softc *sc, int count);
static void mge_intr_tx(void *arg);
static void mge_intr_tx_locked(struct mge_softc *sc);
static void mge_intr_misc(void *arg);
static void mge_intr_sum(void *arg);
static void mge_intr_err(void *arg);
static void mge_stop(struct mge_softc *sc);
static void mge_tick(void *msc);
static uint32_t mge_set_port_serial_control(uint32_t media);
static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
static void mge_set_mac_address(struct mge_softc *sc);
static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
    uint8_t queue);
static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
static int mge_allocate_dma(struct mge_softc *sc);
static int mge_alloc_desc_dma(struct mge_softc *sc,
    struct mge_desc_wrapper* desc_tab, uint32_t size,
    bus_dma_tag_t *buffer_tag);
static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, bus_addr_t *paddr);
static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void mge_free_dma(struct mge_softc *sc);
static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize);
static void mge_offload_setup_descriptor(struct mge_softc *sc,
    struct mge_desc_wrapper *dw);
static uint8_t mge_crc8(uint8_t *data, int size);
static void mge_setup_multicast(struct mge_softc *sc);
static void mge_set_rxic(struct mge_softc *sc);
static void mge_set_txic(struct mge_softc *sc);
static void mge_add_sysctls(struct mge_softc *sc);
static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);

static device_method_t mge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mge_probe),
	DEVMETHOD(device_attach,	mge_attach),
	DEVMETHOD(device_detach,	mge_detach),
	DEVMETHOD(device_shutdown,	mge_shutdown),
	DEVMETHOD(device_suspend,	mge_suspend),
	DEVMETHOD(device_resume,	mge_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
	{ 0, 0 }
};

static driver_t mge_driver = {
	"mge",
	mge_methods,
	sizeof(struct mge_softc),
};

static devclass_t mge_devclass;

DRIVER_MODULE(mge, mbus, mge_driver, mge_devclass, 0, 0);
DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(mge, ether, 1, 1, 1);
MODULE_DEPEND(mge, miibus, 1, 1, 1);

static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

static struct {
	driver_intr_t *handler;
	char *description;
} mge_intrs[MGE_INTR_COUNT] = {
	{ mge_intr_rx,	"GbE receive interrupt" },
	{ mge_intr_tx,	"GbE transmit interrupt" },
	{ mge_intr_misc, "GbE misc interrupt" },
	{ mge_intr_sum,	"GbE summary interrupt" },
	{ mge_intr_err,	"GbE error interrupt" },
};

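/*
 * The table above pairs each of the five IRQ resources in res_spec with
 * its handler; note that mge_attach() currently installs handlers for
 * the first two (RX and TX) only.
 */
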
static void
mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
{
	uint32_t mac_l, mac_h;

	/* XXX use currently programmed MAC address; eventually this info will
	 * be provided by the loader */

	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);

	addr[0] = (mac_h & 0xff000000) >> 24;
	addr[1] = (mac_h & 0x00ff0000) >> 16;
	addr[2] = (mac_h & 0x0000ff00) >> 8;
	addr[3] = (mac_h & 0x000000ff);
	addr[4] = (mac_l & 0x0000ff00) >> 8;
	addr[5] = (mac_l & 0x000000ff);
}

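/*
 * Interrupt-coalescing (IPG) fields are laid out differently between
 * controller versions: the TX FIFO urgent threshold occupies bits [17:4]
 * on v1 and bits [19:4] on v2, while the RX IPG sits in bits [21:8] on v1
 * and is split on v2, with its MSB relocated to bit 25 and the low 15
 * bits at [21:7].  The helpers below hide those differences.
 */
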
static uint32_t
mge_tfut_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 4);
	case 2:
	default:
		return ((val & 0xffff) << 4);
	}
}

static uint32_t
mge_rx_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 8);
	case 2:
	default:
		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
	}
}

static void
mge_ver_params(struct mge_softc *sc)
{
	uint32_t d, r;

	soc_id(&d, &r);
	if (d == MV_DEV_88F6281 || d == MV_DEV_MV78100 ||
	    d == MV_DEV_MV78100_Z0) {
		sc->mge_ver = 2;
		sc->mge_mtu = 0x4e8;
		sc->mge_tfut_ipg_max = 0xFFFF;
		sc->mge_rx_ipg_max = 0xFFFF;
		sc->mge_tx_arb_cfg = 0xFC0000FF;
		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	} else {
		sc->mge_ver = 1;
		sc->mge_mtu = 0x458;
		sc->mge_tfut_ipg_max = 0x3FFF;
		sc->mge_rx_ipg_max = 0x3FFF;
		sc->mge_tx_arb_cfg = 0x000000FF;
		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	}
}

static void
mge_set_mac_address(struct mge_softc *sc)
{
	char *if_mac;
	uint32_t mac_l, mac_h;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	if_mac = (char *)IF_LLADDR(sc->ifp);

	mac_l = (if_mac[4] << 8) | (if_mac[5]);
	mac_h = (if_mac[0] << 24) | (if_mac[1] << 16) |
	    (if_mac[2] << 8) | (if_mac[3] << 0);

	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);

	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
}

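/*
 * Program the unicast DA filter: the table consists of MGE_UCAST_REG_NUMBER
 * registers holding four one-byte entries each, indexed by the low nibble
 * of the address' last byte.  Every entry carries a pass bit (bit 0) and
 * the target RX queue (bits 3:1).
 */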
static void
mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
{
	uint32_t reg_idx, reg_off, reg_val, i;

	last_byte &= 0xf;
	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
	reg_val = (1 | (queue << 1)) << reg_off;

	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
		if (i == reg_idx)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
		else
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
	}
}

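/*
 * In promiscuous mode the unicast promiscuous bit is set and every entry
 * of the unicast and both multicast filter tables is filled with a
 * "pass to <queue>" value, so that all DA lookups succeed.
 */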
static void
mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
{
	uint32_t port_config;
	uint32_t reg_val, i;

	/* Enable or disable promiscuous mode as needed */
	if (sc->ifp->if_flags & IFF_PROMISC) {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config |= PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
		    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
		}

		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);

	} else {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config &= ~PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
		}

		mge_set_mac_address(sc);
	}
}

static void
mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;

	*paddr = segs->ds_addr;
}

static int
mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    bus_addr_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error;
	int nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (nsegs != 1 || error)
		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}

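/*
 * Descriptors are allocated from the last to the first so that each can
 * be linked to the physical address of its successor as it is created;
 * the assignment after the loop then closes the chain into a ring.
 */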
static int
mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t *buffer_tag)
{
	struct mge_desc_wrapper *dw;
	bus_addr_t desc_paddr;
	int i, error;

	desc_paddr = 0;
	for (i = size - 1; i >= 0; i--) {
		dw = &(tab[i]);
		error = bus_dmamem_alloc(sc->mge_desc_dtag,
		    (void**)&(dw->mge_desc),
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &(dw->desc_dmap));

		if (error) {
			device_printf(sc->dev,
			    "failed to allocate DMA memory\n");
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);

		if (error) {
			device_printf(sc->dev, "can't load descriptor\n");
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		/* Chain descriptors */
		dw->mge_desc->next_desc = desc_paddr;
		desc_paddr = dw->mge_desc_paddr;
	}
	tab[size - 1].mge_desc->next_desc = desc_paddr;

	/* Allocate a busdma tag for mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    8, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    buffer_tag);			/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	/* Create busdma maps for mbufs */
	for (i = 0; i < size; i++) {
		dw = &(tab[i]);
		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
		if (error) {
			device_printf(sc->dev,
			    "failed to create map for mbuf\n");
			return (ENXIO);
		}

		dw->buffer = (struct mbuf*)NULL;
		dw->mge_desc->buffer = (bus_addr_t)NULL;
	}

	return (0);
}

static int
mge_allocate_dma(struct mge_softc *sc)
{
	struct mge_desc_wrapper *dw;
	int error, i;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    16, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->mge_desc_dtag);		/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create busdma tag for descriptors\n");
		return (ENXIO);
	}

	error = mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
	    &sc->mge_tx_dtag);
	if (error == 0)
		error = mge_alloc_desc_dma(sc, sc->mge_rx_desc,
		    MGE_RX_DESC_NUM, &sc->mge_rx_dtag);
	if (error)
		return (error);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;

	return (0);
}

static void
mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
{
	struct mge_desc_wrapper *dw;
	int i;

	for (i = 0; i < size; i++) {
		/* Free mbuf, if requested */
		dw = &(tab[i]);

		if (dw->buffer_dmap) {
			if (free_mbufs) {
				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
			}
			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
			if (free_mbufs)
				m_freem(dw->buffer);
		}
		/* Free descriptor memory */
		if (dw->desc_dmap) {
			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
		}
	}
}

static void
mge_free_dma(struct mge_softc *sc)
{
	/* Free descriptors and mbufs */
	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);

	/* Destroy mbuf dma tags */
	bus_dma_tag_destroy(sc->mge_tx_dtag);
	bus_dma_tag_destroy(sc->mge_rx_dtag);
	/* Destroy descriptor tag */
	bus_dma_tag_destroy(sc->mge_desc_dtag);
}

static void
mge_reinit_rx(struct mge_softc *sc)
{
	struct mge_desc_wrapper *dw;
	int i;

	MGE_RECEIVE_LOCK(sc);

	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);

	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
	sc->rx_desc_curr = 0;

	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

	MGE_RECEIVE_UNLOCK(sc);
}

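/*
 * DEVICE_POLLING support: when polling is enabled the RX/TX interrupt
 * handlers return early and this routine services both rings instead,
 * additionally checking the RX resource-error bit on
 * POLL_AND_CHECK_STATUS passes.
 */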
#ifdef DEVICE_POLLING
static poll_handler_t mge_poll;

static void
mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct mge_softc *sc = ifp->if_softc;
	uint32_t int_cause, int_cause_ext;

	MGE_GLOBAL_LOCK(sc);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		MGE_GLOBAL_UNLOCK(sc);
		return;
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

		/* Check for resource error */
		if (int_cause & MGE_PORT_INT_RXERRQ0)
			mge_reinit_rx(sc);

		if (int_cause || int_cause_ext) {
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		}
	}

	mge_intr_tx_locked(sc);
	mge_intr_rx_locked(sc, count);

	MGE_GLOBAL_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

static int
mge_attach(device_t dev)
{
	struct mge_softc *sc;
	struct ifnet *ifp;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	int i, error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	if (device_get_unit(dev) == 0)
		sc_mge0 = sc;

	/* Set chip version-dependent parameters */
	mge_ver_params(sc);

	/* Initialize mutexes */
	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
	    MTX_DEF);
	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
	    MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		mge_detach(dev);
		return (ENXIO);
	}

	/* Allocate DMA, buffers, buffer descriptors */
	error = mge_allocate_dma(sc);
	if (error) {
		mge_detach(dev);
		return (ENXIO);
	}

	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Configure defaults for interrupts coalescing */
	sc->rx_ic_time = 768;
	sc->tx_ic_time = 768;
	mge_add_sysctls(sc);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		mge_detach(dev);
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = MGE_CHECKSUM_FEATURES;

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ifp->if_init = mge_init;
	ifp->if_start = mge_start;
	ifp->if_ioctl = mge_ioctl;

	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	mge_get_mac_address(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);
	callout_init(&sc->wd_callout, 0);

	/* Probe PHY(s) */
	error = mii_phy_probe(dev, &sc->miibus, mge_ifmedia_upd,
	    mge_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII failed to find PHY\n");
		if_free(ifp);
		sc->ifp = NULL;
		mge_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Attach interrupt handlers */
	for (i = 0; i < 2; ++i) {
		error = bus_setup_intr(dev, sc->res[1 + i],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
		    sc, &sc->ih_cookie[i]);
		if (error) {
			device_printf(dev, "could not setup %s\n",
			    mge_intrs[i].description);
			ether_ifdetach(sc->ifp);
			return (error);
		}
	}

	return (0);
}

static int
mge_detach(device_t dev)
{
	struct mge_softc *sc;
	int error, i;

	sc = device_get_softc(dev);

	/* Stop controller and free TX queue */
	if (sc->ifp)
		mge_shutdown(dev);

	/* Wait for stopping ticks */
	callout_drain(&sc->wd_callout);

	/* Stop and release all interrupts */
	for (i = 0; i < 2; ++i) {
		error = bus_teardown_intr(dev, sc->res[1 + i],
		    sc->ih_cookie[i]);
		if (error)
			device_printf(dev, "could not release %s\n",
			    mge_intrs[i].description);
	}

	/* Detach network interface */
	if (sc->ifp) {
		ether_ifdetach(sc->ifp);
		if_free(sc->ifp);
	}

	/* Free DMA resources */
	mge_free_dma(sc);

	/* Free IO memory handler */
	bus_release_resources(dev, res_spec, sc->res);

	/* Destroy mutexes */
	mtx_destroy(&sc->receive_lock);
	mtx_destroy(&sc->transmit_lock);

	return (0);
}

static void
mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mge_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	MGE_TRANSMIT_LOCK(sc);

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	MGE_TRANSMIT_UNLOCK(sc);
}

static uint32_t
mge_set_port_serial_control(uint32_t media)
{
	uint32_t port_config;

	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);

	if (IFM_TYPE(media) == IFM_ETHER) {
		switch (IFM_SUBTYPE(media)) {
			case IFM_AUTO:
				break;
			case IFM_1000_T:
				port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
				    PORT_SERIAL_SPEED_AUTONEG);
				break;
			case IFM_100_TX:
				port_config |= (PORT_SERIAL_MII_SPEED_100 |
				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
				    PORT_SERIAL_SPEED_AUTONEG);
				break;
			case IFM_10_T:
				port_config |= (PORT_SERIAL_AUTONEG |
				    PORT_SERIAL_AUTONEG_FC |
				    PORT_SERIAL_SPEED_AUTONEG);
				break;
		}
		if (media & IFM_FDX)
			port_config |= PORT_SERIAL_FULL_DUPLEX;
	}
	return (port_config);
}

static int
mge_ifmedia_upd(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP) {
		MGE_GLOBAL_LOCK(sc);

		sc->mge_media_status = sc->mii->mii_media.ifm_media;
		mii_mediachg(sc->mii);
		mge_init_locked(sc);

		MGE_GLOBAL_UNLOCK(sc);
	}

	return (0);
}

static void
mge_init(void *arg)
{
	struct mge_softc *sc = arg;

	MGE_GLOBAL_LOCK(sc);

	mge_init_locked(arg);

	MGE_GLOBAL_UNLOCK(sc);
}

static void
mge_init_locked(void *arg)
{
	struct mge_softc *sc = arg;
	struct mge_desc_wrapper *dw;
	volatile uint32_t reg_val;
	int i, count;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	/* Stop interface */
	mge_stop(sc);

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Set MAC address */
	mge_set_mac_address(sc);

	/* Setup multicast filters */
	mge_setup_multicast(sc);

	if (sc->mge_ver == 2) {
		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
	}

	/* Initialize TX queue configuration registers */
	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);

	/* Clear TX queue configuration registers for unused queues */
	for (i = 1; i < 7; i++) {
		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
	}

	/*
	 * Set default MTU; note that sc->mge_mtu holds the offset of the
	 * (version-dependent) MTU register.
	 */
	MGE_WRITE(sc, sc->mge_mtu, 0);

	/* Port configuration */
	MGE_WRITE(sc, MGE_PORT_CONFIG,
	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
	    PORT_CONFIG_ARO_RXQ(0));
	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);

	/* Setup port configuration */
	reg_val = mge_set_port_serial_control(sc->mge_media_status);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);

	/* Setup SDMA configuration */
	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
	    MGE_SDMA_TX_BYTE_SWAP |
	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));

	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);

	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Reset descriptor indexes */
	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Enable RX descriptors */
	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &sc->mge_rx_desc[i];
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		dw->mge_desc->buff_size = MCLBYTES;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

	/* Enable port */
	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val |= PORT_SERIAL_ENABLE;
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
	count = 0x100000;
	for (;;) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (reg_val & MGE_STATUS_LINKUP)
			break;
		DELAY(100);
		if (--count == 0) {
			if_printf(sc->ifp, "Timeout on link-up\n");
			break;
		}
	}

	/* Setup interrupts coalescing */
	mge_set_rxic(sc);
	mge_set_txic(sc);

	/* Enable interrupts */
#ifdef DEVICE_POLLING
	/*
	 * ...only if polling is not turned on; when polling is enabled,
	 * keep interrupts masked explicitly.
	 */
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		mge_intrs_ctrl(sc, 0);
	else
#endif /* DEVICE_POLLING */
	mge_intrs_ctrl(sc, 1);

	/* Activate network interface */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->wd_timer = 0;

	/* Schedule watchdog timeout */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_intr_err(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __func__);
}

static void
mge_intr_misc(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __func__);
}

static void
mge_intr_rx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause, int_cause_ext;

	MGE_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_RECEIVE_UNLOCK(sc);
		return;
	}
#endif

	/* Get interrupt cause */
	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	/* Check for resource error */
	if (int_cause & MGE_PORT_INT_RXERRQ0) {
		mge_reinit_rx(sc);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
		    int_cause & ~MGE_PORT_INT_RXERRQ0);
	}

	int_cause &= MGE_PORT_INT_RXQ0;
	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;

	if (int_cause || int_cause_ext) {
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		mge_intr_rx_locked(sc, -1);
	}

	MGE_RECEIVE_UNLOCK(sc);
}

static void
mge_intr_rx_locked(struct mge_softc *sc, int count)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t status;
	uint16_t bufsize;
	struct mge_desc_wrapper* dw;
	struct mbuf *mb;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	while (count != 0) {
		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get status */
		status = dw->mge_desc->cmd_status;
		bufsize = dw->mge_desc->buff_size;
		if ((status & MGE_DMA_OWNED) != 0)
			break;

		if (dw->mge_desc->byte_count &&
		    !(status & MGE_ERR_SUMMARY)) {
			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
			    BUS_DMASYNC_POSTREAD);

			mb = m_devget(dw->buffer->m_data,
			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
			    0, ifp, NULL);

			/* Give up if no mbufs */
			if (mb == NULL)
				break;

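			/*
			 * The controller prepends two bytes of padding to
			 * each received frame so that the IP header ends up
			 * 32-bit aligned; strip them before passing the
			 * mbuf up the stack.
			 */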
			mb->m_len -= 2;
			mb->m_pkthdr.len -= 2;
			mb->m_data += 2;

			mge_offload_process_frame(ifp, mb, status,
			    bufsize);

			MGE_RECEIVE_UNLOCK(sc);
			(*ifp->if_input)(ifp, mb);
			MGE_RECEIVE_LOCK(sc);
		}

		dw->mge_desc->byte_count = 0;
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (count > 0)
			count -= 1;
	}
}

static void
mge_intr_sum(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __func__);
}

static void
mge_intr_tx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause_ext;

	MGE_TRANSMIT_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_TRANSMIT_UNLOCK(sc);
		return;
	}
#endif

	/* Ack the interrupt */
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
	    int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);

	mge_intr_tx_locked(sc);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_intr_tx_locked(struct mge_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	uint32_t status;
	int send = 0;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	/* Disable watchdog */
	sc->wd_timer = 0;

	while (sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx =
		    (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		/*
		 * Update collision statistics: LC marks a late collision,
		 * RL means the retransmit limit (16 collisions) was hit.
		 */
		if (status & MGE_ERR_SUMMARY) {
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
				ifp->if_collisions++;
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
				ifp->if_collisions += 16;
		}

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
		send++;

		ifp->if_opackets++;
	}

	if (send) {
		/* Now send anything that was pending */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		mge_start_locked(ifp);
	}
}

static int
mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error;
	uint32_t flags;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		MGE_GLOBAL_LOCK(sc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = ifp->if_flags ^ sc->mge_if_flags;
				if (flags & IFF_PROMISC)
					mge_set_prom_mode(sc,
					    MGE_RX_DEFAULT_QUEUE);

				if (flags & IFF_ALLMULTI)
					mge_setup_multicast(sc);
			} else
				mge_init_locked(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			mge_stop(sc);

		sc->mge_if_flags = ifp->if_flags;
		MGE_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			MGE_GLOBAL_LOCK(sc);
			mge_setup_multicast(sc);
			MGE_GLOBAL_UNLOCK(sc);
		}
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(mge_poll, ifp);
				if (error)
					return (error);

				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 1);
				ifp->if_capenable &= ~IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;
	case SIOCGIFMEDIA: /* fall through */
	case SIOCSIFMEDIA:
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T &&
		    !(ifr->ifr_media & IFM_FDX)) {
			device_printf(sc->dev,
			    "1000baseT half-duplex unsupported\n");
			return (0);
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

static int
mge_miibus_readreg(device_t dev, int phy, int reg)
{
	uint32_t retries;

	/*
	 * We assume a static PHY address <=> device unit mapping:
	 * PHY Address = MV_PHY_ADDR_BASE + device unit.
	 * This is true for most Marvell boards.
	 *
	 * The check below ensures proper PHY detection on each device
	 * unit.
	 */

	if ((MV_PHY_ADDR_BASE + device_get_unit(dev)) != phy)
		return (0);

	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));

	retries = MGE_SMI_READ_RETRIES;
	while (--retries &&
	    !(MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_READVALID))
		DELAY(MGE_SMI_READ_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while reading from PHY\n");

	return (MGE_READ(sc_mge0, MGE_REG_SMI) & 0xffff);
}

static void
mge_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	uint32_t retries;

	if ((MV_PHY_ADDR_BASE + device_get_unit(dev)) != phy)
		return;

	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));

	retries = MGE_SMI_WRITE_RETRIES;
	while (--retries && MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_BUSY)
		DELAY(MGE_SMI_WRITE_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while writing to PHY\n");
}

static int
mge_probe(device_t dev)
{

	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
	return (BUS_PROBE_DEFAULT);
}

static int
mge_resume(device_t dev)
{

	device_printf(dev, "%s\n", __func__);
	return (0);
}

static int
mge_shutdown(device_t dev)
{
	struct mge_softc *sc = device_get_softc(dev);

	MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(sc->ifp);
#endif

	mge_stop(sc);

	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

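/*
 * Each outgoing frame occupies exactly one TX descriptor;
 * mge_start_locked() collapses mbuf chains with m_defrag() beforehand,
 * and the DMA load below is required to return a single segment.
 */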
static int
mge_encap(struct mge_softc *sc, struct mbuf *m0)
{
	struct mge_desc_wrapper *dw = NULL;
	struct ifnet *ifp;
	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
	bus_dmamap_t mapp;
	int error;
	int seg, nsegs;
	int desc_no;

	ifp = sc->ifp;

	/* Check for free descriptors */
	if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
		/* No free descriptors */
		return (-1);
	}

	/* Fetch unused map */
	desc_no = sc->tx_desc_curr;
	dw = &sc->mge_tx_desc[desc_no];
	mapp = dw->buffer_dmap;

	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0 || nsegs != 1) {
		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
		return ((error != 0) ? error : -1);
	}

	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);

	/* Everything is ok, now we can send buffers */
	for (seg = 0; seg < nsegs; seg++) {
		dw->mge_desc->byte_count = segs[seg].ds_len;
		dw->mge_desc->buffer = segs[seg].ds_addr;
		dw->buffer = m0;
		dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
		    MGE_DMA_OWNED;

		if (seg == 0)
			mge_offload_setup_descriptor(sc, dw);
	}

	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
	sc->tx_desc_used_count++;
	return (0);
}

static void
mge_tick(void *msc)
{
	struct mge_softc *sc = msc;

	/* Check for TX timeout */
	mge_watchdog(sc);

	mii_tick(sc->mii);

	/* Check for media type change */
	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
		mge_ifmedia_upd(sc->ifp);

	/* Schedule another timeout one second from now */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

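/*
 * wd_timer is armed (set to 5 ticks) whenever mge_start_locked() queues
 * frames and cleared on TX completion; mge_tick() calls this routine
 * every second, and once the counter decrements to zero the controller
 * is assumed to be wedged and is reinitialized.
 */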
static void
mge_watchdog(struct mge_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;

	MGE_GLOBAL_LOCK(sc);

	if (sc->wd_timer == 0 || --sc->wd_timer) {
		MGE_GLOBAL_UNLOCK(sc);
		return;
	}

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	mge_stop(sc);
	mge_init_locked(sc);

	MGE_GLOBAL_UNLOCK(sc);
}

static void
mge_start(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK(sc);

	mge_start_locked(ifp);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_start_locked(struct ifnet *ifp)
{
	struct mge_softc *sc;
	struct mbuf *m0, *mtmp;
	uint32_t reg_val, queued = 0;

	sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (;;) {
		/* Get packet from the queue */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		mtmp = m_defrag(m0, M_DONTWAIT);
		if (mtmp)
			m0 = mtmp;

		if (mge_encap(sc, m0)) {
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		BPF_MTAP(ifp, m0);
	}

	if (queued) {
		/* Enable transmitter and watchdog timer */
		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
		sc->wd_timer = 5;
	}
}

static void
mge_stop(struct mge_softc *sc)
{
	struct ifnet *ifp;
	volatile uint32_t reg_val, status;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	int count;

	ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Stop tick engine */
	callout_stop(&sc->wd_callout);

	/* Disable interface */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->wd_timer = 0;

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Disable Rx and Tx */
	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);

	/* Remove pending data from TX queue */
	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
	    sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
		    MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);

		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
	}

	/* Wait for end of transmission */
	count = 0x100000;
	while (count--) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
			break;
		DELAY(100);
	}

	if (count < 0)
		if_printf(ifp,
		    "%s: timeout while waiting for end of transmission\n",
		    __func__);

	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val &= ~(PORT_SERIAL_ENABLE);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
}

static int
mge_suspend(device_t dev)
{

	device_printf(dev, "%s\n", __func__);
	return (0);
}

static void
mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize)
{
	int csum_flags = 0;

	if (ifp->if_capenable & IFCAP_RXCSUM) {
		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
		    (status & MGE_RX_L4_CSUM_OK)) {
			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			frame->m_pkthdr.csum_data = 0xFFFF;
		}

		frame->m_pkthdr.csum_flags = csum_flags;
	}
}

static void
mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
{
	struct mbuf *m0 = dw->buffer;
	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
	int csum_flags = m0->m_pkthdr.csum_flags;
	int cmd_status = 0;
	struct ip *ip;
	int ehlen, etype;

	if (csum_flags) {
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			etype = ntohs(eh->evl_proto);
			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			csum_flags |= MGE_TX_VLAN_TAGGED;
		} else {
			etype = ntohs(eh->evl_encap_proto);
			ehlen = ETHER_HDR_LEN;
		}

		if (etype != ETHERTYPE_IP) {
			if_printf(sc->ifp,
			    "TCP/IP Offload enabled for unsupported "
			    "protocol!\n");
			return;
		}

		ip = (struct ip *)(m0->m_data + ehlen);
		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);

		if ((m0->m_flags & M_FRAG) == 0)
			cmd_status |= MGE_TX_NOT_FRAGMENT;
	}

	if (csum_flags & CSUM_IP)
		cmd_status |= MGE_TX_GEN_IP_CSUM;

	if (csum_flags & CSUM_TCP)
		cmd_status |= MGE_TX_GEN_L4_CSUM;

	if (csum_flags & CSUM_UDP)
		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;

	dw->mge_desc->cmd_status |= cmd_status;
}

static void
mge_intrs_ctrl(struct mge_softc *sc, int enable)
{

	if (enable) {
		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
		    MGE_PORT_INT_EXT_TXBUF0);
	} else {
		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_INT_MASK, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
	}
}

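/*
 * Table-driven CRC-8 over the destination MAC address (polynomial 0x07,
 * i.e. x^8 + x^2 + x + 1, MSB first, zero initial value); this mirrors
 * the hash the controller applies when looking up the "other multicast"
 * filter table.
 */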
static uint8_t
mge_crc8(uint8_t *data, int size)
{
	uint8_t crc = 0;
	static const uint8_t ct[256] = {
		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
	};

	while (size--)
		crc = ct[crc ^ *(data++)];

	return (crc);
}

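/*
 * Multicast filtering uses two tables: addresses matching the
 * 01:00:5e:00:00:xx prefix (the low end of the IPv4 multicast MAC range)
 * are entered into the "special" table, indexed directly by the last
 * address byte, while all other groups are hashed with mge_crc8() into
 * the "other" table.  Each byte-wide entry again holds a pass bit and
 * the target RX queue.
 */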
static void
mge_setup_multicast(struct mge_softc *sc)
{
	uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
	uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
	uint32_t smt[MGE_MCAST_REG_NUMBER];
	uint32_t omt[MGE_MCAST_REG_NUMBER];
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	uint8_t *mac;
	int i;

	if (ifp->if_flags & IFF_ALLMULTI) {
		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
			smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
	} else {
		memset(smt, 0, sizeof(smt));
		memset(omt, 0, sizeof(omt));

		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;

			mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			if (memcmp(mac, special, sizeof(special)) == 0) {
				i = mac[5];
				smt[i >> 2] |= v << ((i & 0x03) << 3);
			} else {
				i = mge_crc8(mac, ETHER_ADDR_LEN);
				omt[i >> 2] |= v << ((i & 0x03) << 3);
			}
		}
		IF_ADDR_UNLOCK(ifp);
	}

	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
	}
}

static void
mge_set_rxic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
		sc->rx_ic_time = sc->mge_rx_ipg_max;

	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
}

static void
mge_set_txic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
		sc->tx_ic_time = sc->mge_tfut_ipg_max;

	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
}

static int
mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
{
	struct mge_softc *sc = (struct mge_softc *)arg1;
	uint32_t time;
	int error;

	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
	error = sysctl_handle_int(oidp, &time, 0, req);
	if (error != 0)
		return (error);

	MGE_GLOBAL_LOCK(sc);
	if (arg2 == MGE_IC_RX) {
		sc->rx_ic_time = time;
		mge_set_rxic(sc);
	} else {
		sc->tx_ic_time = time;
		mge_set_txic(sc);
	}
	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

static void
mge_add_sysctls(struct mge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD, 0, "MGE Interrupts coalescing");
	children = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
	    "I", "IC RX time threshold");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
	    "I", "IC TX time threshold");
}