xref: /freebsd/sys/dev/mge/if_mge.c (revision b3aaa0cc21c63d388230c7ef2a80abd631ff20d5)
1 /*-
2  * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
3  * All rights reserved.
4  *
5  * Developed by Semihalf.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of MARVELL nor the names of contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #ifdef HAVE_KERNEL_OPTION_HEADERS
33 #include "opt_device_polling.h"
34 #endif
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/endian.h>
42 #include <sys/mbuf.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/socket.h>
48 #include <sys/sysctl.h>
49 
50 #include <net/ethernet.h>
51 #include <net/bpf.h>
52 #include <net/if.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_types.h>
57 #include <net/if_vlan_var.h>
58 
59 #include <netinet/in_systm.h>
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
62 
63 #include <sys/sockio.h>
64 #include <sys/bus.h>
65 #include <machine/bus.h>
66 #include <sys/rman.h>
67 #include <machine/resource.h>
68 
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
71 
72 #define	MV_PHY_ADDR_BASE	8
73 
74 #include <dev/mge/if_mgevar.h>
75 #include <arm/mv/mvreg.h>
76 #include <arm/mv/mvvar.h>
77 
78 #include "miibus_if.h"
79 
80 /* PHY registers are in the address space of the first mge unit */
81 static struct mge_softc *sc_mge0 = NULL;
82 
83 static int mge_probe(device_t dev);
84 static int mge_attach(device_t dev);
85 static int mge_detach(device_t dev);
86 static int mge_shutdown(device_t dev);
87 static int mge_suspend(device_t dev);
88 static int mge_resume(device_t dev);
89 
90 static int mge_miibus_readreg(device_t dev, int phy, int reg);
91 static void mge_miibus_writereg(device_t dev, int phy, int reg, int value);
92 
93 static int mge_ifmedia_upd(struct ifnet *ifp);
94 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
95 
96 static void mge_init(void *arg);
97 static void mge_init_locked(void *arg);
98 static void mge_start(struct ifnet *ifp);
99 static void mge_start_locked(struct ifnet *ifp);
100 static void mge_watchdog(struct mge_softc *sc);
101 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
102 
103 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
104 static uint32_t mge_rx_ipg(uint32_t val, int ver);
105 static void mge_ver_params(struct mge_softc *sc);
106 
107 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
108 static void mge_intr_rx(void *arg);
109 static void mge_intr_rx_locked(struct mge_softc *sc, int count);
110 static void mge_intr_tx(void *arg);
111 static void mge_intr_tx_locked(struct mge_softc *sc);
112 static void mge_intr_misc(void *arg);
113 static void mge_intr_sum(void *arg);
114 static void mge_intr_err(void *arg);
115 static void mge_stop(struct mge_softc *sc);
116 static void mge_tick(void *msc);
117 static uint32_t mge_set_port_serial_control(uint32_t media);
118 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
119 static void mge_set_mac_address(struct mge_softc *sc);
120 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
121     uint8_t queue);
122 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
123 static int mge_allocate_dma(struct mge_softc *sc);
124 static int mge_alloc_desc_dma(struct mge_softc *sc,
125     struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
126 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
127     struct mbuf **mbufp, bus_addr_t *paddr);
128 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error);
129 static void mge_free_dma(struct mge_softc *sc);
130 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
131     bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
132 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
133     uint32_t status, uint16_t bufsize);
134 static void mge_offload_setup_descriptor(struct mge_softc *sc,
135     struct mge_desc_wrapper *dw);
136 static uint8_t mge_crc8(uint8_t *data, int size);
137 static void mge_setup_multicast(struct mge_softc *sc);
138 static void mge_set_rxic(struct mge_softc *sc);
139 static void mge_set_txic(struct mge_softc *sc);
140 static void mge_add_sysctls(struct mge_softc *sc);
141 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
142 
/* newbus method dispatch table for the mge driver. */
static device_method_t mge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mge_probe),
	DEVMETHOD(device_attach,	mge_attach),
	DEVMETHOD(device_detach,	mge_detach),
	DEVMETHOD(device_shutdown,	mge_shutdown),
	DEVMETHOD(device_suspend,	mge_suspend),
	DEVMETHOD(device_resume,	mge_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
	{ 0, 0 }
};
156 
/* Driver description: name, methods and per-instance softc size. */
static driver_t mge_driver = {
	"mge",
	mge_methods,
	sizeof(struct mge_softc),
};
162 
static devclass_t mge_devclass;

/* Attach mge to the Marvell SoC bus; hang miibus off each mge instance. */
DRIVER_MODULE(mge, mbus, mge_driver, mge_devclass, 0, 0);
DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(mge, ether, 1, 1, 1);
MODULE_DEPEND(mge, miibus, 1, 1, 1);
169 
/* One register window plus five per-port interrupt lines (cf. mge_intrs). */
static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};
179 
/*
 * Interrupt handlers in IRQ resource order.  Note that mge_attach() only
 * hooks up the first two entries (RX and TX).
 */
static struct {
	driver_intr_t *handler;
	char * description;
} mge_intrs[MGE_INTR_COUNT] = {
	{ mge_intr_rx,	"GbE receive interrupt" },
	{ mge_intr_tx,	"GbE transmit interrupt" },
	{ mge_intr_misc,"GbE misc interrupt" },
	{ mge_intr_sum,	"GbE summary interrupt" },
	{ mge_intr_err,	"GbE error interrupt" },
};
190 
191 static void
192 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
193 {
194 	uint32_t mac_l, mac_h;
195 
196 	/* XXX use currently programmed MAC address; eventually this info will
197 	 * be provided by the loader */
198 
199 	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
200 	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
201 
202 	addr[0] = (mac_h & 0xff000000) >> 24;
203 	addr[1] = (mac_h & 0x00ff0000) >> 16;
204 	addr[2] = (mac_h & 0x0000ff00) >> 8;
205 	addr[3] = (mac_h & 0x000000ff);
206 	addr[4] = (mac_l & 0x0000ff00) >> 8;
207 	addr[5] = (mac_l & 0x000000ff);
208 }
209 
/*
 * Scale a TX FIFO urgent threshold IPG value into register format; the
 * usable field width differs between controller versions (14 bits on v1,
 * 16 bits on v2 and anything newer).
 */
static uint32_t
mge_tfut_ipg(uint32_t val, int ver)
{
	uint32_t mask;

	mask = (ver == 1) ? 0x3fff : 0xffff;
	return ((val & mask) << 4);
}
222 
/*
 * Encode an RX interrupt coalescing IPG value into register format for the
 * given controller version.  On v2 (and the default case) the MSB of the
 * 16-bit value lives at a different bit position than the low 15 bits.
 */
static uint32_t
mge_rx_ipg(uint32_t val, int ver)
{

	if (ver == 1)
		return ((val & 0x3fff) << 8);
	return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
}
235 
236 static void
237 mge_ver_params(struct mge_softc *sc)
238 {
239 	uint32_t d, r;
240 
241 	soc_id(&d, &r);
242 	if (d == MV_DEV_88F6281 || d == MV_DEV_MV78100) {
243 		sc->mge_ver = 2;
244 		sc->mge_mtu = 0x4e8;
245 		sc->mge_tfut_ipg_max = 0xFFFF;
246 		sc->mge_rx_ipg_max = 0xFFFF;
247 		sc->mge_tx_arb_cfg = 0xFC0000FF;
248 		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
249 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
250 	} else {
251 		sc->mge_ver = 1;
252 		sc->mge_mtu = 0x458;
253 		sc->mge_tfut_ipg_max = 0x3FFF;
254 		sc->mge_rx_ipg_max = 0x3FFF;
255 		sc->mge_tx_arb_cfg = 0x000000FF;
256 		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
257 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
258 	}
259 }
260 
261 static void
262 mge_set_mac_address(struct mge_softc *sc)
263 {
264 	char *if_mac;
265 	uint32_t mac_l, mac_h;
266 
267 	MGE_GLOBAL_LOCK_ASSERT(sc);
268 
269 	if_mac = (char *)IF_LLADDR(sc->ifp);
270 
271 	mac_l = (if_mac[4] << 8) | (if_mac[5]);
272 	mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
273 	    (if_mac[2] << 8) | (if_mac[3] << 0);
274 
275 	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
276 	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
277 
278 	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
279 }
280 
281 static void
282 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
283 {
284 	uint32_t reg_idx, reg_off, reg_val, i;
285 
286 	last_byte &= 0xf;
287 	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
288 	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
289 	reg_val = (1 | (queue << 1)) << reg_off;
290 
291 	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
292 		if ( i == reg_idx)
293 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
294 		else
295 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
296 	}
297 }
298 
299 static void
300 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
301 {
302 	uint32_t port_config;
303 	uint32_t reg_val, i;
304 
305 	/* Enable or disable promiscuous mode as needed */
306 	if (sc->ifp->if_flags & IFF_PROMISC) {
307 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
308 		port_config |= PORT_CONFIG_UPM;
309 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
310 
311 		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
312 		   (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
313 
314 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
315 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
316 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
317 		}
318 
319 		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
320 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
321 
322 	} else {
323 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
324 		port_config &= ~PORT_CONFIG_UPM;
325 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
326 
327 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
328 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
329 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
330 		}
331 
332 		mge_set_mac_address(sc);
333 	}
334 }
335 
/*
 * Busdma load callback: store the single segment's bus address into the
 * caller-supplied location.
 *
 * NOTE(review): the destination is written through a u_int32_t pointer
 * while some callers pass a bus_addr_t * -- this assumes bus_addr_t is
 * 32-bit on this platform; confirm before reusing elsewhere.
 */
static void
mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;

	*paddr = segs->ds_addr;
}
346 
/*
 * Allocate a fresh mbuf cluster and DMA-load it for receive.
 *
 * If *mbufp already holds a loaded buffer, its map is synced and unloaded
 * first.  On success *mbufp and *paddr are replaced with the new mbuf and
 * its bus address.  Returns 0, or ENOBUFS when no cluster is available;
 * panics if the cluster cannot be loaded as exactly one segment.
 */
static int
mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    bus_addr_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error;
	int nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	/* Expose the whole cluster to the hardware. */
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	/* Release the previous buffer's mapping before reusing the map. */
	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (nsegs != 1 || error)
		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}
380 
/*
 * Allocate DMA memory for a ring of 'size' descriptors in 'tab', link them
 * into a circular list, then create a busdma tag (returned via
 * 'buffer_tag') and one map per descriptor for the data buffers.
 *
 * Returns 0 or ENXIO.  On failure, resources created so far are left in
 * place for the caller to reclaim through mge_free_desc()/mge_free_dma().
 *
 * NOTE(review): the error if_printf()s dereference sc->ifp, which is still
 * NULL when this runs from mge_allocate_dma() during attach -- verify
 * before relying on those failure paths.
 */
static int
mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t *buffer_tag)
{
	struct mge_desc_wrapper *dw;
	bus_addr_t desc_paddr;
	int i, error;

	desc_paddr = 0;
	/*
	 * Walk backwards so each descriptor can be pointed at its successor
	 * (allocated in the previous iteration).
	 */
	for (i = size - 1; i >= 0; i--) {
		dw = &(tab[i]);
		error = bus_dmamem_alloc(sc->mge_desc_dtag,
		    (void**)&(dw->mge_desc),
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &(dw->desc_dmap));

		if (error) {
			if_printf(sc->ifp, "failed to allocate DMA memory\n");
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);

		if (error) {
			if_printf(sc->ifp, "can't load descriptor\n");
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		/* Chain descriptors */
		dw->mge_desc->next_desc = desc_paddr;
		desc_paddr = dw->mge_desc_paddr;
	}
	/* Close the ring: the last descriptor points back at the first. */
	tab[size - 1].mge_desc->next_desc = desc_paddr;

	/* Allocate a busdma tag for mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    8, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    buffer_tag);			/* dmat */
	if (error) {
		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	/* Create TX busdma maps */
	for (i = 0; i < size; i++) {
		dw = &(tab[i]);
		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
		if (error) {
			if_printf(sc->ifp, "failed to create map for mbuf\n");
			return (ENXIO);
		}

		dw->buffer = (struct mbuf*)NULL;
		dw->mge_desc->buffer = (bus_addr_t)NULL;
	}

	return (0);
}
451 
452 static int
453 mge_allocate_dma(struct mge_softc *sc)
454 {
455 	int error;
456 	struct mge_desc_wrapper *dw;
457 	int num, i;
458 
459 
460 	num = MGE_TX_DESC_NUM + MGE_RX_DESC_NUM;
461 
462 	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
463 	error = bus_dma_tag_create(NULL,	/* parent */
464 	    16, 0,				/* alignment, boundary */
465 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
466 	    BUS_SPACE_MAXADDR,			/* highaddr */
467 	    NULL, NULL,				/* filtfunc, filtfuncarg */
468 	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
469 	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
470 	    NULL, NULL,				/* lockfunc, lockfuncarg */
471 	    &sc->mge_desc_dtag);		/* dmat */
472 
473 
474 	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
475 	    &sc->mge_tx_dtag);
476 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
477 	    &sc->mge_rx_dtag);
478 
479 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
480 		dw = &(sc->mge_rx_desc[i]);
481 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
482 		    &dw->mge_desc->buffer);
483 	}
484 
485 	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
486 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
487 
488 	return (0);
489 }
490 
/*
 * Tear down a descriptor ring built by mge_alloc_desc_dma(): destroy the
 * per-descriptor buffer maps (and, when 'free_mbufs' is set, unload and
 * free the attached mbufs) and release the descriptor DMA memory.
 * Skips entries whose maps were never created, so it is safe to call on a
 * partially constructed ring.
 */
static void
mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
{
	struct mge_desc_wrapper *dw;
	int i;

	for (i = 0; i < size; i++) {
		/* Free RX mbuf */
		dw = &(tab[i]);

		if (dw->buffer_dmap) {
			if (free_mbufs) {
				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
			}
			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
			if (free_mbufs)
				m_freem(dw->buffer);
		}
		/* Free RX descriptors */
		if (dw->desc_dmap) {
			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
		}
	}
}
522 
523 static void
524 mge_free_dma(struct mge_softc *sc)
525 {
526 	/* Free desciptors and mbufs */
527 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
528 	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
529 
530 	/* Destroy mbuf dma tag */
531 	bus_dma_tag_destroy(sc->mge_tx_dtag);
532 	bus_dma_tag_destroy(sc->mge_rx_dtag);
533 	/* Destroy descriptors tag */
534 	bus_dma_tag_destroy(sc->mge_desc_dtag);
535 }
536 
/*
 * Recover from an RX resource error: drop and rebuild the entire RX
 * descriptor ring with fresh mbufs, reset the ring pointers and re-enable
 * the RX queue.  Called from the RX interrupt and polling paths; takes
 * the receive lock itself.
 */
static void
mge_reinit_rx(struct mge_softc *sc)
{
	struct mge_desc_wrapper *dw;
	int i;

	MGE_RECEIVE_LOCK(sc);

	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);

	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		&dw->mge_desc->buffer);
	}

	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
	sc->rx_desc_curr = 0;

	/* Point the hardware at the head of the rebuilt ring. */
	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

	MGE_RECEIVE_UNLOCK(sc);
}
567 
568 #ifdef DEVICE_POLLING
569 static poll_handler_t mge_poll;
570 
/*
 * DEVICE_POLLING handler: with both driver locks held, service TX
 * completions and up to 'count' received frames.  On POLL_AND_CHECK_STATUS
 * additionally recover from RX resource errors and acknowledge any pending
 * interrupt causes.
 */
static void
mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct mge_softc *sc = ifp->if_softc;
	uint32_t int_cause, int_cause_ext;

	MGE_GLOBAL_LOCK(sc);

	/* Nothing to do if the interface is not running. */
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		MGE_GLOBAL_UNLOCK(sc);
		return;
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

		/* Check for resource error */
		if (int_cause & MGE_PORT_INT_RXERRQ0)
			mge_reinit_rx(sc);

		/* Acknowledge (clear) the causes just observed. */
		if (int_cause || int_cause_ext) {
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		}
	}

	mge_intr_tx_locked(sc);
	mge_intr_rx_locked(sc, count);

	MGE_GLOBAL_UNLOCK(sc);
}
603 #endif /* DEVICE_POLLING */
604 
/*
 * Attach entry point: initialize locks, bus resources, DMA rings, the
 * network interface, the PHY and the interrupt handlers for one GbE port.
 * Returns 0 or an errno; most failure paths unwind partially initialized
 * state through mge_detach().
 */
static int
mge_attach(device_t dev)
{
	struct mge_softc *sc;
	struct ifnet *ifp;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	int i, error ;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* PHY registers live in unit 0's address space (see sc_mge0). */
	if (device_get_unit(dev) == 0)
		sc_mge0 = sc;

	/* Set chip version-dependent parameters */
	mge_ver_params(sc);

	/* Initialize mutexes */
	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		mge_detach(dev);
		return (ENXIO);
	}

	/* Allocate DMA, buffers, buffer descriptors */
	/* NOTE(review): sc->ifp is still NULL at this point, so error
	 * printfs inside the DMA allocation helpers would dereference a
	 * NULL ifp -- confirm those failure paths. */
	error = mge_allocate_dma(sc);
	if (error) {
		mge_detach(dev);
		return (ENXIO);
	}

	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;

	/* Configure defaults for interrupts coalescing */
	sc->rx_ic_time = 768;
	sc->tx_ic_time = 768;
	mge_add_sysctls(sc);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		mge_detach(dev);
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = MGE_CHECKSUM_FEATURES;

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ifp->if_init = mge_init;
	ifp->if_start = mge_start;
	ifp->if_ioctl = mge_ioctl;

	/* Size the send queue to the TX descriptor ring (one slot spare). */
	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	/* Use the MAC address currently programmed into the controller. */
	mge_get_mac_address(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);
	callout_init(&sc->wd_callout, 0);

	/* Probe PHY(s) */
	error = mii_phy_probe(dev, &sc->miibus, mge_ifmedia_upd, mge_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII failed to find PHY\n");
		if_free(ifp);
		sc->ifp = NULL;
		mge_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Attach interrupt handlers */
	/* Only the RX and TX handlers (first two mge_intrs[] entries) are
	 * hooked up. */
	for (i = 0; i < 2; ++i) {
		error = bus_setup_intr(dev, sc->res[1 + i],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
		    sc, &sc->ih_cookie[i]);
		if (error) {
			device_printf(dev, "could not setup %s\n",
			    mge_intrs[i].description);
			/* NOTE(review): this path only detaches the ifnet;
			 * DMA state, resources and mutexes are not released
			 * here -- verify intended. */
			ether_ifdetach(sc->ifp);
			return (error);
		}
	}

	return (0);
}
708 
/*
 * Detach entry point: stop the controller, drain the watchdog callout,
 * tear down interrupt handlers, the ifnet, DMA state, bus resources and
 * mutexes.  Also used by mge_attach() to unwind partial initialization.
 */
static int
mge_detach(device_t dev)
{
	struct mge_softc *sc;
	int error,i;

	sc = device_get_softc(dev);

	/* Stop controller and free TX queue */
	if (sc->ifp)
		mge_shutdown(dev);

	/* Wait for stopping ticks */
        callout_drain(&sc->wd_callout);

	/* Stop and release all interrupts */
	/* NOTE(review): attach failures can land here before the handlers
	 * were ever set up; this assumes bus_teardown_intr() tolerates an
	 * unset cookie -- confirm. */
	for (i = 0; i < 2; ++i) {
		error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
		if (error)
			device_printf(dev, "could not release %s\n",
			    mge_intrs[i].description);
	}

	/* Detach network interface */
	if (sc->ifp) {
		ether_ifdetach(sc->ifp);
		if_free(sc->ifp);
	}

	/* Free DMA resources */
	mge_free_dma(sc);

	/* Free IO memory handler */
	bus_release_resources(dev, res_spec, sc->res);

	/* Destroy mutexes */
	mtx_destroy(&sc->receive_lock);
	mtx_destroy(&sc->transmit_lock);

	return (0);
}
750 
751 static void
752 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
753 {
754 	struct mge_softc *sc = ifp->if_softc;
755 	struct mii_data *mii;
756 
757 	MGE_TRANSMIT_LOCK(sc);
758 
759 	mii = sc->mii;
760 	mii_pollstat(mii);
761 
762 	ifmr->ifm_active = mii->mii_media_active;
763 	ifmr->ifm_status = mii->mii_media_status;
764 
765 	MGE_TRANSMIT_UNLOCK(sc);
766 }
767 
768 static uint32_t
769 mge_set_port_serial_control(uint32_t media)
770 {
771 	uint32_t port_config;
772 
773 	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
774 	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
775 
776 	if (IFM_TYPE(media) == IFM_ETHER) {
777 		switch(IFM_SUBTYPE(media)) {
778 			case IFM_AUTO:
779 				break;
780 			case IFM_1000_T:
781 				port_config  |= (PORT_SERIAL_GMII_SPEED_1000 |
782 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
783 				    PORT_SERIAL_SPEED_AUTONEG);
784 				break;
785 			case IFM_100_TX:
786 				port_config  |= (PORT_SERIAL_MII_SPEED_100 |
787 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
788 				    PORT_SERIAL_SPEED_AUTONEG);
789 				break;
790 			case IFM_10_T:
791 				port_config  |= (PORT_SERIAL_AUTONEG |
792 				    PORT_SERIAL_AUTONEG_FC |
793 				    PORT_SERIAL_SPEED_AUTONEG);
794 				break;
795 		}
796 		if (media & IFM_FDX)
797 			port_config |= PORT_SERIAL_FULL_DUPLEX;
798 	}
799 	return (port_config);
800 }
801 
802 static int
803 mge_ifmedia_upd(struct ifnet *ifp)
804 {
805 	struct mge_softc *sc = ifp->if_softc;
806 
807 	if (ifp->if_flags & IFF_UP) {
808 		MGE_GLOBAL_LOCK(sc);
809 
810 		sc->mge_media_status = sc->mii->mii_media.ifm_media;
811 		mii_mediachg(sc->mii);
812 		mge_init_locked(sc);
813 
814 		MGE_GLOBAL_UNLOCK(sc);
815 	}
816 
817 	return (0);
818 }
819 
/*
 * if_init callback: run the full (re)initialization sequence with both
 * driver locks held.
 */
static void
mge_init(void *arg)
{
	struct mge_softc *sc;

	sc = arg;
	MGE_GLOBAL_LOCK(sc);
	mge_init_locked(sc);
	MGE_GLOBAL_UNLOCK(sc);
}
831 
/*
 * Core (re)initialization, with both driver locks held: stop the port,
 * program addresses and filters, configure the TX queues, SDMA and serial
 * control, hand the RX ring to the hardware, enable the port, wait for
 * link, and finally enable interrupts (unless polling) and start the
 * watchdog.
 */
static void
mge_init_locked(void *arg)
{
	struct mge_softc *sc = arg;
	struct mge_desc_wrapper *dw;
	volatile uint32_t reg_val;
	int i, count;


	MGE_GLOBAL_LOCK_ASSERT(sc);

	/* Stop interface */
	mge_stop(sc);

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Set MAC address */
	mge_set_mac_address(sc);

	/* Setup multicast filters */
	mge_setup_multicast(sc);

	/* v2 cores additionally need RGMII and fixed-priority setup. */
	if (sc->mge_ver == 2) {
		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
	}

	/* Initialize TX queue configuration registers */
	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);

	/* Clear TX queue configuration registers for unused queues */
	for (i = 1; i < 7; i++) {
		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
	}

	/* Set default MTU */
	/* sc->mge_mtu holds the version-dependent MTU register offset
	 * (set in mge_ver_params()); write the default value 0 there. */
	MGE_WRITE(sc, sc->mge_mtu, 0);

	/* Port configuration */
	MGE_WRITE(sc, MGE_PORT_CONFIG,
	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
	    PORT_CONFIG_ARO_RXQ(0));
	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0);

	/* Setup port configuration */
	reg_val = mge_set_port_serial_control(sc->mge_media_status);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);

	/* Setup SDMA configuration */
	MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP |
	    MGE_SDMA_TX_BYTE_SWAP |
	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));

	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);

	/* Point the hardware at both descriptor rings. */
	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Reset descriptor indexes */
	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;

	/* Enable RX descriptors */
	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &sc->mge_rx_desc[i];
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		dw->mge_desc->buff_size = MCLBYTES;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

	/* Enable port */
	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val |= PORT_SERIAL_ENABLE;
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
	/* Busy-wait (up to ~0x100000 * 100us) for the link to come up. */
	count = 0x100000;
	for (;;) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (reg_val & MGE_STATUS_LINKUP)
			break;
		DELAY(100);
		if (--count == 0) {
			if_printf(sc->ifp, "Timeout on link-up\n");
			break;
		}
	}

	/* Setup interrupts coalescing */
	mge_set_rxic(sc);
	mge_set_txic(sc);

	/* Enable interrupts */
#ifdef DEVICE_POLLING
        /*
	 * ...only if polling is not turned on.  Disable interrupts
	 * explicitly if polling is enabled.
	 */
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		mge_intrs_ctrl(sc, 0);
	else
#endif /* DEVICE_POLLING */
	mge_intrs_ctrl(sc, 1);

	/* Activate network interface */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->wd_timer = 0;

	/* Schedule watchdog timeout */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}
954 
955 static void
956 mge_intr_err(void *arg)
957 {
958 	struct mge_softc *sc = arg;
959 	struct ifnet *ifp;
960 
961 	ifp = sc->ifp;
962 	if_printf(ifp, "%s\n", __FUNCTION__);
963 }
964 
965 static void
966 mge_intr_misc(void *arg)
967 {
968 	struct mge_softc *sc = arg;
969 	struct ifnet *ifp;
970 
971 	ifp = sc->ifp;
972 	if_printf(ifp, "%s\n", __FUNCTION__);
973 }
974 
/*
 * RX interrupt handler: recover from RX resource errors, acknowledge the
 * RX-related interrupt causes and pass received frames up the stack.
 * Defers to mge_poll() when DEVICE_POLLING is active.
 */
static void
mge_intr_rx(void *arg) {
	struct mge_softc *sc = arg;
	uint32_t int_cause, int_cause_ext;

	MGE_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_RECEIVE_UNLOCK(sc);
		return;
	}
#endif

	/* Get interrupt cause */
	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	/* Check for resource error */
	if (int_cause & MGE_PORT_INT_RXERRQ0) {
		mge_reinit_rx(sc);
		/* Clear only the resource-error bit; keep others pending. */
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
		    int_cause & ~MGE_PORT_INT_RXERRQ0);
	}

	/* Only the RX-queue-0 and RX-overrun causes are serviced here. */
	int_cause &= MGE_PORT_INT_RXQ0;
	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;

	if (int_cause || int_cause_ext) {
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		/* -1: drain the ring until the hardware owns a descriptor. */
		mge_intr_rx_locked(sc, -1);
	}

	MGE_RECEIVE_UNLOCK(sc);
}
1011 
1012 
1013 static void
1014 mge_intr_rx_locked(struct mge_softc *sc, int count)
1015 {
1016 	struct ifnet *ifp = sc->ifp;
1017 	uint32_t status;
1018 	uint16_t bufsize;
1019 	struct mge_desc_wrapper* dw;
1020 	struct mbuf *mb;
1021 
1022 	MGE_RECEIVE_LOCK_ASSERT(sc);
1023 
1024 	while(count != 0) {
1025 		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1026 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1027 		    BUS_DMASYNC_POSTREAD);
1028 
1029 		/* Get status */
1030 		status = dw->mge_desc->cmd_status;
1031 		bufsize = dw->mge_desc->buff_size;
1032 		if ((status & MGE_DMA_OWNED) != 0)
1033 			break;
1034 
1035 		sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM);
1036 		if (dw->mge_desc->byte_count &&
1037 		    ~(status & MGE_ERR_SUMMARY)) {
1038 
1039 			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1040 			    BUS_DMASYNC_POSTREAD);
1041 
1042 			mb = m_devget(dw->buffer->m_data,
1043 			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
1044 			    0, ifp, NULL);
1045 
1046 			mb->m_len -= 2;
1047 			mb->m_pkthdr.len -= 2;
1048 			mb->m_data += 2;
1049 
1050 			mge_offload_process_frame(ifp, mb, status,
1051 			    bufsize);
1052 
1053 			MGE_RECEIVE_UNLOCK(sc);
1054 			(*ifp->if_input)(ifp, mb);
1055 			MGE_RECEIVE_LOCK(sc);
1056 		}
1057 
1058 		dw->mge_desc->byte_count = 0;
1059 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1060 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1061 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1062 
1063 		if (count > 0)
1064 			count -= 1;
1065 	}
1066 
1067 	return;
1068 }
1069 
1070 static void
1071 mge_intr_sum(void *arg)
1072 {
1073 	struct mge_softc *sc = arg;
1074 	struct ifnet *ifp;
1075 
1076 	ifp = sc->ifp;
1077 	if_printf(ifp, "%s\n", __FUNCTION__);
1078 }
1079 
1080 static void
1081 mge_intr_tx(void *arg)
1082 {
1083 	struct mge_softc *sc = arg;
1084 	uint32_t int_cause_ext;
1085 
1086 	MGE_TRANSMIT_LOCK(sc);
1087 
1088 #ifdef DEVICE_POLLING
1089 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1090 		MGE_TRANSMIT_UNLOCK(sc);
1091 		return;
1092 	}
1093 #endif
1094 
1095 	/* Ack the interrupt */
1096 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1097 	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
1098 	    int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);
1099 
1100 	mge_intr_tx_locked(sc);
1101 
1102 	MGE_TRANSMIT_UNLOCK(sc);
1103 }
1104 
1105 
1106 static void
1107 mge_intr_tx_locked(struct mge_softc *sc)
1108 {
1109 	struct ifnet *ifp = sc->ifp;
1110 	struct mge_desc_wrapper *dw;
1111 	struct mge_desc *desc;
1112 	uint32_t status;
1113 	int send = 0;
1114 
1115 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1116 
1117 	/* Disable watchdog */
1118 	sc->wd_timer = 0;
1119 
1120 	while (sc->tx_desc_used_count) {
1121 		/* Get the descriptor */
1122 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1123 		desc = dw->mge_desc;
1124 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1125 		    BUS_DMASYNC_POSTREAD);
1126 
1127 		/* Get descriptor status */
1128 		status = desc->cmd_status;
1129 
1130 		if (status & MGE_DMA_OWNED)
1131 			break;
1132 
1133 		sc->tx_desc_used_idx =
1134 			(++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM;;
1135 		sc->tx_desc_used_count--;
1136 
1137 		/* Update collision statistics */
1138 		if (status & MGE_ERR_SUMMARY) {
1139 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1140 				ifp->if_collisions++;
1141 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1142 				ifp->if_collisions += 16;
1143 		}
1144 
1145 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1146 		    BUS_DMASYNC_POSTWRITE);
1147 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1148 		m_freem(dw->buffer);
1149 		dw->buffer = (struct mbuf*)NULL;
1150 		send++;
1151 
1152 		ifp->if_opackets++;
1153 	}
1154 
1155 	if (send) {
1156 		/* Now send anything that was pending */
1157 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1158 		mge_start_locked(ifp);
1159 	}
1160 }
1161 
/*
 * Interface ioctl handler.  Handles flag changes, multicast updates,
 * capability toggles (HW checksum, polling) and media requests;
 * everything else is forwarded to ether_ioctl().
 */
static int
mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error;
	uint32_t flags;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		/* Bring the interface up/down or react to flag changes. */
		MGE_GLOBAL_LOCK(sc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Only act on flags that actually changed. */
				flags = ifp->if_flags ^ sc->mge_if_flags;
				if (flags & IFF_PROMISC)
					mge_set_prom_mode(sc,
					    MGE_RX_DEFAULT_QUEUE);

				if (flags & IFF_ALLMULTI)
					mge_setup_multicast(sc);
			} else
				mge_init_locked(sc);
		}
		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			mge_stop(sc);

		sc->mge_if_flags = ifp->if_flags;
		MGE_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Rebuild the hardware multicast filter tables. */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			MGE_GLOBAL_LOCK(sc);
			mge_setup_multicast(sc);
			MGE_GLOBAL_UNLOCK(sc);
		}
		break;
	case SIOCSIFCAP:
		/* Toggle hardware checksum offload per the request. */
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				/* Interrupts are masked while polling. */
				error = ether_poll_register(mge_poll, ifp);
				if (error)
					return(error);

				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 1);
				ifp->if_capenable &= ~IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;
	case SIOCGIFMEDIA: /* fall through */
	case SIOCSIFMEDIA:
		/* The controller does not support half-duplex gigabit. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
		    && !(ifr->ifr_media & IFM_FDX)) {
			device_printf(sc->dev,
			    "1000baseTX half-duplex unsupported\n");
			return 0;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}
1248 
1249 static int
1250 mge_miibus_readreg(device_t dev, int phy, int reg)
1251 {
1252 	uint32_t retries;
1253 
1254 	/*
1255 	 * We assume static PHY address <=> device unit mapping:
1256 	 * PHY Address = MV_PHY_ADDR_BASE + devce unit.
1257 	 * This is true for most Marvell boards.
1258 	 *
1259 	 * Code below grants proper PHY detection on each device
1260 	 * unit.
1261 	 */
1262 
1263 	if ((MV_PHY_ADDR_BASE + device_get_unit(dev)) != phy)
1264 		return (0);
1265 
1266 	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
1267 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
1268 
1269 	retries = MGE_SMI_READ_RETRIES;
1270 	while (--retries && !(MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_READVALID))
1271 		DELAY(MGE_SMI_READ_DELAY);
1272 
1273 	if (retries == 0)
1274 		device_printf(dev, "Timeout while reading from PHY\n");
1275 
1276 	return (MGE_READ(sc_mge0, MGE_REG_SMI) & 0xffff);
1277 }
1278 
1279 static void
1280 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1281 {
1282 	uint32_t retries;
1283 
1284 	if ((MV_PHY_ADDR_BASE + device_get_unit(dev)) != phy)
1285 		return;
1286 
1287 	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
1288 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));
1289 
1290 	retries = MGE_SMI_WRITE_RETRIES;
1291 	while (--retries && MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_BUSY)
1292 		DELAY(MGE_SMI_WRITE_DELAY);
1293 
1294 	if (retries == 0)
1295 		device_printf(dev, "Timeout while writing to PHY\n");
1296 }
1297 
1298 static int
1299 mge_probe(device_t dev)
1300 {
1301 
1302 	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1303 	return (BUS_PROBE_DEFAULT);
1304 }
1305 
1306 static int
1307 mge_resume(device_t dev)
1308 {
1309 
1310 	device_printf(dev, "%s\n", __FUNCTION__);
1311 	return (0);
1312 }
1313 
1314 static int
1315 mge_shutdown(device_t dev)
1316 {
1317 	struct mge_softc *sc = device_get_softc(dev);
1318 
1319 	MGE_GLOBAL_LOCK(sc);
1320 
1321 #ifdef DEVICE_POLLING
1322         if (sc->ifp->if_capenable & IFCAP_POLLING)
1323 		ether_poll_deregister(sc->ifp);
1324 #endif
1325 
1326 	mge_stop(sc);
1327 
1328 	MGE_GLOBAL_UNLOCK(sc);
1329 
1330 	return (0);
1331 }
1332 
1333 static int
1334 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1335 {
1336 	struct mge_desc_wrapper *dw = NULL;
1337 	struct ifnet *ifp;
1338 	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1339 	bus_dmamap_t mapp;
1340 	int error;
1341 	int seg, nsegs;
1342 	int desc_no;
1343 
1344 	ifp = sc->ifp;
1345 
1346 	/* Check for free descriptors */
1347 	if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1348 		/* No free descriptors */
1349 		return (-1);
1350 	}
1351 
1352 	/* Fetch unused map */
1353 	desc_no = sc->tx_desc_curr;
1354 	dw = &sc->mge_tx_desc[desc_no];
1355 	mapp = dw->buffer_dmap;
1356 
1357 	bus_dmamap_sync(sc->mge_desc_dtag, mapp,
1358 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1359 
1360 	/* Create mapping in DMA memory */
1361 	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1362 	    BUS_DMA_NOWAIT);
1363 	if (error != 0 || nsegs != 1 ) {
1364 		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1365 		return ((error != 0) ? error : -1);
1366 	}
1367 
1368 	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1369 
1370 	/* Everything is ok, now we can send buffers */
1371 	for (seg = 0; seg < nsegs; seg++) {
1372 		dw->mge_desc->byte_count = segs[seg].ds_len;
1373 		dw->mge_desc->buffer = segs[seg].ds_addr;
1374 		dw->buffer = m0;
1375 		dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
1376 		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1377 		    MGE_DMA_OWNED;
1378 
1379 		if (seg == 0)
1380 			mge_offload_setup_descriptor(sc, dw);
1381 	}
1382 
1383 	bus_dmamap_sync(sc->mge_desc_dtag, mapp,
1384 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1385 
1386 	sc->tx_desc_curr = (++sc->tx_desc_curr) % MGE_TX_DESC_NUM;
1387 	sc->tx_desc_used_count++;
1388 	return (0);
1389 }
1390 
1391 static void
1392 mge_tick(void *msc)
1393 {
1394 	struct mge_softc *sc = msc;
1395 
1396 	/* Check for TX timeout */
1397 	mge_watchdog(sc);
1398 
1399 	mii_tick(sc->mii);
1400 
1401 	/* Check for media type change */
1402 	if(sc->mge_media_status != sc->mii->mii_media.ifm_media)
1403 		mge_ifmedia_upd(sc->ifp);
1404 
1405 	/* Schedule another timeout one second from now */
1406 	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1407 }
1408 
1409 static void
1410 mge_watchdog(struct mge_softc *sc)
1411 {
1412 	struct ifnet *ifp;
1413 
1414 	ifp = sc->ifp;
1415 
1416 	MGE_GLOBAL_LOCK(sc);
1417 
1418 	if (sc->wd_timer == 0 || --sc->wd_timer) {
1419 		MGE_GLOBAL_UNLOCK(sc);
1420 		return;
1421 	}
1422 
1423 	ifp->if_oerrors++;
1424 	if_printf(ifp, "watchdog timeout\n");
1425 
1426 	mge_stop(sc);
1427 	mge_init_locked(sc);
1428 
1429 	MGE_GLOBAL_UNLOCK(sc);
1430 }
1431 
1432 static void
1433 mge_start(struct ifnet *ifp)
1434 {
1435 	struct mge_softc *sc = ifp->if_softc;
1436 
1437 	MGE_TRANSMIT_LOCK(sc);
1438 
1439 	mge_start_locked(ifp);
1440 
1441 	MGE_TRANSMIT_UNLOCK(sc);
1442 }
1443 
1444 static void
1445 mge_start_locked(struct ifnet *ifp)
1446 {
1447 	struct mge_softc *sc;
1448 	struct mbuf *m0, *mtmp;
1449 	uint32_t reg_val, queued = 0;
1450 
1451 	sc = ifp->if_softc;
1452 
1453 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1454 
1455 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1456 	    IFF_DRV_RUNNING)
1457 		return;
1458 
1459 	for (;;) {
1460 		/* Get packet from the queue */
1461 		IF_DEQUEUE(&ifp->if_snd, m0);
1462 		if (m0 == NULL)
1463 			break;
1464 
1465 		mtmp = m_defrag(m0, M_DONTWAIT);
1466 		if (mtmp)
1467 			m0 = mtmp;
1468 
1469 		if (mge_encap(sc, m0)) {
1470 			IF_PREPEND(&ifp->if_snd, m0);
1471 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1472 			break;
1473 		}
1474 		queued++;
1475 		BPF_MTAP(ifp, m0);
1476 	}
1477 
1478 	if (queued) {
1479 		/* Enable transmitter and watchdog timer */
1480 		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1481 		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
1482 		sc->wd_timer = 5;
1483 	}
1484 }
1485 
1486 static void
1487 mge_stop(struct mge_softc *sc)
1488 {
1489 	struct ifnet *ifp;
1490 	volatile uint32_t reg_val, status;
1491 	struct mge_desc_wrapper *dw;
1492 	struct mge_desc *desc;
1493 	int count;
1494 
1495 	ifp = sc->ifp;
1496 
1497 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1498 		return;
1499 
1500 	/* Stop tick engine */
1501 	callout_stop(&sc->wd_callout);
1502 
1503 	/* Disable interface */
1504 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1505 	sc->wd_timer = 0;
1506 
1507 	/* Disable interrupts */
1508 	mge_intrs_ctrl(sc, 0);
1509 
1510 	/* Disable Rx and Tx */
1511 	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1512 	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1513 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1514 
1515 	/* Remove pending data from TX queue */
1516 	while (sc->tx_desc_used_idx < sc->tx_desc_curr) {
1517 		/* Get the descriptor */
1518 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1519 		desc = dw->mge_desc;
1520 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1521 		    BUS_DMASYNC_POSTREAD);
1522 
1523 		/* Get descriptor status */
1524 		status = desc->cmd_status;
1525 
1526 		if (status & MGE_DMA_OWNED)
1527 			break;
1528 
1529 		sc->tx_desc_used_idx = (++sc->tx_desc_used_idx) %
1530 		    MGE_TX_DESC_NUM;
1531 
1532 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1533 		    BUS_DMASYNC_POSTWRITE);
1534 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1535 
1536 		m_freem(dw->buffer);
1537 		dw->buffer = (struct mbuf*)NULL;
1538 	}
1539 
1540 	/* Wait for end of transmission */
1541 	count = 0x100000;
1542 	while (count--) {
1543 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1544 		if ( !(reg_val & MGE_STATUS_TX_IN_PROG) &&
1545 		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1546 			break;
1547 		DELAY(100);
1548 	}
1549 
1550 	if(!count)
1551 		if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
1552 		    __FUNCTION__);
1553 
1554 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1555 	reg_val &= ~(PORT_SERIAL_ENABLE);
1556 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL ,reg_val);
1557 }
1558 
1559 static int
1560 mge_suspend(device_t dev)
1561 {
1562 
1563 	device_printf(dev, "%s\n", __FUNCTION__);
1564 	return (0);
1565 }
1566 
1567 static void
1568 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1569     uint32_t status, uint16_t bufsize)
1570 {
1571 	int csum_flags = 0;
1572 
1573 	if (ifp->if_capenable & IFCAP_RXCSUM) {
1574 		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1575 			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
1576 
1577 		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1578 		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1579 		    (status & MGE_RX_L4_CSUM_OK)) {
1580 			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1581 			frame->m_pkthdr.csum_data = 0xFFFF;
1582 		}
1583 
1584 		frame->m_pkthdr.csum_flags = csum_flags;
1585 	}
1586 }
1587 
/*
 * Configure the TX descriptor's checksum offload bits for the frame
 * in dw->buffer, based on the mbuf's csum_flags.  Only IPv4 frames
 * are supported; anything else just logs a complaint.
 */
static void
mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
{
	struct mbuf *m0 = dw->buffer;
	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
	int csum_flags = m0->m_pkthdr.csum_flags;
	int cmd_status = 0;
	struct ip *ip;
	int ehlen, etype;

	if (csum_flags) {
		/* Account for an 802.1Q tag when locating the IP header. */
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			etype = ntohs(eh->evl_proto);
			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			/*
			 * NOTE(review): MGE_TX_VLAN_TAGGED is OR'd into
			 * csum_flags rather than cmd_status, so it never
			 * reaches the descriptor — confirm intent against
			 * the datasheet.
			 */
			csum_flags |= MGE_TX_VLAN_TAGGED;
		} else {
			etype = ntohs(eh->evl_encap_proto);
			ehlen = ETHER_HDR_LEN;
		}

		if (etype != ETHERTYPE_IP) {
			if_printf(sc->ifp,
			    "TCP/IP Offload enabled for unsupported "
			    "protocol!\n");
			return;
		}

		/* Tell the hardware where the L4 header starts. */
		ip = (struct ip *)(m0->m_data + ehlen);
		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);

		if ((m0->m_flags & M_FRAG) == 0)
			cmd_status |= MGE_TX_NOT_FRAGMENT;
	}

	/* Request hardware checksum generation as needed. */
	if (csum_flags & CSUM_IP)
		cmd_status |= MGE_TX_GEN_IP_CSUM;

	if (csum_flags & CSUM_TCP)
		cmd_status |= MGE_TX_GEN_L4_CSUM;

	if (csum_flags & CSUM_UDP)
		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;

	dw->mge_desc->cmd_status |= cmd_status;
}
1633 
1634 static void
1635 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1636 {
1637 
1638 	if (enable) {
1639 		MGE_WRITE(sc, MGE_PORT_INT_MASK , MGE_PORT_INT_RXQ0 |
1640 		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1641 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , MGE_PORT_INT_EXT_TXERR0 |
1642 		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1643 		    MGE_PORT_INT_EXT_TXBUF0);
1644 	} else {
1645 		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1646 		MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1647 
1648 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1649 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1650 
1651 		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1652 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
1653 	}
1654 }
1655 
/*
 * CRC-8 over 'size' bytes of 'data': polynomial x^8 + x^2 + x + 1
 * (0x07), MSB first, initial value 0.  Computed bitwise; produces
 * exactly the same values as the usual 256-entry lookup table.
 */
static uint8_t
mge_crc8(uint8_t *data, int size)
{
	uint8_t crc = 0;
	int bit;

	while (size-- > 0) {
		crc ^= *data++;
		for (bit = 0; bit < 8; bit++) {
			if (crc & 0x80)
				crc = (crc << 1) ^ 0x07;
			else
				crc <<= 1;
		}
	}

	return (crc);
}
1700 
/*
 * Program the hardware multicast filter tables.  Addresses of the
 * form 01:00:5E:00:00:xx go into the "special" table indexed by the
 * last byte; all other multicast addresses go into the "other" table
 * indexed by a CRC-8 of the full address.  With IFF_ALLMULTI both
 * tables are filled so every multicast frame is accepted.
 */
static void
mge_setup_multicast(struct mge_softc *sc)
{
	uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
	/* Table entry: accept + RX queue number in the low bits. */
	uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
	uint32_t smt[MGE_MCAST_REG_NUMBER];
	uint32_t omt[MGE_MCAST_REG_NUMBER];
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	uint8_t *mac;
	int i;

	if (ifp->if_flags & IFF_ALLMULTI) {
		/* Accept everything: replicate v into all four bytes. */
		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
			smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
	} else {
		memset(smt, 0, sizeof(smt));
		memset(omt, 0, sizeof(omt));

		/* Walk the interface multicast list under its lock. */
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;

			mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			if (memcmp(mac, special, sizeof(special)) == 0) {
				/* Special table: index is the last octet. */
				i = mac[5];
				smt[i >> 2] |= v << ((i & 0x03) << 3);
			} else {
				/* Other table: index is CRC-8 of the MAC. */
				i = mge_crc8(mac, ETHER_ADDR_LEN);
				omt[i >> 2] |= v << ((i & 0x03) << 3);
			}
		}
		IF_ADDR_UNLOCK(ifp);
	}

	/* Write both tables to the hardware. */
	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
	}
}
1742 
1743 static void
1744 mge_set_rxic(struct mge_softc *sc)
1745 {
1746 	uint32_t reg;
1747 
1748 	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
1749 		sc->rx_ic_time = sc->mge_rx_ipg_max;
1750 
1751 	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
1752 	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
1753 	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
1754 	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
1755 }
1756 
1757 static void
1758 mge_set_txic(struct mge_softc *sc)
1759 {
1760 	uint32_t reg;
1761 
1762 	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
1763 		sc->tx_ic_time = sc->mge_tfut_ipg_max;
1764 
1765 	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
1766 	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
1767 	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
1768 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
1769 }
1770 
1771 static int
1772 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
1773 {
1774 	struct mge_softc *sc = (struct mge_softc *)arg1;
1775 	uint32_t time;
1776 	int error;
1777 
1778 	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
1779 	error = sysctl_handle_int(oidp, &time, 0, req);
1780 	if (error != 0)
1781 		return(error);
1782 
1783 	MGE_GLOBAL_LOCK(sc);
1784 	if (arg2 == MGE_IC_RX) {
1785 		sc->rx_ic_time = time;
1786 		mge_set_rxic(sc);
1787 	} else {
1788 		sc->tx_ic_time = time;
1789 		mge_set_txic(sc);
1790 	}
1791 	MGE_GLOBAL_UNLOCK(sc);
1792 
1793 	return(0);
1794 }
1795 
1796 static void
1797 mge_add_sysctls(struct mge_softc *sc)
1798 {
1799 	struct sysctl_ctx_list *ctx;
1800 	struct sysctl_oid_list *children;
1801 	struct sysctl_oid *tree;
1802 
1803 	ctx = device_get_sysctl_ctx(sc->dev);
1804 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1805 	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
1806 	    CTLFLAG_RD, 0, "MGE Interrupts coalescing");
1807 	children = SYSCTL_CHILDREN(tree);
1808 
1809 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
1810 	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
1811 	    "I", "IC RX time threshold");
1812 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
1813 	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
1814 	    "I", "IC TX time threshold");
1815 }
1816