xref: /freebsd/sys/dev/mge/if_mge.c (revision 830940567b49bb0c08dfaed40418999e76616909)
/*-
 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#define	MV_PHY_ADDR_BASE	8

#include <dev/mge/if_mgevar.h>
#include <arm/mv/mvreg.h>
#include <arm/mv/mvvar.h>

#include "miibus_if.h"

/* PHY registers are in the address space of the first mge unit */
static struct mge_softc *sc_mge0 = NULL;

static int mge_probe(device_t dev);
static int mge_attach(device_t dev);
static int mge_detach(device_t dev);
static int mge_shutdown(device_t dev);
static int mge_suspend(device_t dev);
static int mge_resume(device_t dev);

static int mge_miibus_readreg(device_t dev, int phy, int reg);
static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);

static int mge_ifmedia_upd(struct ifnet *ifp);
static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

static void mge_init(void *arg);
static void mge_init_locked(void *arg);
static void mge_start(struct ifnet *ifp);
static void mge_start_locked(struct ifnet *ifp);
static void mge_watchdog(struct mge_softc *sc);
static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);

static uint32_t mge_tfut_ipg(uint32_t val, int ver);
static uint32_t mge_rx_ipg(uint32_t val, int ver);
static void mge_ver_params(struct mge_softc *sc);

static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
static void mge_intr_rx(void *arg);
static int mge_intr_rx_locked(struct mge_softc *sc, int count);
static void mge_intr_tx(void *arg);
static void mge_intr_tx_locked(struct mge_softc *sc);
static void mge_intr_misc(void *arg);
static void mge_intr_sum(void *arg);
static void mge_intr_err(void *arg);
static void mge_stop(struct mge_softc *sc);
static void mge_tick(void *msc);
static uint32_t mge_set_port_serial_control(uint32_t media);
static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
static void mge_set_mac_address(struct mge_softc *sc);
static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
    uint8_t queue);
static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
static int mge_allocate_dma(struct mge_softc *sc);
static int mge_alloc_desc_dma(struct mge_softc *sc,
    struct mge_desc_wrapper* desc_tab, uint32_t size,
    bus_dma_tag_t *buffer_tag);
static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, bus_addr_t *paddr);
static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void mge_free_dma(struct mge_softc *sc);
static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize);
static void mge_offload_setup_descriptor(struct mge_softc *sc,
    struct mge_desc_wrapper *dw);
static uint8_t mge_crc8(uint8_t *data, int size);
static void mge_setup_multicast(struct mge_softc *sc);
static void mge_set_rxic(struct mge_softc *sc);
static void mge_set_txic(struct mge_softc *sc);
static void mge_add_sysctls(struct mge_softc *sc);
static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);

static device_method_t mge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mge_probe),
	DEVMETHOD(device_attach,	mge_attach),
	DEVMETHOD(device_detach,	mge_detach),
	DEVMETHOD(device_shutdown,	mge_shutdown),
	DEVMETHOD(device_suspend,	mge_suspend),
	DEVMETHOD(device_resume,	mge_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
	{ 0, 0 }
};

static driver_t mge_driver = {
	"mge",
	mge_methods,
	sizeof(struct mge_softc),
};

static devclass_t mge_devclass;

DRIVER_MODULE(mge, mbus, mge_driver, mge_devclass, 0, 0);
DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(mge, ether, 1, 1, 1);
MODULE_DEPEND(mge, miibus, 1, 1, 1);

static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

static struct {
	driver_intr_t *handler;
	char *description;
} mge_intrs[MGE_INTR_COUNT] = {
	{ mge_intr_rx,	"GbE receive interrupt" },
	{ mge_intr_tx,	"GbE transmit interrupt" },
	{ mge_intr_misc, "GbE misc interrupt" },
	{ mge_intr_sum,	"GbE summary interrupt" },
	{ mge_intr_err,	"GbE error interrupt" },
};

static void
mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
{
	uint32_t mac_l, mac_h;

	/* XXX use currently programmed MAC address; eventually this info will
	 * be provided by the loader */

	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);

	addr[0] = (mac_h & 0xff000000) >> 24;
	addr[1] = (mac_h & 0x00ff0000) >> 16;
	addr[2] = (mac_h & 0x0000ff00) >> 8;
	addr[3] = (mac_h & 0x000000ff);
	addr[4] = (mac_l & 0x0000ff00) >> 8;
	addr[5] = (mac_l & 0x000000ff);
}
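
/*
 * Example (hypothetical address): for 00:50:43:12:34:56 the unpacking
 * above expects MGE_MAC_ADDR_H == 0x00504312 (bytes 0-3, most
 * significant first) and MGE_MAC_ADDR_L == 0x00003456 (bytes 4-5 in the
 * low 16 bits).
 */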

static uint32_t
mge_tfut_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 4);
	case 2:
	default:
		return ((val & 0xffff) << 4);
	}
}

static uint32_t
mge_rx_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 8);
	case 2:
	default:
		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
	}
}
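
/*
 * Worked example for the ver 2 RX case above: for val == 0x8001 the
 * result is (0x8000 << 10) | (0x0001 << 7) == 0x02000080, i.e. bit 15
 * of the coalescing value lands in bit 25 of MGE_SDMA_CONFIG and the
 * low 15 bits land in bits 21:7.
 */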

static void
mge_ver_params(struct mge_softc *sc)
{
	uint32_t d, r;

	soc_id(&d, &r);
	if (d == MV_DEV_88F6281 || d == MV_DEV_MV78100 ||
	    d == MV_DEV_MV78100_Z0) {
		sc->mge_ver = 2;
		sc->mge_mtu = 0x4e8;
		sc->mge_tfut_ipg_max = 0xFFFF;
		sc->mge_rx_ipg_max = 0xFFFF;
		sc->mge_tx_arb_cfg = 0xFC0000FF;
		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	} else {
		sc->mge_ver = 1;
		sc->mge_mtu = 0x458;
		sc->mge_tfut_ipg_max = 0x3FFF;
		sc->mge_rx_ipg_max = 0x3FFF;
		sc->mge_tx_arb_cfg = 0x000000FF;
		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	}
}

static void
mge_set_mac_address(struct mge_softc *sc)
{
	uint8_t *if_mac;
	uint32_t mac_l, mac_h;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	if_mac = (uint8_t *)IF_LLADDR(sc->ifp);

	mac_l = (if_mac[4] << 8) | (if_mac[5]);
	mac_h = (if_mac[0] << 24) | (if_mac[1] << 16) |
	    (if_mac[2] << 8) | (if_mac[3] << 0);

	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);

	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
}

static void
mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
{
	uint32_t reg_idx, reg_off, reg_val, i;

	last_byte &= 0xf;
	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
	reg_val = (1 | (queue << 1)) << reg_off;

	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
		if (i == reg_idx)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
		else
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
	}
}
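
/*
 * Example of the indexing above, assuming MGE_UCAST_REG_NUMBER == 4:
 * for a MAC address ending in 0x25, last_byte & 0xf == 5, so
 * reg_idx == 1 and reg_off == 8, and the enable bit plus queue number
 * are written into byte 1 of MGE_DA_FILTER_UCAST(1).
 */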

static void
mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
{
	uint32_t port_config;
	uint32_t reg_val, i;

	/* Enable or disable promiscuous mode as needed */
	if (sc->ifp->if_flags & IFF_PROMISC) {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config |= PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
		    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
		}

		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);

	} else {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config &= ~PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
		}

		mge_set_mac_address(sc);
	}
}

static void
mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;

	*paddr = segs->ds_addr;
}

static int
mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    bus_addr_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error;
	int nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (nsegs != 1 || error)
		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}

static int
mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t *buffer_tag)
{
	struct mge_desc_wrapper *dw;
	bus_addr_t desc_paddr;
	int i, error;

	desc_paddr = 0;
	for (i = size - 1; i >= 0; i--) {
		dw = &(tab[i]);
		error = bus_dmamem_alloc(sc->mge_desc_dtag,
		    (void**)&(dw->mge_desc),
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &(dw->desc_dmap));

		if (error) {
			device_printf(sc->dev,
			    "failed to allocate DMA memory\n");
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);

		if (error) {
			device_printf(sc->dev, "can't load descriptor\n");
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		/* Chain descriptors */
		dw->mge_desc->next_desc = desc_paddr;
		desc_paddr = dw->mge_desc_paddr;
	}
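
	/*
	 * The backward pass above leaves desc_paddr holding the physical
	 * address of the first descriptor, so the assignment below links
	 * the last descriptor back to the first one and closes the ring.
	 */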
	tab[size - 1].mge_desc->next_desc = desc_paddr;

	/* Allocate a busdma tag for mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    8, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    buffer_tag);			/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	/* Create busdma maps for mbufs (used by both the TX and RX rings) */
	for (i = 0; i < size; i++) {
		dw = &(tab[i]);
		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
		if (error) {
			device_printf(sc->dev,
			    "failed to create map for mbuf\n");
			return (ENXIO);
		}

		dw->buffer = (struct mbuf*)NULL;
		dw->mge_desc->buffer = (bus_addr_t)NULL;
	}

	return (0);
}

static int
mge_allocate_dma(struct mge_softc *sc)
{
	struct mge_desc_wrapper *dw;
	int error, i;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    16, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->mge_desc_dtag);		/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create descriptor dma tag\n");
		return (ENXIO);
	}

	error = mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
	    &sc->mge_tx_dtag);
	if (error)
		return (error);
	error = mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);
	if (error)
		return (error);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		error = mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap,
		    &dw->buffer, &dw->mge_desc->buffer);
		if (error)
			return (error);
	}

	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;

	return (0);
}

static void
mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
{
	struct mge_desc_wrapper *dw;
	int i;

	for (i = 0; i < size; i++) {
		/* Free buffer mbuf, if requested */
		dw = &(tab[i]);

		if (dw->buffer_dmap) {
			if (free_mbufs) {
				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
			}
			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
			if (free_mbufs)
				m_freem(dw->buffer);
		}
		/* Free descriptor memory */
		if (dw->desc_dmap) {
			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
		}
	}
}

static void
mge_free_dma(struct mge_softc *sc)
{
	/* Free descriptors and mbufs */
	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);

	/* Destroy mbuf dma tags */
	bus_dma_tag_destroy(sc->mge_tx_dtag);
	bus_dma_tag_destroy(sc->mge_rx_dtag);
	/* Destroy descriptors tag */
	bus_dma_tag_destroy(sc->mge_desc_dtag);
}

static void
mge_reinit_rx(struct mge_softc *sc)
{
	struct mge_desc_wrapper *dw;
	int i;

	MGE_RECEIVE_LOCK(sc);

	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);

	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
	sc->rx_desc_curr = 0;

	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

	MGE_RECEIVE_UNLOCK(sc);
}

#ifdef DEVICE_POLLING
static poll_handler_t mge_poll;

static int
mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct mge_softc *sc = ifp->if_softc;
	uint32_t int_cause, int_cause_ext;
	int rx_npkts = 0;

	MGE_GLOBAL_LOCK(sc);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		MGE_GLOBAL_UNLOCK(sc);
		return (rx_npkts);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

		/* Check for resource error */
		if (int_cause & MGE_PORT_INT_RXERRQ0)
			mge_reinit_rx(sc);

		if (int_cause || int_cause_ext) {
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		}
	}

	mge_intr_tx_locked(sc);
	rx_npkts = mge_intr_rx_locked(sc, count);

	MGE_GLOBAL_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static int
mge_attach(device_t dev)
{
	struct mge_softc *sc;
	struct ifnet *ifp;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	int i, error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	if (device_get_unit(dev) == 0)
		sc_mge0 = sc;

	/* Set chip version-dependent parameters */
	mge_ver_params(sc);

	/* Initialize mutexes */
	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
	    MTX_DEF);
	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
	    MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		mge_detach(dev);
		return (ENXIO);
	}

	/* Allocate DMA, buffers, buffer descriptors */
	error = mge_allocate_dma(sc);
	if (error) {
		mge_detach(dev);
		return (ENXIO);
	}

	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Configure defaults for interrupts coalescing */
	sc->rx_ic_time = 768;
	sc->tx_ic_time = 768;
	mge_add_sysctls(sc);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		mge_detach(dev);
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = MGE_CHECKSUM_FEATURES;

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ifp->if_init = mge_init;
	ifp->if_start = mge_start;
	ifp->if_ioctl = mge_ioctl;

	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	mge_get_mac_address(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);
	callout_init(&sc->wd_callout, 0);

	/* Probe PHY(s) */
	error = mii_phy_probe(dev, &sc->miibus, mge_ifmedia_upd, mge_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII failed to find PHY\n");
		if_free(ifp);
		sc->ifp = NULL;
		mge_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Attach interrupt handlers */
	for (i = 0; i < 2; ++i) {
		error = bus_setup_intr(dev, sc->res[1 + i],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
		    sc, &sc->ih_cookie[i]);
		if (error) {
			device_printf(dev, "could not setup %s\n",
			    mge_intrs[i].description);
			ether_ifdetach(sc->ifp);
			return (error);
		}
	}

	return (0);
}

static int
mge_detach(device_t dev)
{
	struct mge_softc *sc;
	int error, i;

	sc = device_get_softc(dev);

	/* Stop controller and free TX queue */
	if (sc->ifp)
		mge_shutdown(dev);

	/* Wait for the tick callout to stop */
	callout_drain(&sc->wd_callout);

	/* Stop and release all interrupts */
	for (i = 0; i < 2; ++i) {
		error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
		if (error)
			device_printf(dev, "could not release %s\n",
			    mge_intrs[i].description);
	}

	/* Detach network interface */
	if (sc->ifp) {
		ether_ifdetach(sc->ifp);
		if_free(sc->ifp);
	}

	/* Free DMA resources */
	mge_free_dma(sc);

	/* Release IO memory and IRQ resources */
	bus_release_resources(dev, res_spec, sc->res);

	/* Destroy mutexes */
	mtx_destroy(&sc->receive_lock);
	mtx_destroy(&sc->transmit_lock);

	return (0);
}

static void
mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mge_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	MGE_TRANSMIT_LOCK(sc);

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	MGE_TRANSMIT_UNLOCK(sc);
}

static uint32_t
mge_set_port_serial_control(uint32_t media)
{
	uint32_t port_config;

	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);

	if (IFM_TYPE(media) == IFM_ETHER) {
		switch (IFM_SUBTYPE(media)) {
			case IFM_AUTO:
				break;
			case IFM_1000_T:
				port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
				    PORT_SERIAL_SPEED_AUTONEG);
				break;
			case IFM_100_TX:
				port_config |= (PORT_SERIAL_MII_SPEED_100 |
				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
				    PORT_SERIAL_SPEED_AUTONEG);
				break;
			case IFM_10_T:
				port_config |= (PORT_SERIAL_AUTONEG |
				    PORT_SERIAL_AUTONEG_FC |
				    PORT_SERIAL_SPEED_AUTONEG);
				break;
		}
		if (media & IFM_FDX)
			port_config |= PORT_SERIAL_FULL_DUPLEX;
	}
	return (port_config);
}

static int
mge_ifmedia_upd(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP) {
		MGE_GLOBAL_LOCK(sc);

		sc->mge_media_status = sc->mii->mii_media.ifm_media;
		mii_mediachg(sc->mii);
		mge_init_locked(sc);

		MGE_GLOBAL_UNLOCK(sc);
	}

	return (0);
}

static void
mge_init(void *arg)
{
	struct mge_softc *sc = arg;

	MGE_GLOBAL_LOCK(sc);

	mge_init_locked(arg);

	MGE_GLOBAL_UNLOCK(sc);
}

static void
mge_init_locked(void *arg)
{
	struct mge_softc *sc = arg;
	struct mge_desc_wrapper *dw;
	volatile uint32_t reg_val;
	int i, count;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	/* Stop interface */
	mge_stop(sc);

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Set MAC address */
	mge_set_mac_address(sc);

	/* Setup multicast filters */
	mge_setup_multicast(sc);

	if (sc->mge_ver == 2) {
		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
	}

	/* Initialize TX queue configuration registers */
	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);

	/* Clear TX queue configuration registers for unused queues */
	for (i = 1; i < 7; i++) {
		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
	}

	/* Set default MTU (sc->mge_mtu holds the version-dependent register
	 * offset, not an MTU value) */
	MGE_WRITE(sc, sc->mge_mtu, 0);

	/* Port configuration */
	MGE_WRITE(sc, MGE_PORT_CONFIG,
	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
	    PORT_CONFIG_ARO_RXQ(0));
	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);

	/* Setup port serial control according to the configured media */
	reg_val = mge_set_port_serial_control(sc->mge_media_status);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);

	/* Setup SDMA configuration */
	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
	    MGE_SDMA_TX_BYTE_SWAP |
	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));

	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);

	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Reset descriptor indexes */
	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Enable RX descriptors */
	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &sc->mge_rx_desc[i];
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		dw->mge_desc->buff_size = MCLBYTES;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

	/* Enable port */
	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val |= PORT_SERIAL_ENABLE;
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
	count = 0x100000;
	for (;;) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (reg_val & MGE_STATUS_LINKUP)
			break;
		DELAY(100);
		if (--count == 0) {
			if_printf(sc->ifp, "Timeout on link-up\n");
			break;
		}
	}

	/* Setup interrupts coalescing */
	mge_set_rxic(sc);
	mge_set_txic(sc);

	/* Enable interrupts */
#ifdef DEVICE_POLLING
	/*
	 * ...only if polling is not turned on. Disable interrupts explicitly
	 * if polling is enabled.
	 */
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		mge_intrs_ctrl(sc, 0);
	else
#endif /* DEVICE_POLLING */
	mge_intrs_ctrl(sc, 1);

	/* Activate network interface */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->wd_timer = 0;

	/* Schedule watchdog timeout */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_intr_err(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_misc(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_rx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause, int_cause_ext;

	MGE_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_RECEIVE_UNLOCK(sc);
		return;
	}
#endif

	/* Get interrupt cause */
	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	/* Check for resource error */
	if (int_cause & MGE_PORT_INT_RXERRQ0) {
		mge_reinit_rx(sc);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
		    int_cause & ~MGE_PORT_INT_RXERRQ0);
	}

	int_cause &= MGE_PORT_INT_RXQ0;
	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;

	if (int_cause || int_cause_ext) {
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		mge_intr_rx_locked(sc, -1);
	}

	MGE_RECEIVE_UNLOCK(sc);
}

static int
mge_intr_rx_locked(struct mge_softc *sc, int count)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t status;
	uint16_t bufsize;
	struct mge_desc_wrapper* dw;
	struct mbuf *mb;
	int rx_npkts = 0;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	while (count != 0) {
		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get status */
		status = dw->mge_desc->cmd_status;
		bufsize = dw->mge_desc->buff_size;
		if ((status & MGE_DMA_OWNED) != 0)
			break;

		if (dw->mge_desc->byte_count &&
		    !(status & MGE_ERR_SUMMARY)) {

			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
			    BUS_DMASYNC_POSTREAD);

			mb = m_devget(dw->buffer->m_data,
			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
			    0, ifp, NULL);

			if (mb == NULL)
				/* Give up if no mbufs */
				break;

			mb->m_len -= 2;
			mb->m_pkthdr.len -= 2;
			mb->m_data += 2;

			mge_offload_process_frame(ifp, mb, status,
			    bufsize);

			MGE_RECEIVE_UNLOCK(sc);
			(*ifp->if_input)(ifp, mb);
			MGE_RECEIVE_LOCK(sc);
			rx_npkts++;
		}

		dw->mge_desc->byte_count = 0;
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (count > 0)
			count -= 1;
	}

	return (rx_npkts);
}

static void
mge_intr_sum(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_tx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause_ext;

	MGE_TRANSMIT_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_TRANSMIT_UNLOCK(sc);
		return;
	}
#endif

	/* Ack the interrupt */
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
	    int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);

	mge_intr_tx_locked(sc);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_intr_tx_locked(struct mge_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	uint32_t status;
	int send = 0;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	/* Disable watchdog */
	sc->wd_timer = 0;

	while (sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx =
		    (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		/* Update collision statistics */
		if (status & MGE_ERR_SUMMARY) {
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
				ifp->if_collisions++;
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
				ifp->if_collisions += 16;
		}

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
		send++;

		ifp->if_opackets++;
	}

	if (send) {
		/* Now send anything that was pending */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		mge_start_locked(ifp);
	}
}

static int
mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error;
	uint32_t flags;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		MGE_GLOBAL_LOCK(sc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = ifp->if_flags ^ sc->mge_if_flags;
				if (flags & IFF_PROMISC)
					mge_set_prom_mode(sc,
					    MGE_RX_DEFAULT_QUEUE);

				if (flags & IFF_ALLMULTI)
					mge_setup_multicast(sc);
			} else
				mge_init_locked(sc);
		}
		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			mge_stop(sc);

		sc->mge_if_flags = ifp->if_flags;
		MGE_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			MGE_GLOBAL_LOCK(sc);
			mge_setup_multicast(sc);
			MGE_GLOBAL_UNLOCK(sc);
		}
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(mge_poll, ifp);
				if (error)
					return (error);

				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 1);
				ifp->if_capenable &= ~IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;
	case SIOCGIFMEDIA: /* fall through */
	case SIOCSIFMEDIA:
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
		    && !(ifr->ifr_media & IFM_FDX)) {
			device_printf(sc->dev,
			    "1000baseT half-duplex unsupported\n");
			return (0);
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

static int
mge_miibus_readreg(device_t dev, int phy, int reg)
{
	uint32_t retries;

	/*
	 * We assume a static PHY address <=> device unit mapping:
	 * PHY Address = MV_PHY_ADDR_BASE + device unit.
	 * This is true for most Marvell boards.
	 *
	 * The check below makes each device unit answer only for its
	 * own PHY, so that every unit is detected properly.
	 */

	if ((MV_PHY_ADDR_BASE + device_get_unit(dev)) != phy)
		return (0);

	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));

	retries = MGE_SMI_READ_RETRIES;
	while (--retries && !(MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_READVALID))
		DELAY(MGE_SMI_READ_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while reading from PHY\n");

	return (MGE_READ(sc_mge0, MGE_REG_SMI) & 0xffff);
}
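
/*
 * Layout of the SMI command word assumed by the two accessors here, as
 * inferred from the shifts above: bits 25:21 carry the PHY register
 * number, bits 20:16 the PHY address, and the low 16 bits the data for
 * a write; the opcode/status bits (MGE_SMI_READ, MGE_SMI_WRITE,
 * MGE_SMI_READVALID, MGE_SMI_BUSY) live above them.
 */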

static int
mge_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	uint32_t retries;

	if ((MV_PHY_ADDR_BASE + device_get_unit(dev)) != phy)
		return (0);

	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));

	retries = MGE_SMI_WRITE_RETRIES;
	while (--retries && MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_BUSY)
		DELAY(MGE_SMI_WRITE_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while writing to PHY\n");
	return (0);
}

static int
mge_probe(device_t dev)
{

	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
	return (BUS_PROBE_DEFAULT);
}

static int
mge_resume(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}

static int
mge_shutdown(device_t dev)
{
	struct mge_softc *sc = device_get_softc(dev);

	MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(sc->ifp);
#endif

	mge_stop(sc);

	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

static int
mge_encap(struct mge_softc *sc, struct mbuf *m0)
{
	struct mge_desc_wrapper *dw = NULL;
	struct ifnet *ifp;
	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
	bus_dmamap_t mapp;
	int error;
	int seg, nsegs;
	int desc_no;

	ifp = sc->ifp;

	/* Check for free descriptors */
	if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
		/* No free descriptors */
		return (-1);
	}

	/* Fetch unused map */
	desc_no = sc->tx_desc_curr;
	dw = &sc->mge_tx_desc[desc_no];
	mapp = dw->buffer_dmap;

	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0 || nsegs != 1) {
		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
		return ((error != 0) ? error : -1);
	}

	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);

	/* Everything is ok, now we can send buffers */
	for (seg = 0; seg < nsegs; seg++) {
		dw->mge_desc->byte_count = segs[seg].ds_len;
		dw->mge_desc->buffer = segs[seg].ds_addr;
		dw->buffer = m0;
		dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
		    MGE_DMA_OWNED;

		if (seg == 0)
			mge_offload_setup_descriptor(sc, dw);
	}

	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
	sc->tx_desc_used_count++;
	return (0);
}
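
/*
 * Note on the single-segment assumption in mge_encap(): the mbuf tag
 * created in mge_alloc_desc_dma() allows only one segment
 * (nsegments == 1, maxsize == MCLBYTES) and mge_start_locked()
 * defragments each packet before calling here, so the load either
 * yields exactly one segment or fails, and the loop above runs at most
 * once per packet.
 */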

static void
mge_tick(void *msc)
{
	struct mge_softc *sc = msc;

	/* Check for TX timeout */
	mge_watchdog(sc);

	mii_tick(sc->mii);

	/* Check for media type change */
	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
		mge_ifmedia_upd(sc->ifp);

	/* Schedule another timeout one second from now */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_watchdog(struct mge_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;

	MGE_GLOBAL_LOCK(sc);

	if (sc->wd_timer == 0 || --sc->wd_timer) {
		MGE_GLOBAL_UNLOCK(sc);
		return;
	}

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	mge_stop(sc);
	mge_init_locked(sc);

	MGE_GLOBAL_UNLOCK(sc);
}

static void
mge_start(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK(sc);

	mge_start_locked(ifp);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_start_locked(struct ifnet *ifp)
{
	struct mge_softc *sc;
	struct mbuf *m0, *mtmp;
	uint32_t reg_val, queued = 0;

	sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (;;) {
		/* Get packet from the queue */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		mtmp = m_defrag(m0, M_DONTWAIT);
		if (mtmp)
			m0 = mtmp;

		if (mge_encap(sc, m0)) {
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		BPF_MTAP(ifp, m0);
	}

	if (queued) {
		/* Enable transmitter and watchdog timer */
		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
		sc->wd_timer = 5;
	}
}

static void
mge_stop(struct mge_softc *sc)
{
	struct ifnet *ifp;
	volatile uint32_t reg_val, status;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	int count;

	ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Stop tick engine */
	callout_stop(&sc->wd_callout);

	/* Disable interface */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->wd_timer = 0;

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Disable Rx and Tx */
	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);

	/* Remove pending data from TX queue */
	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
	    sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
		    MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);

		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
	}

	/* Wait for end of transmission */
	count = 0x100000;
	while (count--) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
			break;
		DELAY(100);
	}

	if (count < 0)
		if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
		    __FUNCTION__);

	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val &= ~(PORT_SERIAL_ENABLE);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
}

static int
mge_suspend(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}

static void
mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize)
{
	int csum_flags = 0;

	if (ifp->if_capenable & IFCAP_RXCSUM) {
		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
		    (status & MGE_RX_L4_CSUM_OK)) {
			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			frame->m_pkthdr.csum_data = 0xFFFF;
		}

		frame->m_pkthdr.csum_flags = csum_flags;
	}
}
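
/*
 * Setting csum_data to 0xFFFF together with CSUM_DATA_VALID and
 * CSUM_PSEUDO_HDR is the standard mbuf(9) idiom for "hardware verified
 * the full L4 checksum": the stack treats the value as an
 * already-computed pseudo-header checksum and skips software
 * verification.
 */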

static void
mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
{
	struct mbuf *m0 = dw->buffer;
	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
	int csum_flags = m0->m_pkthdr.csum_flags;
	int cmd_status = 0;
	struct ip *ip;
	int ehlen, etype;

	if (csum_flags) {
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			etype = ntohs(eh->evl_proto);
			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			cmd_status |= MGE_TX_VLAN_TAGGED;
		} else {
			etype = ntohs(eh->evl_encap_proto);
			ehlen = ETHER_HDR_LEN;
		}

		if (etype != ETHERTYPE_IP) {
			if_printf(sc->ifp,
			    "TCP/IP Offload enabled for unsupported "
			    "protocol!\n");
			return;
		}

		ip = (struct ip *)(m0->m_data + ehlen);
		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);

		if ((m0->m_flags & M_FRAG) == 0)
			cmd_status |= MGE_TX_NOT_FRAGMENT;
	}

	if (csum_flags & CSUM_IP)
		cmd_status |= MGE_TX_GEN_IP_CSUM;

	if (csum_flags & CSUM_TCP)
		cmd_status |= MGE_TX_GEN_L4_CSUM;

	if (csum_flags & CSUM_UDP)
		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;

	dw->mge_desc->cmd_status |= cmd_status;
}

static void
mge_intrs_ctrl(struct mge_softc *sc, int enable)
{

	if (enable) {
		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
		    MGE_PORT_INT_EXT_TXBUF0);
	} else {
		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_INT_MASK, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
	}
}

static uint8_t
mge_crc8(uint8_t *data, int size)
{
	uint8_t crc = 0;
	static const uint8_t ct[256] = {
		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
	};

	while (size--)
		crc = ct[crc ^ *(data++)];

	return (crc);
}
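
/*
 * The table above implements CRC-8 with the polynomial
 * x^8 + x^2 + x + 1 (0x07), MSB first, zero initial value.  A minimal
 * sketch of how such a table can be generated (plain C, for
 * illustration only):
 *
 *	for (int i = 0; i < 256; i++) {
 *		uint8_t crc = i;
 *		for (int bit = 0; bit < 8; bit++)
 *			crc = (crc & 0x80) ? (crc << 1) ^ 0x07 : (crc << 1);
 *		ct[i] = crc;
 *	}
 */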

static void
mge_setup_multicast(struct mge_softc *sc)
{
	uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
	uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
	uint32_t smt[MGE_MCAST_REG_NUMBER];
	uint32_t omt[MGE_MCAST_REG_NUMBER];
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	uint8_t *mac;
	int i;

	if (ifp->if_flags & IFF_ALLMULTI) {
		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
			smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
	} else {
		memset(smt, 0, sizeof(smt));
		memset(omt, 0, sizeof(omt));

		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;

			mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			if (memcmp(mac, special, sizeof(special)) == 0) {
				i = mac[5];
				smt[i >> 2] |= v << ((i & 0x03) << 3);
			} else {
				i = mge_crc8(mac, ETHER_ADDR_LEN);
				omt[i >> 2] |= v << ((i & 0x03) << 3);
			}
		}
		if_maddr_runlock(ifp);
	}

	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
	}
}
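
/*
 * Example of the filtering scheme above (hypothetical address):
 * addresses of the form 01:00:5E:00:00:xx go to the "special" table,
 * indexed directly by the last byte, while everything else is hashed
 * with mge_crc8().  For 01:00:5E:00:00:25, i == 0x25, so v is written
 * into byte (0x25 & 3) == 1 of smt[0x25 >> 2] == smt[9].
 */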

static void
mge_set_rxic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
		sc->rx_ic_time = sc->mge_rx_ipg_max;

	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
}

static void
mge_set_txic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
		sc->tx_ic_time = sc->mge_tfut_ipg_max;

	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
}

static int
mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
{
	struct mge_softc *sc = (struct mge_softc *)arg1;
	uint32_t time;
	int error;

	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
	error = sysctl_handle_int(oidp, &time, 0, req);
	if (error != 0)
		return (error);

	MGE_GLOBAL_LOCK(sc);
	if (arg2 == MGE_IC_RX) {
		sc->rx_ic_time = time;
		mge_set_rxic(sc);
	} else {
		sc->tx_ic_time = time;
		mge_set_txic(sc);
	}
	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}
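
/*
 * The handler above backs the coalescing knobs registered in
 * mge_add_sysctls() below.  Assuming unit 0, they would show up as
 * dev.mge.0.int_coal.rx_time and dev.mge.0.int_coal.tx_time, e.g.:
 *
 *	sysctl dev.mge.0.int_coal.rx_time=768
 */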

static void
mge_add_sysctls(struct mge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD, 0, "MGE Interrupts coalescing");
	children = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
	    "I", "IC RX time threshold");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
	    "I", "IC TX time threshold");
}
1830