xref: /freebsd/sys/dev/mge/if_mge.c (revision eb6d21b4ca6d668cf89afd99eef7baeafa712197)
1 /*-
2  * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
3  * All rights reserved.
4  *
5  * Developed by Semihalf.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of MARVELL nor the names of contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #ifdef HAVE_KERNEL_OPTION_HEADERS
33 #include "opt_device_polling.h"
34 #endif
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/endian.h>
42 #include <sys/mbuf.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/socket.h>
48 #include <sys/sysctl.h>
49 
50 #include <net/ethernet.h>
51 #include <net/bpf.h>
52 #include <net/if.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_types.h>
57 #include <net/if_vlan_var.h>
58 
59 #include <netinet/in_systm.h>
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
62 
63 #include <sys/sockio.h>
64 #include <sys/bus.h>
65 #include <machine/bus.h>
66 #include <sys/rman.h>
67 #include <machine/resource.h>
68 
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
71 
72 #ifndef MII_ADDR_BASE
73 #define MII_ADDR_BASE 8
74 #endif
75 
76 #include <dev/mge/if_mgevar.h>
77 #include <arm/mv/mvreg.h>
78 #include <arm/mv/mvvar.h>
79 
80 #include "miibus_if.h"
81 
82 /* PHY registers are in the address space of the first mge unit */
83 static struct mge_softc *sc_mge0 = NULL;
84 
85 static int mge_probe(device_t dev);
86 static int mge_attach(device_t dev);
87 static int mge_detach(device_t dev);
88 static int mge_shutdown(device_t dev);
89 static int mge_suspend(device_t dev);
90 static int mge_resume(device_t dev);
91 
92 static int mge_miibus_readreg(device_t dev, int phy, int reg);
93 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
94 
95 static int mge_ifmedia_upd(struct ifnet *ifp);
96 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
97 
98 static void mge_init(void *arg);
99 static void mge_init_locked(void *arg);
100 static void mge_start(struct ifnet *ifp);
101 static void mge_start_locked(struct ifnet *ifp);
102 static void mge_watchdog(struct mge_softc *sc);
103 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
104 
105 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
106 static uint32_t mge_rx_ipg(uint32_t val, int ver);
107 static void mge_ver_params(struct mge_softc *sc);
108 
109 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
110 static void mge_intr_rx(void *arg);
111 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
112 static void mge_intr_tx(void *arg);
113 static void mge_intr_tx_locked(struct mge_softc *sc);
114 static void mge_intr_misc(void *arg);
115 static void mge_intr_sum(void *arg);
116 static void mge_intr_err(void *arg);
117 static void mge_stop(struct mge_softc *sc);
118 static void mge_tick(void *msc);
119 static uint32_t mge_set_port_serial_control(uint32_t media);
120 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
121 static void mge_set_mac_address(struct mge_softc *sc);
122 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
123     uint8_t queue);
124 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
125 static int mge_allocate_dma(struct mge_softc *sc);
126 static int mge_alloc_desc_dma(struct mge_softc *sc,
127     struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
128 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
129     struct mbuf **mbufp, bus_addr_t *paddr);
130 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error);
131 static void mge_free_dma(struct mge_softc *sc);
132 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
133     bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
134 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
135     uint32_t status, uint16_t bufsize);
136 static void mge_offload_setup_descriptor(struct mge_softc *sc,
137     struct mge_desc_wrapper *dw);
138 static uint8_t mge_crc8(uint8_t *data, int size);
139 static void mge_setup_multicast(struct mge_softc *sc);
140 static void mge_set_rxic(struct mge_softc *sc);
141 static void mge_set_txic(struct mge_softc *sc);
142 static void mge_add_sysctls(struct mge_softc *sc);
143 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
144 
/* Newbus + MII method table; wired to the mge_* handlers declared above. */
static device_method_t mge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mge_probe),
	DEVMETHOD(device_attach,	mge_attach),
	DEVMETHOD(device_detach,	mge_detach),
	DEVMETHOD(device_shutdown,	mge_shutdown),
	DEVMETHOD(device_suspend,	mge_suspend),
	DEVMETHOD(device_resume,	mge_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
	{ 0, 0 }	/* sentinel terminating the method table */
};
158 
/* Driver glue: register "mge" on the mbus and attach a miibus child. */
static driver_t mge_driver = {
	"mge",
	mge_methods,
	sizeof(struct mge_softc),
};

static devclass_t mge_devclass;

DRIVER_MODULE(mge, mbus, mge_driver, mge_devclass, 0, 0);
DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(mge, ether, 1, 1, 1);
MODULE_DEPEND(mge, miibus, 1, 1, 1);
171 
/* One register window plus five IRQ lines; list ends with a -1 sentinel. */
static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};
181 
/*
 * Interrupt handler/description pairs, indexed in the same order as the
 * IRQ entries of res_spec[] (mge_attach() maps sc->res[1 + i] to entry i).
 */
static struct {
	driver_intr_t *handler;
	char * description;
} mge_intrs[MGE_INTR_COUNT] = {
	{ mge_intr_rx,	"GbE receive interrupt" },
	{ mge_intr_tx,	"GbE transmit interrupt" },
	{ mge_intr_misc,"GbE misc interrupt" },
	{ mge_intr_sum,	"GbE summary interrupt" },
	{ mge_intr_err,	"GbE error interrupt" },
};
192 
193 static void
194 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
195 {
196 	uint32_t mac_l, mac_h;
197 
198 	/* XXX use currently programmed MAC address; eventually this info will
199 	 * be provided by the loader */
200 
201 	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
202 	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
203 
204 	addr[0] = (mac_h & 0xff000000) >> 24;
205 	addr[1] = (mac_h & 0x00ff0000) >> 16;
206 	addr[2] = (mac_h & 0x0000ff00) >> 8;
207 	addr[3] = (mac_h & 0x000000ff);
208 	addr[4] = (mac_l & 0x0000ff00) >> 8;
209 	addr[5] = (mac_l & 0x000000ff);
210 }
211 
/*
 * Encode the TX FIFO urgent threshold field for the given controller
 * version: mask the value to the field width (14 bits on v1, 16 bits on
 * v2 and later) and shift it into bit position 4.
 */
static uint32_t
mge_tfut_ipg(uint32_t val, int ver)
{
	uint32_t mask;

	mask = (ver == 1) ? 0x3fff : 0xffff;
	return ((val & mask) << 4);
}
224 
/*
 * Encode the RX interrupt coalescing (IPG) field for the given controller
 * version.  v1 keeps 14 bits at position 8; v2+ keeps the low 15 bits at
 * position 7 with the 16th bit relocated to bit 25.
 */
static uint32_t
mge_rx_ipg(uint32_t val, int ver)
{

	if (ver == 1)
		return ((val & 0x3fff) << 8);
	return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
}
237 
238 static void
239 mge_ver_params(struct mge_softc *sc)
240 {
241 	uint32_t d, r;
242 
243 	soc_id(&d, &r);
244 	if (d == MV_DEV_88F6281 || d == MV_DEV_MV78100 ||
245 	    d == MV_DEV_MV78100_Z0) {
246 		sc->mge_ver = 2;
247 		sc->mge_mtu = 0x4e8;
248 		sc->mge_tfut_ipg_max = 0xFFFF;
249 		sc->mge_rx_ipg_max = 0xFFFF;
250 		sc->mge_tx_arb_cfg = 0xFC0000FF;
251 		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
252 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
253 	} else {
254 		sc->mge_ver = 1;
255 		sc->mge_mtu = 0x458;
256 		sc->mge_tfut_ipg_max = 0x3FFF;
257 		sc->mge_rx_ipg_max = 0x3FFF;
258 		sc->mge_tx_arb_cfg = 0x000000FF;
259 		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
260 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
261 	}
262 }
263 
264 static void
265 mge_set_mac_address(struct mge_softc *sc)
266 {
267 	char *if_mac;
268 	uint32_t mac_l, mac_h;
269 
270 	MGE_GLOBAL_LOCK_ASSERT(sc);
271 
272 	if_mac = (char *)IF_LLADDR(sc->ifp);
273 
274 	mac_l = (if_mac[4] << 8) | (if_mac[5]);
275 	mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
276 	    (if_mac[2] << 8) | (if_mac[3] << 0);
277 
278 	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
279 	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
280 
281 	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
282 }
283 
284 static void
285 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
286 {
287 	uint32_t reg_idx, reg_off, reg_val, i;
288 
289 	last_byte &= 0xf;
290 	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
291 	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
292 	reg_val = (1 | (queue << 1)) << reg_off;
293 
294 	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
295 		if ( i == reg_idx)
296 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
297 		else
298 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
299 	}
300 }
301 
302 static void
303 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
304 {
305 	uint32_t port_config;
306 	uint32_t reg_val, i;
307 
308 	/* Enable or disable promiscuous mode as needed */
309 	if (sc->ifp->if_flags & IFF_PROMISC) {
310 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
311 		port_config |= PORT_CONFIG_UPM;
312 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
313 
314 		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
315 		   (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
316 
317 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
318 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
319 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
320 		}
321 
322 		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
323 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
324 
325 	} else {
326 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
327 		port_config &= ~PORT_CONFIG_UPM;
328 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
329 
330 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
331 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
332 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
333 		}
334 
335 		mge_set_mac_address(sc);
336 	}
337 }
338 
339 static void
340 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
341 {
342 	u_int32_t *paddr;
343 
344 	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
345 	paddr = arg;
346 
347 	*paddr = segs->ds_addr;
348 }
349 
/*
 * Attach a fresh mbuf cluster to an RX DMA map.  If *mbufp already holds
 * an mbuf, its mapping is synced and unloaded first (the old mbuf itself
 * is NOT freed here — callers pass it up the stack or free it).  On
 * success *mbufp points at the new mbuf and *paddr at its bus address.
 * Returns 0 or ENOBUFS; panics on an unexpected busdma load failure.
 */
static int
mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    bus_addr_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error;
	int nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	/* Expose the whole cluster to the hardware. */
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	/* Tear down the previous mapping before reusing the map. */
	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	/* A cluster is physically contiguous; anything else is fatal. */
	if (nsegs != 1 || error)
		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}
383 
/*
 * Allocate and chain 'size' DMA descriptors described by tab[] into a
 * ring, then create a busdma tag and per-descriptor maps for the data
 * buffers (mbufs).  Returns 0 or ENXIO.  NOTE(review): on a mid-loop
 * failure, descriptors allocated so far are left for mge_free_desc()
 * (via the detach path) to reclaim.
 */
static int
mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t *buffer_tag)
{
	struct mge_desc_wrapper *dw;
	bus_addr_t desc_paddr;
	int i, error;

	/*
	 * Walk backwards so each descriptor can point at its successor:
	 * after iteration i, tab[i].next_desc == paddr of tab[i + 1].
	 */
	desc_paddr = 0;
	for (i = size - 1; i >= 0; i--) {
		dw = &(tab[i]);
		error = bus_dmamem_alloc(sc->mge_desc_dtag,
		    (void**)&(dw->mge_desc),
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &(dw->desc_dmap));

		if (error) {
			if_printf(sc->ifp, "failed to allocate DMA memory\n");
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);

		if (error) {
			if_printf(sc->ifp, "can't load descriptor\n");
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		/* Chain descriptors */
		dw->mge_desc->next_desc = desc_paddr;
		desc_paddr = dw->mge_desc_paddr;
	}
	/* Close the ring: last descriptor points back at the first. */
	tab[size - 1].mge_desc->next_desc = desc_paddr;

	/* Allocate a busdma tag for mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    8, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    buffer_tag);			/* dmat */
	if (error) {
		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	/* Create TX busdma maps */
	for (i = 0; i < size; i++) {
		dw = &(tab[i]);
		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
		if (error) {
			if_printf(sc->ifp, "failed to create map for mbuf\n");
			return (ENXIO);
		}

		dw->buffer = (struct mbuf*)NULL;
		dw->mge_desc->buffer = (bus_addr_t)NULL;
	}

	return (0);
}
454 
455 static int
456 mge_allocate_dma(struct mge_softc *sc)
457 {
458 	int error;
459 	struct mge_desc_wrapper *dw;
460 	int num, i;
461 
462 
463 	num = MGE_TX_DESC_NUM + MGE_RX_DESC_NUM;
464 
465 	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
466 	error = bus_dma_tag_create(NULL,	/* parent */
467 	    16, 0,				/* alignment, boundary */
468 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
469 	    BUS_SPACE_MAXADDR,			/* highaddr */
470 	    NULL, NULL,				/* filtfunc, filtfuncarg */
471 	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
472 	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
473 	    NULL, NULL,				/* lockfunc, lockfuncarg */
474 	    &sc->mge_desc_dtag);		/* dmat */
475 
476 
477 	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
478 	    &sc->mge_tx_dtag);
479 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
480 	    &sc->mge_rx_dtag);
481 
482 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
483 		dw = &(sc->mge_rx_desc[i]);
484 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
485 		    &dw->mge_desc->buffer);
486 	}
487 
488 	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
489 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
490 
491 	return (0);
492 }
493 
494 static void
495 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
496     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
497 {
498 	struct mge_desc_wrapper *dw;
499 	int i;
500 
501 	for (i = 0; i < size; i++) {
502 		/* Free RX mbuf */
503 		dw = &(tab[i]);
504 
505 		if (dw->buffer_dmap) {
506 			if (free_mbufs) {
507 				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
508 				    BUS_DMASYNC_POSTREAD);
509 				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
510 			}
511 			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
512 			if (free_mbufs)
513 				m_freem(dw->buffer);
514 		}
515 		/* Free RX descriptors */
516 		if (dw->desc_dmap) {
517 			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
518 			    BUS_DMASYNC_POSTREAD);
519 			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
520 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
521 			    dw->desc_dmap);
522 		}
523 	}
524 }
525 
526 static void
527 mge_free_dma(struct mge_softc *sc)
528 {
529 	/* Free desciptors and mbufs */
530 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
531 	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
532 
533 	/* Destroy mbuf dma tag */
534 	bus_dma_tag_destroy(sc->mge_tx_dtag);
535 	bus_dma_tag_destroy(sc->mge_rx_dtag);
536 	/* Destroy descriptors tag */
537 	bus_dma_tag_destroy(sc->mge_desc_dtag);
538 }
539 
540 static void
541 mge_reinit_rx(struct mge_softc *sc)
542 {
543 	struct mge_desc_wrapper *dw;
544 	int i;
545 
546 	MGE_RECEIVE_LOCK(sc);
547 
548 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
549 
550 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
551 	    &sc->mge_rx_dtag);
552 
553 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
554 		dw = &(sc->mge_rx_desc[i]);
555 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
556 		&dw->mge_desc->buffer);
557 	}
558 
559 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
560 	sc->rx_desc_curr = 0;
561 
562 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
563 	    sc->rx_desc_start);
564 
565 	/* Enable RX queue */
566 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
567 
568 	MGE_RECEIVE_UNLOCK(sc);
569 }
570 
571 #ifdef DEVICE_POLLING
572 static poll_handler_t mge_poll;
573 
574 static int
575 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
576 {
577 	struct mge_softc *sc = ifp->if_softc;
578 	uint32_t int_cause, int_cause_ext;
579 	int rx_npkts = 0;
580 
581 	MGE_GLOBAL_LOCK(sc);
582 
583 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
584 		MGE_GLOBAL_UNLOCK(sc);
585 		return (rx_npkts);
586 	}
587 
588 	if (cmd == POLL_AND_CHECK_STATUS) {
589 		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
590 		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
591 
592 		/* Check for resource error */
593 		if (int_cause & MGE_PORT_INT_RXERRQ0)
594 			mge_reinit_rx(sc);
595 
596 		if (int_cause || int_cause_ext) {
597 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
598 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
599 		}
600 	}
601 
602 	mge_intr_tx_locked(sc);
603 	rx_npkts = mge_intr_rx_locked(sc, count);
604 
605 	MGE_GLOBAL_UNLOCK(sc);
606 	return (rx_npkts);
607 }
608 #endif /* DEVICE_POLLING */
609 
/*
 * Attach the device: set version parameters, allocate bus resources and
 * DMA, create and attach the network interface, probe the PHY and hook
 * up interrupt handlers.  Error paths unwind through mge_detach().
 */
static int
mge_attach(device_t dev)
{
	struct mge_softc *sc;
	struct ifnet *ifp;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	int i, error ;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Unit 0 owns the PHY register window (see sc_mge0 above). */
	if (device_get_unit(dev) == 0)
		sc_mge0 = sc;

	/* Set chip version-dependent parameters */
	mge_ver_params(sc);

	/* Initialize mutexes */
	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		mge_detach(dev);
		return (ENXIO);
	}

	/* Allocate DMA, buffers, buffer descriptors */
	error = mge_allocate_dma(sc);
	if (error) {
		mge_detach(dev);
		return (ENXIO);
	}

	/* Start with empty rings. */
	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Configure defaults for interrupts coalescing */
	sc->rx_ic_time = 768;
	sc->tx_ic_time = 768;
	mge_add_sysctls(sc);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		mge_detach(dev);
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = MGE_CHECKSUM_FEATURES;

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ifp->if_init = mge_init;
	ifp->if_start = mge_start;
	ifp->if_ioctl = mge_ioctl;

	/* Size the send queue to the TX ring. */
	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	mge_get_mac_address(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);
	callout_init(&sc->wd_callout, 0);

	/* Probe PHY(s) */
	error = mii_phy_probe(dev, &sc->miibus, mge_ifmedia_upd, mge_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII failed to find PHY\n");
		/* Clear sc->ifp so mge_detach() skips the ifnet teardown. */
		if_free(ifp);
		sc->ifp = NULL;
		mge_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/*
	 * Attach interrupt handlers.
	 * NOTE(review): only the first two mge_intrs[] entries (RX and TX)
	 * are hooked up here, although five IRQ resources are allocated in
	 * res_spec[] — confirm whether the remaining lines are intentional
	 * no-ops.
	 */
	for (i = 0; i < 2; ++i) {
		error = bus_setup_intr(dev, sc->res[1 + i],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
		    sc, &sc->ih_cookie[i]);
		if (error) {
			device_printf(dev, "could not setup %s\n",
			    mge_intrs[i].description);
			ether_ifdetach(sc->ifp);
			return (error);
		}
	}

	return (0);
}
714 
715 static int
716 mge_detach(device_t dev)
717 {
718 	struct mge_softc *sc;
719 	int error,i;
720 
721 	sc = device_get_softc(dev);
722 
723 	/* Stop controller and free TX queue */
724 	if (sc->ifp)
725 		mge_shutdown(dev);
726 
727 	/* Wait for stopping ticks */
728         callout_drain(&sc->wd_callout);
729 
730 	/* Stop and release all interrupts */
731 	for (i = 0; i < 2; ++i) {
732 		error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
733 		if (error)
734 			device_printf(dev, "could not release %s\n",
735 			    mge_intrs[i].description);
736 	}
737 
738 	/* Detach network interface */
739 	if (sc->ifp) {
740 		ether_ifdetach(sc->ifp);
741 		if_free(sc->ifp);
742 	}
743 
744 	/* Free DMA resources */
745 	mge_free_dma(sc);
746 
747 	/* Free IO memory handler */
748 	bus_release_resources(dev, res_spec, sc->res);
749 
750 	/* Destroy mutexes */
751 	mtx_destroy(&sc->receive_lock);
752 	mtx_destroy(&sc->transmit_lock);
753 
754 	return (0);
755 }
756 
757 static void
758 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
759 {
760 	struct mge_softc *sc = ifp->if_softc;
761 	struct mii_data *mii;
762 
763 	MGE_TRANSMIT_LOCK(sc);
764 
765 	mii = sc->mii;
766 	mii_pollstat(mii);
767 
768 	ifmr->ifm_active = mii->mii_media_active;
769 	ifmr->ifm_status = mii->mii_media_status;
770 
771 	MGE_TRANSMIT_UNLOCK(sc);
772 }
773 
774 static uint32_t
775 mge_set_port_serial_control(uint32_t media)
776 {
777 	uint32_t port_config;
778 
779 	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
780 	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
781 
782 	if (IFM_TYPE(media) == IFM_ETHER) {
783 		switch(IFM_SUBTYPE(media)) {
784 			case IFM_AUTO:
785 				break;
786 			case IFM_1000_T:
787 				port_config  |= (PORT_SERIAL_GMII_SPEED_1000 |
788 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
789 				    PORT_SERIAL_SPEED_AUTONEG);
790 				break;
791 			case IFM_100_TX:
792 				port_config  |= (PORT_SERIAL_MII_SPEED_100 |
793 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
794 				    PORT_SERIAL_SPEED_AUTONEG);
795 				break;
796 			case IFM_10_T:
797 				port_config  |= (PORT_SERIAL_AUTONEG |
798 				    PORT_SERIAL_AUTONEG_FC |
799 				    PORT_SERIAL_SPEED_AUTONEG);
800 				break;
801 		}
802 		if (media & IFM_FDX)
803 			port_config |= PORT_SERIAL_FULL_DUPLEX;
804 	}
805 	return (port_config);
806 }
807 
808 static int
809 mge_ifmedia_upd(struct ifnet *ifp)
810 {
811 	struct mge_softc *sc = ifp->if_softc;
812 
813 	if (ifp->if_flags & IFF_UP) {
814 		MGE_GLOBAL_LOCK(sc);
815 
816 		sc->mge_media_status = sc->mii->mii_media.ifm_media;
817 		mii_mediachg(sc->mii);
818 		mge_init_locked(sc);
819 
820 		MGE_GLOBAL_UNLOCK(sc);
821 	}
822 
823 	return (0);
824 }
825 
/* Locked wrapper around mge_init_locked(); if_init entry point. */
static void
mge_init(void *arg)
{
	struct mge_softc *sc;

	sc = arg;
	MGE_GLOBAL_LOCK(sc);
	mge_init_locked(sc);
	MGE_GLOBAL_UNLOCK(sc);
}
837 
/*
 * (Re)initialize the controller with both locks held: stop it, program
 * MAC/multicast filters, TX queue parameters, SDMA and port serial
 * settings, arm the RX ring, enable the port and wait for link, then
 * set up coalescing, interrupts and the watchdog.
 */
static void
mge_init_locked(void *arg)
{
	struct mge_softc *sc = arg;
	struct mge_desc_wrapper *dw;
	volatile uint32_t reg_val;
	int i, count;


	MGE_GLOBAL_LOCK_ASSERT(sc);

	/* Stop interface */
	mge_stop(sc);

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Set MAC address */
	mge_set_mac_address(sc);

	/* Setup multicast filters */
	mge_setup_multicast(sc);

	/* Version 2 controllers: enable RGMII and fixed-priority queue 0. */
	if (sc->mge_ver == 2) {
		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
	}

	/* Initialize TX queue configuration registers */
	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);

	/* Clear TX queue configuration registers for unused queues */
	for (i = 1; i < 7; i++) {
		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
	}

	/*
	 * Set default MTU.  NOTE(review): sc->mge_mtu holds the
	 * version-specific MTU *register offset* (see mge_ver_params()),
	 * so this writes 0 to that register — confirm against if_mgevar.h.
	 */
	MGE_WRITE(sc, sc->mge_mtu, 0);

	/* Port configuration */
	MGE_WRITE(sc, MGE_PORT_CONFIG,
	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
	    PORT_CONFIG_ARO_RXQ(0));
	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0);

	/* Setup port configuration */
	reg_val = mge_set_port_serial_control(sc->mge_media_status);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);

	/* Setup SDMA configuration */
	MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP |
	    MGE_SDMA_TX_BYTE_SWAP |
	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));

	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);

	/* Point the controller at the descriptor rings. */
	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Reset descriptor indexes */
	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Enable RX descriptors: hand ownership of every slot to the DMA. */
	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &sc->mge_rx_desc[i];
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		dw->mge_desc->buff_size = MCLBYTES;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

	/* Enable port and busy-wait (up to ~0x100000 * 100us) for link-up. */
	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val |= PORT_SERIAL_ENABLE;
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
	count = 0x100000;
	for (;;) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (reg_val & MGE_STATUS_LINKUP)
			break;
		DELAY(100);
		if (--count == 0) {
			if_printf(sc->ifp, "Timeout on link-up\n");
			break;
		}
	}

	/* Setup interrupts coalescing */
	mge_set_rxic(sc);
	mge_set_txic(sc);

	/* Enable interrupts */
#ifdef DEVICE_POLLING
	/*
	 * ...only if polling is not turned on.  Disable interrupts
	 * explicitly if polling is enabled.
	 */
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		mge_intrs_ctrl(sc, 0);
	else
#endif /* DEVICE_POLLING */
	mge_intrs_ctrl(sc, 1);

	/* Activate network interface */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->wd_timer = 0;

	/* Schedule watchdog timeout */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}
961 
962 static void
963 mge_intr_err(void *arg)
964 {
965 	struct mge_softc *sc = arg;
966 	struct ifnet *ifp;
967 
968 	ifp = sc->ifp;
969 	if_printf(ifp, "%s\n", __FUNCTION__);
970 }
971 
972 static void
973 mge_intr_misc(void *arg)
974 {
975 	struct mge_softc *sc = arg;
976 	struct ifnet *ifp;
977 
978 	ifp = sc->ifp;
979 	if_printf(ifp, "%s\n", __FUNCTION__);
980 }
981 
982 static void
983 mge_intr_rx(void *arg) {
984 	struct mge_softc *sc = arg;
985 	uint32_t int_cause, int_cause_ext;
986 
987 	MGE_RECEIVE_LOCK(sc);
988 
989 #ifdef DEVICE_POLLING
990 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
991 		MGE_RECEIVE_UNLOCK(sc);
992 		return;
993 	}
994 #endif
995 
996 	/* Get interrupt cause */
997 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
998 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
999 
1000 	/* Check for resource error */
1001 	if (int_cause & MGE_PORT_INT_RXERRQ0) {
1002 		mge_reinit_rx(sc);
1003 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1004 		    int_cause & ~MGE_PORT_INT_RXERRQ0);
1005 	}
1006 
1007 	int_cause &= MGE_PORT_INT_RXQ0;
1008 	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1009 
1010 	if (int_cause || int_cause_ext) {
1011 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1012 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1013 		mge_intr_rx_locked(sc, -1);
1014 	}
1015 
1016 	MGE_RECEIVE_UNLOCK(sc);
1017 }
1018 
1019 
1020 static int
1021 mge_intr_rx_locked(struct mge_softc *sc, int count)
1022 {
1023 	struct ifnet *ifp = sc->ifp;
1024 	uint32_t status;
1025 	uint16_t bufsize;
1026 	struct mge_desc_wrapper* dw;
1027 	struct mbuf *mb;
1028 	int rx_npkts = 0;
1029 
1030 	MGE_RECEIVE_LOCK_ASSERT(sc);
1031 
1032 	while (count != 0) {
1033 		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1034 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1035 		    BUS_DMASYNC_POSTREAD);
1036 
1037 		/* Get status */
1038 		status = dw->mge_desc->cmd_status;
1039 		bufsize = dw->mge_desc->buff_size;
1040 		if ((status & MGE_DMA_OWNED) != 0)
1041 			break;
1042 
1043 		if (dw->mge_desc->byte_count &&
1044 		    ~(status & MGE_ERR_SUMMARY)) {
1045 
1046 			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1047 			    BUS_DMASYNC_POSTREAD);
1048 
1049 			mb = m_devget(dw->buffer->m_data,
1050 			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
1051 			    0, ifp, NULL);
1052 
1053 			if (mb == NULL)
1054 				/* Give up if no mbufs */
1055 				break;
1056 
1057 			mb->m_len -= 2;
1058 			mb->m_pkthdr.len -= 2;
1059 			mb->m_data += 2;
1060 
1061 			mge_offload_process_frame(ifp, mb, status,
1062 			    bufsize);
1063 
1064 			MGE_RECEIVE_UNLOCK(sc);
1065 			(*ifp->if_input)(ifp, mb);
1066 			MGE_RECEIVE_LOCK(sc);
1067 			rx_npkts++;
1068 		}
1069 
1070 		dw->mge_desc->byte_count = 0;
1071 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1072 		sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM);
1073 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1074 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1075 
1076 		if (count > 0)
1077 			count -= 1;
1078 	}
1079 
1080 	return (rx_npkts);
1081 }
1082 
1083 static void
1084 mge_intr_sum(void *arg)
1085 {
1086 	struct mge_softc *sc = arg;
1087 	struct ifnet *ifp;
1088 
1089 	ifp = sc->ifp;
1090 	if_printf(ifp, "%s\n", __FUNCTION__);
1091 }
1092 
/*
 * TX completion interrupt handler: acknowledge the TXBUF0 cause bit and
 * reclaim finished descriptors via mge_intr_tx_locked().
 */
static void
mge_intr_tx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause_ext;

	MGE_TRANSMIT_LOCK(sc);

#ifdef DEVICE_POLLING
	/* In polling mode the poll handler services TX; ignore the IRQ. */
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_TRANSMIT_UNLOCK(sc);
		return;
	}
#endif

	/* Ack the interrupt */
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
	    int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);

	mge_intr_tx_locked(sc);

	MGE_TRANSMIT_UNLOCK(sc);
}
1117 
1118 
1119 static void
1120 mge_intr_tx_locked(struct mge_softc *sc)
1121 {
1122 	struct ifnet *ifp = sc->ifp;
1123 	struct mge_desc_wrapper *dw;
1124 	struct mge_desc *desc;
1125 	uint32_t status;
1126 	int send = 0;
1127 
1128 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1129 
1130 	/* Disable watchdog */
1131 	sc->wd_timer = 0;
1132 
1133 	while (sc->tx_desc_used_count) {
1134 		/* Get the descriptor */
1135 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1136 		desc = dw->mge_desc;
1137 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1138 		    BUS_DMASYNC_POSTREAD);
1139 
1140 		/* Get descriptor status */
1141 		status = desc->cmd_status;
1142 
1143 		if (status & MGE_DMA_OWNED)
1144 			break;
1145 
1146 		sc->tx_desc_used_idx =
1147 			(++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM;;
1148 		sc->tx_desc_used_count--;
1149 
1150 		/* Update collision statistics */
1151 		if (status & MGE_ERR_SUMMARY) {
1152 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1153 				ifp->if_collisions++;
1154 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1155 				ifp->if_collisions += 16;
1156 		}
1157 
1158 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1159 		    BUS_DMASYNC_POSTWRITE);
1160 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1161 		m_freem(dw->buffer);
1162 		dw->buffer = (struct mbuf*)NULL;
1163 		send++;
1164 
1165 		ifp->if_opackets++;
1166 	}
1167 
1168 	if (send) {
1169 		/* Now send anything that was pending */
1170 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1171 		mge_start_locked(ifp);
1172 	}
1173 }
1174 
/*
 * Interface ioctl handler: interface flags, multicast list, capability
 * (checksum offload / polling) and media changes.  Returns 0 or an errno.
 */
static int
mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error;
	uint32_t flags;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		MGE_GLOBAL_LOCK(sc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* React only to the flags that changed. */
				flags = ifp->if_flags ^ sc->mge_if_flags;
				if (flags & IFF_PROMISC)
					mge_set_prom_mode(sc,
					    MGE_RX_DEFAULT_QUEUE);

				if (flags & IFF_ALLMULTI)
					mge_setup_multicast(sc);
			} else
				mge_init_locked(sc);
		}
		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			mge_stop(sc);

		sc->mge_if_flags = ifp->if_flags;
		MGE_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Rebuild the hardware multicast filter tables. */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			MGE_GLOBAL_LOCK(sc);
			mge_setup_multicast(sc);
			MGE_GLOBAL_UNLOCK(sc);
		}
		break;
	case SIOCSIFCAP:
		/* Toggle TX/RX checksum offload. */
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				/* Polling on: disable interrupts. */
				error = ether_poll_register(mge_poll, ifp);
				if (error)
					return(error);

				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			} else {
				/* Polling off: re-enable interrupts. */
				error = ether_poll_deregister(ifp);
				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 1);
				ifp->if_capenable &= ~IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;
	case SIOCGIFMEDIA: /* fall through */
	case SIOCSIFMEDIA:
		/* Controller does not support 1000baseT half-duplex. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
		    && !(ifr->ifr_media & IFM_FDX)) {
			device_printf(sc->dev,
			    "1000baseTX half-duplex unsupported\n");
			return 0;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}
1261 
/*
 * Read a PHY register over the SMI interface.  Returns the 16-bit
 * register value, or 0 when the PHY address does not match this unit.
 *
 * NOTE(review): SMI access goes through the unit-0 softc (sc_mge0);
 * presumably the SMI block is shared by all ports -- confirm.
 */
static int
mge_miibus_readreg(device_t dev, int phy, int reg)
{
	uint32_t retries;

	/*
	 * We assume static PHY address <=> device unit mapping:
	 * PHY Address = MII_ADDR_BASE + devce unit.
	 * This is true for most Marvell boards.
	 *
	 * Code below grants proper PHY detection on each device
	 * unit.
	 */


	if ((MII_ADDR_BASE + device_get_unit(dev)) != phy)
		return (0);

	/* Issue the read command (command word masked to 29 bits). */
	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));

	/* Poll until the read data is valid or retries are exhausted. */
	retries = MGE_SMI_READ_RETRIES;
	while (--retries && !(MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_READVALID))
		DELAY(MGE_SMI_READ_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while reading from PHY\n");

	/* Register data occupies the low 16 bits of the SMI register. */
	return (MGE_READ(sc_mge0, MGE_REG_SMI) & 0xffff);
}
1292 
/*
 * Write a PHY register over the SMI interface.  Always returns 0.
 * Uses the same static PHY address <=> unit mapping as
 * mge_miibus_readreg().
 */
static int
mge_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	uint32_t retries;

	if ((MII_ADDR_BASE + device_get_unit(dev)) != phy)
		return (0);

	/* NOTE(review): SMI access uses the unit-0 softc (sc_mge0);
	 * presumably the SMI block is shared by all ports -- confirm. */
	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));

	/* Poll until the controller finishes the SMI transaction. */
	retries = MGE_SMI_WRITE_RETRIES;
	while (--retries && MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_BUSY)
		DELAY(MGE_SMI_WRITE_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while writing to PHY\n");
	return (0);
}
1312 
1313 static int
1314 mge_probe(device_t dev)
1315 {
1316 
1317 	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1318 	return (BUS_PROBE_DEFAULT);
1319 }
1320 
1321 static int
1322 mge_resume(device_t dev)
1323 {
1324 
1325 	device_printf(dev, "%s\n", __FUNCTION__);
1326 	return (0);
1327 }
1328 
/*
 * System shutdown hook: quiesce the controller before power-off/reboot.
 */
static int
mge_shutdown(device_t dev)
{
	struct mge_softc *sc = device_get_softc(dev);

	MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
	/* NOTE(review): deregister runs with the global lock held here,
	 * unlike the ioctl path -- confirm this cannot sleep/deadlock. */
        if (sc->ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(sc->ifp);
#endif

	mge_stop(sc);

	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}
1347 
1348 static int
1349 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1350 {
1351 	struct mge_desc_wrapper *dw = NULL;
1352 	struct ifnet *ifp;
1353 	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1354 	bus_dmamap_t mapp;
1355 	int error;
1356 	int seg, nsegs;
1357 	int desc_no;
1358 
1359 	ifp = sc->ifp;
1360 
1361 	/* Check for free descriptors */
1362 	if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1363 		/* No free descriptors */
1364 		return (-1);
1365 	}
1366 
1367 	/* Fetch unused map */
1368 	desc_no = sc->tx_desc_curr;
1369 	dw = &sc->mge_tx_desc[desc_no];
1370 	mapp = dw->buffer_dmap;
1371 
1372 	bus_dmamap_sync(sc->mge_desc_dtag, mapp,
1373 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1374 
1375 	/* Create mapping in DMA memory */
1376 	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1377 	    BUS_DMA_NOWAIT);
1378 	if (error != 0 || nsegs != 1 ) {
1379 		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1380 		return ((error != 0) ? error : -1);
1381 	}
1382 
1383 	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1384 
1385 	/* Everything is ok, now we can send buffers */
1386 	for (seg = 0; seg < nsegs; seg++) {
1387 		dw->mge_desc->byte_count = segs[seg].ds_len;
1388 		dw->mge_desc->buffer = segs[seg].ds_addr;
1389 		dw->buffer = m0;
1390 		dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
1391 		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1392 		    MGE_DMA_OWNED;
1393 
1394 		if (seg == 0)
1395 			mge_offload_setup_descriptor(sc, dw);
1396 	}
1397 
1398 	bus_dmamap_sync(sc->mge_desc_dtag, mapp,
1399 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1400 
1401 	sc->tx_desc_curr = (++sc->tx_desc_curr) % MGE_TX_DESC_NUM;
1402 	sc->tx_desc_used_count++;
1403 	return (0);
1404 }
1405 
1406 static void
1407 mge_tick(void *msc)
1408 {
1409 	struct mge_softc *sc = msc;
1410 
1411 	/* Check for TX timeout */
1412 	mge_watchdog(sc);
1413 
1414 	mii_tick(sc->mii);
1415 
1416 	/* Check for media type change */
1417 	if(sc->mge_media_status != sc->mii->mii_media.ifm_media)
1418 		mge_ifmedia_upd(sc->ifp);
1419 
1420 	/* Schedule another timeout one second from now */
1421 	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1422 }
1423 
/*
 * TX watchdog, called from mge_tick(): if the timer armed by
 * mge_start_locked() counts down to zero before the TX completion
 * interrupt clears it, reset the controller.
 */
static void
mge_watchdog(struct mge_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;

	MGE_GLOBAL_LOCK(sc);

	/* Timer not armed (== 0), or armed but still non-zero after the
	 * decrement: nothing to do yet. */
	if (sc->wd_timer == 0 || --sc->wd_timer) {
		MGE_GLOBAL_UNLOCK(sc);
		return;
	}

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	/* Full stop/init cycle to recover the stuck transmitter. */
	mge_stop(sc);
	mge_init_locked(sc);

	MGE_GLOBAL_UNLOCK(sc);
}
1446 
1447 static void
1448 mge_start(struct ifnet *ifp)
1449 {
1450 	struct mge_softc *sc = ifp->if_softc;
1451 
1452 	MGE_TRANSMIT_LOCK(sc);
1453 
1454 	mge_start_locked(ifp);
1455 
1456 	MGE_TRANSMIT_UNLOCK(sc);
1457 }
1458 
/*
 * Drain the interface send queue into the TX ring.  Called with the
 * transmit lock held.
 */
static void
mge_start_locked(struct ifnet *ifp)
{
	struct mge_softc *sc;
	struct mbuf *m0, *mtmp;
	uint32_t reg_val, queued = 0;

	sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (;;) {
		/* Get packet from the queue */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/* mge_encap() accepts single-segment mbufs only, so
		 * collapse the chain; on failure mtmp is NULL and the
		 * original chain is used unchanged. */
		mtmp = m_defrag(m0, M_DONTWAIT);
		if (mtmp)
			m0 = mtmp;

		/* No TX descriptor available: requeue and back off. */
		if (mge_encap(sc, m0)) {
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		BPF_MTAP(ifp, m0);
	}

	if (queued) {
		/* Enable transmitter and watchdog timer */
		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
		sc->wd_timer = 5;
	}
}
1500 
1501 static void
1502 mge_stop(struct mge_softc *sc)
1503 {
1504 	struct ifnet *ifp;
1505 	volatile uint32_t reg_val, status;
1506 	struct mge_desc_wrapper *dw;
1507 	struct mge_desc *desc;
1508 	int count;
1509 
1510 	ifp = sc->ifp;
1511 
1512 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1513 		return;
1514 
1515 	/* Stop tick engine */
1516 	callout_stop(&sc->wd_callout);
1517 
1518 	/* Disable interface */
1519 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1520 	sc->wd_timer = 0;
1521 
1522 	/* Disable interrupts */
1523 	mge_intrs_ctrl(sc, 0);
1524 
1525 	/* Disable Rx and Tx */
1526 	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1527 	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1528 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1529 
1530 	/* Remove pending data from TX queue */
1531 	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1532 	    sc->tx_desc_used_count) {
1533 		/* Get the descriptor */
1534 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1535 		desc = dw->mge_desc;
1536 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1537 		    BUS_DMASYNC_POSTREAD);
1538 
1539 		/* Get descriptor status */
1540 		status = desc->cmd_status;
1541 
1542 		if (status & MGE_DMA_OWNED)
1543 			break;
1544 
1545 		sc->tx_desc_used_idx = (++sc->tx_desc_used_idx) %
1546 		    MGE_TX_DESC_NUM;
1547 		sc->tx_desc_used_count--;
1548 
1549 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1550 		    BUS_DMASYNC_POSTWRITE);
1551 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1552 
1553 		m_freem(dw->buffer);
1554 		dw->buffer = (struct mbuf*)NULL;
1555 	}
1556 
1557 	/* Wait for end of transmission */
1558 	count = 0x100000;
1559 	while (count--) {
1560 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1561 		if ( !(reg_val & MGE_STATUS_TX_IN_PROG) &&
1562 		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1563 			break;
1564 		DELAY(100);
1565 	}
1566 
1567 	if(!count)
1568 		if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
1569 		    __FUNCTION__);
1570 
1571 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1572 	reg_val &= ~(PORT_SERIAL_ENABLE);
1573 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL ,reg_val);
1574 }
1575 
1576 static int
1577 mge_suspend(device_t dev)
1578 {
1579 
1580 	device_printf(dev, "%s\n", __FUNCTION__);
1581 	return (0);
1582 }
1583 
1584 static void
1585 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1586     uint32_t status, uint16_t bufsize)
1587 {
1588 	int csum_flags = 0;
1589 
1590 	if (ifp->if_capenable & IFCAP_RXCSUM) {
1591 		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1592 			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
1593 
1594 		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1595 		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1596 		    (status & MGE_RX_L4_CSUM_OK)) {
1597 			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1598 			frame->m_pkthdr.csum_data = 0xFFFF;
1599 		}
1600 
1601 		frame->m_pkthdr.csum_flags = csum_flags;
1602 	}
1603 }
1604 
/*
 * Set TX checksum-offload bits in the descriptor held by dw, based on
 * the mbuf's csum_flags and its Ethernet/IP headers.
 */
static void
mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
{
	struct mbuf *m0 = dw->buffer;
	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
	int csum_flags = m0->m_pkthdr.csum_flags;
	int cmd_status = 0;
	struct ip *ip;
	int ehlen, etype;

	if (csum_flags) {
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			etype = ntohs(eh->evl_proto);
			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			/*
			 * NOTE(review): a descriptor bit is OR-ed into the
			 * local csum_flags variable instead of cmd_status;
			 * looks like it was meant for cmd_status -- confirm.
			 */
			csum_flags |= MGE_TX_VLAN_TAGGED;
		} else {
			etype = ntohs(eh->evl_encap_proto);
			ehlen = ETHER_HDR_LEN;
		}

		/* Hardware checksum offload handles IPv4 only. */
		if (etype != ETHERTYPE_IP) {
			if_printf(sc->ifp,
			    "TCP/IP Offload enabled for unsupported "
			    "protocol!\n");
			return;
		}

		/* Tell the controller the IP header length (in words). */
		ip = (struct ip *)(m0->m_data + ehlen);
		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);

		if ((m0->m_flags & M_FRAG) == 0)
			cmd_status |= MGE_TX_NOT_FRAGMENT;
	}

	if (csum_flags & CSUM_IP)
		cmd_status |= MGE_TX_GEN_IP_CSUM;

	if (csum_flags & CSUM_TCP)
		cmd_status |= MGE_TX_GEN_L4_CSUM;

	if (csum_flags & CSUM_UDP)
		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;

	dw->mge_desc->cmd_status |= cmd_status;
}
1650 
1651 static void
1652 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1653 {
1654 
1655 	if (enable) {
1656 		MGE_WRITE(sc, MGE_PORT_INT_MASK , MGE_PORT_INT_RXQ0 |
1657 		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1658 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , MGE_PORT_INT_EXT_TXERR0 |
1659 		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1660 		    MGE_PORT_INT_EXT_TXBUF0);
1661 	} else {
1662 		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1663 		MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1664 
1665 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1666 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1667 
1668 		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1669 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
1670 	}
1671 }
1672 
/*
 * CRC-8 over 'size' bytes of 'data': polynomial x^8 + x^2 + x + 1
 * (0x07), MSB first, zero initial value, no final XOR.  Computed
 * bitwise instead of through the former 256-entry lookup table;
 * the results are identical.
 */
static uint8_t
mge_crc8(uint8_t *data, int size)
{
	uint8_t crc = 0;
	int bit;

	while (size--) {
		crc ^= *(data++);
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x80) ?
			    (uint8_t)((crc << 1) ^ 0x07) :
			    (uint8_t)(crc << 1);
	}

	return (crc);
}
1717 
/*
 * Program the hardware multicast filter tables: the "special" table for
 * 01:00:5E:00:00:xx addresses (indexed by the last byte) and the "other"
 * table indexed by a CRC-8 of the full MAC.  Each enabled entry byte is
 * v = (queue << 1) | 1.
 */
static void
mge_setup_multicast(struct mge_softc *sc)
{
	uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
	uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
	uint32_t smt[MGE_MCAST_REG_NUMBER];
	uint32_t omt[MGE_MCAST_REG_NUMBER];
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	uint8_t *mac;
	int i;

	if (ifp->if_flags & IFF_ALLMULTI) {
		/* Accept all multicast: enable every entry in both tables. */
		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
			smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
	} else {
		memset(smt, 0, sizeof(smt));
		memset(omt, 0, sizeof(omt));

		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;

			mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			if (memcmp(mac, special, sizeof(special)) == 0) {
				/* Special table: index is the last byte. */
				i = mac[5];
				smt[i >> 2] |= v << ((i & 0x03) << 3);
			} else {
				/* Other table: index is CRC-8 of the MAC. */
				i = mge_crc8(mac, ETHER_ADDR_LEN);
				omt[i >> 2] |= v << ((i & 0x03) << 3);
			}
		}
		if_maddr_runlock(ifp);
	}

	/* Write both tables to the hardware. */
	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
	}
}
1759 
1760 static void
1761 mge_set_rxic(struct mge_softc *sc)
1762 {
1763 	uint32_t reg;
1764 
1765 	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
1766 		sc->rx_ic_time = sc->mge_rx_ipg_max;
1767 
1768 	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
1769 	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
1770 	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
1771 	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
1772 }
1773 
1774 static void
1775 mge_set_txic(struct mge_softc *sc)
1776 {
1777 	uint32_t reg;
1778 
1779 	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
1780 		sc->tx_ic_time = sc->mge_tfut_ipg_max;
1781 
1782 	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
1783 	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
1784 	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
1785 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
1786 }
1787 
1788 static int
1789 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
1790 {
1791 	struct mge_softc *sc = (struct mge_softc *)arg1;
1792 	uint32_t time;
1793 	int error;
1794 
1795 	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
1796 	error = sysctl_handle_int(oidp, &time, 0, req);
1797 	if (error != 0)
1798 		return(error);
1799 
1800 	MGE_GLOBAL_LOCK(sc);
1801 	if (arg2 == MGE_IC_RX) {
1802 		sc->rx_ic_time = time;
1803 		mge_set_rxic(sc);
1804 	} else {
1805 		sc->tx_ic_time = time;
1806 		mge_set_txic(sc);
1807 	}
1808 	MGE_GLOBAL_UNLOCK(sc);
1809 
1810 	return(0);
1811 }
1812 
/*
 * Create the per-device "int_coal" sysctl subtree with rx_time/tx_time
 * knobs, both served by mge_sysctl_ic().
 */
static void
mge_add_sysctls(struct mge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD, 0, "MGE Interrupts coalescing");
	/* Hang the knobs below the new "int_coal" node. */
	children = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
	    "I", "IC RX time threshold");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
	    "I", "IC TX time threshold");
}
1833