/* xref: /freebsd/sys/dev/mge/if_mge.c (revision aa64588d28258aef88cc33b8043112e8856948d0) */
/*-
 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#ifndef MII_ADDR_BASE
#define MII_ADDR_BASE 8
#endif

#include <dev/mge/if_mgevar.h>
#include <arm/mv/mvreg.h>
#include <arm/mv/mvvar.h>

#include "miibus_if.h"

/* PHY registers are in the address space of the first mge unit */
static struct mge_softc *sc_mge0 = NULL;

static int mge_probe(device_t dev);
static int mge_attach(device_t dev);
static int mge_detach(device_t dev);
static int mge_shutdown(device_t dev);
static int mge_suspend(device_t dev);
static int mge_resume(device_t dev);

static int mge_miibus_readreg(device_t dev, int phy, int reg);
static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);

static int mge_ifmedia_upd(struct ifnet *ifp);
static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

static void mge_init(void *arg);
static void mge_init_locked(void *arg);
static void mge_start(struct ifnet *ifp);
static void mge_start_locked(struct ifnet *ifp);
static void mge_watchdog(struct mge_softc *sc);
static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);

static uint32_t mge_tfut_ipg(uint32_t val, int ver);
static uint32_t mge_rx_ipg(uint32_t val, int ver);
static void mge_ver_params(struct mge_softc *sc);

static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
static void mge_intr_rx(void *arg);
static int mge_intr_rx_locked(struct mge_softc *sc, int count);
static void mge_intr_tx(void *arg);
static void mge_intr_tx_locked(struct mge_softc *sc);
static void mge_intr_misc(void *arg);
static void mge_intr_sum(void *arg);
static void mge_intr_err(void *arg);
static void mge_stop(struct mge_softc *sc);
static void mge_tick(void *msc);
static uint32_t mge_set_port_serial_control(uint32_t media);
static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
static void mge_set_mac_address(struct mge_softc *sc);
static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
    uint8_t queue);
static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
static int mge_allocate_dma(struct mge_softc *sc);
static int mge_alloc_desc_dma(struct mge_softc *sc,
    struct mge_desc_wrapper* desc_tab, uint32_t size,
    bus_dma_tag_t *buffer_tag);
static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, bus_addr_t *paddr);
static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void mge_free_dma(struct mge_softc *sc);
static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize);
static void mge_offload_setup_descriptor(struct mge_softc *sc,
    struct mge_desc_wrapper *dw);
static uint8_t mge_crc8(uint8_t *data, int size);
static void mge_setup_multicast(struct mge_softc *sc);
static void mge_set_rxic(struct mge_softc *sc);
static void mge_set_txic(struct mge_softc *sc);
static void mge_add_sysctls(struct mge_softc *sc);
static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);

static device_method_t mge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mge_probe),
	DEVMETHOD(device_attach,	mge_attach),
	DEVMETHOD(device_detach,	mge_detach),
	DEVMETHOD(device_shutdown,	mge_shutdown),
	DEVMETHOD(device_suspend,	mge_suspend),
	DEVMETHOD(device_resume,	mge_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
	{ 0, 0 }
};

static driver_t mge_driver = {
	"mge",
	mge_methods,
	sizeof(struct mge_softc),
};

static devclass_t mge_devclass;

DRIVER_MODULE(mge, mbus, mge_driver, mge_devclass, 0, 0);
DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(mge, ether, 1, 1, 1);
MODULE_DEPEND(mge, miibus, 1, 1, 1);

static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

static struct {
	driver_intr_t *handler;
	char *description;
} mge_intrs[MGE_INTR_COUNT] = {
	{ mge_intr_rx,	"GbE receive interrupt" },
	{ mge_intr_tx,	"GbE transmit interrupt" },
	{ mge_intr_misc, "GbE misc interrupt" },
	{ mge_intr_sum,	"GbE summary interrupt" },
	{ mge_intr_err,	"GbE error interrupt" },
};

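/*
 * MAC address register layout, as consumed below: MAC_ADDR_H holds the four
 * most significant bytes of the address and MAC_ADDR_L the two least
 * significant ones.  For example, 00:11:22:33:44:55 reads back as
 * MAC_ADDR_H == 0x00112233 and MAC_ADDR_L == 0x4455.
 */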
static void
mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
{
	uint32_t mac_l, mac_h;

	/* XXX use currently programmed MAC address; eventually this info
	 * will be provided by the loader */

	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);

	addr[0] = (mac_h & 0xff000000) >> 24;
	addr[1] = (mac_h & 0x00ff0000) >> 16;
	addr[2] = (mac_h & 0x0000ff00) >> 8;
	addr[3] = (mac_h & 0x000000ff);
	addr[4] = (mac_l & 0x0000ff00) >> 8;
	addr[5] = (mac_l & 0x000000ff);
}

static uint32_t
mge_tfut_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 4);
	case 2:
	default:
		return ((val & 0xffff) << 4);
	}
}

static uint32_t
mge_rx_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 8);
	case 2:
	default:
		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
	}
}

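/*
 * The two helpers above place an interrupt-coalescing time value into the
 * version-dependent IPG register field: on v1 controllers the field is
 * 14 bits wide, on v2 it grows to 16 bits and, for RX, bit 15 of the value
 * is relocated to bit 25 of the SDMA configuration register while bits 14:0
 * land in bits 21:7.
 */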
static void
mge_ver_params(struct mge_softc *sc)
{
	uint32_t d, r;

	soc_id(&d, &r);
	if (d == MV_DEV_88F6281 || d == MV_DEV_MV78100 ||
	    d == MV_DEV_MV78100_Z0) {
		sc->mge_ver = 2;
		sc->mge_mtu = 0x4e8;
		sc->mge_tfut_ipg_max = 0xFFFF;
		sc->mge_rx_ipg_max = 0xFFFF;
		sc->mge_tx_arb_cfg = 0xFC0000FF;
		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	} else {
		sc->mge_ver = 1;
		sc->mge_mtu = 0x458;
		sc->mge_tfut_ipg_max = 0x3FFF;
		sc->mge_rx_ipg_max = 0x3FFF;
		sc->mge_tx_arb_cfg = 0x000000FF;
		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	}
}

static void
mge_set_mac_address(struct mge_softc *sc)
{
	char *if_mac;
	uint32_t mac_l, mac_h;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	if_mac = (char *)IF_LLADDR(sc->ifp);

	mac_l = (if_mac[4] << 8) | (if_mac[5]);
	mac_h = (if_mac[0] << 24) | (if_mac[1] << 16) |
	    (if_mac[2] << 8) | (if_mac[3] << 0);

	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);

	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
}

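/*
 * The unicast filter is a 16-entry table of one-byte entries indexed by the
 * low nibble of the last address byte; each entry carries an enable bit
 * (bit 0) and the target RX queue in bits 3:1.  Assuming
 * MGE_UCAST_REG_NUMBER is 4 (four 32-bit registers of four entries each),
 * an address ending in 0x25 selects entry 5: register 1, byte offset 8.
 */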
static void
mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
{
	uint32_t reg_idx, reg_off, reg_val, i;

	last_byte &= 0xf;
	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
	reg_val = (1 | (queue << 1)) << reg_off;

	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
		if (i == reg_idx)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
		else
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
	}
}

static void
mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
{
	uint32_t port_config;
	uint32_t reg_val, i;

	/* Enable or disable promiscuous mode as needed */
	if (sc->ifp->if_flags & IFF_PROMISC) {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config |= PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
		    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
		}

		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);

	} else {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config &= ~PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
		}

		mge_set_mac_address(sc);
	}
}

static void
mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;

	*paddr = segs->ds_addr;
}

static int
mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    bus_addr_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error;
	int nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (nsegs != 1 || error)
		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}

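/*
 * Descriptors are allocated back to front so that each one can be linked to
 * its successor as soon as it is created; once the loop terminates,
 * desc_paddr holds the physical address of tab[0], and the write after the
 * loop closes the ring by pointing the last descriptor back at the first.
 */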
static int
mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t *buffer_tag)
{
	struct mge_desc_wrapper *dw;
	bus_addr_t desc_paddr;
	int i, error;

	desc_paddr = 0;
	for (i = size - 1; i >= 0; i--) {
		dw = &(tab[i]);
		error = bus_dmamem_alloc(sc->mge_desc_dtag,
		    (void**)&(dw->mge_desc),
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &(dw->desc_dmap));

		if (error) {
			if_printf(sc->ifp, "failed to allocate DMA memory\n");
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);

		if (error) {
			if_printf(sc->ifp, "can't load descriptor\n");
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		/* Chain descriptors */
		dw->mge_desc->next_desc = desc_paddr;
		desc_paddr = dw->mge_desc_paddr;
	}
	tab[size - 1].mge_desc->next_desc = desc_paddr;

	/* Allocate a busdma tag for mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    8, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    buffer_tag);			/* dmat */
	if (error) {
		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	/* Create TX busdma maps */
	for (i = 0; i < size; i++) {
		dw = &(tab[i]);
		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
		if (error) {
			if_printf(sc->ifp, "failed to create map for mbuf\n");
			return (ENXIO);
		}

		dw->buffer = (struct mbuf*)NULL;
		dw->mge_desc->buffer = (bus_addr_t)NULL;
	}

	return (0);
}

static int
mge_allocate_dma(struct mge_softc *sc)
{
	int error;
	struct mge_desc_wrapper *dw;
	int i;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    16, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->mge_desc_dtag);		/* dmat */
	if (error) {
		device_printf(sc->dev, "failed to create descriptor dma tag\n");
		return (ENXIO);
	}

	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
	    &sc->mge_tx_dtag);
	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;

	return (0);
}

static void
mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
{
	struct mge_desc_wrapper *dw;
	int i;

	for (i = 0; i < size; i++) {
		/* Free the mbuf, if any */
		dw = &(tab[i]);

		if (dw->buffer_dmap) {
			if (free_mbufs) {
				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
			}
			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
			if (free_mbufs)
				m_freem(dw->buffer);
		}
		/* Free descriptor memory */
		if (dw->desc_dmap) {
			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
		}
	}
}

static void
mge_free_dma(struct mge_softc *sc)
{
	/* Free descriptors and mbufs */
	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);

	/* Destroy mbuf dma tag */
	bus_dma_tag_destroy(sc->mge_tx_dtag);
	bus_dma_tag_destroy(sc->mge_rx_dtag);
	/* Destroy descriptors tag */
	bus_dma_tag_destroy(sc->mge_desc_dtag);
}

static void
mge_reinit_rx(struct mge_softc *sc)
{
	struct mge_desc_wrapper *dw;
	int i;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);

	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
	sc->rx_desc_curr = 0;

	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
}

#ifdef DEVICE_POLLING
static poll_handler_t mge_poll;

static int
mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct mge_softc *sc = ifp->if_softc;
	uint32_t int_cause, int_cause_ext;
	int rx_npkts = 0;

	MGE_GLOBAL_LOCK(sc);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		MGE_GLOBAL_UNLOCK(sc);
		return (rx_npkts);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

		/* Check for resource error */
		if (int_cause & MGE_PORT_INT_RXERRQ0)
			mge_reinit_rx(sc);

		if (int_cause || int_cause_ext) {
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		}
	}

	mge_intr_tx_locked(sc);
	rx_npkts = mge_intr_rx_locked(sc, count);

	MGE_GLOBAL_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

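/*
 * Locking note: TX and RX state are protected by separate mutexes so the
 * transmit and receive interrupt handlers can run concurrently;
 * MGE_GLOBAL_LOCK() is assumed to acquire both, and is used wherever the
 * two paths are touched together (init, stop, ioctl, polling).
 */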
static int
mge_attach(device_t dev)
{
	struct mge_softc *sc;
	struct mii_softc *miisc;
	struct ifnet *ifp;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	int i, error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	if (device_get_unit(dev) == 0)
		sc_mge0 = sc;

	/* Set chip version-dependent parameters */
	mge_ver_params(sc);

	/* Initialize mutexes */
	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
	    MTX_DEF);
	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
	    MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		mge_detach(dev);
		return (ENXIO);
	}

	/* Allocate DMA, buffers, buffer descriptors */
	error = mge_allocate_dma(sc);
	if (error) {
		mge_detach(dev);
		return (ENXIO);
	}

	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Configure defaults for interrupts coalescing */
	sc->rx_ic_time = 768;
	sc->tx_ic_time = 768;
	mge_add_sysctls(sc);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		mge_detach(dev);
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = MGE_CHECKSUM_FEATURES;

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ifp->if_init = mge_init;
	ifp->if_start = mge_start;
	ifp->if_ioctl = mge_ioctl;

	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	mge_get_mac_address(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);
	callout_init(&sc->wd_callout, 0);

	/* Probe PHY(s) */
	error = mii_phy_probe(dev, &sc->miibus, mge_ifmedia_upd,
	    mge_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII failed to find PHY\n");
		mge_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Tell the MAC where to find the PHY so autoneg works */
	miisc = LIST_FIRST(&sc->mii->mii_phys);
	MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);

	/* Attach interrupt handlers */
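	/*
	 * Note: only the first two vectors (RX and TX) get handlers here;
	 * the misc, summary and error interrupts declared in mge_intrs are
	 * left unwired even though res_spec allocates all five.
	 */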
	for (i = 0; i < 2; ++i) {
		error = bus_setup_intr(dev, sc->res[1 + i],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
		    sc, &sc->ih_cookie[i]);
		if (error) {
			device_printf(dev, "could not setup %s\n",
			    mge_intrs[i].description);
			mge_detach(dev);
			return (error);
		}
	}

	return (0);
}

static int
mge_detach(device_t dev)
{
	struct mge_softc *sc;
	int error, i;

	sc = device_get_softc(dev);

	/* Stop controller and free TX queue */
	if (sc->ifp)
		mge_shutdown(dev);

	/* Wait for the tick callout to finish */
	callout_drain(&sc->wd_callout);

	/* Stop and release all interrupts */
	for (i = 0; i < 2; ++i) {
		if (!sc->ih_cookie[i])
			continue;

		error = bus_teardown_intr(dev, sc->res[1 + i],
		    sc->ih_cookie[i]);
		if (error)
			device_printf(dev, "could not release %s\n",
			    mge_intrs[i].description);
	}

	/* Detach network interface */
	if (sc->ifp) {
		ether_ifdetach(sc->ifp);
		if_free(sc->ifp);
	}

	/* Free DMA resources */
	mge_free_dma(sc);

	/* Release IO and IRQ resources */
	bus_release_resources(dev, res_spec, sc->res);

	/* Destroy mutexes */
	mtx_destroy(&sc->receive_lock);
	mtx_destroy(&sc->transmit_lock);

	return (0);
}

static void
mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mge_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	MGE_TRANSMIT_LOCK(sc);

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	MGE_TRANSMIT_UNLOCK(sc);
}

static uint32_t
mge_set_port_serial_control(uint32_t media)
{
	uint32_t port_config;

	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);

	if (IFM_TYPE(media) == IFM_ETHER) {
		switch (IFM_SUBTYPE(media)) {
		case IFM_AUTO:
			break;
		case IFM_1000_T:
			port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
			    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		case IFM_100_TX:
			port_config |= (PORT_SERIAL_MII_SPEED_100 |
			    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		case IFM_10_T:
			port_config |= (PORT_SERIAL_AUTONEG |
			    PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		}
		if (media & IFM_FDX)
			port_config |= PORT_SERIAL_FULL_DUPLEX;
	}
	return (port_config);
}

static int
mge_ifmedia_upd(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP) {
		MGE_GLOBAL_LOCK(sc);

		sc->mge_media_status = sc->mii->mii_media.ifm_media;
		mii_mediachg(sc->mii);
		mge_init_locked(sc);

		MGE_GLOBAL_UNLOCK(sc);
	}

	return (0);
}

static void
mge_init(void *arg)
{
	struct mge_softc *sc = arg;

	MGE_GLOBAL_LOCK(sc);

	mge_init_locked(arg);

	MGE_GLOBAL_UNLOCK(sc);
}

static void
mge_init_locked(void *arg)
{
	struct mge_softc *sc = arg;
	struct mge_desc_wrapper *dw;
	volatile uint32_t reg_val;
	int i, count;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	/* Stop interface */
	mge_stop(sc);

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Set MAC address */
	mge_set_mac_address(sc);

	/* Setup multicast filters */
	mge_setup_multicast(sc);

	if (sc->mge_ver == 2) {
		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
	}

	/* Initialize TX queue configuration registers */
	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);

	/* Clear TX queue configuration registers for unused queues */
	for (i = 1; i < 7; i++) {
		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
	}

	/* Set default MTU (sc->mge_mtu holds the MTU register offset) */
	MGE_WRITE(sc, sc->mge_mtu, 0);

	/* Port configuration */
	MGE_WRITE(sc, MGE_PORT_CONFIG,
	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
	    PORT_CONFIG_ARO_RXQ(0));
	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);

	/* Setup port configuration */
	reg_val = mge_set_port_serial_control(sc->mge_media_status);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);

	/* Setup SDMA configuration */
	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
	    MGE_SDMA_TX_BYTE_SWAP |
	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));

	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);

	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Reset descriptor indexes */
	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Enable RX descriptors */
	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &sc->mge_rx_desc[i];
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		dw->mge_desc->buff_size = MCLBYTES;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

	/* Enable port */
	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val |= PORT_SERIAL_ENABLE;
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
	count = 0x100000;
	for (;;) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (reg_val & MGE_STATUS_LINKUP)
			break;
		DELAY(100);
		if (--count == 0) {
			if_printf(sc->ifp, "Timeout on link-up\n");
			break;
		}
	}

	/* Setup interrupts coalescing */
	mge_set_rxic(sc);
	mge_set_txic(sc);

	/* Enable interrupts */
#ifdef DEVICE_POLLING
	/*
	 * ...only if polling is not turned on. Disable interrupts explicitly
	 * if polling is enabled.
	 */
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		mge_intrs_ctrl(sc, 0);
	else
#endif /* DEVICE_POLLING */
	mge_intrs_ctrl(sc, 1);

	/* Activate network interface */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->wd_timer = 0;

	/* Schedule watchdog timeout */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_intr_err(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_misc(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_rx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause, int_cause_ext;

	MGE_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_RECEIVE_UNLOCK(sc);
		return;
	}
#endif

	/* Get interrupt cause */
	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	/* Check for resource error */
	if (int_cause & MGE_PORT_INT_RXERRQ0) {
		mge_reinit_rx(sc);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
		    int_cause & ~MGE_PORT_INT_RXERRQ0);
	}

	int_cause &= MGE_PORT_INT_RXQ0;
	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;

	if (int_cause || int_cause_ext) {
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		mge_intr_rx_locked(sc, -1);
	}

	MGE_RECEIVE_UNLOCK(sc);
}

static int
mge_intr_rx_locked(struct mge_softc *sc, int count)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t status;
	uint16_t bufsize;
	struct mge_desc_wrapper* dw;
	struct mbuf *mb;
	int rx_npkts = 0;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	while (count != 0) {
		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get status */
		status = dw->mge_desc->cmd_status;
		bufsize = dw->mge_desc->buff_size;
		if ((status & MGE_DMA_OWNED) != 0)
			break;

		if (dw->mge_desc->byte_count &&
		    !(status & MGE_ERR_SUMMARY)) {

			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
			    BUS_DMASYNC_POSTREAD);

			mb = m_devget(dw->buffer->m_data,
			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
			    0, ifp, NULL);

			if (mb == NULL)
				/* Give up if no mbufs */
				break;

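			/*
			 * The controller is assumed to store each received
			 * frame at a 2-byte offset so the IP header ends up
			 * 32-bit aligned; strip that padding before passing
			 * the mbuf up the stack.
			 */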
			mb->m_len -= 2;
			mb->m_pkthdr.len -= 2;
			mb->m_data += 2;

			mge_offload_process_frame(ifp, mb, status,
			    bufsize);

			MGE_RECEIVE_UNLOCK(sc);
			(*ifp->if_input)(ifp, mb);
			MGE_RECEIVE_LOCK(sc);
			rx_npkts++;
		}

		dw->mge_desc->byte_count = 0;
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (count > 0)
			count -= 1;
	}

	return (rx_npkts);
}

static void
mge_intr_sum(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_tx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause_ext;

	MGE_TRANSMIT_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_TRANSMIT_UNLOCK(sc);
		return;
	}
#endif

	/* Ack the interrupt */
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
	    int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);

	mge_intr_tx_locked(sc);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_intr_tx_locked(struct mge_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	uint32_t status;
	int send = 0;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	/* Disable watchdog */
	sc->wd_timer = 0;

	while (sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
		    MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		/* Update collision statistics */
		if (status & MGE_ERR_SUMMARY) {
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
				ifp->if_collisions++;
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
				ifp->if_collisions += 16;
		}

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
		send++;

		ifp->if_opackets++;
	}

	if (send) {
		/* Now send anything that was pending */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		mge_start_locked(ifp);
	}
}

static int
mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error;
	uint32_t flags;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		MGE_GLOBAL_LOCK(sc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = ifp->if_flags ^ sc->mge_if_flags;
				if (flags & IFF_PROMISC)
					mge_set_prom_mode(sc,
					    MGE_RX_DEFAULT_QUEUE);

				if (flags & IFF_ALLMULTI)
					mge_setup_multicast(sc);
			} else
				mge_init_locked(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			mge_stop(sc);

		sc->mge_if_flags = ifp->if_flags;
		MGE_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			MGE_GLOBAL_LOCK(sc);
			mge_setup_multicast(sc);
			MGE_GLOBAL_UNLOCK(sc);
		}
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(mge_poll, ifp);
				if (error)
					return (error);

				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 1);
				ifp->if_capenable &= ~IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;
	case SIOCGIFMEDIA: /* fall through */
	case SIOCSIFMEDIA:
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
		    && !(ifr->ifr_media & IFM_FDX)) {
			device_printf(sc->dev,
			    "1000baseT half-duplex unsupported\n");
			return (0);
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

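/*
 * All PHYs are reached through the SMI register of the first mge unit
 * (sc_mge0), since they hang off the first controller's MDIO bus.  The
 * 32-bit SMI word encodes the opcode, the register number in bits 25:21
 * and the PHY address in bits 20:16; read data comes back in the low
 * 16 bits.
 */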
static int
mge_miibus_readreg(device_t dev, int phy, int reg)
{
	uint32_t retries;

	/*
	 * We assume a static PHY address <=> device unit mapping:
	 * PHY address = MII_ADDR_BASE + device unit.
	 * This is true for most Marvell boards.
	 *
	 * The check below ensures proper PHY detection on each device unit.
	 */

	if ((MII_ADDR_BASE + device_get_unit(dev)) != phy)
		return (0);

	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));

	retries = MGE_SMI_READ_RETRIES;
	while (--retries &&
	    !(MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_READVALID))
		DELAY(MGE_SMI_READ_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while reading from PHY\n");

	return (MGE_READ(sc_mge0, MGE_REG_SMI) & 0xffff);
}

static int
mge_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	uint32_t retries;

	if ((MII_ADDR_BASE + device_get_unit(dev)) != phy)
		return (0);

	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));

	retries = MGE_SMI_WRITE_RETRIES;
	while (--retries && MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_BUSY)
		DELAY(MGE_SMI_WRITE_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while writing to PHY\n");
	return (0);
}

static int
mge_probe(device_t dev)
{

	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
	return (BUS_PROBE_DEFAULT);
}

static int
mge_resume(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}

static int
mge_shutdown(device_t dev)
{
	struct mge_softc *sc = device_get_softc(dev);

	MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(sc->ifp);
#endif

	mge_stop(sc);

	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

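/*
 * Each packet occupies exactly one TX descriptor: mge_start_locked()
 * defragments every mbuf chain beforehand and the mbuf busdma tag is
 * created with nsegments = 1, so bus_dmamap_load_mbuf_sg() is expected to
 * return a single segment here.
 */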
static int
mge_encap(struct mge_softc *sc, struct mbuf *m0)
{
	struct mge_desc_wrapper *dw = NULL;
	struct ifnet *ifp;
	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
	bus_dmamap_t mapp;
	int error;
	int seg, nsegs;
	int desc_no;

	ifp = sc->ifp;

	/* Check for free descriptors */
	if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
		/* No free descriptors */
		return (-1);
	}

	/* Fetch unused map */
	desc_no = sc->tx_desc_curr;
	dw = &sc->mge_tx_desc[desc_no];
	mapp = dw->buffer_dmap;

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0 || nsegs != 1) {
		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
		return ((error != 0) ? error : -1);
	}

	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);

	/* Everything is ok, now we can send buffers */
	for (seg = 0; seg < nsegs; seg++) {
		dw->mge_desc->byte_count = segs[seg].ds_len;
		dw->mge_desc->buffer = segs[seg].ds_addr;
		dw->buffer = m0;
		dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
		    MGE_DMA_OWNED;

		if (seg == 0)
			mge_offload_setup_descriptor(sc, dw);
	}

	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
	sc->tx_desc_used_count++;
	return (0);
}

static void
mge_tick(void *msc)
{
	struct mge_softc *sc = msc;

	/* Check for TX timeout */
	mge_watchdog(sc);

	mii_tick(sc->mii);

	/* Check for media type change */
	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
		mge_ifmedia_upd(sc->ifp);

	/* Schedule another timeout one second from now */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_watchdog(struct mge_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;

	MGE_GLOBAL_LOCK(sc);

	if (sc->wd_timer == 0 || --sc->wd_timer) {
		MGE_GLOBAL_UNLOCK(sc);
		return;
	}

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	mge_stop(sc);
	mge_init_locked(sc);

	MGE_GLOBAL_UNLOCK(sc);
}

static void
mge_start(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK(sc);

	mge_start_locked(ifp);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_start_locked(struct ifnet *ifp)
{
	struct mge_softc *sc;
	struct mbuf *m0, *mtmp;
	uint32_t reg_val, queued = 0;

	sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (;;) {
		/* Get packet from the queue */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		mtmp = m_defrag(m0, M_DONTWAIT);
		if (mtmp)
			m0 = mtmp;

		if (mge_encap(sc, m0)) {
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		BPF_MTAP(ifp, m0);
	}

	if (queued) {
		/* Enable transmitter and watchdog timer */
		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
		sc->wd_timer = 5;
	}
}

static void
mge_stop(struct mge_softc *sc)
{
	struct ifnet *ifp;
	volatile uint32_t reg_val, status;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	int count;

	ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Stop tick engine */
	callout_stop(&sc->wd_callout);

	/* Disable interface */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->wd_timer = 0;

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Disable Rx and Tx */
	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);

	/* Remove pending data from TX queue */
	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
	    sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
		    MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);

		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
	}

	/* Wait for end of transmission */
	count = 0x100000;
	while (count--) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
			break;
		DELAY(100);
	}

	if (!count)
		if_printf(ifp,
		    "%s: timeout while waiting for end of transmission\n",
		    __FUNCTION__);

	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val &= ~(PORT_SERIAL_ENABLE);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
}

static int
mge_suspend(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}

static void
mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize)
{
	int csum_flags = 0;

	if (ifp->if_capenable & IFCAP_RXCSUM) {
		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
		    (status & MGE_RX_L4_CSUM_OK)) {
			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			frame->m_pkthdr.csum_data = 0xFFFF;
		}

		frame->m_pkthdr.csum_flags = csum_flags;
	}
}

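/*
 * TX offload: the descriptor command word carries the IP header size and
 * flags requesting IP and/or L4 checksum generation.  Only plain IPv4
 * frames (optionally VLAN tagged) are handled; for anything else the
 * routine below bails out with a warning and leaves the offload bits clear.
 */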
static void
mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
{
	struct mbuf *m0 = dw->buffer;
	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
	int csum_flags = m0->m_pkthdr.csum_flags;
	int cmd_status = 0;
	struct ip *ip;
	int ehlen, etype;

	if (csum_flags) {
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			etype = ntohs(eh->evl_proto);
			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			cmd_status |= MGE_TX_VLAN_TAGGED;
		} else {
			etype = ntohs(eh->evl_encap_proto);
			ehlen = ETHER_HDR_LEN;
		}

		if (etype != ETHERTYPE_IP) {
			if_printf(sc->ifp,
			    "TCP/IP Offload enabled for unsupported "
			    "protocol!\n");
			return;
		}

		ip = (struct ip *)(m0->m_data + ehlen);
		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);

		if ((m0->m_flags & M_FRAG) == 0)
			cmd_status |= MGE_TX_NOT_FRAGMENT;
	}

	if (csum_flags & CSUM_IP)
		cmd_status |= MGE_TX_GEN_IP_CSUM;

	if (csum_flags & CSUM_TCP)
		cmd_status |= MGE_TX_GEN_L4_CSUM;

	if (csum_flags & CSUM_UDP)
		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;

	dw->mge_desc->cmd_status |= cmd_status;
}

static void
mge_intrs_ctrl(struct mge_softc *sc, int enable)
{

	if (enable) {
		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
		    MGE_PORT_INT_EXT_TXBUF0);
	} else {
		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_INT_MASK, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
	}
}

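/*
 * Table-driven CRC-8 (polynomial 0x07, i.e. x^8 + x^2 + x + 1, initial
 * value 0), used to hash multicast addresses into the "other" multicast
 * filter table.
 */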
static uint8_t
mge_crc8(uint8_t *data, int size)
{
	uint8_t crc = 0;
	static const uint8_t ct[256] = {
		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
	};

	while (size--)
		crc = ct[crc ^ *(data++)];

	return (crc);
}

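/*
 * Multicast filtering: addresses of the form 01:00:5E:00:00:xx index the
 * "special" table directly by their last byte, while all other multicast
 * addresses are hashed with mge_crc8() into the "other" table.  With
 * IFF_ALLMULTI set, every entry of both tables is enabled.
 */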
static void
mge_setup_multicast(struct mge_softc *sc)
{
	uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
	uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
	uint32_t smt[MGE_MCAST_REG_NUMBER];
	uint32_t omt[MGE_MCAST_REG_NUMBER];
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	uint8_t *mac;
	int i;

	if (ifp->if_flags & IFF_ALLMULTI) {
		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
			smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
	} else {
		memset(smt, 0, sizeof(smt));
		memset(omt, 0, sizeof(omt));

		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;

			mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			if (memcmp(mac, special, sizeof(special)) == 0) {
				i = mac[5];
				smt[i >> 2] |= v << ((i & 0x03) << 3);
			} else {
				i = mge_crc8(mac, ETHER_ADDR_LEN);
				omt[i >> 2] |= v << ((i & 0x03) << 3);
			}
		}
		if_maddr_runlock(ifp);
	}

	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
	}
}

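/*
 * Interrupt coalescing setup: the sysctl-tunable rx_ic_time/tx_ic_time
 * values are clamped to the version-dependent maxima and merged into the
 * IPG fields of the SDMA configuration and TX FIFO urgent threshold
 * registers, respectively.
 */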
static void
mge_set_rxic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
		sc->rx_ic_time = sc->mge_rx_ipg_max;

	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
}

static void
mge_set_txic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
		sc->tx_ic_time = sc->mge_tfut_ipg_max;

	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
}

static int
mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
{
	struct mge_softc *sc = (struct mge_softc *)arg1;
	uint32_t time;
	int error;

	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
	error = sysctl_handle_int(oidp, &time, 0, req);
	if (error != 0)
		return (error);

	MGE_GLOBAL_LOCK(sc);
	if (arg2 == MGE_IC_RX) {
		sc->rx_ic_time = time;
		mge_set_rxic(sc);
	} else {
		sc->tx_ic_time = time;
		mge_set_txic(sc);
	}
	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

static void
mge_add_sysctls(struct mge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD, 0, "MGE Interrupts coalescing");
	children = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
	    "I", "IC RX time threshold");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
	    "I", "IC TX time threshold");
}
1831