xref: /freebsd/sys/dev/mge/if_mge.c (revision 3c6e15bceeab4470243c60c9a4b5b9cafca9abaa)
1 /*-
2  * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
3  * All rights reserved.
4  *
5  * Developed by Semihalf.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of MARVELL nor the names of contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #ifdef HAVE_KERNEL_OPTION_HEADERS
33 #include "opt_device_polling.h"
34 #endif
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/endian.h>
42 #include <sys/mbuf.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/socket.h>
48 #include <sys/sysctl.h>
49 
50 #include <net/ethernet.h>
51 #include <net/bpf.h>
52 #include <net/if.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_types.h>
57 #include <net/if_vlan_var.h>
58 
59 #include <netinet/in_systm.h>
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
62 
63 #include <sys/sockio.h>
64 #include <sys/bus.h>
65 #include <machine/bus.h>
66 #include <sys/rman.h>
67 #include <machine/resource.h>
68 
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
71 
72 #if defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY)
73 #define  MGE_VER2	1
74 #endif
75 
76 #define	MV_PHY_ADDR_BASE	8
77 
78 #include <dev/mge/if_mgevar.h>
79 #include <arm/mv/mvreg.h>
80 
81 #include "miibus_if.h"
82 
83 /* PHY registers are in the address space of the first mge unit */
84 static struct mge_softc *sc_mge0 = NULL;
85 
86 static int mge_probe(device_t dev);
87 static int mge_attach(device_t dev);
88 static int mge_detach(device_t dev);
89 static int mge_shutdown(device_t dev);
90 static int mge_suspend(device_t dev);
91 static int mge_resume(device_t dev);
92 
93 static int mge_miibus_readreg(device_t dev, int phy, int reg);
94 static void mge_miibus_writereg(device_t dev, int phy, int reg, int value);
95 
96 static int mge_ifmedia_upd(struct ifnet *ifp);
97 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
98 
99 static void mge_init(void *arg);
100 static void mge_init_locked(void *arg);
101 static void mge_start(struct ifnet *ifp);
102 static void mge_start_locked(struct ifnet *ifp);
103 static void mge_watchdog(struct mge_softc *sc);
104 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
105 
106 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
107 static void mge_intr_rx(void *arg);
108 static void mge_intr_rx_locked(struct mge_softc *sc, int count);
109 static void mge_intr_tx(void *arg);
110 static void mge_intr_tx_locked(struct mge_softc *sc);
111 static void mge_intr_misc(void *arg);
112 static void mge_intr_sum(void *arg);
113 static void mge_intr_err(void *arg);
114 static void mge_stop(struct mge_softc *sc);
115 static void mge_tick(void *msc);
116 static uint32_t mge_set_port_serial_control(uint32_t media);
117 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
118 static void mge_set_mac_address(struct mge_softc *sc);
119 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
120     uint8_t queue);
121 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
122 static int mge_allocate_dma(struct mge_softc *sc);
123 static int mge_alloc_desc_dma(struct mge_softc *sc,
124     struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
125 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
126     struct mbuf **mbufp, bus_addr_t *paddr);
127 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error);
128 static void mge_free_dma(struct mge_softc *sc);
129 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
130     bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
131 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
132     uint32_t status, uint16_t bufsize);
133 static void mge_offload_setup_descriptor(struct mge_softc *sc,
134     struct mge_desc_wrapper *dw);
135 static uint8_t mge_crc8(uint8_t *data, int size);
136 static void mge_setup_multicast(struct mge_softc *sc);
137 static void mge_set_rxic(struct mge_softc *sc);
138 static void mge_set_txic(struct mge_softc *sc);
139 static void mge_add_sysctls(struct mge_softc *sc);
140 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
141 
142 static device_method_t mge_methods[] = {
143 	/* Device interface */
144 	DEVMETHOD(device_probe,		mge_probe),
145 	DEVMETHOD(device_attach,	mge_attach),
146 	DEVMETHOD(device_detach,	mge_detach),
147 	DEVMETHOD(device_shutdown,	mge_shutdown),
148 	DEVMETHOD(device_suspend,	mge_suspend),
149 	DEVMETHOD(device_resume,	mge_resume),
150 	/* MII interface */
151 	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
152 	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
153 	{ 0, 0 }
154 };
155 
156 static driver_t mge_driver = {
157 	"mge",
158 	mge_methods,
159 	sizeof(struct mge_softc),
160 };
161 
162 static devclass_t mge_devclass;
163 
164 DRIVER_MODULE(mge, mbus, mge_driver, mge_devclass, 0, 0);
165 DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
166 MODULE_DEPEND(mge, ether, 1, 1, 1);
167 MODULE_DEPEND(mge, miibus, 1, 1, 1);
168 
169 static struct resource_spec res_spec[] = {
170 	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
171 	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
172 	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
173 	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
174 	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
175 	{ SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
176 	{ -1, 0 }
177 };
178 
179 static struct {
180 	driver_intr_t *handler;
181 	char * description;
182 } mge_intrs[MGE_INTR_COUNT] = {
183 	{ mge_intr_rx,	"GbE receive interrupt" },
184 	{ mge_intr_tx,	"GbE transmit interrupt" },
185 	{ mge_intr_misc, "GbE misc interrupt" },
186 	{ mge_intr_sum,	"GbE summary interrupt" },
187 	{ mge_intr_err,	"GbE error interrupt" },
188 };
189 
190 static void
191 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
192 {
193 	uint32_t mac_l, mac_h;
194 
195 	/* XXX use currently programmed MAC address; eventually this info will
196 	 * be provided by the loader */
197 
198 	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
199 	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
200 
201 	addr[0] = (mac_h & 0xff000000) >> 24;
202 	addr[1] = (mac_h & 0x00ff0000) >> 16;
203 	addr[2] = (mac_h & 0x0000ff00) >> 8;
204 	addr[3] = (mac_h & 0x000000ff);
205 	addr[4] = (mac_l & 0x0000ff00) >> 8;
206 	addr[5] = (mac_l & 0x000000ff);
207 }
208 
209 static void
210 mge_set_mac_address(struct mge_softc *sc)
211 {
212 	char *if_mac;
213 	uint32_t mac_l, mac_h;
214 
215 	MGE_GLOBAL_LOCK_ASSERT(sc);
216 
217 	if_mac = (char *)IF_LLADDR(sc->ifp);
218 
219 	mac_l = (if_mac[4] << 8) | (if_mac[5]);
220 	mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
221 	    (if_mac[2] << 8) | (if_mac[3] << 0);
222 
223 	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
224 	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
225 
226 	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
227 }
228 
229 static void
230 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
231 {
232 	uint32_t reg_idx, reg_off, reg_val, i;
233 
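	/*
	 * The unicast filter table is indexed by the low nibble of the
	 * last MAC address byte; each byte-wide entry holds a pass bit
	 * (bit 0) and the RX queue number.
	 */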
234 	last_byte &= 0xf;
235 	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
236 	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
237 	reg_val = (1 | (queue << 1)) << reg_off;
238 
239 	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
240 		if (i == reg_idx)
241 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
242 		else
243 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
244 	}
245 }
246 
247 static void
248 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
249 {
250 	uint32_t port_config;
251 	uint32_t reg_val, i;
252 
253 	/* Enable or disable promiscuous mode as needed */
254 	if (sc->ifp->if_flags & IFF_PROMISC) {
255 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
256 		port_config |= PORT_CONFIG_UPM;
257 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
258 
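		/*
		 * Accept everything: replicate the pass bit and RX queue
		 * number into every byte lane of each filter register.
		 */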
259 		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
260 		   (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
261 
262 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
263 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
264 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
265 		}
266 
267 		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
268 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
269 
270 	} else {
271 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
272 		port_config &= ~PORT_CONFIG_UPM;
273 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
274 
275 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
276 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
277 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
278 		}
279 
280 		mge_set_mac_address(sc);
281 	}
282 }
283 
284 static void
285 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
286 {
287 	u_int32_t *paddr;
288 
289 	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
290 	paddr = arg;
291 
292 	*paddr = segs->ds_addr;
293 }
294 
295 static int
296 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
297     bus_addr_t *paddr)
298 {
299 	struct mbuf *new_mbuf;
300 	bus_dma_segment_t seg[1];
301 	int error;
302 	int nsegs;
303 
304 	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
305 
306 	new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
307 	if (new_mbuf == NULL)
308 		return (ENOBUFS);
309 	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
310 
311 	if (*mbufp) {
312 		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
313 		bus_dmamap_unload(tag, map);
314 	}
315 
316 	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
317 	    BUS_DMA_NOWAIT);
318 	KASSERT(nsegs == 1, ("Too many segments returned!"));
319 	if (nsegs != 1 || error)
320 		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
321 
322 	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
323 
324 	(*mbufp) = new_mbuf;
325 	(*paddr) = seg->ds_addr;
326 	return (0);
327 }
328 
329 static int
330 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
331     uint32_t size, bus_dma_tag_t *buffer_tag)
332 {
333 	struct mge_desc_wrapper *dw;
334 	bus_addr_t desc_paddr;
335 	int i, error;
336 
337 	desc_paddr = 0;
338 	for (i = size - 1; i >= 0; i--) {
339 		dw = &(tab[i]);
340 		error = bus_dmamem_alloc(sc->mge_desc_dtag,
341 		    (void**)&(dw->mge_desc),
342 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
343 		    &(dw->desc_dmap));
344 
345 		if (error) {
346 			if_printf(sc->ifp, "failed to allocate DMA memory\n");
347 			dw->mge_desc = NULL;
348 			return (ENXIO);
349 		}
350 
351 		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
352 		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
353 		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
354 
355 		if (error) {
356 			if_printf(sc->ifp, "can't load descriptor\n");
357 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
358 			    dw->desc_dmap);
359 			dw->mge_desc = NULL;
360 			return (ENXIO);
361 		}
362 
363 		/* Chain descriptors */
364 		dw->mge_desc->next_desc = desc_paddr;
365 		desc_paddr = dw->mge_desc_paddr;
366 	}
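	/* Close the ring: the last descriptor points back to the first. */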
367 	tab[size - 1].mge_desc->next_desc = desc_paddr;
368 
369 	/* Allocate a busdma tag for mbufs. */
370 	error = bus_dma_tag_create(NULL,	/* parent */
371 	    8, 0,				/* alignment, boundary */
372 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
373 	    BUS_SPACE_MAXADDR,			/* highaddr */
374 	    NULL, NULL,				/* filtfunc, filtfuncarg */
375 	    MCLBYTES, 1,			/* maxsize, nsegments */
376 	    MCLBYTES, 0,			/* maxsegsz, flags */
377 	    NULL, NULL,				/* lockfunc, lockfuncarg */
378 	    buffer_tag);			/* dmat */
379 	if (error) {
380 		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
381 		return (ENXIO);
382 	}
383 
384 	/* Create TX busdma maps */
385 	for (i = 0; i < size; i++) {
386 		dw = &(tab[i]);
387 		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
388 		if (error) {
389 			if_printf(sc->ifp, "failed to create map for mbuf\n");
390 			return (ENXIO);
391 		}
392 
393 		dw->buffer = (struct mbuf*)NULL;
394 		dw->mge_desc->buffer = (bus_addr_t)NULL;
395 	}
396 
397 	return (0);
398 }
399 
400 static int
401 mge_allocate_dma(struct mge_softc *sc)
402 {
403 	int error;
404 	struct mge_desc_wrapper *dw;
405 	int num, i;
406 
407 
408 	num = MGE_TX_DESC_NUM + MGE_RX_DESC_NUM;
409 
410 	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
411 	error = bus_dma_tag_create(NULL,	/* parent */
412 	    16, 0,				/* alignment, boundary */
413 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
414 	    BUS_SPACE_MAXADDR,			/* highaddr */
415 	    NULL, NULL,				/* filtfunc, filtfuncarg */
416 	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
417 	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
418 	    NULL, NULL,				/* lockfunc, lockfuncarg */
419 	    &sc->mge_desc_dtag);		/* dmat */
420 
421 
422 	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
423 	    &sc->mge_tx_dtag);
424 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
425 	    &sc->mge_rx_dtag);
426 
427 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
428 		dw = &(sc->mge_rx_desc[i]);
429 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
430 		    &dw->mge_desc->buffer);
431 	}
432 
433 	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
434 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
435 
436 	return (0);
437 }
438 
439 static void
440 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
441     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
442 {
443 	struct mge_desc_wrapper *dw;
444 	int i;
445 
446 	for (i = 0; i < size; i++) {
447 		/* Free RX mbuf */
448 		dw = &(tab[i]);
449 
450 		if (dw->buffer_dmap) {
451 			if (free_mbufs) {
452 				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
453 				    BUS_DMASYNC_POSTREAD);
454 				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
455 			}
456 			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
457 			if (free_mbufs)
458 				m_freem(dw->buffer);
459 		}
460 		/* Free RX descriptors */
461 		if (dw->desc_dmap) {
462 			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
463 			    BUS_DMASYNC_POSTREAD);
464 			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
465 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
466 			    dw->desc_dmap);
467 		}
468 	}
469 }
470 
471 static void
472 mge_free_dma(struct mge_softc *sc)
473 {
474 	/* Free descriptors and mbufs */
475 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
476 	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
477 
478 	/* Destroy mbuf dma tag */
479 	bus_dma_tag_destroy(sc->mge_tx_dtag);
480 	bus_dma_tag_destroy(sc->mge_rx_dtag);
481 	/* Destroy descriptors tag */
482 	bus_dma_tag_destroy(sc->mge_desc_dtag);
483 }
484 
485 static void
486 mge_reinit_rx(struct mge_softc *sc)
487 {
488 	struct mge_desc_wrapper *dw;
489 	int i;
490 
491 	MGE_RECEIVE_LOCK(sc);
492 
493 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
494 
495 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
496 	    &sc->mge_rx_dtag);
497 
498 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
499 		dw = &(sc->mge_rx_desc[i]);
500 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
501 		    &dw->mge_desc->buffer);
502 	}
503 
504 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
505 	sc->rx_desc_curr = 0;
506 
507 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
508 	    sc->rx_desc_start);
509 
510 	/* Enable RX queue */
511 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
512 
513 	MGE_RECEIVE_UNLOCK(sc);
514 }
515 
516 #ifdef DEVICE_POLLING
517 static poll_handler_t mge_poll;
518 
519 static void
520 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
521 {
522 	struct mge_softc *sc = ifp->if_softc;
523 	uint32_t int_cause, int_cause_ext;
524 
525 	MGE_GLOBAL_LOCK(sc);
526 
527 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
528 		MGE_GLOBAL_UNLOCK(sc);
529 		return;
530 	}
531 
532 	if (cmd == POLL_AND_CHECK_STATUS) {
533 		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
534 		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
535 
536 		/* Check for resource error */
537 		if (int_cause & MGE_PORT_INT_RXERRQ0)
538 			mge_reinit_rx(sc);
539 
540 		if (int_cause || int_cause_ext) {
541 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
542 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
543 		}
544 	}
545 
546 	mge_intr_tx_locked(sc);
547 	mge_intr_rx_locked(sc, count);
548 
549 	MGE_GLOBAL_UNLOCK(sc);
550 }
551 #endif /* DEVICE_POLLING */
552 
553 static int
554 mge_attach(device_t dev)
555 {
556 	struct mge_softc *sc;
557 	struct ifnet *ifp;
558 	uint8_t hwaddr[ETHER_ADDR_LEN];
559 	int i, error;
560 
561 	sc = device_get_softc(dev);
562 	sc->dev = dev;
563 
564 	if (device_get_unit(dev) == 0)
565 		sc_mge0 = sc;
566 
567 	/* Initialize mutexes */
568 	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
569 	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);
570 
571 	/* Allocate IO and IRQ resources */
572 	error = bus_alloc_resources(dev, res_spec, sc->res);
573 	if (error) {
574 		device_printf(dev, "could not allocate resources\n");
575 		mge_detach(dev);
576 		return (ENXIO);
577 	}
578 
579 	/* Allocate DMA, buffers, buffer descriptors */
580 	error = mge_allocate_dma(sc);
581 	if (error) {
582 		mge_detach(dev);
583 		return (ENXIO);
584 	}
585 
586 	sc->tx_desc_curr = 0;
587 	sc->rx_desc_curr = 0;
588 	sc->tx_desc_used_idx = 0;
589 
590 	/* Configure interrupt coalescing defaults */
591 	sc->rx_ic_time = 768;
592 	sc->tx_ic_time = 768;
593 	mge_add_sysctls(sc);
594 
595 	/* Allocate network interface */
596 	ifp = sc->ifp = if_alloc(IFT_ETHER);
597 	if (ifp == NULL) {
598 		device_printf(dev, "if_alloc() failed\n");
599 		mge_detach(dev);
600 		return (ENOMEM);
601 	}
602 
603 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
604 	ifp->if_softc = sc;
605 	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
606 	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
607 	ifp->if_capenable = ifp->if_capabilities;
608 	ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
609 
610 #ifdef DEVICE_POLLING
611 	/* Advertise that polling is supported */
612 	ifp->if_capabilities |= IFCAP_POLLING;
613 #endif
614 
615 	ifp->if_init = mge_init;
616 	ifp->if_start = mge_start;
617 	ifp->if_ioctl = mge_ioctl;
618 
619 	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
620 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
621 	IFQ_SET_READY(&ifp->if_snd);
622 
623 	mge_get_mac_address(sc, hwaddr);
624 	ether_ifattach(ifp, hwaddr);
625 	callout_init(&sc->wd_callout, 0);
626 
627 	/* Probe PHY(s) */
628 	error = mii_phy_probe(dev, &sc->miibus, mge_ifmedia_upd, mge_ifmedia_sts);
629 	if (error) {
630 		device_printf(dev, "MII failed to find PHY\n");
631 		if_free(ifp);
632 		sc->ifp = NULL;
633 		mge_detach(dev);
634 		return (error);
635 	}
636 	sc->mii = device_get_softc(sc->miibus);
637 
638 	/* Attach interrupt handlers */
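	/* Only the first two interrupt lines (RX and TX) get handlers. */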
639 	for (i = 0; i < 2; ++i) {
640 		error = bus_setup_intr(dev, sc->res[1 + i],
641 		    INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
642 		    sc, &sc->ih_cookie[i]);
643 		if (error) {
644 			device_printf(dev, "could not setup %s\n",
645 			    mge_intrs[i].description);
646 			ether_ifdetach(sc->ifp);
647 			return (error);
648 		}
649 	}
650 
651 	return (0);
652 }
653 
654 static int
655 mge_detach(device_t dev)
656 {
657 	struct mge_softc *sc;
658 	int error, i;
659 
660 	sc = device_get_softc(dev);
661 
662 	/* Stop controller and free TX queue */
663 	if (sc->ifp)
664 		mge_shutdown(dev);
665 
666 	/* Wait for the tick callout to finish */
667 	callout_drain(&sc->wd_callout);
668 
669 	/* Stop and release all interrupts */
670 	for (i = 0; i < 2; ++i) {
671 		error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
672 		if (error)
673 			device_printf(dev, "could not release %s\n",
674 			    mge_intrs[i].description);
675 	}
676 
677 	/* Detach network interface */
678 	if (sc->ifp) {
679 		ether_ifdetach(sc->ifp);
680 		if_free(sc->ifp);
681 	}
682 
683 	/* Free DMA resources */
684 	mge_free_dma(sc);
685 
686 	/* Free IO memory handler */
687 	bus_release_resources(dev, res_spec, sc->res);
688 
689 	/* Destroy mutexes */
690 	mtx_destroy(&sc->receive_lock);
691 	mtx_destroy(&sc->transmit_lock);
692 
693 	return (0);
694 }
695 
696 static void
697 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
698 {
699 	struct mge_softc *sc = ifp->if_softc;
700 	struct mii_data *mii;
701 
702 	MGE_TRANSMIT_LOCK(sc);
703 
704 	mii = sc->mii;
705 	mii_pollstat(mii);
706 
707 	ifmr->ifm_active = mii->mii_media_active;
708 	ifmr->ifm_status = mii->mii_media_status;
709 
710 	MGE_TRANSMIT_UNLOCK(sc);
711 }
712 
713 static uint32_t
714 mge_set_port_serial_control(uint32_t media)
715 {
716 	uint32_t port_config;
717 
718 	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
719 	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
720 
721 	if (IFM_TYPE(media) == IFM_ETHER) {
722 		switch(IFM_SUBTYPE(media)) {
723 			case IFM_AUTO:
724 				break;
725 			case IFM_1000_T:
726 				port_config  |= (PORT_SERIAL_GMII_SPEED_1000 |
727 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
728 				    PORT_SERIAL_SPEED_AUTONEG);
729 				break;
730 			case IFM_100_TX:
731 				port_config  |= (PORT_SERIAL_MII_SPEED_100 |
732 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
733 				    PORT_SERIAL_SPEED_AUTONEG);
734 				break;
735 			case IFM_10_T:
736 				port_config  |= (PORT_SERIAL_AUTONEG |
737 				    PORT_SERIAL_AUTONEG_FC |
738 				    PORT_SERIAL_SPEED_AUTONEG);
739 				break;
740 		}
741 		if (media & IFM_FDX)
742 			port_config |= PORT_SERIAL_FULL_DUPLEX;
743 	}
744 	return (port_config);
745 }
746 
747 static int
748 mge_ifmedia_upd(struct ifnet *ifp)
749 {
750 	struct mge_softc *sc = ifp->if_softc;
751 
752 	if (ifp->if_flags & IFF_UP) {
753 		MGE_GLOBAL_LOCK(sc);
754 
755 		sc->mge_media_status = sc->mii->mii_media.ifm_media;
756 		mii_mediachg(sc->mii);
757 		mge_init_locked(sc);
758 
759 		MGE_GLOBAL_UNLOCK(sc);
760 	}
761 
762 	return (0);
763 }
764 
765 static void
766 mge_init(void *arg)
767 {
768 	struct mge_softc *sc = arg;
769 
770 	MGE_GLOBAL_LOCK(sc);
771 
772 	mge_init_locked(arg);
773 
774 	MGE_GLOBAL_UNLOCK(sc);
775 }
776 
777 static void
778 mge_init_locked(void *arg)
779 {
780 	struct mge_softc *sc = arg;
781 	struct mge_desc_wrapper *dw;
782 	volatile uint32_t reg_val;
783 	int i, count;
784 
785 
786 	MGE_GLOBAL_LOCK_ASSERT(sc);
787 
788 	/* Stop interface */
789 	mge_stop(sc);
790 
791 	/* Disable interrupts */
792 	mge_intrs_ctrl(sc, 0);
793 
794 	/* Set MAC address */
795 	mge_set_mac_address(sc);
796 
797 	/* Setup multicast filters */
798 	mge_setup_multicast(sc);
799 
800 #if defined(MGE_VER2)
801 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
802 	MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
803 #endif
804 	/* Initialize TX queue configuration registers */
805 	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), MGE_TX_TOKEN_Q0_DFLT);
806 	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), MGE_TX_TOKEN_Q0_DFLT);
807 	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), MGE_TX_ARB_Q0_DFLT);
808 
809 	for (i = 1; i < 7; i++) {
810 		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), MGE_TX_TOKEN_Q1_7_DFLT);
811 		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), MGE_TX_TOKEN_Q1_7_DFLT);
812 		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), MGE_TX_ARB_Q1_7_DFLT);
813 	}
814 
815 	/* Set default MTU */
816 	MGE_WRITE(sc, MGE_MTU, MGE_MTU_DEFAULT);
817 
818 	/* Port configuration */
819 	MGE_WRITE(sc, MGE_PORT_CONFIG,
820 	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
821 	    PORT_CONFIG_ARO_RXQ(0));
822 	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);
823 
824 	/* Setup port configuration */
825 	reg_val = mge_set_port_serial_control(sc->mge_media_status);
826 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
827 
828 	/* Setup SDMA configuration */
829 	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
830 	    MGE_SDMA_TX_BYTE_SWAP |
831 	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
832 	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
833 
834 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
835 
836 	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
837 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
838 	    sc->rx_desc_start);
839 
840 	/* Reset descriptor indexes */
841 	sc->tx_desc_curr = 0;
842 	sc->rx_desc_curr = 0;
843 	sc->tx_desc_used_idx = 0;
844 
845 	/* Enable RX descriptors */
846 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
847 		dw = &sc->mge_rx_desc[i];
848 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
849 		dw->mge_desc->buff_size = MCLBYTES;
850 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
851 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
852 	}
853 
854 	/* Enable RX queue */
855 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
856 
857 	/* Enable port */
858 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
859 	reg_val |= PORT_SERIAL_ENABLE;
860 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
861 	count = 0x100000;
862 	for (;;) {
863 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
864 		if (reg_val & MGE_STATUS_LINKUP)
865 			break;
866 		DELAY(100);
867 		if (--count == 0) {
868 			if_printf(sc->ifp, "Timeout on link-up\n");
869 			break;
870 		}
871 	}
872 
873 	/* Set up interrupt coalescing */
874 	mge_set_rxic(sc);
875 	mge_set_txic(sc);
876 
877 	/* Enable interrupts */
878 #ifdef DEVICE_POLLING
879 	/*
880 	 * ...only if polling is not turned on. Disable interrupts explicitly
881 	 * if polling is enabled.
882 	 */
883 	if (sc->ifp->if_capenable & IFCAP_POLLING)
884 		mge_intrs_ctrl(sc, 0);
885 	else
886 #endif /* DEVICE_POLLING */
887 	mge_intrs_ctrl(sc, 1);
888 
889 	/* Activate network interface */
890 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
891 	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
892 	sc->wd_timer = 0;
893 
894 	/* Schedule watchdog timeout */
895 	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
896 }
897 
898 static void
899 mge_intr_err(void *arg)
900 {
901 	struct mge_softc *sc = arg;
902 	struct ifnet *ifp;
903 
904 	ifp = sc->ifp;
905 	if_printf(ifp, "%s\n", __FUNCTION__);
906 }
907 
908 static void
909 mge_intr_misc(void *arg)
910 {
911 	struct mge_softc *sc = arg;
912 	struct ifnet *ifp;
913 
914 	ifp = sc->ifp;
915 	if_printf(ifp, "%s\n", __FUNCTION__);
916 }
917 
918 static void
919 mge_intr_rx(void *arg)
{
920 	struct mge_softc *sc = arg;
921 	uint32_t int_cause, int_cause_ext;
922 
923 	MGE_RECEIVE_LOCK(sc);
924 
925 #ifdef DEVICE_POLLING
926 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
927 		MGE_RECEIVE_UNLOCK(sc);
928 		return;
929 	}
930 #endif
931 
932 	/* Get interrupt cause */
933 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
934 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
935 
936 	/* Check for resource error */
937 	if (int_cause & MGE_PORT_INT_RXERRQ0) {
938 		mge_reinit_rx(sc);
939 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
940 		    int_cause & ~MGE_PORT_INT_RXERRQ0);
941 	}
942 
943 	int_cause &= MGE_PORT_INT_RXQ0;
944 	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
945 
946 	if (int_cause || int_cause_ext) {
947 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
948 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
949 		mge_intr_rx_locked(sc, -1);
950 	}
951 
952 	MGE_RECEIVE_UNLOCK(sc);
953 }
954 
955 
956 static void
957 mge_intr_rx_locked(struct mge_softc *sc, int count)
958 {
959 	struct ifnet *ifp = sc->ifp;
960 	uint32_t status;
961 	uint16_t bufsize;
962 	struct mge_desc_wrapper* dw;
963 	struct mbuf *mb;
964 
965 	MGE_RECEIVE_LOCK_ASSERT(sc);
966 
967 	while (count != 0) {
968 		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
969 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
970 		    BUS_DMASYNC_POSTREAD);
971 
972 		/* Get status */
973 		status = dw->mge_desc->cmd_status;
974 		bufsize = dw->mge_desc->buff_size;
975 		if ((status & MGE_DMA_OWNED) != 0)
976 			break;
977 
978 		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
979 		if (dw->mge_desc->byte_count &&
980 		    !(status & MGE_ERR_SUMMARY)) {
981 
982 			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
983 			    BUS_DMASYNC_POSTREAD);
984 
985 			mb = m_devget(dw->buffer->m_data,
986 			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
987 			    0, ifp, NULL);
988 
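			/*
			 * The controller prepends two bytes of padding to
			 * each received frame (this keeps the IP header
			 * 32-bit aligned); strip them here.
			 */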
989 			mb->m_len -= 2;
990 			mb->m_pkthdr.len -= 2;
991 			mb->m_data += 2;
992 
993 			mge_offload_process_frame(ifp, mb, status,
994 			    bufsize);
995 
996 			MGE_RECEIVE_UNLOCK(sc);
997 			(*ifp->if_input)(ifp, mb);
998 			MGE_RECEIVE_LOCK(sc);
999 		}
1000 
1001 		dw->mge_desc->byte_count = 0;
1002 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1003 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1004 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1005 
1006 		if (count > 0)
1007 			count -= 1;
1008 	}
1009 
1010 	return;
1011 }
1012 
1013 static void
1014 mge_intr_sum(void *arg)
1015 {
1016 	struct mge_softc *sc = arg;
1017 	struct ifnet *ifp;
1018 
1019 	ifp = sc->ifp;
1020 	if_printf(ifp, "%s\n", __FUNCTION__);
1021 }
1022 
1023 static void
1024 mge_intr_tx(void *arg)
1025 {
1026 	struct mge_softc *sc = arg;
1027 	uint32_t int_cause_ext;
1028 
1029 	MGE_TRANSMIT_LOCK(sc);
1030 
1031 #ifdef DEVICE_POLLING
1032 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1033 		MGE_TRANSMIT_UNLOCK(sc);
1034 		return;
1035 	}
1036 #endif
1037 
1038 	/* Ack the interrupt */
1039 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1040 	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
1041 	    int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);
1042 
1043 	mge_intr_tx_locked(sc);
1044 
1045 	MGE_TRANSMIT_UNLOCK(sc);
1046 }
1047 
1048 
1049 static void
1050 mge_intr_tx_locked(struct mge_softc *sc)
1051 {
1052 	struct ifnet *ifp = sc->ifp;
1053 	struct mge_desc_wrapper *dw;
1054 	struct mge_desc *desc;
1055 	uint32_t status;
1056 	int send = 0;
1057 
1058 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1059 
1060 	/* Disable watchdog */
1061 	sc->wd_timer = 0;
1062 
1063 	while (sc->tx_desc_used_count) {
1064 		/* Get the descriptor */
1065 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1066 		desc = dw->mge_desc;
1067 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1068 		    BUS_DMASYNC_POSTREAD);
1069 
1070 		/* Get descriptor status */
1071 		status = desc->cmd_status;
1072 
1073 		if (status & MGE_DMA_OWNED)
1074 			break;
1075 
1076 		sc->tx_desc_used_idx =
1077 		    (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
1078 		sc->tx_desc_used_count--;
1079 
1080 		/* Update collision statistics */
1081 		if (status & MGE_ERR_SUMMARY) {
1082 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1083 				ifp->if_collisions++;
1084 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1085 				ifp->if_collisions += 16;
1086 		}
1087 
1088 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1089 		    BUS_DMASYNC_POSTWRITE);
1090 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1091 		m_freem(dw->buffer);
1092 		dw->buffer = (struct mbuf*)NULL;
1093 		send++;
1094 
1095 		ifp->if_opackets++;
1096 	}
1097 
1098 	if (send) {
1099 		/* Now send anything that was pending */
1100 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1101 		mge_start_locked(ifp);
1102 	}
1103 }
1104 
1105 static int
1106 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1107 {
1108 	struct mge_softc *sc = ifp->if_softc;
1109 	struct ifreq *ifr = (struct ifreq *)data;
1110 	int mask, error;
1111 	uint32_t flags;
1112 
1113 	error = 0;
1114 
1115 	switch (command) {
1116 	case SIOCSIFFLAGS:
1117 		MGE_GLOBAL_LOCK(sc);
1118 
1119 		if (ifp->if_flags & IFF_UP) {
1120 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1121 				flags = ifp->if_flags ^ sc->mge_if_flags;
1122 				if (flags & IFF_PROMISC)
1123 					mge_set_prom_mode(sc,
1124 					    MGE_RX_DEFAULT_QUEUE);
1125 
1126 				if (flags & IFF_ALLMULTI)
1127 					mge_setup_multicast(sc);
1128 			} else
1129 				mge_init_locked(sc);
1130 		}
1131 		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1132 			mge_stop(sc);
1133 
1134 		sc->mge_if_flags = ifp->if_flags;
1135 		MGE_GLOBAL_UNLOCK(sc);
1136 		break;
1137 	case SIOCADDMULTI:
1138 	case SIOCDELMULTI:
1139 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1140 			MGE_GLOBAL_LOCK(sc);
1141 			mge_setup_multicast(sc);
1142 			MGE_GLOBAL_UNLOCK(sc);
1143 		}
1144 		break;
1145 	case SIOCSIFCAP:
1146 		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1147 		if (mask & IFCAP_HWCSUM) {
1148 			ifp->if_capenable &= ~IFCAP_HWCSUM;
1149 			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
1150 			if (ifp->if_capenable & IFCAP_TXCSUM)
1151 				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
1152 			else
1153 				ifp->if_hwassist = 0;
1154 		}
1155 #ifdef DEVICE_POLLING
1156 		if (mask & IFCAP_POLLING) {
1157 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1158 				error = ether_poll_register(mge_poll, ifp);
1159 				if (error)
1160 					return(error);
1161 
1162 				MGE_GLOBAL_LOCK(sc);
1163 				mge_intrs_ctrl(sc, 0);
1164 				ifp->if_capenable |= IFCAP_POLLING;
1165 				MGE_GLOBAL_UNLOCK(sc);
1166 			} else {
1167 				error = ether_poll_deregister(ifp);
1168 				MGE_GLOBAL_LOCK(sc);
1169 				mge_intrs_ctrl(sc, 1);
1170 				ifp->if_capenable &= ~IFCAP_POLLING;
1171 				MGE_GLOBAL_UNLOCK(sc);
1172 			}
1173 		}
1174 #endif
1175 		break;
1176 	case SIOCGIFMEDIA: /* fall through */
1177 	case SIOCSIFMEDIA:
1178 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1179 		    && !(ifr->ifr_media & IFM_FDX)) {
1180 			device_printf(sc->dev,
1181 			    "1000baseTX half-duplex unsupported\n");
1182 			return 0;
1183 		}
1184 		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1185 		break;
1186 	default:
1187 		error = ether_ioctl(ifp, command, data);
1188 	}
1189 	return (error);
1190 }
1191 
1192 static int
1193 mge_miibus_readreg(device_t dev, int phy, int reg)
1194 {
1195 	uint32_t retries;
1196 
1197 	/*
1198 	 * We assume static PHY address <=> device unit mapping:
1199 	 * PHY Address = MV_PHY_ADDR_BASE + device unit.
1200 	 * This is true for most Marvell boards.
1201 	 *
1202 	 * The code below ensures that each device unit talks only to
1203 	 * its own PHY.
1204 	 */
1205 
1206 	if ((MV_PHY_ADDR_BASE + device_get_unit(dev)) != phy)
1207 		return (0);
1208 
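	/*
	 * Issue an SMI read: the register number goes into bits 21-25,
	 * the PHY address into bits 16-20.
	 */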
1209 	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
1210 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
1211 
1212 	retries = MGE_SMI_READ_RETRIES;
1213 	while (--retries && !(MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_READVALID))
1214 		DELAY(MGE_SMI_READ_DELAY);
1215 
1216 	if (retries == 0)
1217 		device_printf(dev, "Timeout while reading from PHY\n");
1218 
1219 	return (MGE_READ(sc_mge0, MGE_REG_SMI) & 0xffff);
1220 }
1221 
1222 static void
1223 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1224 {
1225 	uint32_t retries;
1226 
1227 	if ((MV_PHY_ADDR_BASE + device_get_unit(dev)) != phy)
1228 		return;
1229 
1230 	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
1231 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));
1232 
1233 	retries = MGE_SMI_WRITE_RETRIES;
1234 	while (--retries && MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_BUSY)
1235 		DELAY(MGE_SMI_WRITE_DELAY);
1236 
1237 	if (retries == 0)
1238 		device_printf(dev, "Timeout while writing to PHY\n");
1239 }
1240 
1241 static int
1242 mge_probe(device_t dev)
1243 {
1244 
1245 	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1246 	return (BUS_PROBE_DEFAULT);
1247 }
1248 
1249 static int
1250 mge_resume(device_t dev)
1251 {
1252 
1253 	device_printf(dev, "%s\n", __FUNCTION__);
1254 	return (0);
1255 }
1256 
1257 static int
1258 mge_shutdown(device_t dev)
1259 {
1260 	struct mge_softc *sc = device_get_softc(dev);
1261 
1262 	MGE_GLOBAL_LOCK(sc);
1263 
1264 #ifdef DEVICE_POLLING
1265 	if (sc->ifp->if_capenable & IFCAP_POLLING)
1266 		ether_poll_deregister(sc->ifp);
1267 #endif
1268 
1269 	mge_stop(sc);
1270 
1271 	MGE_GLOBAL_UNLOCK(sc);
1272 
1273 	return (0);
1274 }
1275 
1276 static int
1277 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1278 {
1279 	struct mge_desc_wrapper *dw = NULL;
1280 	struct ifnet *ifp;
1281 	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1282 	bus_dmamap_t mapp;
1283 	int error;
1284 	int seg, nsegs;
1285 	int desc_no;
1286 
1287 	ifp = sc->ifp;
1288 
1289 	/* Check for free descriptors */
1290 	if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1291 		/* No free descriptors */
1292 		return (-1);
1293 	}
1294 
1295 	/* Fetch unused map */
1296 	desc_no = sc->tx_desc_curr;
1297 	dw = &sc->mge_tx_desc[desc_no];
1298 	mapp = dw->buffer_dmap;
1299 
1300 	bus_dmamap_sync(sc->mge_desc_dtag, mapp,
1301 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1302 
1303 	/* Create mapping in DMA memory */
1304 	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1305 	    BUS_DMA_NOWAIT);
1306 	if (error != 0 || nsegs != 1) {
1307 		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1308 		return ((error != 0) ? error : -1);
1309 	}
1310 
1311 	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1312 
1313 	/* Everything is ok, now we can send buffers */
1314 	for (seg = 0; seg < nsegs; seg++) {
1315 		dw->mge_desc->byte_count = segs[seg].ds_len;
1316 		dw->mge_desc->buffer = segs[seg].ds_addr;
1317 		dw->buffer = m0;
1318 		dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
1319 		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1320 		    MGE_DMA_OWNED;
1321 
1322 		if (seg == 0)
1323 			mge_offload_setup_descriptor(sc, dw);
1324 	}
1325 
1326 	bus_dmamap_sync(sc->mge_desc_dtag, mapp,
1327 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1328 
1329 	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
1330 	sc->tx_desc_used_count++;
1331 	return (0);
1332 }
1333 
1334 static void
1335 mge_tick(void *msc)
1336 {
1337 	struct mge_softc *sc = msc;
1338 
1339 	/* Check for TX timeout */
1340 	mge_watchdog(sc);
1341 
1342 	mii_tick(sc->mii);
1343 
1344 	/* Check for media type change */
1345 	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
1346 		mge_ifmedia_upd(sc->ifp);
1347 
1348 	/* Schedule another timeout one second from now */
1349 	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1350 }
1351 
1352 static void
1353 mge_watchdog(struct mge_softc *sc)
1354 {
1355 	struct ifnet *ifp;
1356 
1357 	ifp = sc->ifp;
1358 
1359 	MGE_GLOBAL_LOCK(sc);
1360 
1361 	if (sc->wd_timer == 0 || --sc->wd_timer) {
1362 		MGE_GLOBAL_UNLOCK(sc);
1363 		return;
1364 	}
1365 
1366 	ifp->if_oerrors++;
1367 	if_printf(ifp, "watchdog timeout\n");
1368 
1369 	mge_stop(sc);
1370 	mge_init_locked(sc);
1371 
1372 	MGE_GLOBAL_UNLOCK(sc);
1373 }
1374 
1375 static void
1376 mge_start(struct ifnet *ifp)
1377 {
1378 	struct mge_softc *sc = ifp->if_softc;
1379 
1380 	MGE_TRANSMIT_LOCK(sc);
1381 
1382 	mge_start_locked(ifp);
1383 
1384 	MGE_TRANSMIT_UNLOCK(sc);
1385 }
1386 
1387 static void
1388 mge_start_locked(struct ifnet *ifp)
1389 {
1390 	struct mge_softc *sc;
1391 	struct mbuf *m0, *mtmp;
1392 	uint32_t reg_val, queued = 0;
1393 
1394 	sc = ifp->if_softc;
1395 
1396 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1397 
1398 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1399 	    IFF_DRV_RUNNING)
1400 		return;
1401 
1402 	for (;;) {
1403 		/* Get packet from the queue */
1404 		IF_DEQUEUE(&ifp->if_snd, m0);
1405 		if (m0 == NULL)
1406 			break;
1407 
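		/*
		 * mge_encap() handles single-segment mbufs only, so
		 * defragment the chain before passing it down.
		 */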
1408 		mtmp = m_defrag(m0, M_DONTWAIT);
1409 		if (mtmp)
1410 			m0 = mtmp;
1411 
1412 		if (mge_encap(sc, m0)) {
1413 			IF_PREPEND(&ifp->if_snd, m0);
1414 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1415 			break;
1416 		}
1417 		queued++;
1418 		BPF_MTAP(ifp, m0);
1419 	}
1420 
1421 	if (queued) {
1422 		/* Enable transmitter and watchdog timer */
1423 		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1424 		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
1425 		sc->wd_timer = 5;
1426 	}
1427 }
1428 
1429 static void
1430 mge_stop(struct mge_softc *sc)
1431 {
1432 	struct ifnet *ifp;
1433 	volatile uint32_t reg_val, status;
1434 	struct mge_desc_wrapper *dw;
1435 	struct mge_desc *desc;
1436 	int count;
1437 
1438 	ifp = sc->ifp;
1439 
1440 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1441 		return;
1442 
1443 	/* Stop tick engine */
1444 	callout_stop(&sc->wd_callout);
1445 
1446 	/* Disable interface */
1447 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1448 	sc->wd_timer = 0;
1449 
1450 	/* Disable interrupts */
1451 	mge_intrs_ctrl(sc, 0);
1452 
1453 	/* Disable Rx and Tx */
1454 	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1455 	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1456 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1457 
1458 	/* Remove pending data from TX queue */
1459 	while (sc->tx_desc_used_idx < sc->tx_desc_curr) {
1460 		/* Get the descriptor */
1461 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1462 		desc = dw->mge_desc;
1463 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1464 		    BUS_DMASYNC_POSTREAD);
1465 
1466 		/* Get descriptor status */
1467 		status = desc->cmd_status;
1468 
1469 		if (status & MGE_DMA_OWNED)
1470 			break;
1471 
1472 		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
1473 		    MGE_TX_DESC_NUM;
1474 
1475 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1476 		    BUS_DMASYNC_POSTWRITE);
1477 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1478 
1479 		m_freem(dw->buffer);
1480 		dw->buffer = (struct mbuf*)NULL;
1481 	}
1482 
1483 	/* Wait for end of transmission */
1484 	count = 0x100000;
1485 	while (count--) {
1486 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1487 		if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
1488 		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1489 			break;
1490 		DELAY(100);
1491 	}
1492 
1493 	if (!count)
1494 		if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
1495 		    __FUNCTION__);
1496 
1497 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1498 	reg_val &= ~(PORT_SERIAL_ENABLE);
1499 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1500 }
1501 
1502 static int
1503 mge_suspend(device_t dev)
1504 {
1505 
1506 	device_printf(dev, "%s\n", __FUNCTION__);
1507 	return (0);
1508 }
1509 
1510 static void
1511 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1512     uint32_t status, uint16_t bufsize)
1513 {
1514 	int csum_flags = 0;
1515 
1516 	if (ifp->if_capenable & IFCAP_RXCSUM) {
1517 		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1518 			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
1519 
1520 		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1521 		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1522 		    (status & MGE_RX_L4_CSUM_OK)) {
1523 			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1524 			frame->m_pkthdr.csum_data = 0xFFFF;
1525 		}
1526 
1527 		frame->m_pkthdr.csum_flags = csum_flags;
1528 	}
1529 }
1530 
1531 static void
1532 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1533 {
1534 	struct mbuf *m0 = dw->buffer;
1535 	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1536 	int csum_flags = m0->m_pkthdr.csum_flags;
1537 	int cmd_status = 0;
1538 	struct ip *ip;
1539 	int ehlen, etype;
1540 
1541 	if (csum_flags) {
1542 		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1543 			etype = ntohs(eh->evl_proto);
1544 			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1545 			cmd_status |= MGE_TX_VLAN_TAGGED;
1546 		} else {
1547 			etype = ntohs(eh->evl_encap_proto);
1548 			ehlen = ETHER_HDR_LEN;
1549 		}
1550 
1551 		if (etype != ETHERTYPE_IP) {
1552 			if_printf(sc->ifp,
1553 			    "TCP/IP Offload enabled for unsupported "
1554 			    "protocol!\n");
1555 			return;
1556 		}
1557 
1558 		ip = (struct ip *)(m0->m_data + ehlen);
1559 		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1560 
1561 		if ((m0->m_flags & M_FRAG) == 0)
1562 			cmd_status |= MGE_TX_NOT_FRAGMENT;
1563 	}
1564 
1565 	if (csum_flags & CSUM_IP)
1566 		cmd_status |= MGE_TX_GEN_IP_CSUM;
1567 
1568 	if (csum_flags & CSUM_TCP)
1569 		cmd_status |= MGE_TX_GEN_L4_CSUM;
1570 
1571 	if (csum_flags & CSUM_UDP)
1572 		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1573 
1574 	dw->mge_desc->cmd_status |= cmd_status;
1575 }
1576 
1577 static void
1578 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1579 {
1580 
1581 	if (enable) {
1582 		MGE_WRITE(sc, MGE_PORT_INT_MASK , MGE_PORT_INT_RXQ0 |
1583 		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1584 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , MGE_PORT_INT_EXT_TXERR0 |
1585 		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1586 		    MGE_PORT_INT_EXT_TXBUF0);
1587 	} else {
1588 		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1589 		MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1590 
1591 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1592 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1593 
1594 		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1595 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
1596 	}
1597 }
1598 
1599 static uint8_t
1600 mge_crc8(uint8_t *data, int size)
1601 {
1602 	uint8_t crc = 0;
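	/* Lookup table for CRC-8 with polynomial 0x07 (x^8 + x^2 + x + 1). */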
1603 	static const uint8_t ct[256] = {
1604 		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1605 		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1606 		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1607 		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1608 		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1609 		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1610 		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
1611 		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
1612 		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
1613 		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
1614 		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
1615 		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
1616 		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
1617 		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
1618 		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
1619 		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
1620 		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
1621 		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
1622 		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
1623 		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
1624 		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
1625 		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
1626 		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
1627 		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
1628 		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
1629 		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
1630 		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
1631 		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
1632 		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
1633 		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
1634 		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
1635 		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
1636 	};
1637 
1638 	while (size--)
1639 		crc = ct[crc ^ *(data++)];
1640 
1641 	return(crc);
1642 }
1643 
1644 static void
1645 mge_setup_multicast(struct mge_softc *sc)
1646 {
1647 	uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
1648 	uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
1649 	uint32_t smt[MGE_MCAST_REG_NUMBER];
1650 	uint32_t omt[MGE_MCAST_REG_NUMBER];
1651 	struct ifnet *ifp = sc->ifp;
1652 	struct ifmultiaddr *ifma;
1653 	uint8_t *mac;
1654 	int i;
1655 
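	/*
	 * Addresses of the form 01:00:5e:00:00:xx go into the special
	 * multicast table, indexed by the last address byte; all other
	 * multicast addresses are hashed (CRC-8) into the other table.
	 */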
1656 	if (ifp->if_flags & IFF_ALLMULTI) {
1657 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
1658 			smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
1659 	} else {
1660 		memset(smt, 0, sizeof(smt));
1661 		memset(omt, 0, sizeof(omt));
1662 
1663 		IF_ADDR_LOCK(ifp);
1664 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1665 			if (ifma->ifma_addr->sa_family != AF_LINK)
1666 				continue;
1667 
1668 			mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1669 			if (memcmp(mac, special, sizeof(special)) == 0) {
1670 				i = mac[5];
1671 				smt[i >> 2] |= v << ((i & 0x03) << 3);
1672 			} else {
1673 				i = mge_crc8(mac, ETHER_ADDR_LEN);
1674 				omt[i >> 2] |= v << ((i & 0x03) << 3);
1675 			}
1676 		}
1677 		IF_ADDR_UNLOCK(ifp);
1678 	}
1679 
1680 	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
1681 		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
1682 		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
1683 	}
1684 }
1685 
1686 static void
1687 mge_set_rxic(struct mge_softc *sc)
1688 {
1689 	uint32_t reg;
1690 
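	/* RX coalescing time lives in the SDMA config IPG field. */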
1691 	if (sc->rx_ic_time > MGE_SDMA_RX_IPG_MAX)
1692 		sc->rx_ic_time = MGE_SDMA_RX_IPG_MAX;
1693 
1694 	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
1695 	reg &= ~MGE_SDMA_RX_IPG(MGE_SDMA_RX_IPG_MAX);
1696 	reg |= MGE_SDMA_RX_IPG(sc->rx_ic_time);
1697 	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
1698 }
1699 
1700 static void
1701 mge_set_txic(struct mge_softc *sc)
1702 {
1703 	uint32_t reg;
1704 
1705 	if (sc->tx_ic_time > MGE_TX_FIFO_URGENT_TRSH_IPG_MAX)
1706 		sc->tx_ic_time = MGE_TX_FIFO_URGENT_TRSH_IPG_MAX;
1707 
1708 	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
1709 	reg &= ~MGE_TX_FIFO_URGENT_TRSH_IPG(MGE_TX_FIFO_URGENT_TRSH_IPG_MAX);
1710 	reg |= MGE_TX_FIFO_URGENT_TRSH_IPG(sc->tx_ic_time);
1711 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
1712 }
1713 
1714 static int
1715 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
1716 {
1717 	struct mge_softc *sc = (struct mge_softc *)arg1;
1718 	uint32_t time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
1719 	int error;
1720 
1721 	error = sysctl_handle_int(oidp, &time, 0, req);
1722 	if (error != 0)
1723 		return(error);
1724 
1725 	MGE_GLOBAL_LOCK(sc);
1726 	if (arg2 == MGE_IC_RX) {
1727 		sc->rx_ic_time = time;
1728 		mge_set_rxic(sc);
1729 	} else {
1730 		sc->tx_ic_time = time;
1731 		mge_set_txic(sc);
1732 	}
1733 	MGE_GLOBAL_UNLOCK(sc);
1734 
1735 	return(0);
1736 }
1737 
1738 static void
1739 mge_add_sysctls(struct mge_softc *sc)
1740 {
1741 	struct sysctl_ctx_list *ctx;
1742 	struct sysctl_oid_list *children;
1743 	struct sysctl_oid *tree;
1744 
1745 	ctx = device_get_sysctl_ctx(sc->dev);
1746 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1747 	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
1748 	    CTLFLAG_RD, 0, "MGE Interrupts coalescing");
1749 	children = SYSCTL_CHILDREN(tree);
1750 
1751 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
1752 	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
1753 	    "I", "IC RX time threshold");
1754 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
1755 	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
1756 	    "I", "IC TX time threshold");
1757 }
1758