xref: /freebsd/sys/dev/mge/if_mge.c (revision 3c4ba5f55438f7afd4f4b0b56f88f2bb505fd6a6)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5  * Copyright (C) 2009-2015 Semihalf
6  * Copyright (C) 2015 Stormshield
7  * All rights reserved.
8  *
9  * Developed by Semihalf.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of MARVELL nor the names of contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
38 #endif
39 
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/endian.h>
46 #include <sys/mbuf.h>
47 #include <sys/lock.h>
48 #include <sys/mutex.h>
49 #include <sys/kernel.h>
50 #include <sys/module.h>
51 #include <sys/socket.h>
52 #include <sys/sysctl.h>
53 
54 #include <net/ethernet.h>
55 #include <net/bpf.h>
56 #include <net/if.h>
57 #include <net/if_arp.h>
58 #include <net/if_dl.h>
59 #include <net/if_media.h>
60 #include <net/if_types.h>
61 #include <net/if_vlan_var.h>
62 
63 #include <netinet/in_systm.h>
64 #include <netinet/in.h>
65 #include <netinet/ip.h>
66 
67 #include <sys/sockio.h>
68 #include <sys/bus.h>
69 #include <machine/bus.h>
70 #include <sys/rman.h>
71 #include <machine/resource.h>
72 
73 #include <dev/mii/mii.h>
74 #include <dev/mii/miivar.h>
75 
76 #include <dev/fdt/fdt_common.h>
77 #include <dev/ofw/ofw_bus.h>
78 #include <dev/ofw/ofw_bus_subr.h>
79 #include <dev/mdio/mdio.h>
80 
81 #include <dev/mge/if_mgevar.h>
82 #include <arm/mv/mvreg.h>
83 #include <arm/mv/mvvar.h>
84 
85 #include "miibus_if.h"
86 #include "mdio_if.h"
87 
88 #define	MGE_DELAY(x)	pause("SMI access sleep", (x) / tick_sbt)
89 
90 static int mge_probe(device_t dev);
91 static int mge_attach(device_t dev);
92 static int mge_detach(device_t dev);
93 static int mge_shutdown(device_t dev);
94 static int mge_suspend(device_t dev);
95 static int mge_resume(device_t dev);
96 
97 static int mge_miibus_readreg(device_t dev, int phy, int reg);
98 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
99 
100 static int mge_mdio_readreg(device_t dev, int phy, int reg);
101 static int mge_mdio_writereg(device_t dev, int phy, int reg, int value);
102 
103 static int mge_ifmedia_upd(if_t ifp);
104 static void mge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr);
105 
106 static void mge_init(void *arg);
107 static void mge_init_locked(void *arg);
108 static void mge_start(if_t ifp);
109 static void mge_start_locked(if_t ifp);
110 static void mge_watchdog(struct mge_softc *sc);
111 static int mge_ioctl(if_t ifp, u_long command, caddr_t data);
112 
113 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
114 static uint32_t mge_rx_ipg(uint32_t val, int ver);
115 static void mge_ver_params(struct mge_softc *sc);
116 
117 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
118 static void mge_intr_rxtx(void *arg);
119 static void mge_intr_rx(void *arg);
120 static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
121     uint32_t int_cause_ext);
122 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
123 static void mge_intr_tx(void *arg);
124 static void mge_intr_tx_locked(struct mge_softc *sc);
125 static void mge_intr_misc(void *arg);
126 static void mge_intr_sum(void *arg);
127 static void mge_intr_err(void *arg);
128 static void mge_stop(struct mge_softc *sc);
129 static void mge_tick(void *msc);
130 static uint32_t mge_set_port_serial_control(uint32_t media);
131 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
132 static void mge_set_mac_address(struct mge_softc *sc);
133 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
134     uint8_t queue);
135 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
136 static int mge_allocate_dma(struct mge_softc *sc);
137 static int mge_alloc_desc_dma(struct mge_softc *sc,
138     struct mge_desc_wrapper* desc_tab, uint32_t size,
139     bus_dma_tag_t *buffer_tag);
140 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
141     struct mbuf **mbufp, bus_addr_t *paddr);
142 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
143     int error);
144 static void mge_free_dma(struct mge_softc *sc);
145 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
146     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
147 static void mge_offload_process_frame(if_t ifp, struct mbuf *frame,
148     uint32_t status, uint16_t bufsize);
149 static void mge_offload_setup_descriptor(struct mge_softc *sc,
150     struct mge_desc_wrapper *dw);
151 static uint8_t mge_crc8(uint8_t *data, int size);
152 static void mge_setup_multicast(struct mge_softc *sc);
153 static void mge_set_rxic(struct mge_softc *sc);
154 static void mge_set_txic(struct mge_softc *sc);
155 static void mge_add_sysctls(struct mge_softc *sc);
156 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
157 
158 static device_method_t mge_methods[] = {
159 	/* Device interface */
160 	DEVMETHOD(device_probe,		mge_probe),
161 	DEVMETHOD(device_attach,	mge_attach),
162 	DEVMETHOD(device_detach,	mge_detach),
163 	DEVMETHOD(device_shutdown,	mge_shutdown),
164 	DEVMETHOD(device_suspend,	mge_suspend),
165 	DEVMETHOD(device_resume,	mge_resume),
166 	/* MII interface */
167 	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
168 	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
169 	/* MDIO interface */
170 	DEVMETHOD(mdio_readreg,		mge_mdio_readreg),
171 	DEVMETHOD(mdio_writereg,	mge_mdio_writereg),
172 	{ 0, 0 }
173 };
174 
175 DEFINE_CLASS_0(mge, mge_driver, mge_methods, sizeof(struct mge_softc));
176 
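/* Set when any unit detects an attached switch; checked by the miibus methods. */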
177 static int switch_attached = 0;
178 
179 DRIVER_MODULE(mge, simplebus, mge_driver, 0, 0);
180 DRIVER_MODULE(miibus, mge, miibus_driver, 0, 0);
181 DRIVER_MODULE(mdio, mge, mdio_driver, 0, 0);
182 MODULE_DEPEND(mge, ether, 1, 1, 1);
183 MODULE_DEPEND(mge, miibus, 1, 1, 1);
184 MODULE_DEPEND(mge, mdio, 1, 1, 1);
185 
186 static struct resource_spec res_spec[] = {
187 	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
188 	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
189 	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
190 	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
191 	{ -1, 0 }
192 };
193 
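/*
 * Interrupt handler dispatch table.  On SoCs with a single, combined IRQ
 * (sc->mge_intr_cnt == 1) only the first, aggregated entry is used;
 * otherwise entries 1..MGE_INTR_COUNT serve the individual lines.
 */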
194 static struct {
195 	driver_intr_t *handler;
196 	char *description;
197 } mge_intrs[MGE_INTR_COUNT + 1] = {
198 	{ mge_intr_rxtx, "GbE aggregated interrupt" },
199 	{ mge_intr_rx,	"GbE receive interrupt" },
200 	{ mge_intr_tx,	"GbE transmit interrupt" },
201 	{ mge_intr_misc, "GbE misc interrupt" },
202 	{ mge_intr_sum,	"GbE summary interrupt" },
203 	{ mge_intr_err,	"GbE error interrupt" },
204 };
205 
206 /* Interlock for SMI (MDIO) accesses, shared by all mge instances */
207 static struct sx sx_smi;
208 
209 static uint32_t
210 mv_read_ge_smi(device_t dev, int phy, int reg)
211 {
212 	uint32_t timeout;
213 	uint32_t ret;
214 	struct mge_softc *sc;
215 
216 	sc = device_get_softc(dev);
217 	KASSERT(sc != NULL, ("NULL softc ptr!"));
218 	timeout = MGE_SMI_WRITE_RETRIES;
219 
220 	MGE_SMI_LOCK();
221 	while (--timeout &&
222 	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
223 		MGE_DELAY(MGE_SMI_WRITE_DELAY);
224 
225 	if (timeout == 0) {
226 		device_printf(dev, "SMI write timeout.\n");
227 		ret = ~0U;
228 		goto out;
229 	}
230 
231 	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
232 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
233 
234 	/* Wait till finished. */
235 	timeout = MGE_SMI_WRITE_RETRIES;
236 	while (--timeout &&
237 	    !((MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_READVALID)))
238 		MGE_DELAY(MGE_SMI_WRITE_DELAY);
239 
240 	if (timeout == 0) {
241 		device_printf(dev, "SMI write validation timeout.\n");
242 		ret = ~0U;
243 		goto out;
244 	}
245 
246 	/* Wait for the data to update in the SMI register */
247 	MGE_DELAY(MGE_SMI_DELAY);
248 	ret = MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
249 
250 out:
251 	MGE_SMI_UNLOCK();
252 	return (ret);
254 }
255 
256 static void
257 mv_write_ge_smi(device_t dev, int phy, int reg, uint32_t value)
258 {
259 	uint32_t timeout;
260 	struct mge_softc *sc;
261 
262 	sc = device_get_softc(dev);
263 	KASSERT(sc != NULL, ("NULL softc ptr!"));
264 
265 	MGE_SMI_LOCK();
266 	timeout = MGE_SMI_READ_RETRIES;
267 	while (--timeout &&
268 	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
269 		MGE_DELAY(MGE_SMI_READ_DELAY);
270 
271 	if (timeout == 0) {
272 		device_printf(dev, "SMI read timeout.\n");
273 		goto out;
274 	}
275 
276 	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
277 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
278 	    (value & MGE_SMI_DATA_MASK)));
279 
280 out:
281 	MGE_SMI_UNLOCK();
282 }
283 
284 static int
285 mv_read_ext_phy(device_t dev, int phy, int reg)
286 {
287 	uint32_t retries;
288 	struct mge_softc *sc;
289 	uint32_t ret;
290 
291 	sc = device_get_softc(dev);
292 
293 	MGE_SMI_LOCK();
294 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
295 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
296 
297 	retries = MGE_SMI_READ_RETRIES;
298 	while (--retries &&
299 	    !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
300 		DELAY(MGE_SMI_READ_DELAY);
301 
302 	if (retries == 0)
303 		device_printf(dev, "Timeout while reading from PHY\n");
304 
305 	ret = MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
306 	MGE_SMI_UNLOCK();
307 
308 	return (ret);
309 }
310 
311 static void
312 mv_write_ext_phy(device_t dev, int phy, int reg, int value)
313 {
314 	uint32_t retries;
315 	struct mge_softc *sc;
316 
317 	sc = device_get_softc(dev);
318 
319 	MGE_SMI_LOCK();
320 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
321 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
322 	    (value & MGE_SMI_DATA_MASK)));
323 
324 	retries = MGE_SMI_WRITE_RETRIES;
325 	while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
326 		DELAY(MGE_SMI_WRITE_DELAY);
327 
328 	if (retries == 0)
329 		device_printf(dev, "Timeout while writing to PHY\n");
330 	MGE_SMI_UNLOCK();
331 }
332 
333 static void
334 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
335 {
336 	uint32_t mac_l, mac_h;
337 	uint8_t lmac[6];
338 	int i, valid;
339 
340 	/*
341 	 * Retrieve hw address from the device tree.
342 	 */
343 	i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
344 	if (i == 6) {
345 		valid = 0;
346 		for (i = 0; i < 6; i++)
347 			if (lmac[i] != 0) {
348 				valid = 1;
349 				break;
350 			}
351 
352 		if (valid) {
353 			bcopy(lmac, addr, 6);
354 			return;
355 		}
356 	}
357 
358 	/*
359 	 * Fall back -- use the currently programmed address.
360 	 */
361 	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
362 	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
363 
364 	addr[0] = (mac_h & 0xff000000) >> 24;
365 	addr[1] = (mac_h & 0x00ff0000) >> 16;
366 	addr[2] = (mac_h & 0x0000ff00) >> 8;
367 	addr[3] = (mac_h & 0x000000ff);
368 	addr[4] = (mac_l & 0x0000ff00) >> 8;
369 	addr[5] = (mac_l & 0x000000ff);
370 }
371 
372 static uint32_t
373 mge_tfut_ipg(uint32_t val, int ver)
374 {
375 
376 	switch (ver) {
377 	case 1:
378 		return ((val & 0x3fff) << 4);
379 	case 2:
380 	default:
381 		return ((val & 0xffff) << 4);
382 	}
383 }
384 
385 static uint32_t
386 mge_rx_ipg(uint32_t val, int ver)
387 {
388 
389 	switch (ver) {
390 	case 1:
391 		return ((val & 0x3fff) << 8);
392 	case 2:
393 	default:
394 		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
395 	}
396 }
397 
398 static void
399 mge_ver_params(struct mge_softc *sc)
400 {
401 	uint32_t d, r;
402 
403 	soc_id(&d, &r);
404 	if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
405 	    d == MV_DEV_88F6282 ||
406 	    d == MV_DEV_MV78100 ||
407 	    d == MV_DEV_MV78100_Z0 ||
408 	    (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
409 		sc->mge_ver = 2;
410 		sc->mge_mtu = 0x4e8;
411 		sc->mge_tfut_ipg_max = 0xFFFF;
412 		sc->mge_rx_ipg_max = 0xFFFF;
413 		sc->mge_tx_arb_cfg = 0xFC0000FF;
414 		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
415 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
416 	} else {
417 		sc->mge_ver = 1;
418 		sc->mge_mtu = 0x458;
419 		sc->mge_tfut_ipg_max = 0x3FFF;
420 		sc->mge_rx_ipg_max = 0x3FFF;
421 		sc->mge_tx_arb_cfg = 0x000000FF;
422 		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
423 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
424 	}
425 	if (d == MV_DEV_88RC8180)
426 		sc->mge_intr_cnt = 1;
427 	else
428 		sc->mge_intr_cnt = 2;
429 
430 	if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
431 		sc->mge_hw_csum = 0;
432 	else
433 		sc->mge_hw_csum = 1;
434 }
435 
436 static void
437 mge_set_mac_address(struct mge_softc *sc)
438 {
439 	char *if_mac;
440 	uint32_t mac_l, mac_h;
441 
442 	MGE_GLOBAL_LOCK_ASSERT(sc);
443 
444 	if_mac = (char *)if_getlladdr(sc->ifp);
445 
446 	mac_l = (if_mac[4] << 8) | (if_mac[5]);
447 	mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
448 	    (if_mac[2] << 8) | (if_mac[3] << 0);
449 
450 	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
451 	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
452 
453 	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
454 }
455 
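/*
 * Program the unicast DA filter.  Only the low nibble of the last MAC
 * address byte is matched; it selects one 8-bit slot in the filter
 * registers, holding a pass bit and the target RX queue.  All other
 * slots are cleared.
 */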
456 static void
457 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
458 {
459 	uint32_t reg_idx, reg_off, reg_val, i;
460 
461 	last_byte &= 0xf;
462 	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
463 	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
464 	reg_val = (1 | (queue << 1)) << reg_off;
465 
466 	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
467 		if (i == reg_idx)
468 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
469 		else
470 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
471 	}
472 }
473 
474 static void
475 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
476 {
477 	uint32_t port_config;
478 	uint32_t reg_val, i;
479 
480 	/* Enable or disable promiscuous mode as needed */
481 	if (if_getflags(sc->ifp) & IFF_PROMISC) {
482 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
483 		port_config |= PORT_CONFIG_UPM;
484 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
485 
486 		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
487 		   (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
488 
489 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
490 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
491 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
492 		}
493 
494 		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
495 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
496 
497 	} else {
498 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
499 		port_config &= ~PORT_CONFIG_UPM;
500 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
501 
502 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
503 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
504 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
505 		}
506 
507 		mge_set_mac_address(sc);
508 	}
509 }
510 
511 static void
512 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
513 {
514 	uint32_t *paddr;
515 
516 	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
517 	paddr = arg;
518 
519 	*paddr = segs->ds_addr;
520 }
521 
522 static int
523 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
524     bus_addr_t *paddr)
525 {
526 	struct mbuf *new_mbuf;
527 	bus_dma_segment_t seg[1];
528 	int error;
529 	int nsegs;
530 
531 	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
532 
533 	new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
534 	if (new_mbuf == NULL)
535 		return (ENOBUFS);
536 	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
537 
538 	if (*mbufp) {
539 		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
540 		bus_dmamap_unload(tag, map);
541 	}
542 
543 	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
544 	    BUS_DMA_NOWAIT);
545 	KASSERT(nsegs == 1, ("Too many segments returned!"));
546 	if (nsegs != 1 || error)
547 		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
548 
549 	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
550 
551 	(*mbufp) = new_mbuf;
552 	(*paddr) = seg->ds_addr;
553 	return (0);
554 }
555 
556 static int
557 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
558     uint32_t size, bus_dma_tag_t *buffer_tag)
559 {
560 	struct mge_desc_wrapper *dw;
561 	bus_addr_t desc_paddr;
562 	int i, error;
563 
564 	desc_paddr = 0;
565 	for (i = size - 1; i >= 0; i--) {
566 		dw = &(tab[i]);
567 		error = bus_dmamem_alloc(sc->mge_desc_dtag,
568 		    (void**)&(dw->mge_desc),
569 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
570 		    &(dw->desc_dmap));
571 
572 		if (error) {
573 			if_printf(sc->ifp, "failed to allocate DMA memory\n");
574 			dw->mge_desc = NULL;
575 			return (ENXIO);
576 		}
577 
578 		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
579 		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
580 		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
581 
582 		if (error) {
583 			if_printf(sc->ifp, "can't load descriptor\n");
584 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
585 			    dw->desc_dmap);
586 			dw->mge_desc = NULL;
587 			return (ENXIO);
588 		}
589 
590 		/* Chain descriptors */
591 		dw->mge_desc->next_desc = desc_paddr;
592 		desc_paddr = dw->mge_desc_paddr;
593 	}
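	/* Close the ring: point the last descriptor back at the first. */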
594 	tab[size - 1].mge_desc->next_desc = desc_paddr;
595 
596 	/* Allocate a busdma tag for mbufs. */
597 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
598 	    1, 0,				/* alignment, boundary */
599 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
600 	    BUS_SPACE_MAXADDR,			/* highaddr */
601 	    NULL, NULL,				/* filtfunc, filtfuncarg */
602 	    MCLBYTES, 1,			/* maxsize, nsegments */
603 	    MCLBYTES, 0,			/* maxsegsz, flags */
604 	    NULL, NULL,				/* lockfunc, lockfuncarg */
605 	    buffer_tag);			/* dmat */
606 	if (error) {
607 		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
608 		return (ENXIO);
609 	}
610 
611 	/* Create TX busdma maps */
612 	for (i = 0; i < size; i++) {
613 		dw = &(tab[i]);
614 		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
615 		if (error) {
616 			if_printf(sc->ifp, "failed to create map for mbuf\n");
617 			return (ENXIO);
618 		}
619 
620 		dw->buffer = (struct mbuf*)NULL;
621 		dw->mge_desc->buffer = (bus_addr_t)NULL;
622 	}
623 
624 	return (0);
625 }
626 
627 static int
628 mge_allocate_dma(struct mge_softc *sc)
629 {
630 	struct mge_desc_wrapper *dw;
631 	int i;
632 
633 	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
634 	bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
635 	    16, 0,				/* alignment, boundary */
636 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
637 	    BUS_SPACE_MAXADDR,			/* highaddr */
638 	    NULL, NULL,				/* filtfunc, filtfuncarg */
639 	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
640 	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
641 	    NULL, NULL,				/* lockfunc, lockfuncarg */
642 	    &sc->mge_desc_dtag);		/* dmat */
643 
645 	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
646 	    &sc->mge_tx_dtag);
647 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
648 	    &sc->mge_rx_dtag);
649 
650 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
651 		dw = &(sc->mge_rx_desc[i]);
652 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
653 		    &dw->mge_desc->buffer);
654 	}
655 
656 	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
657 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
658 
659 	return (0);
660 }
661 
662 static void
663 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
664     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
665 {
666 	struct mge_desc_wrapper *dw;
667 	int i;
668 
669 	for (i = 0; i < size; i++) {
670 		/* Free RX mbuf */
671 		dw = &(tab[i]);
672 
673 		if (dw->buffer_dmap) {
674 			if (free_mbufs) {
675 				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
676 				    BUS_DMASYNC_POSTREAD);
677 				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
678 			}
679 			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
680 			if (free_mbufs)
681 				m_freem(dw->buffer);
682 		}
683 		/* Free RX descriptors */
684 		if (dw->desc_dmap) {
685 			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
686 			    BUS_DMASYNC_POSTREAD);
687 			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
688 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
689 			    dw->desc_dmap);
690 		}
691 	}
692 }
693 
694 static void
695 mge_free_dma(struct mge_softc *sc)
696 {
697 
698 	/* Free descriptors and mbufs */
699 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
700 	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
701 
702 	/* Destroy mbuf dma tag */
703 	bus_dma_tag_destroy(sc->mge_tx_dtag);
704 	bus_dma_tag_destroy(sc->mge_rx_dtag);
705 	/* Destroy descriptors tag */
706 	bus_dma_tag_destroy(sc->mge_desc_dtag);
707 }
708 
709 static void
710 mge_reinit_rx(struct mge_softc *sc)
711 {
712 	struct mge_desc_wrapper *dw;
713 	int i;
714 
715 	MGE_RECEIVE_LOCK_ASSERT(sc);
716 
717 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
718 
719 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
720 	    &sc->mge_rx_dtag);
721 
722 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
723 		dw = &(sc->mge_rx_desc[i]);
724 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
725 		&dw->mge_desc->buffer);
726 	}
727 
728 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
729 	sc->rx_desc_curr = 0;
730 
731 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
732 	    sc->rx_desc_start);
733 
734 	/* Enable RX queue */
735 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
736 }
737 
738 #ifdef DEVICE_POLLING
739 static poll_handler_t mge_poll;
740 
741 static int
742 mge_poll(if_t ifp, enum poll_cmd cmd, int count)
743 {
744 	struct mge_softc *sc = if_getsoftc(ifp);
745 	uint32_t int_cause, int_cause_ext;
746 	int rx_npkts = 0;
747 
748 	MGE_RECEIVE_LOCK(sc);
749 
750 	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
751 		MGE_RECEIVE_UNLOCK(sc);
752 		return (rx_npkts);
753 	}
754 
755 	if (cmd == POLL_AND_CHECK_STATUS) {
756 		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
757 		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
758 
759 		/* Check for resource error */
760 		if (int_cause & MGE_PORT_INT_RXERRQ0)
761 			mge_reinit_rx(sc);
762 
763 		if (int_cause || int_cause_ext) {
764 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
765 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
766 		}
767 	}
768 
770 	rx_npkts = mge_intr_rx_locked(sc, count);
771 
772 	MGE_RECEIVE_UNLOCK(sc);
773 	MGE_TRANSMIT_LOCK(sc);
774 	mge_intr_tx_locked(sc);
775 	MGE_TRANSMIT_UNLOCK(sc);
776 	return (rx_npkts);
777 }
778 #endif /* DEVICE_POLLING */
779 
780 static int
781 mge_attach(device_t dev)
782 {
783 	struct mge_softc *sc;
784 	struct mii_softc *miisc;
785 	if_t ifp;
786 	uint8_t hwaddr[ETHER_ADDR_LEN];
787 	int i, error, phy;
788 
789 	sc = device_get_softc(dev);
790 	sc->dev = dev;
791 	sc->node = ofw_bus_get_node(dev);
792 	phy = 0;
793 
794 	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) == 0) {
795 		device_printf(dev, "PHY%i attached, phy_sc points to %s\n", phy,
796 		    device_get_nameunit(sc->phy_sc->dev));
797 		sc->phy_attached = 1;
798 	} else {
799 		device_printf(dev, "PHY not attached.\n");
800 		sc->phy_attached = 0;
801 		sc->phy_sc = sc;
802 	}
803 
804 	if (fdt_find_compatible(sc->node, "mrvl,sw", 1) != 0) {
805 		device_printf(dev, "Switch attached.\n");
806 		sc->switch_attached = 1;
807 		/* Global flag, shared across all mge instances */
808 		switch_attached = 1;
809 	} else {
810 		sc->switch_attached = 0;
811 	}
812 
813 	if (device_get_unit(dev) == 0) {
814 		sx_init(&sx_smi, "mge_tick() SMI access threads interlock");
815 	}
816 
817 	/* Set chip version-dependent parameters */
818 	mge_ver_params(sc);
819 
820 	/* Initialize mutexes */
821 	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
822 	    MTX_DEF);
823 	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
824 	    MTX_DEF);
825 
826 	/* Allocate IO and IRQ resources */
827 	error = bus_alloc_resources(dev, res_spec, sc->res);
828 	if (error) {
829 		device_printf(dev, "could not allocate resources\n");
830 		mge_detach(dev);
831 		return (ENXIO);
832 	}
833 
834 	/* Allocate DMA, buffers, buffer descriptors */
835 	error = mge_allocate_dma(sc);
836 	if (error) {
837 		mge_detach(dev);
838 		return (ENXIO);
839 	}
840 
841 	sc->tx_desc_curr = 0;
842 	sc->rx_desc_curr = 0;
843 	sc->tx_desc_used_idx = 0;
844 	sc->tx_desc_used_count = 0;
845 
846 	/* Configure defaults for interrupts coalescing */
847 	sc->rx_ic_time = 768;
848 	sc->tx_ic_time = 768;
849 	mge_add_sysctls(sc);
850 
851 	/* Allocate network interface */
852 	ifp = sc->ifp = if_alloc(IFT_ETHER);
853 	if (ifp == NULL) {
854 		device_printf(dev, "if_alloc() failed\n");
855 		mge_detach(dev);
856 		return (ENOMEM);
857 	}
858 
859 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
860 	if_setsoftc(ifp, sc);
861 	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
862 	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
863 	if (sc->mge_hw_csum) {
864 		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
865 		if_sethwassist(ifp, MGE_CHECKSUM_FEATURES);
866 	}
867 	if_setcapenable(ifp, if_getcapabilities(ifp));
868 
869 #ifdef DEVICE_POLLING
870 	/* Advertise that polling is supported */
871 	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
872 #endif
873 
874 	if_setinitfn(ifp, mge_init);
875 	if_setstartfn(ifp, mge_start);
876 	if_setioctlfn(ifp, mge_ioctl);
877 
878 	if_setsendqlen(ifp, MGE_TX_DESC_NUM - 1);
879 	if_setsendqready(ifp);
880 
881 	mge_get_mac_address(sc, hwaddr);
882 	ether_ifattach(ifp, hwaddr);
883 	callout_init(&sc->wd_callout, 1);
884 
885 	/* Attach PHY(s) */
886 	if (sc->phy_attached) {
887 		error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
888 		    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
889 		if (error) {
890 			device_printf(dev, "MII failed to find PHY\n");
891 			if_free(ifp);
892 			sc->ifp = NULL;
893 			mge_detach(dev);
894 			return (error);
895 		}
896 		sc->mii = device_get_softc(sc->miibus);
897 
898 		/* Tell the MAC where to find the PHY so autoneg works */
899 		miisc = LIST_FIRST(&sc->mii->mii_phys);
900 		MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
901 	} else {
902 		/* no PHY, so use hard-coded values */
903 		ifmedia_init(&sc->mge_ifmedia, 0,
904 		    mge_ifmedia_upd,
905 		    mge_ifmedia_sts);
906 		ifmedia_add(&sc->mge_ifmedia,
907 		    IFM_ETHER | IFM_1000_T | IFM_FDX,
908 		    0, NULL);
909 		ifmedia_set(&sc->mge_ifmedia,
910 		    IFM_ETHER | IFM_1000_T | IFM_FDX);
911 	}
912 
913 	/* Attach interrupt handlers */
914 	/* TODO: review flags, in part. mark RX as INTR_ENTROPY ? */
915 	for (i = 1; i <= sc->mge_intr_cnt; ++i) {
916 		error = bus_setup_intr(dev, sc->res[i],
917 		    INTR_TYPE_NET | INTR_MPSAFE,
918 		    NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
919 		    sc, &sc->ih_cookie[i - 1]);
920 		if (error) {
921 			device_printf(dev, "could not setup %s\n",
922 			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
923 			mge_detach(dev);
924 			return (error);
925 		}
926 	}
927 
928 	if (sc->switch_attached) {
929 		MGE_WRITE(sc, MGE_REG_PHYDEV, MGE_SWITCH_PHYDEV);
930 		device_add_child(dev, "mdio", -1);
931 		bus_generic_attach(dev);
932 	}
933 
934 	return (0);
935 }
936 
937 static int
938 mge_detach(device_t dev)
939 {
940 	struct mge_softc *sc;
941 	int error, i;
942 
943 	sc = device_get_softc(dev);
944 
945 	/* Stop controller and free TX queue */
946 	if (sc->ifp)
947 		mge_shutdown(dev);
948 
949 	/* Wait for any pending mge_tick() callout to finish */
950 	callout_drain(&sc->wd_callout);
951 
952 	/* Stop and release all interrupts */
953 	for (i = 0; i < sc->mge_intr_cnt; ++i) {
954 		if (!sc->ih_cookie[i])
955 			continue;
956 
957 		error = bus_teardown_intr(dev, sc->res[1 + i],
958 		    sc->ih_cookie[i]);
959 		if (error)
960 			device_printf(dev, "could not release %s\n",
961 			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
962 	}
963 
964 	/* Detach network interface */
965 	if (sc->ifp) {
966 		ether_ifdetach(sc->ifp);
967 		if_free(sc->ifp);
968 	}
969 
970 	/* Free DMA resources */
971 	mge_free_dma(sc);
972 
973 	/* Free IO memory handler */
974 	bus_release_resources(dev, res_spec, sc->res);
975 
976 	/* Destroy mutexes */
977 	mtx_destroy(&sc->receive_lock);
978 	mtx_destroy(&sc->transmit_lock);
979 
980 	if (device_get_unit(dev) == 0)
981 		sx_destroy(&sx_smi);
982 
983 	return (0);
984 }
985 
986 static void
987 mge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
988 {
989 	struct mge_softc *sc;
990 	struct mii_data *mii;
991 
992 	sc = if_getsoftc(ifp);
993 	MGE_GLOBAL_LOCK(sc);
994 
995 	if (!sc->phy_attached) {
996 		ifmr->ifm_active = IFM_1000_T | IFM_FDX | IFM_ETHER;
997 		ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
998 		goto out_unlock;
999 	}
1000 
1001 	mii = sc->mii;
1002 	mii_pollstat(mii);
1003 
1004 	ifmr->ifm_active = mii->mii_media_active;
1005 	ifmr->ifm_status = mii->mii_media_status;
1006 
1007 out_unlock:
1008 	MGE_GLOBAL_UNLOCK(sc);
1009 }
1010 
1011 static uint32_t
1012 mge_set_port_serial_control(uint32_t media)
1013 {
1014 	uint32_t port_config;
1015 
1016 	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
1017 	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
1018 
1019 	if (IFM_TYPE(media) == IFM_ETHER) {
1020 		switch(IFM_SUBTYPE(media)) {
1021 			case IFM_AUTO:
1022 				break;
1023 			case IFM_1000_T:
1024 				port_config  |= (PORT_SERIAL_GMII_SPEED_1000 |
1025 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
1026 				    | PORT_SERIAL_SPEED_AUTONEG);
1027 				break;
1028 			case IFM_100_TX:
1029 				port_config  |= (PORT_SERIAL_MII_SPEED_100 |
1030 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
1031 				    | PORT_SERIAL_SPEED_AUTONEG);
1032 				break;
1033 			case IFM_10_T:
1034 				port_config  |= (PORT_SERIAL_AUTONEG |
1035 				    PORT_SERIAL_AUTONEG_FC |
1036 				    PORT_SERIAL_SPEED_AUTONEG);
1037 				break;
1038 		}
1039 		if (media & IFM_FDX)
1040 			port_config |= PORT_SERIAL_FULL_DUPLEX;
1041 	}
1042 	return (port_config);
1043 }
1044 
1045 static int
1046 mge_ifmedia_upd(if_t ifp)
1047 {
1048 	struct mge_softc *sc = if_getsoftc(ifp);
1049 
1050 	/*
1051 	 * Do not do anything for switch here, as updating media between
1052 	 * MGE MAC and switch MAC is hardcoded in PCB. Changing it here would
1053 	 * break the link.
1054 	 */
1055 	if (sc->phy_attached) {
1056 		MGE_GLOBAL_LOCK(sc);
1057 		if (if_getflags(ifp) & IFF_UP) {
1058 			sc->mge_media_status = sc->mii->mii_media.ifm_media;
1059 			mii_mediachg(sc->mii);
1060 
1061 			/* MGE MAC needs to be reinitialized. */
1062 			mge_init_locked(sc);
1063 
1064 		}
1065 		MGE_GLOBAL_UNLOCK(sc);
1066 	}
1067 
1068 	return (0);
1069 }
1070 
1071 static void
1072 mge_init(void *arg)
1073 {
1074 	struct mge_softc *sc;
1075 
1076 	sc = arg;
1077 	MGE_GLOBAL_LOCK(sc);
1078 
1079 	mge_init_locked(arg);
1080 
1081 	MGE_GLOBAL_UNLOCK(sc);
1082 }
1083 
1084 static void
1085 mge_init_locked(void *arg)
1086 {
1087 	struct mge_softc *sc = arg;
1088 	struct mge_desc_wrapper *dw;
1089 	volatile uint32_t reg_val;
1090 	int i, count;
1091 	uint32_t media_status;
1092 
1094 	MGE_GLOBAL_LOCK_ASSERT(sc);
1095 
1096 	/* Stop interface */
1097 	mge_stop(sc);
1098 
1099 	/* Disable interrupts */
1100 	mge_intrs_ctrl(sc, 0);
1101 
1102 	/* Set MAC address */
1103 	mge_set_mac_address(sc);
1104 
1105 	/* Setup multicast filters */
1106 	mge_setup_multicast(sc);
1107 
1108 	if (sc->mge_ver == 2) {
1109 		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
1110 		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
1111 	}
1112 
1113 	/* Initialize TX queue configuration registers */
1114 	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
1115 	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
1116 	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
1117 
1118 	/* Clear TX queue configuration registers for unused queues */
1119 	for (i = 1; i < 7; i++) {
1120 		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
1121 		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
1122 		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
1123 	}
1124 
1125 	/* Set default MTU; sc->mge_mtu is the version-dependent register offset */
1126 	MGE_WRITE(sc, sc->mge_mtu, 0);
1127 
1128 	/* Port configuration */
1129 	MGE_WRITE(sc, MGE_PORT_CONFIG,
1130 	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
1131 	    PORT_CONFIG_ARO_RXQ(0));
1132 	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);
1133 
1134 	/* Configure promisc mode */
1135 	mge_set_prom_mode(sc, MGE_RX_DEFAULT_QUEUE);
1136 
1137 	media_status = sc->mge_media_status;
1138 	if (sc->switch_attached) {
1139 		media_status &= ~IFM_TMASK;
1140 		media_status |= IFM_1000_T;
1141 	}
1142 
1143 	/* Setup port configuration */
1144 	reg_val = mge_set_port_serial_control(media_status);
1145 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1146 
1147 	/* Setup SDMA configuration */
1148 	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
1149 	    MGE_SDMA_TX_BYTE_SWAP |
1150 	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
1151 	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
1152 
1153 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
1154 
1155 	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
1156 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
1157 	    sc->rx_desc_start);
1158 
1159 	/* Reset descriptor indexes */
1160 	sc->tx_desc_curr = 0;
1161 	sc->rx_desc_curr = 0;
1162 	sc->tx_desc_used_idx = 0;
1163 	sc->tx_desc_used_count = 0;
1164 
1165 	/* Enable RX descriptors */
1166 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
1167 		dw = &sc->mge_rx_desc[i];
1168 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1169 		dw->mge_desc->buff_size = MCLBYTES;
1170 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1171 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1172 	}
1173 
1174 	/* Enable RX queue */
1175 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
1176 
1177 	/* Enable port */
1178 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1179 	reg_val |= PORT_SERIAL_ENABLE;
1180 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1181 	count = 0x100000;
1182 	for (;;) {
1183 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1184 		if (reg_val & MGE_STATUS_LINKUP)
1185 			break;
1186 		DELAY(100);
1187 		if (--count == 0) {
1188 			if_printf(sc->ifp, "Timeout on link-up\n");
1189 			break;
1190 		}
1191 	}
1192 
1193 	/* Setup interrupts coalescing */
1194 	mge_set_rxic(sc);
1195 	mge_set_txic(sc);
1196 
1197 	/* Enable interrupts */
1198 #ifdef DEVICE_POLLING
1199 	/*
1200 	 * ...only if polling is not turned on.  Disable interrupts explicitly
1201 	 * if polling is enabled.
1202 	 */
1203 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING)
1204 		mge_intrs_ctrl(sc, 0);
1205 	else
1206 #endif /* DEVICE_POLLING */
1207 	mge_intrs_ctrl(sc, 1);
1208 
1209 	/* Activate network interface */
1210 	if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, 0);
1211 	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
1212 	sc->wd_timer = 0;
1213 
1214 	/* Schedule watchdog timeout */
1215 	if (sc->phy_attached)
1216 		callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1217 }
1218 
1219 static void
1220 mge_intr_rxtx(void *arg)
1221 {
1222 	struct mge_softc *sc;
1223 	uint32_t int_cause, int_cause_ext;
1224 
1225 	sc = arg;
1226 	MGE_GLOBAL_LOCK(sc);
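	/*
	 * MGE_GLOBAL_LOCK takes both the transmit and receive locks; each
	 * is dropped separately below once its side has been serviced.
	 */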
1227 
1228 #ifdef DEVICE_POLLING
1229 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1230 		MGE_GLOBAL_UNLOCK(sc);
1231 		return;
1232 	}
1233 #endif
1234 
1235 	/* Get interrupt cause */
1236 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1237 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1238 
1239 	/* Check for Transmit interrupt */
1240 	if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
1241 	    MGE_PORT_INT_EXT_TXUR)) {
1242 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1243 		    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1244 		mge_intr_tx_locked(sc);
1245 	}
1246 
1247 	MGE_TRANSMIT_UNLOCK(sc);
1248 
1249 	/* Check for Receive interrupt */
1250 	mge_intr_rx_check(sc, int_cause, int_cause_ext);
1251 
1252 	MGE_RECEIVE_UNLOCK(sc);
1253 }
1254 
1255 static void
1256 mge_intr_err(void *arg)
1257 {
1258 	struct mge_softc *sc;
1259 	if_t ifp;
1260 
1261 	sc = arg;
1262 	ifp = sc->ifp;
1263 	if_printf(ifp, "%s\n", __FUNCTION__);
1264 }
1265 
1266 static void
1267 mge_intr_misc(void *arg)
1268 {
1269 	struct mge_softc *sc;
1270 	if_t ifp;
1271 
1272 	sc = arg;
1273 	ifp = sc->ifp;
1274 	if_printf(ifp, "%s\n", __FUNCTION__);
1275 }
1276 
1277 static void
1278 mge_intr_rx(void *arg)
{
1279 	struct mge_softc *sc;
1280 	uint32_t int_cause, int_cause_ext;
1281 
1282 	sc = arg;
1283 	MGE_RECEIVE_LOCK(sc);
1284 
1285 #ifdef DEVICE_POLLING
1286 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1287 		MGE_RECEIVE_UNLOCK(sc);
1288 		return;
1289 	}
1290 #endif
1291 
1292 	/* Get interrupt cause */
1293 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1294 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1295 
1296 	mge_intr_rx_check(sc, int_cause, int_cause_ext);
1297 
1298 	MGE_RECEIVE_UNLOCK(sc);
1299 }
1300 
1301 static void
1302 mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
1303     uint32_t int_cause_ext)
1304 {
1305 	/* Check for resource error */
1306 	if (int_cause & MGE_PORT_INT_RXERRQ0) {
1307 		mge_reinit_rx(sc);
1308 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1309 		    ~(int_cause & MGE_PORT_INT_RXERRQ0));
1310 	}
1311 
1312 	int_cause &= MGE_PORT_INT_RXQ0;
1313 	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1314 
1315 	if (int_cause || int_cause_ext) {
1316 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1317 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1318 		mge_intr_rx_locked(sc, -1);
1319 	}
1320 }
1321 
1322 static int
1323 mge_intr_rx_locked(struct mge_softc *sc, int count)
1324 {
1325 	if_t ifp = sc->ifp;
1326 	uint32_t status;
1327 	uint16_t bufsize;
1328 	struct mge_desc_wrapper* dw;
1329 	struct mbuf *mb;
1330 	int rx_npkts = 0;
1331 
1332 	MGE_RECEIVE_LOCK_ASSERT(sc);
1333 
1334 	while (count != 0) {
1335 		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1336 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1337 		    BUS_DMASYNC_POSTREAD);
1338 
1339 		/* Get status */
1340 		status = dw->mge_desc->cmd_status;
1341 		bufsize = dw->mge_desc->buff_size;
1342 		if ((status & MGE_DMA_OWNED) != 0)
1343 			break;
1344 
1345 		if (dw->mge_desc->byte_count &&
1346 		    ~(status & MGE_ERR_SUMMARY)) {
1347 
1348 			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1349 			    BUS_DMASYNC_POSTREAD);
1350 
1351 			mb = m_devget(dw->buffer->m_data,
1352 			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
1353 			    0, ifp, NULL);
1354 
1355 			if (mb == NULL)
1356 				/* Give up if no mbufs */
1357 				break;
1358 
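			/*
			 * The controller stores frames at a 2-byte offset so
			 * that the IP header lands 32-bit aligned; strip that
			 * padding before handing the mbuf up.
			 */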
1359 			mb->m_len -= 2;
1360 			mb->m_pkthdr.len -= 2;
1361 			mb->m_data += 2;
1362 
1363 			mb->m_pkthdr.rcvif = ifp;
1364 
1365 			mge_offload_process_frame(ifp, mb, status,
1366 			    bufsize);
1367 
1368 			MGE_RECEIVE_UNLOCK(sc);
1369 			if_input(ifp, mb);
1370 			MGE_RECEIVE_LOCK(sc);
1371 			rx_npkts++;
1372 		}
1373 
1374 		dw->mge_desc->byte_count = 0;
1375 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1376 		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
1377 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1378 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1379 
1380 		if (count > 0)
1381 			count -= 1;
1382 	}
1383 
1384 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_npkts);
1385 
1386 	return (rx_npkts);
1387 }
1388 
1389 static void
1390 mge_intr_sum(void *arg)
1391 {
1392 	struct mge_softc *sc = arg;
1393 	if_t ifp;
1394 
1395 	ifp = sc->ifp;
1396 	if_printf(ifp, "%s\n", __FUNCTION__);
1397 }
1398 
1399 static void
1400 mge_intr_tx(void *arg)
1401 {
1402 	struct mge_softc *sc = arg;
1403 	uint32_t int_cause_ext;
1404 
1405 	MGE_TRANSMIT_LOCK(sc);
1406 
1407 #ifdef DEVICE_POLLING
1408 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1409 		MGE_TRANSMIT_UNLOCK(sc);
1410 		return;
1411 	}
1412 #endif
1413 
1414 	/* Ack the interrupt */
1415 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1416 	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1417 	    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1418 
1419 	mge_intr_tx_locked(sc);
1420 
1421 	MGE_TRANSMIT_UNLOCK(sc);
1422 }
1423 
1424 static void
1425 mge_intr_tx_locked(struct mge_softc *sc)
1426 {
1427 	if_t ifp = sc->ifp;
1428 	struct mge_desc_wrapper *dw;
1429 	struct mge_desc *desc;
1430 	uint32_t status;
1431 	int send = 0;
1432 
1433 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1434 
1435 	/* Disable watchdog */
1436 	sc->wd_timer = 0;
1437 
1438 	while (sc->tx_desc_used_count) {
1439 		/* Get the descriptor */
1440 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1441 		desc = dw->mge_desc;
1442 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1443 		    BUS_DMASYNC_POSTREAD);
1444 
1445 		/* Get descriptor status */
1446 		status = desc->cmd_status;
1447 
1448 		if (status & MGE_DMA_OWNED)
1449 			break;
1450 
1451 		sc->tx_desc_used_idx =
1452 		    (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
1453 		sc->tx_desc_used_count--;
1454 
1455 		/* Update collision statistics */
1456 		if (status & MGE_ERR_SUMMARY) {
1457 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1458 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1459 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1460 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16);
1461 		}
1462 
1463 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1464 		    BUS_DMASYNC_POSTWRITE);
1465 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1466 		m_freem(dw->buffer);
1467 		dw->buffer = (struct mbuf*)NULL;
1468 		send++;
1469 
1470 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1471 	}
1472 
1473 	if (send) {
1474 		/* Now send anything that was pending */
1475 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1476 		mge_start_locked(ifp);
1477 	}
1478 }

1479 static int
1480 mge_ioctl(if_t ifp, u_long command, caddr_t data)
1481 {
1482 	struct mge_softc *sc = if_getsoftc(ifp);
1483 	struct ifreq *ifr = (struct ifreq *)data;
1484 	int mask, error;
1485 	uint32_t flags;
1486 
1487 	error = 0;
1488 
1489 	switch (command) {
1490 	case SIOCSIFFLAGS:
1491 		MGE_GLOBAL_LOCK(sc);
1492 
1493 		if (if_getflags(ifp) & IFF_UP) {
1494 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1495 				flags = if_getflags(ifp) ^ sc->mge_if_flags;
1496 				if (flags & IFF_PROMISC)
1497 					mge_set_prom_mode(sc,
1498 					    MGE_RX_DEFAULT_QUEUE);
1499 
1500 				if (flags & IFF_ALLMULTI)
1501 					mge_setup_multicast(sc);
1502 			} else
1503 				mge_init_locked(sc);
1504 		}
1505 		else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1506 			mge_stop(sc);
1507 
1508 		sc->mge_if_flags = if_getflags(ifp);
1509 		MGE_GLOBAL_UNLOCK(sc);
1510 		break;
1511 	case SIOCADDMULTI:
1512 	case SIOCDELMULTI:
1513 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1514 			MGE_GLOBAL_LOCK(sc);
1515 			mge_setup_multicast(sc);
1516 			MGE_GLOBAL_UNLOCK(sc);
1517 		}
1518 		break;
1519 	case SIOCSIFCAP:
1520 		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
1521 		if (mask & IFCAP_HWCSUM) {
1522 			if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
1523 			if_setcapenablebit(ifp, IFCAP_HWCSUM & ifr->ifr_reqcap, 0);
1524 			if (if_getcapenable(ifp) & IFCAP_TXCSUM)
1525 				if_sethwassist(ifp, MGE_CHECKSUM_FEATURES);
1526 			else
1527 				if_sethwassist(ifp, 0);
1528 		}
1529 #ifdef DEVICE_POLLING
1530 		if (mask & IFCAP_POLLING) {
1531 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1532 				error = ether_poll_register(mge_poll, ifp);
1533 				if (error)
1534 					return (error);
1535 
1536 				MGE_GLOBAL_LOCK(sc);
1537 				mge_intrs_ctrl(sc, 0);
1538 				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
1539 				MGE_GLOBAL_UNLOCK(sc);
1540 			} else {
1541 				error = ether_poll_deregister(ifp);
1542 				MGE_GLOBAL_LOCK(sc);
1543 				mge_intrs_ctrl(sc, 1);
1544 				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
1545 				MGE_GLOBAL_UNLOCK(sc);
1546 			}
1547 		}
1548 #endif
1549 		break;
1550 	case SIOCGIFMEDIA: /* fall through */
1551 	case SIOCSIFMEDIA:
1552 		/*
1553 		 * Setting up media type via ioctls is *not* supported for MAC
1554 		 * which is connected to switch. Use etherswitchcfg.
1555 		 */
1556 		if (!sc->phy_attached && (command == SIOCSIFMEDIA))
1557 			return (0);
1558 		else if (!sc->phy_attached) {
1559 			error = ifmedia_ioctl(ifp, ifr, &sc->mge_ifmedia,
1560 			    command);
1561 			break;
1562 		}
1563 
1564 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T &&
1565 		    !(ifr->ifr_media & IFM_FDX)) {
1566 			device_printf(sc->dev,
1567 			    "1000baseT half-duplex unsupported\n");
1568 			return (0);
1569 		}
1570 		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1571 		break;
1572 	default:
1573 		error = ether_ioctl(ifp, command, data);
1574 	}
1575 	return (error);
1576 }
1577 
1578 static int
1579 mge_miibus_readreg(device_t dev, int phy, int reg)
1580 {
1581 
1582 	KASSERT(!switch_attached, ("miibus used with switch attached"));
1583 
1584 	return (mv_read_ext_phy(dev, phy, reg));
1585 }
1586 
1587 static int
1588 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1589 {
1590 
1591 	KASSERT(!switch_attached, ("miibus used with switch attached"));
1592 
1593 	mv_write_ext_phy(dev, phy, reg, value);
1594 
1595 	return (0);
1596 }
1597 
1598 static int
1599 mge_probe(device_t dev)
1600 {
1601 
1602 	if (!ofw_bus_status_okay(dev))
1603 		return (ENXIO);
1604 
1605 	if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
1606 		return (ENXIO);
1607 
1608 	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1609 	return (BUS_PROBE_DEFAULT);
1610 }
1611 
1612 static int
1613 mge_resume(device_t dev)
1614 {
1615 
1616 	device_printf(dev, "%s\n", __FUNCTION__);
1617 	return (0);
1618 }
1619 
1620 static int
1621 mge_shutdown(device_t dev)
1622 {
1623 	struct mge_softc *sc = device_get_softc(dev);
1624 
1625 	MGE_GLOBAL_LOCK(sc);
1626 
1627 #ifdef DEVICE_POLLING
1628 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING)
1629 		ether_poll_deregister(sc->ifp);
1630 #endif
1631 
1632 	mge_stop(sc);
1633 
1634 	MGE_GLOBAL_UNLOCK(sc);
1635 
1636 	return (0);
1637 }
1638 
1639 static int
1640 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1641 {
1642 	struct mge_desc_wrapper *dw = NULL;
1643 	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1644 	bus_dmamap_t mapp;
1645 	int error;
1646 	int seg, nsegs;
1647 	int desc_no;
1648 
1649 	/* Fetch unused map */
1650 	desc_no = sc->tx_desc_curr;
1651 	dw = &sc->mge_tx_desc[desc_no];
1652 	mapp = dw->buffer_dmap;
1653 
1654 	/* Create mapping in DMA memory */
1655 	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1656 	    BUS_DMA_NOWAIT);
1657 	if (error != 0) {
1658 		m_freem(m0);
1659 		return (error);
1660 	}
1661 
1662 	/* Only one segment is supported. */
1663 	if (nsegs != 1) {
1664 		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1665 		m_freem(m0);
1666 		return (-1);
1667 	}
1668 
1669 	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1670 
1671 	/* Everything is OK; fill the descriptor and hand it to the DMA engine */
1672 	for (seg = 0; seg < nsegs; seg++) {
1673 		dw->mge_desc->byte_count = segs[seg].ds_len;
1674 		dw->mge_desc->buffer = segs[seg].ds_addr;
1675 		dw->buffer = m0;
1676 		dw->mge_desc->cmd_status = 0;
1677 		if (seg == 0)
1678 			mge_offload_setup_descriptor(sc, dw);
1679 		dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
1680 		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1681 		    MGE_DMA_OWNED;
1682 	}
1683 
1684 	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1685 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1686 
1687 	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
1688 	sc->tx_desc_used_count++;
1689 	return (0);
1690 }
1691 
1692 static void
1693 mge_tick(void *msc)
1694 {
1695 	struct mge_softc *sc = msc;
1696 
1697 	KASSERT(sc->phy_attached == 1, ("mge_tick while PHY not attached"));
1698 
1699 	MGE_GLOBAL_LOCK(sc);
1700 
1701 	/* Check for TX timeout */
1702 	mge_watchdog(sc);
1703 
1704 	mii_tick(sc->mii);
1705 
1706 	/* Check for media type change */
1707 	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
1708 		mge_ifmedia_upd(sc->ifp);
1709 
1710 	MGE_GLOBAL_UNLOCK(sc);
1711 
1712 	/* Schedule another timeout one second from now */
1713 	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1716 }
1717 
1718 static void
1719 mge_watchdog(struct mge_softc *sc)
1720 {
1721 	if_t ifp;
1722 
1723 	ifp = sc->ifp;
1724 
1725 	if (sc->wd_timer == 0 || --sc->wd_timer) {
1726 		return;
1727 	}
1728 
1729 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1730 	if_printf(ifp, "watchdog timeout\n");
1731 
1732 	mge_stop(sc);
1733 	mge_init_locked(sc);
1734 }
1735 
1736 static void
1737 mge_start(if_t ifp)
1738 {
1739 	struct mge_softc *sc = if_getsoftc(ifp);
1740 
1741 	MGE_TRANSMIT_LOCK(sc);
1742 
1743 	mge_start_locked(ifp);
1744 
1745 	MGE_TRANSMIT_UNLOCK(sc);
1746 }
1747 
1748 static void
1749 mge_start_locked(if_t ifp)
1750 {
1751 	struct mge_softc *sc;
1752 	struct mbuf *m0, *mtmp;
1753 	uint32_t reg_val, queued = 0;
1754 
1755 	sc = if_getsoftc(ifp);
1756 
1757 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1758 
1759 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1760 	    IFF_DRV_RUNNING)
1761 		return;
1762 
1763 	for (;;) {
1764 		/* Get packet from the queue */
1765 		m0 = if_dequeue(ifp);
1766 		if (m0 == NULL)
1767 			break;
1768 
1769 		if (m0->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP) ||
1770 		    m0->m_flags & M_VLANTAG) {
1771 			if (M_WRITABLE(m0) == 0) {
1772 				mtmp = m_dup(m0, M_NOWAIT);
1773 				m_freem(m0);
1774 				if (mtmp == NULL)
1775 					continue;
1776 				m0 = mtmp;
1777 			}
1778 		}
1779 		/* The driver supports only one DMA fragment. */
1780 		if (m0->m_next != NULL) {
1781 			mtmp = m_defrag(m0, M_NOWAIT);
1782 			if (mtmp != NULL)
1783 				m0 = mtmp;
1784 		}
1785 
1786 		/* Check for free descriptors */
1787 		if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1788 			if_sendq_prepend(ifp, m0);
1789 			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1790 			break;
1791 		}
1792 
1793 		if (mge_encap(sc, m0) != 0)
1794 			break;
1795 
1796 		queued++;
1797 		BPF_MTAP(ifp, m0);
1798 	}
1799 
1800 	if (queued) {
1801 		/* Enable transmitter and watchdog timer */
1802 		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1803 		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
1804 		sc->wd_timer = 5;
1805 	}
1806 }
1807 
1808 static void
1809 mge_stop(struct mge_softc *sc)
1810 {
1811 	if_t ifp;
1812 	volatile uint32_t reg_val, status;
1813 	struct mge_desc_wrapper *dw;
1814 	struct mge_desc *desc;
1815 	int count;
1816 
1817 	ifp = sc->ifp;
1818 
1819 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
1820 		return;
1821 
1822 	/* Stop tick engine */
1823 	callout_stop(&sc->wd_callout);
1824 
1825 	/* Disable interface */
1826 	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
1827 	sc->wd_timer = 0;
1828 
1829 	/* Disable interrupts */
1830 	mge_intrs_ctrl(sc, 0);
1831 
1832 	/* Disable Rx and Tx */
1833 	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1834 	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1835 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1836 
1837 	/* Remove pending data from TX queue */
1838 	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1839 	    sc->tx_desc_used_count) {
1840 		/* Get the descriptor */
1841 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1842 		desc = dw->mge_desc;
1843 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1844 		    BUS_DMASYNC_POSTREAD);
1845 
1846 		/* Get descriptor status */
1847 		status = desc->cmd_status;
1848 
1849 		if (status & MGE_DMA_OWNED)
1850 			break;
1851 
1852 		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
1853 		    MGE_TX_DESC_NUM;
1854 		sc->tx_desc_used_count--;
1855 
1856 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1857 		    BUS_DMASYNC_POSTWRITE);
1858 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1859 
1860 		m_freem(dw->buffer);
1861 		dw->buffer = (struct mbuf*)NULL;
1862 	}
1863 
1864 	/* Wait for end of transmission */
1865 	count = 0x100000;
1866 	do {
1867 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1868 		if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
1869 		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1870 			break;
1871 		DELAY(100);
1872 	} while (--count > 0);
1873 
1874 	if (count == 0)
1875 		if_printf(ifp,
1876 		    "%s: timeout while waiting for end of transmission\n",
1877 		    __FUNCTION__);
1878 
1879 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1880 	reg_val &= ~(PORT_SERIAL_ENABLE);
1881 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1882 }
1883 
1884 static int
1885 mge_suspend(device_t dev)
1886 {
1887 
1888 	device_printf(dev, "%s\n", __FUNCTION__);
1889 	return (0);
1890 }
1891 
1892 static void
1893 mge_offload_process_frame(if_t ifp, struct mbuf *frame,
1894     uint32_t status, uint16_t bufsize)
1895 {
1896 	int csum_flags = 0;
1897 
1898 	if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
1899 		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1900 			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
1901 
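		/*
		 * For non-fragmented TCP/UDP frames the controller verifies
		 * the L4 checksum; csum_data = 0xFFFF tells the stack the
		 * pseudo-header checksum is already accounted for.
		 */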
1902 		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1903 		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1904 		    (status & MGE_RX_L4_CSUM_OK)) {
1905 			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1906 			frame->m_pkthdr.csum_data = 0xFFFF;
1907 		}
1908 
1909 		frame->m_pkthdr.csum_flags = csum_flags;
1910 	}
1911 }
1912 
1913 static void
1914 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1915 {
1916 	struct mbuf *m0 = dw->buffer;
1917 	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1918 	int csum_flags = m0->m_pkthdr.csum_flags;
1919 	int cmd_status = 0;
1920 	struct ip *ip;
1921 	int ehlen, etype;
1922 
1923 	if (csum_flags != 0) {
1924 		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1925 			etype = ntohs(eh->evl_proto);
1926 			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1927 			cmd_status |= MGE_TX_VLAN_TAGGED;
1928 		} else {
1929 			etype = ntohs(eh->evl_encap_proto);
1930 			ehlen = ETHER_HDR_LEN;
1931 		}
1932 
1933 		if (etype != ETHERTYPE_IP) {
1934 			if_printf(sc->ifp,
1935 			    "TCP/IP Offload enabled for unsupported "
1936 			    "protocol!\n");
1937 			return;
1938 		}
1939 
1940 		ip = (struct ip *)(m0->m_data + ehlen);
1941 		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1942 		cmd_status |= MGE_TX_NOT_FRAGMENT;
1943 	}
1944 
1945 	if (csum_flags & CSUM_IP)
1946 		cmd_status |= MGE_TX_GEN_IP_CSUM;
1947 
1948 	if (csum_flags & CSUM_TCP)
1949 		cmd_status |= MGE_TX_GEN_L4_CSUM;
1950 
1951 	if (csum_flags & CSUM_UDP)
1952 		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1953 
1954 	dw->mge_desc->cmd_status |= cmd_status;
1955 }
1956 
1957 static void
1958 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1959 {
1960 
1961 	if (enable) {
1962 		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
1963 		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1964 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
1965 		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1966 		    MGE_PORT_INT_EXT_TXBUF0);
1967 	} else {
1968 		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1969 		MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1970 
1971 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1972 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1973 
1974 		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1975 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
1976 	}
1977 }
1978 
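/*
 * Table-driven CRC-8 (polynomial 0x07, initial value 0, no reflection),
 * used to hash multicast addresses into the "other multicast" filter.
 */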
1979 static uint8_t
1980 mge_crc8(uint8_t *data, int size)
1981 {
1982 	uint8_t crc = 0;
1983 	static const uint8_t ct[256] = {
1984 		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1985 		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1986 		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1987 		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1988 		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1989 		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1990 		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
1991 		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
1992 		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
1993 		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
1994 		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
1995 		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
1996 		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
1997 		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
1998 		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
1999 		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
2000 		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
2001 		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
2002 		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
2003 		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
2004 		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
2005 		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
2006 		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
2007 		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
2008 		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
2009 		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
2010 		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
2011 		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
2012 		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
2013 		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
2014 		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
2015 		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
2016 	};
2017 
2018 	while (size--)
2019 		crc = ct[crc ^ *(data++)];
2020 
2021 	return (crc);
2022 }
2023 
2024 struct mge_hash_maddr_ctx {
2025 	uint32_t smt[MGE_MCAST_REG_NUMBER];
2026 	uint32_t omt[MGE_MCAST_REG_NUMBER];
2027 };
2028 
2029 static u_int
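/*
 * Addresses in the IPv4 multicast range 01:00:5e:00:00:xx index the
 * "special" table directly by their last byte; all other multicast
 * addresses are hashed with CRC-8 into the "other" table.  Each 32-bit
 * filter word packs four 8-bit entries of (queue << 1) | pass.
 */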
2030 mge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2031 {
2032 	static const uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
2033 	struct mge_hash_maddr_ctx *ctx = arg;
2034 	static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
2035 	uint8_t *mac;
2036 	int i;
2037 
2038 	mac = LLADDR(sdl);
2039 	if (memcmp(mac, special, sizeof(special)) == 0) {
2040 		i = mac[5];
2041 		ctx->smt[i >> 2] |= v << ((i & 0x03) << 3);
2042 	} else {
2043 		i = mge_crc8(mac, ETHER_ADDR_LEN);
2044 		ctx->omt[i >> 2] |= v << ((i & 0x03) << 3);
2045 	}
2046 	return (1);
2047 }
2048 
2049 static void
2050 mge_setup_multicast(struct mge_softc *sc)
2051 {
2052 	struct mge_hash_maddr_ctx ctx;
2053 	if_t ifp = sc->ifp;
2054 	static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
2055 	int i;
2056 
2057 	if (if_getflags(ifp) & IFF_ALLMULTI) {
2058 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
2059 			ctx.smt[i] = ctx.omt[i] =
2060 			    (v << 24) | (v << 16) | (v << 8) | v;
2061 	} else {
2062 		memset(&ctx, 0, sizeof(ctx));
2063 		if_foreach_llmaddr(ifp, mge_hash_maddr, &ctx);
2064 	}
2065 
2066 	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
2067 		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), ctx.smt[i]);
2068 		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), ctx.omt[i]);
2069 	}
2070 }
2071 
2072 static void
2073 mge_set_rxic(struct mge_softc *sc)
2074 {
2075 	uint32_t reg;
2076 
2077 	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
2078 		sc->rx_ic_time = sc->mge_rx_ipg_max;
2079 
2080 	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
2081 	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
2082 	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
2083 	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
2084 }
2085 
2086 static void
2087 mge_set_txic(struct mge_softc *sc)
2088 {
2089 	uint32_t reg;
2090 
2091 	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
2092 		sc->tx_ic_time = sc->mge_tfut_ipg_max;
2093 
2094 	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
2095 	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
2096 	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
2097 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
2098 }
2099 
2100 static int
2101 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
2102 {
2103 	struct mge_softc *sc = (struct mge_softc *)arg1;
2104 	uint32_t time;
2105 	int error;
2106 
2107 	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
2108 	error = sysctl_handle_int(oidp, &time, 0, req);
2109 	if (error != 0)
2110 		return (error);
2111 
2112 	MGE_GLOBAL_LOCK(sc);
2113 	if (arg2 == MGE_IC_RX) {
2114 		sc->rx_ic_time = time;
2115 		mge_set_rxic(sc);
2116 	} else {
2117 		sc->tx_ic_time = time;
2118 		mge_set_txic(sc);
2119 	}
2120 	MGE_GLOBAL_UNLOCK(sc);
2121 
2122 	return (0);
2123 }
2124 
2125 static void
2126 mge_add_sysctls(struct mge_softc *sc)
2127 {
2128 	struct sysctl_ctx_list *ctx;
2129 	struct sysctl_oid_list *children;
2130 	struct sysctl_oid *tree;
2131 
2132 	ctx = device_get_sysctl_ctx(sc->dev);
2133 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2134 	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
2135 	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "MGE Interrupts coalescing");
2136 	children = SYSCTL_CHILDREN(tree);
2137 
2138 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
2139 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, MGE_IC_RX,
2140 	    mge_sysctl_ic, "I", "IC RX time threshold");
2141 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
2142 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, MGE_IC_TX,
2143 	    mge_sysctl_ic, "I", "IC TX time threshold");
2144 }
2145 
2146 static int
2147 mge_mdio_writereg(device_t dev, int phy, int reg, int value)
2148 {
2149 
2150 	mv_write_ge_smi(dev, phy, reg, value);
2151 
2152 	return (0);
2153 }
2154 
2156 static int
2157 mge_mdio_readreg(device_t dev, int phy, int reg)
2158 {
2159 	int ret;
2160 
2161 	ret = mv_read_ge_smi(dev, phy, reg);
2162 
2163 	return (ret);
2164 }
2165