xref: /freebsd/sys/dev/mge/if_mge.c (revision 2e3507c25e42292b45a5482e116d278f5515d04d)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5  * Copyright (C) 2009-2015 Semihalf
6  * Copyright (C) 2015 Stormshield
7  * All rights reserved.
8  *
9  * Developed by Semihalf.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of MARVELL nor the names of contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
38 #endif
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/endian.h>
43 #include <sys/mbuf.h>
44 #include <sys/lock.h>
45 #include <sys/mutex.h>
46 #include <sys/kernel.h>
47 #include <sys/module.h>
48 #include <sys/socket.h>
49 #include <sys/sysctl.h>
50 
51 #include <net/ethernet.h>
52 #include <net/bpf.h>
53 #include <net/if.h>
54 #include <net/if_arp.h>
55 #include <net/if_dl.h>
56 #include <net/if_media.h>
57 #include <net/if_types.h>
58 #include <net/if_vlan_var.h>
59 
60 #include <netinet/in_systm.h>
61 #include <netinet/in.h>
62 #include <netinet/ip.h>
63 
64 #include <sys/sockio.h>
65 #include <sys/bus.h>
66 #include <machine/bus.h>
67 #include <sys/rman.h>
68 #include <machine/resource.h>
69 
70 #include <dev/mii/mii.h>
71 #include <dev/mii/miivar.h>
72 
73 #include <dev/fdt/fdt_common.h>
74 #include <dev/ofw/ofw_bus.h>
75 #include <dev/ofw/ofw_bus_subr.h>
76 #include <dev/mdio/mdio.h>
77 
78 #include <dev/mge/if_mgevar.h>
79 #include <arm/mv/mvreg.h>
80 #include <arm/mv/mvvar.h>
81 
82 #include "miibus_if.h"
83 #include "mdio_if.h"
84 
85 #define	MGE_DELAY(x)	pause("SMI access sleep", (x) / tick_sbt)
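/*
 * pause(9) sleeps, so MGE_DELAY() may only be used in sleepable context;
 * presumably this is why SMI access is interlocked with an sx(9) lock
 * (sx_smi below) rather than a mutex.
 */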
86 
87 static int mge_probe(device_t dev);
88 static int mge_attach(device_t dev);
89 static int mge_detach(device_t dev);
90 static int mge_shutdown(device_t dev);
91 static int mge_suspend(device_t dev);
92 static int mge_resume(device_t dev);
93 
94 static int mge_miibus_readreg(device_t dev, int phy, int reg);
95 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
96 
97 static int mge_mdio_readreg(device_t dev, int phy, int reg);
98 static int mge_mdio_writereg(device_t dev, int phy, int reg, int value);
99 
100 static int mge_ifmedia_upd(if_t ifp);
101 static void mge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr);
102 
103 static void mge_init(void *arg);
104 static void mge_init_locked(void *arg);
105 static void mge_start(if_t ifp);
106 static void mge_start_locked(if_t ifp);
107 static void mge_watchdog(struct mge_softc *sc);
108 static int mge_ioctl(if_t ifp, u_long command, caddr_t data);
109 
110 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
111 static uint32_t mge_rx_ipg(uint32_t val, int ver);
112 static void mge_ver_params(struct mge_softc *sc);
113 
114 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
115 static void mge_intr_rxtx(void *arg);
116 static void mge_intr_rx(void *arg);
117 static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
118     uint32_t int_cause_ext);
119 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
120 static void mge_intr_tx(void *arg);
121 static void mge_intr_tx_locked(struct mge_softc *sc);
122 static void mge_intr_misc(void *arg);
123 static void mge_intr_sum(void *arg);
124 static void mge_intr_err(void *arg);
125 static void mge_stop(struct mge_softc *sc);
126 static void mge_tick(void *msc);
127 static uint32_t mge_set_port_serial_control(uint32_t media);
128 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
129 static void mge_set_mac_address(struct mge_softc *sc);
130 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
131     uint8_t queue);
132 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
133 static int mge_allocate_dma(struct mge_softc *sc);
134 static int mge_alloc_desc_dma(struct mge_softc *sc,
135     struct mge_desc_wrapper* desc_tab, uint32_t size,
136     bus_dma_tag_t *buffer_tag);
137 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
138     struct mbuf **mbufp, bus_addr_t *paddr);
139 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
140     int error);
141 static void mge_free_dma(struct mge_softc *sc);
142 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
143     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
144 static void mge_offload_process_frame(if_t ifp, struct mbuf *frame,
145     uint32_t status, uint16_t bufsize);
146 static void mge_offload_setup_descriptor(struct mge_softc *sc,
147     struct mge_desc_wrapper *dw);
148 static uint8_t mge_crc8(uint8_t *data, int size);
149 static void mge_setup_multicast(struct mge_softc *sc);
150 static void mge_set_rxic(struct mge_softc *sc);
151 static void mge_set_txic(struct mge_softc *sc);
152 static void mge_add_sysctls(struct mge_softc *sc);
153 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
154 
155 static device_method_t mge_methods[] = {
156 	/* Device interface */
157 	DEVMETHOD(device_probe,		mge_probe),
158 	DEVMETHOD(device_attach,	mge_attach),
159 	DEVMETHOD(device_detach,	mge_detach),
160 	DEVMETHOD(device_shutdown,	mge_shutdown),
161 	DEVMETHOD(device_suspend,	mge_suspend),
162 	DEVMETHOD(device_resume,	mge_resume),
163 	/* MII interface */
164 	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
165 	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
166 	/* MDIO interface */
167 	DEVMETHOD(mdio_readreg,		mge_mdio_readreg),
168 	DEVMETHOD(mdio_writereg,	mge_mdio_writereg),
169 	{ 0, 0 }
170 };
171 
172 DEFINE_CLASS_0(mge, mge_driver, mge_methods, sizeof(struct mge_softc));
173 
174 static int switch_attached = 0;
175 
176 DRIVER_MODULE(mge, simplebus, mge_driver, 0, 0);
177 DRIVER_MODULE(miibus, mge, miibus_driver, 0, 0);
178 DRIVER_MODULE(mdio, mge, mdio_driver, 0, 0);
179 MODULE_DEPEND(mge, ether, 1, 1, 1);
180 MODULE_DEPEND(mge, miibus, 1, 1, 1);
181 MODULE_DEPEND(mge, mdio, 1, 1, 1);
182 
183 static struct resource_spec res_spec[] = {
184 	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
185 	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
186 	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
187 	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
188 	{ -1, 0 }
189 };
190 
191 static struct {
192 	driver_intr_t *handler;
193 	char *description;
194 } mge_intrs[MGE_INTR_COUNT + 1] = {
195 	{ mge_intr_rxtx, "GbE aggregated interrupt" },
196 	{ mge_intr_rx,	"GbE receive interrupt" },
197 	{ mge_intr_tx,	"GbE transmit interrupt" },
198 	{ mge_intr_misc, "GbE misc interrupt" },
199 	{ mge_intr_sum,	"GbE summary interrupt" },
200 	{ mge_intr_err,	"GbE error interrupt" },
201 };
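/*
 * With a single GbE interrupt line (mge_intr_cnt == 1) only the aggregated
 * handler (entry 0) is installed; otherwise entries 1..mge_intr_cnt are
 * used. See the bus_setup_intr() loop in mge_attach().
 */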
202 
203 /* SMI access interlock */
204 static struct sx sx_smi;
205 
206 static uint32_t
207 mv_read_ge_smi(device_t dev, int phy, int reg)
208 {
209 	uint32_t timeout;
210 	uint32_t ret;
211 	struct mge_softc *sc;
212 
213 	sc = device_get_softc(dev);
214 	KASSERT(sc != NULL, ("NULL softc ptr!"));
215 	timeout = MGE_SMI_READ_RETRIES;
216 
217 	MGE_SMI_LOCK();
218 	while (--timeout &&
219 	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
220 		MGE_DELAY(MGE_SMI_READ_DELAY);
221 
222 	if (timeout == 0) {
223 		device_printf(dev, "SMI read timeout.\n");
224 		ret = ~0U;
225 		goto out;
226 	}
227 
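	/*
	 * SMI command layout, as encoded here: register address in bits
	 * 25:21, PHY address in bits 20:16; data, when present, occupies
	 * the low 16 bits (MGE_SMI_DATA_MASK).
	 */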
228 	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
229 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
230 
231 	/* Wait till finished. */
232 	timeout = MGE_SMI_READ_RETRIES;
233 	while (--timeout &&
234 	    !(MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_READVALID))
235 		MGE_DELAY(MGE_SMI_READ_DELAY);
236 
237 	if (timeout == 0) {
238 		device_printf(dev, "SMI read validation timeout.\n");
239 		ret = ~0U;
240 		goto out;
241 	}
242 
243 	/* Wait for the data to update in the SMI register */
244 	MGE_DELAY(MGE_SMI_DELAY);
245 	ret = MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
246 
247 out:
248 	MGE_SMI_UNLOCK();
249 	return (ret);
251 }
252 
253 static void
254 mv_write_ge_smi(device_t dev, int phy, int reg, uint32_t value)
255 {
256 	uint32_t timeout;
257 	struct mge_softc *sc;
258 
259 	sc = device_get_softc(dev);
260 	KASSERT(sc != NULL, ("NULL softc ptr!"));
261 
262 	MGE_SMI_LOCK();
263 	timeout = MGE_SMI_WRITE_RETRIES;
264 	while (--timeout &&
265 	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
266 		MGE_DELAY(MGE_SMI_WRITE_DELAY);
267 
268 	if (timeout == 0) {
269 		device_printf(dev, "SMI write timeout.\n");
270 		goto out;
271 	}
272 
273 	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
274 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
275 	    (value & MGE_SMI_DATA_MASK)));
276 
277 out:
278 	MGE_SMI_UNLOCK();
279 }
280 
281 static int
282 mv_read_ext_phy(device_t dev, int phy, int reg)
283 {
284 	uint32_t retries;
285 	struct mge_softc *sc;
286 	uint32_t ret;
287 
288 	sc = device_get_softc(dev);
289 
290 	MGE_SMI_LOCK();
291 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
292 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
293 
294 	retries = MGE_SMI_READ_RETRIES;
295 	while (--retries &&
296 	    !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
297 		DELAY(MGE_SMI_READ_DELAY);
298 
299 	if (retries == 0)
300 		device_printf(dev, "Timeout while reading from PHY\n");
301 
302 	ret = MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
303 	MGE_SMI_UNLOCK();
304 
305 	return (ret);
306 }
307 
308 static void
309 mv_write_ext_phy(device_t dev, int phy, int reg, int value)
310 {
311 	uint32_t retries;
312 	struct mge_softc *sc;
313 
314 	sc = device_get_softc(dev);
315 
316 	MGE_SMI_LOCK();
317 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
318 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
319 	    (value & MGE_SMI_DATA_MASK)));
320 
321 	retries = MGE_SMI_WRITE_RETRIES;
322 	while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
323 		DELAY(MGE_SMI_WRITE_DELAY);
324 
325 	if (retries == 0)
326 		device_printf(dev, "Timeout while writing to PHY\n");
327 	MGE_SMI_UNLOCK();
328 }
329 
330 static void
331 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
332 {
333 	uint32_t mac_l, mac_h;
334 	uint8_t lmac[6];
335 	int i, valid;
336 
337 	/*
338 	 * Retrieve hw address from the device tree.
339 	 */
340 	i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
341 	if (i == 6) {
342 		valid = 0;
343 		for (i = 0; i < 6; i++)
344 			if (lmac[i] != 0) {
345 				valid = 1;
346 				break;
347 			}
348 
349 		if (valid) {
350 			bcopy(lmac, addr, 6);
351 			return;
352 		}
353 	}
354 
355 	/*
356 	 * Fall back -- use the currently programmed address.
357 	 */
358 	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
359 	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
360 
361 	addr[0] = (mac_h & 0xff000000) >> 24;
362 	addr[1] = (mac_h & 0x00ff0000) >> 16;
363 	addr[2] = (mac_h & 0x0000ff00) >> 8;
364 	addr[3] = (mac_h & 0x000000ff);
365 	addr[4] = (mac_l & 0x0000ff00) >> 8;
366 	addr[5] = (mac_l & 0x000000ff);
367 }
368 
369 static uint32_t
370 mge_tfut_ipg(uint32_t val, int ver)
371 {
372 
373 	switch (ver) {
374 	case 1:
375 		return ((val & 0x3fff) << 4);
376 	case 2:
377 	default:
378 		return ((val & 0xffff) << 4);
379 	}
380 }
381 
382 static uint32_t
383 mge_rx_ipg(uint32_t val, int ver)
384 {
385 
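	/*
	 * The v2 IPG field is split in the SDMA config register: bit 15 of
	 * the value maps to bit 25 and bits 14:0 to bits 21:7, matching the
	 * encoding below; v1 uses a contiguous 14-bit field at bits 21:8.
	 */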
386 	switch (ver) {
387 	case 1:
388 		return ((val & 0x3fff) << 8);
389 	case 2:
390 	default:
391 		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
392 	}
393 }
394 
395 static void
396 mge_ver_params(struct mge_softc *sc)
397 {
398 	uint32_t d, r;
399 
400 	soc_id(&d, &r);
401 	if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
402 	    d == MV_DEV_88F6282 ||
403 	    d == MV_DEV_MV78100 ||
404 	    d == MV_DEV_MV78100_Z0 ||
405 	    (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
406 		sc->mge_ver = 2;
407 		sc->mge_mtu = 0x4e8;
408 		sc->mge_tfut_ipg_max = 0xFFFF;
409 		sc->mge_rx_ipg_max = 0xFFFF;
410 		sc->mge_tx_arb_cfg = 0xFC0000FF;
411 		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
412 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
413 	} else {
414 		sc->mge_ver = 1;
415 		sc->mge_mtu = 0x458;
416 		sc->mge_tfut_ipg_max = 0x3FFF;
417 		sc->mge_rx_ipg_max = 0x3FFF;
418 		sc->mge_tx_arb_cfg = 0x000000FF;
419 		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
420 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
421 	}
422 	if (d == MV_DEV_88RC8180)
423 		sc->mge_intr_cnt = 1;
424 	else
425 		sc->mge_intr_cnt = 2;
426 
427 	if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
428 		sc->mge_hw_csum = 0;
429 	else
430 		sc->mge_hw_csum = 1;
431 }
432 
433 static void
434 mge_set_mac_address(struct mge_softc *sc)
435 {
436 	char *if_mac;
437 	uint32_t mac_l, mac_h;
438 
439 	MGE_GLOBAL_LOCK_ASSERT(sc);
440 
441 	if_mac = (char *)if_getlladdr(sc->ifp);
442 
443 	mac_l = ((if_mac[4] & 0xff) << 8) | (if_mac[5] & 0xff);
444 	mac_h = ((uint32_t)(if_mac[0] & 0xff) << 24) | ((if_mac[1] & 0xff) << 16) |
445 	    ((if_mac[2] & 0xff) << 8) | (if_mac[3] & 0xff);
446 
447 	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
448 	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
449 
450 	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
451 }
452 
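/*
 * Program the unicast DA filter: it is indexed by the low nibble of the
 * last MAC address byte, four byte-wide entries per 32-bit register, each
 * entry holding a pass bit (bit 0) and the RX queue number in bits 3:1.
 */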
453 static void
454 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
455 {
456 	uint32_t reg_idx, reg_off, reg_val, i;
457 
458 	last_byte &= 0xf;
459 	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
460 	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
461 	reg_val = (1 | (queue << 1)) << reg_off;
462 
463 	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
464 		if (i == reg_idx)
465 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
466 		else
467 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
468 	}
469 }
470 
471 static void
472 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
473 {
474 	uint32_t port_config;
475 	uint32_t reg_val, i;
476 
477 	/* Enable or disable promiscuous mode as needed */
478 	if (if_getflags(sc->ifp) & IFF_PROMISC) {
479 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
480 		port_config |= PORT_CONFIG_UPM;
481 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
482 
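		/*
		 * Replicate the (pass | queue << 1) entry into all four byte
		 * lanes so every hash-table bucket accepts the frame.
		 */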
483 		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
484 		    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
485 
486 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
487 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
488 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
489 		}
490 
491 		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
492 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
493 
494 	} else {
495 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
496 		port_config &= ~PORT_CONFIG_UPM;
497 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
498 
499 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
500 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
501 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
502 		}
503 
504 		mge_set_mac_address(sc);
505 	}
506 }
507 
508 static void
509 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
510 {
511 	u_int32_t *paddr;
512 
513 	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
514 	paddr = arg;
515 
516 	*paddr = segs->ds_addr;
517 }
518 
519 static int
520 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
521     bus_addr_t *paddr)
522 {
523 	struct mbuf *new_mbuf;
524 	bus_dma_segment_t seg[1];
525 	int error;
526 	int nsegs;
527 
528 	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
529 
530 	new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
531 	if (new_mbuf == NULL)
532 		return (ENOBUFS);
533 	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
534 
535 	if (*mbufp) {
536 		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
537 		bus_dmamap_unload(tag, map);
538 	}
539 
540 	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
541 	    BUS_DMA_NOWAIT);
542 	KASSERT(nsegs == 1, ("Too many segments returned!"));
543 	if (nsegs != 1 || error)
544 		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
545 
546 	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
547 
548 	(*mbufp) = new_mbuf;
549 	(*paddr) = seg->ds_addr;
550 	return (0);
551 }
552 
553 static int
554 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
555     uint32_t size, bus_dma_tag_t *buffer_tag)
556 {
557 	struct mge_desc_wrapper *dw;
558 	bus_addr_t desc_paddr;
559 	int i, error;
560 
561 	desc_paddr = 0;
562 	for (i = size - 1; i >= 0; i--) {
563 		dw = &(tab[i]);
564 		error = bus_dmamem_alloc(sc->mge_desc_dtag,
565 		    (void**)&(dw->mge_desc),
566 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
567 		    &(dw->desc_dmap));
568 
569 		if (error) {
570 			if_printf(sc->ifp, "failed to allocate DMA memory\n");
571 			dw->mge_desc = NULL;
572 			return (ENXIO);
573 		}
574 
575 		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
576 		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
577 		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
578 
579 		if (error) {
580 			if_printf(sc->ifp, "can't load descriptor\n");
581 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
582 			    dw->desc_dmap);
583 			dw->mge_desc = NULL;
584 			return (ENXIO);
585 		}
586 
587 		/* Chain descriptors */
588 		dw->mge_desc->next_desc = desc_paddr;
589 		desc_paddr = dw->mge_desc_paddr;
590 	}
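	/* Close the ring: the loop above left the last descriptor's link at 0. */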
591 	tab[size - 1].mge_desc->next_desc = desc_paddr;
592 
593 	/* Allocate a busdma tag for mbufs. */
594 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
595 	    1, 0,				/* alignment, boundary */
596 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
597 	    BUS_SPACE_MAXADDR,			/* highaddr */
598 	    NULL, NULL,				/* filtfunc, filtfuncarg */
599 	    MCLBYTES, 1,			/* maxsize, nsegments */
600 	    MCLBYTES, 0,			/* maxsegsz, flags */
601 	    NULL, NULL,				/* lockfunc, lockfuncarg */
602 	    buffer_tag);			/* dmat */
603 	if (error) {
604 		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
605 		return (ENXIO);
606 	}
607 
608 	/* Create TX busdma maps */
609 	for (i = 0; i < size; i++) {
610 		dw = &(tab[i]);
611 		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
612 		if (error) {
613 			if_printf(sc->ifp, "failed to create map for mbuf\n");
614 			return (ENXIO);
615 		}
616 
617 		dw->buffer = (struct mbuf*)NULL;
618 		dw->mge_desc->buffer = (bus_addr_t)NULL;
619 	}
620 
621 	return (0);
622 }
623 
624 static int
625 mge_allocate_dma(struct mge_softc *sc)
626 {
627 	struct mge_desc_wrapper *dw;
628 	int i;
629 
630 	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
631 	bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
632 	    16, 0,				/* alignment, boundary */
633 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
634 	    BUS_SPACE_MAXADDR,			/* highaddr */
635 	    NULL, NULL,				/* filtfunc, filtfuncarg */
636 	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
637 	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
638 	    NULL, NULL,				/* lockfunc, lockfuncarg */
639 	    &sc->mge_desc_dtag);		/* dmat */
640 
642 	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
643 	    &sc->mge_tx_dtag);
644 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
645 	    &sc->mge_rx_dtag);
646 
647 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
648 		dw = &(sc->mge_rx_desc[i]);
649 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
650 		    &dw->mge_desc->buffer);
651 	}
652 
653 	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
654 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
655 
656 	return (0);
657 }
658 
659 static void
660 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
661     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
662 {
663 	struct mge_desc_wrapper *dw;
664 	int i;
665 
666 	for (i = 0; i < size; i++) {
667 		/* Free RX mbuf */
668 		dw = &(tab[i]);
669 
670 		if (dw->buffer_dmap) {
671 			if (free_mbufs) {
672 				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
673 				    BUS_DMASYNC_POSTREAD);
674 				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
675 			}
676 			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
677 			if (free_mbufs)
678 				m_freem(dw->buffer);
679 		}
680 		/* Free RX descriptors */
681 		if (dw->desc_dmap) {
682 			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
683 			    BUS_DMASYNC_POSTREAD);
684 			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
685 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
686 			    dw->desc_dmap);
687 		}
688 	}
689 }
690 
691 static void
692 mge_free_dma(struct mge_softc *sc)
693 {
694 
695 	/* Free descriptors and mbufs */
696 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
697 	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
698 
699 	/* Destroy mbuf dma tag */
700 	bus_dma_tag_destroy(sc->mge_tx_dtag);
701 	bus_dma_tag_destroy(sc->mge_rx_dtag);
702 	/* Destroy descriptors tag */
703 	bus_dma_tag_destroy(sc->mge_desc_dtag);
704 }
705 
706 static void
707 mge_reinit_rx(struct mge_softc *sc)
708 {
709 	struct mge_desc_wrapper *dw;
710 	int i;
711 
712 	MGE_RECEIVE_LOCK_ASSERT(sc);
713 
714 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
715 
716 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
717 	    &sc->mge_rx_dtag);
718 
719 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
720 		dw = &(sc->mge_rx_desc[i]);
721 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
722 		    &dw->mge_desc->buffer);
723 	}
724 
725 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
726 	sc->rx_desc_curr = 0;
727 
728 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
729 	    sc->rx_desc_start);
730 
731 	/* Enable RX queue */
732 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
733 }
734 
735 #ifdef DEVICE_POLLING
736 static poll_handler_t mge_poll;
737 
738 static int
739 mge_poll(if_t ifp, enum poll_cmd cmd, int count)
740 {
741 	struct mge_softc *sc = if_getsoftc(ifp);
742 	uint32_t int_cause, int_cause_ext;
743 	int rx_npkts = 0;
744 
745 	MGE_RECEIVE_LOCK(sc);
746 
747 	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
748 		MGE_RECEIVE_UNLOCK(sc);
749 		return (rx_npkts);
750 	}
751 
752 	if (cmd == POLL_AND_CHECK_STATUS) {
753 		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
754 		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
755 
756 		/* Check for resource error */
757 		if (int_cause & MGE_PORT_INT_RXERRQ0)
758 			mge_reinit_rx(sc);
759 
760 		if (int_cause || int_cause_ext) {
761 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
762 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
763 		}
764 	}
765 
767 	rx_npkts = mge_intr_rx_locked(sc, count);
768 
769 	MGE_RECEIVE_UNLOCK(sc);
770 	MGE_TRANSMIT_LOCK(sc);
771 	mge_intr_tx_locked(sc);
772 	MGE_TRANSMIT_UNLOCK(sc);
773 	return (rx_npkts);
774 }
775 #endif /* DEVICE_POLLING */
776 
777 static int
778 mge_attach(device_t dev)
779 {
780 	struct mge_softc *sc;
781 	struct mii_softc *miisc;
782 	if_t ifp;
783 	uint8_t hwaddr[ETHER_ADDR_LEN];
784 	int i, error, phy;
785 
786 	sc = device_get_softc(dev);
787 	sc->dev = dev;
788 	sc->node = ofw_bus_get_node(dev);
789 	phy = 0;
790 
791 	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) == 0) {
792 		device_printf(dev, "PHY%i attached, phy_sc points to %s\n", phy,
793 		    device_get_nameunit(sc->phy_sc->dev));
794 		sc->phy_attached = 1;
795 	} else {
796 		device_printf(dev, "PHY not attached.\n");
797 		sc->phy_attached = 0;
798 		sc->phy_sc = sc;
799 	}
800 
801 	if (fdt_find_compatible(sc->node, "mrvl,sw", 1) != 0) {
802 		device_printf(dev, "Switch attached.\n");
803 		sc->switch_attached = 1;
804 		/* Also set the module-global flag shared across instances. */
805 		switch_attached = 1;
806 	} else {
807 		sc->switch_attached = 0;
808 	}
809 
810 	if (device_get_unit(dev) == 0) {
811 		sx_init(&sx_smi, "mge_tick() SMI access threads interlock");
812 	}
813 
814 	/* Set chip version-dependent parameters */
815 	mge_ver_params(sc);
816 
817 	/* Initialize mutexes */
818 	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
819 	    MTX_DEF);
820 	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
821 	    MTX_DEF);
822 
823 	/* Allocate IO and IRQ resources */
824 	error = bus_alloc_resources(dev, res_spec, sc->res);
825 	if (error) {
826 		device_printf(dev, "could not allocate resources\n");
827 		mge_detach(dev);
828 		return (ENXIO);
829 	}
830 
831 	/* Allocate DMA, buffers, buffer descriptors */
832 	error = mge_allocate_dma(sc);
833 	if (error) {
834 		mge_detach(dev);
835 		return (ENXIO);
836 	}
837 
838 	sc->tx_desc_curr = 0;
839 	sc->rx_desc_curr = 0;
840 	sc->tx_desc_used_idx = 0;
841 	sc->tx_desc_used_count = 0;
842 
843 	/* Configure interrupt coalescing defaults */
844 	sc->rx_ic_time = 768;
845 	sc->tx_ic_time = 768;
846 	mge_add_sysctls(sc);
847 
848 	/* Allocate network interface */
849 	ifp = sc->ifp = if_alloc(IFT_ETHER);
850 	if (ifp == NULL) {
851 		device_printf(dev, "if_alloc() failed\n");
852 		mge_detach(dev);
853 		return (ENOMEM);
854 	}
855 
856 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
857 	if_setsoftc(ifp, sc);
858 	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
859 	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
860 	if (sc->mge_hw_csum) {
861 		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
862 		if_sethwassist(ifp, MGE_CHECKSUM_FEATURES);
863 	}
864 	if_setcapenable(ifp, if_getcapabilities(ifp));
865 
866 #ifdef DEVICE_POLLING
867 	/* Advertise that polling is supported */
868 	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
869 #endif
870 
871 	if_setinitfn(ifp, mge_init);
872 	if_setstartfn(ifp, mge_start);
873 	if_setioctlfn(ifp, mge_ioctl);
874 
875 	if_setsendqlen(ifp, MGE_TX_DESC_NUM - 1);
876 	if_setsendqready(ifp);
877 
878 	mge_get_mac_address(sc, hwaddr);
879 	ether_ifattach(ifp, hwaddr);
880 	callout_init(&sc->wd_callout, 1);
881 
882 	/* Attach PHY(s) */
883 	if (sc->phy_attached) {
884 		error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
885 		    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
886 		if (error) {
887 			device_printf(dev, "MII failed to find PHY\n");
888 			if_free(ifp);
889 			sc->ifp = NULL;
890 			mge_detach(dev);
891 			return (error);
892 		}
893 		sc->mii = device_get_softc(sc->miibus);
894 
895 		/* Tell the MAC where to find the PHY so autoneg works */
896 		miisc = LIST_FIRST(&sc->mii->mii_phys);
897 		MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
898 	} else {
899 		/* no PHY, so use hard-coded values */
900 		ifmedia_init(&sc->mge_ifmedia, 0,
901 		    mge_ifmedia_upd,
902 		    mge_ifmedia_sts);
903 		ifmedia_add(&sc->mge_ifmedia,
904 		    IFM_ETHER | IFM_1000_T | IFM_FDX,
905 		    0, NULL);
906 		ifmedia_set(&sc->mge_ifmedia,
907 		    IFM_ETHER | IFM_1000_T | IFM_FDX);
908 	}
909 
910 	/* Attach interrupt handlers */
911 	/* TODO: review flags; in particular, mark RX as INTR_ENTROPY? */
912 	for (i = 1; i <= sc->mge_intr_cnt; ++i) {
913 		error = bus_setup_intr(dev, sc->res[i],
914 		    INTR_TYPE_NET | INTR_MPSAFE,
915 		    NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
916 		    sc, &sc->ih_cookie[i - 1]);
917 		if (error) {
918 			device_printf(dev, "could not setup %s\n",
919 			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
920 			mge_detach(dev);
921 			return (error);
922 		}
923 	}
924 
925 	if (sc->switch_attached) {
926 		MGE_WRITE(sc, MGE_REG_PHYDEV, MGE_SWITCH_PHYDEV);
927 		device_add_child(dev, "mdio", -1);
928 		bus_generic_attach(dev);
929 	}
930 
931 	return (0);
932 }
933 
934 static int
935 mge_detach(device_t dev)
936 {
937 	struct mge_softc *sc;
938 	int error, i;
939 
940 	sc = device_get_softc(dev);
941 
942 	/* Stop controller and free TX queue */
943 	if (sc->ifp)
944 		mge_shutdown(dev);
945 
946 	/* Wait for the tick callout to finish */
947 	callout_drain(&sc->wd_callout);
948 
949 	/* Stop and release all interrupts */
950 	for (i = 0; i < sc->mge_intr_cnt; ++i) {
951 		if (!sc->ih_cookie[i])
952 			continue;
953 
954 		error = bus_teardown_intr(dev, sc->res[1 + i],
955 		    sc->ih_cookie[i]);
956 		if (error)
957 			device_printf(dev, "could not release %s\n",
958 			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
959 	}
960 
961 	/* Detach network interface */
962 	if (sc->ifp) {
963 		ether_ifdetach(sc->ifp);
964 		if_free(sc->ifp);
965 	}
966 
967 	/* Free DMA resources */
968 	mge_free_dma(sc);
969 
970 	/* Free IO memory handler */
971 	bus_release_resources(dev, res_spec, sc->res);
972 
973 	/* Destroy mutexes */
974 	mtx_destroy(&sc->receive_lock);
975 	mtx_destroy(&sc->transmit_lock);
976 
977 	if (device_get_unit(dev) == 0)
978 		sx_destroy(&sx_smi);
979 
980 	return (0);
981 }
982 
983 static void
984 mge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
985 {
986 	struct mge_softc *sc;
987 	struct mii_data *mii;
988 
989 	sc = if_getsoftc(ifp);
990 	MGE_GLOBAL_LOCK(sc);
991 
992 	if (!sc->phy_attached) {
993 		ifmr->ifm_active = IFM_1000_T | IFM_FDX | IFM_ETHER;
994 		ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
995 		goto out_unlock;
996 	}
997 
998 	mii = sc->mii;
999 	mii_pollstat(mii);
1000 
1001 	ifmr->ifm_active = mii->mii_media_active;
1002 	ifmr->ifm_status = mii->mii_media_status;
1003 
1004 out_unlock:
1005 	MGE_GLOBAL_UNLOCK(sc);
1006 }
1007 
1008 static uint32_t
1009 mge_set_port_serial_control(uint32_t media)
1010 {
1011 	uint32_t port_config;
1012 
1013 	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
1014 	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
1015 
1016 	if (IFM_TYPE(media) == IFM_ETHER) {
1017 		switch(IFM_SUBTYPE(media)) {
1018 			case IFM_AUTO:
1019 				break;
1020 			case IFM_1000_T:
1021 				port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
1022 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
1023 				    | PORT_SERIAL_SPEED_AUTONEG);
1024 				break;
1025 			case IFM_100_TX:
1026 				port_config |= (PORT_SERIAL_MII_SPEED_100 |
1027 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
1028 				    | PORT_SERIAL_SPEED_AUTONEG);
1029 				break;
1030 			case IFM_10_T:
1031 				port_config |= (PORT_SERIAL_AUTONEG |
1032 				    PORT_SERIAL_AUTONEG_FC |
1033 				    PORT_SERIAL_SPEED_AUTONEG);
1034 				break;
1035 		}
1036 		if (media & IFM_FDX)
1037 			port_config |= PORT_SERIAL_FULL_DUPLEX;
1038 	}
1039 	return (port_config);
1040 }
1041 
1042 static int
1043 mge_ifmedia_upd(if_t ifp)
1044 {
1045 	struct mge_softc *sc = if_getsoftc(ifp);
1046 
1047 	/*
1048 	 * Do not do anything for switch here, as updating media between
1049 	 * MGE MAC and switch MAC is hardcoded in PCB. Changing it here would
1050 	 * break the link.
1051 	 */
1052 	if (sc->phy_attached) {
1053 		MGE_GLOBAL_LOCK(sc);
1054 		if (if_getflags(ifp) & IFF_UP) {
1055 			sc->mge_media_status = sc->mii->mii_media.ifm_media;
1056 			mii_mediachg(sc->mii);
1057 
1058 			/* MGE MAC needs to be reinitialized. */
1059 			mge_init_locked(sc);
1060 
1061 		}
1062 		MGE_GLOBAL_UNLOCK(sc);
1063 	}
1064 
1065 	return (0);
1066 }
1067 
1068 static void
1069 mge_init(void *arg)
1070 {
1071 	struct mge_softc *sc;
1072 
1073 	sc = arg;
1074 	MGE_GLOBAL_LOCK(sc);
1075 
1076 	mge_init_locked(arg);
1077 
1078 	MGE_GLOBAL_UNLOCK(sc);
1079 }
1080 
1081 static void
1082 mge_init_locked(void *arg)
1083 {
1084 	struct mge_softc *sc = arg;
1085 	struct mge_desc_wrapper *dw;
1086 	volatile uint32_t reg_val;
1087 	int i, count;
1088 	uint32_t media_status;
1089 
1090 
1092 
1093 	/* Stop interface */
1094 	mge_stop(sc);
1095 
1096 	/* Disable interrupts */
1097 	mge_intrs_ctrl(sc, 0);
1098 
1099 	/* Set MAC address */
1100 	mge_set_mac_address(sc);
1101 
1102 	/* Setup multicast filters */
1103 	mge_setup_multicast(sc);
1104 
1105 	if (sc->mge_ver == 2) {
1106 		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
1107 		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
1108 	}
1109 
1110 	/* Initialize TX queue configuration registers */
1111 	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
1112 	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
1113 	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
1114 
1115 	/* Clear TX queue configuration registers for unused queues */
1116 	for (i = 1; i < 7; i++) {
1117 		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
1118 		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
1119 		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
1120 	}
1121 
1122 	/* Set default MTU (sc->mge_mtu holds the MTU register offset) */
1123 	MGE_WRITE(sc, sc->mge_mtu, 0);
1124 
1125 	/* Port configuration */
1126 	MGE_WRITE(sc, MGE_PORT_CONFIG,
1127 	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
1128 	    PORT_CONFIG_ARO_RXQ(0));
1129 	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);
1130 
1131 	/* Configure promisc mode */
1132 	mge_set_prom_mode(sc, MGE_RX_DEFAULT_QUEUE);
1133 
1134 	media_status = sc->mge_media_status;
1135 	if (sc->switch_attached) {
1136 		media_status &= ~IFM_TMASK;
1137 		media_status |= IFM_1000_T;
1138 	}
1139 
1140 	/* Setup port configuration */
1141 	reg_val = mge_set_port_serial_control(media_status);
1142 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1143 
1144 	/* Setup SDMA configuration */
1145 	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
1146 	    MGE_SDMA_TX_BYTE_SWAP |
1147 	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
1148 	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
1149 
1150 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
1151 
1152 	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
1153 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
1154 	    sc->rx_desc_start);
1155 
1156 	/* Reset descriptor indexes */
1157 	sc->tx_desc_curr = 0;
1158 	sc->rx_desc_curr = 0;
1159 	sc->tx_desc_used_idx = 0;
1160 	sc->tx_desc_used_count = 0;
1161 
1162 	/* Enable RX descriptors */
1163 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
1164 		dw = &sc->mge_rx_desc[i];
1165 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1166 		dw->mge_desc->buff_size = MCLBYTES;
1167 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1168 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1169 	}
1170 
1171 	/* Enable RX queue */
1172 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
1173 
1174 	/* Enable port */
1175 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1176 	reg_val |= PORT_SERIAL_ENABLE;
1177 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1178 	count = 0x100000;
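	/* Poll for link-up; worst case ~105 s (0x100000 iterations * 100 us). */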
1179 	for (;;) {
1180 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1181 		if (reg_val & MGE_STATUS_LINKUP)
1182 			break;
1183 		DELAY(100);
1184 		if (--count == 0) {
1185 			if_printf(sc->ifp, "Timeout on link-up\n");
1186 			break;
1187 		}
1188 	}
1189 
1190 	/* Set up interrupt coalescing */
1191 	mge_set_rxic(sc);
1192 	mge_set_txic(sc);
1193 
1194 	/* Enable interrupts */
1195 #ifdef DEVICE_POLLING
1196 	/*
1197 	 * ...only if polling is not turned on. Disable interrupts explicitly
1198 	 * if polling is enabled.
1199 	 */
1200 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING)
1201 		mge_intrs_ctrl(sc, 0);
1202 	else
1203 #endif /* DEVICE_POLLING */
1204 	mge_intrs_ctrl(sc, 1);
1205 
1206 	/* Activate network interface */
1207 	if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, 0);
1208 	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
1209 	sc->wd_timer = 0;
1210 
1211 	/* Schedule watchdog timeout */
1212 	if (sc->phy_attached)
1213 		callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1214 }
1215 
1216 static void
1217 mge_intr_rxtx(void *arg)
1218 {
1219 	struct mge_softc *sc;
1220 	uint32_t int_cause, int_cause_ext;
1221 
1222 	sc = arg;
1223 	MGE_GLOBAL_LOCK(sc);
1224 
1225 #ifdef DEVICE_POLLING
1226 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1227 		MGE_GLOBAL_UNLOCK(sc);
1228 		return;
1229 	}
1230 #endif
1231 
1232 	/* Get interrupt cause */
1233 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1234 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1235 
1236 	/* Check for Transmit interrupt */
1237 	if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
1238 	    MGE_PORT_INT_EXT_TXUR)) {
1239 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1240 		    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1241 		mge_intr_tx_locked(sc);
1242 	}
1243 
1244 	MGE_TRANSMIT_UNLOCK(sc);
1245 
1246 	/* Check for Receive interrupt */
1247 	mge_intr_rx_check(sc, int_cause, int_cause_ext);
1248 
1249 	MGE_RECEIVE_UNLOCK(sc);
1250 }
1251 
1252 static void
1253 mge_intr_err(void *arg)
1254 {
1255 	struct mge_softc *sc;
1256 	if_t ifp;
1257 
1258 	sc = arg;
1259 	ifp = sc->ifp;
1260 	if_printf(ifp, "%s\n", __FUNCTION__);
1261 }
1262 
1263 static void
1264 mge_intr_misc(void *arg)
1265 {
1266 	struct mge_softc *sc;
1267 	if_t ifp;
1268 
1269 	sc = arg;
1270 	ifp = sc->ifp;
1271 	if_printf(ifp, "%s\n", __FUNCTION__);
1272 }
1273 
1274 static void
1275 mge_intr_rx(void *arg)
{
1276 	struct mge_softc *sc;
1277 	uint32_t int_cause, int_cause_ext;
1278 
1279 	sc = arg;
1280 	MGE_RECEIVE_LOCK(sc);
1281 
1282 #ifdef DEVICE_POLLING
1283 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1284 		MGE_RECEIVE_UNLOCK(sc);
1285 		return;
1286 	}
1287 #endif
1288 
1289 	/* Get interrupt cause */
1290 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1291 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1292 
1293 	mge_intr_rx_check(sc, int_cause, int_cause_ext);
1294 
1295 	MGE_RECEIVE_UNLOCK(sc);
1296 }
1297 
1298 static void
1299 mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
1300     uint32_t int_cause_ext)
1301 {
1302 	/* Check for resource error */
1303 	if (int_cause & MGE_PORT_INT_RXERRQ0) {
1304 		mge_reinit_rx(sc);
1305 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1306 		    ~(int_cause & MGE_PORT_INT_RXERRQ0));
1307 	}
1308 
1309 	int_cause &= MGE_PORT_INT_RXQ0;
1310 	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1311 
1312 	if (int_cause || int_cause_ext) {
1313 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1314 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1315 		mge_intr_rx_locked(sc, -1);
1316 	}
1317 }
1318 
1319 static int
1320 mge_intr_rx_locked(struct mge_softc *sc, int count)
1321 {
1322 	if_t ifp = sc->ifp;
1323 	uint32_t status;
1324 	uint16_t bufsize;
1325 	struct mge_desc_wrapper* dw;
1326 	struct mbuf *mb;
1327 	int rx_npkts = 0;
1328 
1329 	MGE_RECEIVE_LOCK_ASSERT(sc);
1330 
1331 	while (count != 0) {
1332 		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1333 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1334 		    BUS_DMASYNC_POSTREAD);
1335 
1336 		/* Get status */
1337 		status = dw->mge_desc->cmd_status;
1338 		bufsize = dw->mge_desc->buff_size;
1339 		if ((status & MGE_DMA_OWNED) != 0)
1340 			break;
1341 
1342 		if (dw->mge_desc->byte_count &&
1343 		    !(status & MGE_ERR_SUMMARY)) {
1345 			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1346 			    BUS_DMASYNC_POSTREAD);
1347 
1348 			mb = m_devget(dw->buffer->m_data,
1349 			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
1350 			    0, ifp, NULL);
1351 
1352 			if (mb == NULL)
1353 				/* Give up if no mbufs */
1354 				break;
1355 
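			/*
			 * Strip the 2-byte pad the controller places before
			 * the Ethernet header (presumably so the IP header
			 * ends up 32-bit aligned).
			 */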
1356 			mb->m_len -= 2;
1357 			mb->m_pkthdr.len -= 2;
1358 			mb->m_data += 2;
1359 
1360 			mb->m_pkthdr.rcvif = ifp;
1361 
1362 			mge_offload_process_frame(ifp, mb, status,
1363 			    bufsize);
1364 
1365 			MGE_RECEIVE_UNLOCK(sc);
1366 			if_input(ifp, mb);
1367 			MGE_RECEIVE_LOCK(sc);
1368 			rx_npkts++;
1369 		}
1370 
1371 		dw->mge_desc->byte_count = 0;
1372 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1373 		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
1374 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1375 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1376 
1377 		if (count > 0)
1378 			count -= 1;
1379 	}
1380 
1381 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_npkts);
1382 
1383 	return (rx_npkts);
1384 }
1385 
1386 static void
1387 mge_intr_sum(void *arg)
1388 {
1389 	struct mge_softc *sc = arg;
1390 	if_t ifp;
1391 
1392 	ifp = sc->ifp;
1393 	if_printf(ifp, "%s\n", __FUNCTION__);
1394 }
1395 
1396 static void
1397 mge_intr_tx(void *arg)
1398 {
1399 	struct mge_softc *sc = arg;
1400 	uint32_t int_cause_ext;
1401 
1402 	MGE_TRANSMIT_LOCK(sc);
1403 
1404 #ifdef DEVICE_POLLING
1405 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1406 		MGE_TRANSMIT_UNLOCK(sc);
1407 		return;
1408 	}
1409 #endif
1410 
1411 	/* Ack the interrupt */
1412 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1413 	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1414 	    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1415 
1416 	mge_intr_tx_locked(sc);
1417 
1418 	MGE_TRANSMIT_UNLOCK(sc);
1419 }
1420 
1421 static void
1422 mge_intr_tx_locked(struct mge_softc *sc)
1423 {
1424 	if_t ifp = sc->ifp;
1425 	struct mge_desc_wrapper *dw;
1426 	struct mge_desc *desc;
1427 	uint32_t status;
1428 	int send = 0;
1429 
1430 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1431 
1432 	/* Disable watchdog */
1433 	sc->wd_timer = 0;
1434 
1435 	while (sc->tx_desc_used_count) {
1436 		/* Get the descriptor */
1437 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1438 		desc = dw->mge_desc;
1439 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1440 		    BUS_DMASYNC_POSTREAD);
1441 
1442 		/* Get descriptor status */
1443 		status = desc->cmd_status;
1444 
1445 		if (status & MGE_DMA_OWNED)
1446 			break;
1447 
1448 		sc->tx_desc_used_idx =
1449 		    (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
1450 		sc->tx_desc_used_count--;
1451 
1452 		/* Update collision statistics */
1453 		if (status & MGE_ERR_SUMMARY) {
1454 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1455 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1456 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1457 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16);
1458 		}
1459 
1460 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1461 		    BUS_DMASYNC_POSTWRITE);
1462 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1463 		m_freem(dw->buffer);
1464 		dw->buffer = (struct mbuf*)NULL;
1465 		send++;
1466 
1467 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1468 	}
1469 
1470 	if (send) {
1471 		/* Now send anything that was pending */
1472 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1473 		mge_start_locked(ifp);
1474 	}
1475 }

1476 static int
1477 mge_ioctl(if_t ifp, u_long command, caddr_t data)
1478 {
1479 	struct mge_softc *sc = if_getsoftc(ifp);
1480 	struct ifreq *ifr = (struct ifreq *)data;
1481 	int mask, error;
1482 	uint32_t flags;
1483 
1484 	error = 0;
1485 
1486 	switch (command) {
1487 	case SIOCSIFFLAGS:
1488 		MGE_GLOBAL_LOCK(sc);
1489 
1490 		if (if_getflags(ifp) & IFF_UP) {
1491 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1492 				flags = if_getflags(ifp) ^ sc->mge_if_flags;
1493 				if (flags & IFF_PROMISC)
1494 					mge_set_prom_mode(sc,
1495 					    MGE_RX_DEFAULT_QUEUE);
1496 
1497 				if (flags & IFF_ALLMULTI)
1498 					mge_setup_multicast(sc);
1499 			} else
1500 				mge_init_locked(sc);
1501 		}
1502 		else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1503 			mge_stop(sc);
1504 
1505 		sc->mge_if_flags = if_getflags(ifp);
1506 		MGE_GLOBAL_UNLOCK(sc);
1507 		break;
1508 	case SIOCADDMULTI:
1509 	case SIOCDELMULTI:
1510 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1511 			MGE_GLOBAL_LOCK(sc);
1512 			mge_setup_multicast(sc);
1513 			MGE_GLOBAL_UNLOCK(sc);
1514 		}
1515 		break;
1516 	case SIOCSIFCAP:
1517 		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
1518 		if (mask & IFCAP_HWCSUM) {
1519 			if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
1520 			if_setcapenablebit(ifp, IFCAP_HWCSUM & ifr->ifr_reqcap, 0);
1521 			if (if_getcapenable(ifp) & IFCAP_TXCSUM)
1522 				if_sethwassist(ifp, MGE_CHECKSUM_FEATURES);
1523 			else
1524 				if_sethwassist(ifp, 0);
1525 		}
1526 #ifdef DEVICE_POLLING
1527 		if (mask & IFCAP_POLLING) {
1528 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1529 				error = ether_poll_register(mge_poll, ifp);
1530 				if (error)
1531 					return (error);
1532 
1533 				MGE_GLOBAL_LOCK(sc);
1534 				mge_intrs_ctrl(sc, 0);
1535 				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
1536 				MGE_GLOBAL_UNLOCK(sc);
1537 			} else {
1538 				error = ether_poll_deregister(ifp);
1539 				MGE_GLOBAL_LOCK(sc);
1540 				mge_intrs_ctrl(sc, 1);
1541 				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
1542 				MGE_GLOBAL_UNLOCK(sc);
1543 			}
1544 		}
1545 #endif
1546 		break;
1547 	case SIOCGIFMEDIA: /* fall through */
1548 	case SIOCSIFMEDIA:
1549 		/*
1550 		 * Setting the media type via ioctl is *not* supported for a
1551 		 * MAC that is connected to a switch. Use etherswitchcfg.
1552 		 */
1553 		if (!sc->phy_attached && (command == SIOCSIFMEDIA))
1554 			return (0);
1555 		else if (!sc->phy_attached) {
1556 			error = ifmedia_ioctl(ifp, ifr, &sc->mge_ifmedia,
1557 			    command);
1558 			break;
1559 		}
1560 
1561 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T &&
1562 		    !(ifr->ifr_media & IFM_FDX)) {
1563 			device_printf(sc->dev,
1564 			    "1000baseT half-duplex unsupported\n");
1565 			return (0);
1566 		}
1567 		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1568 		break;
1569 	default:
1570 		error = ether_ioctl(ifp, command, data);
1571 	}
1572 	return (error);
1573 }
1574 
1575 static int
1576 mge_miibus_readreg(device_t dev, int phy, int reg)
1577 {
1578 
1579 	KASSERT(!switch_attached, ("miibus used with switch attached"));
1580 
1581 	return (mv_read_ext_phy(dev, phy, reg));
1582 }
1583 
1584 static int
1585 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1586 {
1587 
1588 	KASSERT(!switch_attached, ("miibus used with switch attached"));
1589 
1590 	mv_write_ext_phy(dev, phy, reg, value);
1591 
1592 	return (0);
1593 }
1594 
1595 static int
1596 mge_probe(device_t dev)
1597 {
1598 
1599 	if (!ofw_bus_status_okay(dev))
1600 		return (ENXIO);
1601 
1602 	if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
1603 		return (ENXIO);
1604 
1605 	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1606 	return (BUS_PROBE_DEFAULT);
1607 }
1608 
1609 static int
1610 mge_resume(device_t dev)
1611 {
1612 
1613 	device_printf(dev, "%s\n", __FUNCTION__);
1614 	return (0);
1615 }
1616 
1617 static int
1618 mge_shutdown(device_t dev)
1619 {
1620 	struct mge_softc *sc = device_get_softc(dev);
1621 
1622 	MGE_GLOBAL_LOCK(sc);
1623 
1624 #ifdef DEVICE_POLLING
1625 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING)
1626 		ether_poll_deregister(sc->ifp);
1627 #endif
1628 
1629 	mge_stop(sc);
1630 
1631 	MGE_GLOBAL_UNLOCK(sc);
1632 
1633 	return (0);
1634 }
1635 
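/*
 * TX ring bookkeeping: tx_desc_curr is the producer index, advanced here;
 * tx_desc_used_idx and tx_desc_used_count track outstanding descriptors,
 * which are reclaimed in mge_intr_tx_locked().
 */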
1636 static int
1637 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1638 {
1639 	struct mge_desc_wrapper *dw = NULL;
1640 	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1641 	bus_dmamap_t mapp;
1642 	int error;
1643 	int seg, nsegs;
1644 	int desc_no;
1645 
1646 	/* Fetch unused map */
1647 	desc_no = sc->tx_desc_curr;
1648 	dw = &sc->mge_tx_desc[desc_no];
1649 	mapp = dw->buffer_dmap;
1650 
1651 	/* Create mapping in DMA memory */
1652 	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1653 	    BUS_DMA_NOWAIT);
1654 	if (error != 0) {
1655 		m_freem(m0);
1656 		return (error);
1657 	}
1658 
1659 	/* Only one segment is supported. */
1660 	if (nsegs != 1) {
1661 		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1662 		m_freem(m0);
1663 		return (-1);
1664 	}
1665 
1666 	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1667 
1668 	/* Everything is ok, now we can send buffers */
1669 	for (seg = 0; seg < nsegs; seg++) {
1670 		dw->mge_desc->byte_count = segs[seg].ds_len;
1671 		dw->mge_desc->buffer = segs[seg].ds_addr;
1672 		dw->buffer = m0;
1673 		dw->mge_desc->cmd_status = 0;
1674 		if (seg == 0)
1675 			mge_offload_setup_descriptor(sc, dw);
1676 		dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
1677 		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1678 		    MGE_DMA_OWNED;
1679 	}
1680 
1681 	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1682 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1683 
1684 	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
1685 	sc->tx_desc_used_count++;
1686 	return (0);
1687 }
1688 
1689 static void
1690 mge_tick(void *msc)
1691 {
1692 	struct mge_softc *sc = msc;
1693 
1694 	KASSERT(sc->phy_attached == 1, ("mge_tick while PHY not attached"));
1695 
1696 	MGE_GLOBAL_LOCK(sc);
1697 
1698 	/* Check for TX timeout */
1699 	mge_watchdog(sc);
1700 
1701 	mii_tick(sc->mii);
1702 
1703 	/* Check for media type change */
1704 	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
1705 		mge_ifmedia_upd(sc->ifp);
1706 
1707 	MGE_GLOBAL_UNLOCK(sc);
1708 
1709 	/* Schedule another timeout one second from now */
1710 	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1713 }
1714 
1715 static void
1716 mge_watchdog(struct mge_softc *sc)
1717 {
1718 	if_t ifp;
1719 
1720 	ifp = sc->ifp;
1721 
1722 	if (sc->wd_timer == 0 || --sc->wd_timer) {
1723 		return;
1724 	}
1725 
1726 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1727 	if_printf(ifp, "watchdog timeout\n");
1728 
1729 	mge_stop(sc);
1730 	mge_init_locked(sc);
1731 }
1732 
1733 static void
1734 mge_start(if_t ifp)
1735 {
1736 	struct mge_softc *sc = if_getsoftc(ifp);
1737 
1738 	MGE_TRANSMIT_LOCK(sc);
1739 
1740 	mge_start_locked(ifp);
1741 
1742 	MGE_TRANSMIT_UNLOCK(sc);
1743 }
1744 
1745 static void
1746 mge_start_locked(if_t ifp)
1747 {
1748 	struct mge_softc *sc;
1749 	struct mbuf *m0, *mtmp;
1750 	uint32_t reg_val, queued = 0;
1751 
1752 	sc = if_getsoftc(ifp);
1753 
1754 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1755 
1756 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1757 	    IFF_DRV_RUNNING)
1758 		return;
1759 
1760 	for (;;) {
1761 		/* Get packet from the queue */
1762 		m0 = if_dequeue(ifp);
1763 		if (m0 == NULL)
1764 			break;
1765 
1766 		if (m0->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP) ||
1767 		    m0->m_flags & M_VLANTAG) {
1768 			if (M_WRITABLE(m0) == 0) {
1769 				mtmp = m_dup(m0, M_NOWAIT);
1770 				m_freem(m0);
1771 				if (mtmp == NULL)
1772 					continue;
1773 				m0 = mtmp;
1774 			}
1775 		}
1776 		/* The driver supports only one DMA fragment. */
1777 		if (m0->m_next != NULL) {
1778 			mtmp = m_defrag(m0, M_NOWAIT);
1779 			if (mtmp != NULL)
1780 				m0 = mtmp;
1781 		}
1782 
1783 		/* Check for free descriptors */
1784 		if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1785 			if_sendq_prepend(ifp, m0);
1786 			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1787 			break;
1788 		}
1789 
1790 		if (mge_encap(sc, m0) != 0)
1791 			break;
1792 
1793 		queued++;
1794 		BPF_MTAP(ifp, m0);
1795 	}
1796 
1797 	if (queued) {
1798 		/* Enable transmitter and watchdog timer */
1799 		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1800 		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
1801 		sc->wd_timer = 5;
1802 	}
1803 }
1804 
1805 static void
1806 mge_stop(struct mge_softc *sc)
1807 {
1808 	if_t ifp;
1809 	volatile uint32_t reg_val, status;
1810 	struct mge_desc_wrapper *dw;
1811 	struct mge_desc *desc;
1812 	int count;
1813 
1814 	ifp = sc->ifp;
1815 
1816 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
1817 		return;
1818 
1819 	/* Stop tick engine */
1820 	callout_stop(&sc->wd_callout);
1821 
1822 	/* Disable interface */
1823 	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
1824 	sc->wd_timer = 0;
1825 
1826 	/* Disable interrupts */
1827 	mge_intrs_ctrl(sc, 0);
1828 
1829 	/* Disable Rx and Tx */
1830 	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1831 	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1832 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1833 
1834 	/* Remove pending data from TX queue */
1835 	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1836 	    sc->tx_desc_used_count) {
1837 		/* Get the descriptor */
1838 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1839 		desc = dw->mge_desc;
1840 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1841 		    BUS_DMASYNC_POSTREAD);
1842 
1843 		/* Get descriptor status */
1844 		status = desc->cmd_status;
1845 
1846 		if (status & MGE_DMA_OWNED)
1847 			break;
1848 
1849 		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
1850 		    MGE_TX_DESC_NUM;
1851 		sc->tx_desc_used_count--;
1852 
1853 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1854 		    BUS_DMASYNC_POSTWRITE);
1855 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1856 
1857 		m_freem(dw->buffer);
1858 		dw->buffer = (struct mbuf*)NULL;
1859 	}
1860 
1861 	/* Wait for end of transmission */
1862 	count = 0x100000;
1863 	while (count--) {
1864 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1865 		if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
1866 		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1867 			break;
1868 		DELAY(100);
1869 	}
1870 
1871 	if (count < 0)
1872 		if_printf(ifp,
1873 		    "%s: timeout while waiting for end of transmission\n",
1874 		    __FUNCTION__);
1875 
1876 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1877 	reg_val &= ~(PORT_SERIAL_ENABLE);
1878 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1879 }
1880 
1881 static int
1882 mge_suspend(device_t dev)
1883 {
1884 
1885 	device_printf(dev, "%s\n", __FUNCTION__);
1886 	return (0);
1887 }
1888 
1889 static void
1890 mge_offload_process_frame(if_t ifp, struct mbuf *frame,
1891     uint32_t status, uint16_t bufsize)
1892 {
1893 	int csum_flags = 0;
1894 
1895 	if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
1896 		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1897 			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
1898 
1899 		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1900 		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1901 		    (status & MGE_RX_L4_CSUM_OK)) {
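			/*
			 * Standard mbuf(9) idiom: CSUM_PSEUDO_HDR with
			 * csum_data of 0xFFFF tells the stack the L4
			 * checksum was fully verified in hardware.
			 */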
1902 			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1903 			frame->m_pkthdr.csum_data = 0xFFFF;
1904 		}
1905 
1906 		frame->m_pkthdr.csum_flags = csum_flags;
1907 	}
1908 }
1909 
1910 static void
1911 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1912 {
1913 	struct mbuf *m0 = dw->buffer;
1914 	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1915 	int csum_flags = m0->m_pkthdr.csum_flags;
1916 	int cmd_status = 0;
1917 	struct ip *ip;
1918 	int ehlen, etype;
1919 
1920 	if (csum_flags != 0) {
1921 		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1922 			etype = ntohs(eh->evl_proto);
1923 			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1924 			cmd_status |= MGE_TX_VLAN_TAGGED;
1925 		} else {
1926 			etype = ntohs(eh->evl_encap_proto);
1927 			ehlen = ETHER_HDR_LEN;
1928 		}
1929 
1930 		if (etype != ETHERTYPE_IP) {
1931 			if_printf(sc->ifp,
1932 			    "TCP/IP Offload enabled for unsupported "
1933 			    "protocol!\n");
1934 			return;
1935 		}
1936 
1937 		ip = (struct ip *)(m0->m_data + ehlen);
1938 		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1939 		cmd_status |= MGE_TX_NOT_FRAGMENT;
1940 	}
1941 
1942 	if (csum_flags & CSUM_IP)
1943 		cmd_status |= MGE_TX_GEN_IP_CSUM;
1944 
1945 	if (csum_flags & CSUM_TCP)
1946 		cmd_status |= MGE_TX_GEN_L4_CSUM;
1947 
1948 	if (csum_flags & CSUM_UDP)
1949 		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1950 
1951 	dw->mge_desc->cmd_status |= cmd_status;
1952 }
1953 
1954 static void
1955 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1956 {
1957 
1958 	if (enable) {
1959 		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
1960 		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1961 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
1962 		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1963 		    MGE_PORT_INT_EXT_TXBUF0);
1964 	} else {
1965 		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1966 		MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1967 
1968 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1969 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1970 
1971 		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1972 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
1973 	}
1974 }
1975 
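/*
 * Table-driven CRC-8 over the MAC address, polynomial x^8 + x^2 + x + 1
 * (0x07) with zero initial value; presumably the same hash the controller
 * applies when looking up the other-multicast filter table.
 */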
1976 static uint8_t
1977 mge_crc8(uint8_t *data, int size)
1978 {
1979 	uint8_t crc = 0;
1980 	static const uint8_t ct[256] = {
1981 		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1982 		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1983 		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1984 		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1985 		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1986 		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1987 		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
1988 		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
1989 		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
1990 		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
1991 		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
1992 		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
1993 		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
1994 		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
1995 		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
1996 		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
1997 		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
1998 		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
1999 		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
2000 		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
2001 		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
2002 		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
2003 		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
2004 		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
2005 		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
2006 		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
2007 		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
2008 		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
2009 		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
2010 		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
2011 		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
2012 		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
2013 	};
2014 
2015 	while (size--)
2016 		crc = ct[crc ^ *(data++)];
2017 
2018 	return (crc);
2019 }
2020 
2021 struct mge_hash_maddr_ctx {
2022 	uint32_t smt[MGE_MCAST_REG_NUMBER];
2023 	uint32_t omt[MGE_MCAST_REG_NUMBER];
2024 };
2025 
2026 static u_int
2027 mge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2028 {
2029 	static const uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
2030 	struct mge_hash_maddr_ctx *ctx = arg;
2031 	static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
2032 	uint8_t *mac;
2033 	int i;
2034 
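	/*
	 * Addresses of the form 01:00:5E:00:00:xx go through the "special"
	 * multicast table, indexed directly by the last byte; every other
	 * multicast address is CRC-8 hashed into the "other" table.
	 */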
2035 	mac = LLADDR(sdl);
2036 	if (memcmp(mac, special, sizeof(special)) == 0) {
2037 		i = mac[5];
2038 		ctx->smt[i >> 2] |= v << ((i & 0x03) << 3);
2039 	} else {
2040 		i = mge_crc8(mac, ETHER_ADDR_LEN);
2041 		ctx->omt[i >> 2] |= v << ((i & 0x03) << 3);
2042 	}
2043 	return (1);
2044 }
2045 
2046 static void
2047 mge_setup_multicast(struct mge_softc *sc)
2048 {
2049 	struct mge_hash_maddr_ctx ctx;
2050 	if_t ifp = sc->ifp;
2051 	static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
2052 	int i;
2053 
2054 	if (if_getflags(ifp) & IFF_ALLMULTI) {
2055 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
2056 			ctx.smt[i] = ctx.omt[i] =
2057 			    (v << 24) | (v << 16) | (v << 8) | v;
2058 	} else {
2059 		memset(&ctx, 0, sizeof(ctx));
2060 		if_foreach_llmaddr(ifp, mge_hash_maddr, &ctx);
2061 	}
2062 
2063 	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
2064 		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), ctx.smt[i]);
2065 		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), ctx.omt[i]);
2066 	}
2067 }
2068 
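/*
 * mge_rx_ipg(max, ver) yields the coalescing field with all bits set, so
 * ANDing with its complement clears just that field before the new value
 * is OR-ed in; mge_set_txic() below works the same way.
 */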
2069 static void
2070 mge_set_rxic(struct mge_softc *sc)
2071 {
2072 	uint32_t reg;
2073 
2074 	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
2075 		sc->rx_ic_time = sc->mge_rx_ipg_max;
2076 
2077 	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
2078 	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
2079 	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
2080 	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
2081 }
2082 
2083 static void
2084 mge_set_txic(struct mge_softc *sc)
2085 {
2086 	uint32_t reg;
2087 
2088 	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
2089 		sc->tx_ic_time = sc->mge_tfut_ipg_max;
2090 
2091 	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
2092 	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
2093 	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
2094 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
2095 }
2096 
2097 static int
2098 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
2099 {
2100 	struct mge_softc *sc = (struct mge_softc *)arg1;
2101 	uint32_t time;
2102 	int error;
2103 
2104 	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
2105 	error = sysctl_handle_int(oidp, &time, 0, req);
2106 	if (error != 0)
2107 		return (error);
2108 
2109 	MGE_GLOBAL_LOCK(sc);
2110 	if (arg2 == MGE_IC_RX) {
2111 		sc->rx_ic_time = time;
2112 		mge_set_rxic(sc);
2113 	} else {
2114 		sc->tx_ic_time = time;
2115 		mge_set_txic(sc);
2116 	}
2117 	MGE_GLOBAL_UNLOCK(sc);
2118 
2119 	return (0);
2120 }
2121 
2122 static void
2123 mge_add_sysctls(struct mge_softc *sc)
2124 {
2125 	struct sysctl_ctx_list *ctx;
2126 	struct sysctl_oid_list *children;
2127 	struct sysctl_oid *tree;
2128 
2129 	ctx = device_get_sysctl_ctx(sc->dev);
2130 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2131 	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
2132 	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "MGE interrupt coalescing");
2133 	children = SYSCTL_CHILDREN(tree);
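	/* Exposed as dev.mge.<unit>.int_coal.{rx_time,tx_time}. */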
2134 
2135 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
2136 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, MGE_IC_RX,
2137 	    mge_sysctl_ic, "I", "IC RX time threshold");
2138 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
2139 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, MGE_IC_TX,
2140 	    mge_sysctl_ic, "I", "IC TX time threshold");
2141 }
2142 
2143 static int
2144 mge_mdio_writereg(device_t dev, int phy, int reg, int value)
2145 {
2146 
2147 	mv_write_ge_smi(dev, phy, reg, value);
2148 
2149 	return (0);
2150 }
2151 
2153 static int
2154 mge_mdio_readreg(device_t dev, int phy, int reg)
2155 {
2156 	int ret;
2157 
2158 	ret = mv_read_ge_smi(dev, phy, reg);
2159 
2160 	return (ret);
2161 }
2162