xref: /freebsd/sys/dev/mge/if_mge.c (revision 732a02b4e77866604a120a275c082bb6221bd2ff)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5  * Copyright (C) 2009-2015 Semihalf
6  * Copyright (C) 2015 Stormshield
7  * All rights reserved.
8  *
9  * Developed by Semihalf.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of MARVELL nor the names of contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
38 #endif
39 
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/endian.h>
46 #include <sys/mbuf.h>
47 #include <sys/lock.h>
48 #include <sys/mutex.h>
49 #include <sys/kernel.h>
50 #include <sys/module.h>
51 #include <sys/socket.h>
52 #include <sys/sysctl.h>
53 
54 #include <net/ethernet.h>
55 #include <net/bpf.h>
56 #include <net/if.h>
57 #include <net/if_arp.h>
58 #include <net/if_dl.h>
59 #include <net/if_media.h>
60 #include <net/if_types.h>
61 #include <net/if_vlan_var.h>
62 
63 #include <netinet/in_systm.h>
64 #include <netinet/in.h>
65 #include <netinet/ip.h>
66 
67 #include <sys/sockio.h>
68 #include <sys/bus.h>
69 #include <machine/bus.h>
70 #include <sys/rman.h>
71 #include <machine/resource.h>
72 
73 #include <dev/mii/mii.h>
74 #include <dev/mii/miivar.h>
75 
76 #include <dev/fdt/fdt_common.h>
77 #include <dev/ofw/ofw_bus.h>
78 #include <dev/ofw/ofw_bus_subr.h>
79 #include <dev/mdio/mdio.h>
80 
81 #include <dev/mge/if_mgevar.h>
82 #include <arm/mv/mvreg.h>
83 #include <arm/mv/mvvar.h>
84 
85 #include "miibus_if.h"
86 #include "mdio_if.h"
87 
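/*
 * Descriptive note (added): MGE_DELAY() sleeps via pause(9) instead of
 * spinning, which is safe here because SMI transactions are serialized by
 * an sx(9) lock that may be held across a sleep.
 */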
88 #define	MGE_DELAY(x)	pause("SMI access sleep", (x) / tick_sbt)
89 
90 static int mge_probe(device_t dev);
91 static int mge_attach(device_t dev);
92 static int mge_detach(device_t dev);
93 static int mge_shutdown(device_t dev);
94 static int mge_suspend(device_t dev);
95 static int mge_resume(device_t dev);
96 
97 static int mge_miibus_readreg(device_t dev, int phy, int reg);
98 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
99 
100 static int mge_mdio_readreg(device_t dev, int phy, int reg);
101 static int mge_mdio_writereg(device_t dev, int phy, int reg, int value);
102 
103 static int mge_ifmedia_upd(struct ifnet *ifp);
104 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
105 
106 static void mge_init(void *arg);
107 static void mge_init_locked(void *arg);
108 static void mge_start(struct ifnet *ifp);
109 static void mge_start_locked(struct ifnet *ifp);
110 static void mge_watchdog(struct mge_softc *sc);
111 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
112 
113 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
114 static uint32_t mge_rx_ipg(uint32_t val, int ver);
115 static void mge_ver_params(struct mge_softc *sc);
116 
117 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
118 static void mge_intr_rxtx(void *arg);
119 static void mge_intr_rx(void *arg);
120 static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
121     uint32_t int_cause_ext);
122 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
123 static void mge_intr_tx(void *arg);
124 static void mge_intr_tx_locked(struct mge_softc *sc);
125 static void mge_intr_misc(void *arg);
126 static void mge_intr_sum(void *arg);
127 static void mge_intr_err(void *arg);
128 static void mge_stop(struct mge_softc *sc);
129 static void mge_tick(void *msc);
130 static uint32_t mge_set_port_serial_control(uint32_t media);
131 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
132 static void mge_set_mac_address(struct mge_softc *sc);
133 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
134     uint8_t queue);
135 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
136 static int mge_allocate_dma(struct mge_softc *sc);
137 static int mge_alloc_desc_dma(struct mge_softc *sc,
138     struct mge_desc_wrapper* desc_tab, uint32_t size,
139     bus_dma_tag_t *buffer_tag);
140 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
141     struct mbuf **mbufp, bus_addr_t *paddr);
142 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
143     int error);
144 static void mge_free_dma(struct mge_softc *sc);
145 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
146     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
147 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
148     uint32_t status, uint16_t bufsize);
149 static void mge_offload_setup_descriptor(struct mge_softc *sc,
150     struct mge_desc_wrapper *dw);
151 static uint8_t mge_crc8(uint8_t *data, int size);
152 static void mge_setup_multicast(struct mge_softc *sc);
153 static void mge_set_rxic(struct mge_softc *sc);
154 static void mge_set_txic(struct mge_softc *sc);
155 static void mge_add_sysctls(struct mge_softc *sc);
156 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
157 
158 static device_method_t mge_methods[] = {
159 	/* Device interface */
160 	DEVMETHOD(device_probe,		mge_probe),
161 	DEVMETHOD(device_attach,	mge_attach),
162 	DEVMETHOD(device_detach,	mge_detach),
163 	DEVMETHOD(device_shutdown,	mge_shutdown),
164 	DEVMETHOD(device_suspend,	mge_suspend),
165 	DEVMETHOD(device_resume,	mge_resume),
166 	/* MII interface */
167 	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
168 	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
169 	/* MDIO interface */
170 	DEVMETHOD(mdio_readreg,		mge_mdio_readreg),
171 	DEVMETHOD(mdio_writereg,	mge_mdio_writereg),
172 	{ 0, 0 }
173 };
174 
175 DEFINE_CLASS_0(mge, mge_driver, mge_methods, sizeof(struct mge_softc));
176 
177 static devclass_t mge_devclass;
178 static int switch_attached = 0;
179 
180 DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
181 DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
182 DRIVER_MODULE(mdio, mge, mdio_driver, mdio_devclass, 0, 0);
183 MODULE_DEPEND(mge, ether, 1, 1, 1);
184 MODULE_DEPEND(mge, miibus, 1, 1, 1);
185 MODULE_DEPEND(mge, mdio, 1, 1, 1);
186 
187 static struct resource_spec res_spec[] = {
188 	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
189 	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
190 	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
191 	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
192 	{ -1, 0 }
193 };
194 
195 static struct {
196 	driver_intr_t *handler;
	char *description;
} mge_intrs[MGE_INTR_COUNT + 1] = {
	{ mge_intr_rxtx, "GbE aggregated interrupt" },
	{ mge_intr_rx,	"GbE receive interrupt" },
	{ mge_intr_tx,	"GbE transmit interrupt" },
	{ mge_intr_misc, "GbE misc interrupt" },
	{ mge_intr_sum,	"GbE summary interrupt" },
	{ mge_intr_err,	"GbE error interrupt" },
};
206 
207 /* SMI access interlock */
208 static struct sx sx_smi;
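/*
 * Descriptive note (added): all instances route SMI transactions through a
 * single PHY-owning controller (phy_sc), so one global lock suffices; unit 0
 * creates it in mge_attach() and destroys it in mge_detach().
 */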
209 
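/*
 * Descriptive note (added): read a PHY register through the SMI mailbox.
 * The command word carries the opcode, the register address in bits 25:21
 * and the PHY address in bits 20:16; read data comes back in the low 16
 * bits once READVALID is set.
 */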
210 static uint32_t
211 mv_read_ge_smi(device_t dev, int phy, int reg)
212 {
213 	uint32_t timeout;
214 	uint32_t ret;
215 	struct mge_softc *sc;
216 
217 	sc = device_get_softc(dev);
218 	KASSERT(sc != NULL, ("NULL softc ptr!"));
	timeout = MGE_SMI_READ_RETRIES;

	MGE_SMI_LOCK();
	while (--timeout &&
	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
		MGE_DELAY(MGE_SMI_READ_DELAY);

	if (timeout == 0) {
		device_printf(dev, "SMI read timeout.\n");
		ret = ~0U;
		goto out;
	}
231 
232 	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
233 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
234 
	/* Wait till the read data is valid. */
	timeout = MGE_SMI_READ_RETRIES;
	while (--timeout &&
	    !((MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_READVALID)))
		MGE_DELAY(MGE_SMI_READ_DELAY);

	if (timeout == 0) {
		device_printf(dev, "SMI read validation timeout.\n");
		ret = ~0U;
		goto out;
	}
246 
247 	/* Wait for the data to update in the SMI register */
248 	MGE_DELAY(MGE_SMI_DELAY);
249 	ret = MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
250 
251 out:
252 	MGE_SMI_UNLOCK();
	return (ret);
}
256 
257 static void
258 mv_write_ge_smi(device_t dev, int phy, int reg, uint32_t value)
259 {
260 	uint32_t timeout;
261 	struct mge_softc *sc;
262 
263 	sc = device_get_softc(dev);
264 	KASSERT(sc != NULL, ("NULL softc ptr!"));
265 
266 	MGE_SMI_LOCK();
	timeout = MGE_SMI_WRITE_RETRIES;
	while (--timeout &&
	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
		MGE_DELAY(MGE_SMI_WRITE_DELAY);

	if (timeout == 0) {
		device_printf(dev, "SMI write timeout.\n");
		goto out;
	}
276 
277 	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
278 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
279 	    (value & MGE_SMI_DATA_MASK)));
280 
281 out:
282 	MGE_SMI_UNLOCK();
283 }
284 
285 static int
286 mv_read_ext_phy(device_t dev, int phy, int reg)
287 {
288 	uint32_t retries;
289 	struct mge_softc *sc;
290 	uint32_t ret;
291 
292 	sc = device_get_softc(dev);
293 
294 	MGE_SMI_LOCK();
295 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
296 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
297 
298 	retries = MGE_SMI_READ_RETRIES;
299 	while (--retries &&
300 	    !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
301 		DELAY(MGE_SMI_READ_DELAY);
302 
303 	if (retries == 0)
304 		device_printf(dev, "Timeout while reading from PHY\n");
305 
306 	ret = MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
307 	MGE_SMI_UNLOCK();
308 
309 	return (ret);
310 }
311 
312 static void
313 mv_write_ext_phy(device_t dev, int phy, int reg, int value)
314 {
315 	uint32_t retries;
316 	struct mge_softc *sc;
317 
318 	sc = device_get_softc(dev);
319 
320 	MGE_SMI_LOCK();
321 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
322 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
323 	    (value & MGE_SMI_DATA_MASK)));
324 
325 	retries = MGE_SMI_WRITE_RETRIES;
326 	while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
327 		DELAY(MGE_SMI_WRITE_DELAY);
328 
329 	if (retries == 0)
330 		device_printf(dev, "Timeout while writing to PHY\n");
331 	MGE_SMI_UNLOCK();
332 }
333 
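/*
 * Descriptive note (added): obtain the station MAC address.  Prefer a
 * non-zero "local-mac-address" property from the device tree; otherwise
 * fall back to whatever address the bootloader left in the MAC address
 * registers.
 */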
334 static void
335 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
336 {
337 	uint32_t mac_l, mac_h;
338 	uint8_t lmac[6];
339 	int i, valid;
340 
341 	/*
342 	 * Retrieve hw address from the device tree.
343 	 */
344 	i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
345 	if (i == 6) {
346 		valid = 0;
347 		for (i = 0; i < 6; i++)
348 			if (lmac[i] != 0) {
349 				valid = 1;
350 				break;
351 			}
352 
353 		if (valid) {
354 			bcopy(lmac, addr, 6);
355 			return;
356 		}
357 	}
358 
359 	/*
360 	 * Fall back -- use the currently programmed address.
361 	 */
362 	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
363 	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
364 
365 	addr[0] = (mac_h & 0xff000000) >> 24;
366 	addr[1] = (mac_h & 0x00ff0000) >> 16;
367 	addr[2] = (mac_h & 0x0000ff00) >> 8;
368 	addr[3] = (mac_h & 0x000000ff);
369 	addr[4] = (mac_l & 0x0000ff00) >> 8;
370 	addr[5] = (mac_l & 0x000000ff);
371 }
372 
373 static uint32_t
374 mge_tfut_ipg(uint32_t val, int ver)
375 {
376 
377 	switch (ver) {
378 	case 1:
379 		return ((val & 0x3fff) << 4);
380 	case 2:
381 	default:
382 		return ((val & 0xffff) << 4);
383 	}
384 }
385 
386 static uint32_t
387 mge_rx_ipg(uint32_t val, int ver)
388 {
389 
390 	switch (ver) {
391 	case 1:
392 		return ((val & 0x3fff) << 8);
393 	case 2:
394 	default:
395 		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
396 	}
397 }
398 
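/*
 * Descriptive note (added): pick chip-version dependent parameters.
 * Kirkwood/Discovery-class SoCs use the "version 2" register layouts and
 * limits; older SoCs use version 1.  The SoC also determines the number of
 * IRQ resources and whether hardware checksum offload is usable.
 */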
399 static void
400 mge_ver_params(struct mge_softc *sc)
401 {
402 	uint32_t d, r;
403 
404 	soc_id(&d, &r);
405 	if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
406 	    d == MV_DEV_88F6282 ||
407 	    d == MV_DEV_MV78100 ||
408 	    d == MV_DEV_MV78100_Z0 ||
409 	    (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
410 		sc->mge_ver = 2;
411 		sc->mge_mtu = 0x4e8;
412 		sc->mge_tfut_ipg_max = 0xFFFF;
413 		sc->mge_rx_ipg_max = 0xFFFF;
414 		sc->mge_tx_arb_cfg = 0xFC0000FF;
415 		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
416 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
417 	} else {
418 		sc->mge_ver = 1;
419 		sc->mge_mtu = 0x458;
420 		sc->mge_tfut_ipg_max = 0x3FFF;
421 		sc->mge_rx_ipg_max = 0x3FFF;
422 		sc->mge_tx_arb_cfg = 0x000000FF;
423 		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
424 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
425 	}
426 	if (d == MV_DEV_88RC8180)
427 		sc->mge_intr_cnt = 1;
428 	else
429 		sc->mge_intr_cnt = 2;
430 
431 	if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
432 		sc->mge_hw_csum = 0;
433 	else
434 		sc->mge_hw_csum = 1;
435 }
436 
437 static void
438 mge_set_mac_address(struct mge_softc *sc)
439 {
440 	char *if_mac;
441 	uint32_t mac_l, mac_h;
442 
443 	MGE_GLOBAL_LOCK_ASSERT(sc);
444 
445 	if_mac = (char *)IF_LLADDR(sc->ifp);
446 
447 	mac_l = (if_mac[4] << 8) | (if_mac[5]);
448 	mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
449 	    (if_mac[2] << 8) | (if_mac[3] << 0);
450 
451 	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
452 	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
453 
454 	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
455 }
456 
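/*
 * Descriptive note (added): program the unicast DA filter table, which is
 * indexed by the low nibble of the last MAC address byte.  Each byte-wide
 * entry holds a pass bit and the RX queue for the matching address.
 */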
457 static void
458 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
459 {
460 	uint32_t reg_idx, reg_off, reg_val, i;
461 
462 	last_byte &= 0xf;
463 	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
464 	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
465 	reg_val = (1 | (queue << 1)) << reg_off;
466 
467 	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
		if (i == reg_idx)
469 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
470 		else
471 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
472 	}
473 }
474 
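/*
 * Descriptive note (added): enable or disable promiscuous mode.  Beyond
 * setting the unicast promiscuous bit in the port configuration register,
 * all DA filter tables are opened up (or restored) so that every address
 * lands on the given RX queue.
 */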
475 static void
476 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
477 {
478 	uint32_t port_config;
479 	uint32_t reg_val, i;
480 
481 	/* Enable or disable promiscuous mode as needed */
482 	if (sc->ifp->if_flags & IFF_PROMISC) {
483 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
484 		port_config |= PORT_CONFIG_UPM;
485 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
486 
		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
		    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
489 
490 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
491 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
492 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
493 		}
494 
495 		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
496 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
497 
498 	} else {
499 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
500 		port_config &= ~PORT_CONFIG_UPM;
501 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
502 
503 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
504 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
505 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
506 		}
507 
508 		mge_set_mac_address(sc);
509 	}
510 }
511 
512 static void
513 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
514 {
515 	u_int32_t *paddr;
516 
517 	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
518 	paddr = arg;
519 
520 	*paddr = segs->ds_addr;
521 }
522 
523 static int
524 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
525     bus_addr_t *paddr)
526 {
527 	struct mbuf *new_mbuf;
528 	bus_dma_segment_t seg[1];
529 	int error;
530 	int nsegs;
531 
532 	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
533 
534 	new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
535 	if (new_mbuf == NULL)
536 		return (ENOBUFS);
537 	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
538 
539 	if (*mbufp) {
540 		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
541 		bus_dmamap_unload(tag, map);
542 	}
543 
544 	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
545 	    BUS_DMA_NOWAIT);
546 	KASSERT(nsegs == 1, ("Too many segments returned!"));
547 	if (nsegs != 1 || error)
548 		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
549 
550 	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
551 
552 	(*mbufp) = new_mbuf;
553 	(*paddr) = seg->ds_addr;
554 	return (0);
555 }
556 
557 static int
558 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
559     uint32_t size, bus_dma_tag_t *buffer_tag)
560 {
561 	struct mge_desc_wrapper *dw;
562 	bus_addr_t desc_paddr;
563 	int i, error;
564 
565 	desc_paddr = 0;
566 	for (i = size - 1; i >= 0; i--) {
567 		dw = &(tab[i]);
568 		error = bus_dmamem_alloc(sc->mge_desc_dtag,
569 		    (void**)&(dw->mge_desc),
570 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
571 		    &(dw->desc_dmap));
572 
573 		if (error) {
			device_printf(sc->dev,
			    "failed to allocate DMA memory\n");
575 			dw->mge_desc = NULL;
576 			return (ENXIO);
577 		}
578 
579 		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
580 		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
581 		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
582 
583 		if (error) {
			device_printf(sc->dev, "can't load descriptor\n");
585 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
586 			    dw->desc_dmap);
587 			dw->mge_desc = NULL;
588 			return (ENXIO);
589 		}
590 
591 		/* Chain descriptors */
592 		dw->mge_desc->next_desc = desc_paddr;
593 		desc_paddr = dw->mge_desc_paddr;
594 	}
595 	tab[size - 1].mge_desc->next_desc = desc_paddr;
596 
597 	/* Allocate a busdma tag for mbufs. */
598 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
599 	    1, 0,				/* alignment, boundary */
600 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
601 	    BUS_SPACE_MAXADDR,			/* highaddr */
602 	    NULL, NULL,				/* filtfunc, filtfuncarg */
603 	    MCLBYTES, 1,			/* maxsize, nsegments */
604 	    MCLBYTES, 0,			/* maxsegsz, flags */
605 	    NULL, NULL,				/* lockfunc, lockfuncarg */
606 	    buffer_tag);			/* dmat */
607 	if (error) {
		device_printf(sc->dev,
		    "failed to create busdma tag for mbufs\n");
609 		return (ENXIO);
610 	}
611 
612 	/* Create TX busdma maps */
613 	for (i = 0; i < size; i++) {
614 		dw = &(tab[i]);
615 		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
616 		if (error) {
			device_printf(sc->dev,
			    "failed to create map for mbuf\n");
618 			return (ENXIO);
619 		}
620 
		dw->buffer = NULL;
		dw->mge_desc->buffer = 0;
623 	}
624 
625 	return (0);
626 }
627 
628 static int
629 mge_allocate_dma(struct mge_softc *sc)
630 {
631 	int error;
632 	struct mge_desc_wrapper *dw;
633 	int i;
634 
635 	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
636 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
637 	    16, 0,				/* alignment, boundary */
638 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
639 	    BUS_SPACE_MAXADDR,			/* highaddr */
640 	    NULL, NULL,				/* filtfunc, filtfuncarg */
641 	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
642 	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
643 	    NULL, NULL,				/* lockfunc, lockfuncarg */
644 	    &sc->mge_desc_dtag);		/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create busdma tag for descriptors\n");
		return (ENXIO);
	}

	error = mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
	    &sc->mge_tx_dtag);
	if (error)
		return (error);
	error = mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);
	if (error)
		return (error);
651 
652 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
653 		dw = &(sc->mge_rx_desc[i]);
654 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
655 		    &dw->mge_desc->buffer);
656 	}
657 
658 	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
659 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
660 
661 	return (0);
662 }
663 
664 static void
665 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
666     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
667 {
668 	struct mge_desc_wrapper *dw;
669 	int i;
670 
671 	for (i = 0; i < size; i++) {
672 		/* Free RX mbuf */
673 		dw = &(tab[i]);
674 
675 		if (dw->buffer_dmap) {
676 			if (free_mbufs) {
677 				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
678 				    BUS_DMASYNC_POSTREAD);
679 				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
680 			}
681 			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
682 			if (free_mbufs)
683 				m_freem(dw->buffer);
684 		}
685 		/* Free RX descriptors */
686 		if (dw->desc_dmap) {
687 			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
688 			    BUS_DMASYNC_POSTREAD);
689 			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
690 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
691 			    dw->desc_dmap);
692 		}
693 	}
694 }
695 
696 static void
697 mge_free_dma(struct mge_softc *sc)
698 {
699 
	/* Free descriptors and mbufs */
701 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
702 	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
703 
704 	/* Destroy mbuf dma tag */
705 	bus_dma_tag_destroy(sc->mge_tx_dtag);
706 	bus_dma_tag_destroy(sc->mge_rx_dtag);
707 	/* Destroy descriptors tag */
708 	bus_dma_tag_destroy(sc->mge_desc_dtag);
709 }
710 
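/*
 * Descriptive note (added): recover from an RX resource error by tearing
 * down the whole RX ring, allocating fresh descriptors and buffers, and
 * re-enabling the queue.
 */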
711 static void
712 mge_reinit_rx(struct mge_softc *sc)
713 {
714 	struct mge_desc_wrapper *dw;
715 	int i;
716 
717 	MGE_RECEIVE_LOCK_ASSERT(sc);
718 
719 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
720 
721 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
722 	    &sc->mge_rx_dtag);
723 
724 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
725 		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
728 	}
729 
730 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
731 	sc->rx_desc_curr = 0;
732 
733 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
734 	    sc->rx_desc_start);
735 
736 	/* Enable RX queue */
737 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
738 }
739 
740 #ifdef DEVICE_POLLING
741 static poll_handler_t mge_poll;
742 
743 static int
744 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
745 {
746 	struct mge_softc *sc = ifp->if_softc;
747 	uint32_t int_cause, int_cause_ext;
748 	int rx_npkts = 0;
749 
750 	MGE_RECEIVE_LOCK(sc);
751 
752 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
753 		MGE_RECEIVE_UNLOCK(sc);
754 		return (rx_npkts);
755 	}
756 
757 	if (cmd == POLL_AND_CHECK_STATUS) {
758 		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
759 		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
760 
761 		/* Check for resource error */
762 		if (int_cause & MGE_PORT_INT_RXERRQ0)
763 			mge_reinit_rx(sc);
764 
765 		if (int_cause || int_cause_ext) {
766 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
767 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
768 		}
769 	}
770 
771 
772 	rx_npkts = mge_intr_rx_locked(sc, count);
773 
774 	MGE_RECEIVE_UNLOCK(sc);
775 	MGE_TRANSMIT_LOCK(sc);
776 	mge_intr_tx_locked(sc);
777 	MGE_TRANSMIT_UNLOCK(sc);
778 	return (rx_npkts);
779 }
780 #endif /* DEVICE_POLLING */
781 
782 static int
783 mge_attach(device_t dev)
784 {
785 	struct mge_softc *sc;
786 	struct mii_softc *miisc;
787 	struct ifnet *ifp;
788 	uint8_t hwaddr[ETHER_ADDR_LEN];
789 	int i, error, phy;
790 
791 	sc = device_get_softc(dev);
792 	sc->dev = dev;
793 	sc->node = ofw_bus_get_node(dev);
794 	phy = 0;
	if (fdt_get_phyaddr(sc->node, sc->dev, &phy,
	    (void **)&sc->phy_sc) == 0) {
796 	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) == 0) {
797 		device_printf(dev, "PHY%i attached, phy_sc points to %s\n", phy,
798 		    device_get_nameunit(sc->phy_sc->dev));
799 		sc->phy_attached = 1;
800 	} else {
801 		device_printf(dev, "PHY not attached.\n");
802 		sc->phy_attached = 0;
803 		sc->phy_sc = sc;
804 	}
805 
806 	if (fdt_find_compatible(sc->node, "mrvl,sw", 1) != 0) {
807 		device_printf(dev, "Switch attached.\n");
808 		sc->switch_attached = 1;
809 		/* additional variable available across instances */
810 		switch_attached = 1;
811 	} else {
812 		sc->switch_attached = 0;
813 	}
814 
	if (device_get_unit(dev) == 0)
		sx_init(&sx_smi, "mge_tick() SMI access threads interlock");
818 
819 	/* Set chip version-dependent parameters */
820 	mge_ver_params(sc);
821 
822 	/* Initialize mutexes */
823 	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
824 	    MTX_DEF);
825 	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
826 	    MTX_DEF);
827 
828 	/* Allocate IO and IRQ resources */
829 	error = bus_alloc_resources(dev, res_spec, sc->res);
830 	if (error) {
831 		device_printf(dev, "could not allocate resources\n");
832 		mge_detach(dev);
833 		return (ENXIO);
834 	}
835 
836 	/* Allocate DMA, buffers, buffer descriptors */
837 	error = mge_allocate_dma(sc);
838 	if (error) {
839 		mge_detach(dev);
840 		return (ENXIO);
841 	}
842 
843 	sc->tx_desc_curr = 0;
844 	sc->rx_desc_curr = 0;
845 	sc->tx_desc_used_idx = 0;
846 	sc->tx_desc_used_count = 0;
847 
848 	/* Configure defaults for interrupts coalescing */
849 	sc->rx_ic_time = 768;
850 	sc->tx_ic_time = 768;
851 	mge_add_sysctls(sc);
852 
853 	/* Allocate network interface */
854 	ifp = sc->ifp = if_alloc(IFT_ETHER);
855 	if (ifp == NULL) {
856 		device_printf(dev, "if_alloc() failed\n");
857 		mge_detach(dev);
858 		return (ENOMEM);
859 	}
860 
861 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
862 	ifp->if_softc = sc;
863 	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
864 	ifp->if_capabilities = IFCAP_VLAN_MTU;
865 	if (sc->mge_hw_csum) {
866 		ifp->if_capabilities |= IFCAP_HWCSUM;
867 		ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
868 	}
869 	ifp->if_capenable = ifp->if_capabilities;
870 
871 #ifdef DEVICE_POLLING
872 	/* Advertise that polling is supported */
873 	ifp->if_capabilities |= IFCAP_POLLING;
874 #endif
875 
876 	ifp->if_init = mge_init;
877 	ifp->if_start = mge_start;
878 	ifp->if_ioctl = mge_ioctl;
879 
880 	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
881 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
882 	IFQ_SET_READY(&ifp->if_snd);
883 
884 	mge_get_mac_address(sc, hwaddr);
885 	ether_ifattach(ifp, hwaddr);
886 	callout_init(&sc->wd_callout, 0);
887 
888 	/* Attach PHY(s) */
889 	if (sc->phy_attached) {
890 		error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
891 		    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
892 		if (error) {
893 			device_printf(dev, "MII failed to find PHY\n");
894 			if_free(ifp);
895 			sc->ifp = NULL;
896 			mge_detach(dev);
897 			return (error);
898 		}
899 		sc->mii = device_get_softc(sc->miibus);
900 
901 		/* Tell the MAC where to find the PHY so autoneg works */
902 		miisc = LIST_FIRST(&sc->mii->mii_phys);
903 		MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
904 	} else {
905 		/* no PHY, so use hard-coded values */
906 		ifmedia_init(&sc->mge_ifmedia, 0,
907 		    mge_ifmedia_upd,
908 		    mge_ifmedia_sts);
909 		ifmedia_add(&sc->mge_ifmedia,
910 		    IFM_ETHER | IFM_1000_T | IFM_FDX,
911 		    0, NULL);
912 		ifmedia_set(&sc->mge_ifmedia,
913 		    IFM_ETHER | IFM_1000_T | IFM_FDX);
914 	}
915 
916 	/* Attach interrupt handlers */
	/* TODO: review flags; in particular, mark RX as INTR_ENTROPY? */
918 	for (i = 1; i <= sc->mge_intr_cnt; ++i) {
919 		error = bus_setup_intr(dev, sc->res[i],
920 		    INTR_TYPE_NET | INTR_MPSAFE,
921 		    NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
922 		    sc, &sc->ih_cookie[i - 1]);
923 		if (error) {
924 			device_printf(dev, "could not setup %s\n",
925 			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
926 			mge_detach(dev);
927 			return (error);
928 		}
929 	}
930 
	if (sc->switch_attached) {
		MGE_WRITE(sc, MGE_REG_PHYDEV, MGE_SWITCH_PHYDEV);
		device_add_child(dev, "mdio", -1);
		bus_generic_attach(dev);
	}
937 
938 	return (0);
939 }
940 
941 static int
942 mge_detach(device_t dev)
943 {
944 	struct mge_softc *sc;
	int error, i;
946 
947 	sc = device_get_softc(dev);
948 
949 	/* Stop controller and free TX queue */
950 	if (sc->ifp)
951 		mge_shutdown(dev);
952 
	callout_drain(&sc->wd_callout);
954         callout_drain(&sc->wd_callout);
955 
956 	/* Stop and release all interrupts */
957 	for (i = 0; i < sc->mge_intr_cnt; ++i) {
958 		if (!sc->ih_cookie[i])
959 			continue;
960 
961 		error = bus_teardown_intr(dev, sc->res[1 + i],
962 		    sc->ih_cookie[i]);
963 		if (error)
964 			device_printf(dev, "could not release %s\n",
965 			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
966 	}
967 
968 	/* Detach network interface */
969 	if (sc->ifp) {
970 		ether_ifdetach(sc->ifp);
971 		if_free(sc->ifp);
972 	}
973 
974 	/* Free DMA resources */
975 	mge_free_dma(sc);
976 
977 	/* Free IO memory handler */
978 	bus_release_resources(dev, res_spec, sc->res);
979 
980 	/* Destroy mutexes */
981 	mtx_destroy(&sc->receive_lock);
982 	mtx_destroy(&sc->transmit_lock);
983 
984 	if (device_get_unit(dev) == 0)
985 		sx_destroy(&sx_smi);
986 
987 	return (0);
988 }
989 
990 static void
991 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
992 {
993 	struct mge_softc *sc;
994 	struct mii_data *mii;
995 
996 	sc = ifp->if_softc;
997 	MGE_GLOBAL_LOCK(sc);
998 
999 	if (!sc->phy_attached) {
1000 		ifmr->ifm_active = IFM_1000_T | IFM_FDX | IFM_ETHER;
1001 		ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1002 		goto out_unlock;
1003 	}
1004 
1005 	mii = sc->mii;
1006 	mii_pollstat(mii);
1007 
1008 	ifmr->ifm_active = mii->mii_media_active;
1009 	ifmr->ifm_status = mii->mii_media_status;
1010 
1011 out_unlock:
1012 	MGE_GLOBAL_UNLOCK(sc);
1013 }
1014 
1015 static uint32_t
1016 mge_set_port_serial_control(uint32_t media)
1017 {
1018 	uint32_t port_config;
1019 
1020 	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
1021 	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
1022 
1023 	if (IFM_TYPE(media) == IFM_ETHER) {
1024 		switch(IFM_SUBTYPE(media)) {
1025 			case IFM_AUTO:
1026 				break;
1027 			case IFM_1000_T:
1028 				port_config  |= (PORT_SERIAL_GMII_SPEED_1000 |
1029 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
1030 				    | PORT_SERIAL_SPEED_AUTONEG);
1031 				break;
1032 			case IFM_100_TX:
1033 				port_config  |= (PORT_SERIAL_MII_SPEED_100 |
1034 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
1035 				    | PORT_SERIAL_SPEED_AUTONEG);
1036 				break;
1037 			case IFM_10_T:
1038 				port_config  |= (PORT_SERIAL_AUTONEG |
1039 				    PORT_SERIAL_AUTONEG_FC |
1040 				    PORT_SERIAL_SPEED_AUTONEG);
1041 				break;
1042 		}
1043 		if (media & IFM_FDX)
1044 			port_config |= PORT_SERIAL_FULL_DUPLEX;
1045 	}
1046 	return (port_config);
1047 }
1048 
1049 static int
1050 mge_ifmedia_upd(struct ifnet *ifp)
1051 {
1052 	struct mge_softc *sc = ifp->if_softc;
1053 
1054 	/*
1055 	 * Do not do anything for switch here, as updating media between
1056 	 * MGE MAC and switch MAC is hardcoded in PCB. Changing it here would
1057 	 * break the link.
1058 	 */
1059 	if (sc->phy_attached) {
1060 		MGE_GLOBAL_LOCK(sc);
1061 		if (ifp->if_flags & IFF_UP) {
1062 			sc->mge_media_status = sc->mii->mii_media.ifm_media;
1063 			mii_mediachg(sc->mii);
1064 
1065 			/* MGE MAC needs to be reinitialized. */
1066 			mge_init_locked(sc);
1067 
1068 		}
1069 		MGE_GLOBAL_UNLOCK(sc);
1070 	}
1071 
1072 	return (0);
1073 }
1074 
1075 static void
1076 mge_init(void *arg)
1077 {
1078 	struct mge_softc *sc;
1079 
1080 	sc = arg;
1081 	MGE_GLOBAL_LOCK(sc);
1082 
1083 	mge_init_locked(arg);
1084 
1085 	MGE_GLOBAL_UNLOCK(sc);
1086 }
1087 
1088 static void
1089 mge_init_locked(void *arg)
1090 {
1091 	struct mge_softc *sc = arg;
1092 	struct mge_desc_wrapper *dw;
1093 	volatile uint32_t reg_val;
1094 	int i, count;
1095 	uint32_t media_status;
1096 
1097 
1099 
1100 	/* Stop interface */
1101 	mge_stop(sc);
1102 
1103 	/* Disable interrupts */
1104 	mge_intrs_ctrl(sc, 0);
1105 
1106 	/* Set MAC address */
1107 	mge_set_mac_address(sc);
1108 
1109 	/* Setup multicast filters */
1110 	mge_setup_multicast(sc);
1111 
1112 	if (sc->mge_ver == 2) {
1113 		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
1114 		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
1115 	}
1116 
1117 	/* Initialize TX queue configuration registers */
1118 	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
1119 	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
1120 	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
1121 
1122 	/* Clear TX queue configuration registers for unused queues */
1123 	for (i = 1; i < 7; i++) {
1124 		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
1125 		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
1126 		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
1127 	}
1128 
1129 	/* Set default MTU */
1130 	MGE_WRITE(sc, sc->mge_mtu, 0);
1131 
1132 	/* Port configuration */
1133 	MGE_WRITE(sc, MGE_PORT_CONFIG,
1134 	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
1135 	    PORT_CONFIG_ARO_RXQ(0));
	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);
1137 
1138 	/* Configure promisc mode */
1139 	mge_set_prom_mode(sc, MGE_RX_DEFAULT_QUEUE);
1140 
1141 	media_status = sc->mge_media_status;
1142 	if (sc->switch_attached) {
1143 		media_status &= ~IFM_TMASK;
1144 		media_status |= IFM_1000_T;
1145 	}
1146 
1147 	/* Setup port configuration */
1148 	reg_val = mge_set_port_serial_control(media_status);
1149 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1150 
1151 	/* Setup SDMA configuration */
	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
1153 	    MGE_SDMA_TX_BYTE_SWAP |
1154 	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
1155 	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
1156 
1157 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
1158 
1159 	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
1160 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
1161 	    sc->rx_desc_start);
1162 
1163 	/* Reset descriptor indexes */
1164 	sc->tx_desc_curr = 0;
1165 	sc->rx_desc_curr = 0;
1166 	sc->tx_desc_used_idx = 0;
1167 	sc->tx_desc_used_count = 0;
1168 
1169 	/* Enable RX descriptors */
1170 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
1171 		dw = &sc->mge_rx_desc[i];
1172 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1173 		dw->mge_desc->buff_size = MCLBYTES;
1174 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1175 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1176 	}
1177 
1178 	/* Enable RX queue */
1179 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
1180 
1181 	/* Enable port */
1182 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1183 	reg_val |= PORT_SERIAL_ENABLE;
1184 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1185 	count = 0x100000;
1186 	for (;;) {
1187 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1188 		if (reg_val & MGE_STATUS_LINKUP)
1189 			break;
1190 		DELAY(100);
1191 		if (--count == 0) {
1192 			if_printf(sc->ifp, "Timeout on link-up\n");
1193 			break;
1194 		}
1195 	}
1196 
1197 	/* Setup interrupts coalescing */
1198 	mge_set_rxic(sc);
1199 	mge_set_txic(sc);
1200 
1201 	/* Enable interrupts */
1202 #ifdef DEVICE_POLLING
	/*
	 * ...only if polling is not turned on. Disable interrupts explicitly
	 * if polling is enabled.
	 */
1207 	if (sc->ifp->if_capenable & IFCAP_POLLING)
1208 		mge_intrs_ctrl(sc, 0);
1209 	else
1210 #endif /* DEVICE_POLLING */
1211 	mge_intrs_ctrl(sc, 1);
1212 
1213 	/* Activate network interface */
1214 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
1215 	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1216 	sc->wd_timer = 0;
1217 
1218 	/* Schedule watchdog timeout */
1219 	if (sc->phy_attached)
1220 		callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1221 }
1222 
1223 static void
1224 mge_intr_rxtx(void *arg)
1225 {
1226 	struct mge_softc *sc;
1227 	uint32_t int_cause, int_cause_ext;
1228 
1229 	sc = arg;
1230 	MGE_GLOBAL_LOCK(sc);
1231 
1232 #ifdef DEVICE_POLLING
1233 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1234 		MGE_GLOBAL_UNLOCK(sc);
1235 		return;
1236 	}
1237 #endif
1238 
1239 	/* Get interrupt cause */
1240 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1241 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1242 
1243 	/* Check for Transmit interrupt */
1244 	if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
1245 	    MGE_PORT_INT_EXT_TXUR)) {
1246 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1247 		    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1248 		mge_intr_tx_locked(sc);
1249 	}
1250 
1251 	MGE_TRANSMIT_UNLOCK(sc);
1252 
1253 	/* Check for Receive interrupt */
1254 	mge_intr_rx_check(sc, int_cause, int_cause_ext);
1255 
1256 	MGE_RECEIVE_UNLOCK(sc);
1257 }
1258 
1259 static void
1260 mge_intr_err(void *arg)
1261 {
1262 	struct mge_softc *sc;
1263 	struct ifnet *ifp;
1264 
1265 	sc = arg;
1266 	ifp = sc->ifp;
1267 	if_printf(ifp, "%s\n", __FUNCTION__);
1268 }
1269 
1270 static void
1271 mge_intr_misc(void *arg)
1272 {
1273 	struct mge_softc *sc;
1274 	struct ifnet *ifp;
1275 
1276 	sc = arg;
1277 	ifp = sc->ifp;
1278 	if_printf(ifp, "%s\n", __FUNCTION__);
1279 }
1280 
1281 static void
mge_intr_rx(void *arg)
{
1283 	struct mge_softc *sc;
1284 	uint32_t int_cause, int_cause_ext;
1285 
1286 	sc = arg;
1287 	MGE_RECEIVE_LOCK(sc);
1288 
1289 #ifdef DEVICE_POLLING
1290 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1291 		MGE_RECEIVE_UNLOCK(sc);
1292 		return;
1293 	}
1294 #endif
1295 
1296 	/* Get interrupt cause */
1297 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1298 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1299 
1300 	mge_intr_rx_check(sc, int_cause, int_cause_ext);
1301 
1302 	MGE_RECEIVE_UNLOCK(sc);
1303 }
1304 
1305 static void
1306 mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
1307     uint32_t int_cause_ext)
1308 {
1309 	/* Check for resource error */
1310 	if (int_cause & MGE_PORT_INT_RXERRQ0) {
1311 		mge_reinit_rx(sc);
1312 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1313 		    ~(int_cause & MGE_PORT_INT_RXERRQ0));
1314 	}
1315 
1316 	int_cause &= MGE_PORT_INT_RXQ0;
1317 	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1318 
1319 	if (int_cause || int_cause_ext) {
1320 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1321 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1322 		mge_intr_rx_locked(sc, -1);
1323 	}
1324 }
1325 
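/*
 * Descriptive note (added): drain received frames from the RX ring.  A
 * negative count means no budget limit (interrupt context); the polling
 * path passes its budget so at most count frames are processed per call.
 */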
1326 static int
1327 mge_intr_rx_locked(struct mge_softc *sc, int count)
1328 {
1329 	struct ifnet *ifp = sc->ifp;
1330 	uint32_t status;
1331 	uint16_t bufsize;
1332 	struct mge_desc_wrapper* dw;
1333 	struct mbuf *mb;
1334 	int rx_npkts = 0;
1335 
1336 	MGE_RECEIVE_LOCK_ASSERT(sc);
1337 
1338 	while (count != 0) {
1339 		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1340 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1341 		    BUS_DMASYNC_POSTREAD);
1342 
1343 		/* Get status */
1344 		status = dw->mge_desc->cmd_status;
1345 		bufsize = dw->mge_desc->buff_size;
1346 		if ((status & MGE_DMA_OWNED) != 0)
1347 			break;
1348 
		if (dw->mge_desc->byte_count &&
		    !(status & MGE_ERR_SUMMARY)) {
1351 
1352 			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1353 			    BUS_DMASYNC_POSTREAD);
1354 
1355 			mb = m_devget(dw->buffer->m_data,
1356 			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
1357 			    0, ifp, NULL);
1358 
1359 			if (mb == NULL)
1360 				/* Give up if no mbufs */
1361 				break;
1362 
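			/*
			 * Descriptive note (added): the controller prepends
			 * two bytes of padding to each received frame so the
			 * IP header ends up 32-bit aligned; strip them here.
			 */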
1363 			mb->m_len -= 2;
1364 			mb->m_pkthdr.len -= 2;
1365 			mb->m_data += 2;
1366 
1367 			mb->m_pkthdr.rcvif = ifp;
1368 
1369 			mge_offload_process_frame(ifp, mb, status,
1370 			    bufsize);
1371 
1372 			MGE_RECEIVE_UNLOCK(sc);
1373 			(*ifp->if_input)(ifp, mb);
1374 			MGE_RECEIVE_LOCK(sc);
1375 			rx_npkts++;
1376 		}
1377 
1378 		dw->mge_desc->byte_count = 0;
1379 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
1381 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1382 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1383 
1384 		if (count > 0)
1385 			count -= 1;
1386 	}
1387 
1388 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_npkts);
1389 
1390 	return (rx_npkts);
1391 }
1392 
1393 static void
1394 mge_intr_sum(void *arg)
1395 {
1396 	struct mge_softc *sc = arg;
1397 	struct ifnet *ifp;
1398 
1399 	ifp = sc->ifp;
1400 	if_printf(ifp, "%s\n", __FUNCTION__);
1401 }
1402 
1403 static void
1404 mge_intr_tx(void *arg)
1405 {
1406 	struct mge_softc *sc = arg;
1407 	uint32_t int_cause_ext;
1408 
1409 	MGE_TRANSMIT_LOCK(sc);
1410 
1411 #ifdef DEVICE_POLLING
1412 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1413 		MGE_TRANSMIT_UNLOCK(sc);
1414 		return;
1415 	}
1416 #endif
1417 
1418 	/* Ack the interrupt */
1419 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1420 	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1421 	    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1422 
1423 	mge_intr_tx_locked(sc);
1424 
1425 	MGE_TRANSMIT_UNLOCK(sc);
1426 }
1427 
1428 static void
1429 mge_intr_tx_locked(struct mge_softc *sc)
1430 {
1431 	struct ifnet *ifp = sc->ifp;
1432 	struct mge_desc_wrapper *dw;
1433 	struct mge_desc *desc;
1434 	uint32_t status;
1435 	int send = 0;
1436 
1437 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1438 
1439 	/* Disable watchdog */
1440 	sc->wd_timer = 0;
1441 
1442 	while (sc->tx_desc_used_count) {
1443 		/* Get the descriptor */
1444 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1445 		desc = dw->mge_desc;
1446 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1447 		    BUS_DMASYNC_POSTREAD);
1448 
1449 		/* Get descriptor status */
1450 		status = desc->cmd_status;
1451 
1452 		if (status & MGE_DMA_OWNED)
1453 			break;
1454 
		sc->tx_desc_used_idx =
		    (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
1457 		sc->tx_desc_used_count--;
1458 
1459 		/* Update collision statistics */
1460 		if (status & MGE_ERR_SUMMARY) {
1461 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1462 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1463 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1464 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16);
1465 		}
1466 
1467 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1468 		    BUS_DMASYNC_POSTWRITE);
1469 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1470 		m_freem(dw->buffer);
1471 		dw->buffer = (struct mbuf*)NULL;
1472 		send++;
1473 
1474 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1475 	}
1476 
1477 	if (send) {
1478 		/* Now send anything that was pending */
1479 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1480 		mge_start_locked(ifp);
1481 	}
1482 }

static int
1484 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1485 {
1486 	struct mge_softc *sc = ifp->if_softc;
1487 	struct ifreq *ifr = (struct ifreq *)data;
1488 	int mask, error;
1489 	uint32_t flags;
1490 
1491 	error = 0;
1492 
1493 	switch (command) {
1494 	case SIOCSIFFLAGS:
1495 		MGE_GLOBAL_LOCK(sc);
1496 
1497 		if (ifp->if_flags & IFF_UP) {
1498 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1499 				flags = ifp->if_flags ^ sc->mge_if_flags;
1500 				if (flags & IFF_PROMISC)
1501 					mge_set_prom_mode(sc,
1502 					    MGE_RX_DEFAULT_QUEUE);
1503 
1504 				if (flags & IFF_ALLMULTI)
1505 					mge_setup_multicast(sc);
1506 			} else
1507 				mge_init_locked(sc);
1508 		}
1509 		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1510 			mge_stop(sc);
1511 
1512 		sc->mge_if_flags = ifp->if_flags;
1513 		MGE_GLOBAL_UNLOCK(sc);
1514 		break;
1515 	case SIOCADDMULTI:
1516 	case SIOCDELMULTI:
1517 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1518 			MGE_GLOBAL_LOCK(sc);
1519 			mge_setup_multicast(sc);
1520 			MGE_GLOBAL_UNLOCK(sc);
1521 		}
1522 		break;
1523 	case SIOCSIFCAP:
1524 		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1525 		if (mask & IFCAP_HWCSUM) {
1526 			ifp->if_capenable &= ~IFCAP_HWCSUM;
1527 			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
1528 			if (ifp->if_capenable & IFCAP_TXCSUM)
1529 				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
1530 			else
1531 				ifp->if_hwassist = 0;
1532 		}
1533 #ifdef DEVICE_POLLING
1534 		if (mask & IFCAP_POLLING) {
1535 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1536 				error = ether_poll_register(mge_poll, ifp);
1537 				if (error)
					return (error);
1539 
1540 				MGE_GLOBAL_LOCK(sc);
1541 				mge_intrs_ctrl(sc, 0);
1542 				ifp->if_capenable |= IFCAP_POLLING;
1543 				MGE_GLOBAL_UNLOCK(sc);
1544 			} else {
1545 				error = ether_poll_deregister(ifp);
1546 				MGE_GLOBAL_LOCK(sc);
1547 				mge_intrs_ctrl(sc, 1);
1548 				ifp->if_capenable &= ~IFCAP_POLLING;
1549 				MGE_GLOBAL_UNLOCK(sc);
1550 			}
1551 		}
1552 #endif
1553 		break;
1554 	case SIOCGIFMEDIA: /* fall through */
1555 	case SIOCSIFMEDIA:
1556 		/*
1557 		 * Setting up media type via ioctls is *not* supported for MAC
1558 		 * which is connected to switch. Use etherswitchcfg.
1559 		 */
1560 		if (!sc->phy_attached && (command == SIOCSIFMEDIA))
1561 			return (0);
1562 		else if (!sc->phy_attached) {
1563 			error = ifmedia_ioctl(ifp, ifr, &sc->mge_ifmedia,
1564 			    command);
1565 			break;
1566 		}
1567 
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T &&
		    !(ifr->ifr_media & IFM_FDX)) {
			device_printf(sc->dev,
			    "1000baseT half-duplex unsupported\n");
			return (0);
		}
1574 		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1575 		break;
1576 	default:
1577 		error = ether_ioctl(ifp, command, data);
1578 	}
1579 	return (error);
1580 }
1581 
1582 static int
1583 mge_miibus_readreg(device_t dev, int phy, int reg)
1584 {
1587 
1588 	KASSERT(!switch_attached, ("miibus used with switch attached"));
1589 
1590 	return (mv_read_ext_phy(dev, phy, reg));
1591 }
1592 
1593 static int
1594 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1595 {
1598 
1599 	KASSERT(!switch_attached, ("miibus used with switch attached"));
1600 
1601 	mv_write_ext_phy(dev, phy, reg, value);
1602 
1603 	return (0);
1604 }
1605 
1606 static int
1607 mge_probe(device_t dev)
1608 {
1609 
1610 	if (!ofw_bus_status_okay(dev))
1611 		return (ENXIO);
1612 
1613 	if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
1614 		return (ENXIO);
1615 
1616 	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1617 	return (BUS_PROBE_DEFAULT);
1618 }
1619 
1620 static int
1621 mge_resume(device_t dev)
1622 {
1623 
1624 	device_printf(dev, "%s\n", __FUNCTION__);
1625 	return (0);
1626 }
1627 
1628 static int
1629 mge_shutdown(device_t dev)
1630 {
1631 	struct mge_softc *sc = device_get_softc(dev);
1632 
1633 	MGE_GLOBAL_LOCK(sc);
1634 
1635 #ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING)
1637 		ether_poll_deregister(sc->ifp);
1638 #endif
1639 
1640 	mge_stop(sc);
1641 
1642 	MGE_GLOBAL_UNLOCK(sc);
1643 
1644 	return (0);
1645 }
1646 
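/*
 * Descriptive note (added): map an mbuf for transmission and fill the next
 * free TX descriptor.  Only single-segment mappings are accepted here;
 * mge_start_locked() defragments longer chains before calling in.
 */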
1647 static int
1648 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1649 {
1650 	struct mge_desc_wrapper *dw = NULL;
1651 	struct ifnet *ifp;
1652 	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1653 	bus_dmamap_t mapp;
1654 	int error;
1655 	int seg, nsegs;
1656 	int desc_no;
1657 
1658 	ifp = sc->ifp;
1659 
1660 	/* Fetch unused map */
1661 	desc_no = sc->tx_desc_curr;
1662 	dw = &sc->mge_tx_desc[desc_no];
1663 	mapp = dw->buffer_dmap;
1664 
1665 	/* Create mapping in DMA memory */
1666 	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1667 	    BUS_DMA_NOWAIT);
1668 	if (error != 0) {
1669 		m_freem(m0);
1670 		return (error);
1671 	}
1672 
1673 	/* Only one segment is supported. */
1674 	if (nsegs != 1) {
1675 		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1676 		m_freem(m0);
1677 		return (-1);
1678 	}
1679 
1680 	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1681 
1682 	/* Everything is ok, now we can send buffers */
1683 	for (seg = 0; seg < nsegs; seg++) {
1684 		dw->mge_desc->byte_count = segs[seg].ds_len;
1685 		dw->mge_desc->buffer = segs[seg].ds_addr;
1686 		dw->buffer = m0;
1687 		dw->mge_desc->cmd_status = 0;
1688 		if (seg == 0)
1689 			mge_offload_setup_descriptor(sc, dw);
1690 		dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
1691 		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1692 		    MGE_DMA_OWNED;
1693 	}
1694 
1695 	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1696 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1697 
	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
1699 	sc->tx_desc_used_count++;
1700 	return (0);
1701 }
1702 
1703 static void
1704 mge_tick(void *msc)
1705 {
1706 	struct mge_softc *sc = msc;
1707 
1708 	KASSERT(sc->phy_attached == 1, ("mge_tick while PHY not attached"));
1709 
1710 	MGE_GLOBAL_LOCK(sc);
1711 
1712 	/* Check for TX timeout */
1713 	mge_watchdog(sc);
1714 
1715 	mii_tick(sc->mii);
1716 
1717 	/* Check for media type change */
	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
1719 		mge_ifmedia_upd(sc->ifp);
1720 
1721 	MGE_GLOBAL_UNLOCK(sc);
1722 
1723 	/* Schedule another timeout one second from now */
1724 	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1727 }
1728 
1729 static void
1730 mge_watchdog(struct mge_softc *sc)
1731 {
1732 	struct ifnet *ifp;
1733 
1734 	ifp = sc->ifp;
1735 
	if (sc->wd_timer == 0 || --sc->wd_timer)
		return;
1739 
1740 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1741 	if_printf(ifp, "watchdog timeout\n");
1742 
1743 	mge_stop(sc);
1744 	mge_init_locked(sc);
1745 }
1746 
1747 static void
1748 mge_start(struct ifnet *ifp)
1749 {
1750 	struct mge_softc *sc = ifp->if_softc;
1751 
1752 	MGE_TRANSMIT_LOCK(sc);
1753 
1754 	mge_start_locked(ifp);
1755 
1756 	MGE_TRANSMIT_UNLOCK(sc);
1757 }
1758 
1759 static void
1760 mge_start_locked(struct ifnet *ifp)
1761 {
1762 	struct mge_softc *sc;
1763 	struct mbuf *m0, *mtmp;
1764 	uint32_t reg_val, queued = 0;
1765 
1766 	sc = ifp->if_softc;
1767 
1768 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1769 
1770 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1771 	    IFF_DRV_RUNNING)
1772 		return;
1773 
1774 	for (;;) {
1775 		/* Get packet from the queue */
1776 		IF_DEQUEUE(&ifp->if_snd, m0);
1777 		if (m0 == NULL)
1778 			break;
1779 
1780 		if (m0->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP) ||
1781 		    m0->m_flags & M_VLANTAG) {
1782 			if (M_WRITABLE(m0) == 0) {
1783 				mtmp = m_dup(m0, M_NOWAIT);
1784 				m_freem(m0);
1785 				if (mtmp == NULL)
1786 					continue;
1787 				m0 = mtmp;
1788 			}
1789 		}
		/* The driver supports only one DMA fragment. */
1791 		if (m0->m_next != NULL) {
1792 			mtmp = m_defrag(m0, M_NOWAIT);
1793 			if (mtmp != NULL)
1794 				m0 = mtmp;
1795 		}
1796 
1797 		/* Check for free descriptors */
1798 		if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1799 			IF_PREPEND(&ifp->if_snd, m0);
1800 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1801 			break;
1802 		}
1803 
1804 		if (mge_encap(sc, m0) != 0)
1805 			break;
1806 
1807 		queued++;
1808 		BPF_MTAP(ifp, m0);
1809 	}
1810 
1811 	if (queued) {
1812 		/* Enable transmitter and watchdog timer */
1813 		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1814 		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
1815 		sc->wd_timer = 5;
1816 	}
1817 }
1818 
1819 static void
1820 mge_stop(struct mge_softc *sc)
1821 {
1822 	struct ifnet *ifp;
1823 	volatile uint32_t reg_val, status;
1824 	struct mge_desc_wrapper *dw;
1825 	struct mge_desc *desc;
1826 	int count;
1827 
1828 	ifp = sc->ifp;
1829 
1830 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1831 		return;
1832 
1833 	/* Stop tick engine */
1834 	callout_stop(&sc->wd_callout);
1835 
1836 	/* Disable interface */
1837 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1838 	sc->wd_timer = 0;
1839 
1840 	/* Disable interrupts */
1841 	mge_intrs_ctrl(sc, 0);
1842 
1843 	/* Disable Rx and Tx */
1844 	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1845 	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1846 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1847 
1848 	/* Remove pending data from TX queue */
1849 	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1850 	    sc->tx_desc_used_count) {
1851 		/* Get the descriptor */
1852 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1853 		desc = dw->mge_desc;
1854 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1855 		    BUS_DMASYNC_POSTREAD);
1856 
1857 		/* Get descriptor status */
1858 		status = desc->cmd_status;
1859 
1860 		if (status & MGE_DMA_OWNED)
1861 			break;
1862 
		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
		    MGE_TX_DESC_NUM;
1865 		sc->tx_desc_used_count--;
1866 
1867 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1868 		    BUS_DMASYNC_POSTWRITE);
1869 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1870 
1871 		m_freem(dw->buffer);
1872 		dw->buffer = (struct mbuf*)NULL;
1873 	}
1874 
1875 	/* Wait for end of transmission */
1876 	count = 0x100000;
1877 	while (count--) {
1878 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
1880 		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1881 			break;
1882 		DELAY(100);
1883 	}
1884 
1885 	if (count == 0)
1886 		if_printf(ifp,
1887 		    "%s: timeout while waiting for end of transmission\n",
1888 		    __FUNCTION__);
1889 
1890 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1891 	reg_val &= ~(PORT_SERIAL_ENABLE);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1893 }
1894 
1895 static int
1896 mge_suspend(device_t dev)
1897 {
1898 
1899 	device_printf(dev, "%s\n", __FUNCTION__);
1900 	return (0);
1901 }
1902 
1903 static void
1904 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1905     uint32_t status, uint16_t bufsize)
1906 {
1907 	int csum_flags = 0;
1908 
1909 	if (ifp->if_capenable & IFCAP_RXCSUM) {
1910 		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1911 			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
1912 
1913 		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1914 		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1915 		    (status & MGE_RX_L4_CSUM_OK)) {
1916 			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1917 			frame->m_pkthdr.csum_data = 0xFFFF;
1918 		}
1919 
1920 		frame->m_pkthdr.csum_flags = csum_flags;
1921 	}
1922 }
1923 
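/*
 * Descriptive note (added): translate mbuf checksum offload requests into
 * TX descriptor flags.  Only IPv4 frames are handled; for other ethertypes
 * the descriptor is left untouched and the frame goes out without hardware
 * checksums.
 */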
1924 static void
1925 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1926 {
1927 	struct mbuf *m0 = dw->buffer;
1928 	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1929 	int csum_flags = m0->m_pkthdr.csum_flags;
1930 	int cmd_status = 0;
1931 	struct ip *ip;
1932 	int ehlen, etype;
1933 
1934 	if (csum_flags != 0) {
1935 		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1936 			etype = ntohs(eh->evl_proto);
1937 			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1938 			csum_flags |= MGE_TX_VLAN_TAGGED;
1939 		} else {
1940 			etype = ntohs(eh->evl_encap_proto);
1941 			ehlen = ETHER_HDR_LEN;
1942 		}
1943 
1944 		if (etype != ETHERTYPE_IP) {
1945 			if_printf(sc->ifp,
1946 			    "TCP/IP Offload enabled for unsupported "
1947 			    "protocol!\n");
1948 			return;
1949 		}
1950 
1951 		ip = (struct ip *)(m0->m_data + ehlen);
1952 		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1953 		cmd_status |= MGE_TX_NOT_FRAGMENT;
1954 	}
1955 
1956 	if (csum_flags & CSUM_IP)
1957 		cmd_status |= MGE_TX_GEN_IP_CSUM;
1958 
1959 	if (csum_flags & CSUM_TCP)
1960 		cmd_status |= MGE_TX_GEN_L4_CSUM;
1961 
1962 	if (csum_flags & CSUM_UDP)
1963 		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1964 
1965 	dw->mge_desc->cmd_status |= cmd_status;
1966 }
1967 
1968 static void
1969 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1970 {
1971 
1972 	if (enable) {
		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
1976 		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1977 		    MGE_PORT_INT_EXT_TXBUF0);
1978 	} else {
1979 		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1980 		MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1981 
1982 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1983 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1984 
1985 		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1986 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
1987 	}
1988 }
1989 
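/*
 * Descriptive note (added): table-driven CRC-8 with polynomial
 * x^8 + x^2 + x + 1 (0x07), matching the hash the controller applies to
 * multicast addresses in the "other" multicast filter table.
 */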
1990 static uint8_t
1991 mge_crc8(uint8_t *data, int size)
1992 {
1993 	uint8_t crc = 0;
1994 	static const uint8_t ct[256] = {
1995 		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1996 		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1997 		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1998 		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1999 		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
2000 		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
2001 		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
2002 		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
2003 		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
2004 		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
2005 		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
2006 		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
2007 		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
2008 		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
2009 		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
2010 		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
2011 		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
2012 		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
2013 		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
2014 		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
2015 		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
2016 		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
2017 		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
2018 		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
2019 		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
2020 		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
2021 		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
2022 		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
2023 		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
2024 		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
2025 		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
2026 		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
2027 	};
2028 
	while (size--)
		crc = ct[crc ^ *(data++)];

	return (crc);
2033 }
2034 
2035 struct mge_hash_maddr_ctx {
2036 	uint32_t smt[MGE_MCAST_REG_NUMBER];
2037 	uint32_t omt[MGE_MCAST_REG_NUMBER];
2038 };
2039 
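/*
 * Descriptive note (added): hash one link-layer multicast address into the
 * filter context.  Addresses of the form 01:00:5E:00:00:xx index the
 * special multicast table directly by their last byte; everything else is
 * CRC-8 hashed into the "other" table.  Entries carry the pass bit and RX
 * queue, as in mge_set_ucast_address().
 */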
2040 static u_int
2041 mge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2042 {
2043 	static const uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
2044 	struct mge_hash_maddr_ctx *ctx = arg;
2045 	static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
2046 	uint8_t *mac;
2047 	int i;
2048 
2049 	mac = LLADDR(sdl);
2050 	if (memcmp(mac, special, sizeof(special)) == 0) {
2051 		i = mac[5];
2052 		ctx->smt[i >> 2] |= v << ((i & 0x03) << 3);
2053 	} else {
2054 		i = mge_crc8(mac, ETHER_ADDR_LEN);
2055 		ctx->omt[i >> 2] |= v << ((i & 0x03) << 3);
2056 	}
2057 	return (1);
2058 }
2059 
2060 static void
2061 mge_setup_multicast(struct mge_softc *sc)
2062 {
2063 	struct mge_hash_maddr_ctx ctx;
2064 	struct ifnet *ifp = sc->ifp;
2065 	static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
2066 	int i;
2067 
2068 	if (ifp->if_flags & IFF_ALLMULTI) {
2069 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
2070 			ctx.smt[i] = ctx.omt[i] =
2071 			    (v << 24) | (v << 16) | (v << 8) | v;
2072 	} else {
2073 		memset(&ctx, 0, sizeof(ctx));
2074 		if_foreach_llmaddr(ifp, mge_hash_maddr, &ctx);
2075 	}
2076 
2077 	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
2078 		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), ctx.smt[i]);
2079 		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), ctx.omt[i]);
2080 	}
2081 }
2082 
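/*
 * Descriptive note (added): program the RX interrupt coalescing threshold
 * into its version-dependent field of the SDMA config register;
 * mge_rx_ipg() encodes the bit layout for each controller version.
 */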
2083 static void
2084 mge_set_rxic(struct mge_softc *sc)
2085 {
2086 	uint32_t reg;
2087 
2088 	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
2089 		sc->rx_ic_time = sc->mge_rx_ipg_max;
2090 
2091 	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
2092 	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
2093 	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
2094 	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
2095 }
2096 
2097 static void
2098 mge_set_txic(struct mge_softc *sc)
2099 {
2100 	uint32_t reg;
2101 
2102 	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
2103 		sc->tx_ic_time = sc->mge_tfut_ipg_max;
2104 
2105 	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
2106 	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
2107 	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
2108 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
2109 }
2110 
2111 static int
2112 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
2113 {
2114 	struct mge_softc *sc = (struct mge_softc *)arg1;
2115 	uint32_t time;
2116 	int error;
2117 
2118 	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
2119 	error = sysctl_handle_int(oidp, &time, 0, req);
2120 	if (error != 0)
		return (error);
2122 
2123 	MGE_GLOBAL_LOCK(sc);
2124 	if (arg2 == MGE_IC_RX) {
2125 		sc->rx_ic_time = time;
2126 		mge_set_rxic(sc);
2127 	} else {
2128 		sc->tx_ic_time = time;
2129 		mge_set_txic(sc);
2130 	}
2131 	MGE_GLOBAL_UNLOCK(sc);
2132 
	return (0);
2134 }
2135 
2136 static void
2137 mge_add_sysctls(struct mge_softc *sc)
2138 {
2139 	struct sysctl_ctx_list *ctx;
2140 	struct sysctl_oid_list *children;
2141 	struct sysctl_oid *tree;
2142 
2143 	ctx = device_get_sysctl_ctx(sc->dev);
2144 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2145 	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
2146 	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "MGE Interrupts coalescing");
2147 	children = SYSCTL_CHILDREN(tree);
2148 
2149 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
2150 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, MGE_IC_RX,
2151 	    mge_sysctl_ic, "I", "IC RX time threshold");
2152 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
2153 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, MGE_IC_TX,
2154 	    mge_sysctl_ic, "I", "IC TX time threshold");
2155 }
2156 
2157 static int
2158 mge_mdio_writereg(device_t dev, int phy, int reg, int value)
2159 {
2160 
2161 	mv_write_ge_smi(dev, phy, reg, value);
2162 
2163 	return (0);
2164 }
2165 
2167 static int
2168 mge_mdio_readreg(device_t dev, int phy, int reg)
2169 {

	return (mv_read_ge_smi(dev, phy, reg));
2175 }
2176