/*-
 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
 * Copyright (C) 2009-2015 Semihalf
 * Copyright (C) 2015 Stormshield
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/etherswitch/mdio.h>

#include <dev/mge/if_mgevar.h>
#include <arm/mv/mvreg.h>
#include <arm/mv/mvvar.h>

#include "miibus_if.h"
#include "mdio_if.h"

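/*
 * Sleeping (as opposed to busy-wait) delay, used on the SMI slow path.
 * pause(9) expects a tick count, so the requested interval is scaled by
 * tick_sbt; callers must be in a context that may sleep, which is why the
 * SMI interlock below is an sx lock rather than a mutex.
 */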
#define	MGE_DELAY(x)	pause("SMI access sleep", (x) / tick_sbt)

static int mge_probe(device_t dev);
static int mge_attach(device_t dev);
static int mge_detach(device_t dev);
static int mge_shutdown(device_t dev);
static int mge_suspend(device_t dev);
static int mge_resume(device_t dev);

static int mge_miibus_readreg(device_t dev, int phy, int reg);
static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);

static int mge_mdio_readreg(device_t dev, int phy, int reg);
static int mge_mdio_writereg(device_t dev, int phy, int reg, int value);

static int mge_ifmedia_upd(struct ifnet *ifp);
static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

static void mge_init(void *arg);
static void mge_init_locked(void *arg);
static void mge_start(struct ifnet *ifp);
static void mge_start_locked(struct ifnet *ifp);
static void mge_watchdog(struct mge_softc *sc);
static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);

static uint32_t mge_tfut_ipg(uint32_t val, int ver);
static uint32_t mge_rx_ipg(uint32_t val, int ver);
static void mge_ver_params(struct mge_softc *sc);

static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
static void mge_intr_rxtx(void *arg);
static void mge_intr_rx(void *arg);
static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
    uint32_t int_cause_ext);
static int mge_intr_rx_locked(struct mge_softc *sc, int count);
static void mge_intr_tx(void *arg);
static void mge_intr_tx_locked(struct mge_softc *sc);
static void mge_intr_misc(void *arg);
static void mge_intr_sum(void *arg);
static void mge_intr_err(void *arg);
static void mge_stop(struct mge_softc *sc);
static void mge_tick(void *msc);
static uint32_t mge_set_port_serial_control(uint32_t media);
static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
static void mge_set_mac_address(struct mge_softc *sc);
static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
    uint8_t queue);
static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
static int mge_allocate_dma(struct mge_softc *sc);
static int mge_alloc_desc_dma(struct mge_softc *sc,
    struct mge_desc_wrapper* desc_tab, uint32_t size,
    bus_dma_tag_t *buffer_tag);
static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, bus_addr_t *paddr);
static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void mge_free_dma(struct mge_softc *sc);
static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize);
static void mge_offload_setup_descriptor(struct mge_softc *sc,
    struct mge_desc_wrapper *dw);
static uint8_t mge_crc8(uint8_t *data, int size);
static void mge_setup_multicast(struct mge_softc *sc);
static void mge_set_rxic(struct mge_softc *sc);
static void mge_set_txic(struct mge_softc *sc);
static void mge_add_sysctls(struct mge_softc *sc);
static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);

static device_method_t mge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mge_probe),
	DEVMETHOD(device_attach,	mge_attach),
	DEVMETHOD(device_detach,	mge_detach),
	DEVMETHOD(device_shutdown,	mge_shutdown),
	DEVMETHOD(device_suspend,	mge_suspend),
	DEVMETHOD(device_resume,	mge_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
	/* MDIO interface */
	DEVMETHOD(mdio_readreg,		mge_mdio_readreg),
	DEVMETHOD(mdio_writereg,	mge_mdio_writereg),
	{ 0, 0 }
};

DEFINE_CLASS_0(mge, mge_driver, mge_methods, sizeof(struct mge_softc));

static devclass_t mge_devclass;
static int switch_attached = 0;

DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
DRIVER_MODULE(mdio, mge, mdio_driver, mdio_devclass, 0, 0);
MODULE_DEPEND(mge, ether, 1, 1, 1);
MODULE_DEPEND(mge, miibus, 1, 1, 1);
MODULE_DEPEND(mge, mdio, 1, 1, 1);

static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

static struct {
	driver_intr_t *handler;
	char *description;
} mge_intrs[MGE_INTR_COUNT + 1] = {
	{ mge_intr_rxtx, "GbE aggregated interrupt" },
	{ mge_intr_rx,	"GbE receive interrupt" },
	{ mge_intr_tx,	"GbE transmit interrupt" },
	{ mge_intr_misc, "GbE misc interrupt" },
	{ mge_intr_sum,	"GbE summary interrupt" },
	{ mge_intr_err,	"GbE error interrupt" },
};

/* SMI access interlock */
static struct sx sx_smi;

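/*
 * SMI (MDIO) access protocol as implemented by the helpers below: wait for
 * the MGE_SMI_BUSY bit to clear, write a command word with the register
 * number in bits 21-25 and the PHY address in bits 16-20 (plus the data,
 * for writes), and for reads poll MGE_SMI_READVALID before fetching the
 * result from the data field.  All instances share a single management
 * interface, hence the global sx_smi interlock.
 */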
static uint32_t
mv_read_ge_smi(device_t dev, int phy, int reg)
{
	uint32_t timeout;
	uint32_t ret;
	struct mge_softc *sc;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("NULL softc ptr!"));
	timeout = MGE_SMI_WRITE_RETRIES;

	MGE_SMI_LOCK();
	while (--timeout &&
	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
		MGE_DELAY(MGE_SMI_WRITE_DELAY);

	if (timeout == 0) {
		device_printf(dev, "SMI read timeout.\n");
		ret = ~0U;
		goto out;
	}

	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));

	/* Wait till finished. */
	timeout = MGE_SMI_WRITE_RETRIES;
	while (--timeout &&
	    !((MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_READVALID)))
		MGE_DELAY(MGE_SMI_WRITE_DELAY);

	if (timeout == 0) {
		device_printf(dev, "SMI read validation timeout.\n");
		ret = ~0U;
		goto out;
	}

	/* Wait for the data to update in the SMI register */
	MGE_DELAY(MGE_SMI_DELAY);
	ret = MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;

out:
	MGE_SMI_UNLOCK();
	return (ret);
}

static void
mv_write_ge_smi(device_t dev, int phy, int reg, uint32_t value)
{
	uint32_t timeout;
	struct mge_softc *sc;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("NULL softc ptr!"));

	MGE_SMI_LOCK();
	timeout = MGE_SMI_READ_RETRIES;
	while (--timeout &&
	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
		MGE_DELAY(MGE_SMI_READ_DELAY);

	if (timeout == 0) {
		device_printf(dev, "SMI write timeout.\n");
		goto out;
	}

	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
	    (value & MGE_SMI_DATA_MASK)));

out:
	MGE_SMI_UNLOCK();
}

static int
mv_read_ext_phy(device_t dev, int phy, int reg)
{
	uint32_t retries;
	struct mge_softc *sc;
	uint32_t ret;

	sc = device_get_softc(dev);

	MGE_SMI_LOCK();
	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));

	retries = MGE_SMI_READ_RETRIES;
	while (--retries &&
	    !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
		DELAY(MGE_SMI_READ_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while reading from PHY\n");

	ret = MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
	MGE_SMI_UNLOCK();

	return (ret);
}

static void
mv_write_ext_phy(device_t dev, int phy, int reg, int value)
{
	uint32_t retries;
	struct mge_softc *sc;

	sc = device_get_softc(dev);

	MGE_SMI_LOCK();
	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
	    (value & MGE_SMI_DATA_MASK)));

	retries = MGE_SMI_WRITE_RETRIES;
	while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
		DELAY(MGE_SMI_WRITE_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while writing to PHY\n");
	MGE_SMI_UNLOCK();
}

static void
mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
{
	uint32_t mac_l, mac_h;
	uint8_t lmac[6];
	int i, valid;

	/*
	 * Retrieve hw address from the device tree.
	 */
	i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
	if (i == 6) {
		valid = 0;
		for (i = 0; i < 6; i++)
			if (lmac[i] != 0) {
				valid = 1;
				break;
			}

		if (valid) {
			bcopy(lmac, addr, 6);
			return;
		}
	}

	/*
	 * Fall back -- use the currently programmed address.
	 */
	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);

	addr[0] = (mac_h & 0xff000000) >> 24;
	addr[1] = (mac_h & 0x00ff0000) >> 16;
	addr[2] = (mac_h & 0x0000ff00) >> 8;
	addr[3] = (mac_h & 0x000000ff);
	addr[4] = (mac_l & 0x0000ff00) >> 8;
	addr[5] = (mac_l & 0x000000ff);
}

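/*
 * The TX FIFO urgent threshold and RX interrupt-coalescing times live in
 * chip-version-specific bit-fields: v1 uses 14-bit fields, v2 widens them
 * to 16 bits.  For the v2 RX field the value is split, with bits 0-14
 * stored at offset 7 and bit 15 relocated to bit 25.
 */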
static uint32_t
mge_tfut_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 4);
	case 2:
	default:
		return ((val & 0xffff) << 4);
	}
}

static uint32_t
mge_rx_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 8);
	case 2:
	default:
		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
	}
}

static void
mge_ver_params(struct mge_softc *sc)
{
	uint32_t d, r;

	soc_id(&d, &r);
	if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
	    d == MV_DEV_88F6282 ||
	    d == MV_DEV_MV78100 ||
	    d == MV_DEV_MV78100_Z0 ||
	    (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
		sc->mge_ver = 2;
		sc->mge_mtu = 0x4e8;
		sc->mge_tfut_ipg_max = 0xFFFF;
		sc->mge_rx_ipg_max = 0xFFFF;
		sc->mge_tx_arb_cfg = 0xFC0000FF;
		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	} else {
		sc->mge_ver = 1;
		sc->mge_mtu = 0x458;
		sc->mge_tfut_ipg_max = 0x3FFF;
		sc->mge_rx_ipg_max = 0x3FFF;
		sc->mge_tx_arb_cfg = 0x000000FF;
		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	}
	if (d == MV_DEV_88RC8180)
		sc->mge_intr_cnt = 1;
	else
		sc->mge_intr_cnt = 2;

	if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
		sc->mge_hw_csum = 0;
	else
		sc->mge_hw_csum = 1;
}

434 
435 static void
436 mge_set_mac_address(struct mge_softc *sc)
437 {
438 	char *if_mac;
439 	uint32_t mac_l, mac_h;
440 
441 	MGE_GLOBAL_LOCK_ASSERT(sc);
442 
443 	if_mac = (char *)IF_LLADDR(sc->ifp);
444 
445 	mac_l = (if_mac[4] << 8) | (if_mac[5]);
446 	mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
447 	    (if_mac[2] << 8) | (if_mac[3] << 0);
448 
449 	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
450 	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
451 
452 	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
453 }
454 
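/*
 * The unicast DA filter matches on the low nibble of the last MAC address
 * byte: 16 byte-wide table entries spread over MGE_UCAST_REG_NUMBER
 * registers, four entries per register.  Each entry holds a "pass" bit
 * (bit 0) and the target RX queue in the bits above it.
 */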
static void
mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
{
	uint32_t reg_idx, reg_off, reg_val, i;

	last_byte &= 0xf;
	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
	reg_val = (1 | (queue << 1)) << reg_off;

	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
		if (i == reg_idx)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
		else
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
	}
}

static void
mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
{
	uint32_t port_config;
	uint32_t reg_val, i;

	/* Enable or disable promiscuous mode as needed */
	if (sc->ifp->if_flags & IFF_PROMISC) {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config |= PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
		    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
		}

		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);

	} else {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config &= ~PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
		}

		mge_set_mac_address(sc);
	}
}

static void
mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;

	*paddr = segs->ds_addr;
}

static int
mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    bus_addr_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error;
	int nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (nsegs != 1 || error)
		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}

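/*
 * Descriptors are allocated one at a time, walking the table backwards so
 * that each entry can be linked (by bus address) to its successor as soon
 * as it exists; the assignment after the loop closes the ring by pointing
 * the last descriptor back at the first.
 */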
static int
mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t *buffer_tag)
{
	struct mge_desc_wrapper *dw;
	bus_addr_t desc_paddr;
	int i, error;

	desc_paddr = 0;
	for (i = size - 1; i >= 0; i--) {
		dw = &(tab[i]);
		error = bus_dmamem_alloc(sc->mge_desc_dtag,
		    (void**)&(dw->mge_desc),
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &(dw->desc_dmap));

		if (error) {
			if_printf(sc->ifp, "failed to allocate DMA memory\n");
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);

		if (error) {
			if_printf(sc->ifp, "can't load descriptor\n");
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		/* Chain descriptors */
		dw->mge_desc->next_desc = desc_paddr;
		desc_paddr = dw->mge_desc_paddr;
	}
	tab[size - 1].mge_desc->next_desc = desc_paddr;

	/* Allocate a busdma tag for mbufs. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    buffer_tag);			/* dmat */
	if (error) {
		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	/* Create busdma maps for the mbufs (used by both TX and RX rings) */
	for (i = 0; i < size; i++) {
		dw = &(tab[i]);
		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
		if (error) {
			if_printf(sc->ifp, "failed to create map for mbuf\n");
			return (ENXIO);
		}

		dw->buffer = (struct mbuf*)NULL;
		dw->mge_desc->buffer = (bus_addr_t)NULL;
	}

	return (0);
}

static int
mge_allocate_dma(struct mge_softc *sc)
{
	int error;
	struct mge_desc_wrapper *dw;
	int i;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
	    16, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->mge_desc_dtag);		/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create busdma tag for descriptors\n");
		return (ENXIO);
	}

	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
	    &sc->mge_tx_dtag);
	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;

	return (0);
}

static void
mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
{
	struct mge_desc_wrapper *dw;
	int i;

	for (i = 0; i < size; i++) {
		/* Free buffer mbuf */
		dw = &(tab[i]);

		if (dw->buffer_dmap) {
			if (free_mbufs) {
				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
			}
			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
			if (free_mbufs)
				m_freem(dw->buffer);
		}
		/* Free descriptor memory */
		if (dw->desc_dmap) {
			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
		}
	}
}

static void
mge_free_dma(struct mge_softc *sc)
{

	/* Free descriptors and mbufs */
	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);

	/* Destroy mbuf dma tag */
	bus_dma_tag_destroy(sc->mge_tx_dtag);
	bus_dma_tag_destroy(sc->mge_rx_dtag);
	/* Destroy descriptors tag */
	bus_dma_tag_destroy(sc->mge_desc_dtag);
}

static void
mge_reinit_rx(struct mge_softc *sc)
{
	struct mge_desc_wrapper *dw;
	int i;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);

	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
	sc->rx_desc_curr = 0;

	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
}

#ifdef DEVICE_POLLING
static poll_handler_t mge_poll;

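/*
 * Polling handler: drains up to "count" received frames and any completed
 * TX descriptors with interrupts disabled.  POLL_AND_CHECK_STATUS
 * additionally acknowledges latched interrupt causes and recovers from RX
 * resource errors by reinitializing the RX ring.
 */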
static int
mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct mge_softc *sc = ifp->if_softc;
	uint32_t int_cause, int_cause_ext;
	int rx_npkts = 0;

	MGE_RECEIVE_LOCK(sc);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		MGE_RECEIVE_UNLOCK(sc);
		return (rx_npkts);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

		/* Check for resource error */
		if (int_cause & MGE_PORT_INT_RXERRQ0)
			mge_reinit_rx(sc);

		if (int_cause || int_cause_ext) {
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		}
	}

	rx_npkts = mge_intr_rx_locked(sc, count);

	MGE_RECEIVE_UNLOCK(sc);
	MGE_TRANSMIT_LOCK(sc);
	mge_intr_tx_locked(sc);
	MGE_TRANSMIT_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static int
mge_attach(device_t dev)
{
	struct mge_softc *sc;
	struct mii_softc *miisc;
	struct ifnet *ifp;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	int i, error, phy;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);
	phy = 0;

	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) == 0) {
		device_printf(dev, "PHY%i attached, phy_sc points to %s\n", phy,
		    device_get_nameunit(sc->phy_sc->dev));
		sc->phy_attached = 1;
	} else {
		device_printf(dev, "PHY not attached.\n");
		sc->phy_attached = 0;
		sc->phy_sc = sc;
	}

	if (fdt_find_compatible(sc->node, "mrvl,sw", 1) != 0) {
		device_printf(dev, "Switch attached.\n");
		sc->switch_attached = 1;
		/* additional variable available across instances */
		switch_attached = 1;
	} else {
		sc->switch_attached = 0;
	}

	if (device_get_unit(dev) == 0) {
		sx_init(&sx_smi, "mge_tick() SMI access threads interlock");
	}

	/* Set chip version-dependent parameters */
	mge_ver_params(sc);

	/* Initialize mutexes */
	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
	    MTX_DEF);
	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
	    MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		mge_detach(dev);
		return (ENXIO);
	}

	/* Allocate DMA, buffers, buffer descriptors */
	error = mge_allocate_dma(sc);
	if (error) {
		mge_detach(dev);
		return (ENXIO);
	}

	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Configure defaults for interrupts coalescing */
	sc->rx_ic_time = 768;
	sc->tx_ic_time = 768;
	mge_add_sysctls(sc);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		mge_detach(dev);
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (sc->mge_hw_csum) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
	}
	ifp->if_capenable = ifp->if_capabilities;

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ifp->if_init = mge_init;
	ifp->if_start = mge_start;
	ifp->if_ioctl = mge_ioctl;

	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	mge_get_mac_address(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);
	callout_init(&sc->wd_callout, 0);

	/* Attach PHY(s) */
	if (sc->phy_attached) {
		error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
		    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
		if (error) {
			device_printf(dev, "MII failed to find PHY\n");
			if_free(ifp);
			sc->ifp = NULL;
			mge_detach(dev);
			return (error);
		}
		sc->mii = device_get_softc(sc->miibus);

		/* Tell the MAC where to find the PHY so autoneg works */
		miisc = LIST_FIRST(&sc->mii->mii_phys);
		MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
	} else {
		/* no PHY, so use hard-coded values */
		ifmedia_init(&sc->mge_ifmedia, 0,
		    mge_ifmedia_upd,
		    mge_ifmedia_sts);
		ifmedia_add(&sc->mge_ifmedia,
		    IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_set(&sc->mge_ifmedia,
		    IFM_ETHER | IFM_1000_T | IFM_FDX);
	}

	/* Attach interrupt handlers */
	/* TODO: review flags, in part. mark RX as INTR_ENTROPY ? */
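	/*
	 * sc->res[0] is the register window, so IRQ resources start at
	 * sc->res[1].  Controllers with a single combined IRQ
	 * (mge_intr_cnt == 1) get the aggregated rxtx handler (table slot
	 * 0); otherwise the per-cause handlers from slot 1 onwards are
	 * installed.
	 */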
	for (i = 1; i <= sc->mge_intr_cnt; ++i) {
		error = bus_setup_intr(dev, sc->res[i],
		    INTR_TYPE_NET | INTR_MPSAFE,
		    NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
		    sc, &sc->ih_cookie[i - 1]);
		if (error) {
			device_printf(dev, "could not setup %s\n",
			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
			mge_detach(dev);
			return (error);
		}
	}

	if (sc->switch_attached) {
		MGE_WRITE(sc, MGE_REG_PHYDEV, MGE_SWITCH_PHYDEV);
		device_add_child(dev, "mdio", -1);
		bus_generic_attach(dev);
	}

	return (0);
}

static int
mge_detach(device_t dev)
{
	struct mge_softc *sc;
	int error, i;

	sc = device_get_softc(dev);

	/* Stop controller and free TX queue */
	if (sc->ifp)
		mge_shutdown(dev);

	/* Wait for stopping ticks */
	callout_drain(&sc->wd_callout);

	/* Stop and release all interrupts */
	for (i = 0; i < sc->mge_intr_cnt; ++i) {
		if (!sc->ih_cookie[i])
			continue;

		error = bus_teardown_intr(dev, sc->res[1 + i],
		    sc->ih_cookie[i]);
		if (error)
			device_printf(dev, "could not release %s\n",
			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
	}

	/* Detach network interface */
	if (sc->ifp) {
		ether_ifdetach(sc->ifp);
		if_free(sc->ifp);
	}

	/* Free DMA resources */
	mge_free_dma(sc);

	/* Free IO memory handler */
	bus_release_resources(dev, res_spec, sc->res);

	/* Destroy mutexes */
	mtx_destroy(&sc->receive_lock);
	mtx_destroy(&sc->transmit_lock);

	if (device_get_unit(dev) == 0)
		sx_destroy(&sx_smi);

	return (0);
}

static void
mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	MGE_GLOBAL_LOCK(sc);

	if (!sc->phy_attached) {
		ifmr->ifm_active = IFM_1000_T | IFM_FDX | IFM_ETHER;
		ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
		goto out_unlock;
	}

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

out_unlock:
	MGE_GLOBAL_UNLOCK(sc);
}

static uint32_t
mge_set_port_serial_control(uint32_t media)
{
	uint32_t port_config;

	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);

	if (IFM_TYPE(media) == IFM_ETHER) {
		switch (IFM_SUBTYPE(media)) {
		case IFM_AUTO:
			break;
		case IFM_1000_T:
			port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
			    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		case IFM_100_TX:
			port_config |= (PORT_SERIAL_MII_SPEED_100 |
			    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		case IFM_10_T:
			port_config |= (PORT_SERIAL_AUTONEG |
			    PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		}
		if (media & IFM_FDX)
			port_config |= PORT_SERIAL_FULL_DUPLEX;
	}
	return (port_config);
}

static int
mge_ifmedia_upd(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	/*
	 * Do not do anything for switch here, as updating media between
	 * MGE MAC and switch MAC is hardcoded in PCB. Changing it here would
	 * break the link.
	 */
	if (sc->phy_attached) {
		MGE_GLOBAL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			sc->mge_media_status = sc->mii->mii_media.ifm_media;
			mii_mediachg(sc->mii);

			/* MGE MAC needs to be reinitialized. */
			mge_init_locked(sc);
		}
		MGE_GLOBAL_UNLOCK(sc);
	}

	return (0);
}

static void
mge_init(void *arg)
{
	struct mge_softc *sc;

	sc = arg;
	MGE_GLOBAL_LOCK(sc);

	mge_init_locked(arg);

	MGE_GLOBAL_UNLOCK(sc);
}

static void
mge_init_locked(void *arg)
{
	struct mge_softc *sc = arg;
	struct mge_desc_wrapper *dw;
	volatile uint32_t reg_val;
	int i, count;
	uint32_t media_status;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	/* Stop interface */
	mge_stop(sc);

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Set MAC address */
	mge_set_mac_address(sc);

	/* Setup multicast filters */
	mge_setup_multicast(sc);

	if (sc->mge_ver == 2) {
		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
	}

	/* Initialize TX queue configuration registers */
	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);

	/* Clear TX queue configuration registers for unused queues */
	for (i = 1; i < 7; i++) {
		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
	}

	/* Set default MTU */
	MGE_WRITE(sc, sc->mge_mtu, 0);

	/* Port configuration */
	MGE_WRITE(sc, MGE_PORT_CONFIG,
	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
	    PORT_CONFIG_ARO_RXQ(0));
	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);

	/* Configure promisc mode */
	mge_set_prom_mode(sc, MGE_RX_DEFAULT_QUEUE);

	media_status = sc->mge_media_status;
	if (sc->switch_attached) {
		media_status &= ~IFM_TMASK;
		media_status |= IFM_1000_T;
	}

	/* Setup port configuration */
	reg_val = mge_set_port_serial_control(media_status);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);

	/* Setup SDMA configuration */
	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
	    MGE_SDMA_TX_BYTE_SWAP |
	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));

	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);

	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Reset descriptor indexes */
	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Enable RX descriptors */
	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &sc->mge_rx_desc[i];
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		dw->mge_desc->buff_size = MCLBYTES;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

	/* Enable port */
	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val |= PORT_SERIAL_ENABLE;
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
	count = 0x100000;
	for (;;) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (reg_val & MGE_STATUS_LINKUP)
			break;
		DELAY(100);
		if (--count == 0) {
			if_printf(sc->ifp, "Timeout on link-up\n");
			break;
		}
	}

	/* Setup interrupts coalescing */
	mge_set_rxic(sc);
	mge_set_txic(sc);

	/* Enable interrupts */
#ifdef DEVICE_POLLING
	/*
	 * ...only if polling is not turned on. Disable interrupts explicitly
	 * if polling is enabled.
	 */
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		mge_intrs_ctrl(sc, 0);
	else
#endif /* DEVICE_POLLING */
	mge_intrs_ctrl(sc, 1);

	/* Activate network interface */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->wd_timer = 0;

	/* Schedule watchdog timeout */
	if (sc->phy_attached)
		callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_intr_rxtx(void *arg)
{
	struct mge_softc *sc;
	uint32_t int_cause, int_cause_ext;

	sc = arg;
	MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_GLOBAL_UNLOCK(sc);
		return;
	}
#endif

	/* Get interrupt cause */
	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	/* Check for Transmit interrupt */
	if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
	    MGE_PORT_INT_EXT_TXUR)) {
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
		    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
		mge_intr_tx_locked(sc);
	}

	/*
	 * MGE_GLOBAL_LOCK() holds both the transmit and the receive locks,
	 * so drop each half separately as its work completes.
	 */
	MGE_TRANSMIT_UNLOCK(sc);

	/* Check for Receive interrupt */
	mge_intr_rx_check(sc, int_cause, int_cause_ext);

	MGE_RECEIVE_UNLOCK(sc);
}

static void
mge_intr_err(void *arg)
{
	struct mge_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_misc(void *arg)
{
	struct mge_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_rx(void *arg)
{
	struct mge_softc *sc;
	uint32_t int_cause, int_cause_ext;

	sc = arg;
	MGE_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_RECEIVE_UNLOCK(sc);
		return;
	}
#endif

	/* Get interrupt cause */
	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	mge_intr_rx_check(sc, int_cause, int_cause_ext);

	MGE_RECEIVE_UNLOCK(sc);
}

static void
mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
    uint32_t int_cause_ext)
{
	/* Check for resource error */
	if (int_cause & MGE_PORT_INT_RXERRQ0) {
		mge_reinit_rx(sc);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
		    ~(int_cause & MGE_PORT_INT_RXERRQ0));
	}

	int_cause &= MGE_PORT_INT_RXQ0;
	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;

	if (int_cause || int_cause_ext) {
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		mge_intr_rx_locked(sc, -1);
	}
}

static int
mge_intr_rx_locked(struct mge_softc *sc, int count)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t status;
	uint16_t bufsize;
	struct mge_desc_wrapper* dw;
	struct mbuf *mb;
	int rx_npkts = 0;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	while (count != 0) {
		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get status */
		status = dw->mge_desc->cmd_status;
		bufsize = dw->mge_desc->buff_size;
		if ((status & MGE_DMA_OWNED) != 0)
			break;

		if (dw->mge_desc->byte_count &&
		    !(status & MGE_ERR_SUMMARY)) {
			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
			    BUS_DMASYNC_POSTREAD);

			mb = m_devget(dw->buffer->m_data,
			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
			    0, ifp, NULL);

			if (mb == NULL)
				/* Give up if no mbufs */
				break;

			/*
			 * The controller prepends two padding bytes to the
			 * frame (presumably so the IP header lands 32-bit
			 * aligned); strip them before passing the packet up.
			 */
			mb->m_len -= 2;
			mb->m_pkthdr.len -= 2;
			mb->m_data += 2;

			mb->m_pkthdr.rcvif = ifp;

			mge_offload_process_frame(ifp, mb, status,
			    bufsize);

			MGE_RECEIVE_UNLOCK(sc);
			(*ifp->if_input)(ifp, mb);
			MGE_RECEIVE_LOCK(sc);
			rx_npkts++;
		}

		dw->mge_desc->byte_count = 0;
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (count > 0)
			count -= 1;
	}

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_npkts);

	return (rx_npkts);
}

static void
mge_intr_sum(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_tx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause_ext;

	MGE_TRANSMIT_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_TRANSMIT_UNLOCK(sc);
		return;
	}
#endif

	/* Ack the interrupt */
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
	    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));

	mge_intr_tx_locked(sc);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_intr_tx_locked(struct mge_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	uint32_t status;
	int send = 0;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	/* Disable watchdog */
	sc->wd_timer = 0;

	while (sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx =
		    (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		/* Update collision statistics */
		if (status & MGE_ERR_SUMMARY) {
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16);
		}

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
		send++;

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}

	if (send) {
		/* Now send anything that was pending */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		mge_start_locked(ifp);
	}
}

static int
mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error;
	uint32_t flags;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		MGE_GLOBAL_LOCK(sc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = ifp->if_flags ^ sc->mge_if_flags;
				if (flags & IFF_PROMISC)
					mge_set_prom_mode(sc,
					    MGE_RX_DEFAULT_QUEUE);

				if (flags & IFF_ALLMULTI)
					mge_setup_multicast(sc);
			} else
				mge_init_locked(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			mge_stop(sc);

		sc->mge_if_flags = ifp->if_flags;
		MGE_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			MGE_GLOBAL_LOCK(sc);
			mge_setup_multicast(sc);
			MGE_GLOBAL_UNLOCK(sc);
		}
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(mge_poll, ifp);
				if (error)
					return (error);

				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 1);
				ifp->if_capenable &= ~IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;
	case SIOCGIFMEDIA: /* fall through */
	case SIOCSIFMEDIA:
		/*
		 * Setting up media type via ioctls is *not* supported for MAC
		 * which is connected to switch. Use etherswitchcfg.
		 */
		if (!sc->phy_attached && (command == SIOCSIFMEDIA))
			return (0);
		else if (!sc->phy_attached) {
			error = ifmedia_ioctl(ifp, ifr, &sc->mge_ifmedia,
			    command);
			break;
		}

		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T &&
		    !(ifr->ifr_media & IFM_FDX)) {
			device_printf(sc->dev,
			    "1000baseT half-duplex unsupported\n");
			return (0);
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

static int
mge_miibus_readreg(device_t dev, int phy, int reg)
{

	KASSERT(!switch_attached, ("miibus used with switch attached"));

	return (mv_read_ext_phy(dev, phy, reg));
}

static int
mge_miibus_writereg(device_t dev, int phy, int reg, int value)
{

	KASSERT(!switch_attached, ("miibus used with switch attached"));

	mv_write_ext_phy(dev, phy, reg, value);

	return (0);
}

static int
mge_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
		return (ENXIO);

	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
	return (BUS_PROBE_DEFAULT);
}

static int
mge_resume(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}

static int
mge_shutdown(device_t dev)
{
	struct mge_softc *sc = device_get_softc(dev);

	MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(sc->ifp);
#endif

	mge_stop(sc);

	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

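/*
 * Load an outgoing mbuf into the next free TX descriptor.  Only a single
 * DMA segment per frame is supported (mge_start_locked() defragments
 * chains beforehand); the descriptor is handed to the hardware by setting
 * MGE_DMA_OWNED once all of its other fields have been filled in.
 */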
static int
mge_encap(struct mge_softc *sc, struct mbuf *m0)
{
	struct mge_desc_wrapper *dw = NULL;
	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
	bus_dmamap_t mapp;
	int error;
	int seg, nsegs;
	int desc_no;

	/* Fetch unused map */
	desc_no = sc->tx_desc_curr;
	dw = &sc->mge_tx_desc[desc_no];
	mapp = dw->buffer_dmap;

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		m_freem(m0);
		return (error);
	}

	/* Only one segment is supported. */
	if (nsegs != 1) {
		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
		m_freem(m0);
		return (-1);
	}

	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);

	/* Everything is ok, now we can send buffers */
	for (seg = 0; seg < nsegs; seg++) {
		dw->mge_desc->byte_count = segs[seg].ds_len;
		dw->mge_desc->buffer = segs[seg].ds_addr;
		dw->buffer = m0;
		dw->mge_desc->cmd_status = 0;
		if (seg == 0)
			mge_offload_setup_descriptor(sc, dw);
		dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
		    MGE_DMA_OWNED;
	}

	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
	sc->tx_desc_used_count++;
	return (0);
}

static void
mge_tick(void *msc)
{
	struct mge_softc *sc = msc;

	KASSERT(sc->phy_attached == 1, ("mge_tick while PHY not attached"));

	MGE_GLOBAL_LOCK(sc);

	/* Check for TX timeout */
	mge_watchdog(sc);

	mii_tick(sc->mii);

	/* Check for media type change */
	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
		mge_ifmedia_upd(sc->ifp);

	MGE_GLOBAL_UNLOCK(sc);

	/* Schedule another timeout one second from now */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_watchdog(struct mge_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;

	if (sc->wd_timer == 0 || --sc->wd_timer) {
		return;
	}

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");

	mge_stop(sc);
	mge_init_locked(sc);
}

static void
mge_start(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK(sc);

	mge_start_locked(ifp);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_start_locked(struct ifnet *ifp)
{
	struct mge_softc *sc;
	struct mbuf *m0, *mtmp;
	uint32_t reg_val, queued = 0;

	sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (;;) {
		/* Get packet from the queue */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (m0->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP) ||
		    m0->m_flags & M_VLANTAG) {
			if (M_WRITABLE(m0) == 0) {
				mtmp = m_dup(m0, M_NOWAIT);
				m_freem(m0);
				if (mtmp == NULL)
					continue;
				m0 = mtmp;
			}
		}
		/* The driver supports only one DMA fragment. */
		if (m0->m_next != NULL) {
			mtmp = m_defrag(m0, M_NOWAIT);
			if (mtmp != NULL)
				m0 = mtmp;
		}

		/* Check for free descriptors */
		if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		if (mge_encap(sc, m0) != 0)
			break;

		queued++;
		BPF_MTAP(ifp, m0);
	}

	if (queued) {
		/* Enable transmitter and watchdog timer */
		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
		sc->wd_timer = 5;
	}
}

static void
mge_stop(struct mge_softc *sc)
{
	struct ifnet *ifp;
	volatile uint32_t reg_val, status;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	int count;

	ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Stop tick engine */
	callout_stop(&sc->wd_callout);

	/* Disable interface */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->wd_timer = 0;

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Disable Rx and Tx */
	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);

	/* Remove pending data from TX queue */
	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
	    sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
		    MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);

		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
	}

	/* Wait for end of transmission */
	count = 0x100000;
	while (count-- > 0) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
			break;
		DELAY(100);
	}

	if (count < 0)
		if_printf(ifp,
		    "%s: timeout while waiting for end of transmission\n",
		    __FUNCTION__);

	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val &= ~(PORT_SERIAL_ENABLE);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
}

static int
mge_suspend(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}

static void
mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize)
{
	int csum_flags = 0;

	if (ifp->if_capenable & IFCAP_RXCSUM) {
		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
		    (status & MGE_RX_L4_CSUM_OK)) {
			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			frame->m_pkthdr.csum_data = 0xFFFF;
		}

		frame->m_pkthdr.csum_flags = csum_flags;
	}
}

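/*
 * Translate the mbuf's checksum-offload requests into TX descriptor flags:
 * the IP header length and position are recorded so the controller can
 * insert the IPv4 and TCP/UDP checksums itself.  Only plain IPv4 frames
 * (optionally VLAN-tagged) are handled.
 */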
static void
mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
{
	struct mbuf *m0 = dw->buffer;
	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
	int csum_flags = m0->m_pkthdr.csum_flags;
	int cmd_status = 0;
	struct ip *ip;
	int ehlen, etype;

	if (csum_flags != 0) {
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			etype = ntohs(eh->evl_proto);
			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			cmd_status |= MGE_TX_VLAN_TAGGED;
		} else {
			etype = ntohs(eh->evl_encap_proto);
			ehlen = ETHER_HDR_LEN;
		}

		if (etype != ETHERTYPE_IP) {
			if_printf(sc->ifp,
			    "TCP/IP Offload enabled for unsupported "
			    "protocol!\n");
			return;
		}

		ip = (struct ip *)(m0->m_data + ehlen);
		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
		cmd_status |= MGE_TX_NOT_FRAGMENT;
	}

	if (csum_flags & CSUM_IP)
		cmd_status |= MGE_TX_GEN_IP_CSUM;

	if (csum_flags & CSUM_TCP)
		cmd_status |= MGE_TX_GEN_L4_CSUM;

	if (csum_flags & CSUM_UDP)
		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;

	dw->mge_desc->cmd_status |= cmd_status;
}

static void
mge_intrs_ctrl(struct mge_softc *sc, int enable)
{

	if (enable) {
		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
		    MGE_PORT_INT_EXT_TXBUF0);
	} else {
		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_INT_MASK, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
	}
}

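/*
 * Table-driven CRC-8 using the generator polynomial 0x07
 * (x^8 + x^2 + x + 1).  The controller hashes incoming destination
 * addresses the same way to index the 256-entry "other multicast" filter
 * table, so mge_setup_multicast() below must compute an identical value.
 */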
static uint8_t
mge_crc8(uint8_t *data, int size)
{
	uint8_t crc = 0;
	static const uint8_t ct[256] = {
		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
	};

	while (size--)
		crc = ct[crc ^ *(data++)];

	return (crc);
}

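/*
 * Program the two hardware DA filter tables: the "special" table covers
 * the 01:00:5e:00:00:xx block and is indexed directly by the last address
 * byte, while the "other" table is indexed by the CRC-8 of the full
 * address.  Each byte-wide entry replicates the pass bit plus target RX
 * queue encoded in "v"; with IFF_ALLMULTI every entry of both tables is
 * filled in.
 */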
static void
mge_setup_multicast(struct mge_softc *sc)
{
	uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
	uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
	uint32_t smt[MGE_MCAST_REG_NUMBER];
	uint32_t omt[MGE_MCAST_REG_NUMBER];
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	uint8_t *mac;
	int i;

	if (ifp->if_flags & IFF_ALLMULTI) {
		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
			smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
	} else {
		memset(smt, 0, sizeof(smt));
		memset(omt, 0, sizeof(omt));

		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;

			mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			if (memcmp(mac, special, sizeof(special)) == 0) {
				i = mac[5];
				smt[i >> 2] |= v << ((i & 0x03) << 3);
			} else {
				i = mge_crc8(mac, ETHER_ADDR_LEN);
				omt[i >> 2] |= v << ((i & 0x03) << 3);
			}
		}
		if_maddr_runlock(ifp);
	}

	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
	}
}

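/*
 * The RX and TX interrupt-coalescing times are bit-fields within
 * MGE_SDMA_CONFIG and MGE_TX_FIFO_URGENT_TRSH respectively.  The helpers
 * below clamp the sysctl-supplied value to the version-specific maximum,
 * clear the old field (masking with the all-ones pattern produced by
 * encoding that maximum) and insert the new value.
 */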
static void
mge_set_rxic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
		sc->rx_ic_time = sc->mge_rx_ipg_max;

	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
}

static void
mge_set_txic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
		sc->tx_ic_time = sc->mge_tfut_ipg_max;

	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
}

static int
mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
{
	struct mge_softc *sc = (struct mge_softc *)arg1;
	uint32_t time;
	int error;

	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
	error = sysctl_handle_int(oidp, &time, 0, req);
	if (error != 0)
		return (error);

	MGE_GLOBAL_LOCK(sc);
	if (arg2 == MGE_IC_RX) {
		sc->rx_ic_time = time;
		mge_set_rxic(sc);
	} else {
		sc->tx_ic_time = time;
		mge_set_txic(sc);
	}
	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

static void
mge_add_sysctls(struct mge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD, 0, "MGE interrupt coalescing");
	children = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
	    "I", "IC RX time threshold");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
	    "I", "IC TX time threshold");
}

static int
mge_mdio_writereg(device_t dev, int phy, int reg, int value)
{

	mv_write_ge_smi(dev, phy, reg, value);

	return (0);
}

static int
mge_mdio_readreg(device_t dev, int phy, int reg)
{
	int ret;

	ret = mv_read_ge_smi(dev, phy, reg);

	return (ret);
}
2168