xref: /freebsd/sys/dev/mge/if_mge.c (revision d5b0e70f7e04d971691517ce1304d86a1e367e2e)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5  * Copyright (C) 2009-2015 Semihalf
6  * Copyright (C) 2015 Stormshield
7  * All rights reserved.
8  *
9  * Developed by Semihalf.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of MARVELL nor the names of contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
38 #endif
39 
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/endian.h>
46 #include <sys/mbuf.h>
47 #include <sys/lock.h>
48 #include <sys/mutex.h>
49 #include <sys/kernel.h>
50 #include <sys/module.h>
51 #include <sys/socket.h>
52 #include <sys/sysctl.h>
53 
54 #include <net/ethernet.h>
55 #include <net/bpf.h>
56 #include <net/if.h>
57 #include <net/if_arp.h>
58 #include <net/if_dl.h>
59 #include <net/if_media.h>
60 #include <net/if_types.h>
61 #include <net/if_vlan_var.h>
62 
63 #include <netinet/in_systm.h>
64 #include <netinet/in.h>
65 #include <netinet/ip.h>
66 
67 #include <sys/sockio.h>
68 #include <sys/bus.h>
69 #include <machine/bus.h>
70 #include <sys/rman.h>
71 #include <machine/resource.h>
72 
73 #include <dev/mii/mii.h>
74 #include <dev/mii/miivar.h>
75 
76 #include <dev/fdt/fdt_common.h>
77 #include <dev/ofw/ofw_bus.h>
78 #include <dev/ofw/ofw_bus_subr.h>
79 #include <dev/mdio/mdio.h>
80 
81 #include <dev/mge/if_mgevar.h>
82 #include <arm/mv/mvreg.h>
83 #include <arm/mv/mvvar.h>
84 
85 #include "miibus_if.h"
86 #include "mdio_if.h"
87 
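/* Sleep (via pause(9)) rather than busy-wait while polling the SMI unit. */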
88 #define	MGE_DELAY(x)	pause("SMI access sleep", (x) / tick_sbt)
89 
90 static int mge_probe(device_t dev);
91 static int mge_attach(device_t dev);
92 static int mge_detach(device_t dev);
93 static int mge_shutdown(device_t dev);
94 static int mge_suspend(device_t dev);
95 static int mge_resume(device_t dev);
96 
97 static int mge_miibus_readreg(device_t dev, int phy, int reg);
98 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
99 
100 static int mge_mdio_readreg(device_t dev, int phy, int reg);
101 static int mge_mdio_writereg(device_t dev, int phy, int reg, int value);
102 
103 static int mge_ifmedia_upd(struct ifnet *ifp);
104 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
105 
106 static void mge_init(void *arg);
107 static void mge_init_locked(void *arg);
108 static void mge_start(struct ifnet *ifp);
109 static void mge_start_locked(struct ifnet *ifp);
110 static void mge_watchdog(struct mge_softc *sc);
111 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
112 
113 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
114 static uint32_t mge_rx_ipg(uint32_t val, int ver);
115 static void mge_ver_params(struct mge_softc *sc);
116 
117 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
118 static void mge_intr_rxtx(void *arg);
119 static void mge_intr_rx(void *arg);
120 static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
121     uint32_t int_cause_ext);
122 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
123 static void mge_intr_tx(void *arg);
124 static void mge_intr_tx_locked(struct mge_softc *sc);
125 static void mge_intr_misc(void *arg);
126 static void mge_intr_sum(void *arg);
127 static void mge_intr_err(void *arg);
128 static void mge_stop(struct mge_softc *sc);
129 static void mge_tick(void *msc);
130 static uint32_t mge_set_port_serial_control(uint32_t media);
131 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
132 static void mge_set_mac_address(struct mge_softc *sc);
133 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
134     uint8_t queue);
135 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
136 static int mge_allocate_dma(struct mge_softc *sc);
137 static int mge_alloc_desc_dma(struct mge_softc *sc,
138     struct mge_desc_wrapper* desc_tab, uint32_t size,
139     bus_dma_tag_t *buffer_tag);
140 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
141     struct mbuf **mbufp, bus_addr_t *paddr);
142 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
143     int error);
144 static void mge_free_dma(struct mge_softc *sc);
145 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
146     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
147 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
148     uint32_t status, uint16_t bufsize);
149 static void mge_offload_setup_descriptor(struct mge_softc *sc,
150     struct mge_desc_wrapper *dw);
151 static uint8_t mge_crc8(uint8_t *data, int size);
152 static void mge_setup_multicast(struct mge_softc *sc);
153 static void mge_set_rxic(struct mge_softc *sc);
154 static void mge_set_txic(struct mge_softc *sc);
155 static void mge_add_sysctls(struct mge_softc *sc);
156 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
157 
158 static device_method_t mge_methods[] = {
159 	/* Device interface */
160 	DEVMETHOD(device_probe,		mge_probe),
161 	DEVMETHOD(device_attach,	mge_attach),
162 	DEVMETHOD(device_detach,	mge_detach),
163 	DEVMETHOD(device_shutdown,	mge_shutdown),
164 	DEVMETHOD(device_suspend,	mge_suspend),
165 	DEVMETHOD(device_resume,	mge_resume),
166 	/* MII interface */
167 	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
168 	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
169 	/* MDIO interface */
170 	DEVMETHOD(mdio_readreg,		mge_mdio_readreg),
171 	DEVMETHOD(mdio_writereg,	mge_mdio_writereg),
172 	{ 0, 0 }
173 };
174 
175 DEFINE_CLASS_0(mge, mge_driver, mge_methods, sizeof(struct mge_softc));
176 
177 static int switch_attached = 0;
178 
179 DRIVER_MODULE(mge, simplebus, mge_driver, 0, 0);
180 DRIVER_MODULE(miibus, mge, miibus_driver, 0, 0);
181 DRIVER_MODULE(mdio, mge, mdio_driver, 0, 0);
182 MODULE_DEPEND(mge, ether, 1, 1, 1);
183 MODULE_DEPEND(mge, miibus, 1, 1, 1);
184 MODULE_DEPEND(mge, mdio, 1, 1, 1);
185 
186 static struct resource_spec res_spec[] = {
187 	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
188 	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
189 	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
190 	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
191 	{ -1, 0 }
192 };
193 
194 static struct {
195 	driver_intr_t *handler;
196 	char *description;
197 } mge_intrs[MGE_INTR_COUNT + 1] = {
198 	{ mge_intr_rxtx,"GbE aggregated interrupt" },
199 	{ mge_intr_rx,	"GbE receive interrupt" },
200 	{ mge_intr_tx,	"GbE transmit interrupt" },
201 	{ mge_intr_misc,"GbE misc interrupt" },
202 	{ mge_intr_sum,	"GbE summary interrupt" },
203 	{ mge_intr_err,	"GbE error interrupt" },
204 };
205 
206 /* SMI access interlock */
207 static struct sx sx_smi;
208 
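/*
 * Read a PHY register through the controller's SMI (MDIO) unit.  May sleep
 * while waiting for the unit to become ready; used by the MDIO interface
 * methods.
 */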
209 static uint32_t
210 mv_read_ge_smi(device_t dev, int phy, int reg)
211 {
212 	uint32_t timeout;
213 	uint32_t ret;
214 	struct mge_softc *sc;
215 
216 	sc = device_get_softc(dev);
217 	KASSERT(sc != NULL, ("NULL softc ptr!"));
218 	timeout = MGE_SMI_WRITE_RETRIES;
219 
220 	MGE_SMI_LOCK();
221 	while (--timeout &&
222 	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
223 		MGE_DELAY(MGE_SMI_WRITE_DELAY);
224 
225 	if (timeout == 0) {
226 		device_printf(dev, "SMI read timeout.\n");
227 		ret = ~0U;
228 		goto out;
229 	}
230 
231 	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
232 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
233 
234 	/* Wait till finished. */
235 	timeout = MGE_SMI_WRITE_RETRIES;
236 	while (--timeout &&
237 	    !((MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_READVALID)))
238 		MGE_DELAY(MGE_SMI_WRITE_DELAY);
239 
240 	if (timeout == 0) {
241 		device_printf(dev, "SMI read validation timeout.\n");
242 		ret = ~0U;
243 		goto out;
244 	}
245 
246 	/* Wait for the data to update in the SMI register */
247 	MGE_DELAY(MGE_SMI_DELAY);
248 	ret = MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
249 
250 out:
251 	MGE_SMI_UNLOCK();
252 	return (ret);
253 
254 }
255 
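/* Write a PHY register through the controller's SMI (MDIO) unit. */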
256 static void
257 mv_write_ge_smi(device_t dev, int phy, int reg, uint32_t value)
258 {
259 	uint32_t timeout;
260 	struct mge_softc *sc;
261 
262 	sc = device_get_softc(dev);
263 	KASSERT(sc != NULL, ("NULL softc ptr!"));
264 
265 	MGE_SMI_LOCK();
266 	timeout = MGE_SMI_READ_RETRIES;
267 	while (--timeout &&
268 	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
269 		MGE_DELAY(MGE_SMI_READ_DELAY);
270 
271 	if (timeout == 0) {
272 		device_printf(dev, "SMI write timeout.\n");
273 		goto out;
274 	}
275 
276 	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
277 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
278 	    (value & MGE_SMI_DATA_MASK)));
279 
280 out:
281 	MGE_SMI_UNLOCK();
282 }
283 
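/*
 * Read a register of an externally attached PHY.  The SMI unit of the
 * controller pointed to by sc->phy_sc is used, so the PHY may be wired to a
 * different mge instance; called via the miibus interface.
 */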
284 static int
285 mv_read_ext_phy(device_t dev, int phy, int reg)
286 {
287 	uint32_t retries;
288 	struct mge_softc *sc;
289 	uint32_t ret;
290 
291 	sc = device_get_softc(dev);
292 
293 	MGE_SMI_LOCK();
294 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
295 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
296 
297 	retries = MGE_SMI_READ_RETRIES;
298 	while (--retries &&
299 	    !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
300 		DELAY(MGE_SMI_READ_DELAY);
301 
302 	if (retries == 0)
303 		device_printf(dev, "Timeout while reading from PHY\n");
304 
305 	ret = MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
306 	MGE_SMI_UNLOCK();
307 
308 	return (ret);
309 }
310 
311 static void
312 mv_write_ext_phy(device_t dev, int phy, int reg, int value)
313 {
314 	uint32_t retries;
315 	struct mge_softc *sc;
316 
317 	sc = device_get_softc(dev);
318 
319 	MGE_SMI_LOCK();
320 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
321 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
322 	    (value & MGE_SMI_DATA_MASK)));
323 
324 	retries = MGE_SMI_WRITE_RETRIES;
325 	while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
326 		DELAY(MGE_SMI_WRITE_DELAY);
327 
328 	if (retries == 0)
329 		device_printf(dev, "Timeout while writing to PHY\n");
330 	MGE_SMI_UNLOCK();
331 }
332 
333 static void
334 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
335 {
336 	uint32_t mac_l, mac_h;
337 	uint8_t lmac[6];
338 	int i, valid;
339 
340 	/*
341 	 * Retrieve hw address from the device tree.
342 	 */
343 	i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
344 	if (i == 6) {
345 		valid = 0;
346 		for (i = 0; i < 6; i++)
347 			if (lmac[i] != 0) {
348 				valid = 1;
349 				break;
350 			}
351 
352 		if (valid) {
353 			bcopy(lmac, addr, 6);
354 			return;
355 		}
356 	}
357 
358 	/*
359 	 * Fall back -- use the currently programmed address.
360 	 */
361 	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
362 	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
363 
364 	addr[0] = (mac_h & 0xff000000) >> 24;
365 	addr[1] = (mac_h & 0x00ff0000) >> 16;
366 	addr[2] = (mac_h & 0x0000ff00) >> 8;
367 	addr[3] = (mac_h & 0x000000ff);
368 	addr[4] = (mac_l & 0x0000ff00) >> 8;
369 	addr[5] = (mac_l & 0x000000ff);
370 }
371 
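/*
 * Encode interrupt coalescing time values into their version-dependent
 * register fields (TX FIFO urgent threshold here, RX SDMA IPG below); the
 * field width and position differ between controller versions 1 and 2.
 */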
372 static uint32_t
373 mge_tfut_ipg(uint32_t val, int ver)
374 {
375 
376 	switch (ver) {
377 	case 1:
378 		return ((val & 0x3fff) << 4);
379 	case 2:
380 	default:
381 		return ((val & 0xffff) << 4);
382 	}
383 }
384 
385 static uint32_t
386 mge_rx_ipg(uint32_t val, int ver)
387 {
388 
389 	switch (ver) {
390 	case 1:
391 		return ((val & 0x3fff) << 8);
392 	case 2:
393 	default:
394 		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
395 	}
396 }
397 
398 static void
399 mge_ver_params(struct mge_softc *sc)
400 {
401 	uint32_t d, r;
402 
403 	soc_id(&d, &r);
404 	if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
405 	    d == MV_DEV_88F6282 ||
406 	    d == MV_DEV_MV78100 ||
407 	    d == MV_DEV_MV78100_Z0 ||
408 	    (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
409 		sc->mge_ver = 2;
410 		sc->mge_mtu = 0x4e8;
411 		sc->mge_tfut_ipg_max = 0xFFFF;
412 		sc->mge_rx_ipg_max = 0xFFFF;
413 		sc->mge_tx_arb_cfg = 0xFC0000FF;
414 		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
415 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
416 	} else {
417 		sc->mge_ver = 1;
418 		sc->mge_mtu = 0x458;
419 		sc->mge_tfut_ipg_max = 0x3FFF;
420 		sc->mge_rx_ipg_max = 0x3FFF;
421 		sc->mge_tx_arb_cfg = 0x000000FF;
422 		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
423 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
424 	}
425 	if (d == MV_DEV_88RC8180)
426 		sc->mge_intr_cnt = 1;
427 	else
428 		sc->mge_intr_cnt = 2;
429 
430 	if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
431 		sc->mge_hw_csum = 0;
432 	else
433 		sc->mge_hw_csum = 1;
434 }
435 
436 static void
437 mge_set_mac_address(struct mge_softc *sc)
438 {
439 	char *if_mac;
440 	uint32_t mac_l, mac_h;
441 
442 	MGE_GLOBAL_LOCK_ASSERT(sc);
443 
444 	if_mac = (char *)IF_LLADDR(sc->ifp);
445 
446 	mac_l = (if_mac[4] << 8) | (if_mac[5]);
447 	mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
448 	    (if_mac[2] << 8) | (if_mac[3] << 0);
449 
450 	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
451 	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
452 
453 	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
454 }
455 
456 static void
457 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
458 {
459 	uint32_t reg_idx, reg_off, reg_val, i;
460 
461 	last_byte &= 0xf;
462 	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
463 	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
464 	reg_val = (1 | (queue << 1)) << reg_off;
465 
466 	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
467 		if (i == reg_idx)
468 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
469 		else
470 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
471 	}
472 }
473 
474 static void
475 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
476 {
477 	uint32_t port_config;
478 	uint32_t reg_val, i;
479 
480 	/* Enable or disable promiscuous mode as needed */
481 	if (sc->ifp->if_flags & IFF_PROMISC) {
482 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
483 		port_config |= PORT_CONFIG_UPM;
484 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
485 
486 		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
487 		   (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
488 
489 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
490 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
491 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
492 		}
493 
494 		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
495 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
496 
497 	} else {
498 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
499 		port_config &= ~PORT_CONFIG_UPM;
500 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
501 
502 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
503 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
504 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
505 		}
506 
507 		mge_set_mac_address(sc);
508 	}
509 }
510 
511 static void
512 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
513 {
514 	u_int32_t *paddr;
515 
516 	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
517 	paddr = arg;
518 
519 	*paddr = segs->ds_addr;
520 }
521 
522 static int
523 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
524     bus_addr_t *paddr)
525 {
526 	struct mbuf *new_mbuf;
527 	bus_dma_segment_t seg[1];
528 	int error;
529 	int nsegs;
530 
531 	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
532 
533 	new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
534 	if (new_mbuf == NULL)
535 		return (ENOBUFS);
536 	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
537 
538 	if (*mbufp) {
539 		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
540 		bus_dmamap_unload(tag, map);
541 	}
542 
543 	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
544 	    BUS_DMA_NOWAIT);
545 	KASSERT(nsegs == 1, ("Too many segments returned!"));
546 	if (nsegs != 1 || error)
547 		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
548 
549 	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
550 
551 	(*mbufp) = new_mbuf;
552 	(*paddr) = seg->ds_addr;
553 	return (0);
554 }
555 
556 static int
557 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
558     uint32_t size, bus_dma_tag_t *buffer_tag)
559 {
560 	struct mge_desc_wrapper *dw;
561 	bus_addr_t desc_paddr;
562 	int i, error;
563 
564 	desc_paddr = 0;
565 	for (i = size - 1; i >= 0; i--) {
566 		dw = &(tab[i]);
567 		error = bus_dmamem_alloc(sc->mge_desc_dtag,
568 		    (void**)&(dw->mge_desc),
569 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
570 		    &(dw->desc_dmap));
571 
572 		if (error) {
573 			if_printf(sc->ifp, "failed to allocate DMA memory\n");
574 			dw->mge_desc = NULL;
575 			return (ENXIO);
576 		}
577 
578 		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
579 		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
580 		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
581 
582 		if (error) {
583 			if_printf(sc->ifp, "can't load descriptor\n");
584 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
585 			    dw->desc_dmap);
586 			dw->mge_desc = NULL;
587 			return (ENXIO);
588 		}
589 
590 		/* Chain descriptors */
591 		dw->mge_desc->next_desc = desc_paddr;
592 		desc_paddr = dw->mge_desc_paddr;
593 	}
594 	tab[size - 1].mge_desc->next_desc = desc_paddr;
595 
596 	/* Allocate a busdma tag for mbufs. */
597 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
598 	    1, 0,				/* alignment, boundary */
599 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
600 	    BUS_SPACE_MAXADDR,			/* highaddr */
601 	    NULL, NULL,				/* filtfunc, filtfuncarg */
602 	    MCLBYTES, 1,			/* maxsize, nsegments */
603 	    MCLBYTES, 0,			/* maxsegsz, flags */
604 	    NULL, NULL,				/* lockfunc, lockfuncarg */
605 	    buffer_tag);			/* dmat */
606 	if (error) {
607 		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
608 		return (ENXIO);
609 	}
610 
611 	/* Create TX busdma maps */
612 	for (i = 0; i < size; i++) {
613 		dw = &(tab[i]);
614 		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
615 		if (error) {
616 			if_printf(sc->ifp, "failed to create map for mbuf\n");
617 			return (ENXIO);
618 		}
619 
620 		dw->buffer = (struct mbuf*)NULL;
621 		dw->mge_desc->buffer = (bus_addr_t)NULL;
622 	}
623 
624 	return (0);
625 }
626 
627 static int
628 mge_allocate_dma(struct mge_softc *sc)
629 {
630 	struct mge_desc_wrapper *dw;
631 	int i;
632 
633 	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
634 	bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
635 	    16, 0,				/* alignment, boundary */
636 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
637 	    BUS_SPACE_MAXADDR,			/* highaddr */
638 	    NULL, NULL,				/* filtfunc, filtfuncarg */
639 	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
640 	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
641 	    NULL, NULL,				/* lockfunc, lockfuncarg */
642 	    &sc->mge_desc_dtag);		/* dmat */
643 
644 
645 	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
646 	    &sc->mge_tx_dtag);
647 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
648 	    &sc->mge_rx_dtag);
649 
650 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
651 		dw = &(sc->mge_rx_desc[i]);
652 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
653 		    &dw->mge_desc->buffer);
654 	}
655 
656 	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
657 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
658 
659 	return (0);
660 }
661 
662 static void
663 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
664     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
665 {
666 	struct mge_desc_wrapper *dw;
667 	int i;
668 
669 	for (i = 0; i < size; i++) {
670 		/* Free RX mbuf */
671 		dw = &(tab[i]);
672 
673 		if (dw->buffer_dmap) {
674 			if (free_mbufs) {
675 				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
676 				    BUS_DMASYNC_POSTREAD);
677 				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
678 			}
679 			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
680 			if (free_mbufs)
681 				m_freem(dw->buffer);
682 		}
683 		/* Free RX descriptors */
684 		if (dw->desc_dmap) {
685 			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
686 			    BUS_DMASYNC_POSTREAD);
687 			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
688 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
689 			    dw->desc_dmap);
690 		}
691 	}
692 }
693 
694 static void
695 mge_free_dma(struct mge_softc *sc)
696 {
697 
698 	/* Free descriptors and mbufs */
699 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
700 	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
701 
702 	/* Destroy mbuf dma tag */
703 	bus_dma_tag_destroy(sc->mge_tx_dtag);
704 	bus_dma_tag_destroy(sc->mge_rx_dtag);
705 	/* Destroy descriptors tag */
706 	bus_dma_tag_destroy(sc->mge_desc_dtag);
707 }
708 
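/*
 * Rebuild the RX descriptor ring and re-enable the RX queue; called after an
 * RX resource error is signalled.
 */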
709 static void
710 mge_reinit_rx(struct mge_softc *sc)
711 {
712 	struct mge_desc_wrapper *dw;
713 	int i;
714 
715 	MGE_RECEIVE_LOCK_ASSERT(sc);
716 
717 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
718 
719 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
720 	    &sc->mge_rx_dtag);
721 
722 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
723 		dw = &(sc->mge_rx_desc[i]);
724 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
725 		&dw->mge_desc->buffer);
726 	}
727 
728 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
729 	sc->rx_desc_curr = 0;
730 
731 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
732 	    sc->rx_desc_start);
733 
734 	/* Enable RX queue */
735 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
736 }
737 
738 #ifdef DEVICE_POLLING
739 static poll_handler_t mge_poll;
740 
741 static int
742 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
743 {
744 	struct mge_softc *sc = ifp->if_softc;
745 	uint32_t int_cause, int_cause_ext;
746 	int rx_npkts = 0;
747 
748 	MGE_RECEIVE_LOCK(sc);
749 
750 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
751 		MGE_RECEIVE_UNLOCK(sc);
752 		return (rx_npkts);
753 	}
754 
755 	if (cmd == POLL_AND_CHECK_STATUS) {
756 		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
757 		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
758 
759 		/* Check for resource error */
760 		if (int_cause & MGE_PORT_INT_RXERRQ0)
761 			mge_reinit_rx(sc);
762 
763 		if (int_cause || int_cause_ext) {
764 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
765 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
766 		}
767 	}
768 
769 
770 	rx_npkts = mge_intr_rx_locked(sc, count);
771 
772 	MGE_RECEIVE_UNLOCK(sc);
773 	MGE_TRANSMIT_LOCK(sc);
774 	mge_intr_tx_locked(sc);
775 	MGE_TRANSMIT_UNLOCK(sc);
776 	return (rx_npkts);
777 }
778 #endif /* DEVICE_POLLING */
779 
780 static int
781 mge_attach(device_t dev)
782 {
783 	struct mge_softc *sc;
784 	struct mii_softc *miisc;
785 	struct ifnet *ifp;
786 	uint8_t hwaddr[ETHER_ADDR_LEN];
787 	int i, error, phy;
788 
789 	sc = device_get_softc(dev);
790 	sc->dev = dev;
791 	sc->node = ofw_bus_get_node(dev);
792 	phy = 0;
793 
794 	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) == 0) {
795 		device_printf(dev, "PHY%i attached, phy_sc points to %s\n", phy,
796 		    device_get_nameunit(sc->phy_sc->dev));
797 		sc->phy_attached = 1;
798 	} else {
799 		device_printf(dev, "PHY not attached.\n");
800 		sc->phy_attached = 0;
801 		sc->phy_sc = sc;
802 	}
803 
804 	if (fdt_find_compatible(sc->node, "mrvl,sw", 1) != 0) {
805 		device_printf(dev, "Switch attached.\n");
806 		sc->switch_attached = 1;
807 		/* Note switch presence in a variable shared across mge instances. */
808 		switch_attached = 1;
809 	} else {
810 		sc->switch_attached = 0;
811 	}
812 
813 	if (device_get_unit(dev) == 0) {
814 		sx_init(&sx_smi, "mge_tick() SMI access threads interlock");
815 	}
816 
817 	/* Set chip version-dependent parameters */
818 	mge_ver_params(sc);
819 
820 	/* Initialize mutexes */
821 	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
822 	    MTX_DEF);
823 	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
824 	    MTX_DEF);
825 
826 	/* Allocate IO and IRQ resources */
827 	error = bus_alloc_resources(dev, res_spec, sc->res);
828 	if (error) {
829 		device_printf(dev, "could not allocate resources\n");
830 		mge_detach(dev);
831 		return (ENXIO);
832 	}
833 
834 	/* Allocate DMA, buffers, buffer descriptors */
835 	error = mge_allocate_dma(sc);
836 	if (error) {
837 		mge_detach(dev);
838 		return (ENXIO);
839 	}
840 
841 	sc->tx_desc_curr = 0;
842 	sc->rx_desc_curr = 0;
843 	sc->tx_desc_used_idx = 0;
844 	sc->tx_desc_used_count = 0;
845 
846 	/* Configure defaults for interrupts coalescing */
847 	sc->rx_ic_time = 768;
848 	sc->tx_ic_time = 768;
849 	mge_add_sysctls(sc);
850 
851 	/* Allocate network interface */
852 	ifp = sc->ifp = if_alloc(IFT_ETHER);
853 	if (ifp == NULL) {
854 		device_printf(dev, "if_alloc() failed\n");
855 		mge_detach(dev);
856 		return (ENOMEM);
857 	}
858 
859 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
860 	ifp->if_softc = sc;
861 	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
862 	ifp->if_capabilities = IFCAP_VLAN_MTU;
863 	if (sc->mge_hw_csum) {
864 		ifp->if_capabilities |= IFCAP_HWCSUM;
865 		ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
866 	}
867 	ifp->if_capenable = ifp->if_capabilities;
868 
869 #ifdef DEVICE_POLLING
870 	/* Advertise that polling is supported */
871 	ifp->if_capabilities |= IFCAP_POLLING;
872 #endif
873 
874 	ifp->if_init = mge_init;
875 	ifp->if_start = mge_start;
876 	ifp->if_ioctl = mge_ioctl;
877 
878 	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
879 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
880 	IFQ_SET_READY(&ifp->if_snd);
881 
882 	mge_get_mac_address(sc, hwaddr);
883 	ether_ifattach(ifp, hwaddr);
884 	callout_init(&sc->wd_callout, 1);
885 
886 	/* Attach PHY(s) */
887 	if (sc->phy_attached) {
888 		error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
889 		    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
890 		if (error) {
891 			device_printf(dev, "MII failed to find PHY\n");
892 			if_free(ifp);
893 			sc->ifp = NULL;
894 			mge_detach(dev);
895 			return (error);
896 		}
897 		sc->mii = device_get_softc(sc->miibus);
898 
899 		/* Tell the MAC where to find the PHY so autoneg works */
900 		miisc = LIST_FIRST(&sc->mii->mii_phys);
901 		MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
902 	} else {
903 		/* no PHY, so use hard-coded values */
904 		ifmedia_init(&sc->mge_ifmedia, 0,
905 		    mge_ifmedia_upd,
906 		    mge_ifmedia_sts);
907 		ifmedia_add(&sc->mge_ifmedia,
908 		    IFM_ETHER | IFM_1000_T | IFM_FDX,
909 		    0, NULL);
910 		ifmedia_set(&sc->mge_ifmedia,
911 		    IFM_ETHER | IFM_1000_T | IFM_FDX);
912 	}
913 
914 	/* Attach interrupt handlers */
915 	/* TODO: review flags; in particular, consider marking RX as INTR_ENTROPY. */
916 	for (i = 1; i <= sc->mge_intr_cnt; ++i) {
917 		error = bus_setup_intr(dev, sc->res[i],
918 		    INTR_TYPE_NET | INTR_MPSAFE,
919 		    NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
920 		    sc, &sc->ih_cookie[i - 1]);
921 		if (error) {
922 			device_printf(dev, "could not setup %s\n",
923 			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
924 			mge_detach(dev);
925 			return (error);
926 		}
927 	}
928 
929 	if (sc->switch_attached) {
930 		MGE_WRITE(sc, MGE_REG_PHYDEV, MGE_SWITCH_PHYDEV);
931 		device_add_child(dev, "mdio", -1);
932 		bus_generic_attach(dev);
933 	}
934 
935 	return (0);
936 }
937 
938 static int
939 mge_detach(device_t dev)
940 {
941 	struct mge_softc *sc;
942 	int error,i;
943 
944 	sc = device_get_softc(dev);
945 
946 	/* Stop controller and free TX queue */
947 	if (sc->ifp)
948 		mge_shutdown(dev);
949 
950 	/* Wait for stopping ticks */
951 	callout_drain(&sc->wd_callout);
952 
953 	/* Stop and release all interrupts */
954 	for (i = 0; i < sc->mge_intr_cnt; ++i) {
955 		if (!sc->ih_cookie[i])
956 			continue;
957 
958 		error = bus_teardown_intr(dev, sc->res[1 + i],
959 		    sc->ih_cookie[i]);
960 		if (error)
961 			device_printf(dev, "could not release %s\n",
962 			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
963 	}
964 
965 	/* Detach network interface */
966 	if (sc->ifp) {
967 		ether_ifdetach(sc->ifp);
968 		if_free(sc->ifp);
969 	}
970 
971 	/* Free DMA resources */
972 	mge_free_dma(sc);
973 
974 	/* Free IO memory handler */
975 	bus_release_resources(dev, res_spec, sc->res);
976 
977 	/* Destroy mutexes */
978 	mtx_destroy(&sc->receive_lock);
979 	mtx_destroy(&sc->transmit_lock);
980 
981 	if (device_get_unit(dev) == 0)
982 		sx_destroy(&sx_smi);
983 
984 	return (0);
985 }
986 
987 static void
988 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
989 {
990 	struct mge_softc *sc;
991 	struct mii_data *mii;
992 
993 	sc = ifp->if_softc;
994 	MGE_GLOBAL_LOCK(sc);
995 
996 	if (!sc->phy_attached) {
997 		ifmr->ifm_active = IFM_1000_T | IFM_FDX | IFM_ETHER;
998 		ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
999 		goto out_unlock;
1000 	}
1001 
1002 	mii = sc->mii;
1003 	mii_pollstat(mii);
1004 
1005 	ifmr->ifm_active = mii->mii_media_active;
1006 	ifmr->ifm_status = mii->mii_media_status;
1007 
1008 out_unlock:
1009 	MGE_GLOBAL_UNLOCK(sc);
1010 }
1011 
1012 static uint32_t
1013 mge_set_port_serial_control(uint32_t media)
1014 {
1015 	uint32_t port_config;
1016 
1017 	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
1018 	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
1019 
1020 	if (IFM_TYPE(media) == IFM_ETHER) {
1021 		switch(IFM_SUBTYPE(media)) {
1022 			case IFM_AUTO:
1023 				break;
1024 			case IFM_1000_T:
1025 				port_config  |= (PORT_SERIAL_GMII_SPEED_1000 |
1026 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
1027 				    | PORT_SERIAL_SPEED_AUTONEG);
1028 				break;
1029 			case IFM_100_TX:
1030 				port_config  |= (PORT_SERIAL_MII_SPEED_100 |
1031 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
1032 				    | PORT_SERIAL_SPEED_AUTONEG);
1033 				break;
1034 			case IFM_10_T:
1035 				port_config  |= (PORT_SERIAL_AUTONEG |
1036 				    PORT_SERIAL_AUTONEG_FC |
1037 				    PORT_SERIAL_SPEED_AUTONEG);
1038 				break;
1039 		}
1040 		if (media & IFM_FDX)
1041 			port_config |= PORT_SERIAL_FULL_DUPLEX;
1042 	}
1043 	return (port_config);
1044 }
1045 
1046 static int
1047 mge_ifmedia_upd(struct ifnet *ifp)
1048 {
1049 	struct mge_softc *sc = ifp->if_softc;
1050 
1051 	/*
1052 	 * Do nothing for the switch case here: the link between the MGE MAC
1053 	 * and the switch MAC is hard-wired on the PCB, so changing the media
1054 	 * here would break the link.
1055 	 */
1056 	if (sc->phy_attached) {
1057 		MGE_GLOBAL_LOCK(sc);
1058 		if (ifp->if_flags & IFF_UP) {
1059 			sc->mge_media_status = sc->mii->mii_media.ifm_media;
1060 			mii_mediachg(sc->mii);
1061 
1062 			/* MGE MAC needs to be reinitialized. */
1063 			mge_init_locked(sc);
1064 
1065 		}
1066 		MGE_GLOBAL_UNLOCK(sc);
1067 	}
1068 
1069 	return (0);
1070 }
1071 
1072 static void
1073 mge_init(void *arg)
1074 {
1075 	struct mge_softc *sc;
1076 
1077 	sc = arg;
1078 	MGE_GLOBAL_LOCK(sc);
1079 
1080 	mge_init_locked(arg);
1081 
1082 	MGE_GLOBAL_UNLOCK(sc);
1083 }
1084 
1085 static void
1086 mge_init_locked(void *arg)
1087 {
1088 	struct mge_softc *sc = arg;
1089 	struct mge_desc_wrapper *dw;
1090 	volatile uint32_t reg_val;
1091 	int i, count;
1092 	uint32_t media_status;
1093 
1094 
1095 	MGE_GLOBAL_LOCK_ASSERT(sc);
1096 
1097 	/* Stop interface */
1098 	mge_stop(sc);
1099 
1100 	/* Disable interrupts */
1101 	mge_intrs_ctrl(sc, 0);
1102 
1103 	/* Set MAC address */
1104 	mge_set_mac_address(sc);
1105 
1106 	/* Setup multicast filters */
1107 	mge_setup_multicast(sc);
1108 
1109 	if (sc->mge_ver == 2) {
1110 		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
1111 		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
1112 	}
1113 
1114 	/* Initialize TX queue configuration registers */
1115 	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
1116 	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
1117 	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
1118 
1119 	/* Clear TX queue configuration registers for unused queues */
1120 	for (i = 1; i < 7; i++) {
1121 		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
1122 		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
1123 		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
1124 	}
1125 
1126 	/* Set the default MTU (sc->mge_mtu holds the MTU register offset). */
1127 	MGE_WRITE(sc, sc->mge_mtu, 0);
1128 
1129 	/* Port configuration */
1130 	MGE_WRITE(sc, MGE_PORT_CONFIG,
1131 	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
1132 	    PORT_CONFIG_ARO_RXQ(0));
1133 	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0);
1134 
1135 	/* Configure promisc mode */
1136 	mge_set_prom_mode(sc, MGE_RX_DEFAULT_QUEUE);
1137 
1138 	media_status = sc->mge_media_status;
1139 	if (sc->switch_attached) {
1140 		media_status &= ~IFM_TMASK;
1141 		media_status |= IFM_1000_T;
1142 	}
1143 
1144 	/* Setup port configuration */
1145 	reg_val = mge_set_port_serial_control(media_status);
1146 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1147 
1148 	/* Setup SDMA configuration */
1149 	MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP |
1150 	    MGE_SDMA_TX_BYTE_SWAP |
1151 	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
1152 	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
1153 
1154 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
1155 
1156 	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
1157 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
1158 	    sc->rx_desc_start);
1159 
1160 	/* Reset descriptor indexes */
1161 	sc->tx_desc_curr = 0;
1162 	sc->rx_desc_curr = 0;
1163 	sc->tx_desc_used_idx = 0;
1164 	sc->tx_desc_used_count = 0;
1165 
1166 	/* Enable RX descriptors */
1167 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
1168 		dw = &sc->mge_rx_desc[i];
1169 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1170 		dw->mge_desc->buff_size = MCLBYTES;
1171 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1172 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1173 	}
1174 
1175 	/* Enable RX queue */
1176 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
1177 
1178 	/* Enable port */
1179 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1180 	reg_val |= PORT_SERIAL_ENABLE;
1181 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1182 	count = 0x100000;
1183 	for (;;) {
1184 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1185 		if (reg_val & MGE_STATUS_LINKUP)
1186 			break;
1187 		DELAY(100);
1188 		if (--count == 0) {
1189 			if_printf(sc->ifp, "Timeout on link-up\n");
1190 			break;
1191 		}
1192 	}
1193 
1194 	/* Setup interrupts coalescing */
1195 	mge_set_rxic(sc);
1196 	mge_set_txic(sc);
1197 
1198 	/* Enable interrupts */
1199 #ifdef DEVICE_POLLING
1200 	/*
1201 	 * ...only if polling is not turned on. Disable interrupts explicitly
1202 	 * if polling is enabled.
1203 	 */
1204 	if (sc->ifp->if_capenable & IFCAP_POLLING)
1205 		mge_intrs_ctrl(sc, 0);
1206 	else
1207 #endif /* DEVICE_POLLING */
1208 	mge_intrs_ctrl(sc, 1);
1209 
1210 	/* Activate network interface */
1211 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
1212 	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1213 	sc->wd_timer = 0;
1214 
1215 	/* Schedule watchdog timeout */
1216 	if (sc->phy_attached)
1217 		callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1218 }
1219 
1220 static void
1221 mge_intr_rxtx(void *arg)
1222 {
1223 	struct mge_softc *sc;
1224 	uint32_t int_cause, int_cause_ext;
1225 
1226 	sc = arg;
1227 	MGE_GLOBAL_LOCK(sc);
1228 
1229 #ifdef DEVICE_POLLING
1230 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1231 		MGE_GLOBAL_UNLOCK(sc);
1232 		return;
1233 	}
1234 #endif
1235 
1236 	/* Get interrupt cause */
1237 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1238 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1239 
1240 	/* Check for Transmit interrupt */
1241 	if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
1242 	    MGE_PORT_INT_EXT_TXUR)) {
1243 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1244 		    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1245 		mge_intr_tx_locked(sc);
1246 	}
1247 
1248 	MGE_TRANSMIT_UNLOCK(sc);
1249 
1250 	/* Check for Receive interrupt */
1251 	mge_intr_rx_check(sc, int_cause, int_cause_ext);
1252 
1253 	MGE_RECEIVE_UNLOCK(sc);
1254 }
1255 
1256 static void
1257 mge_intr_err(void *arg)
1258 {
1259 	struct mge_softc *sc;
1260 	struct ifnet *ifp;
1261 
1262 	sc = arg;
1263 	ifp = sc->ifp;
1264 	if_printf(ifp, "%s\n", __FUNCTION__);
1265 }
1266 
1267 static void
1268 mge_intr_misc(void *arg)
1269 {
1270 	struct mge_softc *sc;
1271 	struct ifnet *ifp;
1272 
1273 	sc = arg;
1274 	ifp = sc->ifp;
1275 	if_printf(ifp, "%s\n", __FUNCTION__);
1276 }
1277 
1278 static void
1279 mge_intr_rx(void *arg) {
1280 	struct mge_softc *sc;
1281 	uint32_t int_cause, int_cause_ext;
1282 
1283 	sc = arg;
1284 	MGE_RECEIVE_LOCK(sc);
1285 
1286 #ifdef DEVICE_POLLING
1287 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1288 		MGE_RECEIVE_UNLOCK(sc);
1289 		return;
1290 	}
1291 #endif
1292 
1293 	/* Get interrupt cause */
1294 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1295 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1296 
1297 	mge_intr_rx_check(sc, int_cause, int_cause_ext);
1298 
1299 	MGE_RECEIVE_UNLOCK(sc);
1300 }
1301 
1302 static void
1303 mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
1304     uint32_t int_cause_ext)
1305 {
1306 	/* Check for resource error */
1307 	if (int_cause & MGE_PORT_INT_RXERRQ0) {
1308 		mge_reinit_rx(sc);
1309 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1310 		    ~(int_cause & MGE_PORT_INT_RXERRQ0));
1311 	}
1312 
1313 	int_cause &= MGE_PORT_INT_RXQ0;
1314 	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1315 
1316 	if (int_cause || int_cause_ext) {
1317 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1318 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1319 		mge_intr_rx_locked(sc, -1);
1320 	}
1321 }
1322 
1323 static int
1324 mge_intr_rx_locked(struct mge_softc *sc, int count)
1325 {
1326 	struct ifnet *ifp = sc->ifp;
1327 	uint32_t status;
1328 	uint16_t bufsize;
1329 	struct mge_desc_wrapper* dw;
1330 	struct mbuf *mb;
1331 	int rx_npkts = 0;
1332 
1333 	MGE_RECEIVE_LOCK_ASSERT(sc);
1334 
1335 	while (count != 0) {
1336 		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1337 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1338 		    BUS_DMASYNC_POSTREAD);
1339 
1340 		/* Get status */
1341 		status = dw->mge_desc->cmd_status;
1342 		bufsize = dw->mge_desc->buff_size;
1343 		if ((status & MGE_DMA_OWNED) != 0)
1344 			break;
1345 
1346 		if (dw->mge_desc->byte_count &&
1347 		    !(status & MGE_ERR_SUMMARY)) {
1348 
1349 			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1350 			    BUS_DMASYNC_POSTREAD);
1351 
1352 			mb = m_devget(dw->buffer->m_data,
1353 			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
1354 			    0, ifp, NULL);
1355 
1356 			if (mb == NULL)
1357 				/* Give up if no mbufs */
1358 				break;
1359 
1360 			mb->m_len -= 2;
1361 			mb->m_pkthdr.len -= 2;
1362 			mb->m_data += 2;
1363 
1364 			mb->m_pkthdr.rcvif = ifp;
1365 
1366 			mge_offload_process_frame(ifp, mb, status,
1367 			    bufsize);
1368 
1369 			MGE_RECEIVE_UNLOCK(sc);
1370 			(*ifp->if_input)(ifp, mb);
1371 			MGE_RECEIVE_LOCK(sc);
1372 			rx_npkts++;
1373 		}
1374 
1375 		dw->mge_desc->byte_count = 0;
1376 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1377 		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
1378 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1379 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1380 
1381 		if (count > 0)
1382 			count -= 1;
1383 	}
1384 
1385 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_npkts);
1386 
1387 	return (rx_npkts);
1388 }
1389 
1390 static void
1391 mge_intr_sum(void *arg)
1392 {
1393 	struct mge_softc *sc = arg;
1394 	struct ifnet *ifp;
1395 
1396 	ifp = sc->ifp;
1397 	if_printf(ifp, "%s\n", __FUNCTION__);
1398 }
1399 
1400 static void
1401 mge_intr_tx(void *arg)
1402 {
1403 	struct mge_softc *sc = arg;
1404 	uint32_t int_cause_ext;
1405 
1406 	MGE_TRANSMIT_LOCK(sc);
1407 
1408 #ifdef DEVICE_POLLING
1409 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1410 		MGE_TRANSMIT_UNLOCK(sc);
1411 		return;
1412 	}
1413 #endif
1414 
1415 	/* Ack the interrupt */
1416 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1417 	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1418 	    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1419 
1420 	mge_intr_tx_locked(sc);
1421 
1422 	MGE_TRANSMIT_UNLOCK(sc);
1423 }
1424 
1425 static void
1426 mge_intr_tx_locked(struct mge_softc *sc)
1427 {
1428 	struct ifnet *ifp = sc->ifp;
1429 	struct mge_desc_wrapper *dw;
1430 	struct mge_desc *desc;
1431 	uint32_t status;
1432 	int send = 0;
1433 
1434 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1435 
1436 	/* Disable watchdog */
1437 	sc->wd_timer = 0;
1438 
1439 	while (sc->tx_desc_used_count) {
1440 		/* Get the descriptor */
1441 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1442 		desc = dw->mge_desc;
1443 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1444 		    BUS_DMASYNC_POSTREAD);
1445 
1446 		/* Get descriptor status */
1447 		status = desc->cmd_status;
1448 
1449 		if (status & MGE_DMA_OWNED)
1450 			break;
1451 
1452 		sc->tx_desc_used_idx =
1453 			(sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
1454 		sc->tx_desc_used_count--;
1455 
1456 		/* Update collision statistics */
1457 		if (status & MGE_ERR_SUMMARY) {
1458 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1459 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1460 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1461 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16);
1462 		}
1463 
1464 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1465 		    BUS_DMASYNC_POSTWRITE);
1466 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1467 		m_freem(dw->buffer);
1468 		dw->buffer = (struct mbuf*)NULL;
1469 		send++;
1470 
1471 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1472 	}
1473 
1474 	if (send) {
1475 		/* Now send anything that was pending */
1476 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1477 		mge_start_locked(ifp);
1478 	}
1479 }
1480 static int
1481 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1482 {
1483 	struct mge_softc *sc = ifp->if_softc;
1484 	struct ifreq *ifr = (struct ifreq *)data;
1485 	int mask, error;
1486 	uint32_t flags;
1487 
1488 	error = 0;
1489 
1490 	switch (command) {
1491 	case SIOCSIFFLAGS:
1492 		MGE_GLOBAL_LOCK(sc);
1493 
1494 		if (ifp->if_flags & IFF_UP) {
1495 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1496 				flags = ifp->if_flags ^ sc->mge_if_flags;
1497 				if (flags & IFF_PROMISC)
1498 					mge_set_prom_mode(sc,
1499 					    MGE_RX_DEFAULT_QUEUE);
1500 
1501 				if (flags & IFF_ALLMULTI)
1502 					mge_setup_multicast(sc);
1503 			} else
1504 				mge_init_locked(sc);
1505 		}
1506 		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1507 			mge_stop(sc);
1508 
1509 		sc->mge_if_flags = ifp->if_flags;
1510 		MGE_GLOBAL_UNLOCK(sc);
1511 		break;
1512 	case SIOCADDMULTI:
1513 	case SIOCDELMULTI:
1514 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1515 			MGE_GLOBAL_LOCK(sc);
1516 			mge_setup_multicast(sc);
1517 			MGE_GLOBAL_UNLOCK(sc);
1518 		}
1519 		break;
1520 	case SIOCSIFCAP:
1521 		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1522 		if (mask & IFCAP_HWCSUM) {
1523 			ifp->if_capenable &= ~IFCAP_HWCSUM;
1524 			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
1525 			if (ifp->if_capenable & IFCAP_TXCSUM)
1526 				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
1527 			else
1528 				ifp->if_hwassist = 0;
1529 		}
1530 #ifdef DEVICE_POLLING
1531 		if (mask & IFCAP_POLLING) {
1532 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1533 				error = ether_poll_register(mge_poll, ifp);
1534 				if (error)
1535 					return (error);
1536 
1537 				MGE_GLOBAL_LOCK(sc);
1538 				mge_intrs_ctrl(sc, 0);
1539 				ifp->if_capenable |= IFCAP_POLLING;
1540 				MGE_GLOBAL_UNLOCK(sc);
1541 			} else {
1542 				error = ether_poll_deregister(ifp);
1543 				MGE_GLOBAL_LOCK(sc);
1544 				mge_intrs_ctrl(sc, 1);
1545 				ifp->if_capenable &= ~IFCAP_POLLING;
1546 				MGE_GLOBAL_UNLOCK(sc);
1547 			}
1548 		}
1549 #endif
1550 		break;
1551 	case SIOCGIFMEDIA: /* fall through */
1552 	case SIOCSIFMEDIA:
1553 		/*
1554 		 * Setting the media type via ioctls is *not* supported for a MAC
1555 		 * that is connected to a switch. Use etherswitchcfg instead.
1556 		 */
1557 		if (!sc->phy_attached && (command == SIOCSIFMEDIA))
1558 			return (0);
1559 		else if (!sc->phy_attached) {
1560 			error = ifmedia_ioctl(ifp, ifr, &sc->mge_ifmedia,
1561 			    command);
1562 			break;
1563 		}
1564 
1565 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1566 		    && !(ifr->ifr_media & IFM_FDX)) {
1567 			device_printf(sc->dev,
1568 			    "1000baseTX half-duplex unsupported\n");
1569 			return 0;
1570 		}
1571 		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1572 		break;
1573 	default:
1574 		error = ether_ioctl(ifp, command, data);
1575 	}
1576 	return (error);
1577 }
1578 
1579 static int
1580 mge_miibus_readreg(device_t dev, int phy, int reg)
1581 {
1582 
1583 	KASSERT(!switch_attached, ("miibus used with switch attached"));
1584 
1585 	return (mv_read_ext_phy(dev, phy, reg));
1586 }
1587 
1588 static int
1589 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1590 {
1591 
1592 	KASSERT(!switch_attached, ("miibus used with switch attached"));
1593 
1594 	mv_write_ext_phy(dev, phy, reg, value);
1595 
1596 	return (0);
1597 }
1598 
1599 static int
1600 mge_probe(device_t dev)
1601 {
1602 
1603 	if (!ofw_bus_status_okay(dev))
1604 		return (ENXIO);
1605 
1606 	if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
1607 		return (ENXIO);
1608 
1609 	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1610 	return (BUS_PROBE_DEFAULT);
1611 }
1612 
1613 static int
1614 mge_resume(device_t dev)
1615 {
1616 
1617 	device_printf(dev, "%s\n", __FUNCTION__);
1618 	return (0);
1619 }
1620 
1621 static int
1622 mge_shutdown(device_t dev)
1623 {
1624 	struct mge_softc *sc = device_get_softc(dev);
1625 
1626 	MGE_GLOBAL_LOCK(sc);
1627 
1628 #ifdef DEVICE_POLLING
1629 	if (sc->ifp->if_capenable & IFCAP_POLLING)
1630 		ether_poll_deregister(sc->ifp);
1631 #endif
1632 
1633 	mge_stop(sc);
1634 
1635 	MGE_GLOBAL_UNLOCK(sc);
1636 
1637 	return (0);
1638 }
1639 
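/*
 * Map an outgoing mbuf (single DMA fragment only) and attach it to the next
 * free TX descriptor, marking the descriptor as owned by the DMA engine.
 */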
1640 static int
1641 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1642 {
1643 	struct mge_desc_wrapper *dw = NULL;
1644 	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1645 	bus_dmamap_t mapp;
1646 	int error;
1647 	int seg, nsegs;
1648 	int desc_no;
1649 
1650 	/* Fetch unused map */
1651 	desc_no = sc->tx_desc_curr;
1652 	dw = &sc->mge_tx_desc[desc_no];
1653 	mapp = dw->buffer_dmap;
1654 
1655 	/* Create mapping in DMA memory */
1656 	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1657 	    BUS_DMA_NOWAIT);
1658 	if (error != 0) {
1659 		m_freem(m0);
1660 		return (error);
1661 	}
1662 
1663 	/* Only one segment is supported. */
1664 	if (nsegs != 1) {
1665 		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1666 		m_freem(m0);
1667 		return (-1);
1668 	}
1669 
1670 	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1671 
1672 	/* Everything is ok, now we can send buffers */
1673 	for (seg = 0; seg < nsegs; seg++) {
1674 		dw->mge_desc->byte_count = segs[seg].ds_len;
1675 		dw->mge_desc->buffer = segs[seg].ds_addr;
1676 		dw->buffer = m0;
1677 		dw->mge_desc->cmd_status = 0;
1678 		if (seg == 0)
1679 			mge_offload_setup_descriptor(sc, dw);
1680 		dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
1681 		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1682 		    MGE_DMA_OWNED;
1683 	}
1684 
1685 	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1686 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1687 
1688 	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
1689 	sc->tx_desc_used_count++;
1690 	return (0);
1691 }
1692 
1693 static void
1694 mge_tick(void *msc)
1695 {
1696 	struct mge_softc *sc = msc;
1697 
1698 	KASSERT(sc->phy_attached == 1, ("mge_tick while PHY not attached"));
1699 
1700 	MGE_GLOBAL_LOCK(sc);
1701 
1702 	/* Check for TX timeout */
1703 	mge_watchdog(sc);
1704 
1705 	mii_tick(sc->mii);
1706 
1707 	/* Check for media type change */
1708 	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
1709 		mge_ifmedia_upd(sc->ifp);
1710 
1711 	MGE_GLOBAL_UNLOCK(sc);
1712 
1713 	/* Schedule another timeout one second from now */
1714 	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1715 
1716 	return;
1717 }
1718 
1719 static void
1720 mge_watchdog(struct mge_softc *sc)
1721 {
1722 	struct ifnet *ifp;
1723 
1724 	ifp = sc->ifp;
1725 
1726 	if (sc->wd_timer == 0 || --sc->wd_timer) {
1727 		return;
1728 	}
1729 
1730 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1731 	if_printf(ifp, "watchdog timeout\n");
1732 
1733 	mge_stop(sc);
1734 	mge_init_locked(sc);
1735 }
1736 
1737 static void
1738 mge_start(struct ifnet *ifp)
1739 {
1740 	struct mge_softc *sc = ifp->if_softc;
1741 
1742 	MGE_TRANSMIT_LOCK(sc);
1743 
1744 	mge_start_locked(ifp);
1745 
1746 	MGE_TRANSMIT_UNLOCK(sc);
1747 }
1748 
1749 static void
1750 mge_start_locked(struct ifnet *ifp)
1751 {
1752 	struct mge_softc *sc;
1753 	struct mbuf *m0, *mtmp;
1754 	uint32_t reg_val, queued = 0;
1755 
1756 	sc = ifp->if_softc;
1757 
1758 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1759 
1760 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1761 	    IFF_DRV_RUNNING)
1762 		return;
1763 
1764 	for (;;) {
1765 		/* Get packet from the queue */
1766 		IF_DEQUEUE(&ifp->if_snd, m0);
1767 		if (m0 == NULL)
1768 			break;
1769 
1770 		if (m0->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP) ||
1771 		    m0->m_flags & M_VLANTAG) {
1772 			if (M_WRITABLE(m0) == 0) {
1773 				mtmp = m_dup(m0, M_NOWAIT);
1774 				m_freem(m0);
1775 				if (mtmp == NULL)
1776 					continue;
1777 				m0 = mtmp;
1778 			}
1779 		}
1780 		/* The driver supports only one DMA fragment. */
1781 		if (m0->m_next != NULL) {
1782 			mtmp = m_defrag(m0, M_NOWAIT);
1783 			if (mtmp != NULL)
1784 				m0 = mtmp;
1785 		}
1786 
1787 		/* Check for free descriptors */
1788 		if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1789 			IF_PREPEND(&ifp->if_snd, m0);
1790 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1791 			break;
1792 		}
1793 
1794 		if (mge_encap(sc, m0) != 0)
1795 			break;
1796 
1797 		queued++;
1798 		BPF_MTAP(ifp, m0);
1799 	}
1800 
1801 	if (queued) {
1802 		/* Enable transmitter and watchdog timer */
1803 		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1804 		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
1805 		sc->wd_timer = 5;
1806 	}
1807 }
1808 
1809 static void
1810 mge_stop(struct mge_softc *sc)
1811 {
1812 	struct ifnet *ifp;
1813 	volatile uint32_t reg_val, status;
1814 	struct mge_desc_wrapper *dw;
1815 	struct mge_desc *desc;
1816 	int count;
1817 
1818 	ifp = sc->ifp;
1819 
1820 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1821 		return;
1822 
1823 	/* Stop tick engine */
1824 	callout_stop(&sc->wd_callout);
1825 
1826 	/* Disable interface */
1827 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1828 	sc->wd_timer = 0;
1829 
1830 	/* Disable interrupts */
1831 	mge_intrs_ctrl(sc, 0);
1832 
1833 	/* Disable Rx and Tx */
1834 	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1835 	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1836 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1837 
1838 	/* Remove pending data from TX queue */
1839 	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1840 	    sc->tx_desc_used_count) {
1841 		/* Get the descriptor */
1842 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1843 		desc = dw->mge_desc;
1844 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1845 		    BUS_DMASYNC_POSTREAD);
1846 
1847 		/* Get descriptor status */
1848 		status = desc->cmd_status;
1849 
1850 		if (status & MGE_DMA_OWNED)
1851 			break;
1852 
1853 		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
1854 		    MGE_TX_DESC_NUM;
1855 		sc->tx_desc_used_count--;
1856 
1857 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1858 		    BUS_DMASYNC_POSTWRITE);
1859 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1860 
1861 		m_freem(dw->buffer);
1862 		dw->buffer = (struct mbuf*)NULL;
1863 	}
1864 
1865 	/* Wait for end of transmission */
1866 	count = 0x100000;
1867 	while (count--) {
1868 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1869 		if ( !(reg_val & MGE_STATUS_TX_IN_PROG) &&
1870 		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1871 			break;
1872 		DELAY(100);
1873 	}
1874 
1875 	if (count == 0)
1876 		if_printf(ifp,
1877 		    "%s: timeout while waiting for end of transmission\n",
1878 		    __FUNCTION__);
1879 
1880 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1881 	reg_val &= ~(PORT_SERIAL_ENABLE);
1882 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1883 }
1884 
1885 static int
1886 mge_suspend(device_t dev)
1887 {
1888 
1889 	device_printf(dev, "%s\n", __FUNCTION__);
1890 	return (0);
1891 }
1892 
1893 static void
1894 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1895     uint32_t status, uint16_t bufsize)
1896 {
1897 	int csum_flags = 0;
1898 
1899 	if (ifp->if_capenable & IFCAP_RXCSUM) {
1900 		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1901 			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
1902 
1903 		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1904 		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1905 		    (status & MGE_RX_L4_CSUM_OK)) {
1906 			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1907 			frame->m_pkthdr.csum_data = 0xFFFF;
1908 		}
1909 
1910 		frame->m_pkthdr.csum_flags = csum_flags;
1911 	}
1912 }
1913 
1914 static void
1915 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1916 {
1917 	struct mbuf *m0 = dw->buffer;
1918 	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1919 	int csum_flags = m0->m_pkthdr.csum_flags;
1920 	int cmd_status = 0;
1921 	struct ip *ip;
1922 	int ehlen, etype;
1923 
1924 	if (csum_flags != 0) {
1925 		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1926 			etype = ntohs(eh->evl_proto);
1927 			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1928 			cmd_status |= MGE_TX_VLAN_TAGGED;
1929 		} else {
1930 			etype = ntohs(eh->evl_encap_proto);
1931 			ehlen = ETHER_HDR_LEN;
1932 		}
1933 
1934 		if (etype != ETHERTYPE_IP) {
1935 			if_printf(sc->ifp,
1936 			    "TCP/IP Offload enabled for unsupported "
1937 			    "protocol!\n");
1938 			return;
1939 		}
1940 
1941 		ip = (struct ip *)(m0->m_data + ehlen);
1942 		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1943 		cmd_status |= MGE_TX_NOT_FRAGMENT;
1944 	}
1945 
1946 	if (csum_flags & CSUM_IP)
1947 		cmd_status |= MGE_TX_GEN_IP_CSUM;
1948 
1949 	if (csum_flags & CSUM_TCP)
1950 		cmd_status |= MGE_TX_GEN_L4_CSUM;
1951 
1952 	if (csum_flags & CSUM_UDP)
1953 		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1954 
1955 	dw->mge_desc->cmd_status |= cmd_status;
1956 }
1957 
1958 static void
1959 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1960 {
1961 
1962 	if (enable) {
1963 		MGE_WRITE(sc, MGE_PORT_INT_MASK , MGE_PORT_INT_RXQ0 |
1964 		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1965 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , MGE_PORT_INT_EXT_TXERR0 |
1966 		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1967 		    MGE_PORT_INT_EXT_TXBUF0);
1968 	} else {
1969 		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1970 		MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1971 
1972 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1973 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1974 
1975 		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1976 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
1977 	}
1978 }
1979 
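/*
 * Table-driven CRC-8 (polynomial 0x07) over a byte string; used to hash
 * multicast MAC addresses into the "other multicast" filter table.
 */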
1980 static uint8_t
1981 mge_crc8(uint8_t *data, int size)
1982 {
1983 	uint8_t crc = 0;
1984 	static const uint8_t ct[256] = {
1985 		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1986 		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1987 		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1988 		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1989 		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1990 		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1991 		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
1992 		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
1993 		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
1994 		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
1995 		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
1996 		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
1997 		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
1998 		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
1999 		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
2000 		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
2001 		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
2002 		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
2003 		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
2004 		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
2005 		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
2006 		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
2007 		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
2008 		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
2009 		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
2010 		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
2011 		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
2012 		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
2013 		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
2014 		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
2015 		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
2016 		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
2017 	};
2018 
2019 	while (size--)
2020 		crc = ct[crc ^ *(data++)];
2021 
2022 	return (crc);
2023 }
2024 
2025 struct mge_hash_maddr_ctx {
2026 	uint32_t smt[MGE_MCAST_REG_NUMBER];
2027 	uint32_t omt[MGE_MCAST_REG_NUMBER];
2028 };
2029 
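/*
 * if_foreach_llmaddr() callback: addresses of the form 01:00:5e:00:00:xx are
 * placed in the special multicast table, indexed by the last address byte;
 * all others go into the other multicast table, indexed by a CRC-8 of the
 * whole address.
 */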
2030 static u_int
2031 mge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2032 {
2033 	static const uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
2034 	struct mge_hash_maddr_ctx *ctx = arg;
2035 	static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
2036 	uint8_t *mac;
2037 	int i;
2038 
2039 	mac = LLADDR(sdl);
2040 	if (memcmp(mac, special, sizeof(special)) == 0) {
2041 		i = mac[5];
2042 		ctx->smt[i >> 2] |= v << ((i & 0x03) << 3);
2043 	} else {
2044 		i = mge_crc8(mac, ETHER_ADDR_LEN);
2045 		ctx->omt[i >> 2] |= v << ((i & 0x03) << 3);
2046 	}
2047 	return (1);
2048 }
2049 
2050 static void
2051 mge_setup_multicast(struct mge_softc *sc)
2052 {
2053 	struct mge_hash_maddr_ctx ctx;
2054 	struct ifnet *ifp = sc->ifp;
2055 	static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
2056 	int i;
2057 
2058 	if (ifp->if_flags & IFF_ALLMULTI) {
2059 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
2060 			ctx.smt[i] = ctx.omt[i] =
2061 			    (v << 24) | (v << 16) | (v << 8) | v;
2062 	} else {
2063 		memset(&ctx, 0, sizeof(ctx));
2064 		if_foreach_llmaddr(ifp, mge_hash_maddr, &ctx);
2065 	}
2066 
2067 	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
2068 		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), ctx.smt[i]);
2069 		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), ctx.omt[i]);
2070 	}
2071 }
2072 
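/*
 * Program the RX (here) and TX (below) interrupt coalescing times, clamped
 * to the version-dependent maxima, into the SDMA configuration and TX FIFO
 * urgent threshold registers.
 */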
2073 static void
2074 mge_set_rxic(struct mge_softc *sc)
2075 {
2076 	uint32_t reg;
2077 
2078 	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
2079 		sc->rx_ic_time = sc->mge_rx_ipg_max;
2080 
2081 	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
2082 	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
2083 	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
2084 	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
2085 }
2086 
2087 static void
2088 mge_set_txic(struct mge_softc *sc)
2089 {
2090 	uint32_t reg;
2091 
2092 	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
2093 		sc->tx_ic_time = sc->mge_tfut_ipg_max;
2094 
2095 	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
2096 	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
2097 	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
2098 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
2099 }
2100 
2101 static int
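/* Sysctl handler for the per-device int_coal.rx_time / tx_time knobs. */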
2102 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
2103 {
2104 	struct mge_softc *sc = (struct mge_softc *)arg1;
2105 	uint32_t time;
2106 	int error;
2107 
2108 	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
2109 	error = sysctl_handle_int(oidp, &time, 0, req);
2110 	if (error != 0)
2111 		return (error);
2112 
2113 	MGE_GLOBAL_LOCK(sc);
2114 	if (arg2 == MGE_IC_RX) {
2115 		sc->rx_ic_time = time;
2116 		mge_set_rxic(sc);
2117 	} else {
2118 		sc->tx_ic_time = time;
2119 		mge_set_txic(sc);
2120 	}
2121 	MGE_GLOBAL_UNLOCK(sc);
2122 
2123 	return (0);
2124 }
2125 
2126 static void
2127 mge_add_sysctls(struct mge_softc *sc)
2128 {
2129 	struct sysctl_ctx_list *ctx;
2130 	struct sysctl_oid_list *children;
2131 	struct sysctl_oid *tree;
2132 
2133 	ctx = device_get_sysctl_ctx(sc->dev);
2134 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2135 	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
2136 	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "MGE Interrupts coalescing");
2137 	children = SYSCTL_CHILDREN(tree);
2138 
2139 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
2140 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, MGE_IC_RX,
2141 	    mge_sysctl_ic, "I", "IC RX time threshold");
2142 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
2143 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, MGE_IC_TX,
2144 	    mge_sysctl_ic, "I", "IC TX time threshold");
2145 }
2146 
2147 static int
2148 mge_mdio_writereg(device_t dev, int phy, int reg, int value)
2149 {
2150 
2151 	mv_write_ge_smi(dev, phy, reg, value);
2152 
2153 	return (0);
2154 }
2155 
2156 
2157 static int
2158 mge_mdio_readreg(device_t dev, int phy, int reg)
2159 {
2160 	int ret;
2161 
2162 	ret = mv_read_ge_smi(dev, phy, reg);
2163 
2164 	return (ret);
2165 }
2166