xref: /freebsd/sys/dev/mge/if_mge.c (revision f8967810f5b5806c59fb6c8dbcc123dbc9256bb3)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
 * Copyright (C) 2009-2015 Semihalf
 * Copyright (C) 2015 Stormshield
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/mdio/mdio.h>

#include <dev/mge/if_mgevar.h>
#include <arm/mv/mvreg.h>
#include <arm/mv/mvvar.h>

#include "miibus_if.h"
#include "mdio_if.h"

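/* Sleep, rather than busy-wait, while polling the slow SMI engine. */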
#define	MGE_DELAY(x)	pause("SMI access sleep", (x) / tick_sbt)

static int mge_probe(device_t dev);
static int mge_attach(device_t dev);
static int mge_detach(device_t dev);
static int mge_shutdown(device_t dev);
static int mge_suspend(device_t dev);
static int mge_resume(device_t dev);

static int mge_miibus_readreg(device_t dev, int phy, int reg);
static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);

static int mge_mdio_readreg(device_t dev, int phy, int reg);
static int mge_mdio_writereg(device_t dev, int phy, int reg, int value);

static int mge_ifmedia_upd(struct ifnet *ifp);
static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

static void mge_init(void *arg);
static void mge_init_locked(void *arg);
static void mge_start(struct ifnet *ifp);
static void mge_start_locked(struct ifnet *ifp);
static void mge_watchdog(struct mge_softc *sc);
static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);

static uint32_t mge_tfut_ipg(uint32_t val, int ver);
static uint32_t mge_rx_ipg(uint32_t val, int ver);
static void mge_ver_params(struct mge_softc *sc);

static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
static void mge_intr_rxtx(void *arg);
static void mge_intr_rx(void *arg);
static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
    uint32_t int_cause_ext);
static int mge_intr_rx_locked(struct mge_softc *sc, int count);
static void mge_intr_tx(void *arg);
static void mge_intr_tx_locked(struct mge_softc *sc);
static void mge_intr_misc(void *arg);
static void mge_intr_sum(void *arg);
static void mge_intr_err(void *arg);
static void mge_stop(struct mge_softc *sc);
static void mge_tick(void *msc);
static uint32_t mge_set_port_serial_control(uint32_t media);
static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
static void mge_set_mac_address(struct mge_softc *sc);
static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
    uint8_t queue);
static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
static int mge_allocate_dma(struct mge_softc *sc);
static int mge_alloc_desc_dma(struct mge_softc *sc,
    struct mge_desc_wrapper* desc_tab, uint32_t size,
    bus_dma_tag_t *buffer_tag);
static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, bus_addr_t *paddr);
static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void mge_free_dma(struct mge_softc *sc);
static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize);
static void mge_offload_setup_descriptor(struct mge_softc *sc,
    struct mge_desc_wrapper *dw);
static uint8_t mge_crc8(uint8_t *data, int size);
static void mge_setup_multicast(struct mge_softc *sc);
static void mge_set_rxic(struct mge_softc *sc);
static void mge_set_txic(struct mge_softc *sc);
static void mge_add_sysctls(struct mge_softc *sc);
static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);

static device_method_t mge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mge_probe),
	DEVMETHOD(device_attach,	mge_attach),
	DEVMETHOD(device_detach,	mge_detach),
	DEVMETHOD(device_shutdown,	mge_shutdown),
	DEVMETHOD(device_suspend,	mge_suspend),
	DEVMETHOD(device_resume,	mge_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
	/* MDIO interface */
	DEVMETHOD(mdio_readreg,		mge_mdio_readreg),
	DEVMETHOD(mdio_writereg,	mge_mdio_writereg),
	{ 0, 0 }
};

DEFINE_CLASS_0(mge, mge_driver, mge_methods, sizeof(struct mge_softc));

static devclass_t mge_devclass;
static int switch_attached = 0;

DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
DRIVER_MODULE(mdio, mge, mdio_driver, mdio_devclass, 0, 0);
MODULE_DEPEND(mge, ether, 1, 1, 1);
MODULE_DEPEND(mge, miibus, 1, 1, 1);
MODULE_DEPEND(mge, mdio, 1, 1, 1);

static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

static struct {
	driver_intr_t *handler;
	char *description;
} mge_intrs[MGE_INTR_COUNT + 1] = {
	{ mge_intr_rxtx, "GbE aggregated interrupt" },
	{ mge_intr_rx,	"GbE receive interrupt" },
	{ mge_intr_tx,	"GbE transmit interrupt" },
	{ mge_intr_misc, "GbE misc interrupt" },
	{ mge_intr_sum,	"GbE summary interrupt" },
	{ mge_intr_err,	"GbE error interrupt" },
};

/* SMI access interlock */
static struct sx sx_smi;

static uint32_t
mv_read_ge_smi(device_t dev, int phy, int reg)
{
	uint32_t timeout;
	uint32_t ret;
	struct mge_softc *sc;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("NULL softc ptr!"));
	timeout = MGE_SMI_READ_RETRIES;

	MGE_SMI_LOCK();
	while (--timeout &&
	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
		MGE_DELAY(MGE_SMI_READ_DELAY);

	if (timeout == 0) {
		device_printf(dev, "SMI read timeout.\n");
		ret = ~0U;
		goto out;
	}

	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));

	/* Wait till finished. */
	timeout = MGE_SMI_READ_RETRIES;
	while (--timeout &&
	    !((MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_READVALID)))
		MGE_DELAY(MGE_SMI_READ_DELAY);

	if (timeout == 0) {
		device_printf(dev, "SMI read validation timeout.\n");
		ret = ~0U;
		goto out;
	}

	/* Wait for the data to update in the SMI register */
	MGE_DELAY(MGE_SMI_DELAY);
	ret = MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;

out:
	MGE_SMI_UNLOCK();
	return (ret);
}

static void
mv_write_ge_smi(device_t dev, int phy, int reg, uint32_t value)
{
	uint32_t timeout;
	struct mge_softc *sc;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("NULL softc ptr!"));

	MGE_SMI_LOCK();
	timeout = MGE_SMI_WRITE_RETRIES;
	while (--timeout &&
	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
		MGE_DELAY(MGE_SMI_WRITE_DELAY);

	if (timeout == 0) {
		device_printf(dev, "SMI write timeout.\n");
		goto out;
	}

	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
	    (value & MGE_SMI_DATA_MASK)));

out:
	MGE_SMI_UNLOCK();
}

static int
mv_read_ext_phy(device_t dev, int phy, int reg)
{
	uint32_t retries;
	struct mge_softc *sc;
	uint32_t ret;

	sc = device_get_softc(dev);

	MGE_SMI_LOCK();
	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));

	retries = MGE_SMI_READ_RETRIES;
	while (--retries &&
	    !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
		DELAY(MGE_SMI_READ_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while reading from PHY\n");

	ret = MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
	MGE_SMI_UNLOCK();

	return (ret);
}

static void
mv_write_ext_phy(device_t dev, int phy, int reg, int value)
{
	uint32_t retries;
	struct mge_softc *sc;

	sc = device_get_softc(dev);

	MGE_SMI_LOCK();
	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
	    (value & MGE_SMI_DATA_MASK)));

	retries = MGE_SMI_WRITE_RETRIES;
	while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
		DELAY(MGE_SMI_WRITE_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while writing to PHY\n");
	MGE_SMI_UNLOCK();
}

static void
mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
{
	uint32_t mac_l, mac_h;
	uint8_t lmac[6];
	int i, valid;

	/*
	 * Retrieve hw address from the device tree.
	 */
	i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
	if (i == 6) {
		valid = 0;
		for (i = 0; i < 6; i++)
			if (lmac[i] != 0) {
				valid = 1;
				break;
			}

		if (valid) {
			bcopy(lmac, addr, 6);
			return;
		}
	}

	/*
	 * Fall back -- use the currently programmed address.
	 */
	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);

	addr[0] = (mac_h & 0xff000000) >> 24;
	addr[1] = (mac_h & 0x00ff0000) >> 16;
	addr[2] = (mac_h & 0x0000ff00) >> 8;
	addr[3] = (mac_h & 0x000000ff);
	addr[4] = (mac_l & 0x0000ff00) >> 8;
	addr[5] = (mac_l & 0x000000ff);
}

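/*
 * Position the interrupt-coalescing time value within the TX FIFO urgent
 * threshold register; the width of the field differs between controller
 * versions.
 */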
static uint32_t
mge_tfut_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 4);
	case 2:
	default:
		return ((val & 0xffff) << 4);
	}
}

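/* Likewise position the RX coalescing time within the SDMA config register. */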
static uint32_t
mge_rx_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 8);
	case 2:
	default:
		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
	}
}

static void
mge_ver_params(struct mge_softc *sc)
{
	uint32_t d, r;

	soc_id(&d, &r);
	if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
	    d == MV_DEV_88F6282 ||
	    d == MV_DEV_MV78100 ||
	    d == MV_DEV_MV78100_Z0 ||
	    (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
		sc->mge_ver = 2;
		sc->mge_mtu = 0x4e8;
		sc->mge_tfut_ipg_max = 0xFFFF;
		sc->mge_rx_ipg_max = 0xFFFF;
		sc->mge_tx_arb_cfg = 0xFC0000FF;
		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	} else {
		sc->mge_ver = 1;
		sc->mge_mtu = 0x458;
		sc->mge_tfut_ipg_max = 0x3FFF;
		sc->mge_rx_ipg_max = 0x3FFF;
		sc->mge_tx_arb_cfg = 0x000000FF;
		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	}
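	/* The 88RC8180 uses a single, aggregated RX/TX interrupt line. */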
	if (d == MV_DEV_88RC8180)
		sc->mge_intr_cnt = 1;
	else
		sc->mge_intr_cnt = 2;

	if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
		sc->mge_hw_csum = 0;
	else
		sc->mge_hw_csum = 1;
}

static void
mge_set_mac_address(struct mge_softc *sc)
{
	char *if_mac;
	uint32_t mac_l, mac_h;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	if_mac = (char *)IF_LLADDR(sc->ifp);

	mac_l = (if_mac[4] << 8) | (if_mac[5]);
	mac_h = (if_mac[0] << 24) | (if_mac[1] << 16) |
	    (if_mac[2] << 8) | (if_mac[3] << 0);

	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);

	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
}

static void
mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
{
	uint32_t reg_idx, reg_off, reg_val, i;

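	/*
	 * Each unicast filter register packs four 8-bit table entries, one
	 * per value of the address's low nibble; an entry sets the pass bit
	 * (bit 0) and encodes the target RX queue in the bits above it.
	 */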
	last_byte &= 0xf;
	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
	reg_val = (1 | (queue << 1)) << reg_off;

	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
		if (i == reg_idx)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
		else
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
	}
}

static void
mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
{
	uint32_t port_config;
	uint32_t reg_val, i;

	/* Enable or disable promiscuous mode as needed */
	if (sc->ifp->if_flags & IFF_PROMISC) {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config |= PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

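		/*
		 * Replicate the "pass to default queue" entry into all four
		 * byte lanes so that every filter table slot accepts traffic.
		 */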
		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
		   (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
		}

		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);

	} else {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config &= ~PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
		}

		mge_set_mac_address(sc);
	}
}

static void
mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;

	*paddr = segs->ds_addr;
}

static int
mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    bus_addr_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error;
	int nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (nsegs != 1 || error)
		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}

static int
mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t *buffer_tag)
{
	struct mge_desc_wrapper *dw;
	bus_addr_t desc_paddr;
	int i, error;

	desc_paddr = 0;
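	/*
	 * Walk the table backwards so that each descriptor can be linked to
	 * the physical address of its successor; the assignment after the
	 * loop points the last descriptor back at the first, closing the
	 * ring.
	 */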
	for (i = size - 1; i >= 0; i--) {
		dw = &(tab[i]);
		error = bus_dmamem_alloc(sc->mge_desc_dtag,
		    (void**)&(dw->mge_desc),
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &(dw->desc_dmap));

		if (error) {
			if_printf(sc->ifp, "failed to allocate DMA memory\n");
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);

		if (error) {
			if_printf(sc->ifp, "can't load descriptor\n");
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		/* Chain descriptors */
		dw->mge_desc->next_desc = desc_paddr;
		desc_paddr = dw->mge_desc_paddr;
	}
	tab[size - 1].mge_desc->next_desc = desc_paddr;

	/* Allocate a busdma tag for mbufs. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    buffer_tag);			/* dmat */
	if (error) {
		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	/* Create TX busdma maps */
	for (i = 0; i < size; i++) {
		dw = &(tab[i]);
		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
		if (error) {
			if_printf(sc->ifp, "failed to create map for mbuf\n");
			return (ENXIO);
		}

		dw->buffer = (struct mbuf*)NULL;
		dw->mge_desc->buffer = (bus_addr_t)NULL;
	}

	return (0);
}

static int
mge_allocate_dma(struct mge_softc *sc)
{
	struct mge_desc_wrapper *dw;
	int i;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
	    16, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->mge_desc_dtag);		/* dmat */

	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
	    &sc->mge_tx_dtag);
	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;

	return (0);
}

static void
mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
{
	struct mge_desc_wrapper *dw;
	int i;

	for (i = 0; i < size; i++) {
		/* Free RX mbuf */
		dw = &(tab[i]);

		if (dw->buffer_dmap) {
			if (free_mbufs) {
				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
			}
			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
			if (free_mbufs)
				m_freem(dw->buffer);
		}
		/* Free RX descriptors */
		if (dw->desc_dmap) {
			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
		}
	}
}

static void
mge_free_dma(struct mge_softc *sc)
{

	/* Free descriptors and mbufs */
	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);

	/* Destroy mbuf dma tag */
	bus_dma_tag_destroy(sc->mge_tx_dtag);
	bus_dma_tag_destroy(sc->mge_rx_dtag);
	/* Destroy descriptors tag */
	bus_dma_tag_destroy(sc->mge_desc_dtag);
}

static void
mge_reinit_rx(struct mge_softc *sc)
{
	struct mge_desc_wrapper *dw;
	int i;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);

	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
	sc->rx_desc_curr = 0;

	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
}

#ifdef DEVICE_POLLING
static poll_handler_t mge_poll;

static int
mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct mge_softc *sc = ifp->if_softc;
	uint32_t int_cause, int_cause_ext;
	int rx_npkts = 0;

	MGE_RECEIVE_LOCK(sc);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		MGE_RECEIVE_UNLOCK(sc);
		return (rx_npkts);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

		/* Check for resource error */
		if (int_cause & MGE_PORT_INT_RXERRQ0)
			mge_reinit_rx(sc);

		if (int_cause || int_cause_ext) {
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		}
	}

	rx_npkts = mge_intr_rx_locked(sc, count);

	MGE_RECEIVE_UNLOCK(sc);
	MGE_TRANSMIT_LOCK(sc);
	mge_intr_tx_locked(sc);
	MGE_TRANSMIT_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static int
mge_attach(device_t dev)
{
	struct mge_softc *sc;
	struct mii_softc *miisc;
	struct ifnet *ifp;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	int i, error, phy;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);
	phy = 0;

	if (fdt_get_phyaddr(sc->node, sc->dev, &phy,
	    (void **)&sc->phy_sc) == 0) {
		device_printf(dev, "PHY%i attached, phy_sc points to %s\n", phy,
		    device_get_nameunit(sc->phy_sc->dev));
		sc->phy_attached = 1;
	} else {
		device_printf(dev, "PHY not attached.\n");
		sc->phy_attached = 0;
		sc->phy_sc = sc;
	}

	if (fdt_find_compatible(sc->node, "mrvl,sw", 1) != 0) {
		device_printf(dev, "Switch attached.\n");
		sc->switch_attached = 1;
		/* additional variable available across instances */
		switch_attached = 1;
	} else {
		sc->switch_attached = 0;
	}

	if (device_get_unit(dev) == 0)
		sx_init(&sx_smi, "mge_tick() SMI access threads interlock");

	/* Set chip version-dependent parameters */
	mge_ver_params(sc);

	/* Initialize mutexes */
	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
	    MTX_DEF);
	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
	    MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		mge_detach(dev);
		return (ENXIO);
	}

	/* Allocate DMA, buffers, buffer descriptors */
	error = mge_allocate_dma(sc);
	if (error) {
		mge_detach(dev);
		return (ENXIO);
	}

	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Configure defaults for interrupts coalescing */
	sc->rx_ic_time = 768;
	sc->tx_ic_time = 768;
	mge_add_sysctls(sc);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		mge_detach(dev);
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (sc->mge_hw_csum) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
	}
	ifp->if_capenable = ifp->if_capabilities;

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ifp->if_init = mge_init;
	ifp->if_start = mge_start;
	ifp->if_ioctl = mge_ioctl;

	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	mge_get_mac_address(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);
	callout_init(&sc->wd_callout, 1);

	/* Attach PHY(s) */
	if (sc->phy_attached) {
		error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
		    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
		if (error) {
			device_printf(dev, "MII failed to find PHY\n");
			if_free(ifp);
			sc->ifp = NULL;
			mge_detach(dev);
			return (error);
		}
		sc->mii = device_get_softc(sc->miibus);

		/* Tell the MAC where to find the PHY so autoneg works */
		miisc = LIST_FIRST(&sc->mii->mii_phys);
		MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
	} else {
		/* no PHY, so use hard-coded values */
		ifmedia_init(&sc->mge_ifmedia, 0,
		    mge_ifmedia_upd,
		    mge_ifmedia_sts);
		ifmedia_add(&sc->mge_ifmedia,
		    IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_set(&sc->mge_ifmedia,
		    IFM_ETHER | IFM_1000_T | IFM_FDX);
	}

	/* Attach interrupt handlers */
	/* TODO: review flags, in part. mark RX as INTR_ENTROPY ? */
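	/* sc->res[0] is the memory window; IRQ resources start at index 1. */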
	for (i = 1; i <= sc->mge_intr_cnt; ++i) {
		error = bus_setup_intr(dev, sc->res[i],
		    INTR_TYPE_NET | INTR_MPSAFE,
		    NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
		    sc, &sc->ih_cookie[i - 1]);
		if (error) {
			device_printf(dev, "could not setup %s\n",
			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
			mge_detach(dev);
			return (error);
		}
	}

	if (sc->switch_attached) {
		MGE_WRITE(sc, MGE_REG_PHYDEV, MGE_SWITCH_PHYDEV);
		device_add_child(dev, "mdio", -1);
		bus_generic_attach(dev);
	}

	return (0);
}

static int
mge_detach(device_t dev)
{
	struct mge_softc *sc;
	int error, i;

	sc = device_get_softc(dev);

	/* Stop controller and free TX queue */
	if (sc->ifp)
		mge_shutdown(dev);

	/* Wait for stopping ticks */
	callout_drain(&sc->wd_callout);

	/* Stop and release all interrupts */
	for (i = 0; i < sc->mge_intr_cnt; ++i) {
		if (!sc->ih_cookie[i])
			continue;

		error = bus_teardown_intr(dev, sc->res[1 + i],
		    sc->ih_cookie[i]);
		if (error)
			device_printf(dev, "could not release %s\n",
			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
	}

	/* Detach network interface */
	if (sc->ifp) {
		ether_ifdetach(sc->ifp);
		if_free(sc->ifp);
	}

	/* Free DMA resources */
	mge_free_dma(sc);

	/* Free IO memory handler */
	bus_release_resources(dev, res_spec, sc->res);

	/* Destroy mutexes */
	mtx_destroy(&sc->receive_lock);
	mtx_destroy(&sc->transmit_lock);

	if (device_get_unit(dev) == 0)
		sx_destroy(&sx_smi);

	return (0);
}

static void
mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	MGE_GLOBAL_LOCK(sc);

	if (!sc->phy_attached) {
		ifmr->ifm_active = IFM_1000_T | IFM_FDX | IFM_ETHER;
		ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
		goto out_unlock;
	}

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

out_unlock:
	MGE_GLOBAL_UNLOCK(sc);
}

static uint32_t
mge_set_port_serial_control(uint32_t media)
{
	uint32_t port_config;

	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);

	if (IFM_TYPE(media) == IFM_ETHER) {
		switch (IFM_SUBTYPE(media)) {
			case IFM_AUTO:
				break;
			case IFM_1000_T:
				port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
				    | PORT_SERIAL_SPEED_AUTONEG);
				break;
			case IFM_100_TX:
				port_config |= (PORT_SERIAL_MII_SPEED_100 |
				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
				    | PORT_SERIAL_SPEED_AUTONEG);
				break;
			case IFM_10_T:
				port_config |= (PORT_SERIAL_AUTONEG |
				    PORT_SERIAL_AUTONEG_FC |
				    PORT_SERIAL_SPEED_AUTONEG);
				break;
		}
		if (media & IFM_FDX)
			port_config |= PORT_SERIAL_FULL_DUPLEX;
	}
	return (port_config);
}

static int
mge_ifmedia_upd(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	/*
	 * Do not do anything for the switch here, as the media between the
	 * MGE MAC and the switch MAC is hardwired on the PCB. Changing it
	 * here would break the link.
	 */
	if (sc->phy_attached) {
		MGE_GLOBAL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			sc->mge_media_status = sc->mii->mii_media.ifm_media;
			mii_mediachg(sc->mii);

			/* MGE MAC needs to be reinitialized. */
			mge_init_locked(sc);
		}
		MGE_GLOBAL_UNLOCK(sc);
	}

	return (0);
}

static void
mge_init(void *arg)
{
	struct mge_softc *sc;

	sc = arg;
	MGE_GLOBAL_LOCK(sc);

	mge_init_locked(arg);

	MGE_GLOBAL_UNLOCK(sc);
}

static void
mge_init_locked(void *arg)
{
	struct mge_softc *sc = arg;
	struct mge_desc_wrapper *dw;
	volatile uint32_t reg_val;
	int i, count;
	uint32_t media_status;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	/* Stop interface */
	mge_stop(sc);

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Set MAC address */
	mge_set_mac_address(sc);

	/* Setup multicast filters */
	mge_setup_multicast(sc);

	if (sc->mge_ver == 2) {
		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
	}

	/* Initialize TX queue configuration registers */
	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);

	/* Clear TX queue configuration registers for unused queues */
	for (i = 1; i < 7; i++) {
		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
	}

	/* Set default MTU */
	MGE_WRITE(sc, sc->mge_mtu, 0);

	/* Port configuration */
	MGE_WRITE(sc, MGE_PORT_CONFIG,
	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
	    PORT_CONFIG_ARO_RXQ(0));
	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);

	/* Configure promisc mode */
	mge_set_prom_mode(sc, MGE_RX_DEFAULT_QUEUE);

	media_status = sc->mge_media_status;
	if (sc->switch_attached) {
		media_status &= ~IFM_TMASK;
		media_status |= IFM_1000_T;
	}

	/* Setup port configuration */
	reg_val = mge_set_port_serial_control(media_status);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);

	/* Setup SDMA configuration */
	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
	    MGE_SDMA_TX_BYTE_SWAP |
	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));

	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);

	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Reset descriptor indexes */
	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Enable RX descriptors */
	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &sc->mge_rx_desc[i];
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		dw->mge_desc->buff_size = MCLBYTES;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

	/* Enable port */
	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val |= PORT_SERIAL_ENABLE;
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
	count = 0x100000;
	for (;;) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (reg_val & MGE_STATUS_LINKUP)
			break;
		DELAY(100);
		if (--count == 0) {
			if_printf(sc->ifp, "Timeout on link-up\n");
			break;
		}
	}

	/* Setup interrupts coalescing */
	mge_set_rxic(sc);
	mge_set_txic(sc);

	/* Enable interrupts */
#ifdef DEVICE_POLLING
	/*
	 * ...only if polling is not turned on. Disable interrupts explicitly
	 * if polling is enabled.
	 */
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		mge_intrs_ctrl(sc, 0);
	else
#endif /* DEVICE_POLLING */
	mge_intrs_ctrl(sc, 1);

	/* Activate network interface */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->wd_timer = 0;

	/* Schedule watchdog timeout */
	if (sc->phy_attached)
		callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_intr_rxtx(void *arg)
{
	struct mge_softc *sc;
	uint32_t int_cause, int_cause_ext;

	sc = arg;
	MGE_GLOBAL_LOCK(sc);
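	/*
	 * The global lock takes both the transmit and receive locks; they
	 * are dropped separately below, once each path has been serviced.
	 */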

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_GLOBAL_UNLOCK(sc);
		return;
	}
#endif

	/* Get interrupt cause */
	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	/* Check for Transmit interrupt */
	if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
	    MGE_PORT_INT_EXT_TXUR)) {
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
		    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
		mge_intr_tx_locked(sc);
	}

	MGE_TRANSMIT_UNLOCK(sc);

	/* Check for Receive interrupt */
	mge_intr_rx_check(sc, int_cause, int_cause_ext);

	MGE_RECEIVE_UNLOCK(sc);
}

static void
mge_intr_err(void *arg)
{
	struct mge_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_misc(void *arg)
{
	struct mge_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_rx(void *arg)
{
	struct mge_softc *sc;
	uint32_t int_cause, int_cause_ext;

	sc = arg;
	MGE_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_RECEIVE_UNLOCK(sc);
		return;
	}
#endif

	/* Get interrupt cause */
	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	mge_intr_rx_check(sc, int_cause, int_cause_ext);

	MGE_RECEIVE_UNLOCK(sc);
}

static void
mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
    uint32_t int_cause_ext)
{
	/* Check for resource error */
	if (int_cause & MGE_PORT_INT_RXERRQ0) {
		mge_reinit_rx(sc);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
		    ~(int_cause & MGE_PORT_INT_RXERRQ0));
	}

	int_cause &= MGE_PORT_INT_RXQ0;
	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;

	if (int_cause || int_cause_ext) {
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		mge_intr_rx_locked(sc, -1);
	}
}

static int
mge_intr_rx_locked(struct mge_softc *sc, int count)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t status;
	uint16_t bufsize;
	struct mge_desc_wrapper* dw;
	struct mbuf *mb;
	int rx_npkts = 0;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	while (count != 0) {
		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get status */
		status = dw->mge_desc->cmd_status;
		bufsize = dw->mge_desc->buff_size;
		if ((status & MGE_DMA_OWNED) != 0)
			break;

		if (dw->mge_desc->byte_count &&
		    !(status & MGE_ERR_SUMMARY)) {

			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
			    BUS_DMASYNC_POSTREAD);

			mb = m_devget(dw->buffer->m_data,
			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
			    0, ifp, NULL);

			if (mb == NULL)
				/* Give up if no mbufs */
				break;

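			/*
			 * The controller prepends two bytes of padding to
			 * each received frame so that the IP header is
			 * 32-bit aligned; strip them before handing the
			 * mbuf up the stack.
			 */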
			mb->m_len -= 2;
			mb->m_pkthdr.len -= 2;
			mb->m_data += 2;

			mb->m_pkthdr.rcvif = ifp;

			mge_offload_process_frame(ifp, mb, status,
			    bufsize);

			MGE_RECEIVE_UNLOCK(sc);
			(*ifp->if_input)(ifp, mb);
			MGE_RECEIVE_LOCK(sc);
			rx_npkts++;
		}

		dw->mge_desc->byte_count = 0;
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (count > 0)
			count -= 1;
	}

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_npkts);

	return (rx_npkts);
}

static void
mge_intr_sum(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_tx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause_ext;

	MGE_TRANSMIT_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_TRANSMIT_UNLOCK(sc);
		return;
	}
#endif

	/* Ack the interrupt */
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
	    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));

	mge_intr_tx_locked(sc);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_intr_tx_locked(struct mge_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	uint32_t status;
	int send = 0;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	/* Disable watchdog */
	sc->wd_timer = 0;

	while (sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx =
		    (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		/* Update collision statistics */
		if (status & MGE_ERR_SUMMARY) {
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16);
		}

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
		send++;

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}

	if (send) {
		/* Now send anything that was pending */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		mge_start_locked(ifp);
	}
}

static int
mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error;
	uint32_t flags;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		MGE_GLOBAL_LOCK(sc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = ifp->if_flags ^ sc->mge_if_flags;
				if (flags & IFF_PROMISC)
					mge_set_prom_mode(sc,
					    MGE_RX_DEFAULT_QUEUE);

				if (flags & IFF_ALLMULTI)
					mge_setup_multicast(sc);
			} else
				mge_init_locked(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			mge_stop(sc);

		sc->mge_if_flags = ifp->if_flags;
		MGE_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			MGE_GLOBAL_LOCK(sc);
			mge_setup_multicast(sc);
			MGE_GLOBAL_UNLOCK(sc);
		}
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(mge_poll, ifp);
				if (error)
					return (error);

				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 1);
				ifp->if_capenable &= ~IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;
	case SIOCGIFMEDIA: /* fall through */
	case SIOCSIFMEDIA:
		/*
		 * Setting the media type via ioctl is *not* supported for a
		 * MAC that is connected to a switch. Use etherswitchcfg
		 * instead.
		 */
		if (!sc->phy_attached && (command == SIOCSIFMEDIA))
			return (0);
		else if (!sc->phy_attached) {
			error = ifmedia_ioctl(ifp, ifr, &sc->mge_ifmedia,
			    command);
			break;
		}

		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T &&
		    !(ifr->ifr_media & IFM_FDX)) {
			device_printf(sc->dev,
			    "1000baseTX half-duplex unsupported\n");
			return (0);
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

static int
mge_miibus_readreg(device_t dev, int phy, int reg)
{

	KASSERT(!switch_attached, ("miibus used with switch attached"));

	return (mv_read_ext_phy(dev, phy, reg));
}

static int
mge_miibus_writereg(device_t dev, int phy, int reg, int value)
{

	KASSERT(!switch_attached, ("miibus used with switch attached"));

	mv_write_ext_phy(dev, phy, reg, value);

	return (0);
}

static int
mge_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
		return (ENXIO);

	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
	return (BUS_PROBE_DEFAULT);
}

static int
mge_resume(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}

static int
mge_shutdown(device_t dev)
{
	struct mge_softc *sc = device_get_softc(dev);

	MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(sc->ifp);
#endif

	mge_stop(sc);

	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

static int
mge_encap(struct mge_softc *sc, struct mbuf *m0)
{
	struct mge_desc_wrapper *dw = NULL;
	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
	bus_dmamap_t mapp;
	int error;
	int seg, nsegs;
	int desc_no;

	/* Fetch unused map */
	desc_no = sc->tx_desc_curr;
	dw = &sc->mge_tx_desc[desc_no];
	mapp = dw->buffer_dmap;

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		m_freem(m0);
		return (error);
	}

	/* Only one segment is supported. */
	if (nsegs != 1) {
		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
		m_freem(m0);
		return (-1);
	}

	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);

	/* Everything is ok, now we can send buffers */
	for (seg = 0; seg < nsegs; seg++) {
		dw->mge_desc->byte_count = segs[seg].ds_len;
		dw->mge_desc->buffer = segs[seg].ds_addr;
		dw->buffer = m0;
		dw->mge_desc->cmd_status = 0;
		if (seg == 0)
			mge_offload_setup_descriptor(sc, dw);
		dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
		    MGE_DMA_OWNED;
	}

	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
	sc->tx_desc_used_count++;
	return (0);
}

static void
mge_tick(void *msc)
{
	struct mge_softc *sc = msc;

	KASSERT(sc->phy_attached == 1, ("mge_tick while PHY not attached"));

	MGE_GLOBAL_LOCK(sc);

	/* Check for TX timeout */
	mge_watchdog(sc);

	mii_tick(sc->mii);

	/* Check for media type change */
	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
		mge_ifmedia_upd(sc->ifp);

	MGE_GLOBAL_UNLOCK(sc);

	/* Schedule another timeout one second from now */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_watchdog(struct mge_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;

	if (sc->wd_timer == 0 || --sc->wd_timer)
		return;

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");

	mge_stop(sc);
	mge_init_locked(sc);
}

static void
mge_start(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK(sc);

	mge_start_locked(ifp);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_start_locked(struct ifnet *ifp)
{
	struct mge_softc *sc;
	struct mbuf *m0, *mtmp;
	uint32_t reg_val, queued = 0;

	sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (;;) {
		/* Get packet from the queue */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (m0->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP) ||
		    m0->m_flags & M_VLANTAG) {
			if (M_WRITABLE(m0) == 0) {
				mtmp = m_dup(m0, M_NOWAIT);
				m_freem(m0);
				if (mtmp == NULL)
					continue;
				m0 = mtmp;
			}
		}
		/* The driver supports only one DMA fragment. */
		if (m0->m_next != NULL) {
			mtmp = m_defrag(m0, M_NOWAIT);
			if (mtmp != NULL)
				m0 = mtmp;
		}

		/* Check for free descriptors */
		if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		if (mge_encap(sc, m0) != 0)
			break;

		queued++;
		BPF_MTAP(ifp, m0);
	}

	if (queued) {
		/* Enable transmitter and watchdog timer */
		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
		sc->wd_timer = 5;
	}
}

static void
mge_stop(struct mge_softc *sc)
{
	struct ifnet *ifp;
	volatile uint32_t reg_val, status;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	int count;

	ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Stop tick engine */
	callout_stop(&sc->wd_callout);

	/* Disable interface */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->wd_timer = 0;

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Disable Rx and Tx */
	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);

	/* Remove pending data from TX queue */
	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
	    sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
		    MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);

		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
	}

	/* Wait for end of transmission */
	count = 0x100000;
	while (count--) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
			break;
		DELAY(100);
	}

	if (count == 0)
		if_printf(ifp,
		    "%s: timeout while waiting for end of transmission\n",
		    __FUNCTION__);

	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val &= ~(PORT_SERIAL_ENABLE);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
}

static int
mge_suspend(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}

static void
mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize)
{
	int csum_flags = 0;

	if (ifp->if_capenable & IFCAP_RXCSUM) {
		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
		    (status & MGE_RX_L4_CSUM_OK)) {
			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			frame->m_pkthdr.csum_data = 0xFFFF;
		}

		frame->m_pkthdr.csum_flags = csum_flags;
	}
}

static void
mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
{
	struct mbuf *m0 = dw->buffer;
	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
	int csum_flags = m0->m_pkthdr.csum_flags;
	int cmd_status = 0;
	struct ip *ip;
	int ehlen, etype;

	if (csum_flags != 0) {
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			etype = ntohs(eh->evl_proto);
			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			csum_flags |= MGE_TX_VLAN_TAGGED;
		} else {
			etype = ntohs(eh->evl_encap_proto);
			ehlen = ETHER_HDR_LEN;
		}

		if (etype != ETHERTYPE_IP) {
			if_printf(sc->ifp,
			    "TCP/IP Offload enabled for unsupported "
			    "protocol!\n");
			return;
		}

		ip = (struct ip *)(m0->m_data + ehlen);
		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
		cmd_status |= MGE_TX_NOT_FRAGMENT;
	}

	if (csum_flags & CSUM_IP)
		cmd_status |= MGE_TX_GEN_IP_CSUM;

	if (csum_flags & CSUM_TCP)
		cmd_status |= MGE_TX_GEN_L4_CSUM;

	if (csum_flags & CSUM_UDP)
		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;

	dw->mge_desc->cmd_status |= cmd_status;
}

static void
mge_intrs_ctrl(struct mge_softc *sc, int enable)
{

	if (enable) {
		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
		    MGE_PORT_INT_EXT_TXBUF0);
	} else {
		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_INT_MASK, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
	}
}

static uint8_t
mge_crc8(uint8_t *data, int size)
{
	uint8_t crc = 0;
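	/* Lookup table for CRC-8 with polynomial x^8 + x^2 + x + 1 (0x07). */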
	static const uint8_t ct[256] = {
		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
	};

	while (size--)
		crc = ct[crc ^ *(data++)];

	return (crc);
}

struct mge_hash_maddr_ctx {
	uint32_t smt[MGE_MCAST_REG_NUMBER];
	uint32_t omt[MGE_MCAST_REG_NUMBER];
};

static u_int
mge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	static const uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
	struct mge_hash_maddr_ctx *ctx = arg;
	static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
	uint8_t *mac;
	int i;

	mac = LLADDR(sdl);
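	/*
	 * Addresses of the form 01:00:5E:00:00:xx go into the special
	 * multicast table, indexed directly by the last byte; all other
	 * addresses are hashed with CRC-8 into the other multicast table.
	 */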
	if (memcmp(mac, special, sizeof(special)) == 0) {
		i = mac[5];
		ctx->smt[i >> 2] |= v << ((i & 0x03) << 3);
	} else {
		i = mge_crc8(mac, ETHER_ADDR_LEN);
		ctx->omt[i >> 2] |= v << ((i & 0x03) << 3);
	}
	return (1);
}

static void
mge_setup_multicast(struct mge_softc *sc)
{
	struct mge_hash_maddr_ctx ctx;
	struct ifnet *ifp = sc->ifp;
	static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
	int i;

	if (ifp->if_flags & IFF_ALLMULTI) {
		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
			ctx.smt[i] = ctx.omt[i] =
			    (v << 24) | (v << 16) | (v << 8) | v;
	} else {
		memset(&ctx, 0, sizeof(ctx));
		if_foreach_llmaddr(ifp, mge_hash_maddr, &ctx);
	}

	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), ctx.smt[i]);
		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), ctx.omt[i]);
	}
}

static void
mge_set_rxic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
		sc->rx_ic_time = sc->mge_rx_ipg_max;

	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
}

static void
mge_set_txic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
		sc->tx_ic_time = sc->mge_tfut_ipg_max;

	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
}

static int
mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
{
	struct mge_softc *sc = (struct mge_softc *)arg1;
	uint32_t time;
	int error;

	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
	error = sysctl_handle_int(oidp, &time, 0, req);
	if (error != 0)
		return (error);

	MGE_GLOBAL_LOCK(sc);
	if (arg2 == MGE_IC_RX) {
		sc->rx_ic_time = time;
		mge_set_rxic(sc);
	} else {
		sc->tx_ic_time = time;
		mge_set_txic(sc);
	}
	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

static void
mge_add_sysctls(struct mge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "MGE Interrupts coalescing");
	children = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, MGE_IC_RX,
	    mge_sysctl_ic, "I", "IC RX time threshold");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, MGE_IC_TX,
	    mge_sysctl_ic, "I", "IC TX time threshold");
}

static int
mge_mdio_writereg(device_t dev, int phy, int reg, int value)
{

	mv_write_ge_smi(dev, phy, reg, value);

	return (0);
}

static int
mge_mdio_readreg(device_t dev, int phy, int reg)
{
	int ret;

	ret = mv_read_ge_smi(dev, phy, reg);

	return (ret);
}