xref: /freebsd/sys/dev/mge/if_mge.c (revision 22cf89c938886d14f5796fc49f9f020c23ea8eaf)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5  * Copyright (C) 2009-2015 Semihalf
6  * Copyright (C) 2015 Stormshield
7  * All rights reserved.
8  *
9  * Developed by Semihalf.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of MARVELL nor the names of contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
38 #endif
39 
40 #include <sys/cdefs.h>
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/endian.h>
44 #include <sys/mbuf.h>
45 #include <sys/lock.h>
46 #include <sys/mutex.h>
47 #include <sys/kernel.h>
48 #include <sys/module.h>
49 #include <sys/socket.h>
50 #include <sys/sysctl.h>
51 
52 #include <net/ethernet.h>
53 #include <net/bpf.h>
54 #include <net/if.h>
55 #include <net/if_arp.h>
56 #include <net/if_dl.h>
57 #include <net/if_media.h>
58 #include <net/if_types.h>
59 #include <net/if_vlan_var.h>
60 
61 #include <netinet/in_systm.h>
62 #include <netinet/in.h>
63 #include <netinet/ip.h>
64 
65 #include <sys/sockio.h>
66 #include <sys/bus.h>
67 #include <machine/bus.h>
68 #include <sys/rman.h>
69 #include <machine/resource.h>
70 
71 #include <dev/mii/mii.h>
72 #include <dev/mii/miivar.h>
73 
74 #include <dev/fdt/fdt_common.h>
75 #include <dev/ofw/ofw_bus.h>
76 #include <dev/ofw/ofw_bus_subr.h>
77 #include <dev/mdio/mdio.h>
78 
79 #include <dev/mge/if_mgevar.h>
80 #include <arm/mv/mvreg.h>
81 #include <arm/mv/mvvar.h>
82 
83 #include "miibus_if.h"
84 #include "mdio_if.h"
85 
86 #define	MGE_DELAY(x)	pause("SMI access sleep", (x) / tick_sbt)
87 
88 static int mge_probe(device_t dev);
89 static int mge_attach(device_t dev);
90 static int mge_detach(device_t dev);
91 static int mge_shutdown(device_t dev);
92 static int mge_suspend(device_t dev);
93 static int mge_resume(device_t dev);
94 
95 static int mge_miibus_readreg(device_t dev, int phy, int reg);
96 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
97 
98 static int mge_mdio_readreg(device_t dev, int phy, int reg);
99 static int mge_mdio_writereg(device_t dev, int phy, int reg, int value);
100 
101 static int mge_ifmedia_upd(if_t ifp);
102 static void mge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr);
103 
104 static void mge_init(void *arg);
105 static void mge_init_locked(void *arg);
106 static void mge_start(if_t ifp);
107 static void mge_start_locked(if_t ifp);
108 static void mge_watchdog(struct mge_softc *sc);
109 static int mge_ioctl(if_t ifp, u_long command, caddr_t data);
110 
111 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
112 static uint32_t mge_rx_ipg(uint32_t val, int ver);
113 static void mge_ver_params(struct mge_softc *sc);
114 
115 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
116 static void mge_intr_rxtx(void *arg);
117 static void mge_intr_rx(void *arg);
118 static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
119     uint32_t int_cause_ext);
120 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
121 static void mge_intr_tx(void *arg);
122 static void mge_intr_tx_locked(struct mge_softc *sc);
123 static void mge_intr_misc(void *arg);
124 static void mge_intr_sum(void *arg);
125 static void mge_intr_err(void *arg);
126 static void mge_stop(struct mge_softc *sc);
127 static void mge_tick(void *msc);
128 static uint32_t mge_set_port_serial_control(uint32_t media);
129 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
130 static void mge_set_mac_address(struct mge_softc *sc);
131 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
132     uint8_t queue);
133 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
134 static int mge_allocate_dma(struct mge_softc *sc);
135 static int mge_alloc_desc_dma(struct mge_softc *sc,
136     struct mge_desc_wrapper* desc_tab, uint32_t size,
137     bus_dma_tag_t *buffer_tag);
138 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
139     struct mbuf **mbufp, bus_addr_t *paddr);
140 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
141     int error);
142 static void mge_free_dma(struct mge_softc *sc);
143 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
144     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
145 static void mge_offload_process_frame(if_t ifp, struct mbuf *frame,
146     uint32_t status, uint16_t bufsize);
147 static void mge_offload_setup_descriptor(struct mge_softc *sc,
148     struct mge_desc_wrapper *dw);
149 static uint8_t mge_crc8(uint8_t *data, int size);
150 static void mge_setup_multicast(struct mge_softc *sc);
151 static void mge_set_rxic(struct mge_softc *sc);
152 static void mge_set_txic(struct mge_softc *sc);
153 static void mge_add_sysctls(struct mge_softc *sc);
154 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
155 
156 static device_method_t mge_methods[] = {
157 	/* Device interface */
158 	DEVMETHOD(device_probe,		mge_probe),
159 	DEVMETHOD(device_attach,	mge_attach),
160 	DEVMETHOD(device_detach,	mge_detach),
161 	DEVMETHOD(device_shutdown,	mge_shutdown),
162 	DEVMETHOD(device_suspend,	mge_suspend),
163 	DEVMETHOD(device_resume,	mge_resume),
164 	/* MII interface */
165 	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
166 	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
167 	/* MDIO interface */
168 	DEVMETHOD(mdio_readreg,		mge_mdio_readreg),
169 	DEVMETHOD(mdio_writereg,	mge_mdio_writereg),
170 	{ 0, 0 }
171 };
172 
173 DEFINE_CLASS_0(mge, mge_driver, mge_methods, sizeof(struct mge_softc));
174 
175 static int switch_attached = 0;
176 
177 DRIVER_MODULE(mge, simplebus, mge_driver, 0, 0);
178 DRIVER_MODULE(miibus, mge, miibus_driver, 0, 0);
179 DRIVER_MODULE(mdio, mge, mdio_driver, 0, 0);
180 MODULE_DEPEND(mge, ether, 1, 1, 1);
181 MODULE_DEPEND(mge, miibus, 1, 1, 1);
182 MODULE_DEPEND(mge, mdio, 1, 1, 1);
183 
184 static struct resource_spec res_spec[] = {
185 	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
186 	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
187 	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
188 	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
189 	{ -1, 0 }
190 };
191 
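/*
 * Interrupt handler dispatch table.  Entry 0 is the aggregated RX/TX
 * handler used when the controller exposes a single combined IRQ
 * (mge_intr_cnt == 1); the remaining entries correspond to the separate
 * per-cause IRQ lines, of which mge_attach() wires up the first
 * mge_intr_cnt.
 */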
192 static struct {
193 	driver_intr_t *handler;
194 	char *description;
195 } mge_intrs[MGE_INTR_COUNT + 1] = {
196 	{ mge_intr_rxtx,	"GbE aggregated interrupt" },
197 	{ mge_intr_rx,	"GbE receive interrupt" },
198 	{ mge_intr_tx,	"GbE transmit interrupt" },
199 	{ mge_intr_misc,	"GbE misc interrupt" },
200 	{ mge_intr_sum,	"GbE summary interrupt" },
201 	{ mge_intr_err,	"GbE error interrupt" },
202 };
203 
204 /* SMI access interlock; global, as all mge ports share one SMI unit */
205 static struct sx sx_smi;
206 
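/*
 * SMI (MDIO) access goes through a single 32-bit SMI register: the
 * opcode, the register address (bits 25:21 here) and the PHY address
 * (bits 20:16) are written together, the 16-bit data travels in the
 * low half, and MGE_SMI_BUSY/MGE_SMI_READVALID signal completion.
 */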
207 static uint32_t
208 mv_read_ge_smi(device_t dev, int phy, int reg)
209 {
210 	uint32_t timeout;
211 	uint32_t ret;
212 	struct mge_softc *sc;
213 
214 	sc = device_get_softc(dev);
215 	KASSERT(sc != NULL, ("NULL softc ptr!"));
216 	timeout = MGE_SMI_WRITE_RETRIES;
217 
218 	MGE_SMI_LOCK();
219 	while (--timeout &&
220 	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
221 		MGE_DELAY(MGE_SMI_WRITE_DELAY);
222 
223 	if (timeout == 0) {
224 		device_printf(dev, "SMI read timeout.\n");
225 		ret = ~0U;
226 		goto out;
227 	}
228 
229 	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
230 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
231 
232 	/* Wait till finished. */
233 	timeout = MGE_SMI_WRITE_RETRIES;
234 	while (--timeout &&
235 	    !((MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_READVALID)))
236 		MGE_DELAY(MGE_SMI_WRITE_DELAY);
237 
238 	if (timeout == 0) {
239 		device_printf(dev, "SMI read validation timeout.\n");
240 		ret = ~0U;
241 		goto out;
242 	}
243 
244 	/* Wait for the data to update in the SMI register */
245 	MGE_DELAY(MGE_SMI_DELAY);
246 	ret = MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
247 
248 out:
249 	MGE_SMI_UNLOCK();
250 	return (ret);
251 
252 }
253 
254 static void
255 mv_write_ge_smi(device_t dev, int phy, int reg, uint32_t value)
256 {
257 	uint32_t timeout;
258 	struct mge_softc *sc;
259 
260 	sc = device_get_softc(dev);
261 	KASSERT(sc != NULL, ("NULL softc ptr!"));
262 
263 	MGE_SMI_LOCK();
264 	timeout = MGE_SMI_READ_RETRIES;
265 	while (--timeout &&
266 	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
267 		MGE_DELAY(MGE_SMI_READ_DELAY);
268 
269 	if (timeout == 0) {
270 		device_printf(dev, "SMI write timeout.\n");
271 		goto out;
272 	}
273 
274 	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
275 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
276 	    (value & MGE_SMI_DATA_MASK)));
277 
278 out:
279 	MGE_SMI_UNLOCK();
280 }
281 
282 static int
283 mv_read_ext_phy(device_t dev, int phy, int reg)
284 {
285 	uint32_t retries;
286 	struct mge_softc *sc;
287 	uint32_t ret;
288 
289 	sc = device_get_softc(dev);
290 
291 	MGE_SMI_LOCK();
292 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
293 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
294 
295 	retries = MGE_SMI_READ_RETRIES;
296 	while (--retries &&
297 	    !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
298 		DELAY(MGE_SMI_READ_DELAY);
299 
300 	if (retries == 0)
301 		device_printf(dev, "Timeout while reading from PHY\n");
302 
303 	ret = MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
304 	MGE_SMI_UNLOCK();
305 
306 	return (ret);
307 }
308 
309 static void
310 mv_write_ext_phy(device_t dev, int phy, int reg, int value)
311 {
312 	uint32_t retries;
313 	struct mge_softc *sc;
314 
315 	sc = device_get_softc(dev);
316 
317 	MGE_SMI_LOCK();
318 	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
319 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
320 	    (value & MGE_SMI_DATA_MASK)));
321 
322 	retries = MGE_SMI_WRITE_RETRIES;
323 	while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
324 		DELAY(MGE_SMI_WRITE_DELAY);
325 
326 	if (retries == 0)
327 		device_printf(dev, "Timeout while writing to PHY\n");
328 	MGE_SMI_UNLOCK();
329 }
330 
331 static void
332 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
333 {
334 	uint32_t mac_l, mac_h;
335 	uint8_t lmac[6];
336 	int i, valid;
337 
338 	/*
339 	 * Retrieve hw address from the device tree.
340 	 */
341 	i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
342 	if (i == 6) {
343 		valid = 0;
344 		for (i = 0; i < 6; i++)
345 			if (lmac[i] != 0) {
346 				valid = 1;
347 				break;
348 			}
349 
350 		if (valid) {
351 			bcopy(lmac, addr, 6);
352 			return;
353 		}
354 	}
355 
356 	/*
357 	 * Fall back -- use the currently programmed address.
358 	 */
359 	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
360 	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
361 
362 	addr[0] = (mac_h & 0xff000000) >> 24;
363 	addr[1] = (mac_h & 0x00ff0000) >> 16;
364 	addr[2] = (mac_h & 0x0000ff00) >> 8;
365 	addr[3] = (mac_h & 0x000000ff);
366 	addr[4] = (mac_l & 0x0000ff00) >> 8;
367 	addr[5] = (mac_l & 0x000000ff);
368 }
369 
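/*
 * Pack an interrupt coalescing time into the version-specific register
 * field: v2 controllers use a full 16-bit TX FIFO urgent threshold,
 * and the v2 RX IPG value is split across non-contiguous bits of
 * MGE_SDMA_CONFIG.
 */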
370 static uint32_t
371 mge_tfut_ipg(uint32_t val, int ver)
372 {
373 
374 	switch (ver) {
375 	case 1:
376 		return ((val & 0x3fff) << 4);
377 	case 2:
378 	default:
379 		return ((val & 0xffff) << 4);
380 	}
381 }
382 
383 static uint32_t
384 mge_rx_ipg(uint32_t val, int ver)
385 {
386 
387 	switch (ver) {
388 	case 1:
389 		return ((val & 0x3fff) << 8);
390 	case 2:
391 	default:
392 		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
393 	}
394 }
395 
396 static void
397 mge_ver_params(struct mge_softc *sc)
398 {
399 	uint32_t d, r;
400 
401 	soc_id(&d, &r);
402 	if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
403 	    d == MV_DEV_88F6282 ||
404 	    d == MV_DEV_MV78100 ||
405 	    d == MV_DEV_MV78100_Z0 ||
406 	    (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
407 		sc->mge_ver = 2;
408 		sc->mge_mtu = 0x4e8;
409 		sc->mge_tfut_ipg_max = 0xFFFF;
410 		sc->mge_rx_ipg_max = 0xFFFF;
411 		sc->mge_tx_arb_cfg = 0xFC0000FF;
412 		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
413 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
414 	} else {
415 		sc->mge_ver = 1;
416 		sc->mge_mtu = 0x458;
417 		sc->mge_tfut_ipg_max = 0x3FFF;
418 		sc->mge_rx_ipg_max = 0x3FFF;
419 		sc->mge_tx_arb_cfg = 0x000000FF;
420 		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
421 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
422 	}
423 	if (d == MV_DEV_88RC8180)
424 		sc->mge_intr_cnt = 1;
425 	else
426 		sc->mge_intr_cnt = 2;
427 
428 	if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
429 		sc->mge_hw_csum = 0;
430 	else
431 		sc->mge_hw_csum = 1;
432 }
433 
434 static void
435 mge_set_mac_address(struct mge_softc *sc)
436 {
437 	char *if_mac;
438 	uint32_t mac_l, mac_h;
439 
440 	MGE_GLOBAL_LOCK_ASSERT(sc);
441 
442 	if_mac = (char *)if_getlladdr(sc->ifp);
443 
444 	mac_l = (if_mac[4] << 8) | (if_mac[5]);
445 	mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
446 	    (if_mac[2] << 8) | (if_mac[3] << 0);
447 
448 	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
449 	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
450 
451 	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
452 }
453 
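/*
 * The unicast DA filter is a table with one byte-wide entry for every
 * value of the MAC address' low nibble; an entry holds a pass bit and
 * the target RX queue (queue << 1), so only the slot matching our own
 * address is enabled here and all others are cleared.
 */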
454 static void
455 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
456 {
457 	uint32_t reg_idx, reg_off, reg_val, i;
458 
459 	last_byte &= 0xf;
460 	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
461 	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
462 	reg_val = (1 | (queue << 1)) << reg_off;
463 
464 	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
465 		if (i == reg_idx)
466 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
467 		else
468 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
469 	}
470 }
471 
472 static void
473 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
474 {
475 	uint32_t port_config;
476 	uint32_t reg_val, i;
477 
478 	/* Enable or disable promiscuous mode as needed */
479 	if (if_getflags(sc->ifp) & IFF_PROMISC) {
480 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
481 		port_config |= PORT_CONFIG_UPM;
482 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
483 
484 		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
485 		   (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
486 
487 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
488 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
489 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
490 		}
491 
492 		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
493 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
494 
495 	} else {
496 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
497 		port_config &= ~PORT_CONFIG_UPM;
498 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
499 
500 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
501 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
502 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
503 		}
504 
505 		mge_set_mac_address(sc);
506 	}
507 }
508 
509 static void
510 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
511 {
512 	uint32_t *paddr;
513 
514 	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
515 	paddr = arg;
516 
517 	*paddr = segs->ds_addr;
518 }
519 
520 static int
521 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
522     bus_addr_t *paddr)
523 {
524 	struct mbuf *new_mbuf;
525 	bus_dma_segment_t seg[1];
526 	int error;
527 	int nsegs;
528 
529 	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
530 
531 	new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
532 	if (new_mbuf == NULL)
533 		return (ENOBUFS);
534 	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
535 
536 	if (*mbufp) {
537 		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
538 		bus_dmamap_unload(tag, map);
539 	}
540 
541 	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
542 	    BUS_DMA_NOWAIT);
543 	KASSERT(nsegs == 1, ("Too many segments returned!"));
544 	if (nsegs != 1 || error)
545 		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
546 
547 	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
548 
549 	(*mbufp) = new_mbuf;
550 	(*paddr) = seg->ds_addr;
551 	return (0);
552 }
553 
554 static int
555 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
556     uint32_t size, bus_dma_tag_t *buffer_tag)
557 {
558 	struct mge_desc_wrapper *dw;
559 	bus_addr_t desc_paddr;
560 	int i, error;
561 
562 	desc_paddr = 0;
563 	for (i = size - 1; i >= 0; i--) {
564 		dw = &(tab[i]);
565 		error = bus_dmamem_alloc(sc->mge_desc_dtag,
566 		    (void**)&(dw->mge_desc),
567 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
568 		    &(dw->desc_dmap));
569 
570 		if (error) {
571 			if_printf(sc->ifp, "failed to allocate DMA memory\n");
572 			dw->mge_desc = NULL;
573 			return (ENXIO);
574 		}
575 
576 		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
577 		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
578 		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
579 
580 		if (error) {
581 			if_printf(sc->ifp, "can't load descriptor\n");
582 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
583 			    dw->desc_dmap);
584 			dw->mge_desc = NULL;
585 			return (ENXIO);
586 		}
587 
588 		/* Chain descriptors */
589 		dw->mge_desc->next_desc = desc_paddr;
590 		desc_paddr = dw->mge_desc_paddr;
591 	}
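	/*
	 * The loop above runs backwards, so at this point desc_paddr holds
	 * the bus address of tab[0]; linking it into the last descriptor
	 * closes the ring.
	 */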
592 	tab[size - 1].mge_desc->next_desc = desc_paddr;
593 
594 	/* Allocate a busdma tag for mbufs. */
595 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
596 	    1, 0,				/* alignment, boundary */
597 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
598 	    BUS_SPACE_MAXADDR,			/* highaddr */
599 	    NULL, NULL,				/* filtfunc, filtfuncarg */
600 	    MCLBYTES, 1,			/* maxsize, nsegments */
601 	    MCLBYTES, 0,			/* maxsegsz, flags */
602 	    NULL, NULL,				/* lockfunc, lockfuncarg */
603 	    buffer_tag);			/* dmat */
604 	if (error) {
605 		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
606 		return (ENXIO);
607 	}
608 
609 	/* Create TX busdma maps */
610 	for (i = 0; i < size; i++) {
611 		dw = &(tab[i]);
612 		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
613 		if (error) {
614 			if_printf(sc->ifp, "failed to create map for mbuf\n");
615 			return (ENXIO);
616 		}
617 
618 		dw->buffer = (struct mbuf*)NULL;
619 		dw->mge_desc->buffer = (bus_addr_t)NULL;
620 	}
621 
622 	return (0);
623 }
624 
625 static int
626 mge_allocate_dma(struct mge_softc *sc)
627 {
628 	struct mge_desc_wrapper *dw;
629 	int i;
630 
631 	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
632 	bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
633 	    16, 0,				/* alignment, boundary */
634 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
635 	    BUS_SPACE_MAXADDR,			/* highaddr */
636 	    NULL, NULL,				/* filtfunc, filtfuncarg */
637 	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
638 	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
639 	    NULL, NULL,				/* lockfunc, lockfuncarg */
640 	    &sc->mge_desc_dtag);		/* dmat */
641 
642 
643 	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
644 	    &sc->mge_tx_dtag);
645 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
646 	    &sc->mge_rx_dtag);
647 
648 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
649 		dw = &(sc->mge_rx_desc[i]);
650 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
651 		    &dw->mge_desc->buffer);
652 	}
653 
654 	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
655 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
656 
657 	return (0);
658 }
659 
660 static void
661 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
662     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
663 {
664 	struct mge_desc_wrapper *dw;
665 	int i;
666 
667 	for (i = 0; i < size; i++) {
668 		/* Free RX mbuf */
669 		dw = &(tab[i]);
670 
671 		if (dw->buffer_dmap) {
672 			if (free_mbufs) {
673 				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
674 				    BUS_DMASYNC_POSTREAD);
675 				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
676 			}
677 			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
678 			if (free_mbufs)
679 				m_freem(dw->buffer);
680 		}
681 		/* Free RX descriptors */
682 		if (dw->desc_dmap) {
683 			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
684 			    BUS_DMASYNC_POSTREAD);
685 			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
686 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
687 			    dw->desc_dmap);
688 		}
689 	}
690 }
691 
692 static void
693 mge_free_dma(struct mge_softc *sc)
694 {
695 
696 	/* Free descriptors and mbufs */
697 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
698 	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
699 
700 	/* Destroy mbuf dma tag */
701 	bus_dma_tag_destroy(sc->mge_tx_dtag);
702 	bus_dma_tag_destroy(sc->mge_rx_dtag);
703 	/* Destroy descriptors tag */
704 	bus_dma_tag_destroy(sc->mge_desc_dtag);
705 }
706 
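/*
 * Rebuild the RX ring from scratch after a resource error
 * (MGE_PORT_INT_RXERRQ0): free the old descriptors and mbufs, allocate
 * fresh ones, rewind the current-descriptor pointer and re-enable the
 * RX queue.
 */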
707 static void
708 mge_reinit_rx(struct mge_softc *sc)
709 {
710 	struct mge_desc_wrapper *dw;
711 	int i;
712 
713 	MGE_RECEIVE_LOCK_ASSERT(sc);
714 
715 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
716 
717 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
718 	    &sc->mge_rx_dtag);
719 
720 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
721 		dw = &(sc->mge_rx_desc[i]);
722 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
723 		&dw->mge_desc->buffer);
724 	}
725 
726 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
727 	sc->rx_desc_curr = 0;
728 
729 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
730 	    sc->rx_desc_start);
731 
732 	/* Enable RX queue */
733 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
734 }
735 
736 #ifdef DEVICE_POLLING
737 static poll_handler_t mge_poll;
738 
739 static int
740 mge_poll(if_t ifp, enum poll_cmd cmd, int count)
741 {
742 	struct mge_softc *sc = if_getsoftc(ifp);
743 	uint32_t int_cause, int_cause_ext;
744 	int rx_npkts = 0;
745 
746 	MGE_RECEIVE_LOCK(sc);
747 
748 	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
749 		MGE_RECEIVE_UNLOCK(sc);
750 		return (rx_npkts);
751 	}
752 
753 	if (cmd == POLL_AND_CHECK_STATUS) {
754 		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
755 		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
756 
757 		/* Check for resource error */
758 		if (int_cause & MGE_PORT_INT_RXERRQ0)
759 			mge_reinit_rx(sc);
760 
761 		if (int_cause || int_cause_ext) {
762 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
763 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
764 		}
765 	}
766 
767 
768 	rx_npkts = mge_intr_rx_locked(sc, count);
769 
770 	MGE_RECEIVE_UNLOCK(sc);
771 	MGE_TRANSMIT_LOCK(sc);
772 	mge_intr_tx_locked(sc);
773 	MGE_TRANSMIT_UNLOCK(sc);
774 	return (rx_npkts);
775 }
776 #endif /* DEVICE_POLLING */
777 
778 static int
779 mge_attach(device_t dev)
780 {
781 	struct mge_softc *sc;
782 	struct mii_softc *miisc;
783 	if_t ifp;
784 	uint8_t hwaddr[ETHER_ADDR_LEN];
785 	int i, error, phy;
786 
787 	sc = device_get_softc(dev);
788 	sc->dev = dev;
789 	sc->node = ofw_bus_get_node(dev);
790 	phy = 0;
791 
792 	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) == 0) {
793 		device_printf(dev, "PHY%i attached, phy_sc points to %s\n", phy,
794 		    device_get_nameunit(sc->phy_sc->dev));
795 		sc->phy_attached = 1;
796 	} else {
797 		device_printf(dev, "PHY not attached.\n");
798 		sc->phy_attached = 0;
799 		sc->phy_sc = sc;
800 	}
801 
802 	if (fdt_find_compatible(sc->node, "mrvl,sw", 1) != 0) {
803 		device_printf(dev, "Switch attached.\n");
804 		sc->switch_attached = 1;
805 		/* also note it in the variable shared across all instances */
806 		switch_attached = 1;
807 	} else {
808 		sc->switch_attached = 0;
809 	}
810 
811 	if (device_get_unit(dev) == 0) {
812 		sx_init(&sx_smi, "mge_tick() SMI access threads interlock");
813 	}
814 
815 	/* Set chip version-dependent parameters */
816 	mge_ver_params(sc);
817 
818 	/* Initialize mutexes */
819 	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
820 	    MTX_DEF);
821 	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
822 	    MTX_DEF);
823 
824 	/* Allocate IO and IRQ resources */
825 	error = bus_alloc_resources(dev, res_spec, sc->res);
826 	if (error) {
827 		device_printf(dev, "could not allocate resources\n");
828 		mge_detach(dev);
829 		return (ENXIO);
830 	}
831 
832 	/* Allocate DMA, buffers, buffer descriptors */
833 	error = mge_allocate_dma(sc);
834 	if (error) {
835 		mge_detach(dev);
836 		return (ENXIO);
837 	}
838 
839 	sc->tx_desc_curr = 0;
840 	sc->rx_desc_curr = 0;
841 	sc->tx_desc_used_idx = 0;
842 	sc->tx_desc_used_count = 0;
843 
844 	/* Configure defaults for interrupts coalescing */
845 	sc->rx_ic_time = 768;
846 	sc->tx_ic_time = 768;
847 	mge_add_sysctls(sc);
848 
849 	/* Allocate network interface */
850 	ifp = sc->ifp = if_alloc(IFT_ETHER);
851 	if (ifp == NULL) {
852 		device_printf(dev, "if_alloc() failed\n");
853 		mge_detach(dev);
854 		return (ENOMEM);
855 	}
856 
857 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
858 	if_setsoftc(ifp, sc);
859 	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
860 	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
861 	if (sc->mge_hw_csum) {
862 		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
863 		if_sethwassist(ifp, MGE_CHECKSUM_FEATURES);
864 	}
865 	if_setcapenable(ifp, if_getcapabilities(ifp));
866 
867 #ifdef DEVICE_POLLING
868 	/* Advertise that polling is supported */
869 	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
870 #endif
871 
872 	if_setinitfn(ifp, mge_init);
873 	if_setstartfn(ifp, mge_start);
874 	if_setioctlfn(ifp, mge_ioctl);
875 
876 	if_setsendqlen(ifp, MGE_TX_DESC_NUM - 1);
877 	if_setsendqready(ifp);
878 
879 	mge_get_mac_address(sc, hwaddr);
880 	ether_ifattach(ifp, hwaddr);
881 	callout_init(&sc->wd_callout, 1);
882 
883 	/* Attach PHY(s) */
884 	if (sc->phy_attached) {
885 		error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
886 		    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
887 		if (error) {
888 			device_printf(dev, "MII failed to find PHY\n");
889 			if_free(ifp);
890 			sc->ifp = NULL;
891 			mge_detach(dev);
892 			return (error);
893 		}
894 		sc->mii = device_get_softc(sc->miibus);
895 
896 		/* Tell the MAC where to find the PHY so autoneg works */
897 		miisc = LIST_FIRST(&sc->mii->mii_phys);
898 		MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
899 	} else {
900 		/* no PHY, so use hard-coded values */
901 		ifmedia_init(&sc->mge_ifmedia, 0,
902 		    mge_ifmedia_upd,
903 		    mge_ifmedia_sts);
904 		ifmedia_add(&sc->mge_ifmedia,
905 		    IFM_ETHER | IFM_1000_T | IFM_FDX,
906 		    0, NULL);
907 		ifmedia_set(&sc->mge_ifmedia,
908 		    IFM_ETHER | IFM_1000_T | IFM_FDX);
909 	}
910 
911 	/* Attach interrupt handlers */
912 	/* TODO: review flags; in particular, mark RX as INTR_ENTROPY? */
913 	for (i = 1; i <= sc->mge_intr_cnt; ++i) {
914 		error = bus_setup_intr(dev, sc->res[i],
915 		    INTR_TYPE_NET | INTR_MPSAFE,
916 		    NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
917 		    sc, &sc->ih_cookie[i - 1]);
918 		if (error) {
919 			device_printf(dev, "could not setup %s\n",
920 			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
921 			mge_detach(dev);
922 			return (error);
923 		}
924 	}
925 
926 	if (sc->switch_attached) {
927 		MGE_WRITE(sc, MGE_REG_PHYDEV, MGE_SWITCH_PHYDEV);
928 		device_add_child(dev, "mdio", -1);
929 		bus_generic_attach(dev);
930 	}
931 
932 	return (0);
933 }
934 
935 static int
936 mge_detach(device_t dev)
937 {
938 	struct mge_softc *sc;
939 	int error, i;
940 
941 	sc = device_get_softc(dev);
942 
943 	/* Stop controller and free TX queue */
944 	if (sc->ifp)
945 		mge_shutdown(dev);
946 
947 	/* Wait for the tick callout to finish */
948 	callout_drain(&sc->wd_callout);
949 
950 	/* Stop and release all interrupts */
951 	for (i = 0; i < sc->mge_intr_cnt; ++i) {
952 		if (!sc->ih_cookie[i])
953 			continue;
954 
955 		error = bus_teardown_intr(dev, sc->res[1 + i],
956 		    sc->ih_cookie[i]);
957 		if (error)
958 			device_printf(dev, "could not release %s\n",
959 			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
960 	}
961 
962 	/* Detach network interface */
963 	if (sc->ifp) {
964 		ether_ifdetach(sc->ifp);
965 		if_free(sc->ifp);
966 	}
967 
968 	/* Free DMA resources */
969 	mge_free_dma(sc);
970 
971 	/* Free IO memory handler */
972 	bus_release_resources(dev, res_spec, sc->res);
973 
974 	/* Destroy mutexes */
975 	mtx_destroy(&sc->receive_lock);
976 	mtx_destroy(&sc->transmit_lock);
977 
978 	if (device_get_unit(dev) == 0)
979 		sx_destroy(&sx_smi);
980 
981 	return (0);
982 }
983 
984 static void
985 mge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
986 {
987 	struct mge_softc *sc;
988 	struct mii_data *mii;
989 
990 	sc = if_getsoftc(ifp);
991 	MGE_GLOBAL_LOCK(sc);
992 
993 	if (!sc->phy_attached) {
994 		ifmr->ifm_active = IFM_1000_T | IFM_FDX | IFM_ETHER;
995 		ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
996 		goto out_unlock;
997 	}
998 
999 	mii = sc->mii;
1000 	mii_pollstat(mii);
1001 
1002 	ifmr->ifm_active = mii->mii_media_active;
1003 	ifmr->ifm_status = mii->mii_media_status;
1004 
1005 out_unlock:
1006 	MGE_GLOBAL_UNLOCK(sc);
1007 }
1008 
1009 static uint32_t
1010 mge_set_port_serial_control(uint32_t media)
1011 {
1012 	uint32_t port_config;
1013 
1014 	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
1015 	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
1016 
1017 	if (IFM_TYPE(media) == IFM_ETHER) {
1018 		switch (IFM_SUBTYPE(media)) {
1019 			case IFM_AUTO:
1020 				break;
1021 			case IFM_1000_T:
1022 				port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
1023 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
1024 				    | PORT_SERIAL_SPEED_AUTONEG);
1025 				break;
1026 			case IFM_100_TX:
1027 				port_config |= (PORT_SERIAL_MII_SPEED_100 |
1028 				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
1029 				    | PORT_SERIAL_SPEED_AUTONEG);
1030 				break;
1031 			case IFM_10_T:
1032 				port_config |= (PORT_SERIAL_AUTONEG |
1033 				    PORT_SERIAL_AUTONEG_FC |
1034 				    PORT_SERIAL_SPEED_AUTONEG);
1035 				break;
1036 		}
1037 		if (media & IFM_FDX)
1038 			port_config |= PORT_SERIAL_FULL_DUPLEX;
1039 	}
1040 	return (port_config);
1041 }
1042 
1043 static int
1044 mge_ifmedia_upd(if_t ifp)
1045 {
1046 	struct mge_softc *sc = if_getsoftc(ifp);
1047 
1048 	/*
1049 	 * Do not do anything for switch here, as updating media between
1050 	 * MGE MAC and switch MAC is hardcoded in PCB. Changing it here would
1051 	 * break the link.
1052 	 */
1053 	if (sc->phy_attached) {
1054 		MGE_GLOBAL_LOCK(sc);
1055 		if (if_getflags(ifp) & IFF_UP) {
1056 			sc->mge_media_status = sc->mii->mii_media.ifm_media;
1057 			mii_mediachg(sc->mii);
1058 
1059 			/* MGE MAC needs to be reinitialized. */
1060 			mge_init_locked(sc);
1061 
1062 		}
1063 		MGE_GLOBAL_UNLOCK(sc);
1064 	}
1065 
1066 	return (0);
1067 }
1068 
1069 static void
1070 mge_init(void *arg)
1071 {
1072 	struct mge_softc *sc;
1073 
1074 	sc = arg;
1075 	MGE_GLOBAL_LOCK(sc);
1076 
1077 	mge_init_locked(arg);
1078 
1079 	MGE_GLOBAL_UNLOCK(sc);
1080 }
1081 
1082 static void
1083 mge_init_locked(void *arg)
1084 {
1085 	struct mge_softc *sc = arg;
1086 	struct mge_desc_wrapper *dw;
1087 	volatile uint32_t reg_val;
1088 	int i, count;
1089 	uint32_t media_status;
1090 
1091 
1092 	MGE_GLOBAL_LOCK_ASSERT(sc);
1093 
1094 	/* Stop interface */
1095 	mge_stop(sc);
1096 
1097 	/* Disable interrupts */
1098 	mge_intrs_ctrl(sc, 0);
1099 
1100 	/* Set MAC address */
1101 	mge_set_mac_address(sc);
1102 
1103 	/* Setup multicast filters */
1104 	mge_setup_multicast(sc);
1105 
1106 	if (sc->mge_ver == 2) {
1107 		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
1108 		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
1109 	}
1110 
1111 	/* Initialize TX queue configuration registers */
1112 	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
1113 	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
1114 	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
1115 
1116 	/* Clear TX queue configuration registers for unused queues */
1117 	for (i = 1; i < 7; i++) {
1118 		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
1119 		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
1120 		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
1121 	}
1122 
1123 	/* Set default MTU (sc->mge_mtu holds the register offset) */
1124 	MGE_WRITE(sc, sc->mge_mtu, 0);
1125 
1126 	/* Port configuration */
1127 	MGE_WRITE(sc, MGE_PORT_CONFIG,
1128 	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
1129 	    PORT_CONFIG_ARO_RXQ(0));
1130 	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);
1131 
1132 	/* Configure promisc mode */
1133 	mge_set_prom_mode(sc, MGE_RX_DEFAULT_QUEUE);
1134 
1135 	media_status = sc->mge_media_status;
1136 	if (sc->switch_attached) {
1137 		media_status &= ~IFM_TMASK;
1138 		media_status |= IFM_1000_T;
1139 	}
1140 
1141 	/* Setup port configuration */
1142 	reg_val = mge_set_port_serial_control(media_status);
1143 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1144 
1145 	/* Setup SDMA configuration */
1146 	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
1147 	    MGE_SDMA_TX_BYTE_SWAP |
1148 	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
1149 	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
1150 
1151 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
1152 
1153 	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
1154 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
1155 	    sc->rx_desc_start);
1156 
1157 	/* Reset descriptor indexes */
1158 	sc->tx_desc_curr = 0;
1159 	sc->rx_desc_curr = 0;
1160 	sc->tx_desc_used_idx = 0;
1161 	sc->tx_desc_used_count = 0;
1162 
1163 	/* Enable RX descriptors */
1164 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
1165 		dw = &sc->mge_rx_desc[i];
1166 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1167 		dw->mge_desc->buff_size = MCLBYTES;
1168 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1169 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1170 	}
1171 
1172 	/* Enable RX queue */
1173 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
1174 
1175 	/* Enable port */
1176 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1177 	reg_val |= PORT_SERIAL_ENABLE;
1178 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1179 	count = 0x100000;
1180 	for (;;) {
1181 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1182 		if (reg_val & MGE_STATUS_LINKUP)
1183 			break;
1184 		DELAY(100);
1185 		if (--count == 0) {
1186 			if_printf(sc->ifp, "Timeout on link-up\n");
1187 			break;
1188 		}
1189 	}
1190 
1191 	/* Setup interrupts coalescing */
1192 	mge_set_rxic(sc);
1193 	mge_set_txic(sc);
1194 
1195 	/* Enable interrupts */
1196 #ifdef DEVICE_POLLING
1197 	/*
1198 	 * ...only if polling is not turned on. Disable interrupts explicitly
1199 	 * if polling is enabled.
1200 	 */
1201 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING)
1202 		mge_intrs_ctrl(sc, 0);
1203 	else
1204 #endif /* DEVICE_POLLING */
1205 	mge_intrs_ctrl(sc, 1);
1206 
1207 	/* Activate network interface */
1208 	if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, 0);
1209 	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
1210 	sc->wd_timer = 0;
1211 
1212 	/* Schedule watchdog timeout */
1213 	if (sc->phy_attached)
1214 		callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1215 }
1216 
1217 static void
1218 mge_intr_rxtx(void *arg)
1219 {
1220 	struct mge_softc *sc;
1221 	uint32_t int_cause, int_cause_ext;
1222 
1223 	sc = arg;
1224 	MGE_GLOBAL_LOCK(sc);
1225 
1226 #ifdef DEVICE_POLLING
1227 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1228 		MGE_GLOBAL_UNLOCK(sc);
1229 		return;
1230 	}
1231 #endif
1232 
1233 	/* Get interrupt cause */
1234 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1235 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1236 
1237 	/* Check for Transmit interrupt */
1238 	if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
1239 	    MGE_PORT_INT_EXT_TXUR)) {
1240 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1241 		    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1242 		mge_intr_tx_locked(sc);
1243 	}
1244 
1245 	MGE_TRANSMIT_UNLOCK(sc);
1246 
1247 	/* Check for Receive interrupt */
1248 	mge_intr_rx_check(sc, int_cause, int_cause_ext);
1249 
1250 	MGE_RECEIVE_UNLOCK(sc);
1251 }
1252 
1253 static void
1254 mge_intr_err(void *arg)
1255 {
1256 	struct mge_softc *sc;
1257 	if_t ifp;
1258 
1259 	sc = arg;
1260 	ifp = sc->ifp;
1261 	if_printf(ifp, "%s\n", __FUNCTION__);
1262 }
1263 
1264 static void
1265 mge_intr_misc(void *arg)
1266 {
1267 	struct mge_softc *sc;
1268 	if_t ifp;
1269 
1270 	sc = arg;
1271 	ifp = sc->ifp;
1272 	if_printf(ifp, "%s\n", __FUNCTION__);
1273 }
1274 
1275 static void
1276 mge_intr_rx(void *arg)
{
1277 	struct mge_softc *sc;
1278 	uint32_t int_cause, int_cause_ext;
1279 
1280 	sc = arg;
1281 	MGE_RECEIVE_LOCK(sc);
1282 
1283 #ifdef DEVICE_POLLING
1284 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1285 		MGE_RECEIVE_UNLOCK(sc);
1286 		return;
1287 	}
1288 #endif
1289 
1290 	/* Get interrupt cause */
1291 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1292 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1293 
1294 	mge_intr_rx_check(sc, int_cause, int_cause_ext);
1295 
1296 	MGE_RECEIVE_UNLOCK(sc);
1297 }
1298 
1299 static void
1300 mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
1301     uint32_t int_cause_ext)
1302 {
1303 	/* Check for resource error */
1304 	if (int_cause & MGE_PORT_INT_RXERRQ0) {
1305 		mge_reinit_rx(sc);
1306 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1307 		    ~(int_cause & MGE_PORT_INT_RXERRQ0));
1308 	}
1309 
1310 	int_cause &= MGE_PORT_INT_RXQ0;
1311 	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1312 
1313 	if (int_cause || int_cause_ext) {
1314 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1315 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1316 		mge_intr_rx_locked(sc, -1);
1317 	}
1318 }
1319 
1320 static int
1321 mge_intr_rx_locked(struct mge_softc *sc, int count)
1322 {
1323 	if_t ifp = sc->ifp;
1324 	uint32_t status;
1325 	uint16_t bufsize;
1326 	struct mge_desc_wrapper* dw;
1327 	struct mbuf *mb;
1328 	int rx_npkts = 0;
1329 
1330 	MGE_RECEIVE_LOCK_ASSERT(sc);
1331 
1332 	while (count != 0) {
1333 		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1334 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1335 		    BUS_DMASYNC_POSTREAD);
1336 
1337 		/* Get status */
1338 		status = dw->mge_desc->cmd_status;
1339 		bufsize = dw->mge_desc->buff_size;
1340 		if ((status & MGE_DMA_OWNED) != 0)
1341 			break;
1342 
1343 		if (dw->mge_desc->byte_count &&
1344 		    !(status & MGE_ERR_SUMMARY)) {
1345 
1346 			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1347 			    BUS_DMASYNC_POSTREAD);
1348 
1349 			mb = m_devget(dw->buffer->m_data,
1350 			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
1351 			    0, ifp, NULL);
1352 
1353 			if (mb == NULL)
1354 				/* Give up if no mbufs */
1355 				break;
1356 
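			/*
			 * The controller stores each received frame with a
			 * two-byte prefix so the IP header ends up 32-bit
			 * aligned; strip it before passing the mbuf up.
			 */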
1357 			mb->m_len -= 2;
1358 			mb->m_pkthdr.len -= 2;
1359 			mb->m_data += 2;
1360 
1361 			mb->m_pkthdr.rcvif = ifp;
1362 
1363 			mge_offload_process_frame(ifp, mb, status,
1364 			    bufsize);
1365 
1366 			MGE_RECEIVE_UNLOCK(sc);
1367 			if_input(ifp, mb);
1368 			MGE_RECEIVE_LOCK(sc);
1369 			rx_npkts++;
1370 		}
1371 
1372 		dw->mge_desc->byte_count = 0;
1373 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1374 		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
1375 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1376 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1377 
1378 		if (count > 0)
1379 			count -= 1;
1380 	}
1381 
1382 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_npkts);
1383 
1384 	return (rx_npkts);
1385 }
1386 
1387 static void
1388 mge_intr_sum(void *arg)
1389 {
1390 	struct mge_softc *sc = arg;
1391 	if_t ifp;
1392 
1393 	ifp = sc->ifp;
1394 	if_printf(ifp, "%s\n", __FUNCTION__);
1395 }
1396 
1397 static void
1398 mge_intr_tx(void *arg)
1399 {
1400 	struct mge_softc *sc = arg;
1401 	uint32_t int_cause_ext;
1402 
1403 	MGE_TRANSMIT_LOCK(sc);
1404 
1405 #ifdef DEVICE_POLLING
1406 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1407 		MGE_TRANSMIT_UNLOCK(sc);
1408 		return;
1409 	}
1410 #endif
1411 
1412 	/* Ack the interrupt */
1413 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1414 	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1415 	    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1416 
1417 	mge_intr_tx_locked(sc);
1418 
1419 	MGE_TRANSMIT_UNLOCK(sc);
1420 }
1421 
1422 static void
1423 mge_intr_tx_locked(struct mge_softc *sc)
1424 {
1425 	if_t ifp = sc->ifp;
1426 	struct mge_desc_wrapper *dw;
1427 	struct mge_desc *desc;
1428 	uint32_t status;
1429 	int send = 0;
1430 
1431 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1432 
1433 	/* Disable watchdog */
1434 	sc->wd_timer = 0;
1435 
1436 	while (sc->tx_desc_used_count) {
1437 		/* Get the descriptor */
1438 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1439 		desc = dw->mge_desc;
1440 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1441 		    BUS_DMASYNC_POSTREAD);
1442 
1443 		/* Get descriptor status */
1444 		status = desc->cmd_status;
1445 
1446 		if (status & MGE_DMA_OWNED)
1447 			break;
1448 
1449 		sc->tx_desc_used_idx =
1450 		    (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
1451 		sc->tx_desc_used_count--;
1452 
1453 		/* Update collision statistics */
1454 		if (status & MGE_ERR_SUMMARY) {
1455 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1456 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1457 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1458 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16);
1459 		}
1460 
1461 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1462 		    BUS_DMASYNC_POSTWRITE);
1463 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1464 		m_freem(dw->buffer);
1465 		dw->buffer = (struct mbuf*)NULL;
1466 		send++;
1467 
1468 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1469 	}
1470 
1471 	if (send) {
1472 		/* Now send anything that was pending */
1473 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1474 		mge_start_locked(ifp);
1475 	}
1476 }
1477 static int
1478 mge_ioctl(if_t ifp, u_long command, caddr_t data)
1479 {
1480 	struct mge_softc *sc = if_getsoftc(ifp);
1481 	struct ifreq *ifr = (struct ifreq *)data;
1482 	int mask, error;
1483 	uint32_t flags;
1484 
1485 	error = 0;
1486 
1487 	switch (command) {
1488 	case SIOCSIFFLAGS:
1489 		MGE_GLOBAL_LOCK(sc);
1490 
1491 		if (if_getflags(ifp) & IFF_UP) {
1492 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1493 				flags = if_getflags(ifp) ^ sc->mge_if_flags;
1494 				if (flags & IFF_PROMISC)
1495 					mge_set_prom_mode(sc,
1496 					    MGE_RX_DEFAULT_QUEUE);
1497 
1498 				if (flags & IFF_ALLMULTI)
1499 					mge_setup_multicast(sc);
1500 			} else
1501 				mge_init_locked(sc);
1502 		}
1503 		else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1504 			mge_stop(sc);
1505 
1506 		sc->mge_if_flags = if_getflags(ifp);
1507 		MGE_GLOBAL_UNLOCK(sc);
1508 		break;
1509 	case SIOCADDMULTI:
1510 	case SIOCDELMULTI:
1511 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1512 			MGE_GLOBAL_LOCK(sc);
1513 			mge_setup_multicast(sc);
1514 			MGE_GLOBAL_UNLOCK(sc);
1515 		}
1516 		break;
1517 	case SIOCSIFCAP:
1518 		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
1519 		if (mask & IFCAP_HWCSUM) {
1520 			if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
1521 			if_setcapenablebit(ifp, IFCAP_HWCSUM & ifr->ifr_reqcap, 0);
1522 			if (if_getcapenable(ifp) & IFCAP_TXCSUM)
1523 				if_sethwassist(ifp, MGE_CHECKSUM_FEATURES);
1524 			else
1525 				if_sethwassist(ifp, 0);
1526 		}
1527 #ifdef DEVICE_POLLING
1528 		if (mask & IFCAP_POLLING) {
1529 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1530 				error = ether_poll_register(mge_poll, ifp);
1531 				if (error)
1532 					return (error);
1533 
1534 				MGE_GLOBAL_LOCK(sc);
1535 				mge_intrs_ctrl(sc, 0);
1536 				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
1537 				MGE_GLOBAL_UNLOCK(sc);
1538 			} else {
1539 				error = ether_poll_deregister(ifp);
1540 				MGE_GLOBAL_LOCK(sc);
1541 				mge_intrs_ctrl(sc, 1);
1542 				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
1543 				MGE_GLOBAL_UNLOCK(sc);
1544 			}
1545 		}
1546 #endif
1547 		break;
1548 	case SIOCGIFMEDIA: /* fall through */
1549 	case SIOCSIFMEDIA:
1550 		/*
1551 		 * Setting up media type via ioctls is *not* supported for MAC
1552 		 * which is connected to switch. Use etherswitchcfg.
1553 		 */
1554 		if (!sc->phy_attached && (command == SIOCSIFMEDIA))
1555 			return (0);
1556 		else if (!sc->phy_attached) {
1557 			error = ifmedia_ioctl(ifp, ifr, &sc->mge_ifmedia,
1558 			    command);
1559 			break;
1560 		}
1561 
1562 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1563 		    && !(ifr->ifr_media & IFM_FDX)) {
1564 			device_printf(sc->dev,
1565 			    "1000baseT half-duplex unsupported\n");
1566 			return (0);
1567 		}
1568 		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1569 		break;
1570 	default:
1571 		error = ether_ioctl(ifp, command, data);
1572 	}
1573 	return (error);
1574 }
1575 
1576 static int
1577 mge_miibus_readreg(device_t dev, int phy, int reg)
1578 {
1579 
1580 	KASSERT(!switch_attached, ("miibus used with switch attached"));
1581 
1582 	return (mv_read_ext_phy(dev, phy, reg));
1583 }
1584 
1585 static int
1586 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1587 {
1588 
1589 	KASSERT(!switch_attached, ("miibus used with switch attached"));
1590 
1591 	mv_write_ext_phy(dev, phy, reg, value);
1592 
1593 	return (0);
1594 }
1595 
1596 static int
1597 mge_probe(device_t dev)
1598 {
1599 
1600 	if (!ofw_bus_status_okay(dev))
1601 		return (ENXIO);
1602 
1603 	if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
1604 		return (ENXIO);
1605 
1606 	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1607 	return (BUS_PROBE_DEFAULT);
1608 }
1609 
1610 static int
1611 mge_resume(device_t dev)
1612 {
1613 
1614 	device_printf(dev, "%s\n", __FUNCTION__);
1615 	return (0);
1616 }
1617 
1618 static int
1619 mge_shutdown(device_t dev)
1620 {
1621 	struct mge_softc *sc = device_get_softc(dev);
1622 
1623 	MGE_GLOBAL_LOCK(sc);
1624 
1625 #ifdef DEVICE_POLLING
1626 	if (if_getcapenable(sc->ifp) & IFCAP_POLLING)
1627 		ether_poll_deregister(sc->ifp);
1628 #endif
1629 
1630 	mge_stop(sc);
1631 
1632 	MGE_GLOBAL_UNLOCK(sc);
1633 
1634 	return (0);
1635 }
1636 
1637 static int
1638 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1639 {
1640 	struct mge_desc_wrapper *dw = NULL;
1641 	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1642 	bus_dmamap_t mapp;
1643 	int error;
1644 	int seg, nsegs;
1645 	int desc_no;
1646 
1647 	/* Fetch unused map */
1648 	desc_no = sc->tx_desc_curr;
1649 	dw = &sc->mge_tx_desc[desc_no];
1650 	mapp = dw->buffer_dmap;
1651 
1652 	/* Create mapping in DMA memory */
1653 	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1654 	    BUS_DMA_NOWAIT);
1655 	if (error != 0) {
1656 		m_freem(m0);
1657 		return (error);
1658 	}
1659 
1660 	/* Only one segment is supported. */
1661 	if (nsegs != 1) {
1662 		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1663 		m_freem(m0);
1664 		return (-1);
1665 	}
1666 
1667 	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1668 
1669 	/* Everything is ok, now we can send buffers */
1670 	for (seg = 0; seg < nsegs; seg++) {
1671 		dw->mge_desc->byte_count = segs[seg].ds_len;
1672 		dw->mge_desc->buffer = segs[seg].ds_addr;
1673 		dw->buffer = m0;
1674 		dw->mge_desc->cmd_status = 0;
1675 		if (seg == 0)
1676 			mge_offload_setup_descriptor(sc, dw);
1677 		dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
1678 		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1679 		    MGE_DMA_OWNED;
1680 	}
1681 
1682 	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1683 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1684 
1685 	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
1686 	sc->tx_desc_used_count++;
1687 	return (0);
1688 }
1689 
1690 static void
1691 mge_tick(void *msc)
1692 {
1693 	struct mge_softc *sc = msc;
1694 
1695 	KASSERT(sc->phy_attached == 1, ("mge_tick while PHY not attached"));
1696 
1697 	MGE_GLOBAL_LOCK(sc);
1698 
1699 	/* Check for TX timeout */
1700 	mge_watchdog(sc);
1701 
1702 	mii_tick(sc->mii);
1703 
1704 	/* Check for media type change */
1705 	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
1706 		mge_ifmedia_upd(sc->ifp);
1707 
1708 	MGE_GLOBAL_UNLOCK(sc);
1709 
1710 	/* Schedule another timeout one second from now */
1711 	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1712 
1713 	return;
1714 }
1715 
1716 static void
1717 mge_watchdog(struct mge_softc *sc)
1718 {
1719 	if_t ifp;
1720 
1721 	ifp = sc->ifp;
1722 
1723 	if (sc->wd_timer == 0 || --sc->wd_timer) {
1724 		return;
1725 	}
1726 
1727 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1728 	if_printf(ifp, "watchdog timeout\n");
1729 
1730 	mge_stop(sc);
1731 	mge_init_locked(sc);
1732 }
1733 
1734 static void
1735 mge_start(if_t ifp)
1736 {
1737 	struct mge_softc *sc = if_getsoftc(ifp);
1738 
1739 	MGE_TRANSMIT_LOCK(sc);
1740 
1741 	mge_start_locked(ifp);
1742 
1743 	MGE_TRANSMIT_UNLOCK(sc);
1744 }
1745 
1746 static void
1747 mge_start_locked(if_t ifp)
1748 {
1749 	struct mge_softc *sc;
1750 	struct mbuf *m0, *mtmp;
1751 	uint32_t reg_val, queued = 0;
1752 
1753 	sc = if_getsoftc(ifp);
1754 
1755 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1756 
1757 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1758 	    IFF_DRV_RUNNING)
1759 		return;
1760 
1761 	for (;;) {
1762 		/* Get packet from the queue */
1763 		m0 = if_dequeue(ifp);
1764 		if (m0 == NULL)
1765 			break;
1766 
1767 		if (m0->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP) ||
1768 		    m0->m_flags & M_VLANTAG) {
1769 			if (M_WRITABLE(m0) == 0) {
1770 				mtmp = m_dup(m0, M_NOWAIT);
1771 				m_freem(m0);
1772 				if (mtmp == NULL)
1773 					continue;
1774 				m0 = mtmp;
1775 			}
1776 		}
1777 		/* The driver supports only one DMA fragment. */
1778 		if (m0->m_next != NULL) {
1779 			mtmp = m_defrag(m0, M_NOWAIT);
1780 			if (mtmp != NULL)
1781 				m0 = mtmp;
1782 		}
1783 
1784 		/* Check for free descriptors */
1785 		if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1786 			if_sendq_prepend(ifp, m0);
1787 			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1788 			break;
1789 		}
1790 
1791 		if (mge_encap(sc, m0) != 0)
1792 			break;
1793 
1794 		queued++;
1795 		BPF_MTAP(ifp, m0);
1796 	}
1797 
1798 	if (queued) {
1799 		/* Enable transmitter and watchdog timer */
1800 		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1801 		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
1802 		sc->wd_timer = 5;
1803 	}
1804 }
1805 
1806 static void
1807 mge_stop(struct mge_softc *sc)
1808 {
1809 	if_t ifp;
1810 	volatile uint32_t reg_val, status;
1811 	struct mge_desc_wrapper *dw;
1812 	struct mge_desc *desc;
1813 	int count;
1814 
1815 	ifp = sc->ifp;
1816 
1817 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
1818 		return;
1819 
1820 	/* Stop tick engine */
1821 	callout_stop(&sc->wd_callout);
1822 
1823 	/* Disable interface */
1824 	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
1825 	sc->wd_timer = 0;
1826 
1827 	/* Disable interrupts */
1828 	mge_intrs_ctrl(sc, 0);
1829 
1830 	/* Disable Rx and Tx */
1831 	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1832 	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1833 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1834 
1835 	/* Remove pending data from TX queue */
1836 	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1837 	    sc->tx_desc_used_count) {
1838 		/* Get the descriptor */
1839 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1840 		desc = dw->mge_desc;
1841 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1842 		    BUS_DMASYNC_POSTREAD);
1843 
1844 		/* Get descriptor status */
1845 		status = desc->cmd_status;
1846 
1847 		if (status & MGE_DMA_OWNED)
1848 			break;
1849 
1850 		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
1851 		    MGE_TX_DESC_NUM;
1852 		sc->tx_desc_used_count--;
1853 
1854 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1855 		    BUS_DMASYNC_POSTWRITE);
1856 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1857 
1858 		m_freem(dw->buffer);
1859 		dw->buffer = (struct mbuf*)NULL;
1860 	}
1861 
1862 	/* Wait for end of transmission */
1863 	count = 0x100000;
1864 	while (count--) {
1865 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1866 		if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
1867 		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1868 			break;
1869 		DELAY(100);
1870 	}
1871 
1872 	if (count == 0)
1873 		if_printf(ifp,
1874 		    "%s: timeout while waiting for end of transmission\n",
1875 		    __FUNCTION__);
1876 
1877 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1878 	reg_val &= ~(PORT_SERIAL_ENABLE);
1879 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1880 }
1881 
1882 static int
1883 mge_suspend(device_t dev)
1884 {
1885 
1886 	device_printf(dev, "%s\n", __FUNCTION__);
1887 	return (0);
1888 }
1889 
1890 static void
1891 mge_offload_process_frame(if_t ifp, struct mbuf *frame,
1892     uint32_t status, uint16_t bufsize)
1893 {
1894 	int csum_flags = 0;
1895 
1896 	if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
1897 		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1898 			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
1899 
1900 		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1901 		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1902 		    (status & MGE_RX_L4_CSUM_OK)) {
1903 			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1904 			frame->m_pkthdr.csum_data = 0xFFFF;
1905 		}
1906 
1907 		frame->m_pkthdr.csum_flags = csum_flags;
1908 	}
1909 }
1910 
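/*
 * For frames with checksum offload requested, the TX descriptor must
 * carry the IP header length (ip_hl, in 32-bit words) and the flags
 * telling the controller which checksums to generate; a VLAN header
 * shifts the IP header by ETHER_VLAN_ENCAP_LEN bytes, which is
 * accounted for below.
 */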
1911 static void
1912 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1913 {
1914 	struct mbuf *m0 = dw->buffer;
1915 	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1916 	int csum_flags = m0->m_pkthdr.csum_flags;
1917 	int cmd_status = 0;
1918 	struct ip *ip;
1919 	int ehlen, etype;
1920 
1921 	if (csum_flags != 0) {
1922 		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1923 			etype = ntohs(eh->evl_proto);
1924 			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1925 			cmd_status |= MGE_TX_VLAN_TAGGED;
1926 		} else {
1927 			etype = ntohs(eh->evl_encap_proto);
1928 			ehlen = ETHER_HDR_LEN;
1929 		}
1930 
1931 		if (etype != ETHERTYPE_IP) {
1932 			if_printf(sc->ifp,
1933 			    "TCP/IP Offload enabled for unsupported "
1934 			    "protocol!\n");
1935 			return;
1936 		}
1937 
1938 		ip = (struct ip *)(m0->m_data + ehlen);
1939 		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1940 		cmd_status |= MGE_TX_NOT_FRAGMENT;
1941 	}
1942 
1943 	if (csum_flags & CSUM_IP)
1944 		cmd_status |= MGE_TX_GEN_IP_CSUM;
1945 
1946 	if (csum_flags & CSUM_TCP)
1947 		cmd_status |= MGE_TX_GEN_L4_CSUM;
1948 
1949 	if (csum_flags & CSUM_UDP)
1950 		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1951 
1952 	dw->mge_desc->cmd_status |= cmd_status;
1953 }
1954 
1955 static void
1956 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1957 {
1958 
1959 	if (enable) {
1960 		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
1961 		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1962 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
1963 		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1964 		    MGE_PORT_INT_EXT_TXBUF0);
1965 	} else {
1966 		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1967 		MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1968 
1969 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1970 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1971 
1972 		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1973 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
1974 	}
1975 }
1976 
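/*
 * CRC-8 over the MAC address (polynomial 0x07, MSB first, zero initial
 * value), used to index the 256-entry "other multicast" hash table.
 * The lookup table below is the usual byte-at-a-time expansion; a
 * minimal sketch of the generator for one entry (an illustration, not
 * part of the driver) is:
 *
 *	uint8_t crc = byte;
 *	for (int bit = 0; bit < 8; bit++)
 *		crc = (crc & 0x80) ? (crc << 1) ^ 0x07 : (crc << 1);
 */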
1977 static uint8_t
1978 mge_crc8(uint8_t *data, int size)
1979 {
1980 	uint8_t crc = 0;
1981 	static const uint8_t ct[256] = {
1982 		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1983 		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1984 		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1985 		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1986 		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1987 		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1988 		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
1989 		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
1990 		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
1991 		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
1992 		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
1993 		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
1994 		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
1995 		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
1996 		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
1997 		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
1998 		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
1999 		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
2000 		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
2001 		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
2002 		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
2003 		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
2004 		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
2005 		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
2006 		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
2007 		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
2008 		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
2009 		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
2010 		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
2011 		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
2012 		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
2013 		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
2014 	};
2015 
2016 	while (size--)
2017 		crc = ct[crc ^ *(data++)];
2018 
2019 	return (crc);
2020 }
2021 
2022 struct mge_hash_maddr_ctx {
2023 	uint32_t smt[MGE_MCAST_REG_NUMBER];
2024 	uint32_t omt[MGE_MCAST_REG_NUMBER];
2025 };
2026 
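/*
 * Sort a multicast address into one of the two hardware DA filter
 * tables: addresses with the 01:00:5e:00:00 prefix index the "special"
 * table directly by their last byte, everything else is hashed with
 * mge_crc8() into the "other" table.  Each byte-wide table slot again
 * encodes a pass bit and the target RX queue.
 */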
2027 static u_int
2028 mge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2029 {
2030 	static const uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
2031 	struct mge_hash_maddr_ctx *ctx = arg;
2032 	static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
2033 	uint8_t *mac;
2034 	int i;
2035 
2036 	mac = LLADDR(sdl);
2037 	if (memcmp(mac, special, sizeof(special)) == 0) {
2038 		i = mac[5];
2039 		ctx->smt[i >> 2] |= v << ((i & 0x03) << 3);
2040 	} else {
2041 		i = mge_crc8(mac, ETHER_ADDR_LEN);
2042 		ctx->omt[i >> 2] |= v << ((i & 0x03) << 3);
2043 	}
2044 	return (1);
2045 }
2046 
2047 static void
2048 mge_setup_multicast(struct mge_softc *sc)
2049 {
2050 	struct mge_hash_maddr_ctx ctx;
2051 	if_t ifp = sc->ifp;
2052 	static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
2053 	int i;
2054 
2055 	if (if_getflags(ifp) & IFF_ALLMULTI) {
2056 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
2057 			ctx.smt[i] = ctx.omt[i] =
2058 			    (v << 24) | (v << 16) | (v << 8) | v;
2059 	} else {
2060 		memset(&ctx, 0, sizeof(ctx));
2061 		if_foreach_llmaddr(ifp, mge_hash_maddr, &ctx);
2062 	}
2063 
2064 	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
2065 		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), ctx.smt[i]);
2066 		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), ctx.omt[i]);
2067 	}
2068 }
2069 
2070 static void
2071 mge_set_rxic(struct mge_softc *sc)
2072 {
2073 	uint32_t reg;
2074 
2075 	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
2076 		sc->rx_ic_time = sc->mge_rx_ipg_max;
2077 
2078 	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
2079 	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
2080 	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
2081 	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
2082 }
2083 
2084 static void
2085 mge_set_txic(struct mge_softc *sc)
2086 {
2087 	uint32_t reg;
2088 
2089 	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
2090 		sc->tx_ic_time = sc->mge_tfut_ipg_max;
2091 
2092 	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
2093 	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
2094 	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
2095 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
2096 }
2097 
2098 static int
2099 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
2100 {
2101 	struct mge_softc *sc = (struct mge_softc *)arg1;
2102 	uint32_t time;
2103 	int error;
2104 
2105 	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
2106 	error = sysctl_handle_int(oidp, &time, 0, req);
2107 	if (error != 0)
2108 		return (error);
2109 
2110 	MGE_GLOBAL_LOCK(sc);
2111 	if (arg2 == MGE_IC_RX) {
2112 		sc->rx_ic_time = time;
2113 		mge_set_rxic(sc);
2114 	} else {
2115 		sc->tx_ic_time = time;
2116 		mge_set_txic(sc);
2117 	}
2118 	MGE_GLOBAL_UNLOCK(sc);
2119 
2120 	return (0);
2121 }
2122 
2123 static void
2124 mge_add_sysctls(struct mge_softc *sc)
2125 {
2126 	struct sysctl_ctx_list *ctx;
2127 	struct sysctl_oid_list *children;
2128 	struct sysctl_oid *tree;
2129 
2130 	ctx = device_get_sysctl_ctx(sc->dev);
2131 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2132 	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
2133 	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "MGE Interrupts coalescing");
2134 	children = SYSCTL_CHILDREN(tree);
2135 
2136 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
2137 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, MGE_IC_RX,
2138 	    mge_sysctl_ic, "I", "IC RX time threshold");
2139 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
2140 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, MGE_IC_TX,
2141 	    mge_sysctl_ic, "I", "IC TX time threshold");
2142 }
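/*
 * The knobs above land under the device's sysctl tree; e.g., assuming
 * unit 0, the coalescing thresholds can be tuned at runtime with:
 *
 *	# sysctl dev.mge.0.int_coal.rx_time=768
 *	# sysctl dev.mge.0.int_coal.tx_time=768
 */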
2143 
2144 static int
2145 mge_mdio_writereg(device_t dev, int phy, int reg, int value)
2146 {
2147 
2148 	mv_write_ge_smi(dev, phy, reg, value);
2149 
2150 	return (0);
2151 }
2152 
2153 
2154 static int
2155 mge_mdio_readreg(device_t dev, int phy, int reg)
2156 {
2157 	int ret;
2158 
2159 	ret = mv_read_ge_smi(dev, phy, reg);
2160 
2161 	return (ret);
2162 }
2163