/*-
 * Copyright (c) 2020 Michael J Karels
 * Copyright (c) 2016, 2020 Jared McNeill <jmcneill@invisible.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * RPi4 (BCM 2711) Gigabit Ethernet ("GENET") controller
 *
 * This driver is derived in large part from bcmgenet.c from NetBSD by
 * Jared McNeill.  Parts of the structure and other common code in
 * this driver have been copied from if_awg.c for the Allwinner EMAC,
 * also by Jared McNeill.
 */

#include "opt_device_polling.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/taskqueue.h>
#include <sys/gpio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#define __BIT(_x)	(1 << (_x))
#include "if_genetreg.h"

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_fdt.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include "syscon_if.h"
#include "miibus_if.h"
#include "gpio_if.h"

#define	RD4(sc, reg)		bus_read_4((sc)->res[_RES_MAC], (reg))
#define	WR4(sc, reg, val)	bus_write_4((sc)->res[_RES_MAC], (reg), (val))
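
/*
 * RD4/WR4 wrap the 32-bit bus-space accessors for the MAC register
 * window (_RES_MAC); e.g. RD4(sc, GENET_SYS_REV_CTRL) below reads the
 * hardware revision register during attach.
 */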

#define	GEN_LOCK(sc)		mtx_lock(&(sc)->mtx)
#define	GEN_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
#define	GEN_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
#define	GEN_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED)

#define	TX_DESC_COUNT		GENET_DMA_DESC_COUNT
#define	RX_DESC_COUNT		GENET_DMA_DESC_COUNT

#define	TX_NEXT(n, count)		(((n) + 1) & ((count) - 1))
#define	RX_NEXT(n, count)		(((n) + 1) & ((count) - 1))
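/*
 * Ring indexes wrap with a mask, which assumes a power-of-two ring
 * size; e.g. with count == 256, TX_NEXT(255, 256) == 0.
 */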

#define	TX_MAX_SEGS		20

static SYSCTL_NODE(_hw, OID_AUTO, genet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "genet driver parameters");

/* Maximum number of mbufs to pass per call to if_input */
static int gen_rx_batch = 16 /* RX_BATCH_DEFAULT */;
SYSCTL_INT(_hw_genet, OID_AUTO, rx_batch, CTLFLAG_RDTUN,
    &gen_rx_batch, 0, "max mbufs per call to if_input");

TUNABLE_INT("hw.gen.rx_batch", &gen_rx_batch);	/* old name/interface */
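
/*
 * Example (a sketch, not from the original source): rx_batch is a
 * boot-time tunable, so it can be set from loader.conf, e.g.
 *	hw.genet.rx_batch="32"
 * and inspected at runtime with "sysctl hw.genet.rx_batch".
 */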

/*
 * Transmitting packets with only an Ethernet header in the first mbuf
 * fails.  Examples include reflected ICMPv6 packets, e.g. echo replies;
 * forwarded IPv6/TCP packets; and forwarded IPv4/TCP packets that use NAT
 * with IPFW.  Pulling up the sizes of ether_header + ip6_hdr + icmp6_hdr
 * seems to work for both ICMPv6 and TCP over IPv6, as well as the IPv4/TCP
 * case.
 */
static int gen_tx_hdr_min = 56;		/* ether_header + ip6_hdr + icmp6_hdr */
SYSCTL_INT(_hw_genet, OID_AUTO, tx_hdr_min, CTLFLAG_RW,
    &gen_tx_hdr_min, 0, "header to add to packets with ether header only");

static struct ofw_compat_data compat_data[] = {
	{ "brcm,genet-v1",		1 },
	{ "brcm,genet-v2",		2 },
	{ "brcm,genet-v3",		3 },
	{ "brcm,genet-v4",		4 },
	{ "brcm,genet-v5",		5 },
	{ "brcm,bcm2711-genet-v5",	5 },
	{ NULL,				0 }
};
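
/*
 * ocd_data above is the GENET version implied by the compatible string,
 * saved as sc->type at attach; gen_attach additionally requires the
 * hardware to report major revision 5.
 */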

enum {
	_RES_MAC,		/* MAC register window */
	_RES_IRQ1,
	_RES_IRQ2,
	_RES_NITEMS
};

static struct resource_spec gen_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ -1, 0 }
};

/* structure per ring entry */
struct gen_ring_ent {
	bus_dmamap_t		map;
	struct mbuf		*mbuf;
};

struct tx_queue {
	int			hwindex;		/* hardware index */
	int			nentries;
	u_int			queued;			/* descriptors in use */
	u_int			cur;
	u_int			next;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct rx_queue {
	int			hwindex;		/* hardware index */
	int			nentries;
	u_int			cur;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct gen_softc {
	struct resource		*res[_RES_NITEMS];
	struct mtx		mtx;
	if_t			ifp;
	device_t		dev;
	device_t		miibus;
	mii_contype_t		phy_mode;

	struct callout		stat_ch;
	struct task		link_task;
	void			*ih;
	void			*ih2;
	int			type;
	int			if_flags;
	int			link;
	bus_dma_tag_t		tx_buf_tag;
	/*
	 * The genet chip has multiple queues for transmit and receive.
	 * This driver uses only one (queue 16, the default), but is
	 * structured to allow multiple rings.  The additional rings would
	 * be used for different priorities.
	 */
#define DEF_TXQUEUE	0
#define NTXQUEUE	1
	struct tx_queue		tx_queue[NTXQUEUE];
	struct gen_ring_ent	tx_ring_ent[TX_DESC_COUNT];  /* ring entries */

	bus_dma_tag_t		rx_buf_tag;
#define DEF_RXQUEUE	0
#define NRXQUEUE	1
	struct rx_queue		rx_queue[NRXQUEUE];
	struct gen_ring_ent	rx_ring_ent[RX_DESC_COUNT];  /* ring entries */
};

static void gen_init(void *softc);
static void gen_start(if_t ifp);
static void gen_destroy(struct gen_softc *sc);
static int gen_encap(struct gen_softc *sc, struct mbuf **mp);
static int gen_parse_tx(struct mbuf *m, int csum_flags);
static int gen_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int gen_get_phy_mode(device_t dev);
static bool gen_get_eaddr(device_t dev, struct ether_addr *eaddr);
static void gen_set_enaddr(struct gen_softc *sc);
static void gen_setup_rxfilter(struct gen_softc *sc);
static void gen_reset(struct gen_softc *sc);
static void gen_enable(struct gen_softc *sc);
static void gen_dma_disable(struct gen_softc *sc);
static int gen_bus_dma_init(struct gen_softc *sc);
static void gen_bus_dma_teardown(struct gen_softc *sc);
static void gen_enable_intr(struct gen_softc *sc);
static void gen_init_txrings(struct gen_softc *sc);
static void gen_init_rxrings(struct gen_softc *sc);
static void gen_intr(void *softc);
static int gen_rxintr(struct gen_softc *sc, struct rx_queue *q);
static void gen_txintr(struct gen_softc *sc, struct tx_queue *q);
static void gen_intr2(void *softc);
static int gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index);
static int gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m);
static void gen_link_task(void *arg, int pending);
static void gen_media_status(if_t ifp, struct ifmediareq *ifmr);
static int gen_media_change(if_t ifp);
static void gen_tick(void *softc);

static int
gen_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "RPi4 Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}

static int
gen_attach(device_t dev)
{
	struct ether_addr eaddr;
	struct gen_softc *sc;
	int major, minor, error, mii_flags;
	bool eaddr_found;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	if (bus_alloc_resources(dev, gen_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		error = ENXIO;
		goto fail;
	}

	major = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MAJOR) >> REV_MAJOR_SHIFT;
	if (major != REV_MAJOR_V5) {
		device_printf(dev, "version %d is not supported\n", major);
		error = ENXIO;
		goto fail;
	}
	minor = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MINOR) >> REV_MINOR_SHIFT;
	device_printf(dev, "GENET version 5.%d phy 0x%04x\n", minor,
	    RD4(sc, GENET_SYS_REV_CTRL) & REV_PHY);

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
	TASK_INIT(&sc->link_task, 0, gen_link_task, sc);

	error = gen_get_phy_mode(dev);
	if (error != 0)
		goto fail;

	bzero(&eaddr, sizeof(eaddr));
	eaddr_found = gen_get_eaddr(dev, &eaddr);

	/* reset core */
	gen_reset(sc);

	gen_dma_disable(sc);

	/* Setup DMA */
	error = gen_bus_dma_init(sc);
	if (error != 0) {
		device_printf(dev, "cannot setup bus dma\n");
		goto fail;
	}

	/* Setup ethernet interface */
	sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp, sc);
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(sc->ifp, gen_start);
	if_setioctlfn(sc->ifp, gen_ioctl);
	if_setinitfn(sc->ifp, gen_init);
	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
	if_setsendqready(sc->ifp);
#define GEN_CSUM_FEATURES	(CSUM_UDP | CSUM_TCP)
	if_sethwassist(sc->ifp, GEN_CSUM_FEATURES);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));

	/* Install interrupt handlers */
	error = bus_setup_intr(dev, sc->res[_RES_IRQ1],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler1\n");
		goto fail;
	}

	error = bus_setup_intr(dev, sc->res[_RES_IRQ2],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr2, sc, &sc->ih2);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler2\n");
		goto fail;
	}

	/* Attach MII driver */
	mii_flags = 0;
	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII_ID:
		mii_flags |= MIIF_RX_DELAY | MIIF_TX_DELAY;
		break;
	case MII_CONTYPE_RGMII_RXID:
		mii_flags |= MIIF_RX_DELAY;
		break;
	case MII_CONTYPE_RGMII_TXID:
		mii_flags |= MIIF_TX_DELAY;
		break;
	default:
		break;
	}
	error = mii_attach(dev, &sc->miibus, sc->ifp, gen_media_change,
	    gen_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    mii_flags);
	if (error != 0) {
		device_printf(dev, "cannot attach PHY\n");
		goto fail;
	}

	/* If address was not found, create one based on the hostid and name. */
	if (eaddr_found == 0)
		ether_gen_addr(sc->ifp, &eaddr);
	/* Attach ethernet interface */
	ether_ifattach(sc->ifp, eaddr.octet);

fail:
	if (error)
		gen_destroy(sc);
	return (error);
}

/* Free resources after failed attach.  This is not a complete detach. */
static void
gen_destroy(struct gen_softc *sc)
{

	if (sc->miibus) {	/* can't happen */
		device_delete_child(sc->dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ1], sc->ih);
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ2], sc->ih2);
	gen_bus_dma_teardown(sc);
	callout_drain(&sc->stat_ch);
	if (mtx_initialized(&sc->mtx))
		mtx_destroy(&sc->mtx);
	bus_release_resources(sc->dev, gen_spec, sc->res);
	if (sc->ifp != NULL) {
		if_free(sc->ifp);
		sc->ifp = NULL;
	}
}

static int
gen_get_phy_mode(device_t dev)
{
	struct gen_softc *sc;
	phandle_t node;
	mii_contype_t type;
	int error = 0;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	type = mii_fdt_get_contype(node);

	switch (type) {
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		sc->phy_mode = type;
		break;
	default:
		device_printf(dev, "unknown phy-mode '%s'\n",
		    mii_fdt_contype_to_name(type));
		error = ENXIO;
		break;
	}

	return (error);
}

static bool
gen_get_eaddr(device_t dev, struct ether_addr *eaddr)
{
	struct gen_softc *sc;
	uint32_t maclo, machi, val;
	phandle_t node;

	sc = device_get_softc(dev);

	node = ofw_bus_get_node(dev);
	if (OF_getprop(node, "mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "local-mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "address", eaddr->octet, ETHER_ADDR_LEN) != -1)
		return (true);

	device_printf(dev, "No Ethernet address found in fdt!\n");
	maclo = machi = 0;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) {
		maclo = htobe32(RD4(sc, GENET_UMAC_MAC0));
		machi = htobe16(RD4(sc, GENET_UMAC_MAC1) & 0xffff);
	}

	if (maclo == 0 && machi == 0) {
		if (bootverbose)
			device_printf(dev,
			    "No Ethernet address found in controller\n");
		return (false);
	} else {
		eaddr->octet[0] = maclo & 0xff;
		eaddr->octet[1] = (maclo >> 8) & 0xff;
		eaddr->octet[2] = (maclo >> 16) & 0xff;
		eaddr->octet[3] = (maclo >> 24) & 0xff;
		eaddr->octet[4] = machi & 0xff;
		eaddr->octet[5] = (machi >> 8) & 0xff;
		return (true);
	}
}

static void
gen_reset(struct gen_softc *sc)
{
	uint32_t val;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	DELAY(10);

	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	DELAY(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);
}

static void
gen_enable(struct gen_softc *sc)
{
	u_int val;

	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	gen_enable_intr(sc);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

static void
gen_disable_intr(struct gen_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, GENET_INTRL2_CPU_SET_MASK, 0xffffffff);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK, 0xffffffff);
}

static void
gen_disable(struct gen_softc *sc)
{
	uint32_t val;

	/* Stop receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Stop transmitter */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_TXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Disable Interrupt */
	gen_disable_intr(sc);
}

static void
gen_enable_offload(struct gen_softc *sc)
{
	uint32_t check_ctrl, buf_ctrl;

	check_ctrl = RD4(sc, GENET_RBUF_CHECK_CTRL);
	buf_ctrl = RD4(sc, GENET_RBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0) {
		check_ctrl |= GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl |= GENET_RBUF_64B_EN;
	} else {
		check_ctrl &= ~GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	}
	WR4(sc, GENET_RBUF_CHECK_CTRL, check_ctrl);
	WR4(sc, GENET_RBUF_CTRL, buf_ctrl);

	buf_ctrl = RD4(sc, GENET_TBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0)
		buf_ctrl |= GENET_RBUF_64B_EN;
	else
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	WR4(sc, GENET_TBUF_CTRL, buf_ctrl);
}

static void
gen_dma_disable(struct gen_softc *sc)
{
	int val;

	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	val &= ~GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	val &= ~GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static int
gen_bus_dma_init(struct gen_softc *sc)
{
	device_t dev = sc->dev;
	int i, error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx_buf_tag, 0,
		    &sc->tx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->rx_buf_tag, 0,
		    &sc->rx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
	}
	return (0);
}

static void
gen_bus_dma_teardown(struct gen_softc *sc)
{
	int i, error;

	if (sc->tx_buf_tag != NULL) {
		for (i = 0; i < TX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->tx_buf_tag,
			    sc->tx_ring_ent[i].map);
			sc->tx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->tx_buf_tag);
		sc->tx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}

	if (sc->rx_buf_tag != NULL) {
		for (i = 0; i < RX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->rx_buf_tag,
			    sc->rx_ring_ent[i].map);
			sc->rx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->rx_buf_tag);
		sc->rx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}
}

static void
gen_enable_intr(struct gen_softc *sc)
{

	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_txring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct tx_queue *q;
	uint32_t val;

	q = &sc->tx_queue[queue];
	q->entries = &sc->tx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;

	/* TX ring */

	q->queued = 0;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_TX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_TX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_TX_DMA_CTRL, val);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_rxring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct rx_queue *q;
	uint32_t val;
	int i;

	q = &sc->rx_queue[queue];
	q->entries = &sc->rx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_RX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_RX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    (5 << GENET_RX_DMA_XON_XOFF_THRES_LO_SHIFT) | (RX_DESC_COUNT >> 4));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* fill ring */
	for (i = 0; i < RX_DESC_COUNT; i++)
		gen_newbuf_rx(sc, &sc->rx_queue[DEF_RXQUEUE], i);

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static void
gen_init_txrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_txring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->tx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_txring(sc, DEF_TXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    TX_DESC_COUNT);
	sc->tx_queue[DEF_TXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_init_rxrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_rxring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->rx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_rxring(sc, DEF_RXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    RX_DESC_COUNT);
	sc->rx_queue[DEF_RXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_stop(struct gen_softc *sc)
{
	int i;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	callout_stop(&sc->stat_ch);
	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
	gen_reset(sc);
	gen_disable(sc);
	gen_dma_disable(sc);

	/* Clear the tx/rx ring buffer */
	for (i = 0; i < TX_DESC_COUNT; i++) {
		ent = &sc->tx_ring_ent[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
		}
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		ent = &sc->rx_ring_ent[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->rx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
		}
	}
}

static void
gen_init_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	GEN_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		WR4(sc, GENET_SYS_PORT_CTRL, GENET_SYS_PORT_MODE_EXT_GPHY);
		break;
	default:
		WR4(sc, GENET_SYS_PORT_CTRL, 0);
	}

	gen_set_enaddr(sc);

	/* Setup RX filter */
	gen_setup_rxfilter(sc);

	gen_init_txrings(sc);
	gen_init_rxrings(sc);
	gen_enable(sc);
	gen_enable_offload(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

static void
gen_init(void *softc)
{
	struct gen_softc *sc;

	sc = softc;
	GEN_LOCK(sc);
	gen_init_locked(sc);
	GEN_UNLOCK(sc);
}

static uint8_t ether_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static void
gen_setup_rxfilter_mdf(struct gen_softc *sc, u_int n, const uint8_t *ea)
{
	uint32_t addr0 = (ea[0] << 8) | ea[1];
	uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];

	WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
	WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
}

static u_int
gen_setup_multi(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	struct gen_softc *sc = arg;

	/* "count + 2" to account for unicast and broadcast */
	gen_setup_rxfilter_mdf(sc, count + 2, LLADDR(sdl));
	return (1);		/* increment to count */
}

static void
gen_setup_rxfilter(struct gen_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t cmd, mdf_ctrl;
	u_int n;

	GEN_ASSERT_LOCKED(sc);

	cmd = RD4(sc, GENET_UMAC_CMD);

	/*
	 * Count the required number of hardware filters. We need one
	 * for each multicast address, plus one for our own address and
	 * the broadcast address.
	 */
	n = if_llmaddr_count(ifp) + 2;

	if (n > GENET_MAX_MDF_FILTER)
		if_setflagbits(ifp, IFF_ALLMULTI, 0);
	else
		if_setflagbits(ifp, 0, IFF_ALLMULTI);

	if ((if_getflags(ifp) & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
		cmd |= GENET_UMAC_CMD_PROMISC;
		mdf_ctrl = 0;
	} else {
		cmd &= ~GENET_UMAC_CMD_PROMISC;
		gen_setup_rxfilter_mdf(sc, 0, ether_broadcastaddr);
		gen_setup_rxfilter_mdf(sc, 1, if_getlladdr(ifp));
		(void) if_foreach_llmaddr(ifp, gen_setup_multi, sc);
		mdf_ctrl = (__BIT(GENET_MAX_MDF_FILTER) - 1) &
		    ~(__BIT(GENET_MAX_MDF_FILTER - n) - 1);
	}
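		/*
		 * A worked example (a sketch, assuming GENET_MAX_MDF_FILTER
		 * == 17): with n == 3, the expression above sets the three
		 * most-significant bits of the filter mask, enabling MDF
		 * slots 0-2: broadcast, our unicast address, and the first
		 * multicast address programmed above.
		 */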
	}

	WR4(sc, GENET_UMAC_CMD, cmd);
	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);
}

static void
gen_set_enaddr(struct gen_softc *sc)
{
	uint8_t *enaddr;
	uint32_t val;
	if_t ifp;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/* Write our unicast address */
	enaddr = if_getlladdr(ifp);
	/* Write hardware address */
	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
	    (enaddr[0] << 24);
	WR4(sc, GENET_UMAC_MAC0, val);
	val = enaddr[5] | (enaddr[4] << 8);
	WR4(sc, GENET_UMAC_MAC1, val);
}

static void
gen_start_locked(struct gen_softc *sc)
{
	struct mbuf *m;
	if_t ifp;
	int err;

	GEN_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	while (true) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		err = gen_encap(sc, &m);
		if (err != 0) {
			if (err == ENOBUFS)
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			else if (m == NULL)
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if (m != NULL)
				if_sendq_prepend(ifp, m);
			break;
		}
		if_bpfmtap(ifp, m);
	}
}

static void
gen_start(if_t ifp)
{
	struct gen_softc *sc;

	sc = if_getsoftc(ifp);

	GEN_LOCK(sc);
	gen_start_locked(sc);
	GEN_UNLOCK(sc);
}

/* Test for any delayed checksum */
#define CSUM_DELAY_ANY	(CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)

static int
gen_encap(struct gen_softc *sc, struct mbuf **mp)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, first, i, index, offset;
	uint32_t csuminfo, length_status, csum_flags = 0, csumdata;
	struct mbuf *m;
	struct statusblock *sb = NULL;
	struct tx_queue *q;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	q = &sc->tx_queue[DEF_TXQUEUE];

	m = *mp;

	/*
	 * Don't attempt to send packets with only an Ethernet header in
	 * the first mbuf; see the comment above with gen_tx_hdr_min.
	 */
	if (m->m_len == sizeof(struct ether_header)) {
		m = m_pullup(m, MIN(m->m_pkthdr.len, gen_tx_hdr_min));
		if (m == NULL) {
			if (if_getflags(sc->ifp) & IFF_DEBUG)
				device_printf(sc->dev,
				    "header pullup fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
	}

	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0) {
		csum_flags = m->m_pkthdr.csum_flags;
		csumdata = m->m_pkthdr.csum_data;
		M_PREPEND(m, sizeof(struct statusblock), M_NOWAIT);
		if (m == NULL) {
			if (if_getflags(sc->ifp) & IFF_DEBUG)
				device_printf(sc->dev, "prepend fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
		offset = gen_parse_tx(m, csum_flags);
		sb = mtod(m, struct statusblock *);
		if ((csum_flags & CSUM_DELAY_ANY) != 0) {
			csuminfo = (offset << TXCSUM_OFF_SHIFT) |
			    (offset + csumdata);
			csuminfo |= TXCSUM_LEN_VALID;
			if (csum_flags & (CSUM_UDP | CSUM_IP6_UDP))
				csuminfo |= TXCSUM_UDP;
			sb->txcsuminfo = csuminfo;
		} else
			sb->txcsuminfo = 0;
	}

	*mp = m;

	cur = first = q->cur;
	ent = &q->entries[cur];
	map = ent->map;
	error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL) {
			device_printf(sc->dev,
			    "gen_encap: m_collapse failed\n");
			m_freem(*mp);
			*mp = NULL;
			return (ENOMEM);
		}
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*mp);
			*mp = NULL;
		}
	}
	if (error != 0) {
		device_printf(sc->dev,
		    "gen_encap: bus_dmamap_load_mbuf_sg failed\n");
		return (error);
	}
	if (nsegs == 0) {
		m_freem(*mp);
		*mp = NULL;
		return (EIO);
	}

	/* Remove statusblock after mapping, before possible requeue or bpf. */
	if (sb != NULL) {
		m->m_data += sizeof(struct statusblock);
		m->m_len -= sizeof(struct statusblock);
		m->m_pkthdr.len -= sizeof(struct statusblock);
	}
	if (q->queued + nsegs > q->nentries) {
		bus_dmamap_unload(sc->tx_buf_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->tx_buf_tag, map, BUS_DMASYNC_PREWRITE);

	index = q->prod_idx & (q->nentries - 1);
	for (i = 0; i < nsegs; i++) {
		ent = &q->entries[cur];
		length_status = GENET_TX_DESC_STATUS_QTAG_MASK;
		if (i == 0) {
			length_status |= GENET_TX_DESC_STATUS_SOP |
			    GENET_TX_DESC_STATUS_CRC;
			if ((csum_flags & CSUM_DELAY_ANY) != 0)
				length_status |= GENET_TX_DESC_STATUS_CKSUM;
		}
		if (i == nsegs - 1)
			length_status |= GENET_TX_DESC_STATUS_EOP;

		length_status |= segs[i].ds_len <<
		    GENET_TX_DESC_STATUS_BUFLEN_SHIFT;

		WR4(sc, GENET_TX_DESC_ADDRESS_LO(index),
		    (uint32_t)segs[i].ds_addr);
		WR4(sc, GENET_TX_DESC_ADDRESS_HI(index),
		    (uint32_t)(segs[i].ds_addr >> 32));
		WR4(sc, GENET_TX_DESC_STATUS(index), length_status);

		++q->queued;
		cur = TX_NEXT(cur, q->nentries);
		index = TX_NEXT(index, q->nentries);
	}

	q->prod_idx += nsegs;
	q->prod_idx &= GENET_TX_DMA_PROD_CONS_MASK;
	/* We probably don't need to write the producer index on every iter */
	if (nsegs != 0)
		WR4(sc, GENET_TX_DMA_PROD_INDEX(q->hwindex), q->prod_idx);
	q->cur = cur;

	/* Store the mbuf in the first segment's ring entry */
	q->entries[first].mbuf = m;

	return (0);
}

/*
 * Parse a packet to find the offset of the transport header for checksum
 * offload.  Ensure that the link and network headers are contiguous with
 * the status block, or transmission fails.
 */
static int
gen_parse_tx(struct mbuf *m, int csum_flags)
{
	int offset, off_in_m;
	bool copy = false, shift = false;
	u_char *p, *copy_p = NULL;
	struct mbuf *m0 = m;
	uint16_t ether_type;

	if (m->m_len == sizeof(struct statusblock)) {
		/* M_PREPEND placed statusblock at end; move to beginning */
		m->m_data = m->m_pktdat;
		copy_p = mtodo(m, sizeof(struct statusblock));
		m = m->m_next;
		off_in_m = 0;
		p = mtod(m, u_char *);
		copy = true;
	} else {
		/*
		 * If statusblock is not at beginning of mbuf (likely),
		 * then remember to move mbuf contents down before copying
		 * after them.
		 */
		if ((m->m_flags & M_EXT) == 0 && m->m_data != m->m_pktdat)
			shift = true;
		p = mtodo(m, sizeof(struct statusblock));
		off_in_m = sizeof(struct statusblock);
	}

/*
 * If headers need to be copied contiguous to statusblock, do so.
 * If copying to the internal mbuf data area, and the status block
 * is not at the beginning of that area, shift the status block (which
 * is empty) and following data.
 */
#define COPY(size) {							\
	int hsize = size;						\
	if (copy) {							\
		if (shift) {						\
			u_char *p0;					\
			shift = false;					\
			p0 = mtodo(m0, sizeof(struct statusblock));	\
			m0->m_data = m0->m_pktdat;			\
			bcopy(p0, mtodo(m0, sizeof(struct statusblock)),\
			    m0->m_len - sizeof(struct statusblock));	\
			copy_p = mtodo(m0, m0->m_len);			\
		}							\
		bcopy(p, copy_p, hsize);				\
		m0->m_len += hsize;					\
		m->m_len -= hsize;					\
		m->m_data += hsize;					\
	}								\
	copy_p += hsize;						\
}
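/*
 * Example (a sketch): for an untagged IPv4 packet with a 20-byte IP
 * header, the code below copies the 14-byte Ethernet header and the IP
 * header up against the status block (when they are not already
 * contiguous with it) and returns offset == 34, the start of the
 * transport header.
 */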

	KASSERT((sizeof(struct statusblock) + sizeof(struct ether_vlan_header) +
	    sizeof(struct ip6_hdr) <= MLEN), ("%s: mbuf too small", __func__));

	if (((struct ether_header *)p)->ether_type == htons(ETHERTYPE_VLAN)) {
		offset = sizeof(struct ether_vlan_header);
		ether_type = ntohs(((struct ether_vlan_header *)p)->evl_proto);
		COPY(sizeof(struct ether_vlan_header));
		if (m->m_len == off_in_m + sizeof(struct ether_vlan_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_vlan_header);
			p += sizeof(struct ether_vlan_header);
		}
	} else {
		offset = sizeof(struct ether_header);
		ether_type = ntohs(((struct ether_header *)p)->ether_type);
		COPY(sizeof(struct ether_header));
		if (m->m_len == off_in_m + sizeof(struct ether_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_header);
			p += sizeof(struct ether_header);
		}
	}
	if (ether_type == ETHERTYPE_IP) {
		COPY(((struct ip *)p)->ip_hl << 2);
		offset += ((struct ip *)p)->ip_hl << 2;
	} else if (ether_type == ETHERTYPE_IPV6) {
		COPY(sizeof(struct ip6_hdr));
		offset += sizeof(struct ip6_hdr);
	} else {
		/*
		 * Unknown whether most other cases require moving a header;
		 * ARP works without.  However, Wake On LAN packets sent
		 * by wake(8) via BPF need something like this.
		 */
		COPY(MIN(gen_tx_hdr_min, m->m_len));
		offset += MIN(gen_tx_hdr_min, m->m_len);
	}
	return (offset);
#undef COPY
}

static void
gen_intr(void *arg)
{
	struct gen_softc *sc = arg;
	uint32_t val;

	GEN_LOCK(sc);

	val = RD4(sc, GENET_INTRL2_CPU_STAT);
	val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
	WR4(sc, GENET_INTRL2_CPU_CLEAR, val);

	if (val & GENET_IRQ_RXDMA_DONE)
		gen_rxintr(sc, &sc->rx_queue[DEF_RXQUEUE]);

	if (val & GENET_IRQ_TXDMA_DONE) {
		gen_txintr(sc, &sc->tx_queue[DEF_TXQUEUE]);
		if (!if_sendq_empty(sc->ifp))
			gen_start_locked(sc);
	}

	GEN_UNLOCK(sc);
}

static int
gen_rxintr(struct gen_softc *sc, struct rx_queue *q)
{
	if_t ifp;
	struct mbuf *m, *mh, *mt;
	struct statusblock *sb = NULL;
	int error, index, len, cnt, npkt, n;
	uint32_t status, prod_idx, total;

	ifp = sc->ifp;
	mh = mt = NULL;
	cnt = 0;
	npkt = 0;

	prod_idx = RD4(sc, GENET_RX_DMA_PROD_INDEX(q->hwindex)) &
	    GENET_RX_DMA_PROD_CONS_MASK;
	total = (prod_idx - q->cons_idx) & GENET_RX_DMA_PROD_CONS_MASK;
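	/*
	 * prod_idx and cons_idx are free-running hardware counters, so the
	 * subtraction above is taken modulo the index mask; e.g. (assuming
	 * a 16-bit mask) prod_idx == 0x0003 and cons_idx == 0xfffe yields
	 * total == 5 across the wrap.
	 */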

	index = q->cons_idx & (RX_DESC_COUNT - 1);
	for (n = 0; n < total; n++) {
		bus_dmamap_sync(sc->rx_buf_tag, q->entries[index].map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rx_buf_tag, q->entries[index].map);

		m = q->entries[index].mbuf;

		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			sb = mtod(m, struct statusblock *);
			status = sb->status_buflen;
		} else
			status = RD4(sc, GENET_RX_DESC_STATUS(index));

		len = (status & GENET_RX_DESC_STATUS_BUFLEN_MASK) >>
		    GENET_RX_DESC_STATUS_BUFLEN_SHIFT;

		/* check for errors */
		if ((status &
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP |
		    GENET_RX_DESC_STATUS_RX_ERROR)) !=
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP)) {
			if (if_getflags(ifp) & IFF_DEBUG)
				device_printf(sc->dev,
				    "error/frag %x csum %x\n", status,
				    sb != NULL ? sb->rxcsum : 0);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		error = gen_newbuf_rx(sc, q, index);
		if (error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			if (if_getflags(ifp) & IFF_DEBUG)
				device_printf(sc->dev, "gen_newbuf_rx %d\n",
				    error);
			/* reuse previous mbuf */
			(void) gen_mapbuf_rx(sc, q, index, m);
			continue;
		}

		if (sb != NULL) {
			if (status & GENET_RX_DESC_STATUS_CKSUM_OK) {
				/* L4 checksum checked; not sure about L3. */
				m->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
			m->m_data += sizeof(struct statusblock);
			m->m_len -= sizeof(struct statusblock);
			len -= sizeof(struct statusblock);
		}
		if (len > ETHER_ALIGN) {
			m_adj(m, ETHER_ALIGN);
			len -= ETHER_ALIGN;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
		m->m_len = len;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		m->m_nextpkt = NULL;
		if (mh == NULL)
			mh = m;
		else
			mt->m_nextpkt = m;
		mt = m;
		++cnt;
		++npkt;

		index = RX_NEXT(index, q->nentries);

		q->cons_idx = (q->cons_idx + 1) & GENET_RX_DMA_PROD_CONS_MASK;
		WR4(sc, GENET_RX_DMA_CONS_INDEX(q->hwindex), q->cons_idx);

		if (cnt == gen_rx_batch) {
			GEN_UNLOCK(sc);
			if_input(ifp, mh);
			GEN_LOCK(sc);
			mh = mt = NULL;
			cnt = 0;
		}
	}

	if (mh != NULL) {
		GEN_UNLOCK(sc);
		if_input(ifp, mh);
		GEN_LOCK(sc);
	}

	return (npkt);
}

static void
gen_txintr(struct gen_softc *sc, struct tx_queue *q)
{
	uint32_t cons_idx, total;
	struct gen_ring_ent *ent;
	if_t ifp;
	int i, prog;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	cons_idx = RD4(sc, GENET_TX_DMA_CONS_INDEX(q->hwindex)) &
	    GENET_TX_DMA_PROD_CONS_MASK;
	total = (cons_idx - q->cons_idx) & GENET_TX_DMA_PROD_CONS_MASK;

	prog = 0;
	for (i = q->next; q->queued > 0 && total > 0;
	    i = TX_NEXT(i, q->nentries), total--) {
		/* XXX check for errors */

		ent = &q->entries[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		prog++;
		--q->queued;
	}

	if (prog > 0) {
		q->next = i;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}

	q->cons_idx = cons_idx;
}

static void
gen_intr2(void *arg)
{
	struct gen_softc *sc = arg;

	device_printf(sc->dev, "gen_intr2\n");
}

static int
gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
	m_adj(m, ETHER_ALIGN);

	return (gen_mapbuf_rx(sc, q, index, m));
}

static int
gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m)
{
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int nsegs;

	map = q->entries[index].map;
	if (bus_dmamap_load_mbuf_sg(sc->rx_buf_tag, map, m, &seg, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->rx_buf_tag, map, BUS_DMASYNC_PREREAD);

	q->entries[index].mbuf = m;
	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)seg.ds_addr);
	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(seg.ds_addr >> 32));

	return (0);
}

static int
gen_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, enable, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		GEN_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					gen_setup_rxfilter(sc);
			} else
				gen_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				gen_stop(sc);
		}
		sc->if_flags = if_getflags(ifp);
		GEN_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			GEN_LOCK(sc);
			gen_setup_rxfilter(sc);
			GEN_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		enable = if_getcapenable(ifp);
		flags = ifr->ifr_reqcap ^ enable;
		if (flags & IFCAP_RXCSUM)
			enable ^= IFCAP_RXCSUM;
		if (flags & IFCAP_RXCSUM_IPV6)
			enable ^= IFCAP_RXCSUM_IPV6;
		if (flags & IFCAP_TXCSUM)
			enable ^= IFCAP_TXCSUM;
		if (flags & IFCAP_TXCSUM_IPV6)
			enable ^= IFCAP_TXCSUM_IPV6;
		if (enable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6))
			if_sethwassist(ifp, GEN_CSUM_FEATURES);
		else
			if_sethwassist(ifp, 0);
		if_setcapenable(ifp, enable);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			gen_enable_offload(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
gen_tick(void *softc)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int link;

	sc = softc;
	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	link = sc->link;
	mii_tick(mii);
	if (sc->link && !link)
		gen_start_locked(sc);

	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

#define	MII_BUSY_RETRY		1000

static int
gen_miibus_readreg(device_t dev, int phy, int reg)
{
	struct gen_softc *sc;
	int retry, val;

	sc = device_get_softc(dev);
	val = 0;

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_READ |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if (((val = RD4(sc, GENET_MDIO_CMD)) &
		    GENET_MDIO_START_BUSY) == 0) {
			if (val & GENET_MDIO_READ_FAILED)
				return (0);	/* -1? */
			val &= GENET_MDIO_VAL_MASK;
			break;
		}
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (val);
}

static int
gen_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct gen_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_WRITE |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT) |
	    (val & GENET_MDIO_VAL_MASK));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		val = RD4(sc, GENET_MDIO_CMD);
		if ((val & GENET_MDIO_START_BUSY) == 0)
			break;
		DELAY(10);
	}
	if (retry == 0)
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (0);
}

static void
gen_update_link_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;
	u_int speed;

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
			speed = GENET_UMAC_CMD_SPEED_1000;
			sc->link = 1;
			break;
		case IFM_100_TX:
			speed = GENET_UMAC_CMD_SPEED_100;
			sc->link = 1;
			break;
		case IFM_10_T:
			speed = GENET_UMAC_CMD_SPEED_10;
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
	if (sc->phy_mode == MII_CONTYPE_RGMII)
		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	else
		val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);

	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_SPEED;
	val |= speed;
	WR4(sc, GENET_UMAC_CMD, val);
}

static void
gen_link_task(void *arg, int pending)
{
	struct gen_softc *sc;

	sc = arg;

	GEN_LOCK(sc);
	gen_update_link_locked(sc);
	GEN_UNLOCK(sc);
}

static void
gen_miibus_statchg(device_t dev)
{
	struct gen_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}

static void
gen_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct gen_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	GEN_UNLOCK(sc);
}

static int
gen_media_change(if_t ifp)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	int error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	error = mii_mediachg(mii);
	GEN_UNLOCK(sc);

	return (error);
}

static device_method_t gen_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gen_probe),
	DEVMETHOD(device_attach,	gen_attach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	gen_miibus_readreg),
	DEVMETHOD(miibus_writereg,	gen_miibus_writereg),
	DEVMETHOD(miibus_statchg,	gen_miibus_statchg),

	DEVMETHOD_END
};

static driver_t gen_driver = {
	"genet",
	gen_methods,
	sizeof(struct gen_softc),
};

DRIVER_MODULE(genet, simplebus, gen_driver, 0, 0);
DRIVER_MODULE(miibus, genet, miibus_driver, 0, 0);
MODULE_DEPEND(genet, ether, 1, 1, 1);
MODULE_DEPEND(genet, miibus, 1, 1, 1);