xref: /freebsd/sys/arm64/broadcom/genet/if_genet.c (revision 608da65de9552d5678c1000776ed69da04a45983)
1 /*-
2  * Copyright (c) 2020 Michael J Karels
3  * Copyright (c) 2016, 2020 Jared McNeill <jmcneill@invisible.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
19  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
20  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
21  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
22  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /*
28  * RPi4 (BCM 2711) Gigabit Ethernet ("GENET") controller
29  *
30  * This driver is derived in large part from bcmgenet.c from NetBSD by
31  * Jared McNeill.  Parts of the structure and other common code in
32  * this driver have been copied from if_awg.c for the Allwinner EMAC,
33  * also by Jared McNeill.
34  */
35 
36 #include "opt_device_polling.h"
37 
38 #include <sys/cdefs.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/rman.h>
43 #include <sys/kernel.h>
44 #include <sys/endian.h>
45 #include <sys/mbuf.h>
46 #include <sys/socket.h>
47 #include <sys/sockio.h>
48 #include <sys/sysctl.h>
49 #include <sys/module.h>
50 #include <sys/taskqueue.h>
51 #include <sys/gpio.h>
52 
53 #include <net/bpf.h>
54 #include <net/if.h>
55 #include <net/ethernet.h>
56 #include <net/if_dl.h>
57 #include <net/if_media.h>
58 #include <net/if_types.h>
59 #include <net/if_var.h>
60 
61 #include <machine/bus.h>
62 
63 #include <dev/ofw/ofw_bus.h>
64 #include <dev/ofw/ofw_bus_subr.h>
65 
66 #define __BIT(_x)	(1 << (_x))
67 #include "if_genetreg.h"
68 
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
71 #include <dev/mii/mii_fdt.h>
72 
73 #include <netinet/in.h>
74 #include <netinet/ip.h>
75 #include <netinet/ip6.h>
76 
77 #include "syscon_if.h"
78 #include "miibus_if.h"
79 #include "gpio_if.h"
80 
81 #define	RD4(sc, reg)		bus_read_4((sc)->res[_RES_MAC], (reg))
82 #define	WR4(sc, reg, val)	bus_write_4((sc)->res[_RES_MAC], (reg), (val))
83 
84 #define	GEN_LOCK(sc)		mtx_lock(&(sc)->mtx)
85 #define	GEN_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
86 #define	GEN_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
87 #define	GEN_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED)
88 
89 #define	TX_DESC_COUNT		GENET_DMA_DESC_COUNT
90 #define	RX_DESC_COUNT		GENET_DMA_DESC_COUNT
91 
92 #define	TX_NEXT(n, count)		(((n) + 1) & ((count) - 1))
93 #define	RX_NEXT(n, count)		(((n) + 1) & ((count) - 1))
94 
95 #define	TX_MAX_SEGS		20
96 
97 static SYSCTL_NODE(_hw, OID_AUTO, genet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
98     "genet driver parameters");
99 
100 /* Maximum number of mbufs to pass per call to if_input */
101 static int gen_rx_batch = 16 /* RX_BATCH_DEFAULT */;
102 SYSCTL_INT(_hw_genet, OID_AUTO, rx_batch, CTLFLAG_RDTUN,
103     &gen_rx_batch, 0, "max mbufs per call to if_input");
104 
105 TUNABLE_INT("hw.gen.rx_batch", &gen_rx_batch);	/* old name/interface */
106 
107 /*
108  * Transmitting packets with only an Ethernet header in the first mbuf
109  * fails.  Examples include reflected ICMPv6 packets, e.g. echo replies;
110  * forwarded IPv6/TCP packets; and forwarded IPv4/TCP packets that use NAT
111  * with IPFW.  Pulling up the sizes of ether_header + ip6_hdr + icmp6_hdr
112  * seems to work for both ICMPv6 and TCP over IPv6, as well as the IPv4/TCP
113  * case.
114  */
115 static int gen_tx_hdr_min = 56;		/* ether_header + ip6_hdr + icmp6_hdr */
116 SYSCTL_INT(_hw_genet, OID_AUTO, tx_hdr_min, CTLFLAG_RW,
117     &gen_tx_hdr_min, 0, "header to add to packets with ether header only");
118 
/*
 * FDT compatible strings we attach to; ocd_data carries the GENET major
 * version (only v5 is accepted later in gen_attach()).
 */
static struct ofw_compat_data compat_data[] = {
	{ "brcm,genet-v1",		1 },
	{ "brcm,genet-v2",		2 },
	{ "brcm,genet-v3",		3 },
	{ "brcm,genet-v4",		4 },
	{ "brcm,genet-v5",		5 },
	{ "brcm,bcm2711-genet-v5",	5 },
	{ NULL,				0 }
};
128 
/* Indices into sc->res[], matching gen_spec[] below. */
enum {
	_RES_MAC,		/* what to call this? */
	_RES_IRQ1,
	_RES_IRQ2,
	_RES_NITEMS
};
135 
/* One MMIO register window and two interrupt lines (see gen_intr/gen_intr2). */
static struct resource_spec gen_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ -1, 0 }
};
142 
/* structure per ring entry: DMA map plus the mbuf currently loaded into it */
struct gen_ring_ent {
	bus_dmamap_t		map;
	struct mbuf		*mbuf;
};
148 
/* Software state for one TX ring; indices shadow the hardware registers. */
struct tx_queue {
	int			hwindex;		/* hardware index */
	int			nentries;		/* ring size */
	u_int			queued;			/* or avail? */
	u_int			cur;			/* next ring slot to load */
	u_int			next;
	u_int			prod_idx;		/* cached producer index */
	u_int			cons_idx;		/* cached consumer index */
	struct gen_ring_ent	*entries;		/* slice of tx_ring_ent[] */
};
159 
/* Software state for one RX ring; indices shadow the hardware registers. */
struct rx_queue {
	int			hwindex;		/* hardware index */
	int			nentries;		/* ring size */
	u_int			cur;			/* next ring slot to process */
	u_int			prod_idx;		/* cached producer index */
	u_int			cons_idx;		/* cached consumer index */
	struct gen_ring_ent	*entries;		/* slice of rx_ring_ent[] */
};
168 
/* Per-device driver state, protected by sc->mtx unless noted. */
struct gen_softc {
	struct resource		*res[_RES_NITEMS];	/* MMIO + two IRQs */
	struct mtx		mtx;
	if_t			ifp;
	device_t		dev;
	device_t		miibus;
	mii_contype_t		phy_mode;		/* RGMII variant from FDT */

	struct callout		stat_ch;		/* periodic gen_tick() */
	struct task		link_task;
	void			*ih;			/* IRQ1 cookie */
	void			*ih2;			/* IRQ2 cookie */
	int			type;			/* GENET version from compat_data */
	int			if_flags;
	int			link;			/* nonzero when link is up */
	bus_dma_tag_t		tx_buf_tag;
	/*
	 * The genet chip has multiple queues for transmit and receive.
	 * This driver uses only one (queue 16, the default), but is cast
	 * with multiple rings.  The additional rings are used for different
	 * priorities.
	 */
#define DEF_TXQUEUE	0
#define NTXQUEUE	1
	struct tx_queue		tx_queue[NTXQUEUE];
	struct gen_ring_ent	tx_ring_ent[TX_DESC_COUNT];  /* ring entries */

	bus_dma_tag_t		rx_buf_tag;
#define DEF_RXQUEUE	0
#define NRXQUEUE	1
	struct rx_queue		rx_queue[NRXQUEUE];
	struct gen_ring_ent	rx_ring_ent[RX_DESC_COUNT];  /* ring entries */
};
202 
203 static void gen_init(void *softc);
204 static void gen_start(if_t ifp);
205 static void gen_destroy(struct gen_softc *sc);
206 static int gen_encap(struct gen_softc *sc, struct mbuf **mp);
207 static int gen_parse_tx(struct mbuf *m, int csum_flags);
208 static int gen_ioctl(if_t ifp, u_long cmd, caddr_t data);
209 static int gen_get_phy_mode(device_t dev);
210 static bool gen_get_eaddr(device_t dev, struct ether_addr *eaddr);
211 static void gen_set_enaddr(struct gen_softc *sc);
212 static void gen_setup_rxfilter(struct gen_softc *sc);
213 static void gen_reset(struct gen_softc *sc);
214 static void gen_enable(struct gen_softc *sc);
215 static void gen_dma_disable(struct gen_softc *sc);
216 static int gen_bus_dma_init(struct gen_softc *sc);
217 static void gen_bus_dma_teardown(struct gen_softc *sc);
218 static void gen_enable_intr(struct gen_softc *sc);
219 static void gen_init_txrings(struct gen_softc *sc);
220 static void gen_init_rxrings(struct gen_softc *sc);
221 static void gen_intr(void *softc);
222 static int gen_rxintr(struct gen_softc *sc, struct rx_queue *q);
223 static void gen_txintr(struct gen_softc *sc, struct tx_queue *q);
224 static void gen_intr2(void *softc);
225 static int gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index);
226 static int gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
227     struct mbuf *m);
228 static void gen_link_task(void *arg, int pending);
229 static void gen_media_status(if_t ifp, struct ifmediareq *ifmr);
230 static int gen_media_change(if_t ifp);
231 static void gen_tick(void *softc);
232 
233 static int
234 gen_probe(device_t dev)
235 {
236 	if (!ofw_bus_status_okay(dev))
237 		return (ENXIO);
238 
239 	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
240 		return (ENXIO);
241 
242 	device_set_desc(dev, "RPi4 Gigabit Ethernet");
243 	return (BUS_PROBE_DEFAULT);
244 }
245 
/*
 * Device attach: allocate bus resources, verify the controller version,
 * initialize locking and DMA state, hook up interrupts and the MII bus,
 * and attach the ethernet interface.  On any failure, gen_destroy()
 * releases whatever was set up so far.
 */
static int
gen_attach(device_t dev)
{
	struct ether_addr eaddr;
	struct gen_softc *sc;
	int major, minor, error, mii_flags;
	bool eaddr_found;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	if (bus_alloc_resources(dev, gen_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		error = ENXIO;
		goto fail;
	}

	/* Only GENET major version 5 (BCM2711 / RPi4) is supported. */
	major = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MAJOR) >> REV_MAJOR_SHIFT;
	if (major != REV_MAJOR_V5) {
		device_printf(dev, "version %d is not supported\n", major);
		error = ENXIO;
		goto fail;
	}
	minor = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MINOR) >> REV_MINOR_SHIFT;
	device_printf(dev, "GENET version 5.%d phy 0x%04x\n", minor,
		RD4(sc, GENET_SYS_REV_CTRL) & REV_PHY);

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
	TASK_INIT(&sc->link_task, 0, gen_link_task, sc);

	error = gen_get_phy_mode(dev);
	if (error != 0)
		goto fail;

	/*
	 * Try to find a MAC address now; if none is found we generate one
	 * after mii_attach() below.
	 */
	bzero(&eaddr, sizeof(eaddr));
	eaddr_found = gen_get_eaddr(dev, &eaddr);

	/* reset core */
	gen_reset(sc);

	gen_dma_disable(sc);

	/* Setup DMA */
	error = gen_bus_dma_init(sc);
	if (error != 0) {
		device_printf(dev, "cannot setup bus dma\n");
		goto fail;
	}

	/* Setup ethernet interface */
	sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp, sc);
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(sc->ifp, gen_start);
	if_setioctlfn(sc->ifp, gen_ioctl);
	if_setinitfn(sc->ifp, gen_init);
	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
	if_setsendqready(sc->ifp);
#define GEN_CSUM_FEATURES	(CSUM_UDP | CSUM_TCP)
	if_sethwassist(sc->ifp, GEN_CSUM_FEATURES);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));

	/* Install interrupt handlers */
	error = bus_setup_intr(dev, sc->res[_RES_IRQ1],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler1\n");
		goto fail;
	}

	error = bus_setup_intr(dev, sc->res[_RES_IRQ2],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr2, sc, &sc->ih2);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler2\n");
		goto fail;
	}

	/* Attach MII driver; RGMII variants need internal delay flags. */
	mii_flags = 0;
	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII_ID:
		mii_flags |= MIIF_RX_DELAY | MIIF_TX_DELAY;
		break;
	case MII_CONTYPE_RGMII_RXID:
		mii_flags |= MIIF_RX_DELAY;
		break;
	case MII_CONTYPE_RGMII_TXID:
		mii_flags |= MIIF_TX_DELAY;
		break;
	default:
		break;
	}
	error = mii_attach(dev, &sc->miibus, sc->ifp, gen_media_change,
	    gen_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    mii_flags);
	if (error != 0) {
		device_printf(dev, "cannot attach PHY\n");
		goto fail;
	}

	/* If address was not found, create one based on the hostid and name. */
	if (eaddr_found == 0)
		ether_gen_addr(sc->ifp, &eaddr);
	/* Attach ethernet interface */
	ether_ifattach(sc->ifp, eaddr.octet);

fail:
	if (error)
		gen_destroy(sc);
	return (error);
}
363 
/* Free resources after failed attach.  This is not a complete detach. */
static void
gen_destroy(struct gen_softc *sc)
{

	if (sc->miibus) {	/* can't happen */
		device_delete_child(sc->dev, sc->miibus);
		sc->miibus = NULL;
	}
	/*
	 * NOTE(review): attach may have failed before the interrupt
	 * handlers were installed; assumes teardown with NULL cookies is
	 * harmless here — confirm against bus_teardown_intr(9).
	 */
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ1], sc->ih);
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ2], sc->ih2);
	gen_bus_dma_teardown(sc);
	callout_drain(&sc->stat_ch);
	if (mtx_initialized(&sc->mtx))
		mtx_destroy(&sc->mtx);
	bus_release_resources(sc->dev, gen_spec, sc->res);
	if (sc->ifp != NULL) {
		if_free(sc->ifp);
		sc->ifp = NULL;
	}
}
385 
386 static int
387 gen_get_phy_mode(device_t dev)
388 {
389 	struct gen_softc *sc;
390 	phandle_t node;
391 	mii_contype_t type;
392 	int error = 0;
393 
394 	sc = device_get_softc(dev);
395 	node = ofw_bus_get_node(dev);
396 	type = mii_fdt_get_contype(node);
397 
398 	switch (type) {
399 	case MII_CONTYPE_RGMII:
400 	case MII_CONTYPE_RGMII_ID:
401 	case MII_CONTYPE_RGMII_RXID:
402 	case MII_CONTYPE_RGMII_TXID:
403 		sc->phy_mode = type;
404 		break;
405 	default:
406 		device_printf(dev, "unknown phy-mode '%s'\n",
407 		    mii_fdt_contype_to_name(type));
408 		error = ENXIO;
409 		break;
410 	}
411 
412 	return (error);
413 }
414 
415 static bool
416 gen_get_eaddr(device_t dev, struct ether_addr *eaddr)
417 {
418 	struct gen_softc *sc;
419 	uint32_t maclo, machi, val;
420 	phandle_t node;
421 
422 	sc = device_get_softc(dev);
423 
424 	node = ofw_bus_get_node(dev);
425 	if (OF_getprop(node, "mac-address", eaddr->octet,
426 	    ETHER_ADDR_LEN) != -1 ||
427 	    OF_getprop(node, "local-mac-address", eaddr->octet,
428 	    ETHER_ADDR_LEN) != -1 ||
429 	    OF_getprop(node, "address", eaddr->octet, ETHER_ADDR_LEN) != -1)
430 		return (true);
431 
432 	device_printf(dev, "No Ethernet address found in fdt!\n");
433 	maclo = machi = 0;
434 
435 	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
436 	if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) {
437 		maclo = htobe32(RD4(sc, GENET_UMAC_MAC0));
438 		machi = htobe16(RD4(sc, GENET_UMAC_MAC1) & 0xffff);
439 	}
440 
441 	if (maclo == 0 && machi == 0) {
442 		if (bootverbose)
443 			device_printf(dev,
444 			    "No Ethernet address found in controller\n");
445 		return (false);
446 	} else {
447 		eaddr->octet[0] = maclo & 0xff;
448 		eaddr->octet[1] = (maclo >> 8) & 0xff;
449 		eaddr->octet[2] = (maclo >> 16) & 0xff;
450 		eaddr->octet[3] = (maclo >> 24) & 0xff;
451 		eaddr->octet[4] = machi & 0xff;
452 		eaddr->octet[5] = (machi >> 8) & 0xff;
453 		return (true);
454 	}
455 }
456 
/*
 * Reset the controller: pulse the RBUF flush reset bit, soft-reset the
 * UMAC (with local loopback set during the reset pulse), then reset and
 * re-enable the MIB counters.  Register write order and the DELAY()s
 * between steps are deliberate.
 */
static void
gen_reset(struct gen_softc *sc)
{
	uint32_t val;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	DELAY(10);

	WR4(sc, GENET_UMAC_CMD, 0);
	/* Assert the UMAC software reset, then release it. */
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	DELAY(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);
}
484 
/*
 * Enable the MAC: set the maximum frame length, request 2-byte receive
 * alignment (keeps the IP header 32-bit aligned after the 14-byte
 * Ethernet header), enable TX/RX, and unmask the DMA-done interrupts.
 */
static void
gen_enable(struct gen_softc *sc)
{
	u_int val;

	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	gen_enable_intr(sc);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}
509 
/* Mask all INTRL2 interrupt sources (SET_MASK masks, CLEAR_MASK unmasks). */
static void
gen_disable_intr(struct gen_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, GENET_INTRL2_CPU_SET_MASK, 0xffffffff);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK, 0xffffffff);
}
517 
/*
 * Quiesce the MAC: stop the receiver first, then the transmitter, then
 * mask all interrupts.
 */
static void
gen_disable(struct gen_softc *sc)
{
	uint32_t val;

	/* Stop receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Stop transmitter */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_TXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Disable Interrupt */
	gen_disable_intr(sc);
}
536 
537 static void
538 gen_enable_offload(struct gen_softc *sc)
539 {
540 	uint32_t check_ctrl, buf_ctrl;
541 
542 	check_ctrl = RD4(sc, GENET_RBUF_CHECK_CTRL);
543 	buf_ctrl  = RD4(sc, GENET_RBUF_CTRL);
544 	if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0) {
545 		check_ctrl |= GENET_RBUF_CHECK_CTRL_EN;
546 		buf_ctrl |= GENET_RBUF_64B_EN;
547 	} else {
548 		check_ctrl &= ~GENET_RBUF_CHECK_CTRL_EN;
549 		buf_ctrl &= ~GENET_RBUF_64B_EN;
550 	}
551 	WR4(sc, GENET_RBUF_CHECK_CTRL, check_ctrl);
552 	WR4(sc, GENET_RBUF_CTRL, buf_ctrl);
553 
554 	buf_ctrl  = RD4(sc, GENET_TBUF_CTRL);
555 	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
556 	    0)
557 		buf_ctrl |= GENET_RBUF_64B_EN;
558 	else
559 		buf_ctrl &= ~GENET_RBUF_64B_EN;
560 	WR4(sc, GENET_TBUF_CTRL, buf_ctrl);
561 }
562 
/*
 * Disable the TX and RX DMA engines and the default queue's ring-buffer
 * enable bits.  Called from attach (before ring setup) and gen_stop().
 */
static void
gen_dma_disable(struct gen_softc *sc)
{
	int val;

	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	val &= ~GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	val &= ~GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}
578 
/*
 * Create the TX and RX buffer DMA tags and one map per ring descriptor.
 * TX buffers may span up to TX_MAX_SEGS segments; RX buffers are a single
 * mbuf cluster.  Both tags are limited to 40-bit addresses.  On failure
 * the caller tears down via gen_bus_dma_teardown() (from gen_destroy()).
 *
 * Returns 0 on success or a busdma error code.
 */
static int
gen_bus_dma_init(struct gen_softc *sc)
{
	device_t dev = sc->dev;
	int i, error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx_buf_tag, 0,
		    &sc->tx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->rx_buf_tag, 0,
		    &sc->rx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
	}
	return (0);
}
636 
637 static void
638 gen_bus_dma_teardown(struct gen_softc *sc)
639 {
640 	int i, error;
641 
642 	if (sc->tx_buf_tag != NULL) {
643 		for (i = 0; i < TX_DESC_COUNT; i++) {
644 			error = bus_dmamap_destroy(sc->tx_buf_tag,
645 			    sc->tx_ring_ent[i].map);
646 			sc->tx_ring_ent[i].map = NULL;
647 			if (error)
648 				device_printf(sc->dev,
649 				    "%s: bus_dmamap_destroy failed: %d\n",
650 				    __func__, error);
651 		}
652 		error = bus_dma_tag_destroy(sc->tx_buf_tag);
653 		sc->tx_buf_tag = NULL;
654 		if (error)
655 			device_printf(sc->dev,
656 			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
657 			    error);
658 	}
659 
660 	if (sc->tx_buf_tag != NULL) {
661 		for (i = 0; i < RX_DESC_COUNT; i++) {
662 			error = bus_dmamap_destroy(sc->rx_buf_tag,
663 			    sc->rx_ring_ent[i].map);
664 			sc->rx_ring_ent[i].map = NULL;
665 			if (error)
666 				device_printf(sc->dev,
667 				    "%s: bus_dmamap_destroy failed: %d\n",
668 				    __func__, error);
669 		}
670 		error = bus_dma_tag_destroy(sc->rx_buf_tag);
671 		sc->rx_buf_tag = NULL;
672 		if (error)
673 			device_printf(sc->dev,
674 			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
675 			    error);
676 	}
677 }
678 
/* Unmask the TX-done and RX-done DMA interrupts on INTRL2. */
static void
gen_enable_intr(struct gen_softc *sc)
{

	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}
686 
687 /*
688  * "queue" is the software queue index (0-4); "qid" is the hardware index
689  * (0-16).  "base" is the starting index in the ring array.
690  */
static void
gen_init_txring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct tx_queue *q;
	uint32_t val;

	/* Bind the software queue to its slice of the ring-entry array. */
	q = &sc->tx_queue[queue];
	q->entries = &sc->tx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;

	/* TX ring */

	q->queued = 0;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	/* Reset the hardware ring pointers and producer/consumer indices. */
	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
	/* Ring geometry: descriptor count and per-buffer length. */
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_TX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_TX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	/* End address spans the entire descriptor area (word units). */
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	/* Interrupt after every completed mbuf. */
	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_TX_DMA_CTRL, val);
}
735 
736 /*
737  * "queue" is the software queue index (0-4); "qid" is the hardware index
738  * (0-16).  "base" is the starting index in the ring array.
739  */
static void
gen_init_rxring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct rx_queue *q;
	uint32_t val;
	int i;

	/* Bind the software queue to its slice of the ring-entry array. */
	q = &sc->rx_queue[queue];
	q->entries = &sc->rx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	/* Reset the hardware ring pointers and producer/consumer indices. */
	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
	/* Ring geometry: descriptor count and per-buffer length. */
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_RX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_RX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	/* End address spans the entire descriptor area (word units). */
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    (5 << GENET_RX_DMA_XON_XOFF_THRES_LO_SHIFT) | (RX_DESC_COUNT >> 4));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/*
	 * fill ring
	 * NOTE(review): this always fills the default queue, not "q"; fine
	 * while only DEF_RXQUEUE exists, but would need fixing for PRI_RINGS.
	 */
	for (i = 0; i < RX_DESC_COUNT; i++)
		gen_newbuf_rx(sc, &sc->rx_queue[DEF_RXQUEUE], i);

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}
785 
/* Initialize all TX rings; only the default queue unless PRI_RINGS is set. */
static void
gen_init_txrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_txring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->tx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_txring(sc, DEF_TXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    TX_DESC_COUNT);
	sc->tx_queue[DEF_TXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}
808 
/* Initialize all RX rings; only the default queue unless PRI_RINGS is set. */
static void
gen_init_rxrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_rxring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->rx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_rxring(sc, DEF_RXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    RX_DESC_COUNT);
	sc->rx_queue[DEF_RXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;

}
832 
/*
 * Stop the interface: cancel the stat callout, reset and disable the MAC
 * and DMA engines, then unload and free every mbuf still held by the TX
 * and RX rings.  Caller must hold the driver lock.
 */
static void
gen_stop(struct gen_softc *sc)
{
	int i;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	callout_stop(&sc->stat_ch);
	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
	gen_reset(sc);
	gen_disable(sc);
	gen_dma_disable(sc);

	/* Clear the tx/rx ring buffer */
	for (i = 0; i < TX_DESC_COUNT; i++) {
		ent = &sc->tx_ring_ent[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
				BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
		}
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		ent = &sc->rx_ring_ent[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->rx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
		}
	}
}
870 
/*
 * Bring the interface up: select the port mode for the configured PHY
 * connection, program the MAC address and RX filter, initialize the
 * TX/RX rings, enable the MAC and offload engines, then kick off media
 * selection and the periodic tick.  No-op if already running.  Caller
 * must hold the driver lock.
 */
static void
gen_init_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	GEN_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		WR4(sc, GENET_SYS_PORT_CTRL, GENET_SYS_PORT_MODE_EXT_GPHY);
		break;
	default:
		WR4(sc, GENET_SYS_PORT_CTRL, 0);
	}

	gen_set_enaddr(sc);

	/* Setup RX filter */
	gen_setup_rxfilter(sc);

	gen_init_txrings(sc);
	gen_init_rxrings(sc);
	gen_enable(sc);
	gen_enable_offload(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}
912 
/* if_init callback: bring the interface up under the driver lock. */
static void
gen_init(void *softc)
{
	struct gen_softc *sc = softc;

	GEN_LOCK(sc);
	gen_init_locked(sc);
	GEN_UNLOCK(sc);
}
923 
924 static uint8_t ether_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
925 
926 static void
927 gen_setup_rxfilter_mdf(struct gen_softc *sc, u_int n, const uint8_t *ea)
928 {
929 	uint32_t addr0 = (ea[0] << 8) | ea[1];
930 	uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];
931 
932 	WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
933 	WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
934 }
935 
936 static u_int
937 gen_setup_multi(void *arg, struct sockaddr_dl *sdl, u_int count)
938 {
939 	struct gen_softc *sc = arg;
940 
941 	/* "count + 2" to account for unicast and broadcast */
942 	gen_setup_rxfilter_mdf(sc, count + 2, LLADDR(sdl));
943 	return (1);		/* increment to count */
944 }
945 
/*
 * Program the receive filter.  If the required filters fit in the MDF
 * table, install broadcast, unicast, and each multicast address;
 * otherwise fall back to promiscuous/allmulti mode.  Caller must hold
 * the driver lock.
 */
static void
gen_setup_rxfilter(struct gen_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t cmd, mdf_ctrl;
	u_int n;

	GEN_ASSERT_LOCKED(sc);

	cmd = RD4(sc, GENET_UMAC_CMD);

	/*
	 * Count the required number of hardware filters. We need one
	 * for each multicast address, plus one for our own address and
	 * the broadcast address.
	 */
	n = if_llmaddr_count(ifp) + 2;

	if (n > GENET_MAX_MDF_FILTER)
		if_setflagbits(ifp, IFF_ALLMULTI, 0);
	else
		if_setflagbits(ifp, 0, IFF_ALLMULTI);

	if ((if_getflags(ifp) & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
		cmd |= GENET_UMAC_CMD_PROMISC;
		mdf_ctrl = 0;
	} else {
		cmd &= ~GENET_UMAC_CMD_PROMISC;
		gen_setup_rxfilter_mdf(sc, 0, ether_broadcastaddr);
		gen_setup_rxfilter_mdf(sc, 1, if_getlladdr(ifp));
		(void) if_foreach_llmaddr(ifp, gen_setup_multi, sc);
		/* Enable slots 0..n-1; the enable bits count down from the top. */
		mdf_ctrl = (__BIT(GENET_MAX_MDF_FILTER) - 1)  &~
		    (__BIT(GENET_MAX_MDF_FILTER - n) - 1);
	}

	WR4(sc, GENET_UMAC_CMD, cmd);
	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);
}
984 
985 static void
986 gen_set_enaddr(struct gen_softc *sc)
987 {
988 	uint8_t *enaddr;
989 	uint32_t val;
990 	if_t ifp;
991 
992 	GEN_ASSERT_LOCKED(sc);
993 
994 	ifp = sc->ifp;
995 
996 	/* Write our unicast address */
997 	enaddr = if_getlladdr(ifp);
998 	/* Write hardware address */
999 	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
1000 	    (enaddr[0] << 24);
1001 	WR4(sc, GENET_UMAC_MAC0, val);
1002 	val = enaddr[5] | (enaddr[4] << 8);
1003 	WR4(sc, GENET_UMAC_MAC1, val);
1004 }
1005 
/*
 * Drain the interface send queue, encapsulating each packet onto the TX
 * ring.  Does nothing without link or when not running.  On an encap
 * failure the packet is either requeued (still owned by us) or counted
 * as an output error (freed by gen_encap).  Caller must hold the driver
 * lock.
 */
static void
gen_start_locked(struct gen_softc *sc)
{
	struct mbuf *m;
	if_t ifp;
	int err;

	GEN_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	while (true) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		err = gen_encap(sc, &m);
		if (err != 0) {
			/* Ring full: mark OACTIVE so upper layers back off. */
			if (err == ENOBUFS)
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			else if (m == NULL)
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			/* Packet still ours: put it back for a later retry. */
			if (m != NULL)
				if_sendq_prepend(ifp, m);
			break;
		}
		bpf_mtap_if(ifp, m);
	}
}
1042 
1043 static void
1044 gen_start(if_t ifp)
1045 {
1046 	struct gen_softc *sc;
1047 
1048 	sc = if_getsoftc(ifp);
1049 
1050 	GEN_LOCK(sc);
1051 	gen_start_locked(sc);
1052 	GEN_UNLOCK(sc);
1053 }
1054 
1055 /* Test for any delayed checksum */
1056 #define CSUM_DELAY_ANY	(CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)
1057 
/*
 * Enqueue one outgoing mbuf chain on the default TX ring.
 *
 * On success, the (possibly re-allocated) chain is stored back through
 * *mp and 0 is returned.  On failure, *mp is NULL if the chain was
 * consumed and freed here, or still points at a valid chain the caller
 * may requeue (notably the ENOBUFS ring-full case).
 */
static int
gen_encap(struct gen_softc *sc, struct mbuf **mp)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, first, i, index, offset;
	uint32_t csuminfo, length_status, csum_flags = 0, csumdata;
	struct mbuf *m;
	struct statusblock *sb = NULL;
	struct tx_queue *q;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	q = &sc->tx_queue[DEF_TXQUEUE];

	m = *mp;

	/*
	 * Don't attempt to send packets with only an Ethernet header in
	 * first mbuf; see comment above with gen_tx_hdr_min.
	 */
	if (m->m_len == sizeof(struct ether_header)) {
		m = m_pullup(m, MIN(m->m_pkthdr.len, gen_tx_hdr_min));
		if (m == NULL) {
			if (if_getflags(sc->ifp) & IFF_DEBUG)
				device_printf(sc->dev,
				    "header pullup fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
	}

	/*
	 * With TX checksum offload enabled, prepend a status block that
	 * tells the hardware where (and whether) to insert the checksum.
	 */
	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0) {
		csum_flags = m->m_pkthdr.csum_flags;
		csumdata = m->m_pkthdr.csum_data;
		M_PREPEND(m, sizeof(struct statusblock), M_NOWAIT);
		if (m == NULL) {
			if (if_getflags(sc->ifp) & IFF_DEBUG)
				device_printf(sc->dev, "prepend fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
		/* Locate the transport header; may copy headers around. */
		offset = gen_parse_tx(m, csum_flags);
		sb = mtod(m, struct statusblock *);
		if ((csum_flags & CSUM_DELAY_ANY) != 0) {
			csuminfo = (offset << TXCSUM_OFF_SHIFT) |
			    (offset + csumdata);
			csuminfo |= TXCSUM_LEN_VALID;
			if (csum_flags & (CSUM_UDP | CSUM_IP6_UDP))
				csuminfo |= TXCSUM_UDP;
			sb->txcsuminfo = csuminfo;
		} else
			sb->txcsuminfo = 0;
	}

	*mp = m;

	cur = first = q->cur;
	ent = &q->entries[cur];
	map = ent->map;
	error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: compact the chain and retry once. */
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL) {
			device_printf(sc->dev,
			    "gen_encap: m_collapse failed\n");
			m_freem(*mp);
			*mp = NULL;
			return (ENOMEM);
		}
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*mp);
			*mp = NULL;
		}
	}
	if (error != 0) {
		device_printf(sc->dev,
		    "gen_encap: bus_dmamap_load_mbuf_sg failed\n");
		return (error);
	}
	if (nsegs == 0) {
		m_freem(*mp);
		*mp = NULL;
		return (EIO);
	}

	/* Remove statusblock after mapping, before possible requeue or bpf. */
	if (sb != NULL) {
		m->m_data += sizeof(struct statusblock);
		m->m_len -= sizeof(struct statusblock);
		m->m_pkthdr.len -= sizeof(struct statusblock);
	}
	/* Ring full: keep the chain intact so the caller can requeue it. */
	if (q->queued + nsegs > q->nentries) {
		bus_dmamap_unload(sc->tx_buf_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->tx_buf_tag, map, BUS_DMASYNC_PREWRITE);

	/* Write one hardware descriptor per DMA segment. */
	index = q->prod_idx & (q->nentries - 1);
	for (i = 0; i < nsegs; i++) {
		ent = &q->entries[cur];
		length_status = GENET_TX_DESC_STATUS_QTAG_MASK;
		if (i == 0) {
			/* First segment: start-of-packet, append CRC. */
			length_status |= GENET_TX_DESC_STATUS_SOP |
			    GENET_TX_DESC_STATUS_CRC;
			if ((csum_flags & CSUM_DELAY_ANY) != 0)
				length_status |= GENET_TX_DESC_STATUS_CKSUM;
		}
		if (i == nsegs - 1)
			length_status |= GENET_TX_DESC_STATUS_EOP;

		length_status |= segs[i].ds_len <<
		    GENET_TX_DESC_STATUS_BUFLEN_SHIFT;

		WR4(sc, GENET_TX_DESC_ADDRESS_LO(index),
		    (uint32_t)segs[i].ds_addr);
		WR4(sc, GENET_TX_DESC_ADDRESS_HI(index),
		    (uint32_t)(segs[i].ds_addr >> 32));
		WR4(sc, GENET_TX_DESC_STATUS(index), length_status);

		++q->queued;
		cur = TX_NEXT(cur, q->nentries);
		index = TX_NEXT(index, q->nentries);
	}

	/* Publish the new producer index to start transmission. */
	q->prod_idx += nsegs;
	q->prod_idx &= GENET_TX_DMA_PROD_CONS_MASK;
	/* We probably don't need to write the producer index on every iter */
	if (nsegs != 0)
		WR4(sc, GENET_TX_DMA_PROD_INDEX(q->hwindex), q->prod_idx);
	q->cur = cur;

	/* Store mbuf in the first ring entry; gen_txintr frees it there. */
	q->entries[first].mbuf = m;

	return (0);
}
1202 
1203 /*
1204  * Parse a packet to find the offset of the transport header for checksum
1205  * offload.  Ensure that the link and network headers are contiguous with
1206  * the status block, or transmission fails.
1207  */
1208 static int
1209 gen_parse_tx(struct mbuf *m, int csum_flags)
1210 {
1211 	int offset, off_in_m;
1212 	bool copy = false, shift = false;
1213 	u_char *p, *copy_p = NULL;
1214 	struct mbuf *m0 = m;
1215 	uint16_t ether_type;
1216 
1217 	if (m->m_len == sizeof(struct statusblock)) {
1218 		/* M_PREPEND placed statusblock at end; move to beginning */
1219 		m->m_data = m->m_pktdat;
1220 		copy_p = mtodo(m, sizeof(struct statusblock));
1221 		m = m->m_next;
1222 		off_in_m = 0;
1223 		p = mtod(m, u_char *);
1224 		copy = true;
1225 	} else {
1226 		/*
1227 		 * If statusblock is not at beginning of mbuf (likely),
1228 		 * then remember to move mbuf contents down before copying
1229 		 * after them.
1230 		 */
1231 		if ((m->m_flags & M_EXT) == 0 && m->m_data != m->m_pktdat)
1232 			shift = true;
1233 		p = mtodo(m, sizeof(struct statusblock));
1234 		off_in_m = sizeof(struct statusblock);
1235 	}
1236 
1237 /*
1238  * If headers need to be copied contiguous to statusblock, do so.
1239  * If copying to the internal mbuf data area, and the status block
1240  * is not at the beginning of that area, shift the status block (which
1241  * is empty) and following data.
1242  */
1243 #define COPY(size) {							\
1244 	int hsize = size;						\
1245 	if (copy) {							\
1246 		if (shift) {						\
1247 			u_char *p0;					\
1248 			shift = false;					\
1249 			p0 = mtodo(m0, sizeof(struct statusblock));	\
1250 			m0->m_data = m0->m_pktdat;			\
1251 			bcopy(p0, mtodo(m0, sizeof(struct statusblock)),\
1252 			    m0->m_len - sizeof(struct statusblock));	\
1253 			copy_p = mtodo(m0, m0->m_len);			\
1254 		}							\
1255 		bcopy(p, copy_p, hsize);				\
1256 		m0->m_len += hsize;					\
1257 		m->m_len -= hsize;					\
1258 		m->m_data += hsize;					\
1259 	}								\
1260 	copy_p += hsize;						\
1261 }
1262 
1263 	KASSERT((sizeof(struct statusblock) + sizeof(struct ether_vlan_header) +
1264 	    sizeof(struct ip6_hdr) <= MLEN), ("%s: mbuf too small", __func__));
1265 
1266 	if (((struct ether_header *)p)->ether_type == htons(ETHERTYPE_VLAN)) {
1267 		offset = sizeof(struct ether_vlan_header);
1268 		ether_type = ntohs(((struct ether_vlan_header *)p)->evl_proto);
1269 		COPY(sizeof(struct ether_vlan_header));
1270 		if (m->m_len == off_in_m + sizeof(struct ether_vlan_header)) {
1271 			m = m->m_next;
1272 			off_in_m = 0;
1273 			p = mtod(m, u_char *);
1274 			copy = true;
1275 		} else {
1276 			off_in_m += sizeof(struct ether_vlan_header);
1277 			p += sizeof(struct ether_vlan_header);
1278 		}
1279 	} else {
1280 		offset = sizeof(struct ether_header);
1281 		ether_type = ntohs(((struct ether_header *)p)->ether_type);
1282 		COPY(sizeof(struct ether_header));
1283 		if (m->m_len == off_in_m + sizeof(struct ether_header)) {
1284 			m = m->m_next;
1285 			off_in_m = 0;
1286 			p = mtod(m, u_char *);
1287 			copy = true;
1288 		} else {
1289 			off_in_m += sizeof(struct ether_header);
1290 			p += sizeof(struct ether_header);
1291 		}
1292 	}
1293 	if (ether_type == ETHERTYPE_IP) {
1294 		COPY(((struct ip *)p)->ip_hl << 2);
1295 		offset += ((struct ip *)p)->ip_hl << 2;
1296 	} else if (ether_type == ETHERTYPE_IPV6) {
1297 		COPY(sizeof(struct ip6_hdr));
1298 		offset += sizeof(struct ip6_hdr);
1299 	} else {
1300 		/*
1301 		 * Unknown whether most other cases require moving a header;
1302 		 * ARP works without.  However, Wake On LAN packets sent
1303 		 * by wake(8) via BPF need something like this.
1304 		 */
1305 		COPY(MIN(gen_tx_hdr_min, m->m_len));
1306 		offset += MIN(gen_tx_hdr_min, m->m_len);
1307 	}
1308 	return (offset);
1309 #undef COPY
1310 }
1311 
1312 static void
1313 gen_intr(void *arg)
1314 {
1315 	struct gen_softc *sc = arg;
1316 	uint32_t val;
1317 
1318 	GEN_LOCK(sc);
1319 
1320 	val = RD4(sc, GENET_INTRL2_CPU_STAT);
1321 	val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
1322 	WR4(sc, GENET_INTRL2_CPU_CLEAR, val);
1323 
1324 	if (val & GENET_IRQ_RXDMA_DONE)
1325 		gen_rxintr(sc, &sc->rx_queue[DEF_RXQUEUE]);
1326 
1327 	if (val & GENET_IRQ_TXDMA_DONE) {
1328 		gen_txintr(sc, &sc->tx_queue[DEF_TXQUEUE]);
1329 		if (!if_sendq_empty(sc->ifp))
1330 			gen_start_locked(sc);
1331 	}
1332 
1333 	GEN_UNLOCK(sc);
1334 }
1335 
1336 static int
1337 gen_rxintr(struct gen_softc *sc, struct rx_queue *q)
1338 {
1339 	if_t ifp;
1340 	struct mbuf *m, *mh, *mt;
1341 	struct statusblock *sb = NULL;
1342 	int error, index, len, cnt, npkt, n;
1343 	uint32_t status, prod_idx, total;
1344 
1345 	ifp = sc->ifp;
1346 	mh = mt = NULL;
1347 	cnt = 0;
1348 	npkt = 0;
1349 
1350 	prod_idx = RD4(sc, GENET_RX_DMA_PROD_INDEX(q->hwindex)) &
1351 	    GENET_RX_DMA_PROD_CONS_MASK;
1352 	total = (prod_idx - q->cons_idx) & GENET_RX_DMA_PROD_CONS_MASK;
1353 
1354 	index = q->cons_idx & (RX_DESC_COUNT - 1);
1355 	for (n = 0; n < total; n++) {
1356 		bus_dmamap_sync(sc->rx_buf_tag, q->entries[index].map,
1357 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1358 		bus_dmamap_unload(sc->rx_buf_tag, q->entries[index].map);
1359 
1360 		m = q->entries[index].mbuf;
1361 
1362 		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
1363 			sb = mtod(m, struct statusblock *);
1364 			status = sb->status_buflen;
1365 		} else
1366 			status = RD4(sc, GENET_RX_DESC_STATUS(index));
1367 
1368 		len = (status & GENET_RX_DESC_STATUS_BUFLEN_MASK) >>
1369 		    GENET_RX_DESC_STATUS_BUFLEN_SHIFT;
1370 
1371 		/* check for errors */
1372 		if ((status &
1373 		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP |
1374 		    GENET_RX_DESC_STATUS_RX_ERROR)) !=
1375 		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP)) {
1376 			if (if_getflags(ifp) & IFF_DEBUG)
1377 				device_printf(sc->dev,
1378 				    "error/frag %x csum %x\n", status,
1379 				    sb->rxcsum);
1380 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1381 			continue;
1382 		}
1383 
1384 		error = gen_newbuf_rx(sc, q, index);
1385 		if (error != 0) {
1386 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1387 			if (if_getflags(ifp) & IFF_DEBUG)
1388 				device_printf(sc->dev, "gen_newbuf_rx %d\n",
1389 				    error);
1390 			/* reuse previous mbuf */
1391 			(void) gen_mapbuf_rx(sc, q, index, m);
1392 			continue;
1393 		}
1394 
1395 		if (sb != NULL) {
1396 			if (status & GENET_RX_DESC_STATUS_CKSUM_OK) {
1397 				/* L4 checksum checked; not sure about L3. */
1398 				m->m_pkthdr.csum_flags = CSUM_DATA_VALID |
1399 				    CSUM_PSEUDO_HDR;
1400 				m->m_pkthdr.csum_data = 0xffff;
1401 			}
1402 			m->m_data += sizeof(struct statusblock);
1403 			m->m_len -= sizeof(struct statusblock);
1404 			len -= sizeof(struct statusblock);
1405 		}
1406 		if (len > ETHER_ALIGN) {
1407 			m_adj(m, ETHER_ALIGN);
1408 			len -= ETHER_ALIGN;
1409 		}
1410 
1411 		m->m_pkthdr.rcvif = ifp;
1412 		m->m_pkthdr.len = len;
1413 		m->m_len = len;
1414 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1415 
1416 		m->m_nextpkt = NULL;
1417 		if (mh == NULL)
1418 			mh = m;
1419 		else
1420 			mt->m_nextpkt = m;
1421 		mt = m;
1422 		++cnt;
1423 		++npkt;
1424 
1425 		index = RX_NEXT(index, q->nentries);
1426 
1427 		q->cons_idx = (q->cons_idx + 1) & GENET_RX_DMA_PROD_CONS_MASK;
1428 		WR4(sc, GENET_RX_DMA_CONS_INDEX(q->hwindex), q->cons_idx);
1429 
1430 		if (cnt == gen_rx_batch) {
1431 			GEN_UNLOCK(sc);
1432 			if_input(ifp, mh);
1433 			GEN_LOCK(sc);
1434 			mh = mt = NULL;
1435 			cnt = 0;
1436 		}
1437 	}
1438 
1439 	if (mh != NULL) {
1440 		GEN_UNLOCK(sc);
1441 		if_input(ifp, mh);
1442 		GEN_LOCK(sc);
1443 	}
1444 
1445 	return (npkt);
1446 }
1447 
/*
 * Reclaim completed TX descriptors: unload their DMA maps, free the
 * mbuf chains, and clear IFF_DRV_OACTIVE if any ring space was
 * recovered so gen_start_locked can run again.
 */
static void
gen_txintr(struct gen_softc *sc, struct tx_queue *q)
{
	uint32_t cons_idx, total;
	struct gen_ring_ent *ent;
	if_t ifp;
	int i, prog;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/* Descriptors the hardware has consumed since the last call. */
	cons_idx = RD4(sc, GENET_TX_DMA_CONS_INDEX(q->hwindex)) &
	    GENET_TX_DMA_PROD_CONS_MASK;
	total = (cons_idx - q->cons_idx) & GENET_TX_DMA_PROD_CONS_MASK;

	prog = 0;
	for (i = q->next; q->queued > 0 && total > 0;
	    i = TX_NEXT(i, q->nentries), total--) {
		/* XXX check for errors */

		ent = &q->entries[i];
		/* Only a packet's first entry carries the mbuf pointer. */
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		prog++;
		--q->queued;
	}

	/* Space was freed; let the start routine fill it again. */
	if (prog > 0) {
		q->next = i;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}

	q->cons_idx = cons_idx;
}
1490 
1491 static void
1492 gen_intr2(void *arg)
1493 {
1494 	struct gen_softc *sc = arg;
1495 
1496 	device_printf(sc->dev, "gen_intr2\n");
1497 }
1498 
1499 static int
1500 gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index)
1501 {
1502 	struct mbuf *m;
1503 
1504 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1505 	if (m == NULL)
1506 		return (ENOBUFS);
1507 
1508 	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
1509 	m_adj(m, ETHER_ALIGN);
1510 
1511 	return (gen_mapbuf_rx(sc, q, index, m));
1512 }
1513 
1514 static int
1515 gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
1516     struct mbuf *m)
1517 {
1518 	bus_dma_segment_t seg;
1519 	bus_dmamap_t map;
1520 	int nsegs;
1521 
1522 	map = q->entries[index].map;
1523 	if (bus_dmamap_load_mbuf_sg(sc->rx_buf_tag, map, m, &seg, &nsegs,
1524 	    BUS_DMA_NOWAIT) != 0) {
1525 		m_freem(m);
1526 		return (ENOBUFS);
1527 	}
1528 
1529 	bus_dmamap_sync(sc->rx_buf_tag, map, BUS_DMASYNC_PREREAD);
1530 
1531 	q->entries[index].mbuf = m;
1532 	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)seg.ds_addr);
1533 	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(seg.ds_addr >> 32));
1534 
1535 	return (0);
1536 }
1537 
/*
 * Interface ioctl handler.  Handles flag, multicast, media, and
 * capability changes; everything else is passed to ether_ioctl().
 * Returns 0 or an errno value.
 */
static int
gen_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, enable, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		GEN_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* Re-filter only if PROMISC/ALLMULTI changed. */
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					gen_setup_rxfilter(sc);
			} else
				gen_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				gen_stop(sc);
		}
		/* Remember the flags to detect changes next time. */
		sc->if_flags = if_getflags(ifp);
		GEN_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			GEN_LOCK(sc);
			gen_setup_rxfilter(sc);
			GEN_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		/*
		 * Toggle only the checksum-offload capabilities that the
		 * request actually changes.
		 * NOTE(review): this case runs without GEN_LOCK — confirm
		 * that is safe against the RX/TX paths reading capenable.
		 */
		enable = if_getcapenable(ifp);
		flags = ifr->ifr_reqcap ^ enable;
		if (flags & IFCAP_RXCSUM)
			enable ^= IFCAP_RXCSUM;
		if (flags & IFCAP_RXCSUM_IPV6)
			enable ^= IFCAP_RXCSUM_IPV6;
		if (flags & IFCAP_TXCSUM)
			enable ^= IFCAP_TXCSUM;
		if (flags & IFCAP_TXCSUM_IPV6)
			enable ^= IFCAP_TXCSUM_IPV6;
		if (enable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6))
			if_sethwassist(ifp, GEN_CSUM_FEATURES);
		else
			if_sethwassist(ifp, 0);
		if_setcapenable(ifp, enable);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			gen_enable_offload(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
1609 
1610 static void
1611 gen_tick(void *softc)
1612 {
1613 	struct gen_softc *sc;
1614 	struct mii_data *mii;
1615 	if_t ifp;
1616 	int link;
1617 
1618 	sc = softc;
1619 	ifp = sc->ifp;
1620 	mii = device_get_softc(sc->miibus);
1621 
1622 	GEN_ASSERT_LOCKED(sc);
1623 
1624 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
1625 		return;
1626 
1627 	link = sc->link;
1628 	mii_tick(mii);
1629 	if (sc->link && !link)
1630 		gen_start_locked(sc);
1631 
1632 	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
1633 }
1634 
/* Polls of GENET_MDIO_CMD (10us apart) before declaring an MDIO op hung. */
#define	MII_BUSY_RETRY		1000
1636 
/*
 * Read a PHY register via the GENET MDIO interface.
 *
 * Issues the read command, pulses START_BUSY, and polls up to
 * MII_BUSY_RETRY times (10us apart) for completion.  Returns the
 * 16-bit register value, or 0 if the hardware flagged the read as
 * failed.  On timeout, the last raw GENET_MDIO_CMD value is returned
 * unmasked (busy bit still set) after logging the timeout.
 */
static int
gen_miibus_readreg(device_t dev, int phy, int reg)
{
	struct gen_softc *sc;
	int retry, val;

	sc = device_get_softc(dev);
	val = 0;

	/* Queue the command, then set START_BUSY to begin the cycle. */
	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_READ |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if (((val = RD4(sc, GENET_MDIO_CMD)) &
		    GENET_MDIO_START_BUSY) == 0) {
			if (val & GENET_MDIO_READ_FAILED)
				return (0);	/* -1? */
			val &= GENET_MDIO_VAL_MASK;
			break;
		}
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (val);
}
1667 
1668 static int
1669 gen_miibus_writereg(device_t dev, int phy, int reg, int val)
1670 {
1671 	struct gen_softc *sc;
1672 	int retry;
1673 
1674 	sc = device_get_softc(dev);
1675 
1676 	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_WRITE |
1677 	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT) |
1678 	    (val & GENET_MDIO_VAL_MASK));
1679 	val = RD4(sc, GENET_MDIO_CMD);
1680 	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
1681 	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
1682 		val = RD4(sc, GENET_MDIO_CMD);
1683 		if ((val & GENET_MDIO_START_BUSY) == 0)
1684 			break;
1685 		DELAY(10);
1686 	}
1687 	if (retry == 0)
1688 		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
1689 		    phy, reg);
1690 
1691 	return (0);
1692 }
1693 
/*
 * Update the MAC speed and RGMII out-of-band control registers from the
 * current MII media state, and cache link-up state in sc->link.  If the
 * link is down, the registers are left untouched.
 */
static void
gen_update_link_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;
	u_int speed;

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	/* speed is assigned in every case that also sets sc->link. */
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
			speed = GENET_UMAC_CMD_SPEED_1000;
			sc->link = 1;
			break;
		case IFM_100_TX:
			speed = GENET_UMAC_CMD_SPEED_100;
			sc->link = 1;
			break;
		case IFM_10_T:
			speed = GENET_UMAC_CMD_SPEED_10;
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	/* Take link status from the RGMII in-band signal. */
	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
	/*
	 * NOTE(review): ID_MODE_DISABLE presumably controls the internal
	 * RGMII clock delay; plain "rgmii" disables it — confirm against
	 * the phy-connection-type handling at attach time.
	 */
	if (sc->phy_mode == MII_CONTYPE_RGMII)
		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	else
		val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);

	/* Program the negotiated speed into the UMAC. */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_SPEED;
	val |= speed;
	WR4(sc, GENET_UMAC_CMD, val);
}
1748 
static void
gen_link_task(void *arg, int pending)
{
	struct gen_softc *sc = arg;

	/* Apply the link change under the driver lock. */
	GEN_LOCK(sc);
	gen_update_link_locked(sc);
	GEN_UNLOCK(sc);
}
1760 
1761 static void
1762 gen_miibus_statchg(device_t dev)
1763 {
1764 	struct gen_softc *sc;
1765 
1766 	sc = device_get_softc(dev);
1767 
1768 	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
1769 }
1770 
1771 static void
1772 gen_media_status(if_t ifp, struct ifmediareq *ifmr)
1773 {
1774 	struct gen_softc *sc;
1775 	struct mii_data *mii;
1776 
1777 	sc = if_getsoftc(ifp);
1778 	mii = device_get_softc(sc->miibus);
1779 
1780 	GEN_LOCK(sc);
1781 	mii_pollstat(mii);
1782 	ifmr->ifm_active = mii->mii_media_active;
1783 	ifmr->ifm_status = mii->mii_media_status;
1784 	GEN_UNLOCK(sc);
1785 }
1786 
1787 static int
1788 gen_media_change(if_t ifp)
1789 {
1790 	struct gen_softc *sc;
1791 	struct mii_data *mii;
1792 	int error;
1793 
1794 	sc = if_getsoftc(ifp);
1795 	mii = device_get_softc(sc->miibus);
1796 
1797 	GEN_LOCK(sc);
1798 	error = mii_mediachg(mii);
1799 	GEN_UNLOCK(sc);
1800 
1801 	return (error);
1802 }
1803 
/* newbus method table: device probe/attach plus MDIO access for miibus. */
static device_method_t gen_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gen_probe),
	DEVMETHOD(device_attach,	gen_attach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	gen_miibus_readreg),
	DEVMETHOD(miibus_writereg,	gen_miibus_writereg),
	DEVMETHOD(miibus_statchg,	gen_miibus_statchg),

	DEVMETHOD_END
};

static driver_t gen_driver = {
	"genet",
	gen_methods,
	sizeof(struct gen_softc),
};

/* Attach under simplebus (FDT); the PHY attaches beneath us via miibus. */
DRIVER_MODULE(genet, simplebus, gen_driver, 0, 0);
DRIVER_MODULE(miibus, genet, miibus_driver, 0, 0);
MODULE_DEPEND(genet, ether, 1, 1, 1);
MODULE_DEPEND(genet, miibus, 1, 1, 1);
1827