/*-
 * Copyright (c) 2020 Michael J Karels
 * Copyright (c) 2016, 2020 Jared McNeill <jmcneill@invisible.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * RPi4 (BCM 2711) Gigabit Ethernet ("GENET") controller
 *
 * This driver is derived in large part from bcmgenet.c from NetBSD by
 * Jared McNeill.  Parts of the structure and other common code in
 * this driver have been copied from if_awg.c for the Allwinner EMAC,
 * also by Jared McNeill.
 */

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/taskqueue.h>
#include <sys/gpio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

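/*
 * __BIT() is a NetBSD-style macro used by if_genetreg.h, which was
 * brought over with the NetBSD driver this one is derived from.
 */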
#define __BIT(_x)	(1 << (_x))
#include "if_genetreg.h"

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_fdt.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include "syscon_if.h"
#include "miibus_if.h"
#include "gpio_if.h"

#define	RD4(sc, reg)		bus_read_4((sc)->res[_RES_MAC], (reg))
#define	WR4(sc, reg, val)	bus_write_4((sc)->res[_RES_MAC], (reg), (val))

#define	GEN_LOCK(sc)		mtx_lock(&(sc)->mtx)
#define	GEN_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
#define	GEN_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
#define	GEN_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED)

#define	TX_DESC_COUNT		GENET_DMA_DESC_COUNT
#define	RX_DESC_COUNT		GENET_DMA_DESC_COUNT

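/*
 * Ring indexes wrap using a mask, so the descriptor counts must be
 * powers of two.
 */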
#define	TX_NEXT(n, count)		(((n) + 1) & ((count) - 1))
#define	RX_NEXT(n, count)		(((n) + 1) & ((count) - 1))

#define	TX_MAX_SEGS		20

static SYSCTL_NODE(_hw, OID_AUTO, genet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "genet driver parameters");

/* Maximum number of mbufs to pass per call to if_input */
static int gen_rx_batch = 16 /* RX_BATCH_DEFAULT */;
SYSCTL_INT(_hw_genet, OID_AUTO, rx_batch, CTLFLAG_RDTUN,
    &gen_rx_batch, 0, "max mbufs per call to if_input");

TUNABLE_INT("hw.gen.rx_batch", &gen_rx_batch);	/* old name/interface */

/*
 * Transmitting packets with only an Ethernet header in the first mbuf
 * fails.  Examples include reflected ICMPv6 packets, e.g. echo replies;
 * forwarded IPv6/TCP packets; and forwarded IPv4/TCP packets that use NAT
 * with IPFW.  Pulling up the sizes of ether_header + ip6_hdr + icmp6_hdr
 * seems to work for both ICMPv6 and TCP over IPv6, as well as the IPv4/TCP
 * case.
 */
static int gen_tx_hdr_min = 56;		/* ether_header + ip6_hdr + icmp6_hdr */
SYSCTL_INT(_hw_genet, OID_AUTO, tx_hdr_min, CTLFLAG_RW,
    &gen_tx_hdr_min, 0, "header to add to packets with ether header only");

static struct ofw_compat_data compat_data[] = {
	{ "brcm,genet-v1",		1 },
	{ "brcm,genet-v2",		2 },
	{ "brcm,genet-v3",		3 },
	{ "brcm,genet-v4",		4 },
	{ "brcm,genet-v5",		5 },
	{ "brcm,bcm2711-genet-v5",	5 },
	{ NULL,				0 }
};

enum {
	_RES_MAC,		/* MAC/DMA register block */
	_RES_IRQ1,
	_RES_IRQ2,
	_RES_NITEMS
};

static struct resource_spec gen_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ -1, 0 }
};

/* structure per ring entry */
struct gen_ring_ent {
	bus_dmamap_t		map;
	struct mbuf		*mbuf;
};

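/*
 * Software state per TX queue.  cur is the next ring slot to fill and
 * next is the oldest slot still owned by the hardware (reclaimed in
 * gen_txintr); prod_idx and cons_idx shadow the hardware's free-running
 * indices, masked with GENET_TX_DMA_PROD_CONS_MASK.
 */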
struct tx_queue {
	int			hwindex;		/* hardware index */
	int			nentries;
	u_int			queued;			/* descriptors in use */
	u_int			cur;
	u_int			next;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct rx_queue {
	int			hwindex;		/* hardware index */
	int			nentries;
	u_int			cur;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct gen_softc {
	struct resource		*res[_RES_NITEMS];
	struct mtx		mtx;
	if_t			ifp;
	device_t		dev;
	device_t		miibus;
	mii_contype_t		phy_mode;

	struct callout		stat_ch;
	struct task		link_task;
	void			*ih;
	void			*ih2;
	int			type;
	int			if_flags;
	int			link;
	bus_dma_tag_t		tx_buf_tag;
	/*
	 * The genet chip has multiple queues for transmit and receive.
	 * This driver uses only one (queue 16, the default), but is
	 * structured to allow multiple rings.  The additional rings
	 * are used for different priorities.
	 */
#define DEF_TXQUEUE	0
#define NTXQUEUE	1
	struct tx_queue		tx_queue[NTXQUEUE];
	struct gen_ring_ent	tx_ring_ent[TX_DESC_COUNT];  /* ring entries */

	bus_dma_tag_t		rx_buf_tag;
#define DEF_RXQUEUE	0
#define NRXQUEUE	1
	struct rx_queue		rx_queue[NRXQUEUE];
	struct gen_ring_ent	rx_ring_ent[RX_DESC_COUNT];  /* ring entries */
};

static void gen_init(void *softc);
static void gen_start(if_t ifp);
static void gen_destroy(struct gen_softc *sc);
static int gen_encap(struct gen_softc *sc, struct mbuf **mp);
static int gen_parse_tx(struct mbuf *m, int csum_flags);
static int gen_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int gen_get_phy_mode(device_t dev);
static bool gen_get_eaddr(device_t dev, struct ether_addr *eaddr);
static void gen_set_enaddr(struct gen_softc *sc);
static void gen_setup_rxfilter(struct gen_softc *sc);
static void gen_reset(struct gen_softc *sc);
static void gen_enable(struct gen_softc *sc);
static void gen_dma_disable(struct gen_softc *sc);
static int gen_bus_dma_init(struct gen_softc *sc);
static void gen_bus_dma_teardown(struct gen_softc *sc);
static void gen_enable_intr(struct gen_softc *sc);
static void gen_init_txrings(struct gen_softc *sc);
static void gen_init_rxrings(struct gen_softc *sc);
static void gen_intr(void *softc);
static int gen_rxintr(struct gen_softc *sc, struct rx_queue *q);
static void gen_txintr(struct gen_softc *sc, struct tx_queue *q);
static void gen_intr2(void *softc);
static int gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index);
static int gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m);
static void gen_link_task(void *arg, int pending);
static void gen_media_status(if_t ifp, struct ifmediareq *ifmr);
static int gen_media_change(if_t ifp);
static void gen_tick(void *softc);

static int
gen_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "RPi4 Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}

static int
gen_attach(device_t dev)
{
	struct ether_addr eaddr;
	struct gen_softc *sc;
	int major, minor, error, mii_flags;
	bool eaddr_found;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	if (bus_alloc_resources(dev, gen_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		error = ENXIO;
		goto fail;
	}

	major = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MAJOR) >> REV_MAJOR_SHIFT;
	if (major != REV_MAJOR_V5) {
		device_printf(dev, "version %d is not supported\n", major);
		error = ENXIO;
		goto fail;
	}
	minor = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MINOR) >> REV_MINOR_SHIFT;
	device_printf(dev, "GENET version 5.%d phy 0x%04x\n", minor,
	    RD4(sc, GENET_SYS_REV_CTRL) & REV_PHY);

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
	TASK_INIT(&sc->link_task, 0, gen_link_task, sc);

	error = gen_get_phy_mode(dev);
	if (error != 0)
		goto fail;

	bzero(&eaddr, sizeof(eaddr));
	eaddr_found = gen_get_eaddr(dev, &eaddr);

	/* reset core */
	gen_reset(sc);

	gen_dma_disable(sc);

	/* Setup DMA */
	error = gen_bus_dma_init(sc);
	if (error != 0) {
		device_printf(dev, "cannot setup bus dma\n");
		goto fail;
	}

	/* Setup ethernet interface */
	sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp, sc);
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(sc->ifp, gen_start);
	if_setioctlfn(sc->ifp, gen_ioctl);
	if_setinitfn(sc->ifp, gen_init);
	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
	if_setsendqready(sc->ifp);
#define GEN_CSUM_FEATURES	(CSUM_UDP | CSUM_TCP)
	if_sethwassist(sc->ifp, GEN_CSUM_FEATURES);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));

	/* Install interrupt handlers */
	error = bus_setup_intr(dev, sc->res[_RES_IRQ1],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler1\n");
		goto fail;
	}

	error = bus_setup_intr(dev, sc->res[_RES_IRQ2],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr2, sc, &sc->ih2);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler2\n");
		goto fail;
	}

	/* Attach MII driver */
	mii_flags = 0;
	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII_ID:
		mii_flags |= MIIF_RX_DELAY | MIIF_TX_DELAY;
		break;
	case MII_CONTYPE_RGMII_RXID:
		mii_flags |= MIIF_RX_DELAY;
		break;
	case MII_CONTYPE_RGMII_TXID:
		mii_flags |= MIIF_TX_DELAY;
		break;
	default:
		break;
	}
	error = mii_attach(dev, &sc->miibus, sc->ifp, gen_media_change,
	    gen_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    mii_flags);
	if (error != 0) {
		device_printf(dev, "cannot attach PHY\n");
		goto fail;
	}

	/* If address was not found, create one based on the hostid and name. */
	if (!eaddr_found)
		ether_gen_addr(sc->ifp, &eaddr);
	/* Attach ethernet interface */
	ether_ifattach(sc->ifp, eaddr.octet);

fail:
	if (error)
		gen_destroy(sc);
	return (error);
}

/* Free resources after failed attach.  This is not a complete detach. */
static void
gen_destroy(struct gen_softc *sc)
{

	if (sc->miibus) {	/* can't happen */
		device_delete_child(sc->dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ1], sc->ih);
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ2], sc->ih2);
	gen_bus_dma_teardown(sc);
	callout_drain(&sc->stat_ch);
	if (mtx_initialized(&sc->mtx))
		mtx_destroy(&sc->mtx);
	bus_release_resources(sc->dev, gen_spec, sc->res);
	if (sc->ifp != NULL) {
		if_free(sc->ifp);
		sc->ifp = NULL;
	}
}

static int
gen_get_phy_mode(device_t dev)
{
	struct gen_softc *sc;
	phandle_t node;
	mii_contype_t type;
	int error = 0;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	type = mii_fdt_get_contype(node);

	switch (type) {
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		sc->phy_mode = type;
		break;
	default:
		device_printf(dev, "unknown phy-mode '%s'\n",
		    mii_fdt_contype_to_name(type));
		error = ENXIO;
		break;
	}

	return (error);
}

static bool
gen_get_eaddr(device_t dev, struct ether_addr *eaddr)
{
	struct gen_softc *sc;
	uint32_t maclo, machi, val;
	phandle_t node;

	sc = device_get_softc(dev);

	node = ofw_bus_get_node(dev);
	if (OF_getprop(node, "mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "local-mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "address", eaddr->octet, ETHER_ADDR_LEN) != -1)
		return (true);

	device_printf(dev, "No Ethernet address found in fdt!\n");
	maclo = machi = 0;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) {
		maclo = htobe32(RD4(sc, GENET_UMAC_MAC0));
		machi = htobe16(RD4(sc, GENET_UMAC_MAC1) & 0xffff);
	}

	if (maclo == 0 && machi == 0) {
		if (bootverbose)
			device_printf(dev,
			    "No Ethernet address found in controller\n");
		return (false);
	} else {
		eaddr->octet[0] = maclo & 0xff;
		eaddr->octet[1] = (maclo >> 8) & 0xff;
		eaddr->octet[2] = (maclo >> 16) & 0xff;
		eaddr->octet[3] = (maclo >> 24) & 0xff;
		eaddr->octet[4] = machi & 0xff;
		eaddr->octet[5] = (machi >> 8) & 0xff;
		return (true);
	}
}

static void
gen_reset(struct gen_softc *sc)
{
	uint32_t val;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	DELAY(10);

	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	DELAY(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);
}

static void
gen_enable(struct gen_softc *sc)
{
	u_int val;

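	/* Large enough for a maximum-sized frame plus a VLAN tag. */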
	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	gen_enable_intr(sc);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

static void
gen_disable_intr(struct gen_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, GENET_INTRL2_CPU_SET_MASK, 0xffffffff);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK, 0xffffffff);
}

static void
gen_disable(struct gen_softc *sc)
{
	uint32_t val;

	/* Stop receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Stop transmitter */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_TXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Disable interrupts */
	gen_disable_intr(sc);
}

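/*
 * Enable or disable checksum offload to match the interface capabilities.
 * With offload enabled, a 64-byte status block travels between the MAC
 * and the DMA engine ahead of each frame (GENET_RBUF_64B_EN); the driver
 * reads it on RX and fills it in on TX.
 */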
static void
gen_enable_offload(struct gen_softc *sc)
{
	uint32_t check_ctrl, buf_ctrl;

	check_ctrl = RD4(sc, GENET_RBUF_CHECK_CTRL);
	buf_ctrl  = RD4(sc, GENET_RBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0) {
		check_ctrl |= GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl |= GENET_RBUF_64B_EN;
	} else {
		check_ctrl &= ~GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	}
	WR4(sc, GENET_RBUF_CHECK_CTRL, check_ctrl);
	WR4(sc, GENET_RBUF_CTRL, buf_ctrl);

	buf_ctrl  = RD4(sc, GENET_TBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0)
		buf_ctrl |= GENET_RBUF_64B_EN;
	else
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	WR4(sc, GENET_TBUF_CTRL, buf_ctrl);
}

static void
gen_dma_disable(struct gen_softc *sc)
{
	int val;

	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	val &= ~GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	val &= ~GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static int
gen_bus_dma_init(struct gen_softc *sc)
{
	device_t dev = sc->dev;
	int i, error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx_buf_tag, 0,
		    &sc->tx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->rx_buf_tag, 0,
		    &sc->rx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
	}
	return (0);
}

static void
gen_bus_dma_teardown(struct gen_softc *sc)
{
	int i, error;

	if (sc->tx_buf_tag != NULL) {
		for (i = 0; i < TX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->tx_buf_tag,
			    sc->tx_ring_ent[i].map);
			sc->tx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->tx_buf_tag);
		sc->tx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}

	if (sc->rx_buf_tag != NULL) {
		for (i = 0; i < RX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->rx_buf_tag,
			    sc->rx_ring_ent[i].map);
			sc->rx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->rx_buf_tag);
		sc->rx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}
}

static void
gen_enable_intr(struct gen_softc *sc)
{

	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_txring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct tx_queue *q;
	uint32_t val;

	q = &sc->tx_queue[queue];
	q->entries = &sc->tx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;

	/* TX ring */

	q->queued = 0;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_TX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_TX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
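	/* The ring start/end addresses are expressed in 32-bit words. */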
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_TX_DMA_CTRL, val);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_rxring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct rx_queue *q;
	uint32_t val;
	int i;

	q = &sc->rx_queue[queue];
	q->entries = &sc->rx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_RX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_RX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    (5 << GENET_RX_DMA_XON_XOFF_THRES_LO_SHIFT) | (RX_DESC_COUNT >> 4));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* fill ring */
	for (i = 0; i < RX_DESC_COUNT; i++)
		gen_newbuf_rx(sc, &sc->rx_queue[DEF_RXQUEUE], i);

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static void
gen_init_txrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_txring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->tx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_txring(sc, DEF_TXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    TX_DESC_COUNT);
	sc->tx_queue[DEF_TXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_init_rxrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_rxring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->rx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_rxring(sc, DEF_RXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    RX_DESC_COUNT);
	sc->rx_queue[DEF_RXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_stop(struct gen_softc *sc)
{
	int i;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	callout_stop(&sc->stat_ch);
	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
	gen_reset(sc);
	gen_disable(sc);
	gen_dma_disable(sc);

	/* Clear the tx/rx ring buffer */
	for (i = 0; i < TX_DESC_COUNT; i++) {
		ent = &sc->tx_ring_ent[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
		}
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		ent = &sc->rx_ring_ent[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->rx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
		}
	}
}

static void
gen_init_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	GEN_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		WR4(sc, GENET_SYS_PORT_CTRL, GENET_SYS_PORT_MODE_EXT_GPHY);
		break;
	default:
		WR4(sc, GENET_SYS_PORT_CTRL, 0);
	}

	gen_set_enaddr(sc);

	/* Setup RX filter */
	gen_setup_rxfilter(sc);

	gen_init_txrings(sc);
	gen_init_rxrings(sc);
	gen_enable(sc);
	gen_enable_offload(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

static void
gen_init(void *softc)
{
	struct gen_softc *sc;

	sc = softc;
	GEN_LOCK(sc);
	gen_init_locked(sc);
	GEN_UNLOCK(sc);
}

static uint8_t ether_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static void
gen_setup_rxfilter_mdf(struct gen_softc *sc, u_int n, const uint8_t *ea)
{
	uint32_t addr0 = (ea[0] << 8) | ea[1];
	uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];

	WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
	WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
}

static u_int
gen_setup_multi(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	struct gen_softc *sc = arg;

	/* "count + 2" to account for unicast and broadcast */
	gen_setup_rxfilter_mdf(sc, count + 2, LLADDR(sdl));
	return (1);		/* increment to count */
}

static void
gen_setup_rxfilter(struct gen_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t cmd, mdf_ctrl;
	u_int n;

	GEN_ASSERT_LOCKED(sc);

	cmd = RD4(sc, GENET_UMAC_CMD);

	/*
	 * Count the required number of hardware filters. We need one
	 * for each multicast address, plus one for our own address and
	 * the broadcast address.
	 */
	n = if_llmaddr_count(ifp) + 2;

	if (n > GENET_MAX_MDF_FILTER)
		if_setflagbits(ifp, IFF_ALLMULTI, 0);
	else
		if_setflagbits(ifp, 0, IFF_ALLMULTI);

	if ((if_getflags(ifp) & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
		cmd |= GENET_UMAC_CMD_PROMISC;
		mdf_ctrl = 0;
	} else {
		cmd &= ~GENET_UMAC_CMD_PROMISC;
		gen_setup_rxfilter_mdf(sc, 0, ether_broadcastaddr);
		gen_setup_rxfilter_mdf(sc, 1, if_getlladdr(ifp));
		(void) if_foreach_llmaddr(ifp, gen_setup_multi, sc);
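		/*
		 * Enable the n filters in use; slot i is enabled by bit
		 * (GENET_MAX_MDF_FILTER - 1 - i) of MDF_CTRL.
		 */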
		mdf_ctrl = (__BIT(GENET_MAX_MDF_FILTER) - 1) &
		    ~(__BIT(GENET_MAX_MDF_FILTER - n) - 1);
	}

	WR4(sc, GENET_UMAC_CMD, cmd);
	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);
}

static void
gen_set_enaddr(struct gen_softc *sc)
{
	uint8_t *enaddr;
	uint32_t val;
	if_t ifp;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/* Write our unicast address to the hardware */
	enaddr = if_getlladdr(ifp);
	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
	    (enaddr[0] << 24);
	WR4(sc, GENET_UMAC_MAC0, val);
	val = enaddr[5] | (enaddr[4] << 8);
	WR4(sc, GENET_UMAC_MAC1, val);
}

static void
gen_start_locked(struct gen_softc *sc)
{
	struct mbuf *m;
	if_t ifp;
	int err;

	GEN_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	while (true) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		err = gen_encap(sc, &m);
		if (err != 0) {
			if (err == ENOBUFS)
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			else if (m == NULL)
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if (m != NULL)
				if_sendq_prepend(ifp, m);
			break;
		}
		bpf_mtap_if(ifp, m);
	}
}

static void
gen_start(if_t ifp)
{
	struct gen_softc *sc;

	sc = if_getsoftc(ifp);

	GEN_LOCK(sc);
	gen_start_locked(sc);
	GEN_UNLOCK(sc);
}

/* Test for any delayed checksum */
#define CSUM_DELAY_ANY	(CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)

static int
gen_encap(struct gen_softc *sc, struct mbuf **mp)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, first, i, index, offset;
	uint32_t csuminfo, length_status, csum_flags = 0, csumdata;
	struct mbuf *m;
	struct statusblock *sb = NULL;
	struct tx_queue *q;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	q = &sc->tx_queue[DEF_TXQUEUE];

	m = *mp;

	/*
	 * Don't attempt to send packets with only an Ethernet header in
	 * first mbuf; see comment above with gen_tx_hdr_min.
	 */
	if (m->m_len == sizeof(struct ether_header)) {
		m = m_pullup(m, MIN(m->m_pkthdr.len, gen_tx_hdr_min));
		if (m == NULL) {
			if (if_getflags(sc->ifp) & IFF_DEBUG)
				device_printf(sc->dev,
				    "header pullup fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
	}

	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0) {
		csum_flags = m->m_pkthdr.csum_flags;
		csumdata = m->m_pkthdr.csum_data;
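		/*
		 * The hardware expects the status block ahead of the frame
		 * when TX checksum offload is enabled; prepend it here and
		 * fill in the checksum parameters below.
		 */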
		M_PREPEND(m, sizeof(struct statusblock), M_NOWAIT);
		if (m == NULL) {
			if (if_getflags(sc->ifp) & IFF_DEBUG)
				device_printf(sc->dev, "prepend fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
		offset = gen_parse_tx(m, csum_flags);
		sb = mtod(m, struct statusblock *);
		if ((csum_flags & CSUM_DELAY_ANY) != 0) {
			csuminfo = (offset << TXCSUM_OFF_SHIFT) |
			    (offset + csumdata);
			csuminfo |= TXCSUM_LEN_VALID;
			if (csum_flags & (CSUM_UDP | CSUM_IP6_UDP))
				csuminfo |= TXCSUM_UDP;
			sb->txcsuminfo = csuminfo;
		} else
			sb->txcsuminfo = 0;
	}

	*mp = m;

	cur = first = q->cur;
	ent = &q->entries[cur];
	map = ent->map;
	error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL) {
			device_printf(sc->dev,
			    "gen_encap: m_collapse failed\n");
			m_freem(*mp);
			*mp = NULL;
			return (ENOMEM);
		}
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*mp);
			*mp = NULL;
		}
	}
	if (error != 0) {
		device_printf(sc->dev,
		    "gen_encap: bus_dmamap_load_mbuf_sg failed\n");
		return (error);
	}
	if (nsegs == 0) {
		m_freem(*mp);
		*mp = NULL;
		return (EIO);
	}

	/* Remove statusblock after mapping, before possible requeue or bpf. */
	if (sb != NULL) {
		m->m_data += sizeof(struct statusblock);
		m->m_len -= sizeof(struct statusblock);
		m->m_pkthdr.len -= sizeof(struct statusblock);
	}
	if (q->queued + nsegs > q->nentries) {
		bus_dmamap_unload(sc->tx_buf_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->tx_buf_tag, map, BUS_DMASYNC_PREWRITE);

	index = q->prod_idx & (q->nentries - 1);
	for (i = 0; i < nsegs; i++) {
		ent = &q->entries[cur];
		length_status = GENET_TX_DESC_STATUS_QTAG_MASK;
		if (i == 0) {
			length_status |= GENET_TX_DESC_STATUS_SOP |
			    GENET_TX_DESC_STATUS_CRC;
			if ((csum_flags & CSUM_DELAY_ANY) != 0)
				length_status |= GENET_TX_DESC_STATUS_CKSUM;
		}
		if (i == nsegs - 1)
			length_status |= GENET_TX_DESC_STATUS_EOP;

		length_status |= segs[i].ds_len <<
		    GENET_TX_DESC_STATUS_BUFLEN_SHIFT;

		WR4(sc, GENET_TX_DESC_ADDRESS_LO(index),
		    (uint32_t)segs[i].ds_addr);
		WR4(sc, GENET_TX_DESC_ADDRESS_HI(index),
		    (uint32_t)(segs[i].ds_addr >> 32));
		WR4(sc, GENET_TX_DESC_STATUS(index), length_status);

		++q->queued;
		cur = TX_NEXT(cur, q->nentries);
		index = TX_NEXT(index, q->nentries);
	}

	q->prod_idx += nsegs;
	q->prod_idx &= GENET_TX_DMA_PROD_CONS_MASK;
	/* We probably don't need to write the producer index on every iter */
	if (nsegs != 0)
		WR4(sc, GENET_TX_DMA_PROD_INDEX(q->hwindex), q->prod_idx);
	q->cur = cur;

	/* Store mbuf in the first segment's ring entry */
	q->entries[first].mbuf = m;

	return (0);
}

/*
 * Parse a packet to find the offset of the transport header for checksum
 * offload.  Ensure that the link and network headers are contiguous with
 * the status block, or transmission fails.
 */
static int
gen_parse_tx(struct mbuf *m, int csum_flags)
{
	int offset, off_in_m;
	bool copy = false, shift = false;
	u_char *p, *copy_p = NULL;
	struct mbuf *m0 = m;
	uint16_t ether_type;

	if (m->m_len == sizeof(struct statusblock)) {
		/* M_PREPEND placed statusblock at end; move to beginning */
		m->m_data = m->m_pktdat;
		copy_p = mtodo(m, sizeof(struct statusblock));
		m = m->m_next;
		off_in_m = 0;
		p = mtod(m, u_char *);
		copy = true;
	} else {
		/*
		 * If statusblock is not at beginning of mbuf (likely),
		 * then remember to move mbuf contents down before copying
		 * after them.
		 */
		if ((m->m_flags & M_EXT) == 0 && m->m_data != m->m_pktdat)
			shift = true;
		p = mtodo(m, sizeof(struct statusblock));
		off_in_m = sizeof(struct statusblock);
	}

/*
 * If headers need to be copied contiguous to statusblock, do so.
 * If copying to the internal mbuf data area, and the status block
 * is not at the beginning of that area, shift the status block (which
 * is empty) and following data.
 */
#define COPY(size) {							\
	int hsize = size;						\
	if (copy) {							\
		if (shift) {						\
			u_char *p0;					\
			shift = false;					\
			p0 = mtodo(m0, sizeof(struct statusblock));	\
			m0->m_data = m0->m_pktdat;			\
			bcopy(p0, mtodo(m0, sizeof(struct statusblock)),\
			    m0->m_len - sizeof(struct statusblock));	\
			copy_p = mtodo(m0, m0->m_len);			\
		}							\
		bcopy(p, copy_p, hsize);				\
		m0->m_len += hsize;					\
		m->m_len -= hsize;					\
		m->m_data += hsize;					\
	}								\
	copy_p += hsize;						\
}

	KASSERT((sizeof(struct statusblock) + sizeof(struct ether_vlan_header) +
	    sizeof(struct ip6_hdr) <= MLEN), ("%s: mbuf too small", __func__));

	if (((struct ether_header *)p)->ether_type == htons(ETHERTYPE_VLAN)) {
		offset = sizeof(struct ether_vlan_header);
		ether_type = ntohs(((struct ether_vlan_header *)p)->evl_proto);
		COPY(sizeof(struct ether_vlan_header));
		if (m->m_len == off_in_m + sizeof(struct ether_vlan_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_vlan_header);
			p += sizeof(struct ether_vlan_header);
		}
	} else {
		offset = sizeof(struct ether_header);
		ether_type = ntohs(((struct ether_header *)p)->ether_type);
		COPY(sizeof(struct ether_header));
		if (m->m_len == off_in_m + sizeof(struct ether_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_header);
			p += sizeof(struct ether_header);
		}
	}
	if (ether_type == ETHERTYPE_IP) {
		COPY(((struct ip *)p)->ip_hl << 2);
		offset += ((struct ip *)p)->ip_hl << 2;
	} else if (ether_type == ETHERTYPE_IPV6) {
		COPY(sizeof(struct ip6_hdr));
		offset += sizeof(struct ip6_hdr);
	} else {
		/*
		 * Unknown whether most other cases require moving a header;
		 * ARP works without.  However, Wake On LAN packets sent
		 * by wake(8) via BPF need something like this.
		 */
		COPY(MIN(gen_tx_hdr_min, m->m_len));
		offset += MIN(gen_tx_hdr_min, m->m_len);
	}
	return (offset);
#undef COPY
}

static void
gen_intr(void *arg)
{
	struct gen_softc *sc = arg;
	uint32_t val;

	GEN_LOCK(sc);

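	/* Service only the causes that are enabled (i.e. not masked). */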
	val = RD4(sc, GENET_INTRL2_CPU_STAT);
	val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
	WR4(sc, GENET_INTRL2_CPU_CLEAR, val);

	if (val & GENET_IRQ_RXDMA_DONE)
		gen_rxintr(sc, &sc->rx_queue[DEF_RXQUEUE]);

	if (val & GENET_IRQ_TXDMA_DONE) {
		gen_txintr(sc, &sc->tx_queue[DEF_TXQUEUE]);
		if (!if_sendq_empty(sc->ifp))
			gen_start_locked(sc);
	}

	GEN_UNLOCK(sc);
}

static int
gen_rxintr(struct gen_softc *sc, struct rx_queue *q)
{
	if_t ifp;
	struct mbuf *m, *mh, *mt;
	struct statusblock *sb = NULL;
	int error, index, len, cnt, npkt, n;
	uint32_t status, prod_idx, total;

	ifp = sc->ifp;
	mh = mt = NULL;
	cnt = 0;
	npkt = 0;

	prod_idx = RD4(sc, GENET_RX_DMA_PROD_INDEX(q->hwindex)) &
	    GENET_RX_DMA_PROD_CONS_MASK;
	total = (prod_idx - q->cons_idx) & GENET_RX_DMA_PROD_CONS_MASK;

	index = q->cons_idx & (RX_DESC_COUNT - 1);
	for (n = 0; n < total; n++) {
		bus_dmamap_sync(sc->rx_buf_tag, q->entries[index].map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rx_buf_tag, q->entries[index].map);

		m = q->entries[index].mbuf;

		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			sb = mtod(m, struct statusblock *);
			status = sb->status_buflen;
		} else
			status = RD4(sc, GENET_RX_DESC_STATUS(index));

		len = (status & GENET_RX_DESC_STATUS_BUFLEN_MASK) >>
		    GENET_RX_DESC_STATUS_BUFLEN_SHIFT;

		/* check for errors */
		if ((status &
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP |
		    GENET_RX_DESC_STATUS_RX_ERROR)) !=
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP)) {
			if (if_getflags(ifp) & IFF_DEBUG)
				device_printf(sc->dev,
				    "error/frag %x csum %x\n", status,
				    sb != NULL ? sb->rxcsum : 0);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			/* reuse mbuf; consume the descriptor */
			(void) gen_mapbuf_rx(sc, q, index, m);
			goto next;
		}

		error = gen_newbuf_rx(sc, q, index);
		if (error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			if (if_getflags(ifp) & IFF_DEBUG)
				device_printf(sc->dev, "gen_newbuf_rx %d\n",
				    error);
			/* reuse previous mbuf */
			(void) gen_mapbuf_rx(sc, q, index, m);
			goto next;
		}

		if (sb != NULL) {
			if (status & GENET_RX_DESC_STATUS_CKSUM_OK) {
				/* L4 checksum checked; not sure about L3. */
				m->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
			m->m_data += sizeof(struct statusblock);
			m->m_len -= sizeof(struct statusblock);
			len -= sizeof(struct statusblock);
		}
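		/* Strip the 2-byte padding inserted by GENET_RBUF_ALIGN_2B. */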
		if (len > ETHER_ALIGN) {
			m_adj(m, ETHER_ALIGN);
			len -= ETHER_ALIGN;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
		m->m_len = len;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		m->m_nextpkt = NULL;
		if (mh == NULL)
			mh = m;
		else
			mt->m_nextpkt = m;
		mt = m;
		++cnt;
		++npkt;

next:
		index = RX_NEXT(index, q->nentries);

		q->cons_idx = (q->cons_idx + 1) & GENET_RX_DMA_PROD_CONS_MASK;
		WR4(sc, GENET_RX_DMA_CONS_INDEX(q->hwindex), q->cons_idx);

		if (cnt == gen_rx_batch) {
			GEN_UNLOCK(sc);
			if_input(ifp, mh);
			GEN_LOCK(sc);
			mh = mt = NULL;
			cnt = 0;
		}
	}

	if (mh != NULL) {
		GEN_UNLOCK(sc);
		if_input(ifp, mh);
		GEN_LOCK(sc);
	}

	return (npkt);
}

static void
gen_txintr(struct gen_softc *sc, struct tx_queue *q)
{
	uint32_t cons_idx, total;
	struct gen_ring_ent *ent;
	if_t ifp;
	int i, prog;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	cons_idx = RD4(sc, GENET_TX_DMA_CONS_INDEX(q->hwindex)) &
	    GENET_TX_DMA_PROD_CONS_MASK;
	total = (cons_idx - q->cons_idx) & GENET_TX_DMA_PROD_CONS_MASK;

	prog = 0;
	for (i = q->next; q->queued > 0 && total > 0;
	    i = TX_NEXT(i, q->nentries), total--) {
		/* XXX check for errors */

		ent = &q->entries[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		prog++;
		--q->queued;
	}

	if (prog > 0) {
		q->next = i;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}

	q->cons_idx = cons_idx;
}

static void
gen_intr2(void *arg)
{
	struct gen_softc *sc = arg;

	device_printf(sc->dev, "gen_intr2\n");
}

static int
gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
	m_adj(m, ETHER_ALIGN);

	return (gen_mapbuf_rx(sc, q, index, m));
}

static int
gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m)
{
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int nsegs;

	map = q->entries[index].map;
	if (bus_dmamap_load_mbuf_sg(sc->rx_buf_tag, map, m, &seg, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->rx_buf_tag, map, BUS_DMASYNC_PREREAD);

	q->entries[index].mbuf = m;
	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)seg.ds_addr);
	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(seg.ds_addr >> 32));

	return (0);
}

static int
gen_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, enable, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		GEN_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					gen_setup_rxfilter(sc);
			} else
				gen_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				gen_stop(sc);
		}
		sc->if_flags = if_getflags(ifp);
		GEN_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			GEN_LOCK(sc);
			gen_setup_rxfilter(sc);
			GEN_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		enable = if_getcapenable(ifp);
		flags = ifr->ifr_reqcap ^ enable;
		if (flags & IFCAP_RXCSUM)
			enable ^= IFCAP_RXCSUM;
		if (flags & IFCAP_RXCSUM_IPV6)
			enable ^= IFCAP_RXCSUM_IPV6;
		if (flags & IFCAP_TXCSUM)
			enable ^= IFCAP_TXCSUM;
		if (flags & IFCAP_TXCSUM_IPV6)
			enable ^= IFCAP_TXCSUM_IPV6;
		if (enable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6))
			if_sethwassist(ifp, GEN_CSUM_FEATURES);
		else
			if_sethwassist(ifp, 0);
		if_setcapenable(ifp, enable);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			gen_enable_offload(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
gen_tick(void *softc)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int link;

	sc = softc;
	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	link = sc->link;
	mii_tick(mii);
	if (sc->link && !link)
		gen_start_locked(sc);

	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

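/* Poll for MDIO completion for up to ~10ms (1000 * 10us). */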
#define	MII_BUSY_RETRY		1000

static int
gen_miibus_readreg(device_t dev, int phy, int reg)
{
	struct gen_softc *sc;
	int retry, val;

	sc = device_get_softc(dev);
	val = 0;

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_READ |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if (((val = RD4(sc, GENET_MDIO_CMD)) &
		    GENET_MDIO_START_BUSY) == 0) {
			if (val & GENET_MDIO_READ_FAILED)
				return (0);	/* -1? */
			val &= GENET_MDIO_VAL_MASK;
			break;
		}
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (val);
}

static int
gen_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct gen_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_WRITE |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT) |
	    (val & GENET_MDIO_VAL_MASK));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		val = RD4(sc, GENET_MDIO_CMD);
		if ((val & GENET_MDIO_START_BUSY) == 0)
			break;
		DELAY(10);
	}
	if (retry == 0)
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (0);
}

static void
gen_update_link_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;
	u_int speed;

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
			speed = GENET_UMAC_CMD_SPEED_1000;
			sc->link = 1;
			break;
		case IFM_100_TX:
			speed = GENET_UMAC_CMD_SPEED_100;
			sc->link = 1;
			break;
		case IFM_10_T:
			speed = GENET_UMAC_CMD_SPEED_10;
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
	if (sc->phy_mode == MII_CONTYPE_RGMII)
		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	else
		val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);

	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_SPEED;
	val |= speed;
	WR4(sc, GENET_UMAC_CMD, val);
}

static void
gen_link_task(void *arg, int pending)
{
	struct gen_softc *sc;

	sc = arg;

	GEN_LOCK(sc);
	gen_update_link_locked(sc);
	GEN_UNLOCK(sc);
}

static void
gen_miibus_statchg(device_t dev)
{
	struct gen_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}

static void
gen_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct gen_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	GEN_UNLOCK(sc);
}

static int
gen_media_change(if_t ifp)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	int error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	error = mii_mediachg(mii);
	GEN_UNLOCK(sc);

	return (error);
}

static device_method_t gen_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gen_probe),
	DEVMETHOD(device_attach,	gen_attach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	gen_miibus_readreg),
	DEVMETHOD(miibus_writereg,	gen_miibus_writereg),
	DEVMETHOD(miibus_statchg,	gen_miibus_statchg),

	DEVMETHOD_END
};

static driver_t gen_driver = {
	"genet",
	gen_methods,
	sizeof(struct gen_softc),
};

DRIVER_MODULE(genet, simplebus, gen_driver, 0, 0);
DRIVER_MODULE(miibus, genet, miibus_driver, 0, 0);
MODULE_DEPEND(genet, ether, 1, 1, 1);
MODULE_DEPEND(genet, miibus, 1, 1, 1);