/*-
 * Copyright (c) 2020 Michael J Karels
 * Copyright (c) 2016, 2020 Jared McNeill <jmcneill@invisible.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * RPi4 (BCM 2711) Gigabit Ethernet ("GENET") controller
 *
 * This driver is derived in large part from bcmgenet.c from NetBSD by
 * Jared McNeill.  Parts of the structure and other common code in
 * this driver have been copied from if_awg.c for the Allwinner EMAC,
 * also by Jared McNeill.
 */

#include "opt_device_polling.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/module.h>
#include <sys/taskqueue.h>
#include <sys/gpio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

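/*
 * NetBSD-style single-bit mask helper; if_genetreg.h, which came over
 * from the NetBSD bcmgenet driver, relies on it for register bit
 * definitions, so it must be defined before that include.
 */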
#define __BIT(_x)	(1 << (_x))
#include "if_genetreg.h"

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_fdt.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include "syscon_if.h"
#include "miibus_if.h"
#include "gpio_if.h"

#define	RD4(sc, reg)		bus_read_4((sc)->res[_RES_MAC], (reg))
#define	WR4(sc, reg, val)	bus_write_4((sc)->res[_RES_MAC], (reg), (val))

#define	GEN_LOCK(sc)		mtx_lock(&(sc)->mtx)
#define	GEN_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
#define	GEN_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
#define	GEN_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED)

#define	TX_DESC_COUNT		GENET_DMA_DESC_COUNT
#define	RX_DESC_COUNT		GENET_DMA_DESC_COUNT

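/*
 * Ring index arithmetic below assumes the descriptor counts are powers
 * of two, so an increment wraps via the mask rather than a modulus.
 */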
#define	TX_NEXT(n, count)		(((n) + 1) & ((count) - 1))
#define	RX_NEXT(n, count)		(((n) + 1) & ((count) - 1))

#define	TX_MAX_SEGS		20

/* Maximum number of mbufs to send to if_input */
static int gen_rx_batch = 16 /* RX_BATCH_DEFAULT */;
TUNABLE_INT("hw.gen.rx_batch", &gen_rx_batch);
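
/*
 * As a usage sketch, the batch size can be overridden at boot time,
 * e.g. in /boot/loader.conf:
 *
 *	hw.gen.rx_batch="32"
 */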

static struct ofw_compat_data compat_data[] = {
	{ "brcm,genet-v1",		1 },
	{ "brcm,genet-v2",		2 },
	{ "brcm,genet-v3",		3 },
	{ "brcm,genet-v4",		4 },
	{ "brcm,genet-v5",		5 },
	{ "brcm,bcm2711-genet-v5",	5 },
	{ NULL,				0 }
};

enum {
	_RES_MAC,		/* what to call this? */
	_RES_IRQ1,
	_RES_IRQ2,
	_RES_NITEMS
};

static struct resource_spec gen_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ -1, 0 }
};

/* structure per ring entry */
struct gen_ring_ent {
	bus_dmamap_t		map;
	struct mbuf		*mbuf;
};

struct tx_queue {
	int			hwindex;		/* hardware index */
	int			nentries;
	u_int			queued;			/* or avail? */
	u_int			cur;
	u_int			next;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct rx_queue {
	int			hwindex;		/* hardware index */
	int			nentries;
	u_int			cur;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct gen_softc {
	struct resource		*res[_RES_NITEMS];
	struct mtx		mtx;
	if_t			ifp;
	device_t		dev;
	device_t		miibus;
	mii_contype_t		phy_mode;

	struct callout		stat_ch;
	struct task		link_task;
	void			*ih;
	void			*ih2;
	int			type;
	int			if_flags;
	int			link;
	bus_dma_tag_t		tx_buf_tag;
	/*
	 * The GENET has multiple queues for transmit and receive.  This
	 * driver uses only one (queue 16, the default), but the code is
	 * structured to allow multiple rings.  The additional rings would
	 * be used for different priorities.
	 */
#define DEF_TXQUEUE	0
#define NTXQUEUE	1
	struct tx_queue		tx_queue[NTXQUEUE];
	struct gen_ring_ent	tx_ring_ent[TX_DESC_COUNT];  /* ring entries */

	bus_dma_tag_t		rx_buf_tag;
#define DEF_RXQUEUE	0
#define NRXQUEUE	1
	struct rx_queue		rx_queue[NRXQUEUE];
	struct gen_ring_ent	rx_ring_ent[RX_DESC_COUNT];  /* ring entries */
};

static void gen_init(void *softc);
static void gen_start(if_t ifp);
static void gen_destroy(struct gen_softc *sc);
static int gen_encap(struct gen_softc *sc, struct mbuf **mp);
static int gen_parse_tx(struct mbuf *m, int csum_flags);
static int gen_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int gen_get_phy_mode(device_t dev);
static bool gen_get_eaddr(device_t dev, struct ether_addr *eaddr);
static void gen_set_enaddr(struct gen_softc *sc);
static void gen_setup_rxfilter(struct gen_softc *sc);
static void gen_reset(struct gen_softc *sc);
static void gen_enable(struct gen_softc *sc);
static void gen_dma_disable(device_t dev);
static int gen_bus_dma_init(struct gen_softc *sc);
static void gen_bus_dma_teardown(struct gen_softc *sc);
static void gen_enable_intr(struct gen_softc *sc);
static void gen_init_txrings(struct gen_softc *sc);
static void gen_init_rxrings(struct gen_softc *sc);
static void gen_intr(void *softc);
static int gen_rxintr(struct gen_softc *sc, struct rx_queue *q);
static void gen_txintr(struct gen_softc *sc, struct tx_queue *q);
static void gen_intr2(void *softc);
static int gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index);
static int gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m);
static void gen_link_task(void *arg, int pending);
static void gen_media_status(if_t ifp, struct ifmediareq *ifmr);
static int gen_media_change(if_t ifp);
static void gen_tick(void *softc);

static int
gen_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "RPi4 Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}

static int
gen_attach(device_t dev)
{
	struct ether_addr eaddr;
	struct gen_softc *sc;
	int major, minor, error, mii_flags;
	bool eaddr_found;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	if (bus_alloc_resources(dev, gen_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		error = ENXIO;
		goto fail;
	}

	major = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MAJOR) >> REV_MAJOR_SHIFT;
	if (major != REV_MAJOR_V5) {
		device_printf(dev, "version %d is not supported\n", major);
		error = ENXIO;
		goto fail;
	}
	minor = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MINOR) >> REV_MINOR_SHIFT;
	device_printf(dev, "GENET version 5.%d phy 0x%04x\n", minor,
	    RD4(sc, GENET_SYS_REV_CTRL) & REV_PHY);

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
	TASK_INIT(&sc->link_task, 0, gen_link_task, sc);

	error = gen_get_phy_mode(dev);
	if (error != 0)
		goto fail;

	bzero(&eaddr, sizeof(eaddr));
	eaddr_found = gen_get_eaddr(dev, &eaddr);

	/* reset core */
	gen_reset(sc);

	gen_dma_disable(dev);

	/* Setup DMA */
	error = gen_bus_dma_init(sc);
	if (error != 0) {
		device_printf(dev, "cannot setup bus dma\n");
		goto fail;
	}

	/* Setup ethernet interface */
	sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp, sc);
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(sc->ifp, gen_start);
	if_setioctlfn(sc->ifp, gen_ioctl);
	if_setinitfn(sc->ifp, gen_init);
	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
	if_setsendqready(sc->ifp);
#define GEN_CSUM_FEATURES	(CSUM_UDP | CSUM_TCP)
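	/*
	 * Note that GEN_CSUM_FEATURES names only the IPv4 assist flags,
	 * even though IFCAP_HWCSUM_IPV6 is advertised below; gen_encap()
	 * still honors CSUM_IP6_TCP/CSUM_IP6_UDP if the stack sets them.
	 */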
	if_sethwassist(sc->ifp, GEN_CSUM_FEATURES);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));

	/* Install interrupt handlers */
	error = bus_setup_intr(dev, sc->res[_RES_IRQ1],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler1\n");
		goto fail;
	}

	error = bus_setup_intr(dev, sc->res[_RES_IRQ2],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr2, sc, &sc->ih2);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler2\n");
		goto fail;
	}

	/* Attach MII driver */
	mii_flags = 0;
	switch (sc->phy_mode) {
	case MII_CONTYPE_RGMII_ID:
		mii_flags |= MIIF_RX_DELAY | MIIF_TX_DELAY;
		break;
	case MII_CONTYPE_RGMII_RXID:
		mii_flags |= MIIF_RX_DELAY;
		break;
	case MII_CONTYPE_RGMII_TXID:
		mii_flags |= MIIF_TX_DELAY;
		break;
	default:
		break;
	}
	error = mii_attach(dev, &sc->miibus, sc->ifp, gen_media_change,
	    gen_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    mii_flags);
	if (error != 0) {
		device_printf(dev, "cannot attach PHY\n");
		goto fail;
	}

	/* If address was not found, create one based on the hostid and name. */
	if (!eaddr_found)
		ether_gen_addr(sc->ifp, &eaddr);
	/* Attach ethernet interface */
	ether_ifattach(sc->ifp, eaddr.octet);

fail:
	if (error)
		gen_destroy(sc);
	return (error);
}

/* Free resources after failed attach.  This is not a complete detach. */
static void
gen_destroy(struct gen_softc *sc)
{

	if (sc->miibus) {	/* can't happen */
		device_delete_child(sc->dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ1], sc->ih);
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ2], sc->ih2);
	gen_bus_dma_teardown(sc);
	callout_drain(&sc->stat_ch);
	if (mtx_initialized(&sc->mtx))
		mtx_destroy(&sc->mtx);
	bus_release_resources(sc->dev, gen_spec, sc->res);
	if (sc->ifp != NULL) {
		if_free(sc->ifp);
		sc->ifp = NULL;
	}
}

static int
gen_get_phy_mode(device_t dev)
{
	struct gen_softc *sc;
	phandle_t node;
	mii_contype_t type;
	int error = 0;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	type = mii_fdt_get_contype(node);

	switch (type) {
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		sc->phy_mode = type;
		break;
	default:
		device_printf(dev, "unknown phy-mode '%s'\n",
		    mii_fdt_contype_to_name(type));
		error = ENXIO;
		break;
	}

	return (error);
}

static bool
gen_get_eaddr(device_t dev, struct ether_addr *eaddr)
{
	struct gen_softc *sc;
	uint32_t maclo, machi, val;
	phandle_t node;

	sc = device_get_softc(dev);

	node = ofw_bus_get_node(dev);
	if (OF_getprop(node, "mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "local-mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "address", eaddr->octet, ETHER_ADDR_LEN) != -1)
		return (true);

	device_printf(dev, "No Ethernet address found in fdt!\n");
	maclo = machi = 0;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) {
		maclo = htobe32(RD4(sc, GENET_UMAC_MAC0));
		machi = htobe16(RD4(sc, GENET_UMAC_MAC1) & 0xffff);
	}

	if (maclo == 0 && machi == 0) {
		if (bootverbose)
			device_printf(dev,
			    "No Ethernet address found in controller\n");
		return (false);
	} else {
		eaddr->octet[0] = maclo & 0xff;
		eaddr->octet[1] = (maclo >> 8) & 0xff;
		eaddr->octet[2] = (maclo >> 16) & 0xff;
		eaddr->octet[3] = (maclo >> 24) & 0xff;
		eaddr->octet[4] = machi & 0xff;
		eaddr->octet[5] = (machi >> 8) & 0xff;
		return (true);
	}
}

static void
gen_reset(struct gen_softc *sc)
{
	uint32_t val;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	DELAY(10);

	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	DELAY(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);

	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);
}

static void
gen_enable(struct gen_softc *sc)
{
	u_int val;

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	gen_enable_intr(sc);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

static void
gen_enable_offload(struct gen_softc *sc)
{
	uint32_t check_ctrl, buf_ctrl;

	check_ctrl = RD4(sc, GENET_RBUF_CHECK_CTRL);
	buf_ctrl = RD4(sc, GENET_RBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0) {
		check_ctrl |= GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl |= GENET_RBUF_64B_EN;
	} else {
		check_ctrl &= ~GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	}
	WR4(sc, GENET_RBUF_CHECK_CTRL, check_ctrl);
	WR4(sc, GENET_RBUF_CTRL, buf_ctrl);

	buf_ctrl = RD4(sc, GENET_TBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0)
		buf_ctrl |= GENET_RBUF_64B_EN;
	else
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	WR4(sc, GENET_TBUF_CTRL, buf_ctrl);
}

static void
gen_dma_disable(device_t dev)
{
	struct gen_softc *sc = device_get_softc(dev);
	int val;

	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	val &= ~GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	val &= ~GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static int
gen_bus_dma_init(struct gen_softc *sc)
{
	device_t dev = sc->dev;
	int i, error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx_buf_tag, 0,
		    &sc->tx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->rx_buf_tag, 0,
		    &sc->rx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
	}
	return (0);
}

static void
gen_bus_dma_teardown(struct gen_softc *sc)
{
	int i, error;

	if (sc->tx_buf_tag != NULL) {
		for (i = 0; i < TX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->tx_buf_tag,
			    sc->tx_ring_ent[i].map);
			sc->tx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->tx_buf_tag);
		sc->tx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}

	if (sc->rx_buf_tag != NULL) {
		for (i = 0; i < RX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->rx_buf_tag,
			    sc->rx_ring_ent[i].map);
			sc->rx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->rx_buf_tag);
		sc->rx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}
}

static void
gen_enable_intr(struct gen_softc *sc)
{

	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_txring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct tx_queue *q;
	uint32_t val;

	q = &sc->tx_queue[queue];
	q->entries = &sc->tx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;

	/* TX ring */

	q->queued = 0;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_TX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_TX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_TX_DMA_CTRL, val);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_rxring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct rx_queue *q;
	uint32_t val;
	int i;

	q = &sc->rx_queue[queue];
	q->entries = &sc->rx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_RX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_RX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
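	/*
	 * 802.3x flow-control thresholds (XON/XOFF), carried over
	 * unchanged from the NetBSD driver; the two fields are set to
	 * 5 buffers and RX_DESC_COUNT / 16 buffers respectively.
	 */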
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    (5 << GENET_RX_DMA_XON_XOFF_THRES_LO_SHIFT) | (RX_DESC_COUNT >> 4));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* fill ring */
	for (i = 0; i < RX_DESC_COUNT; i++)
		gen_newbuf_rx(sc, &sc->rx_queue[DEF_RXQUEUE], i);

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static void
gen_init_txrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_txring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->tx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_txring(sc, DEF_TXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    TX_DESC_COUNT);
	sc->tx_queue[DEF_TXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_init_rxrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_rxring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->rx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_rxring(sc, DEF_RXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    RX_DESC_COUNT);
	sc->rx_queue[DEF_RXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_init_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	GEN_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	switch (sc->phy_mode) {
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		WR4(sc, GENET_SYS_PORT_CTRL, GENET_SYS_PORT_MODE_EXT_GPHY);
		break;
	default:
		WR4(sc, GENET_SYS_PORT_CTRL, 0);
	}

	gen_set_enaddr(sc);

	/* Setup RX filter */
	gen_setup_rxfilter(sc);

	gen_init_txrings(sc);
	gen_init_rxrings(sc);
	gen_enable(sc);
	gen_enable_offload(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

static void
gen_init(void *softc)
{
	struct gen_softc *sc;

	sc = softc;
	GEN_LOCK(sc);
	gen_init_locked(sc);
	GEN_UNLOCK(sc);
}

static uint8_t ether_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static void
gen_setup_rxfilter_mdf(struct gen_softc *sc, u_int n, const uint8_t *ea)
{
	uint32_t addr0 = (ea[0] << 8) | ea[1];
	uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];

	WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
	WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
}

static u_int
gen_setup_multi(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	struct gen_softc *sc = arg;

	/* "count + 2" to account for unicast and broadcast */
	gen_setup_rxfilter_mdf(sc, count + 2, LLADDR(sdl));
	return (1);		/* increment to count */
}

static void
gen_setup_rxfilter(struct gen_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t cmd, mdf_ctrl;
	u_int n;

	GEN_ASSERT_LOCKED(sc);

	cmd = RD4(sc, GENET_UMAC_CMD);

	/*
	 * Count the required number of hardware filters. We need one
	 * for each multicast address, plus one for our own address and
	 * the broadcast address.
	 */
	n = if_llmaddr_count(ifp) + 2;

	if (n > GENET_MAX_MDF_FILTER)
		ifp->if_flags |= IFF_ALLMULTI;
	else
		ifp->if_flags &= ~IFF_ALLMULTI;

	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
		cmd |= GENET_UMAC_CMD_PROMISC;
		mdf_ctrl = 0;
	} else {
		cmd &= ~GENET_UMAC_CMD_PROMISC;
		gen_setup_rxfilter_mdf(sc, 0, ether_broadcastaddr);
		gen_setup_rxfilter_mdf(sc, 1, IF_LLADDR(ifp));
		(void) if_foreach_llmaddr(ifp, gen_setup_multi, sc);
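		/*
		 * The MDF enable bits are assigned MSB-first.  As a worked
		 * example, with n == 3 (unicast, broadcast, one multicast
		 * group) and assuming GENET_MAX_MDF_FILTER is 17, this
		 * sets the three uppermost bits: 16, 15 and 14.
		 */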
		mdf_ctrl = (__BIT(GENET_MAX_MDF_FILTER) - 1) &
		    ~(__BIT(GENET_MAX_MDF_FILTER - n) - 1);
	}

	WR4(sc, GENET_UMAC_CMD, cmd);
	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);
}

static void
gen_set_enaddr(struct gen_softc *sc)
{
	uint8_t *enaddr;
	uint32_t val;
	if_t ifp;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/* Write our unicast address */
	enaddr = IF_LLADDR(ifp);
	/* Write hardware address */
	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
	    (enaddr[0] << 24);
	WR4(sc, GENET_UMAC_MAC0, val);
	val = enaddr[5] | (enaddr[4] << 8);
	WR4(sc, GENET_UMAC_MAC1, val);
}

static void
gen_start_locked(struct gen_softc *sc)
{
	struct mbuf *m;
	if_t ifp;
	int cnt, err;

	GEN_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (cnt = 0; ; cnt++) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		err = gen_encap(sc, &m);
		if (err != 0) {
			if (err == ENOBUFS)
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			else if (m == NULL)
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if (m != NULL)
				if_sendq_prepend(ifp, m);
			break;
		}
		if_bpfmtap(ifp, m);
	}
}

static void
gen_start(if_t ifp)
{
	struct gen_softc *sc;

	sc = if_getsoftc(ifp);

	GEN_LOCK(sc);
	gen_start_locked(sc);
	GEN_UNLOCK(sc);
}

/* Test for any delayed checksum */
#define CSUM_DELAY_ANY	(CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)

static int
gen_encap(struct gen_softc *sc, struct mbuf **mp)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, first, i, index, offset;
	uint32_t csuminfo, length_status, csum_flags = 0, csumdata;
	struct mbuf *m;
	struct statusblock *sb = NULL;
	struct tx_queue *q;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	q = &sc->tx_queue[DEF_TXQUEUE];

	m = *mp;

	/*
	 * Reflected ICMPv6 packets, e.g. echo replies, tend to get laid
	 * out with only the Ethernet header in the first mbuf, and this
	 * doesn't seem to work.  Forwarded TCP packets over IPv6 also
	 * fail if laid out with only the Ethernet header in the first mbuf.
	 * For now, pull up any IPv6 packet with that layout.  Maybe IPv4
	 * needs it but we haven't run into it.  Pulling up the sizes of
	 * ether_header + ip6_header + icmp6_hdr seems to work for both
	 * ICMPv6 and TCP over IPv6.
	 */
#define IP6_PULLUP_LEN (sizeof(struct ether_header) + \
		        sizeof(struct ip6_hdr) + 8)
	if (m->m_len == sizeof(struct ether_header)) {
		int ether_type = mtod(m, struct ether_header *)->ether_type;
		if (ntohs(ether_type) == ETHERTYPE_IPV6) {
			m = m_pullup(m, MIN(m->m_pkthdr.len, IP6_PULLUP_LEN));
			if (m == NULL) {
				if (sc->ifp->if_flags & IFF_DEBUG)
					device_printf(sc->dev,
					    "IPV6 pullup fail\n");
				*mp = NULL;
				return (ENOMEM);
			}
		}
	}
#undef IP6_PULLUP_LEN

	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0) {
		csum_flags = m->m_pkthdr.csum_flags;
		csumdata = m->m_pkthdr.csum_data;
		M_PREPEND(m, sizeof(struct statusblock), M_NOWAIT);
		if (m == NULL) {
			if (sc->ifp->if_flags & IFF_DEBUG)
				device_printf(sc->dev, "prepend fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
		offset = gen_parse_tx(m, csum_flags);
		sb = mtod(m, struct statusblock *);
		if ((csum_flags & CSUM_DELAY_ANY) != 0) {
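			/*
			 * "offset" is the start of the transport header and
			 * csumdata is the checksum field's offset within it
			 * (e.g. 16 for TCP's th_sum, 6 for UDP's uh_sum), so
			 * offset + csumdata locates the field the hardware
			 * fills in.
			 */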
			csuminfo = (offset << TXCSUM_OFF_SHIFT) |
			    (offset + csumdata);
			csuminfo |= TXCSUM_LEN_VALID;
			if (csum_flags & (CSUM_UDP | CSUM_IP6_UDP))
				csuminfo |= TXCSUM_UDP;
			sb->txcsuminfo = csuminfo;
		} else
			sb->txcsuminfo = 0;
	}

	*mp = m;

	cur = first = q->cur;
	ent = &q->entries[cur];
	map = ent->map;
	error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL) {
			device_printf(sc->dev,
			    "gen_encap: m_collapse failed\n");
			m_freem(*mp);
			*mp = NULL;
			return (ENOMEM);
		}
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*mp);
			*mp = NULL;
		}
	}
	if (error != 0) {
		device_printf(sc->dev,
		    "gen_encap: bus_dmamap_load_mbuf_sg failed\n");
		return (error);
	}
	if (nsegs == 0) {
		m_freem(*mp);
		*mp = NULL;
		return (EIO);
	}

	/* Remove statusblock after mapping, before possible requeue or bpf. */
	if (sb != NULL) {
		m->m_data += sizeof(struct statusblock);
		m->m_len -= sizeof(struct statusblock);
		m->m_pkthdr.len -= sizeof(struct statusblock);
	}
	if (q->queued + nsegs > q->nentries) {
		bus_dmamap_unload(sc->tx_buf_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->tx_buf_tag, map, BUS_DMASYNC_PREWRITE);

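	/*
	 * prod_idx is a free-running counter shared with the hardware and
	 * masked by GENET_TX_DMA_PROD_CONS_MASK; only its low bits select
	 * the actual descriptor slot.
	 */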
	index = q->prod_idx & (q->nentries - 1);
	for (i = 0; i < nsegs; i++) {
		ent = &q->entries[cur];
		length_status = GENET_TX_DESC_STATUS_QTAG_MASK;
		if (i == 0) {
			length_status |= GENET_TX_DESC_STATUS_SOP |
			    GENET_TX_DESC_STATUS_CRC;
			if ((csum_flags & CSUM_DELAY_ANY) != 0)
				length_status |= GENET_TX_DESC_STATUS_CKSUM;
		}
		if (i == nsegs - 1)
			length_status |= GENET_TX_DESC_STATUS_EOP;

		length_status |= segs[i].ds_len <<
		    GENET_TX_DESC_STATUS_BUFLEN_SHIFT;

		WR4(sc, GENET_TX_DESC_ADDRESS_LO(index),
		    (uint32_t)segs[i].ds_addr);
		WR4(sc, GENET_TX_DESC_ADDRESS_HI(index),
		    (uint32_t)(segs[i].ds_addr >> 32));
		WR4(sc, GENET_TX_DESC_STATUS(index), length_status);

		++q->queued;
		cur = TX_NEXT(cur, q->nentries);
		index = TX_NEXT(index, q->nentries);
	}

	q->prod_idx += nsegs;
	q->prod_idx &= GENET_TX_DMA_PROD_CONS_MASK;
	/* We probably don't need to write the producer index on every iter */
	if (nsegs != 0)
		WR4(sc, GENET_TX_DMA_PROD_INDEX(q->hwindex), q->prod_idx);
	q->cur = cur;

	/* Store the mbuf at the first ring entry of this packet */
	q->entries[first].mbuf = m;

	return (0);
}

/*
 * Parse a packet to find the offset of the transport header for checksum
 * offload.  Ensure that the link and network headers are contiguous with
 * the status block, or transmission fails.
 */
static int
gen_parse_tx(struct mbuf *m, int csum_flags)
{
	int offset, off_in_m;
	bool copy = false, shift = false;
	u_char *p, *copy_p = NULL;
	struct mbuf *m0 = m;
	uint16_t ether_type;

	if (m->m_len == sizeof(struct statusblock)) {
		/* M_PREPEND placed statusblock at end; move to beginning */
		m->m_data = m->m_pktdat;
		copy_p = mtodo(m, sizeof(struct statusblock));
		m = m->m_next;
		off_in_m = 0;
		p = mtod(m, u_char *);
		copy = true;
	} else {
		/*
		 * If statusblock is not at beginning of mbuf (likely),
		 * then remember to move mbuf contents down before copying
		 * after them.
		 */
		if ((m->m_flags & M_EXT) == 0 && m->m_data != m->m_pktdat)
			shift = true;
		p = mtodo(m, sizeof(struct statusblock));
		off_in_m = sizeof(struct statusblock);
	}

/*
 * If headers need to be copied contiguous to statusblock, do so.
 * If copying to the internal mbuf data area, and the status block
 * is not at the beginning of that area, shift the status block (which
 * is empty) and following data.
 */
#define COPY(size) {							\
	int hsize = size;						\
	if (copy) {							\
		if (shift) {						\
			u_char *p0;					\
			shift = false;					\
			p0 = mtodo(m0, sizeof(struct statusblock));	\
			m0->m_data = m0->m_pktdat;			\
			bcopy(p0, mtodo(m0, sizeof(struct statusblock)),\
			    m0->m_len - sizeof(struct statusblock));	\
			copy_p = mtodo(m0, sizeof(struct statusblock));	\
		}							\
		bcopy(p, copy_p, hsize);				\
		m0->m_len += hsize;					\
		m0->m_pkthdr.len += hsize;	/* unneeded */		\
		m->m_len -= hsize;					\
		m->m_data += hsize;					\
	}								\
	copy_p += hsize;						\
}

	KASSERT((sizeof(struct statusblock) + sizeof(struct ether_vlan_header) +
	    sizeof(struct ip6_hdr) <= MLEN), ("%s: mbuf too small", __func__));

	if (((struct ether_header *)p)->ether_type == htons(ETHERTYPE_VLAN)) {
		offset = sizeof(struct ether_vlan_header);
		ether_type = ntohs(((struct ether_vlan_header *)p)->evl_proto);
		COPY(sizeof(struct ether_vlan_header));
		if (m->m_len == off_in_m + sizeof(struct ether_vlan_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_vlan_header);
			p += sizeof(struct ether_vlan_header);
		}
	} else {
		offset = sizeof(struct ether_header);
		ether_type = ntohs(((struct ether_header *)p)->ether_type);
		COPY(sizeof(struct ether_header));
		if (m->m_len == off_in_m + sizeof(struct ether_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_header);
			p += sizeof(struct ether_header);
		}
	}
	if (ether_type == ETHERTYPE_IP) {
		COPY(((struct ip *)p)->ip_hl << 2);
		offset += ((struct ip *)p)->ip_hl << 2;
	} else if (ether_type == ETHERTYPE_IPV6) {
		COPY(sizeof(struct ip6_hdr));
		offset += sizeof(struct ip6_hdr);
	} else {
		/*
		 * Unknown whether other cases require moving a header;
		 * ARP works without.
		 */
	}
	return (offset);
#undef COPY
}

static void
gen_intr(void *arg)
{
	struct gen_softc *sc = arg;
	uint32_t val;

	GEN_LOCK(sc);

	val = RD4(sc, GENET_INTRL2_CPU_STAT);
	val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
	WR4(sc, GENET_INTRL2_CPU_CLEAR, val);

	if (val & GENET_IRQ_RXDMA_DONE)
		gen_rxintr(sc, &sc->rx_queue[DEF_RXQUEUE]);

	if (val & GENET_IRQ_TXDMA_DONE) {
		gen_txintr(sc, &sc->tx_queue[DEF_TXQUEUE]);
		if (!if_sendq_empty(sc->ifp))
			gen_start_locked(sc);
	}

	GEN_UNLOCK(sc);
}

static int
gen_rxintr(struct gen_softc *sc, struct rx_queue *q)
{
	if_t ifp;
	struct mbuf *m, *mh, *mt;
	struct statusblock *sb = NULL;
	int error, index, len, cnt, npkt, n;
	uint32_t status, prod_idx, total;

	ifp = sc->ifp;
	mh = mt = NULL;
	cnt = 0;
	npkt = 0;

	prod_idx = RD4(sc, GENET_RX_DMA_PROD_INDEX(q->hwindex)) &
	    GENET_RX_DMA_PROD_CONS_MASK;
	total = (prod_idx - q->cons_idx) & GENET_RX_DMA_PROD_CONS_MASK;
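	/*
	 * As on the transmit side, the producer/consumer indices are
	 * free-running counters, so the masked difference is the number
	 * of completed receive descriptors.
	 */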

	index = q->cons_idx & (RX_DESC_COUNT - 1);
	for (n = 0; n < total; n++) {
		bus_dmamap_sync(sc->rx_buf_tag, q->entries[index].map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rx_buf_tag, q->entries[index].map);

		m = q->entries[index].mbuf;

		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			sb = mtod(m, struct statusblock *);
			status = sb->status_buflen;
		} else
			status = RD4(sc, GENET_RX_DESC_STATUS(index));

		len = (status & GENET_RX_DESC_STATUS_BUFLEN_MASK) >>
		    GENET_RX_DESC_STATUS_BUFLEN_SHIFT;

		/* check for errors */
		if ((status &
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP |
		    GENET_RX_DESC_STATUS_RX_ERROR)) !=
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP)) {
			if (ifp->if_flags & IFF_DEBUG)
				device_printf(sc->dev,
				    "error/frag %x csum %x\n", status,
				    sb->rxcsum);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		error = gen_newbuf_rx(sc, q, index);
		if (error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			if (ifp->if_flags & IFF_DEBUG)
				device_printf(sc->dev, "gen_newbuf_rx %d\n",
				    error);
			/* reuse previous mbuf */
			(void) gen_mapbuf_rx(sc, q, index, m);
			continue;
		}

		if (sb != NULL) {
			if (status & GENET_RX_DESC_STATUS_CKSUM_OK) {
				/* L4 checksum checked; not sure about L3. */
				m->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
			m->m_data += sizeof(struct statusblock);
			m->m_len -= sizeof(struct statusblock);
			len -= sizeof(struct statusblock);
		}
		if (len > ETHER_ALIGN) {
			m_adj(m, ETHER_ALIGN);
			len -= ETHER_ALIGN;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
		m->m_len = len;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		m->m_nextpkt = NULL;
		if (mh == NULL)
			mh = m;
		else
			mt->m_nextpkt = m;
		mt = m;
		++cnt;
		++npkt;

		index = RX_NEXT(index, q->nentries);

		q->cons_idx = (q->cons_idx + 1) & GENET_RX_DMA_PROD_CONS_MASK;
		WR4(sc, GENET_RX_DMA_CONS_INDEX(q->hwindex), q->cons_idx);

		if (cnt == gen_rx_batch) {
			GEN_UNLOCK(sc);
			if_input(ifp, mh);
			GEN_LOCK(sc);
			mh = mt = NULL;
			cnt = 0;
		}
	}

	if (mh != NULL) {
		GEN_UNLOCK(sc);
		if_input(ifp, mh);
		GEN_LOCK(sc);
	}

	return (npkt);
}

static void
gen_txintr(struct gen_softc *sc, struct tx_queue *q)
{
	uint32_t cons_idx, total;
	struct gen_ring_ent *ent;
	if_t ifp;
	int i, prog;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	cons_idx = RD4(sc, GENET_TX_DMA_CONS_INDEX(q->hwindex)) &
	    GENET_TX_DMA_PROD_CONS_MASK;
	total = (cons_idx - q->cons_idx) & GENET_TX_DMA_PROD_CONS_MASK;

	prog = 0;
	for (i = q->next; q->queued > 0 && total > 0;
	    i = TX_NEXT(i, q->nentries), total--) {
		/* XXX check for errors */

		ent = &q->entries[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		prog++;
		--q->queued;
	}

	if (prog > 0) {
		q->next = i;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}

	q->cons_idx = cons_idx;
}

static void
gen_intr2(void *arg)
{
	struct gen_softc *sc = arg;

	device_printf(sc->dev, "gen_intr2\n");
}

static int
gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
	m_adj(m, ETHER_ALIGN);

	return (gen_mapbuf_rx(sc, q, index, m));
}

static int
gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m)
{
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int nsegs;

	map = q->entries[index].map;
	if (bus_dmamap_load_mbuf_sg(sc->rx_buf_tag, map, m, &seg, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->rx_buf_tag, map, BUS_DMASYNC_PREREAD);

	q->entries[index].mbuf = m;
	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)seg.ds_addr);
	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(seg.ds_addr >> 32));

	return (0);
}

static int
gen_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, enable, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		GEN_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					gen_setup_rxfilter(sc);
			} else
				gen_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				gen_reset(sc);
		}
		sc->if_flags = if_getflags(ifp);
		GEN_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			GEN_LOCK(sc);
			gen_setup_rxfilter(sc);
			GEN_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		enable = if_getcapenable(ifp);
		flags = ifr->ifr_reqcap ^ enable;
		if (flags & IFCAP_RXCSUM)
			enable ^= IFCAP_RXCSUM;
		if (flags & IFCAP_RXCSUM_IPV6)
			enable ^= IFCAP_RXCSUM_IPV6;
		if (flags & IFCAP_TXCSUM)
			enable ^= IFCAP_TXCSUM;
		if (flags & IFCAP_TXCSUM_IPV6)
			enable ^= IFCAP_TXCSUM_IPV6;
		if (enable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6))
			if_sethwassist(ifp, GEN_CSUM_FEATURES);
		else
			if_sethwassist(ifp, 0);
		if_setcapenable(ifp, enable);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			gen_enable_offload(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
gen_tick(void *softc)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int link;

	sc = softc;
	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	link = sc->link;
	mii_tick(mii);
	if (sc->link && !link)
		gen_start_locked(sc);

	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

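/* With the DELAY(10) polls below, this bounds each MDIO access to ~10 ms. */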
#define	MII_BUSY_RETRY		1000

static int
gen_miibus_readreg(device_t dev, int phy, int reg)
{
	struct gen_softc *sc;
	int retry, val;

	sc = device_get_softc(dev);
	val = 0;

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_READ |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if (((val = RD4(sc, GENET_MDIO_CMD)) &
		    GENET_MDIO_START_BUSY) == 0) {
			if (val & GENET_MDIO_READ_FAILED)
				return (0);	/* -1? */
			val &= GENET_MDIO_VAL_MASK;
			break;
		}
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (val);
}

static int
gen_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct gen_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_WRITE |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT) |
	    (val & GENET_MDIO_VAL_MASK));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		val = RD4(sc, GENET_MDIO_CMD);
		if ((val & GENET_MDIO_START_BUSY) == 0)
			break;
		DELAY(10);
	}
	if (retry == 0)
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (0);
}

static void
gen_update_link_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;
	u_int speed;

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
			speed = GENET_UMAC_CMD_SPEED_1000;
			sc->link = 1;
			break;
		case IFM_100_TX:
			speed = GENET_UMAC_CMD_SPEED_100;
			sc->link = 1;
			break;
		case IFM_10_T:
			speed = GENET_UMAC_CMD_SPEED_10;
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
	if (sc->phy_mode == MII_CONTYPE_RGMII)
		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	else
		val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);

	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_SPEED;
	val |= speed;
	WR4(sc, GENET_UMAC_CMD, val);
}

static void
gen_link_task(void *arg, int pending)
{
	struct gen_softc *sc;

	sc = arg;

	GEN_LOCK(sc);
	gen_update_link_locked(sc);
	GEN_UNLOCK(sc);
}

static void
gen_miibus_statchg(device_t dev)
{
	struct gen_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}

static void
gen_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct gen_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	GEN_UNLOCK(sc);
}

static int
gen_media_change(if_t ifp)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	int error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	error = mii_mediachg(mii);
	GEN_UNLOCK(sc);

	return (error);
}

static device_method_t gen_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gen_probe),
	DEVMETHOD(device_attach,	gen_attach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	gen_miibus_readreg),
	DEVMETHOD(miibus_writereg,	gen_miibus_writereg),
	DEVMETHOD(miibus_statchg,	gen_miibus_statchg),

	DEVMETHOD_END
};

static driver_t gen_driver = {
	"genet",
	gen_methods,
	sizeof(struct gen_softc),
};

static devclass_t gen_devclass;

DRIVER_MODULE(genet, simplebus, gen_driver, gen_devclass, 0, 0);
DRIVER_MODULE(miibus, genet, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(genet, ether, 1, 1, 1);
MODULE_DEPEND(genet, miibus, 1, 1, 1);