/*-
 * Copyright (c) 2020 Michael J Karels
 * Copyright (c) 2016, 2020 Jared McNeill <jmcneill@invisible.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * RPi4 (BCM 2711) Gigabit Ethernet ("GENET") controller
 *
 * This driver is derived in large part from bcmgenet.c from NetBSD by
 * Jared McNeill.  Parts of the structure and other common code in
 * this driver have been copied from if_awg.c for the Allwinner EMAC,
 * also by Jared McNeill.
 */

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/taskqueue.h>
#include <sys/gpio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

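/*
 * The register definitions in if_genetreg.h derive from the NetBSD
 * driver and use NetBSD's __BIT() macro, so provide a local definition.
 */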
#define __BIT(_x)	(1 << (_x))
#include "if_genetreg.h"

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_fdt.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include "syscon_if.h"
#include "miibus_if.h"
#include "gpio_if.h"

#define	RD4(sc, reg)		bus_read_4((sc)->res[_RES_MAC], (reg))
#define	WR4(sc, reg, val)	bus_write_4((sc)->res[_RES_MAC], (reg), (val))

#define	GEN_LOCK(sc)		mtx_lock(&(sc)->mtx)
#define	GEN_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
#define	GEN_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
#define	GEN_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED)

#define	TX_DESC_COUNT		GENET_DMA_DESC_COUNT
#define	RX_DESC_COUNT		GENET_DMA_DESC_COUNT

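/*
 * Ring indices wrap with a power-of-two mask, so the descriptor counts
 * must be powers of two.
 */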
#define	TX_NEXT(n, count)		(((n) + 1) & ((count) - 1))
#define	RX_NEXT(n, count)		(((n) + 1) & ((count) - 1))

#define	TX_MAX_SEGS		20

static SYSCTL_NODE(_hw, OID_AUTO, genet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "genet driver parameters");

/* Maximum number of mbufs to pass per call to if_input */
static int gen_rx_batch = 16 /* RX_BATCH_DEFAULT */;
SYSCTL_INT(_hw_genet, OID_AUTO, rx_batch, CTLFLAG_RDTUN,
    &gen_rx_batch, 0, "max mbufs per call to if_input");

TUNABLE_INT("hw.gen.rx_batch", &gen_rx_batch);	/* old name/interface */

/*
 * Transmitting packets with only an Ethernet header in the first mbuf
 * fails.  Examples include reflected ICMPv6 packets, e.g. echo replies;
 * forwarded IPv6/TCP packets; and forwarded IPv4/TCP packets that use NAT
 * with IPFW.  Pulling up the sizes of ether_header + ip6_hdr + icmp6_hdr
 * seems to work for both ICMPv6 and TCP over IPv6, as well as the IPv4/TCP
 * case.
 */
static int gen_tx_hdr_min = 56;		/* ether_header + ip6_hdr + icmp6_hdr */
SYSCTL_INT(_hw_genet, OID_AUTO, tx_hdr_min, CTLFLAG_RW,
    &gen_tx_hdr_min, 0, "header to add to packets with ether header only");

static struct ofw_compat_data compat_data[] = {
	{ "brcm,genet-v1",		1 },
	{ "brcm,genet-v2",		2 },
	{ "brcm,genet-v3",		3 },
	{ "brcm,genet-v4",		4 },
	{ "brcm,genet-v5",		5 },
	{ "brcm,bcm2711-genet-v5",	5 },
	{ NULL,				0 }
};

enum {
	_RES_MAC,		/* MAC (GENET) register block */
	_RES_IRQ1,
	_RES_IRQ2,
	_RES_NITEMS
};

static struct resource_spec gen_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ -1, 0 }
};

/* structure per ring entry */
struct gen_ring_ent {
	bus_dmamap_t		map;
	struct mbuf		*mbuf;
};

struct tx_queue {
	int			hwindex;		/* hardware index */
	int			nentries;
	u_int			queued;			/* descriptors in use */
	u_int			cur;
	u_int			next;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct rx_queue {
	int			hwindex;		/* hardware index */
	int			nentries;
	u_int			cur;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct gen_softc {
	struct resource		*res[_RES_NITEMS];
	struct mtx		mtx;
	if_t			ifp;
	device_t		dev;
	device_t		miibus;
	mii_contype_t		phy_mode;

	struct callout		stat_ch;
	struct task		link_task;
	void			*ih;
	void			*ih2;
	int			type;
	int			if_flags;
	int			link;
	bus_dma_tag_t		tx_buf_tag;
	/*
	 * The genet chip has multiple queues for transmit and receive.
	 * This driver uses only one (queue 16, the default), but the
	 * data structures are laid out for multiple rings.  The
	 * additional rings would be used for different priorities.
	 */
#define DEF_TXQUEUE	0
#define NTXQUEUE	1
	struct tx_queue		tx_queue[NTXQUEUE];
	struct gen_ring_ent	tx_ring_ent[TX_DESC_COUNT];  /* ring entries */

	bus_dma_tag_t		rx_buf_tag;
#define DEF_RXQUEUE	0
#define NRXQUEUE	1
	struct rx_queue		rx_queue[NRXQUEUE];
	struct gen_ring_ent	rx_ring_ent[RX_DESC_COUNT];  /* ring entries */
};

static void gen_init(void *softc);
static void gen_start(if_t ifp);
static void gen_stop(struct gen_softc *sc);
static void gen_destroy(struct gen_softc *sc);
static int gen_encap(struct gen_softc *sc, struct mbuf **mp);
static int gen_parse_tx(struct mbuf *m, int csum_flags);
static int gen_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int gen_get_phy_mode(device_t dev);
static bool gen_get_eaddr(device_t dev, struct ether_addr *eaddr);
static void gen_set_enaddr(struct gen_softc *sc);
static void gen_setup_rxfilter(struct gen_softc *sc);
static void gen_reset(struct gen_softc *sc);
static void gen_enable(struct gen_softc *sc);
static void gen_dma_disable(struct gen_softc *sc);
static int gen_bus_dma_init(struct gen_softc *sc);
static void gen_bus_dma_teardown(struct gen_softc *sc);
static void gen_enable_intr(struct gen_softc *sc);
static void gen_init_txrings(struct gen_softc *sc);
static void gen_init_rxrings(struct gen_softc *sc);
static void gen_intr(void *softc);
static int gen_rxintr(struct gen_softc *sc, struct rx_queue *q);
static void gen_txintr(struct gen_softc *sc, struct tx_queue *q);
static void gen_intr2(void *softc);
static int gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index);
static int gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m);
static void gen_link_task(void *arg, int pending);
static void gen_media_status(if_t ifp, struct ifmediareq *ifmr);
static int gen_media_change(if_t ifp);
static void gen_tick(void *softc);

static int
gen_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "RPi4 Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}

static int
gen_attach(device_t dev)
{
	struct ether_addr eaddr;
	struct gen_softc *sc;
	int major, minor, error, mii_flags;
	bool eaddr_found;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	if (bus_alloc_resources(dev, gen_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		error = ENXIO;
		goto fail;
	}

	major = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MAJOR) >> REV_MAJOR_SHIFT;
	if (major != REV_MAJOR_V5) {
		device_printf(dev, "version %d is not supported\n", major);
		error = ENXIO;
		goto fail;
	}
	minor = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MINOR) >> REV_MINOR_SHIFT;
	device_printf(dev, "GENET version 5.%d phy 0x%04x\n", minor,
		RD4(sc, GENET_SYS_REV_CTRL) & REV_PHY);

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
	TASK_INIT(&sc->link_task, 0, gen_link_task, sc);

	error = gen_get_phy_mode(dev);
	if (error != 0)
		goto fail;

	bzero(&eaddr, sizeof(eaddr));
	eaddr_found = gen_get_eaddr(dev, &eaddr);

	/* reset core */
	gen_reset(sc);

	gen_dma_disable(sc);

	/* Setup DMA */
	error = gen_bus_dma_init(sc);
	if (error != 0) {
		device_printf(dev, "cannot setup bus dma\n");
		goto fail;
	}

	/* Setup ethernet interface */
	sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp, sc);
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(sc->ifp, gen_start);
	if_setioctlfn(sc->ifp, gen_ioctl);
	if_setinitfn(sc->ifp, gen_init);
	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
	if_setsendqready(sc->ifp);
#define GEN_CSUM_FEATURES	(CSUM_UDP | CSUM_TCP)
	if_sethwassist(sc->ifp, GEN_CSUM_FEATURES);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));

	/* Install interrupt handlers */
	error = bus_setup_intr(dev, sc->res[_RES_IRQ1],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler1\n");
		goto fail;
	}

	error = bus_setup_intr(dev, sc->res[_RES_IRQ2],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr2, sc, &sc->ih2);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler2\n");
		goto fail;
	}

	/* Attach MII driver */
	mii_flags = 0;
	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII_ID:
		mii_flags |= MIIF_RX_DELAY | MIIF_TX_DELAY;
		break;
	case MII_CONTYPE_RGMII_RXID:
		mii_flags |= MIIF_RX_DELAY;
		break;
	case MII_CONTYPE_RGMII_TXID:
		mii_flags |= MIIF_TX_DELAY;
		break;
	default:
		break;
	}
	error = mii_attach(dev, &sc->miibus, sc->ifp, gen_media_change,
	    gen_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    mii_flags);
	if (error != 0) {
		device_printf(dev, "cannot attach PHY\n");
		goto fail;
	}

	/* If address was not found, create one based on the hostid and name. */
	if (!eaddr_found)
		ether_gen_addr(sc->ifp, &eaddr);
	/* Attach ethernet interface */
	ether_ifattach(sc->ifp, eaddr.octet);

fail:
	if (error)
		gen_destroy(sc);
	return (error);
}

/* Free resources after failed attach.  This is not a complete detach. */
static void
gen_destroy(struct gen_softc *sc)
{

	device_delete_children(sc->dev);
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ1], sc->ih);
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ2], sc->ih2);
	gen_bus_dma_teardown(sc);
	callout_drain(&sc->stat_ch);
	if (mtx_initialized(&sc->mtx))
		mtx_destroy(&sc->mtx);
	bus_release_resources(sc->dev, gen_spec, sc->res);
	if (sc->ifp != NULL) {
		if_free(sc->ifp);
		sc->ifp = NULL;
	}
}

static int
gen_detach(device_t dev)
{
	struct gen_softc *sc;
	int error;

	sc = device_get_softc(dev);

	GEN_LOCK(sc);
	gen_stop(sc);
	GEN_UNLOCK(sc);
	callout_drain(&sc->stat_ch);
	ether_ifdetach(sc->ifp);

	/* Detach the miibus */
	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	/* clean up dma */
	gen_bus_dma_teardown(sc);

	/* Release bus resources. */
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ1], sc->ih);
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ2], sc->ih2);
	bus_release_resources(sc->dev, gen_spec, sc->res);

	if (sc->ifp != NULL)
		if_free(sc->ifp);
	mtx_destroy(&sc->mtx);
	return (0);
}

static int
gen_get_phy_mode(device_t dev)
{
	struct gen_softc *sc;
	phandle_t node;
	mii_contype_t type;
	int error = 0;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	type = mii_fdt_get_contype(node);

	switch (type) {
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		sc->phy_mode = type;
		break;
	default:
		device_printf(dev, "unknown phy-mode '%s'\n",
		    mii_fdt_contype_to_name(type));
		error = ENXIO;
		break;
	}

	return (error);
}

static bool
gen_get_eaddr(device_t dev, struct ether_addr *eaddr)
{
	struct gen_softc *sc;
	uint32_t maclo, machi, val;
	phandle_t node;

	sc = device_get_softc(dev);

	node = ofw_bus_get_node(dev);
	if (OF_getprop(node, "mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "local-mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "address", eaddr->octet, ETHER_ADDR_LEN) != -1)
		return (true);

	device_printf(dev, "No Ethernet address found in fdt!\n");
	maclo = machi = 0;

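	/*
	 * Fall back to an address already programmed into the controller,
	 * presumably by the firmware; the MAC registers are read only if
	 * the receive buffer logic is out of reset.
	 */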
	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) {
		maclo = htobe32(RD4(sc, GENET_UMAC_MAC0));
		machi = htobe16(RD4(sc, GENET_UMAC_MAC1) & 0xffff);
	}

	if (maclo == 0 && machi == 0) {
		if (bootverbose)
			device_printf(dev,
			    "No Ethernet address found in controller\n");
		return (false);
	} else {
		eaddr->octet[0] = maclo & 0xff;
		eaddr->octet[1] = (maclo >> 8) & 0xff;
		eaddr->octet[2] = (maclo >> 16) & 0xff;
		eaddr->octet[3] = (maclo >> 24) & 0xff;
		eaddr->octet[4] = machi & 0xff;
		eaddr->octet[5] = (machi >> 8) & 0xff;
		return (true);
	}
}

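/*
 * Reset the controller: flush the receive buffer, then soft-reset the
 * UMAC.  Local loopback is enabled while the soft reset is asserted
 * (apparently a hardware requirement); the MIB counters are cleared last.
 */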
static void
gen_reset(struct gen_softc *sc)
{
	uint32_t val;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	DELAY(10);

	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	DELAY(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);
}

static void
gen_enable(struct gen_softc *sc)
{
	u_int val;

	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	gen_enable_intr(sc);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

static void
gen_disable_intr(struct gen_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, GENET_INTRL2_CPU_SET_MASK, 0xffffffff);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK, 0xffffffff);
}

static void
gen_disable(struct gen_softc *sc)
{
	uint32_t val;

	/* Stop receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Stop transmitter */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_TXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Disable Interrupt */
	gen_disable_intr(sc);
}

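/*
 * Enable or disable checksum offload.  With offload enabled, frames in
 * the RX and TX buffers carry a leading status block (struct statusblock,
 * enabled by GENET_RBUF_64B_EN); gen_encap() and gen_rxintr() account
 * for it.
 */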
static void
gen_enable_offload(struct gen_softc *sc)
{
	uint32_t check_ctrl, buf_ctrl;

	check_ctrl = RD4(sc, GENET_RBUF_CHECK_CTRL);
	buf_ctrl  = RD4(sc, GENET_RBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0) {
		check_ctrl |= GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl |= GENET_RBUF_64B_EN;
	} else {
		check_ctrl &= ~GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	}
	WR4(sc, GENET_RBUF_CHECK_CTRL, check_ctrl);
	WR4(sc, GENET_RBUF_CTRL, buf_ctrl);

	buf_ctrl  = RD4(sc, GENET_TBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0)
		buf_ctrl |= GENET_RBUF_64B_EN;
	else
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	WR4(sc, GENET_TBUF_CTRL, buf_ctrl);
}

static void
gen_dma_disable(struct gen_softc *sc)
{
	int val;

	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	val &= ~GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	val &= ~GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static int
gen_bus_dma_init(struct gen_softc *sc)
{
	device_t dev = sc->dev;
	int i, error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx_buf_tag, 0,
		    &sc->tx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->rx_buf_tag, 0,
		    &sc->rx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
	}
	return (0);
}

static void
gen_bus_dma_teardown(struct gen_softc *sc)
{
	int i, error;

	if (sc->tx_buf_tag != NULL) {
		for (i = 0; i < TX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->tx_buf_tag,
			    sc->tx_ring_ent[i].map);
			sc->tx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->tx_buf_tag);
		sc->tx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}

	if (sc->rx_buf_tag != NULL) {
		for (i = 0; i < RX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->rx_buf_tag,
			    sc->rx_ring_ent[i].map);
			sc->rx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->rx_buf_tag);
		sc->rx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}
}

static void
gen_enable_intr(struct gen_softc *sc)
{

	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_txring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct tx_queue *q;
	uint32_t val;

	q = &sc->tx_queue[queue];
	q->entries = &sc->tx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;

	/* TX ring */

	q->queued = 0;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_TX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_TX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_TX_DMA_CTRL, val);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_rxring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct rx_queue *q;
	uint32_t val;
	int i;

	q = &sc->rx_queue[queue];
	q->entries = &sc->rx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_RX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_RX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    (5 << GENET_RX_DMA_XON_XOFF_THRES_LO_SHIFT) | (RX_DESC_COUNT >> 4));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* fill ring */
	for (i = 0; i < nentries; i++)
		gen_newbuf_rx(sc, q, i);

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static void
gen_init_txrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_txring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->tx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_txring(sc, DEF_TXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    TX_DESC_COUNT);
	sc->tx_queue[DEF_TXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_init_rxrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_rxring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->rx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_rxring(sc, DEF_RXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    RX_DESC_COUNT);
	sc->rx_queue[DEF_RXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}
static void
gen_stop(struct gen_softc *sc)
{
	int i;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	callout_stop(&sc->stat_ch);
	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
	gen_reset(sc);
	gen_disable(sc);
	gen_dma_disable(sc);

	/* Clear the tx/rx ring buffer */
	for (i = 0; i < TX_DESC_COUNT; i++) {
		ent = &sc->tx_ring_ent[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
				BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
		}
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		ent = &sc->rx_ring_ent[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->rx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
		}
	}
}

static void
gen_init_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	GEN_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		WR4(sc, GENET_SYS_PORT_CTRL, GENET_SYS_PORT_MODE_EXT_GPHY);
		break;
	default:
		WR4(sc, GENET_SYS_PORT_CTRL, 0);
	}

	gen_set_enaddr(sc);

	/* Setup RX filter */
	gen_setup_rxfilter(sc);

	gen_init_txrings(sc);
	gen_init_rxrings(sc);
	gen_enable(sc);
	gen_enable_offload(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

static void
gen_init(void *softc)
{
	struct gen_softc *sc;

	sc = softc;
	GEN_LOCK(sc);
	gen_init_locked(sc);
	GEN_UNLOCK(sc);
}

static uint8_t ether_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static void
gen_setup_rxfilter_mdf(struct gen_softc *sc, u_int n, const uint8_t *ea)
{
	uint32_t addr0 = (ea[0] << 8) | ea[1];
	uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];

	WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
	WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
}

static u_int
gen_setup_multi(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	struct gen_softc *sc = arg;

	/* "count + 2" to account for unicast and broadcast */
	gen_setup_rxfilter_mdf(sc, count + 2, LLADDR(sdl));
	return (1);		/* one more filter used */
}

static void
gen_setup_rxfilter(struct gen_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t cmd, mdf_ctrl;
	u_int n;

	GEN_ASSERT_LOCKED(sc);

	cmd = RD4(sc, GENET_UMAC_CMD);

	/*
	 * Count the required number of hardware filters. We need one
	 * for each multicast address, plus one for our own address and
	 * the broadcast address.
	 */
	n = if_llmaddr_count(ifp) + 2;

	if (n > GENET_MAX_MDF_FILTER)
		if_setflagbits(ifp, IFF_ALLMULTI, 0);
	else
		if_setflagbits(ifp, 0, IFF_ALLMULTI);

	if ((if_getflags(ifp) & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
		cmd |= GENET_UMAC_CMD_PROMISC;
		mdf_ctrl = 0;
	} else {
		cmd &= ~GENET_UMAC_CMD_PROMISC;
		gen_setup_rxfilter_mdf(sc, 0, ether_broadcastaddr);
		gen_setup_rxfilter_mdf(sc, 1, if_getlladdr(ifp));
		(void) if_foreach_llmaddr(ifp, gen_setup_multi, sc);
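		/*
		 * Filter slots are enabled MSB-first in MDF_CTRL: slot i
		 * corresponds to bit (GENET_MAX_MDF_FILTER - 1 - i), so
		 * this sets the top n bits of the mask.
		 */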
		mdf_ctrl = (__BIT(GENET_MAX_MDF_FILTER) - 1) &
		    ~(__BIT(GENET_MAX_MDF_FILTER - n) - 1);
	}

	WR4(sc, GENET_UMAC_CMD, cmd);
	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);
}

static void
gen_set_enaddr(struct gen_softc *sc)
{
	uint8_t *enaddr;
	uint32_t val;
	if_t ifp;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/* Write our unicast address */
	enaddr = if_getlladdr(ifp);
	/* Write hardware address */
	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
	    (enaddr[0] << 24);
	WR4(sc, GENET_UMAC_MAC0, val);
	val = enaddr[5] | (enaddr[4] << 8);
	WR4(sc, GENET_UMAC_MAC1, val);
}

static void
gen_start_locked(struct gen_softc *sc)
{
	struct mbuf *m;
	if_t ifp;
	int err;

	GEN_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	while (true) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		err = gen_encap(sc, &m);
		if (err != 0) {
			if (err == ENOBUFS)
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			else if (m == NULL)
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if (m != NULL)
				if_sendq_prepend(ifp, m);
			break;
		}
		bpf_mtap_if(ifp, m);
	}
}

static void
gen_start(if_t ifp)
{
	struct gen_softc *sc;

	sc = if_getsoftc(ifp);

	GEN_LOCK(sc);
	gen_start_locked(sc);
	GEN_UNLOCK(sc);
}

/* Test for any delayed checksum */
#define CSUM_DELAY_ANY	(CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)

static int
gen_encap(struct gen_softc *sc, struct mbuf **mp)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, first, i, index, offset;
	uint32_t csuminfo, length_status, csum_flags = 0, csumdata;
	struct mbuf *m;
	struct statusblock *sb = NULL;
	struct tx_queue *q;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	q = &sc->tx_queue[DEF_TXQUEUE];
	if (q->queued == q->nentries) {
		/* tx_queue is full */
		return (ENOBUFS);
	}

	m = *mp;

	/*
	 * Don't attempt to send packets with only an Ethernet header in
	 * first mbuf; see comment above with gen_tx_hdr_min.
	 */
	if (m->m_len == sizeof(struct ether_header)) {
		m = m_pullup(m, MIN(m->m_pkthdr.len, gen_tx_hdr_min));
		if (m == NULL) {
			if (if_getflags(sc->ifp) & IFF_DEBUG)
				device_printf(sc->dev,
				    "header pullup fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
	}

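	/*
	 * With TX checksum offload enabled, make room for a transmit
	 * status block ahead of the frame and fill in the checksum
	 * start/offset that gen_parse_tx() computes.
	 */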
	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0) {
		csum_flags = m->m_pkthdr.csum_flags;
		csumdata = m->m_pkthdr.csum_data;
		M_PREPEND(m, sizeof(struct statusblock), M_NOWAIT);
		if (m == NULL) {
			if (if_getflags(sc->ifp) & IFF_DEBUG)
				device_printf(sc->dev, "prepend fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
		offset = gen_parse_tx(m, csum_flags);
		sb = mtod(m, struct statusblock *);
		if ((csum_flags & CSUM_DELAY_ANY) != 0) {
			csuminfo = (offset << TXCSUM_OFF_SHIFT) |
			    (offset + csumdata);
			csuminfo |= TXCSUM_LEN_VALID;
			if (csum_flags & (CSUM_UDP | CSUM_IP6_UDP))
				csuminfo |= TXCSUM_UDP;
			sb->txcsuminfo = csuminfo;
		} else
			sb->txcsuminfo = 0;
	}

	*mp = m;

	cur = first = q->cur;
	ent = &q->entries[cur];
	map = ent->map;
	error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL) {
			device_printf(sc->dev,
			    "gen_encap: m_collapse failed\n");
			m_freem(*mp);
			*mp = NULL;
			return (ENOMEM);
		}
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*mp);
			*mp = NULL;
		}
	}
	if (error != 0) {
		device_printf(sc->dev,
		    "gen_encap: bus_dmamap_load_mbuf_sg failed\n");
		return (error);
	}
	if (nsegs == 0) {
		m_freem(*mp);
		*mp = NULL;
		return (EIO);
	}

	/* Remove statusblock after mapping, before possible requeue or bpf. */
	if (sb != NULL) {
		m->m_data += sizeof(struct statusblock);
		m->m_len -= sizeof(struct statusblock);
		m->m_pkthdr.len -= sizeof(struct statusblock);
	}
	if (q->queued + nsegs > q->nentries) {
		bus_dmamap_unload(sc->tx_buf_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->tx_buf_tag, map, BUS_DMASYNC_PREWRITE);

	index = q->prod_idx & (q->nentries - 1);
	for (i = 0; i < nsegs; i++) {
		ent = &q->entries[cur];
		length_status = GENET_TX_DESC_STATUS_QTAG_MASK;
		if (i == 0) {
			length_status |= GENET_TX_DESC_STATUS_SOP |
			    GENET_TX_DESC_STATUS_CRC;
			if ((csum_flags & CSUM_DELAY_ANY) != 0)
				length_status |= GENET_TX_DESC_STATUS_CKSUM;
		}
		if (i == nsegs - 1)
			length_status |= GENET_TX_DESC_STATUS_EOP;

		length_status |= segs[i].ds_len <<
		    GENET_TX_DESC_STATUS_BUFLEN_SHIFT;

		WR4(sc, GENET_TX_DESC_ADDRESS_LO(index),
		    (uint32_t)segs[i].ds_addr);
		WR4(sc, GENET_TX_DESC_ADDRESS_HI(index),
		    (uint32_t)(segs[i].ds_addr >> 32));
		WR4(sc, GENET_TX_DESC_STATUS(index), length_status);

		++q->queued;
		cur = TX_NEXT(cur, q->nentries);
		index = TX_NEXT(index, q->nentries);
	}

	q->prod_idx += nsegs;
	q->prod_idx &= GENET_TX_DMA_PROD_CONS_MASK;
	/* We probably don't need to write the producer index on every iter */
	if (nsegs != 0)
		WR4(sc, GENET_TX_DMA_PROD_INDEX(q->hwindex), q->prod_idx);
	q->cur = cur;

	/* Store mbuf in the first segment's ring entry */
	q->entries[first].mbuf = m;

	return (0);
}

/*
 * Parse a packet to find the offset of the transport header for checksum
 * offload.  Ensure that the link and network headers are contiguous with
 * the status block, or transmission fails.
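 * For example, an untagged IPv4 packet returns
 * sizeof(struct ether_header) plus the IP header length.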
 */
static int
gen_parse_tx(struct mbuf *m, int csum_flags)
{
	int offset, off_in_m;
	bool copy = false, shift = false;
	u_char *p, *copy_p = NULL;
	struct mbuf *m0 = m;
	uint16_t ether_type;

	if (m->m_len == sizeof(struct statusblock)) {
		/* M_PREPEND placed statusblock at end; move to beginning */
		m->m_data = m->m_pktdat;
		copy_p = mtodo(m, sizeof(struct statusblock));
		m = m->m_next;
		off_in_m = 0;
		p = mtod(m, u_char *);
		copy = true;
	} else {
		/*
		 * If statusblock is not at beginning of mbuf (likely),
		 * then remember to move mbuf contents down before copying
		 * after them.
		 */
		if ((m->m_flags & M_EXT) == 0 && m->m_data != m->m_pktdat)
			shift = true;
		p = mtodo(m, sizeof(struct statusblock));
		off_in_m = sizeof(struct statusblock);
	}

/*
 * If headers need to be copied contiguous to statusblock, do so.
 * If copying to the internal mbuf data area, and the status block
 * is not at the beginning of that area, shift the status block (which
 * is empty) and following data.
 */
#define COPY(size) {							\
	int hsize = size;						\
	if (copy) {							\
		if (shift) {						\
			u_char *p0;					\
			shift = false;					\
			p0 = mtodo(m0, sizeof(struct statusblock));	\
			m0->m_data = m0->m_pktdat;			\
			bcopy(p0, mtodo(m0, sizeof(struct statusblock)),\
			    m0->m_len - sizeof(struct statusblock));	\
			copy_p = mtodo(m0, m0->m_len);			\
		}							\
		bcopy(p, copy_p, hsize);				\
		m0->m_len += hsize;					\
		m->m_len -= hsize;					\
		m->m_data += hsize;					\
	}								\
	copy_p += hsize;						\
}

	KASSERT((sizeof(struct statusblock) + sizeof(struct ether_vlan_header) +
	    sizeof(struct ip6_hdr) <= MLEN), ("%s: mbuf too small", __func__));

	if (((struct ether_header *)p)->ether_type == htons(ETHERTYPE_VLAN)) {
		offset = sizeof(struct ether_vlan_header);
		ether_type = ntohs(((struct ether_vlan_header *)p)->evl_proto);
		COPY(sizeof(struct ether_vlan_header));
		if (m->m_len == off_in_m + sizeof(struct ether_vlan_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_vlan_header);
			p += sizeof(struct ether_vlan_header);
		}
	} else {
		offset = sizeof(struct ether_header);
		ether_type = ntohs(((struct ether_header *)p)->ether_type);
		COPY(sizeof(struct ether_header));
		if (m->m_len == off_in_m + sizeof(struct ether_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_header);
			p += sizeof(struct ether_header);
		}
	}
	if (ether_type == ETHERTYPE_IP) {
		COPY(((struct ip *)p)->ip_hl << 2);
		offset += ((struct ip *)p)->ip_hl << 2;
	} else if (ether_type == ETHERTYPE_IPV6) {
		COPY(sizeof(struct ip6_hdr));
		offset += sizeof(struct ip6_hdr);
	} else {
		/*
		 * Unknown whether most other cases require moving a header;
		 * ARP works without.  However, Wake On LAN packets sent
		 * by wake(8) via BPF need something like this.
		 */
		COPY(MIN(gen_tx_hdr_min, m->m_len));
		offset += MIN(gen_tx_hdr_min, m->m_len);
	}
	return (offset);
#undef COPY
}

static void
gen_intr(void *arg)
{
	struct gen_softc *sc = arg;
	uint32_t val;

	GEN_LOCK(sc);

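	/* Act only on interrupt sources that are not masked off. */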
	val = RD4(sc, GENET_INTRL2_CPU_STAT);
	val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
	WR4(sc, GENET_INTRL2_CPU_CLEAR, val);

	if (val & GENET_IRQ_RXDMA_DONE)
		gen_rxintr(sc, &sc->rx_queue[DEF_RXQUEUE]);

	if (val & GENET_IRQ_TXDMA_DONE) {
		gen_txintr(sc, &sc->tx_queue[DEF_TXQUEUE]);
		if (!if_sendq_empty(sc->ifp))
			gen_start_locked(sc);
	}

	GEN_UNLOCK(sc);
}

static int
gen_rxintr(struct gen_softc *sc, struct rx_queue *q)
{
	if_t ifp;
	struct mbuf *m, *mh, *mt;
	struct statusblock *sb = NULL;
	int error, index, len, cnt, npkt, n;
	uint32_t status, prod_idx, total;

	ifp = sc->ifp;
	mh = mt = NULL;
	cnt = 0;
	npkt = 0;

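	/*
	 * The producer and consumer indices are free-running counters
	 * masked to GENET_RX_DMA_PROD_CONS_MASK; their difference, modulo
	 * the mask, is the number of completed descriptors.
	 */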
	prod_idx = RD4(sc, GENET_RX_DMA_PROD_INDEX(q->hwindex)) &
	    GENET_RX_DMA_PROD_CONS_MASK;
	total = (prod_idx - q->cons_idx) & GENET_RX_DMA_PROD_CONS_MASK;

	index = q->cons_idx & (RX_DESC_COUNT - 1);
	for (n = 0; n < total; n++) {
		bus_dmamap_sync(sc->rx_buf_tag, q->entries[index].map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rx_buf_tag, q->entries[index].map);

		m = q->entries[index].mbuf;

		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			sb = mtod(m, struct statusblock *);
			status = sb->status_buflen;
		} else
			status = RD4(sc, GENET_RX_DESC_STATUS(index));

		len = (status & GENET_RX_DESC_STATUS_BUFLEN_MASK) >>
		    GENET_RX_DESC_STATUS_BUFLEN_SHIFT;

		/* check for errors */
		if ((status &
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP |
		    GENET_RX_DESC_STATUS_RX_ERROR)) !=
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP)) {
			if (if_getflags(ifp) & IFF_DEBUG)
				device_printf(sc->dev,
				    "error/frag %x csum %x\n", status,
				    sb != NULL ? sb->rxcsum : 0);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			/* Reuse the buffer and consume the descriptor. */
			(void) gen_mapbuf_rx(sc, q, index, m);
			index = RX_NEXT(index, q->nentries);
			q->cons_idx = (q->cons_idx + 1) &
			    GENET_RX_DMA_PROD_CONS_MASK;
			WR4(sc, GENET_RX_DMA_CONS_INDEX(q->hwindex),
			    q->cons_idx);
			continue;
		}

		error = gen_newbuf_rx(sc, q, index);
		if (error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			if (if_getflags(ifp) & IFF_DEBUG)
				device_printf(sc->dev, "gen_newbuf_rx %d\n",
				    error);
			/* reuse previous mbuf and consume the descriptor */
			(void) gen_mapbuf_rx(sc, q, index, m);
			index = RX_NEXT(index, q->nentries);
			q->cons_idx = (q->cons_idx + 1) &
			    GENET_RX_DMA_PROD_CONS_MASK;
			WR4(sc, GENET_RX_DMA_CONS_INDEX(q->hwindex),
			    q->cons_idx);
			continue;
		}

		if (sb != NULL) {
			if (status & GENET_RX_DESC_STATUS_CKSUM_OK) {
				/* L4 checksum checked; not sure about L3. */
				m->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
			m->m_data += sizeof(struct statusblock);
			m->m_len -= sizeof(struct statusblock);
			len -= sizeof(struct statusblock);
		}
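		/*
		 * The chip was configured for 2-byte alignment of RX
		 * frames (GENET_RBUF_ALIGN_2B in gen_enable()), so trim
		 * the pad bytes that align the IP header.
		 */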
		if (len > ETHER_ALIGN) {
			m_adj(m, ETHER_ALIGN);
			len -= ETHER_ALIGN;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
		m->m_len = len;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		m->m_nextpkt = NULL;
		if (mh == NULL)
			mh = m;
		else
			mt->m_nextpkt = m;
		mt = m;
		++cnt;
		++npkt;

		index = RX_NEXT(index, q->nentries);

		q->cons_idx = (q->cons_idx + 1) & GENET_RX_DMA_PROD_CONS_MASK;
		WR4(sc, GENET_RX_DMA_CONS_INDEX(q->hwindex), q->cons_idx);

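		/*
		 * Hand the chain to the stack in batches of up to
		 * gen_rx_batch packets, dropping the lock around
		 * if_input().
		 */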
		if (cnt == gen_rx_batch) {
			GEN_UNLOCK(sc);
			if_input(ifp, mh);
			GEN_LOCK(sc);
			mh = mt = NULL;
			cnt = 0;
		}
	}

	if (mh != NULL) {
		GEN_UNLOCK(sc);
		if_input(ifp, mh);
		GEN_LOCK(sc);
	}

	return (npkt);
}

static void
gen_txintr(struct gen_softc *sc, struct tx_queue *q)
{
	uint32_t cons_idx, total;
	struct gen_ring_ent *ent;
	if_t ifp;
	int i, prog;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	cons_idx = RD4(sc, GENET_TX_DMA_CONS_INDEX(q->hwindex)) &
	    GENET_TX_DMA_PROD_CONS_MASK;
	total = (cons_idx - q->cons_idx) & GENET_TX_DMA_PROD_CONS_MASK;

	prog = 0;
	for (i = q->next; q->queued > 0 && total > 0;
	    i = TX_NEXT(i, q->nentries), total--) {
		/* XXX check for errors */

		ent = &q->entries[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		prog++;
		--q->queued;
	}

	if (prog > 0) {
		q->next = i;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}

	q->cons_idx = cons_idx;
}

static void
gen_intr2(void *arg)
{
	struct gen_softc *sc = arg;

	device_printf(sc->dev, "gen_intr2\n");
}

static int
gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
	m_adj(m, ETHER_ALIGN);

	return (gen_mapbuf_rx(sc, q, index, m));
}

static int
gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m)
{
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int nsegs;

	map = q->entries[index].map;
	if (bus_dmamap_load_mbuf_sg(sc->rx_buf_tag, map, m, &seg, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->rx_buf_tag, map, BUS_DMASYNC_PREREAD);

	q->entries[index].mbuf = m;
	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)seg.ds_addr);
	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(seg.ds_addr >> 32));

	return (0);
}

static int
gen_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, enable, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		GEN_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					gen_setup_rxfilter(sc);
			} else
				gen_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				gen_stop(sc);
		}
		sc->if_flags = if_getflags(ifp);
		GEN_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			GEN_LOCK(sc);
			gen_setup_rxfilter(sc);
			GEN_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		enable = if_getcapenable(ifp);
		flags = ifr->ifr_reqcap ^ enable;
		if (flags & IFCAP_RXCSUM)
			enable ^= IFCAP_RXCSUM;
		if (flags & IFCAP_RXCSUM_IPV6)
			enable ^= IFCAP_RXCSUM_IPV6;
		if (flags & IFCAP_TXCSUM)
			enable ^= IFCAP_TXCSUM;
		if (flags & IFCAP_TXCSUM_IPV6)
			enable ^= IFCAP_TXCSUM_IPV6;
		if (enable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6))
			if_sethwassist(ifp, GEN_CSUM_FEATURES);
		else
			if_sethwassist(ifp, 0);
		if_setcapenable(ifp, enable);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			gen_enable_offload(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
gen_tick(void *softc)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int link;

	sc = softc;
	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	link = sc->link;
	mii_tick(mii);
	if (sc->link && !link)
		gen_start_locked(sc);

	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

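/* MDIO polls every 10us, so 1000 retries gives a ~10ms timeout. */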
#define	MII_BUSY_RETRY		1000

static int
gen_miibus_readreg(device_t dev, int phy, int reg)
{
	struct gen_softc *sc;
	int retry, val;

	sc = device_get_softc(dev);
	val = 0;

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_READ |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if (((val = RD4(sc, GENET_MDIO_CMD)) &
		    GENET_MDIO_START_BUSY) == 0) {
			if (val & GENET_MDIO_READ_FAILED)
				return (0);	/* -1? */
			val &= GENET_MDIO_VAL_MASK;
			break;
		}
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (val);
}

static int
gen_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct gen_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_WRITE |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT) |
	    (val & GENET_MDIO_VAL_MASK));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		val = RD4(sc, GENET_MDIO_CMD);
		if ((val & GENET_MDIO_START_BUSY) == 0)
			break;
		DELAY(10);
	}
	if (retry == 0)
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (0);
}

static void
gen_update_link_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;
	u_int speed;

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
			speed = GENET_UMAC_CMD_SPEED_1000;
			sc->link = 1;
			break;
		case IFM_100_TX:
			speed = GENET_UMAC_CMD_SPEED_100;
			sc->link = 1;
			break;
		case IFM_10_T:
			speed = GENET_UMAC_CMD_SPEED_10;
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
	if (sc->phy_mode == MII_CONTYPE_RGMII)
		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	else
		val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);

	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_SPEED;
	val |= speed;
	WR4(sc, GENET_UMAC_CMD, val);
}

static void
gen_link_task(void *arg, int pending)
{
	struct gen_softc *sc;

	sc = arg;

	GEN_LOCK(sc);
	gen_update_link_locked(sc);
	GEN_UNLOCK(sc);
}

static void
gen_miibus_statchg(device_t dev)
{
	struct gen_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}

static void
gen_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct gen_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	GEN_UNLOCK(sc);
}

static int
gen_media_change(if_t ifp)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	int error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	error = mii_mediachg(mii);
	GEN_UNLOCK(sc);

	return (error);
}

static device_method_t gen_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gen_probe),
	DEVMETHOD(device_attach,	gen_attach),
	DEVMETHOD(device_detach,	gen_detach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	gen_miibus_readreg),
	DEVMETHOD(miibus_writereg,	gen_miibus_writereg),
	DEVMETHOD(miibus_statchg,	gen_miibus_statchg),

	DEVMETHOD_END
};

static driver_t gen_driver = {
	"genet",
	gen_methods,
	sizeof(struct gen_softc),
};

DRIVER_MODULE(genet, simplebus, gen_driver, 0, 0);
DRIVER_MODULE(miibus, genet, miibus_driver, 0, 0);
MODULE_DEPEND(genet, ether, 1, 1, 1);
MODULE_DEPEND(genet, miibus, 1, 1, 1);