/*-
 * Copyright (c) 2020 Michael J Karels
 * Copyright (c) 2016, 2020 Jared McNeill <jmcneill@invisible.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * RPi4 (BCM 2711) Gigabit Ethernet ("GENET") controller
 *
 * This driver is derived in large part from bcmgenet.c from NetBSD by
 * Jared McNeill.  Parts of the structure and other common code in
 * this driver have been copied from if_awg.c for the Allwinner EMAC,
 * also by Jared McNeill.
 */

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/taskqueue.h>
#include <sys/gpio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#define	__BIT(_x)	(1 << (_x))
#include "if_genetreg.h"

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_fdt.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include "syscon_if.h"
#include "miibus_if.h"
#include "gpio_if.h"

#define	RD4(sc, reg)		bus_read_4((sc)->res[_RES_MAC], (reg))
#define	WR4(sc, reg, val)	bus_write_4((sc)->res[_RES_MAC], (reg), (val))

#define	GEN_LOCK(sc)		mtx_lock(&(sc)->mtx)
#define	GEN_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
#define	GEN_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
#define	GEN_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED)

#define	TX_DESC_COUNT		GENET_DMA_DESC_COUNT
#define	RX_DESC_COUNT		GENET_DMA_DESC_COUNT

#define	TX_NEXT(n, count)	(((n) + 1) & ((count) - 1))
#define	RX_NEXT(n, count)	(((n) + 1) & ((count) - 1))
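/*
 * Note: the ring-advance macros above wrap by masking with (count - 1),
 * so they assume that the ring sizes (GENET_DMA_DESC_COUNT) are powers
 * of two.
 */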

#define	TX_MAX_SEGS		20

static SYSCTL_NODE(_hw, OID_AUTO, genet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "genet driver parameters");

/* Maximum number of mbufs to pass per call to if_input */
static int gen_rx_batch = 16 /* RX_BATCH_DEFAULT */;
SYSCTL_INT(_hw_genet, OID_AUTO, rx_batch, CTLFLAG_RDTUN,
    &gen_rx_batch, 0, "max mbufs per call to if_input");

TUNABLE_INT("hw.gen.rx_batch", &gen_rx_batch);	/* old name/interface */

/*
 * Transmitting packets with only an Ethernet header in the first mbuf
 * fails.  Examples include reflected ICMPv6 packets, e.g. echo replies;
 * forwarded IPv6/TCP packets; and forwarded IPv4/TCP packets that use NAT
 * with IPFW.  Pulling up the sizes of ether_header + ip6_hdr + icmp6_hdr
 * seems to work for both ICMPv6 and TCP over IPv6, as well as the IPv4/TCP
 * case.
 */
static int gen_tx_hdr_min = 56;		/* ether_header + ip6_hdr + icmp6_hdr */
SYSCTL_INT(_hw_genet, OID_AUTO, tx_hdr_min, CTLFLAG_RW,
    &gen_tx_hdr_min, 0, "header to add to packets with ether header only");
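/*
 * Example usage (illustrative values): set the loader tunable in
 * /boot/loader.conf as hw.genet.rx_batch="32", or adjust the pullup size
 * at runtime with "sysctl hw.genet.tx_hdr_min=62".
 */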

static struct ofw_compat_data compat_data[] = {
	{ "brcm,genet-v1",		1 },
	{ "brcm,genet-v2",		2 },
	{ "brcm,genet-v3",		3 },
	{ "brcm,genet-v4",		4 },
	{ "brcm,genet-v5",		5 },
	{ "brcm,bcm2711-genet-v5",	5 },
	{ NULL,				0 }
};

enum {
	_RES_MAC,		/* what to call this? */
	_RES_IRQ1,
	_RES_IRQ2,
	_RES_NITEMS
};

static struct resource_spec gen_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ -1, 0 }
};

/* structure per ring entry */
struct gen_ring_ent {
	bus_dmamap_t		map;
	struct mbuf		*mbuf;
};

struct tx_queue {
	int			hwindex;	/* hardware index */
	int			nentries;
	u_int			queued;		/* or avail? */
	u_int			cur;
	u_int			next;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct rx_queue {
	int			hwindex;	/* hardware index */
	int			nentries;
	u_int			cur;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct gen_softc {
	struct resource		*res[_RES_NITEMS];
	struct mtx		mtx;
	if_t			ifp;
	device_t		dev;
	device_t		miibus;
	mii_contype_t		phy_mode;

	struct callout		stat_ch;
	struct task		link_task;
	void			*ih;
	void			*ih2;
	int			type;
	int			if_flags;
	int			link;
	bus_dma_tag_t		tx_buf_tag;
	/*
	 * The genet chip has multiple queues for transmit and receive.
	 * This driver uses only one (queue 16, the default), but is
	 * structured to support multiple rings.  The additional rings
	 * are used for different priorities.
	 */
#define	DEF_TXQUEUE	0
#define	NTXQUEUE	1
	struct tx_queue		tx_queue[NTXQUEUE];
	struct gen_ring_ent	tx_ring_ent[TX_DESC_COUNT];  /* ring entries */

	bus_dma_tag_t		rx_buf_tag;
#define	DEF_RXQUEUE	0
#define	NRXQUEUE	1
	struct rx_queue		rx_queue[NRXQUEUE];
	struct gen_ring_ent	rx_ring_ent[RX_DESC_COUNT];  /* ring entries */
};

static void gen_init(void *softc);
static void gen_start(if_t ifp);
static void gen_destroy(struct gen_softc *sc);
static int gen_encap(struct gen_softc *sc, struct mbuf **mp);
static int gen_parse_tx(struct mbuf *m, int csum_flags);
static int gen_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int gen_get_phy_mode(device_t dev);
static bool gen_get_eaddr(device_t dev, struct ether_addr *eaddr);
static void gen_set_enaddr(struct gen_softc *sc);
static void gen_setup_rxfilter(struct gen_softc *sc);
static void gen_reset(struct gen_softc *sc);
static void gen_enable(struct gen_softc *sc);
static void gen_dma_disable(struct gen_softc *sc);
static int gen_bus_dma_init(struct gen_softc *sc);
static void gen_bus_dma_teardown(struct gen_softc *sc);
static void gen_enable_intr(struct gen_softc *sc);
static void gen_init_txrings(struct gen_softc *sc);
static void gen_init_rxrings(struct gen_softc *sc);
static void gen_intr(void *softc);
static int gen_rxintr(struct gen_softc *sc, struct rx_queue *q);
static void gen_txintr(struct gen_softc *sc, struct tx_queue *q);
static void gen_intr2(void *softc);
static int gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index);
static int gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m);
static void gen_link_task(void *arg, int pending);
static void gen_media_status(if_t ifp, struct ifmediareq *ifmr);
static int gen_media_change(if_t ifp);
static void gen_tick(void *softc);

static int
gen_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "RPi4 Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}

static int
gen_attach(device_t dev)
{
	struct ether_addr eaddr;
	struct gen_softc *sc;
	int major, minor, error, mii_flags;
	bool eaddr_found;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	if (bus_alloc_resources(dev, gen_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		error = ENXIO;
		goto fail;
	}

	major = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MAJOR) >> REV_MAJOR_SHIFT;
	if (major != REV_MAJOR_V5) {
		device_printf(dev, "version %d is not supported\n", major);
		error = ENXIO;
		goto fail;
	}
	minor = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MINOR) >> REV_MINOR_SHIFT;
	device_printf(dev, "GENET version 5.%d phy 0x%04x\n", minor,
	    RD4(sc, GENET_SYS_REV_CTRL) & REV_PHY);

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
	TASK_INIT(&sc->link_task, 0, gen_link_task, sc);

	error = gen_get_phy_mode(dev);
	if (error != 0)
		goto fail;

	bzero(&eaddr, sizeof(eaddr));
	eaddr_found = gen_get_eaddr(dev, &eaddr);

	/* reset core */
	gen_reset(sc);

	gen_dma_disable(sc);

	/* Setup DMA */
	error = gen_bus_dma_init(sc);
	if (error != 0) {
		device_printf(dev, "cannot setup bus dma\n");
		goto fail;
	}

	/* Setup ethernet interface */
	sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp, sc);
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(sc->ifp, gen_start);
	if_setioctlfn(sc->ifp, gen_ioctl);
	if_setinitfn(sc->ifp, gen_init);
	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
	if_setsendqready(sc->ifp);
#define	GEN_CSUM_FEATURES	(CSUM_UDP | CSUM_TCP)
	if_sethwassist(sc->ifp, GEN_CSUM_FEATURES);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));

	/* Install interrupt handlers */
	error = bus_setup_intr(dev, sc->res[_RES_IRQ1],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler1\n");
		goto fail;
	}

	error = bus_setup_intr(dev, sc->res[_RES_IRQ2],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr2, sc, &sc->ih2);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler2\n");
		goto fail;
	}

	/* Attach MII driver */
	mii_flags = 0;
	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII_ID:
		mii_flags |= MIIF_RX_DELAY | MIIF_TX_DELAY;
		break;
	case MII_CONTYPE_RGMII_RXID:
		mii_flags |= MIIF_RX_DELAY;
		break;
	case MII_CONTYPE_RGMII_TXID:
		mii_flags |= MIIF_TX_DELAY;
		break;
	default:
		break;
	}
	error = mii_attach(dev, &sc->miibus, sc->ifp, gen_media_change,
	    gen_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    mii_flags);
	if (error != 0) {
		device_printf(dev, "cannot attach PHY\n");
		goto fail;
	}

	/* If address was not found, create one based on the hostid and name. */
	if (eaddr_found == 0)
		ether_gen_addr(sc->ifp, &eaddr);
	/* Attach ethernet interface */
	ether_ifattach(sc->ifp, eaddr.octet);

fail:
	if (error)
		gen_destroy(sc);
	return (error);
}

/* Free resources after failed attach.  This is not a complete detach. */
static void
gen_destroy(struct gen_softc *sc)
{

	device_delete_children(sc->dev);
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ1], sc->ih);
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ2], sc->ih2);
	gen_bus_dma_teardown(sc);
	callout_drain(&sc->stat_ch);
	if (mtx_initialized(&sc->mtx))
		mtx_destroy(&sc->mtx);
	bus_release_resources(sc->dev, gen_spec, sc->res);
	if (sc->ifp != NULL) {
		if_free(sc->ifp);
		sc->ifp = NULL;
	}
}

static int
gen_get_phy_mode(device_t dev)
{
	struct gen_softc *sc;
	phandle_t node;
	mii_contype_t type;
	int error = 0;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	type = mii_fdt_get_contype(node);

	switch (type) {
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		sc->phy_mode = type;
		break;
	default:
		device_printf(dev, "unknown phy-mode '%s'\n",
		    mii_fdt_contype_to_name(type));
		error = ENXIO;
		break;
	}

	return (error);
}

static bool
gen_get_eaddr(device_t dev, struct ether_addr *eaddr)
{
	struct gen_softc *sc;
	uint32_t maclo, machi, val;
	phandle_t node;

	sc = device_get_softc(dev);

	node = ofw_bus_get_node(dev);
	if (OF_getprop(node, "mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "local-mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "address", eaddr->octet, ETHER_ADDR_LEN) != -1)
		return (true);

	device_printf(dev, "No Ethernet address found in fdt!\n");
	maclo = machi = 0;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) {
		maclo = htobe32(RD4(sc, GENET_UMAC_MAC0));
		machi = htobe16(RD4(sc, GENET_UMAC_MAC1) & 0xffff);
	}

	if (maclo == 0 && machi == 0) {
		if (bootverbose)
			device_printf(dev,
			    "No Ethernet address found in controller\n");
		return (false);
	} else {
		eaddr->octet[0] = maclo & 0xff;
		eaddr->octet[1] = (maclo >> 8) & 0xff;
		eaddr->octet[2] = (maclo >> 16) & 0xff;
		eaddr->octet[3] = (maclo >> 24) & 0xff;
		eaddr->octet[4] = machi & 0xff;
		eaddr->octet[5] = (machi >> 8) & 0xff;
		return (true);
	}
}

static void
gen_reset(struct gen_softc *sc)
{
	uint32_t val;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	DELAY(10);

	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	DELAY(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);
}

static void
gen_enable(struct gen_softc *sc)
{
	u_int val;

	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	gen_enable_intr(sc);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

static void
gen_disable_intr(struct gen_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, GENET_INTRL2_CPU_SET_MASK, 0xffffffff);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK, 0xffffffff);
}

static void
gen_disable(struct gen_softc *sc)
{
	uint32_t val;

	/* Stop receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Stop transmitter */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_TXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Disable Interrupt */
	gen_disable_intr(sc);
}

static void
gen_enable_offload(struct gen_softc *sc)
{
	uint32_t check_ctrl, buf_ctrl;

	check_ctrl = RD4(sc, GENET_RBUF_CHECK_CTRL);
	buf_ctrl = RD4(sc, GENET_RBUF_CTRL);
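	/*
	 * A status block is prepended to RX/TX frames when checksum
	 * offload is enabled (64 bytes, judging by the GENET_RBUF_64B_EN
	 * name); it carries the checksum information consumed in
	 * gen_encap() and gen_rxintr() below.
	 */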
	if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0) {
		check_ctrl |= GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl |= GENET_RBUF_64B_EN;
	} else {
		check_ctrl &= ~GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	}
	WR4(sc, GENET_RBUF_CHECK_CTRL, check_ctrl);
	WR4(sc, GENET_RBUF_CTRL, buf_ctrl);

	buf_ctrl = RD4(sc, GENET_TBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0)
		buf_ctrl |= GENET_RBUF_64B_EN;
	else
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	WR4(sc, GENET_TBUF_CTRL, buf_ctrl);
}

static void
gen_dma_disable(struct gen_softc *sc)
{
	int val;

	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	val &= ~GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	val &= ~GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static int
gen_bus_dma_init(struct gen_softc *sc)
{
	device_t dev = sc->dev;
	int i, error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx_buf_tag, 0,
		    &sc->tx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->rx_buf_tag, 0,
		    &sc->rx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
	}
	return (0);
}

static void
gen_bus_dma_teardown(struct gen_softc *sc)
{
	int i, error;

	if (sc->tx_buf_tag != NULL) {
		for (i = 0; i < TX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->tx_buf_tag,
			    sc->tx_ring_ent[i].map);
			sc->tx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->tx_buf_tag);
		sc->tx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}

	if (sc->rx_buf_tag != NULL) {
		for (i = 0; i < RX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->rx_buf_tag,
			    sc->rx_ring_ent[i].map);
			sc->rx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->rx_buf_tag);
		sc->rx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}
}

static void
gen_enable_intr(struct gen_softc *sc)
{

	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_txring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct tx_queue *q;
	uint32_t val;

	q = &sc->tx_queue[queue];
	q->entries = &sc->tx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;

	/* TX ring */

	q->queued = 0;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_TX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_TX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
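	/*
	 * A threshold of 1 seems to request a completion interrupt per
	 * transmitted buffer.
	 */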
	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_TX_DMA_CTRL, val);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_rxring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct rx_queue *q;
	uint32_t val;
	int i;

	q = &sc->rx_queue[queue];
	q->entries = &sc->rx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_RX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_RX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
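	/* Receive flow-control (XON/XOFF) thresholds. */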
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    (5 << GENET_RX_DMA_XON_XOFF_THRES_LO_SHIFT) | (RX_DESC_COUNT >> 4));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* fill ring */
	for (i = 0; i < RX_DESC_COUNT; i++)
		gen_newbuf_rx(sc, &sc->rx_queue[DEF_RXQUEUE], i);

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static void
gen_init_txrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_txring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->tx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_txring(sc, DEF_TXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    TX_DESC_COUNT);
	sc->tx_queue[DEF_TXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_init_rxrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_rxring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->rx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_rxring(sc, DEF_RXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    RX_DESC_COUNT);
	sc->rx_queue[DEF_RXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_stop(struct gen_softc *sc)
{
	int i;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	callout_stop(&sc->stat_ch);
	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
	gen_reset(sc);
	gen_disable(sc);
	gen_dma_disable(sc);

	/* Clear the tx/rx ring buffer */
	for (i = 0; i < TX_DESC_COUNT; i++) {
		ent = &sc->tx_ring_ent[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
		}
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		ent = &sc->rx_ring_ent[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->rx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
		}
	}
}

static void
gen_init_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	GEN_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		WR4(sc, GENET_SYS_PORT_CTRL, GENET_SYS_PORT_MODE_EXT_GPHY);
		break;
	default:
		WR4(sc, GENET_SYS_PORT_CTRL, 0);
	}

	gen_set_enaddr(sc);

	/* Setup RX filter */
	gen_setup_rxfilter(sc);

	gen_init_txrings(sc);
	gen_init_rxrings(sc);
	gen_enable(sc);
	gen_enable_offload(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

static void
gen_init(void *softc)
{
	struct gen_softc *sc;

	sc = softc;
	GEN_LOCK(sc);
	gen_init_locked(sc);
	GEN_UNLOCK(sc);
}

static uint8_t ether_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static void
gen_setup_rxfilter_mdf(struct gen_softc *sc, u_int n, const uint8_t *ea)
{
	uint32_t addr0 = (ea[0] << 8) | ea[1];
	uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];

	WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
	WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
}

static u_int
gen_setup_multi(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	struct gen_softc *sc = arg;

	/* "count + 2" to account for unicast and broadcast */
	gen_setup_rxfilter_mdf(sc, count + 2, LLADDR(sdl));
	return (1);		/* increment to count */
}

static void
gen_setup_rxfilter(struct gen_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t cmd, mdf_ctrl;
	u_int n;

	GEN_ASSERT_LOCKED(sc);

	cmd = RD4(sc, GENET_UMAC_CMD);

	/*
	 * Count the required number of hardware filters.  We need one
	 * for each multicast address, plus one for our own address and
	 * the broadcast address.
	 */
	n = if_llmaddr_count(ifp) + 2;

	if (n > GENET_MAX_MDF_FILTER)
		if_setflagbits(ifp, IFF_ALLMULTI, 0);
	else
		if_setflagbits(ifp, 0, IFF_ALLMULTI);

	if ((if_getflags(ifp) & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
		cmd |= GENET_UMAC_CMD_PROMISC;
		mdf_ctrl = 0;
	} else {
		cmd &= ~GENET_UMAC_CMD_PROMISC;
		gen_setup_rxfilter_mdf(sc, 0, ether_broadcastaddr);
		gen_setup_rxfilter_mdf(sc, 1, if_getlladdr(ifp));
		(void) if_foreach_llmaddr(ifp, gen_setup_multi, sc);
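		/*
		 * Filter i is enabled by bit (GENET_MAX_MDF_FILTER - 1 - i)
		 * of MDF_CTRL, so enable the n most significant bits of the
		 * field for filters 0..n-1.
		 */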
		mdf_ctrl = (__BIT(GENET_MAX_MDF_FILTER) - 1) &~
		    (__BIT(GENET_MAX_MDF_FILTER - n) - 1);
	}

	WR4(sc, GENET_UMAC_CMD, cmd);
	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);
}

static void
gen_set_enaddr(struct gen_softc *sc)
{
	uint8_t *enaddr;
	uint32_t val;
	if_t ifp;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/* Write our unicast address */
	enaddr = if_getlladdr(ifp);
	/* Write hardware address */
	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
	    (enaddr[0] << 24);
	WR4(sc, GENET_UMAC_MAC0, val);
	val = enaddr[5] | (enaddr[4] << 8);
	WR4(sc, GENET_UMAC_MAC1, val);
}

static void
gen_start_locked(struct gen_softc *sc)
{
	struct mbuf *m;
	if_t ifp;
	int err;

	GEN_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	while (true) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		err = gen_encap(sc, &m);
		if (err != 0) {
			if (err == ENOBUFS)
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			else if (m == NULL)
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if (m != NULL)
				if_sendq_prepend(ifp, m);
			break;
		}
		bpf_mtap_if(ifp, m);
	}
}

static void
gen_start(if_t ifp)
{
	struct gen_softc *sc;

	sc = if_getsoftc(ifp);

	GEN_LOCK(sc);
	gen_start_locked(sc);
	GEN_UNLOCK(sc);
}

/* Test for any delayed checksum */
#define	CSUM_DELAY_ANY	(CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)

static int
gen_encap(struct gen_softc *sc, struct mbuf **mp)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, first, i, index, offset;
	uint32_t csuminfo, length_status, csum_flags = 0, csumdata;
	struct mbuf *m;
	struct statusblock *sb = NULL;
	struct tx_queue *q;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	q = &sc->tx_queue[DEF_TXQUEUE];
	if (q->queued == q->nentries) {
		/* tx_queue is full */
		return (ENOBUFS);
	}

	m = *mp;

	/*
	 * Don't attempt to send packets with only an Ethernet header in
	 * first mbuf; see comment above with gen_tx_hdr_min.
	 */
	if (m->m_len == sizeof(struct ether_header)) {
		m = m_pullup(m, MIN(m->m_pkthdr.len, gen_tx_hdr_min));
		if (m == NULL) {
			if (if_getflags(sc->ifp) & IFF_DEBUG)
				device_printf(sc->dev,
				    "header pullup fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
	}

	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0) {
		csum_flags = m->m_pkthdr.csum_flags;
		csumdata = m->m_pkthdr.csum_data;
		M_PREPEND(m, sizeof(struct statusblock), M_NOWAIT);
		if (m == NULL) {
			if (if_getflags(sc->ifp) & IFF_DEBUG)
				device_printf(sc->dev, "prepend fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
		offset = gen_parse_tx(m, csum_flags);
		sb = mtod(m, struct statusblock *);
		if ((csum_flags & CSUM_DELAY_ANY) != 0) {
			csuminfo = (offset << TXCSUM_OFF_SHIFT) |
			    (offset + csumdata);
			csuminfo |= TXCSUM_LEN_VALID;
			if (csum_flags & (CSUM_UDP | CSUM_IP6_UDP))
				csuminfo |= TXCSUM_UDP;
			sb->txcsuminfo = csuminfo;
		} else
			sb->txcsuminfo = 0;
	}

	*mp = m;

	cur = first = q->cur;
	ent = &q->entries[cur];
	map = ent->map;
	error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL) {
			device_printf(sc->dev,
			    "gen_encap: m_collapse failed\n");
			m_freem(*mp);
			*mp = NULL;
			return (ENOMEM);
		}
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*mp);
			*mp = NULL;
		}
	}
	if (error != 0) {
		device_printf(sc->dev,
		    "gen_encap: bus_dmamap_load_mbuf_sg failed\n");
		return (error);
	}
	if (nsegs == 0) {
		m_freem(*mp);
		*mp = NULL;
		return (EIO);
	}

	/* Remove statusblock after mapping, before possible requeue or bpf. */
	if (sb != NULL) {
		m->m_data += sizeof(struct statusblock);
		m->m_len -= sizeof(struct statusblock);
		m->m_pkthdr.len -= sizeof(struct statusblock);
	}
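	/*
	 * If the ring lacks room for all segments, unload and return
	 * ENOBUFS; the mbuf is left intact so gen_start_locked() can
	 * requeue it.
	 */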
	if (q->queued + nsegs > q->nentries) {
		bus_dmamap_unload(sc->tx_buf_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->tx_buf_tag, map, BUS_DMASYNC_PREWRITE);

	index = q->prod_idx & (q->nentries - 1);
	for (i = 0; i < nsegs; i++) {
		ent = &q->entries[cur];
		length_status = GENET_TX_DESC_STATUS_QTAG_MASK;
		if (i == 0) {
			length_status |= GENET_TX_DESC_STATUS_SOP |
			    GENET_TX_DESC_STATUS_CRC;
			if ((csum_flags & CSUM_DELAY_ANY) != 0)
				length_status |= GENET_TX_DESC_STATUS_CKSUM;
		}
		if (i == nsegs - 1)
			length_status |= GENET_TX_DESC_STATUS_EOP;

		length_status |= segs[i].ds_len <<
		    GENET_TX_DESC_STATUS_BUFLEN_SHIFT;

		WR4(sc, GENET_TX_DESC_ADDRESS_LO(index),
		    (uint32_t)segs[i].ds_addr);
		WR4(sc, GENET_TX_DESC_ADDRESS_HI(index),
		    (uint32_t)(segs[i].ds_addr >> 32));
		WR4(sc, GENET_TX_DESC_STATUS(index), length_status);

		++q->queued;
		cur = TX_NEXT(cur, q->nentries);
		index = TX_NEXT(index, q->nentries);
	}

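	/*
	 * The hardware producer/consumer indices are free-running counters
	 * wider than the ring; keep the software copy within
	 * GENET_TX_DMA_PROD_CONS_MASK to match.
	 */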
	q->prod_idx += nsegs;
	q->prod_idx &= GENET_TX_DMA_PROD_CONS_MASK;
	/* We probably don't need to write the producer index on every iter */
	if (nsegs != 0)
		WR4(sc, GENET_TX_DMA_PROD_INDEX(q->hwindex), q->prod_idx);
	q->cur = cur;

	/* Store the mbuf in the ring entry for the first segment. */
	q->entries[first].mbuf = m;

	return (0);
}

/*
 * Parse a packet to find the offset of the transport header for checksum
 * offload.  Ensure that the link and network headers are contiguous with
 * the status block, or transmission fails.
 */
static int
gen_parse_tx(struct mbuf *m, int csum_flags)
{
	int offset, off_in_m;
	bool copy = false, shift = false;
	u_char *p, *copy_p = NULL;
	struct mbuf *m0 = m;
	uint16_t ether_type;

	if (m->m_len == sizeof(struct statusblock)) {
		/* M_PREPEND placed statusblock at end; move to beginning */
		m->m_data = m->m_pktdat;
		copy_p = mtodo(m, sizeof(struct statusblock));
		m = m->m_next;
		off_in_m = 0;
		p = mtod(m, u_char *);
		copy = true;
	} else {
		/*
		 * If statusblock is not at beginning of mbuf (likely),
		 * then remember to move mbuf contents down before copying
		 * after them.
		 */
		if ((m->m_flags & M_EXT) == 0 && m->m_data != m->m_pktdat)
			shift = true;
		p = mtodo(m, sizeof(struct statusblock));
		off_in_m = sizeof(struct statusblock);
	}

	/*
	 * If headers need to be copied contiguous to statusblock, do so.
	 * If copying to the internal mbuf data area, and the status block
	 * is not at the beginning of that area, shift the status block (which
	 * is empty) and following data.
	 */
#define	COPY(size) {							\
	int hsize = size;						\
	if (copy) {							\
		if (shift) {						\
			u_char *p0;					\
			shift = false;					\
			p0 = mtodo(m0, sizeof(struct statusblock));	\
			m0->m_data = m0->m_pktdat;			\
			bcopy(p0, mtodo(m0, sizeof(struct statusblock)),\
			    m0->m_len - sizeof(struct statusblock));	\
			copy_p = mtodo(m0, m0->m_len);			\
		}							\
		bcopy(p, copy_p, hsize);				\
		m0->m_len += hsize;					\
		m->m_len -= hsize;					\
		m->m_data += hsize;					\
	}								\
	copy_p += hsize;						\
}

	KASSERT((sizeof(struct statusblock) + sizeof(struct ether_vlan_header) +
	    sizeof(struct ip6_hdr) <= MLEN), ("%s: mbuf too small", __func__));

	if (((struct ether_header *)p)->ether_type == htons(ETHERTYPE_VLAN)) {
		offset = sizeof(struct ether_vlan_header);
		ether_type = ntohs(((struct ether_vlan_header *)p)->evl_proto);
		COPY(sizeof(struct ether_vlan_header));
		if (m->m_len == off_in_m + sizeof(struct ether_vlan_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_vlan_header);
			p += sizeof(struct ether_vlan_header);
		}
	} else {
		offset = sizeof(struct ether_header);
		ether_type = ntohs(((struct ether_header *)p)->ether_type);
		COPY(sizeof(struct ether_header));
		if (m->m_len == off_in_m + sizeof(struct ether_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_header);
			p += sizeof(struct ether_header);
		}
	}
	if (ether_type == ETHERTYPE_IP) {
		COPY(((struct ip *)p)->ip_hl << 2);
		offset += ((struct ip *)p)->ip_hl << 2;
	} else if (ether_type == ETHERTYPE_IPV6) {
		COPY(sizeof(struct ip6_hdr));
		offset += sizeof(struct ip6_hdr);
	} else {
		/*
		 * Unknown whether most other cases require moving a header;
		 * ARP works without.  However, Wake On LAN packets sent
		 * by wake(8) via BPF need something like this.
		 */
		COPY(MIN(gen_tx_hdr_min, m->m_len));
		offset += MIN(gen_tx_hdr_min, m->m_len);
	}
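	/*
	 * At this point, e.g., a plain (non-VLAN) IPv4/TCP packet yields
	 * offset = sizeof(struct ether_header) + (ip_hl << 2).
	 */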
	return (offset);
#undef COPY
}

static void
gen_intr(void *arg)
{
	struct gen_softc *sc = arg;
	uint32_t val;

	GEN_LOCK(sc);

	val = RD4(sc, GENET_INTRL2_CPU_STAT);
	val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
	WR4(sc, GENET_INTRL2_CPU_CLEAR, val);

	if (val & GENET_IRQ_RXDMA_DONE)
		gen_rxintr(sc, &sc->rx_queue[DEF_RXQUEUE]);

	if (val & GENET_IRQ_TXDMA_DONE) {
		gen_txintr(sc, &sc->tx_queue[DEF_TXQUEUE]);
		if (!if_sendq_empty(sc->ifp))
			gen_start_locked(sc);
	}

	GEN_UNLOCK(sc);
}

static int
gen_rxintr(struct gen_softc *sc, struct rx_queue *q)
{
	if_t ifp;
	struct mbuf *m, *mh, *mt;
	struct statusblock *sb = NULL;
	int error, index, len, cnt, npkt, n;
	uint32_t status, prod_idx, total;

	ifp = sc->ifp;
	mh = mt = NULL;
	cnt = 0;
	npkt = 0;

	prod_idx = RD4(sc, GENET_RX_DMA_PROD_INDEX(q->hwindex)) &
	    GENET_RX_DMA_PROD_CONS_MASK;
	total = (prod_idx - q->cons_idx) & GENET_RX_DMA_PROD_CONS_MASK;

	index = q->cons_idx & (RX_DESC_COUNT - 1);
	for (n = 0; n < total; n++) {
		bus_dmamap_sync(sc->rx_buf_tag, q->entries[index].map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rx_buf_tag, q->entries[index].map);

		m = q->entries[index].mbuf;

		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			sb = mtod(m, struct statusblock *);
			status = sb->status_buflen;
		} else
			status = RD4(sc, GENET_RX_DESC_STATUS(index));

		len = (status & GENET_RX_DESC_STATUS_BUFLEN_MASK) >>
		    GENET_RX_DESC_STATUS_BUFLEN_SHIFT;

		/* check for errors */
		if ((status &
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP |
		    GENET_RX_DESC_STATUS_RX_ERROR)) !=
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP)) {
			if (if_getflags(ifp) & IFF_DEBUG)
				device_printf(sc->dev,
				    "error/frag %x csum %x\n", status,
				    sb->rxcsum);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		error = gen_newbuf_rx(sc, q, index);
		if (error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			if (if_getflags(ifp) & IFF_DEBUG)
				device_printf(sc->dev, "gen_newbuf_rx %d\n",
				    error);
			/* reuse previous mbuf */
			(void) gen_mapbuf_rx(sc, q, index, m);
			continue;
		}

		if (sb != NULL) {
			if (status & GENET_RX_DESC_STATUS_CKSUM_OK) {
				/* L4 checksum checked; not sure about L3. */
				m->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
			m->m_data += sizeof(struct statusblock);
			m->m_len -= sizeof(struct statusblock);
			len -= sizeof(struct statusblock);
		}
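		/*
		 * The controller inserts 2 bytes of padding before the
		 * frame (GENET_RBUF_ALIGN_2B, set in gen_enable()); strip
		 * them so the IP header ends up 4-byte aligned.
		 */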
		if (len > ETHER_ALIGN) {
			m_adj(m, ETHER_ALIGN);
			len -= ETHER_ALIGN;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
		m->m_len = len;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		m->m_nextpkt = NULL;
		if (mh == NULL)
			mh = m;
		else
			mt->m_nextpkt = m;
		mt = m;
		++cnt;
		++npkt;

		index = RX_NEXT(index, q->nentries);

		q->cons_idx = (q->cons_idx + 1) & GENET_RX_DMA_PROD_CONS_MASK;
		WR4(sc, GENET_RX_DMA_CONS_INDEX(q->hwindex), q->cons_idx);

		if (cnt == gen_rx_batch) {
			GEN_UNLOCK(sc);
			if_input(ifp, mh);
			GEN_LOCK(sc);
			mh = mt = NULL;
			cnt = 0;
		}
	}

	if (mh != NULL) {
		GEN_UNLOCK(sc);
		if_input(ifp, mh);
		GEN_LOCK(sc);
	}

	return (npkt);
}

static void
gen_txintr(struct gen_softc *sc, struct tx_queue *q)
{
	uint32_t cons_idx, total;
	struct gen_ring_ent *ent;
	if_t ifp;
	int i, prog;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	cons_idx = RD4(sc, GENET_TX_DMA_CONS_INDEX(q->hwindex)) &
	    GENET_TX_DMA_PROD_CONS_MASK;
	total = (cons_idx - q->cons_idx) & GENET_TX_DMA_PROD_CONS_MASK;

	prog = 0;
	for (i = q->next; q->queued > 0 && total > 0;
	    i = TX_NEXT(i, q->nentries), total--) {
		/* XXX check for errors */

		ent = &q->entries[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		prog++;
		--q->queued;
	}

	if (prog > 0) {
		q->next = i;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}

	q->cons_idx = cons_idx;
}

static void
gen_intr2(void *arg)
{
	struct gen_softc *sc = arg;

	device_printf(sc->dev, "gen_intr2\n");
}

static int
gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
	m_adj(m, ETHER_ALIGN);

	return (gen_mapbuf_rx(sc, q, index, m));
}

static int
gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m)
{
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int nsegs;

	map = q->entries[index].map;
	if (bus_dmamap_load_mbuf_sg(sc->rx_buf_tag, map, m, &seg, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->rx_buf_tag, map, BUS_DMASYNC_PREREAD);

	q->entries[index].mbuf = m;
	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)seg.ds_addr);
	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(seg.ds_addr >> 32));

	return (0);
}

static int
gen_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, enable, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		GEN_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					gen_setup_rxfilter(sc);
			} else
				gen_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				gen_stop(sc);
		}
		sc->if_flags = if_getflags(ifp);
		GEN_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			GEN_LOCK(sc);
			gen_setup_rxfilter(sc);
			GEN_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		enable = if_getcapenable(ifp);
		flags = ifr->ifr_reqcap ^ enable;
		if (flags & IFCAP_RXCSUM)
			enable ^= IFCAP_RXCSUM;
		if (flags & IFCAP_RXCSUM_IPV6)
			enable ^= IFCAP_RXCSUM_IPV6;
		if (flags & IFCAP_TXCSUM)
			enable ^= IFCAP_TXCSUM;
		if (flags & IFCAP_TXCSUM_IPV6)
			enable ^= IFCAP_TXCSUM_IPV6;
		if (enable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6))
			if_sethwassist(ifp, GEN_CSUM_FEATURES);
		else
			if_sethwassist(ifp, 0);
		if_setcapenable(ifp, enable);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			gen_enable_offload(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
gen_tick(void *softc)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int link;

	sc = softc;
	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	link = sc->link;
	mii_tick(mii);
	if (sc->link && !link)
		gen_start_locked(sc);

	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

#define	MII_BUSY_RETRY		1000

static int
gen_miibus_readreg(device_t dev, int phy, int reg)
{
	struct gen_softc *sc;
	int retry, val;

	sc = device_get_softc(dev);
	val = 0;

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_READ |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if (((val = RD4(sc, GENET_MDIO_CMD)) &
		    GENET_MDIO_START_BUSY) == 0) {
			if (val & GENET_MDIO_READ_FAILED)
				return (0);	/* -1? */
			val &= GENET_MDIO_VAL_MASK;
			break;
		}
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (val);
}

static int
gen_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct gen_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_WRITE |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT) |
	    (val & GENET_MDIO_VAL_MASK));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		val = RD4(sc, GENET_MDIO_CMD);
		if ((val & GENET_MDIO_START_BUSY) == 0)
			break;
		DELAY(10);
	}
	if (retry == 0)
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (0);
}

static void
gen_update_link_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;
	u_int speed;

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
			speed = GENET_UMAC_CMD_SPEED_1000;
			sc->link = 1;
			break;
		case IFM_100_TX:
			speed = GENET_UMAC_CMD_SPEED_100;
			sc->link = 1;
			break;
		case IFM_10_T:
			speed = GENET_UMAC_CMD_SPEED_10;
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
	if (sc->phy_mode == MII_CONTYPE_RGMII)
		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	else
		val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);

	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_SPEED;
	val |= speed;
	WR4(sc, GENET_UMAC_CMD, val);
}

static void
gen_link_task(void *arg, int pending)
{
	struct gen_softc *sc;

	sc = arg;

	GEN_LOCK(sc);
	gen_update_link_locked(sc);
	GEN_UNLOCK(sc);
}

static void
gen_miibus_statchg(device_t dev)
{
	struct gen_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}

static void
gen_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct gen_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	GEN_UNLOCK(sc);
}

static int
gen_media_change(if_t ifp)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	int error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	error = mii_mediachg(mii);
	GEN_UNLOCK(sc);

	return (error);
}

static device_method_t gen_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gen_probe),
	DEVMETHOD(device_attach,	gen_attach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	gen_miibus_readreg),
	DEVMETHOD(miibus_writereg,	gen_miibus_writereg),
	DEVMETHOD(miibus_statchg,	gen_miibus_statchg),

	DEVMETHOD_END
};

static driver_t gen_driver = {
	"genet",
	gen_methods,
	sizeof(struct gen_softc),
};

DRIVER_MODULE(genet, simplebus, gen_driver, 0, 0);
DRIVER_MODULE(miibus, genet, miibus_driver, 0, 0);
MODULE_DEPEND(genet, ether, 1, 1, 1);
MODULE_DEPEND(genet, miibus, 1, 1, 1);