/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019-2025 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/tiphy.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/xilinx/axidma.h>
#include <dev/xilinx/if_xaereg.h>
#include <dev/xilinx/if_xaevar.h>

#include "miibus_if.h"
#include "axidma_if.h"
#define	XAE_RD4(_sc, _reg)		bus_read_4((_sc)->res[0], _reg)
#define	XAE_RD8(_sc, _reg)		bus_read_8((_sc)->res[0], _reg)
#define	XAE_WR4(_sc, _reg, _val)	bus_write_4((_sc)->res[0], _reg, _val)
#define	XAE_WR8(_sc, _reg, _val)	bus_write_8((_sc)->res[0], _reg, _val)

#define	AXIDMA_RD4(_sc, _reg)		bus_read_4((_sc)->dma_res, _reg)
#define	AXIDMA_RD8(_sc, _reg)		bus_read_8((_sc)->dma_res, _reg)
#define	AXIDMA_WR4(_sc, _reg, _val)	bus_write_4((_sc)->dma_res, _reg, _val)
#define	AXIDMA_WR8(_sc, _reg, _val)	bus_write_8((_sc)->dma_res, _reg, _val)

#define	XAE_LOCK(sc)			mtx_lock(&(sc)->mtx)
#define	XAE_UNLOCK(sc)			mtx_unlock(&(sc)->mtx)
#define	XAE_ASSERT_LOCKED(sc)		mtx_assert(&(sc)->mtx, MA_OWNED)
#define	XAE_ASSERT_UNLOCKED(sc)		mtx_assert(&(sc)->mtx, MA_NOTOWNED)

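/*
 * Debug printf: the empty definition compiles all dprintf() calls out of
 * the driver; point it at printf(9) to trace the datapath.
 */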
#define	dprintf(fmt, ...)

#define	MDIO_CLK_DIV_DEFAULT	29

#define	PHY1_RD(sc, _r)		xae_miibus_read_reg(sc->dev, 1, _r)
#define	PHY1_WR(sc, _r, _v)	xae_miibus_write_reg(sc->dev, 1, _r, _v)
#define	PHY_RD(sc, _r)		xae_miibus_read_reg(sc->dev, sc->phy_addr, _r)
#define	PHY_WR(sc, _r, _v)	\
	xae_miibus_write_reg(sc->dev, sc->phy_addr, _r, _v)

/* Use this macro to access regs > 0x1f */
#define	WRITE_TI_EREG(sc, reg, data) {					\
	PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK);			\
	PHY_WR(sc, MII_MMDAADR, reg);					\
	PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK | MMDACR_FN_DATANPI);	\
	PHY_WR(sc, MII_MMDAADR, data);					\
}
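/*
 * The macro above performs the standard MMD indirect access sequence:
 * select the address function in MMDACR, write the extended register
 * address to MMDAADR, switch MMDACR to the data function, then write the
 * data through MMDAADR.
 */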

/* Undocumented registers used in the Xilinx VCU118 workaround. */
#define	CFG4_SGMII_TMR		0x160	/* bits 8:7 MUST be '10' */
#define	DP83867_SGMIICTL1	0xD3	/* undocumented register */
#define	SGMIICTL1_SGMII_6W	(1 << 14)	/* function unknown */

#define	AXI_DESC_RING_ALIGN	64

/*
 * Driver data and defines.
 */

static struct resource_spec xae_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

static inline uint32_t
next_rxidx(struct xae_softc *sc, uint32_t curidx)
{

	return ((curidx == RX_DESC_COUNT - 1) ? 0 : curidx + 1);
}

static inline uint32_t
next_txidx(struct xae_softc *sc, uint32_t curidx)
{

	return ((curidx == TX_DESC_COUNT - 1) ? 0 : curidx + 1);
}

static void
xae_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

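/*
 * Program one TX descriptor.  The next pointer is pre-chained to the
 * physical address of the following ring slot, and sc->txcount tracks the
 * number of in-flight buffers; a zero paddr/len pair retires a slot.
 */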
inline static uint32_t
xae_setup_txdesc(struct xae_softc *sc, int idx, bus_addr_t paddr,
    uint32_t len)
{
	struct axidma_desc *desc;
	uint32_t nidx;
	uint32_t flags;

	nidx = next_txidx(sc, idx);

	desc = &sc->txdesc_ring[idx];

	/* Addr/len 0 means we're clearing the descriptor after xmit done. */
	if (paddr == 0 || len == 0) {
		flags = 0;
		--sc->txcount;
	} else {
		flags = BD_CONTROL_TXSOF | BD_CONTROL_TXEOF;
		++sc->txcount;
	}

	desc->next = sc->txdesc_ring_paddr + sizeof(struct axidma_desc) * nidx;
	desc->phys = paddr;
	desc->status = 0;
	desc->control = len | flags;

	return (nidx);
}

static int
xae_setup_txbuf(struct xae_softc *sc, int idx, struct mbuf **mp)
{
	struct bus_dma_segment seg;
	struct mbuf *m;
	int error;
	int nsegs;

	dprintf("%s\n", __func__);

	if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
		return (ENOMEM);

	*mp = m;

	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    m, &seg, &nsegs, 0);
	if (error != 0)
		return (ENOMEM);

	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    BUS_DMASYNC_PREWRITE);

	sc->txbuf_map[idx].mbuf = m;
	xae_setup_txdesc(sc, idx, seg.ds_addr, seg.ds_len);

	return (0);
}

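/*
 * Drain the interface send queue into the TX ring, then kick the DMA
 * engine by advancing the tail descriptor pointer to the last slot
 * enqueued.
 */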
static void
xae_txstart_locked(struct xae_softc *sc)
{
	struct mbuf *m;
	int enqueued;
	uint32_t addr;
	int tmp;
	if_t ifp;

	dprintf("%s\n", __func__);

	XAE_ASSERT_LOCKED(sc);

	if (!sc->link_is_up)
		return;

	ifp = sc->ifp;

	if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE)
		return;

	enqueued = 0;

	for (;;) {
		if (sc->txcount == (TX_DESC_COUNT - 1)) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		m = if_dequeue(ifp);
		if (m == NULL)
			break;
		if (xae_setup_txbuf(sc, sc->tx_idx_head, &m) != 0) {
			if_sendq_prepend(ifp, m);
			break;
		}
		BPF_MTAP(ifp, m);
		tmp = sc->tx_idx_head;
		sc->tx_idx_head = next_txidx(sc, sc->tx_idx_head);
		++enqueued;
	}

	if (enqueued != 0) {
		bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map,
		    BUS_DMASYNC_PREWRITE);

		addr = sc->txdesc_ring_paddr + tmp * sizeof(struct axidma_desc);
		dprintf("%s: new tail desc %x\n", __func__, addr);
		AXIDMA_WR8(sc, AXI_TAILDESC(AXIDMA_TX_CHAN), addr);
	}
}

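/*
 * Retire TX descriptors that the DMA engine has marked complete, unmapping
 * and freeing their mbufs, and restart output if any ring slots were
 * reclaimed.
 */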
static void
xae_txfinish_locked(struct xae_softc *sc)
{
	struct axidma_desc *desc;
	struct xae_bufmap *bmap;
	boolean_t retired_buffer;

	XAE_ASSERT_LOCKED(sc);

	bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_POSTREAD);
	retired_buffer = false;
	while (sc->tx_idx_tail != sc->tx_idx_head) {
		desc = &sc->txdesc_ring[sc->tx_idx_tail];
		if ((desc->status & BD_STATUS_CMPLT) == 0)
			break;
		retired_buffer = true;
		bmap = &sc->txbuf_map[sc->tx_idx_tail];
		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
		m_freem(bmap->mbuf);
		bmap->mbuf = NULL;
		xae_setup_txdesc(sc, sc->tx_idx_tail, 0, 0);
		sc->tx_idx_tail = next_txidx(sc, sc->tx_idx_tail);
	}

	/*
	 * If we retired any buffers, there will be open tx slots available
	 * in the descriptor ring, so go try to start some new output.
	 */
	if (retired_buffer) {
		if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
		xae_txstart_locked(sc);
	}
}

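/*
 * Program one RX descriptor.  As on the TX side, the next pointer chains
 * to the following ring slot; the control word carries the buffer length
 * (one mbuf cluster).
 */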
inline static uint32_t
xae_setup_rxdesc(struct xae_softc *sc, int idx, bus_addr_t paddr)
{
	struct axidma_desc *desc;
	uint32_t nidx;

	/*
	 * The hardware requires 32-bit physical addresses.  We set up the dma
	 * tag to indicate that, so the cast to uint32_t should never lose
	 * significant bits.
	 */
	nidx = next_rxidx(sc, idx);

	desc = &sc->rxdesc_ring[idx];
	desc->next = sc->rxdesc_ring_paddr + sizeof(struct axidma_desc) * nidx;
	desc->phys = paddr;
	desc->status = 0;
	desc->control = MCLBYTES | BD_CONTROL_TXSOF | BD_CONTROL_TXEOF;

	return (nidx);
}

static struct mbuf *
xae_alloc_mbufcl(struct xae_softc *sc)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return (m);
}

static int
xae_setup_rxbuf(struct xae_softc *sc, int idx, struct mbuf *m)
{
	int error, nsegs;
	struct bus_dma_segment seg;

	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    m, &seg, &nsegs, 0);
	if (error != 0)
		return (error);

	bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    BUS_DMASYNC_PREREAD);

	sc->rxbuf_map[idx].mbuf = m;
	xae_setup_rxdesc(sc, idx, seg.ds_addr);

	return (0);
}

static void
xae_rxfinish_onebuf(struct xae_softc *sc, int len)
{
	struct mbuf *m, *newmbuf;
	struct xae_bufmap *bmap;
	int error;

	dprintf("%s\n", __func__);

	/*
	 * First try to get a new mbuf to plug into this slot in the rx ring.
	 * If that fails, drop the current packet and recycle the current
	 * mbuf, which is still mapped and loaded.
	 */
	if ((newmbuf = xae_alloc_mbufcl(sc)) == NULL) {
		if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
		xae_setup_rxdesc(sc, sc->rx_idx,
		    sc->rxdesc_ring[sc->rx_idx].phys);
		return;
	}

	XAE_UNLOCK(sc);

	bmap = &sc->rxbuf_map[sc->rx_idx];
	bus_dmamap_sync(sc->rxbuf_tag, bmap->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->rxbuf_tag, bmap->map);
	m = bmap->mbuf;
	bmap->mbuf = NULL;
	m->m_len = len;
	m->m_pkthdr.len = len;
	m->m_pkthdr.rcvif = sc->ifp;

	if_input(sc->ifp, m);

	XAE_LOCK(sc);

	if ((error = xae_setup_rxbuf(sc, sc->rx_idx, newmbuf)) != 0) {
		device_printf(sc->dev, "xae_setup_rxbuf error %d\n", error);
		/* XXX Now what?  We've got a hole in the rx ring. */
	}
}

static void
xae_rxfinish_locked(struct xae_softc *sc)
{
	boolean_t desc_completed;
	struct axidma_desc *desc;
	uint32_t addr;
	int len;
	int tmp;

	dprintf("%s\n", __func__);

	XAE_ASSERT_LOCKED(sc);

	bus_dmamap_sync(sc->rxdesc_tag, sc->rxdesc_map, BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->rxdesc_tag, sc->rxdesc_map, BUS_DMASYNC_POSTREAD);
	desc_completed = false;
	for (;;) {
		desc = &sc->rxdesc_ring[sc->rx_idx];
		if ((desc->status & BD_STATUS_CMPLT) == 0)
			break;
		desc_completed = true;
		len = desc->status & BD_CONTROL_LEN_M;
		xae_rxfinish_onebuf(sc, len);
		tmp = sc->rx_idx;
		sc->rx_idx = next_rxidx(sc, sc->rx_idx);
	}

	if (desc_completed) {
		bus_dmamap_sync(sc->rxdesc_tag, sc->rxdesc_map,
		    BUS_DMASYNC_PREWRITE);

		addr = sc->rxdesc_ring_paddr + tmp * sizeof(struct axidma_desc);
		dprintf("%s: new tail desc %x\n", __func__, addr);
		AXIDMA_WR8(sc, AXI_TAILDESC(AXIDMA_RX_CHAN), addr);
	}
}

static void
xae_intr_rx(void *arg)
{
	struct xae_softc *sc;
	uint32_t pending;

	sc = arg;

	XAE_LOCK(sc);
	pending = AXIDMA_RD4(sc, AXI_DMASR(AXIDMA_RX_CHAN));
	dprintf("%s: pending %x\n", __func__, pending);
	AXIDMA_WR4(sc, AXI_DMASR(AXIDMA_RX_CHAN), pending);
	xae_rxfinish_locked(sc);
	XAE_UNLOCK(sc);
}

static void
xae_intr_tx(void *arg)
{
	struct xae_softc *sc;
	uint32_t pending;

	sc = arg;

	XAE_LOCK(sc);
	pending = AXIDMA_RD4(sc, AXI_DMASR(AXIDMA_TX_CHAN));
	dprintf("%s: pending %x\n", __func__, pending);
	AXIDMA_WR4(sc, AXI_DMASR(AXIDMA_TX_CHAN), pending);
	xae_txfinish_locked(sc);
	XAE_UNLOCK(sc);
}

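/*
 * if_foreach_llmaddr() callback: program one multicast address into the
 * frame filter.  The table entry is selected through the low byte of
 * XAE_FFC and the six address octets are split across FFV(0)/FFV(1).
 */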
static u_int
xae_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct xae_softc *sc = arg;
	uint32_t reg;
	uint8_t *ma;

	if (cnt >= XAE_MULTICAST_TABLE_SIZE)
		return (1);

	ma = LLADDR(sdl);

	reg = XAE_RD4(sc, XAE_FFC) & 0xffffff00;
	reg |= cnt;
	XAE_WR4(sc, XAE_FFC, reg);

	reg = (ma[0]);
	reg |= (ma[1] << 8);
	reg |= (ma[2] << 16);
	reg |= (ma[3] << 24);
	XAE_WR4(sc, XAE_FFV(0), reg);

	reg = ma[4];
	reg |= ma[5] << 8;
	XAE_WR4(sc, XAE_FFV(1), reg);

	return (1);
}

static void
xae_setup_rxfilter(struct xae_softc *sc)
{
	if_t ifp;
	uint32_t reg;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/*
	 * Set the multicast (group) filter hash.
	 */
	if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		reg = XAE_RD4(sc, XAE_FFC);
		reg |= FFC_PM;
		XAE_WR4(sc, XAE_FFC, reg);
	} else {
		reg = XAE_RD4(sc, XAE_FFC);
		reg &= ~FFC_PM;
		XAE_WR4(sc, XAE_FFC, reg);

		if_foreach_llmaddr(ifp, xae_write_maddr, sc);
	}

	/*
	 * Set the primary address.
	 */
	reg = sc->macaddr[0];
	reg |= (sc->macaddr[1] << 8);
	reg |= (sc->macaddr[2] << 16);
	reg |= (sc->macaddr[3] << 24);
	XAE_WR4(sc, XAE_UAW0, reg);

	reg = sc->macaddr[4];
	reg |= (sc->macaddr[5] << 8);
	XAE_WR4(sc, XAE_UAW1, reg);
}

static int
xae_get_phyaddr(phandle_t node, int *phy_addr)
{
	phandle_t phy_node;
	pcell_t phy_handle, phy_reg;

	if (OF_getencprop(node, "phy-handle", (void *)&phy_handle,
	    sizeof(phy_handle)) <= 0)
		return (ENXIO);

	phy_node = OF_node_from_xref(phy_handle);

	if (OF_getencprop(phy_node, "reg", (void *)&phy_reg,
	    sizeof(phy_reg)) <= 0)
		return (ENXIO);

	*phy_addr = phy_reg;

	return (0);
}

static void
xae_stop_locked(struct xae_softc *sc)
{
	uint32_t reg;

	XAE_ASSERT_LOCKED(sc);

	if_setdrvflagbits(sc->ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));

	callout_stop(&sc->xae_callout);

	/* Stop the transmitter. */
	reg = XAE_RD4(sc, XAE_TC);
	reg &= ~TC_TX;
	XAE_WR4(sc, XAE_TC, reg);

	/* Stop the receiver. */
	reg = XAE_RD4(sc, XAE_RCW1);
	reg &= ~RCW1_RX;
	XAE_WR4(sc, XAE_RCW1, reg);
}

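/*
 * Read a hardware statistics counter and return the delta since the
 * previous harvest, compensating for a single wraparound.
 */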
static uint64_t
xae_stat(struct xae_softc *sc, int counter_id)
{
	uint64_t new, old;
	uint64_t delta;

	KASSERT(counter_id < XAE_MAX_COUNTERS,
	    ("counter %d is out of range", counter_id));

	new = XAE_RD8(sc, XAE_STATCNT(counter_id));
	old = sc->counters[counter_id];

	if (new >= old)
		delta = new - old;
	else
		delta = UINT64_MAX - old + new;
	sc->counters[counter_id] = new;

	return (delta);
}

static void
xae_harvest_stats(struct xae_softc *sc)
{
	if_t ifp;

	ifp = sc->ifp;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, xae_stat(sc, RX_GOOD_FRAMES));
	if_inc_counter(ifp, IFCOUNTER_IMCASTS, xae_stat(sc, RX_GOOD_MCASTS));
	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    xae_stat(sc, RX_FRAME_CHECK_SEQ_ERROR) +
	    xae_stat(sc, RX_LEN_OUT_OF_RANGE) +
	    xae_stat(sc, RX_ALIGNMENT_ERRORS));

	if_inc_counter(ifp, IFCOUNTER_OBYTES, xae_stat(sc, TX_BYTES));
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, xae_stat(sc, TX_GOOD_FRAMES));
	if_inc_counter(ifp, IFCOUNTER_OMCASTS, xae_stat(sc, TX_GOOD_MCASTS));
	if_inc_counter(ifp, IFCOUNTER_OERRORS,
	    xae_stat(sc, TX_GOOD_UNDERRUN_ERRORS));

	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    xae_stat(sc, TX_SINGLE_COLLISION_FRAMES) +
	    xae_stat(sc, TX_MULTI_COLLISION_FRAMES) +
	    xae_stat(sc, TX_LATE_COLLISIONS) +
	    xae_stat(sc, TX_EXCESS_COLLISIONS));
}

static void
xae_tick(void *arg)
{
	struct xae_softc *sc;
	if_t ifp;
	int link_was_up;

	sc = arg;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
		return;

	/* Gather stats from hardware counters. */
	xae_harvest_stats(sc);

	/* Check the media status. */
	link_was_up = sc->link_is_up;
	mii_tick(sc->mii_softc);
	if (sc->link_is_up && !link_was_up)
		xae_txstart_locked(sc);

	/* Schedule another check one second from now. */
	callout_reset(&sc->xae_callout, hz, xae_tick, sc);
}

static void
xae_init_locked(struct xae_softc *sc)
{
	if_t ifp;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);

	xae_setup_rxfilter(sc);

	/* Enable the transmitter. */
	XAE_WR4(sc, XAE_TC, TC_TX);

	/* Enable the receiver. */
	XAE_WR4(sc, XAE_RCW1, RCW1_RX);

	/*
	 * Call mii_mediachg() which will call back into xae_miibus_statchg()
	 * to set up the remaining config registers based on current media.
	 */
	mii_mediachg(sc->mii_softc);
	callout_reset(&sc->xae_callout, hz, xae_tick, sc);
}

static void
xae_init(void *arg)
{
	struct xae_softc *sc;

	sc = arg;

	XAE_LOCK(sc);
	xae_init_locked(sc);
	XAE_UNLOCK(sc);
}

static void
xae_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct xae_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = sc->mii_softc;

	XAE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	XAE_UNLOCK(sc);
}

static int
xae_media_change_locked(struct xae_softc *sc)
{

	return (mii_mediachg(sc->mii_softc));
}

static int
xae_media_change(if_t ifp)
{
	struct xae_softc *sc;
	int error;

	sc = if_getsoftc(ifp);

	XAE_LOCK(sc);
	error = xae_media_change_locked(sc);
	XAE_UNLOCK(sc);

	return (error);
}

static int
xae_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct xae_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int mask, error;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		XAE_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if ((if_getflags(ifp) ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					xae_setup_rxfilter(sc);
			} else {
				if (!sc->is_detaching)
					xae_init_locked(sc);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				xae_stop_locked(sc);
		}
		sc->if_flags = if_getflags(ifp);
		XAE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			XAE_LOCK(sc);
			xae_setup_rxfilter(sc);
			XAE_UNLOCK(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = sc->mii_softc;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			/* No work to do except acknowledge the change took. */
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

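/*
 * MAC-level interrupt handler.  RX and TX completions are delivered
 * through the AXI DMA channel callbacks (xae_intr_rx/xae_intr_tx), so
 * there is nothing to do here.
 */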
static void
xae_intr(void *arg)
{

}

static int
xae_get_hwaddr(struct xae_softc *sc, uint8_t *hwaddr)
{
	phandle_t node;
	int len;

	node = ofw_bus_get_node(sc->dev);

	/* Check if the property is present. */
	if ((len = OF_getproplen(node, "local-mac-address")) <= 0)
		return (EINVAL);

	if (len != ETHER_ADDR_LEN)
		return (EINVAL);

	OF_getprop(node, "local-mac-address", hwaddr, ETHER_ADDR_LEN);

	return (0);
}

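/*
 * Poll for MDIO_CTRL_READY, giving up after roughly 200us.  Returns zero
 * once the MDIO interface is ready to accept a new transaction.
 */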
static int
mdio_wait(struct xae_softc *sc)
{
	uint32_t reg;
	int timeout;

	timeout = 200;

	do {
		reg = XAE_RD4(sc, XAE_MDIO_CTRL);
		if (reg & MDIO_CTRL_READY)
			break;
		DELAY(1);
	} while (timeout--);

	if (timeout <= 0) {
		printf("Failed to get MDIO ready\n");
		return (1);
	}

	return (0);
}

static int
xae_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct xae_softc *sc;
	uint32_t mii;
	int rv;

	sc = device_get_softc(dev);

	if (mdio_wait(sc))
		return (0);

	mii = MDIO_CTRL_TX_OP_READ | MDIO_CTRL_INITIATE;
	mii |= (reg << MDIO_TX_REGAD_S);
	mii |= (phy << MDIO_TX_PHYAD_S);

	XAE_WR4(sc, XAE_MDIO_CTRL, mii);

	if (mdio_wait(sc))
		return (0);

	rv = XAE_RD4(sc, XAE_MDIO_READ);

	return (rv);
}

static int
xae_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct xae_softc *sc;
	uint32_t mii;

	sc = device_get_softc(dev);

	if (mdio_wait(sc))
		return (1);

	mii = MDIO_CTRL_TX_OP_WRITE | MDIO_CTRL_INITIATE;
	mii |= (reg << MDIO_TX_REGAD_S);
	mii |= (phy << MDIO_TX_PHYAD_S);

	XAE_WR4(sc, XAE_MDIO_WRITE, val);
	XAE_WR4(sc, XAE_MDIO_CTRL, mii);

	if (mdio_wait(sc))
		return (1);

	return (0);
}

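/*
 * Undocumented DP83867 fixup for the Xilinx VCU118 board: enable SGMII
 * mode, tune the speed-optimization settings, and reset the PHY, repeating
 * until the PHY at address 1 responds; then restart autonegotiation and
 * wait for it to complete.
 */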
static void
xae_phy_fixup(struct xae_softc *sc)
{
	uint32_t reg;

	do {
		WRITE_TI_EREG(sc, DP83867_SGMIICTL1, SGMIICTL1_SGMII_6W);
		PHY_WR(sc, DP83867_PHYCR, PHYCR_SGMII_EN);

		reg = PHY_RD(sc, DP83867_CFG2);
		reg &= ~CFG2_SPEED_OPT_ATTEMPT_CNT_M;
		reg |= (CFG2_SPEED_OPT_ATTEMPT_CNT_4);
		reg |= CFG2_INTERRUPT_POLARITY;
		reg |= CFG2_SPEED_OPT_ENHANCED_EN;
		reg |= CFG2_SPEED_OPT_10M_EN;
		PHY_WR(sc, DP83867_CFG2, reg);

		WRITE_TI_EREG(sc, DP83867_CFG4, CFG4_SGMII_TMR);
		PHY_WR(sc, MII_BMCR,
		    BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_RESET);
	} while (PHY1_RD(sc, MII_BMCR) == 0x0ffff);

	do {
		PHY1_WR(sc, MII_BMCR,
		    BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_STARTNEG);
		DELAY(40000);
	} while ((PHY1_RD(sc, MII_BMSR) & BMSR_ACOMP) == 0);
}

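/*
 * Resolve the "axistream-connected" FDT xref to the AXI DMA controller
 * and borrow its register resource for the AXIDMA_RD/WR accessors.
 */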
static int
get_axistream(struct xae_softc *sc)
{
	phandle_t node;
	pcell_t prop;
	size_t len;

	node = ofw_bus_get_node(sc->dev);
	len = OF_getencprop(node, "axistream-connected", &prop, sizeof(prop));
	if (len != sizeof(prop)) {
		device_printf(sc->dev,
		    "%s: Could not get axistream-connected property.\n",
		    __func__);
		return (ENXIO);
	}
	sc->dma_dev = OF_device_from_xref(prop);
	if (sc->dma_dev == NULL) {
		device_printf(sc->dev, "Could not get DMA device by xref.\n");
		return (ENXIO);
	}
	sc->dma_res = AXIDMA_MEMRES(sc->dma_dev);

	return (0);
}

static void
xae_txstart(if_t ifp)
{
	struct xae_softc *sc;

	sc = if_getsoftc(ifp);

	dprintf("%s\n", __func__);

	XAE_LOCK(sc);
	xae_txstart_locked(sc);
	XAE_UNLOCK(sc);
}

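/*
 * Allocate and load the TX/RX descriptor rings and per-buffer DMA maps,
 * populate the RX ring with mbuf clusters, reset both DMA channels, hook
 * up the completion callbacks, and start the engines with interrupts
 * enabled.
 */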
static int
xae_setup_dma(struct xae_softc *sc)
{
	struct axidma_desc *desc;
	uint32_t addr;
	uint32_t reg;
	struct mbuf *m;
	int error;
	int idx;

	sc->rxbuf_align = PAGE_SIZE;
	sc->txbuf_align = PAGE_SIZE;

	/*
	 * Set up TX descriptor ring, descriptors, and dma maps.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    AXI_DESC_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    TX_DESC_SIZE, 1,		/* maxsize, nsegments */
	    TX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->txdesc_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create TX ring DMA tag.\n");
		goto out;
	}

	error = bus_dmamem_alloc(sc->txdesc_tag, (void **)&sc->txdesc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->txdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate TX descriptor ring.\n");
		goto out;
	}

	error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map, sc->txdesc_ring,
	    TX_DESC_SIZE, xae_get1paddr, &sc->txdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load TX descriptor ring map.\n");
		goto out;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    sc->txbuf_align, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->txbuf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX buffer DMA tag.\n");
		goto out;
	}

	for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
		desc = &sc->txdesc_ring[idx];
		bzero(desc, sizeof(struct axidma_desc));
	}

	for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
		error = bus_dmamap_create(sc->txbuf_tag, 0,
		    &sc->txbuf_map[idx].map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create TX buffer DMA map.\n");
			goto out;
		}
		xae_setup_txdesc(sc, idx, 0, 0);
	}

	/*
	 * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    AXI_DESC_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RX_DESC_SIZE, 1,		/* maxsize, nsegments */
	    RX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxdesc_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create RX ring DMA tag.\n");
		goto out;
	}

	error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rxdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate RX descriptor ring.\n");
		goto out;
	}

	error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map, sc->rxdesc_ring,
	    RX_DESC_SIZE, xae_get1paddr, &sc->rxdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load RX descriptor ring map.\n");
		goto out;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxbuf_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create RX buf DMA tag.\n");
		goto out;
	}

	for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
		desc = &sc->rxdesc_ring[idx];
		bzero(desc, sizeof(struct axidma_desc));
	}

	for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
		error = bus_dmamap_create(sc->rxbuf_tag, 0,
		    &sc->rxbuf_map[idx].map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create RX buffer DMA map.\n");
			goto out;
		}
		if ((m = xae_alloc_mbufcl(sc)) == NULL) {
			device_printf(sc->dev, "Could not alloc mbuf\n");
			error = ENOMEM;
			goto out;
		}
		if ((error = xae_setup_rxbuf(sc, idx, m)) != 0) {
			device_printf(sc->dev,
			    "could not create new RX buffer.\n");
			goto out;
		}
	}

	if (AXIDMA_RESET(sc->dma_dev, AXIDMA_TX_CHAN) != 0) {
		device_printf(sc->dev, "Could not reset TX channel.\n");
		goto out;
	}
	if (AXIDMA_RESET(sc->dma_dev, AXIDMA_RX_CHAN) != 0) {
		device_printf(sc->dev, "Could not reset RX channel.\n");
		goto out;
	}
	if (AXIDMA_SETUP_CB(sc->dma_dev, AXIDMA_TX_CHAN, xae_intr_tx, sc)) {
		device_printf(sc->dev, "Could not setup TX intr callback.\n");
		goto out;
	}
	if (AXIDMA_SETUP_CB(sc->dma_dev, AXIDMA_RX_CHAN, xae_intr_rx, sc)) {
		device_printf(sc->dev, "Could not setup RX intr callback.\n");
		goto out;
	}

	dprintf("%s: tx desc base %lx\n", __func__, sc->txdesc_ring_paddr);
	AXIDMA_WR8(sc, AXI_CURDESC(AXIDMA_TX_CHAN), sc->txdesc_ring_paddr);
	reg = AXIDMA_RD4(sc, AXI_DMACR(AXIDMA_TX_CHAN));
	reg |= DMACR_IOC_IRQEN | DMACR_DLY_IRQEN | DMACR_ERR_IRQEN;
	AXIDMA_WR4(sc, AXI_DMACR(AXIDMA_TX_CHAN), reg);
	reg |= DMACR_RS;
	AXIDMA_WR4(sc, AXI_DMACR(AXIDMA_TX_CHAN), reg);

	AXIDMA_WR8(sc, AXI_CURDESC(AXIDMA_RX_CHAN), sc->rxdesc_ring_paddr);
	reg = AXIDMA_RD4(sc, AXI_DMACR(AXIDMA_RX_CHAN));
	reg |= DMACR_IOC_IRQEN | DMACR_DLY_IRQEN | DMACR_ERR_IRQEN;
	AXIDMA_WR4(sc, AXI_DMACR(AXIDMA_RX_CHAN), reg);
	reg |= DMACR_RS;
	AXIDMA_WR4(sc, AXI_DMACR(AXIDMA_RX_CHAN), reg);

	addr = sc->rxdesc_ring_paddr +
	    (RX_DESC_COUNT - 1) * sizeof(struct axidma_desc);
	dprintf("%s: new RX tail desc %x\n", __func__, addr);
	AXIDMA_WR8(sc, AXI_TAILDESC(AXIDMA_RX_CHAN), addr);

	return (0);

out:
	/* TODO: release resources. */
	return (-1);
}

static int
xae_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "xlnx,axi-ethernet-1.00.a"))
		return (ENXIO);

	device_set_desc(dev, "Xilinx AXI Ethernet");

	return (BUS_PROBE_DEFAULT);
}

static int
xae_attach(device_t dev)
{
	struct xae_softc *sc;
	if_t ifp;
	phandle_t node;
	uint32_t reg;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->rx_idx = 0;
	sc->tx_idx_head = sc->tx_idx_tail = 0;
	sc->txcount = 0;

	mtx_init(&sc->mtx, device_get_nameunit(sc->dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	node = ofw_bus_get_node(dev);

	/* Get our MAC address. */
	if (xae_get_hwaddr(sc, sc->macaddr)) {
		device_printf(sc->dev, "could not get MAC address\n");
		return (ENXIO);
	}

	/* DMA */
	error = get_axistream(sc);
	if (error != 0)
		return (error);

	if (bus_alloc_resources(dev, xae_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Setup interrupt handler. */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, xae_intr, sc, &sc->intr_cookie);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	/* Memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	device_printf(sc->dev, "Identification: %x\n", XAE_RD4(sc, XAE_IDENT));

	error = xae_setup_dma(sc);
	if (error != 0)
		return (error);

	/* Enable MII clock. */
	reg = (MDIO_CLK_DIV_DEFAULT << MDIO_SETUP_CLK_DIV_S);
	reg |= MDIO_SETUP_ENABLE;
	XAE_WR4(sc, XAE_MDIO_SETUP, reg);
	if (mdio_wait(sc))
		return (ENXIO);

	callout_init_mtx(&sc->xae_callout, &sc->mtx, 0);

	/* Set up the ethernet interface. */
	sc->ifp = ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
	if_setcapenable(ifp, if_getcapabilities(ifp));
	if_setioctlfn(ifp, xae_ioctl);
	if_setstartfn(ifp, xae_txstart);
	if_setinitfn(ifp, xae_init);
	if_setsendqlen(ifp, TX_DESC_COUNT - 1);
	if_setsendqready(ifp);

	if (xae_get_phyaddr(node, &sc->phy_addr) != 0)
		return (ENXIO);

	/* Attach the mii driver. */
	error = mii_attach(dev, &sc->miibus, ifp, xae_media_change,
	    xae_media_status, BMSR_DEFCAPMASK, sc->phy_addr,
	    MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "PHY attach failed\n");
		return (ENXIO);
	}

	sc->mii_softc = device_get_softc(sc->miibus);

	/* Apply the VCU118 workaround. */
	if (OF_getproplen(node, "xlnx,vcu118") >= 0)
		xae_phy_fixup(sc);

	/* All ready to run, attach the ethernet interface. */
	ether_ifattach(ifp, sc->macaddr);
	sc->is_attached = true;

	return (0);
}

static int
xae_detach(device_t dev)
{
	struct xae_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	KASSERT(mtx_initialized(&sc->mtx), ("%s: mutex not initialized",
	    device_get_nameunit(dev)));

	ifp = sc->ifp;

	/* Only clean up if attach succeeded. */
	if (device_is_attached(dev)) {
		XAE_LOCK(sc);
		xae_stop_locked(sc);
		XAE_UNLOCK(sc);
		callout_drain(&sc->xae_callout);
		ether_ifdetach(ifp);
	}

	bus_generic_detach(dev);

	if (ifp != NULL)
		if_free(ifp);

	mtx_destroy(&sc->mtx);

	bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);

	bus_release_resources(dev, xae_spec, sc->res);

	return (0);
}

static void
xae_miibus_statchg(device_t dev)
{
	struct xae_softc *sc;
	struct mii_data *mii;
	uint32_t reg;

	/*
	 * Called by the MII bus driver when the PHY establishes
	 * link to set the MAC interface registers.
	 */

	sc = device_get_softc(dev);

	XAE_ASSERT_LOCKED(sc);

	mii = sc->mii_softc;

	if (mii->mii_media_status & IFM_ACTIVE)
		sc->link_is_up = true;
	else
		sc->link_is_up = false;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		reg = SPEED_1000;
		break;
	case IFM_100_TX:
		reg = SPEED_100;
		break;
	case IFM_10_T:
		reg = SPEED_10;
		break;
	case IFM_NONE:
		sc->link_is_up = false;
		return;
	default:
		sc->link_is_up = false;
		device_printf(dev, "Unsupported media %u\n",
		    IFM_SUBTYPE(mii->mii_media_active));
		return;
	}

	XAE_WR4(sc, XAE_SPEED, reg);
}

static device_method_t xae_methods[] = {
	DEVMETHOD(device_probe,		xae_probe),
	DEVMETHOD(device_attach,	xae_attach),
	DEVMETHOD(device_detach,	xae_detach),

	/* MII Interface */
	DEVMETHOD(miibus_readreg,	xae_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	xae_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	xae_miibus_statchg),
	{ 0, 0 }
};

driver_t xae_driver = {
	"xae",
	xae_methods,
	sizeof(struct xae_softc),
};

DRIVER_MODULE(xae, simplebus, xae_driver, 0, 0);
DRIVER_MODULE(miibus, xae, miibus_driver, 0, 0);

MODULE_DEPEND(xae, axidma, 1, 1, 1);
MODULE_DEPEND(xae, ether, 1, 1, 1);
MODULE_DEPEND(xae, miibus, 1, 1, 1);