/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019-2025 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/tiphy.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/xilinx/axidma.h>
#include <dev/xilinx/if_xaereg.h>
#include <dev/xilinx/if_xaevar.h>

#include "miibus_if.h"
#include "axidma_if.h"

#define	XAE_RD4(_sc, _reg)		bus_read_4((_sc)->res[0], _reg)
#define	XAE_RD8(_sc, _reg)		bus_read_8((_sc)->res[0], _reg)
#define	XAE_WR4(_sc, _reg, _val)	bus_write_4((_sc)->res[0], _reg, _val)
#define	XAE_WR8(_sc, _reg, _val)	bus_write_8((_sc)->res[0], _reg, _val)

#define	AXIDMA_RD4(_sc, _reg)		bus_read_4((_sc)->dma_res, _reg)
#define	AXIDMA_RD8(_sc, _reg)		bus_read_8((_sc)->dma_res, _reg)
#define	AXIDMA_WR4(_sc, _reg, _val)	bus_write_4((_sc)->dma_res, _reg, _val)
#define	AXIDMA_WR8(_sc, _reg, _val)	bus_write_8((_sc)->dma_res, _reg, _val)

#define	XAE_LOCK(sc)			mtx_lock(&(sc)->mtx)
#define	XAE_UNLOCK(sc)			mtx_unlock(&(sc)->mtx)
#define	XAE_ASSERT_LOCKED(sc)		mtx_assert(&(sc)->mtx, MA_OWNED)
#define	XAE_ASSERT_UNLOCKED(sc)		mtx_assert(&(sc)->mtx, MA_NOTOWNED)

#define	dprintf(fmt, ...)

#define	MDIO_CLK_DIV_DEFAULT	29

#define	PHY1_RD(sc, _r)		xae_miibus_read_reg(sc->dev, 1, _r)
#define	PHY1_WR(sc, _r, _v)	xae_miibus_write_reg(sc->dev, 1, _r, _v)
#define	PHY_RD(sc, _r)		xae_miibus_read_reg(sc->dev, sc->phy_addr, _r)
#define	PHY_WR(sc, _r, _v)	\
	xae_miibus_write_reg(sc->dev, sc->phy_addr, _r, _v)

/*
 * Use this macro to access extended (MMD) regs > 0x1f, via the indirect
 * MMDACR/MMDAADR access registers.
 */
#define	WRITE_TI_EREG(sc, reg, data) do {				\
	PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK);			\
	PHY_WR(sc, MII_MMDAADR, reg);					\
	PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK | MMDACR_FN_DATANPI);	\
	PHY_WR(sc, MII_MMDAADR, data);					\
} while (0)

/* Not documented, Xilinx VCU118 workaround */
#define	CFG4_SGMII_TMR		0x160	/* bits 8:7 MUST be '10' */
#define	DP83867_SGMIICTL1	0xD3	/* not documented register */
#define	SGMIICTL1_SGMII_6W	(1 << 14)	/* no idea what it is */

#define	AXI_DESC_RING_ALIGN	64

/*
 * Driver data and defines.
 */

static struct resource_spec xae_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

static inline uint32_t
next_rxidx(struct xae_softc *sc, uint32_t curidx)
{

	return ((curidx == RX_DESC_COUNT - 1) ? 0 : curidx + 1);
}

static inline uint32_t
next_txidx(struct xae_softc *sc, uint32_t curidx)
{

	return ((curidx == TX_DESC_COUNT - 1) ? 0 : curidx + 1);
}

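/*
 * Callback for bus_dmamap_load(): record the physical address of the
 * single segment that backs a descriptor ring.
 */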
static void
xae_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

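/*
 * Write one TX descriptor.  Each descriptor is chained to the following
 * ring entry through its 'next' field; a single-segment packet carries
 * both the SOF and EOF control bits.  A zero paddr/len clears the entry
 * after transmit completion, and sc->txcount tracks the number of
 * in-flight buffers accordingly.
 */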
inline static uint32_t
xae_setup_txdesc(struct xae_softc *sc, int idx, bus_addr_t paddr,
    uint32_t len)
{
	struct axidma_desc *desc;
	uint32_t nidx;
	uint32_t flags;

	nidx = next_txidx(sc, idx);

	desc = &sc->txdesc_ring[idx];

	/* Addr/len 0 means we're clearing the descriptor after xmit done. */
	if (paddr == 0 || len == 0) {
		flags = 0;
		--sc->txcount;
	} else {
		flags = BD_CONTROL_TXSOF | BD_CONTROL_TXEOF;
		++sc->txcount;
	}

	desc->next = sc->txdesc_ring_paddr + sizeof(struct axidma_desc) * nidx;
	desc->phys = paddr;
	desc->status = 0;
	desc->control = len | flags;

	return (nidx);
}

static int
xae_setup_txbuf(struct xae_softc *sc, int idx, struct mbuf **mp)
{
	struct bus_dma_segment seg;
	struct mbuf *m;
	int error;
	int nsegs;

	dprintf("%s\n", __func__);

	if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
		return (ENOMEM);

	*mp = m;

	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    m, &seg, &nsegs, 0);
	if (error != 0)
		return (ENOMEM);

	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    BUS_DMASYNC_PREWRITE);

	sc->txbuf_map[idx].mbuf = m;
	xae_setup_txdesc(sc, idx, seg.ds_addr, seg.ds_len);

	return (0);
}

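/*
 * Dequeue frames from the interface send queue and map them into TX
 * descriptors, then write the address of the last descriptor filled to
 * the DMA tail pointer, which tells the AXI DMA engine to process
 * descriptors up to that point.
 */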
static void
xae_txstart_locked(struct xae_softc *sc)
{
	struct mbuf *m;
	int enqueued;
	uint32_t addr;
	int tmp;
	if_t ifp;

	dprintf("%s\n", __func__);

	XAE_ASSERT_LOCKED(sc);

	if (!sc->link_is_up)
		return;

	ifp = sc->ifp;

	if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE)
		return;

	enqueued = 0;

	for (;;) {
		if (sc->txcount == (TX_DESC_COUNT - 1)) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		m = if_dequeue(ifp);
		if (m == NULL)
			break;
		if (xae_setup_txbuf(sc, sc->tx_idx_head, &m) != 0) {
			if_sendq_prepend(ifp, m);
			break;
		}
		BPF_MTAP(ifp, m);
		tmp = sc->tx_idx_head;
		sc->tx_idx_head = next_txidx(sc, sc->tx_idx_head);
		++enqueued;
	}

	if (enqueued != 0) {
		bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map,
		    BUS_DMASYNC_PREWRITE);

		addr = sc->txdesc_ring_paddr + tmp * sizeof(struct axidma_desc);
		dprintf("%s: new tail desc %x\n", __func__, addr);
		AXIDMA_WR8(sc, AXI_TAILDESC(AXIDMA_TX_CHAN), addr);
	}
}

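/*
 * Reclaim completed TX descriptors between the tail and head indices:
 * unload and free each transmitted mbuf and reset its descriptor.  If
 * any slots were retired, clear IFF_DRV_OACTIVE and try to start more
 * output.
 */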
static void
xae_txfinish_locked(struct xae_softc *sc)
{
	struct axidma_desc *desc;
	struct xae_bufmap *bmap;
	boolean_t retired_buffer;

	XAE_ASSERT_LOCKED(sc);

	bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_POSTREAD);
	retired_buffer = false;
	while (sc->tx_idx_tail != sc->tx_idx_head) {
		desc = &sc->txdesc_ring[sc->tx_idx_tail];
		if ((desc->status & BD_STATUS_CMPLT) == 0)
			break;
		retired_buffer = true;
		bmap = &sc->txbuf_map[sc->tx_idx_tail];
		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
		m_freem(bmap->mbuf);
		bmap->mbuf = NULL;
		xae_setup_txdesc(sc, sc->tx_idx_tail, 0, 0);
		sc->tx_idx_tail = next_txidx(sc, sc->tx_idx_tail);
	}

	/*
	 * If we retired any buffers, there will be open tx slots available in
	 * the descriptor ring, go try to start some new output.
	 */
	if (retired_buffer) {
		if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
		xae_txstart_locked(sc);
	}
}

inline static uint32_t
xae_setup_rxdesc(struct xae_softc *sc, int idx, bus_addr_t paddr)
{
	struct axidma_desc *desc;
	uint32_t nidx;

	/*
	 * The hardware requires 32-bit physical addresses. We set up the dma
	 * tag to indicate that, so the cast to uint32_t should never lose
	 * significant bits.
	 */
	nidx = next_rxidx(sc, idx);

	desc = &sc->rxdesc_ring[idx];
	desc->next = sc->rxdesc_ring_paddr + sizeof(struct axidma_desc) * nidx;
	desc->phys = paddr;
	desc->status = 0;
	desc->control = MCLBYTES | BD_CONTROL_TXSOF | BD_CONTROL_TXEOF;

	return (nidx);
}

static struct mbuf *
xae_alloc_mbufcl(struct xae_softc *sc)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return (m);
}

static int
xae_setup_rxbuf(struct xae_softc *sc, int idx, struct mbuf *m)
{
	int error, nsegs;
	struct bus_dma_segment seg;

	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    m, &seg, &nsegs, 0);
	if (error != 0)
		return (error);

	bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    BUS_DMASYNC_PREREAD);

	sc->rxbuf_map[idx].mbuf = m;
	xae_setup_rxdesc(sc, idx, seg.ds_addr);

	return (0);
}

static void
xae_rxfinish_onebuf(struct xae_softc *sc, int len)
{
	struct mbuf *m, *newmbuf;
	struct xae_bufmap *bmap;
	int error;

	dprintf("%s\n", __func__);

	/*
	 * First try to get a new mbuf to plug into this slot in the rx ring.
	 * If that fails, drop the current packet and recycle the current
	 * mbuf, which is still mapped and loaded.
	 */
	if ((newmbuf = xae_alloc_mbufcl(sc)) == NULL) {
		if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
		xae_setup_rxdesc(sc, sc->rx_idx,
		    sc->rxdesc_ring[sc->rx_idx].phys);
		return;
	}

	XAE_UNLOCK(sc);

	bmap = &sc->rxbuf_map[sc->rx_idx];
	bus_dmamap_sync(sc->rxbuf_tag, bmap->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->rxbuf_tag, bmap->map);
	m = bmap->mbuf;
	bmap->mbuf = NULL;
	m->m_len = len;
	m->m_pkthdr.len = len;
	m->m_pkthdr.rcvif = sc->ifp;

	if_input(sc->ifp, m);

	XAE_LOCK(sc);

	if ((error = xae_setup_rxbuf(sc, sc->rx_idx, newmbuf)) != 0) {
		device_printf(sc->dev, "xae_setup_rxbuf error %d\n", error);
		/* XXX Now what? We've got a hole in the rx ring. */
	}
}

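/*
 * Process completed RX descriptors, handing each received frame up the
 * stack, then advance the DMA tail pointer so the engine keeps filling
 * the ring behind us.
 */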
static void
xae_rxfinish_locked(struct xae_softc *sc)
{
	boolean_t desc_completed;
	struct axidma_desc *desc;
	uint32_t addr;
	int len;
	int tmp;

	dprintf("%s\n", __func__);

	XAE_ASSERT_LOCKED(sc);

	bus_dmamap_sync(sc->rxdesc_tag, sc->rxdesc_map, BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->rxdesc_tag, sc->rxdesc_map, BUS_DMASYNC_POSTREAD);
	desc_completed = false;
	for (;;) {
		desc = &sc->rxdesc_ring[sc->rx_idx];
		if ((desc->status & BD_STATUS_CMPLT) == 0)
			break;
		desc_completed = true;
		len = desc->status & BD_CONTROL_LEN_M;
		xae_rxfinish_onebuf(sc, len);
		tmp = sc->rx_idx;
		sc->rx_idx = next_rxidx(sc, sc->rx_idx);
	}

	if (desc_completed) {
		bus_dmamap_sync(sc->rxdesc_tag, sc->rxdesc_map,
		    BUS_DMASYNC_PREWRITE);

		addr = sc->rxdesc_ring_paddr + tmp * sizeof(struct axidma_desc);
		dprintf("%s: new tail desc %x\n", __func__, addr);
		AXIDMA_WR8(sc, AXI_TAILDESC(AXIDMA_RX_CHAN), addr);
	}
}

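/*
 * Per-channel interrupt callbacks, registered with the axidma driver in
 * xae_setup_dma().  Acknowledge the pending status bits by writing them
 * back, then harvest completed descriptors.
 */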
static void
xae_intr_rx(void *arg)
{
	struct xae_softc *sc;
	uint32_t pending;

	sc = arg;

	XAE_LOCK(sc);
	pending = AXIDMA_RD4(sc, AXI_DMASR(AXIDMA_RX_CHAN));
	dprintf("%s: pending %x\n", __func__, pending);
	AXIDMA_WR4(sc, AXI_DMASR(AXIDMA_RX_CHAN), pending);
	xae_rxfinish_locked(sc);
	XAE_UNLOCK(sc);
}

static void
xae_intr_tx(void *arg)
{
	struct xae_softc *sc;
	uint32_t pending;

	sc = arg;

	XAE_LOCK(sc);
	pending = AXIDMA_RD4(sc, AXI_DMASR(AXIDMA_TX_CHAN));
	dprintf("%s: pending %x\n", __func__, pending);
	AXIDMA_WR4(sc, AXI_DMASR(AXIDMA_TX_CHAN), pending);
	xae_txfinish_locked(sc);
	XAE_UNLOCK(sc);
}

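/*
 * if_foreach_llmaddr() callback: program one multicast address into the
 * frame filter.  XAE_FFC selects the table entry and XAE_FFV(0)/(1)
 * hold the 48-bit address; addresses beyond the table size are counted
 * but not programmed.
 */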
static u_int
xae_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct xae_softc *sc = arg;
	uint32_t reg;
	uint8_t *ma;

	if (cnt >= XAE_MULTICAST_TABLE_SIZE)
		return (1);

	ma = LLADDR(sdl);

	reg = XAE_RD4(sc, XAE_FFC) & 0xffffff00;
	reg |= cnt;
	XAE_WR4(sc, XAE_FFC, reg);

	reg = (ma[0]);
	reg |= (ma[1] << 8);
	reg |= (ma[2] << 16);
	reg |= (ma[3] << 24);
	XAE_WR4(sc, XAE_FFV(0), reg);

	reg = ma[4];
	reg |= ma[5] << 8;
	XAE_WR4(sc, XAE_FFV(1), reg);

	return (1);
}

static void
xae_setup_rxfilter(struct xae_softc *sc)
{
	if_t ifp;
	uint32_t reg;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/*
	 * Set the multicast (group) filter hash.
	 */
	if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		reg = XAE_RD4(sc, XAE_FFC);
		reg |= FFC_PM;
		XAE_WR4(sc, XAE_FFC, reg);
	} else {
		reg = XAE_RD4(sc, XAE_FFC);
		reg &= ~FFC_PM;
		XAE_WR4(sc, XAE_FFC, reg);

		if_foreach_llmaddr(ifp, xae_write_maddr, sc);
	}

	/*
	 * Set the primary address.
	 */
	reg = sc->macaddr[0];
	reg |= (sc->macaddr[1] << 8);
	reg |= (sc->macaddr[2] << 16);
	reg |= (sc->macaddr[3] << 24);
	XAE_WR4(sc, XAE_UAW0, reg);

	reg = sc->macaddr[4];
	reg |= (sc->macaddr[5] << 8);
	XAE_WR4(sc, XAE_UAW1, reg);
}

static int
xae_get_phyaddr(phandle_t node, int *phy_addr)
{
	phandle_t phy_node;
	pcell_t phy_handle, phy_reg;

	if (OF_getencprop(node, "phy-handle", (void *)&phy_handle,
	    sizeof(phy_handle)) <= 0)
		return (ENXIO);

	phy_node = OF_node_from_xref(phy_handle);

	if (OF_getencprop(phy_node, "reg", (void *)&phy_reg,
	    sizeof(phy_reg)) <= 0)
		return (ENXIO);

	*phy_addr = phy_reg;

	return (0);
}

static void
xae_qflush(if_t ifp)
{
}

static void
xae_stop_locked(struct xae_softc *sc)
{
	uint32_t reg;

	XAE_ASSERT_LOCKED(sc);

	if_setdrvflagbits(sc->ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));

	callout_stop(&sc->xae_callout);

	/* Stop the transmitter */
	reg = XAE_RD4(sc, XAE_TC);
	reg &= ~TC_TX;
	XAE_WR4(sc, XAE_TC, reg);

	/* Stop the receiver. */
	reg = XAE_RD4(sc, XAE_RCW1);
	reg &= ~RCW1_RX;
	XAE_WR4(sc, XAE_RCW1, reg);
}

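/*
 * Return the change in a hardware statistics counter since the last
 * harvest.  The counters are assumed to be free-running; the previous
 * value is cached in sc->counters and wraparound is handled explicitly.
 */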
static uint64_t
xae_stat(struct xae_softc *sc, int counter_id)
{
	uint64_t new, old;
	uint64_t delta;

	KASSERT(counter_id < XAE_MAX_COUNTERS,
	    ("counter %d is out of range", counter_id));

	new = XAE_RD8(sc, XAE_STATCNT(counter_id));
	old = sc->counters[counter_id];

	if (new >= old)
		delta = new - old;
	else
		delta = UINT64_MAX - old + new;
	sc->counters[counter_id] = new;

	return (delta);
}

static void
xae_harvest_stats(struct xae_softc *sc)
{
	if_t ifp;

	ifp = sc->ifp;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, xae_stat(sc, RX_GOOD_FRAMES));
	if_inc_counter(ifp, IFCOUNTER_IMCASTS, xae_stat(sc, RX_GOOD_MCASTS));
	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    xae_stat(sc, RX_FRAME_CHECK_SEQ_ERROR) +
	    xae_stat(sc, RX_LEN_OUT_OF_RANGE) +
	    xae_stat(sc, RX_ALIGNMENT_ERRORS));

	if_inc_counter(ifp, IFCOUNTER_OBYTES, xae_stat(sc, TX_BYTES));
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, xae_stat(sc, TX_GOOD_FRAMES));
	if_inc_counter(ifp, IFCOUNTER_OMCASTS, xae_stat(sc, TX_GOOD_MCASTS));
	if_inc_counter(ifp, IFCOUNTER_OERRORS,
	    xae_stat(sc, TX_GOOD_UNDERRUN_ERRORS));

	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    xae_stat(sc, TX_SINGLE_COLLISION_FRAMES) +
	    xae_stat(sc, TX_MULTI_COLLISION_FRAMES) +
	    xae_stat(sc, TX_LATE_COLLISIONS) +
	    xae_stat(sc, TX_EXCESS_COLLISIONS));
}

static void
xae_tick(void *arg)
{
	struct xae_softc *sc;
	if_t ifp;
	int link_was_up;

	sc = arg;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
		return;

	/* Gather stats from hardware counters. */
	xae_harvest_stats(sc);

	/* Check the media status. */
	link_was_up = sc->link_is_up;
	mii_tick(sc->mii_softc);
	if (sc->link_is_up && !link_was_up)
		xae_txstart_locked(sc);

	/* Schedule another check one second from now. */
	callout_reset(&sc->xae_callout, hz, xae_tick, sc);
}

static void
xae_init_locked(struct xae_softc *sc)
{
	if_t ifp;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);

	xae_setup_rxfilter(sc);

	/* Enable the transmitter */
	XAE_WR4(sc, XAE_TC, TC_TX);

	/* Enable the receiver. */
	XAE_WR4(sc, XAE_RCW1, RCW1_RX);

	/*
	 * Call mii_mediachg() which will call back into xae_miibus_statchg()
	 * to set up the remaining config registers based on current media.
	 */
	mii_mediachg(sc->mii_softc);
	callout_reset(&sc->xae_callout, hz, xae_tick, sc);
}

static void
xae_init(void *arg)
{
	struct xae_softc *sc;

	sc = arg;

	XAE_LOCK(sc);
	xae_init_locked(sc);
	XAE_UNLOCK(sc);
}

static void
xae_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct xae_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = sc->mii_softc;

	XAE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	XAE_UNLOCK(sc);
}

static int
xae_media_change_locked(struct xae_softc *sc)
{

	return (mii_mediachg(sc->mii_softc));
}

static int
xae_media_change(if_t ifp)
{
	struct xae_softc *sc;
	int error;

	sc = if_getsoftc(ifp);

	XAE_LOCK(sc);
	error = xae_media_change_locked(sc);
	XAE_UNLOCK(sc);

	return (error);
}

static int
xae_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct xae_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int mask, error;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		XAE_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if ((if_getflags(ifp) ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					xae_setup_rxfilter(sc);
			} else {
				if (!sc->is_detaching)
					xae_init_locked(sc);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				xae_stop_locked(sc);
		}
		sc->if_flags = if_getflags(ifp);
		XAE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			XAE_LOCK(sc);
			xae_setup_rxfilter(sc);
			XAE_UNLOCK(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = sc->mii_softc;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			/* No work to do except acknowledge the change took */
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

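/*
 * MAC-level interrupt handler.  TX and RX completion is delivered via
 * the axidma channel callbacks instead, so there is nothing to do here.
 */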
static void
xae_intr(void *arg)
{

}

static int
xae_get_hwaddr(struct xae_softc *sc, uint8_t *hwaddr)
{
	phandle_t node;
	int len;

	node = ofw_bus_get_node(sc->dev);

	/* Check if the property is present. */
	if ((len = OF_getproplen(node, "local-mac-address")) <= 0)
		return (EINVAL);

	if (len != ETHER_ADDR_LEN)
		return (EINVAL);

	OF_getprop(node, "local-mac-address", hwaddr, ETHER_ADDR_LEN);

	return (0);
}

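/* Spin (up to roughly 200us) until the MDIO interface reports ready. */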
static int
mdio_wait(struct xae_softc *sc)
{
	uint32_t reg;
	int timeout;

	timeout = 200;

	do {
		reg = XAE_RD4(sc, XAE_MDIO_CTRL);
		if (reg & MDIO_CTRL_READY)
			break;
		DELAY(1);
	} while (timeout--);

	if (timeout < 0) {
		device_printf(sc->dev, "Failed to get MDIO ready\n");
		return (1);
	}

	return (0);
}

static int
xae_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct xae_softc *sc;
	uint32_t mii;
	int rv;

	sc = device_get_softc(dev);

	if (mdio_wait(sc))
		return (0);

	mii = MDIO_CTRL_TX_OP_READ | MDIO_CTRL_INITIATE;
	mii |= (reg << MDIO_TX_REGAD_S);
	mii |= (phy << MDIO_TX_PHYAD_S);

	XAE_WR4(sc, XAE_MDIO_CTRL, mii);

	if (mdio_wait(sc))
		return (0);

	rv = XAE_RD4(sc, XAE_MDIO_READ);

	return (rv);
}

static int
xae_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct xae_softc *sc;
	uint32_t mii;

	sc = device_get_softc(dev);

	if (mdio_wait(sc))
		return (1);

	mii = MDIO_CTRL_TX_OP_WRITE | MDIO_CTRL_INITIATE;
	mii |= (reg << MDIO_TX_REGAD_S);
	mii |= (phy << MDIO_TX_PHYAD_S);

	XAE_WR4(sc, XAE_MDIO_WRITE, val);
	XAE_WR4(sc, XAE_MDIO_CTRL, mii);

	if (mdio_wait(sc))
		return (1);

	return (0);
}

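/*
 * DP83867 fixup for the Xilinx VCU118 board: enable the undocumented
 * 6-wire SGMII mode and SGMII operation, tune the speed optimization
 * settings, then restart autonegotiation until the SGMII-side PHY at
 * address 1 reports completion.
 */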
static void
xae_phy_fixup(struct xae_softc *sc)
{
	uint32_t reg;

	do {
		WRITE_TI_EREG(sc, DP83867_SGMIICTL1, SGMIICTL1_SGMII_6W);
		PHY_WR(sc, DP83867_PHYCR, PHYCR_SGMII_EN);

		reg = PHY_RD(sc, DP83867_CFG2);
		reg &= ~CFG2_SPEED_OPT_ATTEMPT_CNT_M;
		reg |= (CFG2_SPEED_OPT_ATTEMPT_CNT_4);
		reg |= CFG2_INTERRUPT_POLARITY;
		reg |= CFG2_SPEED_OPT_ENHANCED_EN;
		reg |= CFG2_SPEED_OPT_10M_EN;
		PHY_WR(sc, DP83867_CFG2, reg);

		WRITE_TI_EREG(sc, DP83867_CFG4, CFG4_SGMII_TMR);
		PHY_WR(sc, MII_BMCR,
		    BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_RESET);
	} while (PHY1_RD(sc, MII_BMCR) == 0x0ffff);

	do {
		PHY1_WR(sc, MII_BMCR,
		    BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_STARTNEG);
		DELAY(40000);
	} while ((PHY1_RD(sc, MII_BMSR) & BMSR_ACOMP) == 0);
}

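/*
 * Resolve the "axistream-connected" FDT reference to the axidma device
 * and borrow its register resource for the AXIDMA_* accessors.
 */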
static int
get_axistream(struct xae_softc *sc)
{
	phandle_t node;
	pcell_t prop;
	size_t len;

	node = ofw_bus_get_node(sc->dev);
	len = OF_getencprop(node, "axistream-connected", &prop, sizeof(prop));
	if (len != sizeof(prop)) {
		device_printf(sc->dev,
		    "%s: Couldn't get axistream-connected prop.\n", __func__);
		return (ENXIO);
	}
	sc->dma_dev = OF_device_from_xref(prop);
	if (sc->dma_dev == NULL) {
		device_printf(sc->dev, "Could not get DMA device by xref.\n");
		return (ENXIO);
	}
	sc->dma_res = AXIDMA_MEMRES(sc->dma_dev);

	return (0);
}

static void
xae_txstart(if_t ifp)
{
	struct xae_softc *sc;

	sc = if_getsoftc(ifp);

	dprintf("%s\n", __func__);

	XAE_LOCK(sc);
	xae_txstart_locked(sc);
	XAE_UNLOCK(sc);
}

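/*
 * Allocate and initialize the TX and RX descriptor rings and their
 * buffer DMA maps, populate the RX ring with mbuf clusters, register
 * the per-channel interrupt callbacks, and start both DMA channels.
 */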
static int
xae_setup_dma(struct xae_softc *sc)
{
	struct axidma_desc *desc;
	uint32_t addr;
	uint32_t reg;
	struct mbuf *m;
	int error;
	int idx;

	sc->rxbuf_align = PAGE_SIZE;
	sc->txbuf_align = PAGE_SIZE;

	/*
	 * Set up TX descriptor ring, descriptors, and dma maps.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    AXI_DESC_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    TX_DESC_SIZE, 1,		/* maxsize, nsegments */
	    TX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->txdesc_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create TX ring DMA tag.\n");
		goto out;
	}

	error = bus_dmamem_alloc(sc->txdesc_tag, (void **)&sc->txdesc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->txdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate TX descriptor ring.\n");
		goto out;
	}

	error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map, sc->txdesc_ring,
	    TX_DESC_SIZE, xae_get1paddr, &sc->txdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load TX descriptor ring map.\n");
		goto out;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    sc->txbuf_align, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->txbuf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX buffer DMA tag.\n");
		goto out;
	}

	for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
		desc = &sc->txdesc_ring[idx];
		bzero(desc, sizeof(struct axidma_desc));
	}

	for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
		error = bus_dmamap_create(sc->txbuf_tag, 0,
		    &sc->txbuf_map[idx].map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create TX buffer DMA map.\n");
			goto out;
		}
		xae_setup_txdesc(sc, idx, 0, 0);
	}

	/*
	 * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    AXI_DESC_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RX_DESC_SIZE, 1,		/* maxsize, nsegments */
	    RX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxdesc_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create RX ring DMA tag.\n");
		goto out;
	}

	error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rxdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate RX descriptor ring.\n");
		goto out;
	}

	error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map, sc->rxdesc_ring,
	    RX_DESC_SIZE, xae_get1paddr, &sc->rxdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load RX descriptor ring map.\n");
		goto out;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxbuf_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create RX buf DMA tag.\n");
		goto out;
	}

	for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
		desc = &sc->rxdesc_ring[idx];
		bzero(desc, sizeof(struct axidma_desc));
	}

	for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
		error = bus_dmamap_create(sc->rxbuf_tag, 0,
		    &sc->rxbuf_map[idx].map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create RX buffer DMA map.\n");
			goto out;
		}
		if ((m = xae_alloc_mbufcl(sc)) == NULL) {
			device_printf(sc->dev, "Could not alloc mbuf\n");
			error = ENOMEM;
			goto out;
		}
		if ((error = xae_setup_rxbuf(sc, idx, m)) != 0) {
			device_printf(sc->dev,
			    "could not create new RX buffer.\n");
			goto out;
		}
	}

	if (AXIDMA_RESET(sc->dma_dev, AXIDMA_TX_CHAN) != 0) {
		device_printf(sc->dev, "Could not reset TX channel.\n");
		goto out;
	}
	if (AXIDMA_RESET(sc->dma_dev, AXIDMA_RX_CHAN) != 0) {
		device_printf(sc->dev, "Could not reset RX channel.\n");
		goto out;
	}
	if (AXIDMA_SETUP_CB(sc->dma_dev, AXIDMA_TX_CHAN, xae_intr_tx, sc)) {
		device_printf(sc->dev, "Could not setup TX intr callback.\n");
		goto out;
	}
	if (AXIDMA_SETUP_CB(sc->dma_dev, AXIDMA_RX_CHAN, xae_intr_rx, sc)) {
		device_printf(sc->dev, "Could not setup RX intr callback.\n");
		goto out;
	}

	dprintf("%s: tx desc base %lx\n", __func__, sc->txdesc_ring_paddr);
	AXIDMA_WR8(sc, AXI_CURDESC(AXIDMA_TX_CHAN), sc->txdesc_ring_paddr);
	reg = AXIDMA_RD4(sc, AXI_DMACR(AXIDMA_TX_CHAN));
	reg |= DMACR_IOC_IRQEN | DMACR_DLY_IRQEN | DMACR_ERR_IRQEN;
	AXIDMA_WR4(sc, AXI_DMACR(AXIDMA_TX_CHAN), reg);
	reg |= DMACR_RS;
	AXIDMA_WR4(sc, AXI_DMACR(AXIDMA_TX_CHAN), reg);

	AXIDMA_WR8(sc, AXI_CURDESC(AXIDMA_RX_CHAN), sc->rxdesc_ring_paddr);
	reg = AXIDMA_RD4(sc, AXI_DMACR(AXIDMA_RX_CHAN));
	reg |= DMACR_IOC_IRQEN | DMACR_DLY_IRQEN | DMACR_ERR_IRQEN;
	AXIDMA_WR4(sc, AXI_DMACR(AXIDMA_RX_CHAN), reg);
	reg |= DMACR_RS;
	AXIDMA_WR4(sc, AXI_DMACR(AXIDMA_RX_CHAN), reg);

	addr = sc->rxdesc_ring_paddr +
	    (RX_DESC_COUNT - 1) * sizeof(struct axidma_desc);
	dprintf("%s: new RX tail desc %x\n", __func__, addr);
	AXIDMA_WR8(sc, AXI_TAILDESC(AXIDMA_RX_CHAN), addr);

	return (0);

out:
	/* TODO: release resources. */
	return (-1);
}

static int
xae_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "xlnx,axi-ethernet-1.00.a"))
		return (ENXIO);

	device_set_desc(dev, "Xilinx AXI Ethernet");

	return (BUS_PROBE_DEFAULT);
}

static int
xae_attach(device_t dev)
{
	struct xae_softc *sc;
	if_t ifp;
	phandle_t node;
	uint32_t reg;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->rx_idx = 0;
	sc->tx_idx_head = sc->tx_idx_tail = 0;
	sc->txcount = 0;

	mtx_init(&sc->mtx, device_get_nameunit(sc->dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	node = ofw_bus_get_node(dev);

	/* Get our MAC address. */
	if (xae_get_hwaddr(sc, sc->macaddr)) {
		device_printf(sc->dev, "could not get MAC address\n");
		return (ENXIO);
	}

	/* DMA */
	error = get_axistream(sc);
	if (error != 0)
		return (error);

	if (bus_alloc_resources(dev, xae_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Setup interrupt handler. */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, xae_intr, sc, &sc->intr_cookie);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	/* Memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	device_printf(sc->dev, "Identification: %x\n", XAE_RD4(sc, XAE_IDENT));

	error = xae_setup_dma(sc);
	if (error != 0)
		return (error);

	/* Enable MII clock */
	reg = (MDIO_CLK_DIV_DEFAULT << MDIO_SETUP_CLK_DIV_S);
	reg |= MDIO_SETUP_ENABLE;
	XAE_WR4(sc, XAE_MDIO_SETUP, reg);
	if (mdio_wait(sc))
		return (ENXIO);

	callout_init_mtx(&sc->xae_callout, &sc->mtx, 0);

	/* Set up the ethernet interface. */
	sc->ifp = ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
	if_setcapenable(ifp, if_getcapabilities(ifp));
	if_setqflushfn(ifp, xae_qflush);
	if_setioctlfn(ifp, xae_ioctl);
	if_setstartfn(ifp, xae_txstart);
	if_setinitfn(ifp, xae_init);
	if_setsendqlen(ifp, TX_DESC_COUNT - 1);
	if_setsendqready(ifp);

	if (xae_get_phyaddr(node, &sc->phy_addr) != 0)
		return (ENXIO);

	/* Attach the mii driver. */
	error = mii_attach(dev, &sc->miibus, ifp, xae_media_change,
	    xae_media_status, BMSR_DEFCAPMASK, sc->phy_addr,
	    MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "PHY attach failed\n");
		return (ENXIO);
	}

	sc->mii_softc = device_get_softc(sc->miibus);

	/* Apply vcu118 workaround. */
	if (OF_getproplen(node, "xlnx,vcu118") >= 0)
		xae_phy_fixup(sc);

	/* All ready to run, attach the ethernet interface. */
	ether_ifattach(ifp, sc->macaddr);
	sc->is_attached = true;

	return (0);
}

static int
xae_detach(device_t dev)
{
	struct xae_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	KASSERT(mtx_initialized(&sc->mtx), ("%s: mutex not initialized",
	    device_get_nameunit(dev)));

	ifp = sc->ifp;

	/* Only cleanup if attach succeeded. */
	if (device_is_attached(dev)) {
		XAE_LOCK(sc);
		xae_stop_locked(sc);
		XAE_UNLOCK(sc);
		callout_drain(&sc->xae_callout);
		ether_ifdetach(ifp);
	}

	bus_generic_detach(dev);

	if (ifp != NULL)
		if_free(ifp);

	mtx_destroy(&sc->mtx);

	bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);

	bus_release_resources(dev, xae_spec, sc->res);

	return (0);
}

static void
xae_miibus_statchg(device_t dev)
{
	struct xae_softc *sc;
	struct mii_data *mii;
	uint32_t reg;

	/*
	 * Called by the MII bus driver when the PHY establishes
	 * link to set the MAC interface registers.
	 */

	sc = device_get_softc(dev);

	XAE_ASSERT_LOCKED(sc);

	mii = sc->mii_softc;

	if (mii->mii_media_status & IFM_ACTIVE)
		sc->link_is_up = true;
	else
		sc->link_is_up = false;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		reg = SPEED_1000;
		break;
	case IFM_100_TX:
		reg = SPEED_100;
		break;
	case IFM_10_T:
		reg = SPEED_10;
		break;
	case IFM_NONE:
		sc->link_is_up = false;
		return;
	default:
		sc->link_is_up = false;
		device_printf(dev, "Unsupported media %u\n",
		    IFM_SUBTYPE(mii->mii_media_active));
		return;
	}

	XAE_WR4(sc, XAE_SPEED, reg);
}

static device_method_t xae_methods[] = {
	DEVMETHOD(device_probe,		xae_probe),
	DEVMETHOD(device_attach,	xae_attach),
	DEVMETHOD(device_detach,	xae_detach),

	/* MII Interface */
	DEVMETHOD(miibus_readreg,	xae_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	xae_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	xae_miibus_statchg),
	{ 0, 0 }
};

driver_t xae_driver = {
	"xae",
	xae_methods,
	sizeof(struct xae_softc),
};

DRIVER_MODULE(xae, simplebus, xae_driver, 0, 0);
DRIVER_MODULE(miibus, xae, miibus_driver, 0, 0);

MODULE_DEPEND(xae, axidma, 1, 1, 1);
MODULE_DEPEND(xae, ether, 1, 1, 1);
MODULE_DEPEND(xae, miibus, 1, 1, 1);