/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/tiphy.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/xilinx/if_xaereg.h>
#include <dev/xilinx/if_xaevar.h>

#include <dev/xilinx/axidma.h>

#include "miibus_if.h"

#define	READ4(_sc, _reg) \
	bus_read_4((_sc)->res[0], _reg)
#define	WRITE4(_sc, _reg, _val) \
	bus_write_4((_sc)->res[0], _reg, _val)

#define	READ8(_sc, _reg) \
	bus_read_8((_sc)->res[0], _reg)
#define	WRITE8(_sc, _reg, _val) \
	bus_write_8((_sc)->res[0], _reg, _val)

#define	XAE_LOCK(sc)			mtx_lock(&(sc)->mtx)
#define	XAE_UNLOCK(sc)			mtx_unlock(&(sc)->mtx)
#define	XAE_ASSERT_LOCKED(sc)		mtx_assert(&(sc)->mtx, MA_OWNED)
#define	XAE_ASSERT_UNLOCKED(sc)		mtx_assert(&(sc)->mtx, MA_NOTOWNED)

#define XAE_DEBUG
#undef XAE_DEBUG

#ifdef XAE_DEBUG
#define dprintf(fmt, ...)  printf(fmt, ##__VA_ARGS__)
#else
#define dprintf(fmt, ...)
#endif

#define	RX_QUEUE_SIZE		64
#define	TX_QUEUE_SIZE		64
#define	NUM_RX_MBUF		16
#define	BUFRING_SIZE		8192
#define	MDIO_CLK_DIV_DEFAULT	29

#define	PHY1_RD(sc, _r)		\
	xae_miibus_read_reg(sc->dev, 1, _r)
#define	PHY1_WR(sc, _r, _v)	\
	xae_miibus_write_reg(sc->dev, 1, _r, _v)

#define	PHY_RD(sc, _r)		\
	xae_miibus_read_reg(sc->dev, sc->phy_addr, _r)
#define	PHY_WR(sc, _r, _v)	\
	xae_miibus_write_reg(sc->dev, sc->phy_addr, _r, _v)

/* Use this macro to access extended (MMD) PHY registers (above 0x1f). */
#define WRITE_TI_EREG(sc, reg, data) {					\
	PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK);			\
	PHY_WR(sc, MII_MMDAADR, reg);					\
	PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK | MMDACR_FN_DATANPI);	\
	PHY_WR(sc, MII_MMDAADR, data);					\
}

/* Undocumented; part of the Xilinx VCU118 workaround. */
#define	 CFG4_SGMII_TMR			0x160 /* bits 8:7 MUST be '10' */
#define	DP83867_SGMIICTL1		0xD3 /* undocumented register */
#define	 SGMIICTL1_SGMII_6W		(1 << 14) /* purpose unknown */

static struct resource_spec xae_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

static void xae_stop_locked(struct xae_softc *sc);
static void xae_setup_rxfilter(struct xae_softc *sc);

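/*
 * Allocate up to n mbuf clusters and hand them to the RX xDMA channel so the
 * DMA engine always has buffers available for incoming frames.
 */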
static int
xae_rx_enqueue(struct xae_softc *sc, uint32_t n)
{
	struct mbuf *m;
	int i;

	for (i = 0; i < n; i++) {
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			device_printf(sc->dev,
			    "%s: Can't alloc rx mbuf\n", __func__);
			return (-1);
		}

		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
		xdma_enqueue_mbuf(sc->xchan_rx, &m, 0, 4, 4, XDMA_DEV_TO_MEM);
	}

	return (0);
}

static int
xae_get_phyaddr(phandle_t node, int *phy_addr)
{
	phandle_t phy_node;
	pcell_t phy_handle, phy_reg;

	if (OF_getencprop(node, "phy-handle", (void *)&phy_handle,
	    sizeof(phy_handle)) <= 0)
		return (ENXIO);

	phy_node = OF_node_from_xref(phy_handle);

	if (OF_getencprop(phy_node, "reg", (void *)&phy_reg,
	    sizeof(phy_reg)) <= 0)
		return (ENXIO);

	*phy_addr = phy_reg;

	return (0);
}

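/*
 * xDMA TX completion callback: reclaim mbufs whose DMA transfer has finished,
 * count transmit errors, and clear IFF_DRV_OACTIVE so the stack can queue
 * more frames.
 */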
static int
xae_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
{
	xdma_transfer_status_t st;
	struct xae_softc *sc;
	if_t ifp;
	struct mbuf *m;
	int err;

	sc = arg;

	XAE_LOCK(sc);

	ifp = sc->ifp;

	for (;;) {
		err = xdma_dequeue_mbuf(sc->xchan_tx, &m, &st);
		if (err != 0) {
			break;
		}

		if (st.error != 0) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		}

		m_freem(m);
	}

	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	XAE_UNLOCK(sc);

	return (0);
}

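/*
 * xDMA RX completion callback: dequeue completed mbufs, hand good frames to
 * the network stack (dropping the lock around if_input()), and replenish the
 * RX channel with as many fresh buffers as were consumed.
 */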
static int
xae_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
{
	xdma_transfer_status_t st;
	struct xae_softc *sc;
	if_t ifp;
	struct mbuf *m;
	int err;
	uint32_t cnt_processed;

	sc = arg;

	dprintf("%s\n", __func__);

	XAE_LOCK(sc);

	ifp = sc->ifp;

	cnt_processed = 0;
	for (;;) {
		err = xdma_dequeue_mbuf(sc->xchan_rx, &m, &st);
		if (err != 0) {
			break;
		}
		cnt_processed++;

		if (st.error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			m_freem(m);
			continue;
		}

		m->m_pkthdr.len = m->m_len = st.transferred;
		m->m_pkthdr.rcvif = ifp;
		XAE_UNLOCK(sc);
		if_input(ifp, m);
		XAE_LOCK(sc);
	}

	xae_rx_enqueue(sc, cnt_processed);

	XAE_UNLOCK(sc);

	return (0);
}

static void
xae_qflush(if_t ifp)
{
}

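/*
 * Drain the buf_ring: move as many queued mbufs as possible onto the TX xDMA
 * channel, tap BPF listeners, and submit the batch to the DMA engine.
 */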
static int
xae_transmit_locked(if_t ifp)
{
	struct xae_softc *sc;
	struct mbuf *m;
	struct buf_ring *br;
	int error;
	int enq;

	dprintf("%s\n", __func__);

	sc = if_getsoftc(ifp);
	br = sc->br;

	enq = 0;

	while ((m = drbr_peek(ifp, br)) != NULL) {
		error = xdma_enqueue_mbuf(sc->xchan_tx,
		    &m, 0, 4, 4, XDMA_MEM_TO_DEV);
		if (error != 0) {
			/* No space available in the request queue yet. */
			drbr_putback(ifp, br, m);
			break;
		}

		drbr_advance(ifp, br);

		enq++;

		/* If anyone is interested give them a copy. */
		ETHER_BPF_MTAP(ifp, m);
	}

	if (enq > 0)
		xdma_queue_submit(sc->xchan_tx);

	return (0);
}

static int
xae_transmit(if_t ifp, struct mbuf *m)
{
	struct xae_softc *sc;
	int error;

	dprintf("%s\n", __func__);

	sc = if_getsoftc(ifp);

	XAE_LOCK(sc);

	error = drbr_enqueue(ifp, sc->br, m);
	if (error) {
		XAE_UNLOCK(sc);
		return (error);
	}

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING) {
		XAE_UNLOCK(sc);
		return (0);
	}

	if (!sc->link_is_up) {
		XAE_UNLOCK(sc);
		return (0);
	}

	error = xae_transmit_locked(ifp);

	XAE_UNLOCK(sc);

	return (error);
}

static void
xae_stop_locked(struct xae_softc *sc)
{
	if_t ifp;
	uint32_t reg;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));

	callout_stop(&sc->xae_callout);

	/* Stop the transmitter. */
	reg = READ4(sc, XAE_TC);
	reg &= ~TC_TX;
	WRITE4(sc, XAE_TC, reg);

	/* Stop the receiver. */
	reg = READ4(sc, XAE_RCW1);
	reg &= ~RCW1_RX;
	WRITE4(sc, XAE_RCW1, reg);
}

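/*
 * Read one hardware statistics counter and return its increment since the
 * previous call; the last value seen is cached in the softc so deltas and
 * counter wraparound can be handled.
 */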
static uint64_t
xae_stat(struct xae_softc *sc, int counter_id)
{
	uint64_t new, old;
	uint64_t delta;

	KASSERT(counter_id < XAE_MAX_COUNTERS,
		("counter %d is out of range", counter_id));

	new = READ8(sc, XAE_STATCNT(counter_id));
	old = sc->counters[counter_id];

	if (new >= old)
		delta = new - old;
	else
		delta = UINT64_MAX - old + new + 1;	/* Counter wrapped. */
	sc->counters[counter_id] = new;

	return (delta);
}

static void
xae_harvest_stats(struct xae_softc *sc)
{
	if_t ifp;

	ifp = sc->ifp;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, xae_stat(sc, RX_GOOD_FRAMES));
	if_inc_counter(ifp, IFCOUNTER_IMCASTS, xae_stat(sc, RX_GOOD_MCASTS));
	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    xae_stat(sc, RX_FRAME_CHECK_SEQ_ERROR) +
	    xae_stat(sc, RX_LEN_OUT_OF_RANGE) +
	    xae_stat(sc, RX_ALIGNMENT_ERRORS));

	if_inc_counter(ifp, IFCOUNTER_OBYTES, xae_stat(sc, TX_BYTES));
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, xae_stat(sc, TX_GOOD_FRAMES));
	if_inc_counter(ifp, IFCOUNTER_OMCASTS, xae_stat(sc, TX_GOOD_MCASTS));
	if_inc_counter(ifp, IFCOUNTER_OERRORS,
	    xae_stat(sc, TX_GOOD_UNDERRUN_ERRORS));

	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    xae_stat(sc, TX_SINGLE_COLLISION_FRAMES) +
	    xae_stat(sc, TX_MULTI_COLLISION_FRAMES) +
	    xae_stat(sc, TX_LATE_COLLISIONS) +
	    xae_stat(sc, TX_EXCESS_COLLISIONS));
}

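/*
 * Once-a-second housekeeping: harvest the hardware statistics, let the MII
 * layer poll the PHY, and kick the transmit queue when the link just came up.
 */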
static void
xae_tick(void *arg)
{
	struct xae_softc *sc;
	if_t ifp;
	int link_was_up;

	sc = arg;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
		return;

	/* Gather stats from hardware counters. */
	xae_harvest_stats(sc);

	/* Check the media status. */
	link_was_up = sc->link_is_up;
	mii_tick(sc->mii_softc);
	if (sc->link_is_up && !link_was_up)
		xae_transmit_locked(sc->ifp);

	/* Schedule another check one second from now. */
	callout_reset(&sc->xae_callout, hz, xae_tick, sc);
}

static void
xae_init_locked(struct xae_softc *sc)
{
	if_t ifp;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);

	xae_setup_rxfilter(sc);

	/* Enable the transmitter. */
	WRITE4(sc, XAE_TC, TC_TX);

	/* Enable the receiver. */
	WRITE4(sc, XAE_RCW1, RCW1_RX);

	/*
	 * Call mii_mediachg() which will call back into xae_miibus_statchg()
	 * to set up the remaining config registers based on current media.
	 */
	mii_mediachg(sc->mii_softc);
	callout_reset(&sc->xae_callout, hz, xae_tick, sc);
}

static void
xae_init(void *arg)
{
	struct xae_softc *sc;

	sc = arg;

	XAE_LOCK(sc);
	xae_init_locked(sc);
	XAE_UNLOCK(sc);
}

static void
xae_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct xae_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = sc->mii_softc;

	XAE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	XAE_UNLOCK(sc);
}

static int
xae_media_change_locked(struct xae_softc *sc)
{

	return (mii_mediachg(sc->mii_softc));
}

static int
xae_media_change(if_t ifp)
{
	struct xae_softc *sc;
	int error;

	sc = if_getsoftc(ifp);

	XAE_LOCK(sc);
	error = xae_media_change_locked(sc);
	XAE_UNLOCK(sc);

	return (error);
}

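/*
 * if_foreach_llmaddr() callback: program one multicast address into entry
 * 'cnt' of the core's frame filter table.
 */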
static u_int
xae_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct xae_softc *sc = arg;
	uint32_t reg;
	uint8_t *ma;

	if (cnt >= XAE_MULTICAST_TABLE_SIZE)
		return (1);

	ma = LLADDR(sdl);

	reg = READ4(sc, XAE_FFC) & 0xffffff00;
	reg |= cnt;
	WRITE4(sc, XAE_FFC, reg);

	reg = (ma[0]);
	reg |= (ma[1] << 8);
	reg |= (ma[2] << 16);
	reg |= (ma[3] << 24);
	WRITE4(sc, XAE_FFV(0), reg);

	reg = ma[4];
	reg |= ma[5] << 8;
	WRITE4(sc, XAE_FFV(1), reg);

	return (1);
}

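/*
 * Program the receive filters: promiscuous/allmulti mode, the multicast
 * filter entries and the unicast (station) address.
 */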
static void
xae_setup_rxfilter(struct xae_softc *sc)
{
	if_t ifp;
	uint32_t reg;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/*
	 * Set up the multicast (group) address filter.
	 */
	if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		reg = READ4(sc, XAE_FFC);
		reg |= FFC_PM;
		WRITE4(sc, XAE_FFC, reg);
	} else {
		reg = READ4(sc, XAE_FFC);
		reg &= ~FFC_PM;
		WRITE4(sc, XAE_FFC, reg);

		if_foreach_llmaddr(ifp, xae_write_maddr, sc);
	}

	/*
	 * Set the primary address.
	 */
	reg = sc->macaddr[0];
	reg |= (sc->macaddr[1] << 8);
	reg |= (sc->macaddr[2] << 16);
	reg |= (sc->macaddr[3] << 24);
	WRITE4(sc, XAE_UAW0, reg);

	reg = sc->macaddr[4];
	reg |= (sc->macaddr[5] << 8);
	WRITE4(sc, XAE_UAW1, reg);
}

static int
xae_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct xae_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int mask, error;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		XAE_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if ((if_getflags(ifp) ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					xae_setup_rxfilter(sc);
			} else {
				if (!sc->is_detaching)
					xae_init_locked(sc);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				xae_stop_locked(sc);
		}
		sc->if_flags = if_getflags(ifp);
		XAE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			XAE_LOCK(sc);
			xae_setup_rxfilter(sc);
			XAE_UNLOCK(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = sc->mii_softc;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			/* No work to do except acknowledge that the change took effect. */
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
xae_intr(void *arg)
{

}

static int
xae_get_hwaddr(struct xae_softc *sc, uint8_t *hwaddr)
{
	phandle_t node;
	int len;

	node = ofw_bus_get_node(sc->dev);

	/* Check if the property is present. */
	if ((len = OF_getproplen(node, "local-mac-address")) <= 0)
		return (EINVAL);

	if (len != ETHER_ADDR_LEN)
		return (EINVAL);

	OF_getprop(node, "local-mac-address", hwaddr,
	    ETHER_ADDR_LEN);

	return (0);
}

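/* Poll until the MDIO interface reports ready; 0 on success, 1 on timeout. */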
static int
mdio_wait(struct xae_softc *sc)
{
	uint32_t reg;
	int timeout;

	timeout = 200;

	do {
		reg = READ4(sc, XAE_MDIO_CTRL);
		if (reg & MDIO_CTRL_READY)
			return (0);
		DELAY(1);
	} while (timeout--);

	device_printf(sc->dev, "Failed to get MDIO ready\n");

	return (1);
}

static int
xae_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct xae_softc *sc;
	uint32_t mii;
	int rv;

	sc = device_get_softc(dev);

	if (mdio_wait(sc))
		return (0);

	mii = MDIO_CTRL_TX_OP_READ | MDIO_CTRL_INITIATE;
	mii |= (reg << MDIO_TX_REGAD_S);
	mii |= (phy << MDIO_TX_PHYAD_S);

	WRITE4(sc, XAE_MDIO_CTRL, mii);

	if (mdio_wait(sc))
		return (0);

	rv = READ4(sc, XAE_MDIO_READ);

	return (rv);
}

static int
xae_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct xae_softc *sc;
	uint32_t mii;

	sc = device_get_softc(dev);

	if (mdio_wait(sc))
		return (1);

	mii = MDIO_CTRL_TX_OP_WRITE | MDIO_CTRL_INITIATE;
	mii |= (reg << MDIO_TX_REGAD_S);
	mii |= (phy << MDIO_TX_PHYAD_S);

	WRITE4(sc, XAE_MDIO_WRITE, val);
	WRITE4(sc, XAE_MDIO_CTRL, mii);

	if (mdio_wait(sc))
		return (1);

	return (0);
}

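/*
 * Workaround for the TI DP83867 PHY on the Xilinx VCU118 board: force SGMII
 * mode, adjust the speed-optimization settings, and retry until the PHY at
 * address 1 responds and completes autonegotiation.
 */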
static void
xae_phy_fixup(struct xae_softc *sc)
{
	uint32_t reg;

	do {
		WRITE_TI_EREG(sc, DP83867_SGMIICTL1, SGMIICTL1_SGMII_6W);
		PHY_WR(sc, DP83867_PHYCR, PHYCR_SGMII_EN);

		reg = PHY_RD(sc, DP83867_CFG2);
		reg &= ~CFG2_SPEED_OPT_ATTEMPT_CNT_M;
		reg |= (CFG2_SPEED_OPT_ATTEMPT_CNT_4);
		reg |= CFG2_INTERRUPT_POLARITY;
		reg |= CFG2_SPEED_OPT_ENHANCED_EN;
		reg |= CFG2_SPEED_OPT_10M_EN;
		PHY_WR(sc, DP83867_CFG2, reg);

		WRITE_TI_EREG(sc, DP83867_CFG4, CFG4_SGMII_TMR);
		PHY_WR(sc, MII_BMCR,
		    BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_RESET);
	} while (PHY1_RD(sc, MII_BMCR) == 0x0ffff);

	do {
		PHY1_WR(sc, MII_BMCR,
		    BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_STARTNEG);
		DELAY(40000);
	} while ((PHY1_RD(sc, MII_BMSR) & BMSR_ACOMP) == 0);
}

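/* Look up the TX and RX xDMA controllers by the channel names "tx" and "rx". */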
static int
get_xdma_std(struct xae_softc *sc)
{

	sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
	if (sc->xdma_tx == NULL)
		return (ENXIO);

	sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
	if (sc->xdma_rx == NULL) {
		xdma_put(sc->xdma_tx);
		return (ENXIO);
	}

	return (0);
}

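/*
 * Fallback for device trees that point at the AXI DMA engine with an
 * "axistream-connected" reference instead of named DMA channels: both the TX
 * and RX channels are taken from that single device.
 */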
static int
get_xdma_axistream(struct xae_softc *sc)
{
	struct axidma_fdt_data *data;
	device_t dma_dev;
	phandle_t node;
	pcell_t prop;
	size_t len;

	node = ofw_bus_get_node(sc->dev);
	len = OF_getencprop(node, "axistream-connected", &prop, sizeof(prop));
	if (len != sizeof(prop)) {
		device_printf(sc->dev,
		    "%s: Couldn't get axistream-connected prop.\n", __func__);
		return (ENXIO);
	}
	dma_dev = OF_device_from_xref(prop);
	if (dma_dev == NULL) {
		device_printf(sc->dev, "Could not get DMA device by xref.\n");
		return (ENXIO);
	}

	sc->xdma_tx = xdma_get(sc->dev, dma_dev);
	if (sc->xdma_tx == NULL) {
		device_printf(sc->dev, "Could not find DMA controller.\n");
		return (ENXIO);
	}
	data = malloc(sizeof(struct axidma_fdt_data),
	    M_DEVBUF, (M_WAITOK | M_ZERO));
	data->id = AXIDMA_TX_CHAN;
	sc->xdma_tx->data = data;

	sc->xdma_rx = xdma_get(sc->dev, dma_dev);
	if (sc->xdma_rx == NULL) {
		device_printf(sc->dev, "Could not find DMA controller.\n");
		return (ENXIO);
	}
	data = malloc(sizeof(struct axidma_fdt_data),
	    M_DEVBUF, (M_WAITOK | M_ZERO));
	data->id = AXIDMA_RX_CHAN;
	sc->xdma_rx->data = data;

	return (0);
}

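/*
 * Acquire the xDMA engines (standard lookup first, then the
 * "axistream-connected" fallback), allocate a virtual channel with a
 * completion interrupt for each direction, and configure the scatter-gather
 * limits of both channels.
 */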
static int
setup_xdma(struct xae_softc *sc)
{
	device_t dev;
	vmem_t *vmem;
	int error;

	dev = sc->dev;

	/* Get the xDMA controllers. */
	error = get_xdma_std(sc);

	if (error) {
		device_printf(sc->dev,
		    "Falling back to the axistream-connected property\n");
		error = get_xdma_axistream(sc);
	}

	if (error) {
		device_printf(dev, "Could not find xDMA controllers.\n");
		return (ENXIO);
	}

	/* Alloc xDMA TX virtual channel. */
	sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, 0);
	if (sc->xchan_tx == NULL) {
		device_printf(dev, "Can't alloc virtual DMA TX channel.\n");
		return (ENXIO);
	}

	/* Set up the TX interrupt handler. */
	error = xdma_setup_intr(sc->xchan_tx, 0,
	    xae_xdma_tx_intr, sc, &sc->ih_tx);
	if (error) {
		device_printf(sc->dev,
		    "Can't set up the xDMA TX interrupt handler.\n");
		return (ENXIO);
	}

	/* Alloc xDMA RX virtual channel. */
	sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, 0);
	if (sc->xchan_rx == NULL) {
		device_printf(dev, "Can't alloc virtual DMA RX channel.\n");
		return (ENXIO);
	}

	/* Set up the RX interrupt handler. */
	error = xdma_setup_intr(sc->xchan_rx, XDMA_INTR_NET,
	    xae_xdma_rx_intr, sc, &sc->ih_rx);
	if (error) {
		device_printf(sc->dev,
		    "Can't set up the xDMA RX interrupt handler.\n");
		return (ENXIO);
	}

	/* Set up the bounce buffer. */
	vmem = xdma_get_memory(dev);
	if (vmem) {
		xchan_set_memory(sc->xchan_tx, vmem);
		xchan_set_memory(sc->xchan_rx, vmem);
	}

	xdma_prep_sg(sc->xchan_tx,
	    TX_QUEUE_SIZE,	/* xchan requests queue size */
	    MCLBYTES,	/* maxsegsize */
	    8,		/* maxnsegs */
	    16,		/* alignment */
	    0,		/* boundary */
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR);

	xdma_prep_sg(sc->xchan_rx,
	    RX_QUEUE_SIZE,	/* xchan requests queue size */
	    MCLBYTES,	/* maxsegsize */
	    1,		/* maxnsegs */
	    16,		/* alignment */
	    0,		/* boundary */
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR);

	return (0);
}

static int
xae_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "xlnx,axi-ethernet-1.00.a"))
		return (ENXIO);

	device_set_desc(dev, "Xilinx AXI Ethernet");

	return (BUS_PROBE_DEFAULT);
}

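/*
 * Attach: set up xDMA, map the registers, enable the MDIO interface, create
 * and configure the ifnet, attach the MII bus, and pre-fill the RX channel
 * with buffers.
 */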
static int
xae_attach(device_t dev)
{
	struct xae_softc *sc;
	if_t ifp;
	phandle_t node;
	uint32_t reg;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	node = ofw_bus_get_node(dev);

	if (setup_xdma(sc) != 0) {
		device_printf(dev, "Could not set up xDMA.\n");
		return (ENXIO);
	}

	mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
	    MTX_NETWORK_LOCK, MTX_DEF);

	sc->br = buf_ring_alloc(BUFRING_SIZE, M_DEVBUF,
	    M_NOWAIT, &sc->mtx);
	if (sc->br == NULL)
		return (ENOMEM);

	if (bus_alloc_resources(dev, xae_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	device_printf(sc->dev, "Identification: %x\n",
	    READ4(sc, XAE_IDENT));

	/* Get the MAC address. */
	if (xae_get_hwaddr(sc, sc->macaddr)) {
		device_printf(sc->dev, "could not get the MAC address\n");
		return (ENXIO);
	}

	/* Enable MII clock */
	reg = (MDIO_CLK_DIV_DEFAULT << MDIO_SETUP_CLK_DIV_S);
	reg |= MDIO_SETUP_ENABLE;
	WRITE4(sc, XAE_MDIO_SETUP, reg);
	if (mdio_wait(sc))
		return (ENXIO);

	callout_init_mtx(&sc->xae_callout, &sc->mtx, 0);

	/* Set up the interrupt handler. */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, xae_intr, sc, &sc->intr_cookie);
	if (error != 0) {
		device_printf(dev, "could not set up the interrupt handler.\n");
		return (ENXIO);
	}

	/* Set up the ethernet interface. */
	sc->ifp = ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifp.\n");
		return (ENXIO);
	}

	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
	if_setcapenable(ifp, if_getcapabilities(ifp));
	if_settransmitfn(ifp, xae_transmit);
	if_setqflushfn(ifp, xae_qflush);
	if_setioctlfn(ifp, xae_ioctl);
	if_setinitfn(ifp, xae_init);
	if_setsendqlen(ifp, TX_DESC_COUNT - 1);
	if_setsendqready(ifp);

	if (xae_get_phyaddr(node, &sc->phy_addr) != 0)
		return (ENXIO);

	/* Attach the mii driver. */
	error = mii_attach(dev, &sc->miibus, ifp, xae_media_change,
	    xae_media_status, BMSR_DEFCAPMASK, sc->phy_addr,
	    MII_OFFSET_ANY, 0);

	if (error != 0) {
		device_printf(dev, "PHY attach failed\n");
		return (ENXIO);
	}
	sc->mii_softc = device_get_softc(sc->miibus);

	/* Apply the VCU118 workaround. */
	if (OF_getproplen(node, "xlnx,vcu118") >= 0)
		xae_phy_fixup(sc);

	/* All ready to run, attach the ethernet interface. */
	ether_ifattach(ifp, sc->macaddr);
	sc->is_attached = true;

	xae_rx_enqueue(sc, NUM_RX_MBUF);
	xdma_queue_submit(sc->xchan_rx);

	return (0);
}

static int
xae_detach(device_t dev)
{
	struct xae_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	KASSERT(mtx_initialized(&sc->mtx), ("%s: mutex not initialized",
	    device_get_nameunit(dev)));

	ifp = sc->ifp;

	/* Only clean up if attach succeeded. */
	if (device_is_attached(dev)) {
		XAE_LOCK(sc);
		xae_stop_locked(sc);
		XAE_UNLOCK(sc);
		callout_drain(&sc->xae_callout);
		ether_ifdetach(ifp);
	}

	if (sc->miibus != NULL)
		device_delete_child(dev, sc->miibus);

	if (ifp != NULL)
		if_free(ifp);

	mtx_destroy(&sc->mtx);

	bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);

	bus_release_resources(dev, xae_spec, sc->res);

	xdma_channel_free(sc->xchan_tx);
	xdma_channel_free(sc->xchan_rx);
	xdma_put(sc->xdma_tx);
	xdma_put(sc->xdma_rx);

	return (0);
}

static void
xae_miibus_statchg(device_t dev)
{
	struct xae_softc *sc;
	struct mii_data *mii;
	uint32_t reg;

	/*
	 * Called by the MII bus driver when the PHY establishes
	 * link to set the MAC interface registers.
	 */

	sc = device_get_softc(dev);

	XAE_ASSERT_LOCKED(sc);

	mii = sc->mii_softc;

	if (mii->mii_media_status & IFM_ACTIVE)
		sc->link_is_up = true;
	else
		sc->link_is_up = false;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		reg = SPEED_1000;
		break;
	case IFM_100_TX:
		reg = SPEED_100;
		break;
	case IFM_10_T:
		reg = SPEED_10;
		break;
	case IFM_NONE:
		sc->link_is_up = false;
		return;
	default:
		sc->link_is_up = false;
		device_printf(dev, "Unsupported media %u\n",
		    IFM_SUBTYPE(mii->mii_media_active));
		return;
	}

	WRITE4(sc, XAE_SPEED, reg);
}

static device_method_t xae_methods[] = {
	DEVMETHOD(device_probe,		xae_probe),
	DEVMETHOD(device_attach,	xae_attach),
	DEVMETHOD(device_detach,	xae_detach),

	/* MII Interface */
	DEVMETHOD(miibus_readreg,	xae_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	xae_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	xae_miibus_statchg),
	{ 0, 0 }
};

driver_t xae_driver = {
	"xae",
	xae_methods,
	sizeof(struct xae_softc),
};

DRIVER_MODULE(xae, simplebus, xae_driver, 0, 0);
DRIVER_MODULE(miibus, xae, miibus_driver, 0, 0);

MODULE_DEPEND(xae, ether, 1, 1, 1);
MODULE_DEPEND(xae, miibus, 1, 1, 1);