xref: /freebsd/sys/dev/xilinx/if_xae.c (revision 25ecdc7d52770caf1c9b44b5ec11f468f6b636f3)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2019 Ruslan Bukin <br@bsdpad.com>
5  *
6  * This software was developed by SRI International and the University of
7  * Cambridge Computer Laboratory (Department of Computer Science and
8  * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
9  * DARPA SSITH research programme.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/bus.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/malloc.h>
42 #include <sys/mbuf.h>
43 #include <sys/module.h>
44 #include <sys/mutex.h>
45 #include <sys/rman.h>
46 #include <sys/socket.h>
47 #include <sys/sockio.h>
48 
49 #include <net/bpf.h>
50 #include <net/if.h>
51 #include <net/ethernet.h>
52 #include <net/if_dl.h>
53 #include <net/if_media.h>
54 #include <net/if_types.h>
55 #include <net/if_var.h>
56 
57 #include <machine/bus.h>
58 
59 #include <dev/mii/mii.h>
60 #include <dev/mii/miivar.h>
61 #include <dev/mii/tiphy.h>
62 #include <dev/ofw/ofw_bus.h>
63 #include <dev/ofw/ofw_bus_subr.h>
64 #include <dev/xilinx/if_xaereg.h>
65 #include <dev/xilinx/if_xaevar.h>
66 
67 #include <dev/xilinx/axidma.h>
68 
69 #include "miibus_if.h"
70 
71 #define	READ4(_sc, _reg) \
72 	bus_read_4((_sc)->res[0], _reg)
73 #define	WRITE4(_sc, _reg, _val) \
74 	bus_write_4((_sc)->res[0], _reg, _val)
75 
76 #define	READ8(_sc, _reg) \
77 	bus_read_8((_sc)->res[0], _reg)
78 #define	WRITE8(_sc, _reg, _val) \
79 	bus_write_8((_sc)->res[0], _reg, _val)
80 
81 #define	XAE_LOCK(sc)			mtx_lock(&(sc)->mtx)
82 #define	XAE_UNLOCK(sc)			mtx_unlock(&(sc)->mtx)
83 #define	XAE_ASSERT_LOCKED(sc)		mtx_assert(&(sc)->mtx, MA_OWNED)
84 #define	XAE_ASSERT_UNLOCKED(sc)		mtx_assert(&(sc)->mtx, MA_NOTOWNED)
85 
86 #define XAE_DEBUG
87 #undef XAE_DEBUG
88 
89 #ifdef XAE_DEBUG
90 #define dprintf(fmt, ...)  printf(fmt, ##__VA_ARGS__)
91 #else
92 #define dprintf(fmt, ...)
93 #endif
94 
95 #define	RX_QUEUE_SIZE		64
96 #define	TX_QUEUE_SIZE		64
97 #define	NUM_RX_MBUF		16
98 #define	BUFRING_SIZE		8192
99 #define	MDIO_CLK_DIV_DEFAULT	29
100 
101 #define	PHY1_RD(sc, _r)		\
102 	xae_miibus_read_reg(sc->dev, 1, _r)
103 #define	PHY1_WR(sc, _r, _v)	\
104 	xae_miibus_write_reg(sc->dev, 1, _r, _v)
105 
106 #define	PHY_RD(sc, _r)		\
107 	xae_miibus_read_reg(sc->dev, sc->phy_addr, _r)
108 #define	PHY_WR(sc, _r, _v)	\
109 	xae_miibus_write_reg(sc->dev, sc->phy_addr, _r, _v)
110 
111 /* Use this macro to access regs > 0x1f */
112 #define WRITE_TI_EREG(sc, reg, data) {					\
113 	PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK);			\
114 	PHY_WR(sc, MII_MMDAADR, reg);					\
115 	PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK | MMDACR_FN_DATANPI);	\
116 	PHY_WR(sc, MII_MMDAADR, data);					\
117 }
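
/*
 * The write sequence above uses the indirect MMD access procedure (the
 * Clause 22 MII_MMDACR/MII_MMDAADR registers): select the MMD device
 * address, latch the target register address, switch the access function
 * to "data, no post increment", then write the data word.
 */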
118 
119 /* Undocumented DP83867 settings; Xilinx VCU118 workaround. */
120 #define	 CFG4_SGMII_TMR			0x160 /* bits 8:7 MUST be '10' */
121 #define	DP83867_SGMIICTL1		0xD3 /* undocumented register */
122 #define	 SGMIICTL1_SGMII_6W		(1 << 14) /* function unknown */
123 
124 static struct resource_spec xae_spec[] = {
125 	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
126 	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
127 	{ -1, 0 }
128 };
129 
130 static void xae_stop_locked(struct xae_softc *sc);
131 static void xae_setup_rxfilter(struct xae_softc *sc);
132 
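/*
 * Allocate n mbuf clusters and queue them on the RX xDMA channel as
 * device-to-memory requests.  Used at attach time to pre-fill the ring
 * and from the RX interrupt handler to replenish consumed buffers.
 */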
133 static int
134 xae_rx_enqueue(struct xae_softc *sc, uint32_t n)
135 {
136 	struct mbuf *m;
137 	int i;
138 
139 	for (i = 0; i < n; i++) {
140 		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
141 		if (m == NULL) {
142 			device_printf(sc->dev,
143 			    "%s: Can't alloc rx mbuf\n", __func__);
144 			return (-1);
145 		}
146 
147 		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
148 		xdma_enqueue_mbuf(sc->xchan_rx, &m, 0, 4, 4, XDMA_DEV_TO_MEM);
149 	}
150 
151 	return (0);
152 }
153 
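/*
 * Resolve the PHY address from the "phy-handle" property: follow the
 * xref to the PHY node and read its "reg" property.
 */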
154 static int
155 xae_get_phyaddr(phandle_t node, int *phy_addr)
156 {
157 	phandle_t phy_node;
158 	pcell_t phy_handle, phy_reg;
159 
160 	if (OF_getencprop(node, "phy-handle", (void *)&phy_handle,
161 	    sizeof(phy_handle)) <= 0)
162 		return (ENXIO);
163 
164 	phy_node = OF_node_from_xref(phy_handle);
165 
166 	if (OF_getencprop(phy_node, "reg", (void *)&phy_reg,
167 	    sizeof(phy_reg)) <= 0)
168 		return (ENXIO);
169 
170 	*phy_addr = phy_reg;
171 
172 	return (0);
173 }
174 
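/*
 * TX completion callback, invoked by the xDMA framework.  Dequeue each
 * completed transmit mbuf, count transmit errors, free the mbuf, and
 * clear IFF_DRV_OACTIVE so xae_transmit() can queue more work.
 */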
175 static int
176 xae_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
177 {
178 	xdma_transfer_status_t st;
179 	struct xae_softc *sc;
180 	struct ifnet *ifp;
181 	struct mbuf *m;
182 	int err;
183 
184 	sc = arg;
185 
186 	XAE_LOCK(sc);
187 
188 	ifp = sc->ifp;
189 
190 	for (;;) {
191 		err = xdma_dequeue_mbuf(sc->xchan_tx, &m, &st);
192 		if (err != 0) {
193 			break;
194 		}
195 
196 		if (st.error != 0) {
197 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
198 		}
199 
200 		m_freem(m);
201 	}
202 
203 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
204 
205 	XAE_UNLOCK(sc);
206 
207 	return (0);
208 }
209 
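/*
 * RX completion callback, invoked by the xDMA framework.  Each completed
 * buffer is handed to the network stack via if_input() with the lock
 * dropped; the same number of fresh mbufs is then queued back on the
 * RX channel.
 */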
210 static int
211 xae_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
212 {
213 	xdma_transfer_status_t st;
214 	struct xae_softc *sc;
215 	struct ifnet *ifp;
216 	struct mbuf *m;
217 	int err;
218 	uint32_t cnt_processed;
219 
220 	sc = arg;
221 
222 	dprintf("%s\n", __func__);
223 
224 	XAE_LOCK(sc);
225 
226 	ifp = sc->ifp;
227 
228 	cnt_processed = 0;
229 	for (;;) {
230 		err = xdma_dequeue_mbuf(sc->xchan_rx, &m, &st);
231 		if (err != 0) {
232 			break;
233 		}
234 		cnt_processed++;
235 
236 		if (st.error != 0) {
237 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
238 			m_freem(m);
239 			continue;
240 		}
241 
242 		m->m_pkthdr.len = m->m_len = st.transferred;
243 		m->m_pkthdr.rcvif = ifp;
244 		XAE_UNLOCK(sc);
245 		(*ifp->if_input)(ifp, m);
246 		XAE_LOCK(sc);
247 	}
248 
249 	xae_rx_enqueue(sc, cnt_processed);
250 
251 	XAE_UNLOCK(sc);
252 
253 	return (0);
254 }
255 
256 static void
257 xae_qflush(struct ifnet *ifp)
258 {
259 	struct xae_softc *sc;
260 
261 	sc = ifp->if_softc;
262 }
263 
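/*
 * Drain the buf_ring onto the TX xDMA channel.  If the channel request
 * queue is full, the mbuf is put back and transmission is retried on a
 * later call; the queue is submitted to the DMA engine once per call if
 * anything was enqueued.
 */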
264 static int
265 xae_transmit_locked(struct ifnet *ifp)
266 {
267 	struct xae_softc *sc;
268 	struct mbuf *m;
269 	struct buf_ring *br;
270 	int error;
271 	int enq;
272 
273 	dprintf("%s\n", __func__);
274 
275 	sc = ifp->if_softc;
276 	br = sc->br;
277 
278 	enq = 0;
279 
280 	while ((m = drbr_peek(ifp, br)) != NULL) {
281 		error = xdma_enqueue_mbuf(sc->xchan_tx,
282 		    &m, 0, 4, 4, XDMA_MEM_TO_DEV);
283 		if (error != 0) {
284 			/* No space in request queue available yet. */
285 			drbr_putback(ifp, br, m);
286 			break;
287 		}
288 
289 		drbr_advance(ifp, br);
290 
291 		enq++;
292 
293 		/* If anyone is interested give them a copy. */
294 		ETHER_BPF_MTAP(ifp, m);
295 	}
296 
297 	if (enq > 0)
298 		xdma_queue_submit(sc->xchan_tx);
299 
300 	return (0);
301 }
302 
303 static int
304 xae_transmit(struct ifnet *ifp, struct mbuf *m)
305 {
306 	struct xae_softc *sc;
307 	int error;
308 
309 	dprintf("%s\n", __func__);
310 
311 	sc = ifp->if_softc;
312 
313 	XAE_LOCK(sc);
314 
315 	error = drbr_enqueue(ifp, sc->br, m);
316 	if (error) {
317 		XAE_UNLOCK(sc);
318 		return (error);
319 	}
320 
321 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
322 	    IFF_DRV_RUNNING) {
323 		XAE_UNLOCK(sc);
324 		return (0);
325 	}
326 
327 	if (!sc->link_is_up) {
328 		XAE_UNLOCK(sc);
329 		return (0);
330 	}
331 
332 	error = xae_transmit_locked(ifp);
333 
334 	XAE_UNLOCK(sc);
335 
336 	return (error);
337 }
338 
339 static void
340 xae_stop_locked(struct xae_softc *sc)
341 {
342 	struct ifnet *ifp;
343 	uint32_t reg;
344 
345 	XAE_ASSERT_LOCKED(sc);
346 
347 	ifp = sc->ifp;
348 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
349 
350 	callout_stop(&sc->xae_callout);
351 
352 	/* Stop the transmitter */
353 	reg = READ4(sc, XAE_TC);
354 	reg &= ~TC_TX;
355 	WRITE4(sc, XAE_TC, reg);
356 
357 	/* Stop the receiver. */
358 	reg = READ4(sc, XAE_RCW1);
359 	reg &= ~RCW1_RX;
360 	WRITE4(sc, XAE_RCW1, reg);
361 }
362 
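/*
 * Read a 64-bit hardware statistics counter and return its increment
 * since the previous read, accounting for counter wrap.  The last value
 * is cached in sc->counters[].
 */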
363 static uint64_t
364 xae_stat(struct xae_softc *sc, int counter_id)
365 {
366 	uint64_t new, old;
367 	uint64_t delta;
368 
369 	KASSERT(counter_id < XAE_MAX_COUNTERS,
370 		("counter %d is out of range", counter_id));
371 
372 	new = READ8(sc, XAE_STATCNT(counter_id));
373 	old = sc->counters[counter_id];
374 
375 	if (new >= old)
376 		delta = new - old;
377 	else
378 		delta = (UINT64_MAX - old) + new + 1; /* counter wrapped */
379 	sc->counters[counter_id] = new;
380 
381 	return (delta);
382 }
383 
384 static void
385 xae_harvest_stats(struct xae_softc *sc)
386 {
387 	struct ifnet *ifp;
388 
389 	ifp = sc->ifp;
390 
391 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, xae_stat(sc, RX_GOOD_FRAMES));
392 	if_inc_counter(ifp, IFCOUNTER_IMCASTS, xae_stat(sc, RX_GOOD_MCASTS));
393 	if_inc_counter(ifp, IFCOUNTER_IERRORS,
394 	    xae_stat(sc, RX_FRAME_CHECK_SEQ_ERROR) +
395 	    xae_stat(sc, RX_LEN_OUT_OF_RANGE) +
396 	    xae_stat(sc, RX_ALIGNMENT_ERRORS));
397 
398 	if_inc_counter(ifp, IFCOUNTER_OBYTES, xae_stat(sc, TX_BYTES));
399 	if_inc_counter(ifp, IFCOUNTER_OPACKETS, xae_stat(sc, TX_GOOD_FRAMES));
400 	if_inc_counter(ifp, IFCOUNTER_OMCASTS, xae_stat(sc, TX_GOOD_MCASTS));
401 	if_inc_counter(ifp, IFCOUNTER_OERRORS,
402 	    xae_stat(sc, TX_GOOD_UNDERRUN_ERRORS));
403 
404 	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
405 	    xae_stat(sc, TX_SINGLE_COLLISION_FRAMES) +
406 	    xae_stat(sc, TX_MULTI_COLLISION_FRAMES) +
407 	    xae_stat(sc, TX_LATE_COLLISIONS) +
408 	    xae_stat(sc, TX_EXCESS_COLLISIONS));
409 }
410 
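/*
 * Per-second callout: harvest the hardware statistics counters, poll the
 * PHY via mii_tick(), and kick the transmit queue if the link has just
 * come up.
 */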
411 static void
412 xae_tick(void *arg)
413 {
414 	struct xae_softc *sc;
415 	struct ifnet *ifp;
416 	int link_was_up;
417 
418 	sc = arg;
419 
420 	XAE_ASSERT_LOCKED(sc);
421 
422 	ifp = sc->ifp;
423 
424 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
425 		return;
426 
427 	/* Gather stats from hardware counters. */
428 	xae_harvest_stats(sc);
429 
430 	/* Check the media status. */
431 	link_was_up = sc->link_is_up;
432 	mii_tick(sc->mii_softc);
433 	if (sc->link_is_up && !link_was_up)
434 		xae_transmit_locked(sc->ifp);
435 
436 	/* Schedule another check one second from now. */
437 	callout_reset(&sc->xae_callout, hz, xae_tick, sc);
438 }
439 
440 static void
441 xae_init_locked(struct xae_softc *sc)
442 {
443 	struct ifnet *ifp;
444 
445 	XAE_ASSERT_LOCKED(sc);
446 
447 	ifp = sc->ifp;
448 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
449 		return;
450 
451 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
452 
453 	xae_setup_rxfilter(sc);
454 
455 	/* Enable the transmitter */
456 	WRITE4(sc, XAE_TC, TC_TX);
457 
458 	/* Enable the receiver. */
459 	WRITE4(sc, XAE_RCW1, RCW1_RX);
460 
461 	/*
462 	 * Call mii_mediachg() which will call back into xae_miibus_statchg()
463 	 * to set up the remaining config registers based on current media.
464 	 */
465 	mii_mediachg(sc->mii_softc);
466 	callout_reset(&sc->xae_callout, hz, xae_tick, sc);
467 }
468 
469 static void
470 xae_init(void *arg)
471 {
472 	struct xae_softc *sc;
473 
474 	sc = arg;
475 
476 	XAE_LOCK(sc);
477 	xae_init_locked(sc);
478 	XAE_UNLOCK(sc);
479 }
480 
481 static void
482 xae_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
483 {
484 	struct xae_softc *sc;
485 	struct mii_data *mii;
486 
487 	sc = ifp->if_softc;
488 	mii = sc->mii_softc;
489 
490 	XAE_LOCK(sc);
491 	mii_pollstat(mii);
492 	ifmr->ifm_active = mii->mii_media_active;
493 	ifmr->ifm_status = mii->mii_media_status;
494 	XAE_UNLOCK(sc);
495 }
496 
497 static int
498 xae_media_change_locked(struct xae_softc *sc)
499 {
500 
501 	return (mii_mediachg(sc->mii_softc));
502 }
503 
504 static int
505 xae_media_change(struct ifnet * ifp)
506 {
507 	struct xae_softc *sc;
508 	int error;
509 
510 	sc = ifp->if_softc;
511 
512 	XAE_LOCK(sc);
513 	error = xae_media_change_locked(sc);
514 	XAE_UNLOCK(sc);
515 
516 	return (error);
517 }
518 
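/*
 * if_foreach_llmaddr() callback: program one link-level multicast address
 * into frame-filter table entry 'cnt'.  Addresses beyond
 * XAE_MULTICAST_TABLE_SIZE are silently dropped from the filter.
 */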
519 static u_int
520 xae_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
521 {
522 	struct xae_softc *sc = arg;
523 	uint32_t reg;
524 	uint8_t *ma;
525 
526 	if (cnt >= XAE_MULTICAST_TABLE_SIZE)
527 		return (1);
528 
529 	ma = LLADDR(sdl);
530 
531 	reg = READ4(sc, XAE_FFC) & 0xffffff00;
532 	reg |= cnt;
533 	WRITE4(sc, XAE_FFC, reg);
534 
535 	reg = (ma[0]);
536 	reg |= (ma[1] << 8);
537 	reg |= (ma[2] << 16);
538 	reg |= (ma[3] << 24);
539 	WRITE4(sc, XAE_FFV(0), reg);
540 
541 	reg = ma[4];
542 	reg |= ma[5] << 8;
543 	WRITE4(sc, XAE_FFV(1), reg);
544 
545 	return (1);
546 }
547 
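/*
 * Program the receive filter: promiscuous/all-multicast mode sets the
 * FFC_PM bit, otherwise the multicast table is reloaded from the
 * interface address list.  The unicast (station) address is written to
 * UAW0/UAW1 in either case.
 */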
548 static void
549 xae_setup_rxfilter(struct xae_softc *sc)
550 {
551 	struct ifnet *ifp;
552 	uint32_t reg;
553 
554 	XAE_ASSERT_LOCKED(sc);
555 
556 	ifp = sc->ifp;
557 
558 	/*
559 	 * Set the multicast (group) filter hash.
560 	 */
561 	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
562 		reg = READ4(sc, XAE_FFC);
563 		reg |= FFC_PM;
564 		WRITE4(sc, XAE_FFC, reg);
565 	} else {
566 		reg = READ4(sc, XAE_FFC);
567 		reg &= ~FFC_PM;
568 		WRITE4(sc, XAE_FFC, reg);
569 
570 		if_foreach_llmaddr(ifp, xae_write_maddr, sc);
571 	}
572 
573 	/*
574 	 * Set the primary address.
575 	 */
576 	reg = sc->macaddr[0];
577 	reg |= (sc->macaddr[1] << 8);
578 	reg |= (sc->macaddr[2] << 16);
579 	reg |= (sc->macaddr[3] << 24);
580 	WRITE4(sc, XAE_UAW0, reg);
581 
582 	reg = sc->macaddr[4];
583 	reg |= (sc->macaddr[5] << 8);
584 	WRITE4(sc, XAE_UAW1, reg);
585 }
586 
587 static int
588 xae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
589 {
590 	struct xae_softc *sc;
591 	struct mii_data *mii;
592 	struct ifreq *ifr;
593 	int mask, error;
594 
595 	sc = ifp->if_softc;
596 	ifr = (struct ifreq *)data;
597 
598 	error = 0;
599 	switch (cmd) {
600 	case SIOCSIFFLAGS:
601 		XAE_LOCK(sc);
602 		if (ifp->if_flags & IFF_UP) {
603 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
604 				if ((ifp->if_flags ^ sc->if_flags) &
605 				    (IFF_PROMISC | IFF_ALLMULTI))
606 					xae_setup_rxfilter(sc);
607 			} else {
608 				if (!sc->is_detaching)
609 					xae_init_locked(sc);
610 			}
611 		} else {
612 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
613 				xae_stop_locked(sc);
614 		}
615 		sc->if_flags = ifp->if_flags;
616 		XAE_UNLOCK(sc);
617 		break;
618 	case SIOCADDMULTI:
619 	case SIOCDELMULTI:
620 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
621 			XAE_LOCK(sc);
622 			xae_setup_rxfilter(sc);
623 			XAE_UNLOCK(sc);
624 		}
625 		break;
626 	case SIOCSIFMEDIA:
627 	case SIOCGIFMEDIA:
628 		mii = sc->mii_softc;
629 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
630 		break;
631 	case SIOCSIFCAP:
632 		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
633 		if (mask & IFCAP_VLAN_MTU) {
634 			/* No work to do except acknowledge the change took */
635 			ifp->if_capenable ^= IFCAP_VLAN_MTU;
636 		}
637 		break;
638 
639 	default:
640 		error = ether_ioctl(ifp, cmd, data);
641 		break;
642 	}
643 
644 	return (error);
645 }
646 
647 static void
648 xae_intr(void *arg)
649 {
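	/*
	 * Nothing to do here: TX/RX completion is handled by the xDMA
	 * channel interrupt handlers set up in setup_xdma().
	 */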
650 
651 }
652 
653 static int
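/*
 * Fetch the MAC address from the "local-mac-address" device tree
 * property.
 */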
654 xae_get_hwaddr(struct xae_softc *sc, uint8_t *hwaddr)
655 {
656 	phandle_t node;
657 	int len;
658 
659 	node = ofw_bus_get_node(sc->dev);
660 
661 	/* Check that the property is present. */
662 	if ((len = OF_getproplen(node, "local-mac-address")) <= 0)
663 		return (EINVAL);
664 
665 	if (len != ETHER_ADDR_LEN)
666 		return (EINVAL);
667 
668 	OF_getprop(node, "local-mac-address", hwaddr,
669 	    ETHER_ADDR_LEN);
670 
671 	return (0);
672 }
673 
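/*
 * Wait for the MDIO controller to become ready, polling the ready bit
 * for roughly 200 us.  Returns 0 on success, 1 on timeout.
 */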
674 static int
675 mdio_wait(struct xae_softc *sc)
676 {
677 	uint32_t reg;
678 	int timeout;
679 
680 	timeout = 200;
681 
682 	do {
683 		reg = READ4(sc, XAE_MDIO_CTRL);
684 		if (reg & MDIO_CTRL_READY)
685 			break;
686 		DELAY(1);
687 	} while (timeout--);
688 
689 	if (timeout <= 0) {
690 		device_printf(sc->dev, "Failed to get MDIO ready\n");
691 		return (1);
692 	}
693 
694 	return (0);
695 }
696 
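/*
 * MII bus read: initiate a read of (phy, reg) and wait for the MDIO
 * controller to complete it; 0 is returned if the controller times out.
 */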
697 static int
698 xae_miibus_read_reg(device_t dev, int phy, int reg)
699 {
700 	struct xae_softc *sc;
701 	uint32_t mii;
702 	int rv;
703 
704 	sc = device_get_softc(dev);
705 
706 	if (mdio_wait(sc))
707 		return (0);
708 
709 	mii = MDIO_CTRL_TX_OP_READ | MDIO_CTRL_INITIATE;
710 	mii |= (reg << MDIO_TX_REGAD_S);
711 	mii |= (phy << MDIO_TX_PHYAD_S);
712 
713 	WRITE4(sc, XAE_MDIO_CTRL, mii);
714 
715 	if (mdio_wait(sc))
716 		return (0);
717 
718 	rv = READ4(sc, XAE_MDIO_READ);
719 
720 	return (rv);
721 }
722 
723 static int
724 xae_miibus_write_reg(device_t dev, int phy, int reg, int val)
725 {
726 	struct xae_softc *sc;
727 	uint32_t mii;
728 
729 	sc = device_get_softc(dev);
730 
731 	if (mdio_wait(sc))
732 		return (1);
733 
734 	mii = MDIO_CTRL_TX_OP_WRITE | MDIO_CTRL_INITIATE;
735 	mii |= (reg << MDIO_TX_REGAD_S);
736 	mii |= (phy << MDIO_TX_PHYAD_S);
737 
738 	WRITE4(sc, XAE_MDIO_WRITE, val);
739 	WRITE4(sc, XAE_MDIO_CTRL, mii);
740 
741 	if (mdio_wait(sc))
742 		return (1);
743 
744 	return (0);
745 }
746 
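/*
 * Xilinx VCU118 workaround: configure the TI DP83867 PHY for 6-wire SGMII
 * mode, tune its speed-optimization settings and SGMII timer bits, and
 * reset it; this is repeated until the PHY at address 1 (presumably the
 * SGMII/PCS-PMA side) answers, after which autonegotiation is restarted
 * there until it completes.
 */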
747 static void
748 xae_phy_fixup(struct xae_softc *sc)
749 {
750 	uint32_t reg;
751 	device_t dev;
752 
753 	dev = sc->dev;
754 
755 	do {
756 		WRITE_TI_EREG(sc, DP83867_SGMIICTL1, SGMIICTL1_SGMII_6W);
757 		PHY_WR(sc, DP83867_PHYCR, PHYCR_SGMII_EN);
758 
759 		reg = PHY_RD(sc, DP83867_CFG2);
760 		reg &= ~CFG2_SPEED_OPT_ATTEMPT_CNT_M;
761 		reg |= (CFG2_SPEED_OPT_ATTEMPT_CNT_4);
762 		reg |= CFG2_INTERRUPT_POLARITY;
763 		reg |= CFG2_SPEED_OPT_ENHANCED_EN;
764 		reg |= CFG2_SPEED_OPT_10M_EN;
765 		PHY_WR(sc, DP83867_CFG2, reg);
766 
767 		WRITE_TI_EREG(sc, DP83867_CFG4, CFG4_SGMII_TMR);
768 		PHY_WR(sc, MII_BMCR,
769 		    BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_RESET);
770 	} while (PHY1_RD(sc, MII_BMCR) == 0x0ffff);
771 
772 	do {
773 		PHY1_WR(sc, MII_BMCR,
774 		    BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_STARTNEG);
775 		DELAY(40000);
776 	} while ((PHY1_RD(sc, MII_BMSR) & BMSR_ACOMP) == 0);
777 }
778 
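/*
 * Look up the TX and RX xDMA controllers by name, presumably via the
 * standard "dmas"/"dma-names" device tree binding.
 */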
779 static int
780 get_xdma_std(struct xae_softc *sc)
781 {
782 
783 	sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
784 	if (sc->xdma_tx == NULL)
785 		return (ENXIO);
786 
787 	sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
788 	if (sc->xdma_rx == NULL) {
789 		xdma_put(sc->xdma_tx);
790 		return (ENXIO);
791 	}
792 
793 	return (0);
794 }
795 
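/*
 * Fallback for device trees that describe the DMA engine with the Xilinx
 * "axistream-connected" property: both channels come from the same AXI
 * DMA instance and are told apart by the axidma_fdt_data channel id.
 */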
796 static int
797 get_xdma_axistream(struct xae_softc *sc)
798 {
799 	struct axidma_fdt_data *data;
800 	device_t dma_dev;
801 	phandle_t node;
802 	pcell_t prop;
803 	size_t len;
804 
805 	node = ofw_bus_get_node(sc->dev);
806 	len = OF_getencprop(node, "axistream-connected", &prop, sizeof(prop));
807 	if (len != sizeof(prop)) {
808 		device_printf(sc->dev,
809 		    "%s: Couldn't get axistream-connected prop.\n", __func__);
810 		return (ENXIO);
811 	}
812 	dma_dev = OF_device_from_xref(prop);
813 	if (dma_dev == NULL) {
814 		device_printf(sc->dev, "Could not get DMA device by xref.\n");
815 		return (ENXIO);
816 	}
817 
818 	sc->xdma_tx = xdma_get(sc->dev, dma_dev);
819 	if (sc->xdma_tx == NULL) {
820 		device_printf(sc->dev, "Could not find DMA controller.\n");
821 		return (ENXIO);
822 	}
823 	data = malloc(sizeof(struct axidma_fdt_data),
824 	    M_DEVBUF, (M_WAITOK | M_ZERO));
825 	data->id = AXIDMA_TX_CHAN;
826 	sc->xdma_tx->data = data;
827 
828 	sc->xdma_rx = xdma_get(sc->dev, dma_dev);
829 	if (sc->xdma_rx == NULL) {
830 		device_printf(sc->dev, "Could not find DMA controller.\n");
831 		return (ENXIO);
832 	}
833 	data = malloc(sizeof(struct axidma_fdt_data),
834 	    M_DEVBUF, (M_WAITOK | M_ZERO));
835 	data->id = AXIDMA_RX_CHAN;
836 	sc->xdma_rx->data = data;
837 
838 	return (0);
839 }
840 
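/*
 * Acquire the TX/RX DMA engines (by name, or via the axistream-connected
 * fallback), allocate a virtual channel and a completion interrupt
 * handler for each, attach the device's bounce-buffer memory (if any),
 * and configure the scatter-gather limits of both channels.
 */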
841 static int
842 setup_xdma(struct xae_softc *sc)
843 {
844 	device_t dev;
845 	vmem_t *vmem;
846 	int error;
847 
848 	dev = sc->dev;
849 
850 	/* Get xDMA controller */
851 	error = get_xdma_std(sc);
852 
853 	if (error) {
854 		device_printf(sc->dev,
855 		    "Falling back to the axistream-connected property\n");
856 		error = get_xdma_axistream(sc);
857 	}
858 
859 	if (error) {
860 		device_printf(dev, "Could not find xDMA controllers.\n");
861 		return (ENXIO);
862 	}
863 
864 	/* Alloc xDMA TX virtual channel. */
865 	sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, 0);
866 	if (sc->xchan_tx == NULL) {
867 		device_printf(dev, "Can't alloc virtual DMA TX channel.\n");
868 		return (ENXIO);
869 	}
870 
871 	/* Setup interrupt handler. */
872 	error = xdma_setup_intr(sc->xchan_tx, 0,
873 	    xae_xdma_tx_intr, sc, &sc->ih_tx);
874 	if (error) {
875 		device_printf(sc->dev,
876 		    "Can't setup xDMA TX interrupt handler.\n");
877 		return (ENXIO);
878 	}
879 
880 	/* Alloc xDMA RX virtual channel. */
881 	sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, 0);
882 	if (sc->xchan_rx == NULL) {
883 		device_printf(dev, "Can't alloc virtual DMA RX channel.\n");
884 		return (ENXIO);
885 	}
886 
887 	/* Setup interrupt handler. */
888 	error = xdma_setup_intr(sc->xchan_rx, XDMA_INTR_NET,
889 	    xae_xdma_rx_intr, sc, &sc->ih_rx);
890 	if (error) {
891 		device_printf(sc->dev,
892 		    "Can't setup xDMA RX interrupt handler.\n");
893 		return (ENXIO);
894 	}
895 
896 	/* Setup bounce buffer */
897 	vmem = xdma_get_memory(dev);
898 	if (vmem) {
899 		xchan_set_memory(sc->xchan_tx, vmem);
900 		xchan_set_memory(sc->xchan_rx, vmem);
901 	}
902 
903 	xdma_prep_sg(sc->xchan_tx,
904 	    TX_QUEUE_SIZE,	/* xchan requests queue size */
905 	    MCLBYTES,	/* maxsegsize */
906 	    8,		/* maxnsegs */
907 	    16,		/* alignment */
908 	    0,		/* boundary */
909 	    BUS_SPACE_MAXADDR_32BIT,
910 	    BUS_SPACE_MAXADDR);
911 
912 	xdma_prep_sg(sc->xchan_rx,
913 	    RX_QUEUE_SIZE,	/* xchan requests queue size */
914 	    MCLBYTES,	/* maxsegsize */
915 	    1,		/* maxnsegs */
916 	    16,		/* alignment */
917 	    0,		/* boundary */
918 	    BUS_SPACE_MAXADDR_32BIT,
919 	    BUS_SPACE_MAXADDR);
920 
921 	return (0);
922 }
923 
924 static int
925 xae_probe(device_t dev)
926 {
927 
928 	if (!ofw_bus_status_okay(dev))
929 		return (ENXIO);
930 
931 	if (!ofw_bus_is_compatible(dev, "xlnx,axi-ethernet-1.00.a"))
932 		return (ENXIO);
933 
934 	device_set_desc(dev, "Xilinx AXI Ethernet");
935 
936 	return (BUS_PROBE_DEFAULT);
937 }
938 
939 static int
940 xae_attach(device_t dev)
941 {
942 	struct xae_softc *sc;
943 	struct ifnet *ifp;
944 	phandle_t node;
945 	uint32_t reg;
946 	int error;
947 
948 	sc = device_get_softc(dev);
949 	sc->dev = dev;
950 	node = ofw_bus_get_node(dev);
951 
952 	if (setup_xdma(sc) != 0) {
953 		device_printf(dev, "Could not setup xDMA.\n");
954 		return (ENXIO);
955 	}
956 
957 	mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
958 	    MTX_NETWORK_LOCK, MTX_DEF);
959 
960 	sc->br = buf_ring_alloc(BUFRING_SIZE, M_DEVBUF,
961 	    M_NOWAIT, &sc->mtx);
962 	if (sc->br == NULL)
963 		return (ENOMEM);
964 
965 	if (bus_alloc_resources(dev, xae_spec, sc->res)) {
966 		device_printf(dev, "could not allocate resources\n");
967 		return (ENXIO);
968 	}
969 
970 	/* Memory interface */
971 	sc->bst = rman_get_bustag(sc->res[0]);
972 	sc->bsh = rman_get_bushandle(sc->res[0]);
973 
974 	device_printf(sc->dev, "Identification: %x\n",
975 	    READ4(sc, XAE_IDENT));
976 
977 	/* Get MAC addr */
978 	if (xae_get_hwaddr(sc, sc->macaddr)) {
979 		device_printf(sc->dev, "could not get MAC address\n");
980 		return (ENXIO);
981 	}
982 
983 	/* Enable MII clock */
984 	reg = (MDIO_CLK_DIV_DEFAULT << MDIO_SETUP_CLK_DIV_S);
985 	reg |= MDIO_SETUP_ENABLE;
986 	WRITE4(sc, XAE_MDIO_SETUP, reg);
987 	if (mdio_wait(sc))
988 		return (ENXIO);
989 
990 	callout_init_mtx(&sc->xae_callout, &sc->mtx, 0);
991 
992 	/* Setup interrupt handler. */
993 	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
994 	    NULL, xae_intr, sc, &sc->intr_cookie);
995 	if (error != 0) {
996 		device_printf(dev, "could not setup interrupt handler.\n");
997 		return (ENXIO);
998 	}
999 
1000 	/* Set up the ethernet interface. */
1001 	sc->ifp = ifp = if_alloc(IFT_ETHER);
1002 	if (ifp == NULL) {
1003 		device_printf(dev, "could not allocate ifp.\n");
1004 		return (ENXIO);
1005 	}
1006 
1007 	ifp->if_softc = sc;
1008 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1009 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1010 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1011 	ifp->if_capenable = ifp->if_capabilities;
1012 	ifp->if_transmit = xae_transmit;
1013 	ifp->if_qflush = xae_qflush;
1014 	ifp->if_ioctl = xae_ioctl;
1015 	ifp->if_init = xae_init;
1016 	IFQ_SET_MAXLEN(&ifp->if_snd, TX_DESC_COUNT - 1);
1017 	ifp->if_snd.ifq_drv_maxlen = TX_DESC_COUNT - 1;
1018 	IFQ_SET_READY(&ifp->if_snd);
1019 
1020 	if (xae_get_phyaddr(node, &sc->phy_addr) != 0)
1021 		return (ENXIO);
1022 
1023 	/* Attach the mii driver. */
1024 	error = mii_attach(dev, &sc->miibus, ifp, xae_media_change,
1025 	    xae_media_status, BMSR_DEFCAPMASK, sc->phy_addr,
1026 	    MII_OFFSET_ANY, 0);
1027 
1028 	if (error != 0) {
1029 		device_printf(dev, "PHY attach failed\n");
1030 		return (ENXIO);
1031 	}
1032 	sc->mii_softc = device_get_softc(sc->miibus);
1033 
1034 	/* Apply the VCU118 PHY workaround. */
1035 	if (OF_getproplen(node, "xlnx,vcu118") >= 0)
1036 		xae_phy_fixup(sc);
1037 
1038 	/* All ready to run, attach the ethernet interface. */
1039 	ether_ifattach(ifp, sc->macaddr);
1040 	sc->is_attached = true;
1041 
1042 	xae_rx_enqueue(sc, NUM_RX_MBUF);
1043 	xdma_queue_submit(sc->xchan_rx);
1044 
1045 	return (0);
1046 }
1047 
1048 static int
1049 xae_detach(device_t dev)
1050 {
1051 	struct xae_softc *sc;
1052 	struct ifnet *ifp;
1053 
1054 	sc = device_get_softc(dev);
1055 
1056 	KASSERT(mtx_initialized(&sc->mtx), ("%s: mutex not initialized",
1057 	    device_get_nameunit(dev)));
1058 
1059 	ifp = sc->ifp;
1060 
1061 	/* Only cleanup if attach succeeded. */
1062 	if (device_is_attached(dev)) {
1063 		XAE_LOCK(sc);
1064 		xae_stop_locked(sc);
1065 		XAE_UNLOCK(sc);
1066 		callout_drain(&sc->xae_callout);
1067 		ether_ifdetach(ifp);
1068 	}
1069 
1070 	if (sc->miibus != NULL)
1071 		device_delete_child(dev, sc->miibus);
1072 
1073 	if (ifp != NULL)
1074 		if_free(ifp);
1075 
1076 	mtx_destroy(&sc->mtx);
1077 
1078 	bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
1079 
1080 	bus_release_resources(dev, xae_spec, sc->res);
1081 
1082 	xdma_channel_free(sc->xchan_tx);
1083 	xdma_channel_free(sc->xchan_rx);
1084 	xdma_put(sc->xdma_tx);
1085 	xdma_put(sc->xdma_rx);
1086 
1087 	return (0);
1088 }
1089 
1090 static void
1091 xae_miibus_statchg(device_t dev)
1092 {
1093 	struct xae_softc *sc;
1094 	struct mii_data *mii;
1095 	uint32_t reg;
1096 
1097 	/*
1098 	 * Called by the MII bus driver when the PHY establishes
1099 	 * link to set the MAC interface registers.
1100 	 */
1101 
1102 	sc = device_get_softc(dev);
1103 
1104 	XAE_ASSERT_LOCKED(sc);
1105 
1106 	mii = sc->mii_softc;
1107 
1108 	if (mii->mii_media_status & IFM_ACTIVE)
1109 		sc->link_is_up = true;
1110 	else
1111 		sc->link_is_up = false;
1112 
1113 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1114 	case IFM_1000_T:
1115 	case IFM_1000_SX:
1116 		reg = SPEED_1000;
1117 		break;
1118 	case IFM_100_TX:
1119 		reg = SPEED_100;
1120 		break;
1121 	case IFM_10_T:
1122 		reg = SPEED_10;
1123 		break;
1124 	case IFM_NONE:
1125 		sc->link_is_up = false;
1126 		return;
1127 	default:
1128 		sc->link_is_up = false;
1129 		device_printf(dev, "Unsupported media %u\n",
1130 		    IFM_SUBTYPE(mii->mii_media_active));
1131 		return;
1132 	}
1133 
1134 	WRITE4(sc, XAE_SPEED, reg);
1135 }
1136 
1137 static device_method_t xae_methods[] = {
1138 	DEVMETHOD(device_probe,		xae_probe),
1139 	DEVMETHOD(device_attach,	xae_attach),
1140 	DEVMETHOD(device_detach,	xae_detach),
1141 
1142 	/* MII Interface */
1143 	DEVMETHOD(miibus_readreg,	xae_miibus_read_reg),
1144 	DEVMETHOD(miibus_writereg,	xae_miibus_write_reg),
1145 	DEVMETHOD(miibus_statchg,	xae_miibus_statchg),
1146 	{ 0, 0 }
1147 };
1148 
1149 driver_t xae_driver = {
1150 	"xae",
1151 	xae_methods,
1152 	sizeof(struct xae_softc),
1153 };
1154 
1155 static devclass_t xae_devclass;
1156 
1157 DRIVER_MODULE(xae, simplebus, xae_driver, xae_devclass, 0, 0);
1158 DRIVER_MODULE(miibus, xae, miibus_driver, miibus_devclass, 0, 0);
1159 
1160 MODULE_DEPEND(xae, ether, 1, 1, 1);
1161 MODULE_DEPEND(xae, miibus, 1, 1, 1);
1162