xref: /freebsd/sys/dev/altera/atse/if_atse.c (revision 0957b409a90fd597c1e9124cbaf3edd2b488f4ac)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2012, 2013 Bjoern A. Zeeb
5  * Copyright (c) 2014 Robert N. M. Watson
6  * Copyright (c) 2016-2017 Ruslan Bukin <br@bsdpad.com>
7  * All rights reserved.
8  *
9  * This software was developed by SRI International and the University of
10  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-11-C-0249)
11  * ("MRC2"), as part of the DARPA MRC research programme.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 /*
35  * Altera Triple-Speed Ethernet MegaCore, Function User Guide
36  * UG-01008-3.0, Software Version: 12.0, June 2012.
37  * Available at the time of writing at:
38  * http://www.altera.com/literature/ug/ug_ethernet.pdf
39  *
40  * We are using a Marvell 88E1111 (Alaska) PHY on the DE4.  See mii/e1000phy.c.
41  */
42 /*
43  * XXX-BZ NOTES:
44  * - ifOutBroadcastPkts are only counted if both ether dst and src are all-1s;
45  *   this seems to be an IP core bug: ether broadcasts are counted as
46  *   multicast.  Is this still the case?
47  * - figure out why the TX FIFO fill status and intr did not work as expected.
48  * - test 100Mbit/s and 10Mbit/s
49  * - blacklist the one special factory programmed ethernet address (for now
50  *   hardcoded, later from loader?)
51  * - resolve all XXX, left as reminders to shake out details later
52  * - Jumbo frame support
53  */
54 
55 #include <sys/cdefs.h>
56 __FBSDID("$FreeBSD$");
57 
58 #include "opt_device_polling.h"
59 
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/bus.h>
64 #include <sys/endian.h>
65 #include <sys/jail.h>
66 #include <sys/lock.h>
67 #include <sys/module.h>
68 #include <sys/mutex.h>
69 #include <sys/proc.h>
70 #include <sys/socket.h>
71 #include <sys/sockio.h>
72 #include <sys/types.h>
73 
74 #include <net/ethernet.h>
75 #include <net/if.h>
76 #include <net/if_var.h>
77 #include <net/if_dl.h>
78 #include <net/if_media.h>
79 #include <net/if_types.h>
80 #include <net/if_vlan_var.h>
81 
82 #include <net/bpf.h>
83 
84 #include <machine/bus.h>
85 #include <machine/resource.h>
86 #include <sys/rman.h>
87 
88 #include <dev/mii/mii.h>
89 #include <dev/mii/miivar.h>
90 
91 #include <dev/altera/atse/if_atsereg.h>
92 #include <dev/xdma/xdma.h>
93 
94 #define	RX_QUEUE_SIZE		4096
95 #define	TX_QUEUE_SIZE		4096
96 #define	NUM_RX_MBUF		512
97 #define	BUFRING_SIZE		8192
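/*
 * RX_QUEUE_SIZE and TX_QUEUE_SIZE bound the number of outstanding xdma
 * requests per channel, NUM_RX_MBUF is the number of mbufs initially
 * enqueued on the RX channel, and BUFRING_SIZE sizes the software
 * transmit ring used by atse_transmit().
 */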
98 
99 #include <machine/cache.h>
100 
101 /* XXX if we ever do parallel attach, we will need a global lock for this. */
102 #define	ATSE_ETHERNET_OPTION_BITS_UNDEF	0
103 #define	ATSE_ETHERNET_OPTION_BITS_READ	1
104 static int atse_ethernet_option_bits_flag = ATSE_ETHERNET_OPTION_BITS_UNDEF;
105 static uint8_t atse_ethernet_option_bits[ALTERA_ETHERNET_OPTION_BITS_LEN];
106 
107 /*
108  * Softc and critical resource locking.
109  */
110 #define	ATSE_LOCK(_sc)		mtx_lock(&(_sc)->atse_mtx)
111 #define	ATSE_UNLOCK(_sc)	mtx_unlock(&(_sc)->atse_mtx)
112 #define	ATSE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->atse_mtx, MA_OWNED)
113 
114 #define ATSE_DEBUG
115 #undef ATSE_DEBUG
116 
117 #ifdef ATSE_DEBUG
118 #define	DPRINTF(format, ...)	printf(format, __VA_ARGS__)
119 #else
120 #define	DPRINTF(format, ...)
121 #endif
122 
123 /*
124  * Register space access macros.
125  */
126 static inline void
127 csr_write_4(struct atse_softc *sc, uint32_t reg, uint32_t val4,
128     const char *f, const int l)
129 {
130 
131 	val4 = htole32(val4);
132 	DPRINTF("[%s:%d] CSR W %s 0x%08x (0x%08x) = 0x%08x\n", f, l,
133 	    "atse_mem_res", reg, reg * 4, val4);
134 	bus_write_4(sc->atse_mem_res, reg * 4, val4);
135 }
136 
137 static inline uint32_t
138 csr_read_4(struct atse_softc *sc, uint32_t reg, const char *f, const int l)
139 {
140 	uint32_t val4;
141 
142 	val4 = le32toh(bus_read_4(sc->atse_mem_res, reg * 4));
143 	DPRINTF("[%s:%d] CSR R %s 0x%08x (0x%08x) = 0x%08x\n", f, l,
144 	    "atse_mem_res", reg, reg * 4, val4);
145 
146 	return (val4);
147 }
148 
149 /*
150  * See page 5-2: all offsets are dword offsets, and the most significant 16
151  * bits must be zero on write and are ignored on read.
152  */
153 static inline void
154 pxx_write_2(struct atse_softc *sc, bus_addr_t bmcr, uint32_t reg, uint16_t val,
155     const char *f, const int l, const char *s)
156 {
157 	uint32_t val4;
158 
159 	val4 = htole32(val & 0x0000ffff);
160 	DPRINTF("[%s:%d] %s W %s 0x%08x (0x%08jx) = 0x%08x\n", f, l, s,
161 	    "atse_mem_res", reg, (bmcr + reg) * 4, val4);
162 	bus_write_4(sc->atse_mem_res, (bmcr + reg) * 4, val4);
163 }
164 
165 static inline uint16_t
166 pxx_read_2(struct atse_softc *sc, bus_addr_t bmcr, uint32_t reg, const char *f,
167     const int l, const char *s)
168 {
169 	uint32_t val4;
170 	uint16_t val;
171 
172 	val4 = bus_read_4(sc->atse_mem_res, (bmcr + reg) * 4);
173 	val = le32toh(val4) & 0x0000ffff;
174 	DPRINTF("[%s:%d] %s R %s 0x%08x (0x%08jx) = 0x%04x\n", f, l, s,
175 	    "atse_mem_res", reg, (bmcr + reg) * 4, val);
176 
177 	return (val);
178 }
179 
180 #define	CSR_WRITE_4(sc, reg, val)	\
181 	csr_write_4((sc), (reg), (val), __func__, __LINE__)
182 #define	CSR_READ_4(sc, reg)		\
183 	csr_read_4((sc), (reg), __func__, __LINE__)
184 #define	PCS_WRITE_2(sc, reg, val)	\
185 	pxx_write_2((sc), sc->atse_bmcr0, (reg), (val), __func__, __LINE__, \
186 	    "PCS")
187 #define	PCS_READ_2(sc, reg)		\
188 	pxx_read_2((sc), sc->atse_bmcr0, (reg), __func__, __LINE__, "PCS")
189 #define	PHY_WRITE_2(sc, reg, val)	\
190 	pxx_write_2((sc), sc->atse_bmcr1, (reg), (val), __func__, __LINE__, \
191 	    "PHY")
192 #define	PHY_READ_2(sc, reg)		\
193 	pxx_read_2((sc), sc->atse_bmcr1, (reg), __func__, __LINE__, "PHY")
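/*
 * The PCS_* accessors go through the MDIO window at atse_bmcr0 (the internal
 * PCS), the PHY_* accessors through the window at atse_bmcr1 (the external
 * PHY); see the MDIO address setup in atse_attach().
 */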
194 
195 static void atse_tick(void *);
196 static int atse_detach(device_t);
197 
198 devclass_t atse_devclass;
199 
200 static int
201 atse_rx_enqueue(struct atse_softc *sc, uint32_t n)
202 {
203 	struct mbuf *m;
204 	int i;
205 
206 	for (i = 0; i < n; i++) {
207 		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
208 		if (m == NULL) {
209 			device_printf(sc->dev,
210 			    "%s: Can't alloc rx mbuf\n", __func__);
211 			return (-1);
212 		}
213 
214 		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
215 		xdma_enqueue_mbuf(sc->xchan_rx, &m, 0, 4, 4, XDMA_DEV_TO_MEM);
216 	}
217 
218 	return (0);
219 }
220 
221 static int
222 atse_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
223 {
224 	xdma_transfer_status_t st;
225 	struct atse_softc *sc;
226 	struct ifnet *ifp;
227 	struct mbuf *m;
228 	int err;
229 
230 	sc = arg;
231 
232 	ATSE_LOCK(sc);
233 
234 	ifp = sc->atse_ifp;
235 
236 	for (;;) {
237 		err = xdma_dequeue_mbuf(sc->xchan_tx, &m, &st);
238 		if (err != 0) {
239 			break;
240 		}
241 
242 		if (st.error != 0) {
243 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
244 		}
245 
246 		m_freem(m);
247 		sc->txcount--;
248 	}
249 
250 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
251 
252 	ATSE_UNLOCK(sc);
253 
254 	return (0);
255 }
256 
257 static int
258 atse_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
259 {
260 	xdma_transfer_status_t st;
261 	struct atse_softc *sc;
262 	struct ifnet *ifp;
263 	struct mbuf *m;
264 	int err;
265 	uint32_t cnt_processed;
266 
267 	sc = arg;
268 
269 	ATSE_LOCK(sc);
270 
271 	ifp = sc->atse_ifp;
272 
273 	cnt_processed = 0;
274 	for (;;) {
275 		err = xdma_dequeue_mbuf(sc->xchan_rx, &m, &st);
276 		if (err != 0) {
277 			break;
278 		}
279 		cnt_processed++;
280 
281 		if (st.error != 0) {
282 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
283 			m_freem(m);
284 			continue;
285 		}
286 
287 		m->m_pkthdr.len = m->m_len = st.transferred;
288 		m->m_pkthdr.rcvif = ifp;
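		/*
		 * The MAC prepends two padding bytes on receive because
		 * RX_CMD_STAT_RX_SHIFT16 is set in atse_reset(); strip them
		 * here so the IP header ends up 32-bit aligned.
		 */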
289 		m_adj(m, ETHER_ALIGN);
290 		ATSE_UNLOCK(sc);
291 		(*ifp->if_input)(ifp, m);
292 		ATSE_LOCK(sc);
293 	}
294 
295 	atse_rx_enqueue(sc, cnt_processed);
296 
297 	ATSE_UNLOCK(sc);
298 
299 	return (0);
300 }
301 
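/*
 * TX path: atse_transmit() stages mbufs on the drbr/buf_ring and, with the
 * locks held, atse_transmit_locked() hands them to the xdma TX channel;
 * atse_xdma_tx_intr() frees them once the transfer has completed.
 */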
302 static int
303 atse_transmit_locked(struct ifnet *ifp)
304 {
305 	struct atse_softc *sc;
306 	struct mbuf *m;
307 	struct buf_ring *br;
308 	int error;
309 	int enq;
310 
311 	sc = ifp->if_softc;
312 	br = sc->br;
313 
314 	enq = 0;
315 
316 	while ((m = drbr_peek(ifp, br)) != NULL) {
317 		error = xdma_enqueue_mbuf(sc->xchan_tx, &m, 0, 4, 4, XDMA_MEM_TO_DEV);
318 		if (error != 0) {
319 			/* No space in request queue available yet. */
320 			drbr_putback(ifp, br, m);
321 			break;
322 		}
323 
324 		drbr_advance(ifp, br);
325 
326 		sc->txcount++;
327 		enq++;
328 
329 		/* If anyone is interested give them a copy. */
330 		ETHER_BPF_MTAP(ifp, m);
331 	}
332 
333 	if (enq > 0)
334 		xdma_queue_submit(sc->xchan_tx);
335 
336 	return (0);
337 }
338 
339 static int
340 atse_transmit(struct ifnet *ifp, struct mbuf *m)
341 {
342 	struct atse_softc *sc;
343 	struct buf_ring *br;
344 	int error;
345 
346 	sc = ifp->if_softc;
347 	br = sc->br;
348 
349 	ATSE_LOCK(sc);
350 
351 	mtx_lock(&sc->br_mtx);
352 
353 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) {
354 		error = drbr_enqueue(ifp, sc->br, m);
355 		mtx_unlock(&sc->br_mtx);
356 		ATSE_UNLOCK(sc);
357 		return (error);
358 	}
359 
360 	if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
361 		error = drbr_enqueue(ifp, sc->br, m);
362 		mtx_unlock(&sc->br_mtx);
363 		ATSE_UNLOCK(sc);
364 		return (error);
365 	}
366 
367 	error = drbr_enqueue(ifp, br, m);
368 	if (error) {
369 		mtx_unlock(&sc->br_mtx);
370 		ATSE_UNLOCK(sc);
371 		return (error);
372 	}
373 	error = atse_transmit_locked(ifp);
374 
375 	mtx_unlock(&sc->br_mtx);
376 	ATSE_UNLOCK(sc);
377 
378 	return (error);
379 }
380 
381 static void
382 atse_qflush(struct ifnet *ifp)
383 {
384 	struct atse_softc *sc;
385 
386 	sc = ifp->if_softc;
387 
388 	printf("%s\n", __func__);
389 }
390 
391 static int
392 atse_stop_locked(struct atse_softc *sc)
393 {
394 	uint32_t mask, val4;
395 	struct ifnet *ifp;
396 	int i;
397 
398 	ATSE_LOCK_ASSERT(sc);
399 
400 	callout_stop(&sc->atse_tick);
401 
402 	ifp = sc->atse_ifp;
403 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
404 
405 	/* Disable MAC transmit and receive datapath. */
406 	mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
407 	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
408 	val4 &= ~mask;
409 	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
410 
411 	/* Wait for bits to be cleared; i=100 is excessive. */
412 	for (i = 0; i < 100; i++) {
413 		val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
414 		if ((val4 & mask) == 0) {
415 			break;
416 		}
417 		DELAY(10);
418 	}
419 
420 	if ((val4 & mask) != 0) {
421 		device_printf(sc->atse_dev, "Disabling MAC TX/RX timed out.\n");
422 		/* Punt. */
423 	}
424 
425 	sc->atse_flags &= ~ATSE_FLAGS_LINK;
426 
427 	return (0);
428 }
429 
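/*
 * Multicast hash: bit i of the result is the parity (XOR of all bits) of
 * octet i of the Ethernet address, yielding a 6-bit (0-63) index into the
 * MHASH table.
 */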
430 static uint8_t
431 atse_mchash(struct atse_softc *sc __unused, const uint8_t *addr)
432 {
433 	uint8_t x, y;
434 	int i, j;
435 
436 	x = 0;
437 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
438 		y = addr[i] & 0x01;
439 		for (j = 1; j < 8; j++)
440 			y ^= (addr[i] >> j) & 0x01;
441 		x |= (y << i);
442 	}
443 
444 	return (x);
445 }
446 
447 static int
448 atse_rxfilter_locked(struct atse_softc *sc)
449 {
450 	struct ifmultiaddr *ifma;
451 	struct ifnet *ifp;
452 	uint32_t val4;
453 	int i;
454 
455 	/* XXX-BZ can we find out if we have the MHASH synthesized? */
456 	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
457 	/* For simplicity always hash full 48 bits of addresses. */
458 	if ((val4 & BASE_CFG_COMMAND_CONFIG_MHASH_SEL) != 0)
459 		val4 &= ~BASE_CFG_COMMAND_CONFIG_MHASH_SEL;
460 
461 	ifp = sc->atse_ifp;
462 	if (ifp->if_flags & IFF_PROMISC) {
463 		val4 |= BASE_CFG_COMMAND_CONFIG_PROMIS_EN;
464 	} else {
465 		val4 &= ~BASE_CFG_COMMAND_CONFIG_PROMIS_EN;
466 	}
467 
468 	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
469 
470 	if (ifp->if_flags & IFF_ALLMULTI) {
471 		/* Accept all multicast addresses. */
472 		for (i = 0; i < MHASH_LEN; i++)
473 			CSR_WRITE_4(sc, MHASH_START + i, 0x1);
474 	} else {
475 		/*
476 		 * Can hold MHASH_LEN entries.
477 		 * XXX-BZ bitstring.h would be more general.
478 		 */
479 		uint64_t h;
480 
481 		h = 0;
482 		/*
483 		 * Re-build and re-program hash table.  First build the
484 		 * bit-field "yes" or "no" for each slot per address, then
485 		 * do all the programming afterwards.
486 		 */
487 		if_maddr_rlock(ifp);
488 		CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
489 			if (ifma->ifma_addr->sa_family != AF_LINK) {
490 				continue;
491 			}
492 
493 			h |= (1ULL << atse_mchash(sc,
494 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr)));
495 		}
496 		if_maddr_runlock(ifp);
497 		for (i = 0; i < MHASH_LEN; i++) {
498 			CSR_WRITE_4(sc, MHASH_START + i,
499 			    (h & (1ULL << i)) ? 0x01 : 0x00);
500 		}
501 	}
502 
503 	return (0);
504 }
505 
506 static int
507 atse_ethernet_option_bits_read_fdt(device_t dev)
508 {
509 	struct resource *res;
510 	device_t fdev;
511 	int i, rid;
512 
513 	if (atse_ethernet_option_bits_flag & ATSE_ETHERNET_OPTION_BITS_READ) {
514 		return (0);
515 	}
516 
517 	fdev = device_find_child(device_get_parent(dev), "cfi", 0);
518 	if (fdev == NULL) {
519 		return (ENOENT);
520 	}
521 
522 	rid = 0;
523 	res = bus_alloc_resource_any(fdev, SYS_RES_MEMORY, &rid,
524 	    RF_ACTIVE | RF_SHAREABLE);
525 	if (res == NULL) {
526 		return (ENXIO);
527 	}
528 
529 	for (i = 0; i < ALTERA_ETHERNET_OPTION_BITS_LEN; i++) {
530 		atse_ethernet_option_bits[i] = bus_read_1(res,
531 		    ALTERA_ETHERNET_OPTION_BITS_OFF + i);
532 	}
533 
534 	bus_release_resource(fdev, SYS_RES_MEMORY, rid, res);
535 	atse_ethernet_option_bits_flag |= ATSE_ETHERNET_OPTION_BITS_READ;
536 
537 	return (0);
538 }
539 
540 static int
541 atse_ethernet_option_bits_read(device_t dev)
542 {
543 	int error;
544 
545 	error = atse_ethernet_option_bits_read_fdt(dev);
546 	if (error == 0)
547 		return (0);
548 
549 	device_printf(dev, "Cannot read Ethernet addresses from flash.\n");
550 
551 	return (error);
552 }
553 
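/*
 * Layout of the Ethernet option bits read from flash: bytes 0-3 carry the
 * 0x00005afe ("safe") magic, bytes 4-9 carry the factory-programmed MAC
 * address.
 */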
554 static int
555 atse_get_eth_address(struct atse_softc *sc)
556 {
557 	unsigned long hostid;
558 	uint32_t val4;
559 	int unit;
560 
561 	/*
562 	 * Make sure to only ever do this once.  Otherwise a reset would
563 	 * possibly change our ethernet address, which is not good at all.
564 	 */
565 	if (sc->atse_eth_addr[0] != 0x00 || sc->atse_eth_addr[1] != 0x00 ||
566 	    sc->atse_eth_addr[2] != 0x00) {
567 		return (0);
568 	}
569 
570 	if ((atse_ethernet_option_bits_flag &
571 	    ATSE_ETHERNET_OPTION_BITS_READ) == 0) {
572 		goto get_random;
573 	}
574 
575 	val4 = atse_ethernet_option_bits[0] << 24;
576 	val4 |= atse_ethernet_option_bits[1] << 16;
577 	val4 |= atse_ethernet_option_bits[2] << 8;
578 	val4 |= atse_ethernet_option_bits[3];
579 	/* They chose "safe". */
580 	if (val4 != le32toh(0x00005afe)) {
581 		device_printf(sc->atse_dev, "Magic '5afe' is not safe: 0x%08x. "
582 		    "Falling back to random numbers for hardware address.\n",
583 		     val4);
584 		goto get_random;
585 	}
586 
587 	sc->atse_eth_addr[0] = atse_ethernet_option_bits[4];
588 	sc->atse_eth_addr[1] = atse_ethernet_option_bits[5];
589 	sc->atse_eth_addr[2] = atse_ethernet_option_bits[6];
590 	sc->atse_eth_addr[3] = atse_ethernet_option_bits[7];
591 	sc->atse_eth_addr[4] = atse_ethernet_option_bits[8];
592 	sc->atse_eth_addr[5] = atse_ethernet_option_bits[9];
593 
594 	/* Handle the factory default Ethernet address: 00:07:ed:ff:ed:15. */
595 	if (sc->atse_eth_addr[0] == 0x00 && sc->atse_eth_addr[1] == 0x07 &&
596 	    sc->atse_eth_addr[2] == 0xed && sc->atse_eth_addr[3] == 0xff &&
597 	    sc->atse_eth_addr[4] == 0xed && sc->atse_eth_addr[5] == 0x15) {
598 
599 		device_printf(sc->atse_dev, "Factory programmed Ethernet "
600 		    "hardware address blacklisted.  Falling back to random "
601 		    "address to avoid collisions.\n");
602 		device_printf(sc->atse_dev, "Please re-program your flash.\n");
603 		goto get_random;
604 	}
605 
606 	if (sc->atse_eth_addr[0] == 0x00 && sc->atse_eth_addr[1] == 0x00 &&
607 	    sc->atse_eth_addr[2] == 0x00 && sc->atse_eth_addr[3] == 0x00 &&
608 	    sc->atse_eth_addr[4] == 0x00 && sc->atse_eth_addr[5] == 0x00) {
609 		device_printf(sc->atse_dev, "All-zeros Ethernet hardware "
610 		    "address blacklisted.  Falling back to random address.\n");
611 		device_printf(sc->atse_dev, "Please re-program your flash.\n");
612 		goto get_random;
613 	}
614 
615 	if (ETHER_IS_MULTICAST(sc->atse_eth_addr)) {
616 		device_printf(sc->atse_dev, "Multicast Ethernet hardware "
617 		    "address blacklisted.  Falling back to random address.\n");
618 		device_printf(sc->atse_dev, "Please re-program your flash.\n");
619 		goto get_random;
620 	}
621 
622 	/*
623 	 * If we find an Altera-prefixed address ending in 0x0, adjust it
624 	 * by the device unit.  If not, and this is not the first Ethernet
625 	 * device, fall back to a random address.
626 	 */
627 	unit = device_get_unit(sc->atse_dev);
628 	if (unit == 0x00) {
629 		return (0);
630 	}
631 
632 	if (unit > 0x0f) {
633 		device_printf(sc->atse_dev, "We do not support Ethernet "
634 		    "addresses for more than 16 MACs. Falling back to "
635 		    "random hardware address.\n");
636 		goto get_random;
637 	}
638 	if ((sc->atse_eth_addr[0] & ~0x2) != 0 ||
639 	    sc->atse_eth_addr[1] != 0x07 || sc->atse_eth_addr[2] != 0xed ||
640 	    (sc->atse_eth_addr[5] & 0x0f) != 0x0) {
641 		device_printf(sc->atse_dev, "Ethernet address not meeting our "
642 		    "multi-MAC standards.  Falling back to random hardware "
643 		    "address.\n");
644 		goto get_random;
645 	}
646 	sc->atse_eth_addr[5] |= (unit & 0x0f);
647 
648 	return (0);
649 
650 get_random:
651 	/*
652 	 * Fall back to the random-address generation we also use in bridge(4).
653 	 */
654 	getcredhostid(curthread->td_ucred, &hostid);
655 	if (hostid == 0) {
656 		arc4rand(sc->atse_eth_addr, ETHER_ADDR_LEN, 1);
657 		sc->atse_eth_addr[0] &= ~1; /* Clear the multicast bit. */
658 		sc->atse_eth_addr[0] |= 2; /* set the LAA bit */
659 	} else {
660 		sc->atse_eth_addr[0] = 0x2;
661 		sc->atse_eth_addr[1] = (hostid >> 24)	& 0xff;
662 		sc->atse_eth_addr[2] = (hostid >> 16)	& 0xff;
663 		sc->atse_eth_addr[3] = (hostid >> 8 )	& 0xff;
664 		sc->atse_eth_addr[4] = hostid		& 0xff;
665 		sc->atse_eth_addr[5] = sc->atse_unit	& 0xff;
666 	}
667 
668 	return (0);
669 }
670 
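/*
 * Program the current MAC address into the register pairs selected by the
 * bitmask n: the primary MAC_0/MAC_1 pair and/or the four supplemental
 * SMAC pairs.
 */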
671 static int
672 atse_set_eth_address(struct atse_softc *sc, int n)
673 {
674 	uint32_t v0, v1;
675 
676 	v0 = (sc->atse_eth_addr[3] << 24) | (sc->atse_eth_addr[2] << 16) |
677 	    (sc->atse_eth_addr[1] << 8) | sc->atse_eth_addr[0];
678 	v1 = (sc->atse_eth_addr[5] << 8) | sc->atse_eth_addr[4];
679 
680 	if (n & ATSE_ETH_ADDR_DEF) {
681 		CSR_WRITE_4(sc, BASE_CFG_MAC_0, v0);
682 		CSR_WRITE_4(sc, BASE_CFG_MAC_1, v1);
683 	}
684 	if (n & ATSE_ETH_ADDR_SUPP1) {
685 		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_0_0, v0);
686 		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_0_1, v1);
687 	}
688 	if (n & ATSE_ETH_ADDR_SUPP2) {
689 		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_1_0, v0);
690 		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_1_1, v1);
691 	}
692 	if (n & ATSE_ETH_ADDR_SUPP3) {
693 		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_2_0, v0);
694 		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_2_1, v1);
695 	}
696 	if (n & ATSE_ETH_ADDR_SUPP4) {
697 		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_3_0, v0);
698 		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_3_1, v1);
699 	}
700 
701 	return (0);
702 }
703 
704 static int
705 atse_reset(struct atse_softc *sc)
706 {
707 	uint32_t val4, mask;
708 	uint16_t val;
709 	int i;
710 
711 	/* 1. External PHY Initialization using MDIO. */
712 	/*
713 	 * We select the right MDIO space in atse_attach() and let MII
714 	 * handle everything else.
715 	 */
716 
717 	/* 2. PCS Configuration Register Initialization. */
718 	/* a. Set auto negotiation link timer to 1.6ms for SGMII. */
719 	PCS_WRITE_2(sc, PCS_EXT_LINK_TIMER_0, 0x0D40);
720 	PCS_WRITE_2(sc, PCS_EXT_LINK_TIMER_1, 0x0003);
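	/*
	 * The value just written, 0x0003_0D40 == 200000, presumably counts
	 * ticks of the 8 ns (125 MHz) PCS clock, giving the 1.6 ms timer.
	 */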
721 
722 	/* b. Configure SGMII. */
723 	val = PCS_EXT_IF_MODE_SGMII_ENA|PCS_EXT_IF_MODE_USE_SGMII_AN;
724 	PCS_WRITE_2(sc, PCS_EXT_IF_MODE, val);
725 
726 	/* c. Enable auto negotiation. */
727 	/* Ignore Bits 6,8,13; should be set,set,unset. */
728 	val = PCS_READ_2(sc, PCS_CONTROL);
729 	val &= ~(PCS_CONTROL_ISOLATE|PCS_CONTROL_POWERDOWN);
730 	val &= ~PCS_CONTROL_LOOPBACK;		/* Make this a -link1 option? */
731 	val |= PCS_CONTROL_AUTO_NEGOTIATION_ENABLE;
732 	PCS_WRITE_2(sc, PCS_CONTROL, val);
733 
734 	/* d. PCS reset. */
735 	val = PCS_READ_2(sc, PCS_CONTROL);
736 	val |= PCS_CONTROL_RESET;
737 	PCS_WRITE_2(sc, PCS_CONTROL, val);
738 
739 	/* Wait for reset bit to clear; i=100 is excessive. */
740 	for (i = 0; i < 100; i++) {
741 		val = PCS_READ_2(sc, PCS_CONTROL);
742 		if ((val & PCS_CONTROL_RESET) == 0) {
743 			break;
744 		}
745 		DELAY(10);
746 	}
747 
748 	if ((val & PCS_CONTROL_RESET) != 0) {
749 		device_printf(sc->atse_dev, "PCS reset timed out.\n");
750 		return (ENXIO);
751 	}
752 
753 	/* 3. MAC Configuration Register Initialization. */
754 	/* a. Disable MAC transmit and receive datapath. */
755 	mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
756 	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
757 	val4 &= ~mask;
758 	/* Samples in the manual do have the SW_RESET bit set here, why? */
759 	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
760 	/* Wait for bits to be cleared; i=100 is excessive. */
761 	for (i = 0; i < 100; i++) {
762 		val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
763 		if ((val4 & mask) == 0) {
764 			break;
765 		}
766 		DELAY(10);
767 	}
768 	if ((val4 & mask) != 0) {
769 		device_printf(sc->atse_dev, "Disabling MAC TX/RX timed out.\n");
770 		return (ENXIO);
771 	}
772 	/* b. MAC FIFO configuration. */
773 	CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_EMPTY, FIFO_DEPTH_TX - 16);
774 	CSR_WRITE_4(sc, BASE_CFG_TX_ALMOST_FULL, 3);
775 	CSR_WRITE_4(sc, BASE_CFG_TX_ALMOST_EMPTY, 8);
776 	CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_EMPTY, FIFO_DEPTH_RX - 16);
777 	CSR_WRITE_4(sc, BASE_CFG_RX_ALMOST_FULL, 8);
778 	CSR_WRITE_4(sc, BASE_CFG_RX_ALMOST_EMPTY, 8);
779 #if 0
780 	CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_FULL, 16);
781 	CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_FULL, 16);
782 #else
783 	/* For store-and-forward mode, set this threshold to 0. */
784 	CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_FULL, 0);
785 	CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_FULL, 0);
786 #endif
787 	/* c. MAC address configuration. */
788 	/* Also initialize supplementary addresses to our primary one. */
789 	/* XXX-BZ FreeBSD really needs to grow an API for using these. */
790 	atse_get_eth_address(sc);
791 	atse_set_eth_address(sc, ATSE_ETH_ADDR_ALL);
792 
793 	/* d. MAC function configuration. */
794 	CSR_WRITE_4(sc, BASE_CFG_FRM_LENGTH, 1518);	/* Default. */
795 	CSR_WRITE_4(sc, BASE_CFG_TX_IPG_LENGTH, 12);
796 	CSR_WRITE_4(sc, BASE_CFG_PAUSE_QUANT, 0xFFFF);
797 
798 	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
799 	/*
800 	 * If 1000BASE-X/SGMII PCS is initialized, set the ETH_SPEED (bit 3)
801 	 * and ENA_10 (bit 25) in command_config register to 0.  If half duplex
802 	 * is reported in the PHY/PCS status register, set the HD_ENA (bit 10)
803 	 * to 1 in command_config register.
804 	 * BZ: We shoot for 1000 instead.
805 	 */
806 #if 0
807 	val4 |= BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
808 #else
809 	val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
810 #endif
811 	val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
812 #if 0
813 	/*
814 	 * We do not want to set this; otherwise we could not send raw
815 	 * Ethernet frames with arbitrary source addresses for research.
816 	 * By default FreeBSD will use the right ether source address.
817 	 */
818 	val4 |= BASE_CFG_COMMAND_CONFIG_TX_ADDR_INS;
819 #endif
820 	val4 |= BASE_CFG_COMMAND_CONFIG_PAD_EN;
821 	val4 &= ~BASE_CFG_COMMAND_CONFIG_CRC_FWD;
822 #if 0
823 	val4 |= BASE_CFG_COMMAND_CONFIG_CNTL_FRM_ENA;
824 #endif
825 #if 1
826 	val4 |= BASE_CFG_COMMAND_CONFIG_RX_ERR_DISC;
827 #endif
828 	val4 &= ~BASE_CFG_COMMAND_CONFIG_LOOP_ENA;	/* link0? */
829 	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
830 
831 	/*
832 	 * On TX make sure we do not enable the 16-bit shift (32-bit
833 	 * alignment); FreeBSD cannot cope with the additional padding
834 	 * (though we should!?).  Also make sure the MAC appends the CRC.
835 	 * On RX we do enable the shift and strip it again with m_adj().
835 	 */
836 	val4 = CSR_READ_4(sc, TX_CMD_STAT);
837 	val4 &= ~(TX_CMD_STAT_OMIT_CRC|TX_CMD_STAT_TX_SHIFT16);
838 	CSR_WRITE_4(sc, TX_CMD_STAT, val4);
839 
840 	val4 = CSR_READ_4(sc, RX_CMD_STAT);
842 	val4 |= RX_CMD_STAT_RX_SHIFT16;
843 	CSR_WRITE_4(sc, RX_CMD_STAT, val4);
844 
845 	/* e. Reset MAC. */
846 	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
847 	val4 |= BASE_CFG_COMMAND_CONFIG_SW_RESET;
848 	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
849 	/* Wait for bits to be cleared; i=100 is excessive. */
850 	for (i = 0; i < 100; i++) {
851 		val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
852 		if ((val4 & BASE_CFG_COMMAND_CONFIG_SW_RESET) == 0) {
853 			break;
854 		}
855 		DELAY(10);
856 	}
857 	if ((val4 & BASE_CFG_COMMAND_CONFIG_SW_RESET) != 0) {
858 		device_printf(sc->atse_dev, "MAC reset timed out.\n");
859 		return (ENXIO);
860 	}
861 
862 	/* f. Enable MAC transmit and receive datapath. */
863 	mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
864 	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
865 	val4 |= mask;
866 	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
867 	/* Wait for the bits to be set; i=100 is excessive. */
868 	for (i = 0; i < 100; i++) {
869 		val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
870 		if ((val4 & mask) == mask) {
871 			break;
872 		}
873 		DELAY(10);
874 	}
875 	if ((val4 & mask) != mask) {
876 		device_printf(sc->atse_dev, "Enabling MAC TX/RX timed out.\n");
877 		return (ENXIO);
878 	}
879 
880 	return (0);
881 }
882 
883 static void
884 atse_init_locked(struct atse_softc *sc)
885 {
886 	struct ifnet *ifp;
887 	struct mii_data *mii;
888 	uint8_t *eaddr;
889 
890 	ATSE_LOCK_ASSERT(sc);
891 	ifp = sc->atse_ifp;
892 
893 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
894 		return;
895 	}
896 
897 	/*
898 	 * Must update the ether address if it changed.  Given we do not
899 	 * handle this in atse_ioctl() but it is in the general framework,
900 	 * just always do it here before atse_reset().
901 	 */
902 	eaddr = IF_LLADDR(sc->atse_ifp);
903 	bcopy(eaddr, &sc->atse_eth_addr, ETHER_ADDR_LEN);
904 
905 	/* Make things grind to a halt, clean up, ... */
906 	atse_stop_locked(sc);
907 
908 	atse_reset(sc);
909 
910 	/* ... and fire up the engine again. */
911 	atse_rxfilter_locked(sc);
912 
913 	sc->atse_flags &= ATSE_FLAGS_LINK;	/* Preserve. */
914 
915 	mii = device_get_softc(sc->atse_miibus);
916 
917 	sc->atse_flags &= ~ATSE_FLAGS_LINK;
918 	mii_mediachg(mii);
919 
920 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
921 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
922 
923 	callout_reset(&sc->atse_tick, hz, atse_tick, sc);
924 }
925 
926 static void
927 atse_init(void *xsc)
928 {
929 	struct atse_softc *sc;
930 
931 	/*
932 	 * XXXRW: There is some argument that we should immediately do RX
933 	 * processing after enabling interrupts, or one may not fire if there
934 	 * are buffered packets.
935 	 */
936 	sc = (struct atse_softc *)xsc;
937 	ATSE_LOCK(sc);
938 	atse_init_locked(sc);
939 	ATSE_UNLOCK(sc);
940 }
941 
942 static int
943 atse_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
944 {
945 	struct atse_softc *sc;
946 	struct ifreq *ifr;
947 	int error, mask;
948 
949 	error = 0;
950 	sc = ifp->if_softc;
951 	ifr = (struct ifreq *)data;
952 
953 	switch (command) {
954 	case SIOCSIFFLAGS:
955 		ATSE_LOCK(sc);
956 		if (ifp->if_flags & IFF_UP) {
957 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
958 			    ((ifp->if_flags ^ sc->atse_if_flags) &
959 			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
960 				atse_rxfilter_locked(sc);
961 			else
962 				atse_init_locked(sc);
963 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
964 			atse_stop_locked(sc);
965 		sc->atse_if_flags = ifp->if_flags;
966 		ATSE_UNLOCK(sc);
967 		break;
968 	case SIOCSIFCAP:
969 		ATSE_LOCK(sc);
970 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
971 		ATSE_UNLOCK(sc);
972 		break;
973 	case SIOCADDMULTI:
974 	case SIOCDELMULTI:
975 		ATSE_LOCK(sc);
976 		atse_rxfilter_locked(sc);
977 		ATSE_UNLOCK(sc);
978 		break;
979 	case SIOCGIFMEDIA:
980 	case SIOCSIFMEDIA:
981 	{
982 		struct mii_data *mii;
983 		struct ifreq *ifr;
984 
985 		mii = device_get_softc(sc->atse_miibus);
986 		ifr = (struct ifreq *)data;
987 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
988 		break;
989 	}
990 	default:
991 		error = ether_ioctl(ifp, command, data);
992 		break;
993 	}
994 
995 	return (error);
996 }
997 
998 static void
999 atse_tick(void *xsc)
1000 {
1001 	struct atse_softc *sc;
1002 	struct mii_data *mii;
1003 	struct ifnet *ifp;
1004 
1005 	sc = (struct atse_softc *)xsc;
1006 	ATSE_LOCK_ASSERT(sc);
1007 	ifp = sc->atse_ifp;
1008 
1009 	mii = device_get_softc(sc->atse_miibus);
1010 	mii_tick(mii);
1011 	if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
1012 		atse_miibus_statchg(sc->atse_dev);
1013 	}
1014 
1015 	callout_reset(&sc->atse_tick, hz, atse_tick, sc);
1016 }
1017 
1018 /*
1019  * Set media options.
1020  */
1021 static int
1022 atse_ifmedia_upd(struct ifnet *ifp)
1023 {
1024 	struct atse_softc *sc;
1025 	struct mii_data *mii;
1026 	struct mii_softc *miisc;
1027 	int error;
1028 
1029 	sc = ifp->if_softc;
1030 
1031 	ATSE_LOCK(sc);
1032 	mii = device_get_softc(sc->atse_miibus);
1033 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
1034 		PHY_RESET(miisc);
1035 	}
1036 	error = mii_mediachg(mii);
1037 	ATSE_UNLOCK(sc);
1038 
1039 	return (error);
1040 }
1041 
1042 /*
1043  * Report current media status.
1044  */
1045 static void
1046 atse_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1047 {
1048 	struct atse_softc *sc;
1049 	struct mii_data *mii;
1050 
1051 	sc = ifp->if_softc;
1052 
1053 	ATSE_LOCK(sc);
1054 	mii = device_get_softc(sc->atse_miibus);
1055 	mii_pollstat(mii);
1056 	ifmr->ifm_active = mii->mii_media_active;
1057 	ifmr->ifm_status = mii->mii_media_status;
1058 	ATSE_UNLOCK(sc);
1059 }
1060 
1061 static struct atse_mac_stats_regs {
1062 	const char *name;
1063 	const char *descr;	/* Mostly copied from Altera datasheet. */
1064 } atse_mac_stats_regs[] = {
1065 	[0x1a] =
1066 	{ "aFramesTransmittedOK",
1067 	    "The number of frames that are successfully transmitted including "
1068 	    "the pause frames." },
1069 	{ "aFramesReceivedOK",
1070 	    "The number of frames that are successfully received including the "
1071 	    "pause frames." },
1072 	{ "aFrameCheckSequenceErrors",
1073 	    "The number of receive frames with CRC error." },
1074 	{ "aAlignmentErrors",
1075 	    "The number of receive frames with alignment error." },
1076 	{ "aOctetsTransmittedOK",
1077 	    "The lower 32 bits of the number of data and padding octets that "
1078 	    "are successfully transmitted." },
1079 	{ "aOctetsReceivedOK",
1080 	    "The lower 32 bits of the number of data and padding octets that "
1081 	    "are successfully received." },
1082 	{ "aTxPAUSEMACCtrlFrames",
1083 	    "The number of pause frames transmitted." },
1084 	{ "aRxPAUSEMACCtrlFrames",
1085 	    "The number of pause frames received." },
1086 	{ "ifInErrors",
1087 	    "The number of errored frames received." },
1088 	{ "ifOutErrors",
1089 	    "The number of transmit frames with either a FIFO overflow error, "
1090 	    "a FIFO underflow error, or an error defined by the user "
1091 	    "application." },
1092 	{ "ifInUcastPkts",
1093 	    "The number of valid unicast frames received." },
1094 	{ "ifInMulticastPkts",
1095 	    "The number of valid multicast frames received. The count does "
1096 	    "not include pause frames." },
1097 	{ "ifInBroadcastPkts",
1098 	    "The number of valid broadcast frames received." },
1099 	{ "ifOutDiscards",
1100 	    "This statistics counter is not in use.  The MAC function does not "
1101 	    "discard frames that are written to the FIFO buffer by the user "
1102 	    "application." },
1103 	{ "ifOutUcastPkts",
1104 	    "The number of valid unicast frames transmitted." },
1105 	{ "ifOutMulticastPkts",
1106 	    "The number of valid multicast frames transmitted, excluding pause "
1107 	    "frames." },
1108 	{ "ifOutBroadcastPkts",
1109 	    "The number of valid broadcast frames transmitted." },
1110 	{ "etherStatsDropEvents",
1111 	    "The number of frames that are dropped due to MAC internal errors "
1112 	    "when FIFO buffer overflow persists." },
1113 	{ "etherStatsOctets",
1114 	    "The lower 32 bits of the total number of octets received. This "
1115 	    "count includes both good and errored frames." },
1116 	{ "etherStatsPkts",
1117 	    "The total number of good and errored frames received." },
1118 	{ "etherStatsUndersizePkts",
1119 	    "The number of frames received with length less than 64 bytes. "
1120 	    "This count does not include errored frames." },
1121 	{ "etherStatsOversizePkts",
1122 	    "The number of frames received that are longer than the value "
1123 	    "configured in the frm_length register. This count does not "
1124 	    "include errored frames." },
1125 	{ "etherStatsPkts64Octets",
1126 	    "The number of 64-byte frames received. This count includes good "
1127 	    "and errored frames." },
1128 	{ "etherStatsPkts65to127Octets",
1129 	    "The number of received good and errored frames between the length "
1130 	    "of 65 and 127 bytes." },
1131 	{ "etherStatsPkts128to255Octets",
1132 	    "The number of received good and errored frames between the length "
1133 	    "of 128 and 255 bytes." },
1134 	{ "etherStatsPkts256to511Octets",
1135 	    "The number of received good and errored frames between the length "
1136 	    "of 256 and 511 bytes." },
1137 	{ "etherStatsPkts512to1023Octets",
1138 	    "The number of received good and errored frames between the length "
1139 	    "of 512 and 1023 bytes." },
1140 	{ "etherStatsPkts1024to1518Octets",
1141 	    "The number of received good and errored frames between the length "
1142 	    "of 1024 and 1518 bytes." },
1143 	{ "etherStatsPkts1519toXOctets",
1144 	    "The number of received good and errored frames between the length "
1145 	    "of 1519 and the maximum frame length configured in the frm_length "
1146 	    "register." },
1147 	{ "etherStatsJabbers",
1148 	    "Too long frames with CRC error." },
1149 	{ "etherStatsFragments",
1150 	    "Too short frames with CRC error." },
1151 	/* 0x39 unused, 0x3a/b non-stats. */
1152 	[0x3c] =
1153 	/* Extended Statistics Counters */
1154 	{ "msb_aOctetsTransmittedOK",
1155 	    "Upper 32 bits of the number of data and padding octets that are "
1156 	    "successfully transmitted." },
1157 	{ "msb_aOctetsReceivedOK",
1158 	    "Upper 32 bits of the number of data and padding octets that are "
1159 	    "successfully received." },
1160 	{ "msb_etherStatsOctets",
1161 	    "Upper 32 bits of the total number of octets received. This count "
1162 	    "includes both good and errored frames." }
1163 };
1164 
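/*
 * The index into atse_mac_stats_regs[] doubles as the dword offset of the
 * corresponding statistics register; it is passed as arg2 and read back via
 * CSR_READ_4() below.
 */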
1165 static int
1166 sysctl_atse_mac_stats_proc(SYSCTL_HANDLER_ARGS)
1167 {
1168 	struct atse_softc *sc;
1169 	int error, offset, s;
1170 
1171 	sc = arg1;
1172 	offset = arg2;
1173 
1174 	s = CSR_READ_4(sc, offset);
1175 	error = sysctl_handle_int(oidp, &s, 0, req);
1176 	if (error || !req->newptr) {
1177 		return (error);
1178 	}
1179 
1180 	return (0);
1181 }
1182 
1183 static struct atse_rx_err_stats_regs {
1184 	const char *name;
1185 	const char *descr;
1186 } atse_rx_err_stats_regs[] = {
1187 
1188 #define	ATSE_RX_ERR_FIFO_THRES_EOP	0 /* FIFO threshold reached, on EOP. */
1189 #define	ATSE_RX_ERR_ELEN		1 /* Frame/payload length not valid. */
1190 #define	ATSE_RX_ERR_CRC32		2 /* CRC-32 error. */
1191 #define	ATSE_RX_ERR_FIFO_THRES_TRUNC	3 /* FIFO thresh., truncated frame. */
1192 #define	ATSE_RX_ERR_4			4 /* ? */
1193 #define	ATSE_RX_ERR_5			5 /* / */
1194 
1195 	{ "rx_err_fifo_thres_eop",
1196 	    "FIFO threshold reached, reported on EOP." },
1197 	{ "rx_err_fifo_elen",
1198 	    "Frame or payload length not valid." },
1199 	{ "rx_err_fifo_crc32",
1200 	    "CRC-32 error." },
1201 	{ "rx_err_fifo_thres_trunc",
1202 	    "FIFO threshold reached, truncated frame." },
1203 	{ "rx_err_4",
1204 	    "?" },
1205 	{ "rx_err_5",
1206 	    "?" },
1207 };
1208 
1209 static int
1210 sysctl_atse_rx_err_stats_proc(SYSCTL_HANDLER_ARGS)
1211 {
1212 	struct atse_softc *sc;
1213 	int error, offset, s;
1214 
1215 	sc = arg1;
1216 	offset = arg2;
1217 
1218 	s = sc->atse_rx_err[offset];
1219 	error = sysctl_handle_int(oidp, &s, 0, req);
1220 	if (error || !req->newptr) {
1221 		return (error);
1222 	}
1223 
1224 	return (0);
1225 }
1226 
1227 static void
1228 atse_sysctl_stats_attach(device_t dev)
1229 {
1230 	struct sysctl_ctx_list *sctx;
1231 	struct sysctl_oid *soid;
1232 	struct atse_softc *sc;
1233 	int i;
1234 
1235 	sc = device_get_softc(dev);
1236 	sctx = device_get_sysctl_ctx(dev);
1237 	soid = device_get_sysctl_tree(dev);
1238 
1239 	/* MAC statistics. */
1240 	for (i = 0; i < nitems(atse_mac_stats_regs); i++) {
1241 		if (atse_mac_stats_regs[i].name == NULL ||
1242 		    atse_mac_stats_regs[i].descr == NULL) {
1243 			continue;
1244 		}
1245 
1246 		SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
1247 		    atse_mac_stats_regs[i].name, CTLTYPE_UINT|CTLFLAG_RD,
1248 		    sc, i, sysctl_atse_mac_stats_proc, "IU",
1249 		    atse_mac_stats_regs[i].descr);
1250 	}
1251 
1252 	/* rx_err[]. */
1253 	for (i = 0; i < ATSE_RX_ERR_MAX; i++) {
1254 		if (atse_rx_err_stats_regs[i].name == NULL ||
1255 		    atse_rx_err_stats_regs[i].descr == NULL) {
1256 			continue;
1257 		}
1258 
1259 		SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
1260 		    atse_rx_err_stats_regs[i].name, CTLTYPE_UINT|CTLFLAG_RD,
1261 		    sc, i, sysctl_atse_rx_err_stats_proc, "IU",
1262 		    atse_rx_err_stats_regs[i].descr);
1263 	}
1264 }
1265 
1266 /*
1267  * Generic device handling routines.
1268  */
1269 int
1270 atse_attach(device_t dev)
1271 {
1272 	struct atse_softc *sc;
1273 	struct ifnet *ifp;
1274 	uint32_t caps;
1275 	int error;
1276 
1277 	sc = device_get_softc(dev);
1278 	sc->dev = dev;
1279 
1280 	/* Get xDMA controller */
1281 	sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
1282 	if (sc->xdma_tx == NULL) {
1283 		device_printf(dev, "Can't find DMA controller.\n");
1284 		return (ENXIO);
1285 	}
1286 
1287 	/*
1288 	 * Only the final (EOP) write can be less than the "symbols per beat"
1289 	 * value, so we have to defragment the mbuf chain.
1290 	 * See Chapter 15, On-Chip FIFO Memory Core, in the Embedded
1291 	 * Peripherals IP User Guide.
1292 	 */
1293 	caps = XCHAN_CAP_BUSDMA_NOSEG;
1294 
1295 	/* Alloc xDMA virtual channel. */
1296 	sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, caps);
1297 	if (sc->xchan_tx == NULL) {
1298 		device_printf(dev, "Can't alloc virtual DMA channel.\n");
1299 		return (ENXIO);
1300 	}
1301 
1302 	/* Setup interrupt handler. */
1303 	error = xdma_setup_intr(sc->xchan_tx, atse_xdma_tx_intr, sc, &sc->ih_tx);
1304 	if (error) {
1305 		device_printf(sc->dev,
1306 		    "Can't setup xDMA interrupt handler.\n");
1307 		return (ENXIO);
1308 	}
1309 
1310 	xdma_prep_sg(sc->xchan_tx,
1311 	    TX_QUEUE_SIZE,	/* xchan requests queue size */
1312 	    MCLBYTES,	/* maxsegsize */
1313 	    8,		/* maxnsegs */
1314 	    16,		/* alignment */
1315 	    0,		/* boundary */
1316 	    BUS_SPACE_MAXADDR_32BIT,
1317 	    BUS_SPACE_MAXADDR);
1318 
1319 	/* Get RX xDMA controller */
1320 	sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
1321 	if (sc->xdma_rx == NULL) {
1322 		device_printf(dev, "Can't find DMA controller.\n");
1323 		return (ENXIO);
1324 	}
1325 
1326 	/* Alloc xDMA virtual channel. */
1327 	sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, caps);
1328 	if (sc->xchan_rx == NULL) {
1329 		device_printf(dev, "Can't alloc virtual DMA channel.\n");
1330 		return (ENXIO);
1331 	}
1332 
1333 	/* Setup interrupt handler. */
1334 	error = xdma_setup_intr(sc->xchan_rx, atse_xdma_rx_intr, sc, &sc->ih_rx);
1335 	if (error) {
1336 		device_printf(sc->dev,
1337 		    "Can't setup xDMA interrupt handler.\n");
1338 		return (ENXIO);
1339 	}
1340 
1341 	xdma_prep_sg(sc->xchan_rx,
1342 	    RX_QUEUE_SIZE,	/* xchan requests queue size */
1343 	    MCLBYTES,		/* maxsegsize */
1344 	    1,			/* maxnsegs */
1345 	    16,			/* alignment */
1346 	    0,			/* boundary */
1347 	    BUS_SPACE_MAXADDR_32BIT,
1348 	    BUS_SPACE_MAXADDR);
1349 
1350 	mtx_init(&sc->br_mtx, "buf ring mtx", NULL, MTX_DEF);
1351 	sc->br = buf_ring_alloc(BUFRING_SIZE, M_DEVBUF,
1352 	    M_NOWAIT, &sc->br_mtx);
1353 	if (sc->br == NULL) {
1354 		return (ENOMEM);
1355 	}
1356 
1357 	atse_ethernet_option_bits_read(dev);
1358 
1359 	mtx_init(&sc->atse_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1360 	    MTX_DEF);
1361 
1362 	callout_init_mtx(&sc->atse_tick, &sc->atse_mtx, 0);
1363 
1364 	/*
1365 	 * We only support a single PHY with this driver currently.  The
1366 	 * defaults would be right, with BASE_CFG_MDIO_ADDR0 pointing to the
1367 	 * 1st PHY address (0), except that BMCR0 is always the PCS mapping,
1368 	 * so we always use BMCR1.  See Table 5-1, 0xA0-0xBF.
1369 	 */
1370 #if 0	/* Always PCS. */
1371 	sc->atse_bmcr0 = MDIO_0_START;
1372 	CSR_WRITE_4(sc, BASE_CFG_MDIO_ADDR0, 0x00);
1373 #endif
1374 	/* Always use matching PHY for atse[0..]. */
1375 	sc->atse_phy_addr = device_get_unit(dev);
1376 	sc->atse_bmcr1 = MDIO_1_START;
1377 	CSR_WRITE_4(sc, BASE_CFG_MDIO_ADDR1, sc->atse_phy_addr);
1378 
1379 	/* Reset the adapter. */
1380 	atse_reset(sc);
1381 
1382 	/* Setup interface. */
1383 	ifp = sc->atse_ifp = if_alloc(IFT_ETHER);
1384 	if (ifp == NULL) {
1385 		device_printf(dev, "if_alloc() failed\n");
1386 		error = ENOSPC;
1387 		goto err;
1388 	}
1389 	ifp->if_softc = sc;
1390 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1391 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1392 	ifp->if_ioctl = atse_ioctl;
1393 	ifp->if_transmit = atse_transmit;
1394 	ifp->if_qflush = atse_qflush;
1395 	ifp->if_init = atse_init;
1396 	IFQ_SET_MAXLEN(&ifp->if_snd, ATSE_TX_LIST_CNT - 1);
1397 	ifp->if_snd.ifq_drv_maxlen = ATSE_TX_LIST_CNT - 1;
1398 	IFQ_SET_READY(&ifp->if_snd);
1399 
1400 	/* MII setup. */
1401 	error = mii_attach(dev, &sc->atse_miibus, ifp, atse_ifmedia_upd,
1402 	    atse_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
1403 	if (error != 0) {
1404 		device_printf(dev, "attaching PHY failed: %d\n", error);
1405 		goto err;
1406 	}
1407 
1408 	/* Call the media-independent attach routine. */
1409 	ether_ifattach(ifp, sc->atse_eth_addr);
1410 
1411 	/* Tell the upper layer(s) about vlan mtu support. */
1412 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1413 	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1414 	ifp->if_capenable = ifp->if_capabilities;
1415 
1416 err:
1417 	if (error != 0) {
1418 		atse_detach(dev);
1419 	}
1420 
1421 	if (error == 0) {
1422 		atse_sysctl_stats_attach(dev);
1423 		atse_rx_enqueue(sc, NUM_RX_MBUF);
1424 		xdma_queue_submit(sc->xchan_rx);
1425 	}
1427 
1428 	return (error);
1429 }
1430 
1431 static int
1432 atse_detach(device_t dev)
1433 {
1434 	struct atse_softc *sc;
1435 	struct ifnet *ifp;
1436 
1437 	sc = device_get_softc(dev);
1438 	KASSERT(mtx_initialized(&sc->atse_mtx), ("%s: mutex not initialized",
1439 	    device_get_nameunit(dev)));
1440 	ifp = sc->atse_ifp;
1441 
1442 	/* Only cleanup if attach succeeded. */
1443 	if (device_is_attached(dev)) {
1444 		ATSE_LOCK(sc);
1445 		atse_stop_locked(sc);
1446 		ATSE_UNLOCK(sc);
1447 		callout_drain(&sc->atse_tick);
1448 		ether_ifdetach(ifp);
1449 	}
1450 	if (sc->atse_miibus != NULL) {
1451 		device_delete_child(dev, sc->atse_miibus);
1452 	}
1453 
1454 	if (ifp != NULL) {
1455 		if_free(ifp);
1456 	}
1457 
1458 	mtx_destroy(&sc->atse_mtx);
1459 
1460 	return (0);
1461 }
1462 
1463 /* Shared between nexus and fdt implementation. */
1464 void
1465 atse_detach_resources(device_t dev)
1466 {
1467 	struct atse_softc *sc;
1468 
1469 	sc = device_get_softc(dev);
1470 
1471 	if (sc->atse_mem_res != NULL) {
1472 		bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_mem_rid,
1473 		    sc->atse_mem_res);
1474 		sc->atse_mem_res = NULL;
1475 	}
1476 }
1477 
1478 int
1479 atse_detach_dev(device_t dev)
1480 {
1481 	int error;
1482 
1483 	error = atse_detach(dev);
1484 	if (error) {
1485 		/* We are basically in undefined state now. */
1486 		device_printf(dev, "atse_detach() failed: %d\n", error);
1487 		return (error);
1488 	}
1489 
1490 	atse_detach_resources(dev);
1491 
1492 	return (0);
1493 }
1494 
1495 int
1496 atse_miibus_readreg(device_t dev, int phy, int reg)
1497 {
1498 	struct atse_softc *sc;
1499 	int val;
1500 
1501 	sc = device_get_softc(dev);
1502 
1503 	/*
1504 	 * We currently do not support re-mapping of MDIO space on-the-fly
1505 	 * but de-facto hard-code the phy#.
1506 	 */
1507 	if (phy != sc->atse_phy_addr) {
1508 		return (0);
1509 	}
1510 
1511 	val = PHY_READ_2(sc, reg);
1512 
1513 	return (val);
1514 }
1515 
1516 int
1517 atse_miibus_writereg(device_t dev, int phy, int reg, int data)
1518 {
1519 	struct atse_softc *sc;
1520 
1521 	sc = device_get_softc(dev);
1522 
1523 	/*
1524 	 * We currently do not support re-mapping of MDIO space on-the-fly
1525 	 * but de-facto hard-code the phy#.
1526 	 */
1527 	if (phy != sc->atse_phy_addr) {
1528 		return (0);
1529 	}
1530 
1531 	PHY_WRITE_2(sc, reg, data);
1532 	return (0);
1533 }
1534 
1535 void
1536 atse_miibus_statchg(device_t dev)
1537 {
1538 	struct atse_softc *sc;
1539 	struct mii_data *mii;
1540 	struct ifnet *ifp;
1541 	uint32_t val4;
1542 
1543 	sc = device_get_softc(dev);
1544 	ATSE_LOCK_ASSERT(sc);
1545 
1546 	mii = device_get_softc(sc->atse_miibus);
1547 	ifp = sc->atse_ifp;
1548 	if (mii == NULL || ifp == NULL ||
1549 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1550 		return;
1551 	}
1552 
1553 	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
1554 
1555 	/* Assume no link. */
1556 	sc->atse_flags &= ~ATSE_FLAGS_LINK;
1557 
1558 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
1559 	    (IFM_ACTIVE | IFM_AVALID)) {
1560 
1561 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1562 		case IFM_10_T:
1563 			val4 |= BASE_CFG_COMMAND_CONFIG_ENA_10;
1564 			val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
1565 			sc->atse_flags |= ATSE_FLAGS_LINK;
1566 			break;
1567 		case IFM_100_TX:
1568 			val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
1569 			val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
1570 			sc->atse_flags |= ATSE_FLAGS_LINK;
1571 			break;
1572 		case IFM_1000_T:
1573 			val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
1574 			val4 |= BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
1575 			sc->atse_flags |= ATSE_FLAGS_LINK;
1576 			break;
1577 		default:
1578 			break;
1579 		}
1580 	}
1581 
1582 	if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
1583 		/* Need to stop the MAC? */
1584 		return;
1585 	}
1586 
1587 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1588 		val4 &= ~BASE_CFG_COMMAND_CONFIG_HD_ENA;
1589 	} else {
1590 		val4 |= BASE_CFG_COMMAND_CONFIG_HD_ENA;
1591 	}
1592 
1593 	/* flow control? */
1594 
1595 	/* Make sure the MAC is activated. */
1596 	val4 |= BASE_CFG_COMMAND_CONFIG_TX_ENA;
1597 	val4 |= BASE_CFG_COMMAND_CONFIG_RX_ENA;
1598 
1599 	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
1600 }
1601 
1602 MODULE_DEPEND(atse, ether, 1, 1, 1);
1603 MODULE_DEPEND(atse, miibus, 1, 1, 1);
1604