1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2012, 2013 Bjoern A. Zeeb
5 * Copyright (c) 2014 Robert N. M. Watson
6 * Copyright (c) 2016-2017 Ruslan Bukin <br@bsdpad.com>
7 * All rights reserved.
8 *
9 * This software was developed by SRI International and the University of
10 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-11-C-0249)
11 * ("MRC2"), as part of the DARPA MRC research programme.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34 /*
35 * Altera Triple-Speed Ethernet MegaCore, Function User Guide
36 * UG-01008-3.0, Software Version: 12.0, June 2012.
37 * Available at the time of writing at:
38 * http://www.altera.com/literature/ug/ug_ethernet.pdf
39 *
* We are using a Marvell E1111 (Alaska) PHY on the DE4. See mii/e1000phy.c.
41 */
42 /*
43 * XXX-BZ NOTES:
44 * - ifOutBroadcastPkts are only counted if both ether dst and src are all-1s;
* this seems to be an IP core bug: they count ether broadcasts as
* multicast. Is this still the case?
47 * - figure out why the TX FIFO fill status and intr did not work as expected.
48 * - test 100Mbit/s and 10Mbit/s
49 * - blacklist the one special factory programmed ethernet address (for now
50 * hardcoded, later from loader?)
51 * - resolve all XXX, left as reminders to shake out details later
52 * - Jumbo frame support
53 */
54
55 #include <sys/cdefs.h>
56 #include "opt_device_polling.h"
57
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/kernel.h>
61 #include <sys/bus.h>
62 #include <sys/endian.h>
63 #include <sys/jail.h>
64 #include <sys/lock.h>
65 #include <sys/module.h>
66 #include <sys/mutex.h>
67 #include <sys/proc.h>
68 #include <sys/socket.h>
69 #include <sys/sockio.h>
70 #include <sys/types.h>
71
72 #include <net/ethernet.h>
73 #include <net/if.h>
74 #include <net/if_var.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
77 #include <net/if_types.h>
78 #include <net/if_vlan_var.h>
79
80 #include <net/bpf.h>
81
82 #include <machine/bus.h>
83 #include <machine/resource.h>
84 #include <sys/rman.h>
85
86 #include <dev/mii/mii.h>
87 #include <dev/mii/miivar.h>
88
89 #include <dev/altera/atse/if_atsereg.h>
90 #include <dev/xdma/xdma.h>
91
92 #define RX_QUEUE_SIZE 4096
93 #define TX_QUEUE_SIZE 4096
94 #define NUM_RX_MBUF 512
95 #define BUFRING_SIZE 8192
96
97 #include <machine/cache.h>
98
/* XXX once we do parallel attach, we will need a global lock for this. */
100 #define ATSE_ETHERNET_OPTION_BITS_UNDEF 0
101 #define ATSE_ETHERNET_OPTION_BITS_READ 1
102 static int atse_ethernet_option_bits_flag = ATSE_ETHERNET_OPTION_BITS_UNDEF;
103 static uint8_t atse_ethernet_option_bits[ALTERA_ETHERNET_OPTION_BITS_LEN];
104
105 /*
106 * Softc and critical resource locking.
107 */
108 #define ATSE_LOCK(_sc) mtx_lock(&(_sc)->atse_mtx)
109 #define ATSE_UNLOCK(_sc) mtx_unlock(&(_sc)->atse_mtx)
110 #define ATSE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->atse_mtx, MA_OWNED)
111
112 #define ATSE_DEBUG
113 #undef ATSE_DEBUG
114
115 #ifdef ATSE_DEBUG
116 #define DPRINTF(format, ...) printf(format, __VA_ARGS__)
117 #else
118 #define DPRINTF(format, ...)
119 #endif
120
121 /*
122 * Register space access macros.
123 */
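/*
 * The CSR argument is a 32-bit word index, so the byte offset passed to
 * bus_read_4()/bus_write_4() is reg * 4; values are converted to and from
 * little-endian on every access.
 */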
124 static inline void
csr_write_4(struct atse_softc *sc, uint32_t reg, uint32_t val4,
126 const char *f, const int l)
127 {
128
129 val4 = htole32(val4);
130 DPRINTF("[%s:%d] CSR W %s 0x%08x (0x%08x) = 0x%08x\n", f, l,
131 "atse_mem_res", reg, reg * 4, val4);
132 bus_write_4(sc->atse_mem_res, reg * 4, val4);
133 }
134
135 static inline uint32_t
csr_read_4(struct atse_softc *sc, uint32_t reg, const char *f, const int l)
137 {
138 uint32_t val4;
139
140 val4 = le32toh(bus_read_4(sc->atse_mem_res, reg * 4));
141 DPRINTF("[%s:%d] CSR R %s 0x%08x (0x%08x) = 0x%08x\n", f, l,
142 "atse_mem_res", reg, reg * 4, val4);
143
144 return (val4);
145 }
146
147 /*
* See page 5-2: these are all dword offsets, and the most significant 16 bits
* must be zero on write and are ignored on read.
150 */
151 static inline void
pxx_write_2(struct atse_softc *sc, bus_addr_t bmcr, uint32_t reg, uint16_t val,
153 const char *f, const int l, const char *s)
154 {
155 uint32_t val4;
156
157 val4 = htole32(val & 0x0000ffff);
158 DPRINTF("[%s:%d] %s W %s 0x%08x (0x%08jx) = 0x%08x\n", f, l, s,
159 "atse_mem_res", reg, (bmcr + reg) * 4, val4);
160 bus_write_4(sc->atse_mem_res, (bmcr + reg) * 4, val4);
161 }
162
163 static inline uint16_t
pxx_read_2(struct atse_softc *sc, bus_addr_t bmcr, uint32_t reg, const char *f,
165 const int l, const char *s)
166 {
167 uint32_t val4;
168 uint16_t val;
169
170 val4 = bus_read_4(sc->atse_mem_res, (bmcr + reg) * 4);
171 val = le32toh(val4) & 0x0000ffff;
172 DPRINTF("[%s:%d] %s R %s 0x%08x (0x%08jx) = 0x%04x\n", f, l, s,
173 "atse_mem_res", reg, (bmcr + reg) * 4, val);
174
175 return (val);
176 }
177
178 #define CSR_WRITE_4(sc, reg, val) \
179 csr_write_4((sc), (reg), (val), __func__, __LINE__)
180 #define CSR_READ_4(sc, reg) \
181 csr_read_4((sc), (reg), __func__, __LINE__)
182 #define PCS_WRITE_2(sc, reg, val) \
183 pxx_write_2((sc), sc->atse_bmcr0, (reg), (val), __func__, __LINE__, \
184 "PCS")
185 #define PCS_READ_2(sc, reg) \
186 pxx_read_2((sc), sc->atse_bmcr0, (reg), __func__, __LINE__, "PCS")
187 #define PHY_WRITE_2(sc, reg, val) \
188 pxx_write_2((sc), sc->atse_bmcr1, (reg), (val), __func__, __LINE__, \
189 "PHY")
190 #define PHY_READ_2(sc, reg) \
191 pxx_read_2((sc), sc->atse_bmcr1, (reg), __func__, __LINE__, "PHY")
192
193 static void atse_tick(void *);
194 static int atse_detach(device_t);
195
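/*
 * Hand n fresh mbuf clusters to the RX xDMA channel. Each cluster is
 * enqueued as a single DEV_TO_MEM request; the transfers start once the
 * channel queue is submitted.
 */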
196 static int
atse_rx_enqueue(struct atse_softc *sc, uint32_t n)
198 {
199 struct mbuf *m;
200 int i;
201
202 for (i = 0; i < n; i++) {
203 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
204 if (m == NULL) {
205 device_printf(sc->dev,
206 "%s: Can't alloc rx mbuf\n", __func__);
207 return (-1);
208 }
209
210 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
211 xdma_enqueue_mbuf(sc->xchan_rx, &m, 0, 4, 4, XDMA_DEV_TO_MEM);
212 }
213
214 return (0);
215 }
216
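/*
 * TX completion callback from the xDMA layer: drain all completed requests
 * from the TX channel, count transfer errors as output errors, free the
 * transmitted mbufs, and clear OACTIVE so transmission can continue.
 */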
217 static int
atse_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
219 {
220 xdma_transfer_status_t st;
221 struct atse_softc *sc;
222 if_t ifp;
223 struct mbuf *m;
224 int err;
225
226 sc = arg;
227
228 ATSE_LOCK(sc);
229
230 ifp = sc->atse_ifp;
231
232 for (;;) {
233 err = xdma_dequeue_mbuf(sc->xchan_tx, &m, &st);
234 if (err != 0) {
235 break;
236 }
237
238 if (st.error != 0) {
239 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
240 }
241
242 m_freem(m);
243 sc->txcount--;
244 }
245
246 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
247
248 ATSE_UNLOCK(sc);
249
250 return (0);
251 }
252
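/*
 * RX completion callback from the xDMA layer: dequeue filled mbufs, drop
 * errored ones, pass good frames to the stack with the driver lock released,
 * and finally replenish the channel with as many buffers as were consumed.
 */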
253 static int
atse_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
255 {
256 xdma_transfer_status_t st;
257 struct atse_softc *sc;
258 if_t ifp;
259 struct mbuf *m;
260 int err;
261 uint32_t cnt_processed;
262
263 sc = arg;
264
265 ATSE_LOCK(sc);
266
267 ifp = sc->atse_ifp;
268
269 cnt_processed = 0;
270 for (;;) {
271 err = xdma_dequeue_mbuf(sc->xchan_rx, &m, &st);
272 if (err != 0) {
273 break;
274 }
275 cnt_processed++;
276
277 if (st.error != 0) {
278 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
279 m_freem(m);
280 continue;
281 }
282
283 m->m_pkthdr.len = m->m_len = st.transferred;
284 m->m_pkthdr.rcvif = ifp;
285 m_adj(m, ETHER_ALIGN);
286 ATSE_UNLOCK(sc);
287 if_input(ifp, m);
288 ATSE_LOCK(sc);
289 }
290
291 atse_rx_enqueue(sc, cnt_processed);
292
293 ATSE_UNLOCK(sc);
294
295 return (0);
296 }
297
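/*
 * Drain the buf_ring onto the TX xDMA channel using the drbr peek/advance
 * protocol: a packet is only removed from the ring once the channel has
 * accepted it; otherwise it is put back and we stop. A single queue submit
 * at the end kicks off all newly enqueued transfers.
 */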
298 static int
atse_transmit_locked(if_t ifp)
300 {
301 struct atse_softc *sc;
302 struct mbuf *m;
303 struct buf_ring *br;
304 int error;
305 int enq;
306
307 sc = if_getsoftc(ifp);
308 br = sc->br;
309
310 enq = 0;
311
312 while ((m = drbr_peek(ifp, br)) != NULL) {
313 error = xdma_enqueue_mbuf(sc->xchan_tx, &m, 0, 4, 4, XDMA_MEM_TO_DEV);
314 if (error != 0) {
315 /* No space in request queue available yet. */
316 drbr_putback(ifp, br, m);
317 break;
318 }
319
320 drbr_advance(ifp, br);
321
322 sc->txcount++;
323 enq++;
324
325 /* If anyone is interested give them a copy. */
326 ETHER_BPF_MTAP(ifp, m);
327 }
328
329 if (enq > 0)
330 xdma_queue_submit(sc->xchan_tx);
331
332 return (0);
333 }
334
335 static int
atse_transmit(if_t ifp, struct mbuf *m)
337 {
338 struct atse_softc *sc;
339 struct buf_ring *br;
340 int error;
341
342 sc = if_getsoftc(ifp);
343 br = sc->br;
344
345 ATSE_LOCK(sc);
346
347 mtx_lock(&sc->br_mtx);
348
349 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) {
350 error = drbr_enqueue(ifp, sc->br, m);
351 mtx_unlock(&sc->br_mtx);
352 ATSE_UNLOCK(sc);
353 return (error);
354 }
355
356 if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
357 error = drbr_enqueue(ifp, sc->br, m);
358 mtx_unlock(&sc->br_mtx);
359 ATSE_UNLOCK(sc);
360 return (error);
361 }
362
363 error = drbr_enqueue(ifp, br, m);
364 if (error) {
365 mtx_unlock(&sc->br_mtx);
366 ATSE_UNLOCK(sc);
367 return (error);
368 }
369 error = atse_transmit_locked(ifp);
370
371 mtx_unlock(&sc->br_mtx);
372 ATSE_UNLOCK(sc);
373
374 return (error);
375 }
376
377 static void
atse_qflush(if_t ifp)
379 {
380 struct atse_softc *sc;
381
382 sc = if_getsoftc(ifp);
383
384 printf("%s\n", __func__);
385 }
386
387 static int
atse_stop_locked(struct atse_softc *sc)
389 {
390 uint32_t mask, val4;
391 if_t ifp;
392 int i;
393
394 ATSE_LOCK_ASSERT(sc);
395
396 callout_stop(&sc->atse_tick);
397
398 ifp = sc->atse_ifp;
399 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
400
401 /* Disable MAC transmit and receive datapath. */
402 mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
403 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
404 val4 &= ~mask;
405 CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
406
407 /* Wait for bits to be cleared; i=100 is excessive. */
408 for (i = 0; i < 100; i++) {
409 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
410 if ((val4 & mask) == 0) {
411 break;
412 }
413 DELAY(10);
414 }
415
416 if ((val4 & mask) != 0) {
417 device_printf(sc->atse_dev, "Disabling MAC TX/RX timed out.\n");
418 /* Punt. */
419 }
420
421 sc->atse_flags &= ~ATSE_FLAGS_LINK;
422
423 return (0);
424 }
425
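/*
 * Compute the 6-bit multicast hash index used for the MHASH table: bit i of
 * the index is the parity (XOR of all eight bits) of byte i of the MAC
 * address. The index is recorded as a set bit in the caller's 64-bit bitmap,
 * which atse_rxfilter_locked() then programs into the hash registers.
 */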
426 static u_int
atse_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
428 {
429 uint64_t *h = arg;
430 uint8_t *addr, x, y;
431 int i, j;
432
433 addr = LLADDR(sdl);
434 x = 0;
435 for (i = 0; i < ETHER_ADDR_LEN; i++) {
436 y = addr[i] & 0x01;
437 for (j = 1; j < 8; j++)
438 y ^= (addr[i] >> j) & 0x01;
439 x |= (y << i);
440 }
*h |= (1ULL << x);
442
443 return (1);
444 }
445
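/*
 * Program the receive filter: always hash the full 48 address bits, set or
 * clear promiscuous mode based on the interface flags, and fill in the
 * multicast hash table, one CSR word per hash bucket (non-zero means accept).
 */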
446 static int
atse_rxfilter_locked(struct atse_softc *sc)
448 {
449 if_t ifp;
450 uint32_t val4;
451 int i;
452
453 /* XXX-BZ can we find out if we have the MHASH synthesized? */
454 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
455 /* For simplicity always hash full 48 bits of addresses. */
456 if ((val4 & BASE_CFG_COMMAND_CONFIG_MHASH_SEL) != 0)
457 val4 &= ~BASE_CFG_COMMAND_CONFIG_MHASH_SEL;
458
459 ifp = sc->atse_ifp;
460 if (if_getflags(ifp) & IFF_PROMISC) {
461 val4 |= BASE_CFG_COMMAND_CONFIG_PROMIS_EN;
462 } else {
463 val4 &= ~BASE_CFG_COMMAND_CONFIG_PROMIS_EN;
464 }
465
466 CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
467
468 if (if_getflags(ifp) & IFF_ALLMULTI) {
469 /* Accept all multicast addresses. */
470 for (i = 0; i <= MHASH_LEN; i++)
471 CSR_WRITE_4(sc, MHASH_START + i, 0x1);
472 } else {
473 /*
474 * Can hold MHASH_LEN entries.
475 * XXX-BZ bitstring.h would be more general.
476 */
477 uint64_t h;
478
479 /*
480 * Re-build and re-program hash table. First build the
481 * bit-field "yes" or "no" for each slot per address, then
482 * do all the programming afterwards.
483 */
484 h = 0;
485 (void)if_foreach_llmaddr(ifp, atse_hash_maddr, &h);
486 for (i = 0; i <= MHASH_LEN; i++) {
487 CSR_WRITE_4(sc, MHASH_START + i,
(h & (1ULL << i)) ? 0x01 : 0x00);
489 }
490 }
491
492 return (0);
493 }
494
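/*
 * Read the Altera "option bits" (which carry the factory-programmed MAC
 * address) from the sibling CFI flash device. This is done at most once;
 * the result is cached in atse_ethernet_option_bits[] for all instances.
 */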
495 static int
atse_ethernet_option_bits_read_fdt(device_t dev)
497 {
498 struct resource *res;
499 device_t fdev;
500 int i, rid;
501
502 if (atse_ethernet_option_bits_flag & ATSE_ETHERNET_OPTION_BITS_READ) {
503 return (0);
504 }
505
506 fdev = device_find_child(device_get_parent(dev), "cfi", 0);
507 if (fdev == NULL) {
508 return (ENOENT);
509 }
510
511 rid = 0;
512 res = bus_alloc_resource_any(fdev, SYS_RES_MEMORY, &rid,
513 RF_ACTIVE | RF_SHAREABLE);
514 if (res == NULL) {
515 return (ENXIO);
516 }
517
518 for (i = 0; i < ALTERA_ETHERNET_OPTION_BITS_LEN; i++) {
519 atse_ethernet_option_bits[i] = bus_read_1(res,
520 ALTERA_ETHERNET_OPTION_BITS_OFF + i);
521 }
522
523 bus_release_resource(fdev, SYS_RES_MEMORY, rid, res);
524 atse_ethernet_option_bits_flag |= ATSE_ETHERNET_OPTION_BITS_READ;
525
526 return (0);
527 }
528
529 static int
atse_ethernet_option_bits_read(device_t dev)
531 {
532 int error;
533
534 error = atse_ethernet_option_bits_read_fdt(dev);
535 if (error == 0)
536 return (0);
537
538 device_printf(dev, "Cannot read Ethernet addresses from flash.\n");
539
540 return (error);
541 }
542
543 static int
atse_get_eth_address(struct atse_softc *sc)
545 {
546 unsigned long hostid;
547 uint32_t val4;
548 int unit;
549
550 /*
551 * Make sure to only ever do this once. Otherwise a reset would
552 * possibly change our ethernet address, which is not good at all.
553 */
554 if (sc->atse_eth_addr[0] != 0x00 || sc->atse_eth_addr[1] != 0x00 ||
555 sc->atse_eth_addr[2] != 0x00) {
556 return (0);
557 }
558
559 if ((atse_ethernet_option_bits_flag &
560 ATSE_ETHERNET_OPTION_BITS_READ) == 0) {
561 goto get_random;
562 }
563
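/*
 * Option bits layout: bytes 0-3 carry the 0x00005afe magic, bytes 4-9 carry
 * the factory-programmed MAC address.
 */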
564 val4 = atse_ethernet_option_bits[0] << 24;
565 val4 |= atse_ethernet_option_bits[1] << 16;
566 val4 |= atse_ethernet_option_bits[2] << 8;
567 val4 |= atse_ethernet_option_bits[3];
568 /* They chose "safe". */
569 if (val4 != le32toh(0x00005afe)) {
570 device_printf(sc->atse_dev, "Magic '5afe' is not safe: 0x%08x. "
571 "Falling back to random numbers for hardware address.\n",
572 val4);
573 goto get_random;
574 }
575
576 sc->atse_eth_addr[0] = atse_ethernet_option_bits[4];
577 sc->atse_eth_addr[1] = atse_ethernet_option_bits[5];
578 sc->atse_eth_addr[2] = atse_ethernet_option_bits[6];
579 sc->atse_eth_addr[3] = atse_ethernet_option_bits[7];
580 sc->atse_eth_addr[4] = atse_ethernet_option_bits[8];
581 sc->atse_eth_addr[5] = atse_ethernet_option_bits[9];
582
583 /* Handle factory default ethernet address: 00:07:ed:ff:ed:15 */
584 if (sc->atse_eth_addr[0] == 0x00 && sc->atse_eth_addr[1] == 0x07 &&
585 sc->atse_eth_addr[2] == 0xed && sc->atse_eth_addr[3] == 0xff &&
586 sc->atse_eth_addr[4] == 0xed && sc->atse_eth_addr[5] == 0x15) {
587 device_printf(sc->atse_dev, "Factory programmed Ethernet "
588 "hardware address blacklisted. Falling back to random "
589 "address to avoid collisions.\n");
590 device_printf(sc->atse_dev, "Please re-program your flash.\n");
591 goto get_random;
592 }
593
594 if (sc->atse_eth_addr[0] == 0x00 && sc->atse_eth_addr[1] == 0x00 &&
595 sc->atse_eth_addr[2] == 0x00 && sc->atse_eth_addr[3] == 0x00 &&
596 sc->atse_eth_addr[4] == 0x00 && sc->atse_eth_addr[5] == 0x00) {
597 device_printf(sc->atse_dev, "All zero's Ethernet hardware "
598 "address blacklisted. Falling back to random address.\n");
599 device_printf(sc->atse_dev, "Please re-program your flash.\n");
600 goto get_random;
601 }
602
603 if (ETHER_IS_MULTICAST(sc->atse_eth_addr)) {
604 device_printf(sc->atse_dev, "Multicast Ethernet hardware "
605 "address blacklisted. Falling back to random address.\n");
606 device_printf(sc->atse_dev, "Please re-program your flash.\n");
607 goto get_random;
608 }
609
610 /*
* If we find an Altera-prefixed address with a 0x0 ending, adjust
* it by the device unit. If not, and this is not the first
* Ethernet device, fall back to a random address.
614 */
615 unit = device_get_unit(sc->atse_dev);
616 if (unit == 0x00) {
617 return (0);
618 }
619
620 if (unit > 0x0f) {
621 device_printf(sc->atse_dev, "We do not support Ethernet "
622 "addresses for more than 16 MACs. Falling back to "
623 "random hadware address.\n");
624 goto get_random;
625 }
626 if ((sc->atse_eth_addr[0] & ~0x2) != 0 ||
627 sc->atse_eth_addr[1] != 0x07 || sc->atse_eth_addr[2] != 0xed ||
628 (sc->atse_eth_addr[5] & 0x0f) != 0x0) {
629 device_printf(sc->atse_dev, "Ethernet address not meeting our "
630 "multi-MAC standards. Falling back to random hadware "
631 "address.\n");
632 goto get_random;
633 }
634 sc->atse_eth_addr[5] |= (unit & 0x0f);
635
636 return (0);
637
638 get_random:
639 /*
* Fall back to the random-address code we also use in bridge(4).
641 */
642 getcredhostid(curthread->td_ucred, &hostid);
643 if (hostid == 0) {
644 arc4rand(sc->atse_eth_addr, ETHER_ADDR_LEN, 1);
645 sc->atse_eth_addr[0] &= ~1;/* clear multicast bit */
646 sc->atse_eth_addr[0] |= 2; /* set the LAA bit */
647 } else {
648 sc->atse_eth_addr[0] = 0x2;
649 sc->atse_eth_addr[1] = (hostid >> 24) & 0xff;
650 sc->atse_eth_addr[2] = (hostid >> 16) & 0xff;
651 sc->atse_eth_addr[3] = (hostid >> 8 ) & 0xff;
652 sc->atse_eth_addr[4] = hostid & 0xff;
653 sc->atse_eth_addr[5] = sc->atse_unit & 0xff;
654 }
655
656 return (0);
657 }
658
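/*
 * Write the current MAC address into the primary (BASE_CFG_MAC_*) and, as
 * selected by the n bitmask, the supplementary address register pairs. The
 * address is packed into two words: bytes 0-3 in the first register,
 * bytes 4-5 in the second.
 */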
659 static int
atse_set_eth_address(struct atse_softc *sc, int n)
661 {
662 uint32_t v0, v1;
663
664 v0 = (sc->atse_eth_addr[3] << 24) | (sc->atse_eth_addr[2] << 16) |
665 (sc->atse_eth_addr[1] << 8) | sc->atse_eth_addr[0];
666 v1 = (sc->atse_eth_addr[5] << 8) | sc->atse_eth_addr[4];
667
668 if (n & ATSE_ETH_ADDR_DEF) {
669 CSR_WRITE_4(sc, BASE_CFG_MAC_0, v0);
670 CSR_WRITE_4(sc, BASE_CFG_MAC_1, v1);
671 }
672 if (n & ATSE_ETH_ADDR_SUPP1) {
673 CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_0_0, v0);
674 CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_0_1, v1);
675 }
676 if (n & ATSE_ETH_ADDR_SUPP2) {
677 CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_1_0, v0);
678 CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_1_1, v1);
679 }
680 if (n & ATSE_ETH_ADDR_SUPP3) {
681 CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_2_0, v0);
682 CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_2_1, v1);
683 }
684 if (n & ATSE_ETH_ADDR_SUPP4) {
685 CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_3_0, v0);
686 CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_3_1, v1);
687 }
688
689 return (0);
690 }
691
692 static int
atse_reset(struct atse_softc *sc)
694 {
695 uint32_t val4, mask;
696 uint16_t val;
697 int i;
698
699 /* 1. External PHY Initialization using MDIO. */
700 /*
* We select the right MDIO space in atse_attach() and let MII
* handle everything else.
703 */
704
705 /* 2. PCS Configuration Register Initialization. */
706 /* a. Set auto negotiation link timer to 1.6ms for SGMII. */
707 PCS_WRITE_2(sc, PCS_EXT_LINK_TIMER_0, 0x0D40);
708 PCS_WRITE_2(sc, PCS_EXT_LINK_TIMER_1, 0x0003);
709
710 /* b. Configure SGMII. */
711 val = PCS_EXT_IF_MODE_SGMII_ENA|PCS_EXT_IF_MODE_USE_SGMII_AN;
712 PCS_WRITE_2(sc, PCS_EXT_IF_MODE, val);
713
714 /* c. Enable auto negotiation. */
715 /* Ignore Bits 6,8,13; should be set,set,unset. */
716 val = PCS_READ_2(sc, PCS_CONTROL);
717 val &= ~(PCS_CONTROL_ISOLATE|PCS_CONTROL_POWERDOWN);
718 val &= ~PCS_CONTROL_LOOPBACK; /* Make this a -link1 option? */
719 val |= PCS_CONTROL_AUTO_NEGOTIATION_ENABLE;
720 PCS_WRITE_2(sc, PCS_CONTROL, val);
721
722 /* d. PCS reset. */
723 val = PCS_READ_2(sc, PCS_CONTROL);
724 val |= PCS_CONTROL_RESET;
725 PCS_WRITE_2(sc, PCS_CONTROL, val);
726
727 /* Wait for reset bit to clear; i=100 is excessive. */
728 for (i = 0; i < 100; i++) {
729 val = PCS_READ_2(sc, PCS_CONTROL);
730 if ((val & PCS_CONTROL_RESET) == 0) {
731 break;
732 }
733 DELAY(10);
734 }
735
736 if ((val & PCS_CONTROL_RESET) != 0) {
737 device_printf(sc->atse_dev, "PCS reset timed out.\n");
738 return (ENXIO);
739 }
740
741 /* 3. MAC Configuration Register Initialization. */
742 /* a. Disable MAC transmit and receive datapath. */
743 mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
744 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
745 val4 &= ~mask;
746 /* Samples in the manual do have the SW_RESET bit set here, why? */
747 CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
748 /* Wait for bits to be cleared; i=100 is excessive. */
749 for (i = 0; i < 100; i++) {
750 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
751 if ((val4 & mask) == 0) {
752 break;
753 }
754 DELAY(10);
755 }
756 if ((val4 & mask) != 0) {
757 device_printf(sc->atse_dev, "Disabling MAC TX/RX timed out.\n");
758 return (ENXIO);
759 }
760 /* b. MAC FIFO configuration. */
761 CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_EMPTY, FIFO_DEPTH_TX - 16);
762 CSR_WRITE_4(sc, BASE_CFG_TX_ALMOST_FULL, 3);
763 CSR_WRITE_4(sc, BASE_CFG_TX_ALMOST_EMPTY, 8);
764 CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_EMPTY, FIFO_DEPTH_RX - 16);
765 CSR_WRITE_4(sc, BASE_CFG_RX_ALMOST_FULL, 8);
766 CSR_WRITE_4(sc, BASE_CFG_RX_ALMOST_EMPTY, 8);
767 #if 0
768 CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_FULL, 16);
769 CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_FULL, 16);
770 #else
771 /* For store-and-forward mode, set this threshold to 0. */
772 CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_FULL, 0);
773 CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_FULL, 0);
774 #endif
775 /* c. MAC address configuration. */
/* Also initialize supplementary addresses to our primary one. */
/* XXX-BZ FreeBSD really needs to grow an API for using these. */
778 atse_get_eth_address(sc);
779 atse_set_eth_address(sc, ATSE_ETH_ADDR_ALL);
780
781 /* d. MAC function configuration. */
782 CSR_WRITE_4(sc, BASE_CFG_FRM_LENGTH, 1518); /* Default. */
783 CSR_WRITE_4(sc, BASE_CFG_TX_IPG_LENGTH, 12);
784 CSR_WRITE_4(sc, BASE_CFG_PAUSE_QUANT, 0xFFFF);
785
786 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
787 /*
788 * If 1000BASE-X/SGMII PCS is initialized, set the ETH_SPEED (bit 3)
789 * and ENA_10 (bit 25) in command_config register to 0. If half duplex
790 * is reported in the PHY/PCS status register, set the HD_ENA (bit 10)
791 * to 1 in command_config register.
792 * BZ: We shoot for 1000 instead.
793 */
794 #if 0
795 val4 |= BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
796 #else
797 val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
798 #endif
799 val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
800 #if 0
801 /*
802 * We do not want to set this, otherwise, we could not even send
803 * random raw ethernet frames for various other research. By default
804 * FreeBSD will use the right ether source address.
805 */
806 val4 |= BASE_CFG_COMMAND_CONFIG_TX_ADDR_INS;
807 #endif
808 val4 |= BASE_CFG_COMMAND_CONFIG_PAD_EN;
809 val4 &= ~BASE_CFG_COMMAND_CONFIG_CRC_FWD;
810 #if 0
811 val4 |= BASE_CFG_COMMAND_CONFIG_CNTL_FRM_ENA;
812 #endif
813 #if 1
814 val4 |= BASE_CFG_COMMAND_CONFIG_RX_ERR_DISC;
815 #endif
val4 &= ~BASE_CFG_COMMAND_CONFIG_LOOP_ENA; /* link0? */
817 CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
818
819 /*
820 * Make sure we do not enable 32bit alignment; FreeBSD cannot
821 * cope with the additional padding (though we should!?).
822 * Also make sure we get the CRC appended.
823 */
824 val4 = CSR_READ_4(sc, TX_CMD_STAT);
825 val4 &= ~(TX_CMD_STAT_OMIT_CRC|TX_CMD_STAT_TX_SHIFT16);
826 CSR_WRITE_4(sc, TX_CMD_STAT, val4);
827
828 val4 = CSR_READ_4(sc, RX_CMD_STAT);
829 val4 &= ~RX_CMD_STAT_RX_SHIFT16;
830 val4 |= RX_CMD_STAT_RX_SHIFT16;
831 CSR_WRITE_4(sc, RX_CMD_STAT, val4);
832
833 /* e. Reset MAC. */
834 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
835 val4 |= BASE_CFG_COMMAND_CONFIG_SW_RESET;
836 CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
837 /* Wait for bits to be cleared; i=100 is excessive. */
838 for (i = 0; i < 100; i++) {
839 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
840 if ((val4 & BASE_CFG_COMMAND_CONFIG_SW_RESET) == 0) {
841 break;
842 }
843 DELAY(10);
844 }
845 if ((val4 & BASE_CFG_COMMAND_CONFIG_SW_RESET) != 0) {
846 device_printf(sc->atse_dev, "MAC reset timed out.\n");
847 return (ENXIO);
848 }
849
850 /* f. Enable MAC transmit and receive datapath. */
851 mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
852 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
853 val4 |= mask;
854 CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
855 /* Wait for bits to be cleared; i=100 is excessive. */
856 for (i = 0; i < 100; i++) {
857 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
858 if ((val4 & mask) == mask) {
859 break;
860 }
861 DELAY(10);
862 }
863 if ((val4 & mask) != mask) {
864 device_printf(sc->atse_dev, "Enabling MAC TX/RX timed out.\n");
865 return (ENXIO);
866 }
867
868 return (0);
869 }
870
871 static void
atse_init_locked(struct atse_softc *sc)
873 {
874 if_t ifp;
875 struct mii_data *mii;
876 uint8_t *eaddr;
877
878 ATSE_LOCK_ASSERT(sc);
879 ifp = sc->atse_ifp;
880
881 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
882 return;
883 }
884
885 /*
* Must update the ether address if it changed. Given we do not handle
* this in atse_ioctl() but it is handled in the general framework, just
* always do it here before atse_reset().
889 */
890 eaddr = if_getlladdr(sc->atse_ifp);
891 bcopy(eaddr, &sc->atse_eth_addr, ETHER_ADDR_LEN);
892
/* Make things grind to a halt, clean up, ... */
894 atse_stop_locked(sc);
895
896 atse_reset(sc);
897
898 /* ... and fire up the engine again. */
899 atse_rxfilter_locked(sc);
900
901 sc->atse_flags &= ATSE_FLAGS_LINK; /* Preserve. */
902
903 mii = device_get_softc(sc->atse_miibus);
904
905 sc->atse_flags &= ~ATSE_FLAGS_LINK;
906 mii_mediachg(mii);
907
908 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
909 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
910
911 callout_reset(&sc->atse_tick, hz, atse_tick, sc);
912 }
913
914 static void
atse_init(void *xsc)
916 {
917 struct atse_softc *sc;
918
919 /*
920 * XXXRW: There is some argument that we should immediately do RX
921 * processing after enabling interrupts, or one may not fire if there
922 * are buffered packets.
923 */
924 sc = (struct atse_softc *)xsc;
925 ATSE_LOCK(sc);
926 atse_init_locked(sc);
927 ATSE_UNLOCK(sc);
928 }
929
930 static int
atse_ioctl(if_t ifp, u_long command, caddr_t data)
932 {
933 struct atse_softc *sc;
934 struct ifreq *ifr;
935 int error, mask;
936
937 error = 0;
938 sc = if_getsoftc(ifp);
939 ifr = (struct ifreq *)data;
940
941 switch (command) {
942 case SIOCSIFFLAGS:
943 ATSE_LOCK(sc);
944 if (if_getflags(ifp) & IFF_UP) {
945 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
946 ((if_getflags(ifp) ^ sc->atse_if_flags) &
947 (IFF_PROMISC | IFF_ALLMULTI)) != 0)
948 atse_rxfilter_locked(sc);
949 else
950 atse_init_locked(sc);
951 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
952 atse_stop_locked(sc);
953 sc->atse_if_flags = if_getflags(ifp);
954 ATSE_UNLOCK(sc);
955 break;
956 case SIOCSIFCAP:
957 ATSE_LOCK(sc);
958 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
959 ATSE_UNLOCK(sc);
960 break;
961 case SIOCADDMULTI:
962 case SIOCDELMULTI:
963 ATSE_LOCK(sc);
964 atse_rxfilter_locked(sc);
965 ATSE_UNLOCK(sc);
966 break;
967 case SIOCGIFMEDIA:
968 case SIOCSIFMEDIA:
969 {
970 struct mii_data *mii;
971 struct ifreq *ifr;
972
973 mii = device_get_softc(sc->atse_miibus);
974 ifr = (struct ifreq *)data;
975 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
976 break;
977 }
978 default:
979 error = ether_ioctl(ifp, command, data);
980 break;
981 }
982
983 return (error);
984 }
985
986 static void
atse_tick(void *xsc)
988 {
989 struct atse_softc *sc;
990 struct mii_data *mii;
991 if_t ifp;
992
993 sc = (struct atse_softc *)xsc;
994 ATSE_LOCK_ASSERT(sc);
995 ifp = sc->atse_ifp;
996
997 mii = device_get_softc(sc->atse_miibus);
998 mii_tick(mii);
999 if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
1000 atse_miibus_statchg(sc->atse_dev);
1001 }
1002
1003 callout_reset(&sc->atse_tick, hz, atse_tick, sc);
1004 }
1005
1006 /*
1007 * Set media options.
1008 */
1009 static int
atse_ifmedia_upd(if_t ifp)
1011 {
1012 struct atse_softc *sc;
1013 struct mii_data *mii;
1014 struct mii_softc *miisc;
1015 int error;
1016
1017 sc = if_getsoftc(ifp);
1018
1019 ATSE_LOCK(sc);
1020 mii = device_get_softc(sc->atse_miibus);
1021 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
1022 PHY_RESET(miisc);
1023 }
1024 error = mii_mediachg(mii);
1025 ATSE_UNLOCK(sc);
1026
1027 return (error);
1028 }
1029
1030 /*
1031 * Report current media status.
1032 */
1033 static void
atse_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
1035 {
1036 struct atse_softc *sc;
1037 struct mii_data *mii;
1038
1039 sc = if_getsoftc(ifp);
1040
1041 ATSE_LOCK(sc);
1042 mii = device_get_softc(sc->atse_miibus);
1043 mii_pollstat(mii);
1044 ifmr->ifm_active = mii->mii_media_active;
1045 ifmr->ifm_status = mii->mii_media_status;
1046 ATSE_UNLOCK(sc);
1047 }
1048
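/*
 * MAC statistics counters. The [0x1a] designated initializer makes the array
 * index equal to the counter's dword offset in the CSR space; that index is
 * passed to sysctl_atse_mac_stats_proc() as arg2 and read back with
 * CSR_READ_4(). The counters typically appear as read-only sysctls under
 * dev.atse.<unit>.<name>.
 */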
1049 static struct atse_mac_stats_regs {
1050 const char *name;
1051 const char *descr; /* Mostly copied from Altera datasheet. */
1052 } atse_mac_stats_regs[] = {
1053 [0x1a] =
1054 { "aFramesTransmittedOK",
1055 "The number of frames that are successfully transmitted including "
1056 "the pause frames." },
1057 { "aFramesReceivedOK",
1058 "The number of frames that are successfully received including the "
1059 "pause frames." },
1060 { "aFrameCheckSequenceErrors",
1061 "The number of receive frames with CRC error." },
1062 { "aAlignmentErrors",
1063 "The number of receive frames with alignment error." },
1064 { "aOctetsTransmittedOK",
1065 "The lower 32 bits of the number of data and padding octets that "
1066 "are successfully transmitted." },
1067 { "aOctetsReceivedOK",
1068 "The lower 32 bits of the number of data and padding octets that "
1069 " are successfully received." },
1070 { "aTxPAUSEMACCtrlFrames",
1071 "The number of pause frames transmitted." },
1072 { "aRxPAUSEMACCtrlFrames",
1073 "The number received pause frames received." },
1074 { "ifInErrors",
1075 "The number of errored frames received." },
1076 { "ifOutErrors",
1077 "The number of transmit frames with either a FIFO overflow error, "
1078 "a FIFO underflow error, or a error defined by the user "
1079 "application." },
1080 { "ifInUcastPkts",
1081 "The number of valid unicast frames received." },
1082 { "ifInMulticastPkts",
1083 "The number of valid multicast frames received. The count does "
1084 "not include pause frames." },
1085 { "ifInBroadcastPkts",
1086 "The number of valid broadcast frames received." },
1087 { "ifOutDiscards",
1088 "This statistics counter is not in use. The MAC function does not "
1089 "discard frames that are written to the FIFO buffer by the user "
1090 "application." },
1091 { "ifOutUcastPkts",
1092 "The number of valid unicast frames transmitted." },
1093 { "ifOutMulticastPkts",
1094 "The number of valid multicast frames transmitted, excluding pause "
1095 "frames." },
1096 { "ifOutBroadcastPkts",
1097 "The number of valid broadcast frames transmitted." },
1098 { "etherStatsDropEvents",
1099 "The number of frames that are dropped due to MAC internal errors "
1100 "when FIFO buffer overflow persists." },
1101 { "etherStatsOctets",
1102 "The lower 32 bits of the total number of octets received. This "
1103 "count includes both good and errored frames." },
1104 { "etherStatsPkts",
1105 "The total number of good and errored frames received." },
1106 { "etherStatsUndersizePkts",
1107 "The number of frames received with length less than 64 bytes. "
1108 "This count does not include errored frames." },
1109 { "etherStatsOversizePkts",
1110 "The number of frames received that are longer than the value "
1111 "configured in the frm_length register. This count does not "
1112 "include errored frames." },
1113 { "etherStatsPkts64Octets",
1114 "The number of 64-byte frames received. This count includes good "
1115 "and errored frames." },
1116 { "etherStatsPkts65to127Octets",
1117 "The number of received good and errored frames between the length "
1118 "of 65 and 127 bytes." },
1119 { "etherStatsPkts128to255Octets",
1120 "The number of received good and errored frames between the length "
1121 "of 128 and 255 bytes." },
1122 { "etherStatsPkts256to511Octets",
1123 "The number of received good and errored frames between the length "
1124 "of 256 and 511 bytes." },
1125 { "etherStatsPkts512to1023Octets",
1126 "The number of received good and errored frames between the length "
1127 "of 512 and 1023 bytes." },
1128 { "etherStatsPkts1024to1518Octets",
1129 "The number of received good and errored frames between the length "
1130 "of 1024 and 1518 bytes." },
1131 { "etherStatsPkts1519toXOctets",
1132 "The number of received good and errored frames between the length "
1133 "of 1519 and the maximum frame length configured in the frm_length "
1134 "register." },
1135 { "etherStatsJabbers",
1136 "Too long frames with CRC error." },
1137 { "etherStatsFragments",
1138 "Too short frames with CRC error." },
1139 /* 0x39 unused, 0x3a/b non-stats. */
1140 [0x3c] =
1141 /* Extended Statistics Counters */
1142 { "msb_aOctetsTransmittedOK",
1143 "Upper 32 bits of the number of data and padding octets that are "
1144 "successfully transmitted." },
1145 { "msb_aOctetsReceivedOK",
1146 "Upper 32 bits of the number of data and padding octets that are "
1147 "successfully received." },
1148 { "msb_etherStatsOctets",
1149 "Upper 32 bits of the total number of octets received. This count "
1150 "includes both good and errored frames." }
1151 };
1152
1153 static int
sysctl_atse_mac_stats_proc(SYSCTL_HANDLER_ARGS)
1155 {
1156 struct atse_softc *sc;
1157 int error, offset, s;
1158
1159 sc = arg1;
1160 offset = arg2;
1161
1162 s = CSR_READ_4(sc, offset);
1163 error = sysctl_handle_int(oidp, &s, 0, req);
1164 if (error || !req->newptr) {
1165 return (error);
1166 }
1167
1168 return (0);
1169 }
1170
1171 static struct atse_rx_err_stats_regs {
1172 const char *name;
1173 const char *descr;
1174 } atse_rx_err_stats_regs[] = {
1175 #define ATSE_RX_ERR_FIFO_THRES_EOP 0 /* FIFO threshold reached, on EOP. */
1176 #define ATSE_RX_ERR_ELEN 1 /* Frame/payload length not valid. */
1177 #define ATSE_RX_ERR_CRC32 2 /* CRC-32 error. */
1178 #define ATSE_RX_ERR_FIFO_THRES_TRUNC 3 /* FIFO thresh., truncated frame. */
1179 #define ATSE_RX_ERR_4 4 /* ? */
1180 #define ATSE_RX_ERR_5 5 /* / */
1181
1182 { "rx_err_fifo_thres_eop",
1183 "FIFO threshold reached, reported on EOP." },
1184 { "rx_err_fifo_elen",
1185 "Frame or payload length not valid." },
1186 { "rx_err_fifo_crc32",
1187 "CRC-32 error." },
1188 { "rx_err_fifo_thres_trunc",
1189 "FIFO threshold reached, truncated frame" },
1190 { "rx_err_4",
1191 "?" },
1192 { "rx_err_5",
1193 "?" },
1194 };
1195
1196 static int
sysctl_atse_rx_err_stats_proc(SYSCTL_HANDLER_ARGS)
1198 {
1199 struct atse_softc *sc;
1200 int error, offset, s;
1201
1202 sc = arg1;
1203 offset = arg2;
1204
1205 s = sc->atse_rx_err[offset];
1206 error = sysctl_handle_int(oidp, &s, 0, req);
1207 if (error || !req->newptr) {
1208 return (error);
1209 }
1210
1211 return (0);
1212 }
1213
1214 static void
atse_sysctl_stats_attach(device_t dev)
1216 {
1217 struct sysctl_ctx_list *sctx;
1218 struct sysctl_oid *soid;
1219 struct atse_softc *sc;
1220 int i;
1221
1222 sc = device_get_softc(dev);
1223 sctx = device_get_sysctl_ctx(dev);
1224 soid = device_get_sysctl_tree(dev);
1225
1226 /* MAC statistics. */
1227 for (i = 0; i < nitems(atse_mac_stats_regs); i++) {
1228 if (atse_mac_stats_regs[i].name == NULL ||
1229 atse_mac_stats_regs[i].descr == NULL) {
1230 continue;
1231 }
1232
1233 SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
1234 atse_mac_stats_regs[i].name,
1235 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
1236 sc, i, sysctl_atse_mac_stats_proc, "IU",
1237 atse_mac_stats_regs[i].descr);
1238 }
1239
1240 /* rx_err[]. */
1241 for (i = 0; i < ATSE_RX_ERR_MAX; i++) {
1242 if (atse_rx_err_stats_regs[i].name == NULL ||
1243 atse_rx_err_stats_regs[i].descr == NULL) {
1244 continue;
1245 }
1246
1247 SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
1248 atse_rx_err_stats_regs[i].name,
1249 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
1250 sc, i, sysctl_atse_rx_err_stats_proc, "IU",
1251 atse_rx_err_stats_regs[i].descr);
1252 }
1253 }
1254
1255 /*
1256 * Generic device handling routines.
1257 */
1258 int
atse_attach(device_t dev)
1260 {
1261 struct atse_softc *sc;
1262 if_t ifp;
1263 uint32_t caps;
1264 int error;
1265
1266 sc = device_get_softc(dev);
1267 sc->dev = dev;
1268
1269 /* Get xDMA controller */
1270 sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
1271 if (sc->xdma_tx == NULL) {
1272 device_printf(dev, "Can't find DMA controller.\n");
1273 return (ENXIO);
1274 }
1275
1276 /*
1277 * Only final (EOP) write can be less than "symbols per beat" value
1278 * so we have to defrag mbuf chain.
1279 * Chapter 15. On-Chip FIFO Memory Core.
1280 * Embedded Peripherals IP User Guide.
1281 */
1282 caps = XCHAN_CAP_NOSEG;
1283
1284 /* Alloc xDMA virtual channel. */
1285 sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, caps);
1286 if (sc->xchan_tx == NULL) {
1287 device_printf(dev, "Can't alloc virtual DMA channel.\n");
1288 return (ENXIO);
1289 }
1290
1291 /* Setup interrupt handler. */
1292 error = xdma_setup_intr(sc->xchan_tx, 0,
1293 atse_xdma_tx_intr, sc, &sc->ih_tx);
1294 if (error) {
1295 device_printf(sc->dev,
1296 "Can't setup xDMA interrupt handler.\n");
1297 return (ENXIO);
1298 }
1299
1300 xdma_prep_sg(sc->xchan_tx,
1301 TX_QUEUE_SIZE, /* xchan requests queue size */
1302 MCLBYTES, /* maxsegsize */
1303 8, /* maxnsegs */
1304 16, /* alignment */
1305 0, /* boundary */
1306 BUS_SPACE_MAXADDR_32BIT,
1307 BUS_SPACE_MAXADDR);
1308
1309 /* Get RX xDMA controller */
1310 sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
1311 if (sc->xdma_rx == NULL) {
1312 device_printf(dev, "Can't find DMA controller.\n");
1313 return (ENXIO);
1314 }
1315
1316 /* Alloc xDMA virtual channel. */
1317 sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, caps);
1318 if (sc->xchan_rx == NULL) {
1319 device_printf(dev, "Can't alloc virtual DMA channel.\n");
1320 return (ENXIO);
1321 }
1322
1323 /* Setup interrupt handler. */
1324 error = xdma_setup_intr(sc->xchan_rx, XDMA_INTR_NET,
1325 atse_xdma_rx_intr, sc, &sc->ih_rx);
1326 if (error) {
1327 device_printf(sc->dev,
1328 "Can't setup xDMA interrupt handler.\n");
1329 return (ENXIO);
1330 }
1331
1332 xdma_prep_sg(sc->xchan_rx,
1333 RX_QUEUE_SIZE, /* xchan requests queue size */
1334 MCLBYTES, /* maxsegsize */
1335 1, /* maxnsegs */
1336 16, /* alignment */
1337 0, /* boundary */
1338 BUS_SPACE_MAXADDR_32BIT,
1339 BUS_SPACE_MAXADDR);
1340
1341 mtx_init(&sc->br_mtx, "buf ring mtx", NULL, MTX_DEF);
1342 sc->br = buf_ring_alloc(BUFRING_SIZE, M_DEVBUF,
1343 M_NOWAIT, &sc->br_mtx);
1344 if (sc->br == NULL) {
1345 return (ENOMEM);
1346 }
1347
1348 atse_ethernet_option_bits_read(dev);
1349
1350 mtx_init(&sc->atse_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1351 MTX_DEF);
1352
1353 callout_init_mtx(&sc->atse_tick, &sc->atse_mtx, 0);
1354
1355 /*
* We are only doing single-PHY with this driver currently. The
* defaults would be right (BASE_CFG_MDIO_ADDR0 pointing to the 1st
* PHY address, 0), except that BMCR0 is always the PCS mapping, so
* we always use BMCR1. See Table 5-1, 0xA0-0xBF.
1360 */
1361 #if 0 /* Always PCS. */
1362 sc->atse_bmcr0 = MDIO_0_START;
1363 CSR_WRITE_4(sc, BASE_CFG_MDIO_ADDR0, 0x00);
1364 #endif
1365 /* Always use matching PHY for atse[0..]. */
1366 sc->atse_phy_addr = device_get_unit(dev);
1367 sc->atse_bmcr1 = MDIO_1_START;
1368 CSR_WRITE_4(sc, BASE_CFG_MDIO_ADDR1, sc->atse_phy_addr);
1369
1370 /* Reset the adapter. */
1371 atse_reset(sc);
1372
1373 /* Setup interface. */
1374 ifp = sc->atse_ifp = if_alloc(IFT_ETHER);
1375 if_setsoftc(ifp, sc);
1376 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1377 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
1378 if_setioctlfn(ifp, atse_ioctl);
1379 if_settransmitfn(ifp, atse_transmit);
1380 if_setqflushfn(ifp, atse_qflush);
1381 if_setinitfn(ifp, atse_init);
1382 if_setsendqlen(ifp, ATSE_TX_LIST_CNT - 1);
1383 if_setsendqready(ifp);
1384
1385 /* MII setup. */
1386 error = mii_attach(dev, &sc->atse_miibus, ifp, atse_ifmedia_upd,
1387 atse_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
1388 if (error != 0) {
1389 device_printf(dev, "attaching PHY failed: %d\n", error);
1390 goto err;
1391 }
1392
/* Call media-independent attach routine. */
1394 ether_ifattach(ifp, sc->atse_eth_addr);
1395
1396 /* Tell the upper layer(s) about vlan mtu support. */
1397 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
1398 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
1399 if_setcapenable(ifp, if_getcapabilities(ifp));
1400
1401 err:
1402 if (error != 0) {
1403 atse_detach(dev);
1404 }
1405
1406 if (error == 0) {
1407 atse_sysctl_stats_attach(dev);
1408 }
1409
1410 atse_rx_enqueue(sc, NUM_RX_MBUF);
1411 xdma_queue_submit(sc->xchan_rx);
1412
1413 return (error);
1414 }
1415
1416 static int
atse_detach(device_t dev)
1418 {
1419 struct atse_softc *sc;
1420 if_t ifp;
1421
1422 sc = device_get_softc(dev);
1423 KASSERT(mtx_initialized(&sc->atse_mtx), ("%s: mutex not initialized",
1424 device_get_nameunit(dev)));
1425 ifp = sc->atse_ifp;
1426
1427 /* Only cleanup if attach succeeded. */
1428 if (device_is_attached(dev)) {
1429 ATSE_LOCK(sc);
1430 atse_stop_locked(sc);
1431 ATSE_UNLOCK(sc);
1432 callout_drain(&sc->atse_tick);
1433 ether_ifdetach(ifp);
1434 }
1435 if (sc->atse_miibus != NULL) {
1436 device_delete_child(dev, sc->atse_miibus);
1437 }
1438
1439 if (ifp != NULL) {
1440 if_free(ifp);
1441 }
1442
1443 mtx_destroy(&sc->atse_mtx);
1444
1445 xdma_channel_free(sc->xchan_tx);
1446 xdma_channel_free(sc->xchan_rx);
1447 xdma_put(sc->xdma_tx);
1448 xdma_put(sc->xdma_rx);
1449
1450 return (0);
1451 }
1452
1453 /* Shared between nexus and fdt implementation. */
1454 void
atse_detach_resources(device_t dev)
1456 {
1457 struct atse_softc *sc;
1458
1459 sc = device_get_softc(dev);
1460
1461 if (sc->atse_mem_res != NULL) {
1462 bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_mem_rid,
1463 sc->atse_mem_res);
1464 sc->atse_mem_res = NULL;
1465 }
1466 }
1467
1468 int
atse_detach_dev(device_t dev)
1470 {
1471 int error;
1472
1473 error = atse_detach(dev);
1474 if (error) {
1475 /* We are basically in undefined state now. */
1476 device_printf(dev, "atse_detach() failed: %d\n", error);
1477 return (error);
1478 }
1479
1480 atse_detach_resources(dev);
1481
1482 return (0);
1483 }
1484
1485 int
atse_miibus_readreg(device_t dev, int phy, int reg)
1487 {
1488 struct atse_softc *sc;
1489 int val;
1490
1491 sc = device_get_softc(dev);
1492
1493 /*
1494 * We currently do not support re-mapping of MDIO space on-the-fly
1495 * but de-facto hard-code the phy#.
1496 */
1497 if (phy != sc->atse_phy_addr) {
1498 return (0);
1499 }
1500
1501 val = PHY_READ_2(sc, reg);
1502
1503 return (val);
1504 }
1505
1506 int
atse_miibus_writereg(device_t dev, int phy, int reg, int data)
1508 {
1509 struct atse_softc *sc;
1510
1511 sc = device_get_softc(dev);
1512
1513 /*
1514 * We currently do not support re-mapping of MDIO space on-the-fly
1515 * but de-facto hard-code the phy#.
1516 */
1517 if (phy != sc->atse_phy_addr) {
1518 return (0);
1519 }
1520
1521 PHY_WRITE_2(sc, reg, data);
1522 return (0);
1523 }
1524
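/*
 * MII status change callback: mirror the negotiated speed and duplex into
 * the MAC's command_config register (ENA_10, ETH_SPEED, HD_ENA), track link
 * state in atse_flags, and make sure the TX/RX datapath stays enabled while
 * the link is up.
 */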
1525 void
atse_miibus_statchg(device_t dev)
1527 {
1528 struct atse_softc *sc;
1529 struct mii_data *mii;
1530 if_t ifp;
1531 uint32_t val4;
1532
1533 sc = device_get_softc(dev);
1534 ATSE_LOCK_ASSERT(sc);
1535
1536 mii = device_get_softc(sc->atse_miibus);
1537 ifp = sc->atse_ifp;
1538 if (mii == NULL || ifp == NULL ||
1539 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
1540 return;
1541 }
1542
1543 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
1544
1545 /* Assume no link. */
1546 sc->atse_flags &= ~ATSE_FLAGS_LINK;
1547
1548 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
1549 (IFM_ACTIVE | IFM_AVALID)) {
1550 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1551 case IFM_10_T:
1552 val4 |= BASE_CFG_COMMAND_CONFIG_ENA_10;
1553 val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
1554 sc->atse_flags |= ATSE_FLAGS_LINK;
1555 break;
1556 case IFM_100_TX:
1557 val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
1558 val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
1559 sc->atse_flags |= ATSE_FLAGS_LINK;
1560 break;
1561 case IFM_1000_T:
1562 val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
1563 val4 |= BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
1564 sc->atse_flags |= ATSE_FLAGS_LINK;
1565 break;
1566 default:
1567 break;
1568 }
1569 }
1570
1571 if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
1572 /* Need to stop the MAC? */
1573 return;
1574 }
1575
if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1577 val4 &= ~BASE_CFG_COMMAND_CONFIG_HD_ENA;
1578 } else {
1579 val4 |= BASE_CFG_COMMAND_CONFIG_HD_ENA;
1580 }
1581
1582 /* flow control? */
1583
1584 /* Make sure the MAC is activated. */
1585 val4 |= BASE_CFG_COMMAND_CONFIG_TX_ENA;
1586 val4 |= BASE_CFG_COMMAND_CONFIG_RX_ENA;
1587
1588 CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
1589 }
1590
1591 MODULE_DEPEND(atse, ether, 1, 1, 1);
1592 MODULE_DEPEND(atse, miibus, 1, 1, 1);
1593