/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012-2014 Thomas Skibo <thomasskibo@yahoo.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * A network interface driver for the Cadence GEM Gigabit Ethernet
 * interface such as the one used in the Xilinx Zynq-7000 SoC.
 *
 * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
 * (v1.4) November 16, 2012.  Xilinx doc UG585.  GEM is covered in Ch. 16
 * and register definitions are in appendix B.18.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_fdt.h>

#include <dev/clk/clk.h>

#if BUS_SPACE_MAXADDR > BUS_SPACE_MAXADDR_32BIT
#define CGEM64
#endif

#include <dev/cadence/if_cgem_hw.h>

#include "miibus_if.h"

#define IF_CGEM_NAME "cgem"

#define CGEM_NUM_RX_DESCS	512	/* size of receive descriptor ring */
#define CGEM_NUM_TX_DESCS	512	/* size of transmit descriptor ring */

/* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS of course. */
#define DEFAULT_NUM_RX_BUFS	256	/* number of receive bufs to queue. */

#define TX_MAX_DMA_SEGS		8	/* maximum segs in a tx mbuf dma */

#define CGEM_CKSUM_ASSIST	(CSUM_IP | CSUM_TCP | CSUM_UDP | \
				    CSUM_TCP_IPV6 | CSUM_UDP_IPV6)

#define HWQUIRK_NONE		0
#define HWQUIRK_NEEDNULLQS	1
#define HWQUIRK_RXHANGWAR	2
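
/*
 * HWQUIRK_NEEDNULLQS: the controller has extra priority queues that cannot
 * be disabled; they must be parked on null descriptor lists (see
 * cgem_null_qs()).  HWQUIRK_RXHANGWAR: the receiver can hang and needs the
 * periodic reset work-around in cgem_tick().
 */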

static struct ofw_compat_data compat_data[] = {
	{ "cdns,zynq-gem",		HWQUIRK_RXHANGWAR }, /* Deprecated */
	{ "cdns,zynqmp-gem",		HWQUIRK_NEEDNULLQS }, /* Deprecated */
	{ "xlnx,zynq-gem",		HWQUIRK_RXHANGWAR },
	{ "xlnx,zynqmp-gem",		HWQUIRK_NEEDNULLQS },
	{ "microchip,mpfs-mss-gem",	HWQUIRK_NEEDNULLQS },
	{ "sifive,fu540-c000-gem",	HWQUIRK_NONE },
	{ "sifive,fu740-c000-gem",	HWQUIRK_NONE },
	{ NULL,				0 }
};

struct cgem_softc {
	if_t			ifp;
	struct mtx		sc_mtx;
	device_t		dev;
	device_t		miibus;
	u_int			mii_media_active;	/* last active media */
	int			if_old_flags;
	struct resource		*mem_res;
	struct resource		*irq_res;
	void			*intrhand;
	struct callout		tick_ch;
	uint32_t		net_ctl_shadow;
	uint32_t		net_cfg_shadow;
	clk_t			clk_pclk;
	clk_t			clk_hclk;
	clk_t			clk_txclk;
	clk_t			clk_rxclk;
	clk_t			clk_tsuclk;
	int			neednullqs;
	int			phy_contype;

	bus_dma_tag_t		desc_dma_tag;
	bus_dma_tag_t		mbuf_dma_tag;

	/* receive descriptor ring */
	struct cgem_rx_desc	*rxring;
	bus_addr_t		rxring_physaddr;
	struct mbuf		*rxring_m[CGEM_NUM_RX_DESCS];
	bus_dmamap_t		rxring_m_dmamap[CGEM_NUM_RX_DESCS];
	int			rxring_hd_ptr;	/* where to put rcv bufs */
	int			rxring_tl_ptr;	/* where to get receives */
	int			rxring_queued;	/* how many rcv bufs queued */
	bus_dmamap_t		rxring_dma_map;
	int			rxbufs;		/* tunable number rcv bufs */
	int			rxhangwar;	/* rx hang work-around */
	u_int			rxoverruns;	/* rx overruns */
	u_int			rxnobufs;	/* rx buf ring empty events */
	u_int			rxdmamapfails;	/* rx dmamap failures */
	uint32_t		rx_frames_prev;

	/* transmit descriptor ring */
	struct cgem_tx_desc	*txring;
	bus_addr_t		txring_physaddr;
	struct mbuf		*txring_m[CGEM_NUM_TX_DESCS];
	bus_dmamap_t		txring_m_dmamap[CGEM_NUM_TX_DESCS];
	int			txring_hd_ptr;	/* where to put next xmits */
	int			txring_tl_ptr;	/* next xmit mbuf to free */
	int			txring_queued;	/* num xmits segs queued */
	u_int			txfull;		/* tx ring full events */
	u_int			txdefrags;	/* tx calls to m_defrag() */
	u_int			txdefragfails;	/* tx m_defrag() failures */
	u_int			txdmamapfails;	/* tx dmamap failures */

	/* null descriptor rings */
	void			*null_qs;
	bus_addr_t		null_qs_physaddr;

	/* hardware provided statistics */
	struct cgem_hw_stats {
		uint64_t	tx_bytes;
		uint32_t	tx_frames;
		uint32_t	tx_frames_bcast;
		uint32_t	tx_frames_multi;
		uint32_t	tx_frames_pause;
		uint32_t	tx_frames_64b;
		uint32_t	tx_frames_65to127b;
		uint32_t	tx_frames_128to255b;
		uint32_t	tx_frames_256to511b;
		uint32_t	tx_frames_512to1023b;
		uint32_t	tx_frames_1024to1536b;
		uint32_t	tx_under_runs;
		uint32_t	tx_single_collisn;
		uint32_t	tx_multi_collisn;
		uint32_t	tx_excsv_collisn;
		uint32_t	tx_late_collisn;
		uint32_t	tx_deferred_frames;
		uint32_t	tx_carrier_sense_errs;

		uint64_t	rx_bytes;
		uint32_t	rx_frames;
		uint32_t	rx_frames_bcast;
		uint32_t	rx_frames_multi;
		uint32_t	rx_frames_pause;
		uint32_t	rx_frames_64b;
		uint32_t	rx_frames_65to127b;
		uint32_t	rx_frames_128to255b;
		uint32_t	rx_frames_256to511b;
		uint32_t	rx_frames_512to1023b;
		uint32_t	rx_frames_1024to1536b;
		uint32_t	rx_frames_undersize;
		uint32_t	rx_frames_oversize;
		uint32_t	rx_frames_jabber;
		uint32_t	rx_frames_fcs_errs;
		uint32_t	rx_frames_length_errs;
		uint32_t	rx_symbol_errs;
		uint32_t	rx_align_errs;
		uint32_t	rx_resource_errs;
		uint32_t	rx_overrun_errs;
		uint32_t	rx_ip_hdr_csum_errs;
		uint32_t	rx_tcp_csum_errs;
		uint32_t	rx_udp_csum_errs;
	} stats;
};

#define RD4(sc, off)		(bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val)	(bus_write_4((sc)->mem_res, (off), (val)))
#define BARRIER(sc, off, len, flags) \
	(bus_barrier((sc)->mem_res, (off), (len), (flags)))

#define CGEM_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc)		mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc)	mtx_init(&(sc)->sc_mtx, \
	    device_get_nameunit((sc)->dev), MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)

/* Allow platforms to optionally provide a way to set the reference clock. */
int cgem_set_ref_clk(int unit, int frequency);

static int cgem_probe(device_t dev);
static int cgem_attach(device_t dev);
static int cgem_detach(device_t dev);
static void cgem_tick(void *);
static void cgem_intr(void *);

static void cgem_mediachange(struct cgem_softc *, struct mii_data *);

static void
cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
{
	int i;
	uint32_t rnd;

	/* See if boot loader gave us a MAC address already. */
	for (i = 0; i < 4; i++) {
		uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
		uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
		if (low != 0 || high != 0) {
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			break;
		}
	}

	/* No MAC from boot loader?  Assign a random one. */
	if (i == 4) {
		rnd = arc4random();

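		/*
		 * 'b' is 0x62: the locally-administered bit (0x02) is set
		 * and the multicast bit (0x01) is clear, so "bsd" is a
		 * valid prefix for a made-up unicast address.
		 */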
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;

		device_printf(sc->dev, "no mac address found, assigning "
		    "random: %02x:%02x:%02x:%02x:%02x:%02x\n", eaddr[0],
		    eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);
	}

	/* Move address to first slot and zero out the rest. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	for (i = 1; i < 4; i++) {
		WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0);
		WR4(sc, CGEM_SPEC_ADDR_HI(i), 0);
	}
}

/*
 * cgem_mac_hash():  map 48-bit address to a 6-bit hash.  The 6-bit hash
 * corresponds to a bit in a 64-bit hash register.  Setting that bit in the
 * hash register enables reception of all frames with a destination address
 * that hashes to that 6-bit value.
 *
 * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
 * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
 * every sixth bit in the destination address.
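 *
 * Concretely, hash bit i is the exclusive-or of destination address bits
 * i, i+6, i+12, ..., i+42, where address bit 0 is the least significant
 * bit of the first address byte.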
 */
static int
cgem_mac_hash(u_char eaddr[])
{
	int hash;
	int i, j;

	hash = 0;
	for (i = 0; i < 6; i++)
		for (j = i; j < 48; j += 6)
			if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
				hash ^= (1 << i);

	return hash;
}

static u_int
cgem_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *hashes = arg;
	int index;

	index = cgem_mac_hash(LLADDR(sdl));
	if (index > 31)
		hashes[0] |= (1U << (index - 32));
	else
		hashes[1] |= (1U << index);

	return (1);
}

/*
 * After any change in rx flags or multi-cast addresses, set up hash registers
 * and net config register bits.
 */
static void
cgem_rx_filter(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t hashes[2] = { 0, 0 };

	sc->net_cfg_shadow &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
	    CGEM_NET_CFG_NO_BCAST | CGEM_NET_CFG_COPY_ALL);

	if ((if_getflags(ifp) & IFF_PROMISC) != 0)
		sc->net_cfg_shadow |= CGEM_NET_CFG_COPY_ALL;
	else {
		if ((if_getflags(ifp) & IFF_BROADCAST) == 0)
			sc->net_cfg_shadow |= CGEM_NET_CFG_NO_BCAST;
		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
			hashes[0] = 0xffffffff;
			hashes[1] = 0xffffffff;
		} else
			if_foreach_llmaddr(ifp, cgem_hash_maddr, hashes);

		if (hashes[0] != 0 || hashes[1] != 0)
			sc->net_cfg_shadow |= CGEM_NET_CFG_MULTI_HASH_EN;
	}

	WR4(sc, CGEM_HASH_TOP, hashes[0]);
	WR4(sc, CGEM_HASH_BOT, hashes[1]);
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
}

/* For bus_dmamap_load() callback. */
static void
cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (nsegs != 1 || error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

/* Set up null queues for priority queues we actually can't disable. */
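/*
 * Each null queue is a single descriptor carved from the tail of the main
 * descriptor ring allocation: an rx descriptor marked OWN (so the controller
 * treats it as already full) and a tx descriptor marked USED (so the
 * controller finds nothing to send), both with WRAP set.
 */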
static void
cgem_null_qs(struct cgem_softc *sc)
{
	struct cgem_rx_desc *rx_desc;
	struct cgem_tx_desc *tx_desc;
	uint32_t queue_mask;
	int n;

	/* Read design config register 6 to determine number of queues. */
	queue_mask = (RD4(sc, CGEM_DESIGN_CFG6) &
	    CGEM_DESIGN_CFG6_DMA_PRIO_Q_MASK) >> 1;
	if (queue_mask == 0)
		return;

	/* Create empty RX queue and empty TX buf queues. */
	memset(sc->null_qs, 0, sizeof(struct cgem_rx_desc) +
	    sizeof(struct cgem_tx_desc));
	rx_desc = sc->null_qs;
	rx_desc->addr = CGEM_RXDESC_OWN | CGEM_RXDESC_WRAP;
	tx_desc = (struct cgem_tx_desc *)(rx_desc + 1);
	tx_desc->ctl = CGEM_TXDESC_USED | CGEM_TXDESC_WRAP;

	/* Point all valid ring base pointers to the null queues. */
	for (n = 1; (queue_mask & 1) != 0; n++, queue_mask >>= 1) {
		WR4(sc, CGEM_RX_QN_BAR(n), sc->null_qs_physaddr);
		WR4(sc, CGEM_TX_QN_BAR(n), sc->null_qs_physaddr +
		    sizeof(struct cgem_rx_desc));
	}
}

/* Create DMA'able descriptor rings. */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
	int i, err;
	int desc_rings_size = CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc) +
	    CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);

	if (sc->neednullqs)
		desc_rings_size += sizeof(struct cgem_rx_desc) +
		    sizeof(struct cgem_tx_desc);

	sc->txring = NULL;
	sc->rxring = NULL;

	/* Allocate non-cached DMA space for RX and TX descriptors. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1,
#ifdef CGEM64
	    1ULL << 32,	/* Do not cross a 4G boundary. */
#else
	    0,
#endif
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    desc_rings_size, 1, desc_rings_size, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->desc_dma_tag);
	if (err)
		return (err);

	/* Set up a bus_dma_tag for mbufs. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    TX_MAX_DMA_SEGS, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx,
	    &sc->mbuf_dma_tag);
	if (err)
		return (err);

	/*
	 * Allocate DMA memory.  We allocate the transmit, receive and null
	 * descriptor queues all at once because the hardware only provides
	 * one register for the upper 32 bits of the rx and tx descriptor
	 * queues' hardware addresses.
	 */
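	/*
	 * The resulting layout of the single allocation: CGEM_NUM_RX_DESCS
	 * rx descriptors, then CGEM_NUM_TX_DESCS tx descriptors, then (when
	 * neednullqs is set) the lone null rx and tx descriptors.
	 */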
	err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->rxring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rxring_dma_map);
	if (err)
		return (err);

	/* Load descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
	    (void *)sc->rxring, desc_rings_size,
	    cgem_getaddr, &sc->rxring_physaddr, BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize RX descriptors. */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		sc->rxring_m[i] = NULL;
		sc->rxring_m_dmamap[i] = NULL;
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	sc->txring = (struct cgem_tx_desc *)(sc->rxring + CGEM_NUM_RX_DESCS);
	sc->txring_physaddr = sc->rxring_physaddr + CGEM_NUM_RX_DESCS *
	    sizeof(struct cgem_rx_desc);

	/* Initialize TX descriptor ring. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].addr = 0;
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring_m[i] = NULL;
		sc->txring_m_dmamap[i] = NULL;
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	if (sc->neednullqs) {
		sc->null_qs = (void *)(sc->txring + CGEM_NUM_TX_DESCS);
		sc->null_qs_physaddr = sc->txring_physaddr +
		    CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);

		cgem_null_qs(sc);
	}

	return (0);
}

/* Fill receive descriptor ring with mbufs. */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
	struct mbuf *m = NULL;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	int nsegs;

	CGEM_ASSERT_LOCKED(sc);

	while (sc->rxring_queued < sc->rxbufs) {
		/* Get a cluster mbuf. */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			break;

		m->m_len = MCLBYTES;
		m->m_pkthdr.len = MCLBYTES;
		m->m_pkthdr.rcvif = sc->ifp;

		/* Load map and plug in physical address. */
		if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
		    &sc->rxring_m_dmamap[sc->rxring_hd_ptr])) {
			sc->rxdmamapfails++;
			m_free(m);
			break;
		}
		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
		    segs, &nsegs, BUS_DMA_NOWAIT)) {
			sc->rxdmamapfails++;
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[sc->rxring_hd_ptr]);
			sc->rxring_m_dmamap[sc->rxring_hd_ptr] = NULL;
			m_free(m);
			break;
		}
		sc->rxring_m[sc->rxring_hd_ptr] = m;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_hd_ptr],
		    BUS_DMASYNC_PREREAD);

		/* Write rx descriptor and increment head pointer. */
		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
#ifdef CGEM64
		sc->rxring[sc->rxring_hd_ptr].addrhi = segs[0].ds_addr >> 32;
#endif
		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
			    CGEM_RXDESC_WRAP;
			sc->rxring_hd_ptr = 0;
		} else
			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

		sc->rxring_queued++;
	}
}

/* Pull received packets off of receive descriptor ring. */
static void
cgem_recv(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	struct mbuf *m, *m_hd, **m_tl;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* Pick up all packets in which the OWN bit is set. */
	m_hd = NULL;
	m_tl = &m_hd;
	while (sc->rxring_queued > 0 &&
	    (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {
		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

		/* Grab filled mbuf. */
		m = sc->rxring_m[sc->rxring_tl_ptr];
		sc->rxring_m[sc->rxring_tl_ptr] = NULL;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr],
		    BUS_DMASYNC_POSTREAD);

		/* Unload and destroy dmamap. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
		bus_dmamap_destroy(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
		sc->rxring_m_dmamap[sc->rxring_tl_ptr] = NULL;

		/* Increment tail pointer. */
		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
			sc->rxring_tl_ptr = 0;
		sc->rxring_queued--;

		/*
		 * Check FCS and make sure entire packet landed in one mbuf
		 * cluster (which is much bigger than the largest ethernet
		 * packet).
		 */
		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
		    (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
			/* discard. */
			m_free(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		/* Ready it to hand off to upper layers. */
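		/*
		 * The controller stored the frame ETHER_ALIGN (2) bytes into
		 * the buffer (CGEM_NET_CFG_RX_BUF_OFFSET in cgem_config()) so
		 * that the IP header ends up 32-bit aligned; skip the pad.
		 */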
		m->m_data += ETHER_ALIGN;
		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len;

		/*
		 * Are we using hardware checksumming?  Check the status in the
		 * receive descriptor.
		 */
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			/* TCP or UDP checks out, IP checks out too. */
			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID |
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
				/* Only IP checks out. */
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Queue it up for delivery below. */
		*m_tl = m;
		m_tl = &m->m_next;
	}

	/* Replenish receive buffers. */
	cgem_fill_rqueue(sc);

	/* Unlock and send up packets. */
	CGEM_UNLOCK(sc);
	while (m_hd != NULL) {
		m = m_hd;
		m_hd = m_hd->m_next;
		m->m_next = NULL;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		if_input(ifp, m);
	}
	CGEM_LOCK(sc);
}

/* Find completed transmits and free their mbufs. */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* Free up finished transmits. */
	while (sc->txring_queued > 0 &&
	    ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
	    CGEM_TXDESC_USED) != 0) {
		/* Sync cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr],
		    BUS_DMASYNC_POSTWRITE);

		/* Unload and destroy DMA map. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr]);
		bus_dmamap_destroy(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr]);
		sc->txring_m_dmamap[sc->txring_tl_ptr] = NULL;

		/* Free up the mbuf. */
		m = sc->txring_m[sc->txring_tl_ptr];
		sc->txring_m[sc->txring_tl_ptr] = NULL;
		m_freem(m);

		/* Check the status. */
		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
			/* Serious bus error.  Log to console. */
#ifdef CGEM64
			device_printf(sc->dev,
			    "cgem_clean_tx: AHB error, addr=0x%x%08x\n",
			    sc->txring[sc->txring_tl_ptr].addrhi,
			    sc->txring[sc->txring_tl_ptr].addr);
#else
			device_printf(sc->dev,
			    "cgem_clean_tx: AHB error, addr=0x%x\n",
			    sc->txring[sc->txring_tl_ptr].addr);
#endif
		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
		    CGEM_TXDESC_LATE_COLL)) != 0) {
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
		} else
			if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);

		/*
		 * If the packet spanned more than one tx descriptor, skip
		 * descriptors until we find the end so that only
		 * start-of-frame descriptors are processed.
		 */
		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
			if ((ctl & CGEM_TXDESC_WRAP) != 0)
				sc->txring_tl_ptr = 0;
			else
				sc->txring_tl_ptr++;
			sc->txring_queued--;

			ctl = sc->txring[sc->txring_tl_ptr].ctl;

			sc->txring[sc->txring_tl_ptr].ctl =
			    ctl | CGEM_TXDESC_USED;
		}

		/* Next descriptor. */
		if ((ctl & CGEM_TXDESC_WRAP) != 0)
			sc->txring_tl_ptr = 0;
		else
			sc->txring_tl_ptr++;
		sc->txring_queued--;

		if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
	}
}

/* Start transmits. */
static void
cgem_start_locked(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mbuf *m;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	uint32_t ctl;
	int i, nsegs, wrap, err;

	CGEM_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_OACTIVE) != 0)
		return;

	for (;;) {
		/* Check that there is room in the descriptor ring. */
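		/*
		 * Worst case is TX_MAX_DMA_SEGS descriptors for this packet
		 * plus up to TX_MAX_DMA_SEGS more skipped at the end of the
		 * ring by the early-wrap logic below, hence the factor of
		 * two.
		 */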
		if (sc->txring_queued >=
		    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
			/* Try to make room. */
			cgem_clean_tx(sc);

			/* Still no room? */
			if (sc->txring_queued >=
			    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
				sc->txfull++;
				break;
			}
		}

		/* Grab next transmit packet. */
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		/* Create and load DMA map. */
		if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
		    &sc->txring_m_dmamap[sc->txring_hd_ptr])) {
			m_freem(m);
			sc->txdmamapfails++;
			continue;
		}
		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs, &nsegs,
		    BUS_DMA_NOWAIT);
		if (err == EFBIG) {
			/* Too many segments!  Defrag and try again. */
			struct mbuf *m2 = m_defrag(m, M_NOWAIT);

			if (m2 == NULL) {
				sc->txdefragfails++;
				m_freem(m);
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->txring_m_dmamap[sc->txring_hd_ptr]);
				sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
				continue;
			}
			m = m2;
			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs,
			    &nsegs, BUS_DMA_NOWAIT);
			sc->txdefrags++;
		}
		if (err) {
			/* Give up. */
			m_freem(m);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[sc->txring_hd_ptr]);
			sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
			sc->txdmamapfails++;
			continue;
		}
		sc->txring_m[sc->txring_hd_ptr] = m;

		/* Sync tx buffer with cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_hd_ptr],
		    BUS_DMASYNC_PREWRITE);

		/* Set wrap flag if next packet might run off end of ring. */
		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
		    CGEM_NUM_TX_DESCS;
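		/*
		 * (The descriptor fill loop below indexes the ring without
		 * wrapping mid-packet, so wrap early whenever a worst-case
		 * TX_MAX_DMA_SEGS-segment packet might not fit; this wastes
		 * at most TX_MAX_DMA_SEGS descriptors at the end of the
		 * ring.)
		 */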

		/*
		 * Fill in the TX descriptors back to front so that USED bit in
		 * first descriptor is cleared last.
		 */
		for (i = nsegs - 1; i >= 0; i--) {
			/* Descriptor address. */
			sc->txring[sc->txring_hd_ptr + i].addr =
			    segs[i].ds_addr;
#ifdef CGEM64
			sc->txring[sc->txring_hd_ptr + i].addrhi =
			    segs[i].ds_addr >> 32;
#endif
			/* Descriptor control word. */
			ctl = segs[i].ds_len;
			if (i == nsegs - 1) {
				ctl |= CGEM_TXDESC_LAST_BUF;
				if (wrap)
					ctl |= CGEM_TXDESC_WRAP;
			}
			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

			if (i != 0)
				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
		}

		if (wrap)
			sc->txring_hd_ptr = 0;
		else
			sc->txring_hd_ptr += nsegs;
		sc->txring_queued += nsegs;

		/* Kick the transmitter. */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_START_TX);

		/* If there is a BPF listener, bounce a copy to him. */
		ETHER_BPF_MTAP(ifp, m);
	}
}

static void
cgem_start(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);

	CGEM_LOCK(sc);
	cgem_start_locked(ifp);
	CGEM_UNLOCK(sc);
}

static void
cgem_poll_hw_stats(struct cgem_softc *sc)
{
	uint32_t n;

	CGEM_ASSERT_LOCKED(sc);

	sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT);
	sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;

	sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX);
	sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX);
	sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX);
	sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX);
	sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX);
	sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX);
	sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX);
	sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX);
	sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX);
	sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX);
	sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS);

	n = RD4(sc, CGEM_SINGLE_COLL_FRAMES);
	sc->stats.tx_single_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_MULTI_COLL_FRAMES);
	sc->stats.tx_multi_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES);
	sc->stats.tx_excsv_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_LATE_COLL);
	sc->stats.tx_late_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);

	sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES);
	sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS);

	sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT);
	sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32;

	sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX);
	sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX);
	sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX);
	sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX);
	sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX);
	sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX);
	sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX);
	sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX);
	sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX);
	sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX);
	sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX);
	sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX);
	sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX);
	sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS);
	sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS);
	sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS);
	sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS);
	sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS);
	sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS);
	sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS);
	sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS);
	sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS);
}

static void
cgem_tick(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	/* Poll the phy. */
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		mii_tick(mii);
	}

	/* Poll statistics registers. */
	cgem_poll_hw_stats(sc);

	/* Check for receiver hang. */
	if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
		/*
		 * Reset the receiver logic by toggling the RX_EN bit.  The
		 * 1 usec delay is necessary, especially when operating at
		 * 100 or 10 Mb/s.
		 */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow &
		    ~CGEM_NET_CTRL_RX_EN);
		DELAY(1);
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
	}
	sc->rx_frames_prev = sc->stats.rx_frames;

	/* Next callout in one second. */
	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

/* Interrupt handler. */
static void
cgem_intr(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	if_t ifp = sc->ifp;
	uint32_t istatus;

	CGEM_LOCK(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		CGEM_UNLOCK(sc);
		return;
	}

	/* Read interrupt status and immediately clear the bits. */
	istatus = RD4(sc, CGEM_INTR_STAT);
	WR4(sc, CGEM_INTR_STAT, istatus);

	/* Packets received. */
	if ((istatus & CGEM_INTR_RX_COMPLETE) != 0)
		cgem_recv(sc);

	/* Free up any completed transmit buffers. */
	cgem_clean_tx(sc);

	/* Hresp not ok.  Something is very bad with DMA.  Try to clear. */
	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
		device_printf(sc->dev,
		    "cgem_intr: hresp not okay! rx_status=0x%x\n",
		    RD4(sc, CGEM_RX_STAT));
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
	}

	/* Receiver overrun. */
	if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
		/* Clear status bit. */
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN);
		sc->rxoverruns++;
	}

	/* Receiver ran out of bufs. */
	if ((istatus & CGEM_INTR_RX_USED_READ) != 0) {
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_FLUSH_DPRAM_PKT);
		cgem_fill_rqueue(sc);
		sc->rxnobufs++;
	}

	/* Restart transmitter if needed. */
	if (!if_sendq_empty(ifp))
		cgem_start_locked(ifp);

	CGEM_UNLOCK(sc);
}

/* Reset hardware. */
static void
cgem_reset(struct cgem_softc *sc)
{

	CGEM_ASSERT_LOCKED(sc);

	/* Determine data bus width from design configuration register. */
	switch (RD4(sc, CGEM_DESIGN_CFG1) &
	    CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_MASK) {
	case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_64:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_64;
		break;
	case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_128:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_128;
		break;
	default:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_32;
	}

	WR4(sc, CGEM_NET_CTRL, 0);
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
	WR4(sc, CGEM_HASH_BOT, 0);
	WR4(sc, CGEM_HASH_TOP, 0);
	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
	WR4(sc, CGEM_RX_QBAR, 0);

	/* Get management port running even if interface is down. */
	sc->net_cfg_shadow |= CGEM_NET_CFG_MDC_CLK_DIV_48;
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);

	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}

/* Bring up the hardware. */
static void
cgem_config(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t dma_cfg;
	u_char *eaddr = if_getlladdr(ifp);

	CGEM_ASSERT_LOCKED(sc);

	/* Program Net Config Register. */
	sc->net_cfg_shadow &= (CGEM_NET_CFG_MDC_CLK_DIV_MASK |
	    CGEM_NET_CFG_DBUS_WIDTH_MASK);
	sc->net_cfg_shadow |= (CGEM_NET_CFG_FCS_REMOVE |
	    CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
	    CGEM_NET_CFG_GIGE_EN | CGEM_NET_CFG_1536RXEN |
	    CGEM_NET_CFG_FULL_DUPLEX | CGEM_NET_CFG_SPEED100);

	/* Check connection type, enable SGMII bits if necessary. */
	if (sc->phy_contype == MII_CONTYPE_SGMII) {
		sc->net_cfg_shadow |= CGEM_NET_CFG_SGMII_EN;
		sc->net_cfg_shadow |= CGEM_NET_CFG_PCS_SEL;
	}

	/* Enable receive checksum offloading? */
	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
		sc->net_cfg_shadow |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);

	/* Program DMA Config Register. */
	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
	    CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
	    CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
	    CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
#ifdef CGEM64
	    CGEM_DMA_CFG_ADDR_BUS_64 |
#endif
	    CGEM_DMA_CFG_DISC_WHEN_NO_AHB;

	/* Enable transmit checksum offloading? */
	if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

	WR4(sc, CGEM_DMA_CFG, dma_cfg);

	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
	WR4(sc, CGEM_RX_QBAR, (uint32_t)sc->rxring_physaddr);
	WR4(sc, CGEM_TX_QBAR, (uint32_t)sc->txring_physaddr);
#ifdef CGEM64
	WR4(sc, CGEM_RX_QBAR_HI, (uint32_t)(sc->rxring_physaddr >> 32));
	WR4(sc, CGEM_TX_QBAR_HI, (uint32_t)(sc->txring_physaddr >> 32));
#endif

	/* Enable rx and tx. */
	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

	/* Set receive address in case it changed. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	/* Set up interrupts. */
	WR4(sc, CGEM_INTR_EN, CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN |
	    CGEM_INTR_TX_USED_READ | CGEM_INTR_RX_USED_READ |
	    CGEM_INTR_HRESP_NOT_OK);
}

/* Turn on interface and load up receive ring with buffers. */
static void
cgem_init_locked(struct cgem_softc *sc)
{
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0)
		return;

	cgem_config(sc);
	cgem_fill_rqueue(sc);

	if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		mii_mediachg(mii);
	}

	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

static void
cgem_init(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;

	CGEM_LOCK(sc);
	cgem_init_locked(sc);
	CGEM_UNLOCK(sc);
}

/* Turn off interface.  Free up any buffers in transmit or receive queues. */
static void
cgem_stop(struct cgem_softc *sc)
{
	int i;

	CGEM_ASSERT_LOCKED(sc);

	callout_stop(&sc->tick_ch);

	/* Shut down hardware. */
	cgem_reset(sc);

	/* Clear out transmit queue. */
	memset(sc->txring, 0, CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc));
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		if (sc->txring_m[i]) {
			/* Unload and destroy dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[i]);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[i]);
			sc->txring_m_dmamap[i] = NULL;
			m_freem(sc->txring_m[i]);
			sc->txring_m[i] = NULL;
		}
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	/* Clear out receive queue. */
	memset(sc->rxring, 0, CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc));
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		if (sc->rxring_m[i]) {
			/* Unload and destroy dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[i]);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[i]);
			sc->rxring_m_dmamap[i] = NULL;

			m_freem(sc->rxring_m[i]);
			sc->rxring_m[i] = NULL;
		}
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Force next statchg or linkchg to program net config register. */
	sc->mii_media_active = 0;
}

static int
cgem_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct cgem_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFFLAGS:
		CGEM_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				if (((if_getflags(ifp) ^ sc->if_old_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					cgem_rx_filter(sc);
				}
			} else {
				cgem_init_locked(sc);
			}
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			cgem_stop(sc);
		}
		sc->if_old_flags = if_getflags(ifp);
		CGEM_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Set up multi-cast filters. */
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			CGEM_LOCK(sc);
			cgem_rx_filter(sc);
			CGEM_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->miibus == NULL)
			return (ENXIO);
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		CGEM_LOCK(sc);
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;

		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
				/* Turn on TX checksumming. */
				if_setcapenablebit(ifp, IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6, 0);
				if_sethwassistbits(ifp, CGEM_CKSUM_ASSIST, 0);

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) |
				    CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			} else {
				/* Turn off TX checksumming. */
				if_setcapenablebit(ifp, 0, IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6);
				if_sethwassistbits(ifp, 0, CGEM_CKSUM_ASSIST);

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) &
				    ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
				/* Turn on RX checksumming. */
				if_setcapenablebit(ifp, IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6, 0);
				sc->net_cfg_shadow |=
				    CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
				WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
			} else {
				/* Turn off RX checksumming. */
				if_setcapenablebit(ifp, 0, IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6);
				sc->net_cfg_shadow &=
				    ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
				WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
			}
		}
		if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
		    (IFCAP_RXCSUM | IFCAP_TXCSUM))
			if_setcapenablebit(ifp, IFCAP_VLAN_HWCSUM, 0);
		else
			if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWCSUM);

		CGEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/* MII bus support routines. */
static int
cgem_ifmedia_upd(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error = 0;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			PHY_RESET(miisc);
		error = mii_mediachg(mii);
	}
	CGEM_UNLOCK(sc);

	return (error);
}

static void
cgem_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	CGEM_UNLOCK(sc);
}

static int
cgem_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries, val;

	WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
	    CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_READ |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy read timeout: %d\n", reg);
			return (-1);
		}
	}

	val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;

	if (reg == MII_EXTSR)
		/*
		 * MAC does not support half-duplex at gig speeds.
		 * Let mii(4) exclude the capability.
		 */
		val &= ~(EXTSR_1000XHDX | EXTSR_1000THDX);

	return (val);
}

static int
cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries;

	WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
	    CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_WRITE |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
	    (data & CGEM_PHY_MAINT_DATA_MASK));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy write timeout: %d\n", reg);
			return (-1);
		}
	}

	return (0);
}

static void
cgem_miibus_statchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}

static void
cgem_miibus_linkchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}

/*
 * Overridable weak symbol cgem_set_ref_clk().  This allows platforms to
 * provide a function to set the cgem's reference clock.
 */
static int __used
cgem_default_set_ref_clk(int unit, int frequency)
{

	return 0;
}
__weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk);
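
/*
 * For example, a platform could supply its own strong definition of
 * cgem_set_ref_clk() to program the SoC's clock generator; a sketch (the
 * actual mechanism is platform-specific, e.g. the Zynq SLCR code):
 *
 *	int
 *	cgem_set_ref_clk(int unit, int frequency)
 *	{
 *		(program the GEM reference clock for this unit, then)
 *		return (0);
 *	}
 */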

/* Call to set reference clock and network config bits according to media. */
static void
cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
{
	int ref_clk_freq;

	CGEM_ASSERT_LOCKED(sc);

	/* Update hardware to reflect media. */
	sc->net_cfg_shadow &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
	    CGEM_NET_CFG_FULL_DUPLEX);

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		sc->net_cfg_shadow |= (CGEM_NET_CFG_SPEED100 |
		    CGEM_NET_CFG_GIGE_EN);
		ref_clk_freq = 125000000;
		break;
	case IFM_100_TX:
		sc->net_cfg_shadow |= CGEM_NET_CFG_SPEED100;
		ref_clk_freq = 25000000;
		break;
	default:
		ref_clk_freq = 2500000;
	}
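
	/*
	 * 125 / 25 / 2.5 MHz are the standard (R)GMII reference clock
	 * rates for 1000 / 100 / 10 Mb/s operation.
	 */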

	if ((mii->mii_media_active & IFM_FDX) != 0)
		sc->net_cfg_shadow |= CGEM_NET_CFG_FULL_DUPLEX;

	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);

	if (sc->clk_pclk != NULL) {
		CGEM_UNLOCK(sc);
		if (clk_set_freq(sc->clk_pclk, ref_clk_freq, 0))
			device_printf(sc->dev, "could not set ref clk to %d\n",
			    ref_clk_freq);
		CGEM_LOCK(sc);
	}

	sc->mii_media_active = mii->mii_media_active;
}

static void
cgem_add_sysctls(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW,
	    &sc->rxbufs, 0, "Number receive buffers to provide");

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxhangwar", CTLFLAG_RW,
	    &sc->rxhangwar, 0, "Enable receive hang work-around");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxoverruns", CTLFLAG_RD,
	    &sc->rxoverruns, 0, "Receive overrun events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxnobufs", CTLFLAG_RD,
	    &sc->rxnobufs, 0, "Receive buf queue empty events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxdmamapfails", CTLFLAG_RD,
	    &sc->rxdmamapfails, 0, "Receive DMA map failures");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD,
	    &sc->txfull, 0, "Transmit ring full events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD,
	    &sc->txdmamapfails, 0, "Transmit DMA map failures");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD,
	    &sc->txdefrags, 0, "Transmit m_defrag() calls");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefragfails", CTLFLAG_RD,
	    &sc->txdefragfails, 0, "Transmit m_defrag() failures");

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "GEM statistics");
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD,
	    &sc->stats.tx_bytes, "Total bytes transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD,
	    &sc->stats.tx_frames, 0, "Total frames transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD,
	    &sc->stats.tx_frames_bcast, 0,
	    "Number broadcast frames transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD,
	    &sc->stats.tx_frames_multi, 0,
	    "Number multicast frames transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause",
	    CTLFLAG_RD, &sc->stats.tx_frames_pause, 0,
	    "Number pause frames transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD,
	    &sc->stats.tx_frames_64b, 0,
	    "Number frames transmitted of size 64 bytes or less");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_65to127b", CTLFLAG_RD,
	    &sc->stats.tx_frames_65to127b, 0,
	    "Number frames transmitted of size 65-127 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b",
	    CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0,
	    "Number frames transmitted of size 128-255 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b",
	    CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0,
	    "Number frames transmitted of size 256-511 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b",
	    CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0,
	    "Number frames transmitted of size 512-1023 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b",
	    CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0,
	    "Number frames transmitted of size 1024-1536 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs",
	    CTLFLAG_RD, &sc->stats.tx_under_runs, 0,
	    "Number transmit under-run events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_single_collisn",
	    CTLFLAG_RD, &sc->stats.tx_single_collisn, 0,
	    "Number single-collision transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn",
	    CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0,
	    "Number multi-collision transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn",
	    CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0,
	    "Number excessive collision transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn",
	    CTLFLAG_RD, &sc->stats.tx_late_collisn, 0,
	    "Number late-collision transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames",
	    CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0,
	    "Number deferred transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_carrier_sense_errs",
	    CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0,
	    "Number carrier sense errors on transmit");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD,
	    &sc->stats.rx_bytes, "Total bytes received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD,
	    &sc->stats.rx_frames, 0, "Total frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast",
	    CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0,
	    "Number broadcast frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi",
	    CTLFLAG_RD, &sc->stats.rx_frames_multi, 0,
	    "Number multicast frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause",
	    CTLFLAG_RD, &sc->stats.rx_frames_pause, 0,
	    "Number pause frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b",
	    CTLFLAG_RD, &sc->stats.rx_frames_64b, 0,
	    "Number frames received of size 64 bytes or less");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b",
	    CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0,
	    "Number frames received of size 65-127 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b",
	    CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0,
	    "Number frames received of size 128-255 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b",
	    CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0,
	    "Number frames received of size 256-511 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b",
	    CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0,
	    "Number frames received of size 512-1023 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b",
	    CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0,
	    "Number frames received of size 1024-1536 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_undersize",
	    CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0,
	    "Number undersize frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize",
	    CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0,
	    "Number oversize frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber",
	    CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0,
	    "Number jabber frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs",
	    CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0,
	    "Number frames received with FCS errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs",
	    CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0,
	    "Number frames received with length errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs",
	    CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0,
	    "Number receive symbol errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs",
	    CTLFLAG_RD, &sc->stats.rx_align_errs, 0,
	    "Number receive alignment errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs",
	    CTLFLAG_RD, &sc->stats.rx_resource_errs, 0,
	    "Number frames received when no rx buffer available");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs",
	    CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0,
	    "Number frames received but not copied due to receive overrun");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0,
	    "Number frames received with IP header checksum errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0,
	    "Number frames received with TCP checksum errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0,
	    "Number frames received with UDP checksum errors");
}
1714
1715 static int
cgem_probe(device_t dev)1716 cgem_probe(device_t dev)
1717 {
1718
1719 if (!ofw_bus_status_okay(dev))
1720 return (ENXIO);
1721
1722 if (ofw_bus_search_compatible(dev, compat_data)->ocd_str == NULL)
1723 return (ENXIO);
1724
1725 device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");
1726 return (0);
1727 }
1728
static int
cgem_attach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	if_t ifp = NULL;
	int rid, err;
	u_char eaddr[ETHER_ADDR_LEN];
	int hwquirks;
	phandle_t node;

	sc->dev = dev;
	CGEM_LOCK_INIT(sc);

	/* Key off of compatible string and set hardware-specific options. */
	hwquirks = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	if ((hwquirks & HWQUIRK_NEEDNULLQS) != 0)
		sc->neednullqs = 1;
	if ((hwquirks & HWQUIRK_RXHANGWAR) != 0)
		sc->rxhangwar = 1;

	/*
	 * Both pclk and hclk are mandatory, but we don't have a proper
	 * clock driver for Zynq, so don't make it fatal if we can't
	 * get them.
	 */
	if (clk_get_by_ofw_name(dev, 0, "pclk", &sc->clk_pclk) != 0)
		device_printf(dev, "could not retrieve pclk.\n");
	else {
		if (clk_enable(sc->clk_pclk) != 0)
			device_printf(dev, "could not enable pclk.\n");
	}
	if (clk_get_by_ofw_name(dev, 0, "hclk", &sc->clk_hclk) != 0)
		device_printf(dev, "could not retrieve hclk.\n");
	else {
		if (clk_enable(sc->clk_hclk) != 0)
			device_printf(dev, "could not enable hclk.\n");
	}

	/*
	 * Optional clocks: absence is tolerated, but failing to enable
	 * a clock that is present is fatal.
	 */
	if (clk_get_by_ofw_name(dev, 0, "tx_clk", &sc->clk_txclk) == 0) {
		if (clk_enable(sc->clk_txclk) != 0) {
			device_printf(dev, "could not enable tx_clk.\n");
			err = ENXIO;
			goto err_pclk;
		}
	}
	if (clk_get_by_ofw_name(dev, 0, "rx_clk", &sc->clk_rxclk) == 0) {
		if (clk_enable(sc->clk_rxclk) != 0) {
			device_printf(dev, "could not enable rx_clk.\n");
			err = ENXIO;
			goto err_tx_clk;
		}
	}
	if (clk_get_by_ofw_name(dev, 0, "tsu_clk", &sc->clk_tsuclk) == 0) {
		if (clk_enable(sc->clk_tsuclk) != 0) {
			device_printf(dev, "could not enable tsu_clk.\n");
			err = ENXIO;
			goto err_rx_clk;
		}
	}
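
	/*
	 * Determine the PHY connection type (rgmii, gmii, ...) from the
	 * FDT so the MAC can be configured to match.
	 */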
	node = ofw_bus_get_node(dev);
	sc->phy_contype = mii_fdt_get_contype(node);

	/* Get memory resource. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resource.\n");
		err = ENOMEM;
		goto err_tsu_clk;
	}

	/* Get IRQ resource. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resource.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Set up ifnet structure. */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setinitfn(ifp, cgem_init);
	if_setioctlfn(ifp, cgem_ioctl);
	if_setstartfn(ifp, cgem_start);
	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM, 0);
	if_setsendqlen(ifp, CGEM_NUM_TX_DESCS);
	if_setsendqready(ifp);

	/* Disable hardware checksumming by default. */
	if_sethwassist(ifp, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp) &
	    ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM));

	sc->if_old_flags = if_getflags(ifp);
	sc->rxbufs = DEFAULT_NUM_RX_BUFS;

	/* Reset hardware. */
	CGEM_LOCK(sc);
	cgem_reset(sc);
	CGEM_UNLOCK(sc);

	/* Attach phy to mii bus. */
	err = mii_attach(dev, &sc->miibus, ifp,
	    cgem_ifmedia_upd, cgem_ifmedia_sts, BMSR_DEFCAPMASK,
	    MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (err)
		device_printf(dev, "warning: attaching PHYs failed\n");

	/* Set up TX and RX descriptor area. */
	err = cgem_setup_descs(sc);
	if (err) {
		device_printf(dev, "could not set up dma mem for descs.\n");
		cgem_detach(dev);
		goto err;
	}

	/* Get a MAC address. */
	cgem_get_mac(sc, eaddr);

	/* Start ticks. */
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	ether_ifattach(ifp, eaddr);

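	/*
	 * Hook up the interrupt handler last, after the interface is
	 * fully set up.  INTR_EXCL requests exclusive use of the IRQ.
	 */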
	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
	    INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
	if (err) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		cgem_detach(dev);
		goto err;
	}

	cgem_add_sysctls(dev);

	return (0);

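	/*
	 * Error unwind: each label falls through to the next, releasing
	 * the clocks acquired before the point of failure in reverse
	 * order.
	 */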
err_tsu_clk:
	if (sc->clk_tsuclk)
		clk_release(sc->clk_tsuclk);
err_rx_clk:
	if (sc->clk_rxclk)
		clk_release(sc->clk_rxclk);
err_tx_clk:
	if (sc->clk_txclk)
		clk_release(sc->clk_txclk);
err_pclk:
	if (sc->clk_pclk)
		clk_release(sc->clk_pclk);
	if (sc->clk_hclk)
		clk_release(sc->clk_hclk);
err:
	return (err);
}

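/*
 * Detach: also used to unwind a partially completed attach, so every
 * resource is checked before it is released.
 */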
static int
cgem_detach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int i;

	if (sc == NULL)
		return (ENODEV);

	if (device_is_attached(dev)) {
		CGEM_LOCK(sc);
		cgem_stop(sc);
		CGEM_UNLOCK(sc);
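		/*
		 * The tick callout was initialized with sc_mtx, so it
		 * must be drained with the lock released.
		 */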
		callout_drain(&sc->tick_ch);
		if_setflagbits(sc->ifp, 0, IFF_UP);
		ether_ifdetach(sc->ifp);
	}

	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}

	/* Release resources. */
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		if (sc->intrhand)
			bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}

	/* Release DMA resources. */
	if (sc->rxring != NULL) {
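		/*
		 * The rx ring, tx ring, and null queues share one
		 * descriptor DMA allocation (see cgem_setup_descs()),
		 * so a single unload/free covers all three.
		 */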
		if (sc->rxring_physaddr != 0) {
			bus_dmamap_unload(sc->desc_dma_tag,
			    sc->rxring_dma_map);
			sc->rxring_physaddr = 0;
			sc->txring_physaddr = 0;
			sc->null_qs_physaddr = 0;
		}
		bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
		    sc->rxring_dma_map);
		sc->rxring = NULL;
		sc->txring = NULL;
		sc->null_qs = NULL;

		for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
			if (sc->rxring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->rxring_m_dmamap[i]);
				sc->rxring_m_dmamap[i] = NULL;
			}
		for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
			if (sc->txring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->txring_m_dmamap[i]);
				sc->txring_m_dmamap[i] = NULL;
			}
	}
	if (sc->desc_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->desc_dma_tag);
		sc->desc_dma_tag = NULL;
	}
	if (sc->mbuf_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->mbuf_dma_tag);
		sc->mbuf_dma_tag = NULL;
	}

	bus_generic_detach(dev);

	if (sc->clk_tsuclk)
		clk_release(sc->clk_tsuclk);
	if (sc->clk_rxclk)
		clk_release(sc->clk_rxclk);
	if (sc->clk_txclk)
		clk_release(sc->clk_txclk);
	if (sc->clk_pclk)
		clk_release(sc->clk_pclk);
	if (sc->clk_hclk)
		clk_release(sc->clk_hclk);

	CGEM_LOCK_DESTROY(sc);

	return (0);
}

static device_method_t cgem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cgem_probe),
	DEVMETHOD(device_attach,	cgem_attach),
	DEVMETHOD(device_detach,	cgem_detach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	cgem_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cgem_miibus_writereg),
	DEVMETHOD(miibus_statchg,	cgem_miibus_statchg),
	DEVMETHOD(miibus_linkchg,	cgem_miibus_linkchg),

	DEVMETHOD_END
};

static driver_t cgem_driver = {
	"cgem",
	cgem_methods,
	sizeof(struct cgem_softc),
};

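/*
 * cgem attaches below simplebus (matched via SIMPLEBUS_PNP_INFO), and
 * miibus attaches as a child of cgem so PHYs are accessed through the
 * MII methods above.
 */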
DRIVER_MODULE(cgem, simplebus, cgem_driver, NULL, NULL);
DRIVER_MODULE(miibus, cgem, miibus_driver, NULL, NULL);
MODULE_DEPEND(cgem, miibus, 1, 1, 1);
MODULE_DEPEND(cgem, ether, 1, 1, 1);
SIMPLEBUS_PNP_INFO(compat_data);