xref: /freebsd/sys/dev/cadence/if_cgem.c (revision 95d45410b5100e07f6f98450bcd841a8945d4726)
1 /*-
2  * Copyright (c) 2012-2014 Thomas Skibo
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /*
28  * A network interface driver for the Cadence GEM Gigabit Ethernet
29  * controller, such as the one used in the Xilinx Zynq-7000 SoC.
30  *
31  * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
32  * (v1.4) November 16, 2012.  Xilinx doc UG585.  GEM is covered in Ch. 16
33  * and register definitions are in appendix B.18.
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 #include <sys/mbuf.h>
45 #include <sys/module.h>
46 #include <sys/rman.h>
47 #include <sys/socket.h>
48 #include <sys/sockio.h>
49 #include <sys/sysctl.h>
50 
51 #include <machine/bus.h>
52 
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_var.h>
56 #include <net/if_arp.h>
57 #include <net/if_dl.h>
58 #include <net/if_media.h>
59 #include <net/if_mib.h>
60 #include <net/if_types.h>
61 
62 #ifdef INET
63 #include <netinet/in.h>
64 #include <netinet/in_systm.h>
65 #include <netinet/in_var.h>
66 #include <netinet/ip.h>
67 #endif
68 
69 #include <net/bpf.h>
70 #include <net/bpfdesc.h>
71 
72 #include <dev/fdt/fdt_common.h>
73 #include <dev/ofw/ofw_bus.h>
74 #include <dev/ofw/ofw_bus_subr.h>
75 
76 #include <dev/mii/mii.h>
77 #include <dev/mii/miivar.h>
78 
79 #include <dev/cadence/if_cgem_hw.h>
80 
81 #include "miibus_if.h"
82 
83 #define IF_CGEM_NAME "cgem"
84 
85 #define CGEM_NUM_RX_DESCS	256	/* size of receive descriptor ring */
86 #define CGEM_NUM_TX_DESCS	256	/* size of transmit descriptor ring */
87 
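/* Size for the single descriptor DMA tag that backs both rings. */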
88 #define MAX_DESC_RING_SIZE (MAX(CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),\
89 				CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc)))
90 
91 
92 /* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS of course. */
93 #define DEFAULT_NUM_RX_BUFS	64	/* number of receive bufs to queue. */
94 
95 #define TX_MAX_DMA_SEGS		4	/* maximum segs in a tx mbuf dma */
96 
97 #define CGEM_CKSUM_ASSIST	(CSUM_IP | CSUM_TCP | CSUM_UDP | \
98 				 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
99 
100 struct cgem_softc {
101 	struct ifnet		*ifp;
102 	struct mtx		sc_mtx;
103 	device_t		dev;
104 	device_t		miibus;
105 	int			if_old_flags;
106 	struct resource 	*mem_res;
107 	struct resource 	*irq_res;
108 	void			*intrhand;
109 	struct callout		tick_ch;
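	/* Soft copy of the net control register.  cgem_start_locked()
	 * writes (net_ctl_shadow | CGEM_NET_CTRL_START_TX) so the enable
	 * bits kept here survive kicking the transmitter.
	 */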
110 	uint32_t		net_ctl_shadow;
111 	int			ref_clk_num;
112 	u_char			eaddr[6];
113 
114 	bus_dma_tag_t		desc_dma_tag;
115 	bus_dma_tag_t		mbuf_dma_tag;
116 
117 	/* receive descriptor ring */
118 	struct cgem_rx_desc	*rxring;
119 	bus_addr_t		rxring_physaddr;
120 	struct mbuf		*rxring_m[CGEM_NUM_RX_DESCS];
121 	bus_dmamap_t		rxring_m_dmamap[CGEM_NUM_RX_DESCS];
122 	int			rxring_hd_ptr;	/* where to put rcv bufs */
123 	int			rxring_tl_ptr;	/* where to get receives */
124 	int			rxring_queued;	/* how many rcv bufs queued */
125 	bus_dmamap_t		rxring_dma_map;
126 	int			rxbufs;		/* tunable number rcv bufs */
127 	int			rxoverruns;	/* rx ring overruns */
128 
129 	/* transmit descriptor ring */
130 	struct cgem_tx_desc	*txring;
131 	bus_addr_t		txring_physaddr;
132 	struct mbuf		*txring_m[CGEM_NUM_TX_DESCS];
133 	bus_dmamap_t		txring_m_dmamap[CGEM_NUM_TX_DESCS];
134 	int			txring_hd_ptr;	/* where to put next xmits */
135 	int			txring_tl_ptr;	/* next xmit mbuf to free */
136 	int			txring_queued;	/* num xmits segs queued */
137 	bus_dmamap_t		txring_dma_map;
138 };
139 
140 #define RD4(sc, off) 		(bus_read_4((sc)->mem_res, (off)))
141 #define WR4(sc, off, val) 	(bus_write_4((sc)->mem_res, (off), (val)))
142 #define BARRIER(sc, off, len, flags) \
143 	(bus_barrier((sc)->mem_res, (off), (len), (flags)))
144 
145 #define CGEM_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
146 #define CGEM_UNLOCK(sc)	mtx_unlock(&(sc)->sc_mtx)
147 #define CGEM_LOCK_INIT(sc)	\
148 	mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \
149 		 MTX_NETWORK_LOCK, MTX_DEF)
150 #define CGEM_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->sc_mtx)
151 #define CGEM_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)
152 
153 /* Allow platforms to optionally provide a way to set the reference clock. */
154 int cgem_set_ref_clk(int unit, int frequency);
155 
156 static devclass_t cgem_devclass;
157 
158 static int cgem_probe(device_t dev);
159 static int cgem_attach(device_t dev);
160 static int cgem_detach(device_t dev);
161 static void cgem_tick(void *);
162 static void cgem_intr(void *);
163 
164 static void
165 cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
166 {
167 	int i;
168 	uint32_t rnd;
169 
170 	/* See if boot loader gave us a MAC address already. */
171 	for (i = 0; i < 4; i++) {
172 		uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
173 		uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
174 		if (low != 0 || high != 0) {
175 			eaddr[0] = low & 0xff;
176 			eaddr[1] = (low >> 8) & 0xff;
177 			eaddr[2] = (low >> 16) & 0xff;
178 			eaddr[3] = (low >> 24) & 0xff;
179 			eaddr[4] = high & 0xff;
180 			eaddr[5] = (high >> 8) & 0xff;
181 			break;
182 		}
183 	}
184 
185 	/* No MAC from boot loader?  Assign a random one. */
186 	if (i == 4) {
187 		rnd = arc4random();
188 
189 		eaddr[0] = 'b';
190 		eaddr[1] = 's';
191 		eaddr[2] = 'd';
192 		eaddr[3] = (rnd >> 16) & 0xff;
193 		eaddr[4] = (rnd >> 8) & 0xff;
194 		eaddr[5] = rnd & 0xff;
195 
196 		device_printf(sc->dev, "no mac address found, assigning "
197 			      "random: %02x:%02x:%02x:%02x:%02x:%02x\n",
198 			      eaddr[0], eaddr[1], eaddr[2],
199 			      eaddr[3], eaddr[4], eaddr[5]);
200 
201 		WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
202 		    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
203 		WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);
204 	}
205 }
206 
207 /* cgem_mac_hash():  map 48-bit address to a 6-bit hash.
208  * The 6-bit hash corresponds to a bit in a 64-bit hash
209  * register.  Setting that bit in the hash register enables
210  * reception of all frames with a destination address that hashes
211  * to that 6-bit value.
212  *
213  * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
214  * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
215  * every sixth bit in the destination address.
216  */
217 static int
218 cgem_mac_hash(u_char eaddr[])
219 {
220 	int hash;
221 	int i, j;
222 
223 	hash = 0;
224 	for (i = 0; i < 6; i++)
225 		for (j = i; j < 48; j += 6)
226 			if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
227 				hash ^= (1 << i);
228 
229 	return hash;
230 }
231 
232 /* After any change in rx flags or multi-cast addresses, set up
233  * hash registers and net config register bits.
234  */
235 static void
236 cgem_rx_filter(struct cgem_softc *sc)
237 {
238 	struct ifnet *ifp = sc->ifp;
239 	struct ifmultiaddr *ifma;
240 	int index;
241 	uint32_t hash_hi, hash_lo;
242 	uint32_t net_cfg;
243 
244 	hash_hi = 0;
245 	hash_lo = 0;
246 
247 	net_cfg = RD4(sc, CGEM_NET_CFG);
248 
249 	net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
250 		     CGEM_NET_CFG_NO_BCAST |
251 		     CGEM_NET_CFG_COPY_ALL);
252 
253 	if ((ifp->if_flags & IFF_PROMISC) != 0)
254 		net_cfg |= CGEM_NET_CFG_COPY_ALL;
255 	else {
256 		if ((ifp->if_flags & IFF_BROADCAST) == 0)
257 			net_cfg |= CGEM_NET_CFG_NO_BCAST;
258 		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
259 			hash_hi = 0xffffffff;
260 			hash_lo = 0xffffffff;
261 		} else {
262 			if_maddr_rlock(ifp);
263 			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
264 				if (ifma->ifma_addr->sa_family != AF_LINK)
265 					continue;
266 				index = cgem_mac_hash(
267 					LLADDR((struct sockaddr_dl *)
268 					       ifma->ifma_addr));
269 				if (index > 31)
270 					hash_hi |= (1<<(index-32));
271 				else
272 					hash_lo |= (1<<index);
273 			}
274 			if_maddr_runlock(ifp);
275 		}
276 
277 		if (hash_hi != 0 || hash_lo != 0)
278 			net_cfg |= CGEM_NET_CFG_MULTI_HASH_EN;
279 	}
280 
281 	WR4(sc, CGEM_HASH_TOP, hash_hi);
282 	WR4(sc, CGEM_HASH_BOT, hash_lo);
283 	WR4(sc, CGEM_NET_CFG, net_cfg);
284 }
285 
286 /* For bus_dmamap_load() callback. */
287 static void
288 cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
289 {
290 
291 	if (nsegs != 1 || error != 0)
292 		return;
293 	*(bus_addr_t *)arg = segs[0].ds_addr;
294 }
295 
296 /* Create DMA'able descriptor rings. */
297 static int
298 cgem_setup_descs(struct cgem_softc *sc)
299 {
300 	int i, err;
301 
302 	sc->txring = NULL;
303 	sc->rxring = NULL;
304 
305 	/* Allocate non-cached DMA space for RX and TX descriptors.
306 	 */
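	/* One physically contiguous segment, 32-bit addressable, and large
	 * enough to hold either ring.
	 */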
307 	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
308 				 BUS_SPACE_MAXADDR_32BIT,
309 				 BUS_SPACE_MAXADDR,
310 				 NULL, NULL,
311 				 MAX_DESC_RING_SIZE,
312 				 1,
313 				 MAX_DESC_RING_SIZE,
314 				 0,
315 				 busdma_lock_mutex,
316 				 &sc->sc_mtx,
317 				 &sc->desc_dma_tag);
318 	if (err)
319 		return (err);
320 
321 	/* Set up a bus_dma_tag for mbufs. */
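	/* Up to TX_MAX_DMA_SEGS segments per map and MCLBYTES per mbuf,
	 * also limited to 32-bit bus addresses.
	 */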
322 	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
323 				 BUS_SPACE_MAXADDR_32BIT,
324 				 BUS_SPACE_MAXADDR,
325 				 NULL, NULL,
326 				 MCLBYTES,
327 				 TX_MAX_DMA_SEGS,
328 				 MCLBYTES,
329 				 0,
330 				 busdma_lock_mutex,
331 				 &sc->sc_mtx,
332 				 &sc->mbuf_dma_tag);
333 	if (err)
334 		return (err);
335 
336 	/* Allocate DMA memory in non-cacheable space. */
337 	err = bus_dmamem_alloc(sc->desc_dma_tag,
338 			       (void **)&sc->rxring,
339 			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
340 			       &sc->rxring_dma_map);
341 	if (err)
342 		return (err);
343 
344 	/* Load descriptor DMA memory. */
345 	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
346 			      (void *)sc->rxring,
347 			      CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),
348 			      cgem_getaddr, &sc->rxring_physaddr,
349 			      BUS_DMA_NOWAIT);
350 	if (err)
351 		return (err);
352 
353 	/* Initialize RX descriptors. */
354 	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
355 		sc->rxring[i].addr = CGEM_RXDESC_OWN;
356 		sc->rxring[i].ctl = 0;
357 		sc->rxring_m[i] = NULL;
358 		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
359 					&sc->rxring_m_dmamap[i]);
360 		if (err)
361 			return (err);
362 	}
363 	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;
364 
365 	sc->rxring_hd_ptr = 0;
366 	sc->rxring_tl_ptr = 0;
367 	sc->rxring_queued = 0;
368 
369 	/* Allocate DMA memory for TX descriptors in non-cacheable space. */
370 	err = bus_dmamem_alloc(sc->desc_dma_tag,
371 			       (void **)&sc->txring,
372 			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
373 			       &sc->txring_dma_map);
374 	if (err)
375 		return (err);
376 
377 	/* Load TX descriptor DMA memory. */
378 	err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
379 			      (void *)sc->txring,
380 			      CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc),
381 			      cgem_getaddr, &sc->txring_physaddr,
382 			      BUS_DMA_NOWAIT);
383 	if (err)
384 		return (err);
385 
386 	/* Initialize TX descriptor ring. */
387 	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
388 		sc->txring[i].addr = 0;
389 		sc->txring[i].ctl = CGEM_TXDESC_USED;
390 		sc->txring_m[i] = NULL;
391 		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
392 					&sc->txring_m_dmamap[i]);
393 		if (err)
394 			return (err);
395 	}
396 	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;
397 
398 	sc->txring_hd_ptr = 0;
399 	sc->txring_tl_ptr = 0;
400 	sc->txring_queued = 0;
401 
402 	return (0);
403 }
404 
405 /* Fill receive descriptor ring with mbufs. */
406 static void
407 cgem_fill_rqueue(struct cgem_softc *sc)
408 {
409 	struct mbuf *m = NULL;
410 	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
411 	int nsegs;
412 
413 	CGEM_ASSERT_LOCKED(sc);
414 
415 	while (sc->rxring_queued < sc->rxbufs) {
416 		/* Get a cluster mbuf. */
417 		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
418 		if (m == NULL)
419 			break;
420 
421 		m->m_len = MCLBYTES;
422 		m->m_pkthdr.len = MCLBYTES;
423 		m->m_pkthdr.rcvif = sc->ifp;
424 
425 		/* Load map and plug in physical address. */
426 		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
427 			      sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
428 			      segs, &nsegs, BUS_DMA_NOWAIT)) {
429 			/* XXX: warn? */
430 			m_free(m);
431 			break;
432 		}
433 		sc->rxring_m[sc->rxring_hd_ptr] = m;
434 
435 		/* Sync cache with receive buffer. */
436 		bus_dmamap_sync(sc->mbuf_dma_tag,
437 				sc->rxring_m_dmamap[sc->rxring_hd_ptr],
438 				BUS_DMASYNC_PREREAD);
439 
440 		/* Write rx descriptor and increment head pointer. */
441 		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
442 		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
443 			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
444 				CGEM_RXDESC_WRAP;
445 			sc->rxring_hd_ptr = 0;
446 		} else
447 			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;
448 
449 		sc->rxring_queued++;
450 	}
451 }
452 
453 /* Pull received packets off of receive descriptor ring. */
454 static void
455 cgem_recv(struct cgem_softc *sc)
456 {
457 	struct ifnet *ifp = sc->ifp;
458 	struct mbuf *m;
459 	uint32_t ctl;
460 
461 	CGEM_ASSERT_LOCKED(sc);
462 
463 	/* Pick up all packets in which the OWN bit is set. */
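	/* (The controller sets OWN once it has stored a frame in the buffer;
	 * cgem_fill_rqueue() hands descriptors back with OWN clear.)
	 */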
464 	while (sc->rxring_queued > 0 &&
465 	       (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {
466 
467 		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;
468 
469 		/* Grab filled mbuf. */
470 		m = sc->rxring_m[sc->rxring_tl_ptr];
471 		sc->rxring_m[sc->rxring_tl_ptr] = NULL;
472 
473 		/* Sync cache with receive buffer. */
474 		bus_dmamap_sync(sc->mbuf_dma_tag,
475 				sc->rxring_m_dmamap[sc->rxring_tl_ptr],
476 				BUS_DMASYNC_POSTREAD);
477 
478 		/* Unload dmamap. */
479 		bus_dmamap_unload(sc->mbuf_dma_tag,
480 		  	sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
481 
482 		/* Increment tail pointer. */
483 		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
484 			sc->rxring_tl_ptr = 0;
485 		sc->rxring_queued--;
486 
487 		/* Check FCS and make sure entire packet landed in one mbuf
488 		 * cluster (which is much bigger than the largest ethernet
489 		 * packet).
490 		 */
491 		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
492 		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
493 		           (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
494 			/* discard. */
495 			m_free(m);
496 			ifp->if_ierrors++;
497 			continue;
498 		}
499 
500 		/* Hand it off to upper layers. */
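		/* The controller stored the frame ETHER_ALIGN bytes into the
		 * buffer (CGEM_NET_CFG_RX_BUF_OFFSET in cgem_config()), which
		 * keeps the IP header 32-bit aligned.
		 */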
501 		m->m_data += ETHER_ALIGN;
502 		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
503 		m->m_pkthdr.rcvif = ifp;
504 		m->m_pkthdr.len = m->m_len;
505 
506 		/* Are we using hardware checksumming?  Check the
507 		 * status in the receive descriptor.
508 		 */
509 		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
510 			/* TCP or UDP checks out, IP checks out too. */
511 			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
512 			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
513 			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
514 			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
515 				m->m_pkthdr.csum_flags |=
516 					CSUM_IP_CHECKED | CSUM_IP_VALID |
517 					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
518 				m->m_pkthdr.csum_data = 0xffff;
519 			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
520 				   CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
521 				/* Only IP checks out. */
522 				m->m_pkthdr.csum_flags |=
523 					CSUM_IP_CHECKED | CSUM_IP_VALID;
524 				m->m_pkthdr.csum_data = 0xffff;
525 			}
526 		}
527 
528 		ifp->if_ipackets++;
529 		CGEM_UNLOCK(sc);
530 		(*ifp->if_input)(ifp, m);
531 		CGEM_LOCK(sc);
532 	}
533 }
534 
535 /* Find completed transmits and free their mbufs. */
536 static void
537 cgem_clean_tx(struct cgem_softc *sc)
538 {
539 	struct mbuf *m;
540 	uint32_t ctl;
541 
542 	CGEM_ASSERT_LOCKED(sc);
543 
544 	/* free up finished transmits. */
545 	while (sc->txring_queued > 0 &&
546 	       ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
547 		CGEM_TXDESC_USED) != 0) {
548 
549 		/* Sync cache.  nop? */
550 		bus_dmamap_sync(sc->mbuf_dma_tag,
551 				sc->txring_m_dmamap[sc->txring_tl_ptr],
552 				BUS_DMASYNC_POSTWRITE);
553 
554 		/* Unload DMA map. */
555 		bus_dmamap_unload(sc->mbuf_dma_tag,
556 				  sc->txring_m_dmamap[sc->txring_tl_ptr]);
557 
558 		/* Free up the mbuf. */
559 		m = sc->txring_m[sc->txring_tl_ptr];
560 		sc->txring_m[sc->txring_tl_ptr] = NULL;
561 		m_freem(m);
562 
563 		/* Check the status. */
564 		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
565 			/* Serious bus error.  Log to console. */
566 			device_printf(sc->dev, "cgem_clean_tx: Whoa! "
567 				   "AHB error, addr=0x%x\n",
568 				   sc->txring[sc->txring_tl_ptr].addr);
569 		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
570 				   CGEM_TXDESC_LATE_COLL)) != 0) {
571 			sc->ifp->if_oerrors++;
572 		} else
573 			sc->ifp->if_opackets++;
574 
575 		/* If the packet spanned more than one tx descriptor,
576 		 * skip descriptors until we find the end so that only
577 		 * start-of-frame descriptors are processed.
578 		 */
579 		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
580 			if ((ctl & CGEM_TXDESC_WRAP) != 0)
581 				sc->txring_tl_ptr = 0;
582 			else
583 				sc->txring_tl_ptr++;
584 			sc->txring_queued--;
585 
586 			ctl = sc->txring[sc->txring_tl_ptr].ctl;
587 
588 			sc->txring[sc->txring_tl_ptr].ctl =
589 				ctl | CGEM_TXDESC_USED;
590 		}
591 
592 		/* Next descriptor. */
593 		if ((ctl & CGEM_TXDESC_WRAP) != 0)
594 			sc->txring_tl_ptr = 0;
595 		else
596 			sc->txring_tl_ptr++;
597 		sc->txring_queued--;
598 	}
599 }
600 
601 /* Start transmits. */
602 static void
603 cgem_start_locked(struct ifnet *ifp)
604 {
605 	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
606 	struct mbuf *m;
607 	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
608 	uint32_t ctl;
609 	int i, nsegs, wrap, err;
610 
611 	CGEM_ASSERT_LOCKED(sc);
612 
613 	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0)
614 		return;
615 
616 	for (;;) {
617 		/* Check that there is room in the descriptor ring. */
618 		if (sc->txring_queued >= CGEM_NUM_TX_DESCS -
619 		    TX_MAX_DMA_SEGS - 1) {
620 
621 			/* Try to make room. */
622 			cgem_clean_tx(sc);
623 
624 			/* Still no room? */
625 			if (sc->txring_queued >= CGEM_NUM_TX_DESCS -
626 			    TX_MAX_DMA_SEGS - 1) {
627 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
628 				break;
629 			}
630 		}
631 
632 		/* Grab next transmit packet. */
633 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
634 		if (m == NULL)
635 			break;
636 
637 		/* Load DMA map. */
638 		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
639 				      sc->txring_m_dmamap[sc->txring_hd_ptr],
640 				      m, segs, &nsegs, BUS_DMA_NOWAIT);
641 		if (err == EFBIG) {
642 			/* Too many segments!  Defrag and try again. */
643 			struct mbuf *m2 = m_defrag(m, M_NOWAIT);
644 
645 			if (m2 == NULL) {
646 				m_freem(m);
647 				continue;
648 			}
649 			m = m2;
650 			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
651 				      sc->txring_m_dmamap[sc->txring_hd_ptr],
652 				      m, segs, &nsegs, BUS_DMA_NOWAIT);
653 		}
654 		if (err) {
655 			/* Give up. */
656 			m_freem(m);
657 			continue;
658 		}
659 		sc->txring_m[sc->txring_hd_ptr] = m;
660 
661 		/* Sync tx buffer with cache. */
662 		bus_dmamap_sync(sc->mbuf_dma_tag,
663 				sc->txring_m_dmamap[sc->txring_hd_ptr],
664 				BUS_DMASYNC_PREWRITE);
665 
666 		/* Set wrap flag if next packet might run off end of ring. */
667 		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
668 			CGEM_NUM_TX_DESCS;
669 
670 		/* Fill in the TX descriptors back to front so that USED
671 		 * bit in first descriptor is cleared last.
672 		 */
673 		for (i = nsegs - 1; i >= 0; i--) {
674 			/* Descriptor address. */
675 			sc->txring[sc->txring_hd_ptr + i].addr =
676 				segs[i].ds_addr;
677 
678 			/* Descriptor control word. */
679 			ctl = segs[i].ds_len;
680 			if (i == nsegs - 1) {
681 				ctl |= CGEM_TXDESC_LAST_BUF;
682 				if (wrap)
683 					ctl |= CGEM_TXDESC_WRAP;
684 			}
685 			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;
686 
687 			if (i != 0)
688 				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
689 		}
690 
691 		if (wrap)
692 			sc->txring_hd_ptr = 0;
693 		else
694 			sc->txring_hd_ptr += nsegs;
695 		sc->txring_queued += nsegs;
696 
697 		/* Kick the transmitter. */
698 		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
699 		    CGEM_NET_CTRL_START_TX);
700 	}
701 
702 }
703 
704 static void
705 cgem_start(struct ifnet *ifp)
706 {
707 	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
708 
709 	CGEM_LOCK(sc);
710 	cgem_start_locked(ifp);
711 	CGEM_UNLOCK(sc);
712 }
713 
714 static void
715 cgem_tick(void *arg)
716 {
717 	struct cgem_softc *sc = (struct cgem_softc *)arg;
718 	struct mii_data *mii;
719 
720 	CGEM_ASSERT_LOCKED(sc);
721 
722 	/* Poll the phy. */
723 	if (sc->miibus != NULL) {
724 		mii = device_get_softc(sc->miibus);
725 		mii_tick(mii);
726 	}
727 
728 	/* Next callout in one second. */
729 	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
730 }
731 
732 /* Interrupt handler. */
733 static void
734 cgem_intr(void *arg)
735 {
736 	struct cgem_softc *sc = (struct cgem_softc *)arg;
737 	uint32_t istatus;
738 
739 	CGEM_LOCK(sc);
740 
741 	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
742 		CGEM_UNLOCK(sc);
743 		return;
744 	}
745 
746 	istatus = RD4(sc, CGEM_INTR_STAT);
747 	WR4(sc, CGEM_INTR_STAT, istatus &
748 	    (CGEM_INTR_RX_COMPLETE | CGEM_INTR_TX_USED_READ |
749 	     CGEM_INTR_RX_OVERRUN | CGEM_INTR_HRESP_NOT_OK));
750 
751 	/* Hresp not ok.  Something very bad with DMA.  Try to clear. */
752 	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
753 		printf("cgem_intr: hresp not okay! rx_status=0x%x\n",
754 		       RD4(sc, CGEM_RX_STAT));
755 		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
756 	}
757 
758 	/* Transmitter has idled.  Free up any spent transmit buffers. */
759 	if ((istatus & CGEM_INTR_TX_USED_READ) != 0)
760 		cgem_clean_tx(sc);
761 
762 	/* Packets received or overflow. */
763 	if ((istatus & (CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN)) != 0) {
764 		cgem_recv(sc);
765 		cgem_fill_rqueue(sc);
766 		if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
767 			/* Clear rx status register. */
768 			sc->rxoverruns++;
769 			WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
770 		}
771 	}
772 
773 	CGEM_UNLOCK(sc);
774 }
775 
776 /* Reset hardware. */
777 static void
778 cgem_reset(struct cgem_softc *sc)
779 {
780 
781 	CGEM_ASSERT_LOCKED(sc);
782 
783 	WR4(sc, CGEM_NET_CTRL, 0);
784 	WR4(sc, CGEM_NET_CFG, 0);
785 	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
786 	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
787 	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
788 	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
789 	WR4(sc, CGEM_HASH_BOT, 0);
790 	WR4(sc, CGEM_HASH_TOP, 0);
791 	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
792 	WR4(sc, CGEM_RX_QBAR, 0);
793 
794 	/* Get management port running even if interface is down. */
795 	WR4(sc, CGEM_NET_CFG,
796 	    CGEM_NET_CFG_DBUS_WIDTH_32 |
797 	    CGEM_NET_CFG_MDC_CLK_DIV_64);
798 
799 	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
800 	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
801 }
802 
803 /* Bring up the hardware. */
804 static void
805 cgem_config(struct cgem_softc *sc)
806 {
807 	uint32_t net_cfg;
808 	uint32_t dma_cfg;
809 
810 	CGEM_ASSERT_LOCKED(sc);
811 
812 	/* Program Net Config Register. */
813 	net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
814 		CGEM_NET_CFG_MDC_CLK_DIV_64 |
815 		CGEM_NET_CFG_FCS_REMOVE |
816 		CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
817 		CGEM_NET_CFG_GIGE_EN |
818 		CGEM_NET_CFG_FULL_DUPLEX |
819 		CGEM_NET_CFG_SPEED100;
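	/* The speed and duplex bits above are provisional; they are rewritten
	 * in cgem_miibus_statchg() once the PHY reports the negotiated link.
	 */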
820 
821 	/* Enable receive checksum offloading? */
822 	if ((sc->ifp->if_capenable & IFCAP_RXCSUM) != 0)
823 		net_cfg |=  CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
824 
825 	WR4(sc, CGEM_NET_CFG, net_cfg);
826 
827 	/* Program DMA Config Register. */
828 	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
829 		CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
830 		CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
831 		CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16;
832 
833 	/* Enable transmit checksum offloading? */
834 	if ((sc->ifp->if_capenable & IFCAP_TXCSUM) != 0)
835 		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;
836 
837 	WR4(sc, CGEM_DMA_CFG, dma_cfg);
838 
839 	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
840 	WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
841 	WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);
842 
843 	/* Enable rx and tx. */
844 	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
845 	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
846 
847 	/* Set up interrupts. */
848 	WR4(sc, CGEM_INTR_EN,
849 	    CGEM_INTR_RX_COMPLETE | CGEM_INTR_TX_USED_READ |
850 	    CGEM_INTR_RX_OVERRUN | CGEM_INTR_HRESP_NOT_OK);
851 }
852 
853 /* Turn on interface and load up receive ring with buffers. */
854 static void
855 cgem_init_locked(struct cgem_softc *sc)
856 {
857 	struct mii_data *mii;
858 
859 	CGEM_ASSERT_LOCKED(sc);
860 
861 	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
862 		return;
863 
864 	cgem_config(sc);
865 	cgem_fill_rqueue(sc);
866 
867 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
868 	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
869 
870 	mii = device_get_softc(sc->miibus);
871 	mii_pollstat(mii);
872 	cgem_start_locked(sc->ifp);
873 
874 	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
875 }
876 
877 static void
878 cgem_init(void *arg)
879 {
880 	struct cgem_softc *sc = (struct cgem_softc *)arg;
881 
882 	CGEM_LOCK(sc);
883 	cgem_init_locked(sc);
884 	CGEM_UNLOCK(sc);
885 }
886 
887 /* Turn off interface.  Free up any buffers in transmit or receive queues. */
888 static void
889 cgem_stop(struct cgem_softc *sc)
890 {
891 	int i;
892 
893 	CGEM_ASSERT_LOCKED(sc);
894 
895 	callout_stop(&sc->tick_ch);
896 
897 	/* Shut down hardware. */
898 	cgem_reset(sc);
899 
900 	/* Clear out transmit queue. */
901 	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
902 		sc->txring[i].ctl = CGEM_TXDESC_USED;
903 		sc->txring[i].addr = 0;
904 		if (sc->txring_m[i]) {
905 			bus_dmamap_unload(sc->mbuf_dma_tag,
906 					  sc->txring_m_dmamap[i]);
907 			m_freem(sc->txring_m[i]);
908 			sc->txring_m[i] = NULL;
909 		}
910 	}
911 	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;
912 
913 	sc->txring_hd_ptr = 0;
914 	sc->txring_tl_ptr = 0;
915 	sc->txring_queued = 0;
916 
917 	/* Clear out receive queue. */
918 	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
919 		sc->rxring[i].addr = CGEM_RXDESC_OWN;
920 		sc->rxring[i].ctl = 0;
921 		if (sc->rxring_m[i]) {
922 			/* Unload dmamap. */
923 			bus_dmamap_unload(sc->mbuf_dma_tag,
924 				  sc->rxring_m_dmamap[i]);
925 
926 			m_freem(sc->rxring_m[i]);
927 			sc->rxring_m[i] = NULL;
928 		}
929 	}
930 	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;
931 
932 	sc->rxring_hd_ptr = 0;
933 	sc->rxring_tl_ptr = 0;
934 	sc->rxring_queued = 0;
935 }
936 
937 
938 static int
939 cgem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
940 {
941 	struct cgem_softc *sc = ifp->if_softc;
942 	struct ifreq *ifr = (struct ifreq *)data;
943 	struct mii_data *mii;
944 	int error = 0, mask;
945 
946 	switch (cmd) {
947 	case SIOCSIFFLAGS:
948 		CGEM_LOCK(sc);
949 		if ((ifp->if_flags & IFF_UP) != 0) {
950 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
951 				if (((ifp->if_flags ^ sc->if_old_flags) &
952 				     (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
953 					cgem_rx_filter(sc);
954 				}
955 			} else {
956 				cgem_init_locked(sc);
957 			}
958 		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
959 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
960 			cgem_stop(sc);
961 		}
962 		sc->if_old_flags = ifp->if_flags;
963 		CGEM_UNLOCK(sc);
964 		break;
965 
966 	case SIOCADDMULTI:
967 	case SIOCDELMULTI:
968 		/* Set up multi-cast filters. */
969 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
970 			CGEM_LOCK(sc);
971 			cgem_rx_filter(sc);
972 			CGEM_UNLOCK(sc);
973 		}
974 		break;
975 
976 	case SIOCSIFMEDIA:
977 	case SIOCGIFMEDIA:
978 		mii = device_get_softc(sc->miibus);
979 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
980 		break;
981 
982 	case SIOCSIFCAP:
983 		CGEM_LOCK(sc);
984 		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
985 
986 		if ((mask & IFCAP_TXCSUM) != 0) {
987 			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
988 				/* Turn on TX checksumming. */
989 				ifp->if_capenable |= (IFCAP_TXCSUM |
990 						      IFCAP_TXCSUM_IPV6);
991 				ifp->if_hwassist |= CGEM_CKSUM_ASSIST;
992 
993 				WR4(sc, CGEM_DMA_CFG,
994 				    RD4(sc, CGEM_DMA_CFG) |
995 				     CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
996 			} else {
997 				/* Turn off TX checksumming. */
998 				ifp->if_capenable &= ~(IFCAP_TXCSUM |
999 						       IFCAP_TXCSUM_IPV6);
1000 				ifp->if_hwassist &= ~CGEM_CKSUM_ASSIST;
1001 
1002 				WR4(sc, CGEM_DMA_CFG,
1003 				    RD4(sc, CGEM_DMA_CFG) &
1004 				     ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
1005 			}
1006 		}
1007 		if ((mask & IFCAP_RXCSUM) != 0) {
1008 			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
1009 				/* Turn on RX checksumming. */
1010 				ifp->if_capenable |= (IFCAP_RXCSUM |
1011 						      IFCAP_RXCSUM_IPV6);
1012 				WR4(sc, CGEM_NET_CFG,
1013 				    RD4(sc, CGEM_NET_CFG) |
1014 				     CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
1015 			} else {
1016 				/* Turn off RX checksumming. */
1017 				ifp->if_capenable &= ~(IFCAP_RXCSUM |
1018 						       IFCAP_RXCSUM_IPV6);
1019 				WR4(sc, CGEM_NET_CFG,
1020 				    RD4(sc, CGEM_NET_CFG) &
1021 				     ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
1022 			}
1023 		}
1024 
1025 		CGEM_UNLOCK(sc);
1026 		break;
1027 	default:
1028 		error = ether_ioctl(ifp, cmd, data);
1029 		break;
1030 	}
1031 
1032 	return (error);
1033 }
1034 
1035 /* MII bus support routines.
1036  */
1037 static void
1038 cgem_child_detached(device_t dev, device_t child)
1039 {
1040 	struct cgem_softc *sc = device_get_softc(dev);
1041 	if (child == sc->miibus)
1042 		sc->miibus = NULL;
1043 }
1044 
1045 static int
1046 cgem_ifmedia_upd(struct ifnet *ifp)
1047 {
1048 	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
1049 	struct mii_data *mii;
1050 	int error;
1051 
1052 	mii = device_get_softc(sc->miibus);
1053 	CGEM_LOCK(sc);
1054 	error = mii_mediachg(mii);
1055 	CGEM_UNLOCK(sc);
1056 	return (error);
1057 }
1058 
1059 static void
1060 cgem_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1061 {
1062 	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
1063 	struct mii_data *mii;
1064 
1065 	mii = device_get_softc(sc->miibus);
1066 	CGEM_LOCK(sc);
1067 	mii_pollstat(mii);
1068 	ifmr->ifm_active = mii->mii_media_active;
1069 	ifmr->ifm_status = mii->mii_media_status;
1070 	CGEM_UNLOCK(sc);
1071 }
1072 
1073 static int
1074 cgem_miibus_readreg(device_t dev, int phy, int reg)
1075 {
1076 	struct cgem_softc *sc = device_get_softc(dev);
1077 	int tries, val;
1078 
1079 	WR4(sc, CGEM_PHY_MAINT,
1080 	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
1081 	    CGEM_PHY_MAINT_OP_READ |
1082 	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
1083 	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));
1084 
1085 	/* Wait for completion. */
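	/* Up to 200 polls of 5us each, i.e. a timeout of roughly 1ms. */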
1086 	tries = 0;
1087 	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
1088 		DELAY(5);
1089 		if (++tries > 200) {
1090 			device_printf(dev, "phy read timeout: %d\n", reg);
1091 			return (-1);
1092 		}
1093 	}
1094 
1095 	val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;
1096 
1097 	return (val);
1098 }
1099 
1100 static int
1101 cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
1102 {
1103 	struct cgem_softc *sc = device_get_softc(dev);
1104 	int tries;
1105 
1106 	WR4(sc, CGEM_PHY_MAINT,
1107 	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
1108 	    CGEM_PHY_MAINT_OP_WRITE |
1109 	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
1110 	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
1111 	    (data & CGEM_PHY_MAINT_DATA_MASK));
1112 
1113 	/* Wait for completion. */
1114 	tries = 0;
1115 	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
1116 		DELAY(5);
1117 		if (++tries > 200) {
1118 			device_printf(dev, "phy write timeout: %d\n", reg);
1119 			return (-1);
1120 		}
1121 	}
1122 
1123 	return (0);
1124 }
1125 
1126 /*
1127  * Overridable weak symbol cgem_set_ref_clk().  This allows platforms to
1128  * provide a function to set the cgem's reference clock.
1129  */
1130 static int __used
1131 cgem_default_set_ref_clk(int unit, int frequency)
1132 {
1133 
1134 	return (0);
1135 }
1136 __weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk);
1137 
1138 static void
1139 cgem_miibus_statchg(device_t dev)
1140 {
1141 	struct cgem_softc *sc;
1142 	struct mii_data *mii;
1143 	uint32_t net_cfg;
1144 	int ref_clk_freq;
1145 
1146 	sc = device_get_softc(dev);
1147 
1148 	mii = device_get_softc(sc->miibus);
1149 
1150 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1151 		/* Update hardware to reflect phy status. */
1152 		net_cfg = RD4(sc, CGEM_NET_CFG);
1153 		net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
1154 			     CGEM_NET_CFG_FULL_DUPLEX);
1155 
1156 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1157 		case IFM_1000_T:
1158 			net_cfg |= (CGEM_NET_CFG_SPEED100 |
1159 				    CGEM_NET_CFG_GIGE_EN);
1160 			ref_clk_freq = 125000000;
1161 			break;
1162 		case IFM_100_TX:
1163 			net_cfg |= CGEM_NET_CFG_SPEED100;
1164 			ref_clk_freq = 25000000;
1165 			break;
1166 		default:
1167 			ref_clk_freq = 2500000;
1168 		}
1169 
1170 		if ((mii->mii_media_active & IFM_FDX) != 0)
1171 			net_cfg |= CGEM_NET_CFG_FULL_DUPLEX;
1172 		WR4(sc, CGEM_NET_CFG, net_cfg);
1173 
1174 		/* Set the reference clock if necessary. */
1175 		if (cgem_set_ref_clk(sc->ref_clk_num, ref_clk_freq))
1176 			device_printf(dev, "could not set ref clk%d to %d.\n",
1177 				      sc->ref_clk_num, ref_clk_freq);
1178 	}
1179 }
1180 
1181 static int
1182 cgem_probe(device_t dev)
1183 {
1184 
1185 	if (!ofw_bus_is_compatible(dev, "cadence,gem"))
1186 		return (ENXIO);
1187 
1188 	device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");
1189 	return (0);
1190 }
1191 
1192 static int
1193 cgem_attach(device_t dev)
1194 {
1195 	struct cgem_softc *sc = device_get_softc(dev);
1196 	struct ifnet *ifp = NULL;
1197 	phandle_t node;
1198 	pcell_t cell;
1199 	int rid, err;
1200 	u_char eaddr[ETHER_ADDR_LEN];
1201 
1202 	sc->dev = dev;
1203 	CGEM_LOCK_INIT(sc);
1204 
1205 	/* Get reference clock number and base divider from fdt. */
1206 	node = ofw_bus_get_node(dev);
1207 	sc->ref_clk_num = 0;
1208 	if (OF_getprop(node, "ref-clock-num", &cell, sizeof(cell)) > 0)
1209 		sc->ref_clk_num = fdt32_to_cpu(cell);
1210 
1211 	/* Get memory resource. */
1212 	rid = 0;
1213 	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1214 					     RF_ACTIVE);
1215 	if (sc->mem_res == NULL) {
1216 		device_printf(dev, "could not allocate memory resources.\n");
1217 		return (ENOMEM);
1218 	}
1219 
1220 	/* Get IRQ resource. */
1221 	rid = 0;
1222 	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1223 					     RF_ACTIVE);
1224 	if (sc->irq_res == NULL) {
1225 		device_printf(dev, "could not allocate interrupt resource.\n");
1226 		cgem_detach(dev);
1227 		return (ENOMEM);
1228 	}
1229 
1230 	ifp = sc->ifp = if_alloc(IFT_ETHER);
1231 	if (ifp == NULL) {
1232 		device_printf(dev, "could not allocate ifnet structure\n");
1233 		cgem_detach(dev);
1234 		return (ENOMEM);
1235 	}
1236 
1237 	CGEM_LOCK(sc);
1238 
1239 	/* Reset hardware. */
1240 	cgem_reset(sc);
1241 
1242 	/* Attach phy to mii bus. */
1243 	err = mii_attach(dev, &sc->miibus, ifp,
1244 			 cgem_ifmedia_upd, cgem_ifmedia_sts,
1245 			 BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
1246 	if (err) {
1247 		CGEM_UNLOCK(sc);
1248 		device_printf(dev, "attaching PHYs failed\n");
1249 		cgem_detach(dev);
1250 		return (err);
1251 	}
1252 
1253 	/* Set up TX and RX descriptor area. */
1254 	err = cgem_setup_descs(sc);
1255 	if (err) {
1256 		CGEM_UNLOCK(sc);
1257 		device_printf(dev, "could not set up dma mem for descs.\n");
1258 		cgem_detach(dev);
1259 		return (ENOMEM);
1260 	}
1261 
1262 	/* Get a MAC address. */
1263 	cgem_get_mac(sc, eaddr);
1264 
1265 	/* Start ticks. */
1266 	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
1267 
1268 	/* Set up ifnet structure. */
1269 	ifp->if_softc = sc;
1270 	if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
1271 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1272 	ifp->if_start = cgem_start;
1273 	ifp->if_ioctl = cgem_ioctl;
1274 	ifp->if_init = cgem_init;
1275 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
1276 	/* XXX: disable hw checksumming for now. */
1277 	ifp->if_hwassist = 0;
1278 	ifp->if_capenable = ifp->if_capabilities &
1279 		~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
1280 	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
1281 	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
1282 	IFQ_SET_READY(&ifp->if_snd);
1283 
1284 	sc->if_old_flags = ifp->if_flags;
1285 	sc->rxbufs = DEFAULT_NUM_RX_BUFS;
1286 
1287 	ether_ifattach(ifp, eaddr);
1288 
1289 	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
1290 			     INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
1291 	if (err) {
1292 		CGEM_UNLOCK(sc);
1293 		device_printf(dev, "could not set interrupt handler.\n");
1294 		ether_ifdetach(ifp);
1295 		cgem_detach(dev);
1296 		return (err);
1297 	}
1298 
1299 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
1300 		       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1301 		       OID_AUTO, "rxbufs", CTLFLAG_RW,
1302 		       &sc->rxbufs, 0,
1303 		       "Number of receive buffers to provide");
1304 
1305 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
1306 		       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1307 		       OID_AUTO, "_rxoverruns", CTLFLAG_RD,
1308 		       &sc->rxoverruns, 0,
1309 		       "Receive ring overrun events");
1310 
1311 	CGEM_UNLOCK(sc);
1312 
1313 	return (0);
1314 }
1315 
1316 static int
1317 cgem_detach(device_t dev)
1318 {
1319 	struct cgem_softc *sc = device_get_softc(dev);
1320 	int i;
1321 
1322 	if (sc == NULL)
1323 		return (ENODEV);
1324 
1325 	if (device_is_attached(dev)) {
1326 		CGEM_LOCK(sc);
1327 		cgem_stop(sc);
1328 		CGEM_UNLOCK(sc);
1329 		callout_drain(&sc->tick_ch);
1330 		sc->ifp->if_flags &= ~IFF_UP;
1331 		ether_ifdetach(sc->ifp);
1332 	}
1333 
1334 	if (sc->miibus != NULL) {
1335 		device_delete_child(dev, sc->miibus);
1336 		sc->miibus = NULL;
1337 	}
1338 
1339 	/* Release resources. */
1340 	if (sc->mem_res != NULL) {
1341 		bus_release_resource(dev, SYS_RES_MEMORY,
1342 				     rman_get_rid(sc->mem_res), sc->mem_res);
1343 		sc->mem_res = NULL;
1344 	}
1345 	if (sc->irq_res != NULL) {
1346 		if (sc->intrhand)
1347 			bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
1348 		bus_release_resource(dev, SYS_RES_IRQ,
1349 				     rman_get_rid(sc->irq_res), sc->irq_res);
1350 		sc->irq_res = NULL;
1351 	}
1352 
1353 	/* Release DMA resources. */
1354 	if (sc->rxring != NULL) {
1355 		if (sc->rxring_physaddr != 0) {
1356 			bus_dmamap_unload(sc->desc_dma_tag, sc->rxring_dma_map);
1357 			sc->rxring_physaddr = 0;
1358 		}
1359 		bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
1360 				sc->rxring_dma_map);
1361 		sc->rxring = NULL;
1362 		for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
1363 			if (sc->rxring_m_dmamap[i] != NULL) {
1364 				bus_dmamap_destroy(sc->mbuf_dma_tag,
1365 						   sc->rxring_m_dmamap[i]);
1366 				sc->rxring_m_dmamap[i] = NULL;
1367 			}
1368 	}
1369 	if (sc->txring != NULL) {
1370 		if (sc->txring_physaddr != 0) {
1371 			bus_dmamap_unload(sc->desc_dma_tag, sc->txring_dma_map);
1372 			sc->txring_physaddr = 0;
1373 		}
1374 		bus_dmamem_free(sc->desc_dma_tag, sc->txring,
1375 				sc->txring_dma_map);
1376 		sc->txring = NULL;
1377 		for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
1378 			if (sc->txring_m_dmamap[i] != NULL) {
1379 				bus_dmamap_destroy(sc->mbuf_dma_tag,
1380 						   sc->txring_m_dmamap[i]);
1381 				sc->txring_m_dmamap[i] = NULL;
1382 			}
1383 	}
1384 	if (sc->desc_dma_tag != NULL) {
1385 		bus_dma_tag_destroy(sc->desc_dma_tag);
1386 		sc->desc_dma_tag = NULL;
1387 	}
1388 	if (sc->mbuf_dma_tag != NULL) {
1389 		bus_dma_tag_destroy(sc->mbuf_dma_tag);
1390 		sc->mbuf_dma_tag = NULL;
1391 	}
1392 
1393 	bus_generic_detach(dev);
1394 
1395 	CGEM_LOCK_DESTROY(sc);
1396 
1397 	return (0);
1398 }
1399 
1400 static device_method_t cgem_methods[] = {
1401 	/* Device interface */
1402 	DEVMETHOD(device_probe,		cgem_probe),
1403 	DEVMETHOD(device_attach,	cgem_attach),
1404 	DEVMETHOD(device_detach,	cgem_detach),
1405 
1406 	/* Bus interface */
1407 	DEVMETHOD(bus_child_detached,	cgem_child_detached),
1408 
1409 	/* MII interface */
1410 	DEVMETHOD(miibus_readreg,	cgem_miibus_readreg),
1411 	DEVMETHOD(miibus_writereg,	cgem_miibus_writereg),
1412 	DEVMETHOD(miibus_statchg,	cgem_miibus_statchg),
1413 
1414 	DEVMETHOD_END
1415 };
1416 
1417 static driver_t cgem_driver = {
1418 	"cgem",
1419 	cgem_methods,
1420 	sizeof(struct cgem_softc),
1421 };
1422 
1423 DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
1424 DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
1425 MODULE_DEPEND(cgem, miibus, 1, 1, 1);
1426 MODULE_DEPEND(cgem, ether, 1, 1, 1);
1427