xref: /freebsd/sys/dev/cadence/if_cgem.c (revision 9ecd54f24fe9fa373e07c9fd7c052deb2188f545)
1 /*-
2  * Copyright (c) 2012-2013 Thomas Skibo
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /*
28  * A network interface driver for the Cadence GEM Gigabit Ethernet
29  * controller, such as the one used in the Xilinx Zynq-7000 SoC.
30  *
31  * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
32  * (v1.4) November 16, 2012.  Xilinx doc UG585.  GEM is covered in Ch. 16
33  * and register definitions are in appendix B.18.
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 #include <sys/mbuf.h>
45 #include <sys/module.h>
46 #include <sys/rman.h>
47 #include <sys/socket.h>
48 #include <sys/sockio.h>
49 #include <sys/sysctl.h>
50 
51 #include <machine/bus.h>
52 
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_var.h>
56 #include <net/if_arp.h>
57 #include <net/if_dl.h>
58 #include <net/if_media.h>
59 #include <net/if_mib.h>
60 #include <net/if_types.h>
61 
62 #ifdef INET
63 #include <netinet/in.h>
64 #include <netinet/in_systm.h>
65 #include <netinet/in_var.h>
66 #include <netinet/ip.h>
67 #endif
68 
69 #include <net/bpf.h>
70 #include <net/bpfdesc.h>
71 
72 #include <dev/fdt/fdt_common.h>
73 #include <dev/ofw/ofw_bus.h>
74 #include <dev/ofw/ofw_bus_subr.h>
75 
76 #include <dev/mii/mii.h>
77 #include <dev/mii/miivar.h>
78 
79 #include <dev/cadence/if_cgem_hw.h>
80 
81 #include "miibus_if.h"
82 
83 #define IF_CGEM_NAME "cgem"
84 
85 #define CGEM_NUM_RX_DESCS	256	/* size of receive descriptor ring */
86 #define CGEM_NUM_TX_DESCS	256	/* size of transmit descriptor ring */
87 
88 #define MAX_DESC_RING_SIZE (MAX(CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),\
89 				CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc)))
90 
91 
92 /* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS of course. */
93 #define DEFAULT_NUM_RX_BUFS	64	/* number of receive bufs to queue. */
94 
95 #define TX_MAX_DMA_SEGS		4	/* maximum segs in a tx mbuf dma */
96 
97 #define CGEM_CKSUM_ASSIST	(CSUM_IP | CSUM_TCP | CSUM_UDP | \
98 				 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
99 
100 struct cgem_softc {
101 	struct ifnet		*ifp;
102 	struct mtx		sc_mtx;
103 	device_t		dev;
104 	device_t		miibus;
105 	int			if_old_flags;
106 	struct resource 	*mem_res;
107 	struct resource 	*irq_res;
108 	void			*intrhand;
109 	struct callout		tick_ch;
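	/* Shadow of the net control register; lets transient bits such as
	 * the TX start bit be OR'd in without reading the register back. */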
110 	uint32_t		net_ctl_shadow;
111 	u_char			eaddr[6];
112 
113 	bus_dma_tag_t		desc_dma_tag;
114 	bus_dma_tag_t		mbuf_dma_tag;
115 
116 	/* receive descriptor ring */
117 	struct cgem_rx_desc	*rxring;
118 	bus_addr_t		rxring_physaddr;
119 	struct mbuf		*rxring_m[CGEM_NUM_RX_DESCS];
120 	bus_dmamap_t		rxring_m_dmamap[CGEM_NUM_RX_DESCS];
121 	int			rxring_hd_ptr;	/* where to put rcv bufs */
122 	int			rxring_tl_ptr;	/* where to get receives */
123 	int			rxring_queued;	/* how many rcv bufs queued */
124  	bus_dmamap_t		rxring_dma_map;
125 	int			rxbufs;		/* tunable number rcv bufs */
126 	int			rxoverruns;	/* rx ring overruns */
127 
128 	/* transmit descriptor ring */
129 	struct cgem_tx_desc	*txring;
130 	bus_addr_t		txring_physaddr;
131 	struct mbuf		*txring_m[CGEM_NUM_TX_DESCS];
132 	bus_dmamap_t		txring_m_dmamap[CGEM_NUM_TX_DESCS];
133 	int			txring_hd_ptr;	/* where to put next xmits */
134 	int			txring_tl_ptr;	/* next xmit mbuf to free */
135 	int			txring_queued;	/* num xmits segs queued */
136 	bus_dmamap_t		txring_dma_map;
137 };
138 
139 #define RD4(sc, off) 		(bus_read_4((sc)->mem_res, (off)))
140 #define WR4(sc, off, val) 	(bus_write_4((sc)->mem_res, (off), (val)))
141 #define BARRIER(sc, off, len, flags) \
142 	(bus_barrier((sc)->mem_res, (off), (len), (flags)))
143 
144 #define CGEM_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
145 #define CGEM_UNLOCK(sc)	mtx_unlock(&(sc)->sc_mtx)
146 #define CGEM_LOCK_INIT(sc)	\
147 	mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \
148 		 MTX_NETWORK_LOCK, MTX_DEF)
149 #define CGEM_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->sc_mtx)
150 #define CGEM_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)
151 
152 static devclass_t cgem_devclass;
153 
154 static int cgem_probe(device_t dev);
155 static int cgem_attach(device_t dev);
156 static int cgem_detach(device_t dev);
157 static void cgem_tick(void *);
158 static void cgem_intr(void *);
159 
160 static void
161 cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
162 {
163 	int i;
164 	uint32_t rnd;
165 
166 	/* See if boot loader gave us a MAC address already. */
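	/* (The GEM has four specific-address filter register pairs; the
	 * first non-zero pair found is used.) */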
167 	for (i = 0; i < 4; i++) {
168 		uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
169 		uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
170 		if (low != 0 || high != 0) {
171 			eaddr[0] = low & 0xff;
172 			eaddr[1] = (low >> 8) & 0xff;
173 			eaddr[2] = (low >> 16) & 0xff;
174 			eaddr[3] = (low >> 24) & 0xff;
175 			eaddr[4] = high & 0xff;
176 			eaddr[5] = (high >> 8) & 0xff;
177 			break;
178 		}
179 	}
180 
181 	/* No MAC from boot loader?  Assign a random one. */
182 	if (i == 4) {
183 		rnd = arc4random();
184 
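		/* Note: 'b' is 0x62, which has the locally administered bit
		 * set and the multicast bit clear. */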
185 		eaddr[0] = 'b';
186 		eaddr[1] = 's';
187 		eaddr[2] = 'd';
188 		eaddr[3] = (rnd >> 16) & 0xff;
189 		eaddr[4] = (rnd >> 8) & 0xff;
190 		eaddr[5] = rnd & 0xff;
191 
192 		device_printf(sc->dev, "no mac address found, assigning "
193 			      "random: %02x:%02x:%02x:%02x:%02x:%02x\n",
194 			      eaddr[0], eaddr[1], eaddr[2],
195 			      eaddr[3], eaddr[4], eaddr[5]);
196 
197 		WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
198 		    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
199 		WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);
200 	}
201 }
202 
203 /* cgem_mac_hash():  map 48-bit address to a 6-bit hash.
204  * The 6-bit hash corresponds to a bit in a 64-bit hash
205  * register.  Setting that bit in the hash register enables
206  * reception of all frames with a destination address that hashes
207  * to that 6-bit value.
208  *
209  * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
210  * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
211  * every sixth bit in the destination address.
212  */
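/*
 * For example, the broadcast address ff:ff:ff:ff:ff:ff hashes to 0,
 * since each of the six hash bits is the exclusive-or of eight 1 bits.
 */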
213 static int
214 cgem_mac_hash(u_char eaddr[])
215 {
216 	int hash;
217 	int i, j;
218 
219 	hash = 0;
220 	for (i = 0; i < 6; i++)
221 		for (j = i; j < 48; j += 6)
222 			if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
223 				hash ^= (1 << i);
224 
225 	return hash;
226 }
227 
228 /* After any change in rx flags or multi-cast addresses, set up
229  * hash registers and net config register bits.
230  */
231 static void
232 cgem_rx_filter(struct cgem_softc *sc)
233 {
234 	struct ifnet *ifp = sc->ifp;
235 	struct ifmultiaddr *ifma;
236 	int index;
237 	uint32_t hash_hi, hash_lo;
238 	uint32_t net_cfg;
239 
240 	hash_hi = 0;
241 	hash_lo = 0;
242 
243 	net_cfg = RD4(sc, CGEM_NET_CFG);
244 
245 	net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
246 		     CGEM_NET_CFG_NO_BCAST |
247 		     CGEM_NET_CFG_COPY_ALL);
248 
249 	if ((ifp->if_flags & IFF_PROMISC) != 0)
250 		net_cfg |= CGEM_NET_CFG_COPY_ALL;
251 	else {
252 		if ((ifp->if_flags & IFF_BROADCAST) == 0)
253 			net_cfg |= CGEM_NET_CFG_NO_BCAST;
254 		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
255 			hash_hi = 0xffffffff;
256 			hash_lo = 0xffffffff;
257 		} else {
258 			if_maddr_rlock(ifp);
259 			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
260 				if (ifma->ifma_addr->sa_family != AF_LINK)
261 					continue;
262 				index = cgem_mac_hash(
263 					LLADDR((struct sockaddr_dl *)
264 					       ifma->ifma_addr));
265 				if (index > 31)
266 					hash_hi |= (1<<(index-32));
267 				else
268 					hash_lo |= (1<<index);
269 			}
270 			if_maddr_runlock(ifp);
271 		}
272 
273 		if (hash_hi != 0 || hash_lo != 0)
274 			net_cfg |= CGEM_NET_CFG_MULTI_HASH_EN;
275 	}
276 
277 	WR4(sc, CGEM_HASH_TOP, hash_hi);
278 	WR4(sc, CGEM_HASH_BOT, hash_lo);
279 	WR4(sc, CGEM_NET_CFG, net_cfg);
280 }
281 
282 /* For bus_dmamap_load() callback. */
283 static void
284 cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
285 {
286 
287 	if (nsegs != 1 || error != 0)
288 		return;
289 	*(bus_addr_t *)arg = segs[0].ds_addr;
290 }
291 
292 /* Create DMA'able descriptor rings. */
293 static int
294 cgem_setup_descs(struct cgem_softc *sc)
295 {
296 	int i, err;
297 
298 	sc->txring = NULL;
299 	sc->rxring = NULL;
300 
301 	/* Allocate non-cached DMA space for RX and TX descriptors.
302 	 */
303 	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
304 				 BUS_SPACE_MAXADDR_32BIT,
305 				 BUS_SPACE_MAXADDR,
306 				 NULL, NULL,
307 				 MAX_DESC_RING_SIZE,
308 				 1,
309 				 MAX_DESC_RING_SIZE,
310 				 0,
311 				 busdma_lock_mutex,
312 				 &sc->sc_mtx,
313 				 &sc->desc_dma_tag);
314 	if (err)
315 		return (err);
316 
317 	/* Set up a bus_dma_tag for mbufs. */
318 	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
319 				 BUS_SPACE_MAXADDR_32BIT,
320 				 BUS_SPACE_MAXADDR,
321 				 NULL, NULL,
322 				 MCLBYTES,
323 				 TX_MAX_DMA_SEGS,
324 				 MCLBYTES,
325 				 0,
326 				 busdma_lock_mutex,
327 				 &sc->sc_mtx,
328 				 &sc->mbuf_dma_tag);
329 	if (err)
330 		return (err);
331 
332 	/* Allocate DMA memory in non-cacheable space. */
333 	err = bus_dmamem_alloc(sc->desc_dma_tag,
334 			       (void **)&sc->rxring,
335 			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
336 			       &sc->rxring_dma_map);
337 	if (err)
338 		return (err);
339 
340 	/* Load descriptor DMA memory. */
341 	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
342 			      (void *)sc->rxring,
343 			      CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),
344 			      cgem_getaddr, &sc->rxring_physaddr,
345 			      BUS_DMA_NOWAIT);
346 	if (err)
347 		return (err);
348 
349 	/* Initialize RX descriptors. */
350 	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
351 		sc->rxring[i].addr = CGEM_RXDESC_OWN;
352 		sc->rxring[i].ctl = 0;
353 		sc->rxring_m[i] = NULL;
354 		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
355 					&sc->rxring_m_dmamap[i]);
356 		if (err)
357 			return (err);
358 	}
359 	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;
360 
361 	sc->rxring_hd_ptr = 0;
362 	sc->rxring_tl_ptr = 0;
363 	sc->rxring_queued = 0;
364 
365 	/* Allocate DMA memory for TX descriptors in non-cacheable space. */
366 	err = bus_dmamem_alloc(sc->desc_dma_tag,
367 			       (void **)&sc->txring,
368 			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
369 			       &sc->txring_dma_map);
370 	if (err)
371 		return (err);
372 
373 	/* Load TX descriptor DMA memory. */
374 	err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
375 			      (void *)sc->txring,
376 			      CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc),
377 			      cgem_getaddr, &sc->txring_physaddr,
378 			      BUS_DMA_NOWAIT);
379 	if (err)
380 		return (err);
381 
382 	/* Initialize TX descriptor ring. */
383 	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
384 		sc->txring[i].addr = 0;
385 		sc->txring[i].ctl = CGEM_TXDESC_USED;
386 		sc->txring_m[i] = NULL;
387 		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
388 					&sc->txring_m_dmamap[i]);
389 		if (err)
390 			return (err);
391 	}
392 	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;
393 
394 	sc->txring_hd_ptr = 0;
395 	sc->txring_tl_ptr = 0;
396 	sc->txring_queued = 0;
397 
398 	return (0);
399 }
400 
401 /* Fill receive descriptor ring with mbufs. */
402 static void
403 cgem_fill_rqueue(struct cgem_softc *sc)
404 {
405 	struct mbuf *m = NULL;
406 	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
407 	int nsegs;
408 
409 	CGEM_ASSERT_LOCKED(sc);
410 
411 	while (sc->rxring_queued < sc->rxbufs) {
412 		/* Get a cluster mbuf. */
413 		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
414 		if (m == NULL)
415 			break;
416 
417 		m->m_len = MCLBYTES;
418 		m->m_pkthdr.len = MCLBYTES;
419 		m->m_pkthdr.rcvif = sc->ifp;
420 
421 		/* Load map and plug in physical address. */
422 		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
423 			      sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
424 			      segs, &nsegs, BUS_DMA_NOWAIT)) {
425 			/* XXX: warn? */
426 			m_free(m);
427 			break;
428 		}
429 		sc->rxring_m[sc->rxring_hd_ptr] = m;
430 
431 		/* Sync cache with receive buffer. */
432 		bus_dmamap_sync(sc->mbuf_dma_tag,
433 				sc->rxring_m_dmamap[sc->rxring_hd_ptr],
434 				BUS_DMASYNC_PREREAD);
435 
436 		/* Write rx descriptor and increment head pointer. */
437 		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
438 		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
439 			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
440 				CGEM_RXDESC_WRAP;
441 			sc->rxring_hd_ptr = 0;
442 		} else
443 			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;
444 
445 		sc->rxring_queued++;
446 	}
447 }
448 
449 /* Pull received packets off of receive descriptor ring. */
450 static void
451 cgem_recv(struct cgem_softc *sc)
452 {
453 	struct ifnet *ifp = sc->ifp;
454 	struct mbuf *m;
455 	uint32_t ctl;
456 
457 	CGEM_ASSERT_LOCKED(sc);
458 
459 	/* Pick up all packets in which the OWN bit is set. */
460 	while (sc->rxring_queued > 0 &&
461 	       (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {
462 
463 		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;
464 
465 		/* Grab filled mbuf. */
466 		m = sc->rxring_m[sc->rxring_tl_ptr];
467 		sc->rxring_m[sc->rxring_tl_ptr] = NULL;
468 
469 		/* Sync cache with receive buffer. */
470 		bus_dmamap_sync(sc->mbuf_dma_tag,
471 				sc->rxring_m_dmamap[sc->rxring_tl_ptr],
472 				BUS_DMASYNC_POSTREAD);
473 
474 		/* Unload dmamap. */
475 		bus_dmamap_unload(sc->mbuf_dma_tag,
476 		  	sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
477 
478 		/* Increment tail pointer. */
479 		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
480 			sc->rxring_tl_ptr = 0;
481 		sc->rxring_queued--;
482 
483 		/* Check FCS and make sure entire packet landed in one mbuf
484 		 * cluster (which is much bigger than the largest ethernet
485 		 * packet).
486 		 */
487 		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
488 		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
489 		           (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
490 			/* discard. */
491 			m_free(m);
492 			ifp->if_ierrors++;
493 			continue;
494 		}
495 
496 		/* Hand it off to upper layers. */
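		/* (The controller writes frames ETHER_ALIGN bytes into the
		 * buffer because of CGEM_NET_CFG_RX_BUF_OFFSET in
		 * cgem_config(), so skip that padding here.) */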
497 		m->m_data += ETHER_ALIGN;
498 		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
499 		m->m_pkthdr.rcvif = ifp;
500 		m->m_pkthdr.len = m->m_len;
501 
502 		/* Are we using hardware checksumming?  Check the
503 		 * status in the receive descriptor.
504 		 */
505 		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
506 			/* TCP or UDP checks out, IP checks out too. */
507 			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
508 			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
509 			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
510 			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
511 				m->m_pkthdr.csum_flags |=
512 					CSUM_IP_CHECKED | CSUM_IP_VALID |
513 					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
514 				m->m_pkthdr.csum_data = 0xffff;
515 			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
516 				   CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
517 				/* Only IP checks out. */
518 				m->m_pkthdr.csum_flags |=
519 					CSUM_IP_CHECKED | CSUM_IP_VALID;
520 				m->m_pkthdr.csum_data = 0xffff;
521 			}
522 		}
523 
524 		ifp->if_ipackets++;
525 		CGEM_UNLOCK(sc);
526 		(*ifp->if_input)(ifp, m);
527 		CGEM_LOCK(sc);
528 	}
529 }
530 
531 /* Find completed transmits and free their mbufs. */
532 static void
533 cgem_clean_tx(struct cgem_softc *sc)
534 {
535 	struct mbuf *m;
536 	uint32_t ctl;
537 
538 	CGEM_ASSERT_LOCKED(sc);
539 
540 	/* free up finished transmits. */
541 	while (sc->txring_queued > 0 &&
542 	       ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
543 		CGEM_TXDESC_USED) != 0) {
544 
545 		/* Sync cache.  nop? */
546 		bus_dmamap_sync(sc->mbuf_dma_tag,
547 				sc->txring_m_dmamap[sc->txring_tl_ptr],
548 				BUS_DMASYNC_POSTWRITE);
549 
550 		/* Unload DMA map. */
551 		bus_dmamap_unload(sc->mbuf_dma_tag,
552 				  sc->txring_m_dmamap[sc->txring_tl_ptr]);
553 
554 		/* Free up the mbuf. */
555 		m = sc->txring_m[sc->txring_tl_ptr];
556 		sc->txring_m[sc->txring_tl_ptr] = NULL;
557 		m_freem(m);
558 
559 		/* Check the status. */
560 		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
561 			/* Serious bus error.  Log to console. */
562 			device_printf(sc->dev, "cgem_clean_tx: Whoa! "
563 				   "AHB error, addr=0x%x\n",
564 				   sc->txring[sc->txring_tl_ptr].addr);
565 		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
566 				   CGEM_TXDESC_LATE_COLL)) != 0) {
567 			sc->ifp->if_oerrors++;
568 		} else
569 			sc->ifp->if_opackets++;
570 
571 		/* If the packet spanned more than one tx descriptor,
572 		 * skip descriptors until we find the end so that only
573 		 * start-of-frame descriptors are processed.
574 		 */
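		/* (The controller sets the used bit only in a frame's first
		 * descriptor, so mark the remaining descriptors used here.) */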
575 		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
576 			if ((ctl & CGEM_TXDESC_WRAP) != 0)
577 				sc->txring_tl_ptr = 0;
578 			else
579 				sc->txring_tl_ptr++;
580 			sc->txring_queued--;
581 
582 			ctl = sc->txring[sc->txring_tl_ptr].ctl;
583 
584 			sc->txring[sc->txring_tl_ptr].ctl =
585 				ctl | CGEM_TXDESC_USED;
586 		}
587 
588 		/* Next descriptor. */
589 		if ((ctl & CGEM_TXDESC_WRAP) != 0)
590 			sc->txring_tl_ptr = 0;
591 		else
592 			sc->txring_tl_ptr++;
593 		sc->txring_queued--;
594 	}
595 }
596 
597 /* Start transmits. */
598 static void
599 cgem_start_locked(struct ifnet *ifp)
600 {
601 	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
602 	struct mbuf *m;
603 	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
604 	uint32_t ctl;
605 	int i, nsegs, wrap, err;
606 
607 	CGEM_ASSERT_LOCKED(sc);
608 
609 	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0)
610 		return;
611 
612 	for (;;) {
613 		/* Check that there is room in the descriptor ring. */
614 		if (sc->txring_queued >= CGEM_NUM_TX_DESCS -
615 		    TX_MAX_DMA_SEGS - 1) {
616 
617 			/* Try to make room. */
618 			cgem_clean_tx(sc);
619 
620 			/* Still no room? */
621 			if (sc->txring_queued >= CGEM_NUM_TX_DESCS -
622 			    TX_MAX_DMA_SEGS - 1) {
623 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
624 				break;
625 			}
626 		}
627 
628 		/* Grab next transmit packet. */
629 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
630 		if (m == NULL)
631 			break;
632 
633 		/* Load DMA map. */
634 		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
635 				      sc->txring_m_dmamap[sc->txring_hd_ptr],
636 				      m, segs, &nsegs, BUS_DMA_NOWAIT);
637 		if (err == EFBIG) {
638 			/* Too many segments!  Defrag and try again. */
639 			struct mbuf *m2 = m_defrag(m, M_NOWAIT);
640 
641 			if (m2 == NULL) {
642 				m_freem(m);
643 				continue;
644 			}
645 			m = m2;
646 			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
647 				      sc->txring_m_dmamap[sc->txring_hd_ptr],
648 				      m, segs, &nsegs, BUS_DMA_NOWAIT);
649 		}
650 		if (err) {
651 			/* Give up. */
652 			m_freem(m);
653 			continue;
654 		}
655 		sc->txring_m[sc->txring_hd_ptr] = m;
656 
657 		/* Sync tx buffer with cache. */
658 		bus_dmamap_sync(sc->mbuf_dma_tag,
659 				sc->txring_m_dmamap[sc->txring_hd_ptr],
660 				BUS_DMASYNC_PREWRITE);
661 
662 		/* Set wrap flag if next packet might run off end of ring. */
663 		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
664 			CGEM_NUM_TX_DESCS;
665 
666 		/* Fill in the TX descriptors back to front so that USED
667 		 * bit in first descriptor is cleared last.
668 		 */
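		/* (Otherwise an already-active transmitter could see the
		 * first descriptor's used bit clear and start on a frame
		 * whose remaining descriptors are not yet filled in.) */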
669 		for (i = nsegs - 1; i >= 0; i--) {
670 			/* Descriptor address. */
671 			sc->txring[sc->txring_hd_ptr + i].addr =
672 				segs[i].ds_addr;
673 
674 			/* Descriptor control word. */
675 			ctl = segs[i].ds_len;
676 			if (i == nsegs - 1) {
677 				ctl |= CGEM_TXDESC_LAST_BUF;
678 				if (wrap)
679 					ctl |= CGEM_TXDESC_WRAP;
680 			}
681 			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;
682 
683 			if (i != 0)
684 				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
685 		}
686 
687 		if (wrap)
688 			sc->txring_hd_ptr = 0;
689 		else
690 			sc->txring_hd_ptr += nsegs;
691 		sc->txring_queued += nsegs;
692 
693 		/* Kick the transmitter. */
694 		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
695 		    CGEM_NET_CTRL_START_TX);
696 	}
697 
698 }
699 
700 static void
701 cgem_start(struct ifnet *ifp)
702 {
703 	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
704 
705 	CGEM_LOCK(sc);
706 	cgem_start_locked(ifp);
707 	CGEM_UNLOCK(sc);
708 }
709 
710 /* Respond to changes in media. */
711 static void
712 cgem_media_update(struct cgem_softc *sc, int active)
713 {
714 	uint32_t net_cfg;
715 
716 	CGEM_ASSERT_LOCKED(sc);
717 
718 	/* Update hardware to reflect phy status. */
719 	net_cfg = RD4(sc, CGEM_NET_CFG);
720 	net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
721 		     CGEM_NET_CFG_FULL_DUPLEX);
722 
723 	if (IFM_SUBTYPE(active) == IFM_1000_T)
724 		net_cfg |= (CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN);
725 	else if (IFM_SUBTYPE(active) == IFM_100_TX)
726 		net_cfg |= CGEM_NET_CFG_SPEED100;
727 
728 	if ((active & IFM_FDX) != 0)
729 		net_cfg |= CGEM_NET_CFG_FULL_DUPLEX;
730 	WR4(sc, CGEM_NET_CFG, net_cfg);
731 }
732 
733 static void
734 cgem_tick(void *arg)
735 {
736 	struct cgem_softc *sc = (struct cgem_softc *)arg;
737 	struct mii_data *mii;
738 	int active;
739 
740 	CGEM_ASSERT_LOCKED(sc);
741 
742 	/* Poll the phy. */
743 	if (sc->miibus != NULL) {
744 		mii = device_get_softc(sc->miibus);
745 		active = mii->mii_media_active;
746 		mii_tick(mii);
747 		if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
748 		    (IFM_ACTIVE | IFM_AVALID) &&
749 		    active != mii->mii_media_active)
750 			cgem_media_update(sc, mii->mii_media_active);
751 	}
752 
753 	/* Next callout in one second. */
754 	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
755 }
756 
757 /* Interrupt handler. */
758 static void
759 cgem_intr(void *arg)
760 {
761 	struct cgem_softc *sc = (struct cgem_softc *)arg;
762 	uint32_t istatus;
763 
764 	CGEM_LOCK(sc);
765 
766 	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
767 		CGEM_UNLOCK(sc);
768 		return;
769 	}
770 
771 	istatus = RD4(sc, CGEM_INTR_STAT);
772 	WR4(sc, CGEM_INTR_STAT, istatus &
773 	    (CGEM_INTR_RX_COMPLETE | CGEM_INTR_TX_USED_READ |
774 	     CGEM_INTR_RX_OVERRUN | CGEM_INTR_HRESP_NOT_OK));
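	/* (The interrupt status bits are write-one-to-clear, so only the
	 * bits handled below are acknowledged.) */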
775 
776 	/* Hresp not OK.  Something is very wrong with DMA.  Try to clear. */
777 	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
778 		device_printf(sc->dev, "cgem_intr: hresp not okay! "
779 		    "rx_status=0x%x\n", RD4(sc, CGEM_RX_STAT));
780 		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
781 	}
782 
783 	/* Transmitter has idled.  Free up any spent transmit buffers. */
784 	if ((istatus & CGEM_INTR_TX_USED_READ) != 0)
785 		cgem_clean_tx(sc);
786 
787 	/* Packets received or overflow. */
788 	if ((istatus & (CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN)) != 0) {
789 		cgem_recv(sc);
790 		cgem_fill_rqueue(sc);
791 		if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
792 			/* Clear rx status register. */
793 			sc->rxoverruns++;
794 			WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
795 		}
796 	}
797 
798 	CGEM_UNLOCK(sc);
799 }
800 
801 /* Reset hardware. */
802 static void
803 cgem_reset(struct cgem_softc *sc)
804 {
805 
806 	CGEM_ASSERT_LOCKED(sc);
807 
808 	WR4(sc, CGEM_NET_CTRL, 0);
809 	WR4(sc, CGEM_NET_CFG, 0);
810 	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
811 	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
812 	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
813 	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
814 	WR4(sc, CGEM_HASH_BOT, 0);
815 	WR4(sc, CGEM_HASH_TOP, 0);
816 	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
817 	WR4(sc, CGEM_RX_QBAR, 0);
818 
819 	/* Get management port running even if interface is down. */
820 	WR4(sc, CGEM_NET_CFG,
821 	    CGEM_NET_CFG_DBUS_WIDTH_32 |
822 	    CGEM_NET_CFG_MDC_CLK_DIV_64);
823 
824 	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
825 	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
826 }
827 
828 /* Bring up the hardware. */
829 static void
830 cgem_config(struct cgem_softc *sc)
831 {
832 	uint32_t net_cfg;
833 	uint32_t dma_cfg;
834 
835 	CGEM_ASSERT_LOCKED(sc);
836 
837 	/* Program Net Config Register. */
838 	net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
839 		CGEM_NET_CFG_MDC_CLK_DIV_64 |
840 		CGEM_NET_CFG_FCS_REMOVE |
841 		CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
842 		CGEM_NET_CFG_GIGE_EN |
843 		CGEM_NET_CFG_FULL_DUPLEX |
844 		CGEM_NET_CFG_SPEED100;
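	/* (The speed and duplex bits set here are provisional;
	 * cgem_media_update() rewrites them once the PHY reports the
	 * negotiated link.) */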
845 
846 	/* Enable receive checksum offloading? */
847 	if ((sc->ifp->if_capenable & IFCAP_RXCSUM) != 0)
848 		net_cfg |=  CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
849 
850 	WR4(sc, CGEM_NET_CFG, net_cfg);
851 
852 	/* Program DMA Config Register. */
853 	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
854 		CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
855 		CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
856 		CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16;
857 
858 	/* Enable transmit checksum offloading? */
859 	if ((sc->ifp->if_capenable & IFCAP_TXCSUM) != 0)
860 		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;
861 
862 	WR4(sc, CGEM_DMA_CFG, dma_cfg);
863 
864 	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
865 	WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
866 	WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);
867 
868 	/* Enable rx and tx. */
869 	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
870 	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
871 
872 	/* Set up interrupts. */
873 	WR4(sc, CGEM_INTR_EN,
874 	    CGEM_INTR_RX_COMPLETE | CGEM_INTR_TX_USED_READ |
875 	    CGEM_INTR_RX_OVERRUN | CGEM_INTR_HRESP_NOT_OK);
876 }
877 
878 /* Turn on interface and load up receive ring with buffers. */
879 static void
880 cgem_init_locked(struct cgem_softc *sc)
881 {
882 	struct mii_data *mii;
883 
884 	CGEM_ASSERT_LOCKED(sc);
885 
886 	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
887 		return;
888 
889 	cgem_config(sc);
890 	cgem_fill_rqueue(sc);
891 
892 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
893 	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
894 
895 	mii = device_get_softc(sc->miibus);
896 	mii_pollstat(mii);
897 	cgem_media_update(sc, mii->mii_media_active);
898 	cgem_start_locked(sc->ifp);
899 
900 	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
901 }
902 
903 static void
904 cgem_init(void *arg)
905 {
906 	struct cgem_softc *sc = (struct cgem_softc *)arg;
907 
908 	CGEM_LOCK(sc);
909 	cgem_init_locked(sc);
910 	CGEM_UNLOCK(sc);
911 }
912 
913 /* Turn off interface.  Free up any buffers in transmit or receive queues. */
914 static void
915 cgem_stop(struct cgem_softc *sc)
916 {
917 	int i;
918 
919 	CGEM_ASSERT_LOCKED(sc);
920 
921 	callout_stop(&sc->tick_ch);
922 
923 	/* Shut down hardware. */
924 	cgem_reset(sc);
925 
926 	/* Clear out transmit queue. */
927 	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
928 		sc->txring[i].ctl = CGEM_TXDESC_USED;
929 		sc->txring[i].addr = 0;
930 		if (sc->txring_m[i]) {
931 			bus_dmamap_unload(sc->mbuf_dma_tag,
932 					  sc->txring_m_dmamap[i]);
933 			m_freem(sc->txring_m[i]);
934 			sc->txring_m[i] = NULL;
935 		}
936 	}
937 	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;
938 
939 	sc->txring_hd_ptr = 0;
940 	sc->txring_tl_ptr = 0;
941 	sc->txring_queued = 0;
942 
943 	/* Clear out receive queue. */
944 	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
945 		sc->rxring[i].addr = CGEM_RXDESC_OWN;
946 		sc->rxring[i].ctl = 0;
947 		if (sc->rxring_m[i]) {
948 			/* Unload dmamap. */
949 			bus_dmamap_unload(sc->mbuf_dma_tag,
950 				  sc->rxring_m_dmamap[i]);
951 
952 			m_freem(sc->rxring_m[i]);
953 			sc->rxring_m[i] = NULL;
954 		}
955 	}
956 	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;
957 
958 	sc->rxring_hd_ptr = 0;
959 	sc->rxring_tl_ptr = 0;
960 	sc->rxring_queued = 0;
961 }
962 
963 
964 static int
965 cgem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
966 {
967 	struct cgem_softc *sc = ifp->if_softc;
968 	struct ifreq *ifr = (struct ifreq *)data;
969 	struct mii_data *mii;
970 	int error = 0, mask;
971 
972 	switch (cmd) {
973 	case SIOCSIFFLAGS:
974 		CGEM_LOCK(sc);
975 		if ((ifp->if_flags & IFF_UP) != 0) {
976 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
977 				if (((ifp->if_flags ^ sc->if_old_flags) &
978 				     (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
979 					cgem_rx_filter(sc);
980 				}
981 			} else {
982 				cgem_init_locked(sc);
983 			}
984 		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
985 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
986 			cgem_stop(sc);
987 		}
988 		sc->if_old_flags = ifp->if_flags;
989 		CGEM_UNLOCK(sc);
990 		break;
991 
992 	case SIOCADDMULTI:
993 	case SIOCDELMULTI:
994 		/* Set up multi-cast filters. */
995 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
996 			CGEM_LOCK(sc);
997 			cgem_rx_filter(sc);
998 			CGEM_UNLOCK(sc);
999 		}
1000 		break;
1001 
1002 	case SIOCSIFMEDIA:
1003 	case SIOCGIFMEDIA:
1004 		mii = device_get_softc(sc->miibus);
1005 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1006 		break;
1007 
1008 	case SIOCSIFCAP:
1009 		CGEM_LOCK(sc);
1010 		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1011 
1012 		if ((mask & IFCAP_TXCSUM) != 0) {
1013 			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
1014 				/* Turn on TX checksumming. */
1015 				ifp->if_capenable |= (IFCAP_TXCSUM |
1016 						      IFCAP_TXCSUM_IPV6);
1017 				ifp->if_hwassist |= CGEM_CKSUM_ASSIST;
1018 
1019 				WR4(sc, CGEM_DMA_CFG,
1020 				    RD4(sc, CGEM_DMA_CFG) |
1021 				     CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
1022 			} else {
1023 				/* Turn off TX checksumming. */
1024 				ifp->if_capenable &= ~(IFCAP_TXCSUM |
1025 						       IFCAP_TXCSUM_IPV6);
1026 				ifp->if_hwassist &= ~CGEM_CKSUM_ASSIST;
1027 
1028 				WR4(sc, CGEM_DMA_CFG,
1029 				    RD4(sc, CGEM_DMA_CFG) &
1030 				     ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
1031 			}
1032 		}
1033 		if ((mask & IFCAP_RXCSUM) != 0) {
1034 			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
1035 				/* Turn on RX checksumming. */
1036 				ifp->if_capenable |= (IFCAP_RXCSUM |
1037 						      IFCAP_RXCSUM_IPV6);
1038 				WR4(sc, CGEM_NET_CFG,
1039 				    RD4(sc, CGEM_NET_CFG) |
1040 				     CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
1041 			} else {
1042 				/* Turn off RX checksumming. */
1043 				ifp->if_capenable &= ~(IFCAP_RXCSUM |
1044 						       IFCAP_RXCSUM_IPV6);
1045 				WR4(sc, CGEM_NET_CFG,
1046 				    RD4(sc, CGEM_NET_CFG) &
1047 				     ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
1048 			}
1049 		}
1050 
1051 		CGEM_UNLOCK(sc);
1052 		break;
1053 	default:
1054 		error = ether_ioctl(ifp, cmd, data);
1055 		break;
1056 	}
1057 
1058 	return (error);
1059 }
1060 
1061 /* MII bus support routines.
1062  */
1063 static void
1064 cgem_child_detached(device_t dev, device_t child)
1065 {
1066 	struct cgem_softc *sc = device_get_softc(dev);
1067 	if (child == sc->miibus)
1068 		sc->miibus = NULL;
1069 }
1070 
1071 static int
1072 cgem_ifmedia_upd(struct ifnet *ifp)
1073 {
1074 	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
1075 	struct mii_data *mii;
1076 
1077 	mii = device_get_softc(sc->miibus);
1078 	CGEM_LOCK(sc);
1079 	mii_mediachg(mii);
1080 	CGEM_UNLOCK(sc);
1081 	return (0);
1082 }
1083 
1084 static void
1085 cgem_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1086 {
1087 	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
1088 	struct mii_data *mii;
1089 
1090 	mii = device_get_softc(sc->miibus);
1091 	CGEM_LOCK(sc);
1092 	mii_pollstat(mii);
1093 	ifmr->ifm_active = mii->mii_media_active;
1094 	ifmr->ifm_status = mii->mii_media_status;
1095 	CGEM_UNLOCK(sc);
1096 }
1097 
1098 static int
1099 cgem_miibus_readreg(device_t dev, int phy, int reg)
1100 {
1101 	struct cgem_softc *sc = device_get_softc(dev);
1102 	int tries, val;
1103 
1104 	WR4(sc, CGEM_PHY_MAINT,
1105 	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
1106 	    CGEM_PHY_MAINT_OP_READ |
1107 	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
1108 	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));
1109 
1110 	/* Wait for completion. */
1111 	tries = 0;
1112 	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
1113 		DELAY(5);
1114 		if (++tries > 200) {
1115 			device_printf(dev, "phy read timeout: %d\n", reg);
1116 			return (-1);
1117 		}
1118 	}
1119 
1120 	val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;
1121 
1122 	return (val);
1123 }
1124 
1125 static int
1126 cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
1127 {
1128 	struct cgem_softc *sc = device_get_softc(dev);
1129 	int tries;
1130 
1131 	WR4(sc, CGEM_PHY_MAINT,
1132 	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
1133 	    CGEM_PHY_MAINT_OP_WRITE |
1134 	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
1135 	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
1136 	    (data & CGEM_PHY_MAINT_DATA_MASK));
1137 
1138 	/* Wait for completion. */
1139 	tries = 0;
1140 	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
1141 		DELAY(5);
1142 		if (++tries > 200) {
1143 			device_printf(dev, "phy write timeout: %d\n", reg);
1144 			return (-1);
1145 		}
1146 	}
1147 
1148 	return (0);
1149 }
1150 
1151 
1152 static int
1153 cgem_probe(device_t dev)
1154 {
1155 
1156 	if (!ofw_bus_is_compatible(dev, "cadence,gem"))
1157 		return (ENXIO);
1158 
1159 	device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");
1160 	return (0);
1161 }
1162 
1163 static int
1164 cgem_attach(device_t dev)
1165 {
1166 	struct cgem_softc *sc = device_get_softc(dev);
1167 	struct ifnet *ifp = NULL;
1168 	int rid, err;
1169 	u_char eaddr[ETHER_ADDR_LEN];
1170 
1171 	sc->dev = dev;
1172 	CGEM_LOCK_INIT(sc);
1173 
1174 	/* Get memory resource. */
1175 	rid = 0;
1176 	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1177 					     RF_ACTIVE);
1178 	if (sc->mem_res == NULL) {
1179 		device_printf(dev, "could not allocate memory resources.\n");
1180 		return (ENOMEM);
1181 	}
1182 
1183 	/* Get IRQ resource. */
1184 	rid = 0;
1185 	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1186 					     RF_ACTIVE);
1187 	if (sc->irq_res == NULL) {
1188 		device_printf(dev, "could not allocate interrupt resource.\n");
1189 		cgem_detach(dev);
1190 		return (ENOMEM);
1191 	}
1192 
1193 	ifp = sc->ifp = if_alloc(IFT_ETHER);
1194 	if (ifp == NULL) {
1195 		device_printf(dev, "could not allocate ifnet structure\n");
1196 		cgem_detach(dev);
1197 		return (ENOMEM);
1198 	}
1199 
1200 	CGEM_LOCK(sc);
1201 
1202 	/* Reset hardware. */
1203 	cgem_reset(sc);
1204 
1205 	/* Attach phy to mii bus. */
1206 	err = mii_attach(dev, &sc->miibus, ifp,
1207 			 cgem_ifmedia_upd, cgem_ifmedia_sts,
1208 			 BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
1209 	if (err) {
1210 		CGEM_UNLOCK(sc);
1211 		device_printf(dev, "attaching PHYs failed\n");
1212 		cgem_detach(dev);
1213 		return (err);
1214 	}
1215 
1216 	/* Set up TX and RX descriptor area. */
1217 	err = cgem_setup_descs(sc);
1218 	if (err) {
1219 		CGEM_UNLOCK(sc);
1220 		device_printf(dev, "could not set up dma mem for descs.\n");
1221 		cgem_detach(dev);
1222 		return (ENOMEM);
1223 	}
1224 
1225 	/* Get a MAC address. */
1226 	cgem_get_mac(sc, eaddr);
1227 
1228 	/* Start ticks. */
1229 	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
1230 
1231 	/* Set up ifnet structure. */
1232 	ifp->if_softc = sc;
1233 	if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
1234 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1235 	ifp->if_start = cgem_start;
1236 	ifp->if_ioctl = cgem_ioctl;
1237 	ifp->if_init = cgem_init;
1238 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
1239 	/* XXX: disable hw checksumming for now. */
1240 	ifp->if_hwassist = 0;
1241 	ifp->if_capenable = ifp->if_capabilities &
1242 		~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
1243 	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
1244 	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
1245 	IFQ_SET_READY(&ifp->if_snd);
1246 
1247 	sc->if_old_flags = ifp->if_flags;
1248 	sc->rxbufs = DEFAULT_NUM_RX_BUFS;
1249 
1250 	ether_ifattach(ifp, eaddr);
1251 
1252 	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
1253 			     INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
1254 	if (err) {
1255 		CGEM_UNLOCK(sc);
1256 		device_printf(dev, "could not set interrupt handler.\n");
1257 		ether_ifdetach(ifp);
1258 		cgem_detach(dev);
1259 		return (err);
1260 	}
1261 
1262 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
1263 		       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1264 		       OID_AUTO, "rxbufs", CTLFLAG_RW,
1265 		       &sc->rxbufs, 0,
1266 		       "Number of receive buffers to provide");
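	/* (The rxbufs knob is adjustable at runtime, e.g.
	 *  "sysctl dev.cgem.0.rxbufs=128".) */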
1267 
1268 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
1269 		       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1270 		       OID_AUTO, "_rxoverruns", CTLFLAG_RD,
1271 		       &sc->rxoverruns, 0,
1272 		       "Receive ring overrun events");
1273 
1274 	CGEM_UNLOCK(sc);
1275 
1276 	return (0);
1277 }
1278 
1279 static int
1280 cgem_detach(device_t dev)
1281 {
1282 	struct cgem_softc *sc = device_get_softc(dev);
1283 	int i;
1284 
1285 	if (sc == NULL)
1286 		return (ENODEV);
1287 
1288 	if (device_is_attached(dev)) {
1289 		CGEM_LOCK(sc);
1290 		cgem_stop(sc);
1291 		CGEM_UNLOCK(sc);
1292 		callout_drain(&sc->tick_ch);
1293 		sc->ifp->if_flags &= ~IFF_UP;
1294 		ether_ifdetach(sc->ifp);
1295 	}
1296 
1297 	if (sc->miibus != NULL) {
1298 		device_delete_child(dev, sc->miibus);
1299 		sc->miibus = NULL;
1300 	}
1301 
1302 	/* Release resources. */
1303 	if (sc->mem_res != NULL) {
1304 		bus_release_resource(dev, SYS_RES_MEMORY,
1305 				     rman_get_rid(sc->mem_res), sc->mem_res);
1306 		sc->mem_res = NULL;
1307 	}
1308 	if (sc->irq_res != NULL) {
1309 		if (sc->intrhand)
1310 			bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
1311 		bus_release_resource(dev, SYS_RES_IRQ,
1312 				     rman_get_rid(sc->irq_res), sc->irq_res);
1313 		sc->irq_res = NULL;
1314 	}
1315 
1316 	/* Release DMA resources. */
1317 	if (sc->rxring_dma_map != NULL) {
1318 		bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
1319 				sc->rxring_dma_map);
1320 		sc->rxring_dma_map = NULL;
1321 		for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
1322 			if (sc->rxring_m_dmamap[i] != NULL) {
1323 				bus_dmamap_destroy(sc->mbuf_dma_tag,
1324 						   sc->rxring_m_dmamap[i]);
1325 				sc->rxring_m_dmamap[i] = NULL;
1326 			}
1327 	}
1328 	if (sc->txring_dma_map != NULL) {
1329 		bus_dmamem_free(sc->desc_dma_tag, sc->txring,
1330 				sc->txring_dma_map);
1331 		sc->txring_dma_map = NULL;
1332 		for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
1333 			if (sc->txring_m_dmamap[i] != NULL) {
1334 				bus_dmamap_destroy(sc->mbuf_dma_tag,
1335 						   sc->txring_m_dmamap[i]);
1336 				sc->txring_m_dmamap[i] = NULL;
1337 			}
1338 	}
1339 	if (sc->desc_dma_tag != NULL) {
1340 		bus_dma_tag_destroy(sc->desc_dma_tag);
1341 		sc->desc_dma_tag = NULL;
1342 	}
1343 	if (sc->mbuf_dma_tag != NULL) {
1344 		bus_dma_tag_destroy(sc->mbuf_dma_tag);
1345 		sc->mbuf_dma_tag = NULL;
1346 	}
1347 
1348 	bus_generic_detach(dev);
1349 
1350 	CGEM_LOCK_DESTROY(sc);
1351 
1352 	return (0);
1353 }
1354 
1355 static device_method_t cgem_methods[] = {
1356 	/* Device interface */
1357 	DEVMETHOD(device_probe,		cgem_probe),
1358 	DEVMETHOD(device_attach,	cgem_attach),
1359 	DEVMETHOD(device_detach,	cgem_detach),
1360 
1361 	/* Bus interface */
1362 	DEVMETHOD(bus_child_detached,	cgem_child_detached),
1363 
1364 	/* MII interface */
1365 	DEVMETHOD(miibus_readreg,	cgem_miibus_readreg),
1366 	DEVMETHOD(miibus_writereg,	cgem_miibus_writereg),
1367 
1368 	DEVMETHOD_END
1369 };
1370 
1371 static driver_t cgem_driver = {
1372 	"cgem",
1373 	cgem_methods,
1374 	sizeof(struct cgem_softc),
1375 };
1376 
1377 DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
1378 DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
1379 MODULE_DEPEND(cgem, miibus, 1, 1, 1);
1380 MODULE_DEPEND(cgem, ether, 1, 1, 1);
1381