xref: /freebsd/sys/dev/gem/if_gem.c (revision ba3c1f5972d7b90feb6e6da47905ff2757e0fe57)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (C) 2001 Eduardo Horvath.
5  * Copyright (c) 2001-2003 Thomas Moestl
6  * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  *	from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
38  */
39 
40 #if 0
41 #define	GEM_DEBUG
42 #endif
43 
44 #if 0	/* XXX: In case of emergency, re-enable this. */
45 #define	GEM_RINT_TIMEOUT
46 #endif
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/bus.h>
51 #include <sys/callout.h>
52 #include <sys/endian.h>
53 #include <sys/mbuf.h>
54 #include <sys/malloc.h>
55 #include <sys/kernel.h>
56 #include <sys/lock.h>
57 #include <sys/module.h>
58 #include <sys/mutex.h>
59 #include <sys/socket.h>
60 #include <sys/sockio.h>
61 #include <sys/rman.h>
62 
63 #include <net/bpf.h>
64 #include <net/ethernet.h>
65 #include <net/if.h>
66 #include <net/if_var.h>
67 #include <net/if_arp.h>
68 #include <net/if_dl.h>
69 #include <net/if_media.h>
70 #include <net/if_types.h>
71 #include <net/if_vlan_var.h>
72 
73 #include <netinet/in.h>
74 #include <netinet/in_systm.h>
75 #include <netinet/ip.h>
76 #include <netinet/tcp.h>
77 #include <netinet/udp.h>
78 
79 #include <machine/bus.h>
80 
81 #include <dev/mii/mii.h>
82 #include <dev/mii/miivar.h>
83 
84 #include <dev/gem/if_gemreg.h>
85 #include <dev/gem/if_gemvar.h>
86 
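/*
 * The RX and TX ring sizes must be powers of two that the hardware can
 * encode; see gem_ringsize() for the supported values (32 to 8192).
 */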
87 CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
88 CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);
89 
90 #define	GEM_TRIES	10000
91 
92 /*
93  * The hardware supports basic TCP/UDP checksum offloading.  However,
94  * the hardware doesn't compensate the checksum of UDP datagrams, which
95  * can yield a transmitted checksum of 0x0.  As a safeguard, UDP checksum
96  * offload is disabled by default.  It can be reactivated by setting the
97  * special link option link0 with ifconfig(8).
98  */
99 #define	GEM_CSUM_FEATURES	(CSUM_TCP)
100 
101 static int	gem_add_rxbuf(struct gem_softc *sc, int idx);
102 static int	gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr,
103 		    uint32_t set);
104 static void	gem_cddma_callback(void *xsc, bus_dma_segment_t *segs,
105 		    int nsegs, int error);
106 static int	gem_disable_rx(struct gem_softc *sc);
107 static int	gem_disable_tx(struct gem_softc *sc);
108 static void	gem_eint(struct gem_softc *sc, u_int status);
109 static void	gem_init(void *xsc);
110 static void	gem_init_locked(struct gem_softc *sc);
111 static void	gem_init_regs(struct gem_softc *sc);
112 static int	gem_ioctl(if_t ifp, u_long cmd, caddr_t data);
113 static int	gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head);
114 static int	gem_meminit(struct gem_softc *sc);
115 static void	gem_mifinit(struct gem_softc *sc);
116 static void	gem_reset(struct gem_softc *sc);
117 static int	gem_reset_rx(struct gem_softc *sc);
118 static void	gem_reset_rxdma(struct gem_softc *sc);
119 static int	gem_reset_tx(struct gem_softc *sc);
120 static u_int	gem_ringsize(u_int sz);
121 static void	gem_rint(struct gem_softc *sc);
122 #ifdef GEM_RINT_TIMEOUT
123 static void	gem_rint_timeout(void *arg);
124 #endif
125 static inline void gem_rxcksum(struct mbuf *m, uint64_t flags);
126 static void	gem_rxdrain(struct gem_softc *sc);
127 static void	gem_setladrf(struct gem_softc *sc);
128 static void	gem_start(if_t ifp);
129 static void	gem_start_locked(if_t ifp);
130 static void	gem_stop(if_t ifp, int disable);
131 static void	gem_tick(void *arg);
132 static void	gem_tint(struct gem_softc *sc);
133 static inline void gem_txkick(struct gem_softc *sc);
134 static int	gem_watchdog(struct gem_softc *sc);
135 
136 DRIVER_MODULE(miibus, gem, miibus_driver, 0, 0);
137 MODULE_DEPEND(gem, miibus, 1, 1, 1);
138 
139 #ifdef GEM_DEBUG
140 #include <sys/ktr.h>
141 #define	KTR_GEM		KTR_SPARE2
142 #endif
143 
144 int
145 gem_attach(struct gem_softc *sc)
146 {
147 	struct gem_txsoft *txs;
148 	if_t ifp;
149 	int error, i, phy;
150 	uint32_t v;
151 
152 	if (bootverbose)
153 		device_printf(sc->sc_dev, "flags=0x%x\n", sc->sc_flags);
154 
155 	/* Set up ifnet structure. */
156 	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
157 	if (ifp == NULL)
158 		return (ENOSPC);
159 	sc->sc_csum_features = GEM_CSUM_FEATURES;
160 	if_setsoftc(ifp, sc);
161 	if_initname(ifp, device_get_name(sc->sc_dev),
162 	    device_get_unit(sc->sc_dev));
163 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
164 	if_setstartfn(ifp, gem_start);
165 	if_setioctlfn(ifp, gem_ioctl);
166 	if_setinitfn(ifp, gem_init);
167 	if_setsendqlen(ifp, GEM_TXQUEUELEN);
168 	if_setsendqready(ifp);
169 
170 	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
171 #ifdef GEM_RINT_TIMEOUT
172 	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
173 #endif
174 
175 	/* Make sure the chip is stopped. */
176 	gem_reset(sc);
177 
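	/*
	 * Create the parent bus DMA tag, restricted to 32-bit addresses
	 * since only 32-bit descriptor ring base addresses are programmed
	 * below; the RX, TX and control data tags created next derive from
	 * it and inherit that restriction.
	 */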
178 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
179 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
180 	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
181 	    NULL, &sc->sc_pdmatag);
182 	if (error != 0)
183 		goto fail_ifnet;
184 
185 	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
186 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
187 	    1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
188 	if (error != 0)
189 		goto fail_ptag;
190 
191 	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
192 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
193 	    MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
194 	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
195 	if (error != 0)
196 		goto fail_rtag;
197 
198 	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
199 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
200 	    sizeof(struct gem_control_data), 1,
201 	    sizeof(struct gem_control_data), 0,
202 	    NULL, NULL, &sc->sc_cdmatag);
203 	if (error != 0)
204 		goto fail_ttag;
205 
206 	/*
207 	 * Allocate the control data structures, create and load the
208 	 * DMA map for it.
209 	 */
210 	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
211 	    (void **)&sc->sc_control_data,
212 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
213 	    &sc->sc_cddmamap)) != 0) {
214 		device_printf(sc->sc_dev,
215 		    "unable to allocate control data, error = %d\n", error);
216 		goto fail_ctag;
217 	}
218 
219 	sc->sc_cddma = 0;
220 	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
221 	    sc->sc_control_data, sizeof(struct gem_control_data),
222 	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
223 		device_printf(sc->sc_dev,
224 		    "unable to load control data DMA map, error = %d\n",
225 		    error);
226 		goto fail_cmem;
227 	}
228 
229 	/*
230 	 * Initialize the transmit job descriptors.
231 	 */
232 	STAILQ_INIT(&sc->sc_txfreeq);
233 	STAILQ_INIT(&sc->sc_txdirtyq);
234 
235 	/*
236 	 * Create the transmit buffer DMA maps.
237 	 */
238 	error = ENOMEM;
239 	for (i = 0; i < GEM_TXQUEUELEN; i++) {
240 		txs = &sc->sc_txsoft[i];
241 		txs->txs_mbuf = NULL;
242 		txs->txs_ndescs = 0;
243 		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
244 		    &txs->txs_dmamap)) != 0) {
245 			device_printf(sc->sc_dev,
246 			    "unable to create TX DMA map %d, error = %d\n",
247 			    i, error);
248 			goto fail_txd;
249 		}
250 		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
251 	}
252 
253 	/*
254 	 * Create the receive buffer DMA maps.
255 	 */
256 	for (i = 0; i < GEM_NRXDESC; i++) {
257 		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
258 		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
259 			device_printf(sc->sc_dev,
260 			    "unable to create RX DMA map %d, error = %d\n",
261 			    i, error);
262 			goto fail_rxd;
263 		}
264 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
265 	}
266 
267 	/* Bypass probing the PHYs if we already know for sure that a SERDES is used. */
268 	if ((sc->sc_flags & GEM_SERDES) != 0)
269 		goto serdes;
270 
271 	GEM_WRITE_4(sc, GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_MII);
272 	GEM_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4,
273 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
274 
275 	gem_mifinit(sc);
276 
277 	/*
278 	 * Look for an external PHY.
279 	 */
280 	error = ENXIO;
281 	v = GEM_READ_4(sc, GEM_MIF_CONFIG);
282 	if ((v & GEM_MIF_CONFIG_MDI1) != 0) {
283 		v |= GEM_MIF_CONFIG_PHY_SEL;
284 		GEM_WRITE_4(sc, GEM_MIF_CONFIG, v);
285 		GEM_BARRIER(sc, GEM_MIF_CONFIG, 4,
286 		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
287 		error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
288 		    gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK,
289 		    MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE);
290 	}
291 
292 	/*
293 	 * Fall back on an internal PHY if no external PHY was found.
294 	 * Note that with Apple (K2) GMACs GEM_MIF_CONFIG_MDI0 can't be
295 	 * trusted when the firmware has powered down the chip.
296 	 */
297 	if (error != 0 &&
298 	    ((v & GEM_MIF_CONFIG_MDI0) != 0 || GEM_IS_APPLE(sc))) {
299 		v &= ~GEM_MIF_CONFIG_PHY_SEL;
300 		GEM_WRITE_4(sc, GEM_MIF_CONFIG, v);
301 		GEM_BARRIER(sc, GEM_MIF_CONFIG, 4,
302 		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
303 		switch (sc->sc_variant) {
304 		case GEM_APPLE_K2_GMAC:
305 			phy = GEM_PHYAD_INTERNAL;
306 			break;
307 		case GEM_APPLE_GMAC:
308 			phy = GEM_PHYAD_EXTERNAL;
309 			break;
310 		default:
311 			phy = MII_PHY_ANY;
312 			break;
313 		}
314 		error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
315 		    gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, phy,
316 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
317 	}
318 
319 	/*
320 	 * Try the external PCS SERDES if we didn't find any PHYs.
321 	 */
322 	if (error != 0 && sc->sc_variant == GEM_SUN_GEM) {
323  serdes:
324 		GEM_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
325 		    GEM_MII_DATAPATH_SERDES);
326 		GEM_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4,
327 		    BUS_SPACE_BARRIER_WRITE);
328 		GEM_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
329 		    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
330 		GEM_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4,
331 		    BUS_SPACE_BARRIER_WRITE);
332 		GEM_WRITE_4(sc, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);
333 		GEM_BARRIER(sc, GEM_MII_CONFIG, 4,
334 		    BUS_SPACE_BARRIER_WRITE);
335 		sc->sc_flags |= GEM_SERDES;
336 		error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
337 		    gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK,
338 		    GEM_PHYAD_EXTERNAL, MII_OFFSET_ANY, MIIF_DOPAUSE);
339 	}
340 	if (error != 0) {
341 		device_printf(sc->sc_dev, "attaching PHYs failed\n");
342 		goto fail_rxd;
343 	}
344 	sc->sc_mii = device_get_softc(sc->sc_miibus);
345 
346 	/*
347 	 * From this point forward, the attachment cannot fail.  A failure
348 	 * before this point releases all resources that may have been
349 	 * allocated.
350 	 */
351 
352 	/* Get RX FIFO size. */
353 	sc->sc_rxfifosize = 64 *
354 	    GEM_READ_4(sc, GEM_RX_FIFO_SIZE);
355 
356 	/* Get TX FIFO size. */
357 	v = GEM_READ_4(sc, GEM_TX_FIFO_SIZE);
358 	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
359 	    sc->sc_rxfifosize / 1024, v / 16);
360 
361 	/* Attach the interface. */
362 	ether_ifattach(ifp, sc->sc_enaddr);
363 
364 	/*
365 	 * Tell the upper layer(s) we support long frames/checksum offloads.
366 	 */
367 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
368 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM, 0);
369 	if_sethwassistbits(ifp, sc->sc_csum_features, 0);
370 	if_setcapenablebit(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM, 0);
371 
372 	return (0);
373 
374 	/*
375 	 * Free any resources we've allocated during the failed attach
376 	 * attempt.  Do this in reverse order and fall through.
377 	 */
378  fail_rxd:
379 	for (i = 0; i < GEM_NRXDESC; i++)
380 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
381 			bus_dmamap_destroy(sc->sc_rdmatag,
382 			    sc->sc_rxsoft[i].rxs_dmamap);
383  fail_txd:
384 	for (i = 0; i < GEM_TXQUEUELEN; i++)
385 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
386 			bus_dmamap_destroy(sc->sc_tdmatag,
387 			    sc->sc_txsoft[i].txs_dmamap);
388 	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
389  fail_cmem:
390 	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
391 	    sc->sc_cddmamap);
392  fail_ctag:
393 	bus_dma_tag_destroy(sc->sc_cdmatag);
394  fail_ttag:
395 	bus_dma_tag_destroy(sc->sc_tdmatag);
396  fail_rtag:
397 	bus_dma_tag_destroy(sc->sc_rdmatag);
398  fail_ptag:
399 	bus_dma_tag_destroy(sc->sc_pdmatag);
400  fail_ifnet:
401 	if_free(ifp);
402 	return (error);
403 }
404 
405 void
406 gem_detach(struct gem_softc *sc)
407 {
408 	if_t ifp = sc->sc_ifp;
409 	int i;
410 
411 	ether_ifdetach(ifp);
412 	GEM_LOCK(sc);
413 	gem_stop(ifp, 1);
414 	GEM_UNLOCK(sc);
415 	callout_drain(&sc->sc_tick_ch);
416 #ifdef GEM_RINT_TIMEOUT
417 	callout_drain(&sc->sc_rx_ch);
418 #endif
419 	if_free(ifp);
420 	device_delete_child(sc->sc_dev, sc->sc_miibus);
421 
422 	for (i = 0; i < GEM_NRXDESC; i++)
423 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
424 			bus_dmamap_destroy(sc->sc_rdmatag,
425 			    sc->sc_rxsoft[i].rxs_dmamap);
426 	for (i = 0; i < GEM_TXQUEUELEN; i++)
427 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
428 			bus_dmamap_destroy(sc->sc_tdmatag,
429 			    sc->sc_txsoft[i].txs_dmamap);
430 	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
431 	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
432 	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
433 	    sc->sc_cddmamap);
434 	bus_dma_tag_destroy(sc->sc_cdmatag);
435 	bus_dma_tag_destroy(sc->sc_tdmatag);
436 	bus_dma_tag_destroy(sc->sc_rdmatag);
437 	bus_dma_tag_destroy(sc->sc_pdmatag);
438 }
439 
440 void
441 gem_suspend(struct gem_softc *sc)
442 {
443 	if_t ifp = sc->sc_ifp;
444 
445 	GEM_LOCK(sc);
446 	gem_stop(ifp, 0);
447 	GEM_UNLOCK(sc);
448 }
449 
450 void
451 gem_resume(struct gem_softc *sc)
452 {
453 	if_t ifp = sc->sc_ifp;
454 
455 	GEM_LOCK(sc);
456 	/*
457 	 * On resume, all registers have to be initialized again, just like
458 	 * after a power-on.
459 	 */
460 	sc->sc_flags &= ~GEM_INITED;
461 	if (if_getflags(ifp) & IFF_UP)
462 		gem_init_locked(sc);
463 	GEM_UNLOCK(sc);
464 }
465 
466 static inline void
467 gem_rxcksum(struct mbuf *m, uint64_t flags)
468 {
469 	struct ether_header *eh;
470 	struct ip *ip;
471 	struct udphdr *uh;
472 	uint16_t *opts;
473 	int32_t hlen, len, pktlen;
474 	uint32_t temp32;
475 	uint16_t cksum;
476 
477 	pktlen = m->m_pkthdr.len;
478 	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
479 		return;
480 	eh = mtod(m, struct ether_header *);
481 	if (eh->ether_type != htons(ETHERTYPE_IP))
482 		return;
483 	ip = (struct ip *)(eh + 1);
484 	if (ip->ip_v != IPVERSION)
485 		return;
486 
487 	hlen = ip->ip_hl << 2;
488 	pktlen -= sizeof(struct ether_header);
489 	if (hlen < sizeof(struct ip))
490 		return;
491 	if (ntohs(ip->ip_len) < hlen)
492 		return;
493 	if (ntohs(ip->ip_len) != pktlen)
494 		return;
495 	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
496 		return;	/* Cannot handle fragmented packet. */
497 
498 	switch (ip->ip_p) {
499 	case IPPROTO_TCP:
500 		if (pktlen < (hlen + sizeof(struct tcphdr)))
501 			return;
502 		break;
503 	case IPPROTO_UDP:
504 		if (pktlen < (hlen + sizeof(struct udphdr)))
505 			return;
506 		uh = (struct udphdr *)((uint8_t *)ip + hlen);
507 		if (uh->uh_sum == 0)
508 			return; /* no checksum */
509 		break;
510 	default:
511 		return;
512 	}
513 
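	/*
	 * The chip computes a 16-bit ones'-complement sum starting at a
	 * fixed offset (ETHER_HDR_LEN + sizeof(struct ip), cf. the
	 * GEM_RX_CONFIG_CXM_START_SHFT setup), i.e. it assumes an IP header
	 * without options.  Extract that sum from the completion flags and,
	 * if options are present, subtract the option words it wrongly
	 * included before handing the result to the stack via
	 * csum_data/CSUM_DATA_VALID.
	 */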
514 	cksum = ~(flags & GEM_RD_CHECKSUM);
515 	/* checksum fixup for IP options */
516 	len = hlen - sizeof(struct ip);
517 	if (len > 0) {
518 		opts = (uint16_t *)(ip + 1);
519 		for (; len > 0; len -= sizeof(uint16_t), opts++) {
520 			temp32 = cksum - *opts;
521 			temp32 = (temp32 >> 16) + (temp32 & 65535);
522 			cksum = temp32 & 65535;
523 		}
524 	}
525 	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
526 	m->m_pkthdr.csum_data = cksum;
527 }
528 
529 static void
530 gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
531 {
532 	struct gem_softc *sc = xsc;
533 
534 	if (error != 0)
535 		return;
536 	if (nsegs != 1)
537 		panic("%s: bad control buffer segment count", __func__);
538 	sc->sc_cddma = segs[0].ds_addr;
539 }
540 
541 static void
542 gem_tick(void *arg)
543 {
544 	struct gem_softc *sc = arg;
545 	if_t ifp = sc->sc_ifp;
546 	uint32_t v;
547 
548 	GEM_LOCK_ASSERT(sc, MA_OWNED);
549 
550 	/*
551 	 * Unload collision and error counters.
552 	 */
553 	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
554 	    GEM_READ_4(sc, GEM_MAC_NORM_COLL_CNT) +
555 	    GEM_READ_4(sc, GEM_MAC_FIRST_COLL_CNT));
556 	v = GEM_READ_4(sc, GEM_MAC_EXCESS_COLL_CNT) +
557 	    GEM_READ_4(sc, GEM_MAC_LATE_COLL_CNT);
558 	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, v);
559 	if_inc_counter(ifp, IFCOUNTER_OERRORS, v);
560 	if_inc_counter(ifp, IFCOUNTER_IERRORS,
561 	    GEM_READ_4(sc, GEM_MAC_RX_LEN_ERR_CNT) +
562 	    GEM_READ_4(sc, GEM_MAC_RX_ALIGN_ERR) +
563 	    GEM_READ_4(sc, GEM_MAC_RX_CRC_ERR_CNT) +
564 	    GEM_READ_4(sc, GEM_MAC_RX_CODE_VIOL));
565 
566 	/*
567 	 * Then clear the hardware counters.
568 	 */
569 	GEM_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
570 	GEM_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
571 	GEM_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
572 	GEM_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
573 	GEM_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
574 	GEM_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
575 	GEM_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
576 	GEM_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);
577 
578 	mii_tick(sc->sc_mii);
579 
580 	if (gem_watchdog(sc) == EJUSTRETURN)
581 		return;
582 
583 	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
584 }
585 
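/*
 * Poll register `r' until the bits in `clr' read back as zero and the bits
 * in `set' read back as one; return 1 on success and 0 if the condition is
 * not met within GEM_TRIES polls of 100us each (about one second).
 */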
586 static int
587 gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set)
588 {
589 	int i;
590 	uint32_t reg;
591 
592 	for (i = GEM_TRIES; i--; DELAY(100)) {
593 		reg = GEM_READ_4(sc, r);
594 		if ((reg & clr) == 0 && (reg & set) == set)
595 			return (1);
596 	}
597 	return (0);
598 }
599 
600 static void
601 gem_reset(struct gem_softc *sc)
602 {
603 
604 #ifdef GEM_DEBUG
605 	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
606 #endif
607 	gem_reset_rx(sc);
608 	gem_reset_tx(sc);
609 
610 	/* Do a full reset. */
611 	GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
612 	GEM_BARRIER(sc, GEM_RESET, 4,
613 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
614 	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
615 		device_printf(sc->sc_dev, "cannot reset device\n");
616 }
617 
618 static void
619 gem_rxdrain(struct gem_softc *sc)
620 {
621 	struct gem_rxsoft *rxs;
622 	int i;
623 
624 	for (i = 0; i < GEM_NRXDESC; i++) {
625 		rxs = &sc->sc_rxsoft[i];
626 		if (rxs->rxs_mbuf != NULL) {
627 			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
628 			    BUS_DMASYNC_POSTREAD);
629 			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
630 			m_freem(rxs->rxs_mbuf);
631 			rxs->rxs_mbuf = NULL;
632 		}
633 	}
634 }
635 
636 static void
637 gem_stop(if_t ifp, int disable)
638 {
639 	struct gem_softc *sc = if_getsoftc(ifp);
640 	struct gem_txsoft *txs;
641 
642 #ifdef GEM_DEBUG
643 	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
644 #endif
645 
646 	callout_stop(&sc->sc_tick_ch);
647 #ifdef GEM_RINT_TIMEOUT
648 	callout_stop(&sc->sc_rx_ch);
649 #endif
650 
651 	gem_reset_tx(sc);
652 	gem_reset_rx(sc);
653 
654 	/*
655 	 * Release any queued transmit buffers.
656 	 */
657 	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
658 		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
659 		if (txs->txs_ndescs != 0) {
660 			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
661 			    BUS_DMASYNC_POSTWRITE);
662 			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
663 			if (txs->txs_mbuf != NULL) {
664 				m_freem(txs->txs_mbuf);
665 				txs->txs_mbuf = NULL;
666 			}
667 		}
668 		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
669 	}
670 
671 	if (disable)
672 		gem_rxdrain(sc);
673 
674 	/*
675 	 * Mark the interface down and cancel the watchdog timer.
676 	 */
677 	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
678 	sc->sc_flags &= ~GEM_LINK;
679 	sc->sc_wdog_timer = 0;
680 }
681 
682 static int
683 gem_reset_rx(struct gem_softc *sc)
684 {
685 
686 	/*
687 	 * Resetting while DMA is in progress can cause a bus hang, so we
688 	 * disable DMA first.
689 	 */
690 	(void)gem_disable_rx(sc);
691 	GEM_WRITE_4(sc, GEM_RX_CONFIG, 0);
692 	GEM_BARRIER(sc, GEM_RX_CONFIG, 4,
693 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
694 	if (!gem_bitwait(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0))
695 		device_printf(sc->sc_dev, "cannot disable RX DMA\n");
696 
697 	/* Wait 5ms extra. */
698 	DELAY(5000);
699 
700 	/* Reset the ERX. */
701 	GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_RX);
702 	GEM_BARRIER(sc, GEM_RESET, 4,
703 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
704 	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
705 		device_printf(sc->sc_dev, "cannot reset receiver\n");
706 		return (1);
707 	}
708 
709 	/* Finally, reset RX MAC. */
710 	GEM_WRITE_4(sc, GEM_MAC_RXRESET, 1);
711 	GEM_BARRIER(sc, GEM_MAC_RXRESET, 4,
712 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
713 	if (!gem_bitwait(sc, GEM_MAC_RXRESET, 1, 0)) {
714 		device_printf(sc->sc_dev, "cannot reset RX MAC\n");
715 		return (1);
716 	}
717 
718 	return (0);
719 }
720 
721 /*
722  * Reset the receiver DMA engine.
723  *
724  * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW,
725  * etc. in order to reset only the receiver DMA engine and avoid a full
726  * reset, which among other things also downs the link and clears the FIFOs.
727  */
728 static void
729 gem_reset_rxdma(struct gem_softc *sc)
730 {
731 	int i;
732 
733 	if (gem_reset_rx(sc) != 0) {
734 		if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING);
735 		return (gem_init_locked(sc));
736 	}
737 	for (i = 0; i < GEM_NRXDESC; i++)
738 		if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
739 			GEM_UPDATE_RXDESC(sc, i);
740 	sc->sc_rxptr = 0;
741 	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
742 
743 	/* NOTE: we use only 32-bit DMA addresses here. */
744 	GEM_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
745 	GEM_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
746 	GEM_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
747 	GEM_WRITE_4(sc, GEM_RX_CONFIG,
748 	    gem_ringsize(GEM_NRXDESC /* XXX */) |
749 	    ((ETHER_HDR_LEN + sizeof(struct ip)) <<
750 	    GEM_RX_CONFIG_CXM_START_SHFT) |
751 	    (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
752 	    (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT));
753 	GEM_WRITE_4(sc, GEM_RX_BLANKING,
754 	    ((6 * ((sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1)) <<
755 	    GEM_RX_BLANKING_TIME_SHIFT) | 6);
756 	GEM_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
757 	    (3 * sc->sc_rxfifosize / 256) |
758 	    ((sc->sc_rxfifosize / 256) << 12));
759 	GEM_WRITE_4(sc, GEM_RX_CONFIG,
760 	    GEM_READ_4(sc, GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN);
761 	GEM_WRITE_4(sc, GEM_MAC_RX_MASK,
762 	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
763 	/*
764 	 * Clear the RX filter and reprogram it.  This will also set the
765 	 * current RX MAC configuration and enable it.
766 	 */
767 	gem_setladrf(sc);
768 }
769 
770 static int
771 gem_reset_tx(struct gem_softc *sc)
772 {
773 
774 	/*
775 	 * Resetting while DMA is in progress can cause a bus hang, so we
776 	 * disable DMA first.
777 	 */
778 	(void)gem_disable_tx(sc);
779 	GEM_WRITE_4(sc, GEM_TX_CONFIG, 0);
780 	GEM_BARRIER(sc, GEM_TX_CONFIG, 4,
781 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
782 	if (!gem_bitwait(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0))
783 		device_printf(sc->sc_dev, "cannot disable TX DMA\n");
784 
785 	/* Wait 5ms extra. */
786 	DELAY(5000);
787 
788 	/* Finally, reset the ETX. */
789 	GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_TX);
790 	GEM_BARRIER(sc, GEM_RESET, 4,
791 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
792 	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
793 		device_printf(sc->sc_dev, "cannot reset transmitter\n");
794 		return (1);
795 	}
796 	return (0);
797 }
798 
799 static int
800 gem_disable_rx(struct gem_softc *sc)
801 {
802 
803 	GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG,
804 	    GEM_READ_4(sc, GEM_MAC_RX_CONFIG) & ~GEM_MAC_RX_ENABLE);
805 	GEM_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
806 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
807 	if (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
808 		return (1);
809 	device_printf(sc->sc_dev, "cannot disable RX MAC\n");
810 	return (0);
811 }
812 
813 static int
814 gem_disable_tx(struct gem_softc *sc)
815 {
816 
817 	GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG,
818 	    GEM_READ_4(sc, GEM_MAC_TX_CONFIG) & ~GEM_MAC_TX_ENABLE);
819 	GEM_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
820 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
821 	if (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
822 		return (1);
823 	device_printf(sc->sc_dev, "cannot disable TX MAC\n");
824 	return (0);
825 }
826 
827 static int
828 gem_meminit(struct gem_softc *sc)
829 {
830 	struct gem_rxsoft *rxs;
831 	int error, i;
832 
833 	GEM_LOCK_ASSERT(sc, MA_OWNED);
834 
835 	/*
836 	 * Initialize the transmit descriptor ring.
837 	 */
838 	for (i = 0; i < GEM_NTXDESC; i++) {
839 		sc->sc_txdescs[i].gd_flags = 0;
840 		sc->sc_txdescs[i].gd_addr = 0;
841 	}
842 	sc->sc_txfree = GEM_MAXTXFREE;
843 	sc->sc_txnext = 0;
844 	sc->sc_txwin = 0;
845 
846 	/*
847 	 * Initialize the receive descriptor and receive job
848 	 * descriptor rings.
849 	 */
850 	for (i = 0; i < GEM_NRXDESC; i++) {
851 		rxs = &sc->sc_rxsoft[i];
852 		if (rxs->rxs_mbuf == NULL) {
853 			if ((error = gem_add_rxbuf(sc, i)) != 0) {
854 				device_printf(sc->sc_dev,
855 				    "unable to allocate or map RX buffer %d, "
856 				    "error = %d\n", i, error);
857 				/*
858 				 * XXX we should attempt to run with fewer
859 				 * receive buffers instead of just failing.
860 				 */
861 				gem_rxdrain(sc);
862 				return (1);
863 			}
864 		} else
865 			GEM_INIT_RXDESC(sc, i);
866 	}
867 	sc->sc_rxptr = 0;
868 
869 	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
870 
871 	return (0);
872 }
873 
874 static u_int
875 gem_ringsize(u_int sz)
876 {
877 
878 	switch (sz) {
879 	case 32:
880 		return (GEM_RING_SZ_32);
881 	case 64:
882 		return (GEM_RING_SZ_64);
883 	case 128:
884 		return (GEM_RING_SZ_128);
885 	case 256:
886 		return (GEM_RING_SZ_256);
887 	case 512:
888 		return (GEM_RING_SZ_512);
889 	case 1024:
890 		return (GEM_RING_SZ_1024);
891 	case 2048:
892 		return (GEM_RING_SZ_2048);
893 	case 4096:
894 		return (GEM_RING_SZ_4096);
895 	case 8192:
896 		return (GEM_RING_SZ_8192);
897 	default:
898 		printf("%s: invalid ring size %d\n", __func__, sz);
899 		return (GEM_RING_SZ_32);
900 	}
901 }
902 
903 static void
904 gem_init(void *xsc)
905 {
906 	struct gem_softc *sc = xsc;
907 
908 	GEM_LOCK(sc);
909 	gem_init_locked(sc);
910 	GEM_UNLOCK(sc);
911 }
912 
913 /*
914  * Initialization of interface; set up initialization block
915  * and transmit/receive descriptor rings.
916  */
917 static void
918 gem_init_locked(struct gem_softc *sc)
919 {
920 	if_t ifp = sc->sc_ifp;
921 	uint32_t v;
922 
923 	GEM_LOCK_ASSERT(sc, MA_OWNED);
924 
925 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
926 		return;
927 
928 #ifdef GEM_DEBUG
929 	CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
930 	    __func__);
931 #endif
932 	/*
933 	 * Initialization sequence.  The numbered steps below correspond
934 	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
935 	 * Channel Engine manual (part of the PCIO manual).
936 	 * See also the STP2002-STQ document from Sun Microsystems.
937 	 */
938 
939 	/* step 1 & 2.  Reset the Ethernet Channel. */
940 	gem_stop(ifp, 0);
941 	gem_reset(sc);
942 #ifdef GEM_DEBUG
943 	CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
944 	    __func__);
945 #endif
946 
947 	if ((sc->sc_flags & GEM_SERDES) == 0)
948 		/* Re-initialize the MIF. */
949 		gem_mifinit(sc);
950 
951 	/* step 3.  Setup data structures in host memory. */
952 	if (gem_meminit(sc) != 0)
953 		return;
954 
955 	/* step 4.  TX MAC registers & counters */
956 	gem_init_regs(sc);
957 
958 	/* step 5.  RX MAC registers & counters */
959 
960 	/* step 6 & 7.  Program Descriptor Ring Base Addresses. */
961 	/* NOTE: we use only 32-bit DMA addresses here. */
962 	GEM_WRITE_4(sc, GEM_TX_RING_PTR_HI, 0);
963 	GEM_WRITE_4(sc, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
964 
965 	GEM_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
966 	GEM_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
967 #ifdef GEM_DEBUG
968 	CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx",
969 	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
970 #endif
971 
972 	/* step 8.  Global Configuration & Interrupt Mask */
973 
974 	/*
975 	 * Set the internal arbitration to "infinite" bursts of the
976 	 * maximum length of 31 * 64 bytes so DMA transfers aren't
977 	 * split up into cache-line-sized chunks.  This greatly improves
978 	 * RX performance.
979 	 * Enable silicon bug workarounds for the Apple variants.
980 	 */
981 	GEM_WRITE_4(sc, GEM_CONFIG,
982 	    GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
983 	    GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ?
984 	    GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));
985 
986 	GEM_WRITE_4(sc, GEM_INTMASK,
987 	    ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
988 	    GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
989 	    GEM_INTR_BERR
990 #ifdef GEM_DEBUG
991 	    | GEM_INTR_PCS | GEM_INTR_MIF
992 #endif
993 	    ));
994 	GEM_WRITE_4(sc, GEM_MAC_RX_MASK,
995 	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
996 	GEM_WRITE_4(sc, GEM_MAC_TX_MASK,
997 	    GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
998 	    GEM_MAC_TX_PEAK_EXP);
999 #ifdef GEM_DEBUG
1000 	GEM_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
1001 	    ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
1002 #else
1003 	GEM_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
1004 	    GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
1005 #endif
1006 
1007 	/* step 9.  ETX Configuration: use mostly default values. */
1008 
1009 	/* Enable DMA. */
1010 	v = gem_ringsize(GEM_NTXDESC);
1011 	/* Set TX FIFO threshold and enable DMA. */
1012 	v |= (0x4ff << 10) & GEM_TX_CONFIG_TXFIFO_TH;
1013 	GEM_WRITE_4(sc, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);
1014 
1015 	/* step 10.  ERX Configuration */
1016 
1017 	/* Encode Receive Descriptor ring size. */
1018 	v = gem_ringsize(GEM_NRXDESC /* XXX */);
1019 	/* RX TCP/UDP checksum offset */
1020 	v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
1021 	    GEM_RX_CONFIG_CXM_START_SHFT);
1022 	/* Set RX FIFO threshold, set first byte offset and enable DMA. */
1023 	GEM_WRITE_4(sc, GEM_RX_CONFIG,
1024 	    v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
1025 	    (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT) |
1026 	    GEM_RX_CONFIG_RXDMA_EN);
1027 
1028 	GEM_WRITE_4(sc, GEM_RX_BLANKING,
1029 	    ((6 * ((sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1)) <<
1030 	    GEM_RX_BLANKING_TIME_SHIFT) | 6);
1031 
1032 	/*
1033 	 * The following value is for an OFF Threshold of about 3/4 full
1034 	 * and an ON Threshold of 1/4 full.
1035 	 */
1036 	GEM_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
1037 	    (3 * sc->sc_rxfifosize / 256) |
1038 	    ((sc->sc_rxfifosize / 256) << 12));
1039 
1040 	/* step 11.  Configure Media. */
1041 
1042 	/* step 12.  RX_MAC Configuration Register */
1043 	v = GEM_READ_4(sc, GEM_MAC_RX_CONFIG);
1044 	v &= ~GEM_MAC_RX_ENABLE;
1045 	v |= GEM_MAC_RX_STRIP_CRC;
1046 	sc->sc_mac_rxcfg = v;
1047 	/*
1048 	 * Clear the RX filter and reprogram it.  This will also set the
1049 	 * current RX MAC configuration and enable it.
1050 	 */
1051 	gem_setladrf(sc);
1052 
1053 	/* step 13.  TX_MAC Configuration Register */
1054 	v = GEM_READ_4(sc, GEM_MAC_TX_CONFIG);
1055 	v |= GEM_MAC_TX_ENABLE;
1056 	(void)gem_disable_tx(sc);
1057 	GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, v);
1058 
1059 	/* step 14.  Issue Transmit Pending command. */
1060 
1061 	/* step 15.  Give the receiver a swift kick. */
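	/*
	 * The RX ring was completely filled by gem_meminit(); as described
	 * in gem_rint(), the kick register is kept a multiple of 4 and one
	 * descriptor batch behind the fill point, hence GEM_NRXDESC - 4.
	 */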
1062 	GEM_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
1063 
1064 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1065 	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1066 
1067 	mii_mediachg(sc->sc_mii);
1068 
1069 	/* Start the one second timer. */
1070 	sc->sc_wdog_timer = 0;
1071 	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
1072 }
1073 
1074 static int
1075 gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
1076 {
1077 	bus_dma_segment_t txsegs[GEM_NTXSEGS];
1078 	struct gem_txsoft *txs;
1079 	struct ip *ip;
1080 	struct mbuf *m;
1081 	uint64_t cflags, flags;
1082 	int error, nexttx, nsegs, offset, seg;
1083 
1084 	GEM_LOCK_ASSERT(sc, MA_OWNED);
1085 
1086 	/* Get a work queue entry. */
1087 	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1088 		/* Ran out of descriptors. */
1089 		return (ENOBUFS);
1090 	}
1091 
1092 	cflags = 0;
1093 	if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
1094 		if (M_WRITABLE(*m_head) == 0) {
1095 			m = m_dup(*m_head, M_NOWAIT);
1096 			m_freem(*m_head);
1097 			*m_head = m;
1098 			if (m == NULL)
1099 				return (ENOBUFS);
1100 		}
1101 		offset = sizeof(struct ether_header);
1102 		m = m_pullup(*m_head, offset + sizeof(struct ip));
1103 		if (m == NULL) {
1104 			*m_head = NULL;
1105 			return (ENOBUFS);
1106 		}
1107 		ip = (struct ip *)(mtod(m, caddr_t) + offset);
1108 		offset += (ip->ip_hl << 2);
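		/*
		 * `offset' now points at the start of the TCP/UDP header and
		 * csum_data holds the offset of the checksum field within
		 * that header, so tell the chip where to start summing and
		 * where to stuff the result.
		 */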
1109 		cflags = offset << GEM_TD_CXSUM_STARTSHFT |
1110 		    ((offset + m->m_pkthdr.csum_data) <<
1111 		    GEM_TD_CXSUM_STUFFSHFT) | GEM_TD_CXSUM_ENABLE;
1112 		*m_head = m;
1113 	}
1114 
1115 	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
1116 	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1117 	if (error == EFBIG) {
1118 		m = m_collapse(*m_head, M_NOWAIT, GEM_NTXSEGS);
1119 		if (m == NULL) {
1120 			m_freem(*m_head);
1121 			*m_head = NULL;
1122 			return (ENOBUFS);
1123 		}
1124 		*m_head = m;
1125 		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
1126 		    txs->txs_dmamap, *m_head, txsegs, &nsegs,
1127 		    BUS_DMA_NOWAIT);
1128 		if (error != 0) {
1129 			m_freem(*m_head);
1130 			*m_head = NULL;
1131 			return (error);
1132 		}
1133 	} else if (error != 0)
1134 		return (error);
1135 	/* If nsegs is wrong then the stack is corrupt. */
1136 	KASSERT(nsegs <= GEM_NTXSEGS,
1137 	    ("%s: too many DMA segments (%d)", __func__, nsegs));
1138 	if (nsegs == 0) {
1139 		m_freem(*m_head);
1140 		*m_head = NULL;
1141 		return (EIO);
1142 	}
1143 
1144 	/*
1145 	 * Ensure we have enough descriptors free to describe
1146 	 * the packet.  Note, we always reserve one descriptor
1147 	 * at the end of the ring as a termination point, in
1148 	 * order to prevent wrap-around.
1149 	 */
1150 	if (nsegs > sc->sc_txfree - 1) {
1151 		txs->txs_ndescs = 0;
1152 		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1153 		return (ENOBUFS);
1154 	}
1155 
1156 	txs->txs_ndescs = nsegs;
1157 	txs->txs_firstdesc = sc->sc_txnext;
1158 	nexttx = txs->txs_firstdesc;
1159 	for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
1160 #ifdef GEM_DEBUG
1161 		CTR6(KTR_GEM,
1162 		    "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)",
1163 		    __func__, seg, nexttx, txsegs[seg].ds_len,
1164 		    txsegs[seg].ds_addr, htole64(txsegs[seg].ds_addr));
1165 #endif
1166 		sc->sc_txdescs[nexttx].gd_addr = htole64(txsegs[seg].ds_addr);
1167 		KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
1168 		    ("%s: segment size too large!", __func__));
1169 		flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
1170 		sc->sc_txdescs[nexttx].gd_flags = htole64(flags | cflags);
1171 		txs->txs_lastdesc = nexttx;
1172 	}
1173 
1174 	/* Set EOP on the last descriptor. */
1175 #ifdef GEM_DEBUG
1176 	CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d",
1177 	    __func__, seg, nexttx);
1178 #endif
1179 	sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
1180 	    htole64(GEM_TD_END_OF_PACKET);
1181 
1182 	/* Lastly set SOP on the first descriptor. */
1183 #ifdef GEM_DEBUG
1184 	CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d",
1185 	    __func__, seg, nexttx);
1186 #endif
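	/*
	 * TX completions are only signalled via descriptors carrying
	 * GEM_TD_INTERRUPT_ME (plus GEM_INTR_TX_EMPTY), so request such an
	 * interrupt every GEM_NTXSEGS * 2 / 3 packets (tracked in sc_txwin)
	 * to reclaim descriptors without excessive interrupt load.
	 */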
1187 	if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
1188 		sc->sc_txwin = 0;
1189 		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1190 		    htole64(GEM_TD_INTERRUPT_ME | GEM_TD_START_OF_PACKET);
1191 	} else
1192 		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1193 		    htole64(GEM_TD_START_OF_PACKET);
1194 
1195 	/* Sync the DMA map. */
1196 	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1197 	    BUS_DMASYNC_PREWRITE);
1198 
1199 #ifdef GEM_DEBUG
1200 	CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
1201 	    __func__, txs->txs_firstdesc, txs->txs_lastdesc,
1202 	    txs->txs_ndescs);
1203 #endif
1204 	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1205 	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1206 	txs->txs_mbuf = *m_head;
1207 
1208 	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
1209 	sc->sc_txfree -= txs->txs_ndescs;
1210 
1211 	return (0);
1212 }
1213 
1214 static void
1215 gem_init_regs(struct gem_softc *sc)
1216 {
1217 	const u_char *laddr = if_getlladdr(sc->sc_ifp);
1218 
1219 	GEM_LOCK_ASSERT(sc, MA_OWNED);
1220 
1221 	/* These registers are not cleared on reset. */
1222 	if ((sc->sc_flags & GEM_INITED) == 0) {
1223 		/* magic values */
1224 		GEM_WRITE_4(sc, GEM_MAC_IPG0, 0);
1225 		GEM_WRITE_4(sc, GEM_MAC_IPG1, 8);
1226 		GEM_WRITE_4(sc, GEM_MAC_IPG2, 4);
1227 
1228 		/* min frame length */
1229 		GEM_WRITE_4(sc, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
1230 		/* max frame length and max burst size */
1231 		GEM_WRITE_4(sc, GEM_MAC_MAC_MAX_FRAME,
1232 		    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));
1233 
1234 		/* more magic values */
1235 		GEM_WRITE_4(sc, GEM_MAC_PREAMBLE_LEN, 0x7);
1236 		GEM_WRITE_4(sc, GEM_MAC_JAM_SIZE, 0x4);
1237 		GEM_WRITE_4(sc, GEM_MAC_ATTEMPT_LIMIT, 0x10);
1238 		GEM_WRITE_4(sc, GEM_MAC_CONTROL_TYPE, 0x8808);
1239 
1240 		/* random number seed */
1241 		GEM_WRITE_4(sc, GEM_MAC_RANDOM_SEED,
1242 		    ((laddr[5] << 8) | laddr[4]) & 0x3ff);
1243 
1244 		/* secondary MAC address: 0:0:0:0:0:0 */
1245 		GEM_WRITE_4(sc, GEM_MAC_ADDR3, 0);
1246 		GEM_WRITE_4(sc, GEM_MAC_ADDR4, 0);
1247 		GEM_WRITE_4(sc, GEM_MAC_ADDR5, 0);
1248 
1249 		/* MAC control address: 01:80:c2:00:00:01 */
1250 		GEM_WRITE_4(sc, GEM_MAC_ADDR6, 0x0001);
1251 		GEM_WRITE_4(sc, GEM_MAC_ADDR7, 0xc200);
1252 		GEM_WRITE_4(sc, GEM_MAC_ADDR8, 0x0180);
1253 
1254 		/* MAC filter address: 0:0:0:0:0:0 */
1255 		GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER0, 0);
1256 		GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER1, 0);
1257 		GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER2, 0);
1258 		GEM_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK1_2, 0);
1259 		GEM_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK0, 0);
1260 
1261 		sc->sc_flags |= GEM_INITED;
1262 	}
1263 
1264 	/* Counters need to be zeroed. */
1265 	GEM_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
1266 	GEM_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
1267 	GEM_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
1268 	GEM_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
1269 	GEM_WRITE_4(sc, GEM_MAC_DEFER_TMR_CNT, 0);
1270 	GEM_WRITE_4(sc, GEM_MAC_PEAK_ATTEMPTS, 0);
1271 	GEM_WRITE_4(sc, GEM_MAC_RX_FRAME_COUNT, 0);
1272 	GEM_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
1273 	GEM_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
1274 	GEM_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
1275 	GEM_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);
1276 
1277 	/* Set XOFF PAUSE time. */
1278 	GEM_WRITE_4(sc, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
1279 
1280 	/* Set the station address. */
1281 	GEM_WRITE_4(sc, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
1282 	GEM_WRITE_4(sc, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
1283 	GEM_WRITE_4(sc, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);
1284 
1285 	/* Enable MII outputs. */
1286 	GEM_WRITE_4(sc, GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA);
1287 }
1288 
1289 static void
1290 gem_start(if_t ifp)
1291 {
1292 	struct gem_softc *sc = if_getsoftc(ifp);
1293 
1294 	GEM_LOCK(sc);
1295 	gem_start_locked(ifp);
1296 	GEM_UNLOCK(sc);
1297 }
1298 
1299 static inline void
1300 gem_txkick(struct gem_softc *sc)
1301 {
1302 
1303 	/*
1304 	 * Update the TX kick register.  This register has to point to the
1305 	 * descriptor after the last valid one and for optimum performance
1306 	 * should be incremented in multiples of 4 (the DMA engine fetches/
1307 	 * updates descriptors in batches of 4).
1308 	 */
1309 #ifdef GEM_DEBUG
1310 	CTR3(KTR_GEM, "%s: %s: kicking TX %d",
1311 	    device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
1312 #endif
1313 	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1314 	GEM_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext);
1315 }
1316 
1317 static void
1318 gem_start_locked(if_t ifp)
1319 {
1320 	struct gem_softc *sc = if_getsoftc(ifp);
1321 	struct mbuf *m;
1322 	int kicked, ntx;
1323 
1324 	GEM_LOCK_ASSERT(sc, MA_OWNED);
1325 
1326 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1327 	    IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
1328 		return;
1329 
1330 #ifdef GEM_DEBUG
1331 	CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
1332 	    device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
1333 	    sc->sc_txnext);
1334 #endif
1335 	ntx = 0;
1336 	kicked = 0;
1337 	for (; !if_sendq_empty(ifp) && sc->sc_txfree > 1;) {
1338 		m = if_dequeue(ifp);
1339 		if (m == NULL)
1340 			break;
1341 		if (gem_load_txmbuf(sc, &m) != 0) {
1342 			if (m == NULL)
1343 				break;
1344 			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1345 			if_sendq_prepend(ifp, m);
1346 			break;
1347 		}
1348 		if ((sc->sc_txnext % 4) == 0) {
1349 			gem_txkick(sc);
1350 			kicked = 1;
1351 		} else
1352 			kicked = 0;
1353 		ntx++;
1354 		BPF_MTAP(ifp, m);
1355 	}
1356 
1357 	if (ntx > 0) {
1358 		if (kicked == 0)
1359 			gem_txkick(sc);
1360 #ifdef GEM_DEBUG
1361 		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
1362 		    device_get_name(sc->sc_dev), sc->sc_txnext);
1363 #endif
1364 
1365 		/* Set a watchdog timer in case the chip flakes out. */
1366 		sc->sc_wdog_timer = 5;
1367 #ifdef GEM_DEBUG
1368 		CTR3(KTR_GEM, "%s: %s: watchdog %d",
1369 		    device_get_name(sc->sc_dev), __func__,
1370 		    sc->sc_wdog_timer);
1371 #endif
1372 	}
1373 }
1374 
1375 static void
1376 gem_tint(struct gem_softc *sc)
1377 {
1378 	if_t ifp = sc->sc_ifp;
1379 	struct gem_txsoft *txs;
1380 	int progress;
1381 	uint32_t txlast;
1382 #ifdef GEM_DEBUG
1383 	int i;
1384 
1385 	GEM_LOCK_ASSERT(sc, MA_OWNED);
1386 
1387 	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1388 #endif
1389 
1390 	/*
1391 	 * Go through our TX list and free mbufs for those
1392 	 * frames that have been transmitted.
1393 	 */
1394 	progress = 0;
1395 	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
1396 	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1397 #ifdef GEM_DEBUG
1398 		if ((if_getflags(ifp) & IFF_DEBUG) != 0) {
1399 			printf("    txsoft %p transmit chain:\n", txs);
1400 			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
1401 				printf("descriptor %d: ", i);
1402 				printf("gd_flags: 0x%016llx\t",
1403 				    (long long)le64toh(
1404 				    sc->sc_txdescs[i].gd_flags));
1405 				printf("gd_addr: 0x%016llx\n",
1406 				    (long long)le64toh(
1407 				    sc->sc_txdescs[i].gd_addr));
1408 				if (i == txs->txs_lastdesc)
1409 					break;
1410 			}
1411 		}
1412 #endif
1413 
1414 		/*
1415 		 * In theory, we could harvest some descriptors before
1416 		 * the ring is empty, but that's a bit complicated.
1417 		 *
1418 		 * GEM_TX_COMPLETION points to the last descriptor
1419 		 * processed + 1.
1420 		 */
1421 		txlast = GEM_READ_4(sc, GEM_TX_COMPLETION);
1422 #ifdef GEM_DEBUG
1423 		CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
1424 		    "txs->txs_lastdesc = %d, txlast = %d",
1425 		    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
1426 #endif
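		/*
		 * The packet is still in flight if the completion pointer
		 * lies within its descriptor span; the second case handles
		 * a span that wraps around the end of the ring.
		 */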
1427 		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
1428 			if ((txlast >= txs->txs_firstdesc) &&
1429 			    (txlast <= txs->txs_lastdesc))
1430 				break;
1431 		} else {
1432 			/* Ick -- this command wraps. */
1433 			if ((txlast >= txs->txs_firstdesc) ||
1434 			    (txlast <= txs->txs_lastdesc))
1435 				break;
1436 		}
1437 
1438 #ifdef GEM_DEBUG
1439 		CTR1(KTR_GEM, "%s: releasing a descriptor", __func__);
1440 #endif
1441 		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1442 
1443 		sc->sc_txfree += txs->txs_ndescs;
1444 
1445 		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1446 		    BUS_DMASYNC_POSTWRITE);
1447 		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1448 		if (txs->txs_mbuf != NULL) {
1449 			m_freem(txs->txs_mbuf);
1450 			txs->txs_mbuf = NULL;
1451 		}
1452 
1453 		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1454 
1455 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1456 		progress = 1;
1457 	}
1458 
1459 #ifdef GEM_DEBUG
1460 	CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx "
1461 	    "GEM_TX_COMPLETION %x",
1462 	    __func__, GEM_READ_4(sc, GEM_TX_STATE_MACHINE),
1463 	    ((long long)GEM_READ_4(sc, GEM_TX_DATA_PTR_HI) << 32) |
1464 	    GEM_READ_4(sc, GEM_TX_DATA_PTR_LO),
1465 	    GEM_READ_4(sc, GEM_TX_COMPLETION));
1466 #endif
1467 
1468 	if (progress) {
1469 		if (sc->sc_txfree == GEM_NTXDESC - 1)
1470 			sc->sc_txwin = 0;
1471 
1472 		/*
1473 		 * We freed some descriptors, so reset IFF_DRV_OACTIVE
1474 		 * and restart.
1475 		 */
1476 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1477 		if (STAILQ_EMPTY(&sc->sc_txdirtyq))
1478 		    sc->sc_wdog_timer = 0;
1479 		gem_start_locked(ifp);
1480 	}
1481 
1482 #ifdef GEM_DEBUG
1483 	CTR3(KTR_GEM, "%s: %s: watchdog %d",
1484 	    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
1485 #endif
1486 }
1487 
1488 #ifdef GEM_RINT_TIMEOUT
1489 static void
1490 gem_rint_timeout(void *arg)
1491 {
1492 	struct gem_softc *sc = arg;
1493 
1494 	GEM_LOCK_ASSERT(sc, MA_OWNED);
1495 
1496 	gem_rint(sc);
1497 }
1498 #endif
1499 
1500 static void
1501 gem_rint(struct gem_softc *sc)
1502 {
1503 	if_t ifp = sc->sc_ifp;
1504 	struct mbuf *m;
1505 	uint64_t rxstat;
1506 	uint32_t rxcomp;
1507 
1508 	GEM_LOCK_ASSERT(sc, MA_OWNED);
1509 
1510 #ifdef GEM_RINT_TIMEOUT
1511 	callout_stop(&sc->sc_rx_ch);
1512 #endif
1513 #ifdef GEM_DEBUG
1514 	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1515 #endif
1516 
1517 	/*
1518 	 * Read the completion register once.  This limits
1519 	 * how long the following loop can execute.
1520 	 */
1521 	rxcomp = GEM_READ_4(sc, GEM_RX_COMPLETION);
1522 #ifdef GEM_DEBUG
1523 	CTR3(KTR_GEM, "%s: sc->sc_rxptr %d, complete %d",
1524 	    __func__, sc->sc_rxptr, rxcomp);
1525 #endif
1526 	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1527 	for (; sc->sc_rxptr != rxcomp;) {
1528 		m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
1529 		rxstat = le64toh(sc->sc_rxdescs[sc->sc_rxptr].gd_flags);
1530 
1531 		if (rxstat & GEM_RD_OWN) {
1532 #ifdef GEM_RINT_TIMEOUT
1533 			/*
1534 			 * The descriptor is still marked as owned, although
1535 			 * it is supposed to have completed.  This has been
1536 			 * observed on some machines.  Just exiting here
1537 			 * might leave the packet sitting around until another
1538 			 * one arrives to trigger a new interrupt, which is
1539 			 * generally undesirable, so set up a timeout.
1540 			 */
1541 			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
1542 			    gem_rint_timeout, sc);
1543 #endif
1544 			m = NULL;
1545 			goto kickit;
1546 		}
1547 
1548 		if (rxstat & GEM_RD_BAD_CRC) {
1549 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1550 			device_printf(sc->sc_dev, "receive error: CRC error\n");
1551 			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
1552 			m = NULL;
1553 			goto kickit;
1554 		}
1555 
1556 #ifdef GEM_DEBUG
1557 		if ((if_getflags(ifp) & IFF_DEBUG) != 0) {
1558 			printf("    rxsoft %p descriptor %d: ",
1559 			    &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr);
1560 			printf("gd_flags: 0x%016llx\t",
1561 			    (long long)le64toh(
1562 			    sc->sc_rxdescs[sc->sc_rxptr].gd_flags));
1563 			printf("gd_addr: 0x%016llx\n",
1564 			    (long long)le64toh(
1565 			    sc->sc_rxdescs[sc->sc_rxptr].gd_addr));
1566 		}
1567 #endif
1568 
1569 		/*
1570 		 * Allocate a new mbuf cluster.  If that fails, we are
1571 		 * out of memory, and must drop the packet and recycle
1572 		 * the buffer that's already attached to this descriptor.
1573 		 */
1574 		if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) {
1575 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1576 			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
1577 			m = NULL;
1578 		}
1579 
1580  kickit:
1581 		/*
1582 		 * Update the RX kick register.  This register has to point
1583 		 * to the descriptor after the last valid one (before the
1584 		 * current batch) and for optimum performance should be
1585 		 * incremented in multiples of 4 (the DMA engine fetches/
1586 		 * updates descriptors in batches of 4).
1587 		 */
1588 		sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
1589 		if ((sc->sc_rxptr % 4) == 0) {
1590 			GEM_CDSYNC(sc,
1591 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1592 			GEM_WRITE_4(sc, GEM_RX_KICK,
1593 			    (sc->sc_rxptr + GEM_NRXDESC - 4) &
1594 			    GEM_NRXDESC_MASK);
1595 		}
1596 
1597 		if (m == NULL) {
1598 			if (rxstat & GEM_RD_OWN)
1599 				break;
1600 			continue;
1601 		}
1602 
1603 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1604 		m->m_data += ETHER_ALIGN; /* first byte offset */
1605 		m->m_pkthdr.rcvif = ifp;
1606 		m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat);
1607 
1608 		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
1609 			gem_rxcksum(m, rxstat);
1610 
1611 		/* Pass it on. */
1612 		GEM_UNLOCK(sc);
1613 		if_input(ifp, m);
1614 		GEM_LOCK(sc);
1615 	}
1616 
1617 #ifdef GEM_DEBUG
1618 	CTR3(KTR_GEM, "%s: done sc->sc_rxptr %d, complete %d", __func__,
1619 	    sc->sc_rxptr, GEM_READ_4(sc, GEM_RX_COMPLETION));
1620 #endif
1621 }
1622 
1623 static int
1624 gem_add_rxbuf(struct gem_softc *sc, int idx)
1625 {
1626 	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
1627 	struct mbuf *m;
1628 	bus_dma_segment_t segs[1];
1629 	int error, nsegs;
1630 
1631 	GEM_LOCK_ASSERT(sc, MA_OWNED);
1632 
1633 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1634 	if (m == NULL)
1635 		return (ENOBUFS);
1636 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1637 
1638 #ifdef GEM_DEBUG
1639 	/* Bzero the packet to check DMA. */
1640 	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
1641 #endif
1642 
1643 	if (rxs->rxs_mbuf != NULL) {
1644 		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
1645 		    BUS_DMASYNC_POSTREAD);
1646 		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
1647 	}
1648 
1649 	error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
1650 	    m, segs, &nsegs, BUS_DMA_NOWAIT);
1651 	if (error != 0) {
1652 		device_printf(sc->sc_dev,
1653 		    "cannot load RX DMA map %d, error = %d\n", idx, error);
1654 		m_freem(m);
1655 		return (error);
1656 	}
1657 	/* If nsegs is wrong then the stack is corrupt. */
1658 	KASSERT(nsegs == 1,
1659 	    ("%s: too many DMA segments (%d)", __func__, nsegs));
1660 	rxs->rxs_mbuf = m;
1661 	rxs->rxs_paddr = segs[0].ds_addr;
1662 
1663 	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
1664 	    BUS_DMASYNC_PREREAD);
1665 
1666 	GEM_INIT_RXDESC(sc, idx);
1667 
1668 	return (0);
1669 }
1670 
1671 static void
1672 gem_eint(struct gem_softc *sc, u_int status)
1673 {
1674 
1675 	if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
1676 	if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
1677 		gem_reset_rxdma(sc);
1678 		return;
1679 	}
1680 
1681 	device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status);
1682 	if ((status & GEM_INTR_BERR) != 0) {
1683 		printf(", PCI bus error 0x%x",
1684 		    GEM_READ_4(sc, GEM_PCI_ERROR_STATUS));
1685 	}
1686 	printf("\n");
1687 }
1688 
1689 void
1690 gem_intr(void *v)
1691 {
1692 	struct gem_softc *sc = v;
1693 	uint32_t status, status2;
1694 
1695 	GEM_LOCK(sc);
1696 	status = GEM_READ_4(sc, GEM_STATUS);
1697 
1698 #ifdef GEM_DEBUG
1699 	CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
1700 	    device_get_name(sc->sc_dev), __func__,
1701 	    (status >> GEM_STATUS_TX_COMPLETION_SHFT), (u_int)status);
1702 
1703 	/*
1704 	 * PCS interrupts must be cleared, otherwise no traffic is passed!
1705 	 */
1706 	if ((status & GEM_INTR_PCS) != 0) {
1707 		status2 =
1708 		    GEM_READ_4(sc, GEM_MII_INTERRUP_STATUS) |
1709 		    GEM_READ_4(sc, GEM_MII_INTERRUP_STATUS);
1710 		if ((status2 & GEM_MII_INTERRUP_LINK) != 0)
1711 			device_printf(sc->sc_dev,
1712 			    "%s: PCS link status changed\n", __func__);
1713 	}
1714 	if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
1715 		status2 = GEM_READ_4(sc, GEM_MAC_CONTROL_STATUS);
1716 		if ((status2 & GEM_MAC_PAUSED) != 0)
1717 			device_printf(sc->sc_dev,
1718 			    "%s: PAUSE received (PAUSE time %d slots)\n",
1719 			    __func__, GEM_MAC_PAUSE_TIME(status2));
1720 		if ((status2 & GEM_MAC_PAUSE) != 0)
1721 			device_printf(sc->sc_dev,
1722 			    "%s: transitioned to PAUSE state\n", __func__);
1723 		if ((status2 & GEM_MAC_RESUME) != 0)
1724 			device_printf(sc->sc_dev,
1725 			    "%s: transitioned to non-PAUSE state\n", __func__);
1726 	}
1727 	if ((status & GEM_INTR_MIF) != 0)
1728 		device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
1729 #endif
1730 
1731 	if (__predict_false(status &
1732 	    (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0)
1733 		gem_eint(sc, status);
1734 
1735 	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
1736 		gem_rint(sc);
1737 
1738 	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
1739 		gem_tint(sc);
1740 
1741 	if (__predict_false((status & GEM_INTR_TX_MAC) != 0)) {
1742 		status2 = GEM_READ_4(sc, GEM_MAC_TX_STATUS);
1743 		if ((status2 &
1744 		    ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
1745 		    GEM_MAC_TX_PEAK_EXP)) != 0)
1746 			device_printf(sc->sc_dev,
1747 			    "MAC TX fault, status %x\n", status2);
1748 		if ((status2 &
1749 		    (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0) {
1750 			if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
1751 			if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING);
1752 			gem_init_locked(sc);
1753 		}
1754 	}
1755 	if (__predict_false((status & GEM_INTR_RX_MAC) != 0)) {
1756 		status2 = GEM_READ_4(sc, GEM_MAC_RX_STATUS);
1757 		/*
1758 		 * At least with GEM_SUN_GEM revisions, GEM_MAC_RX_OVERFLOW
1759 		 * happens often due to a silicon bug, so we handle it silently.
1760 		 * Moreover, it's likely that the receiver has hung, so we
1761 		 * reset it.
1762 		 */
1763 		if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) {
1764 			if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
1765 			gem_reset_rxdma(sc);
1766 		} else if ((status2 &
1767 		    ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0)
1768 			device_printf(sc->sc_dev,
1769 			    "MAC RX fault, status %x\n", status2);
1770 	}
1771 	GEM_UNLOCK(sc);
1772 }
1773 
1774 static int
1775 gem_watchdog(struct gem_softc *sc)
1776 {
1777 	if_t ifp = sc->sc_ifp;
1778 
1779 	GEM_LOCK_ASSERT(sc, MA_OWNED);
1780 
1781 #ifdef GEM_DEBUG
1782 	CTR4(KTR_GEM,
1783 	    "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x",
1784 	    __func__, GEM_READ_4(sc, GEM_RX_CONFIG),
1785 	    GEM_READ_4(sc, GEM_MAC_RX_STATUS),
1786 	    GEM_READ_4(sc, GEM_MAC_RX_CONFIG));
1787 	CTR4(KTR_GEM,
1788 	    "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x",
1789 	    __func__, GEM_READ_4(sc, GEM_TX_CONFIG),
1790 	    GEM_READ_4(sc, GEM_MAC_TX_STATUS),
1791 	    GEM_READ_4(sc, GEM_MAC_TX_CONFIG));
1792 #endif
1793 
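	/*
	 * sc_wdog_timer == 0 means the watchdog is disarmed; otherwise count
	 * down one tick here and only declare a timeout once the counter
	 * reaches zero.
	 */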
1794 	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
1795 		return (0);
1796 
1797 	if ((sc->sc_flags & GEM_LINK) != 0)
1798 		device_printf(sc->sc_dev, "device timeout\n");
1799 	else if (bootverbose)
1800 		device_printf(sc->sc_dev, "device timeout (no link)\n");
1801 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1802 
1803 	/* Try to get more packets going. */
1804 	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1805 	gem_init_locked(sc);
1806 	gem_start_locked(ifp);
1807 	return (EJUSTRETURN);
1808 }
1809 
1810 static void
1811 gem_mifinit(struct gem_softc *sc)
1812 {
1813 
1814 	/* Configure the MIF in frame mode. */
1815 	GEM_WRITE_4(sc, GEM_MIF_CONFIG,
1816 	    GEM_READ_4(sc, GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA);
1817 	GEM_BARRIER(sc, GEM_MIF_CONFIG, 4,
1818 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1819 }
1820 
1821 /*
1822  * MII interface
1823  *
1824  * The MII interface supports at least three different operating modes:
1825  *
1826  * Bitbang mode is implemented using data, clock and output enable registers.
1827  *
1828  * Frame mode is implemented by loading a complete frame into the frame
1829  * register and polling the valid bit for completion.
1830  *
1831  * Polling mode uses the frame register but completion is indicated by
1832  * an interrupt.
1833  *
1834  */
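/*
 * As an illustrative example of frame mode, reading MII_BMSR (register 1)
 * from PHY 0 composes GEM_MIF_FRAME_READ | (0 << GEM_MIF_PHY_SHIFT) |
 * (1 << GEM_MIF_REG_SHIFT), writes that word to GEM_MIF_FRAME and then
 * polls for GEM_MIF_FRAME_TA0, as gem_mii_readreg() below does.
 */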
1835 int
1836 gem_mii_readreg(device_t dev, int phy, int reg)
1837 {
1838 	struct gem_softc *sc;
1839 	int n;
1840 	uint32_t v;
1841 
1842 #ifdef GEM_DEBUG_PHY
1843 	printf("%s: phy %d reg %d\n", __func__, phy, reg);
1844 #endif
1845 
1846 	sc = device_get_softc(dev);
1847 	if ((sc->sc_flags & GEM_SERDES) != 0) {
1848 		switch (reg) {
1849 		case MII_BMCR:
1850 			reg = GEM_MII_CONTROL;
1851 			break;
1852 		case MII_BMSR:
1853 			reg = GEM_MII_STATUS;
1854 			break;
1855 		case MII_PHYIDR1:
1856 		case MII_PHYIDR2:
1857 			return (0);
1858 		case MII_ANAR:
1859 			reg = GEM_MII_ANAR;
1860 			break;
1861 		case MII_ANLPAR:
1862 			reg = GEM_MII_ANLPAR;
1863 			break;
1864 		case MII_EXTSR:
1865 			return (EXTSR_1000XFDX | EXTSR_1000XHDX);
1866 		default:
1867 			device_printf(sc->sc_dev,
1868 			    "%s: unhandled register %d\n", __func__, reg);
1869 			return (0);
1870 		}
1871 		return (GEM_READ_4(sc, reg));
1872 	}
1873 
1874 	/* Construct the frame command. */
1875 	v = GEM_MIF_FRAME_READ |
1876 	    (phy << GEM_MIF_PHY_SHIFT) |
1877 	    (reg << GEM_MIF_REG_SHIFT);
1878 
1879 	GEM_WRITE_4(sc, GEM_MIF_FRAME, v);
1880 	GEM_BARRIER(sc, GEM_MIF_FRAME, 4,
1881 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1882 	for (n = 0; n < 100; n++) {
1883 		DELAY(1);
1884 		v = GEM_READ_4(sc, GEM_MIF_FRAME);
1885 		if (v & GEM_MIF_FRAME_TA0)
1886 			return (v & GEM_MIF_FRAME_DATA);
1887 	}
1888 
1889 	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
1890 	return (0);
1891 }
1892 
1893 int
1894 gem_mii_writereg(device_t dev, int phy, int reg, int val)
1895 {
1896 	struct gem_softc *sc;
1897 	int n;
1898 	uint32_t v;
1899 
1900 #ifdef GEM_DEBUG_PHY
1901 	printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
1902 #endif
1903 
1904 	sc = device_get_softc(dev);
1905 	if ((sc->sc_flags & GEM_SERDES) != 0) {
1906 		switch (reg) {
1907 		case MII_BMSR:
1908 			reg = GEM_MII_STATUS;
1909 			break;
1910 		case MII_BMCR:
1911 			reg = GEM_MII_CONTROL;
1912 			if ((val & GEM_MII_CONTROL_RESET) == 0)
1913 				break;
1914 			GEM_WRITE_4(sc, GEM_MII_CONTROL, val);
1915 			GEM_BARRIER(sc, GEM_MII_CONTROL, 4,
1916 			    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1917 			if (!gem_bitwait(sc, GEM_MII_CONTROL,
1918 			    GEM_MII_CONTROL_RESET, 0))
1919 				device_printf(sc->sc_dev,
1920 				    "cannot reset PCS\n");
1921 			/* FALLTHROUGH */
1922 		case MII_ANAR:
1923 			GEM_WRITE_4(sc, GEM_MII_CONFIG, 0);
1924 			GEM_BARRIER(sc, GEM_MII_CONFIG, 4,
1925 			    BUS_SPACE_BARRIER_WRITE);
1926 			GEM_WRITE_4(sc, GEM_MII_ANAR, val);
1927 			GEM_BARRIER(sc, GEM_MII_ANAR, 4,
1928 			    BUS_SPACE_BARRIER_WRITE);
1929 			GEM_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
1930 			    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
1931 			GEM_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4,
1932 			    BUS_SPACE_BARRIER_WRITE);
1933 			GEM_WRITE_4(sc, GEM_MII_CONFIG,
1934 			    GEM_MII_CONFIG_ENABLE);
1935 			GEM_BARRIER(sc, GEM_MII_CONFIG, 4,
1936 			    BUS_SPACE_BARRIER_WRITE);
1937 			return (0);
1938 		case MII_ANLPAR:
1939 			reg = GEM_MII_ANLPAR;
1940 			break;
1941 		default:
1942 			device_printf(sc->sc_dev,
1943 			    "%s: unhandled register %d\n", __func__, reg);
1944 			return (0);
1945 		}
1946 		GEM_WRITE_4(sc, reg, val);
1947 		GEM_BARRIER(sc, reg, 4,
1948 		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1949 		return (0);
1950 	}
1951 
1952 	/* Construct the frame command. */
1953 	v = GEM_MIF_FRAME_WRITE |
1954 	    (phy << GEM_MIF_PHY_SHIFT) |
1955 	    (reg << GEM_MIF_REG_SHIFT) |
1956 	    (val & GEM_MIF_FRAME_DATA);
1957 
1958 	GEM_WRITE_4(sc, GEM_MIF_FRAME, v);
1959 	GEM_BARRIER(sc, GEM_MIF_FRAME, 4,
1960 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1961 	for (n = 0; n < 100; n++) {
1962 		DELAY(1);
1963 		v = GEM_READ_4(sc, GEM_MIF_FRAME);
1964 		if (v & GEM_MIF_FRAME_TA0)
1965 			return (1);
1966 	}
1967 
1968 	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
1969 	return (0);
1970 }
1971 
1972 void
1973 gem_mii_statchg(device_t dev)
1974 {
1975 	struct gem_softc *sc;
1976 	int gigabit;
1977 	uint32_t rxcfg, txcfg, v;
1978 
1979 	sc = device_get_softc(dev);
1980 
1981 	GEM_LOCK_ASSERT(sc, MA_OWNED);
1982 
1983 #ifdef GEM_DEBUG
1984 	if ((if_getflags(sc->sc_ifp) & IFF_DEBUG) != 0)
1985 		device_printf(sc->sc_dev, "%s: status change\n", __func__);
1986 #endif
1987 
1988 	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
1989 	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
1990 		sc->sc_flags |= GEM_LINK;
1991 	else
1992 		sc->sc_flags &= ~GEM_LINK;
1993 
1994 	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
1995 	case IFM_1000_SX:
1996 	case IFM_1000_LX:
1997 	case IFM_1000_CX:
1998 	case IFM_1000_T:
1999 		gigabit = 1;
2000 		break;
2001 	default:
2002 		gigabit = 0;
2003 	}
2004 
2005 	/*
2006 	 * The configuration done here corresponds to steps F) and G) and,
2007 	 * as far as enabling the RX and TX MACs goes, also to step H) of
2008 	 * the initialization sequence outlined in section 3.2.1 of the
2009 	 * GEM Gigabit Ethernet ASIC Specification.
2010 	 */
2011 
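	/*
	 * Note that carrier extension is only required for half-duplex
	 * gigabit operation; in full-duplex mode carrier sense and
	 * collisions are ignored instead.
	 */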
2012 	rxcfg = sc->sc_mac_rxcfg;
2013 	rxcfg &= ~GEM_MAC_RX_CARR_EXTEND;
2014 	txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
2015 	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
2016 		txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
2017 	else if (gigabit != 0) {
2018 		rxcfg |= GEM_MAC_RX_CARR_EXTEND;
2019 		txcfg |= GEM_MAC_TX_CARR_EXTEND;
2020 	}
2021 	(void)gem_disable_tx(sc);
2022 	GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, txcfg);
2023 	(void)gem_disable_rx(sc);
2024 	GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, rxcfg);
2025 
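	/*
	 * Program flow control according to the PAUSE options of the
	 * currently active media word.
	 */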
2026 	v = GEM_READ_4(sc, GEM_MAC_CONTROL_CONFIG) &
2027 	    ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
2028 	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
2029 	    IFM_ETH_RXPAUSE) != 0)
2030 		v |= GEM_MAC_CC_RX_PAUSE;
2031 	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
2032 	    IFM_ETH_TXPAUSE) != 0)
2033 		v |= GEM_MAC_CC_TX_PAUSE;
2034 	GEM_WRITE_4(sc, GEM_MAC_CONTROL_CONFIG, v);
2035 
2036 	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
2037 	    gigabit != 0)
2038 		GEM_WRITE_4(sc, GEM_MAC_SLOT_TIME,
2039 		    GEM_MAC_SLOT_TIME_CARR_EXTEND);
2040 	else
2041 		GEM_WRITE_4(sc, GEM_MAC_SLOT_TIME,
2042 		    GEM_MAC_SLOT_TIME_NORMAL);
2043 
2044 	/* XIF Configuration */
2045 	v = GEM_MAC_XIF_LINK_LED;
2046 	v |= GEM_MAC_XIF_TX_MII_ENA;
2047 	if ((sc->sc_flags & GEM_SERDES) == 0) {
2048 		if ((GEM_READ_4(sc, GEM_MIF_CONFIG) &
2049 		    GEM_MIF_CONFIG_PHY_SEL) != 0) {
2050 			/* External MII needs echo disable if half duplex. */
2051 			if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
2052 			    IFM_FDX) == 0)
2053 				v |= GEM_MAC_XIF_ECHO_DISABL;
2054 		} else
2055 			/*
2056 			 * Internal MII needs buffer enable.
2057 			 * XXX buffer enable only makes sense for an
2058 			 * external PHY.
2059 			 */
2060 			v |= GEM_MAC_XIF_MII_BUF_ENA;
2061 	}
2062 	if (gigabit != 0)
2063 		v |= GEM_MAC_XIF_GMII_MODE;
2064 	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
2065 		v |= GEM_MAC_XIF_FDPLX_LED;
2066 	GEM_WRITE_4(sc, GEM_MAC_XIF_CONFIG, v);
2067 
2068 	sc->sc_mac_rxcfg = rxcfg;
2069 	if ((if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING) != 0 &&
2070 	    (sc->sc_flags & GEM_LINK) != 0) {
2071 		GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG,
2072 		    txcfg | GEM_MAC_TX_ENABLE);
2073 		GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG,
2074 		    rxcfg | GEM_MAC_RX_ENABLE);
2075 	}
2076 }
2077 
2078 int
2079 gem_mediachange(if_t ifp)
2080 {
2081 	struct gem_softc *sc = if_getsoftc(ifp);
2082 	int error;
2083 
2084 	/* XXX add support for serial media. */
2085 
2086 	GEM_LOCK(sc);
2087 	error = mii_mediachg(sc->sc_mii);
2088 	GEM_UNLOCK(sc);
2089 	return (error);
2090 }
2091 
2092 void
2093 gem_mediastatus(if_t ifp, struct ifmediareq *ifmr)
2094 {
2095 	struct gem_softc *sc = if_getsoftc(ifp);
2096 
2097 	GEM_LOCK(sc);
2098 	if ((if_getflags(ifp) & IFF_UP) == 0) {
2099 		GEM_UNLOCK(sc);
2100 		return;
2101 	}
2102 
2103 	mii_pollstat(sc->sc_mii);
2104 	ifmr->ifm_active = sc->sc_mii->mii_media_active;
2105 	ifmr->ifm_status = sc->sc_mii->mii_media_status;
2106 	GEM_UNLOCK(sc);
2107 }
2108 
2109 static int
2110 gem_ioctl(if_t ifp, u_long cmd, caddr_t data)
2111 {
2112 	struct gem_softc *sc = if_getsoftc(ifp);
2113 	struct ifreq *ifr = (struct ifreq *)data;
2114 	int error;
2115 
2116 	error = 0;
2117 	switch (cmd) {
2118 	case SIOCSIFFLAGS:
2119 		GEM_LOCK(sc);
2120 		if ((if_getflags(ifp) & IFF_UP) != 0) {
2121 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
2122 			    ((if_getflags(ifp) ^ sc->sc_ifflags) &
2123 			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
2124 				gem_setladrf(sc);
2125 			else
2126 				gem_init_locked(sc);
2127 		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2128 			gem_stop(ifp, 0);
2129 		if ((if_getflags(ifp) & IFF_LINK0) != 0)
2130 			sc->sc_csum_features |= CSUM_UDP;
2131 		else
2132 			sc->sc_csum_features &= ~CSUM_UDP;
2133 		if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
2134 			if_sethwassist(ifp, sc->sc_csum_features);
2135 		sc->sc_ifflags = if_getflags(ifp);
2136 		GEM_UNLOCK(sc);
2137 		break;
2138 	case SIOCADDMULTI:
2139 	case SIOCDELMULTI:
2140 		GEM_LOCK(sc);
2141 		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2142 			gem_setladrf(sc);
2143 		GEM_UNLOCK(sc);
2144 		break;
2145 	case SIOCGIFMEDIA:
2146 	case SIOCSIFMEDIA:
2147 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
2148 		break;
2149 	case SIOCSIFCAP:
2150 		GEM_LOCK(sc);
2151 		if_setcapenable(ifp, ifr->ifr_reqcap);
2152 		if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
2153 			if_sethwassist(ifp, sc->sc_csum_features);
2154 		else
2155 			if_sethwassist(ifp, 0);
2156 		GEM_UNLOCK(sc);
2157 		break;
2158 	default:
2159 		error = ether_ioctl(ifp, cmd, data);
2160 		break;
2161 	}
2162 
2163 	return (error);
2164 }
2165 
2166 static u_int
2167 gem_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2168 {
2169 	uint32_t crc, *hash = arg;
2170 
2171 	crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN);
2172 	/* We just want the 8 most significant bits. */
2173 	crc >>= 24;
2174 	/* Set the corresponding bit in the filter. */
2175 	hash[crc >> 4] |= 1 << (15 - (crc & 15));
2176 
2177 	return (1);
2178 }
2179 
2180 static void
2181 gem_setladrf(struct gem_softc *sc)
2182 {
2183 	if_t ifp = sc->sc_ifp;
2184 	int i;
2185 	uint32_t hash[16];
2186 	uint32_t v;
2187 
2188 	GEM_LOCK_ASSERT(sc, MA_OWNED);
2189 
2190 	/*
2191 	 * Turn off the RX MAC and the hash filter as required by the Sun GEM
2192 	 * programming restrictions.
2193 	 */
2194 	v = sc->sc_mac_rxcfg & ~GEM_MAC_RX_HASH_FILTER;
2195 	GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
2196 	GEM_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
2197 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
2198 	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER |
2199 	    GEM_MAC_RX_ENABLE, 0))
2200 		device_printf(sc->sc_dev,
2201 		    "cannot disable RX MAC or hash filter\n");
2202 
2203 	v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_PROMISC_GRP);
2204 	if ((if_getflags(ifp) & IFF_PROMISC) != 0) {
2205 		v |= GEM_MAC_RX_PROMISCUOUS;
2206 		goto chipit;
2207 	}
2208 	if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
2209 		v |= GEM_MAC_RX_PROMISC_GRP;
2210 		goto chipit;
2211 	}
2212 
2213 	/*
2214 	 * Set up multicast address filter by passing all multicast
2215 	 * addresses through a crc generator, and then using the high
2216 	 * order 8 bits as an index into the 256-bit logical address
2217 	 * filter.  The high order 4 bits select the word, while the
2218 	 * other 4 bits select the bit within the word (where bit 0
2219 	 * is the MSB).
2220 	 */
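	/*
	 * Worked example (illustrative values): if ether_crc32_le() leaves
	 * 0x5a in its top 8 bits, gem_hash_maddr() above ends up with
	 * crc == 0x5a, so crc >> 4 == 5 selects hash[5] and
	 * 15 - (crc & 15) == 5 selects bit 5 of that 16-bit word.
	 */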
2221 
2222 	memset(hash, 0, sizeof(hash));
2223 	if_foreach_llmaddr(ifp, gem_hash_maddr, hash);
2224 
2225 	v |= GEM_MAC_RX_HASH_FILTER;
2226 
2227 	/* Now load the hash table into the chip (if we are using it). */
2228 	for (i = 0; i < 16; i++)
2229 		GEM_WRITE_4(sc,
2230 		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
2231 		    hash[i]);
2232 
2233  chipit:
2234 	sc->sc_mac_rxcfg = v;
2235 	GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, v | GEM_MAC_RX_ENABLE);
2236 }
2237