xref: /freebsd/sys/dev/stge/if_stge.c (revision 9336e0699bda8a301cd2bfa37106b6ec5e32012e)
1 /*	$NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the NetBSD
21  *	Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Device driver for the Sundance Tech. TC9021 10/100/1000
41  * Ethernet controller.
42  */
43 
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
46 
47 #ifdef HAVE_KERNEL_OPTION_HEADERS
48 #include "opt_device_polling.h"
49 #endif
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/endian.h>
54 #include <sys/mbuf.h>
55 #include <sys/malloc.h>
56 #include <sys/kernel.h>
57 #include <sys/module.h>
58 #include <sys/socket.h>
59 #include <sys/sockio.h>
60 #include <sys/sysctl.h>
61 #include <sys/taskqueue.h>
62 
63 #include <net/bpf.h>
64 #include <net/ethernet.h>
65 #include <net/if.h>
66 #include <net/if_dl.h>
67 #include <net/if_media.h>
68 #include <net/if_types.h>
69 #include <net/if_vlan_var.h>
70 
71 #include <machine/bus.h>
72 #include <machine/resource.h>
73 #include <sys/bus.h>
74 #include <sys/rman.h>
75 
76 #include <dev/mii/mii.h>
77 #include <dev/mii/miivar.h>
78 
79 #include <dev/pci/pcireg.h>
80 #include <dev/pci/pcivar.h>
81 
82 #include <dev/stge/if_stgereg.h>
83 
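/*
 * Checksum offload flags advertised through if_hwassist when the
 * controller revision supports hardware checksumming (see stge_attach()).
 */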
84 #define	STGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
85 
86 MODULE_DEPEND(stge, pci, 1, 1, 1);
87 MODULE_DEPEND(stge, ether, 1, 1, 1);
88 MODULE_DEPEND(stge, miibus, 1, 1, 1);
89 
90 /* "device miibus" required.  See GENERIC if you get errors here. */
91 #include "miibus_if.h"
92 
93 /*
94  * Devices supported by this driver.
95  */
96 static struct stge_product {
97 	uint16_t	stge_vendorid;
98 	uint16_t	stge_deviceid;
99 	const char	*stge_name;
100 } stge_products[] = {
101 	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST1023,
102 	  "Sundance ST-1023 Gigabit Ethernet" },
103 
104 	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST2021,
105 	  "Sundance ST-2021 Gigabit Ethernet" },
106 
107 	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021,
108 	  "Tamarack TC9021 Gigabit Ethernet" },
109 
110 	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021_ALT,
111 	  "Tamarack TC9021 Gigabit Ethernet" },
112 
113 	/*
114 	 * The Sundance sample boards use the Sundance vendor ID,
115 	 * but the Tamarack product ID.
116 	 */
117 	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021,
118 	  "Sundance TC9021 Gigabit Ethernet" },
119 
120 	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021_ALT,
121 	  "Sundance TC9021 Gigabit Ethernet" },
122 
123 	{ VENDOR_DLINK,		DEVICEID_DLINK_DL4000,
124 	  "D-Link DL-4000 Gigabit Ethernet" },
125 
126 	{ VENDOR_ANTARES,	DEVICEID_ANTARES_TC9021,
127 	  "Antares Gigabit Ethernet" }
128 };
129 
130 static int	stge_probe(device_t);
131 static int	stge_attach(device_t);
132 static int	stge_detach(device_t);
133 static int	stge_shutdown(device_t);
134 static int	stge_suspend(device_t);
135 static int	stge_resume(device_t);
136 
137 static int	stge_encap(struct stge_softc *, struct mbuf **);
138 static void	stge_start(struct ifnet *);
139 static void	stge_start_locked(struct ifnet *);
140 static void	stge_watchdog(struct stge_softc *);
141 static int	stge_ioctl(struct ifnet *, u_long, caddr_t);
142 static void	stge_init(void *);
143 static void	stge_init_locked(struct stge_softc *);
144 static void	stge_vlan_setup(struct stge_softc *);
145 static void	stge_stop(struct stge_softc *);
146 static void	stge_start_tx(struct stge_softc *);
147 static void	stge_start_rx(struct stge_softc *);
148 static void	stge_stop_tx(struct stge_softc *);
149 static void	stge_stop_rx(struct stge_softc *);
150 
151 static void	stge_reset(struct stge_softc *, uint32_t);
152 static int	stge_eeprom_wait(struct stge_softc *);
153 static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
154 static void	stge_tick(void *);
155 static void	stge_stats_update(struct stge_softc *);
156 static void	stge_set_filter(struct stge_softc *);
157 static void	stge_set_multi(struct stge_softc *);
158 
159 static void	stge_link_task(void *, int);
160 static void	stge_intr(void *);
161 static __inline int stge_tx_error(struct stge_softc *);
162 static void	stge_txeof(struct stge_softc *);
163 static void	stge_rxeof(struct stge_softc *);
164 static __inline void stge_discard_rxbuf(struct stge_softc *, int);
165 static int	stge_newbuf(struct stge_softc *, int);
166 #ifndef __NO_STRICT_ALIGNMENT
167 static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
168 #endif
169 
170 static void	stge_mii_sync(struct stge_softc *);
171 static void	stge_mii_send(struct stge_softc *, uint32_t, int);
172 static int	stge_mii_readreg(struct stge_softc *, struct stge_mii_frame *);
173 static int	stge_mii_writereg(struct stge_softc *, struct stge_mii_frame *);
174 static int	stge_miibus_readreg(device_t, int, int);
175 static int	stge_miibus_writereg(device_t, int, int, int);
176 static void	stge_miibus_statchg(device_t);
177 static int	stge_mediachange(struct ifnet *);
178 static void	stge_mediastatus(struct ifnet *, struct ifmediareq *);
179 
180 static void	stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
181 static int	stge_dma_alloc(struct stge_softc *);
182 static void	stge_dma_free(struct stge_softc *);
183 static void	stge_dma_wait(struct stge_softc *);
184 static void	stge_init_tx_ring(struct stge_softc *);
185 static int	stge_init_rx_ring(struct stge_softc *);
186 #ifdef DEVICE_POLLING
187 static void	stge_poll(struct ifnet *, enum poll_cmd, int);
188 #endif
189 
190 static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
191 static int	sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
192 static int	sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);
193 
194 static device_method_t stge_methods[] = {
195 	/* Device interface */
196 	DEVMETHOD(device_probe,		stge_probe),
197 	DEVMETHOD(device_attach,	stge_attach),
198 	DEVMETHOD(device_detach,	stge_detach),
199 	DEVMETHOD(device_shutdown,	stge_shutdown),
200 	DEVMETHOD(device_suspend,	stge_suspend),
201 	DEVMETHOD(device_resume,	stge_resume),
202 
203 	/* MII interface */
204 	DEVMETHOD(miibus_readreg,	stge_miibus_readreg),
205 	DEVMETHOD(miibus_writereg,	stge_miibus_writereg),
206 	DEVMETHOD(miibus_statchg,	stge_miibus_statchg),
207 
208 	{ 0, 0 }
209 
210 };
211 
212 static driver_t stge_driver = {
213 	"stge",
214 	stge_methods,
215 	sizeof(struct stge_softc)
216 };
217 
218 static devclass_t stge_devclass;
219 
220 DRIVER_MODULE(stge, pci, stge_driver, stge_devclass, 0, 0);
221 DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0);
222 
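/*
 * The controller's registers are reachable through either an I/O BAR
 * (BAR 0) or a memory BAR (BAR 1); stge_attach() picks whichever the
 * device advertises.  Both specs also grab the shared interrupt line.
 */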
223 static struct resource_spec stge_res_spec_io[] = {
224 	{ SYS_RES_IOPORT,	PCIR_BAR(0),	RF_ACTIVE },
225 	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
226 	{ -1,			0,		0 }
227 };
228 
229 static struct resource_spec stge_res_spec_mem[] = {
230 	{ SYS_RES_MEMORY,	PCIR_BAR(1),	RF_ACTIVE },
231 	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
232 	{ -1,			0,		0 }
233 };
234 
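/*
 * The TC9021 exposes its MII management pins through the PhyCtrl
 * register, so PHY registers are accessed by bit-banging: MII_SET()
 * and MII_CLR() below toggle the clock (PC_MgmtClk), data (PC_MgmtData)
 * and direction (PC_MgmtDir) bits by hand.
 */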
235 #define	MII_SET(x)	\
236 	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) | (x))
237 #define	MII_CLR(x)	\
238 	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) & ~(x))
239 
240 /*
241  * Sync the PHYs by driving the data line high and strobing the clock 32 times.
242  */
243 static void
244 stge_mii_sync(struct stge_softc *sc)
245 {
246 	int i;
247 
248 	MII_SET(PC_MgmtDir | PC_MgmtData);
249 
250 	for (i = 0; i < 32; i++) {
251 		MII_SET(PC_MgmtClk);
252 		DELAY(1);
253 		MII_CLR(PC_MgmtClk);
254 		DELAY(1);
255 	}
256 }
257 
258 /*
259  * Clock a series of bits through the MII.
260  */
261 static void
262 stge_mii_send(struct stge_softc *sc, uint32_t bits, int cnt)
263 {
264 	int i;
265 
266 	MII_CLR(PC_MgmtClk);
267 
268 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
269 		if (bits & i)
270 			MII_SET(PC_MgmtData);
271 		else
272 			MII_CLR(PC_MgmtData);
273 		DELAY(1);
274 		MII_CLR(PC_MgmtClk);
275 		DELAY(1);
276 		MII_SET(PC_MgmtClk);
277 	}
278 }
279 
280 /*
281  * Read a PHY register through the MII.
282  */
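/*
 * A sketch of the IEEE 802.3 clause 22 management frame that the
 * bit-bang helpers shift out, MSB first (assuming the STGE_MII_*
 * constants carry the standard encodings):
 *
 *	<ST:2> <OP:2> <PHYAD:5> <REGAD:5> <TA:2> <DATA:16>
 *
 * For a read, the PHY drives the turnaround and data bits back to us.
 */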
283 static int
284 stge_mii_readreg(struct stge_softc *sc, struct stge_mii_frame *frame)
285 {
286 	int i, ack;
287 
288 	/*
289 	 * Set up frame for RX.
290 	 */
291 	frame->mii_stdelim = STGE_MII_STARTDELIM;
292 	frame->mii_opcode = STGE_MII_READOP;
293 	frame->mii_turnaround = 0;
294 	frame->mii_data = 0;
295 
296 	CSR_WRITE_1(sc, STGE_PhyCtrl, 0 | sc->sc_PhyCtrl);
297 	/*
298 	 * Turn on data xmit.
299 	 */
300 	MII_SET(PC_MgmtDir);
301 
302 	stge_mii_sync(sc);
303 
304 	/*
305 	 * Send command/address info.
306 	 */
307 	stge_mii_send(sc, frame->mii_stdelim, 2);
308 	stge_mii_send(sc, frame->mii_opcode, 2);
309 	stge_mii_send(sc, frame->mii_phyaddr, 5);
310 	stge_mii_send(sc, frame->mii_regaddr, 5);
311 
312 	/* Turn off xmit. */
313 	MII_CLR(PC_MgmtDir);
314 
315 	/* Idle bit */
316 	MII_CLR((PC_MgmtClk | PC_MgmtData));
317 	DELAY(1);
318 	MII_SET(PC_MgmtClk);
319 	DELAY(1);
320 
321 	/* Check for ack */
322 	MII_CLR(PC_MgmtClk);
323 	DELAY(1);
324 	ack = CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData;
325 	MII_SET(PC_MgmtClk);
326 	DELAY(1);
327 
328 	/*
329 	 * Now try reading data bits. If the ack failed, we still
330 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
331 	 */
332 	if (ack) {
333 		for (i = 0; i < 16; i++) {
334 			MII_CLR(PC_MgmtClk);
335 			DELAY(1);
336 			MII_SET(PC_MgmtClk);
337 			DELAY(1);
338 		}
339 		goto fail;
340 	}
341 
342 	for (i = 0x8000; i; i >>= 1) {
343 		MII_CLR(PC_MgmtClk);
344 		DELAY(1);
345 		if (!ack) {
346 			if (CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData)
347 				frame->mii_data |= i;
348 			DELAY(1);
349 		}
350 		MII_SET(PC_MgmtClk);
351 		DELAY(1);
352 	}
353 
354 fail:
355 	MII_CLR(PC_MgmtClk);
356 	DELAY(1);
357 	MII_SET(PC_MgmtClk);
358 	DELAY(1);
359 
360 	if (ack)
361 		return (1);
362 	return (0);
363 }
364 
365 /*
366  * Write to a PHY register through the MII.
367  */
368 static int
369 stge_mii_writereg(struct stge_softc *sc, struct stge_mii_frame *frame)
370 {
371 
372 	/*
373 	 * Set up frame for TX.
374 	 */
375 	frame->mii_stdelim = STGE_MII_STARTDELIM;
376 	frame->mii_opcode = STGE_MII_WRITEOP;
377 	frame->mii_turnaround = STGE_MII_TURNAROUND;
378 
379 	/*
380 	 * Turn on data output.
381 	 */
382 	MII_SET(PC_MgmtDir);
383 
384 	stge_mii_sync(sc);
385 
386 	stge_mii_send(sc, frame->mii_stdelim, 2);
387 	stge_mii_send(sc, frame->mii_opcode, 2);
388 	stge_mii_send(sc, frame->mii_phyaddr, 5);
389 	stge_mii_send(sc, frame->mii_regaddr, 5);
390 	stge_mii_send(sc, frame->mii_turnaround, 2);
391 	stge_mii_send(sc, frame->mii_data, 16);
392 
393 	/* Idle bit. */
394 	MII_SET(PC_MgmtClk);
395 	DELAY(1);
396 	MII_CLR(PC_MgmtClk);
397 	DELAY(1);
398 
399 	/*
400 	 * Turn off xmit.
401 	 */
402 	MII_CLR(PC_MgmtDir);
403 
404 	return (0);
405 }
406 
407 /*
408  * stge_miibus_readreg:	[mii interface function]
409  *
410  *	Read a PHY register on the MII of the TC9021.
411  */
412 static int
413 stge_miibus_readreg(device_t dev, int phy, int reg)
414 {
415 	struct stge_softc *sc;
416 	struct stge_mii_frame frame;
417 	int error;
418 
419 	sc = device_get_softc(dev);
420 
421 	if (reg == STGE_PhyCtrl) {
422 		/* XXX Allow the ip1000phy driver to read the STGE_PhyCtrl register. */
423 		STGE_MII_LOCK(sc);
424 		error = CSR_READ_1(sc, STGE_PhyCtrl);
425 		STGE_MII_UNLOCK(sc);
426 		return (error);
427 	}
428 	bzero(&frame, sizeof(frame));
429 	frame.mii_phyaddr = phy;
430 	frame.mii_regaddr = reg;
431 
432 	STGE_MII_LOCK(sc);
433 	error = stge_mii_readreg(sc, &frame);
434 	STGE_MII_UNLOCK(sc);
435 
436 	if (error != 0) {
437 		/* Don't show errors for PHY probe request */
438 		if (reg != 1)
439 			device_printf(sc->sc_dev, "phy read fail\n");
440 		return (0);
441 	}
442 	return (frame.mii_data);
443 }
444 
445 /*
446  * stge_miibus_writereg:	[mii interface function]
447  *
448  *	Write a PHY register on the MII of the TC9021.
449  */
450 static int
451 stge_miibus_writereg(device_t dev, int phy, int reg, int val)
452 {
453 	struct stge_softc *sc;
454 	struct stge_mii_frame frame;
455 	int error;
456 
457 	sc = device_get_softc(dev);
458 
459 	bzero(&frame, sizeof(frame));
460 	frame.mii_phyaddr = phy;
461 	frame.mii_regaddr = reg;
462 	frame.mii_data = val;
463 
464 	STGE_MII_LOCK(sc);
465 	error = stge_mii_writereg(sc, &frame);
466 	STGE_MII_UNLOCK(sc);
467 
468 	if (error != 0)
469 		device_printf(sc->sc_dev, "phy write fail\n");
470 	return (0);
471 }
472 
473 /*
474  * stge_miibus_statchg:	[mii interface function]
475  *
476  *	Callback from MII layer when media changes.
477  */
478 static void
479 stge_miibus_statchg(device_t dev)
480 {
481 	struct stge_softc *sc;
482 
483 	sc = device_get_softc(dev);
484 	taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task);
485 }
486 
487 /*
488  * stge_mediastatus:	[ifmedia interface function]
489  *
490  *	Get the current interface media status.
491  */
492 static void
493 stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
494 {
495 	struct stge_softc *sc;
496 	struct mii_data *mii;
497 
498 	sc = ifp->if_softc;
499 	mii = device_get_softc(sc->sc_miibus);
500 
501 	mii_pollstat(mii);
502 	ifmr->ifm_status = mii->mii_media_status;
503 	ifmr->ifm_active = mii->mii_media_active;
504 }
505 
506 /*
507  * stge_mediachange:	[ifmedia interface function]
508  *
509  *	Set hardware to newly-selected media.
510  */
511 static int
512 stge_mediachange(struct ifnet *ifp)
513 {
514 	struct stge_softc *sc;
515 	struct mii_data *mii;
516 
517 	sc = ifp->if_softc;
518 	mii = device_get_softc(sc->sc_miibus);
519 	mii_mediachg(mii);
520 
521 	return (0);
522 }
523 
524 static int
525 stge_eeprom_wait(struct stge_softc *sc)
526 {
527 	int i;
528 
529 	for (i = 0; i < STGE_TIMEOUT; i++) {
530 		DELAY(1000);
531 		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
532 			return (0);
533 	}
534 	return (1);
535 }
536 
537 /*
538  * stge_read_eeprom:
539  *
540  *	Read data from the serial EEPROM.
541  */
542 static void
543 stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
544 {
545 
546 	if (stge_eeprom_wait(sc))
547 		device_printf(sc->sc_dev, "EEPROM failed to come ready\n");
548 
549 	CSR_WRITE_2(sc, STGE_EepromCtrl,
550 	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
551 	if (stge_eeprom_wait(sc))
552 		device_printf(sc->sc_dev, "EEPROM read timed out\n");
553 	*data = CSR_READ_2(sc, STGE_EepromData);
554 }
555 
556 
557 static int
558 stge_probe(device_t dev)
559 {
560 	struct stge_product *sp;
561 	int i;
562 	uint16_t vendor, devid;
563 
564 	vendor = pci_get_vendor(dev);
565 	devid = pci_get_device(dev);
566 	sp = stge_products;
567 	for (i = 0; i < sizeof(stge_products)/sizeof(stge_products[0]);
568 	    i++, sp++) {
569 		if (vendor == sp->stge_vendorid &&
570 		    devid == sp->stge_deviceid) {
571 			device_set_desc(dev, sp->stge_name);
572 			return (BUS_PROBE_DEFAULT);
573 		}
574 	}
575 
576 	return (ENXIO);
577 }
578 
579 static int
580 stge_attach(device_t dev)
581 {
582 	struct stge_softc *sc;
583 	struct ifnet *ifp;
584 	uint8_t enaddr[ETHER_ADDR_LEN];
585 	int error, i;
586 	uint16_t cmd;
587 	uint32_t val;
588 
589 	error = 0;
590 	sc = device_get_softc(dev);
591 	sc->sc_dev = dev;
592 
593 	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
594 	    MTX_DEF);
595 	mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF);
596 	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
597 	TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc);
598 
599 	/*
600 	 * Map the device.
601 	 */
602 	pci_enable_busmaster(dev);
603 	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
604 	val = pci_read_config(dev, PCIR_BAR(1), 4);
605 	if ((val & 0x01) != 0)
606 		sc->sc_spec = stge_res_spec_mem;
607 	else {
608 		val = pci_read_config(dev, PCIR_BAR(0), 4);
609 		if ((val & 0x01) == 0) {
610 			device_printf(sc->sc_dev, "couldn't locate IO BAR\n");
611 			error = ENXIO;
612 			goto fail;
613 		}
614 		sc->sc_spec = stge_res_spec_io;
615 	}
616 	error = bus_alloc_resources(dev, sc->sc_spec, sc->sc_res);
617 	if (error != 0) {
618 		device_printf(dev, "couldn't allocate %s resources\n",
619 		    sc->sc_spec == stge_res_spec_mem ? "memory" : "I/O");
620 		goto fail;
621 	}
622 	sc->sc_rev = pci_get_revid(dev);
623 
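	/*
	 * Rx interrupt moderation knobs: rxint_nframe caps how many
	 * frames may be received before an interrupt is raised, and
	 * rxint_dmawait bounds how long the chip may hold that
	 * interrupt off.
	 */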
624 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
625 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
626 	    "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0,
627 	    sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe");
628 
629 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
630 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
631 	    "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0,
632 	    sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait");
633 
634 	/* Pull in device tunables. */
635 	sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
636 	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
637 	    "rxint_nframe", &sc->sc_rxint_nframe);
638 	if (error == 0) {
639 		if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN ||
640 		    sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) {
641 			device_printf(dev, "rxint_nframe value out of range; "
642 			    "using default: %d\n", STGE_RXINT_NFRAME_DEFAULT);
643 			sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
644 		}
645 	}
646 
647 	sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
648 	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
649 	    "rxint_dmawait", &sc->sc_rxint_dmawait);
650 	if (error == 0) {
651 		if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN ||
652 		    sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) {
653 			device_printf(dev, "rxint_dmawait value out of range; "
654 			    "using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT);
655 			sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
656 		}
657 	}
658 
659 	if ((error = stge_dma_alloc(sc)) != 0)
660 		goto fail;
661 
662 	/*
663 	 * Determine if we're copper or fiber.  It affects how we
664 	 * reset the card.
665 	 */
666 	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
667 		sc->sc_usefiber = 1;
668 	else
669 		sc->sc_usefiber = 0;
670 
671 	/* Load LED configuration from EEPROM. */
672 	stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);
673 
674 	/*
675 	 * Reset the chip to a known state.
676 	 */
677 	STGE_LOCK(sc);
678 	stge_reset(sc, STGE_RESET_FULL);
679 	STGE_UNLOCK(sc);
680 
681 	/*
682 	 * Reading the station address from the EEPROM doesn't seem
683 	 * to work, at least on my sample boards.  Instead, since
684 	 * the reset sequence does AutoInit, read it from the station
685 	 * address registers.  For the Sundance ST-1023, the address can
686 	 * only be read from the EEPROM.
687 	 */
688 	if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
689 		uint16_t v;
690 
691 		v = CSR_READ_2(sc, STGE_StationAddress0);
692 		enaddr[0] = v & 0xff;
693 		enaddr[1] = v >> 8;
694 		v = CSR_READ_2(sc, STGE_StationAddress1);
695 		enaddr[2] = v & 0xff;
696 		enaddr[3] = v >> 8;
697 		v = CSR_READ_2(sc, STGE_StationAddress2);
698 		enaddr[4] = v & 0xff;
699 		enaddr[5] = v >> 8;
700 		sc->sc_stge1023 = 0;
701 	} else {
702 		uint16_t myaddr[ETHER_ADDR_LEN / 2];
703 		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
704 			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
705 			    &myaddr[i]);
706 			myaddr[i] = le16toh(myaddr[i]);
707 		}
708 		bcopy(myaddr, enaddr, sizeof(enaddr));
709 		sc->sc_stge1023 = 1;
710 	}
711 
712 	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
713 	if (ifp == NULL) {
714 		device_printf(sc->sc_dev, "failed to if_alloc()\n");
715 		error = ENXIO;
716 		goto fail;
717 	}
718 
719 	ifp->if_softc = sc;
720 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
721 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
722 	ifp->if_ioctl = stge_ioctl;
723 	ifp->if_start = stge_start;
724 	ifp->if_timer = 0;
725 	ifp->if_watchdog = NULL;
726 	ifp->if_init = stge_init;
727 	ifp->if_mtu = ETHERMTU;
728 	ifp->if_snd.ifq_drv_maxlen = STGE_TX_RING_CNT - 1;
729 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
730 	IFQ_SET_READY(&ifp->if_snd);
731 	/* Revision B3 and earlier chips have a checksum bug. */
732 	if (sc->sc_rev >= 0x0c) {
733 		ifp->if_hwassist = STGE_CSUM_FEATURES;
734 		ifp->if_capabilities = IFCAP_HWCSUM;
735 	} else {
736 		ifp->if_hwassist = 0;
737 		ifp->if_capabilities = 0;
738 	}
739 	ifp->if_capenable = ifp->if_capabilities;
740 
741 	/*
742 	 * Read some important bits from the PhyCtrl register.
743 	 */
744 	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
745 	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);
746 
747 	/* Set up MII bus. */
748 	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, stge_mediachange,
749 	    stge_mediastatus)) != 0) {
750 		device_printf(sc->sc_dev, "no PHY found!\n");
751 		goto fail;
752 	}
753 
754 	ether_ifattach(ifp, enaddr);
755 
756 	/* VLAN capability setup */
757 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
758 	if (sc->sc_rev >= 0x0c)
759 		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
760 	ifp->if_capenable = ifp->if_capabilities;
761 #ifdef DEVICE_POLLING
762 	ifp->if_capabilities |= IFCAP_POLLING;
763 #endif
764 	/*
765 	 * Tell the upper layer(s) we support long frames.
766 	 * Must appear after the call to ether_ifattach() because
767 	 * ether_ifattach() sets ifi_hdrlen to the default value.
768 	 */
769 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
770 
771 	/*
772 	 * The manual recommends disabling early transmit, so we
773 	 * do.  It is disabled anyway when IP checksumming is in use,
774 	 * since the entire packet must be in the FIFO in order
775 	 * for the chip to perform the checksum.
776 	 */
777 	sc->sc_txthresh = 0x0fff;
778 
779 	/*
780 	 * Disable MWI if the PCI layer tells us to.
781 	 */
782 	sc->sc_DMACtrl = 0;
783 	if ((cmd & PCIM_CMD_MWRICEN) == 0)
784 		sc->sc_DMACtrl |= DMAC_MWIDisable;
785 
786 	/*
787 	 * Hook up the IRQ.
788 	 */
789 	error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE,
790 	    NULL, stge_intr, sc, &sc->sc_ih);
791 	if (error != 0) {
792 		ether_ifdetach(ifp);
793 		device_printf(sc->sc_dev, "couldn't set up IRQ\n");
794 		sc->sc_ifp = NULL;
795 		goto fail;
796 	}
797 
798 fail:
799 	if (error != 0)
800 		stge_detach(dev);
801 
802 	return (error);
803 }
804 
805 static int
806 stge_detach(device_t dev)
807 {
808 	struct stge_softc *sc;
809 	struct ifnet *ifp;
810 
811 	sc = device_get_softc(dev);
812 
813 	ifp = sc->sc_ifp;
814 #ifdef DEVICE_POLLING
815 	if (ifp && ifp->if_capenable & IFCAP_POLLING)
816 		ether_poll_deregister(ifp);
817 #endif
818 	if (device_is_attached(dev)) {
819 		STGE_LOCK(sc);
820 		/* XXX */
821 		sc->sc_detach = 1;
822 		stge_stop(sc);
823 		STGE_UNLOCK(sc);
824 		callout_drain(&sc->sc_tick_ch);
825 		taskqueue_drain(taskqueue_swi, &sc->sc_link_task);
826 		ether_ifdetach(ifp);
827 	}
828 
829 	if (sc->sc_miibus != NULL) {
830 		device_delete_child(dev, sc->sc_miibus);
831 		sc->sc_miibus = NULL;
832 	}
833 	bus_generic_detach(dev);
834 	stge_dma_free(sc);
835 
836 	if (ifp != NULL) {
837 		if_free(ifp);
838 		sc->sc_ifp = NULL;
839 	}
840 
841 	if (sc->sc_ih) {
842 		bus_teardown_intr(dev, sc->sc_res[1], sc->sc_ih);
843 		sc->sc_ih = NULL;
844 	}
845 	bus_release_resources(dev, sc->sc_spec, sc->sc_res);
846 
847 	mtx_destroy(&sc->sc_mii_mtx);
848 	mtx_destroy(&sc->sc_mtx);
849 
850 	return (0);
851 }
852 
853 struct stge_dmamap_arg {
854 	bus_addr_t	stge_busaddr;
855 };
856 
857 static void
858 stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
859 {
860 	struct stge_dmamap_arg *ctx;
861 
862 	if (error != 0)
863 		return;
864 
865 	ctx = (struct stge_dmamap_arg *)arg;
866 	ctx->stge_busaddr = segs[0].ds_addr;
867 }
868 
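/*
 * DMA resources are organized as a small tag hierarchy: one parent tag
 * constrained to STGE_DMA_MAXADDR, child tags for the Tx/Rx descriptor
 * rings (which must sit below 4GB and be STGE_RING_ALIGN-aligned), and
 * per-buffer tags for the Tx/Rx mbufs themselves.
 */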
869 static int
870 stge_dma_alloc(struct stge_softc *sc)
871 {
872 	struct stge_dmamap_arg ctx;
873 	struct stge_txdesc *txd;
874 	struct stge_rxdesc *rxd;
875 	int error, i;
876 
877 	/* create parent tag. */
878 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),/* parent */
879 		    1, 0,			/* algnmnt, boundary */
880 		    STGE_DMA_MAXADDR,		/* lowaddr */
881 		    BUS_SPACE_MAXADDR,		/* highaddr */
882 		    NULL, NULL,			/* filter, filterarg */
883 		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
884 		    0,				/* nsegments */
885 		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
886 		    0,				/* flags */
887 		    NULL, NULL,			/* lockfunc, lockarg */
888 		    &sc->sc_cdata.stge_parent_tag);
889 	if (error != 0) {
890 		device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
891 		goto fail;
892 	}
893 	/* create tag for Tx ring. */
894 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
895 		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
896 		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
897 		    BUS_SPACE_MAXADDR,		/* highaddr */
898 		    NULL, NULL,			/* filter, filterarg */
899 		    STGE_TX_RING_SZ,		/* maxsize */
900 		    1,				/* nsegments */
901 		    STGE_TX_RING_SZ,		/* maxsegsize */
902 		    0,				/* flags */
903 		    NULL, NULL,			/* lockfunc, lockarg */
904 		    &sc->sc_cdata.stge_tx_ring_tag);
905 	if (error != 0) {
906 		device_printf(sc->sc_dev,
907 		    "failed to allocate Tx ring DMA tag\n");
908 		goto fail;
909 	}
910 
911 	/* create tag for Rx ring. */
912 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
913 		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
914 		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
915 		    BUS_SPACE_MAXADDR,		/* highaddr */
916 		    NULL, NULL,			/* filter, filterarg */
917 		    STGE_RX_RING_SZ,		/* maxsize */
918 		    1,				/* nsegments */
919 		    STGE_RX_RING_SZ,		/* maxsegsize */
920 		    0,				/* flags */
921 		    NULL, NULL,			/* lockfunc, lockarg */
922 		    &sc->sc_cdata.stge_rx_ring_tag);
923 	if (error != 0) {
924 		device_printf(sc->sc_dev,
925 		    "failed to allocate Rx ring DMA tag\n");
926 		goto fail;
927 	}
928 
929 	/* create tag for Tx buffers. */
930 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
931 		    1, 0,			/* algnmnt, boundary */
932 		    BUS_SPACE_MAXADDR,		/* lowaddr */
933 		    BUS_SPACE_MAXADDR,		/* highaddr */
934 		    NULL, NULL,			/* filter, filterarg */
935 		    MCLBYTES * STGE_MAXTXSEGS,	/* maxsize */
936 		    STGE_MAXTXSEGS,		/* nsegments */
937 		    MCLBYTES,			/* maxsegsize */
938 		    0,				/* flags */
939 		    NULL, NULL,			/* lockfunc, lockarg */
940 		    &sc->sc_cdata.stge_tx_tag);
941 	if (error != 0) {
942 		device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
943 		goto fail;
944 	}
945 
946 	/* create tag for Rx buffers. */
947 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
948 		    1, 0,			/* algnmnt, boundary */
949 		    BUS_SPACE_MAXADDR,		/* lowaddr */
950 		    BUS_SPACE_MAXADDR,		/* highaddr */
951 		    NULL, NULL,			/* filter, filterarg */
952 		    MCLBYTES,			/* maxsize */
953 		    1,				/* nsegments */
954 		    MCLBYTES,			/* maxsegsize */
955 		    0,				/* flags */
956 		    NULL, NULL,			/* lockfunc, lockarg */
957 		    &sc->sc_cdata.stge_rx_tag);
958 	if (error != 0) {
959 		device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
960 		goto fail;
961 	}
962 
963 	/* allocate DMA'able memory and load the DMA map for Tx ring. */
964 	error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
965 	    (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
966 	    &sc->sc_cdata.stge_tx_ring_map);
967 	if (error != 0) {
968 		device_printf(sc->sc_dev,
969 		    "failed to allocate DMA'able memory for Tx ring\n");
970 		goto fail;
971 	}
972 
973 	ctx.stge_busaddr = 0;
974 	error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
975 	    sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
976 	    STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
977 	if (error != 0 || ctx.stge_busaddr == 0) {
978 		device_printf(sc->sc_dev,
979 		    "failed to load DMA'able memory for Tx ring\n");
980 		goto fail;
981 	}
982 	sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;
983 
984 	/* allocate DMA'able memory and load the DMA map for Rx ring. */
985 	error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
986 	    (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
987 	    &sc->sc_cdata.stge_rx_ring_map);
988 	if (error != 0) {
989 		device_printf(sc->sc_dev,
990 		    "failed to allocate DMA'able memory for Rx ring\n");
991 		goto fail;
992 	}
993 
994 	ctx.stge_busaddr = 0;
995 	error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
996 	    sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
997 	    STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
998 	if (error != 0 || ctx.stge_busaddr == 0) {
999 		device_printf(sc->sc_dev,
1000 		    "failed to load DMA'able memory for Rx ring\n");
1001 		goto fail;
1002 	}
1003 	sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;
1004 
1005 	/* create DMA maps for Tx buffers. */
1006 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
1007 		txd = &sc->sc_cdata.stge_txdesc[i];
1008 		txd->tx_m = NULL;
1009 		txd->tx_dmamap = 0;
1010 		error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
1011 		    &txd->tx_dmamap);
1012 		if (error != 0) {
1013 			device_printf(sc->sc_dev,
1014 			    "failed to create Tx dmamap\n");
1015 			goto fail;
1016 		}
1017 	}
1018 	/* create DMA maps for Rx buffers. */
1019 	if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
1020 	    &sc->sc_cdata.stge_rx_sparemap)) != 0) {
1021 		device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
1022 		goto fail;
1023 	}
1024 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
1025 		rxd = &sc->sc_cdata.stge_rxdesc[i];
1026 		rxd->rx_m = NULL;
1027 		rxd->rx_dmamap = 0;
1028 		error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
1029 		    &rxd->rx_dmamap);
1030 		if (error != 0) {
1031 			device_printf(sc->sc_dev,
1032 			    "failed to create Rx dmamap\n");
1033 			goto fail;
1034 		}
1035 	}
1036 
1037 fail:
1038 	return (error);
1039 }
1040 
1041 static void
1042 stge_dma_free(struct stge_softc *sc)
1043 {
1044 	struct stge_txdesc *txd;
1045 	struct stge_rxdesc *rxd;
1046 	int i;
1047 
1048 	/* Tx ring */
1049 	if (sc->sc_cdata.stge_tx_ring_tag) {
1050 		if (sc->sc_cdata.stge_tx_ring_map)
1051 			bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
1052 			    sc->sc_cdata.stge_tx_ring_map);
1053 		if (sc->sc_cdata.stge_tx_ring_map &&
1054 		    sc->sc_rdata.stge_tx_ring)
1055 			bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
1056 			    sc->sc_rdata.stge_tx_ring,
1057 			    sc->sc_cdata.stge_tx_ring_map);
1058 		sc->sc_rdata.stge_tx_ring = NULL;
1059 		sc->sc_cdata.stge_tx_ring_map = 0;
1060 		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
1061 		sc->sc_cdata.stge_tx_ring_tag = NULL;
1062 	}
1063 	/* Rx ring */
1064 	if (sc->sc_cdata.stge_rx_ring_tag) {
1065 		if (sc->sc_cdata.stge_rx_ring_map)
1066 			bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
1067 			    sc->sc_cdata.stge_rx_ring_map);
1068 		if (sc->sc_cdata.stge_rx_ring_map &&
1069 		    sc->sc_rdata.stge_rx_ring)
1070 			bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
1071 			    sc->sc_rdata.stge_rx_ring,
1072 			    sc->sc_cdata.stge_rx_ring_map);
1073 		sc->sc_rdata.stge_rx_ring = NULL;
1074 		sc->sc_cdata.stge_rx_ring_map = 0;
1075 		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
1076 		sc->sc_cdata.stge_rx_ring_tag = NULL;
1077 	}
1078 	/* Tx buffers */
1079 	if (sc->sc_cdata.stge_tx_tag) {
1080 		for (i = 0; i < STGE_TX_RING_CNT; i++) {
1081 			txd = &sc->sc_cdata.stge_txdesc[i];
1082 			if (txd->tx_dmamap) {
1083 				bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
1084 				    txd->tx_dmamap);
1085 				txd->tx_dmamap = 0;
1086 			}
1087 		}
1088 		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
1089 		sc->sc_cdata.stge_tx_tag = NULL;
1090 	}
1091 	/* Rx buffers */
1092 	if (sc->sc_cdata.stge_rx_tag) {
1093 		for (i = 0; i < STGE_RX_RING_CNT; i++) {
1094 			rxd = &sc->sc_cdata.stge_rxdesc[i];
1095 			if (rxd->rx_dmamap) {
1096 				bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
1097 				    rxd->rx_dmamap);
1098 				rxd->rx_dmamap = 0;
1099 			}
1100 		}
1101 		if (sc->sc_cdata.stge_rx_sparemap) {
1102 			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
1103 			    sc->sc_cdata.stge_rx_sparemap);
1104 			sc->sc_cdata.stge_rx_sparemap = 0;
1105 		}
1106 		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
1107 		sc->sc_cdata.stge_rx_tag = NULL;
1108 	}
1109 
1110 	if (sc->sc_cdata.stge_parent_tag) {
1111 		bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
1112 		sc->sc_cdata.stge_parent_tag = NULL;
1113 	}
1114 }
1115 
1116 /*
1117  * stge_shutdown:
1118  *
1119  *	Make sure the interface is stopped at reboot time.
1120  */
1121 static int
1122 stge_shutdown(device_t dev)
1123 {
1124 	struct stge_softc *sc;
1125 
1126 	sc = device_get_softc(dev);
1127 
1128 	STGE_LOCK(sc);
1129 	stge_stop(sc);
1130 	STGE_UNLOCK(sc);
1131 
1132 	return (0);
1133 }
1134 
1135 static int
1136 stge_suspend(device_t dev)
1137 {
1138 	struct stge_softc *sc;
1139 
1140 	sc = device_get_softc(dev);
1141 
1142 	STGE_LOCK(sc);
1143 	stge_stop(sc);
1144 	sc->sc_suspended = 1;
1145 	STGE_UNLOCK(sc);
1146 
1147 	return (0);
1148 }
1149 
1150 static int
1151 stge_resume(device_t dev)
1152 {
1153 	struct stge_softc *sc;
1154 	struct ifnet *ifp;
1155 
1156 	sc = device_get_softc(dev);
1157 
1158 	STGE_LOCK(sc);
1159 	ifp = sc->sc_ifp;
1160 	if (ifp->if_flags & IFF_UP)
1161 		stge_init_locked(sc);
1162 
1163 	sc->sc_suspended = 0;
1164 	STGE_UNLOCK(sc);
1165 
1166 	return (0);
1167 }
1168 
1169 static void
1170 stge_dma_wait(struct stge_softc *sc)
1171 {
1172 	int i;
1173 
1174 	for (i = 0; i < STGE_TIMEOUT; i++) {
1175 		DELAY(2);
1176 		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
1177 			break;
1178 	}
1179 
1180 	if (i == STGE_TIMEOUT)
1181 		device_printf(sc->sc_dev, "DMA wait timed out\n");
1182 }
1183 
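/*
 * stge_encap:
 *
 *	Load an outgoing mbuf chain into the next free Tx descriptor.
 *	If the chain needs more than STGE_MAXTXSEGS fragments it is
 *	defragmented first; the TFD control word carries the fragment
 *	count, checksum offload requests and any VLAN tag to insert.
 */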
1184 static int
1185 stge_encap(struct stge_softc *sc, struct mbuf **m_head)
1186 {
1187 	struct stge_txdesc *txd;
1188 	struct stge_tfd *tfd;
1189 	struct mbuf *m;
1190 	bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
1191 	int error, i, nsegs, si;
1192 	uint64_t csum_flags, tfc;
1193 
1194 	STGE_LOCK_ASSERT(sc);
1195 
1196 	if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
1197 		return (ENOBUFS);
1198 
1199 	error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
1200 	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1201 	if (error == EFBIG) {
1202 		m = m_defrag(*m_head, M_DONTWAIT);
1203 		if (m == NULL) {
1204 			m_freem(*m_head);
1205 			*m_head = NULL;
1206 			return (ENOMEM);
1207 		}
1208 		*m_head = m;
1209 		error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
1210 		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1211 		if (error != 0) {
1212 			m_freem(*m_head);
1213 			*m_head = NULL;
1214 			return (error);
1215 		}
1216 	} else if (error != 0)
1217 		return (error);
1218 	if (nsegs == 0) {
1219 		m_freem(*m_head);
1220 		*m_head = NULL;
1221 		return (EIO);
1222 	}
1223 
1224 	m = *m_head;
1225 	csum_flags = 0;
1226 	if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
1227 		if (m->m_pkthdr.csum_flags & CSUM_IP)
1228 			csum_flags |= TFD_IPChecksumEnable;
1229 		if (m->m_pkthdr.csum_flags & CSUM_TCP)
1230 			csum_flags |= TFD_TCPChecksumEnable;
1231 		else if (m->m_pkthdr.csum_flags & CSUM_UDP)
1232 			csum_flags |= TFD_UDPChecksumEnable;
1233 	}
1234 
1235 	si = sc->sc_cdata.stge_tx_prod;
1236 	tfd = &sc->sc_rdata.stge_tx_ring[si];
1237 	for (i = 0; i < nsegs; i++)
1238 		tfd->tfd_frags[i].frag_word0 =
1239 		    htole64(FRAG_ADDR(txsegs[i].ds_addr) |
1240 		    FRAG_LEN(txsegs[i].ds_len));
1241 	sc->sc_cdata.stge_tx_cnt++;
1242 
1243 	tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
1244 	    TFD_FragCount(nsegs) | csum_flags;
1245 	if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
1246 		tfc |= TFD_TxDMAIndicate;
1247 
1248 	/* Update producer index. */
1249 	sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;
1250 
1251 	/* Check if we have a VLAN tag to insert. */
1252 	if (m->m_flags & M_VLANTAG)
1253 		tfc |= (TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag));
1254 	tfd->tfd_control = htole64(tfc);
1255 
1256 	/* Update Tx Queue. */
1257 	STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
1258 	STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
1259 	txd->tx_m = m;
1260 
1261 	/* Sync descriptors. */
1262 	bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
1263 	    BUS_DMASYNC_PREWRITE);
1264 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1265 	    sc->sc_cdata.stge_tx_ring_map,
1266 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1267 
1268 	return (0);
1269 }
1270 
1271 /*
1272  * stge_start:		[ifnet interface function]
1273  *
1274  *	Start packet transmission on the interface.
1275  */
1276 static void
1277 stge_start(struct ifnet *ifp)
1278 {
1279 	struct stge_softc *sc;
1280 
1281 	sc = ifp->if_softc;
1282 	STGE_LOCK(sc);
1283 	stge_start_locked(ifp);
1284 	STGE_UNLOCK(sc);
1285 }
1286 
1287 static void
1288 stge_start_locked(struct ifnet *ifp)
1289 {
1290 	struct stge_softc *sc;
1291 	struct mbuf *m_head;
1292 	int enq;
1293 
1294 	sc = ifp->if_softc;
1295 
1296 	STGE_LOCK_ASSERT(sc);
1297 
1298 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1299 	    IFF_DRV_RUNNING || sc->sc_link == 0)
1300 		return;
1301 
1302 	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1303 		if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
1304 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1305 			break;
1306 		}
1307 
1308 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1309 		if (m_head == NULL)
1310 			break;
1311 		/*
1312 		 * Pack the data into the transmit ring. If we
1313 		 * don't have room, set the OACTIVE flag and wait
1314 		 * for the NIC to drain the ring.
1315 		 */
1316 		if (stge_encap(sc, &m_head)) {
1317 			if (m_head == NULL)
1318 				break;
1319 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1320 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1321 			break;
1322 		}
1323 
1324 		enq++;
1325 		/*
1326 		 * If there's a BPF listener, bounce a copy of this frame
1327 		 * to him.
1328 		 */
1329 		ETHER_BPF_MTAP(ifp, m_head);
1330 	}
1331 
1332 	if (enq > 0) {
1333 		/* Transmit */
1334 		CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);
1335 
1336 		/* Set a timeout in case the chip goes out to lunch. */
1337 		sc->sc_watchdog_timer = 5;
1338 	}
1339 }
1340 
1341 /*
1342  * stge_watchdog:
1343  *
1344  *	Watchdog timer handler.
1345  */
1346 static void
1347 stge_watchdog(struct stge_softc *sc)
1348 {
1349 	struct ifnet *ifp;
1350 
1351 	STGE_LOCK_ASSERT(sc);
1352 
1353 	if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
1354 		return;
1355 
1356 	ifp = sc->sc_ifp;
1357 	if_printf(sc->sc_ifp, "device timeout\n");
1358 	ifp->if_oerrors++;
1359 	stge_init_locked(sc);
1360 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1361 		stge_start_locked(ifp);
1362 }
1363 
1364 /*
1365  * stge_ioctl:		[ifnet interface function]
1366  *
1367  *	Handle control requests from the operator.
1368  */
1369 static int
1370 stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1371 {
1372 	struct stge_softc *sc;
1373 	struct ifreq *ifr;
1374 	struct mii_data *mii;
1375 	int error, mask;
1376 
1377 	sc = ifp->if_softc;
1378 	ifr = (struct ifreq *)data;
1379 	error = 0;
1380 	switch (cmd) {
1381 	case SIOCSIFMTU:
1382 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
1383 			error = EINVAL;
1384 		else if (ifp->if_mtu != ifr->ifr_mtu) {
1385 			ifp->if_mtu = ifr->ifr_mtu;
1386 			STGE_LOCK(sc);
1387 			stge_init_locked(sc);
1388 			STGE_UNLOCK(sc);
1389 		}
1390 		break;
1391 	case SIOCSIFFLAGS:
1392 		STGE_LOCK(sc);
1393 		if ((ifp->if_flags & IFF_UP) != 0) {
1394 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1395 				if (((ifp->if_flags ^ sc->sc_if_flags)
1396 				    & IFF_PROMISC) != 0)
1397 					stge_set_filter(sc);
1398 			} else {
1399 				if (sc->sc_detach == 0)
1400 					stge_init_locked(sc);
1401 			}
1402 		} else {
1403 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1404 				stge_stop(sc);
1405 		}
1406 		sc->sc_if_flags = ifp->if_flags;
1407 		STGE_UNLOCK(sc);
1408 		break;
1409 	case SIOCADDMULTI:
1410 	case SIOCDELMULTI:
1411 		STGE_LOCK(sc);
1412 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1413 			stge_set_multi(sc);
1414 		STGE_UNLOCK(sc);
1415 		break;
1416 	case SIOCSIFMEDIA:
1417 	case SIOCGIFMEDIA:
1418 		mii = device_get_softc(sc->sc_miibus);
1419 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1420 		break;
1421 	case SIOCSIFCAP:
1422 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1423 #ifdef DEVICE_POLLING
1424 		if ((mask & IFCAP_POLLING) != 0) {
1425 			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1426 				error = ether_poll_register(stge_poll, ifp);
1427 				if (error != 0)
1428 					break;
1429 				STGE_LOCK(sc);
1430 				CSR_WRITE_2(sc, STGE_IntEnable, 0);
1431 				ifp->if_capenable |= IFCAP_POLLING;
1432 				STGE_UNLOCK(sc);
1433 			} else {
1434 				error = ether_poll_deregister(ifp);
1435 				if (error != 0)
1436 					break;
1437 				STGE_LOCK(sc);
1438 				CSR_WRITE_2(sc, STGE_IntEnable,
1439 				    sc->sc_IntEnable);
1440 				ifp->if_capenable &= ~IFCAP_POLLING;
1441 				STGE_UNLOCK(sc);
1442 			}
1443 		}
1444 #endif
1445 		if ((mask & IFCAP_HWCSUM) != 0) {
1446 			ifp->if_capenable ^= IFCAP_HWCSUM;
1447 			if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
1448 			    (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
1449 				ifp->if_hwassist = STGE_CSUM_FEATURES;
1450 			else
1451 				ifp->if_hwassist = 0;
1452 		}
1453 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
1454 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1455 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1456 				STGE_LOCK(sc);
1457 				stge_vlan_setup(sc);
1458 				STGE_UNLOCK(sc);
1459 			}
1460 		}
1461 		VLAN_CAPABILITIES(ifp);
1462 		break;
1463 	default:
1464 		error = ether_ioctl(ifp, cmd, data);
1465 		break;
1466 	}
1467 
1468 	return (error);
1469 }
1470 
1471 static void
1472 stge_link_task(void *arg, int pending)
1473 {
1474 	struct stge_softc *sc;
1475 	struct mii_data *mii;
1476 	uint32_t v, ac;
1477 	int i;
1478 
1479 	sc = (struct stge_softc *)arg;
1480 	STGE_LOCK(sc);
1481 
1482 	mii = device_get_softc(sc->sc_miibus);
1483 	if (mii->mii_media_status & IFM_ACTIVE) {
1484 		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
1485 			sc->sc_link = 1;
1486 	} else
1487 		sc->sc_link = 0;
1488 
1489 	sc->sc_MACCtrl = 0;
1490 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
1491 		sc->sc_MACCtrl |= MC_DuplexSelect;
1492 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) != 0)
1493 		sc->sc_MACCtrl |= MC_RxFlowControlEnable;
1494 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) != 0)
1495 		sc->sc_MACCtrl |= MC_TxFlowControlEnable;
1496 	/*
1497 	 * Update STGE_MACCtrl register depending on link status.
1498 	 * (duplex, flow control etc)
1499 	 */
1500 	v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
1501 	v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
1502 	v |= sc->sc_MACCtrl;
1503 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
1504 	if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
1505 		/* Duplex setting changed, reset Tx/Rx functions. */
1506 		ac = CSR_READ_4(sc, STGE_AsicCtrl);
1507 		ac |= AC_TxReset | AC_RxReset;
1508 		CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1509 		for (i = 0; i < STGE_TIMEOUT; i++) {
1510 			DELAY(100);
1511 			if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
1512 				break;
1513 		}
1514 		if (i == STGE_TIMEOUT)
1515 			device_printf(sc->sc_dev, "reset failed to complete\n");
1516 	}
1517 	STGE_UNLOCK(sc);
1518 }
1519 
1520 static __inline int
1521 stge_tx_error(struct stge_softc *sc)
1522 {
1523 	uint32_t txstat;
1524 	int error;
1525 
1526 	for (error = 0;;) {
1527 		txstat = CSR_READ_4(sc, STGE_TxStatus);
1528 		if ((txstat & TS_TxComplete) == 0)
1529 			break;
1530 		/* Tx underrun */
1531 		if ((txstat & TS_TxUnderrun) != 0) {
1532 			/*
1533 			 * XXX
1534 			 * There should be a better way to recover
1535 			 * from a Tx underrun than a full reset.
1536 			 */
1537 			if (sc->sc_nerr++ < STGE_MAXERR)
1538 				device_printf(sc->sc_dev, "Tx underrun, "
1539 				    "resetting...\n");
1540 			if (sc->sc_nerr == STGE_MAXERR)
1541 				device_printf(sc->sc_dev, "too many errors; "
1542 				    "not reporting any more\n");
1543 			error = -1;
1544 			break;
1545 		}
1546 		/* Maximum/Late collisions, Re-enable Tx MAC. */
1547 		if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
1548 			CSR_WRITE_4(sc, STGE_MACCtrl,
1549 			    (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
1550 			    MC_TxEnable);
1551 	}
1552 
1553 	return (error);
1554 }
1555 
1556 /*
1557  * stge_intr:
1558  *
1559  *	Interrupt service routine.
1560  */
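/*
 * Reading STGE_IntStatusAck returns the pending interrupt sources and,
 * as the name suggests, acknowledges them, so the handler below simply
 * loops on that register until it reads back zero.
 */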
1561 static void
1562 stge_intr(void *arg)
1563 {
1564 	struct stge_softc *sc;
1565 	struct ifnet *ifp;
1566 	int reinit;
1567 	uint16_t status;
1568 
1569 	sc = (struct stge_softc *)arg;
1570 	ifp = sc->sc_ifp;
1571 
1572 	STGE_LOCK(sc);
1573 
1574 #ifdef DEVICE_POLLING
1575 	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1576 		goto done_locked;
1577 #endif
1578 	status = CSR_READ_2(sc, STGE_IntStatus);
1579 	if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
1580 		goto done_locked;
1581 
1582 	/* Disable interrupts. */
1583 	for (reinit = 0;;) {
1584 		status = CSR_READ_2(sc, STGE_IntStatusAck);
1585 		status &= sc->sc_IntEnable;
1586 		if (status == 0)
1587 			break;
1588 		/* Host interface errors. */
1589 		if ((status & IS_HostError) != 0) {
1590 			device_printf(sc->sc_dev,
1591 			    "Host interface error, resetting...\n");
1592 			reinit = 1;
1593 			goto force_init;
1594 		}
1595 
1596 		/* Receive interrupts. */
1597 		if ((status & IS_RxDMAComplete) != 0) {
1598 			stge_rxeof(sc);
1599 			if ((status & IS_RFDListEnd) != 0)
1600 				CSR_WRITE_4(sc, STGE_DMACtrl,
1601 				    DMAC_RxDMAPollNow);
1602 		}
1603 
1604 		/* Transmit interrupts. */
1605 		if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
1606 			stge_txeof(sc);
1607 
1608 		/* Transmission errors. */
1609 		if ((status & IS_TxComplete) != 0) {
1610 			if ((reinit = stge_tx_error(sc)) != 0)
1611 				break;
1612 		}
1613 	}
1614 
1615 force_init:
1616 	if (reinit != 0)
1617 		stge_init_locked(sc);
1618 
1619 	/* Re-enable interrupts. */
1620 	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
1621 
1622 	/* Try to get more packets going. */
1623 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1624 		stge_start_locked(ifp);
1625 
1626 done_locked:
1627 	STGE_UNLOCK(sc);
1628 }
1629 
1630 /*
1631  * stge_txeof:
1632  *
1633  *	Helper; handle transmit interrupts.
1634  */
1635 static void
1636 stge_txeof(struct stge_softc *sc)
1637 {
1638 	struct ifnet *ifp;
1639 	struct stge_txdesc *txd;
1640 	uint64_t control;
1641 	int cons;
1642 
1643 	STGE_LOCK_ASSERT(sc);
1644 
1645 	ifp = sc->sc_ifp;
1646 
1647 	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
1648 	if (txd == NULL)
1649 		return;
1650 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1651 	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);
1652 
1653 	/*
1654 	 * Go through our Tx list and free mbufs for those
1655 	 * frames which have been transmitted.
1656 	 */
1657 	for (cons = sc->sc_cdata.stge_tx_cons;;
1658 	    cons = (cons + 1) % STGE_TX_RING_CNT) {
1659 		if (sc->sc_cdata.stge_tx_cnt <= 0)
1660 			break;
1661 		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
1662 		if ((control & TFD_TFDDone) == 0)
1663 			break;
1664 		sc->sc_cdata.stge_tx_cnt--;
1665 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1666 
1667 		bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
1668 		    BUS_DMASYNC_POSTWRITE);
1669 		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);
1670 
1671 		/* Output counter is updated with statistics register */
1672 		m_freem(txd->tx_m);
1673 		txd->tx_m = NULL;
1674 		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
1675 		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
1676 		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
1677 	}
1678 	sc->sc_cdata.stge_tx_cons = cons;
1679 	if (sc->sc_cdata.stge_tx_cnt == 0)
1680 		sc->sc_watchdog_timer = 0;
1681 
1682 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1683 	    sc->sc_cdata.stge_tx_ring_map,
1684 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1685 }
1686 
1687 static __inline void
1688 stge_discard_rxbuf(struct stge_softc *sc, int idx)
1689 {
1690 	struct stge_rfd *rfd;
1691 
1692 	rfd = &sc->sc_rdata.stge_rx_ring[idx];
1693 	rfd->rfd_status = 0;
1694 }
1695 
1696 #ifndef __NO_STRICT_ALIGNMENT
1697 /*
1698  * It seems that TC9021's DMA engine has alignment restrictions in
1699  * DMA scatter operations. The first DMA segment has no address
1700  * alignment restrictions but the rest should be aligned on a 4(?)-byte
1701  * boundary.  Otherwise it would corrupt random memory.  Since we don't
1702  * know which one is used for the first segment in advance we simply
1703  * don't align at all.
1704  * To avoid copying over an entire frame to align, we allocate a new
1705  * mbuf and copy ethernet header to the new mbuf. The new mbuf is
1706  * prepended into the existing mbuf chain.
1707  */
1708 static __inline struct mbuf *
1709 stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
1710 {
1711 	struct mbuf *n;
1712 
1713 	n = NULL;
1714 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
1715 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
1716 		m->m_data += ETHER_HDR_LEN;
1717 		n = m;
1718 	} else {
1719 		MGETHDR(n, M_DONTWAIT, MT_DATA);
1720 		if (n != NULL) {
1721 			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
1722 			m->m_data += ETHER_HDR_LEN;
1723 			m->m_len -= ETHER_HDR_LEN;
1724 			n->m_len = ETHER_HDR_LEN;
1725 			M_MOVE_PKTHDR(n, m);
1726 			n->m_next = m;
1727 		} else
1728 			m_freem(m);
1729 	}
1730 
1731 	return (n);
1732 }
1733 #endif
1734 
1735 /*
1736  * stge_rxeof:
1737  *
1738  *	Helper; handle receive interrupts.
1739  */
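/*
 * A frame larger than one Rx buffer spans several RFDs; only the RFD
 * flagged RFD_FrameEnd carries the final frame length.  The loop below
 * therefore chains the per-RFD mbufs via stge_rxhead/stge_rxtail and
 * hands the completed chain to the stack once it sees RFD_FrameEnd.
 */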
1740 static void
1741 stge_rxeof(struct stge_softc *sc)
1742 {
1743 	struct ifnet *ifp;
1744 	struct stge_rxdesc *rxd;
1745 	struct mbuf *mp, *m;
1746 	uint64_t status64;
1747 	uint32_t status;
1748 	int cons, prog;
1749 
1750 	STGE_LOCK_ASSERT(sc);
1751 
1752 	ifp = sc->sc_ifp;
1753 
1754 	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
1755 	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);
1756 
1757 	prog = 0;
1758 	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
1759 	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
1760 		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
1761 		status = RFD_RxStatus(status64);
1762 		if ((status & RFD_RFDDone) == 0)
1763 			break;
1764 #ifdef DEVICE_POLLING
1765 		if (ifp->if_capenable & IFCAP_POLLING) {
1766 			if (sc->sc_cdata.stge_rxcycles <= 0)
1767 				break;
1768 			sc->sc_cdata.stge_rxcycles--;
1769 		}
1770 #endif
1771 		prog++;
1772 		rxd = &sc->sc_cdata.stge_rxdesc[cons];
1773 		mp = rxd->rx_m;
1774 
1775 		/*
1776 		 * If the packet had an error, drop it.  Note we count
1777 		 * the error later in the periodic stats update.
1778 		 */
1779 		if ((status & RFD_FrameEnd) != 0 && (status &
1780 		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
1781 		    RFD_RxAlignmentError | RFD_RxFCSError |
1782 		    RFD_RxLengthError)) != 0) {
1783 			stge_discard_rxbuf(sc, cons);
1784 			if (sc->sc_cdata.stge_rxhead != NULL) {
1785 				m_freem(sc->sc_cdata.stge_rxhead);
1786 				STGE_RXCHAIN_RESET(sc);
1787 			}
1788 			continue;
1789 		}
1790 		/*
1791 		 * Add a new receive buffer to the ring.
1792 		 */
1793 		if (stge_newbuf(sc, cons) != 0) {
1794 			ifp->if_iqdrops++;
1795 			stge_discard_rxbuf(sc, cons);
1796 			if (sc->sc_cdata.stge_rxhead != NULL) {
1797 				m_freem(sc->sc_cdata.stge_rxhead);
1798 				STGE_RXCHAIN_RESET(sc);
1799 			}
1800 			continue;
1801 		}
1802 
1803 		if ((status & RFD_FrameEnd) != 0)
1804 			mp->m_len = RFD_RxDMAFrameLen(status) -
1805 			    sc->sc_cdata.stge_rxlen;
1806 		sc->sc_cdata.stge_rxlen += mp->m_len;
1807 
1808 		/* Chain mbufs. */
1809 		if (sc->sc_cdata.stge_rxhead == NULL) {
1810 			sc->sc_cdata.stge_rxhead = mp;
1811 			sc->sc_cdata.stge_rxtail = mp;
1812 		} else {
1813 			mp->m_flags &= ~M_PKTHDR;
1814 			sc->sc_cdata.stge_rxtail->m_next = mp;
1815 			sc->sc_cdata.stge_rxtail = mp;
1816 		}
1817 
1818 		if ((status & RFD_FrameEnd) != 0) {
1819 			m = sc->sc_cdata.stge_rxhead;
1820 			m->m_pkthdr.rcvif = ifp;
1821 			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;
1822 
1823 			if (m->m_pkthdr.len > sc->sc_if_framesize) {
1824 				m_freem(m);
1825 				STGE_RXCHAIN_RESET(sc);
1826 				continue;
1827 			}
1828 			/*
1829 			 * Set the incoming checksum information for
1830 			 * the packet.
1831 			 */
1832 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1833 				if ((status & RFD_IPDetected) != 0) {
1834 					m->m_pkthdr.csum_flags |=
1835 						CSUM_IP_CHECKED;
1836 					if ((status & RFD_IPError) == 0)
1837 						m->m_pkthdr.csum_flags |=
1838 						    CSUM_IP_VALID;
1839 				}
1840 				if (((status & RFD_TCPDetected) != 0 &&
1841 				    (status & RFD_TCPError) == 0) ||
1842 				    ((status & RFD_UDPDetected) != 0 &&
1843 				    (status & RFD_UDPError) == 0)) {
1844 					m->m_pkthdr.csum_flags |=
1845 					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1846 					m->m_pkthdr.csum_data = 0xffff;
1847 				}
1848 			}
1849 
1850 #ifndef __NO_STRICT_ALIGNMENT
1851 			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
1852 				if ((m = stge_fixup_rx(sc, m)) == NULL) {
1853 					STGE_RXCHAIN_RESET(sc);
1854 					continue;
1855 				}
1856 			}
1857 #endif
1858 			/* Check for VLAN tagged packets. */
1859 			if ((status & RFD_VLANDetected) != 0 &&
1860 			    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
1861 				m->m_pkthdr.ether_vtag = RFD_TCI(status64);
1862 				m->m_flags |= M_VLANTAG;
1863 			}
1864 
1865 			STGE_UNLOCK(sc);
1866 			/* Pass it on. */
1867 			(*ifp->if_input)(ifp, m);
1868 			STGE_LOCK(sc);
1869 
1870 			STGE_RXCHAIN_RESET(sc);
1871 		}
1872 	}
1873 
1874 	if (prog > 0) {
1875 		/* Update the consumer index. */
1876 		sc->sc_cdata.stge_rx_cons = cons;
1877 		bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
1878 		    sc->sc_cdata.stge_rx_ring_map,
1879 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1880 	}
1881 }
1882 
1883 #ifdef DEVICE_POLLING
1884 static void
1885 stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1886 {
1887 	struct stge_softc *sc;
1888 	uint16_t status;
1889 
1890 	sc = ifp->if_softc;
1891 	STGE_LOCK(sc);
1892 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1893 		STGE_UNLOCK(sc);
1894 		return;
1895 	}
1896 
1897 	sc->sc_cdata.stge_rxcycles = count;
1898 	stge_rxeof(sc);
1899 	stge_txeof(sc);
1900 
1901 	if (cmd == POLL_AND_CHECK_STATUS) {
1902 		status = CSR_READ_2(sc, STGE_IntStatus);
1903 		status &= sc->sc_IntEnable;
1904 		if (status != 0) {
1905 			if ((status & IS_HostError) != 0) {
1906 				device_printf(sc->sc_dev,
1907 				    "Host interface error, resetting...\n");
1908 				stge_init_locked(sc);
1909 			}
1910 			if ((status & IS_TxComplete) != 0) {
1911 				if (stge_tx_error(sc) != 0)
1912 					stge_init_locked(sc);
1913 			}
1914 		}
1915 
1916 	}
1917 
1918 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1919 		stge_start_locked(ifp);
1920 
1921 	STGE_UNLOCK(sc);
1922 }
1923 #endif	/* DEVICE_POLLING */
1924 
1925 /*
1926  * stge_tick:
1927  *
1928  *	One second timer, used to tick the MII.
1929  */
1930 static void
1931 stge_tick(void *arg)
1932 {
1933 	struct stge_softc *sc;
1934 	struct mii_data *mii;
1935 
1936 	sc = (struct stge_softc *)arg;
1937 
1938 	STGE_LOCK_ASSERT(sc);
1939 
1940 	mii = device_get_softc(sc->sc_miibus);
1941 	mii_tick(mii);
1942 
1943 	/* Update statistics counters. */
1944 	stge_stats_update(sc);
1945 
1946 	/*
1947 	 * Reclaim any pending Tx descriptors to release mbufs in a
1948 	 * timely manner as we don't generate Tx completion interrupts
1949 	 * for every frame. This limits the delay to a maximum of one
1950 	 * second.
1951 	 */
1952 	if (sc->sc_cdata.stge_tx_cnt != 0)
1953 		stge_txeof(sc);
1954 
1955 	stge_watchdog(sc);
1956 
1957 	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
1958 }
1959 
1960 /*
1961  * stge_stats_update:
1962  *
1963  *	Read the TC9021 statistics counters.
1964  */
1965 static void
1966 stge_stats_update(struct stge_softc *sc)
1967 {
1968 	struct ifnet *ifp;
1969 
1970 	STGE_LOCK_ASSERT(sc);
1971 
1972 	ifp = sc->sc_ifp;
1973 
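	/*
	 * The statistics registers appear to be clear-on-read; the two
	 * octet counters are read only to clear them, since we do not
	 * keep byte counts here.
	 */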
1974 	CSR_READ_4(sc, STGE_OctetRcvOk);
1975 
1976 	ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk);
1977 
1978 	ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors);
1979 
1980 	CSR_READ_4(sc, STGE_OctetXmtdOk);
1981 
1982 	ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk);
1983 
1984 	ifp->if_collisions +=
1985 	    CSR_READ_4(sc, STGE_LateCollisions) +
1986 	    CSR_READ_4(sc, STGE_MultiColFrames) +
1987 	    CSR_READ_4(sc, STGE_SingleColFrames);
1988 
1989 	ifp->if_oerrors +=
1990 	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
1991 	    CSR_READ_2(sc, STGE_FramesWEXDeferal);
1992 }
1993 
1994 /*
1995  * stge_reset:
1996  *
1997  *	Perform a soft reset on the TC9021.
1998  */
1999 static void
2000 stge_reset(struct stge_softc *sc, uint32_t how)
2001 {
2002 	uint32_t ac;
2003 	uint8_t v;
2004 	int i, dv;
2005 
2006 	STGE_LOCK_ASSERT(sc);
2007 
2008 	dv = 5000;
2009 	ac = CSR_READ_4(sc, STGE_AsicCtrl);
2010 	switch (how) {
2011 	case STGE_RESET_TX:
2012 		ac |= AC_TxReset | AC_FIFO;
2013 		dv = 100;
2014 		break;
2015 	case STGE_RESET_RX:
2016 		ac |= AC_RxReset | AC_FIFO;
2017 		dv = 100;
2018 		break;
2019 	case STGE_RESET_FULL:
2020 	default:
2021 		/*
2022 		 * Only assert RstOut if we're fiber.  We need GMII clocks
2023 		 * to be present in order for the reset to complete on fiber
2024 		 * cards.
2025 		 */
2026 		ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
2027 		    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
2028 		    (sc->sc_usefiber ? AC_RstOut : 0);
2029 		break;
2030 	}
2031 
2032 	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
2033 
2034 	/* Account for reset problem at 10Mbps. */
2035 	DELAY(dv);
2036 
2037 	for (i = 0; i < STGE_TIMEOUT; i++) {
2038 		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
2039 			break;
2040 		DELAY(dv);
2041 	}
2042 
2043 	if (i == STGE_TIMEOUT)
2044 		device_printf(sc->sc_dev, "reset failed to complete\n");
2045 
2046 	/* Set LED, from Linux IPG driver. */
2047 	ac = CSR_READ_4(sc, STGE_AsicCtrl);
2048 	ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
2049 	if ((sc->sc_led & 0x01) != 0)
2050 		ac |= AC_LEDMode;
2051 	if ((sc->sc_led & 0x03) != 0)
2052 		ac |= AC_LEDModeBit1;
2053 	if ((sc->sc_led & 0x08) != 0)
2054 		ac |= AC_LEDSpeed;
2055 	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
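	/*
	 * The sc_led decoding above follows the Linux IPG driver: bit 0
	 * drives AC_LEDMode, bits 0-1 drive AC_LEDModeBit1, bit 3 drives
	 * AC_LEDSpeed, and bits 4-6 are written to STGE_PhySet below.
	 */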
2056 
2057 	/* Set PHY, from Linux IPG driver */
2058 	v = CSR_READ_1(sc, STGE_PhySet);
2059 	v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
2060 	v |= ((sc->sc_led & 0x70) >> 4);
2061 	CSR_WRITE_1(sc, STGE_PhySet, v);
2062 }
2063 
2064 /*
2065  * stge_init:		[ ifnet interface function ]
2066  *
2067  *	Initialize the interface.
2068  */
2069 static void
2070 stge_init(void *xsc)
2071 {
2072 	struct stge_softc *sc;
2073 
2074 	sc = (struct stge_softc *)xsc;
2075 	STGE_LOCK(sc);
2076 	stge_init_locked(sc);
2077 	STGE_UNLOCK(sc);
2078 }
2079 
2080 static void
2081 stge_init_locked(struct stge_softc *sc)
2082 {
2083 	struct ifnet *ifp;
2084 	struct mii_data *mii;
2085 	uint16_t eaddr[3];
2086 	uint32_t v;
2087 	int error;
2088 
2089 	STGE_LOCK_ASSERT(sc);
2090 
2091 	ifp = sc->sc_ifp;
2092 	mii = device_get_softc(sc->sc_miibus);
2093 
2094 	/*
2095 	 * Cancel any pending I/O.
2096 	 */
2097 	stge_stop(sc);
2098 
2099 	/* Init descriptors. */
2100 	error = stge_init_rx_ring(sc);
2101 	if (error != 0) {
2102 		device_printf(sc->sc_dev,
2103 		    "initialization failed: no memory for rx buffers\n");
2104 		stge_stop(sc);
2105 		goto out;
2106 	}
2107 	stge_init_tx_ring(sc);
2108 
2109 	/* Set the station address. */
2110 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2111 	CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
2112 	CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
2113 	CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));
2114 
2115 	/*
2116 	 * Set the statistics masks.  Disable all the RMON stats,
2117 	 * and disable selected stats in the non-RMON stats registers.
2118 	 */
2119 	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
2120 	CSR_WRITE_4(sc, STGE_StatisticsMask,
2121 	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
2122 	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
2123 	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
2124 	    (1U << 21));
2125 
2126 	/* Set up the receive filter. */
2127 	stge_set_filter(sc);
2128 	/* Program multicast filter. */
2129 	stge_set_multi(sc);
2130 
2131 	/*
2132 	 * Give the transmit and receive rings to the chip.
2133 	 */
2134 	CSR_WRITE_4(sc, STGE_TFDListPtrHi,
2135 	    STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
2136 	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
2137 	    STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));
2138 
2139 	CSR_WRITE_4(sc, STGE_RFDListPtrHi,
2140 	    STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
2141 	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
2142 	    STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));
2143 
2144 	/*
2145 	 * Initialize the Tx auto-poll period.  It's OK to make this number
2146 	 * large (255 is the max, but we use 127) -- we explicitly kick the
2147 	 * transmit engine when there's actually a packet.
2148 	 */
2149 	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2150 
2151 	/* ..and the Rx auto-poll period. */
2152 	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2153 
2154 	/* Initialize the Tx start threshold. */
2155 	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);
2156 
2157 	/* Rx DMA thresholds, from Linux */
2158 	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
2159 	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);
2160 
2161 	/* Rx early threshold, from Linux */
2162 	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);
2163 
2164 	/* Tx DMA thresholds, from Linux */
2165 	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
2166 	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);
2167 
2168 	/*
2169 	 * Initialize the Rx DMA interrupt control register.  We
2170 	 * request an interrupt after every incoming packet, but
2171 	 * defer it for sc_rxint_dmawait microseconds.  When the number
2172 	 * of pending frames reaches sc_rxint_nframe, we stop
2173 	 * deferring the interrupt, and signal it immediately.
2174 	 */
2175 	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
2176 	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
2177 	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));
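	/*
	 * Worked example with hypothetical tunables of 8 frames and
	 * 30 usecs: a lone packet interrupts only once its 30 usec
	 * deferral expires, while a burst interrupts as soon as 8
	 * frames are pending, bounding the rate to roughly one
	 * interrupt per 8 frames under load.
	 */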
2178 
2179 	/*
2180 	 * Initialize the interrupt mask.
2181 	 */
2182 	sc->sc_IntEnable = IS_HostError | IS_TxComplete |
2183 	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
2184 #ifdef DEVICE_POLLING
2185 	/* Disable interrupts if we are polling. */
2186 	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
2187 		CSR_WRITE_2(sc, STGE_IntEnable, 0);
2188 	else
2189 #endif
2190 	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
2191 
2192 	/*
2193 	 * Configure the DMA engine.
2194 	 * XXX Should auto-tune TxBurstLimit.
2195 	 */
2196 	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));
2197 
2198 	/*
2199 	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
2200 	 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
2201 	 * in the Rx FIFO.
2202 	 */
2203 	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
2204 	CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);
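	/*
	 * The thresholds are evidently programmed in 16-byte units,
	 * hence the byte counts above are divided by 16.
	 */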
2205 
2206 	/*
2207 	 * Set the maximum frame size.
2208 	 */
2209 	sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2210 	CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);
2211 
2212 	/*
2213 	 * Initialize MacCtrl -- do it before setting the media,
2214 	 * as setting the media will actually program the register.
2215 	 *
2216 	 * Note: We have to poke the IFS value before poking
2217 	 * anything else.
2218 	 */
2219 	/* Tx/Rx MAC should be disabled before programming IFS. */
2220 	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));
2221 
2222 	stge_vlan_setup(sc);
2223 
2224 	if (sc->sc_rev >= 6) {		/* >= B.2 */
2225 		/* Multi-frag frame bug work-around. */
2226 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2227 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);
2228 
2229 		/* Tx Poll Now bug work-around. */
2230 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2231 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
2232 		/* Tx Poll Now bug work-around. */
2233 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2234 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
2235 	}
2236 
2237 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2238 	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
2239 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2240 	/*
2241 	 * It seems that transmitting frames without first checking the
2242 	 * state of the Rx/Tx MAC wedges the hardware.
2243 	 */
2244 	stge_start_tx(sc);
2245 	stge_start_rx(sc);
2246 
2247 	sc->sc_link = 0;
2248 	/*
2249 	 * Set the current media.
2250 	 */
2251 	mii_mediachg(mii);
2252 
2253 	/*
2254 	 * Start the one second MII clock.
2255 	 */
2256 	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
2257 
2258 	/*
2259 	 * ...all done!
2260 	 */
2261 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2262 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2263 
2264  out:
2265 	if (error != 0)
2266 		device_printf(sc->sc_dev, "interface not running\n");
2267 }
2268 
2269 static void
2270 stge_vlan_setup(struct stge_softc *sc)
2271 {
2272 	struct ifnet *ifp;
2273 	uint32_t v;
2274 
2275 	ifp = sc->sc_ifp;
2276 	/*
2277 	 * The NIC always copies a VLAN tag regardless of the STGE_MACCtrl
2278 	 * MC_AutoVLANuntagging bit.
2279 	 * The MC_AutoVLANtagging bit selects which VLAN source to use
2280 	 * between STGE_VLANTag and TFC.  However, the TFC TFD_VLANTagInsert
2281 	 * bit has priority over MC_AutoVLANtagging, so we always use TFC
2282 	 * instead of the STGE_VLANTag register.
2283 	 */
2284 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2285 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2286 		v |= MC_AutoVLANuntagging;
2287 	else
2288 		v &= ~MC_AutoVLANuntagging;
2289 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2290 }
2291 
2292 /*
2293  *	Stop transmission on the interface.
2294  */
2295 static void
2296 stge_stop(struct stge_softc *sc)
2297 {
2298 	struct ifnet *ifp;
2299 	struct stge_txdesc *txd;
2300 	struct stge_rxdesc *rxd;
2301 	uint32_t v;
2302 	int i;
2303 
2304 	STGE_LOCK_ASSERT(sc);
2305 	/*
2306 	 * Stop the one second clock.
2307 	 */
2308 	callout_stop(&sc->sc_tick_ch);
2309 	sc->sc_watchdog_timer = 0;
2310 
2311 	/*
2312 	 * Reset the chip to a known state.
2313 	 */
2314 	stge_reset(sc, STGE_RESET_FULL);
2315 
2316 	/*
2317 	 * Disable interrupts.
2318 	 */
2319 	CSR_WRITE_2(sc, STGE_IntEnable, 0);
2320 
2321 	/*
2322 	 * Stop receiver, transmitter, and stats update.
2323 	 */
2324 	stge_stop_rx(sc);
2325 	stge_stop_tx(sc);
2326 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2327 	v |= MC_StatisticsDisable;
2328 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2329 
2330 	/*
2331 	 * Stop the transmit and receive DMA.
2332 	 */
2333 	stge_dma_wait(sc);
2334 	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
2335 	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
2336 	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
2337 	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);
2338 
2339 	/*
2340 	 * Free RX and TX mbufs still in the queues.
2341 	 */
2342 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2343 		rxd = &sc->sc_cdata.stge_rxdesc[i];
2344 		if (rxd->rx_m != NULL) {
2345 			bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
2346 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2347 			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
2348 			    rxd->rx_dmamap);
2349 			m_freem(rxd->rx_m);
2350 			rxd->rx_m = NULL;
2351 		}
2352 	}
2353 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2354 		txd = &sc->sc_cdata.stge_txdesc[i];
2355 		if (txd->tx_m != NULL) {
2356 			bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
2357 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2358 			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
2359 			    txd->tx_dmamap);
2360 			m_freem(txd->tx_m);
2361 			txd->tx_m = NULL;
2362 		}
2363 	}
2364 
2365 	/*
2366 	 * Mark the interface down and cancel the watchdog timer.
2367 	 */
2368 	ifp = sc->sc_ifp;
2369 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2370 	sc->sc_link = 0;
2371 }
2372 
2373 static void
2374 stge_start_tx(struct stge_softc *sc)
2375 {
2376 	uint32_t v;
2377 	int i;
2378 
2379 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2380 	if ((v & MC_TxEnabled) != 0)
2381 		return;
2382 	v |= MC_TxEnable;
2383 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2384 	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2385 	for (i = STGE_TIMEOUT; i > 0; i--) {
2386 		DELAY(10);
2387 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2388 		if ((v & MC_TxEnabled) != 0)
2389 			break;
2390 	}
2391 	if (i == 0)
2392 		device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
2393 }
2394 
2395 static void
2396 stge_start_rx(struct stge_softc *sc)
2397 {
2398 	uint32_t v;
2399 	int i;
2400 
2401 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2402 	if ((v & MC_RxEnabled) != 0)
2403 		return;
2404 	v |= MC_RxEnable;
2405 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2406 	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2407 	for (i = STGE_TIMEOUT; i > 0; i--) {
2408 		DELAY(10);
2409 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2410 		if ((v & MC_RxEnabled) != 0)
2411 			break;
2412 	}
2413 	if (i == 0)
2414 		device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
2415 }
2416 
2417 static void
2418 stge_stop_tx(struct stge_softc *sc)
2419 {
2420 	uint32_t v;
2421 	int i;
2422 
2423 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2424 	if ((v & MC_TxEnabled) == 0)
2425 		return;
2426 	v |= MC_TxDisable;
2427 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2428 	for (i = STGE_TIMEOUT; i > 0; i--) {
2429 		DELAY(10);
2430 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2431 		if ((v & MC_TxEnabled) == 0)
2432 			break;
2433 	}
2434 	if (i == 0)
2435 		device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
2436 }
2437 
2438 static void
2439 stge_stop_rx(struct stge_softc *sc)
2440 {
2441 	uint32_t v;
2442 	int i;
2443 
2444 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2445 	if ((v & MC_RxEnabled) == 0)
2446 		return;
2447 	v |= MC_RxDisable;
2448 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2449 	for (i = STGE_TIMEOUT; i > 0; i--) {
2450 		DELAY(10);
2451 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2452 		if ((v & MC_RxEnabled) == 0)
2453 			break;
2454 	}
2455 	if (i == 0)
2456 		device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
2457 }
2458 
2459 static void
2460 stge_init_tx_ring(struct stge_softc *sc)
2461 {
2462 	struct stge_ring_data *rd;
2463 	struct stge_txdesc *txd;
2464 	bus_addr_t addr;
2465 	int i;
2466 
2467 	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
2468 	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);
2469 
2470 	sc->sc_cdata.stge_tx_prod = 0;
2471 	sc->sc_cdata.stge_tx_cons = 0;
2472 	sc->sc_cdata.stge_tx_cnt = 0;
2473 
2474 	rd = &sc->sc_rdata;
2475 	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
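	/*
	 * Link the TFDs into a circular list: each tfd_next points to
	 * the following ring entry and the last wraps back to the
	 * first.  TFD_TFDDone marks a descriptor as owned by the driver
	 * and available for a new frame.
	 */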
2476 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2477 		if (i == (STGE_TX_RING_CNT - 1))
2478 			addr = STGE_TX_RING_ADDR(sc, 0);
2479 		else
2480 			addr = STGE_TX_RING_ADDR(sc, i + 1);
2481 		rd->stge_tx_ring[i].tfd_next = htole64(addr);
2482 		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
2483 		txd = &sc->sc_cdata.stge_txdesc[i];
2484 		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
2485 	}
2486 
2487 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
2488 	    sc->sc_cdata.stge_tx_ring_map,
2489 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2490 
2491 }
2492 
2493 static int
2494 stge_init_rx_ring(struct stge_softc *sc)
2495 {
2496 	struct stge_ring_data *rd;
2497 	bus_addr_t addr;
2498 	int i;
2499 
2500 	sc->sc_cdata.stge_rx_cons = 0;
2501 	STGE_RXCHAIN_RESET(sc);
2502 
2503 	rd = &sc->sc_rdata;
2504 	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
2505 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2506 		if (stge_newbuf(sc, i) != 0)
2507 			return (ENOBUFS);
2508 		if (i == (STGE_RX_RING_CNT - 1))
2509 			addr = STGE_RX_RING_ADDR(sc, 0);
2510 		else
2511 			addr = STGE_RX_RING_ADDR(sc, i + 1);
2512 		rd->stge_rx_ring[i].rfd_next = htole64(addr);
2513 		rd->stge_rx_ring[i].rfd_status = 0;
2514 	}
2515 
2516 	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
2517 	    sc->sc_cdata.stge_rx_ring_map,
2518 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2519 
2520 	return (0);
2521 }
2522 
2523 /*
2524  * stge_newbuf:
2525  *
2526  *	Add a receive buffer to the indicated descriptor.
2527  */
2528 static int
2529 stge_newbuf(struct stge_softc *sc, int idx)
2530 {
2531 	struct stge_rxdesc *rxd;
2532 	struct stge_rfd *rfd;
2533 	struct mbuf *m;
2534 	bus_dma_segment_t segs[1];
2535 	bus_dmamap_t map;
2536 	int nsegs;
2537 
2538 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2539 	if (m == NULL)
2540 		return (ENOBUFS);
2541 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2542 	/*
2543 	 * The hardware requires a 4-byte aligned DMA address when a
2544 	 * jumbo frame is used.
2545 	 */
2546 	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
2547 		m_adj(m, ETHER_ALIGN);
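	/*
	 * For standard frames, trimming ETHER_ALIGN (2) bytes leaves
	 * the IP header 32-bit aligned after the 14-byte Ethernet
	 * header.  Jumbo-frame buffers must keep the 4-byte aligned
	 * start for DMA, so on strict-alignment machines the payload
	 * is realigned in software afterwards (see stge_fixup_rx()).
	 */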
2548 
2549 	if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag,
2550 	    sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
2551 		m_freem(m);
2552 		return (ENOBUFS);
2553 	}
2554 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2555 
2556 	rxd = &sc->sc_cdata.stge_rxdesc[idx];
2557 	if (rxd->rx_m != NULL) {
2558 		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2559 		    BUS_DMASYNC_POSTREAD);
2560 		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
2561 	}
2562 	map = rxd->rx_dmamap;
2563 	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
2564 	sc->sc_cdata.stge_rx_sparemap = map;
2565 	bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2566 	    BUS_DMASYNC_PREREAD);
2567 	rxd->rx_m = m;
2568 
2569 	rfd = &sc->sc_rdata.stge_rx_ring[idx];
2570 	rfd->rfd_frag.frag_word0 =
2571 	    htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
2572 	rfd->rfd_status = 0;
2573 
2574 	return (0);
2575 }
2576 
2577 /*
2578  * stge_set_filter:
2579  *
2580  *	Set up the receive filter.
2581  */
2582 static void
2583 stge_set_filter(struct stge_softc *sc)
2584 {
2585 	struct ifnet *ifp;
2586 	uint16_t mode;
2587 
2588 	STGE_LOCK_ASSERT(sc);
2589 
2590 	ifp = sc->sc_ifp;
2591 
2592 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2593 	mode |= RM_ReceiveUnicast;
2594 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
2595 		mode |= RM_ReceiveBroadcast;
2596 	else
2597 		mode &= ~RM_ReceiveBroadcast;
2598 	if ((ifp->if_flags & IFF_PROMISC) != 0)
2599 		mode |= RM_ReceiveAllFrames;
2600 	else
2601 		mode &= ~RM_ReceiveAllFrames;
2602 
2603 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2604 }
2605 
2606 static void
2607 stge_set_multi(struct stge_softc *sc)
2608 {
2609 	struct ifnet *ifp;
2610 	struct ifmultiaddr *ifma;
2611 	uint32_t crc;
2612 	uint32_t mchash[2];
2613 	uint16_t mode;
2614 	int count;
2615 
2616 	STGE_LOCK_ASSERT(sc);
2617 
2618 	ifp = sc->sc_ifp;
2619 
2620 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2621 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2622 		if ((ifp->if_flags & IFF_PROMISC) != 0)
2623 			mode |= RM_ReceiveAllFrames;
2624 		else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2625 			mode |= RM_ReceiveMulticast;
2626 		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2627 		return;
2628 	}
2629 
2630 	/* Clear existing filters. */
2631 	CSR_WRITE_4(sc, STGE_HashTable0, 0);
2632 	CSR_WRITE_4(sc, STGE_HashTable1, 0);
2633 
2634 	/*
2635 	 * Set up the multicast address filter by passing all multicast
2636 	 * addresses through a CRC generator, and then using the low-order
2637 	 * 6 bits as an index into the 64 bit multicast hash table.  The
2638 	 * high order bits select the register, while the rest of the bits
2639 	 * select the bit within the register.
2640 	 */
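	/*
	 * For example, a CRC whose low 6 bits are 0x2b (binary 101011)
	 * sets bit 11 (0x2b & 0x1f) of mchash[1] (0x2b >> 5).
	 */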
2641 
2642 	bzero(mchash, sizeof(mchash));
2643 
2644 	count = 0;
2645 	IF_ADDR_LOCK(ifp);
2646 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2647 		if (ifma->ifma_addr->sa_family != AF_LINK)
2648 			continue;
2649 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2650 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2651 
2652 		/* Just want the 6 least significant bits. */
2653 		crc &= 0x3f;
2654 
2655 		/* Set the corresponding bit in the hash table. */
2656 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2657 		count++;
2658 	}
2659 	IF_ADDR_UNLOCK(ifp);
2660 
2661 	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
2662 	if (count > 0)
2663 		mode |= RM_ReceiveMulticastHash;
2664 	else
2665 		mode &= ~RM_ReceiveMulticastHash;
2666 
2667 	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
2668 	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
2669 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2670 }
2671 
2672 static int
2673 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2674 {
2675 	int error, value;
2676 
2677 	if (!arg1)
2678 		return (EINVAL);
2679 	value = *(int *)arg1;
2680 	error = sysctl_handle_int(oidp, &value, 0, req);
2681 	if (error || !req->newptr)
2682 		return (error);
2683 	if (value < low || value > high)
2684 		return (EINVAL);
2685 	*(int *)arg1 = value;
2686 
2687 	return (0);
2688 }
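
/*
 * A sketch (names and exact arguments are illustrative, not verified
 * against stge_attach()) of how the two handlers below are expected
 * to be registered so that sysctl_int_range() clamps the tunables:
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->sc_dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev)),
 *	    OID_AUTO, "rxint_nframe", CTLTYPE_INT | CTLFLAG_RW,
 *	    &sc->sc_rxint_nframe, 0, sysctl_hw_stge_rxint_nframe,
 *	    "I", "stge rx interrupt nframe");
 *
 * Writes outside [low, high] are rejected with EINVAL, leaving the
 * current value untouched.
 */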
2689 
2690 static int
2691 sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
2692 {
2693 	return (sysctl_int_range(oidp, arg1, arg2, req,
2694 	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
2695 }
2696 
2697 static int
2698 sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
2699 {
2700 	return (sysctl_int_range(oidp, arg1, arg2, req,
2701 	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
2702 }
2703