xref: /freebsd/sys/dev/stge/if_stge.c (revision 2b743a9e9ddc6736208dc8ca1ce06ce64ad20a19)
1 /*	$NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the NetBSD
21  *	Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Device driver for the Sundance Tech. TC9021 10/100/1000
41  * Ethernet controller.
42  */
43 
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
46 
47 #ifdef HAVE_KERNEL_OPTION_HEADERS
48 #include "opt_device_polling.h"
49 #endif
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/endian.h>
54 #include <sys/mbuf.h>
55 #include <sys/malloc.h>
56 #include <sys/kernel.h>
57 #include <sys/module.h>
58 #include <sys/socket.h>
59 #include <sys/sockio.h>
60 #include <sys/sysctl.h>
61 #include <sys/taskqueue.h>
62 
63 #include <net/bpf.h>
64 #include <net/ethernet.h>
65 #include <net/if.h>
66 #include <net/if_dl.h>
67 #include <net/if_media.h>
68 #include <net/if_types.h>
69 #include <net/if_vlan_var.h>
70 
71 #include <machine/bus.h>
72 #include <machine/resource.h>
73 #include <sys/bus.h>
74 #include <sys/rman.h>
75 
76 #include <dev/mii/mii.h>
77 #include <dev/mii/miivar.h>
78 
79 #include <dev/pci/pcireg.h>
80 #include <dev/pci/pcivar.h>
81 
82 #include <dev/stge/if_stgereg.h>
83 
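/*
 * Transmit checksum offload features advertised through if_hwassist:
 * IPv4 header, TCP, and UDP checksums.
 */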
84 #define	STGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
85 
86 MODULE_DEPEND(stge, pci, 1, 1, 1);
87 MODULE_DEPEND(stge, ether, 1, 1, 1);
88 MODULE_DEPEND(stge, miibus, 1, 1, 1);
89 
90 /* "device miibus" required.  See GENERIC if you get errors here. */
91 #include "miibus_if.h"
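
/*
 * Illustrative kernel configuration lines (cf. the note above; see
 * stge(4)):
 *
 *	device	miibus		# MII PHY bus support
 *	device	stge		# Sundance/Tamarack TC9021 Ethernet
 */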
92 
93 /*
94  * Devices supported by this driver.
95  */
96 static struct stge_product {
97 	uint16_t	stge_vendorid;
98 	uint16_t	stge_deviceid;
99 	const char	*stge_name;
100 } stge_products[] = {
101 	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST1023,
102 	  "Sundance ST-1023 Gigabit Ethernet" },
103 
104 	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST2021,
105 	  "Sundance ST-2021 Gigabit Ethernet" },
106 
107 	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021,
108 	  "Tamarack TC9021 Gigabit Ethernet" },
109 
110 	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021_ALT,
111 	  "Tamarack TC9021 Gigabit Ethernet" },
112 
113 	/*
114 	 * The Sundance sample boards use the Sundance vendor ID,
115 	 * but the Tamarack product ID.
116 	 */
117 	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021,
118 	  "Sundance TC9021 Gigabit Ethernet" },
119 
120 	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021_ALT,
121 	  "Sundance TC9021 Gigabit Ethernet" },
122 
123 	{ VENDOR_DLINK,		DEVICEID_DLINK_DL4000,
124 	  "D-Link DL-4000 Gigabit Ethernet" },
125 
126 	{ VENDOR_ANTARES,	DEVICEID_ANTARES_TC9021,
127 	  "Antares Gigabit Ethernet" }
128 };
129 
130 static int	stge_probe(device_t);
131 static int	stge_attach(device_t);
132 static int	stge_detach(device_t);
133 static void	stge_shutdown(device_t);
134 static int	stge_suspend(device_t);
135 static int	stge_resume(device_t);
136 
137 static int	stge_encap(struct stge_softc *, struct mbuf **);
138 static void	stge_start(struct ifnet *);
139 static void	stge_start_locked(struct ifnet *);
140 static void	stge_watchdog(struct ifnet *);
141 static int	stge_ioctl(struct ifnet *, u_long, caddr_t);
142 static void	stge_init(void *);
143 static void	stge_init_locked(struct stge_softc *);
144 static void	stge_vlan_setup(struct stge_softc *);
145 static void	stge_stop(struct stge_softc *);
146 static void	stge_start_tx(struct stge_softc *);
147 static void	stge_start_rx(struct stge_softc *);
148 static void	stge_stop_tx(struct stge_softc *);
149 static void	stge_stop_rx(struct stge_softc *);
150 
151 static void	stge_reset(struct stge_softc *, uint32_t);
152 static int	stge_eeprom_wait(struct stge_softc *);
153 static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
154 static void	stge_tick(void *);
155 static void	stge_stats_update(struct stge_softc *);
156 static void	stge_set_filter(struct stge_softc *);
157 static void	stge_set_multi(struct stge_softc *);
158 
159 static void	stge_link_task(void *, int);
160 static void	stge_intr(void *);
161 static __inline int stge_tx_error(struct stge_softc *);
162 static void	stge_txeof(struct stge_softc *);
163 static void	stge_rxeof(struct stge_softc *);
164 static __inline void stge_discard_rxbuf(struct stge_softc *, int);
165 static int	stge_newbuf(struct stge_softc *, int);
166 #ifndef __NO_STRICT_ALIGNMENT
167 static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
168 #endif
169 
170 static void	stge_mii_sync(struct stge_softc *);
171 static void	stge_mii_send(struct stge_softc *, uint32_t, int);
172 static int	stge_mii_readreg(struct stge_softc *, struct stge_mii_frame *);
173 static int	stge_mii_writereg(struct stge_softc *, struct stge_mii_frame *);
174 static int	stge_miibus_readreg(device_t, int, int);
175 static int	stge_miibus_writereg(device_t, int, int, int);
176 static void	stge_miibus_statchg(device_t);
177 static int	stge_mediachange(struct ifnet *);
178 static void	stge_mediastatus(struct ifnet *, struct ifmediareq *);
179 
180 static void	stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
181 static int	stge_dma_alloc(struct stge_softc *);
182 static void	stge_dma_free(struct stge_softc *);
183 static void	stge_dma_wait(struct stge_softc *);
184 static void	stge_init_tx_ring(struct stge_softc *);
185 static int	stge_init_rx_ring(struct stge_softc *);
186 #ifdef DEVICE_POLLING
187 static void	stge_poll(struct ifnet *, enum poll_cmd, int);
188 #endif
189 
190 static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
191 static int	sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
192 static int	sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);
193 
194 static device_method_t stge_methods[] = {
195 	/* Device interface */
196 	DEVMETHOD(device_probe,		stge_probe),
197 	DEVMETHOD(device_attach,	stge_attach),
198 	DEVMETHOD(device_detach,	stge_detach),
199 	DEVMETHOD(device_shutdown,	stge_shutdown),
200 	DEVMETHOD(device_suspend,	stge_suspend),
201 	DEVMETHOD(device_resume,	stge_resume),
202 
203 	/* MII interface */
204 	DEVMETHOD(miibus_readreg,	stge_miibus_readreg),
205 	DEVMETHOD(miibus_writereg,	stge_miibus_writereg),
206 	DEVMETHOD(miibus_statchg,	stge_miibus_statchg),
207 
208 	{ 0, 0 }
209 
210 };
211 
212 static driver_t stge_driver = {
213 	"stge",
214 	stge_methods,
215 	sizeof(struct stge_softc)
216 };
217 
218 static devclass_t stge_devclass;
219 
220 DRIVER_MODULE(stge, pci, stge_driver, stge_devclass, 0, 0);
221 DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0);
222 
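/*
 * Resource lists for bus_alloc_resources(); the { -1, 0, 0 } entry
 * terminates each list.
 */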
223 static struct resource_spec stge_res_spec_io[] = {
224 	{ SYS_RES_IOPORT,	PCIR_BAR(0),	RF_ACTIVE },
225 	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
226 	{ -1,			0,		0 }
227 };
228 
229 static struct resource_spec stge_res_spec_mem[] = {
230 	{ SYS_RES_MEMORY,	PCIR_BAR(1),	RF_ACTIVE },
231 	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
232 	{ -1,			0,		0 }
233 };
234 
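/*
 * Read-modify-write helpers used to bit-bang the MII management
 * lines (clock, data, direction) in the PhyCtrl register.
 */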
235 #define	MII_SET(x)	\
236 	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) | (x))
237 #define	MII_CLR(x)	\
238 	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) & ~(x))
239 
240 /*
241  * Sync the PHYs by setting data bit and strobing the clock 32 times.
242  */
243 static void
244 stge_mii_sync(struct stge_softc *sc)
245 {
246 	int i;
247 
248 	MII_SET(PC_MgmtDir | PC_MgmtData);
249 
250 	for (i = 0; i < 32; i++) {
251 		MII_SET(PC_MgmtClk);
252 		DELAY(1);
253 		MII_CLR(PC_MgmtClk);
254 		DELAY(1);
255 	}
256 }
257 
258 /*
259  * Clock a series of bits through the MII.
260  */
261 static void
262 stge_mii_send(struct stge_softc *sc, uint32_t bits, int cnt)
263 {
264 	int i;
265 
266 	MII_CLR(PC_MgmtClk);
267 
268 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
269 		if (bits & i)
270 			MII_SET(PC_MgmtData);
271 		else
272 			MII_CLR(PC_MgmtData);
273 		DELAY(1);
274 		MII_CLR(PC_MgmtClk);
275 		DELAY(1);
276 		MII_SET(PC_MgmtClk);
277 	}
278 }
279 
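/*
 * For reference, the management frames bit-banged below follow IEEE
 * 802.3 Clause 22:
 *
 *	<preamble: 32 ones> <start: 01> <opcode: 10=read, 01=write>
 *	<phyaddr: 5 bits> <regaddr: 5 bits> <turnaround: 2> <data: 16>
 *
 * stge_mii_sync() supplies the preamble and stge_mii_send() clocks
 * out the remaining fields, MSB first.
 */
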
280 /*
281  * Read a PHY register through the MII.
282  */
283 static int
284 stge_mii_readreg(struct stge_softc *sc, struct stge_mii_frame *frame)
285 {
286 	int i, ack;
287 
288 	/*
289 	 * Set up frame for RX.
290 	 */
291 	frame->mii_stdelim = STGE_MII_STARTDELIM;
292 	frame->mii_opcode = STGE_MII_READOP;
293 	frame->mii_turnaround = 0;
294 	frame->mii_data = 0;
295 
296 	CSR_WRITE_1(sc, STGE_PhyCtrl, 0 | sc->sc_PhyCtrl);
297 	/*
298  	 * Turn on data xmit.
299 	 */
300 	MII_SET(PC_MgmtDir);
301 
302 	stge_mii_sync(sc);
303 
304 	/*
305 	 * Send command/address info.
306 	 */
307 	stge_mii_send(sc, frame->mii_stdelim, 2);
308 	stge_mii_send(sc, frame->mii_opcode, 2);
309 	stge_mii_send(sc, frame->mii_phyaddr, 5);
310 	stge_mii_send(sc, frame->mii_regaddr, 5);
311 
312 	/* Turn off xmit. */
313 	MII_CLR(PC_MgmtDir);
314 
315 	/* Idle bit */
316 	MII_CLR((PC_MgmtClk | PC_MgmtData));
317 	DELAY(1);
318 	MII_SET(PC_MgmtClk);
319 	DELAY(1);
320 
321 	/* Check for ack */
322 	MII_CLR(PC_MgmtClk);
323 	DELAY(1);
324 	ack = CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData;
325 	MII_SET(PC_MgmtClk);
326 	DELAY(1);
327 
328 	/*
329 	 * Now try reading data bits. If the ack failed, we still
330 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
331 	 */
332 	if (ack) {
333 		for (i = 0; i < 16; i++) {
334 			MII_CLR(PC_MgmtClk);
335 			DELAY(1);
336 			MII_SET(PC_MgmtClk);
337 			DELAY(1);
338 		}
339 		goto fail;
340 	}
341 
342 	for (i = 0x8000; i; i >>= 1) {
343 		MII_CLR(PC_MgmtClk);
344 		DELAY(1);
345 		if (!ack) {
346 			if (CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData)
347 				frame->mii_data |= i;
348 			DELAY(1);
349 		}
350 		MII_SET(PC_MgmtClk);
351 		DELAY(1);
352 	}
353 
354 fail:
355 	MII_CLR(PC_MgmtClk);
356 	DELAY(1);
357 	MII_SET(PC_MgmtClk);
358 	DELAY(1);
359 
360 	if (ack)
361 		return (1);
362 	return (0);
363 }
364 
365 /*
366  * Write to a PHY register through the MII.
367  */
368 static int
369 stge_mii_writereg(struct stge_softc *sc, struct stge_mii_frame *frame)
370 {
371 
372 	/*
373 	 * Set up frame for TX.
374 	 */
375 	frame->mii_stdelim = STGE_MII_STARTDELIM;
376 	frame->mii_opcode = STGE_MII_WRITEOP;
377 	frame->mii_turnaround = STGE_MII_TURNAROUND;
378 
379 	/*
380  	 * Turn on data output.
381 	 */
382 	MII_SET(PC_MgmtDir);
383 
384 	stge_mii_sync(sc);
385 
386 	stge_mii_send(sc, frame->mii_stdelim, 2);
387 	stge_mii_send(sc, frame->mii_opcode, 2);
388 	stge_mii_send(sc, frame->mii_phyaddr, 5);
389 	stge_mii_send(sc, frame->mii_regaddr, 5);
390 	stge_mii_send(sc, frame->mii_turnaround, 2);
391 	stge_mii_send(sc, frame->mii_data, 16);
392 
393 	/* Idle bit. */
394 	MII_SET(PC_MgmtClk);
395 	DELAY(1);
396 	MII_CLR(PC_MgmtClk);
397 	DELAY(1);
398 
399 	/*
400 	 * Turn off xmit.
401 	 */
402 	MII_CLR(PC_MgmtDir);
403 
404 	return(0);
405 }
406 
407 /*
408  * stge_miibus_readreg:	[mii interface function]
409  *
410  *	Read a PHY register on the MII of the TC9021.
411  */
412 static int
413 stge_miibus_readreg(device_t dev, int phy, int reg)
414 {
415 	struct stge_softc *sc;
416 	struct stge_mii_frame frame;
417 	int error;
418 
419 	sc = device_get_softc(dev);
420 
421 	if (reg == STGE_PhyCtrl) {
422 		/* XXX allow ip1000phy to read the STGE_PhyCtrl register. */
423 		STGE_MII_LOCK(sc);
424 		error = CSR_READ_1(sc, STGE_PhyCtrl);
425 		STGE_MII_UNLOCK(sc);
426 		return (error);
427 	}
428 	bzero(&frame, sizeof(frame));
429 	frame.mii_phyaddr = phy;
430 	frame.mii_regaddr = reg;
431 
432 	STGE_MII_LOCK(sc);
433 	error = stge_mii_readreg(sc, &frame);
434 	STGE_MII_UNLOCK(sc);
435 
436 	if (error != 0) {
437 		/* Don't show errors for PHY probe request */
438 		if (reg != 1)
439 			device_printf(sc->sc_dev, "phy read fail\n");
440 		return (0);
441 	}
442 	return (frame.mii_data);
443 }
444 
445 /*
446  * stge_miibus_writereg:	[mii interface function]
447  *
448  *	Write a PHY register on the MII of the TC9021.
449  */
450 static int
451 stge_miibus_writereg(device_t dev, int phy, int reg, int val)
452 {
453 	struct stge_softc *sc;
454 	struct stge_mii_frame frame;
455 	int error;
456 
457 	sc = device_get_softc(dev);
458 
459 	bzero(&frame, sizeof(frame));
460 	frame.mii_phyaddr = phy;
461 	frame.mii_regaddr = reg;
462 	frame.mii_data = val;
463 
464 	STGE_MII_LOCK(sc);
465 	error = stge_mii_writereg(sc, &frame);
466 	STGE_MII_UNLOCK(sc);
467 
468 	if (error != 0)
469 		device_printf(sc->sc_dev, "phy write fail\n");
470 	return (0);
471 }
472 
473 /*
474  * stge_miibus_statchg:	[mii interface function]
475  *
476  *	Callback from MII layer when media changes.
477  */
478 static void
479 stge_miibus_statchg(device_t dev)
480 {
481 	struct stge_softc *sc;
482 	struct mii_data *mii;
483 
484 	sc = device_get_softc(dev);
485 	mii = device_get_softc(sc->sc_miibus);
486 
487 	STGE_MII_LOCK(sc);
488 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE) {
489 		STGE_MII_UNLOCK(sc);
490 		return;
491 	}
492 
493 	sc->sc_MACCtrl = 0;
494 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
495 		sc->sc_MACCtrl |= MC_DuplexSelect;
496 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) != 0)
497 		sc->sc_MACCtrl |= MC_RxFlowControlEnable;
498 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) != 0)
499 		sc->sc_MACCtrl |= MC_TxFlowControlEnable;
500 	/*
501 	 * We can't access the STGE_MACCtrl register in this context due
502 	 * to races between the MII layer and the driver, both of which
503 	 * access the register to program the MAC. To solve the race, we
504 	 * defer STGE_MACCtrl programming until we know we are out of MII.
505 	 */
506 	taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task);
507 	STGE_MII_UNLOCK(sc);
508 }
509 
510 /*
511  * stge_mediastatus:	[ifmedia interface function]
512  *
513  *	Get the current interface media status.
514  */
515 static void
516 stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
517 {
518 	struct stge_softc *sc;
519 	struct mii_data *mii;
520 
521 	sc = ifp->if_softc;
522 	mii = device_get_softc(sc->sc_miibus);
523 
524 	mii_pollstat(mii);
525 	ifmr->ifm_status = mii->mii_media_status;
526 	ifmr->ifm_active = mii->mii_media_active;
527 }
528 
529 /*
530  * stge_mediachange:	[ifmedia interface function]
531  *
532  *	Set hardware to newly-selected media.
533  */
534 static int
535 stge_mediachange(struct ifnet *ifp)
536 {
537 	struct stge_softc *sc;
538 	struct mii_data *mii;
539 
540 	sc = ifp->if_softc;
541 	mii = device_get_softc(sc->sc_miibus);
542 	mii_mediachg(mii);
543 
544 	return (0);
545 }
546 
547 static int
548 stge_eeprom_wait(struct stge_softc *sc)
549 {
550 	int i;
551 
552 	for (i = 0; i < STGE_TIMEOUT; i++) {
553 		DELAY(1000);
554 		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
555 			return (0);
556 	}
557 	return (1);
558 }
559 
560 /*
561  * stge_read_eeprom:
562  *
563  *	Read data from the serial EEPROM.
564  */
565 static void
566 stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
567 {
568 
569 	if (stge_eeprom_wait(sc))
570 		device_printf(sc->sc_dev, "EEPROM failed to come ready\n");
571 
572 	CSR_WRITE_2(sc, STGE_EepromCtrl,
573 	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
574 	if (stge_eeprom_wait(sc))
575 		device_printf(sc->sc_dev, "EEPROM read timed out\n");
576 	*data = CSR_READ_2(sc, STGE_EepromData);
577 }
578 
579 
580 static int
581 stge_probe(device_t dev)
582 {
583 	struct stge_product *sp;
584 	int i;
585 	uint16_t vendor, devid;
586 
587 	vendor = pci_get_vendor(dev);
588 	devid = pci_get_device(dev);
589 	sp = stge_products;
590 	for (i = 0; i < sizeof(stge_products)/sizeof(stge_products[0]);
591 	    i++, sp++) {
592 		if (vendor == sp->stge_vendorid &&
593 		    devid == sp->stge_deviceid) {
594 			device_set_desc(dev, sp->stge_name);
595 			return (BUS_PROBE_DEFAULT);
596 		}
597 	}
598 
599 	return (ENXIO);
600 }
601 
602 static int
603 stge_attach(device_t dev)
604 {
605 	struct stge_softc *sc;
606 	struct ifnet *ifp;
607 	uint8_t enaddr[ETHER_ADDR_LEN];
608 	int error, i;
609 	uint16_t cmd;
610 	uint32_t val;
611 
612 	error = 0;
613 	sc = device_get_softc(dev);
614 	sc->sc_dev = dev;
615 
616 	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
617 	    MTX_DEF);
618 	mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF);
619 	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
620 	TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc);
621 
622 	/*
623 	 * Map the device.
624 	 */
625 	pci_enable_busmaster(dev);
626 	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
627 	val = pci_read_config(dev, PCIR_BAR(1), 4);
628 	if ((val & 0x01) != 0)
629 		sc->sc_spec = stge_res_spec_mem;
630 	else {
631 		val = pci_read_config(dev, PCIR_BAR(0), 4);
632 		if ((val & 0x01) == 0) {
633 			device_printf(sc->sc_dev, "couldn't locate IO BAR\n");
634 			error = ENXIO;
635 			goto fail;
636 		}
637 		sc->sc_spec = stge_res_spec_io;
638 	}
639 	error = bus_alloc_resources(dev, sc->sc_spec, sc->sc_res);
640 	if (error != 0) {
641 		device_printf(dev, "couldn't allocate %s resources\n",
642 		    sc->sc_spec == stge_res_spec_mem ? "memory" : "I/O");
643 		goto fail;
644 	}
645 	sc->sc_rev = pci_get_revid(dev);
646 
647 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
648 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
649 	    "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0,
650 	    sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe");
651 
652 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
653 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
654 	    "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0,
655 	    sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait");
656 
657 	/* Pull in device tunables. */
658 	sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
659 	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
660 	    "rxint_nframe", &sc->sc_rxint_nframe);
661 	if (error == 0) {
662 		if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN ||
663 		    sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) {
664 			device_printf(dev, "rxint_nframe value out of range; "
665 			    "using default: %d\n", STGE_RXINT_NFRAME_DEFAULT);
666 			sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
667 		}
668 	}
669 
670 	sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
671 	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
672 	    "rxint_dmawait", &sc->sc_rxint_dmawait);
673 	if (error == 0) {
674 		if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN ||
675 		    sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) {
676 			device_printf(dev, "rxint_dmawait value out of range; "
677 			    "using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT);
678 			sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
679 		}
680 	}
681 
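	/*
	 * Both knobs may also be set as device hints, e.g. from
	 * /boot/device.hints (unit 0 and the values shown are only
	 * illustrative):
	 *
	 *	hint.stge.0.rxint_nframe="8"
	 *	hint.stge.0.rxint_dmawait="30"
	 *
	 * At runtime they are exposed as the dev.stge.0.rxint_nframe
	 * and dev.stge.0.rxint_dmawait sysctls created above.
	 */
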
682 	if ((error = stge_dma_alloc(sc)) != 0)
683 		goto fail;
684 
685 	/*
686 	 * Determine if we're copper or fiber.  It affects how we
687 	 * reset the card.
688 	 */
689 	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
690 		sc->sc_usefiber = 1;
691 	else
692 		sc->sc_usefiber = 0;
693 
694 	/* Load LED configuration from EEPROM. */
695 	stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);
696 
697 	/*
698 	 * Reset the chip to a known state.
699 	 */
700 	STGE_LOCK(sc);
701 	stge_reset(sc, STGE_RESET_FULL);
702 	STGE_UNLOCK(sc);
703 
704 	/*
705 	 * Reading the station address from the EEPROM doesn't seem
706 	 * to work, at least on my sample boards.  Instead, since
707 	 * the reset sequence does AutoInit, read it from the station
708 	 * address registers. For Sundance 1023 you can only read it
709 	 * from EEPROM.
710 	 */
711 	if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
712 		uint16_t v;
713 
714 		v = CSR_READ_2(sc, STGE_StationAddress0);
715 		enaddr[0] = v & 0xff;
716 		enaddr[1] = v >> 8;
717 		v = CSR_READ_2(sc, STGE_StationAddress1);
718 		enaddr[2] = v & 0xff;
719 		enaddr[3] = v >> 8;
720 		v = CSR_READ_2(sc, STGE_StationAddress2);
721 		enaddr[4] = v & 0xff;
722 		enaddr[5] = v >> 8;
723 		sc->sc_stge1023 = 0;
724 	} else {
725 		uint16_t myaddr[ETHER_ADDR_LEN / 2];
726 		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
727 			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
728 			    &myaddr[i]);
729 			myaddr[i] = le16toh(myaddr[i]);
730 		}
731 		bcopy(myaddr, enaddr, sizeof(enaddr));
732 		sc->sc_stge1023 = 1;
733 	}
734 
735 	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
736 	if (ifp == NULL) {
737 		device_printf(sc->sc_dev, "if_alloc() failed\n");
738 		error = ENXIO;
739 		goto fail;
740 	}
741 
742 	ifp->if_softc = sc;
743 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
744 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
745 	ifp->if_ioctl = stge_ioctl;
746 	ifp->if_start = stge_start;
747 	ifp->if_watchdog = stge_watchdog;
748 	ifp->if_init = stge_init;
749 	ifp->if_mtu = ETHERMTU;
750 	ifp->if_snd.ifq_drv_maxlen = STGE_TX_RING_CNT - 1;
751 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
752 	IFQ_SET_READY(&ifp->if_snd);
753 	/* Revision B3 and earlier chips (rev. ID < 0x0c) have a checksum bug. */
754 	if (sc->sc_rev >= 0x0c) {
755 		ifp->if_hwassist = STGE_CSUM_FEATURES;
756 		ifp->if_capabilities = IFCAP_HWCSUM;
757 	} else {
758 		ifp->if_hwassist = 0;
759 		ifp->if_capabilities = 0;
760 	}
761 	ifp->if_capenable = ifp->if_capabilities;
762 
763 	/*
764 	 * Read some important bits from the PhyCtrl register.
765 	 */
766 	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
767 	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);
768 
769 	/* Set up MII bus. */
770 	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, stge_mediachange,
771 	    stge_mediastatus)) != 0) {
772 		device_printf(sc->sc_dev, "no PHY found!\n");
773 		goto fail;
774 	}
775 
776 	ether_ifattach(ifp, enaddr);
777 
778 	/* VLAN capability setup */
779 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
780 	if (sc->sc_rev >= 0x0c)
781 		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
782 	ifp->if_capenable = ifp->if_capabilities;
783 #ifdef DEVICE_POLLING
784 	ifp->if_capabilities |= IFCAP_POLLING;
785 #endif
786 	/*
787 	 * Tell the upper layer(s) we support long frames.
788 	 * Must appear after the call to ether_ifattach() because
789 	 * ether_ifattach() sets ifi_hdrlen to the default value.
790 	 */
791 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
792 
793 	/*
794 	 * The manual recommends disabling early transmit, so we
795 	 * do.  It's disabled anyway, if using IP checksumming,
796 	 * since the entire packet must be in the FIFO in order
797 	 * for the chip to perform the checksum.
798 	 */
799 	sc->sc_txthresh = 0x0fff;
800 
801 	/*
802 	 * Disable MWI if the PCI layer tells us to.
803 	 */
804 	sc->sc_DMACtrl = 0;
805 	if ((cmd & PCIM_CMD_MWRICEN) == 0)
806 		sc->sc_DMACtrl |= DMAC_MWIDisable;
807 
808 	/*
809 	 * Hookup IRQ
810 	 */
811 	error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE,
812 	    NULL, stge_intr, sc, &sc->sc_ih);
813 	if (error != 0) {
814 		ether_ifdetach(ifp);
815 		device_printf(sc->sc_dev, "couldn't set up IRQ\n");
816 		sc->sc_ifp = NULL;
817 		goto fail;
818 	}
819 
820 fail:
821 	if (error != 0)
822 		stge_detach(dev);
823 
824 	return (error);
825 }
826 
827 static int
828 stge_detach(device_t dev)
829 {
830 	struct stge_softc *sc;
831 	struct ifnet *ifp;
832 
833 	sc = device_get_softc(dev);
834 
835 	ifp = sc->sc_ifp;
836 #ifdef DEVICE_POLLING
837 	if (ifp && ifp->if_capenable & IFCAP_POLLING)
838 		ether_poll_deregister(ifp);
839 #endif
840 	if (device_is_attached(dev)) {
841 		STGE_LOCK(sc);
842 		/* XXX */
843 		sc->sc_detach = 1;
844 		stge_stop(sc);
845 		STGE_UNLOCK(sc);
846 		callout_drain(&sc->sc_tick_ch);
847 		taskqueue_drain(taskqueue_swi, &sc->sc_link_task);
848 		ether_ifdetach(ifp);
849 	}
850 
851 	if (sc->sc_miibus != NULL) {
852 		device_delete_child(dev, sc->sc_miibus);
853 		sc->sc_miibus = NULL;
854 	}
855 	bus_generic_detach(dev);
856 	stge_dma_free(sc);
857 
858 	if (ifp != NULL) {
859 		if_free(ifp);
860 		sc->sc_ifp = NULL;
861 	}
862 
863 	if (sc->sc_ih) {
864 		bus_teardown_intr(dev, sc->sc_res[1], sc->sc_ih);
865 		sc->sc_ih = NULL;
866 	}
867 	bus_release_resources(dev, sc->sc_spec, sc->sc_res);
868 
869 	mtx_destroy(&sc->sc_mii_mtx);
870 	mtx_destroy(&sc->sc_mtx);
871 
872 	return (0);
873 }
874 
875 struct stge_dmamap_arg {
876 	bus_addr_t	stge_busaddr;
877 };
878 
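/*
 * bus_dmamap_load() callback: records the bus address of the single
 * DMA segment so callers can recover a ring's physical base address.
 */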
879 static void
880 stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
881 {
882 	struct stge_dmamap_arg *ctx;
883 
884 	if (error != 0)
885 		return;
886 
887 	ctx = (struct stge_dmamap_arg *)arg;
888 	ctx->stge_busaddr = segs[0].ds_addr;
889 }
890 
891 static int
892 stge_dma_alloc(struct stge_softc *sc)
893 {
894 	struct stge_dmamap_arg ctx;
895 	struct stge_txdesc *txd;
896 	struct stge_rxdesc *rxd;
897 	int error, i;
898 
899 	/* create parent tag. */
900 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),/* parent */
901 		    1, 0,			/* algnmnt, boundary */
902 		    STGE_DMA_MAXADDR,		/* lowaddr */
903 		    BUS_SPACE_MAXADDR,		/* highaddr */
904 		    NULL, NULL,			/* filter, filterarg */
905 		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
906 		    0,				/* nsegments */
907 		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
908 		    0,				/* flags */
909 		    NULL, NULL,			/* lockfunc, lockarg */
910 		    &sc->sc_cdata.stge_parent_tag);
911 	if (error != 0) {
912 		device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
913 		goto fail;
914 	}
915 	/* create tag for Tx ring. */
916 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
917 		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
918 		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
919 		    BUS_SPACE_MAXADDR,		/* highaddr */
920 		    NULL, NULL,			/* filter, filterarg */
921 		    STGE_TX_RING_SZ,		/* maxsize */
922 		    1,				/* nsegments */
923 		    STGE_TX_RING_SZ,		/* maxsegsize */
924 		    0,				/* flags */
925 		    NULL, NULL,			/* lockfunc, lockarg */
926 		    &sc->sc_cdata.stge_tx_ring_tag);
927 	if (error != 0) {
928 		device_printf(sc->sc_dev,
929 		    "failed to allocate Tx ring DMA tag\n");
930 		goto fail;
931 	}
932 
933 	/* create tag for Rx ring. */
934 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
935 		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
936 		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
937 		    BUS_SPACE_MAXADDR,		/* highaddr */
938 		    NULL, NULL,			/* filter, filterarg */
939 		    STGE_RX_RING_SZ,		/* maxsize */
940 		    1,				/* nsegments */
941 		    STGE_RX_RING_SZ,		/* maxsegsize */
942 		    0,				/* flags */
943 		    NULL, NULL,			/* lockfunc, lockarg */
944 		    &sc->sc_cdata.stge_rx_ring_tag);
945 	if (error != 0) {
946 		device_printf(sc->sc_dev,
947 		    "failed to allocate Rx ring DMA tag\n");
948 		goto fail;
949 	}
950 
951 	/* create tag for Tx buffers. */
952 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
953 		    1, 0,			/* algnmnt, boundary */
954 		    BUS_SPACE_MAXADDR,		/* lowaddr */
955 		    BUS_SPACE_MAXADDR,		/* highaddr */
956 		    NULL, NULL,			/* filter, filterarg */
957 		    MCLBYTES * STGE_MAXTXSEGS,	/* maxsize */
958 		    STGE_MAXTXSEGS,		/* nsegments */
959 		    MCLBYTES,			/* maxsegsize */
960 		    0,				/* flags */
961 		    NULL, NULL,			/* lockfunc, lockarg */
962 		    &sc->sc_cdata.stge_tx_tag);
963 	if (error != 0) {
964 		device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
965 		goto fail;
966 	}
967 
968 	/* create tag for Rx buffers. */
969 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
970 		    1, 0,			/* algnmnt, boundary */
971 		    BUS_SPACE_MAXADDR,		/* lowaddr */
972 		    BUS_SPACE_MAXADDR,		/* highaddr */
973 		    NULL, NULL,			/* filter, filterarg */
974 		    MCLBYTES,			/* maxsize */
975 		    1,				/* nsegments */
976 		    MCLBYTES,			/* maxsegsize */
977 		    0,				/* flags */
978 		    NULL, NULL,			/* lockfunc, lockarg */
979 		    &sc->sc_cdata.stge_rx_tag);
980 	if (error != 0) {
981 		device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
982 		goto fail;
983 	}
984 
985 	/* allocate DMA'able memory and load the DMA map for Tx ring. */
986 	error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
987 	    (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
988 	    &sc->sc_cdata.stge_tx_ring_map);
989 	if (error != 0) {
990 		device_printf(sc->sc_dev,
991 		    "failed to allocate DMA'able memory for Tx ring\n");
992 		goto fail;
993 	}
994 
995 	ctx.stge_busaddr = 0;
996 	error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
997 	    sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
998 	    STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
999 	if (error != 0 || ctx.stge_busaddr == 0) {
1000 		device_printf(sc->sc_dev,
1001 		    "failed to load DMA'able memory for Tx ring\n");
1002 		goto fail;
1003 	}
1004 	sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;
1005 
1006 	/* allocate DMA'able memory and load the DMA map for Rx ring. */
1007 	error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
1008 	    (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1009 	    &sc->sc_cdata.stge_rx_ring_map);
1010 	if (error != 0) {
1011 		device_printf(sc->sc_dev,
1012 		    "failed to allocate DMA'able memory for Rx ring\n");
1013 		goto fail;
1014 	}
1015 
1016 	ctx.stge_busaddr = 0;
1017 	error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
1018 	    sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
1019 	    STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1020 	if (error != 0 || ctx.stge_busaddr == 0) {
1021 		device_printf(sc->sc_dev,
1022 		    "failed to load DMA'able memory for Rx ring\n");
1023 		goto fail;
1024 	}
1025 	sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;
1026 
1027 	/* create DMA maps for Tx buffers. */
1028 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
1029 		txd = &sc->sc_cdata.stge_txdesc[i];
1030 		txd->tx_m = NULL;
1031 		txd->tx_dmamap = 0;
1032 		error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
1033 		    &txd->tx_dmamap);
1034 		if (error != 0) {
1035 			device_printf(sc->sc_dev,
1036 			    "failed to create Tx dmamap\n");
1037 			goto fail;
1038 		}
1039 	}
1040 	/* create DMA maps for Rx buffers. */
1041 	if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
1042 	    &sc->sc_cdata.stge_rx_sparemap)) != 0) {
1043 		device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
1044 		goto fail;
1045 	}
1046 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
1047 		rxd = &sc->sc_cdata.stge_rxdesc[i];
1048 		rxd->rx_m = NULL;
1049 		rxd->rx_dmamap = 0;
1050 		error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
1051 		    &rxd->rx_dmamap);
1052 		if (error != 0) {
1053 			device_printf(sc->sc_dev,
1054 			    "failed to create Rx dmamap\n");
1055 			goto fail;
1056 		}
1057 	}
1058 
1059 fail:
1060 	return (error);
1061 }
1062 
1063 static void
1064 stge_dma_free(struct stge_softc *sc)
1065 {
1066 	struct stge_txdesc *txd;
1067 	struct stge_rxdesc *rxd;
1068 	int i;
1069 
1070 	/* Tx ring */
1071 	if (sc->sc_cdata.stge_tx_ring_tag) {
1072 		if (sc->sc_cdata.stge_tx_ring_map)
1073 			bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
1074 			    sc->sc_cdata.stge_tx_ring_map);
1075 		if (sc->sc_cdata.stge_tx_ring_map &&
1076 		    sc->sc_rdata.stge_tx_ring)
1077 			bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
1078 			    sc->sc_rdata.stge_tx_ring,
1079 			    sc->sc_cdata.stge_tx_ring_map);
1080 		sc->sc_rdata.stge_tx_ring = NULL;
1081 		sc->sc_cdata.stge_tx_ring_map = 0;
1082 		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
1083 		sc->sc_cdata.stge_tx_ring_tag = NULL;
1084 	}
1085 	/* Rx ring */
1086 	if (sc->sc_cdata.stge_rx_ring_tag) {
1087 		if (sc->sc_cdata.stge_rx_ring_map)
1088 			bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
1089 			    sc->sc_cdata.stge_rx_ring_map);
1090 		if (sc->sc_cdata.stge_rx_ring_map &&
1091 		    sc->sc_rdata.stge_rx_ring)
1092 			bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
1093 			    sc->sc_rdata.stge_rx_ring,
1094 			    sc->sc_cdata.stge_rx_ring_map);
1095 		sc->sc_rdata.stge_rx_ring = NULL;
1096 		sc->sc_cdata.stge_rx_ring_map = 0;
1097 		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
1098 		sc->sc_cdata.stge_rx_ring_tag = NULL;
1099 	}
1100 	/* Tx buffers */
1101 	if (sc->sc_cdata.stge_tx_tag) {
1102 		for (i = 0; i < STGE_TX_RING_CNT; i++) {
1103 			txd = &sc->sc_cdata.stge_txdesc[i];
1104 			if (txd->tx_dmamap) {
1105 				bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
1106 				    txd->tx_dmamap);
1107 				txd->tx_dmamap = 0;
1108 			}
1109 		}
1110 		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
1111 		sc->sc_cdata.stge_tx_tag = NULL;
1112 	}
1113 	/* Rx buffers */
1114 	if (sc->sc_cdata.stge_rx_tag) {
1115 		for (i = 0; i < STGE_RX_RING_CNT; i++) {
1116 			rxd = &sc->sc_cdata.stge_rxdesc[i];
1117 			if (rxd->rx_dmamap) {
1118 				bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
1119 				    rxd->rx_dmamap);
1120 				rxd->rx_dmamap = 0;
1121 			}
1122 		}
1123 		if (sc->sc_cdata.stge_rx_sparemap) {
1124 			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
1125 			    sc->sc_cdata.stge_rx_sparemap);
1126 			sc->sc_cdata.stge_rx_sparemap = 0;
1127 		}
1128 		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
1129 		sc->sc_cdata.stge_rx_tag = NULL;
1130 	}
1131 
1132 	if (sc->sc_cdata.stge_parent_tag) {
1133 		bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
1134 		sc->sc_cdata.stge_parent_tag = NULL;
1135 	}
1136 }
1137 
1138 /*
1139  * stge_shutdown:
1140  *
1141  *	Make sure the interface is stopped at reboot time.
1142  */
1143 static void
1144 stge_shutdown(device_t dev)
1145 {
1146 	struct stge_softc *sc;
1147 
1148 	sc = device_get_softc(dev);
1149 
1150 	STGE_LOCK(sc);
1151 	stge_stop(sc);
1152 	STGE_UNLOCK(sc);
1153 }
1154 
1155 static int
1156 stge_suspend(device_t dev)
1157 {
1158 	struct stge_softc *sc;
1159 
1160 	sc = device_get_softc(dev);
1161 
1162 	STGE_LOCK(sc);
1163 	stge_stop(sc);
1164 	sc->sc_suspended = 1;
1165 	STGE_UNLOCK(sc);
1166 
1167 	return (0);
1168 }
1169 
1170 static int
1171 stge_resume(device_t dev)
1172 {
1173 	struct stge_softc *sc;
1174 	struct ifnet *ifp;
1175 
1176 	sc = device_get_softc(dev);
1177 
1178 	STGE_LOCK(sc);
1179 	ifp = sc->sc_ifp;
1180 	if (ifp->if_flags & IFF_UP)
1181 		stge_init_locked(sc);
1182 
1183 	sc->sc_suspended = 0;
1184 	STGE_UNLOCK(sc);
1185 
1186 	return (0);
1187 }
1188 
1189 static void
1190 stge_dma_wait(struct stge_softc *sc)
1191 {
1192 	int i;
1193 
1194 	for (i = 0; i < STGE_TIMEOUT; i++) {
1195 		DELAY(2);
1196 		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
1197 			break;
1198 	}
1199 
1200 	if (i == STGE_TIMEOUT)
1201 		device_printf(sc->sc_dev, "DMA wait timed out\n");
1202 }
1203 
1204 static int
1205 stge_encap(struct stge_softc *sc, struct mbuf **m_head)
1206 {
1207 	struct stge_txdesc *txd;
1208 	struct stge_tfd *tfd;
1209 	struct mbuf *m;
1210 	bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
1211 	int error, i, nsegs, si;
1212 	uint64_t csum_flags, tfc;
1213 
1214 	STGE_LOCK_ASSERT(sc);
1215 
1216 	if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
1217 		return (ENOBUFS);
1218 
1219 	error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
1220 	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1221 	if (error == EFBIG) {
1222 		m = m_defrag(*m_head, M_DONTWAIT);
1223 		if (m == NULL) {
1224 			m_freem(*m_head);
1225 			*m_head = NULL;
1226 			return (ENOMEM);
1227 		}
1228 		*m_head = m;
1229 		error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
1230 		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1231 		if (error != 0) {
1232 			m_freem(*m_head);
1233 			*m_head = NULL;
1234 			return (error);
1235 		}
1236 	} else if (error != 0)
1237 		return (error);
1238 	if (nsegs == 0) {
1239 		m_freem(*m_head);
1240 		*m_head = NULL;
1241 		return (EIO);
1242 	}
1243 
1244 	m = *m_head;
1245 	csum_flags = 0;
1246 	if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
1247 		if (m->m_pkthdr.csum_flags & CSUM_IP)
1248 			csum_flags |= TFD_IPChecksumEnable;
1249 		if (m->m_pkthdr.csum_flags & CSUM_TCP)
1250 			csum_flags |= TFD_TCPChecksumEnable;
1251 		else if (m->m_pkthdr.csum_flags & CSUM_UDP)
1252 			csum_flags |= TFD_UDPChecksumEnable;
1253 	}
1254 
1255 	si = sc->sc_cdata.stge_tx_prod;
1256 	tfd = &sc->sc_rdata.stge_tx_ring[si];
1257 	for (i = 0; i < nsegs; i++)
1258 		tfd->tfd_frags[i].frag_word0 =
1259 		    htole64(FRAG_ADDR(txsegs[i].ds_addr) |
1260 		    FRAG_LEN(txsegs[i].ds_len));
1261 	sc->sc_cdata.stge_tx_cnt++;
1262 
1263 	tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
1264 	    TFD_FragCount(nsegs) | csum_flags;
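	/*
	 * Request a Tx DMA completion indication only when the ring is
	 * filling up; in the common case completed descriptors are
	 * reclaimed from stge_tick(), which keeps Tx interrupt load low.
	 */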
1265 	if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
1266 		tfc |= TFD_TxDMAIndicate;
1267 
1268 	/* Update producer index. */
1269 	sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;
1270 
1271 	/* Check if we have a VLAN tag to insert. */
1272 	if (m->m_flags & M_VLANTAG)
1273 		tfc |= (TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag));
1274 	tfd->tfd_control = htole64(tfc);
1275 
1276 	/* Update Tx Queue. */
1277 	STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
1278 	STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
1279 	txd->tx_m = m;
1280 
1281 	/* Sync descriptors. */
1282 	bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
1283 	    BUS_DMASYNC_PREWRITE);
1284 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1285 	    sc->sc_cdata.stge_tx_ring_map,
1286 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1287 
1288 	return (0);
1289 }
1290 
1291 /*
1292  * stge_start:		[ifnet interface function]
1293  *
1294  *	Start packet transmission on the interface.
1295  */
1296 static void
1297 stge_start(struct ifnet *ifp)
1298 {
1299 	struct stge_softc *sc;
1300 
1301 	sc = ifp->if_softc;
1302 	STGE_LOCK(sc);
1303 	stge_start_locked(ifp);
1304 	STGE_UNLOCK(sc);
1305 }
1306 
1307 static void
1308 stge_start_locked(struct ifnet *ifp)
1309 {
1310 	struct stge_softc *sc;
1311 	struct mbuf *m_head;
1312 	int enq;
1313 
1314 	sc = ifp->if_softc;
1315 
1316 	STGE_LOCK_ASSERT(sc);
1317 
1318 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1319 	    IFF_DRV_RUNNING)
1320 		return;
1321 
1322 	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1323 		if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
1324 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1325 			break;
1326 		}
1327 
1328 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1329 		if (m_head == NULL)
1330 			break;
1331 		/*
1332 		 * Pack the data into the transmit ring. If we
1333 		 * don't have room, set the OACTIVE flag and wait
1334 		 * for the NIC to drain the ring.
1335 		 */
1336 		if (stge_encap(sc, &m_head)) {
1337 			if (m_head == NULL)
1338 				break;
1339 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1340 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1341 			break;
1342 		}
1343 
1344 		enq++;
1345 		/*
1346 		 * If there's a BPF listener, bounce a copy of this frame
1347 		 * to him.
1348 		 */
1349 		BPF_MTAP(ifp, m_head);
1350 	}
1351 
1352 	if (enq > 0) {
1353 		/* Transmit */
1354 		CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);
1355 
1356 		/* Set a timeout in case the chip goes out to lunch. */
1357 		ifp->if_timer = 5;
1358 	}
1359 }
1360 
1361 /*
1362  * stge_watchdog:	[ifnet interface function]
1363  *
1364  *	Watchdog timer handler.
1365  */
1366 static void
1367 stge_watchdog(struct ifnet *ifp)
1368 {
1369 	struct stge_softc *sc;
1370 
1371 	sc = ifp->if_softc;
1372 
1373 	STGE_LOCK(sc);
1374 	if_printf(sc->sc_ifp, "device timeout\n");
1375 	ifp->if_oerrors++;
1376 	stge_init_locked(sc);
1377 	STGE_UNLOCK(sc);
1378 }
1379 
1380 /*
1381  * stge_ioctl:		[ifnet interface function]
1382  *
1383  *	Handle control requests from the operator.
1384  */
1385 static int
1386 stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1387 {
1388 	struct stge_softc *sc;
1389 	struct ifreq *ifr;
1390 	struct mii_data *mii;
1391 	int error, mask;
1392 
1393 	sc = ifp->if_softc;
1394 	ifr = (struct ifreq *)data;
1395 	error = 0;
1396 	switch (cmd) {
1397 	case SIOCSIFMTU:
1398 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
1399 			error = EINVAL;
1400 		else if (ifp->if_mtu != ifr->ifr_mtu) {
1401 			ifp->if_mtu = ifr->ifr_mtu;
1402 			STGE_LOCK(sc);
1403 			stge_init_locked(sc);
1404 			STGE_UNLOCK(sc);
1405 		}
1406 		break;
1407 	case SIOCSIFFLAGS:
1408 		STGE_LOCK(sc);
1409 		if ((ifp->if_flags & IFF_UP) != 0) {
1410 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1411 				if (((ifp->if_flags ^ sc->sc_if_flags)
1412 				    & IFF_PROMISC) != 0)
1413 					stge_set_filter(sc);
1414 			} else {
1415 				if (sc->sc_detach == 0)
1416 					stge_init_locked(sc);
1417 			}
1418 		} else {
1419 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1420 				stge_stop(sc);
1421 		}
1422 		sc->sc_if_flags = ifp->if_flags;
1423 		STGE_UNLOCK(sc);
1424 		break;
1425 	case SIOCADDMULTI:
1426 	case SIOCDELMULTI:
1427 		STGE_LOCK(sc);
1428 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1429 			stge_set_multi(sc);
1430 		STGE_UNLOCK(sc);
1431 		break;
1432 	case SIOCSIFMEDIA:
1433 	case SIOCGIFMEDIA:
1434 		mii = device_get_softc(sc->sc_miibus);
1435 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1436 		break;
1437 	case SIOCSIFCAP:
1438 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1439 #ifdef DEVICE_POLLING
1440 		if ((mask & IFCAP_POLLING) != 0) {
1441 			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1442 				error = ether_poll_register(stge_poll, ifp);
1443 				if (error != 0)
1444 					break;
1445 				STGE_LOCK(sc);
1446 				CSR_WRITE_2(sc, STGE_IntEnable, 0);
1447 				ifp->if_capenable |= IFCAP_POLLING;
1448 				STGE_UNLOCK(sc);
1449 			} else {
1450 				error = ether_poll_deregister(ifp);
1451 				if (error != 0)
1452 					break;
1453 				STGE_LOCK(sc);
1454 				CSR_WRITE_2(sc, STGE_IntEnable,
1455 				    sc->sc_IntEnable);
1456 				ifp->if_capenable &= ~IFCAP_POLLING;
1457 				STGE_UNLOCK(sc);
1458 			}
1459 		}
1460 #endif
1461 		if ((mask & IFCAP_HWCSUM) != 0) {
1462 			ifp->if_capenable ^= IFCAP_HWCSUM;
1463 			if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
1464 			    (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
1465 				ifp->if_hwassist = STGE_CSUM_FEATURES;
1466 			else
1467 				ifp->if_hwassist = 0;
1468 		}
1469 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
1470 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1471 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1472 				STGE_LOCK(sc);
1473 				stge_vlan_setup(sc);
1474 				STGE_UNLOCK(sc);
1475 			}
1476 		}
1477 		VLAN_CAPABILITIES(ifp);
1478 		break;
1479 	default:
1480 		error = ether_ioctl(ifp, cmd, data);
1481 		break;
1482 	}
1483 
1484 	return (error);
1485 }
1486 
1487 static void
1488 stge_link_task(void *arg, int pending)
1489 {
1490 	struct stge_softc *sc;
1491 	uint32_t v, ac;
1492 	int i;
1493 
1494 	sc = (struct stge_softc *)arg;
1495 	STGE_LOCK(sc);
1496 	/*
1497 	 * Update STGE_MACCtrl register depending on link status.
1498 	 * (duplex, flow control etc)
1499 	 */
1500 	v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
1501 	v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
1502 	v |= sc->sc_MACCtrl;
1503 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
1504 	if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
1505 		/* Duplex setting changed, reset Tx/Rx functions. */
1506 		ac = CSR_READ_4(sc, STGE_AsicCtrl);
1507 		ac |= AC_TxReset | AC_RxReset;
1508 		CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1509 		for (i = 0; i < STGE_TIMEOUT; i++) {
1510 			DELAY(100);
1511 			if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
1512 				break;
1513 		}
1514 		if (i == STGE_TIMEOUT)
1515 			device_printf(sc->sc_dev, "reset failed to complete\n");
1516 	}
1517 	STGE_UNLOCK(sc);
1518 }
1519 
1520 static __inline int
1521 stge_tx_error(struct stge_softc *sc)
1522 {
1523 	uint32_t txstat;
1524 	int error;
1525 
1526 	for (error = 0;;) {
1527 		txstat = CSR_READ_4(sc, STGE_TxStatus);
1528 		if ((txstat & TS_TxComplete) == 0)
1529 			break;
1530 		/* Tx underrun */
1531 		if ((txstat & TS_TxUnderrun) != 0) {
1532 			/*
1533 			 * XXX
1534 			 * There should be a better way to recover
1535 			 * from a Tx underrun than a full reset.
1536 			 */
1537 			if (sc->sc_nerr++ < STGE_MAXERR)
1538 				device_printf(sc->sc_dev, "Tx underrun, "
1539 				    "resetting...\n");
1540 			if (sc->sc_nerr == STGE_MAXERR)
1541 				device_printf(sc->sc_dev, "too many errors; "
1542 				    "not reporting any more\n");
1543 			error = -1;
1544 			break;
1545 		}
1546 		/* Maximum/Late collisions, Re-enable Tx MAC. */
1547 		if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
1548 			CSR_WRITE_4(sc, STGE_MACCtrl,
1549 			    (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
1550 			    MC_TxEnable);
1551 	}
1552 
1553 	return (error);
1554 }
1555 
1556 /*
1557  * stge_intr:
1558  *
1559  *	Interrupt service routine.
1560  */
1561 static void
1562 stge_intr(void *arg)
1563 {
1564 	struct stge_softc *sc;
1565 	struct ifnet *ifp;
1566 	int reinit;
1567 	uint16_t status;
1568 
1569 	sc = (struct stge_softc *)arg;
1570 	ifp = sc->sc_ifp;
1571 
1572 	STGE_LOCK(sc);
1573 
1574 #ifdef DEVICE_POLLING
1575 	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1576 		goto done_locked;
1577 #endif
1578 	status = CSR_READ_2(sc, STGE_IntStatus);
1579 	if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
1580 		goto done_locked;
1581 
1582 	/* Disable interrupts. */
1583 	for (reinit = 0;;) {
1584 		status = CSR_READ_2(sc, STGE_IntStatusAck);
1585 		status &= sc->sc_IntEnable;
1586 		if (status == 0)
1587 			break;
1588 		/* Host interface errors. */
1589 		if ((status & IS_HostError) != 0) {
1590 			device_printf(sc->sc_dev,
1591 			    "Host interface error, resetting...\n");
1592 			reinit = 1;
1593 			goto force_init;
1594 		}
1595 
1596 		/* Receive interrupts. */
1597 		if ((status & IS_RxDMAComplete) != 0) {
1598 			stge_rxeof(sc);
1599 			if ((status & IS_RFDListEnd) != 0)
1600 				CSR_WRITE_4(sc, STGE_DMACtrl,
1601 				    DMAC_RxDMAPollNow);
1602 		}
1603 
1604 		/* Transmit interrupts. */
1605 		if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
1606 			stge_txeof(sc);
1607 
1608 		/* Transmission errors. */
1609 		if ((status & IS_TxComplete) != 0) {
1610 			if ((reinit = stge_tx_error(sc)) != 0)
1611 				break;
1612 		}
1613 	}
1614 
1615 force_init:
1616 	if (reinit != 0)
1617 		stge_init_locked(sc);
1618 
1619 	/* Re-enable interrupts. */
1620 	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
1621 
1622 	/* Try to get more packets going. */
1623 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1624 		stge_start_locked(ifp);
1625 
1626 done_locked:
1627 	STGE_UNLOCK(sc);
1628 }
1629 
1630 /*
1631  * stge_txeof:
1632  *
1633  *	Helper; handle transmit interrupts.
1634  */
1635 static void
1636 stge_txeof(struct stge_softc *sc)
1637 {
1638 	struct ifnet *ifp;
1639 	struct stge_txdesc *txd;
1640 	uint64_t control;
1641 	int cons;
1642 
1643 	STGE_LOCK_ASSERT(sc);
1644 
1645 	ifp = sc->sc_ifp;
1646 
1647 	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
1648 	if (txd == NULL)
1649 		return;
1650 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1651 	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);
1652 
1653 	/*
1654 	 * Go through our Tx list and free mbufs for those
1655 	 * frames which have been transmitted.
1656 	 */
1657 	for (cons = sc->sc_cdata.stge_tx_cons;;
1658 	    cons = (cons + 1) % STGE_TX_RING_CNT) {
1659 		if (sc->sc_cdata.stge_tx_cnt <= 0)
1660 			break;
1661 		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
1662 		if ((control & TFD_TFDDone) == 0)
1663 			break;
1664 		sc->sc_cdata.stge_tx_cnt--;
1665 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1666 
1667 		bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
1668 		    BUS_DMASYNC_POSTWRITE);
1669 		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);
1670 
1671 		/* The output packet counter is updated from the statistics registers. */
1672 		m_freem(txd->tx_m);
1673 		txd->tx_m = NULL;
1674 		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
1675 		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
1676 		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
1677 	}
1678 	sc->sc_cdata.stge_tx_cons = cons;
1679 	if (sc->sc_cdata.stge_tx_cnt == 0)
1680 		ifp->if_timer = 0;
1681 
1682 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1683 	    sc->sc_cdata.stge_tx_ring_map,
1684 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1685 }
1686 
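/*
 * Return an RFD to the chip without replacing its mbuf: clearing
 * rfd_status clears RFD_RFDDone, so the descriptor and its existing
 * buffer can be reused after an error or an allocation failure.
 */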
1687 static __inline void
1688 stge_discard_rxbuf(struct stge_softc *sc, int idx)
1689 {
1690 	struct stge_rfd *rfd;
1691 
1692 	rfd = &sc->sc_rdata.stge_rx_ring[idx];
1693 	rfd->rfd_status = 0;
1694 }
1695 
1696 #ifndef __NO_STRICT_ALIGNMENT
1697 /*
1698  * It seems that TC9021's DMA engine has alignment restrictions in
1699  * DMA scatter operations. The first DMA segment has no address
1700  * alignment restrictions but the rest should be aligned on a 4(?)-byte
1701  * boundary. Otherwise it would corrupt random memory. Since we don't
1702  * know in advance which mbuf will be the first segment, we simply
1703  * don't align at all.
1704  * To avoid copying an entire frame just to align it, we allocate a
1705  * new mbuf and copy the Ethernet header into it. The new mbuf is
1706  * then prepended to the existing mbuf chain.
1707  */
1708 static __inline struct mbuf *
1709 stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
1710 {
1711 	struct mbuf *n;
1712 
1713 	n = NULL;
1714 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
1715 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
1716 		m->m_data += ETHER_HDR_LEN;
1717 		n = m;
1718 	} else {
1719 		MGETHDR(n, M_DONTWAIT, MT_DATA);
1720 		if (n != NULL) {
1721 			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
1722 			m->m_data += ETHER_HDR_LEN;
1723 			m->m_len -= ETHER_HDR_LEN;
1724 			n->m_len = ETHER_HDR_LEN;
1725 			M_MOVE_PKTHDR(n, m);
1726 			n->m_next = m;
1727 		} else
1728 			m_freem(m);
1729 	}
1730 
1731 	return (n);
1732 }
1733 #endif
1734 
1735 /*
1736  * stge_rxeof:
1737  *
1738  *	Helper; handle receive interrupts.
1739  */
1740 static void
1741 stge_rxeof(struct stge_softc *sc)
1742 {
1743 	struct ifnet *ifp;
1744 	struct stge_rxdesc *rxd;
1745 	struct mbuf *mp, *m;
1746 	uint64_t status64;
1747 	uint32_t status;
1748 	int cons, prog;
1749 
1750 	STGE_LOCK_ASSERT(sc);
1751 
1752 	ifp = sc->sc_ifp;
1753 
1754 	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
1755 	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);
1756 
1757 	prog = 0;
1758 	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
1759 	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
1760 		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
1761 		status = RFD_RxStatus(status64);
1762 		if ((status & RFD_RFDDone) == 0)
1763 			break;
1764 #ifdef DEVICE_POLLING
1765 		if (ifp->if_capenable & IFCAP_POLLING) {
1766 			if (sc->sc_cdata.stge_rxcycles <= 0)
1767 				break;
1768 			sc->sc_cdata.stge_rxcycles--;
1769 		}
1770 #endif
1771 		prog++;
1772 		rxd = &sc->sc_cdata.stge_rxdesc[cons];
1773 		mp = rxd->rx_m;
1774 
1775 		/*
1776 		 * If the packet had an error, drop it.  Note we count
1777 		 * the error later in the periodic stats update.
1778 		 */
1779 		if ((status & RFD_FrameEnd) != 0 && (status &
1780 		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
1781 		    RFD_RxAlignmentError | RFD_RxFCSError |
1782 		    RFD_RxLengthError)) != 0) {
1783 			stge_discard_rxbuf(sc, cons);
1784 			if (sc->sc_cdata.stge_rxhead != NULL) {
1785 				m_freem(sc->sc_cdata.stge_rxhead);
1786 				STGE_RXCHAIN_RESET(sc);
1787 			}
1788 			continue;
1789 		}
1790 		/*
1791 		 * Add a new receive buffer to the ring.
1792 		 */
1793 		if (stge_newbuf(sc, cons) != 0) {
1794 			ifp->if_iqdrops++;
1795 			stge_discard_rxbuf(sc, cons);
1796 			if (sc->sc_cdata.stge_rxhead != NULL) {
1797 				m_freem(sc->sc_cdata.stge_rxhead);
1798 				STGE_RXCHAIN_RESET(sc);
1799 			}
1800 			continue;
1801 		}
1802 
1803 		if ((status & RFD_FrameEnd) != 0)
1804 			mp->m_len = RFD_RxDMAFrameLen(status) -
1805 			    sc->sc_cdata.stge_rxlen;
1806 		sc->sc_cdata.stge_rxlen += mp->m_len;
1807 
1808 		/* Chain mbufs. */
1809 		if (sc->sc_cdata.stge_rxhead == NULL) {
1810 			sc->sc_cdata.stge_rxhead = mp;
1811 			sc->sc_cdata.stge_rxtail = mp;
1812 		} else {
1813 			mp->m_flags &= ~M_PKTHDR;
1814 			sc->sc_cdata.stge_rxtail->m_next = mp;
1815 			sc->sc_cdata.stge_rxtail = mp;
1816 		}
1817 
1818 		if ((status & RFD_FrameEnd) != 0) {
1819 			m = sc->sc_cdata.stge_rxhead;
1820 			m->m_pkthdr.rcvif = ifp;
1821 			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;
1822 
1823 			if (m->m_pkthdr.len > sc->sc_if_framesize) {
1824 				m_freem(m);
1825 				STGE_RXCHAIN_RESET(sc);
1826 				continue;
1827 			}
1828 			/*
1829 			 * Set the incoming checksum information for
1830 			 * the packet.
1831 			 */
1832 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1833 				if ((status & RFD_IPDetected) != 0) {
1834 					m->m_pkthdr.csum_flags |=
1835 						CSUM_IP_CHECKED;
1836 					if ((status & RFD_IPError) == 0)
1837 						m->m_pkthdr.csum_flags |=
1838 						    CSUM_IP_VALID;
1839 				}
1840 				if (((status & RFD_TCPDetected) != 0 &&
1841 				    (status & RFD_TCPError) == 0) ||
1842 				    ((status & RFD_UDPDetected) != 0 &&
1843 				    (status & RFD_UDPError) == 0)) {
1844 					m->m_pkthdr.csum_flags |=
1845 					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1846 					m->m_pkthdr.csum_data = 0xffff;
1847 				}
1848 			}
1849 
1850 #ifndef __NO_STRICT_ALIGNMENT
1851 			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
1852 				if ((m = stge_fixup_rx(sc, m)) == NULL) {
1853 					STGE_RXCHAIN_RESET(sc);
1854 					continue;
1855 				}
1856 			}
1857 #endif
1858 			/* Check for VLAN tagged packets. */
1859 			if ((status & RFD_VLANDetected) != 0 &&
1860 			    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
1861 				m->m_pkthdr.ether_vtag = RFD_TCI(status64);
1862 				m->m_flags |= M_VLANTAG;
1863 			}
1864 
1865 			STGE_UNLOCK(sc);
1866 			/* Pass it on. */
1867 			(*ifp->if_input)(ifp, m);
1868 			STGE_LOCK(sc);
1869 
1870 			STGE_RXCHAIN_RESET(sc);
1871 		}
1872 	}
1873 
1874 	if (prog > 0) {
1875 		/* Update the consumer index. */
1876 		sc->sc_cdata.stge_rx_cons = cons;
1877 		bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
1878 		    sc->sc_cdata.stge_rx_ring_map,
1879 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1880 	}
1881 }
1882 
1883 #ifdef DEVICE_POLLING
1884 static void
1885 stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1886 {
1887 	struct stge_softc *sc;
1888 	uint16_t status;
1889 
1890 	sc = ifp->if_softc;
1891 	STGE_LOCK(sc);
1892 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1893 		STGE_UNLOCK(sc);
1894 		return;
1895 	}
1896 
1897 	sc->sc_cdata.stge_rxcycles = count;
1898 	stge_rxeof(sc);
1899 	stge_txeof(sc);
1900 
1901 	if (cmd == POLL_AND_CHECK_STATUS) {
1902 		status = CSR_READ_2(sc, STGE_IntStatus);
1903 		status &= sc->sc_IntEnable;
1904 		if (status != 0) {
1905 			if ((status & IS_HostError) != 0) {
1906 				device_printf(sc->sc_dev,
1907 				    "Host interface error, resetting...\n");
1908 				stge_init_locked(sc);
1909 			}
1910 			if ((status & IS_TxComplete) != 0) {
1911 				if (stge_tx_error(sc) != 0)
1912 					stge_init_locked(sc);
1913 			}
1914 		}
1915 
1916 	}
1917 
1918 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1919 		stge_start_locked(ifp);
1920 
1921 	STGE_UNLOCK(sc);
1922 }
1923 #endif	/* DEVICE_POLLING */
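/*
 * Editor's note: with DEVICE_POLLING, stge_poll() is not invoked by the
 * driver itself; drivers of this era register it from the SIOCSIFCAP
 * ioctl path.  A sketch (not the exact code from this file):
 *
 *	if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
 *		error = ether_poll_register(stge_poll, ifp);
 *		...
 *		CSR_WRITE_2(sc, STGE_IntEnable, 0);
 *	} else
 *		error = ether_poll_deregister(ifp);
 */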
1924 
1925 /*
1926  * stge_tick:
1927  *
1928  *	One second timer, used to tick the MII.
1929  */
1930 static void
1931 stge_tick(void *arg)
1932 {
1933 	struct stge_softc *sc;
1934 	struct mii_data *mii;
1935 
1936 	sc = (struct stge_softc *)arg;
1937 
1938 	STGE_LOCK_ASSERT(sc);
1939 
1940 	mii = device_get_softc(sc->sc_miibus);
1941 	mii_tick(mii);
1942 
1943 	/* Update statistics counters. */
1944 	stge_stats_update(sc);
1945 
1946 	/*
1947 	 * Reclaim any pending Tx descriptors to release mbufs in a
1948 	 * timely manner as we don't generate Tx completion interrupts
1949 	 * for every frame. This limits the delay to a maximum of one
1950 	 * second.
1951 	 */
1952 	if (sc->sc_cdata.stge_tx_cnt != 0)
1953 		stge_txeof(sc);
1954 
1955 	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
1956 }
1957 
1958 /*
1959  * stge_stats_update:
1960  *
1961  *	Read the TC9021 statistics counters.
1962  */
1963 static void
1964 stge_stats_update(struct stge_softc *sc)
1965 {
1966 	struct ifnet *ifp;
1967 
1968 	STGE_LOCK_ASSERT(sc);
1969 
1970 	ifp = sc->sc_ifp;
1971 
1972 	CSR_READ_4(sc, STGE_OctetRcvOk);
1973 
1974 	ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk);
1975 
1976 	ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors);
1977 
1978 	CSR_READ_4(sc, STGE_OctetXmtdOk);
1979 
1980 	ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk);
1981 
1982 	ifp->if_collisions +=
1983 	    CSR_READ_4(sc, STGE_LateCollisions) +
1984 	    CSR_READ_4(sc, STGE_MultiColFrames) +
1985 	    CSR_READ_4(sc, STGE_SingleColFrames);
1986 
1987 	ifp->if_oerrors +=
1988 	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
1989 	    CSR_READ_2(sc, STGE_FramesWEXDeferal);
1990 }
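/*
 * Editor's note: the two discarded CSR_READ_4() calls above are presumably
 * there because the TC9021 statistics registers clear when read; reading
 * OctetRcvOk and OctetXmtdOk keeps those counters from sticking at their
 * maximum even though the driver does not accumulate them anywhere.
 */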
1991 
1992 /*
1993  * stge_reset:
1994  *
1995  *	Perform a soft reset on the TC9021.
1996  */
1997 static void
1998 stge_reset(struct stge_softc *sc, uint32_t how)
1999 {
2000 	uint32_t ac;
2001 	uint8_t v;
2002 	int i, dv;
2003 
2004 	STGE_LOCK_ASSERT(sc);
2005 
2006 	dv = 5000;
2007 	ac = CSR_READ_4(sc, STGE_AsicCtrl);
2008 	switch (how) {
2009 	case STGE_RESET_TX:
2010 		ac |= AC_TxReset | AC_FIFO;
2011 		dv = 100;
2012 		break;
2013 	case STGE_RESET_RX:
2014 		ac |= AC_RxReset | AC_FIFO;
2015 		dv = 100;
2016 		break;
2017 	case STGE_RESET_FULL:
2018 	default:
2019 		/*
2020 		 * Only assert RstOut if we're fiber.  We need GMII clocks
2021 		 * to be present in order for the reset to complete on fiber
2022 		 * cards.
2023 		 */
2024 		ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
2025 		    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
2026 		    (sc->sc_usefiber ? AC_RstOut : 0);
2027 		break;
2028 	}
2029 
2030 	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
2031 
2032 	/* Account for reset problem at 10Mbps. */
2033 	DELAY(dv);
2034 
2035 	for (i = 0; i < STGE_TIMEOUT; i++) {
2036 		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
2037 			break;
2038 		DELAY(dv);
2039 	}
2040 
2041 	if (i == STGE_TIMEOUT)
2042 		device_printf(sc->sc_dev, "reset failed to complete\n");
2043 
2044 	/* Set LED, from Linux IPG driver. */
2045 	ac = CSR_READ_4(sc, STGE_AsicCtrl);
2046 	ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
2047 	if ((sc->sc_led & 0x01) != 0)
2048 		ac |= AC_LEDMode;
2049 	if ((sc->sc_led & 0x03) != 0)
2050 		ac |= AC_LEDModeBit1;
2051 	if ((sc->sc_led & 0x08) != 0)
2052 		ac |= AC_LEDSpeed;
2053 	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
2054 
2055 	/* Set PHY, from Linux IPG driver. */
2056 	v = CSR_READ_1(sc, STGE_PhySet);
2057 	v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
2058 	v |= ((sc->sc_led & 0x70) >> 4);
2059 	CSR_WRITE_1(sc, STGE_PhySet, v);
2060 }
2061 
2062 /*
2063  * stge_init:		[ ifnet interface function ]
2064  *
2065  *	Initialize the interface.
2066  */
2067 static void
2068 stge_init(void *xsc)
2069 {
2070 	struct stge_softc *sc;
2071 
2072 	sc = (struct stge_softc *)xsc;
2073 	STGE_LOCK(sc);
2074 	stge_init_locked(sc);
2075 	STGE_UNLOCK(sc);
2076 }
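/*
 * Editor's note: stge_init() is the ifnet if_init method; the attach code
 * (earlier in this file) is expected to hook it up in the usual way,
 * roughly:
 *
 *	ifp->if_softc = sc;
 *	ifp->if_init = stge_init;
 *
 * so ioctl paths that re-initialize the interface end up here.
 */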
2077 
2078 static void
2079 stge_init_locked(struct stge_softc *sc)
2080 {
2081 	struct ifnet *ifp;
2082 	struct mii_data *mii;
2083 	uint16_t eaddr[3];
2084 	uint32_t v;
2085 	int error;
2086 
2087 	STGE_LOCK_ASSERT(sc);
2088 
2089 	ifp = sc->sc_ifp;
2090 	mii = device_get_softc(sc->sc_miibus);
2091 
2092 	/*
2093 	 * Cancel any pending I/O.
2094 	 */
2095 	stge_stop(sc);
2096 
2097 	/* Init descriptors. */
2098 	error = stge_init_rx_ring(sc);
2099 	if (error != 0) {
2100 		device_printf(sc->sc_dev,
2101 		    "initialization failed: no memory for rx buffers\n");
2102 		stge_stop(sc);
2103 		goto out;
2104 	}
2105 	stge_init_tx_ring(sc);
2106 
2107 	/* Set the station address. */
2108 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2109 	CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
2110 	CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
2111 	CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));
2112 
2113 	/*
2114 	 * Set the statistics masks.  Disable all the RMON stats,
2115 	 * and disable selected stats in the non-RMON stats registers.
2116 	 */
2117 	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
2118 	CSR_WRITE_4(sc, STGE_StatisticsMask,
2119 	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
2120 	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
2121 	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
2122 	    (1U << 21));
2123 
2124 	/* Set up the receive filter. */
2125 	stge_set_filter(sc);
2126 	/* Program multicast filter. */
2127 	stge_set_multi(sc);
2128 
2129 	/*
2130 	 * Give the transmit and receive ring to the chip.
2131 	 */
2132 	CSR_WRITE_4(sc, STGE_TFDListPtrHi,
2133 	    STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
2134 	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
2135 	    STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));
2136 
2137 	CSR_WRITE_4(sc, STGE_RFDListPtrHi,
2138 	    STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
2139 	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
2140 	    STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));
2141 
2142 	/*
2143 	 * Initialize the Tx auto-poll period.  It's OK to make this number
2144 	 * large (255 is the max, but we use 127) -- we explicitly kick the
2145 	 * transmit engine when there's actually a packet.
2146 	 */
2147 	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2148 
2149 	/* ..and the Rx auto-poll period. */
2150 	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2151 
2152 	/* Initialize the Tx start threshold. */
2153 	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);
2154 
2155 	/* Rx DMA thresholds, from Linux */
2156 	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
2157 	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);
2158 
2159 	/* Rx early threshold, from Linux */
2160 	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);
2161 
2162 	/* Tx DMA thresholds, from Linux */
2163 	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
2164 	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);
2165 
2166 	/*
2167 	 * Initialize the Rx DMA interrupt control register.  We
2168 	 * request an interrupt after every incoming packet, but
2169 	 * defer it for sc_rxint_dmawait microseconds.  When the number
2170 	 * of frames pending reaches sc_rxint_nframe, we stop deferring
2171 	 * the interrupt and signal it immediately.
2172 	 */
2173 	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
2174 	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
2175 	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));
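/*
 * Editor's example with hypothetical tunable values: if sc_rxint_nframe
 * is 8 and sc_rxint_dmawait is 30 (microseconds), the write above means
 * "coalesce up to 8 received frames, but never delay the Rx interrupt by
 * more than 30 us"; STGE_RXINT_USECS2TICK() converts the microsecond
 * value into the chip's wait-time tick units.
 */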
2176 
2177 	/*
2178 	 * Initialize the interrupt mask.
2179 	 */
2180 	sc->sc_IntEnable = IS_HostError | IS_TxComplete |
2181 	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
2182 #ifdef DEVICE_POLLING
2183 	/* Disable interrupts if we are polling. */
2184 	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
2185 		CSR_WRITE_2(sc, STGE_IntEnable, 0);
2186 	else
2187 #endif
2188 	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
2189 
2190 	/*
2191 	 * Configure the DMA engine.
2192 	 * XXX Should auto-tune TxBurstLimit.
2193 	 */
2194 	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));
2195 
2196 	/*
2197 	 * Send a PAUSE frame when the Rx FIFO fills to 29696 bytes,
2198 	 * and send an un-PAUSE frame when it drains back to 3056
2199 	 * bytes in the Rx FIFO.
2200 	 */
2201 	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
2202 	CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);
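/*
 * Editor's note: the flow-control thresholds appear to be programmed in
 * 16-byte units, hence the divisions above: 29696 / 16 == 1856 and
 * 3056 / 16 == 191.
 */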
2203 
2204 	/*
2205 	 * Set the maximum frame size.
2206 	 */
2207 	sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2208 	CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);
2209 
2210 	/*
2211 	 * Initialize MacCtrl -- do it before setting the media,
2212 	 * as setting the media will actually program the register.
2213 	 *
2214 	 * Note: We have to poke the IFS value before poking
2215 	 * anything else.
2216 	 */
2217 	/* Tx/Rx MAC should be disabled before programming IFS. */
2218 	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));
2219 
2220 	stge_vlan_setup(sc);
2221 
2222 	if (sc->sc_rev >= 6) {		/* >= B.2 */
2223 		/* Multi-frag frame bug work-around. */
2224 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2225 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);
2226 
2227 		/* Tx Poll Now bug work-around. */
2228 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2229 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
2230 		/* Tx Poll Now bug work-around. */
2231 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2232 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
2233 	}
2234 
2235 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2236 	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
2237 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2238 	/*
2239 	 * It seems that transmitting frames without checking the state of
2240 	 * the Rx/Tx MAC wedges the hardware.
2241 	 */
2242 	stge_start_tx(sc);
2243 	stge_start_rx(sc);
2244 
2245 	/*
2246 	 * Set the current media.
2247 	 */
2248 	mii_mediachg(mii);
2249 
2250 	/*
2251 	 * Start the one second MII clock.
2252 	 */
2253 	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
2254 
2255 	/*
2256 	 * ...all done!
2257 	 */
2258 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2259 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2260 
2261  out:
2262 	if (error != 0)
2263 		device_printf(sc->sc_dev, "interface not running\n");
2264 }
2265 
2266 static void
2267 stge_vlan_setup(struct stge_softc *sc)
2268 {
2269 	struct ifnet *ifp;
2270 	uint32_t v;
2271 
2272 	ifp = sc->sc_ifp;
2273 	/*
2274 	 * The NIC always copies a VLAN tag regardless of the STGE_MACCtrl
2275 	 * MC_AutoVLANuntagging bit.
2276 	 * The MC_AutoVLANtagging bit selects which VLAN source to use
2277 	 * between STGE_VLANTag and the TFC.  However, the TFC
2278 	 * TFD_VLANTagInsert bit has priority over MC_AutoVLANtagging, so
2279 	 * we always use the TFC instead of the STGE_VLANTag register.
2280 	 */
2281 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2282 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2283 		v |= MC_AutoVLANuntagging;
2284 	else
2285 		v &= ~MC_AutoVLANuntagging;
2286 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2287 }
2288 
2289 /*
2290  *	Stop transmission on the interface.
2291  */
2292 static void
2293 stge_stop(struct stge_softc *sc)
2294 {
2295 	struct ifnet *ifp;
2296 	struct stge_txdesc *txd;
2297 	struct stge_rxdesc *rxd;
2298 	uint32_t v;
2299 	int i;
2300 
2301 	STGE_LOCK_ASSERT(sc);
2302 	/*
2303 	 * Stop the one second clock.
2304 	 */
2305 	callout_stop(&sc->sc_tick_ch);
2306 
2307 	/*
2308 	 * Reset the chip to a known state.
2309 	 */
2310 	stge_reset(sc, STGE_RESET_FULL);
2311 
2312 	/*
2313 	 * Disable interrupts.
2314 	 */
2315 	CSR_WRITE_2(sc, STGE_IntEnable, 0);
2316 
2317 	/*
2318 	 * Stop receiver, transmitter, and stats update.
2319 	 */
2320 	stge_stop_rx(sc);
2321 	stge_stop_tx(sc);
2322 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2323 	v |= MC_StatisticsDisable;
2324 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2325 
2326 	/*
2327 	 * Stop the transmit and receive DMA.
2328 	 */
2329 	stge_dma_wait(sc);
2330 	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
2331 	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
2332 	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
2333 	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);
2334 
2335 	/*
2336 	 * Free RX and TX mbufs still in the queues.
2337 	 */
2338 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2339 		rxd = &sc->sc_cdata.stge_rxdesc[i];
2340 		if (rxd->rx_m != NULL) {
2341 			bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
2342 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2343 			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
2344 			    rxd->rx_dmamap);
2345 			m_freem(rxd->rx_m);
2346 			rxd->rx_m = NULL;
2347 		}
2348 	}
2349 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2350 		txd = &sc->sc_cdata.stge_txdesc[i];
2351 		if (txd->tx_m != NULL) {
2352 			bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
2353 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2354 			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
2355 			    txd->tx_dmamap);
2356 			m_freem(txd->tx_m);
2357 			txd->tx_m = NULL;
2358 		}
2359 	}
2360 
2361 	/*
2362 	 * Mark the interface down and cancel the watchdog timer.
2363 	 */
2364 	ifp = sc->sc_ifp;
2365 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2366 	ifp->if_timer = 0;
2367 }
2368 
2369 static void
2370 stge_start_tx(struct stge_softc *sc)
2371 {
2372 	uint32_t v;
2373 	int i;
2374 
2375 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2376 	if ((v & MC_TxEnabled) != 0)
2377 		return;
2378 	v |= MC_TxEnable;
2379 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2380 	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2381 	for (i = STGE_TIMEOUT; i > 0; i--) {
2382 		DELAY(10);
2383 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2384 		if ((v & MC_TxEnabled) != 0)
2385 			break;
2386 	}
2387 	if (i == 0)
2388 		device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
2389 }
2390 
2391 static void
2392 stge_start_rx(struct stge_softc *sc)
2393 {
2394 	uint32_t v;
2395 	int i;
2396 
2397 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2398 	if ((v & MC_RxEnabled) != 0)
2399 		return;
2400 	v |= MC_RxEnable;
2401 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2402 	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2403 	for (i = STGE_TIMEOUT; i > 0; i--) {
2404 		DELAY(10);
2405 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2406 		if ((v & MC_RxEnabled) != 0)
2407 			break;
2408 	}
2409 	if (i == 0)
2410 		device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
2411 }
2412 
2413 static void
2414 stge_stop_tx(struct stge_softc *sc)
2415 {
2416 	uint32_t v;
2417 	int i;
2418 
2419 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2420 	if ((v & MC_TxEnabled) == 0)
2421 		return;
2422 	v |= MC_TxDisable;
2423 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2424 	for (i = STGE_TIMEOUT; i > 0; i--) {
2425 		DELAY(10);
2426 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2427 		if ((v & MC_TxEnabled) == 0)
2428 			break;
2429 	}
2430 	if (i == 0)
2431 		device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
2432 }
2433 
2434 static void
2435 stge_stop_rx(struct stge_softc *sc)
2436 {
2437 	uint32_t v;
2438 	int i;
2439 
2440 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2441 	if ((v & MC_RxEnabled) == 0)
2442 		return;
2443 	v |= MC_RxDisable;
2444 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2445 	for (i = STGE_TIMEOUT; i > 0; i--) {
2446 		DELAY(10);
2447 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2448 		if ((v & MC_RxEnabled) == 0)
2449 			break;
2450 	}
2451 	if (i == 0)
2452 		device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
2453 }
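/*
 * Editor's sketch: the four helpers above share a poll-until-bit pattern.
 * A hypothetical refactoring (stge_mac_wait is not part of this driver)
 * could look like this, using only macros already available to this file:
 */
static int
stge_mac_wait(struct stge_softc *sc, uint32_t bit, int set)
{
	uint32_t v;
	int i;

	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if (((v & bit) != 0) == set)
			return (0);	/* bit reached the desired state */
	}
	return (ETIMEDOUT);		/* caller prints the timeout message */
}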
2454 
2455 static void
2456 stge_init_tx_ring(struct stge_softc *sc)
2457 {
2458 	struct stge_ring_data *rd;
2459 	struct stge_txdesc *txd;
2460 	bus_addr_t addr;
2461 	int i;
2462 
2463 	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
2464 	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);
2465 
2466 	sc->sc_cdata.stge_tx_prod = 0;
2467 	sc->sc_cdata.stge_tx_cons = 0;
2468 	sc->sc_cdata.stge_tx_cnt = 0;
2469 
2470 	rd = &sc->sc_rdata;
2471 	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
2472 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2473 		if (i == (STGE_TX_RING_CNT - 1))
2474 			addr = STGE_TX_RING_ADDR(sc, 0);
2475 		else
2476 			addr = STGE_TX_RING_ADDR(sc, i + 1);
2477 		rd->stge_tx_ring[i].tfd_next = htole64(addr);
2478 		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
2479 		txd = &sc->sc_cdata.stge_txdesc[i];
2480 		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
2481 	}
2482 
2483 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
2484 	    sc->sc_cdata.stge_tx_ring_map,
2485 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2486 
2487 }
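/*
 * Editor's note: the loop above closes the descriptor list into a ring;
 * entry i links to entry (i + 1) % STGE_TX_RING_CNT, so the last TFD
 * points back at TFD 0 and the chip can follow tfd_next indefinitely.
 * Pre-setting TFD_TFDDone marks every descriptor as owned by software,
 * i.e. nothing to transmit yet.
 */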
2488 
2489 static int
2490 stge_init_rx_ring(struct stge_softc *sc)
2491 {
2492 	struct stge_ring_data *rd;
2493 	bus_addr_t addr;
2494 	int i;
2495 
2496 	sc->sc_cdata.stge_rx_cons = 0;
2497 	STGE_RXCHAIN_RESET(sc);
2498 
2499 	rd = &sc->sc_rdata;
2500 	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
2501 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2502 		if (stge_newbuf(sc, i) != 0)
2503 			return (ENOBUFS);
2504 		if (i == (STGE_RX_RING_CNT - 1))
2505 			addr = STGE_RX_RING_ADDR(sc, 0);
2506 		else
2507 			addr = STGE_RX_RING_ADDR(sc, i + 1);
2508 		rd->stge_rx_ring[i].rfd_next = htole64(addr);
2509 		rd->stge_rx_ring[i].rfd_status = 0;
2510 	}
2511 
2512 	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
2513 	    sc->sc_cdata.stge_rx_ring_map,
2514 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2515 
2516 	return (0);
2517 }
2518 
2519 /*
2520  * stge_newbuf:
2521  *
2522  *	Add a receive buffer to the indicated descriptor.
2523  */
2524 static int
2525 stge_newbuf(struct stge_softc *sc, int idx)
2526 {
2527 	struct stge_rxdesc *rxd;
2528 	struct stge_rfd *rfd;
2529 	struct mbuf *m;
2530 	bus_dma_segment_t segs[1];
2531 	bus_dmamap_t map;
2532 	int nsegs;
2533 
2534 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2535 	if (m == NULL)
2536 		return (ENOBUFS);
2537 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2538 	/*
2539 	 * The hardware requires a 4-byte aligned DMA address when a
2540 	 * jumbo frame is used.
2541 	 */
2542 	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
2543 		m_adj(m, ETHER_ALIGN);
2544 
2545 	if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag,
2546 	    sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
2547 		m_freem(m);
2548 		return (ENOBUFS);
2549 	}
2550 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2551 
2552 	rxd = &sc->sc_cdata.stge_rxdesc[idx];
2553 	if (rxd->rx_m != NULL) {
2554 		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2555 		    BUS_DMASYNC_POSTREAD);
2556 		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
2557 	}
2558 	map = rxd->rx_dmamap;
2559 	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
2560 	sc->sc_cdata.stge_rx_sparemap = map;
2561 	bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2562 	    BUS_DMASYNC_PREREAD);
2563 	rxd->rx_m = m;
2564 
2565 	rfd = &sc->sc_rdata.stge_rx_ring[idx];
2566 	rfd->rfd_frag.frag_word0 =
2567 	    htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
2568 	rfd->rfd_status = 0;
2569 
2570 	return (0);
2571 }
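/*
 * Editor's note on the map swap above: the replacement mbuf is always
 * loaded into stge_rx_sparemap first, so a DMA load failure leaves the
 * descriptor's current buffer and mapping untouched; only after a
 * successful load are the descriptor's map and the spare exchanged.
 */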
2572 
2573 /*
2574  * stge_set_filter:
2575  *
2576  *	Set up the receive filter.
2577  */
2578 static void
2579 stge_set_filter(struct stge_softc *sc)
2580 {
2581 	struct ifnet *ifp;
2582 	uint16_t mode;
2583 
2584 	STGE_LOCK_ASSERT(sc);
2585 
2586 	ifp = sc->sc_ifp;
2587 
2588 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2589 	mode |= RM_ReceiveUnicast;
2590 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
2591 		mode |= RM_ReceiveBroadcast;
2592 	else
2593 		mode &= ~RM_ReceiveBroadcast;
2594 	if ((ifp->if_flags & IFF_PROMISC) != 0)
2595 		mode |= RM_ReceiveAllFrames;
2596 	else
2597 		mode &= ~RM_ReceiveAllFrames;
2598 
2599 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2600 }
2601 
2602 static void
2603 stge_set_multi(struct stge_softc *sc)
2604 {
2605 	struct ifnet *ifp;
2606 	struct ifmultiaddr *ifma;
2607 	uint32_t crc;
2608 	uint32_t mchash[2];
2609 	uint16_t mode;
2610 	int count;
2611 
2612 	STGE_LOCK_ASSERT(sc);
2613 
2614 	ifp = sc->sc_ifp;
2615 
2616 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2617 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2618 		if ((ifp->if_flags & IFF_PROMISC) != 0)
2619 			mode |= RM_ReceiveAllFrames;
2620 		else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2621 			mode |= RM_ReceiveMulticast;
2622 		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2623 		return;
2624 	}
2625 
2626 	/* Clear existing filters. */
2627 	CSR_WRITE_4(sc, STGE_HashTable0, 0);
2628 	CSR_WRITE_4(sc, STGE_HashTable1, 0);
2629 
2630 	/*
2631 	 * Set up the multicast address filter by passing all multicast
2632 	 * addresses through a CRC generator, and then using the low-order
2633 	 * 6 bits as an index into the 64-bit multicast hash table.  The
2634 	 * high-order bit selects the register, while the low-order 5 bits
2635 	 * select the bit within the register.
2636 	 */
2637 
2638 	bzero(mchash, sizeof(mchash));
2639 
2640 	count = 0;
2641 	IF_ADDR_LOCK(ifp);
2642 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2643 		if (ifma->ifma_addr->sa_family != AF_LINK)
2644 			continue;
2645 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2646 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2647 
2648 		/* Just want the 6 least significant bits. */
2649 		crc &= 0x3f;
2650 
2651 		/* Set the corresponding bit in the hash table. */
2652 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2653 		count++;
2654 	}
2655 	IF_ADDR_UNLOCK(ifp);
2656 
2657 	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
2658 	if (count > 0)
2659 		mode |= RM_ReceiveMulticastHash;
2660 	else
2661 		mode &= ~RM_ReceiveMulticastHash;
2662 
2663 	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
2664 	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
2665 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2666 }
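/*
 * Editor's worked example (hypothetical CRC value, for illustration):
 * if ether_crc32_be() yields a CRC whose low 6 bits are 0x27, then
 * crc >> 5 == 1 and crc & 0x1f == 7, so the loop above sets bit 7 of
 * mchash[1] -- bit 39 of the 64-bit hash programmed into
 * STGE_HashTable0/STGE_HashTable1.
 */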
2667 
2668 static int
2669 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2670 {
2671 	int error, value;
2672 
2673 	if (!arg1)
2674 		return (EINVAL);
2675 	value = *(int *)arg1;
2676 	error = sysctl_handle_int(oidp, &value, 0, req);
2677 	if (error || !req->newptr)
2678 		return (error);
2679 	if (value < low || value > high)
2680 		return (EINVAL);
2681 	*(int *)arg1 = value;
2682 
2683 	return (0);
2684 }
2685 
2686 static int
2687 sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
2688 {
2689 	return (sysctl_int_range(oidp, arg1, arg2, req,
2690 	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
2691 }
2692 
2693 static int
2694 sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
2695 {
2696 	return (sysctl_int_range(oidp, arg1, arg2, req,
2697 	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
2698 }
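/*
 * Editor's sketch of how the two range-checked handlers above are
 * typically registered from the attach routine (the node names here are
 * assumptions; the authoritative registration lives earlier in this
 * file):
 */
static void
stge_sysctl_example(struct stge_softc *sc)
{
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->sc_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev)), OID_AUTO,
	    "rxint_nframe", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->sc_rxint_nframe, 0, sysctl_hw_stge_rxint_nframe, "I",
	    "stge rx interrupt nframe");
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->sc_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev)), OID_AUTO,
	    "rxint_dmawait", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->sc_rxint_dmawait, 0, sysctl_hw_stge_rxint_dmawait, "I",
	    "stge rx interrupt dmawait");
}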
2699