xref: /freebsd/sys/dev/stge/if_stge.c (revision 94942af266ac119ede0ca836f9aa5a5ac0582938)
1 /*	$NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the NetBSD
21  *	Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Device driver for the Sundance Tech. TC9021 10/100/1000
41  * Ethernet controller.
42  */
43 
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
46 
47 #ifdef HAVE_KERNEL_OPTION_HEADERS
48 #include "opt_device_polling.h"
49 #endif
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/endian.h>
54 #include <sys/mbuf.h>
55 #include <sys/malloc.h>
56 #include <sys/kernel.h>
57 #include <sys/module.h>
58 #include <sys/socket.h>
59 #include <sys/sockio.h>
60 #include <sys/sysctl.h>
61 #include <sys/taskqueue.h>
62 
63 #include <net/bpf.h>
64 #include <net/ethernet.h>
65 #include <net/if.h>
66 #include <net/if_dl.h>
67 #include <net/if_media.h>
68 #include <net/if_types.h>
69 #include <net/if_vlan_var.h>
70 
71 #include <machine/bus.h>
72 #include <machine/resource.h>
73 #include <sys/bus.h>
74 #include <sys/rman.h>
75 
76 #include <dev/mii/mii.h>
77 #include <dev/mii/miivar.h>
78 
79 #include <dev/pci/pcireg.h>
80 #include <dev/pci/pcivar.h>
81 
82 #include <dev/stge/if_stgereg.h>
83 
84 #define	STGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
85 
86 MODULE_DEPEND(stge, pci, 1, 1, 1);
87 MODULE_DEPEND(stge, ether, 1, 1, 1);
88 MODULE_DEPEND(stge, miibus, 1, 1, 1);
89 
90 /* "device miibus" required.  See GENERIC if you get errors here. */
91 #include "miibus_if.h"
92 
93 /*
94  * Devices supported by this driver.
95  */
96 static struct stge_product {
97 	uint16_t	stge_vendorid;
98 	uint16_t	stge_deviceid;
99 	const char	*stge_name;
100 } stge_products[] = {
101 	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST1023,
102 	  "Sundance ST-1023 Gigabit Ethernet" },
103 
104 	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST2021,
105 	  "Sundance ST-2021 Gigabit Ethernet" },
106 
107 	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021,
108 	  "Tamarack TC9021 Gigabit Ethernet" },
109 
110 	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021_ALT,
111 	  "Tamarack TC9021 Gigabit Ethernet" },
112 
113 	/*
114 	 * The Sundance sample boards use the Sundance vendor ID,
115 	 * but the Tamarack product ID.
116 	 */
117 	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021,
118 	  "Sundance TC9021 Gigabit Ethernet" },
119 
120 	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021_ALT,
121 	  "Sundance TC9021 Gigabit Ethernet" },
122 
123 	{ VENDOR_DLINK,		DEVICEID_DLINK_DL4000,
124 	  "D-Link DL-4000 Gigabit Ethernet" },
125 
126 	{ VENDOR_ANTARES,	DEVICEID_ANTARES_TC9021,
127 	  "Antares Gigabit Ethernet" }
128 };
129 
130 static int	stge_probe(device_t);
131 static int	stge_attach(device_t);
132 static int	stge_detach(device_t);
133 static void	stge_shutdown(device_t);
134 static int	stge_suspend(device_t);
135 static int	stge_resume(device_t);
136 
137 static int	stge_encap(struct stge_softc *, struct mbuf **);
138 static void	stge_start(struct ifnet *);
139 static void	stge_start_locked(struct ifnet *);
140 static void	stge_watchdog(struct stge_softc *);
141 static int	stge_ioctl(struct ifnet *, u_long, caddr_t);
142 static void	stge_init(void *);
143 static void	stge_init_locked(struct stge_softc *);
144 static void	stge_vlan_setup(struct stge_softc *);
145 static void	stge_stop(struct stge_softc *);
146 static void	stge_start_tx(struct stge_softc *);
147 static void	stge_start_rx(struct stge_softc *);
148 static void	stge_stop_tx(struct stge_softc *);
149 static void	stge_stop_rx(struct stge_softc *);
150 
151 static void	stge_reset(struct stge_softc *, uint32_t);
152 static int	stge_eeprom_wait(struct stge_softc *);
153 static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
154 static void	stge_tick(void *);
155 static void	stge_stats_update(struct stge_softc *);
156 static void	stge_set_filter(struct stge_softc *);
157 static void	stge_set_multi(struct stge_softc *);
158 
159 static void	stge_link_task(void *, int);
160 static void	stge_intr(void *);
161 static __inline int stge_tx_error(struct stge_softc *);
162 static void	stge_txeof(struct stge_softc *);
163 static void	stge_rxeof(struct stge_softc *);
164 static __inline void stge_discard_rxbuf(struct stge_softc *, int);
165 static int	stge_newbuf(struct stge_softc *, int);
166 #ifndef __NO_STRICT_ALIGNMENT
167 static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
168 #endif
169 
170 static void	stge_mii_sync(struct stge_softc *);
171 static void	stge_mii_send(struct stge_softc *, uint32_t, int);
172 static int	stge_mii_readreg(struct stge_softc *, struct stge_mii_frame *);
173 static int	stge_mii_writereg(struct stge_softc *, struct stge_mii_frame *);
174 static int	stge_miibus_readreg(device_t, int, int);
175 static int	stge_miibus_writereg(device_t, int, int, int);
176 static void	stge_miibus_statchg(device_t);
177 static int	stge_mediachange(struct ifnet *);
178 static void	stge_mediastatus(struct ifnet *, struct ifmediareq *);
179 
180 static void	stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
181 static int	stge_dma_alloc(struct stge_softc *);
182 static void	stge_dma_free(struct stge_softc *);
183 static void	stge_dma_wait(struct stge_softc *);
184 static void	stge_init_tx_ring(struct stge_softc *);
185 static int	stge_init_rx_ring(struct stge_softc *);
186 #ifdef DEVICE_POLLING
187 static void	stge_poll(struct ifnet *, enum poll_cmd, int);
188 #endif
189 
190 static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
191 static int	sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
192 static int	sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);
193 
194 static device_method_t stge_methods[] = {
195 	/* Device interface */
196 	DEVMETHOD(device_probe,		stge_probe),
197 	DEVMETHOD(device_attach,	stge_attach),
198 	DEVMETHOD(device_detach,	stge_detach),
199 	DEVMETHOD(device_shutdown,	stge_shutdown),
200 	DEVMETHOD(device_suspend,	stge_suspend),
201 	DEVMETHOD(device_resume,	stge_resume),
202 
203 	/* MII interface */
204 	DEVMETHOD(miibus_readreg,	stge_miibus_readreg),
205 	DEVMETHOD(miibus_writereg,	stge_miibus_writereg),
206 	DEVMETHOD(miibus_statchg,	stge_miibus_statchg),
207 
208 	{ 0, 0 }
210 };
211 
212 static driver_t stge_driver = {
213 	"stge",
214 	stge_methods,
215 	sizeof(struct stge_softc)
216 };
217 
218 static devclass_t stge_devclass;
219 
220 DRIVER_MODULE(stge, pci, stge_driver, stge_devclass, 0, 0);
221 DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0);
222 
223 static struct resource_spec stge_res_spec_io[] = {
224 	{ SYS_RES_IOPORT,	PCIR_BAR(0),	RF_ACTIVE },
225 	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
226 	{ -1,			0,		0 }
227 };
228 
229 static struct resource_spec stge_res_spec_mem[] = {
230 	{ SYS_RES_MEMORY,	PCIR_BAR(1),	RF_ACTIVE },
231 	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
232 	{ -1,			0,		0 }
233 };
234 
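/*
 * Bit-bang helpers for the MII management interface: MII_SET() raises
 * and MII_CLR() lowers the given bits in the PhyCtrl register.
 */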
235 #define	MII_SET(x)	\
236 	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) | (x))
237 #define	MII_CLR(x)	\
238 	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) & ~(x))
239 
240 /*
241  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
242  */
243 static void
244 stge_mii_sync(struct stge_softc	*sc)
245 {
246 	int i;
247 
248 	MII_SET(PC_MgmtDir | PC_MgmtData);
249 
250 	for (i = 0; i < 32; i++) {
251 		MII_SET(PC_MgmtClk);
252 		DELAY(1);
253 		MII_CLR(PC_MgmtClk);
254 		DELAY(1);
255 	}
256 }
257 
258 /*
259  * Clock a series of bits through the MII.
260  */
261 static void
262 stge_mii_send(struct stge_softc *sc, uint32_t bits, int cnt)
263 {
264 	int i;
265 
266 	MII_CLR(PC_MgmtClk);
267 
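	/*
	 * Shift the bits out MSB first; per IEEE 802.3 clause 22 the
	 * PHY samples MDIO on the rising edge of MDC.
	 */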
268 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
269 		if (bits & i)
270 			MII_SET(PC_MgmtData);
271 		else
272 			MII_CLR(PC_MgmtData);
273 		DELAY(1);
274 		MII_CLR(PC_MgmtClk);
275 		DELAY(1);
276 		MII_SET(PC_MgmtClk);
277 	}
278 }
279 
280 /*
281  * Read a PHY register through the MII.
282  */
283 static int
284 stge_mii_readreg(struct stge_softc *sc, struct stge_mii_frame *frame)
285 {
286 	int i, ack;
287 
288 	/*
289 	 * Set up frame for RX.
290 	 */
291 	frame->mii_stdelim = STGE_MII_STARTDELIM;
292 	frame->mii_opcode = STGE_MII_READOP;
293 	frame->mii_turnaround = 0;
294 	frame->mii_data = 0;
295 
296 	CSR_WRITE_1(sc, STGE_PhyCtrl, 0 | sc->sc_PhyCtrl);
297 	/*
298  	 * Turn on data xmit.
299 	 */
300 	MII_SET(PC_MgmtDir);
301 
302 	stge_mii_sync(sc);
303 
304 	/*
305 	 * Send command/address info.
306 	 */
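	/*
	 * The sequence below forms a standard IEEE 802.3 clause 22 read
	 * frame: start delimiter, opcode, 5-bit PHY address and 5-bit
	 * register address; the PHY drives the turnaround and data bits.
	 */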
307 	stge_mii_send(sc, frame->mii_stdelim, 2);
308 	stge_mii_send(sc, frame->mii_opcode, 2);
309 	stge_mii_send(sc, frame->mii_phyaddr, 5);
310 	stge_mii_send(sc, frame->mii_regaddr, 5);
311 
312 	/* Turn off xmit. */
313 	MII_CLR(PC_MgmtDir);
314 
315 	/* Idle bit */
316 	MII_CLR((PC_MgmtClk | PC_MgmtData));
317 	DELAY(1);
318 	MII_SET(PC_MgmtClk);
319 	DELAY(1);
320 
321 	/* Check for ack */
322 	MII_CLR(PC_MgmtClk);
323 	DELAY(1);
324 	ack = CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData;
325 	MII_SET(PC_MgmtClk);
326 	DELAY(1);
327 
328 	/*
329 	 * Now try reading data bits. If the ack failed, we still
330 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
331 	 */
332 	if (ack) {
333 		for(i = 0; i < 16; i++) {
334 			MII_CLR(PC_MgmtClk);
335 			DELAY(1);
336 			MII_SET(PC_MgmtClk);
337 			DELAY(1);
338 		}
339 		goto fail;
340 	}
341 
342 	for (i = 0x8000; i; i >>= 1) {
343 		MII_CLR(PC_MgmtClk);
344 		DELAY(1);
345 		if (!ack) {
346 			if (CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData)
347 				frame->mii_data |= i;
348 			DELAY(1);
349 		}
350 		MII_SET(PC_MgmtClk);
351 		DELAY(1);
352 	}
353 
354 fail:
355 	MII_CLR(PC_MgmtClk);
356 	DELAY(1);
357 	MII_SET(PC_MgmtClk);
358 	DELAY(1);
359 
360 	if (ack)
361 		return(1);
362 	return(0);
363 }
364 
365 /*
366  * Write to a PHY register through the MII.
367  */
368 static int
369 stge_mii_writereg(struct stge_softc *sc, struct stge_mii_frame *frame)
370 {
371 
372 	/*
373 	 * Set up frame for TX.
374 	 */
375 	frame->mii_stdelim = STGE_MII_STARTDELIM;
376 	frame->mii_opcode = STGE_MII_WRITEOP;
377 	frame->mii_turnaround = STGE_MII_TURNAROUND;
378 
379 	/*
380  	 * Turn on data output.
381 	 */
382 	MII_SET(PC_MgmtDir);
383 
384 	stge_mii_sync(sc);
385 
386 	stge_mii_send(sc, frame->mii_stdelim, 2);
387 	stge_mii_send(sc, frame->mii_opcode, 2);
388 	stge_mii_send(sc, frame->mii_phyaddr, 5);
389 	stge_mii_send(sc, frame->mii_regaddr, 5);
390 	stge_mii_send(sc, frame->mii_turnaround, 2);
391 	stge_mii_send(sc, frame->mii_data, 16);
392 
393 	/* Idle bit. */
394 	MII_SET(PC_MgmtClk);
395 	DELAY(1);
396 	MII_CLR(PC_MgmtClk);
397 	DELAY(1);
398 
399 	/*
400 	 * Turn off xmit.
401 	 */
402 	MII_CLR(PC_MgmtDir);
403 
404 	return(0);
405 }
406 
407 /*
408  * sc_miibus_readreg:	[mii interface function]
409  *
410  *	Read a PHY register on the MII of the TC9021.
411  */
412 static int
413 stge_miibus_readreg(device_t dev, int phy, int reg)
414 {
415 	struct stge_softc *sc;
416 	struct stge_mii_frame frame;
417 	int error;
418 
419 	sc = device_get_softc(dev);
420 
421 	if (reg == STGE_PhyCtrl) {
422 		/* XXX Allow ip1000phy to read the STGE_PhyCtrl register. */
423 		STGE_MII_LOCK(sc);
424 		error = CSR_READ_1(sc, STGE_PhyCtrl);
425 		STGE_MII_UNLOCK(sc);
426 		return (error);
427 	}
428 	bzero(&frame, sizeof(frame));
429 	frame.mii_phyaddr = phy;
430 	frame.mii_regaddr = reg;
431 
432 	STGE_MII_LOCK(sc);
433 	error = stge_mii_readreg(sc, &frame);
434 	STGE_MII_UNLOCK(sc);
435 
436 	if (error != 0) {
437 		/* Don't show errors for PHY probe request */
438 		if (reg != 1)
439 			device_printf(sc->sc_dev, "phy read fail\n");
440 		return (0);
441 	}
442 	return (frame.mii_data);
443 }
444 
445 /*
446  * stge_miibus_writereg:	[mii interface function]
447  *
448  *	Write a PHY register on the MII of the TC9021.
449  */
450 static int
451 stge_miibus_writereg(device_t dev, int phy, int reg, int val)
452 {
453 	struct stge_softc *sc;
454 	struct stge_mii_frame frame;
455 	int error;
456 
457 	sc = device_get_softc(dev);
458 
459 	bzero(&frame, sizeof(frame));
460 	frame.mii_phyaddr = phy;
461 	frame.mii_regaddr = reg;
462 	frame.mii_data = val;
463 
464 	STGE_MII_LOCK(sc);
465 	error = stge_mii_writereg(sc, &frame);
466 	STGE_MII_UNLOCK(sc);
467 
468 	if (error != 0)
469 		device_printf(sc->sc_dev, "phy write fail\n");
470 	return (0);
471 }
472 
473 /*
474  * stge_miibus_statchg:	[mii interface function]
475  *
476  *	Callback from MII layer when media changes.
477  */
478 static void
479 stge_miibus_statchg(device_t dev)
480 {
481 	struct stge_softc *sc;
482 
483 	sc = device_get_softc(dev);
484 	taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task);
485 }
486 
487 /*
488  * stge_mediastatus:	[ifmedia interface function]
489  *
490  *	Get the current interface media status.
491  */
492 static void
493 stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
494 {
495 	struct stge_softc *sc;
496 	struct mii_data *mii;
497 
498 	sc = ifp->if_softc;
499 	mii = device_get_softc(sc->sc_miibus);
500 
501 	mii_pollstat(mii);
502 	ifmr->ifm_status = mii->mii_media_status;
503 	ifmr->ifm_active = mii->mii_media_active;
504 }
505 
506 /*
507  * stge_mediachange:	[ifmedia interface function]
508  *
509  *	Set hardware to newly-selected media.
510  */
511 static int
512 stge_mediachange(struct ifnet *ifp)
513 {
514 	struct stge_softc *sc;
515 	struct mii_data *mii;
516 
517 	sc = ifp->if_softc;
518 	mii = device_get_softc(sc->sc_miibus);
519 	mii_mediachg(mii);
520 
521 	return (0);
522 }
523 
524 static int
525 stge_eeprom_wait(struct stge_softc *sc)
526 {
527 	int i;
528 
529 	for (i = 0; i < STGE_TIMEOUT; i++) {
530 		DELAY(1000);
531 		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
532 			return (0);
533 	}
534 	return (1);
535 }
536 
537 /*
538  * stge_read_eeprom:
539  *
540  *	Read data from the serial EEPROM.
541  */
542 static void
543 stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
544 {
545 
546 	if (stge_eeprom_wait(sc))
547 		device_printf(sc->sc_dev, "EEPROM failed to come ready\n");
548 
549 	CSR_WRITE_2(sc, STGE_EepromCtrl,
550 	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
551 	if (stge_eeprom_wait(sc))
552 		device_printf(sc->sc_dev, "EEPROM read timed out\n");
553 	*data = CSR_READ_2(sc, STGE_EepromData);
554 }
555 
557 static int
558 stge_probe(device_t dev)
559 {
560 	struct stge_product *sp;
561 	int i;
562 	uint16_t vendor, devid;
563 
564 	vendor = pci_get_vendor(dev);
565 	devid = pci_get_device(dev);
566 	sp = stge_products;
567 	for (i = 0; i < sizeof(stge_products)/sizeof(stge_products[0]);
568 	    i++, sp++) {
569 		if (vendor == sp->stge_vendorid &&
570 		    devid == sp->stge_deviceid) {
571 			device_set_desc(dev, sp->stge_name);
572 			return (BUS_PROBE_DEFAULT);
573 		}
574 	}
575 
576 	return (ENXIO);
577 }
578 
579 static int
580 stge_attach(device_t dev)
581 {
582 	struct stge_softc *sc;
583 	struct ifnet *ifp;
584 	uint8_t enaddr[ETHER_ADDR_LEN];
585 	int error, i;
586 	uint16_t cmd;
587 	uint32_t val;
588 
589 	error = 0;
590 	sc = device_get_softc(dev);
591 	sc->sc_dev = dev;
592 
593 	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
594 	    MTX_DEF);
595 	mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF);
596 	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
597 	TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc);
598 
599 	/*
600 	 * Map the device.
601 	 */
602 	pci_enable_busmaster(dev);
603 	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
604 	val = pci_read_config(dev, PCIR_BAR(1), 4);
605 	if ((val & 0x01) != 0)
606 		sc->sc_spec = stge_res_spec_mem;
607 	else {
608 		val = pci_read_config(dev, PCIR_BAR(0), 4);
609 		if ((val & 0x01) == 0) {
610 			device_printf(sc->sc_dev, "couldn't locate IO BAR\n");
611 			error = ENXIO;
612 			goto fail;
613 		}
614 		sc->sc_spec = stge_res_spec_io;
615 	}
616 	error = bus_alloc_resources(dev, sc->sc_spec, sc->sc_res);
617 	if (error != 0) {
618 		device_printf(dev, "couldn't allocate %s resources\n",
619 		    sc->sc_spec == stge_res_spec_mem ? "memory" : "I/O");
620 		goto fail;
621 	}
622 	sc->sc_rev = pci_get_revid(dev);
623 
624 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
625 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
626 	    "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0,
627 	    sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe");
628 
629 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
630 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
631 	    "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0,
632 	    sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait");
633 
634 	/* Pull in device tunables. */
635 	sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
636 	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
637 	    "rxint_nframe", &sc->sc_rxint_nframe);
638 	if (error == 0) {
639 		if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN ||
640 		    sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) {
641 			device_printf(dev, "rxint_nframe value out of range; "
642 			    "using default: %d\n", STGE_RXINT_NFRAME_DEFAULT);
643 			sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
644 		}
645 	}
646 
647 	sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
648 	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
649 	    "rxint_dmawait", &sc->sc_rxint_dmawait);
650 	if (error == 0) {
651 		if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN ||
652 		    sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) {
653 			device_printf(dev, "rxint_dmawait value out of range; "
654 			    "using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT);
655 			sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
656 		}
657 	}
658 
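	/*
	 * Both knobs can also be seeded from device hints, e.g. in
	 * /boot/device.hints (assuming unit 0):
	 *   hint.stge.0.rxint_nframe="8"
	 *   hint.stge.0.rxint_dmawait="30"
	 */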
659 	if ((error = stge_dma_alloc(sc)) != 0)
660 		goto fail;
661 
662 	/*
663 	 * Determine if we're copper or fiber.  It affects how we
664 	 * reset the card.
665 	 */
666 	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
667 		sc->sc_usefiber = 1;
668 	else
669 		sc->sc_usefiber = 0;
670 
671 	/* Load LED configuration from EEPROM. */
672 	stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);
673 
674 	/*
675 	 * Reset the chip to a known state.
676 	 */
677 	STGE_LOCK(sc);
678 	stge_reset(sc, STGE_RESET_FULL);
679 	STGE_UNLOCK(sc);
680 
681 	/*
682 	 * Reading the station address from the EEPROM doesn't seem
683 	 * to work, at least on my sample boards.  Instead, since
684 	 * the reset sequence does AutoInit, read it from the station
685 	 * address registers. For Sundance 1023 you can only read it
686 	 * from EEPROM.
687 	 */
688 	if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
689 		uint16_t v;
690 
691 		v = CSR_READ_2(sc, STGE_StationAddress0);
692 		enaddr[0] = v & 0xff;
693 		enaddr[1] = v >> 8;
694 		v = CSR_READ_2(sc, STGE_StationAddress1);
695 		enaddr[2] = v & 0xff;
696 		enaddr[3] = v >> 8;
697 		v = CSR_READ_2(sc, STGE_StationAddress2);
698 		enaddr[4] = v & 0xff;
699 		enaddr[5] = v >> 8;
700 		sc->sc_stge1023 = 0;
701 	} else {
702 		uint16_t myaddr[ETHER_ADDR_LEN / 2];
703 		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
704 			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
705 			    &myaddr[i]);
706 			myaddr[i] = le16toh(myaddr[i]);
707 		}
708 		bcopy(myaddr, enaddr, sizeof(enaddr));
709 		sc->sc_stge1023 = 1;
710 	}
711 
712 	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
713 	if (ifp == NULL) {
714 		device_printf(sc->sc_dev, "failed to if_alloc()\n");
715 		error = ENXIO;
716 		goto fail;
717 	}
718 
719 	ifp->if_softc = sc;
720 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
721 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
722 	ifp->if_ioctl = stge_ioctl;
723 	ifp->if_start = stge_start;
724 	ifp->if_timer = 0;
725 	ifp->if_watchdog = NULL;
726 	ifp->if_init = stge_init;
727 	ifp->if_mtu = ETHERMTU;
728 	ifp->if_snd.ifq_drv_maxlen = STGE_TX_RING_CNT - 1;
729 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
730 	IFQ_SET_READY(&ifp->if_snd);
731 	/* Revisions B3 and earlier have a hardware checksum bug. */
732 	if (sc->sc_rev >= 0x0c) {
733 		ifp->if_hwassist = STGE_CSUM_FEATURES;
734 		ifp->if_capabilities = IFCAP_HWCSUM;
735 	} else {
736 		ifp->if_hwassist = 0;
737 		ifp->if_capabilities = 0;
738 	}
739 	ifp->if_capenable = ifp->if_capabilities;
740 
741 	/*
742 	 * Read some important bits from the PhyCtrl register.
743 	 */
744 	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
745 	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);
746 
747 	/* Set up MII bus. */
748 	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, stge_mediachange,
749 	    stge_mediastatus)) != 0) {
750 		device_printf(sc->sc_dev, "no PHY found!\n");
751 		goto fail;
752 	}
753 
754 	ether_ifattach(ifp, enaddr);
755 
756 	/* VLAN capability setup */
757 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
758 	if (sc->sc_rev >= 0x0c)
759 		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
760 	ifp->if_capenable = ifp->if_capabilities;
761 #ifdef DEVICE_POLLING
762 	ifp->if_capabilities |= IFCAP_POLLING;
763 #endif
764 	/*
765 	 * Tell the upper layer(s) we support long frames.
766 	 * Must appear after the call to ether_ifattach() because
767 	 * ether_ifattach() sets ifi_hdrlen to the default value.
768 	 */
769 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
770 
771 	/*
772 	 * The manual recommends disabling early transmit, so we
773 	 * do.  It's disabled anyway, if using IP checksumming,
774 	 * since the entire packet must be in the FIFO in order
775 	 * for the chip to perform the checksum.
776 	 */
777 	sc->sc_txthresh = 0x0fff;
778 
779 	/*
780 	 * Disable MWI if the PCI layer tells us to.
781 	 */
782 	sc->sc_DMACtrl = 0;
783 	if ((cmd & PCIM_CMD_MWRICEN) == 0)
784 		sc->sc_DMACtrl |= DMAC_MWIDisable;
785 
786 	/*
787 	 * Hookup IRQ
788 	 */
789 	error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE,
790 	    NULL, stge_intr, sc, &sc->sc_ih);
791 	if (error != 0) {
792 		ether_ifdetach(ifp);
793 		device_printf(sc->sc_dev, "couldn't set up IRQ\n");
794 		sc->sc_ifp = NULL;
795 		goto fail;
796 	}
797 
798 fail:
799 	if (error != 0)
800 		stge_detach(dev);
801 
802 	return (error);
803 }
804 
805 static int
806 stge_detach(device_t dev)
807 {
808 	struct stge_softc *sc;
809 	struct ifnet *ifp;
810 
811 	sc = device_get_softc(dev);
812 
813 	ifp = sc->sc_ifp;
814 #ifdef DEVICE_POLLING
815 	if (ifp && ifp->if_capenable & IFCAP_POLLING)
816 		ether_poll_deregister(ifp);
817 #endif
818 	if (device_is_attached(dev)) {
819 		STGE_LOCK(sc);
820 		/* XXX */
821 		sc->sc_detach = 1;
822 		stge_stop(sc);
823 		STGE_UNLOCK(sc);
824 		callout_drain(&sc->sc_tick_ch);
825 		taskqueue_drain(taskqueue_swi, &sc->sc_link_task);
826 		ether_ifdetach(ifp);
827 	}
828 
829 	if (sc->sc_miibus != NULL) {
830 		device_delete_child(dev, sc->sc_miibus);
831 		sc->sc_miibus = NULL;
832 	}
833 	bus_generic_detach(dev);
834 	stge_dma_free(sc);
835 
836 	if (ifp != NULL) {
837 		if_free(ifp);
838 		sc->sc_ifp = NULL;
839 	}
840 
841 	if (sc->sc_ih) {
842 		bus_teardown_intr(dev, sc->sc_res[1], sc->sc_ih);
843 		sc->sc_ih = NULL;
844 	}
845 	bus_release_resources(dev, sc->sc_spec, sc->sc_res);
846 
847 	mtx_destroy(&sc->sc_mii_mtx);
848 	mtx_destroy(&sc->sc_mtx);
849 
850 	return (0);
851 }
852 
853 struct stge_dmamap_arg {
854 	bus_addr_t	stge_busaddr;
855 };
856 
857 static void
858 stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
859 {
860 	struct stge_dmamap_arg *ctx;
861 
862 	if (error != 0)
863 		return;
864 
865 	ctx = (struct stge_dmamap_arg *)arg;
866 	ctx->stge_busaddr = segs[0].ds_addr;
867 }
868 
869 static int
870 stge_dma_alloc(struct stge_softc *sc)
871 {
872 	struct stge_dmamap_arg ctx;
873 	struct stge_txdesc *txd;
874 	struct stge_rxdesc *rxd;
875 	int error, i;
876 
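	/*
	 * All ring and buffer tags below descend from this parent tag,
	 * which restricts every allocation to STGE_DMA_MAXADDR.
	 */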
877 	/* create parent tag. */
878 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),/* parent */
879 		    1, 0,			/* algnmnt, boundary */
880 		    STGE_DMA_MAXADDR,		/* lowaddr */
881 		    BUS_SPACE_MAXADDR,		/* highaddr */
882 		    NULL, NULL,			/* filter, filterarg */
883 		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
884 		    0,				/* nsegments */
885 		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
886 		    0,				/* flags */
887 		    NULL, NULL,			/* lockfunc, lockarg */
888 		    &sc->sc_cdata.stge_parent_tag);
889 	if (error != 0) {
890 		device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
891 		goto fail;
892 	}
893 	/* create tag for Tx ring. */
894 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
895 		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
896 		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
897 		    BUS_SPACE_MAXADDR,		/* highaddr */
898 		    NULL, NULL,			/* filter, filterarg */
899 		    STGE_TX_RING_SZ,		/* maxsize */
900 		    1,				/* nsegments */
901 		    STGE_TX_RING_SZ,		/* maxsegsize */
902 		    0,				/* flags */
903 		    NULL, NULL,			/* lockfunc, lockarg */
904 		    &sc->sc_cdata.stge_tx_ring_tag);
905 	if (error != 0) {
906 		device_printf(sc->sc_dev,
907 		    "failed to allocate Tx ring DMA tag\n");
908 		goto fail;
909 	}
910 
911 	/* create tag for Rx ring. */
912 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
913 		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
914 		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
915 		    BUS_SPACE_MAXADDR,		/* highaddr */
916 		    NULL, NULL,			/* filter, filterarg */
917 		    STGE_RX_RING_SZ,		/* maxsize */
918 		    1,				/* nsegments */
919 		    STGE_RX_RING_SZ,		/* maxsegsize */
920 		    0,				/* flags */
921 		    NULL, NULL,			/* lockfunc, lockarg */
922 		    &sc->sc_cdata.stge_rx_ring_tag);
923 	if (error != 0) {
924 		device_printf(sc->sc_dev,
925 		    "failed to allocate Rx ring DMA tag\n");
926 		goto fail;
927 	}
928 
929 	/* create tag for Tx buffers. */
930 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
931 		    1, 0,			/* algnmnt, boundary */
932 		    BUS_SPACE_MAXADDR,		/* lowaddr */
933 		    BUS_SPACE_MAXADDR,		/* highaddr */
934 		    NULL, NULL,			/* filter, filterarg */
935 		    MCLBYTES * STGE_MAXTXSEGS,	/* maxsize */
936 		    STGE_MAXTXSEGS,		/* nsegments */
937 		    MCLBYTES,			/* maxsegsize */
938 		    0,				/* flags */
939 		    NULL, NULL,			/* lockfunc, lockarg */
940 		    &sc->sc_cdata.stge_tx_tag);
941 	if (error != 0) {
942 		device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
943 		goto fail;
944 	}
945 
946 	/* create tag for Rx buffers. */
947 	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
948 		    1, 0,			/* algnmnt, boundary */
949 		    BUS_SPACE_MAXADDR,		/* lowaddr */
950 		    BUS_SPACE_MAXADDR,		/* highaddr */
951 		    NULL, NULL,			/* filter, filterarg */
952 		    MCLBYTES,			/* maxsize */
953 		    1,				/* nsegments */
954 		    MCLBYTES,			/* maxsegsize */
955 		    0,				/* flags */
956 		    NULL, NULL,			/* lockfunc, lockarg */
957 		    &sc->sc_cdata.stge_rx_tag);
958 	if (error != 0) {
959 		device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
960 		goto fail;
961 	}
962 
963 	/* allocate DMA'able memory and load the DMA map for Tx ring. */
964 	error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
965 	    (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
966 	    &sc->sc_cdata.stge_tx_ring_map);
967 	if (error != 0) {
968 		device_printf(sc->sc_dev,
969 		    "failed to allocate DMA'able memory for Tx ring\n");
970 		goto fail;
971 	}
972 
973 	ctx.stge_busaddr = 0;
974 	error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
975 	    sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
976 	    STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
977 	if (error != 0 || ctx.stge_busaddr == 0) {
978 		device_printf(sc->sc_dev,
979 		    "failed to load DMA'able memory for Tx ring\n");
980 		goto fail;
981 	}
982 	sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;
983 
984 	/* allocate DMA'able memory and load the DMA map for Rx ring. */
985 	error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
986 	    (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
987 	    &sc->sc_cdata.stge_rx_ring_map);
988 	if (error != 0) {
989 		device_printf(sc->sc_dev,
990 		    "failed to allocate DMA'able memory for Rx ring\n");
991 		goto fail;
992 	}
993 
994 	ctx.stge_busaddr = 0;
995 	error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
996 	    sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
997 	    STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
998 	if (error != 0 || ctx.stge_busaddr == 0) {
999 		device_printf(sc->sc_dev,
1000 		    "failed to load DMA'able memory for Rx ring\n");
1001 		goto fail;
1002 	}
1003 	sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;
1004 
1005 	/* create DMA maps for Tx buffers. */
1006 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
1007 		txd = &sc->sc_cdata.stge_txdesc[i];
1008 		txd->tx_m = NULL;
1009 		txd->tx_dmamap = 0;
1010 		error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
1011 		    &txd->tx_dmamap);
1012 		if (error != 0) {
1013 			device_printf(sc->sc_dev,
1014 			    "failed to create Tx dmamap\n");
1015 			goto fail;
1016 		}
1017 	}
1018 	/* create DMA maps for Rx buffers. */
1019 	if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
1020 	    &sc->sc_cdata.stge_rx_sparemap)) != 0) {
1021 		device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
1022 		goto fail;
1023 	}
1024 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
1025 		rxd = &sc->sc_cdata.stge_rxdesc[i];
1026 		rxd->rx_m = NULL;
1027 		rxd->rx_dmamap = 0;
1028 		error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
1029 		    &rxd->rx_dmamap);
1030 		if (error != 0) {
1031 			device_printf(sc->sc_dev,
1032 			    "failed to create Rx dmamap\n");
1033 			goto fail;
1034 		}
1035 	}
1036 
1037 fail:
1038 	return (error);
1039 }
1040 
1041 static void
1042 stge_dma_free(struct stge_softc *sc)
1043 {
1044 	struct stge_txdesc *txd;
1045 	struct stge_rxdesc *rxd;
1046 	int i;
1047 
1048 	/* Tx ring */
1049 	if (sc->sc_cdata.stge_tx_ring_tag) {
1050 		if (sc->sc_cdata.stge_tx_ring_map)
1051 			bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
1052 			    sc->sc_cdata.stge_tx_ring_map);
1053 		if (sc->sc_cdata.stge_tx_ring_map &&
1054 		    sc->sc_rdata.stge_tx_ring)
1055 			bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
1056 			    sc->sc_rdata.stge_tx_ring,
1057 			    sc->sc_cdata.stge_tx_ring_map);
1058 		sc->sc_rdata.stge_tx_ring = NULL;
1059 		sc->sc_cdata.stge_tx_ring_map = 0;
1060 		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
1061 		sc->sc_cdata.stge_tx_ring_tag = NULL;
1062 	}
1063 	/* Rx ring */
1064 	if (sc->sc_cdata.stge_rx_ring_tag) {
1065 		if (sc->sc_cdata.stge_rx_ring_map)
1066 			bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
1067 			    sc->sc_cdata.stge_rx_ring_map);
1068 		if (sc->sc_cdata.stge_rx_ring_map &&
1069 		    sc->sc_rdata.stge_rx_ring)
1070 			bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
1071 			    sc->sc_rdata.stge_rx_ring,
1072 			    sc->sc_cdata.stge_rx_ring_map);
1073 		sc->sc_rdata.stge_rx_ring = NULL;
1074 		sc->sc_cdata.stge_rx_ring_map = 0;
1075 		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
1076 		sc->sc_cdata.stge_rx_ring_tag = NULL;
1077 	}
1078 	/* Tx buffers */
1079 	if (sc->sc_cdata.stge_tx_tag) {
1080 		for (i = 0; i < STGE_TX_RING_CNT; i++) {
1081 			txd = &sc->sc_cdata.stge_txdesc[i];
1082 			if (txd->tx_dmamap) {
1083 				bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
1084 				    txd->tx_dmamap);
1085 				txd->tx_dmamap = 0;
1086 			}
1087 		}
1088 		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
1089 		sc->sc_cdata.stge_tx_tag = NULL;
1090 	}
1091 	/* Rx buffers */
1092 	if (sc->sc_cdata.stge_rx_tag) {
1093 		for (i = 0; i < STGE_RX_RING_CNT; i++) {
1094 			rxd = &sc->sc_cdata.stge_rxdesc[i];
1095 			if (rxd->rx_dmamap) {
1096 				bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
1097 				    rxd->rx_dmamap);
1098 				rxd->rx_dmamap = 0;
1099 			}
1100 		}
1101 		if (sc->sc_cdata.stge_rx_sparemap) {
1102 			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
1103 			    sc->sc_cdata.stge_rx_sparemap);
1104 			sc->sc_cdata.stge_rx_sparemap = 0;
1105 		}
1106 		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
1107 		sc->sc_cdata.stge_rx_tag = NULL;
1108 	}
1109 
1110 	if (sc->sc_cdata.stge_parent_tag) {
1111 		bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
1112 		sc->sc_cdata.stge_parent_tag = NULL;
1113 	}
1114 }
1115 
1116 /*
1117  * stge_shutdown:
1118  *
1119  *	Make sure the interface is stopped at reboot time.
1120  */
1121 static void
1122 stge_shutdown(device_t dev)
1123 {
1124 	struct stge_softc *sc;
1125 
1126 	sc = device_get_softc(dev);
1127 
1128 	STGE_LOCK(sc);
1129 	stge_stop(sc);
1130 	STGE_UNLOCK(sc);
1131 }
1132 
1133 static int
1134 stge_suspend(device_t dev)
1135 {
1136 	struct stge_softc *sc;
1137 
1138 	sc = device_get_softc(dev);
1139 
1140 	STGE_LOCK(sc);
1141 	stge_stop(sc);
1142 	sc->sc_suspended = 1;
1143 	STGE_UNLOCK(sc);
1144 
1145 	return (0);
1146 }
1147 
1148 static int
1149 stge_resume(device_t dev)
1150 {
1151 	struct stge_softc *sc;
1152 	struct ifnet *ifp;
1153 
1154 	sc = device_get_softc(dev);
1155 
1156 	STGE_LOCK(sc);
1157 	ifp = sc->sc_ifp;
1158 	if (ifp->if_flags & IFF_UP)
1159 		stge_init_locked(sc);
1160 
1161 	sc->sc_suspended = 0;
1162 	STGE_UNLOCK(sc);
1163 
1164 	return (0);
1165 }
1166 
1167 static void
1168 stge_dma_wait(struct stge_softc *sc)
1169 {
1170 	int i;
1171 
1172 	for (i = 0; i < STGE_TIMEOUT; i++) {
1173 		DELAY(2);
1174 		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
1175 			break;
1176 	}
1177 
1178 	if (i == STGE_TIMEOUT)
1179 		device_printf(sc->sc_dev, "DMA wait timed out\n");
1180 }
1181 
1182 static int
1183 stge_encap(struct stge_softc *sc, struct mbuf **m_head)
1184 {
1185 	struct stge_txdesc *txd;
1186 	struct stge_tfd *tfd;
1187 	struct mbuf *m;
1188 	bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
1189 	int error, i, nsegs, si;
1190 	uint64_t csum_flags, tfc;
1191 
1192 	STGE_LOCK_ASSERT(sc);
1193 
1194 	if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
1195 		return (ENOBUFS);
1196 
1197 	error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
1198 	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
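	/*
	 * EFBIG means the chain needs more than STGE_MAXTXSEGS DMA
	 * segments; compact it with m_defrag() and retry the load once.
	 */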
1199 	if (error == EFBIG) {
1200 		m = m_defrag(*m_head, M_DONTWAIT);
1201 		if (m == NULL) {
1202 			m_freem(*m_head);
1203 			*m_head = NULL;
1204 			return (ENOMEM);
1205 		}
1206 		*m_head = m;
1207 		error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
1208 		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1209 		if (error != 0) {
1210 			m_freem(*m_head);
1211 			*m_head = NULL;
1212 			return (error);
1213 		}
1214 	} else if (error != 0)
1215 		return (error);
1216 	if (nsegs == 0) {
1217 		m_freem(*m_head);
1218 		*m_head = NULL;
1219 		return (EIO);
1220 	}
1221 
1222 	m = *m_head;
1223 	csum_flags = 0;
1224 	if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
1225 		if (m->m_pkthdr.csum_flags & CSUM_IP)
1226 			csum_flags |= TFD_IPChecksumEnable;
1227 		if (m->m_pkthdr.csum_flags & CSUM_TCP)
1228 			csum_flags |= TFD_TCPChecksumEnable;
1229 		else if (m->m_pkthdr.csum_flags & CSUM_UDP)
1230 			csum_flags |= TFD_UDPChecksumEnable;
1231 	}
1232 
1233 	si = sc->sc_cdata.stge_tx_prod;
1234 	tfd = &sc->sc_rdata.stge_tx_ring[si];
1235 	for (i = 0; i < nsegs; i++)
1236 		tfd->tfd_frags[i].frag_word0 =
1237 		    htole64(FRAG_ADDR(txsegs[i].ds_addr) |
1238 		    FRAG_LEN(txsegs[i].ds_len));
1239 	sc->sc_cdata.stge_tx_cnt++;
1240 
1241 	tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
1242 	    TFD_FragCount(nsegs) | csum_flags;
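	/*
	 * Request a Tx DMA completion interrupt only once the ring is
	 * filling up; otherwise completed descriptors are reclaimed
	 * from stge_tick(), at most one second later.
	 */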
1243 	if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
1244 		tfc |= TFD_TxDMAIndicate;
1245 
1246 	/* Update producer index. */
1247 	sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;
1248 
1249 	/* Check if we have a VLAN tag to insert. */
1250 	if (m->m_flags & M_VLANTAG)
1251 		tfc |= (TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag));
1252 	tfd->tfd_control = htole64(tfc);
1253 
1254 	/* Update Tx Queue. */
1255 	STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
1256 	STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
1257 	txd->tx_m = m;
1258 
1259 	/* Sync descriptors. */
1260 	bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
1261 	    BUS_DMASYNC_PREWRITE);
1262 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1263 	    sc->sc_cdata.stge_tx_ring_map,
1264 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1265 
1266 	return (0);
1267 }
1268 
1269 /*
1270  * stge_start:		[ifnet interface function]
1271  *
1272  *	Start packet transmission on the interface.
1273  */
1274 static void
1275 stge_start(struct ifnet *ifp)
1276 {
1277 	struct stge_softc *sc;
1278 
1279 	sc = ifp->if_softc;
1280 	STGE_LOCK(sc);
1281 	stge_start_locked(ifp);
1282 	STGE_UNLOCK(sc);
1283 }
1284 
1285 static void
1286 stge_start_locked(struct ifnet *ifp)
1287 {
1288 	struct stge_softc *sc;
1289 	struct mbuf *m_head;
1290 	int enq;
1291 
1292 	sc = ifp->if_softc;
1293 
1294 	STGE_LOCK_ASSERT(sc);
1295 
1296 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1297 	    IFF_DRV_RUNNING || sc->sc_link == 0)
1298 		return;
1299 
1300 	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1301 		if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
1302 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1303 			break;
1304 		}
1305 
1306 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1307 		if (m_head == NULL)
1308 			break;
1309 		/*
1310 		 * Pack the data into the transmit ring. If we
1311 		 * don't have room, set the OACTIVE flag and wait
1312 		 * for the NIC to drain the ring.
1313 		 */
1314 		if (stge_encap(sc, &m_head)) {
1315 			if (m_head == NULL)
1316 				break;
1317 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1318 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1319 			break;
1320 		}
1321 
1322 		enq++;
1323 		/*
1324 		 * If there's a BPF listener, bounce a copy of this frame
1325 		 * to him.
1326 		 */
1327 		ETHER_BPF_MTAP(ifp, m_head);
1328 	}
1329 
1330 	if (enq > 0) {
1331 		/* Transmit */
1332 		CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);
1333 
1334 		/* Set a timeout in case the chip goes out to lunch. */
1335 		sc->sc_watchdog_timer = 5;
1336 	}
1337 }
1338 
1339 /*
1340  * stge_watchdog:
1341  *
1342  *	Watchdog timer handler.
1343  */
1344 static void
1345 stge_watchdog(struct stge_softc *sc)
1346 {
1347 	struct ifnet *ifp;
1348 
1349 	STGE_LOCK_ASSERT(sc);
1350 
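	/*
	 * The timer is armed by stge_start_locked() and cleared by
	 * stge_txeof(); only act when it was running and just expired.
	 */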
1351 	if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
1352 		return;
1353 
1354 	ifp = sc->sc_ifp;
1355 	if_printf(sc->sc_ifp, "device timeout\n");
1356 	ifp->if_oerrors++;
1357 	stge_init_locked(sc);
1358 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1359 		stge_start_locked(ifp);
1360 }
1361 
1362 /*
1363  * stge_ioctl:		[ifnet interface function]
1364  *
1365  *	Handle control requests from the operator.
1366  */
1367 static int
1368 stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1369 {
1370 	struct stge_softc *sc;
1371 	struct ifreq *ifr;
1372 	struct mii_data *mii;
1373 	int error, mask;
1374 
1375 	sc = ifp->if_softc;
1376 	ifr = (struct ifreq *)data;
1377 	error = 0;
1378 	switch (cmd) {
1379 	case SIOCSIFMTU:
1380 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
1381 			error = EINVAL;
1382 		else if (ifp->if_mtu != ifr->ifr_mtu) {
1383 			ifp->if_mtu = ifr->ifr_mtu;
1384 			STGE_LOCK(sc);
1385 			stge_init_locked(sc);
1386 			STGE_UNLOCK(sc);
1387 		}
1388 		break;
1389 	case SIOCSIFFLAGS:
1390 		STGE_LOCK(sc);
1391 		if ((ifp->if_flags & IFF_UP) != 0) {
1392 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1393 				if (((ifp->if_flags ^ sc->sc_if_flags)
1394 				    & IFF_PROMISC) != 0)
1395 					stge_set_filter(sc);
1396 			} else {
1397 				if (sc->sc_detach == 0)
1398 					stge_init_locked(sc);
1399 			}
1400 		} else {
1401 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1402 				stge_stop(sc);
1403 		}
1404 		sc->sc_if_flags = ifp->if_flags;
1405 		STGE_UNLOCK(sc);
1406 		break;
1407 	case SIOCADDMULTI:
1408 	case SIOCDELMULTI:
1409 		STGE_LOCK(sc);
1410 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1411 			stge_set_multi(sc);
1412 		STGE_UNLOCK(sc);
1413 		break;
1414 	case SIOCSIFMEDIA:
1415 	case SIOCGIFMEDIA:
1416 		mii = device_get_softc(sc->sc_miibus);
1417 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1418 		break;
1419 	case SIOCSIFCAP:
1420 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1421 #ifdef DEVICE_POLLING
1422 		if ((mask & IFCAP_POLLING) != 0) {
1423 			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1424 				error = ether_poll_register(stge_poll, ifp);
1425 				if (error != 0)
1426 					break;
1427 				STGE_LOCK(sc);
1428 				CSR_WRITE_2(sc, STGE_IntEnable, 0);
1429 				ifp->if_capenable |= IFCAP_POLLING;
1430 				STGE_UNLOCK(sc);
1431 			} else {
1432 				error = ether_poll_deregister(ifp);
1433 				if (error != 0)
1434 					break;
1435 				STGE_LOCK(sc);
1436 				CSR_WRITE_2(sc, STGE_IntEnable,
1437 				    sc->sc_IntEnable);
1438 				ifp->if_capenable &= ~IFCAP_POLLING;
1439 				STGE_UNLOCK(sc);
1440 			}
1441 		}
1442 #endif
1443 		if ((mask & IFCAP_HWCSUM) != 0) {
1444 			ifp->if_capenable ^= IFCAP_HWCSUM;
1445 			if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
1446 			    (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
1447 				ifp->if_hwassist = STGE_CSUM_FEATURES;
1448 			else
1449 				ifp->if_hwassist = 0;
1450 		}
1451 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
1452 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1453 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1454 				STGE_LOCK(sc);
1455 				stge_vlan_setup(sc);
1456 				STGE_UNLOCK(sc);
1457 			}
1458 		}
1459 		VLAN_CAPABILITIES(ifp);
1460 		break;
1461 	default:
1462 		error = ether_ioctl(ifp, cmd, data);
1463 		break;
1464 	}
1465 
1466 	return (error);
1467 }
1468 
1469 static void
1470 stge_link_task(void *arg, int pending)
1471 {
1472 	struct stge_softc *sc;
1473 	struct mii_data *mii;
1474 	uint32_t v, ac;
1475 	int i;
1476 
1477 	sc = (struct stge_softc *)arg;
1478 	STGE_LOCK(sc);
1479 
1480 	mii = device_get_softc(sc->sc_miibus);
1481 	if (mii->mii_media_status & IFM_ACTIVE) {
1482 		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
1483 			sc->sc_link = 1;
1484 	} else
1485 		sc->sc_link = 0;
1486 
1487 	sc->sc_MACCtrl = 0;
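	/*
	 * IFM_FLAG0/IFM_FLAG1 in the media word carry the negotiated
	 * Rx/Tx pause state; this appears to rely on the PHY driver
	 * (ip1000phy) encoding flow control in those flags.
	 */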
1488 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
1489 		sc->sc_MACCtrl |= MC_DuplexSelect;
1490 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) != 0)
1491 		sc->sc_MACCtrl |= MC_RxFlowControlEnable;
1492 	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) != 0)
1493 		sc->sc_MACCtrl |= MC_TxFlowControlEnable;
1494 	/*
1495 	 * Update the STGE_MACCtrl register depending on link status
1496 	 * (duplex, flow control, etc.).
1497 	 */
1498 	v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
1499 	v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
1500 	v |= sc->sc_MACCtrl;
1501 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
1502 	if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
1503 		/* Duplex setting changed, reset Tx/Rx functions. */
1504 		ac = CSR_READ_4(sc, STGE_AsicCtrl);
1505 		ac |= AC_TxReset | AC_RxReset;
1506 		CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1507 		for (i = 0; i < STGE_TIMEOUT; i++) {
1508 			DELAY(100);
1509 			if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
1510 				break;
1511 		}
1512 		if (i == STGE_TIMEOUT)
1513 			device_printf(sc->sc_dev, "reset failed to complete\n");
1514 	}
1515 	STGE_UNLOCK(sc);
1516 }
1517 
1518 static __inline int
1519 stge_tx_error(struct stge_softc *sc)
1520 {
1521 	uint32_t txstat;
1522 	int error;
1523 
1524 	for (error = 0;;) {
1525 		txstat = CSR_READ_4(sc, STGE_TxStatus);
1526 		if ((txstat & TS_TxComplete) == 0)
1527 			break;
1528 		/* Tx underrun */
1529 		if ((txstat & TS_TxUnderrun) != 0) {
1530 			/*
1531 			 * XXX
1532 			 * There should be a better way to recover
1533 			 * from Tx underrun instead of a full reset.
1534 			 */
1535 			if (sc->sc_nerr++ < STGE_MAXERR)
1536 				device_printf(sc->sc_dev, "Tx underrun, "
1537 				    "resetting...\n");
1538 			if (sc->sc_nerr == STGE_MAXERR)
1539 				device_printf(sc->sc_dev, "too many errors; "
1540 				    "not reporting any more\n");
1541 			error = -1;
1542 			break;
1543 		}
1544 		/* Maximum/Late collisions, Re-enable Tx MAC. */
1545 		if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
1546 			CSR_WRITE_4(sc, STGE_MACCtrl,
1547 			    (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
1548 			    MC_TxEnable);
1549 	}
1550 
1551 	return (error);
1552 }
1553 
1554 /*
1555  * stge_intr:
1556  *
1557  *	Interrupt service routine.
1558  */
1559 static void
1560 stge_intr(void *arg)
1561 {
1562 	struct stge_softc *sc;
1563 	struct ifnet *ifp;
1564 	int reinit;
1565 	uint16_t status;
1566 
1567 	sc = (struct stge_softc *)arg;
1568 	ifp = sc->sc_ifp;
1569 
1570 	STGE_LOCK(sc);
1571 
1572 #ifdef DEVICE_POLLING
1573 	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1574 		goto done_locked;
1575 #endif
1576 	status = CSR_READ_2(sc, STGE_IntStatus);
1577 	if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
1578 		goto done_locked;
1579 
1580 	/* Reading IntStatusAck acks pending sources and disables interrupts. */
1581 	for (reinit = 0;;) {
1582 		status = CSR_READ_2(sc, STGE_IntStatusAck);
1583 		status &= sc->sc_IntEnable;
1584 		if (status == 0)
1585 			break;
1586 		/* Host interface errors. */
1587 		if ((status & IS_HostError) != 0) {
1588 			device_printf(sc->sc_dev,
1589 			    "Host interface error, resetting...\n");
1590 			reinit = 1;
1591 			goto force_init;
1592 		}
1593 
1594 		/* Receive interrupts. */
1595 		if ((status & IS_RxDMAComplete) != 0) {
1596 			stge_rxeof(sc);
1597 			if ((status & IS_RFDListEnd) != 0)
1598 				CSR_WRITE_4(sc, STGE_DMACtrl,
1599 				    DMAC_RxDMAPollNow);
1600 		}
1601 
1602 		/* Transmit interrupts. */
1603 		if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
1604 			stge_txeof(sc);
1605 
1606 		/* Transmission errors. */
1607 		if ((status & IS_TxComplete) != 0) {
1608 			if ((reinit = stge_tx_error(sc)) != 0)
1609 				break;
1610 		}
1611 	}
1612 
1613 force_init:
1614 	if (reinit != 0)
1615 		stge_init_locked(sc);
1616 
1617 	/* Re-enable interrupts. */
1618 	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
1619 
1620 	/* Try to get more packets going. */
1621 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1622 		stge_start_locked(ifp);
1623 
1624 done_locked:
1625 	STGE_UNLOCK(sc);
1626 }
1627 
1628 /*
1629  * stge_txeof:
1630  *
1631  *	Helper; handle transmit interrupts.
1632  */
1633 static void
1634 stge_txeof(struct stge_softc *sc)
1635 {
1636 	struct ifnet *ifp;
1637 	struct stge_txdesc *txd;
1638 	uint64_t control;
1639 	int cons;
1640 
1641 	STGE_LOCK_ASSERT(sc);
1642 
1643 	ifp = sc->sc_ifp;
1644 
1645 	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
1646 	if (txd == NULL)
1647 		return;
1648 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1649 	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);
1650 
1651 	/*
1652 	 * Go through our Tx list and free mbufs for those
1653 	 * frames which have been transmitted.
1654 	 */
1655 	for (cons = sc->sc_cdata.stge_tx_cons;;
1656 	    cons = (cons + 1) % STGE_TX_RING_CNT) {
1657 		if (sc->sc_cdata.stge_tx_cnt <= 0)
1658 			break;
1659 		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
1660 		if ((control & TFD_TFDDone) == 0)
1661 			break;
1662 		sc->sc_cdata.stge_tx_cnt--;
1663 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1664 
1665 		bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
1666 		    BUS_DMASYNC_POSTWRITE);
1667 		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);
1668 
1669 		/* Output packets are counted via the statistics registers. */
1670 		m_freem(txd->tx_m);
1671 		txd->tx_m = NULL;
1672 		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
1673 		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
1674 		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
1675 	}
1676 	sc->sc_cdata.stge_tx_cons = cons;
1677 	if (sc->sc_cdata.stge_tx_cnt == 0)
1678 		sc->sc_watchdog_timer = 0;
1679 
1680 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1681 	    sc->sc_cdata.stge_tx_ring_map,
1682 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1683 }
1684 
1685 static __inline void
1686 stge_discard_rxbuf(struct stge_softc *sc, int idx)
1687 {
1688 	struct stge_rfd *rfd;
1689 
1690 	rfd = &sc->sc_rdata.stge_rx_ring[idx];
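	/*
	 * Clearing the status word (and with it RFD_RFDDone) hands the
	 * descriptor back to the chip.
	 */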
1691 	rfd->rfd_status = 0;
1692 }
1693 
1694 #ifndef __NO_STRICT_ALIGNMENT
1695 /*
1696  * It seems that TC9021's DMA engine has alignment restrictions in
1697  * DMA scatter operations. The first DMA segment has no address
1698  * alignment restrictions but the rest should be aligned on a 4(?) byte
1699  * boundary. Otherwise it would corrupt random memory. Since we don't
1700  * know which one is used for the first segment in advance, we simply
1701  * don't align at all.
1702  * To avoid copying over an entire frame to align, we allocate a new
1703  * mbuf and copy ethernet header to the new mbuf. The new mbuf is
1704  * prepended into the existing mbuf chain.
1705  */
1706 static __inline struct mbuf *
1707 stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
1708 {
1709 	struct mbuf *n;
1710 
1711 	n = NULL;
1712 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
1713 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
1714 		m->m_data += ETHER_HDR_LEN;
1715 		n = m;
1716 	} else {
1717 		MGETHDR(n, M_DONTWAIT, MT_DATA);
1718 		if (n != NULL) {
1719 			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
1720 			m->m_data += ETHER_HDR_LEN;
1721 			m->m_len -= ETHER_HDR_LEN;
1722 			n->m_len = ETHER_HDR_LEN;
1723 			M_MOVE_PKTHDR(n, m);
1724 			n->m_next = m;
1725 		} else
1726 			m_freem(m);
1727 	}
1728 
1729 	return (n);
1730 }
1731 #endif
1732 
1733 /*
1734  * stge_rxeof:
1735  *
1736  *	Helper; handle receive interrupts.
1737  */
1738 static void
1739 stge_rxeof(struct stge_softc *sc)
1740 {
1741 	struct ifnet *ifp;
1742 	struct stge_rxdesc *rxd;
1743 	struct mbuf *mp, *m;
1744 	uint64_t status64;
1745 	uint32_t status;
1746 	int cons, prog;
1747 
1748 	STGE_LOCK_ASSERT(sc);
1749 
1750 	ifp = sc->sc_ifp;
1751 
1752 	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
1753 	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);
1754 
1755 	prog = 0;
1756 	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
1757 	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
1758 		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
1759 		status = RFD_RxStatus(status64);
1760 		if ((status & RFD_RFDDone) == 0)
1761 			break;
1762 #ifdef DEVICE_POLLING
1763 		if (ifp->if_capenable & IFCAP_POLLING) {
1764 			if (sc->sc_cdata.stge_rxcycles <= 0)
1765 				break;
1766 			sc->sc_cdata.stge_rxcycles--;
1767 		}
1768 #endif
1770 		rxd = &sc->sc_cdata.stge_rxdesc[cons];
1771 		mp = rxd->rx_m;
1772 
1773 		/*
1774 		 * If the packet had an error, drop it.  Note we count
1775 		 * the error later in the periodic stats update.
1776 		 */
1777 		if ((status & RFD_FrameEnd) != 0 && (status &
1778 		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
1779 		    RFD_RxAlignmentError | RFD_RxFCSError |
1780 		    RFD_RxLengthError)) != 0) {
1781 			stge_discard_rxbuf(sc, cons);
1782 			if (sc->sc_cdata.stge_rxhead != NULL) {
1783 				m_freem(sc->sc_cdata.stge_rxhead);
1784 				STGE_RXCHAIN_RESET(sc);
1785 			}
1786 			continue;
1787 		}
1788 		/*
1789 		 * Add a new receive buffer to the ring.
1790 		 */
1791 		if (stge_newbuf(sc, cons) != 0) {
1792 			ifp->if_iqdrops++;
1793 			stge_discard_rxbuf(sc, cons);
1794 			if (sc->sc_cdata.stge_rxhead != NULL) {
1795 				m_freem(sc->sc_cdata.stge_rxhead);
1796 				STGE_RXCHAIN_RESET(sc);
1797 			}
1798 			continue;
1799 		}
1800 
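		/*
		 * A frame may span multiple RFDs; stge_rxlen accumulates
		 * the running length so the final fragment can be sized
		 * once RFD_FrameEnd is seen.
		 */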
1801 		if ((status & RFD_FrameEnd) != 0)
1802 			mp->m_len = RFD_RxDMAFrameLen(status) -
1803 			    sc->sc_cdata.stge_rxlen;
1804 		sc->sc_cdata.stge_rxlen += mp->m_len;
1805 
1806 		/* Chain mbufs. */
1807 		if (sc->sc_cdata.stge_rxhead == NULL) {
1808 			sc->sc_cdata.stge_rxhead = mp;
1809 			sc->sc_cdata.stge_rxtail = mp;
1810 		} else {
1811 			mp->m_flags &= ~M_PKTHDR;
1812 			sc->sc_cdata.stge_rxtail->m_next = mp;
1813 			sc->sc_cdata.stge_rxtail = mp;
1814 		}
1815 
1816 		if ((status & RFD_FrameEnd) != 0) {
1817 			m = sc->sc_cdata.stge_rxhead;
1818 			m->m_pkthdr.rcvif = ifp;
1819 			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;
1820 
1821 			if (m->m_pkthdr.len > sc->sc_if_framesize) {
1822 				m_freem(m);
1823 				STGE_RXCHAIN_RESET(sc);
1824 				continue;
1825 			}
1826 			/*
1827 			 * Set the incoming checksum information for
1828 			 * the packet.
1829 			 */
1830 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1831 				if ((status & RFD_IPDetected) != 0) {
1832 					m->m_pkthdr.csum_flags |=
1833 						CSUM_IP_CHECKED;
1834 					if ((status & RFD_IPError) == 0)
1835 						m->m_pkthdr.csum_flags |=
1836 						    CSUM_IP_VALID;
1837 				}
1838 				if (((status & RFD_TCPDetected) != 0 &&
1839 				    (status & RFD_TCPError) == 0) ||
1840 				    ((status & RFD_UDPDetected) != 0 &&
1841 				    (status & RFD_UDPError) == 0)) {
1842 					m->m_pkthdr.csum_flags |=
1843 					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1844 					m->m_pkthdr.csum_data = 0xffff;
1845 				}
1846 			}
1847 
1848 #ifndef __NO_STRICT_ALIGNMENT
1849 			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
1850 				if ((m = stge_fixup_rx(sc, m)) == NULL) {
1851 					STGE_RXCHAIN_RESET(sc);
1852 					continue;
1853 				}
1854 			}
1855 #endif
1856 			/* Check for VLAN tagged packets. */
1857 			if ((status & RFD_VLANDetected) != 0 &&
1858 			    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
1859 				m->m_pkthdr.ether_vtag = RFD_TCI(status64);
1860 				m->m_flags |= M_VLANTAG;
1861 			}
1862 
1863 			STGE_UNLOCK(sc);
1864 			/* Pass it on. */
1865 			(*ifp->if_input)(ifp, m);
1866 			STGE_LOCK(sc);
1867 
1868 			STGE_RXCHAIN_RESET(sc);
1869 		}
1870 	}
1871 
1872 	if (prog > 0) {
1873 		/* Update the consumer index. */
1874 		sc->sc_cdata.stge_rx_cons = cons;
1875 		bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
1876 		    sc->sc_cdata.stge_rx_ring_map,
1877 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1878 	}
1879 }
1880 
1881 #ifdef DEVICE_POLLING
1882 static void
1883 stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1884 {
1885 	struct stge_softc *sc;
1886 	uint16_t status;
1887 
1888 	sc = ifp->if_softc;
1889 	STGE_LOCK(sc);
1890 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1891 		STGE_UNLOCK(sc);
1892 		return;
1893 	}
1894 
1895 	sc->sc_cdata.stge_rxcycles = count;
1896 	stge_rxeof(sc);
1897 	stge_txeof(sc);
1898 
1899 	if (cmd == POLL_AND_CHECK_STATUS) {
1900 		status = CSR_READ_2(sc, STGE_IntStatus);
1901 		status &= sc->sc_IntEnable;
1902 		if (status != 0) {
1903 			if ((status & IS_HostError) != 0) {
1904 				device_printf(sc->sc_dev,
1905 				    "Host interface error, resetting...\n");
1906 				stge_init_locked(sc);
1907 			}
1908 			if ((status & IS_TxComplete) != 0) {
1909 				if (stge_tx_error(sc) != 0)
1910 					stge_init_locked(sc);
1911 			}
1912 		}
1913 
1914 	}
1915 
1916 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1917 		stge_start_locked(ifp);
1918 
1919 	STGE_UNLOCK(sc);
1920 }
1921 #endif	/* DEVICE_POLLING */
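/*
 * Note: with the DEVICE_POLLING option compiled in, polling is switched
 * on and off per interface at run time (e.g. "ifconfig stge0 polling"),
 * which toggles IFCAP_POLLING so the driver masks its interrupts and
 * relies on stge_poll() instead.
 */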
1922 
1923 /*
1924  * stge_tick:
1925  *
1926  *	One second timer, used to tick the MII.
1927  */
1928 static void
1929 stge_tick(void *arg)
1930 {
1931 	struct stge_softc *sc;
1932 	struct mii_data *mii;
1933 
1934 	sc = (struct stge_softc *)arg;
1935 
1936 	STGE_LOCK_ASSERT(sc);
1937 
1938 	mii = device_get_softc(sc->sc_miibus);
1939 	mii_tick(mii);
1940 
1941 	/* Update statistics counters. */
1942 	stge_stats_update(sc);
1943 
1944 	/*
1945 	 * Reclaim any pending Tx descriptors to release mbufs in a
1946 	 * timely manner as we don't generate Tx completion interrupts
1947 	 * for every frame. This limits the delay to a maximum of one
1948 	 * second.
1949 	 */
1950 	if (sc->sc_cdata.stge_tx_cnt != 0)
1951 		stge_txeof(sc);
1952 
1953 	stge_watchdog(sc);
1954 
1955 	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
1956 }
1957 
1958 /*
1959  * stge_stats_update:
1960  *
1961  *	Read the TC9021 statistics counters.
1962  */
1963 static void
1964 stge_stats_update(struct stge_softc *sc)
1965 {
1966 	struct ifnet *ifp;
1967 
1968 	STGE_LOCK_ASSERT(sc);
1969 
1970 	ifp = sc->sc_ifp;
1971 
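	/*
	 * The statistics registers appear to clear on read, so each read
	 * below returns the delta since the last update.  The octet
	 * counters are read only to clear them; their values are
	 * discarded.
	 */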
1972 	CSR_READ_4(sc, STGE_OctetRcvOk);
1973 
1974 	ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk);
1975 
1976 	ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors);
1977 
1978 	CSR_READ_4(sc, STGE_OctetXmtdOk);
1979 
1980 	ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk);
1981 
1982 	ifp->if_collisions +=
1983 	    CSR_READ_4(sc, STGE_LateCollisions) +
1984 	    CSR_READ_4(sc, STGE_MultiColFrames) +
1985 	    CSR_READ_4(sc, STGE_SingleColFrames);
1986 
1987 	ifp->if_oerrors +=
1988 	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
1989 	    CSR_READ_2(sc, STGE_FramesWEXDeferal);
1990 }
1991 
1992 /*
1993  * stge_reset:
1994  *
1995  *	Perform a soft reset on the TC9021.
1996  */
1997 static void
1998 stge_reset(struct stge_softc *sc, uint32_t how)
1999 {
2000 	uint32_t ac;
2001 	uint8_t v;
2002 	int i, dv;
2003 
2004 	STGE_LOCK_ASSERT(sc);
2005 
2006 	dv = 5000;
2007 	ac = CSR_READ_4(sc, STGE_AsicCtrl);
2008 	switch (how) {
2009 	case STGE_RESET_TX:
2010 		ac |= AC_TxReset | AC_FIFO;
2011 		dv = 100;
2012 		break;
2013 	case STGE_RESET_RX:
2014 		ac |= AC_RxReset | AC_FIFO;
2015 		dv = 100;
2016 		break;
2017 	case STGE_RESET_FULL:
2018 	default:
2019 		/*
2020 		 * Only assert RstOut if we're fiber.  We need GMII clocks
2021 		 * to be present in order for the reset to complete on fiber
2022 		 * cards.
2023 		 */
2024 		ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
2025 		    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
2026 		    (sc->sc_usefiber ? AC_RstOut : 0);
2027 		break;
2028 	}
2029 
2030 	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
2031 
2032 	/* Account for reset problem at 10Mbps. */
2033 	DELAY(dv);
2034 
2035 	for (i = 0; i < STGE_TIMEOUT; i++) {
2036 		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
2037 			break;
2038 		DELAY(dv);
2039 	}
2040 
2041 	if (i == STGE_TIMEOUT)
2042 		device_printf(sc->sc_dev, "reset failed to complete\n");
2043 
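	/*
	 * sc_led layout (as used below): bit 0 sets AC_LEDMode, bits 0-1
	 * set AC_LEDModeBit1, bit 3 sets AC_LEDSpeed, and bits 4-6 are
	 * written into the PhySet register.
	 */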
2044 	/* Set LED, from Linux IPG driver. */
2045 	ac = CSR_READ_4(sc, STGE_AsicCtrl);
2046 	ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
2047 	if ((sc->sc_led & 0x01) != 0)
2048 		ac |= AC_LEDMode;
2049 	if ((sc->sc_led & 0x03) != 0)
2050 		ac |= AC_LEDModeBit1;
2051 	if ((sc->sc_led & 0x08) != 0)
2052 		ac |= AC_LEDSpeed;
2053 	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
2054 
2055 	/* Set PHY, from Linux IPG driver */
2056 	v = CSR_READ_1(sc, STGE_PhySet);
2057 	v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
2058 	v |= ((sc->sc_led & 0x70) >> 4);
2059 	CSR_WRITE_1(sc, STGE_PhySet, v);
2060 }
2061 
2062 /*
2063  * stge_init:		[ ifnet interface function ]
2064  *
2065  *	Initialize the interface.
2066  */
2067 static void
2068 stge_init(void *xsc)
2069 {
2070 	struct stge_softc *sc;
2071 
2072 	sc = (struct stge_softc *)xsc;
2073 	STGE_LOCK(sc);
2074 	stge_init_locked(sc);
2075 	STGE_UNLOCK(sc);
2076 }
2077 
2078 static void
2079 stge_init_locked(struct stge_softc *sc)
2080 {
2081 	struct ifnet *ifp;
2082 	struct mii_data *mii;
2083 	uint16_t eaddr[3];
2084 	uint32_t v;
2085 	int error;
2086 
2087 	STGE_LOCK_ASSERT(sc);
2088 
2089 	ifp = sc->sc_ifp;
2090 	mii = device_get_softc(sc->sc_miibus);
2091 
2092 	/*
2093 	 * Cancel any pending I/O.
2094 	 */
2095 	stge_stop(sc);
2096 
2097 	/* Init descriptors. */
2098 	error = stge_init_rx_ring(sc);
2099 	if (error != 0) {
2100 		device_printf(sc->sc_dev,
2101 		    "initialization failed: no memory for rx buffers\n");
2102 		stge_stop(sc);
2103 		goto out;
2104 	}
2105 	stge_init_tx_ring(sc);
2106 
2107 	/* Set the station address. */
2108 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2109 	CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
2110 	CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
2111 	CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));
2112 
2113 	/*
2114 	 * Set the statistics masks.  Disable all the RMON stats,
2115 	 * and disable selected stats in the non-RMON stats registers.
2116 	 */
2117 	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
2118 	CSR_WRITE_4(sc, STGE_StatisticsMask,
2119 	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
2120 	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
2121 	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
2122 	    (1U << 21));
2123 
2124 	/* Set up the receive filter. */
2125 	stge_set_filter(sc);
2126 	/* Program multicast filter. */
2127 	stge_set_multi(sc);
2128 
2129 	/*
2130 	 * Give the transmit and receive ring to the chip.
2131 	 */
2132 	CSR_WRITE_4(sc, STGE_TFDListPtrHi,
2133 	    STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
2134 	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
2135 	    STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));
2136 
2137 	CSR_WRITE_4(sc, STGE_RFDListPtrHi,
2138 	    STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
2139 	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
2140 	    STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));
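	/*
	 * The high half of each 64-bit list pointer is written first;
	 * the chip presumably latches the full address when the low half
	 * is written.
	 */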
2141 
2142 	/*
2143 	 * Initialize the Tx auto-poll period.  It's OK to make this number
2144 	 * large (255 is the max, but we use 127) -- we explicitly kick the
2145 	 * transmit engine when there's actually a packet.
2146 	 */
2147 	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2148 
2149 	/* ..and the Rx auto-poll period. */
2150 	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2151 
2152 	/* Initialize the Tx start threshold. */
2153 	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);
2154 
2155 	/* Rx DMA thresholds, from Linux */
2156 	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
2157 	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);
2158 
2159 	/* Rx early threshold, from Linux */
2160 	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);
2161 
2162 	/* Tx DMA thresholds, from Linux */
2163 	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
2164 	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);
2165 
2166 	/*
2167 	 * Initialize the Rx DMA interrupt control register.  We
2168 	 * request an interrupt after every incoming packet, but
2169 	 * defer it for sc_rxint_dmawait us. When the number of
2170 	 * interrupts pending reaches STGE_RXINT_NFRAME, we stop
2171 	 * deferring the interrupt, and signal it immediately.
2172 	 */
2173 	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
2174 	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
2175 	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));
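	/*
	 * For example, with hypothetical settings sc_rxint_nframe = 8 and
	 * sc_rxint_dmawait = 30 (microseconds), the interrupt fires either
	 * 30us after a frame arrives or once 8 frames are pending,
	 * whichever happens first.
	 */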
2176 
2177 	/*
2178 	 * Initialize the interrupt mask.
2179 	 */
2180 	sc->sc_IntEnable = IS_HostError | IS_TxComplete |
2181 	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
2182 #ifdef DEVICE_POLLING
2183 	/* Disable interrupts if we are polling. */
2184 	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
2185 		CSR_WRITE_2(sc, STGE_IntEnable, 0);
2186 	else
2187 #endif
2188 	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
2189 
2190 	/*
2191 	 * Configure the DMA engine.
2192 	 * XXX Should auto-tune TxBurstLimit.
2193 	 */
2194 	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));
2195 
2196 	/*
2197 	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
2198 	 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
2199 	 * in the Rx FIFO.
2200 	 */
2201 	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
2202 	CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);
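	/*
	 * The divide by 16 suggests both FIFO thresholds are programmed
	 * in 16-byte units.
	 */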
2203 
2204 	/*
2205 	 * Set the maximum frame size.
2206 	 */
2207 	sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2208 	CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);
2209 
2210 	/*
2211 	 * Initialize MacCtrl -- do it before setting the media,
2212 	 * as setting the media will actually program the register.
2213 	 *
2214 	 * Note: We have to poke the IFS value before poking
2215 	 * anything else.
2216 	 */
2217 	/* Tx/Rx MAC should be disabled before programming IFS. */
2218 	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));
2219 
2220 	stge_vlan_setup(sc);
2221 
2222 	if (sc->sc_rev >= 6) {		/* >= B.2 */
2223 		/* Multi-frag frame bug work-around. */
2224 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2225 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);
2226 
2227 		/* Tx Poll Now bug work-around. */
2228 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2229 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
2230 		/* Tx Poll Now bug work-around. */
2231 		CSR_WRITE_2(sc, STGE_DebugCtrl,
2232 		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
2233 	}
2234 
2235 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2236 	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
2237 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2238 	/*
2239 	 * It seems that transmitting frames without checking the state of
2240 	 * the Rx/Tx MAC wedges the hardware.
2241 	 */
2242 	stge_start_tx(sc);
2243 	stge_start_rx(sc);
2244 
2245 	sc->sc_link = 0;
2246 	/*
2247 	 * Set the current media.
2248 	 */
2249 	mii_mediachg(mii);
2250 
2251 	/*
2252 	 * Start the one second MII clock.
2253 	 */
2254 	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
2255 
2256 	/*
2257 	 * ...all done!
2258 	 */
2259 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2260 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2261 
2262  out:
2263 	if (error != 0)
2264 		device_printf(sc->sc_dev, "interface not running\n");
2265 }
2266 
2267 static void
2268 stge_vlan_setup(struct stge_softc *sc)
2269 {
2270 	struct ifnet *ifp;
2271 	uint32_t v;
2272 
2273 	ifp = sc->sc_ifp;
2274 	/*
2275 	 * The NIC always copies a VLAN tag regardless of the STGE_MACCtrl
2276 	 * MC_AutoVLANuntagging bit.
2277 	 * The MC_AutoVLANtagging bit selects the VLAN source, either
2278 	 * STGE_VLANTag or the TFC.  However, the TFC TFD_VLANTagInsert
2279 	 * bit has priority over the MC_AutoVLANtagging bit, so we always
2280 	 * use the TFC instead of the STGE_VLANTag register.
2281 	 */
2282 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2283 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2284 		v |= MC_AutoVLANuntagging;
2285 	else
2286 		v &= ~MC_AutoVLANuntagging;
2287 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2288 }
2289 
2290 /*
2291  *	Stop transmission on the interface.
2292  */
2293 static void
2294 stge_stop(struct stge_softc *sc)
2295 {
2296 	struct ifnet *ifp;
2297 	struct stge_txdesc *txd;
2298 	struct stge_rxdesc *rxd;
2299 	uint32_t v;
2300 	int i;
2301 
2302 	STGE_LOCK_ASSERT(sc);
2303 	/*
2304 	 * Stop the one second clock.
2305 	 */
2306 	callout_stop(&sc->sc_tick_ch);
2307 	sc->sc_watchdog_timer = 0;
2308 
2309 	/*
2310 	 * Reset the chip to a known state.
2311 	 */
2312 	stge_reset(sc, STGE_RESET_FULL);
2313 
2314 	/*
2315 	 * Disable interrupts.
2316 	 */
2317 	CSR_WRITE_2(sc, STGE_IntEnable, 0);
2318 
2319 	/*
2320 	 * Stop receiver, transmitter, and stats update.
2321 	 */
2322 	stge_stop_rx(sc);
2323 	stge_stop_tx(sc);
2324 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2325 	v |= MC_StatisticsDisable;
2326 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2327 
2328 	/*
2329 	 * Stop the transmit and receive DMA.
2330 	 */
2331 	stge_dma_wait(sc);
2332 	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
2333 	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
2334 	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
2335 	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);
2336 
2337 	/*
2338 	 * Free RX and TX mbufs still in the queues.
2339 	 */
2340 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2341 		rxd = &sc->sc_cdata.stge_rxdesc[i];
2342 		if (rxd->rx_m != NULL) {
2343 			bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
2344 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2345 			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
2346 			    rxd->rx_dmamap);
2347 			m_freem(rxd->rx_m);
2348 			rxd->rx_m = NULL;
2349 		}
2350 	}
2351 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2352 		txd = &sc->sc_cdata.stge_txdesc[i];
2353 		if (txd->tx_m != NULL) {
2354 			bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
2355 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2356 			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
2357 			    txd->tx_dmamap);
2358 			m_freem(txd->tx_m);
2359 			txd->tx_m = NULL;
2360 		}
2361 	}
2362 
2363 	/*
2364 	 * Mark the interface down and cancel the watchdog timer.
2365 	 */
2366 	ifp = sc->sc_ifp;
2367 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2368 	sc->sc_link = 0;
2369 }
2370 
2371 static void
2372 stge_start_tx(struct stge_softc *sc)
2373 {
2374 	uint32_t v;
2375 	int i;
2376 
2377 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2378 	if ((v & MC_TxEnabled) != 0)
2379 		return;
2380 	v |= MC_TxEnable;
2381 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2382 	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2383 	for (i = STGE_TIMEOUT; i > 0; i--) {
2384 		DELAY(10);
2385 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2386 		if ((v & MC_TxEnabled) != 0)
2387 			break;
2388 	}
2389 	if (i == 0)
2390 		device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
2391 }
2392 
2393 static void
2394 stge_start_rx(struct stge_softc *sc)
2395 {
2396 	uint32_t v;
2397 	int i;
2398 
2399 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2400 	if ((v & MC_RxEnabled) != 0)
2401 		return;
2402 	v |= MC_RxEnable;
2403 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2404 	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2405 	for (i = STGE_TIMEOUT; i > 0; i--) {
2406 		DELAY(10);
2407 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2408 		if ((v & MC_RxEnabled) != 0)
2409 			break;
2410 	}
2411 	if (i == 0)
2412 		device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
2413 }
2414 
2415 static void
2416 stge_stop_tx(struct stge_softc *sc)
2417 {
2418 	uint32_t v;
2419 	int i;
2420 
2421 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2422 	if ((v & MC_TxEnabled) == 0)
2423 		return;
2424 	v |= MC_TxDisable;
2425 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2426 	for (i = STGE_TIMEOUT; i > 0; i--) {
2427 		DELAY(10);
2428 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2429 		if ((v & MC_TxEnabled) == 0)
2430 			break;
2431 	}
2432 	if (i == 0)
2433 		device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
2434 }
2435 
2436 static void
2437 stge_stop_rx(struct stge_softc *sc)
2438 {
2439 	uint32_t v;
2440 	int i;
2441 
2442 	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2443 	if ((v & MC_RxEnabled) == 0)
2444 		return;
2445 	v |= MC_RxDisable;
2446 	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2447 	for (i = STGE_TIMEOUT; i > 0; i--) {
2448 		DELAY(10);
2449 		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2450 		if ((v & MC_RxEnabled) == 0)
2451 			break;
2452 	}
2453 	if (i == 0)
2454 		device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
2455 }
2456 
2457 static void
2458 stge_init_tx_ring(struct stge_softc *sc)
2459 {
2460 	struct stge_ring_data *rd;
2461 	struct stge_txdesc *txd;
2462 	bus_addr_t addr;
2463 	int i;
2464 
2465 	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
2466 	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);
2467 
2468 	sc->sc_cdata.stge_tx_prod = 0;
2469 	sc->sc_cdata.stge_tx_cons = 0;
2470 	sc->sc_cdata.stge_tx_cnt = 0;
2471 
2472 	rd = &sc->sc_rdata;
2473 	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
2474 	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2475 		if (i == (STGE_TX_RING_CNT - 1))
2476 			addr = STGE_TX_RING_ADDR(sc, 0);
2477 		else
2478 			addr = STGE_TX_RING_ADDR(sc, i + 1);
2479 		rd->stge_tx_ring[i].tfd_next = htole64(addr);
2480 		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
2481 		txd = &sc->sc_cdata.stge_txdesc[i];
2482 		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
2483 	}
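	/*
	 * The tfd_next pointers form a ring (the last TFD links back to
	 * the first), and every descriptor starts with TFDDone set so
	 * the transmit path sees it as available.
	 */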
2484 
2485 	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
2486 	    sc->sc_cdata.stge_tx_ring_map,
2487 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2488 
2489 }
2490 
2491 static int
2492 stge_init_rx_ring(struct stge_softc *sc)
2493 {
2494 	struct stge_ring_data *rd;
2495 	bus_addr_t addr;
2496 	int i;
2497 
2498 	sc->sc_cdata.stge_rx_cons = 0;
2499 	STGE_RXCHAIN_RESET(sc);
2500 
2501 	rd = &sc->sc_rdata;
2502 	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
2503 	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2504 		if (stge_newbuf(sc, i) != 0)
2505 			return (ENOBUFS);
2506 		if (i == (STGE_RX_RING_CNT - 1))
2507 			addr = STGE_RX_RING_ADDR(sc, 0);
2508 		else
2509 			addr = STGE_RX_RING_ADDR(sc, i + 1);
2510 		rd->stge_rx_ring[i].rfd_next = htole64(addr);
2511 		rd->stge_rx_ring[i].rfd_status = 0;
2512 	}
2513 
2514 	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
2515 	    sc->sc_cdata.stge_rx_ring_map,
2516 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2517 
2518 	return (0);
2519 }
2520 
2521 /*
2522  * stge_newbuf:
2523  *
2524  *	Add a receive buffer to the indicated descriptor.
2525  */
2526 static int
2527 stge_newbuf(struct stge_softc *sc, int idx)
2528 {
2529 	struct stge_rxdesc *rxd;
2530 	struct stge_rfd *rfd;
2531 	struct mbuf *m;
2532 	bus_dma_segment_t segs[1];
2533 	bus_dmamap_t map;
2534 	int nsegs;
2535 
2536 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2537 	if (m == NULL)
2538 		return (ENOBUFS);
2539 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2540 	/*
2541 	 * The hardware requires a 4-byte aligned DMA address when a jumbo
2542 	 * frame is used.
2543 	 */
2544 	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
2545 		m_adj(m, ETHER_ALIGN);
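	/*
	 * ETHER_ALIGN is 2, so the adjustment above lands the IP header
	 * on a 4-byte boundary after the 14-byte Ethernet header; it is
	 * skipped for jumbo frames, where the controller needs the DMA
	 * address itself to be 4-byte aligned.
	 */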
2546 
2547 	if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag,
2548 	    sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
2549 		m_freem(m);
2550 		return (ENOBUFS);
2551 	}
2552 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2553 
2554 	rxd = &sc->sc_cdata.stge_rxdesc[idx];
2555 	if (rxd->rx_m != NULL) {
2556 		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2557 		    BUS_DMASYNC_POSTREAD);
2558 		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
2559 	}
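	/*
	 * Swap the just-loaded spare map into the slot.  Because the new
	 * mbuf was loaded into the spare map first, a load failure above
	 * leaves the old buffer and its mapping untouched.
	 */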
2560 	map = rxd->rx_dmamap;
2561 	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
2562 	sc->sc_cdata.stge_rx_sparemap = map;
2563 	bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2564 	    BUS_DMASYNC_PREREAD);
2565 	rxd->rx_m = m;
2566 
2567 	rfd = &sc->sc_rdata.stge_rx_ring[idx];
2568 	rfd->rfd_frag.frag_word0 =
2569 	    htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
2570 	rfd->rfd_status = 0;
2571 
2572 	return (0);
2573 }
2574 
2575 /*
2576  * stge_set_filter:
2577  *
2578  *	Set up the receive filter.
2579  */
2580 static void
2581 stge_set_filter(struct stge_softc *sc)
2582 {
2583 	struct ifnet *ifp;
2584 	uint16_t mode;
2585 
2586 	STGE_LOCK_ASSERT(sc);
2587 
2588 	ifp = sc->sc_ifp;
2589 
2590 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2591 	mode |= RM_ReceiveUnicast;
2592 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
2593 		mode |= RM_ReceiveBroadcast;
2594 	else
2595 		mode &= ~RM_ReceiveBroadcast;
2596 	if ((ifp->if_flags & IFF_PROMISC) != 0)
2597 		mode |= RM_ReceiveAllFrames;
2598 	else
2599 		mode &= ~RM_ReceiveAllFrames;
2600 
2601 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2602 }
2603 
2604 static void
2605 stge_set_multi(struct stge_softc *sc)
2606 {
2607 	struct ifnet *ifp;
2608 	struct ifmultiaddr *ifma;
2609 	uint32_t crc;
2610 	uint32_t mchash[2];
2611 	uint16_t mode;
2612 	int count;
2613 
2614 	STGE_LOCK_ASSERT(sc);
2615 
2616 	ifp = sc->sc_ifp;
2617 
2618 	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2619 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2620 		if ((ifp->if_flags & IFF_PROMISC) != 0)
2621 			mode |= RM_ReceiveAllFrames;
2622 		else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2623 			mode |= RM_ReceiveMulticast;
2624 		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2625 		return;
2626 	}
2627 
2628 	/* clear existing filters. */
2629 	CSR_WRITE_4(sc, STGE_HashTable0, 0);
2630 	CSR_WRITE_4(sc, STGE_HashTable1, 0);
2631 
2632 	/*
2633 	 * Set up the multicast address filter by passing all multicast
2634 	 * addresses through a CRC generator, and then using the low-order
2635 	 * 6 bits as an index into the 64-bit multicast hash table.  The
2636 	 * high-order bit selects the register, while the low-order 5 bits
2637 	 * select the bit within the register.
2638 	 */
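	/*
	 * For example, an address whose big-endian CRC32 has 0x25 in its
	 * low 6 bits sets bit 5 of mchash[1] (0x25 >> 5 == 1,
	 * 0x25 & 0x1f == 5).
	 */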
2639 
2640 	bzero(mchash, sizeof(mchash));
2641 
2642 	count = 0;
2643 	IF_ADDR_LOCK(ifp);
2644 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2645 		if (ifma->ifma_addr->sa_family != AF_LINK)
2646 			continue;
2647 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2648 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2649 
2650 		/* Just want the 6 least significant bits. */
2651 		crc &= 0x3f;
2652 
2653 		/* Set the corresponding bit in the hash table. */
2654 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2655 		count++;
2656 	}
2657 	IF_ADDR_UNLOCK(ifp);
2658 
2659 	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
2660 	if (count > 0)
2661 		mode |= RM_ReceiveMulticastHash;
2662 	else
2663 		mode &= ~RM_ReceiveMulticastHash;
2664 
2665 	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
2666 	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
2667 	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2668 }
2669 
2670 static int
2671 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2672 {
2673 	int error, value;
2674 
2675 	if (!arg1)
2676 		return (EINVAL);
2677 	value = *(int *)arg1;
2678 	error = sysctl_handle_int(oidp, &value, 0, req);
2679 	if (error || !req->newptr)
2680 		return (error);
2681 	if (value < low || value > high)
2682 		return (EINVAL);
2683 	*(int *)arg1 = value;
2684 
2685 	return (0);
2686 }
2687 
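/*
 * Bounds-checked handlers for the Rx interrupt-coalescing tunables;
 * these are assumed to be hooked up as per-device sysctl nodes at
 * attach time.
 */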
2688 static int
2689 sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
2690 {
2691 	return (sysctl_int_range(oidp, arg1, arg2, req,
2692 	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
2693 }
2694 
2695 static int
2696 sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
2697 {
2698 	return (sysctl_int_range(oidp, arg1, arg2, req,
2699 	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
2700 }
2701