xref: /freebsd/sys/dev/vr/if_vr.c (revision b9c36cc755002809a7d7c7109e3425fdfca036d2)
1 /*-
2  * Copyright (c) 1997, 1998
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * VIA Rhine fast ethernet PCI NIC driver
38  *
39  * Supports various network adapters based on the VIA Rhine
40  * and Rhine II PCI controllers, including the D-Link DFE530TX.
41  * Datasheets are available at http://www.via.com.tw.
42  *
43  * Written by Bill Paul <wpaul@ctr.columbia.edu>
44  * Electrical Engineering Department
45  * Columbia University, New York City
46  */
47 
48 /*
49  * The VIA Rhine controllers are similar in some respects to the
50  * DEC tulip chips, except less complicated. The controller
51  * uses an MII bus and an external physical layer interface. The
52  * receiver has a one entry perfect filter and a 64-bit hash table
53  * multicast filter. Transmit and receive descriptors are similar
54  * to the tulip.
55  *
56  * Some Rhine chips have a serious flaw in their transmit DMA mechanism:
57  * transmit buffers must be longword aligned. Unfortunately,
58  * FreeBSD doesn't guarantee that mbufs will be filled in starting
59  * at longword boundaries, so we have to do a buffer copy before
60  * transmission.
61  */
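/*
 * (The copy is done in vr_encap(): adapters flagged VR_Q_NEEDALIGN
 * get a longword-aligned, contiguous copy via m_defrag(9) before
 * the DMA map is loaded.)
 */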
62 
63 #ifdef HAVE_KERNEL_OPTION_HEADERS
64 #include "opt_device_polling.h"
65 #endif
66 
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/bus.h>
70 #include <sys/endian.h>
71 #include <sys/kernel.h>
72 #include <sys/malloc.h>
73 #include <sys/mbuf.h>
74 #include <sys/module.h>
75 #include <sys/rman.h>
76 #include <sys/socket.h>
77 #include <sys/sockio.h>
78 #include <sys/sysctl.h>
79 #include <sys/taskqueue.h>
80 
81 #include <net/bpf.h>
82 #include <net/if.h>
83 #include <net/if_var.h>
84 #include <net/ethernet.h>
85 #include <net/if_dl.h>
86 #include <net/if_media.h>
87 #include <net/if_types.h>
88 #include <net/if_vlan_var.h>
89 
90 #include <dev/mii/mii.h>
91 #include <dev/mii/miivar.h>
92 
93 #include <dev/pci/pcireg.h>
94 #include <dev/pci/pcivar.h>
95 
96 #include <machine/bus.h>
97 
98 #include <dev/vr/if_vrreg.h>
99 
100 /* "device miibus" required.  See GENERIC if you get errors here. */
101 #include "miibus_if.h"
102 
103 MODULE_DEPEND(vr, pci, 1, 1, 1);
104 MODULE_DEPEND(vr, ether, 1, 1, 1);
105 MODULE_DEPEND(vr, miibus, 1, 1, 1);
106 
107 /* Define to show Rx/Tx error status. */
108 #undef	VR_SHOW_ERRORS
109 #define	VR_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
110 
111 /*
112  * Various supported device vendors/types, their names & quirks.
113  */
114 #define VR_Q_NEEDALIGN		(1<<0)	/* Tx buffers must be longword aligned. */
115 #define VR_Q_CSUM		(1<<1)	/* Hardware Tx/Rx checksum offload. */
116 #define VR_Q_CAM		(1<<2)	/* 32 entry multicast perfect filter CAM. */
117 
118 static const struct vr_type {
119 	u_int16_t		vr_vid;
120 	u_int16_t		vr_did;
121 	int			vr_quirks;
122 	const char		*vr_name;
123 } vr_devs[] = {
124 	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
125 	    VR_Q_NEEDALIGN,
126 	    "VIA VT3043 Rhine I 10/100BaseTX" },
127 	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
128 	    VR_Q_NEEDALIGN,
129 	    "VIA VT86C100A Rhine II 10/100BaseTX" },
130 	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
131 	    0,
132 	    "VIA VT6102 Rhine II 10/100BaseTX" },
133 	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III,
134 	    0,
135 	    "VIA VT6105 Rhine III 10/100BaseTX" },
136 	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
137 	    VR_Q_CSUM | VR_Q_CAM,
138 	    "VIA VT6105M Rhine III 10/100BaseTX" },
139 	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
140 	    VR_Q_NEEDALIGN,
141 	    "Delta Electronics Rhine II 10/100BaseTX" },
142 	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
143 	    VR_Q_NEEDALIGN,
144 	    "Addtron Technology Rhine II 10/100BaseTX" },
145 	{ 0, 0, 0, NULL }
146 };
147 
148 static int vr_probe(device_t);
149 static int vr_attach(device_t);
150 static int vr_detach(device_t);
151 static int vr_shutdown(device_t);
152 static int vr_suspend(device_t);
153 static int vr_resume(device_t);
154 
155 static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int);
156 static int vr_dma_alloc(struct vr_softc *);
157 static void vr_dma_free(struct vr_softc *);
158 static __inline void vr_discard_rxbuf(struct vr_rxdesc *);
159 static int vr_newbuf(struct vr_softc *, int);
160 
161 #ifndef __NO_STRICT_ALIGNMENT
162 static __inline void vr_fixup_rx(struct mbuf *);
163 #endif
164 static int vr_rxeof(struct vr_softc *);
165 static void vr_txeof(struct vr_softc *);
166 static void vr_tick(void *);
167 static int vr_error(struct vr_softc *, uint16_t);
168 static void vr_tx_underrun(struct vr_softc *);
169 static int vr_intr(void *);
170 static void vr_int_task(void *, int);
171 static void vr_start(struct ifnet *);
172 static void vr_start_locked(struct ifnet *);
173 static int vr_encap(struct vr_softc *, struct mbuf **);
174 static int vr_ioctl(struct ifnet *, u_long, caddr_t);
175 static void vr_init(void *);
176 static void vr_init_locked(struct vr_softc *);
177 static void vr_tx_start(struct vr_softc *);
178 static void vr_rx_start(struct vr_softc *);
179 static int vr_tx_stop(struct vr_softc *);
180 static int vr_rx_stop(struct vr_softc *);
181 static void vr_stop(struct vr_softc *);
182 static void vr_watchdog(struct vr_softc *);
183 static int vr_ifmedia_upd(struct ifnet *);
184 static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
185 
186 static int vr_miibus_readreg(device_t, int, int);
187 static int vr_miibus_writereg(device_t, int, int, int);
188 static void vr_miibus_statchg(device_t);
189 
190 static void vr_cam_mask(struct vr_softc *, uint32_t, int);
191 static int vr_cam_data(struct vr_softc *, int, int, uint8_t *);
192 static void vr_set_filter(struct vr_softc *);
193 static void vr_reset(const struct vr_softc *);
194 static int vr_tx_ring_init(struct vr_softc *);
195 static int vr_rx_ring_init(struct vr_softc *);
196 static void vr_setwol(struct vr_softc *);
197 static void vr_clrwol(struct vr_softc *);
198 static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS);
199 
200 static const struct vr_tx_threshold_table {
201 	int tx_cfg;
202 	int bcr_cfg;
203 	int value;
204 } vr_tx_threshold_tables[] = {
205 	{ VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES,	64 },
206 	{ VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 },
207 	{ VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 },
208 	{ VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 },
209 	{ VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 },
210 	{ VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 }
211 };
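/*
 * On each Tx underrun vr_tx_underrun() advances one entry through
 * this table until store-and-forward mode is reached.
 */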
212 
213 static device_method_t vr_methods[] = {
214 	/* Device interface */
215 	DEVMETHOD(device_probe,		vr_probe),
216 	DEVMETHOD(device_attach,	vr_attach),
217 	DEVMETHOD(device_detach, 	vr_detach),
218 	DEVMETHOD(device_shutdown,	vr_shutdown),
219 	DEVMETHOD(device_suspend,	vr_suspend),
220 	DEVMETHOD(device_resume,	vr_resume),
221 
222 	/* MII interface */
223 	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
224 	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
225 	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),
226 
227 	DEVMETHOD_END
228 };
229 
230 static driver_t vr_driver = {
231 	"vr",
232 	vr_methods,
233 	sizeof(struct vr_softc)
234 };
235 
236 static devclass_t vr_devclass;
237 
238 DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0);
239 DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);
240 
241 static int
242 vr_miibus_readreg(device_t dev, int phy, int reg)
243 {
244 	struct vr_softc		*sc;
245 	int			i;
246 
247 	sc = device_get_softc(dev);
248 
249 	/* Set the register address. */
250 	CSR_WRITE_1(sc, VR_MIIADDR, reg);
251 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);
252 
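	/* Poll until the chip clears the self-clearing READ_ENB bit. */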
253 	for (i = 0; i < VR_MII_TIMEOUT; i++) {
254 		DELAY(1);
255 		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
256 			break;
257 	}
258 	if (i == VR_MII_TIMEOUT)
259 		device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg);
260 
261 	return (CSR_READ_2(sc, VR_MIIDATA));
262 }
263 
264 static int
265 vr_miibus_writereg(device_t dev, int phy, int reg, int data)
266 {
267 	struct vr_softc		*sc;
268 	int			i;
269 
270 	sc = device_get_softc(dev);
271 
272 	/* Set the register address and data to write. */
273 	CSR_WRITE_1(sc, VR_MIIADDR, reg);
274 	CSR_WRITE_2(sc, VR_MIIDATA, data);
275 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);
276 
277 	for (i = 0; i < VR_MII_TIMEOUT; i++) {
278 		DELAY(1);
279 		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
280 			break;
281 	}
282 	if (i == VR_MII_TIMEOUT)
283 		device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy,
284 		    reg);
285 
286 	return (0);
287 }
288 
289 /*
290  * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
291  * the netconfig register, we first have to put the transmit and/or
292  * receive logic in the idle state.
293  */
294 static void
295 vr_miibus_statchg(device_t dev)
296 {
297 	struct vr_softc		*sc;
298 	struct mii_data		*mii;
299 	struct ifnet		*ifp;
300 	int			lfdx, mfdx;
301 	uint8_t			cr0, cr1, fc;
302 
303 	sc = device_get_softc(dev);
304 	mii = device_get_softc(sc->vr_miibus);
305 	ifp = sc->vr_ifp;
306 	if (mii == NULL || ifp == NULL ||
307 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
308 		return;
309 
310 	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
311 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
312 	    (IFM_ACTIVE | IFM_AVALID)) {
313 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
314 		case IFM_10_T:
315 		case IFM_100_TX:
316 			sc->vr_flags |= VR_F_LINK;
317 			break;
318 		default:
319 			break;
320 		}
321 	}
322 
323 	if ((sc->vr_flags & VR_F_LINK) != 0) {
324 		cr0 = CSR_READ_1(sc, VR_CR0);
325 		cr1 = CSR_READ_1(sc, VR_CR1);
326 		mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0;
327 		lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0;
328 		if (mfdx != lfdx) {
329 			if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) {
330 				if (vr_tx_stop(sc) != 0 ||
331 				    vr_rx_stop(sc) != 0) {
332 					device_printf(sc->vr_dev,
333 					    "%s: Tx/Rx shutdown error -- "
334 					    "resetting\n", __func__);
335 					sc->vr_flags |= VR_F_RESTART;
336 					VR_UNLOCK(sc);
337 					return;
338 				}
339 			}
340 			if (lfdx)
341 				cr1 |= VR_CR1_FULLDUPLEX;
342 			else
343 				cr1 &= ~VR_CR1_FULLDUPLEX;
344 			CSR_WRITE_1(sc, VR_CR1, cr1);
345 		}
346 		/* Configure flow-control. */
347 		fc = 0;
348 		if (sc->vr_revid >= REV_ID_VT6105_A0) {
349 			fc = CSR_READ_1(sc, VR_FLOWCR1);
350 			fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE);
351 			if ((IFM_OPTIONS(mii->mii_media_active) &
352 			    IFM_ETH_RXPAUSE) != 0)
353 				fc |= VR_FLOWCR1_RXPAUSE;
354 			if ((IFM_OPTIONS(mii->mii_media_active) &
355 			    IFM_ETH_TXPAUSE) != 0) {
356 				fc |= VR_FLOWCR1_TXPAUSE;
357 				sc->vr_flags |= VR_F_TXPAUSE;
358 			}
359 			CSR_WRITE_1(sc, VR_FLOWCR1, fc);
360 		} else if (sc->vr_revid >= REV_ID_VT6102_A) {
361 			/* No Tx pause capability available for Rhine II. */
362 			fc = CSR_READ_1(sc, VR_MISC_CR0);
363 			fc &= ~VR_MISCCR0_RXPAUSE;
364 			if ((IFM_OPTIONS(mii->mii_media_active) &
365 			    IFM_ETH_RXPAUSE) != 0)
366 				fc |= VR_MISCCR0_RXPAUSE;
367 			CSR_WRITE_1(sc, VR_MISC_CR0, fc);
368 		}
369 		vr_rx_start(sc);
370 		vr_tx_start(sc);
371 	} else {
372 		if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) {
373 			device_printf(sc->vr_dev,
374 			    "%s: Tx/Rx shutdown error -- resetting\n",
375 			    __func__);
376 			sc->vr_flags |= VR_F_RESTART;
377 		}
378 	}
379 }
380 
381 
382 static void
383 vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type)
384 {
385 
386 	if (type == VR_MCAST_CAM)
387 		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
388 	else
389 		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);
390 	CSR_WRITE_4(sc, VR_CAMMASK, mask);
391 	CSR_WRITE_1(sc, VR_CAMCTL, 0);
392 }
393 
394 static int
395 vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac)
396 {
397 	int	i;
398 
399 	if (type == VR_MCAST_CAM) {
400 		if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL)
401 			return (EINVAL);
402 		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
403 	} else
404 		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);
405 
406 	/* Set CAM entry address. */
407 	CSR_WRITE_1(sc, VR_CAMADDR, idx);
408 	/* Set CAM entry data. */
409 	if (type == VR_MCAST_CAM) {
410 		for (i = 0; i < ETHER_ADDR_LEN; i++)
411 			CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]);
412 	} else {
413 		CSR_WRITE_1(sc, VR_VCAM0, mac[0]);
414 		CSR_WRITE_1(sc, VR_VCAM1, mac[1]);
415 	}
416 	DELAY(10);
417 	/* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */
418 	CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE);
419 	for (i = 0; i < VR_TIMEOUT; i++) {
420 		DELAY(1);
421 		if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0)
422 			break;
423 	}
424 
425 	if (i == VR_TIMEOUT)
426 		device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n",
427 		    __func__);
428 	CSR_WRITE_1(sc, VR_CAMCTL, 0);
429 
430 	return (i == VR_TIMEOUT ? ETIMEDOUT : 0);
431 }
432 
433 /*
434  * Program the 32-entry CAM or the 64-bit multicast hash filter.
435  */
436 static void
437 vr_set_filter(struct vr_softc *sc)
438 {
439 	struct ifnet		*ifp;
440 	int			h;
441 	uint32_t		hashes[2] = { 0, 0 };
442 	struct ifmultiaddr	*ifma;
443 	uint8_t			rxfilt;
444 	int			error, mcnt;
445 	uint32_t		cam_mask;
446 
447 	VR_LOCK_ASSERT(sc);
448 
449 	ifp = sc->vr_ifp;
450 	rxfilt = CSR_READ_1(sc, VR_RXCFG);
451 	rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD |
452 	    VR_RXCFG_RX_MULTI);
453 	if (ifp->if_flags & IFF_BROADCAST)
454 		rxfilt |= VR_RXCFG_RX_BROAD;
455 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
456 		rxfilt |= VR_RXCFG_RX_MULTI;
457 		if (ifp->if_flags & IFF_PROMISC)
458 			rxfilt |= VR_RXCFG_RX_PROMISC;
459 		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
460 		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
461 		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
462 		return;
463 	}
464 
465 	/* Now program new ones. */
466 	error = 0;
467 	mcnt = 0;
468 	if_maddr_rlock(ifp);
469 	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
470 		/*
471 		 * For hardware that has CAM capability, use the
472 		 * 32-entry multicast perfect filter.
473 		 */
474 		cam_mask = 0;
475 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
476 			if (ifma->ifma_addr->sa_family != AF_LINK)
477 				continue;
478 			error = vr_cam_data(sc, VR_MCAST_CAM, mcnt,
479 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
480 			if (error != 0) {
481 				cam_mask = 0;
482 				break;
483 			}
484 			cam_mask |= 1 << mcnt;
485 			mcnt++;
486 		}
487 		vr_cam_mask(sc, cam_mask, VR_MCAST_CAM);
488 	}
489 
490 	if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) {
491 		/*
492 		 * If there are too many multicast addresses or
493 		 * setting multicast CAM filter failed, use hash
494 		 * table based filtering.
495 		 */
496 		mcnt = 0;
497 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
498 			if (ifma->ifma_addr->sa_family != AF_LINK)
499 				continue;
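			/*
			 * The top 6 bits of the big-endian CRC of the
			 * address select one of 64 hash bits: bit (h % 32)
			 * of MAR0 when h < 32, of MAR1 otherwise.
			 */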
500 			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
501 			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
502 			if (h < 32)
503 				hashes[0] |= (1 << h);
504 			else
505 				hashes[1] |= (1 << (h - 32));
506 			mcnt++;
507 		}
508 	}
509 	if_maddr_runlock(ifp);
510 
511 	if (mcnt > 0)
512 		rxfilt |= VR_RXCFG_RX_MULTI;
513 
514 	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
515 	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
516 	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
517 }
518 
519 static void
520 vr_reset(const struct vr_softc *sc)
521 {
522 	int		i;
523 
524 	/*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */
525 
526 	CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET);
527 	if (sc->vr_revid < REV_ID_VT6102_A) {
528 		/* VT86C100A needs more delay after reset. */
529 		DELAY(100);
530 	}
531 	for (i = 0; i < VR_TIMEOUT; i++) {
532 		DELAY(10);
533 		if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET))
534 			break;
535 	}
536 	if (i == VR_TIMEOUT) {
537 		if (sc->vr_revid < REV_ID_VT6102_A)
538 			device_printf(sc->vr_dev, "reset never completed!\n");
539 		else {
540 			/* Use newer force reset command. */
541 			device_printf(sc->vr_dev,
542 			    "Using force reset command.\n");
543 			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
544 			/*
545 			 * Wait a little while for the chip to get its brains
546 			 * in order.
547 			 */
548 			DELAY(2000);
549 		}
550 	}
551 
552 }
553 
554 /*
555  * Probe for a VIA Rhine chip. Check the PCI vendor and device
556  * IDs against our list and return a match or NULL.
557  */
558 static const struct vr_type *
559 vr_match(device_t dev)
560 {
561 	const struct vr_type	*t;
562 
563 	for (t = vr_devs; t->vr_name != NULL; t++)
564 		if ((pci_get_vendor(dev) == t->vr_vid) &&
565 		    (pci_get_device(dev) == t->vr_did))
566 			return (t);
567 	return (NULL);
568 }
569 
570 /*
571  * Probe for a VIA Rhine chip. Check the PCI vendor and device
572  * IDs against our list and return a device name if we find a match.
573  */
574 static int
575 vr_probe(device_t dev)
576 {
577 	const struct vr_type	*t;
578 
579 	t = vr_match(dev);
580 	if (t != NULL) {
581 		device_set_desc(dev, t->vr_name);
582 		return (BUS_PROBE_DEFAULT);
583 	}
584 	return (ENXIO);
585 }
586 
587 /*
588  * Attach the interface. Allocate softc structures, do ifmedia
589  * setup and ethernet/BPF attach.
590  */
591 static int
592 vr_attach(device_t dev)
593 {
594 	struct vr_softc		*sc;
595 	struct ifnet		*ifp;
596 	const struct vr_type	*t;
597 	uint8_t			eaddr[ETHER_ADDR_LEN];
598 	int			error, rid;
599 	int			i, phy, pmc;
600 
601 	sc = device_get_softc(dev);
602 	sc->vr_dev = dev;
603 	t = vr_match(dev);
604 	KASSERT(t != NULL, ("Lost if_vr device match"));
605 	sc->vr_quirks = t->vr_quirks;
606 	device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks);
607 
608 	mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
609 	    MTX_DEF);
610 	callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0);
611 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
612 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
613 	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
614 	    vr_sysctl_stats, "I", "Statistics");
615 
616 	error = 0;
617 
618 	/*
619 	 * Map control/status registers.
620 	 */
621 	pci_enable_busmaster(dev);
622 	sc->vr_revid = pci_get_revid(dev);
623 	device_printf(dev, "Revision: 0x%x\n", sc->vr_revid);
624 
625 	sc->vr_res_id = PCIR_BAR(0);
626 	sc->vr_res_type = SYS_RES_IOPORT;
627 	sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type,
628 	    &sc->vr_res_id, RF_ACTIVE);
629 	if (sc->vr_res == NULL) {
630 		device_printf(dev, "couldn't map ports\n");
631 		error = ENXIO;
632 		goto fail;
633 	}
634 
635 	/* Allocate interrupt. */
636 	rid = 0;
637 	sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
638 	    RF_SHAREABLE | RF_ACTIVE);
639 
640 	if (sc->vr_irq == NULL) {
641 		device_printf(dev, "couldn't map interrupt\n");
642 		error = ENXIO;
643 		goto fail;
644 	}
645 
646 	/* Allocate ifnet structure. */
647 	ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
648 	if (ifp == NULL) {
649 		device_printf(dev, "couldn't allocate ifnet structure\n");
650 		error = ENOSPC;
651 		goto fail;
652 	}
653 	ifp->if_softc = sc;
654 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
655 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
656 	ifp->if_ioctl = vr_ioctl;
657 	ifp->if_start = vr_start;
658 	ifp->if_init = vr_init;
659 	IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_RING_CNT - 1);
660 	ifp->if_snd.ifq_maxlen = VR_TX_RING_CNT - 1;
661 	IFQ_SET_READY(&ifp->if_snd);
662 
663 	TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc);
664 
665 	/* Configure Tx FIFO threshold. */
666 	sc->vr_txthresh = VR_TXTHRESH_MIN;
667 	if (sc->vr_revid < REV_ID_VT6105_A0) {
668 		/*
669 		 * Use store and forward mode for Rhine I/II.
670 		 * Otherwise they produce a lot of Tx underruns and
671 		 * it would take a while to find a working FIFO threshold
672 		 * value.
673 		 */
674 		sc->vr_txthresh = VR_TXTHRESH_MAX;
675 	}
676 	if ((sc->vr_quirks & VR_Q_CSUM) != 0) {
677 		ifp->if_hwassist = VR_CSUM_FEATURES;
678 		ifp->if_capabilities |= IFCAP_HWCSUM;
679 		/*
680 		 * To update the checksum field the hardware may need to
681 		 * store the entire frame in its FIFO before transmitting.
682 		 */
683 		sc->vr_txthresh = VR_TXTHRESH_MAX;
684 	}
685 
686 	if (sc->vr_revid >= REV_ID_VT6102_A &&
687 	    pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
688 		ifp->if_capabilities |= IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC;
689 
690 	/* Rhine supports oversized VLAN frames. */
691 	ifp->if_capabilities |= IFCAP_VLAN_MTU;
692 	ifp->if_capenable = ifp->if_capabilities;
693 #ifdef DEVICE_POLLING
694 	ifp->if_capabilities |= IFCAP_POLLING;
695 #endif
696 
697 	/*
698 	 * Windows may put the chip in suspend mode when it
699 	 * shuts down. Be sure to kick it in the head to wake it
700 	 * up again.
701 	 */
702 	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
703 		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
704 
705 	/*
706 	 * Get station address. The way the Rhine chips work,
707 	 * you're not allowed to directly access the EEPROM once
708 	 * they've been programmed a special way. Consequently,
709 	 * we need to read the node address from the PAR0 and PAR1
710 	 * registers.
711 	 * Reloading EEPROM also overwrites VR_CFGA, VR_CFGB,
712 	 * VR_CFGC and VR_CFGD such that memory mapped IO configured
713 	 * by the driver is reset to its default state.
714 	 */
715 	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
716 	for (i = VR_TIMEOUT; i > 0; i--) {
717 		DELAY(1);
718 		if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0)
719 			break;
720 	}
721 	if (i == 0)
722 		device_printf(dev, "Reloading EEPROM timeout!\n");
723 	for (i = 0; i < ETHER_ADDR_LEN; i++)
724 		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
725 
726 	/* Reset the adapter. */
727 	vr_reset(sc);
728 	/* Ack intr & disable further interrupts. */
729 	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
730 	CSR_WRITE_2(sc, VR_IMR, 0);
731 	if (sc->vr_revid >= REV_ID_VT6102_A)
732 		CSR_WRITE_2(sc, VR_MII_IMR, 0);
733 
734 	if (sc->vr_revid < REV_ID_VT6102_A) {
735 		pci_write_config(dev, VR_PCI_MODE2,
736 		    pci_read_config(dev, VR_PCI_MODE2, 1) |
737 		    VR_MODE2_MODE10T, 1);
738 	} else {
739 		/* Report error instead of retrying forever. */
740 		pci_write_config(dev, VR_PCI_MODE2,
741 		    pci_read_config(dev, VR_PCI_MODE2, 1) |
742 		    VR_MODE2_PCEROPT, 1);
743 		/* Detect MII coding error. */
744 		pci_write_config(dev, VR_PCI_MODE3,
745 		    pci_read_config(dev, VR_PCI_MODE3, 1) |
746 		    VR_MODE3_MIION, 1);
747 		if (sc->vr_revid >= REV_ID_VT6105_LOM &&
748 		    sc->vr_revid < REV_ID_VT6105M_A0)
749 			pci_write_config(dev, VR_PCI_MODE2,
750 			    pci_read_config(dev, VR_PCI_MODE2, 1) |
751 			    VR_MODE2_MODE10T, 1);
752 		/* Enable Memory-Read-Multiple. */
753 		if (sc->vr_revid >= REV_ID_VT6107_A1 &&
754 		    sc->vr_revid < REV_ID_VT6105M_A0)
755 			pci_write_config(dev, VR_PCI_MODE2,
756 			    pci_read_config(dev, VR_PCI_MODE2, 1) |
757 			    VR_MODE2_MRDPL, 1);
758 	}
759 	/* Disable MII AUTOPOLL. */
760 	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);
761 
762 	if (vr_dma_alloc(sc) != 0) {
763 		error = ENXIO;
764 		goto fail;
765 	}
766 
767 	/* Do MII setup. */
768 	if (sc->vr_revid >= REV_ID_VT6105_A0)
769 		phy = 1;
770 	else
771 		phy = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK;
772 	error = mii_attach(dev, &sc->vr_miibus, ifp, vr_ifmedia_upd,
773 	    vr_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY,
774 	    sc->vr_revid >= REV_ID_VT6102_A ? MIIF_DOPAUSE : 0);
775 	if (error != 0) {
776 		device_printf(dev, "attaching PHYs failed\n");
777 		goto fail;
778 	}
779 
780 	/* Call MI attach routine. */
781 	ether_ifattach(ifp, eaddr);
782 	/*
783 	 * Tell the upper layer(s) we support long frames.
784 	 * Must appear after the call to ether_ifattach() because
785 	 * ether_ifattach() sets ifi_hdrlen to the default value.
786 	 */
787 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
788 
789 	/* Hook interrupt last to avoid having to lock softc. */
790 	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
791 	    vr_intr, NULL, sc, &sc->vr_intrhand);
792 
793 	if (error) {
794 		device_printf(dev, "couldn't set up irq\n");
795 		ether_ifdetach(ifp);
796 		goto fail;
797 	}
798 
799 fail:
800 	if (error)
801 		vr_detach(dev);
802 
803 	return (error);
804 }
805 
806 /*
807  * Shutdown hardware and free up resources. This can be called any
808  * time after the mutex has been initialized. It is called in both
809  * the error case in attach and the normal detach case so it needs
810  * to be careful about only freeing resources that have actually been
811  * allocated.
812  */
813 static int
814 vr_detach(device_t dev)
815 {
816 	struct vr_softc		*sc = device_get_softc(dev);
817 	struct ifnet		*ifp = sc->vr_ifp;
818 
819 	KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));
820 
821 #ifdef DEVICE_POLLING
822 	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
823 		ether_poll_deregister(ifp);
824 #endif
825 
826 	/* These should only be active if attach succeeded. */
827 	if (device_is_attached(dev)) {
828 		VR_LOCK(sc);
829 		sc->vr_flags |= VR_F_DETACHED;
830 		vr_stop(sc);
831 		VR_UNLOCK(sc);
832 		callout_drain(&sc->vr_stat_callout);
833 		taskqueue_drain(taskqueue_fast, &sc->vr_inttask);
834 		ether_ifdetach(ifp);
835 	}
836 	if (sc->vr_miibus)
837 		device_delete_child(dev, sc->vr_miibus);
838 	bus_generic_detach(dev);
839 
840 	if (sc->vr_intrhand)
841 		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
842 	if (sc->vr_irq)
843 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
844 	if (sc->vr_res)
845 		bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id,
846 		    sc->vr_res);
847 
848 	if (ifp)
849 		if_free(ifp);
850 
851 	vr_dma_free(sc);
852 
853 	mtx_destroy(&sc->vr_mtx);
854 
855 	return (0);
856 }
857 
858 struct vr_dmamap_arg {
859 	bus_addr_t	vr_busaddr;
860 };
861 
862 static void
863 vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
864 {
865 	struct vr_dmamap_arg	*ctx;
866 
867 	if (error != 0)
868 		return;
869 	ctx = arg;
870 	ctx->vr_busaddr = segs[0].ds_addr;
871 }
872 
873 static int
874 vr_dma_alloc(struct vr_softc *sc)
875 {
876 	struct vr_dmamap_arg	ctx;
877 	struct vr_txdesc	*txd;
878 	struct vr_rxdesc	*rxd;
879 	bus_size_t		tx_alignment;
880 	int			error, i;
881 
882 	/* Create parent DMA tag. */
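	/*
	 * The Rhine is a 32-bit DMA device and descriptors store only
	 * the low 32 bits of a bus address (VR_ADDR_LO()), so restrict
	 * the parent tag to the low 4GB; the ring and buffer tags
	 * created below inherit that restriction.
	 */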
883 	error = bus_dma_tag_create(
884 	    bus_get_dma_tag(sc->vr_dev),	/* parent */
885 	    1, 0,			/* alignment, boundary */
886 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
887 	    BUS_SPACE_MAXADDR,		/* highaddr */
888 	    NULL, NULL,			/* filter, filterarg */
889 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
890 	    0,				/* nsegments */
891 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
892 	    0,				/* flags */
893 	    NULL, NULL,			/* lockfunc, lockarg */
894 	    &sc->vr_cdata.vr_parent_tag);
895 	if (error != 0) {
896 		device_printf(sc->vr_dev, "failed to create parent DMA tag\n");
897 		goto fail;
898 	}
899 	/* Create tag for Tx ring. */
900 	error = bus_dma_tag_create(
901 	    sc->vr_cdata.vr_parent_tag,	/* parent */
902 	    VR_RING_ALIGN, 0,		/* alignment, boundary */
903 	    BUS_SPACE_MAXADDR,		/* lowaddr */
904 	    BUS_SPACE_MAXADDR,		/* highaddr */
905 	    NULL, NULL,			/* filter, filterarg */
906 	    VR_TX_RING_SIZE,		/* maxsize */
907 	    1,				/* nsegments */
908 	    VR_TX_RING_SIZE,		/* maxsegsize */
909 	    0,				/* flags */
910 	    NULL, NULL,			/* lockfunc, lockarg */
911 	    &sc->vr_cdata.vr_tx_ring_tag);
912 	if (error != 0) {
913 		device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n");
914 		goto fail;
915 	}
916 
917 	/* Create tag for Rx ring. */
918 	error = bus_dma_tag_create(
919 	    sc->vr_cdata.vr_parent_tag,	/* parent */
920 	    VR_RING_ALIGN, 0,		/* alignment, boundary */
921 	    BUS_SPACE_MAXADDR,		/* lowaddr */
922 	    BUS_SPACE_MAXADDR,		/* highaddr */
923 	    NULL, NULL,			/* filter, filterarg */
924 	    VR_RX_RING_SIZE,		/* maxsize */
925 	    1,				/* nsegments */
926 	    VR_RX_RING_SIZE,		/* maxsegsize */
927 	    0,				/* flags */
928 	    NULL, NULL,			/* lockfunc, lockarg */
929 	    &sc->vr_cdata.vr_rx_ring_tag);
930 	if (error != 0) {
931 		device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n");
932 		goto fail;
933 	}
934 
935 	if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0)
936 		tx_alignment = sizeof(uint32_t);
937 	else
938 		tx_alignment = 1;
939 	/* Create tag for Tx buffers. */
940 	error = bus_dma_tag_create(
941 	    sc->vr_cdata.vr_parent_tag,	/* parent */
942 	    tx_alignment, 0,		/* alignment, boundary */
943 	    BUS_SPACE_MAXADDR,		/* lowaddr */
944 	    BUS_SPACE_MAXADDR,		/* highaddr */
945 	    NULL, NULL,			/* filter, filterarg */
946 	    MCLBYTES * VR_MAXFRAGS,	/* maxsize */
947 	    VR_MAXFRAGS,		/* nsegments */
948 	    MCLBYTES,			/* maxsegsize */
949 	    0,				/* flags */
950 	    NULL, NULL,			/* lockfunc, lockarg */
951 	    &sc->vr_cdata.vr_tx_tag);
952 	if (error != 0) {
953 		device_printf(sc->vr_dev, "failed to create Tx DMA tag\n");
954 		goto fail;
955 	}
956 
957 	/* Create tag for Rx buffers. */
958 	error = bus_dma_tag_create(
959 	    sc->vr_cdata.vr_parent_tag,	/* parent */
960 	    VR_RX_ALIGN, 0,		/* alignment, boundary */
961 	    BUS_SPACE_MAXADDR,		/* lowaddr */
962 	    BUS_SPACE_MAXADDR,		/* highaddr */
963 	    NULL, NULL,			/* filter, filterarg */
964 	    MCLBYTES,			/* maxsize */
965 	    1,				/* nsegments */
966 	    MCLBYTES,			/* maxsegsize */
967 	    0,				/* flags */
968 	    NULL, NULL,			/* lockfunc, lockarg */
969 	    &sc->vr_cdata.vr_rx_tag);
970 	if (error != 0) {
971 		device_printf(sc->vr_dev, "failed to create Rx DMA tag\n");
972 		goto fail;
973 	}
974 
975 	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
976 	error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag,
977 	    (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK |
978 	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map);
979 	if (error != 0) {
980 		device_printf(sc->vr_dev,
981 		    "failed to allocate DMA'able memory for Tx ring\n");
982 		goto fail;
983 	}
984 
985 	ctx.vr_busaddr = 0;
986 	error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag,
987 	    sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring,
988 	    VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
989 	if (error != 0 || ctx.vr_busaddr == 0) {
990 		device_printf(sc->vr_dev,
991 		    "failed to load DMA'able memory for Tx ring\n");
992 		goto fail;
993 	}
994 	sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr;
995 
996 	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
997 	error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag,
998 	    (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK |
999 	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map);
1000 	if (error != 0) {
1001 		device_printf(sc->vr_dev,
1002 		    "failed to allocate DMA'able memory for Rx ring\n");
1003 		goto fail;
1004 	}
1005 
1006 	ctx.vr_busaddr = 0;
1007 	error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag,
1008 	    sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring,
1009 	    VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
1010 	if (error != 0 || ctx.vr_busaddr == 0) {
1011 		device_printf(sc->vr_dev,
1012 		    "failed to load DMA'able memory for Rx ring\n");
1013 		goto fail;
1014 	}
1015 	sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr;
1016 
1017 	/* Create DMA maps for Tx buffers. */
1018 	for (i = 0; i < VR_TX_RING_CNT; i++) {
1019 		txd = &sc->vr_cdata.vr_txdesc[i];
1020 		txd->tx_m = NULL;
1021 		txd->tx_dmamap = NULL;
1022 		error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0,
1023 		    &txd->tx_dmamap);
1024 		if (error != 0) {
1025 			device_printf(sc->vr_dev,
1026 			    "failed to create Tx dmamap\n");
1027 			goto fail;
1028 		}
1029 	}
1030 	/* Create DMA maps for Rx buffers. */
1031 	if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
1032 	    &sc->vr_cdata.vr_rx_sparemap)) != 0) {
1033 		device_printf(sc->vr_dev,
1034 		    "failed to create spare Rx dmamap\n");
1035 		goto fail;
1036 	}
1037 	for (i = 0; i < VR_RX_RING_CNT; i++) {
1038 		rxd = &sc->vr_cdata.vr_rxdesc[i];
1039 		rxd->rx_m = NULL;
1040 		rxd->rx_dmamap = NULL;
1041 		error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
1042 		    &rxd->rx_dmamap);
1043 		if (error != 0) {
1044 			device_printf(sc->vr_dev,
1045 			    "failed to create Rx dmamap\n");
1046 			goto fail;
1047 		}
1048 	}
1049 
1050 fail:
1051 	return (error);
1052 }
1053 
1054 static void
1055 vr_dma_free(struct vr_softc *sc)
1056 {
1057 	struct vr_txdesc	*txd;
1058 	struct vr_rxdesc	*rxd;
1059 	int			i;
1060 
1061 	/* Tx ring. */
1062 	if (sc->vr_cdata.vr_tx_ring_tag) {
1063 		if (sc->vr_rdata.vr_tx_ring_paddr)
1064 			bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag,
1065 			    sc->vr_cdata.vr_tx_ring_map);
1066 		if (sc->vr_rdata.vr_tx_ring)
1067 			bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag,
1068 			    sc->vr_rdata.vr_tx_ring,
1069 			    sc->vr_cdata.vr_tx_ring_map);
1070 		sc->vr_rdata.vr_tx_ring = NULL;
1071 		sc->vr_rdata.vr_tx_ring_paddr = 0;
1072 		bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag);
1073 		sc->vr_cdata.vr_tx_ring_tag = NULL;
1074 	}
1075 	/* Rx ring. */
1076 	if (sc->vr_cdata.vr_rx_ring_tag) {
1077 		if (sc->vr_rdata.vr_rx_ring_paddr)
1078 			bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag,
1079 			    sc->vr_cdata.vr_rx_ring_map);
1080 		if (sc->vr_rdata.vr_rx_ring)
1081 			bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag,
1082 			    sc->vr_rdata.vr_rx_ring,
1083 			    sc->vr_cdata.vr_rx_ring_map);
1084 		sc->vr_rdata.vr_rx_ring = NULL;
1085 		sc->vr_rdata.vr_rx_ring_paddr = 0;
1086 		bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag);
1087 		sc->vr_cdata.vr_rx_ring_tag = NULL;
1088 	}
1089 	/* Tx buffers. */
1090 	if (sc->vr_cdata.vr_tx_tag) {
1091 		for (i = 0; i < VR_TX_RING_CNT; i++) {
1092 			txd = &sc->vr_cdata.vr_txdesc[i];
1093 			if (txd->tx_dmamap) {
1094 				bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag,
1095 				    txd->tx_dmamap);
1096 				txd->tx_dmamap = NULL;
1097 			}
1098 		}
1099 		bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag);
1100 		sc->vr_cdata.vr_tx_tag = NULL;
1101 	}
1102 	/* Rx buffers. */
1103 	if (sc->vr_cdata.vr_rx_tag) {
1104 		for (i = 0; i < VR_RX_RING_CNT; i++) {
1105 			rxd = &sc->vr_cdata.vr_rxdesc[i];
1106 			if (rxd->rx_dmamap) {
1107 				bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
1108 				    rxd->rx_dmamap);
1109 				rxd->rx_dmamap = NULL;
1110 			}
1111 		}
1112 		if (sc->vr_cdata.vr_rx_sparemap) {
1113 			bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
1114 			    sc->vr_cdata.vr_rx_sparemap);
1115 			sc->vr_cdata.vr_rx_sparemap = NULL;
1116 		}
1117 		bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag);
1118 		sc->vr_cdata.vr_rx_tag = NULL;
1119 	}
1120 
1121 	if (sc->vr_cdata.vr_parent_tag) {
1122 		bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag);
1123 		sc->vr_cdata.vr_parent_tag = NULL;
1124 	}
1125 }
1126 
1127 /*
1128  * Initialize the transmit descriptors.
1129  */
1130 static int
1131 vr_tx_ring_init(struct vr_softc *sc)
1132 {
1133 	struct vr_ring_data	*rd;
1134 	struct vr_txdesc	*txd;
1135 	bus_addr_t		addr;
1136 	int			i;
1137 
1138 	sc->vr_cdata.vr_tx_prod = 0;
1139 	sc->vr_cdata.vr_tx_cons = 0;
1140 	sc->vr_cdata.vr_tx_cnt = 0;
1141 	sc->vr_cdata.vr_tx_pkts = 0;
1142 
1143 	rd = &sc->vr_rdata;
1144 	bzero(rd->vr_tx_ring, VR_TX_RING_SIZE);
1145 	for (i = 0; i < VR_TX_RING_CNT; i++) {
1146 		if (i == VR_TX_RING_CNT - 1)
1147 			addr = VR_TX_RING_ADDR(sc, 0);
1148 		else
1149 			addr = VR_TX_RING_ADDR(sc, i + 1);
1150 		rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
1151 		txd = &sc->vr_cdata.vr_txdesc[i];
1152 		txd->tx_m = NULL;
1153 	}
1154 
1155 	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
1156 	    sc->vr_cdata.vr_tx_ring_map,
1157 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1158 
1159 	return (0);
1160 }
1161 
1162 /*
1163  * Initialize the RX descriptors and allocate mbufs for them. Note that
1164  * we arrange the descriptors in a closed ring, so that the last descriptor
1165  * points back to the first.
1166  */
1167 static int
1168 vr_rx_ring_init(struct vr_softc *sc)
1169 {
1170 	struct vr_ring_data	*rd;
1171 	struct vr_rxdesc	*rxd;
1172 	bus_addr_t		addr;
1173 	int			i;
1174 
1175 	sc->vr_cdata.vr_rx_cons = 0;
1176 
1177 	rd = &sc->vr_rdata;
1178 	bzero(rd->vr_rx_ring, VR_RX_RING_SIZE);
1179 	for (i = 0; i < VR_RX_RING_CNT; i++) {
1180 		rxd = &sc->vr_cdata.vr_rxdesc[i];
1181 		rxd->rx_m = NULL;
1182 		rxd->desc = &rd->vr_rx_ring[i];
1183 		if (i == VR_RX_RING_CNT - 1)
1184 			addr = VR_RX_RING_ADDR(sc, 0);
1185 		else
1186 			addr = VR_RX_RING_ADDR(sc, i + 1);
1187 		rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
1188 		if (vr_newbuf(sc, i) != 0)
1189 			return (ENOBUFS);
1190 	}
1191 
1192 	bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
1193 	    sc->vr_cdata.vr_rx_ring_map,
1194 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1195 
1196 	return (0);
1197 }
1198 
1199 static __inline void
1200 vr_discard_rxbuf(struct vr_rxdesc *rxd)
1201 {
1202 	struct vr_desc	*desc;
1203 
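	/*
	 * Leave the old mbuf cluster loaded: just restore the buffer
	 * length (matching the m_adj() in vr_newbuf()) and hand the
	 * descriptor back to the chip.
	 */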
1204 	desc = rxd->desc;
1205 	desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t)));
1206 	desc->vr_status = htole32(VR_RXSTAT_OWN);
1207 }
1208 
1209 /*
1210  * Initialize an RX descriptor and attach an MBUF cluster.
1211  * Note: the length fields are only 11 bits wide, which means the
1212  * largest size we can specify is 2047. This is important because
1213  * MCLBYTES is 2048, so we have to shrink the buffer (the m_adj()
1214  * below), otherwise we'll overflow the field and make a mess.
1215  */
1216 static int
1217 vr_newbuf(struct vr_softc *sc, int idx)
1218 {
1219 	struct vr_desc		*desc;
1220 	struct vr_rxdesc	*rxd;
1221 	struct mbuf		*m;
1222 	bus_dma_segment_t	segs[1];
1223 	bus_dmamap_t		map;
1224 	int			nsegs;
1225 
1226 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1227 	if (m == NULL)
1228 		return (ENOBUFS);
1229 	m->m_len = m->m_pkthdr.len = MCLBYTES;
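	/*
	 * Reserve 8 bytes: this keeps the buffer length within the
	 * 11-bit descriptor limit while keeping the payload address
	 * aligned for the Rx DMA tag.
	 */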
1230 	m_adj(m, sizeof(uint64_t));
1231 
1232 	if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag,
1233 	    sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1234 		m_freem(m);
1235 		return (ENOBUFS);
1236 	}
1237 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1238 
1239 	rxd = &sc->vr_cdata.vr_rxdesc[idx];
1240 	if (rxd->rx_m != NULL) {
1241 		bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
1242 		    BUS_DMASYNC_POSTREAD);
1243 		bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap);
1244 	}
1245 	map = rxd->rx_dmamap;
1246 	rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap;
1247 	sc->vr_cdata.vr_rx_sparemap = map;
1248 	bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
1249 	    BUS_DMASYNC_PREREAD);
1250 	rxd->rx_m = m;
1251 	desc = rxd->desc;
1252 	desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr));
1253 	desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len);
1254 	desc->vr_status = htole32(VR_RXSTAT_OWN);
1255 
1256 	return (0);
1257 }
1258 
1259 #ifndef __NO_STRICT_ALIGNMENT
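/*
 * Rx buffers are loaded 32-bit aligned, which leaves the IP header
 * behind the 14-byte Ethernet header misaligned.  Shift the whole
 * frame down two bytes (ETHER_ALIGN) with overlapping 16-bit copies
 * so the payload ends up aligned on strict-alignment machines.
 */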
1260 static __inline void
1261 vr_fixup_rx(struct mbuf *m)
1262 {
1263 	uint16_t		*src, *dst;
1264 	int			i;
1265 
1266 	src = mtod(m, uint16_t *);
1267 	dst = src - 1;
1268 
1269 	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1270 		*dst++ = *src++;
1271 
1272 	m->m_data -= ETHER_ALIGN;
1273 }
1274 #endif
1275 
1276 /*
1277  * A frame has been uploaded: pass the resulting mbuf chain up to
1278  * the higher level protocols.
1279  */
1280 static int
1281 vr_rxeof(struct vr_softc *sc)
1282 {
1283 	struct vr_rxdesc	*rxd;
1284 	struct mbuf		*m;
1285 	struct ifnet		*ifp;
1286 	struct vr_desc		*cur_rx;
1287 	int			cons, prog, total_len, rx_npkts;
1288 	uint32_t		rxstat, rxctl;
1289 
1290 	VR_LOCK_ASSERT(sc);
1291 	ifp = sc->vr_ifp;
1292 	cons = sc->vr_cdata.vr_rx_cons;
1293 	rx_npkts = 0;
1294 
1295 	bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
1296 	    sc->vr_cdata.vr_rx_ring_map,
1297 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1298 
1299 	for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) {
1300 #ifdef DEVICE_POLLING
1301 		if (ifp->if_capenable & IFCAP_POLLING) {
1302 			if (sc->rxcycles <= 0)
1303 				break;
1304 			sc->rxcycles--;
1305 		}
1306 #endif
1307 		cur_rx = &sc->vr_rdata.vr_rx_ring[cons];
1308 		rxstat = le32toh(cur_rx->vr_status);
1309 		rxctl = le32toh(cur_rx->vr_ctl);
1310 		if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN)
1311 			break;
1312 
1313 		prog++;
1314 		rxd = &sc->vr_cdata.vr_rxdesc[cons];
1315 		m = rxd->rx_m;
1316 
1317 		/*
1318 		 * If an error occurs, update stats, clear the
1319 		 * status word and leave the mbuf cluster in place:
1320 		 * it should simply get re-used next time this descriptor
1321 		 * comes up in the ring.
1322 		 * We don't support SG in Rx path yet, so discard
1323 		 * partial frame.
1324 		 */
1325 		if ((rxstat & VR_RXSTAT_RX_OK) == 0 ||
1326 		    (rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) !=
1327 		    (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) {
1328 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1329 			sc->vr_stat.rx_errors++;
1330 			if (rxstat & VR_RXSTAT_CRCERR)
1331 				sc->vr_stat.rx_crc_errors++;
1332 			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
1333 				sc->vr_stat.rx_alignment++;
1334 			if (rxstat & VR_RXSTAT_FIFOOFLOW)
1335 				sc->vr_stat.rx_fifo_overflows++;
1336 			if (rxstat & VR_RXSTAT_GIANT)
1337 				sc->vr_stat.rx_giants++;
1338 			if (rxstat & VR_RXSTAT_RUNT)
1339 				sc->vr_stat.rx_runts++;
1340 			if (rxstat & VR_RXSTAT_BUFFERR)
1341 				sc->vr_stat.rx_no_buffers++;
1342 #ifdef	VR_SHOW_ERRORS
1343 			device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
1344 			    __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS);
1345 #endif
1346 			vr_discard_rxbuf(rxd);
1347 			continue;
1348 		}
1349 
1350 		if (vr_newbuf(sc, cons) != 0) {
1351 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1352 			sc->vr_stat.rx_errors++;
1353 			sc->vr_stat.rx_no_mbufs++;
1354 			vr_discard_rxbuf(rxd);
1355 			continue;
1356 		}
1357 
1358 		/*
1359 		 * XXX The VIA Rhine chip includes the CRC with every
1360 		 * received frame, and there's no way to turn this
1361 		 * behavior off (at least, I can't find anything in
1362 		 * the manual that explains how to do it) so we have
1363 		 * to trim off the CRC manually.
1364 		 */
1365 		total_len = VR_RXBYTES(rxstat);
1366 		total_len -= ETHER_CRC_LEN;
1367 		m->m_pkthdr.len = m->m_len = total_len;
1368 #ifndef	__NO_STRICT_ALIGNMENT
1369 		/*
1370 		 * RX buffers must be 32-bit aligned.
1371 		 * Ignore the alignment problems on the non-strict alignment
1372 		 * Ignore the alignment problems on non-strict-alignment
1373 		 * platforms. The performance hit incurred due to unaligned
1374 		 * buffer copies all the time.
1375 		 */
1376 		vr_fixup_rx(m);
1377 #endif
1378 		m->m_pkthdr.rcvif = ifp;
1379 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1380 		sc->vr_stat.rx_ok++;
1381 		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
1382 		    (rxstat & VR_RXSTAT_FRAG) == 0 &&
1383 		    (rxctl & VR_RXCTL_IP) != 0) {
1384 			/* Checksum is valid for non-fragmented IP packets. */
1385 			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1386 			if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) {
1387 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1388 				if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) {
1389 					m->m_pkthdr.csum_flags |=
1390 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1391 					if ((rxctl & VR_RXCTL_TCPUDPOK) != 0)
1392 						m->m_pkthdr.csum_data = 0xffff;
1393 				}
1394 			}
1395 		}
1396 		VR_UNLOCK(sc);
1397 		(*ifp->if_input)(ifp, m);
1398 		VR_LOCK(sc);
1399 		rx_npkts++;
1400 	}
1401 
1402 	if (prog > 0) {
1403 		/*
1404 		 * Let the controller know how many Rx buffers are
1405 		 * posted, but avoid the expensive register access if
1406 		 * TX pause capability was not negotiated with link
1407 		 * partner.
1408 		 */
1409 		if ((sc->vr_flags & VR_F_TXPAUSE) != 0) {
1410 			if (prog >= VR_RX_RING_CNT)
1411 				prog = VR_RX_RING_CNT - 1;
1412 			CSR_WRITE_1(sc, VR_FLOWCR0, prog);
1413 		}
1414 		sc->vr_cdata.vr_rx_cons = cons;
1415 		bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
1416 		    sc->vr_cdata.vr_rx_ring_map,
1417 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1418 	}
1419 	return (rx_npkts);
1420 }
1421 
1422 /*
1423  * A frame was downloaded to the chip. It's safe for us to clean up
1424  * the list buffers.
1425  */
1426 static void
1427 vr_txeof(struct vr_softc *sc)
1428 {
1429 	struct vr_txdesc	*txd;
1430 	struct vr_desc		*cur_tx;
1431 	struct ifnet		*ifp;
1432 	uint32_t		txctl, txstat;
1433 	int			cons, prod;
1434 
1435 	VR_LOCK_ASSERT(sc);
1436 
1437 	cons = sc->vr_cdata.vr_tx_cons;
1438 	prod = sc->vr_cdata.vr_tx_prod;
1439 	if (cons == prod)
1440 		return;
1441 
1442 	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
1443 	    sc->vr_cdata.vr_tx_ring_map,
1444 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1445 
1446 	ifp = sc->vr_ifp;
1447 	/*
1448 	 * Go through our tx list and free mbufs for those
1449 	 * frames that have been transmitted.
1450 	 */
1451 	for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) {
1452 		cur_tx = &sc->vr_rdata.vr_tx_ring[cons];
1453 		txctl = le32toh(cur_tx->vr_ctl);
1454 		txstat = le32toh(cur_tx->vr_status);
1455 		if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN)
1456 			break;
1457 
1458 		sc->vr_cdata.vr_tx_cnt--;
1459 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1460 		/* Only the first descriptor in the chain is valid. */
1461 		if ((txctl & VR_TXCTL_FIRSTFRAG) == 0)
1462 			continue;
1463 
1464 		txd = &sc->vr_cdata.vr_txdesc[cons];
1465 		KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n",
1466 		    __func__));
1467 
1468 		if ((txstat & VR_TXSTAT_ERRSUM) != 0) {
1469 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1470 			sc->vr_stat.tx_errors++;
1471 			if ((txstat & VR_TXSTAT_ABRT) != 0) {
1472 				/* Give up and restart Tx. */
1473 				sc->vr_stat.tx_abort++;
1474 				bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
1475 				    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
1476 				bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
1477 				    txd->tx_dmamap);
1478 				m_freem(txd->tx_m);
1479 				txd->tx_m = NULL;
1480 				VR_INC(cons, VR_TX_RING_CNT);
1481 				sc->vr_cdata.vr_tx_cons = cons;
1482 				if (vr_tx_stop(sc) != 0) {
1483 					device_printf(sc->vr_dev,
1484 					    "%s: Tx shutdown error -- "
1485 					    "resetting\n", __func__);
1486 					sc->vr_flags |= VR_F_RESTART;
1487 					return;
1488 				}
1489 				vr_tx_start(sc);
1490 				break;
1491 			}
1492 			if ((sc->vr_revid < REV_ID_VT3071_A &&
1493 			    (txstat & VR_TXSTAT_UNDERRUN)) ||
1494 			    (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) {
1495 				sc->vr_stat.tx_underrun++;
1496 				/* Retry and restart Tx. */
1497 				sc->vr_cdata.vr_tx_cnt++;
1498 				sc->vr_cdata.vr_tx_cons = cons;
1499 				cur_tx->vr_status = htole32(VR_TXSTAT_OWN);
1500 				bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
1501 				    sc->vr_cdata.vr_tx_ring_map,
1502 				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1503 				vr_tx_underrun(sc);
1504 				return;
1505 			}
1506 			if ((txstat & VR_TXSTAT_DEFER) != 0) {
1507 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1508 				sc->vr_stat.tx_collisions++;
1509 			}
1510 			if ((txstat & VR_TXSTAT_LATECOLL) != 0) {
1511 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1512 				sc->vr_stat.tx_late_collisions++;
1513 			}
1514 		} else {
1515 			sc->vr_stat.tx_ok++;
1516 			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1517 		}
1518 
1519 		bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
1520 		    BUS_DMASYNC_POSTWRITE);
1521 		bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
1522 		if (sc->vr_revid < REV_ID_VT3071_A) {
1523 			if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
1524 			    (txstat & VR_TXSTAT_COLLCNT) >> 3);
1525 			sc->vr_stat.tx_collisions +=
1526 			    (txstat & VR_TXSTAT_COLLCNT) >> 3;
1527 		} else {
1528 			if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & 0x0f));
1529 			sc->vr_stat.tx_collisions += (txstat & 0x0f);
1530 		}
1531 		m_freem(txd->tx_m);
1532 		txd->tx_m = NULL;
1533 	}
1534 
1535 	sc->vr_cdata.vr_tx_cons = cons;
1536 	if (sc->vr_cdata.vr_tx_cnt == 0)
1537 		sc->vr_watchdog_timer = 0;
1538 }
1539 
1540 static void
1541 vr_tick(void *xsc)
1542 {
1543 	struct vr_softc		*sc;
1544 	struct mii_data		*mii;
1545 
1546 	sc = (struct vr_softc *)xsc;
1547 
1548 	VR_LOCK_ASSERT(sc);
1549 
1550 	if ((sc->vr_flags & VR_F_RESTART) != 0) {
1551 		device_printf(sc->vr_dev, "restarting\n");
1552 		sc->vr_stat.num_restart++;
1553 		sc->vr_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1554 		vr_init_locked(sc);
1555 		sc->vr_flags &= ~VR_F_RESTART;
1556 	}
1557 
1558 	mii = device_get_softc(sc->vr_miibus);
1559 	mii_tick(mii);
1560 	if ((sc->vr_flags & VR_F_LINK) == 0)
1561 		vr_miibus_statchg(sc->vr_dev);
1562 	vr_watchdog(sc);
1563 	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
1564 }
1565 
1566 #ifdef DEVICE_POLLING
1567 static poll_handler_t vr_poll;
1568 static poll_handler_t vr_poll_locked;
1569 
1570 static int
1571 vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1572 {
1573 	struct vr_softc *sc;
1574 	int rx_npkts;
1575 
1576 	sc = ifp->if_softc;
1577 	rx_npkts = 0;
1578 
1579 	VR_LOCK(sc);
1580 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1581 		rx_npkts = vr_poll_locked(ifp, cmd, count);
1582 	VR_UNLOCK(sc);
1583 	return (rx_npkts);
1584 }
1585 
1586 static int
1587 vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1588 {
1589 	struct vr_softc *sc;
1590 	int rx_npkts;
1591 
1592 	sc = ifp->if_softc;
1593 
1594 	VR_LOCK_ASSERT(sc);
1595 
1596 	sc->rxcycles = count;
1597 	rx_npkts = vr_rxeof(sc);
1598 	vr_txeof(sc);
1599 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1600 		vr_start_locked(ifp);
1601 
1602 	if (cmd == POLL_AND_CHECK_STATUS) {
1603 		uint16_t status;
1604 
1605 		/* Also check status register. */
1606 		status = CSR_READ_2(sc, VR_ISR);
1607 		if (status)
1608 			CSR_WRITE_2(sc, VR_ISR, status);
1609 
1610 		if ((status & VR_INTRS) == 0)
1611 			return (rx_npkts);
1612 
1613 		if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
1614 		    VR_ISR_STATSOFLOW)) != 0) {
1615 			if (vr_error(sc, status) != 0)
1616 				return (rx_npkts);
1617 		}
1618 		if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
1619 #ifdef	VR_SHOW_ERRORS
1620 			device_printf(sc->vr_dev, "%s: receive error : 0x%b\n",
1621 			    __func__, status, VR_ISR_ERR_BITS);
1622 #endif
1623 			vr_rx_start(sc);
1624 		}
1625 	}
1626 	return (rx_npkts);
1627 }
1628 #endif /* DEVICE_POLLING */
1629 
1630 /* Back off the transmit threshold. */
1631 static void
1632 vr_tx_underrun(struct vr_softc *sc)
1633 {
1634 	int	thresh;
1635 
1636 	device_printf(sc->vr_dev, "Tx underrun -- ");
1637 	if (sc->vr_txthresh < VR_TXTHRESH_MAX) {
1638 		thresh = sc->vr_txthresh;
1639 		sc->vr_txthresh++;
1640 		if (sc->vr_txthresh >= VR_TXTHRESH_MAX) {
1641 			sc->vr_txthresh = VR_TXTHRESH_MAX;
1642 			printf("using store and forward mode\n");
1643 		} else
1644 			printf("increasing Tx threshold (%d -> %d)\n",
1645 			    vr_tx_threshold_tables[thresh].value,
1646 			    vr_tx_threshold_tables[thresh + 1].value);
1647 	} else
1648 		printf("\n");
1649 	sc->vr_stat.tx_underrun++;
1650 	if (vr_tx_stop(sc) != 0) {
1651 		device_printf(sc->vr_dev, "%s: Tx shutdown error -- "
1652 		    "resetting\n", __func__);
1653 		sc->vr_flags |= VR_F_RESTART;
1654 		return;
1655 	}
1656 	vr_tx_start(sc);
1657 }
1658 
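/*
 * Interrupt filter: it runs in primary interrupt context, so it only
 * verifies that the Rhine really raised the interrupt, masks further
 * interrupts and defers the work to vr_int_task() on taskqueue_fast,
 * where the softc lock can be taken.
 */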
1659 static int
1660 vr_intr(void *arg)
1661 {
1662 	struct vr_softc		*sc;
1663 	uint16_t		status;
1664 
1665 	sc = (struct vr_softc *)arg;
1666 
1667 	status = CSR_READ_2(sc, VR_ISR);
1668 	if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0)
1669 		return (FILTER_STRAY);
1670 
1671 	/* Disable interrupts. */
1672 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1673 
1674 	taskqueue_enqueue(taskqueue_fast, &sc->vr_inttask);
1675 
1676 	return (FILTER_HANDLED);
1677 }
1678 
1679 static void
1680 vr_int_task(void *arg, int npending)
1681 {
1682 	struct vr_softc		*sc;
1683 	struct ifnet		*ifp;
1684 	uint16_t		status;
1685 
1686 	sc = (struct vr_softc *)arg;
1687 
1688 	VR_LOCK(sc);
1689 
1690 	if ((sc->vr_flags & VR_F_SUSPENDED) != 0)
1691 		goto done_locked;
1692 
1693 	status = CSR_READ_2(sc, VR_ISR);
1694 	ifp = sc->vr_ifp;
1695 #ifdef DEVICE_POLLING
1696 	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1697 		goto done_locked;
1698 #endif
1699 
1700 	/* Suppress unwanted interrupts. */
1701 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
1702 	    (sc->vr_flags & VR_F_RESTART) != 0) {
1703 		CSR_WRITE_2(sc, VR_IMR, 0);
1704 		CSR_WRITE_2(sc, VR_ISR, status);
1705 		goto done_locked;
1706 	}
1707 
1708 	for (; (status & VR_INTRS) != 0;) {
1709 		CSR_WRITE_2(sc, VR_ISR, status);
1710 		if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
1711 		    VR_ISR_STATSOFLOW)) != 0) {
1712 			if (vr_error(sc, status) != 0) {
1713 				VR_UNLOCK(sc);
1714 				return;
1715 			}
1716 		}
1717 		vr_rxeof(sc);
1718 		if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
1719 #ifdef	VR_SHOW_ERRORS
1720 			device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
1721 			    __func__, status, VR_ISR_ERR_BITS);
1722 #endif
1723 			/* Restart Rx if RxDMA SM was stopped. */
1724 			vr_rx_start(sc);
1725 		}
1726 		vr_txeof(sc);
1727 
1728 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1729 			vr_start_locked(ifp);
1730 
1731 		status = CSR_READ_2(sc, VR_ISR);
1732 	}
1733 
1734 	/* Re-enable interrupts. */
1735 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1736 
1737 done_locked:
1738 	VR_UNLOCK(sc);
1739 }
1740 
1741 static int
1742 vr_error(struct vr_softc *sc, uint16_t status)
1743 {
1744 	uint16_t pcis;
1745 
1746 	status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW;
1747 	if ((status & VR_ISR_BUSERR) != 0) {
1748 		status &= ~VR_ISR_BUSERR;
1749 		sc->vr_stat.bus_errors++;
1750 		/* Disable further interrupts. */
1751 		CSR_WRITE_2(sc, VR_IMR, 0);
1752 		pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2);
1753 		device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- "
1754 		    "resetting\n", pcis);
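		/* The PCI status error bits are write-one-to-clear. */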
1755 		pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2);
1756 		sc->vr_flags |= VR_F_RESTART;
1757 		return (EAGAIN);
1758 	}
1759 	if ((status & VR_ISR_LINKSTAT2) != 0) {
1760 		/* Link state change, duplex changes etc. */
1761 		status &= ~VR_ISR_LINKSTAT2;
1762 	}
1763 	if ((status & VR_ISR_STATSOFLOW) != 0) {
1764 		status &= ~VR_ISR_STATSOFLOW;
1765 		if (sc->vr_revid >= REV_ID_VT6105M_A0) {
1766 			/* Update MIB counters. */
1767 		}
1768 	}
1769 
1770 	if (status != 0)
1771 		device_printf(sc->vr_dev,
1772 		    "unhandled interrupt, status = 0x%04x\n", status);
1773 	return (0);
1774 }
1775 
1776 /*
1777  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1778  * pointers to the fragment pointers.
1779  */
1780 static int
1781 vr_encap(struct vr_softc *sc, struct mbuf **m_head)
1782 {
1783 	struct vr_txdesc	*txd;
1784 	struct vr_desc		*desc;
1785 	struct mbuf		*m;
1786 	bus_dma_segment_t	txsegs[VR_MAXFRAGS];
1787 	uint32_t		csum_flags, txctl;
1788 	int			error, i, nsegs, prod, si;
1789 	int			padlen;
1790 
1791 	VR_LOCK_ASSERT(sc);
1792 
1793 	M_ASSERTPKTHDR((*m_head));
1794 
1795 	/*
1796 	 * Some VIA Rhine chips want packet buffers to be longword
1797 	 * aligned, but very often our mbufs aren't. Rather than
1798 	 * waste time trying to decide when to copy and when not
1799 	 * to copy, just do it all the time.
1800 	 */
1801 	if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) {
1802 		m = m_defrag(*m_head, M_NOWAIT);
1803 		if (m == NULL) {
1804 			m_freem(*m_head);
1805 			*m_head = NULL;
1806 			return (ENOBUFS);
1807 		}
1808 		*m_head = m;
1809 	}
1810 
1811 	/*
1812 	 * The Rhine chip doesn't auto-pad, so we have to make
1813 	 * sure to pad short frames out to the minimum frame length
1814 	 * ourselves.
1815 	 */
1816 	if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) {
1817 		m = *m_head;
1818 		padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len;
1819 		if (M_WRITABLE(m) == 0) {
1820 			/* Get a writable copy. */
1821 			m = m_dup(*m_head, M_NOWAIT);
1822 			m_freem(*m_head);
1823 			if (m == NULL) {
1824 				*m_head = NULL;
1825 				return (ENOBUFS);
1826 			}
1827 			*m_head = m;
1828 		}
1829 		if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
1830 			m = m_defrag(m, M_NOWAIT);
1831 			if (m == NULL) {
1832 				m_freem(*m_head);
1833 				*m_head = NULL;
1834 				return (ENOBUFS);
1835 			}
1836 		}
1837 		/*
1838 		 * Manually pad short frames, and zero the pad space
1839 		 * to avoid leaking data.
1840 		 */
1841 		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1842 		m->m_pkthdr.len += padlen;
1843 		m->m_len = m->m_pkthdr.len;
1844 		*m_head = m;
1845 	}
1846 
1847 	prod = sc->vr_cdata.vr_tx_prod;
1848 	txd = &sc->vr_cdata.vr_txdesc[prod];
1849 	error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
1850 	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1851 	if (error == EFBIG) {
1852 		m = m_collapse(*m_head, M_NOWAIT, VR_MAXFRAGS);
1853 		if (m == NULL) {
1854 			m_freem(*m_head);
1855 			*m_head = NULL;
1856 			return (ENOBUFS);
1857 		}
1858 		*m_head = m;
1859 		error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag,
1860 		    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1861 		if (error != 0) {
1862 			m_freem(*m_head);
1863 			*m_head = NULL;
1864 			return (error);
1865 		}
1866 	} else if (error != 0)
1867 		return (error);
1868 	if (nsegs == 0) {
1869 		m_freem(*m_head);
1870 		*m_head = NULL;
1871 		return (EIO);
1872 	}
1873 
1874 	/* Check number of available descriptors. */
1875 	if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) {
1876 		bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
1877 		return (ENOBUFS);
1878 	}
1879 
1880 	txd->tx_m = *m_head;
1881 	bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
1882 	    BUS_DMASYNC_PREWRITE);
1883 
1884 	/* Set checksum offload. */
1885 	csum_flags = 0;
1886 	if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) {
1887 		if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
1888 			csum_flags |= VR_TXCTL_IPCSUM;
1889 		if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
1890 			csum_flags |= VR_TXCTL_TCPCSUM;
1891 		if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
1892 			csum_flags |= VR_TXCTL_UDPCSUM;
1893 	}
1894 
1895 	/*
1896 	 * Quite contrary to the datasheet for the VIA Rhine, the
1897 	 * VR_TXCTL_TLINK bit is required in all descriptors, whether a
1898 	 * frame occupies a single buffer or multiple buffers.  Also, the
1899 	 * VR_TXSTAT_OWN bit is valid only in the first descriptor of a
1900 	 * multi-fragment frame.  Without that, the chip generates Tx
1901 	 * underrun interrupts and can't send any frames.
1902 	 */
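	/*
	 * Illustrative sketch of the resulting ring entries for a frame
	 * mapped into three DMA segments (si is the first descriptor):
	 *
	 *   ring[si + 0]: TLINK | FIRSTFRAG, OWN set last of all
	 *   ring[si + 1]: TLINK
	 *   ring[si + 2]: TLINK | LASTFRAG (| FINT on every
	 *                 VR_TX_INTR_THRESH-th frame)
	 */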
1903 	si = prod;
1904 	for (i = 0; i < nsegs; i++) {
1905 		desc = &sc->vr_rdata.vr_tx_ring[prod];
1906 		desc->vr_status = 0;
1907 		txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags;
1908 		if (i == 0)
1909 			txctl |= VR_TXCTL_FIRSTFRAG;
1910 		desc->vr_ctl = htole32(txctl);
1911 		desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr));
1912 		sc->vr_cdata.vr_tx_cnt++;
1913 		VR_INC(prod, VR_TX_RING_CNT);
1914 	}
1915 	/* Update producer index. */
1916 	sc->vr_cdata.vr_tx_prod = prod;
1917 
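	/* Step prod back to the last descriptor filled by the loop above. */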
1918 	prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT;
1919 	desc = &sc->vr_rdata.vr_tx_ring[prod];
1920 
1921 	/*
1922 	 * Set EOP on the last descriptor and request a Tx completion
1923 	 * interrupt for every VR_TX_INTR_THRESH-th frame.
1924 	 */
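	/*
	 * vr_tx_pkts counts frames modulo VR_TX_INTR_THRESH: it wraps to
	 * zero on every VR_TX_INTR_THRESH-th frame and only those frames
	 * set VR_TXCTL_FINT.  Descriptors of the other frames are
	 * reclaimed by a later interrupt or by the watchdog.
	 */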
1925 	VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH);
1926 	if (sc->vr_cdata.vr_tx_pkts == 0)
1927 		desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT);
1928 	else
1929 		desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG);
1930 
1931 	/* Finally, hand ownership of the first descriptor to the hardware. */
1932 	desc = &sc->vr_rdata.vr_tx_ring[si];
1933 	desc->vr_status |= htole32(VR_TXSTAT_OWN);
1934 
1935 	/* Sync descriptors. */
1936 	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
1937 	    sc->vr_cdata.vr_tx_ring_map,
1938 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1939 
1940 	return (0);
1941 }
1942 
1943 static void
1944 vr_start(struct ifnet *ifp)
1945 {
1946 	struct vr_softc		*sc;
1947 
1948 	sc = ifp->if_softc;
1949 	VR_LOCK(sc);
1950 	vr_start_locked(ifp);
1951 	VR_UNLOCK(sc);
1952 }
1953 
1954 static void
1955 vr_start_locked(struct ifnet *ifp)
1956 {
1957 	struct vr_softc		*sc;
1958 	struct mbuf		*m_head;
1959 	int			enq;
1960 
1961 	sc = ifp->if_softc;
1962 
1963 	VR_LOCK_ASSERT(sc);
1964 
1965 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1966 	    IFF_DRV_RUNNING || (sc->vr_flags & VR_F_LINK) == 0)
1967 		return;
1968 
1969 	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1970 	    sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) {
1971 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1972 		if (m_head == NULL)
1973 			break;
1974 		/*
1975 		 * Pack the data into the transmit ring. If we
1976 		 * don't have room, set the OACTIVE flag and wait
1977 		 * for the NIC to drain the ring.
1978 		 */
1979 		if (vr_encap(sc, &m_head)) {
1980 			if (m_head == NULL)
1981 				break;
1982 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1983 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1984 			break;
1985 		}
1986 
1987 		enq++;
1988 		/*
1989 		 * If there's a BPF listener, bounce a copy of this frame
1990 		 * to him.
1991 		 */
1992 		ETHER_BPF_MTAP(ifp, m_head);
1993 	}
1994 
1995 	if (enq > 0) {
1996 		/* Tell the chip to start transmitting. */
1997 		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
1998 		/* Set a timeout in case the chip goes out to lunch. */
1999 		sc->vr_watchdog_timer = 5;
2000 	}
2001 }
2002 
2003 static void
2004 vr_init(void *xsc)
2005 {
2006 	struct vr_softc		*sc;
2007 
2008 	sc = (struct vr_softc *)xsc;
2009 	VR_LOCK(sc);
2010 	vr_init_locked(sc);
2011 	VR_UNLOCK(sc);
2012 }
2013 
2014 static void
2015 vr_init_locked(struct vr_softc *sc)
2016 {
2017 	struct ifnet		*ifp;
2018 	struct mii_data		*mii;
2019 	bus_addr_t		addr;
2020 	int			i;
2021 
2022 	VR_LOCK_ASSERT(sc);
2023 
2024 	ifp = sc->vr_ifp;
2025 	mii = device_get_softc(sc->vr_miibus);
2026 
2027 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2028 		return;
2029 
2030 	/* Cancel pending I/O and free all RX/TX buffers. */
2031 	vr_stop(sc);
2032 	vr_reset(sc);
2033 
2034 	/* Set our station address. */
2035 	for (i = 0; i < ETHER_ADDR_LEN; i++)
2036 		CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]);
2037 
2038 	/* Set DMA size. */
2039 	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
2040 	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);
2041 
2042 	/*
2043 	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
2044 	 * so we must set both.
2045 	 */
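	/*
	 * Each threshold field is cleared before the new value is set,
	 * so no stale bits from a previous configuration remain.
	 */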
2046 	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
2047 	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);
2048 
2049 	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
2050 	VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg);
2051 
2052 	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
2053 	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);
2054 
2055 	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
2056 	VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg);
2057 
2058 	/* Init circular RX list. */
2059 	if (vr_rx_ring_init(sc) != 0) {
2060 		device_printf(sc->vr_dev,
2061 		    "initialization failed: no memory for rx buffers\n");
2062 		vr_stop(sc);
2063 		return;
2064 	}
2065 
2066 	/* Init tx descriptors. */
2067 	vr_tx_ring_init(sc);
2068 
2069 	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
2070 		uint8_t vcam[2] = { 0, 0 };
2071 
2072 		/* Disable VLAN hardware tag insertion/stripping. */
2073 		VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL);
2074 		/* Disable VLAN hardware filtering. */
2075 		VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB);
2076 		/* Disable all CAM entries. */
2077 		vr_cam_mask(sc, VR_MCAST_CAM, 0);
2078 		vr_cam_mask(sc, VR_VLAN_CAM, 0);
2079 		/* Enable the first VLAN CAM. */
2080 		vr_cam_data(sc, VR_VLAN_CAM, 0, vcam);
2081 		vr_cam_mask(sc, VR_VLAN_CAM, 1);
2082 	}
2083 
2084 	/*
2085 	 * Set up receive filter.
2086 	 */
2087 	vr_set_filter(sc);
2088 
2089 	/*
2090 	 * Load the address of the RX ring.
2091 	 */
2092 	addr = VR_RX_RING_ADDR(sc, 0);
2093 	CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
2094 	/*
2095 	 * Load the address of the TX ring.
2096 	 */
2097 	addr = VR_TX_RING_ADDR(sc, 0);
2098 	CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
2099 	/* Default: full-duplex, no Tx poll. */
2100 	CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL);
2101 
2102 	/* Set flow-control parameters for Rhine III. */
2103 	if (sc->vr_revid >= REV_ID_VT6105_A0) {
2104 		/*
2105 		 * Configure the number of Rx buffers available for
2106 		 * incoming packets.
2107 		 * Even though the datasheet says almost nothing about
2108 		 * this register, it should be updated whenever the
2109 		 * driver posts new Rx buffers to the controller.
2110 		 * Otherwise, no XON frame is sent to the link partner
2111 		 * even when the controller has enough Rx buffers, and
2112 		 * the host would be isolated from the network.
2113 		 * The controller is not smart enough to track the
2114 		 * number of available Rx buffers, so the driver has to
2115 		 * tell it how many Rx buffers are posted.
2116 		 * In other words, this register works like a residue
2117 		 * counter for Rx buffers and should be initialized to
2118 		 * the total number of Rx buffers - 1 before enabling
2119 		 * the Rx MAC.  Note that this register is 8 bits wide,
2120 		 * so it effectively limits the number of Rx buffers
2121 		 * the controller can be told about to 255.
2122 		 */
2123 		CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT - 1);
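		/*
		 * Per the note above, the Rx completion path is
		 * presumably expected to update this register each
		 * time a buffer is re-posted to the ring.
		 */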
2124 		/*
2125 		 * Tx pause low threshold: 8 free receive buffers
2126 		 * Tx pause XON high threshold: 24 free receive buffers
2127 		 */
2128 		CSR_WRITE_1(sc, VR_FLOWCR1,
2129 		    VR_FLOWCR1_TXLO8 | VR_FLOWCR1_TXHI24 | VR_FLOWCR1_XONXOFF);
2130 		/* Set Tx pause timer. */
2131 		CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff);
2132 	}
2133 
2134 	/* Enable receiver and transmitter. */
2135 	CSR_WRITE_1(sc, VR_CR0,
2136 	    VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO);
2137 
2138 	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
2139 #ifdef DEVICE_POLLING
2140 	/*
2141 	 * Disable interrupts if we are polling.
2142 	 */
2143 	if (ifp->if_capenable & IFCAP_POLLING)
2144 		CSR_WRITE_2(sc, VR_IMR, 0);
2145 	else
2146 #endif
2147 	/*
2148 	 * Enable interrupts and disable MII intrs.
2149 	 */
2150 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
2151 	if (sc->vr_revid > REV_ID_VT6102_A)
2152 		CSR_WRITE_2(sc, VR_MII_IMR, 0);
2153 
2154 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2155 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2156 
2157 	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
2158 	mii_mediachg(mii);
2159 
2160 	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
2161 }
2162 
2163 /*
2164  * Set media options.
2165  */
2166 static int
2167 vr_ifmedia_upd(struct ifnet *ifp)
2168 {
2169 	struct vr_softc		*sc;
2170 	struct mii_data		*mii;
2171 	struct mii_softc	*miisc;
2172 	int			error;
2173 
2174 	sc = ifp->if_softc;
2175 	VR_LOCK(sc);
2176 	mii = device_get_softc(sc->vr_miibus);
2177 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2178 		PHY_RESET(miisc);
2179 	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
2180 	error = mii_mediachg(mii);
2181 	VR_UNLOCK(sc);
2182 
2183 	return (error);
2184 }
2185 
2186 /*
2187  * Report current media status.
2188  */
2189 static void
2190 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2191 {
2192 	struct vr_softc		*sc;
2193 	struct mii_data		*mii;
2194 
2195 	sc = ifp->if_softc;
2196 	mii = device_get_softc(sc->vr_miibus);
2197 	VR_LOCK(sc);
2198 	if ((ifp->if_flags & IFF_UP) == 0) {
2199 		VR_UNLOCK(sc);
2200 		return;
2201 	}
2202 	mii_pollstat(mii);
2203 	ifmr->ifm_active = mii->mii_media_active;
2204 	ifmr->ifm_status = mii->mii_media_status;
2205 	VR_UNLOCK(sc);
2206 }
2207 
2208 static int
2209 vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2210 {
2211 	struct vr_softc		*sc;
2212 	struct ifreq		*ifr;
2213 	struct mii_data		*mii;
2214 	int			error, mask;
2215 
2216 	sc = ifp->if_softc;
2217 	ifr = (struct ifreq *)data;
2218 	error = 0;
2219 
2220 	switch (command) {
2221 	case SIOCSIFFLAGS:
2222 		VR_LOCK(sc);
2223 		if (ifp->if_flags & IFF_UP) {
2224 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2225 				if ((ifp->if_flags ^ sc->vr_if_flags) &
2226 				    (IFF_PROMISC | IFF_ALLMULTI))
2227 					vr_set_filter(sc);
2228 			} else {
2229 				if ((sc->vr_flags & VR_F_DETACHED) == 0)
2230 					vr_init_locked(sc);
2231 			}
2232 		} else {
2233 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2234 				vr_stop(sc);
2235 		}
2236 		sc->vr_if_flags = ifp->if_flags;
2237 		VR_UNLOCK(sc);
2238 		break;
2239 	case SIOCADDMULTI:
2240 	case SIOCDELMULTI:
2241 		VR_LOCK(sc);
2242 		vr_set_filter(sc);
2243 		VR_UNLOCK(sc);
2244 		break;
2245 	case SIOCGIFMEDIA:
2246 	case SIOCSIFMEDIA:
2247 		mii = device_get_softc(sc->vr_miibus);
2248 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2249 		break;
2250 	case SIOCSIFCAP:
2251 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2252 #ifdef DEVICE_POLLING
2253 		if (mask & IFCAP_POLLING) {
2254 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
2255 				error = ether_poll_register(vr_poll, ifp);
2256 				if (error != 0)
2257 					break;
2258 				VR_LOCK(sc);
2259 				/* Disable interrupts. */
2260 				CSR_WRITE_2(sc, VR_IMR, 0x0000);
2261 				ifp->if_capenable |= IFCAP_POLLING;
2262 				VR_UNLOCK(sc);
2263 			} else {
2264 				error = ether_poll_deregister(ifp);
2265 				/* Enable interrupts. */
2266 				VR_LOCK(sc);
2267 				CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
2268 				ifp->if_capenable &= ~IFCAP_POLLING;
2269 				VR_UNLOCK(sc);
2270 			}
2271 		}
2272 #endif /* DEVICE_POLLING */
2273 		if ((mask & IFCAP_TXCSUM) != 0 &&
2274 		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
2275 			ifp->if_capenable ^= IFCAP_TXCSUM;
2276 			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
2277 				ifp->if_hwassist |= VR_CSUM_FEATURES;
2278 			else
2279 				ifp->if_hwassist &= ~VR_CSUM_FEATURES;
2280 		}
2281 		if ((mask & IFCAP_RXCSUM) != 0 &&
2282 		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
2283 			ifp->if_capenable ^= IFCAP_RXCSUM;
2284 		if ((mask & IFCAP_WOL_UCAST) != 0 &&
2285 		    (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0)
2286 			ifp->if_capenable ^= IFCAP_WOL_UCAST;
2287 		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2288 		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
2289 			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2290 		break;
2291 	default:
2292 		error = ether_ioctl(ifp, command, data);
2293 		break;
2294 	}
2295 
2296 	return (error);
2297 }
2298 
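/*
 * Tx watchdog, called with the softc lock held.  Counts down the timer
 * armed when frames are queued.  On expiry, reclaim completed
 * descriptors first, since a Tx completion interrupt is requested only
 * for every VR_TX_INTR_THRESH-th frame, and reinitialize the interface
 * only if transmission is really stuck.
 */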
2299 static void
2300 vr_watchdog(struct vr_softc *sc)
2301 {
2302 	struct ifnet		*ifp;
2303 
2304 	VR_LOCK_ASSERT(sc);
2305 
2306 	if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer)
2307 		return;
2308 
2309 	ifp = sc->vr_ifp;
2310 	/*
2311 	 * Reclaim first, as we don't request an interrupt for every packet.
2312 	 */
2313 	vr_txeof(sc);
2314 	if (sc->vr_cdata.vr_tx_cnt == 0)
2315 		return;
2316 
2317 	if ((sc->vr_flags & VR_F_LINK) == 0) {
2318 		if (bootverbose)
2319 			if_printf(sc->vr_ifp, "watchdog timeout "
2320 			   "(missed link)\n");
2321 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2322 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2323 		vr_init_locked(sc);
2324 		return;
2325 	}
2326 
2327 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2328 	if_printf(ifp, "watchdog timeout\n");
2329 
2330 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2331 	vr_init_locked(sc);
2332 
2333 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2334 		vr_start_locked(ifp);
2335 }
2336 
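/*
 * (Re)start the Tx DMA engine: reload the descriptor base address and
 * turn the transmitter back on if it was stopped, then issue a Tx poll
 * demand if descriptors are still queued.
 */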
2337 static void
2338 vr_tx_start(struct vr_softc *sc)
2339 {
2340 	bus_addr_t	addr;
2341 	uint8_t		cmd;
2342 
2343 	cmd = CSR_READ_1(sc, VR_CR0);
2344 	if ((cmd & VR_CR0_TX_ON) == 0) {
2345 		addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons);
2346 		CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
2347 		cmd |= VR_CR0_TX_ON;
2348 		CSR_WRITE_1(sc, VR_CR0, cmd);
2349 	}
2350 	if (sc->vr_cdata.vr_tx_cnt != 0) {
2351 		sc->vr_watchdog_timer = 5;
2352 		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
2353 	}
2354 }
2355 
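/*
 * (Re)start the Rx DMA engine: reload the descriptor base address and
 * turn the receiver back on if it was stopped, then issue an Rx poll
 * demand.
 */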
2356 static void
2357 vr_rx_start(struct vr_softc *sc)
2358 {
2359 	bus_addr_t	addr;
2360 	uint8_t		cmd;
2361 
2362 	cmd = CSR_READ_1(sc, VR_CR0);
2363 	if ((cmd & VR_CR0_RX_ON) == 0) {
2364 		addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons);
2365 		CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
2366 		cmd |= VR_CR0_RX_ON;
2367 		CSR_WRITE_1(sc, VR_CR0, cmd);
2368 	}
2369 	CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO);
2370 }
2371 
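/*
 * Stop the Tx DMA engine, polling up to VR_TIMEOUT times at 5us
 * intervals for it to go idle; returns ETIMEDOUT if it does not.
 */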
2372 static int
2373 vr_tx_stop(struct vr_softc *sc)
2374 {
2375 	int		i;
2376 	uint8_t		cmd;
2377 
2378 	cmd = CSR_READ_1(sc, VR_CR0);
2379 	if ((cmd & VR_CR0_TX_ON) != 0) {
2380 		cmd &= ~VR_CR0_TX_ON;
2381 		CSR_WRITE_1(sc, VR_CR0, cmd);
2382 		for (i = VR_TIMEOUT; i > 0; i--) {
2383 			DELAY(5);
2384 			cmd = CSR_READ_1(sc, VR_CR0);
2385 			if ((cmd & VR_CR0_TX_ON) == 0)
2386 				break;
2387 		}
2388 		if (i == 0)
2389 			return (ETIMEDOUT);
2390 	}
2391 	return (0);
2392 }
2393 
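/*
 * Stop the Rx DMA engine, with the same polling protocol and timeout
 * as vr_tx_stop().
 */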
2394 static int
2395 vr_rx_stop(struct vr_softc *sc)
2396 {
2397 	int		i;
2398 	uint8_t		cmd;
2399 
2400 	cmd = CSR_READ_1(sc, VR_CR0);
2401 	if ((cmd & VR_CR0_RX_ON) != 0) {
2402 		cmd &= ~VR_CR0_RX_ON;
2403 		CSR_WRITE_1(sc, VR_CR0, cmd);
2404 		for (i = VR_TIMEOUT; i > 0; i--) {
2405 			DELAY(5);
2406 			cmd = CSR_READ_1(sc, VR_CR0);
2407 			if ((cmd & VR_CR0_RX_ON) == 0)
2408 				break;
2409 		}
2410 		if (i == 0)
2411 			return (ETIMEDOUT);
2412 	}
2413 	return (0);
2414 }
2415 
2416 /*
2417  * Stop the adapter and free any mbufs allocated to the
2418  * RX and TX lists.
2419  */
2420 static void
2421 vr_stop(struct vr_softc *sc)
2422 {
2423 	struct vr_txdesc	*txd;
2424 	struct vr_rxdesc	*rxd;
2425 	struct ifnet		*ifp;
2426 	int			i;
2427 
2428 	VR_LOCK_ASSERT(sc);
2429 
2430 	ifp = sc->vr_ifp;
2431 	sc->vr_watchdog_timer = 0;
2432 
2433 	callout_stop(&sc->vr_stat_callout);
2434 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2435 
2436 	CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP);
2437 	if (vr_rx_stop(sc) != 0)
2438 		device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__);
2439 	if (vr_tx_stop(sc) != 0)
2440 		device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__);
2441 	/* Clear pending interrupts. */
2442 	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
2443 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
2444 	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
2445 	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
2446 
2447 	/*
2448 	 * Free RX and TX mbufs still in the queues.
2449 	 */
2450 	for (i = 0; i < VR_RX_RING_CNT; i++) {
2451 		rxd = &sc->vr_cdata.vr_rxdesc[i];
2452 		if (rxd->rx_m != NULL) {
2453 			bus_dmamap_sync(sc->vr_cdata.vr_rx_tag,
2454 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2455 			bus_dmamap_unload(sc->vr_cdata.vr_rx_tag,
2456 			    rxd->rx_dmamap);
2457 			m_freem(rxd->rx_m);
2458 			rxd->rx_m = NULL;
2459 		}
2460 	}
2461 	for (i = 0; i < VR_TX_RING_CNT; i++) {
2462 		txd = &sc->vr_cdata.vr_txdesc[i];
2463 		if (txd->tx_m != NULL) {
2464 			bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
2465 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2466 			bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
2467 			    txd->tx_dmamap);
2468 			m_freem(txd->tx_m);
2469 			txd->tx_m = NULL;
2470 		}
2471 	}
2472 }
2473 
2474 /*
2475  * Stop all chip I/O so that the kernel's probe routines don't
2476  * get confused by errant DMAs when rebooting.
2477  */
2478 static int
2479 vr_shutdown(device_t dev)
2480 {
2481 
2482 	return (vr_suspend(dev));
2483 }
2484 
2485 static int
2486 vr_suspend(device_t dev)
2487 {
2488 	struct vr_softc		*sc;
2489 
2490 	sc = device_get_softc(dev);
2491 
2492 	VR_LOCK(sc);
2493 	vr_stop(sc);
2494 	vr_setwol(sc);
2495 	sc->vr_flags |= VR_F_SUSPENDED;
2496 	VR_UNLOCK(sc);
2497 
2498 	return (0);
2499 }
2500 
2501 static int
2502 vr_resume(device_t dev)
2503 {
2504 	struct vr_softc		*sc;
2505 	struct ifnet		*ifp;
2506 
2507 	sc = device_get_softc(dev);
2508 
2509 	VR_LOCK(sc);
2510 	ifp = sc->vr_ifp;
2511 	vr_clrwol(sc);
2512 	vr_reset(sc);
2513 	if (ifp->if_flags & IFF_UP)
2514 		vr_init_locked(sc);
2515 
2516 	sc->vr_flags &= ~VR_F_SUSPENDED;
2517 	VR_UNLOCK(sc);
2518 
2519 	return (0);
2520 }
2521 
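/*
 * Configure Wake On LAN for Rhine II and later: program the requested
 * wakeup events (unicast match, magic packet), put the chip into its
 * sleep state and request PME# from PCI power management when WOL is
 * enabled.
 */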
2522 static void
2523 vr_setwol(struct vr_softc *sc)
2524 {
2525 	struct ifnet		*ifp;
2526 	int			pmc;
2527 	uint16_t		pmstat;
2528 	uint8_t			v;
2529 
2530 	VR_LOCK_ASSERT(sc);
2531 
2532 	if (sc->vr_revid < REV_ID_VT6102_A ||
2533 	    pci_find_cap(sc->vr_dev, PCIY_PMG, &pmc) != 0)
2534 		return;
2535 
2536 	ifp = sc->vr_ifp;
2537 
2538 	/* Clear WOL configuration. */
2539 	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
2540 	CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
2541 	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
2542 	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
2543 	if (sc->vr_revid > REV_ID_VT6105_B0) {
2544 		/* Newer Rhine III supports two additional patterns. */
2545 		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
2546 		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
2547 		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
2548 	}
2549 	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
2550 		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST);
2551 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
2552 		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC);
2553 	/*
2554 	 * It seems that multicast wakeup frames require programming the
2555 	 * pattern registers with a valid CRC as well as a pattern mask
2556 	 * for each pattern.  While it's possible to set up such a pattern,
2557 	 * it would complicate WOL configuration, so ignore multicast
	 * wakeup frames.
2558 	 */
2559 	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
2560 		CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
2561 		v = CSR_READ_1(sc, VR_STICKHW);
2562 		CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB);
2563 		CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN);
2564 	}
2565 
2566 	/* Put the hardware into sleep mode. */
2567 	v = CSR_READ_1(sc, VR_STICKHW);
2568 	v |= VR_STICKHW_DS0 | VR_STICKHW_DS1;
2569 	CSR_WRITE_1(sc, VR_STICKHW, v);
2570 
2571 	/* Request PME if WOL is requested. */
2572 	pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2);
2573 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
2574 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
2575 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2576 	pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
2577 }
2578 
2579 static void
2580 vr_clrwol(struct vr_softc *sc)
2581 {
2582 	uint8_t			v;
2583 
2584 	VR_LOCK_ASSERT(sc);
2585 
2586 	if (sc->vr_revid < REV_ID_VT6102_A)
2587 		return;
2588 
2589 	/* Take the hardware out of sleep mode. */
2590 	v = CSR_READ_1(sc, VR_STICKHW);
2591 	v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB);
2592 	CSR_WRITE_1(sc, VR_STICKHW, v);
2593 
2594 	/* Clear WOL configuration as WOL may interfere with normal operation. */
2595 	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
2596 	CSR_WRITE_1(sc, VR_WOLCFG_CLR,
2597 	    VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR);
2598 	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
2599 	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
2600 	if (sc->vr_revid > REV_ID_VT6105_B0) {
2601 		/* Newer Rhine III supports two additional patterns. */
2602 		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
2603 		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
2604 		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
2605 	}
2606 }
2607 
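/*
 * Sysctl handler for the statistics node: writing 1 to the node dumps
 * the driver's accumulated traffic and error counters to the console.
 */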
2608 static int
2609 vr_sysctl_stats(SYSCTL_HANDLER_ARGS)
2610 {
2611 	struct vr_softc		*sc;
2612 	struct vr_statistics	*stat;
2613 	int			error;
2614 	int			result;
2615 
2616 	result = -1;
2617 	error = sysctl_handle_int(oidp, &result, 0, req);
2618 
2619 	if (error != 0 || req->newptr == NULL)
2620 		return (error);
2621 
2622 	if (result == 1) {
2623 		sc = (struct vr_softc *)arg1;
2624 		stat = &sc->vr_stat;
2625 
2626 		printf("%s statistics:\n", device_get_nameunit(sc->vr_dev));
2627 		printf("Outbound good frames : %ju\n",
2628 		    (uintmax_t)stat->tx_ok);
2629 		printf("Inbound good frames : %ju\n",
2630 		    (uintmax_t)stat->rx_ok);
2631 		printf("Outbound errors : %u\n", stat->tx_errors);
2632 		printf("Inbound errors : %u\n", stat->rx_errors);
2633 		printf("Inbound no buffers : %u\n", stat->rx_no_buffers);
2634 		printf("Inbound no mbuf clusters: %d\n", stat->rx_no_mbufs);
2635 		printf("Inbound FIFO overflows : %d\n",
2636 		    stat->rx_fifo_overflows);
2637 		printf("Inbound CRC errors : %u\n", stat->rx_crc_errors);
2638 		printf("Inbound frame alignment errors : %u\n",
2639 		    stat->rx_alignment);
2640 		printf("Inbound giant frames : %u\n", stat->rx_giants);
2641 		printf("Inbound runt frames : %u\n", stat->rx_runts);
2642 		printf("Outbound aborted with excessive collisions : %u\n",
2643 		    stat->tx_abort);
2644 		printf("Outbound collisions : %u\n", stat->tx_collisions);
2645 		printf("Outbound late collisions : %u\n",
2646 		    stat->tx_late_collisions);
2647 		printf("Outbound underrun : %u\n", stat->tx_underrun);
2648 		printf("PCI bus errors : %u\n", stat->bus_errors);
2649 		printf("driver restarted due to Rx/Tx shutdown failure : %u\n",
2650 		    stat->num_restart);
2651 	}
2652 
2653 	return (error);
2654 }
2655