xref: /freebsd/sys/dev/vr/if_vr.c (revision a1a4f1a0d87b594d3f17a97dc0127eec1417e6f6)
1 /*
2  * Copyright (c) 1997, 1998
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 
35 /*
36  * VIA Rhine fast ethernet PCI NIC driver
37  *
38  * Supports various network adapters based on the VIA Rhine
39  * and Rhine II PCI controllers, including the D-Link DFE530TX.
40  * Datasheets are available at http://www.via.com.tw.
41  *
42  * Written by Bill Paul <wpaul@ctr.columbia.edu>
43  * Electrical Engineering Department
44  * Columbia University, New York City
45  */
46 
47 /*
48  * The VIA Rhine controllers are similar in some respects to the
49  * DEC tulip chips, though less complicated. The controller
50  * uses an MII bus and an external physical layer interface. The
51  * receiver has a one entry perfect filter and a 64-bit hash table
52  * multicast filter. Transmit and receive descriptors are similar
53  * to the tulip.
54  *
55  * The Rhine has a serious flaw in its transmit DMA mechanism:
56  * transmit buffers must be longword aligned. Unfortunately,
57  * FreeBSD doesn't guarantee that mbufs will be filled in starting
58  * at longword boundaries, so we have to do a buffer copy before
59  * transmission.
60  */
61 
62 #include "bpf.h"
63 
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/sockio.h>
67 #include <sys/mbuf.h>
68 #include <sys/malloc.h>
69 #include <sys/kernel.h>
70 #include <sys/socket.h>
71 
72 #include <net/if.h>
73 #include <net/if_arp.h>
74 #include <net/ethernet.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
77 
78 #if NBPF > 0
79 #include <net/bpf.h>
80 #endif
81 
82 #include <vm/vm.h>              /* for vtophys */
83 #include <vm/pmap.h>            /* for vtophys */
84 #include <machine/clock.h>      /* for DELAY */
85 #include <machine/bus_pio.h>
86 #include <machine/bus_memio.h>
87 #include <machine/bus.h>
88 #include <machine/resource.h>
89 #include <sys/bus.h>
90 #include <sys/rman.h>
91 
92 #include <pci/pcireg.h>
93 #include <pci/pcivar.h>
94 
95 #define VR_USEIOSPACE
96 
97 /* #define VR_BACKGROUND_AUTONEG */
98 
99 #include <pci/if_vrreg.h>
100 
101 #ifndef lint
102 static const char rcsid[] =
103   "$FreeBSD$";
104 #endif
105 
106 /*
107  * Various supported device vendors/types and their names.
108  */
109 static struct vr_type vr_devs[] = {
110 	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
111 		"VIA VT3043 Rhine I 10/100BaseTX" },
112 	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
113 		"VIA VT86C100A Rhine II 10/100BaseTX" },
114 	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
115 		"Delta Electronics Rhine II 10/100BaseTX" },
116 	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
117 		"Addtron Technology Rhine II 10/100BaseTX" },
118 	{ 0, 0, NULL }
119 };
120 
121 /*
122  * Various supported PHY vendors/types and their names. Note that
123  * this driver will work with pretty much any MII-compliant PHY,
124  * so failure to positively identify the chip is not a fatal error.
125  */
126 
127 static struct vr_type vr_phys[] = {
128 	{ TI_PHY_VENDORID, TI_PHY_10BT, "<TI ThunderLAN 10BT (internal)>" },
129 	{ TI_PHY_VENDORID, TI_PHY_100VGPMI, "<TI TNETE211 100VG Any-LAN>" },
130 	{ NS_PHY_VENDORID, NS_PHY_83840A, "<National Semiconductor DP83840A>"},
131 	{ LEVEL1_PHY_VENDORID, LEVEL1_PHY_LXT970, "<Level 1 LXT970>" },
132 	{ INTEL_PHY_VENDORID, INTEL_PHY_82555, "<Intel 82555>" },
133 	{ SEEQ_PHY_VENDORID, SEEQ_PHY_80220, "<SEEQ 80220>" },
134 	{ 0, 0, "<MII-compliant physical interface>" }
135 };
136 
137 static int vr_probe		__P((device_t));
138 static int vr_attach		__P((device_t));
139 static int vr_detach		__P((device_t));
140 
141 static int vr_newbuf		__P((struct vr_softc *,
142 					struct vr_chain_onefrag *,
143 					struct mbuf *));
144 static int vr_encap		__P((struct vr_softc *, struct vr_chain *,
145 						struct mbuf * ));
146 
147 static void vr_rxeof		__P((struct vr_softc *));
148 static void vr_rxeoc		__P((struct vr_softc *));
149 static void vr_txeof		__P((struct vr_softc *));
150 static void vr_txeoc		__P((struct vr_softc *));
151 static void vr_intr		__P((void *));
152 static void vr_start		__P((struct ifnet *));
153 static int vr_ioctl		__P((struct ifnet *, u_long, caddr_t));
154 static void vr_init		__P((void *));
155 static void vr_stop		__P((struct vr_softc *));
156 static void vr_watchdog		__P((struct ifnet *));
157 static void vr_shutdown		__P((device_t));
158 static int vr_ifmedia_upd	__P((struct ifnet *));
159 static void vr_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
160 
161 static void vr_mii_sync		__P((struct vr_softc *));
162 static void vr_mii_send		__P((struct vr_softc *, u_int32_t, int));
163 static int vr_mii_readreg	__P((struct vr_softc *, struct vr_mii_frame *));
164 static int vr_mii_writereg	__P((struct vr_softc *, struct vr_mii_frame *));
165 static u_int16_t vr_phy_readreg	__P((struct vr_softc *, int));
166 static void vr_phy_writereg	__P((struct vr_softc *, u_int16_t, u_int16_t));
167 
168 static void vr_autoneg_xmit	__P((struct vr_softc *));
169 static void vr_autoneg_mii	__P((struct vr_softc *, int, int));
170 static void vr_setmode_mii	__P((struct vr_softc *, int));
171 static void vr_getmode_mii	__P((struct vr_softc *));
172 static void vr_setcfg		__P((struct vr_softc *, u_int16_t));
173 static u_int8_t vr_calchash	__P((u_int8_t *));
174 static void vr_setmulti		__P((struct vr_softc *));
175 static void vr_reset		__P((struct vr_softc *));
176 static int vr_list_rx_init	__P((struct vr_softc *));
177 static int vr_list_tx_init	__P((struct vr_softc *));
178 
179 #ifdef VR_USEIOSPACE
180 #define VR_RES			SYS_RES_IOPORT
181 #define VR_RID			VR_PCI_LOIO
182 #else
183 #define VR_RES			SYS_RES_MEMORY
184 #define VR_RID			VR_PCI_LOMEM
185 #endif
186 
187 static device_method_t vr_methods[] = {
188 	/* Device interface */
189 	DEVMETHOD(device_probe,		vr_probe),
190 	DEVMETHOD(device_attach,	vr_attach),
191 	DEVMETHOD(device_detach, 	vr_detach),
192 	DEVMETHOD(device_shutdown,	vr_shutdown),
193 	{ 0, 0 }
194 };
195 
196 static driver_t vr_driver = {
197 	"vr",
198 	vr_methods,
199 	sizeof(struct vr_softc)
200 };
201 
202 static devclass_t vr_devclass;
203 
204 DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0);
205 
206 #define VR_SETBIT(sc, reg, x)				\
207 	CSR_WRITE_1(sc, reg,				\
208 		CSR_READ_1(sc, reg) | x)
209 
210 #define VR_CLRBIT(sc, reg, x)				\
211 	CSR_WRITE_1(sc, reg,				\
212 		CSR_READ_1(sc, reg) & ~x)
213 
214 #define VR_SETBIT16(sc, reg, x)				\
215 	CSR_WRITE_2(sc, reg,				\
216 		CSR_READ_2(sc, reg) | x)
217 
218 #define VR_CLRBIT16(sc, reg, x)				\
219 	CSR_WRITE_2(sc, reg,				\
220 		CSR_READ_2(sc, reg) & ~x)
221 
222 #define VR_SETBIT32(sc, reg, x)				\
223 	CSR_WRITE_4(sc, reg,				\
224 		CSR_READ_4(sc, reg) | x)
225 
226 #define VR_CLRBIT32(sc, reg, x)				\
227 	CSR_WRITE_4(sc, reg,				\
228 		CSR_READ_4(sc, reg) & ~x)
229 
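/*
 * MII bit-bang helpers: set or clear individual control bits in the
 * MIICMD register.
 */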
230 #define SIO_SET(x)					\
231 	CSR_WRITE_1(sc, VR_MIICMD,			\
232 		CSR_READ_1(sc, VR_MIICMD) | x)
233 
234 #define SIO_CLR(x)					\
235 	CSR_WRITE_1(sc, VR_MIICMD,			\
236 		CSR_READ_1(sc, VR_MIICMD) & ~x)
237 
238 /*
239  * Sync the PHYs by setting data bit and strobing the clock 32 times.
240  */
241 static void vr_mii_sync(sc)
242 	struct vr_softc		*sc;
243 {
244 	register int		i;
245 
246 	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
247 
248 	for (i = 0; i < 32; i++) {
249 		SIO_SET(VR_MIICMD_CLK);
250 		DELAY(1);
251 		SIO_CLR(VR_MIICMD_CLK);
252 		DELAY(1);
253 	}
254 
255 	return;
256 }
257 
258 /*
259  * Clock a series of bits through the MII.
260  */
261 static void vr_mii_send(sc, bits, cnt)
262 	struct vr_softc		*sc;
263 	u_int32_t		bits;
264 	int			cnt;
265 {
266 	int			i;
267 
268 	SIO_CLR(VR_MIICMD_CLK);
269 
270 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
271 		if (bits & i) {
272 			SIO_SET(VR_MIICMD_DATAIN);
273 		} else {
274 			SIO_CLR(VR_MIICMD_DATAIN);
275 		}
276 		DELAY(1);
277 		SIO_CLR(VR_MIICMD_CLK);
278 		DELAY(1);
279 		SIO_SET(VR_MIICMD_CLK);
280 	}
281 }
282 
283 /*
284  * Read a PHY register through the MII.
285  */
286 static int vr_mii_readreg(sc, frame)
287 	struct vr_softc		*sc;
288 	struct vr_mii_frame	*frame;
289 
290 {
291 	int			i, ack, s;
292 
293 	s = splimp();
294 
295 	/*
296 	 * Set up frame for RX.
297 	 */
298 	frame->mii_stdelim = VR_MII_STARTDELIM;
299 	frame->mii_opcode = VR_MII_READOP;
300 	frame->mii_turnaround = 0;
301 	frame->mii_data = 0;
302 
303 	CSR_WRITE_1(sc, VR_MIICMD, 0);
304 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
305 
306 	/*
307  	 * Turn on data xmit.
308 	 */
309 	SIO_SET(VR_MIICMD_DIR);
310 
311 	vr_mii_sync(sc);
312 
313 	/*
314 	 * Send command/address info.
315 	 */
316 	vr_mii_send(sc, frame->mii_stdelim, 2);
317 	vr_mii_send(sc, frame->mii_opcode, 2);
318 	vr_mii_send(sc, frame->mii_phyaddr, 5);
319 	vr_mii_send(sc, frame->mii_regaddr, 5);
320 
321 	/* Idle bit */
322 	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
323 	DELAY(1);
324 	SIO_SET(VR_MIICMD_CLK);
325 	DELAY(1);
326 
327 	/* Turn off xmit. */
328 	SIO_CLR(VR_MIICMD_DIR);
329 
330 	/* Check for ack */
331 	SIO_CLR(VR_MIICMD_CLK);
332 	DELAY(1);
333 	SIO_SET(VR_MIICMD_CLK);
334 	DELAY(1);
335 	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
336 
337 	/*
338 	 * Now try reading data bits. If the ack failed, we still
339 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
340 	 */
341 	if (ack) {
342 		for(i = 0; i < 16; i++) {
343 			SIO_CLR(VR_MIICMD_CLK);
344 			DELAY(1);
345 			SIO_SET(VR_MIICMD_CLK);
346 			DELAY(1);
347 		}
348 		goto fail;
349 	}
350 
351 	for (i = 0x8000; i; i >>= 1) {
352 		SIO_CLR(VR_MIICMD_CLK);
353 		DELAY(1);
354 		if (!ack) {
355 			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
356 				frame->mii_data |= i;
357 			DELAY(1);
358 		}
359 		SIO_SET(VR_MIICMD_CLK);
360 		DELAY(1);
361 	}
362 
363 fail:
364 
365 	SIO_CLR(VR_MIICMD_CLK);
366 	DELAY(1);
367 	SIO_SET(VR_MIICMD_CLK);
368 	DELAY(1);
369 
370 	splx(s);
371 
372 	if (ack)
373 		return(1);
374 	return(0);
375 }
376 
377 /*
378  * Write to a PHY register through the MII.
379  */
380 static int vr_mii_writereg(sc, frame)
381 	struct vr_softc		*sc;
382 	struct vr_mii_frame	*frame;
383 
384 {
385 	int			s;
386 
387 	s = splimp();
388 
389 	CSR_WRITE_1(sc, VR_MIICMD, 0);
390 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
391 
392 	/*
393 	 * Set up frame for TX.
394 	 */
395 
396 	frame->mii_stdelim = VR_MII_STARTDELIM;
397 	frame->mii_opcode = VR_MII_WRITEOP;
398 	frame->mii_turnaround = VR_MII_TURNAROUND;
399 
400 	/*
401  	 * Turn on data output.
402 	 */
403 	SIO_SET(VR_MIICMD_DIR);
404 
405 	vr_mii_sync(sc);
406 
407 	vr_mii_send(sc, frame->mii_stdelim, 2);
408 	vr_mii_send(sc, frame->mii_opcode, 2);
409 	vr_mii_send(sc, frame->mii_phyaddr, 5);
410 	vr_mii_send(sc, frame->mii_regaddr, 5);
411 	vr_mii_send(sc, frame->mii_turnaround, 2);
412 	vr_mii_send(sc, frame->mii_data, 16);
413 
414 	/* Idle bit. */
415 	SIO_SET(VR_MIICMD_CLK);
416 	DELAY(1);
417 	SIO_CLR(VR_MIICMD_CLK);
418 	DELAY(1);
419 
420 	/*
421 	 * Turn off xmit.
422 	 */
423 	SIO_CLR(VR_MIICMD_DIR);
424 
425 	splx(s);
426 
427 	return(0);
428 }
429 
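/*
 * Read a register from the PHY selected at attach time (sc->vr_phy_addr).
 */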
430 static u_int16_t vr_phy_readreg(sc, reg)
431 	struct vr_softc		*sc;
432 	int			reg;
433 {
434 	struct vr_mii_frame	frame;
435 
436 	bzero((char *)&frame, sizeof(frame));
437 
438 	frame.mii_phyaddr = sc->vr_phy_addr;
439 	frame.mii_regaddr = reg;
440 	vr_mii_readreg(sc, &frame);
441 
442 	return(frame.mii_data);
443 }
444 
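/*
 * Write a value to a register on the currently selected PHY.
 */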
445 static void vr_phy_writereg(sc, reg, data)
446 	struct vr_softc		*sc;
447 	u_int16_t		reg;
448 	u_int16_t		data;
449 {
450 	struct vr_mii_frame	frame;
451 
452 	bzero((char *)&frame, sizeof(frame));
453 
454 	frame.mii_phyaddr = sc->vr_phy_addr;
455 	frame.mii_regaddr = reg;
456 	frame.mii_data = data;
457 
458 	vr_mii_writereg(sc, &frame);
459 
460 	return;
461 }
462 
463 /*
464  * Calculate the CRC of a multicast group address and return the upper 6 bits.
465  */
466 static u_int8_t vr_calchash(addr)
467 	u_int8_t		*addr;
468 {
469 	u_int32_t		crc, carry;
470 	int			i, j;
471 	u_int8_t		c;
472 
473 	/* Compute CRC for the address value. */
474 	crc = 0xFFFFFFFF; /* initial value */
475 
476 	for (i = 0; i < 6; i++) {
477 		c = *(addr + i);
478 		for (j = 0; j < 8; j++) {
479 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
480 			crc <<= 1;
481 			c >>= 1;
482 			if (carry)
483 				crc = (crc ^ 0x04c11db6) | carry;
484 		}
485 	}
486 
487 	/* return the filter bit position */
488 	return((crc >> 26) & 0x0000003F);
489 }
490 
491 /*
492  * Program the 64-bit multicast hash filter.
493  */
494 static void vr_setmulti(sc)
495 	struct vr_softc		*sc;
496 {
497 	struct ifnet		*ifp;
498 	int			h = 0;
499 	u_int32_t		hashes[2] = { 0, 0 };
500 	struct ifmultiaddr	*ifma;
501 	u_int8_t		rxfilt;
502 	int			mcnt = 0;
503 
504 	ifp = &sc->arpcom.ac_if;
505 
506 	rxfilt = CSR_READ_1(sc, VR_RXCFG);
507 
508 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
509 		rxfilt |= VR_RXCFG_RX_MULTI;
510 		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
511 		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
512 		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
513 		return;
514 	}
515 
516 	/* first, zot all the existing hash bits */
517 	CSR_WRITE_4(sc, VR_MAR0, 0);
518 	CSR_WRITE_4(sc, VR_MAR1, 0);
519 
520 	/* now program new ones */
521 	for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
522 				ifma = ifma->ifma_link.le_next) {
523 		if (ifma->ifma_addr->sa_family != AF_LINK)
524 			continue;
525 		h = vr_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
526 		if (h < 32)
527 			hashes[0] |= (1 << h);
528 		else
529 			hashes[1] |= (1 << (h - 32));
530 		mcnt++;
531 	}
532 
533 	if (mcnt)
534 		rxfilt |= VR_RXCFG_RX_MULTI;
535 	else
536 		rxfilt &= ~VR_RXCFG_RX_MULTI;
537 
538 	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
539 	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
540 	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
541 
542 	return;
543 }
544 
545 /*
546  * Initiate an autonegotiation session.
547  */
548 static void vr_autoneg_xmit(sc)
549 	struct vr_softc		*sc;
550 {
551 	u_int16_t		phy_sts;
552 
553 	vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
554 	DELAY(500);
555 	while(vr_phy_readreg(sc, PHY_BMCR)
556 			& PHY_BMCR_RESET);
557 
558 	phy_sts = vr_phy_readreg(sc, PHY_BMCR);
559 	phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
560 	vr_phy_writereg(sc, PHY_BMCR, phy_sts);
561 
562 	return;
563 }
564 
565 /*
566  * Invoke autonegotiation on a PHY.
567  */
568 static void vr_autoneg_mii(sc, flag, verbose)
569 	struct vr_softc		*sc;
570 	int			flag;
571 	int			verbose;
572 {
573 	u_int16_t		phy_sts = 0, media, advert, ability;
574 	struct ifnet		*ifp;
575 	struct ifmedia		*ifm;
576 
577 	ifm = &sc->ifmedia;
578 	ifp = &sc->arpcom.ac_if;
579 
580 	ifm->ifm_media = IFM_ETHER | IFM_AUTO;
581 
582 	/*
583 	 * The 100baseT4 PHY on the 3c905-T4 has the 'autoneg supported'
584 	 * bit cleared in the status register, but has the 'autoneg enabled'
585 	 * bit set in the control register. This is a contradiction, and
586 	 * I'm not sure how to handle it. If you want to force an attempt
587 	 * to autoneg for 100baseT4 PHYs, #define FORCE_AUTONEG_TFOUR
588 	 * and see what happens.
589 	 */
590 #ifndef FORCE_AUTONEG_TFOUR
591 	/*
592 	 * First, see if autoneg is supported. If not, there's
593 	 * no point in continuing.
594 	 */
595 	phy_sts = vr_phy_readreg(sc, PHY_BMSR);
596 	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
597 		if (verbose)
598 			printf("vr%d: autonegotiation not supported\n",
599 							sc->vr_unit);
600 		ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
601 		return;
602 	}
603 #endif
604 
605 	switch (flag) {
606 	case VR_FLAG_FORCEDELAY:
607 		/*
608 	 	 * XXX Never use this option anywhere but in the probe
609 	 	 * routine: making the kernel stop dead in its tracks
610  		 * for three whole seconds after we've gone multi-user
611 		 * is really bad manners.
612 	 	 */
613 		vr_autoneg_xmit(sc);
614 		DELAY(5000000);
615 		break;
616 	case VR_FLAG_SCHEDDELAY:
617 		/*
618 		 * Wait for the transmitter to go idle before starting
619 		 * an autoneg session, otherwise vr_start() may clobber
620 	 	 * our timeout, and we don't want to allow transmission
621 		 * during an autoneg session since that can screw it up.
622 	 	 */
623 		if (sc->vr_cdata.vr_tx_head != NULL) {
624 			sc->vr_want_auto = 1;
625 			return;
626 		}
627 		vr_autoneg_xmit(sc);
628 		ifp->if_timer = 5;
629 		sc->vr_autoneg = 1;
630 		sc->vr_want_auto = 0;
631 		return;
632 		break;
633 	case VR_FLAG_DELAYTIMEO:
634 		ifp->if_timer = 0;
635 		sc->vr_autoneg = 0;
636 		break;
637 	default:
638 		printf("vr%d: invalid autoneg flag: %d\n", sc->vr_unit, flag);
639 		return;
640 	}
641 
642 	if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
643 		if (verbose)
644 			printf("vr%d: autoneg complete, ", sc->vr_unit);
645 		phy_sts = vr_phy_readreg(sc, PHY_BMSR);
646 	} else {
647 		if (verbose)
648 			printf("vr%d: autoneg not complete, ", sc->vr_unit);
649 	}
650 
651 	media = vr_phy_readreg(sc, PHY_BMCR);
652 
653 	/* Link is good. Report modes and set duplex mode. */
654 	if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
655 		if (verbose)
656 			printf("link status good ");
657 		advert = vr_phy_readreg(sc, PHY_ANAR);
658 		ability = vr_phy_readreg(sc, PHY_LPAR);
659 
660 		if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
661 			ifm->ifm_media = IFM_ETHER|IFM_100_T4;
662 			media |= PHY_BMCR_SPEEDSEL;
663 			media &= ~PHY_BMCR_DUPLEX;
664 			printf("(100baseT4)\n");
665 		} else if (advert & PHY_ANAR_100BTXFULL &&
666 			ability & PHY_ANAR_100BTXFULL) {
667 			ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
668 			media |= PHY_BMCR_SPEEDSEL;
669 			media |= PHY_BMCR_DUPLEX;
670 			printf("(full-duplex, 100Mbps)\n");
671 		} else if (advert & PHY_ANAR_100BTXHALF &&
672 			ability & PHY_ANAR_100BTXHALF) {
673 			ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
674 			media |= PHY_BMCR_SPEEDSEL;
675 			media &= ~PHY_BMCR_DUPLEX;
676 			printf("(half-duplex, 100Mbps)\n");
677 		} else if (advert & PHY_ANAR_10BTFULL &&
678 			ability & PHY_ANAR_10BTFULL) {
679 			ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
680 			media &= ~PHY_BMCR_SPEEDSEL;
681 			media |= PHY_BMCR_DUPLEX;
682 			printf("(full-duplex, 10Mbps)\n");
683 		} else {
684 			ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
685 			media &= ~PHY_BMCR_SPEEDSEL;
686 			media &= ~PHY_BMCR_DUPLEX;
687 			printf("(half-duplex, 10Mbps)\n");
688 		}
689 
690 		media &= ~PHY_BMCR_AUTONEGENBL;
691 
692 		/* Set ASIC's duplex mode to match the PHY. */
693 		vr_setcfg(sc, media);
694 		vr_phy_writereg(sc, PHY_BMCR, media);
695 	} else {
696 		if (verbose)
697 			printf("no carrier\n");
698 	}
699 
700 	vr_init(sc);
701 
702 	if (sc->vr_tx_pend) {
703 		sc->vr_autoneg = 0;
704 		sc->vr_tx_pend = 0;
705 		vr_start(ifp);
706 	}
707 
708 	return;
709 }
710 
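/*
 * Query the PHY's status register and register each supported media
 * type with ifmedia, remembering the best mode found as the default.
 */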
711 static void vr_getmode_mii(sc)
712 	struct vr_softc		*sc;
713 {
714 	u_int16_t		bmsr;
715 	struct ifnet		*ifp;
716 
717 	ifp = &sc->arpcom.ac_if;
718 
719 	bmsr = vr_phy_readreg(sc, PHY_BMSR);
720 	if (bootverbose)
721 		printf("vr%d: PHY status word: %x\n", sc->vr_unit, bmsr);
722 
723 	/* fallback */
724 	sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
725 
726 	if (bmsr & PHY_BMSR_10BTHALF) {
727 		if (bootverbose)
728 			printf("vr%d: 10Mbps half-duplex mode supported\n",
729 								sc->vr_unit);
730 		ifmedia_add(&sc->ifmedia,
731 			IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
732 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
733 	}
734 
735 	if (bmsr & PHY_BMSR_10BTFULL) {
736 		if (bootverbose)
737 			printf("vr%d: 10Mbps full-duplex mode supported\n",
738 								sc->vr_unit);
739 		ifmedia_add(&sc->ifmedia,
740 			IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
741 		sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
742 	}
743 
744 	if (bmsr & PHY_BMSR_100BTXHALF) {
745 		if (bootverbose)
746 			printf("vr%d: 100Mbps half-duplex mode supported\n",
747 								sc->vr_unit);
748 		ifp->if_baudrate = 100000000;
749 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
750 		ifmedia_add(&sc->ifmedia,
751 			IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
752 		sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
753 	}
754 
755 	if (bmsr & PHY_BMSR_100BTXFULL) {
756 		if (bootverbose)
757 			printf("vr%d: 100Mbps full-duplex mode supported\n",
758 								sc->vr_unit);
759 		ifp->if_baudrate = 100000000;
760 		ifmedia_add(&sc->ifmedia,
761 			IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
762 		sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
763 	}
764 
765 	/* Some also support 100BaseT4. */
766 	if (bmsr & PHY_BMSR_100BT4) {
767 		if (bootverbose)
768 			printf("vr%d: 100baseT4 mode supported\n", sc->vr_unit);
769 		ifp->if_baudrate = 100000000;
770 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL);
771 		sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_T4;
772 #ifdef FORCE_AUTONEG_TFOUR
773 		if (bootverbose)
774 			printf("vr%d: forcing on autoneg support for BT4\n",
775 							 sc->vr_unit);
776 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
777 		sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
778 #endif
779 	}
780 
781 	if (bmsr & PHY_BMSR_CANAUTONEG) {
782 		if (bootverbose)
783 			printf("vr%d: autoneg supported\n", sc->vr_unit);
784 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
785 		sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
786 	}
787 
788 	return;
789 }
790 
791 /*
792  * Set speed and duplex mode.
793  */
794 static void vr_setmode_mii(sc, media)
795 	struct vr_softc		*sc;
796 	int			media;
797 {
798 	u_int16_t		bmcr;
799 	struct ifnet		*ifp;
800 
801 	ifp = &sc->arpcom.ac_if;
802 
803 	/*
804 	 * If an autoneg session is in progress, stop it.
805 	 */
806 	if (sc->vr_autoneg) {
807 		printf("vr%d: canceling autoneg session\n", sc->vr_unit);
808 		ifp->if_timer = sc->vr_autoneg = sc->vr_want_auto = 0;
809 		bmcr = vr_phy_readreg(sc, PHY_BMCR);
810 		bmcr &= ~PHY_BMCR_AUTONEGENBL;
811 		vr_phy_writereg(sc, PHY_BMCR, bmcr);
812 	}
813 
814 	printf("vr%d: selecting MII, ", sc->vr_unit);
815 
816 	bmcr = vr_phy_readreg(sc, PHY_BMCR);
817 
818 	bmcr &= ~(PHY_BMCR_AUTONEGENBL|PHY_BMCR_SPEEDSEL|
819 			PHY_BMCR_DUPLEX|PHY_BMCR_LOOPBK);
820 
821 	if (IFM_SUBTYPE(media) == IFM_100_T4) {
822 		printf("100Mbps/T4, half-duplex\n");
823 		bmcr |= PHY_BMCR_SPEEDSEL;
824 		bmcr &= ~PHY_BMCR_DUPLEX;
825 	}
826 
827 	if (IFM_SUBTYPE(media) == IFM_100_TX) {
828 		printf("100Mbps, ");
829 		bmcr |= PHY_BMCR_SPEEDSEL;
830 	}
831 
832 	if (IFM_SUBTYPE(media) == IFM_10_T) {
833 		printf("10Mbps, ");
834 		bmcr &= ~PHY_BMCR_SPEEDSEL;
835 	}
836 
837 	if ((media & IFM_GMASK) == IFM_FDX) {
838 		printf("full duplex\n");
839 		bmcr |= PHY_BMCR_DUPLEX;
840 	} else {
841 		printf("half duplex\n");
842 		bmcr &= ~PHY_BMCR_DUPLEX;
843 	}
844 
845 	vr_setcfg(sc, bmcr);
846 	vr_phy_writereg(sc, PHY_BMCR, bmcr);
847 
848 	return;
849 }
850 
851 /*
852  * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
853  * netconfig register, we first have to put the transmit and/or receive
854  * logic in the idle state.
855  */
856 static void vr_setcfg(sc, bmcr)
857 	struct vr_softc		*sc;
858 	u_int16_t		bmcr;
859 {
860 	int			restart = 0;
861 
862 	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
863 		restart = 1;
864 		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
865 	}
866 
867 	if (bmcr & PHY_BMCR_DUPLEX)
868 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
869 	else
870 		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
871 
872 	if (restart)
873 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
874 
875 	return;
876 }
877 
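/*
 * Issue a software reset and wait for the chip to report completion.
 */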
878 static void vr_reset(sc)
879 	struct vr_softc		*sc;
880 {
881 	register int		i;
882 
883 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
884 
885 	for (i = 0; i < VR_TIMEOUT; i++) {
886 		DELAY(10);
887 		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
888 			break;
889 	}
890 	if (i == VR_TIMEOUT)
891 		printf("vr%d: reset never completed!\n", sc->vr_unit);
892 
893 	/* Wait a little while for the chip to get its brains in order. */
894 	DELAY(1000);
895 
896 	return;
897 }
898 
899 /*
900  * Probe for a VIA Rhine chip. Check the PCI vendor and device
901  * IDs against our list and return a device name if we find a match.
902  */
903 static int vr_probe(dev)
904 	device_t		dev;
905 {
906 	struct vr_type		*t;
907 
908 	t = vr_devs;
909 
910 	while(t->vr_name != NULL) {
911 		if ((pci_get_vendor(dev) == t->vr_vid) &&
912 		    (pci_get_device(dev) == t->vr_did)) {
913 			device_set_desc(dev, t->vr_name);
914 			return(0);
915 		}
916 		t++;
917 	}
918 
919 	return(ENXIO);
920 }
921 
922 /*
923  * Attach the interface. Allocate softc structures, do ifmedia
924  * setup and ethernet/BPF attach.
925  */
926 static int vr_attach(dev)
927 	device_t		dev;
928 {
929 	int			s, i;
930 	u_char			eaddr[ETHER_ADDR_LEN];
931 	u_int32_t		command;
932 	struct vr_softc		*sc;
933 	struct ifnet		*ifp;
934 	int			media = IFM_ETHER|IFM_100_TX|IFM_FDX;
935 	unsigned int		round;
936 	caddr_t			roundptr;
937 	struct vr_type		*p;
938 	u_int16_t		phy_vid, phy_did, phy_sts;
939 	int			unit, error = 0, rid;
940 
941 	s = splimp();
942 
943 	sc = device_get_softc(dev);
944 	unit = device_get_unit(dev);
945 	bzero(sc, sizeof(struct vr_softc));
946 
947 	/*
948 	 * Handle power management nonsense.
949 	 */
950 
951 	command = pci_read_config(dev, VR_PCI_CAPID, 4) & 0x000000FF;
952 	if (command == 0x01) {
953 
954 		command = pci_read_config(dev, VR_PCI_PWRMGMTCTRL, 4);
955 		if (command & VR_PSTATE_MASK) {
956 			u_int32_t		iobase, membase, irq;
957 
958 			/* Save important PCI config data. */
959 			iobase = pci_read_config(dev, VR_PCI_LOIO, 4);
960 			membase = pci_read_config(dev, VR_PCI_LOMEM, 4);
961 			irq = pci_read_config(dev, VR_PCI_INTLINE, 4);
962 
963 			/* Reset the power state. */
964 			printf("vr%d: chip is in D%d power mode "
965 			"-- setting to D0\n", unit, command & VR_PSTATE_MASK);
966 			command &= 0xFFFFFFFC;
967 			pci_write_config(dev, VR_PCI_PWRMGMTCTRL, command, 4);
968 
969 			/* Restore PCI config data. */
970 			pci_write_config(dev, VR_PCI_LOIO, iobase, 4);
971 			pci_write_config(dev, VR_PCI_LOMEM, membase, 4);
972 			pci_write_config(dev, VR_PCI_INTLINE, irq, 4);
973 		}
974 	}
975 
976 	/*
977 	 * Map control/status registers.
978 	 */
979 	command = pci_read_config(dev, PCI_COMMAND_STATUS_REG, 4);
980 	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
981 	pci_write_config(dev, PCI_COMMAND_STATUS_REG, command, 4);
982 	command = pci_read_config(dev, PCI_COMMAND_STATUS_REG, 4);
983 
984 #ifdef VR_USEIOSPACE
985 	if (!(command & PCIM_CMD_PORTEN)) {
986 		printf("vr%d: failed to enable I/O ports!\n", unit);
987 		error = ENXIO;
988 		goto fail;
989 	}
990 #else
991 	if (!(command & PCIM_CMD_MEMEN)) {
992 		printf("vr%d: failed to enable memory mapping!\n", unit);
993 		goto fail;
994 	}
995 #endif
996 
997 	rid = VR_RID;
998 	sc->vr_res = bus_alloc_resource(dev, VR_RES, &rid,
999 	    0, ~0, 1, RF_ACTIVE);
1000 
1001 	if (sc->vr_res == NULL) {
1002 		printf("vr%d: couldn't map ports/memory\n", unit);
1003 		error = ENXIO;
1004 		goto fail;
1005 	}
1006 
1007 	sc->vr_btag = rman_get_bustag(sc->vr_res);
1008 	sc->vr_bhandle = rman_get_bushandle(sc->vr_res);
1009 
1010 	/* Allocate interrupt */
1011 	rid = 0;
1012 	sc->vr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1013 	    RF_SHAREABLE | RF_ACTIVE);
1014 
1015 	if (sc->vr_irq == NULL) {
1016 		printf("vr%d: couldn't map interrupt\n", unit);
1017 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
1018 		error = ENXIO;
1019 		goto fail;
1020 	}
1021 
1022 	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET,
1023 	    vr_intr, sc, &sc->vr_intrhand);
1024 
1025 	if (error) {
1026 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
1027 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
1028 		printf("vr%d: couldn't set up irq\n", unit);
1029 		goto fail;
1030 	}
1031 
1032 	/* Reset the adapter. */
1033 	vr_reset(sc);
1034 
1035 	/*
1036 	 * Get station address. The way the Rhine chips work,
1037 	 * you're not allowed to directly access the EEPROM once
1038 	 * they've been programmed a special way. Consequently,
1039 	 * we need to read the node address from the PAR0 and PAR1
1040 	 * registers.
1041 	 */
1042 	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
1043 	DELAY(200);
1044 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1045 		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
1046 
1047 	/*
1048 	 * A Rhine chip was detected. Inform the world.
1049 	 */
1050 	printf("vr%d: Ethernet address: %6D\n", unit, eaddr, ":");
1051 
1052 	sc->vr_unit = unit;
1053 	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
1054 
1055 	sc->vr_ldata_ptr = malloc(sizeof(struct vr_list_data) + 8,
1056 				M_DEVBUF, M_NOWAIT);
1057 	if (sc->vr_ldata_ptr == NULL) {
1058 		printf("vr%d: no memory for list buffers!\n", unit);
1059 		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
1060 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
1061 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
1062 		error = ENXIO;
1063 		goto fail;
1064 	}
1065 
1066 	sc->vr_ldata = (struct vr_list_data *)sc->vr_ldata_ptr;
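	/*
	 * The descriptor lists must be aligned on an 8-byte boundary;
	 * advance the pointer to the next such boundary (the malloc
	 * above reserved 8 extra bytes to allow for this).
	 */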
1067 	round = (unsigned int)sc->vr_ldata_ptr & 0xF;
1068 	roundptr = sc->vr_ldata_ptr;
1069 	for (i = 0; i < 8; i++) {
1070 		if (round % 8) {
1071 			round++;
1072 			roundptr++;
1073 		} else
1074 			break;
1075 	}
1076 	sc->vr_ldata = (struct vr_list_data *)roundptr;
1077 	bzero(sc->vr_ldata, sizeof(struct vr_list_data));
1078 
1079 	ifp = &sc->arpcom.ac_if;
1080 	ifp->if_softc = sc;
1081 	ifp->if_unit = unit;
1082 	ifp->if_name = "vr";
1083 	ifp->if_mtu = ETHERMTU;
1084 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1085 	ifp->if_ioctl = vr_ioctl;
1086 	ifp->if_output = ether_output;
1087 	ifp->if_start = vr_start;
1088 	ifp->if_watchdog = vr_watchdog;
1089 	ifp->if_init = vr_init;
1090 	ifp->if_baudrate = 10000000;
1091 	ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;
1092 
1093 	if (bootverbose)
1094 		printf("vr%d: probing for a PHY\n", sc->vr_unit);
1095 	for (i = VR_PHYADDR_MIN; i < VR_PHYADDR_MAX + 1; i++) {
1096 		if (bootverbose)
1097 			printf("vr%d: checking address: %d\n",
1098 						sc->vr_unit, i);
1099 		sc->vr_phy_addr = i;
1100 		vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
1101 		DELAY(500);
1102 		while(vr_phy_readreg(sc, PHY_BMCR)
1103 				& PHY_BMCR_RESET);
1104 		if ((phy_sts = vr_phy_readreg(sc, PHY_BMSR)))
1105 			break;
1106 	}
1107 	if (phy_sts) {
1108 		phy_vid = vr_phy_readreg(sc, PHY_VENID);
1109 		phy_did = vr_phy_readreg(sc, PHY_DEVID);
1110 		if (bootverbose)
1111 			printf("vr%d: found PHY at address %d, ",
1112 					sc->vr_unit, sc->vr_phy_addr);
1113 		if (bootverbose)
1114 			printf("vendor id: %x device id: %x\n",
1115 				phy_vid, phy_did);
1116 		p = vr_phys;
1117 		while(p->vr_vid) {
1118 			if (phy_vid == p->vr_vid &&
1119 				(phy_did | 0x000F) == p->vr_did) {
1120 				sc->vr_pinfo = p;
1121 				break;
1122 			}
1123 			p++;
1124 		}
1125 		if (sc->vr_pinfo == NULL)
1126 			sc->vr_pinfo = &vr_phys[PHY_UNKNOWN];
1127 		if (bootverbose)
1128 			printf("vr%d: PHY type: %s\n",
1129 				sc->vr_unit, sc->vr_pinfo->vr_name);
1130 	} else {
1131 		printf("vr%d: MII without any phy!\n", sc->vr_unit);
1132 		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
1133 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
1134 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
1135 		free(sc->vr_ldata_ptr, M_DEVBUF);
1136 		error = ENXIO;
1137 		goto fail;
1138 	}
1139 
1140 	/*
1141 	 * Do ifmedia setup.
1142 	 */
1143 	ifmedia_init(&sc->ifmedia, 0, vr_ifmedia_upd, vr_ifmedia_sts);
1144 
1145 	vr_getmode_mii(sc);
1146 	if (cold) {
1147 		vr_autoneg_mii(sc, VR_FLAG_FORCEDELAY, 1);
1148 		vr_stop(sc);
1149 	} else {
1150 		vr_init(sc);
1151 		vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1152 	}
1153 
1154 	media = sc->ifmedia.ifm_media;
1155 
1156 	ifmedia_set(&sc->ifmedia, media);
1157 
1158 	/*
1159 	 * Call MI attach routines.
1160 	 */
1161 	if_attach(ifp);
1162 	ether_ifattach(ifp);
1163 
1164 #if NBPF > 0
1165 	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
1166 #endif
1167 
1168 fail:
1169 	splx(s);
1170 	return(error);
1171 }
1172 
1173 static int vr_detach(dev)
1174 	device_t		dev;
1175 {
1176 	struct vr_softc		*sc;
1177 	struct ifnet		*ifp;
1178 	int			s;
1179 
1180 	s = splimp();
1181 
1182 	sc = device_get_softc(dev);
1183 	ifp = &sc->arpcom.ac_if;
1184 
1185 	vr_stop(sc);
1186 	if_detach(ifp);
1187 
1188 	bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
1189 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
1190 	bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
1191 
1192 	free(sc->vr_ldata_ptr, M_DEVBUF);
1193 	ifmedia_removeall(&sc->ifmedia);
1194 
1195 	splx(s);
1196 
1197 	return(0);
1198 }
1199 
1200 /*
1201  * Initialize the transmit descriptors.
1202  */
1203 static int vr_list_tx_init(sc)
1204 	struct vr_softc		*sc;
1205 {
1206 	struct vr_chain_data	*cd;
1207 	struct vr_list_data	*ld;
1208 	int			i;
1209 
1210 	cd = &sc->vr_cdata;
1211 	ld = sc->vr_ldata;
1212 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
1213 		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
1214 		if (i == (VR_TX_LIST_CNT - 1))
1215 			cd->vr_tx_chain[i].vr_nextdesc =
1216 				&cd->vr_tx_chain[0];
1217 		else
1218 			cd->vr_tx_chain[i].vr_nextdesc =
1219 				&cd->vr_tx_chain[i + 1];
1220 	}
1221 
1222 	cd->vr_tx_free = &cd->vr_tx_chain[0];
1223 	cd->vr_tx_tail = cd->vr_tx_head = NULL;
1224 
1225 	return(0);
1226 }
1227 
1228 
1229 /*
1230  * Initialize the RX descriptors and allocate mbufs for them. Note that
1231  * we arrange the descriptors in a closed ring, so that the last descriptor
1232  * points back to the first.
1233  */
1234 static int vr_list_rx_init(sc)
1235 	struct vr_softc		*sc;
1236 {
1237 	struct vr_chain_data	*cd;
1238 	struct vr_list_data	*ld;
1239 	int			i;
1240 
1241 	cd = &sc->vr_cdata;
1242 	ld = sc->vr_ldata;
1243 
1244 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
1245 		cd->vr_rx_chain[i].vr_ptr =
1246 			(struct vr_desc *)&ld->vr_rx_list[i];
1247 		if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS)
1248 			return(ENOBUFS);
1249 		if (i == (VR_RX_LIST_CNT - 1)) {
1250 			cd->vr_rx_chain[i].vr_nextdesc =
1251 					&cd->vr_rx_chain[0];
1252 			ld->vr_rx_list[i].vr_next =
1253 					vtophys(&ld->vr_rx_list[0]);
1254 		} else {
1255 			cd->vr_rx_chain[i].vr_nextdesc =
1256 					&cd->vr_rx_chain[i + 1];
1257 			ld->vr_rx_list[i].vr_next =
1258 					vtophys(&ld->vr_rx_list[i + 1]);
1259 		}
1260 	}
1261 
1262 	cd->vr_rx_head = &cd->vr_rx_chain[0];
1263 
1264 	return(0);
1265 }
1266 
1267 /*
1268  * Initialize an RX descriptor and attach an MBUF cluster.
1269  * Note: the length fields are only 11 bits wide, which means the
1270  * largest size we can specify is 2047. This is important because
1271  * MCLBYTES is 2048, so we have to subtract one; otherwise we'll
1272  * overflow the field and make a mess.
1273  */
1274 static int vr_newbuf(sc, c, m)
1275 	struct vr_softc		*sc;
1276 	struct vr_chain_onefrag	*c;
1277 	struct mbuf		*m;
1278 {
1279 	struct mbuf		*m_new = NULL;
1280 
1281 	if (m == NULL) {
1282 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1283 		if (m_new == NULL) {
1284 			printf("vr%d: no memory for rx list "
1285 			    "-- packet dropped!\n", sc->vr_unit);
1286 			return(ENOBUFS);
1287 		}
1288 
1289 		MCLGET(m_new, M_DONTWAIT);
1290 		if (!(m_new->m_flags & M_EXT)) {
1291 			printf("vr%d: no memory for rx list "
1292 			    "-- packet dropped!\n", sc->vr_unit);
1293 			m_freem(m_new);
1294 			return(ENOBUFS);
1295 		}
1296 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1297 	} else {
1298 		m_new = m;
1299 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1300 		m_new->m_data = m_new->m_ext.ext_buf;
1301 	}
1302 
1303 	m_adj(m_new, sizeof(u_int64_t));
1304 
1305 	c->vr_mbuf = m_new;
1306 	c->vr_ptr->vr_status = VR_RXSTAT;
1307 	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
1308 	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
1309 
1310 	return(0);
1311 }
1312 
1313 /*
1314  * A frame has been uploaded: pass the resulting mbuf chain up to
1315  * the higher level protocols.
1316  */
1317 static void vr_rxeof(sc)
1318 	struct vr_softc		*sc;
1319 {
1320         struct ether_header	*eh;
1321         struct mbuf		*m;
1322         struct ifnet		*ifp;
1323 	struct vr_chain_onefrag	*cur_rx;
1324 	int			total_len = 0;
1325 	u_int32_t		rxstat;
1326 
1327 	ifp = &sc->arpcom.ac_if;
1328 
1329 	while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
1330 							VR_RXSTAT_OWN)) {
1331 		struct mbuf		*m0 = NULL;
1332 
1333 		cur_rx = sc->vr_cdata.vr_rx_head;
1334 		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
1335 		m = cur_rx->vr_mbuf;
1336 
1337 		/*
1338 		 * If an error occurs, update stats, clear the
1339 		 * status word and leave the mbuf cluster in place:
1340 		 * it should simply get re-used next time this descriptor
1341 	 	 * comes up in the ring.
1342 		 */
1343 		if (rxstat & VR_RXSTAT_RXERR) {
1344 			ifp->if_ierrors++;
1345 			printf("vr%d: rx error: ", sc->vr_unit);
1346 			switch(rxstat & 0x000000FF) {
1347 			case VR_RXSTAT_CRCERR:
1348 				printf("crc error\n");
1349 				break;
1350 			case VR_RXSTAT_FRAMEALIGNERR:
1351 				printf("frame alignment error\n");
1352 				break;
1353 			case VR_RXSTAT_FIFOOFLOW:
1354 				printf("FIFO overflow\n");
1355 				break;
1356 			case VR_RXSTAT_GIANT:
1357 				printf("received giant packet\n");
1358 				break;
1359 			case VR_RXSTAT_RUNT:
1360 				printf("received runt packet\n");
1361 				break;
1362 			case VR_RXSTAT_BUSERR:
1363 				printf("system bus error\n");
1364 				break;
1365 			case VR_RXSTAT_BUFFERR:
1366 				printf("rx buffer error\n");
1367 				break;
1368 			default:
1369 				printf("unknown rx error\n");
1370 				break;
1371 			}
1372 			vr_newbuf(sc, cur_rx, m);
1373 			continue;
1374 		}
1375 
1376 		/* No errors; receive the packet. */
1377 		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
1378 
1379 		/*
1380 		 * XXX The VIA Rhine chip includes the CRC with every
1381 		 * received frame, and there's no way to turn this
1382 		 * behavior off (at least, I can't find anything in
1383 	 	 * the manual that explains how to do it) so we have
1384 		 * to trim off the CRC manually.
1385 		 */
1386 		total_len -= ETHER_CRC_LEN;
1387 
1388 		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1389 		    total_len + ETHER_ALIGN, 0, ifp, NULL);
1390 		vr_newbuf(sc, cur_rx, m);
1391 		if (m0 == NULL) {
1392 			ifp->if_ierrors++;
1393 			continue;
1394 		}
1395 		m_adj(m0, ETHER_ALIGN);
1396 		m = m0;
1397 
1398 		ifp->if_ipackets++;
1399 		eh = mtod(m, struct ether_header *);
1400 
1401 #if NBPF > 0
1402 		/*
1403 		 * Handle BPF listeners. Let the BPF user see the packet, but
1404 		 * don't pass it up to the ether_input() layer unless it's
1405 		 * a broadcast packet, multicast packet, matches our ethernet
1406 		 * address or the interface is in promiscuous mode.
1407 		 */
1408 		if (ifp->if_bpf) {
1409 			bpf_mtap(ifp, m);
1410 			if (ifp->if_flags & IFF_PROMISC &&
1411 				(bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
1412 						ETHER_ADDR_LEN) &&
1413 					(eh->ether_dhost[0] & 1) == 0)) {
1414 				m_freem(m);
1415 				continue;
1416 			}
1417 		}
1418 #endif
1419 		/* Remove header from mbuf and pass it on. */
1420 		m_adj(m, sizeof(struct ether_header));
1421 		ether_input(ifp, eh, m);
1422 	}
1423 
1424 	return;
1425 }
1426 
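/*
 * RX 'end of channel'/no-buffer handler: drain any completed frames,
 * then restart the receiver at the current head of the RX ring.
 */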
1427 static void vr_rxeoc(sc)
1428 	struct vr_softc		*sc;
1429 {
1430 
1431 	vr_rxeof(sc);
1432 	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1433 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1434 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1435 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1436 
1437 	return;
1438 }
1439 
1440 /*
1441  * A frame was downloaded to the chip. It's safe for us to clean up
1442  * the list buffers.
1443  */
1444 
1445 static void vr_txeof(sc)
1446 	struct vr_softc		*sc;
1447 {
1448 	struct vr_chain		*cur_tx;
1449 	struct ifnet		*ifp;
1450 	register struct mbuf	*n;
1451 
1452 	ifp = &sc->arpcom.ac_if;
1453 
1454 	/* Clear the timeout timer. */
1455 	ifp->if_timer = 0;
1456 
1457 	/* Sanity check. */
1458 	if (sc->vr_cdata.vr_tx_head == NULL)
1459 		return;
1460 
1461 	/*
1462 	 * Go through our tx list and free mbufs for those
1463 	 * frames that have been transmitted.
1464 	 */
1465 	while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
1466 		u_int32_t		txstat;
1467 
1468 		cur_tx = sc->vr_cdata.vr_tx_head;
1469 		txstat = cur_tx->vr_ptr->vr_status;
1470 
1471 		if (txstat & VR_TXSTAT_OWN)
1472 			break;
1473 
1474 		if (txstat & VR_TXSTAT_ERRSUM) {
1475 			ifp->if_oerrors++;
1476 			if (txstat & VR_TXSTAT_DEFER)
1477 				ifp->if_collisions++;
1478 			if (txstat & VR_TXSTAT_LATECOLL)
1479 				ifp->if_collisions++;
1480 		}
1481 
1482 		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
1483 
1484 		ifp->if_opackets++;
1485 		MFREE(cur_tx->vr_mbuf, n);
1486 		cur_tx->vr_mbuf = NULL;
1487 
1488 		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
1489 			sc->vr_cdata.vr_tx_head = NULL;
1490 			sc->vr_cdata.vr_tx_tail = NULL;
1491 			break;
1492 		}
1493 
1494 		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
1495 	}
1496 
1497 	return;
1498 }
1499 
1500 /*
1501  * TX 'end of channel' interrupt handler.
1502  */
1503 static void vr_txeoc(sc)
1504 	struct vr_softc		*sc;
1505 {
1506 	struct ifnet		*ifp;
1507 
1508 	ifp = &sc->arpcom.ac_if;
1509 
1510 	ifp->if_timer = 0;
1511 
1512 	if (sc->vr_cdata.vr_tx_head == NULL) {
1513 		ifp->if_flags &= ~IFF_OACTIVE;
1514 		sc->vr_cdata.vr_tx_tail = NULL;
1515 		if (sc->vr_want_auto)
1516 			vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1517 	}
1518 
1519 	return;
1520 }
1521 
1522 static void vr_intr(arg)
1523 	void			*arg;
1524 {
1525 	struct vr_softc		*sc;
1526 	struct ifnet		*ifp;
1527 	u_int16_t		status;
1528 
1529 	sc = arg;
1530 	ifp = &sc->arpcom.ac_if;
1531 
1532 	/* Suppress unwanted interrupts. */
1533 	if (!(ifp->if_flags & IFF_UP)) {
1534 		vr_stop(sc);
1535 		return;
1536 	}
1537 
1538 	/* Disable interrupts. */
1539 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1540 
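	/*
	 * Service events until no interrupt sources remain asserted,
	 * acknowledging each status word as we go.
	 */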
1541 	for (;;) {
1542 
1543 		status = CSR_READ_2(sc, VR_ISR);
1544 		if (status)
1545 			CSR_WRITE_2(sc, VR_ISR, status);
1546 
1547 		if ((status & VR_INTRS) == 0)
1548 			break;
1549 
1550 		if (status & VR_ISR_RX_OK)
1551 			vr_rxeof(sc);
1552 
1553 		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
1554 		    (status & VR_ISR_RX_OFLOW) ||
1555 		    (status & VR_ISR_RX_DROPPED)) {
1556 			vr_rxeof(sc);
1557 			vr_rxeoc(sc);
1558 		}
1559 
1560 		if (status & VR_ISR_TX_OK) {
1561 			vr_txeof(sc);
1562 			vr_txeoc(sc);
1563 		}
1564 
1565 		if ((status & VR_ISR_TX_UNDERRUN) || (status & VR_ISR_TX_ABRT)) {
1566 			ifp->if_oerrors++;
1567 			vr_txeof(sc);
1568 			if (sc->vr_cdata.vr_tx_head != NULL) {
1569 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
1570 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
1571 			}
1572 		}
1573 
1574 		if (status & VR_ISR_BUSERR) {
1575 			vr_reset(sc);
1576 			vr_init(sc);
1577 		}
1578 	}
1579 
1580 	/* Re-enable interrupts. */
1581 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1582 
1583 	if (ifp->if_snd.ifq_head != NULL) {
1584 		vr_start(ifp);
1585 	}
1586 
1587 	return;
1588 }
1589 
1590 /*
1591  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1592  * pointers to the fragment pointers.
1593  */
1594 static int vr_encap(sc, c, m_head)
1595 	struct vr_softc		*sc;
1596 	struct vr_chain		*c;
1597 	struct mbuf		*m_head;
1598 {
1599 	int			frag = 0;
1600 	struct vr_desc		*f = NULL;
1601 	int			total_len;
1602 	struct mbuf		*m;
1603 
1604 	m = m_head;
1605 	total_len = 0;
1606 
1607 	/*
1608 	 * The VIA Rhine wants packet buffers to be longword
1609 	 * aligned, but very often our mbufs aren't. Rather than
1610 	 * waste time trying to decide when to copy and when not
1611 	 * to copy, just do it all the time.
1612 	 */
1613 	if (m != NULL) {
1614 		struct mbuf		*m_new = NULL;
1615 
1616 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1617 		if (m_new == NULL) {
1618 			printf("vr%d: no memory for tx list\n", sc->vr_unit);
1619 			return(1);
1620 		}
1621 		if (m_head->m_pkthdr.len > MHLEN) {
1622 			MCLGET(m_new, M_DONTWAIT);
1623 			if (!(m_new->m_flags & M_EXT)) {
1624 				m_freem(m_new);
1625 				printf("vr%d: no memory for tx list\n",
1626 						sc->vr_unit);
1627 				return(1);
1628 			}
1629 		}
1630 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1631 					mtod(m_new, caddr_t));
1632 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1633 		m_freem(m_head);
1634 		m_head = m_new;
1635 		/*
1636 		 * The Rhine chip doesn't auto-pad, so we have to make
1637 		 * sure to pad short frames out to the minimum frame length
1638 		 * ourselves.
1639 		 */
1640 		if (m_head->m_len < VR_MIN_FRAMELEN) {
1641 			m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
1642 			m_new->m_len = m_new->m_pkthdr.len;
1643 		}
1644 		f = c->vr_ptr;
1645 		f->vr_data = vtophys(mtod(m_new, caddr_t));
1646 		f->vr_ctl = total_len = m_new->m_len;
1647 		f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
1648 		f->vr_status = 0;
1649 		frag = 1;
1650 	}
1651 
1652 	c->vr_mbuf = m_head;
1653 	c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
1654 	c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
1655 
1656 	return(0);
1657 }
1658 
1659 /*
1660  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1661  * to the mbuf data regions directly in the transmit lists. We also save a
1662  * copy of the pointers since the transmit list fragment pointers are
1663  * physical addresses.
1664  */
1665 
1666 static void vr_start(ifp)
1667 	struct ifnet		*ifp;
1668 {
1669 	struct vr_softc		*sc;
1670 	struct mbuf		*m_head = NULL;
1671 	struct vr_chain		*cur_tx = NULL, *start_tx;
1672 
1673 	sc = ifp->if_softc;
1674 
1675 	if (sc->vr_autoneg) {
1676 		sc->vr_tx_pend = 1;
1677 		return;
1678 	}
1679 
1680 	/*
1681 	 * Check for an available queue slot. If there are none,
1682 	 * punt.
1683 	 */
1684 	if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
1685 		ifp->if_flags |= IFF_OACTIVE;
1686 		return;
1687 	}
1688 
1689 	start_tx = sc->vr_cdata.vr_tx_free;
1690 
1691 	while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
1692 		IF_DEQUEUE(&ifp->if_snd, m_head);
1693 		if (m_head == NULL)
1694 			break;
1695 
1696 		/* Pick a descriptor off the free list. */
1697 		cur_tx = sc->vr_cdata.vr_tx_free;
1698 		sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;
1699 
1700 		/* Pack the data into the descriptor. */
1701 		vr_encap(sc, cur_tx, m_head);
1702 
1703 		if (cur_tx != start_tx)
1704 			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1705 
1706 #if NBPF > 0
1707 		/*
1708 		 * If there's a BPF listener, bounce a copy of this frame
1709 		 * to him.
1710 		 */
1711 		if (ifp->if_bpf)
1712 			bpf_mtap(ifp, cur_tx->vr_mbuf);
1713 #endif
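		/* Hand the descriptor to the chip and (re)start the transmitter. */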
1714 		VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1715 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
1716 	}
1717 
1718 	/*
1719 	 * If there are no frames queued, bail.
1720 	 */
1721 	if (cur_tx == NULL)
1722 		return;
1723 
1724 	sc->vr_cdata.vr_tx_tail = cur_tx;
1725 
1726 	if (sc->vr_cdata.vr_tx_head == NULL)
1727 		sc->vr_cdata.vr_tx_head = start_tx;
1728 
1729 	/*
1730 	 * Set a timeout in case the chip goes out to lunch.
1731 	 */
1732 	ifp->if_timer = 5;
1733 
1734 	return;
1735 }
1736 
1737 static void vr_init(xsc)
1738 	void			*xsc;
1739 {
1740 	struct vr_softc		*sc = xsc;
1741 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1742 	u_int16_t		phy_bmcr = 0;
1743 	int			s;
1744 
1745 	if (sc->vr_autoneg)
1746 		return;
1747 
1748 	s = splimp();
1749 
1750 	if (sc->vr_pinfo != NULL)
1751 		phy_bmcr = vr_phy_readreg(sc, PHY_BMCR);
1752 
1753 	/*
1754 	 * Cancel pending I/O and free all RX/TX buffers.
1755 	 */
1756 	vr_stop(sc);
1757 	vr_reset(sc);
1758 
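	/* Use store-and-forward thresholds for both RX and TX. */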
1759 	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
1760 	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);
1761 
1762 	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
1763 	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
1764 
1765 	/* Init circular RX list. */
1766 	if (vr_list_rx_init(sc) == ENOBUFS) {
1767 		printf("vr%d: initialization failed: no "
1768 			"memory for rx buffers\n", sc->vr_unit);
1769 		vr_stop(sc);
1770 		(void)splx(s);
1771 		return;
1772 	}
1773 
1774 	/*
1775 	 * Init tx descriptors.
1776 	 */
1777 	vr_list_tx_init(sc);
1778 
1779 	/* If we want promiscuous mode, set the allframes bit. */
1780 	if (ifp->if_flags & IFF_PROMISC)
1781 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1782 	else
1783 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1784 
1785 	/* Set capture broadcast bit to capture broadcast frames. */
1786 	if (ifp->if_flags & IFF_BROADCAST)
1787 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1788 	else
1789 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1790 
1791 	/*
1792 	 * Program the multicast filter, if necessary.
1793 	 */
1794 	vr_setmulti(sc);
1795 
1796 	/*
1797 	 * Load the address of the RX list.
1798 	 */
1799 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1800 
1801 	/* Enable receiver and transmitter. */
1802 	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
1803 				    VR_CMD_TX_ON|VR_CMD_RX_ON|
1804 				    VR_CMD_RX_GO);
1805 
1806 	vr_setcfg(sc, vr_phy_readreg(sc, PHY_BMCR));
1807 
1808 	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));
1809 
1810 	/*
1811 	 * Enable interrupts.
1812 	 */
1813 	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
1814 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1815 
1816 	/* Restore state of BMCR */
1817 	if (sc->vr_pinfo != NULL)
1818 		vr_phy_writereg(sc, PHY_BMCR, phy_bmcr);
1819 
1820 	ifp->if_flags |= IFF_RUNNING;
1821 	ifp->if_flags &= ~IFF_OACTIVE;
1822 
1823 	(void)splx(s);
1824 
1825 	return;
1826 }
1827 
1828 /*
1829  * Set media options.
1830  */
1831 static int vr_ifmedia_upd(ifp)
1832 	struct ifnet		*ifp;
1833 {
1834 	struct vr_softc		*sc;
1835 	struct ifmedia		*ifm;
1836 
1837 	sc = ifp->if_softc;
1838 	ifm = &sc->ifmedia;
1839 
1840 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1841 		return(EINVAL);
1842 
1843 	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1844 		vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1845 	else
1846 		vr_setmode_mii(sc, ifm->ifm_media);
1847 
1848 	return(0);
1849 }
1850 
1851 /*
1852  * Report current media status.
1853  */
1854 static void vr_ifmedia_sts(ifp, ifmr)
1855 	struct ifnet		*ifp;
1856 	struct ifmediareq	*ifmr;
1857 {
1858 	struct vr_softc		*sc;
1859 	u_int16_t		advert = 0, ability = 0;
1860 
1861 	sc = ifp->if_softc;
1862 
1863 	ifmr->ifm_active = IFM_ETHER;
1864 
1865 	if (!(vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
1866 		if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
1867 			ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
1868 		else
1869 			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
1870 		if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
1871 			ifmr->ifm_active |= IFM_FDX;
1872 		else
1873 			ifmr->ifm_active |= IFM_HDX;
1874 		return;
1875 	}
1876 
1877 	ability = vr_phy_readreg(sc, PHY_LPAR);
1878 	advert = vr_phy_readreg(sc, PHY_ANAR);
1879 	if (advert & PHY_ANAR_100BT4 &&
1880 		ability & PHY_ANAR_100BT4) {
1881 		ifmr->ifm_active = IFM_ETHER|IFM_100_T4;
1882 	} else if (advert & PHY_ANAR_100BTXFULL &&
1883 		ability & PHY_ANAR_100BTXFULL) {
1884 		ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_FDX;
1885 	} else if (advert & PHY_ANAR_100BTXHALF &&
1886 		ability & PHY_ANAR_100BTXHALF) {
1887 		ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_HDX;
1888 	} else if (advert & PHY_ANAR_10BTFULL &&
1889 		ability & PHY_ANAR_10BTFULL) {
1890 		ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_FDX;
1891 	} else if (advert & PHY_ANAR_10BTHALF &&
1892 		ability & PHY_ANAR_10BTHALF) {
1893 		ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_HDX;
1894 	}
1895 
1896 	return;
1897 }
1898 
1899 static int vr_ioctl(ifp, command, data)
1900 	struct ifnet		*ifp;
1901 	u_long			command;
1902 	caddr_t			data;
1903 {
1904 	struct vr_softc		*sc = ifp->if_softc;
1905 	struct ifreq		*ifr = (struct ifreq *) data;
1906 	int			s, error = 0;
1907 
1908 	s = splimp();
1909 
1910 	switch(command) {
1911 	case SIOCSIFADDR:
1912 	case SIOCGIFADDR:
1913 	case SIOCSIFMTU:
1914 		error = ether_ioctl(ifp, command, data);
1915 		break;
1916 	case SIOCSIFFLAGS:
1917 		if (ifp->if_flags & IFF_UP) {
1918 			vr_init(sc);
1919 		} else {
1920 			if (ifp->if_flags & IFF_RUNNING)
1921 				vr_stop(sc);
1922 		}
1923 		error = 0;
1924 		break;
1925 	case SIOCADDMULTI:
1926 	case SIOCDELMULTI:
1927 		vr_setmulti(sc);
1928 		error = 0;
1929 		break;
1930 	case SIOCGIFMEDIA:
1931 	case SIOCSIFMEDIA:
1932 		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1933 		break;
1934 	default:
1935 		error = EINVAL;
1936 		break;
1937 	}
1938 
1939 	(void)splx(s);
1940 
1941 	return(error);
1942 }
1943 
1944 static void vr_watchdog(ifp)
1945 	struct ifnet		*ifp;
1946 {
1947 	struct vr_softc		*sc;
1948 
1949 	sc = ifp->if_softc;
1950 
1951 	if (sc->vr_autoneg) {
1952 		vr_autoneg_mii(sc, VR_FLAG_DELAYTIMEO, 1);
1953 		if (!(ifp->if_flags & IFF_UP))
1954 			vr_stop(sc);
1955 		return;
1956 	}
1957 
1958 	ifp->if_oerrors++;
1959 	printf("vr%d: watchdog timeout\n", sc->vr_unit);
1960 
1961 	if (!(vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1962 		printf("vr%d: no carrier - transceiver cable problem?\n",
1963 								sc->vr_unit);
1964 
1965 	vr_stop(sc);
1966 	vr_reset(sc);
1967 	vr_init(sc);
1968 
1969 	if (ifp->if_snd.ifq_head != NULL)
1970 		vr_start(ifp);
1971 
1972 	return;
1973 }
1974 
1975 /*
1976  * Stop the adapter and free any mbufs allocated to the
1977  * RX and TX lists.
1978  */
1979 static void vr_stop(sc)
1980 	struct vr_softc		*sc;
1981 {
1982 	register int		i;
1983 	struct ifnet		*ifp;
1984 
1985 	ifp = &sc->arpcom.ac_if;
1986 	ifp->if_timer = 0;
1987 
1988 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1989 	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1990 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1991 	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1992 	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1993 
1994 	/*
1995 	 * Free data in the RX lists.
1996 	 */
1997 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
1998 		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1999 			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
2000 			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
2001 		}
2002 	}
2003 	bzero((char *)&sc->vr_ldata->vr_rx_list,
2004 		sizeof(sc->vr_ldata->vr_rx_list));
2005 
2006 	/*
2007 	 * Free the TX list buffers.
2008 	 */
2009 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
2010 		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
2011 			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
2012 			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
2013 		}
2014 	}
2015 
2016 	bzero((char *)&sc->vr_ldata->vr_tx_list,
2017 		sizeof(sc->vr_ldata->vr_tx_list));
2018 
2019 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2020 
2021 	return;
2022 }
2023 
2024 /*
2025  * Stop all chip I/O so that the kernel's probe routines don't
2026  * get confused by errant DMAs when rebooting.
2027  */
2028 static void vr_shutdown(dev)
2029 	device_t		dev;
2030 {
2031 	struct vr_softc		*sc;
2032 
2033 	sc = device_get_softc(dev);
2034 
2035 	vr_stop(sc);
2036 
2037 	return;
2038 }
2039