xref: /freebsd/sys/dev/vr/if_vr.c (revision 2da199da53835ee2d9228a60717fd2d0fccf9e50)
1 /*
2  * Copyright (c) 1997, 1998
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  *	$Id: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
33  */
34 
35 /*
36  * VIA Rhine fast ethernet PCI NIC driver
37  *
38  * Supports various network adapters based on the VIA Rhine
39  * and Rhine II PCI controllers, including the D-Link DFE530TX.
40  * Datasheets are available at http://www.via.com.tw.
41  *
42  * Written by Bill Paul <wpaul@ctr.columbia.edu>
43  * Electrical Engineering Department
44  * Columbia University, New York City
45  */
46 
47 /*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated. The controller
50  * uses an MII bus and an external physical layer interface. The
51  * receiver has a one entry perfect filter and a 64-bit hash table
52  * multicast filter. Transmit and receive descriptors are similar
53  * to the tulip.
54  *
55  * The Rhine has a serious flaw in its transmit DMA mechanism:
56  * transmit buffers must be longword aligned. Unfortunately,
57  * FreeBSD doesn't guarantee that mbufs will be filled in starting
58  * at longword boundaries, so we have to do a buffer copy before
59  * transmission.
60  */
61 
62 #include "bpfilter.h"
63 
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/sockio.h>
67 #include <sys/mbuf.h>
68 #include <sys/malloc.h>
69 #include <sys/kernel.h>
70 #include <sys/socket.h>
71 
72 #include <net/if.h>
73 #include <net/if_arp.h>
74 #include <net/ethernet.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
77 
78 #if NBPFILTER > 0
79 #include <net/bpf.h>
80 #endif
81 
82 #include <vm/vm.h>              /* for vtophys */
83 #include <vm/pmap.h>            /* for vtophys */
84 #include <machine/clock.h>      /* for DELAY */
85 #include <machine/bus_pio.h>
86 #include <machine/bus_memio.h>
87 #include <machine/bus.h>
88 
89 #include <pci/pcireg.h>
90 #include <pci/pcivar.h>
91 
92 #define VR_USEIOSPACE
93 
94 /* #define VR_BACKGROUND_AUTONEG */
95 
96 #include <pci/if_vrreg.h>
97 
98 #ifndef lint
99 static const char rcsid[] =
100 	"$Id: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $";
101 #endif
102 
103 /*
104  * Various supported device vendors/types and their names.
105  */
106 static struct vr_type vr_devs[] = {
107 	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
108 		"VIA VT3043 Rhine I 10/100BaseTX" },
109 	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
110 		"VIA VT86C100A Rhine II 10/100BaseTX" },
111 	{ 0, 0, NULL }
112 };
113 
114 /*
115  * Various supported PHY vendors/types and their names. Note that
116  * this driver will work with pretty much any MII-compliant PHY,
117  * so failure to positively identify the chip is not a fatal error.
118  */
119 
120 static struct vr_type vr_phys[] = {
121 	{ TI_PHY_VENDORID, TI_PHY_10BT, "<TI ThunderLAN 10BT (internal)>" },
122 	{ TI_PHY_VENDORID, TI_PHY_100VGPMI, "<TI TNETE211 100VG Any-LAN>" },
123 	{ NS_PHY_VENDORID, NS_PHY_83840A, "<National Semiconductor DP83840A>"},
124 	{ LEVEL1_PHY_VENDORID, LEVEL1_PHY_LXT970, "<Level 1 LXT970>" },
125 	{ INTEL_PHY_VENDORID, INTEL_PHY_82555, "<Intel 82555>" },
126 	{ SEEQ_PHY_VENDORID, SEEQ_PHY_80220, "<SEEQ 80220>" },
127 	{ 0, 0, "<MII-compliant physical interface>" }
128 };
129 
130 static unsigned long vr_count = 0;
131 static const char *vr_probe	__P((pcici_t, pcidi_t));
132 static void vr_attach		__P((pcici_t, int));
133 
134 static int vr_newbuf		__P((struct vr_softc *,
135 						struct vr_chain_onefrag *));
136 static int vr_encap		__P((struct vr_softc *, struct vr_chain *,
137 						struct mbuf * ));
138 
139 static void vr_rxeof		__P((struct vr_softc *));
140 static void vr_rxeoc		__P((struct vr_softc *));
141 static void vr_txeof		__P((struct vr_softc *));
142 static void vr_txeoc		__P((struct vr_softc *));
143 static void vr_intr		__P((void *));
144 static void vr_start		__P((struct ifnet *));
145 static int vr_ioctl		__P((struct ifnet *, u_long, caddr_t));
146 static void vr_init		__P((void *));
147 static void vr_stop		__P((struct vr_softc *));
148 static void vr_watchdog		__P((struct ifnet *));
149 static void vr_shutdown		__P((int, void *));
150 static int vr_ifmedia_upd	__P((struct ifnet *));
151 static void vr_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
152 
153 static void vr_mii_sync		__P((struct vr_softc *));
154 static void vr_mii_send		__P((struct vr_softc *, u_int32_t, int));
155 static int vr_mii_readreg	__P((struct vr_softc *, struct vr_mii_frame *));
156 static int vr_mii_writereg	__P((struct vr_softc *, struct vr_mii_frame *));
157 static u_int16_t vr_phy_readreg	__P((struct vr_softc *, int));
158 static void vr_phy_writereg	__P((struct vr_softc *, u_int16_t, u_int16_t));
159 
160 static void vr_autoneg_xmit	__P((struct vr_softc *));
161 static void vr_autoneg_mii	__P((struct vr_softc *, int, int));
162 static void vr_setmode_mii	__P((struct vr_softc *, int));
163 static void vr_getmode_mii	__P((struct vr_softc *));
164 static void vr_setcfg		__P((struct vr_softc *, u_int16_t));
165 static u_int8_t vr_calchash	__P((u_int8_t *));
166 static void vr_setmulti		__P((struct vr_softc *));
167 static void vr_reset		__P((struct vr_softc *));
168 static int vr_list_rx_init	__P((struct vr_softc *));
169 static int vr_list_tx_init	__P((struct vr_softc *));
170 
171 #define VR_SETBIT(sc, reg, x)				\
172 	CSR_WRITE_1(sc, reg,				\
173 		CSR_READ_1(sc, reg) | x)
174 
175 #define VR_CLRBIT(sc, reg, x)				\
176 	CSR_WRITE_1(sc, reg,				\
177 		CSR_READ_1(sc, reg) & ~x)
178 
179 #define VR_SETBIT16(sc, reg, x)				\
180 	CSR_WRITE_2(sc, reg,				\
181 		CSR_READ_2(sc, reg) | x)
182 
183 #define VR_CLRBIT16(sc, reg, x)				\
184 	CSR_WRITE_2(sc, reg,				\
185 		CSR_READ_2(sc, reg) & ~x)
186 
187 #define VR_SETBIT32(sc, reg, x)				\
188 	CSR_WRITE_4(sc, reg,				\
189 		CSR_READ_4(sc, reg) | x)
190 
191 #define VR_CLRBIT32(sc, reg, x)				\
192 	CSR_WRITE_4(sc, reg,				\
193 		CSR_READ_4(sc, reg) & ~x)
194 
195 #define SIO_SET(x)					\
196 	CSR_WRITE_1(sc, VR_MIICMD,			\
197 		CSR_READ_1(sc, VR_MIICMD) | x)
198 
199 #define SIO_CLR(x)					\
200 	CSR_WRITE_1(sc, VR_MIICMD,			\
201 		CSR_READ_1(sc, VR_MIICMD) & ~x)
202 
203 /*
204  * Sync the PHYs by setting data bit and strobing the clock 32 times.
205  */
206 static void vr_mii_sync(sc)
207 	struct vr_softc		*sc;
208 {
209 	register int		i;
210 
211 	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
212 
213 	for (i = 0; i < 32; i++) {
214 		SIO_SET(VR_MIICMD_CLK);
215 		DELAY(1);
216 		SIO_CLR(VR_MIICMD_CLK);
217 		DELAY(1);
218 	}
219 
220 	return;
221 }
222 
223 /*
224  * Clock a series of bits through the MII.
225  */
226 static void vr_mii_send(sc, bits, cnt)
227 	struct vr_softc		*sc;
228 	u_int32_t		bits;
229 	int			cnt;
230 {
231 	int			i;
232 
233 	SIO_CLR(VR_MIICMD_CLK);
234 
235 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
236                 if (bits & i) {
237 			SIO_SET(VR_MIICMD_DATAIN);
238                 } else {
239 			SIO_CLR(VR_MIICMD_DATAIN);
240                 }
241 		DELAY(1);
242 		SIO_CLR(VR_MIICMD_CLK);
243 		DELAY(1);
244 		SIO_SET(VR_MIICMD_CLK);
245 	}
246 }
247 
248 /*
249  * Read an PHY register through the MII.
250  */
251 static int vr_mii_readreg(sc, frame)
252 	struct vr_softc		*sc;
253 	struct vr_mii_frame	*frame;
254 
255 {
256 	int			i, ack, s;
257 
258 	s = splimp();
259 
260 	/*
261 	 * Set up frame for RX.
262 	 */
263 	frame->mii_stdelim = VR_MII_STARTDELIM;
264 	frame->mii_opcode = VR_MII_READOP;
265 	frame->mii_turnaround = 0;
266 	frame->mii_data = 0;
267 
268 	CSR_WRITE_1(sc, VR_MIICMD, 0);
269 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
270 
271 	/*
272  	 * Turn on data xmit.
273 	 */
274 	SIO_SET(VR_MIICMD_DIR);
275 
276 	vr_mii_sync(sc);
277 
278 	/*
279 	 * Send command/address info.
280 	 */
281 	vr_mii_send(sc, frame->mii_stdelim, 2);
282 	vr_mii_send(sc, frame->mii_opcode, 2);
283 	vr_mii_send(sc, frame->mii_phyaddr, 5);
284 	vr_mii_send(sc, frame->mii_regaddr, 5);
285 
286 	/* Idle bit */
287 	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
288 	DELAY(1);
289 	SIO_SET(VR_MIICMD_CLK);
290 	DELAY(1);
291 
292 	/* Turn off xmit. */
293 	SIO_CLR(VR_MIICMD_DIR);
294 
295 	/* Check for ack */
296 	SIO_CLR(VR_MIICMD_CLK);
297 	DELAY(1);
298 	SIO_SET(VR_MIICMD_CLK);
299 	DELAY(1);
300 	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
301 
302 	/*
303 	 * Now try reading data bits. If the ack failed, we still
304 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
305 	 */
306 	if (ack) {
307 		for(i = 0; i < 16; i++) {
308 			SIO_CLR(VR_MIICMD_CLK);
309 			DELAY(1);
310 			SIO_SET(VR_MIICMD_CLK);
311 			DELAY(1);
312 		}
313 		goto fail;
314 	}
315 
316 	for (i = 0x8000; i; i >>= 1) {
317 		SIO_CLR(VR_MIICMD_CLK);
318 		DELAY(1);
319 		if (!ack) {
320 			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
321 				frame->mii_data |= i;
322 			DELAY(1);
323 		}
324 		SIO_SET(VR_MIICMD_CLK);
325 		DELAY(1);
326 	}
327 
328 fail:
329 
330 	SIO_CLR(VR_MIICMD_CLK);
331 	DELAY(1);
332 	SIO_SET(VR_MIICMD_CLK);
333 	DELAY(1);
334 
335 	splx(s);
336 
337 	if (ack)
338 		return(1);
339 	return(0);
340 }
341 
342 /*
343  * Write to a PHY register through the MII.
344  */
345 static int vr_mii_writereg(sc, frame)
346 	struct vr_softc		*sc;
347 	struct vr_mii_frame	*frame;
348 
349 {
350 	int			s;
351 
352 	s = splimp();
353 
354 	CSR_WRITE_1(sc, VR_MIICMD, 0);
355 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
356 
357 	/*
358 	 * Set up frame for TX.
359 	 */
360 
361 	frame->mii_stdelim = VR_MII_STARTDELIM;
362 	frame->mii_opcode = VR_MII_WRITEOP;
363 	frame->mii_turnaround = VR_MII_TURNAROUND;
364 
365 	/*
366  	 * Turn on data output.
367 	 */
368 	SIO_SET(VR_MIICMD_DIR);
369 
370 	vr_mii_sync(sc);
371 
372 	vr_mii_send(sc, frame->mii_stdelim, 2);
373 	vr_mii_send(sc, frame->mii_opcode, 2);
374 	vr_mii_send(sc, frame->mii_phyaddr, 5);
375 	vr_mii_send(sc, frame->mii_regaddr, 5);
376 	vr_mii_send(sc, frame->mii_turnaround, 2);
377 	vr_mii_send(sc, frame->mii_data, 16);
378 
379 	/* Idle bit. */
380 	SIO_SET(VR_MIICMD_CLK);
381 	DELAY(1);
382 	SIO_CLR(VR_MIICMD_CLK);
383 	DELAY(1);
384 
385 	/*
386 	 * Turn off xmit.
387 	 */
388 	SIO_CLR(VR_MIICMD_DIR);
389 
390 	splx(s);
391 
392 	return(0);
393 }
394 
395 static u_int16_t vr_phy_readreg(sc, reg)
396 	struct vr_softc		*sc;
397 	int			reg;
398 {
399 	struct vr_mii_frame	frame;
400 
401 	bzero((char *)&frame, sizeof(frame));
402 
403 	frame.mii_phyaddr = sc->vr_phy_addr;
404 	frame.mii_regaddr = reg;
405 	vr_mii_readreg(sc, &frame);
406 
407 	return(frame.mii_data);
408 }
409 
410 static void vr_phy_writereg(sc, reg, data)
411 	struct vr_softc		*sc;
412 	u_int16_t		reg;
413 	u_int16_t		data;
414 {
415 	struct vr_mii_frame	frame;
416 
417 	bzero((char *)&frame, sizeof(frame));
418 
419 	frame.mii_phyaddr = sc->vr_phy_addr;
420 	frame.mii_regaddr = reg;
421 	frame.mii_data = data;
422 
423 	vr_mii_writereg(sc, &frame);
424 
425 	return;
426 }
427 
428 /*
429  * Calculate CRC of a multicast group address, return the lower 6 bits.
430  */
431 static u_int8_t vr_calchash(addr)
432 	u_int8_t		*addr;
433 {
434 	u_int32_t		crc, carry;
435 	int			i, j;
436 	u_int8_t		c;
437 
438 	/* Compute CRC for the address value. */
439 	crc = 0xFFFFFFFF; /* initial value */
440 
441 	for (i = 0; i < 6; i++) {
442 		c = *(addr + i);
443 		for (j = 0; j < 8; j++) {
444 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
445 			crc <<= 1;
446 			c >>= 1;
447 			if (carry)
448 				crc = (crc ^ 0x04c11db6) | carry;
449 		}
450 	}
451 
452 	/* return the filter bit position */
453 	return((crc >> 26) & 0x0000003F);
454 }
455 
456 /*
457  * Program the 64-bit multicast hash filter.
458  */
459 static void vr_setmulti(sc)
460 	struct vr_softc		*sc;
461 {
462 	struct ifnet		*ifp;
463 	int			h = 0;
464 	u_int32_t		hashes[2] = { 0, 0 };
465 	struct ifmultiaddr	*ifma;
466 	u_int8_t		rxfilt;
467 	int			mcnt = 0;
468 
469 	ifp = &sc->arpcom.ac_if;
470 
471 	rxfilt = CSR_READ_1(sc, VR_RXCFG);
472 
473 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
474 		rxfilt |= VR_RXCFG_RX_MULTI;
475 		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
476 		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
477 		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
478 		return;
479 	}
480 
481 	/* first, zot all the existing hash bits */
482 	CSR_WRITE_4(sc, VR_MAR0, 0);
483 	CSR_WRITE_4(sc, VR_MAR1, 0);
484 
485 	/* now program new ones */
486 	for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
487 				ifma = ifma->ifma_link.le_next) {
488 		if (ifma->ifma_addr->sa_family != AF_LINK)
489 			continue;
490 		h = vr_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
491 		if (h < 32)
492 			hashes[0] |= (1 << h);
493 		else
494 			hashes[1] |= (1 << (h - 32));
495 		mcnt++;
496 	}
497 
498 	if (mcnt)
499 		rxfilt |= VR_RXCFG_RX_MULTI;
500 	else
501 		rxfilt &= ~VR_RXCFG_RX_MULTI;
502 
503 	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
504 	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
505 	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
506 
507 	return;
508 }
509 
510 /*
511  * Initiate an autonegotiation session.
512  */
513 static void vr_autoneg_xmit(sc)
514 	struct vr_softc		*sc;
515 {
516 	u_int16_t		phy_sts;
517 
518 	vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
519 	DELAY(500);
520 	while(vr_phy_readreg(sc, PHY_BMCR)
521 			& PHY_BMCR_RESET);
522 
523 	phy_sts = vr_phy_readreg(sc, PHY_BMCR);
524 	phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
525 	vr_phy_writereg(sc, PHY_BMCR, phy_sts);
526 
527 	return;
528 }
529 
530 /*
531  * Invoke autonegotiation on a PHY.
532  */
533 static void vr_autoneg_mii(sc, flag, verbose)
534 	struct vr_softc		*sc;
535 	int			flag;
536 	int			verbose;
537 {
538 	u_int16_t		phy_sts = 0, media, advert, ability;
539 	struct ifnet		*ifp;
540 	struct ifmedia		*ifm;
541 
542 	ifm = &sc->ifmedia;
543 	ifp = &sc->arpcom.ac_if;
544 
545 	ifm->ifm_media = IFM_ETHER | IFM_AUTO;
546 
547 	/*
548 	 * The 100baseT4 PHY on the 3c905-T4 has the 'autoneg supported'
549 	 * bit cleared in the status register, but has the 'autoneg enabled'
550 	 * bit set in the control register. This is a contradiction, and
551 	 * I'm not sure how to handle it. If you want to force an attempt
552 	 * to autoneg for 100baseT4 PHYs, #define FORCE_AUTONEG_TFOUR
553 	 * and see what happens.
554 	 */
555 #ifndef FORCE_AUTONEG_TFOUR
556 	/*
557 	 * First, see if autoneg is supported. If not, there's
558 	 * no point in continuing.
559 	 */
560 	phy_sts = vr_phy_readreg(sc, PHY_BMSR);
561 	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
562 		if (verbose)
563 			printf("vr%d: autonegotiation not supported\n",
564 							sc->vr_unit);
565 		ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
566 		return;
567 	}
568 #endif
569 
570 	switch (flag) {
571 	case VR_FLAG_FORCEDELAY:
572 		/*
573 	 	 * XXX Never use this option anywhere but in the probe
574 	 	 * routine: making the kernel stop dead in its tracks
575  		 * for three whole seconds after we've gone multi-user
576 		 * is really bad manners.
577 	 	 */
578 		vr_autoneg_xmit(sc);
579 		DELAY(5000000);
580 		break;
581 	case VR_FLAG_SCHEDDELAY:
582 		/*
583 		 * Wait for the transmitter to go idle before starting
584 		 * an autoneg session, otherwise vr_start() may clobber
585 	 	 * our timeout, and we don't want to allow transmission
586 		 * during an autoneg session since that can screw it up.
587 	 	 */
588 		if (sc->vr_cdata.vr_tx_head != NULL) {
589 			sc->vr_want_auto = 1;
590 			return;
591 		}
592 		vr_autoneg_xmit(sc);
593 		ifp->if_timer = 5;
594 		sc->vr_autoneg = 1;
595 		sc->vr_want_auto = 0;
596 		return;
597 		break;
598 	case VR_FLAG_DELAYTIMEO:
599 		ifp->if_timer = 0;
600 		sc->vr_autoneg = 0;
601 		break;
602 	default:
603 		printf("vr%d: invalid autoneg flag: %d\n", sc->vr_unit, flag);
604 		return;
605 	}
606 
607 	if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
608 		if (verbose)
609 			printf("vr%d: autoneg complete, ", sc->vr_unit);
610 		phy_sts = vr_phy_readreg(sc, PHY_BMSR);
611 	} else {
612 		if (verbose)
613 			printf("vr%d: autoneg not complete, ", sc->vr_unit);
614 	}
615 
616 	media = vr_phy_readreg(sc, PHY_BMCR);
617 
618 	/* Link is good. Report modes and set duplex mode. */
619 	if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
620 		if (verbose)
621 			printf("link status good ");
622 		advert = vr_phy_readreg(sc, PHY_ANAR);
623 		ability = vr_phy_readreg(sc, PHY_LPAR);
624 
625 		if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
626 			ifm->ifm_media = IFM_ETHER|IFM_100_T4;
627 			media |= PHY_BMCR_SPEEDSEL;
628 			media &= ~PHY_BMCR_DUPLEX;
629 			printf("(100baseT4)\n");
630 		} else if (advert & PHY_ANAR_100BTXFULL &&
631 			ability & PHY_ANAR_100BTXFULL) {
632 			ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
633 			media |= PHY_BMCR_SPEEDSEL;
634 			media |= PHY_BMCR_DUPLEX;
635 			printf("(full-duplex, 100Mbps)\n");
636 		} else if (advert & PHY_ANAR_100BTXHALF &&
637 			ability & PHY_ANAR_100BTXHALF) {
638 			ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
639 			media |= PHY_BMCR_SPEEDSEL;
640 			media &= ~PHY_BMCR_DUPLEX;
641 			printf("(half-duplex, 100Mbps)\n");
642 		} else if (advert & PHY_ANAR_10BTFULL &&
643 			ability & PHY_ANAR_10BTFULL) {
644 			ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
645 			media &= ~PHY_BMCR_SPEEDSEL;
646 			media |= PHY_BMCR_DUPLEX;
647 			printf("(full-duplex, 10Mbps)\n");
648 		} else {
649 			ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
650 			media &= ~PHY_BMCR_SPEEDSEL;
651 			media &= ~PHY_BMCR_DUPLEX;
652 			printf("(half-duplex, 10Mbps)\n");
653 		}
654 
655 		media &= ~PHY_BMCR_AUTONEGENBL;
656 
657 		/* Set ASIC's duplex mode to match the PHY. */
658 		vr_setcfg(sc, media);
659 		vr_phy_writereg(sc, PHY_BMCR, media);
660 	} else {
661 		if (verbose)
662 			printf("no carrier\n");
663 	}
664 
665 	vr_init(sc);
666 
667 	if (sc->vr_tx_pend) {
668 		sc->vr_autoneg = 0;
669 		sc->vr_tx_pend = 0;
670 		vr_start(ifp);
671 	}
672 
673 	return;
674 }
675 
676 static void vr_getmode_mii(sc)
677 	struct vr_softc		*sc;
678 {
679 	u_int16_t		bmsr;
680 	struct ifnet		*ifp;
681 
682 	ifp = &sc->arpcom.ac_if;
683 
684 	bmsr = vr_phy_readreg(sc, PHY_BMSR);
685 	if (bootverbose)
686 		printf("vr%d: PHY status word: %x\n", sc->vr_unit, bmsr);
687 
688 	/* fallback */
689 	sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
690 
691 	if (bmsr & PHY_BMSR_10BTHALF) {
692 		if (bootverbose)
693 			printf("vr%d: 10Mbps half-duplex mode supported\n",
694 								sc->vr_unit);
695 		ifmedia_add(&sc->ifmedia,
696 			IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
697 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
698 	}
699 
700 	if (bmsr & PHY_BMSR_10BTFULL) {
701 		if (bootverbose)
702 			printf("vr%d: 10Mbps full-duplex mode supported\n",
703 								sc->vr_unit);
704 		ifmedia_add(&sc->ifmedia,
705 			IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
706 		sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
707 	}
708 
709 	if (bmsr & PHY_BMSR_100BTXHALF) {
710 		if (bootverbose)
711 			printf("vr%d: 100Mbps half-duplex mode supported\n",
712 								sc->vr_unit);
713 		ifp->if_baudrate = 100000000;
714 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
715 		ifmedia_add(&sc->ifmedia,
716 			IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
717 		sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
718 	}
719 
720 	if (bmsr & PHY_BMSR_100BTXFULL) {
721 		if (bootverbose)
722 			printf("vr%d: 100Mbps full-duplex mode supported\n",
723 								sc->vr_unit);
724 		ifp->if_baudrate = 100000000;
725 		ifmedia_add(&sc->ifmedia,
726 			IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
727 		sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
728 	}
729 
730 	/* Some also support 100BaseT4. */
731 	if (bmsr & PHY_BMSR_100BT4) {
732 		if (bootverbose)
733 			printf("vr%d: 100baseT4 mode supported\n", sc->vr_unit);
734 		ifp->if_baudrate = 100000000;
735 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL);
736 		sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_T4;
737 #ifdef FORCE_AUTONEG_TFOUR
738 		if (bootverbose)
739 			printf("vr%d: forcing on autoneg support for BT4\n",
740 							 sc->vr_unit);
741 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0 NULL):
742 		sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
743 #endif
744 	}
745 
746 	if (bmsr & PHY_BMSR_CANAUTONEG) {
747 		if (bootverbose)
748 			printf("vr%d: autoneg supported\n", sc->vr_unit);
749 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
750 		sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
751 	}
752 
753 	return;
754 }
755 
756 /*
757  * Set speed and duplex mode.
758  */
759 static void vr_setmode_mii(sc, media)
760 	struct vr_softc		*sc;
761 	int			media;
762 {
763 	u_int16_t		bmcr;
764 	struct ifnet		*ifp;
765 
766 	ifp = &sc->arpcom.ac_if;
767 
768 	/*
769 	 * If an autoneg session is in progress, stop it.
770 	 */
771 	if (sc->vr_autoneg) {
772 		printf("vr%d: canceling autoneg session\n", sc->vr_unit);
773 		ifp->if_timer = sc->vr_autoneg = sc->vr_want_auto = 0;
774 		bmcr = vr_phy_readreg(sc, PHY_BMCR);
775 		bmcr &= ~PHY_BMCR_AUTONEGENBL;
776 		vr_phy_writereg(sc, PHY_BMCR, bmcr);
777 	}
778 
779 	printf("vr%d: selecting MII, ", sc->vr_unit);
780 
781 	bmcr = vr_phy_readreg(sc, PHY_BMCR);
782 
783 	bmcr &= ~(PHY_BMCR_AUTONEGENBL|PHY_BMCR_SPEEDSEL|
784 			PHY_BMCR_DUPLEX|PHY_BMCR_LOOPBK);
785 
786 	if (IFM_SUBTYPE(media) == IFM_100_T4) {
787 		printf("100Mbps/T4, half-duplex\n");
788 		bmcr |= PHY_BMCR_SPEEDSEL;
789 		bmcr &= ~PHY_BMCR_DUPLEX;
790 	}
791 
792 	if (IFM_SUBTYPE(media) == IFM_100_TX) {
793 		printf("100Mbps, ");
794 		bmcr |= PHY_BMCR_SPEEDSEL;
795 	}
796 
797 	if (IFM_SUBTYPE(media) == IFM_10_T) {
798 		printf("10Mbps, ");
799 		bmcr &= ~PHY_BMCR_SPEEDSEL;
800 	}
801 
802 	if ((media & IFM_GMASK) == IFM_FDX) {
803 		printf("full duplex\n");
804 		bmcr |= PHY_BMCR_DUPLEX;
805 	} else {
806 		printf("half duplex\n");
807 		bmcr &= ~PHY_BMCR_DUPLEX;
808 	}
809 
810 	vr_setcfg(sc, bmcr);
811 	vr_phy_writereg(sc, PHY_BMCR, bmcr);
812 
813 	return;
814 }
815 
816 /*
817  * In order to fiddle with the
818  * 'full-duplex' and '100Mbps' bits in the netconfig register, we
819  * first have to put the transmit and/or receive logic in the idle state.
820  */
821 static void vr_setcfg(sc, bmcr)
822 	struct vr_softc		*sc;
823 	u_int16_t		bmcr;
824 {
825 	int			restart = 0;
826 
827 	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
828 		restart = 1;
829 		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
830 	}
831 
832 	if (bmcr & PHY_BMCR_DUPLEX)
833 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
834 	else
835 		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
836 
837 	if (restart)
838 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
839 
840 	return;
841 }
842 
843 static void vr_reset(sc)
844 	struct vr_softc		*sc;
845 {
846 	register int		i;
847 
848 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
849 
850 	for (i = 0; i < VR_TIMEOUT; i++) {
851 		DELAY(10);
852 		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
853 			break;
854 	}
855 	if (i == VR_TIMEOUT)
856 		printf("vr%d: reset never completed!\n", sc->vr_unit);
857 
858 	/* Wait a little while for the chip to get its brains in order. */
859 	DELAY(1000);
860 
861         return;
862 }
863 
864 /*
865  * Probe for a VIA Rhine chip. Check the PCI vendor and device
866  * IDs against our list and return a device name if we find a match.
867  */
868 static const char *
869 vr_probe(config_id, device_id)
870 	pcici_t			config_id;
871 	pcidi_t			device_id;
872 {
873 	struct vr_type		*t;
874 
875 	t = vr_devs;
876 
877 	while(t->vr_name != NULL) {
878 		if ((device_id & 0xFFFF) == t->vr_vid &&
879 		    ((device_id >> 16) & 0xFFFF) == t->vr_did) {
880 			return(t->vr_name);
881 		}
882 		t++;
883 	}
884 
885 	return(NULL);
886 }
887 
888 /*
889  * Attach the interface. Allocate softc structures, do ifmedia
890  * setup and ethernet/BPF attach.
891  */
892 static void
893 vr_attach(config_id, unit)
894 	pcici_t			config_id;
895 	int			unit;
896 {
897 	int			s, i;
898 #ifndef VR_USEIOSPACE
899 	vm_offset_t		pbase, vbase;
900 #endif
901 	u_char			eaddr[ETHER_ADDR_LEN];
902 	u_int32_t		command;
903 	struct vr_softc		*sc;
904 	struct ifnet		*ifp;
905 	int			media = IFM_ETHER|IFM_100_TX|IFM_FDX;
906 	unsigned int		round;
907 	caddr_t			roundptr;
908 	struct vr_type		*p;
909 	u_int16_t		phy_vid, phy_did, phy_sts;
910 
911 	s = splimp();
912 
913 	sc = malloc(sizeof(struct vr_softc), M_DEVBUF, M_NOWAIT);
914 	if (sc == NULL) {
915 		printf("vr%d: no memory for softc struct!\n", unit);
916 		return;
917 	}
918 	bzero(sc, sizeof(struct vr_softc));
919 
920 	/*
921 	 * Handle power management nonsense.
922 	 */
923 
924 	command = pci_conf_read(config_id, VR_PCI_CAPID) & 0x000000FF;
925 	if (command == 0x01) {
926 
927 		command = pci_conf_read(config_id, VR_PCI_PWRMGMTCTRL);
928 		if (command & VR_PSTATE_MASK) {
929 			u_int32_t		iobase, membase, irq;
930 
931 			/* Save important PCI config data. */
932 			iobase = pci_conf_read(config_id, VR_PCI_LOIO);
933 			membase = pci_conf_read(config_id, VR_PCI_LOMEM);
934 			irq = pci_conf_read(config_id, VR_PCI_INTLINE);
935 
936 			/* Reset the power state. */
937 			printf("vr%d: chip is in D%d power mode "
938 			"-- setting to D0\n", unit, command & VR_PSTATE_MASK);
939 			command &= 0xFFFFFFFC;
940 			pci_conf_write(config_id, VR_PCI_PWRMGMTCTRL, command);
941 
942 			/* Restore PCI config data. */
943 			pci_conf_write(config_id, VR_PCI_LOIO, iobase);
944 			pci_conf_write(config_id, VR_PCI_LOMEM, membase);
945 			pci_conf_write(config_id, VR_PCI_INTLINE, irq);
946 		}
947 	}
948 
949 	/*
950 	 * Map control/status registers.
951 	 */
952 	command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
953 	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
954 	pci_conf_write(config_id, PCI_COMMAND_STATUS_REG, command);
955 	command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
956 
957 #ifdef VR_USEIOSPACE
958 	if (!(command & PCIM_CMD_PORTEN)) {
959 		printf("vr%d: failed to enable I/O ports!\n", unit);
960 		free(sc, M_DEVBUF);
961 		goto fail;
962 	}
963 
964 	if (!pci_map_port(config_id, VR_PCI_LOIO,
965 					(u_int16_t *)(&sc->vr_bhandle))) {
966 		printf ("vr%d: couldn't map ports\n", unit);
967 		goto fail;
968 	}
969 	sc->vr_btag = I386_BUS_SPACE_IO;
970 #else
971 	if (!(command & PCIM_CMD_MEMEN)) {
972 		printf("vr%d: failed to enable memory mapping!\n", unit);
973 		goto fail;
974 	}
975 
976 	if (!pci_map_mem(config_id, VR_PCI_LOMEM, &vbase, &pbase)) {
977 		printf ("vr%d: couldn't map memory\n", unit);
978 		goto fail;
979 	}
980 
981 	sc->vr_bhandle = vbase;
982 	sc->vr_btag = I386_BUS_SPACE_MEM;
983 #endif
984 
985 	/* Allocate interrupt */
986 	if (!pci_map_int(config_id, vr_intr, sc, &net_imask)) {
987 		printf("vr%d: couldn't map interrupt\n", unit);
988 		goto fail;
989 	}
990 
991 	/* Reset the adapter. */
992 	vr_reset(sc);
993 
994 	/*
995 	 * Get station address. The way the Rhine chips work,
996 	 * you're not allowed to directly access the EEPROM once
997 	 * they've been programmed a special way. Consequently,
998 	 * we need to read the node address from the PAR0 and PAR1
999 	 * registers.
1000 	 */
1001 	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
1002 	DELAY(200);
1003 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1004 		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
1005 
1006 	/*
1007 	 * A Rhine chip was detected. Inform the world.
1008 	 */
1009 	printf("vr%d: Ethernet address: %6D\n", unit, eaddr, ":");
1010 
1011 	sc->vr_unit = unit;
1012 	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
1013 
1014 	sc->vr_ldata_ptr = malloc(sizeof(struct vr_list_data) + 8,
1015 				M_DEVBUF, M_NOWAIT);
1016 	if (sc->vr_ldata_ptr == NULL) {
1017 		free(sc, M_DEVBUF);
1018 		printf("vr%d: no memory for list buffers!\n", unit);
1019 		return;
1020 	}
1021 
1022 	sc->vr_ldata = (struct vr_list_data *)sc->vr_ldata_ptr;
1023 	round = (unsigned int)sc->vr_ldata_ptr & 0xF;
1024 	roundptr = sc->vr_ldata_ptr;
1025 	for (i = 0; i < 8; i++) {
1026 		if (round % 8) {
1027 			round++;
1028 			roundptr++;
1029 		} else
1030 			break;
1031 	}
1032 	sc->vr_ldata = (struct vr_list_data *)roundptr;
1033 	bzero(sc->vr_ldata, sizeof(struct vr_list_data));
1034 
1035 	ifp = &sc->arpcom.ac_if;
1036 	ifp->if_softc = sc;
1037 	ifp->if_unit = unit;
1038 	ifp->if_name = "vr";
1039 	ifp->if_mtu = ETHERMTU;
1040 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1041 	ifp->if_ioctl = vr_ioctl;
1042 	ifp->if_output = ether_output;
1043 	ifp->if_start = vr_start;
1044 	ifp->if_watchdog = vr_watchdog;
1045 	ifp->if_init = vr_init;
1046 	ifp->if_baudrate = 10000000;
1047 	ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;
1048 
1049 	if (bootverbose)
1050 		printf("vr%d: probing for a PHY\n", sc->vr_unit);
1051 	for (i = VR_PHYADDR_MIN; i < VR_PHYADDR_MAX + 1; i++) {
1052 		if (bootverbose)
1053 			printf("vr%d: checking address: %d\n",
1054 						sc->vr_unit, i);
1055 		sc->vr_phy_addr = i;
1056 		vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
1057 		DELAY(500);
1058 		while(vr_phy_readreg(sc, PHY_BMCR)
1059 				& PHY_BMCR_RESET);
1060 		if ((phy_sts = vr_phy_readreg(sc, PHY_BMSR)))
1061 			break;
1062 	}
1063 	if (phy_sts) {
1064 		phy_vid = vr_phy_readreg(sc, PHY_VENID);
1065 		phy_did = vr_phy_readreg(sc, PHY_DEVID);
1066 		if (bootverbose)
1067 			printf("vr%d: found PHY at address %d, ",
1068 					sc->vr_unit, sc->vr_phy_addr);
1069 		if (bootverbose)
1070 			printf("vendor id: %x device id: %x\n",
1071 				phy_vid, phy_did);
1072 		p = vr_phys;
1073 		while(p->vr_vid) {
1074 			if (phy_vid == p->vr_vid &&
1075 				(phy_did | 0x000F) == p->vr_did) {
1076 				sc->vr_pinfo = p;
1077 				break;
1078 			}
1079 			p++;
1080 		}
1081 		if (sc->vr_pinfo == NULL)
1082 			sc->vr_pinfo = &vr_phys[PHY_UNKNOWN];
1083 		if (bootverbose)
1084 			printf("vr%d: PHY type: %s\n",
1085 				sc->vr_unit, sc->vr_pinfo->vr_name);
1086 	} else {
1087 		printf("vr%d: MII without any phy!\n", sc->vr_unit);
1088 		goto fail;
1089 	}
1090 
1091 	/*
1092 	 * Do ifmedia setup.
1093 	 */
1094 	ifmedia_init(&sc->ifmedia, 0, vr_ifmedia_upd, vr_ifmedia_sts);
1095 
1096 	vr_getmode_mii(sc);
1097 	vr_autoneg_mii(sc, VR_FLAG_FORCEDELAY, 1);
1098 	media = sc->ifmedia.ifm_media;
1099 	vr_stop(sc);
1100 
1101 	ifmedia_set(&sc->ifmedia, media);
1102 
1103 	/*
1104 	 * Call MI attach routines.
1105 	 */
1106 	if_attach(ifp);
1107 	ether_ifattach(ifp);
1108 
1109 #if NBPFILTER > 0
1110 	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
1111 #endif
1112 
1113 	at_shutdown(vr_shutdown, sc, SHUTDOWN_POST_SYNC);
1114 
1115 fail:
1116 	splx(s);
1117 	return;
1118 }
1119 
1120 /*
1121  * Initialize the transmit descriptors.
1122  */
1123 static int vr_list_tx_init(sc)
1124 	struct vr_softc		*sc;
1125 {
1126 	struct vr_chain_data	*cd;
1127 	struct vr_list_data	*ld;
1128 	int			i;
1129 
1130 	cd = &sc->vr_cdata;
1131 	ld = sc->vr_ldata;
1132 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
1133 		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
1134 		if (i == (VR_TX_LIST_CNT - 1))
1135 			cd->vr_tx_chain[i].vr_nextdesc =
1136 				&cd->vr_tx_chain[0];
1137 		else
1138 			cd->vr_tx_chain[i].vr_nextdesc =
1139 				&cd->vr_tx_chain[i + 1];
1140 	}
1141 
1142 	cd->vr_tx_free = &cd->vr_tx_chain[0];
1143 	cd->vr_tx_tail = cd->vr_tx_head = NULL;
1144 
1145 	return(0);
1146 }
1147 
1148 
1149 /*
1150  * Initialize the RX descriptors and allocate mbufs for them. Note that
1151  * we arrange the descriptors in a closed ring, so that the last descriptor
1152  * points back to the first.
1153  */
1154 static int vr_list_rx_init(sc)
1155 	struct vr_softc		*sc;
1156 {
1157 	struct vr_chain_data	*cd;
1158 	struct vr_list_data	*ld;
1159 	int			i;
1160 
1161 	cd = &sc->vr_cdata;
1162 	ld = sc->vr_ldata;
1163 
1164 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
1165 		cd->vr_rx_chain[i].vr_ptr =
1166 			(struct vr_desc *)&ld->vr_rx_list[i];
1167 		if (vr_newbuf(sc, &cd->vr_rx_chain[i]) == ENOBUFS)
1168 			return(ENOBUFS);
1169 		if (i == (VR_RX_LIST_CNT - 1)) {
1170 			cd->vr_rx_chain[i].vr_nextdesc =
1171 					&cd->vr_rx_chain[0];
1172 			ld->vr_rx_list[i].vr_next =
1173 					vtophys(&ld->vr_rx_list[0]);
1174 		} else {
1175 			cd->vr_rx_chain[i].vr_nextdesc =
1176 					&cd->vr_rx_chain[i + 1];
1177 			ld->vr_rx_list[i].vr_next =
1178 					vtophys(&ld->vr_rx_list[i + 1]);
1179 		}
1180 	}
1181 
1182 	cd->vr_rx_head = &cd->vr_rx_chain[0];
1183 
1184 	return(0);
1185 }
1186 
1187 /*
1188  * Initialize an RX descriptor and attach an MBUF cluster.
1189  * Note: the length fields are only 11 bits wide, which means the
1190  * largest size we can specify is 2047. This is important because
1191  * MCLBYTES is 2048, so we have to subtract one otherwise we'll
1192  * overflow the field and make a mess.
1193  */
1194 static int vr_newbuf(sc, c)
1195 	struct vr_softc		*sc;
1196 	struct vr_chain_onefrag	*c;
1197 {
1198 	struct mbuf		*m_new = NULL;
1199 
1200 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1201 	if (m_new == NULL) {
1202 		printf("vr%d: no memory for rx list -- packet dropped!\n",
1203 								sc->vr_unit);
1204 		return(ENOBUFS);
1205 	}
1206 
1207 	MCLGET(m_new, M_DONTWAIT);
1208 	if (!(m_new->m_flags & M_EXT)) {
1209 		printf("vr%d: no memory for rx list -- packet dropped!\n",
1210 								sc->vr_unit);
1211 		m_freem(m_new);
1212 		return(ENOBUFS);
1213 	}
1214 
1215 	c->vr_mbuf = m_new;
1216 	c->vr_ptr->vr_status = VR_RXSTAT;
1217 	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
1218 	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
1219 
1220 	return(0);
1221 }
1222 
1223 /*
1224  * A frame has been uploaded: pass the resulting mbuf chain up to
1225  * the higher level protocols.
1226  */
static void vr_rxeof(sc)
	struct vr_softc		*sc;
{
        struct ether_header	*eh;
        struct mbuf		*m;
        struct ifnet		*ifp;
	struct vr_chain_onefrag	*cur_rx;
	int			total_len = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Walk the RX ring, consuming descriptors until we reach one
	 * the chip still owns (OWN set means no frame has landed there
	 * yet).
	 */
	while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
							VR_RXSTAT_OWN)) {
		cur_rx = sc->vr_cdata.vr_rx_head;
		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
	 	 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			printf("vr%d: rx error: ", sc->vr_unit);
			/* Decode the error cause from the low status byte. */
			switch(rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				printf("crc error\n");
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				printf("frame alignment error\n");
				break;
			case VR_RXSTAT_FIFOOFLOW:
				printf("FIFO overflow\n");
				break;
			case VR_RXSTAT_GIANT:
				printf("received giant packet\n");
				break;
			case VR_RXSTAT_RUNT:
				printf("received runt packet\n");
				break;
			case VR_RXSTAT_BUSERR:
				printf("system bus error\n");
				break;
			case VR_RXSTAT_BUFFERR:
				printf("rx buffer error\n");
				break;
			default:
				printf("unknown rx error\n");
				break;
			}
			/* Re-arm the descriptor with its existing cluster. */
			cur_rx->vr_ptr->vr_status = VR_RXSTAT;
			cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
			continue;
		}

		/* No errors; receive the packet. */
		m = cur_rx->vr_mbuf;
		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
	 	 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (vr_newbuf(sc, cur_rx) == ENOBUFS) {
			ifp->if_ierrors++;
			cur_rx->vr_ptr->vr_status = VR_RXSTAT;
			cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
			continue;
		}

		/* vr_newbuf() replaced the cluster, so 'm' is now ours. */
		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's
		 * a broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp, m);
			if (ifp->if_flags & IFF_PROMISC &&
				(bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
						ETHER_ADDR_LEN) &&
					(eh->ether_dhost[0] & 1) == 0)) {
				m_freem(m);
				continue;
			}
		}
#endif
		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	return;
}
1340 
/*
 * RX 'end of channel' handler: drain the ring, then restart the
 * receiver after an error or no-buffer condition.
 */
void vr_rxeoc(sc)
	struct vr_softc		*sc;
{

	/* Harvest any frames that completed before the stall. */
	vr_rxeof(sc);
	/*
	 * Restart sequence: stop RX, reload the RX descriptor pointer
	 * with the current ring head, then turn RX back on and kick
	 * it. The order of these register accesses is deliberate.
	 */
	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);

	return;
}
1353 
1354 /*
1355  * A frame was downloaded to the chip. It's safe for us to clean up
1356  * the list buffers.
1357  */
1358 
static void vr_txeof(sc)
	struct vr_softc		*sc;
{
	struct vr_chain		*cur_tx;
	struct ifnet		*ifp;
	register struct mbuf	*n;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/* Sanity check. */
	if (sc->vr_cdata.vr_tx_head == NULL)
		return;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
		u_int32_t		txstat;

		cur_tx = sc->vr_cdata.vr_tx_head;
		txstat = cur_tx->vr_ptr->vr_status;

		/* Chip still owns this descriptor; stop harvesting. */
		if (txstat & VR_TXSTAT_OWN)
			break;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		/* Accumulate the hardware collision count field. */
		ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		/*
		 * MFREE (single mbuf) suffices here because vr_encap()
		 * always collapses each frame into one mbuf/cluster.
		 */
        	MFREE(cur_tx->vr_mbuf, n);
		cur_tx->vr_mbuf = NULL;

		/* Just processed the last in-flight frame: ring empty. */
		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
			sc->vr_cdata.vr_tx_head = NULL;
			sc->vr_cdata.vr_tx_tail = NULL;
			break;
		}

		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
	}

	return;
}
1413 
1414 /*
1415  * TX 'end of channel' interrupt handler.
1416  */
1417 static void vr_txeoc(sc)
1418 	struct vr_softc		*sc;
1419 {
1420 	struct ifnet		*ifp;
1421 
1422 	ifp = &sc->arpcom.ac_if;
1423 
1424 	ifp->if_timer = 0;
1425 
1426 	if (sc->vr_cdata.vr_tx_head == NULL) {
1427 		ifp->if_flags &= ~IFF_OACTIVE;
1428 		sc->vr_cdata.vr_tx_tail = NULL;
1429 		if (sc->vr_want_auto)
1430 			vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1431 	}
1432 
1433 	return;
1434 }
1435 
1436 static void vr_intr(arg)
1437 	void			*arg;
1438 {
1439 	struct vr_softc		*sc;
1440 	struct ifnet		*ifp;
1441 	u_int16_t		status;
1442 
1443 	sc = arg;
1444 	ifp = &sc->arpcom.ac_if;
1445 
1446 	/* Supress unwanted interrupts. */
1447 	if (!(ifp->if_flags & IFF_UP)) {
1448 		vr_stop(sc);
1449 		return;
1450 	}
1451 
1452 	/* Disable interrupts. */
1453 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1454 
1455 	for (;;) {
1456 
1457 		status = CSR_READ_2(sc, VR_ISR);
1458 		if (status)
1459 			CSR_WRITE_2(sc, VR_ISR, status);
1460 
1461 		if ((status & VR_INTRS) == 0)
1462 			break;
1463 
1464 		if (status & VR_ISR_RX_OK)
1465 			vr_rxeof(sc);
1466 
1467 		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
1468 		    (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW) ||
1469 		    (status & VR_ISR_RX_DROPPED)) {
1470 			vr_rxeof(sc);
1471 			vr_rxeoc(sc);
1472 		}
1473 
1474 		if (status & VR_ISR_TX_OK) {
1475 			vr_txeof(sc);
1476 			vr_txeoc(sc);
1477 		}
1478 
1479 		if ((status & VR_ISR_TX_UNDERRUN)||(status & VR_ISR_TX_ABRT)){
1480 			ifp->if_oerrors++;
1481 			vr_txeof(sc);
1482 			if (sc->vr_cdata.vr_tx_head != NULL) {
1483 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
1484 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
1485 			}
1486 		}
1487 
1488 		if (status & VR_ISR_BUSERR) {
1489 			vr_reset(sc);
1490 			vr_init(sc);
1491 		}
1492 	}
1493 
1494 	/* Re-enable interrupts. */
1495 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1496 
1497 	if (ifp->if_snd.ifq_head != NULL) {
1498 		vr_start(ifp);
1499 	}
1500 
1501 	return;
1502 }
1503 
1504 /*
1505  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1506  * pointers to the fragment pointers.
1507  */
1508 static int vr_encap(sc, c, m_head)
1509 	struct vr_softc		*sc;
1510 	struct vr_chain		*c;
1511 	struct mbuf		*m_head;
1512 {
1513 	int			frag = 0;
1514 	struct vr_desc		*f = NULL;
1515 	int			total_len;
1516 	struct mbuf		*m;
1517 
1518 	m = m_head;
1519 	total_len = 0;
1520 
1521 	/*
1522 	 * The VIA Rhine wants packet buffers to be longword
1523 	 * aligned, but very often our mbufs aren't. Rather than
1524 	 * waste time trying to decide when to copy and when not
1525 	 * to copy, just do it all the time.
1526 	 */
1527 	if (m != NULL) {
1528 		struct mbuf		*m_new = NULL;
1529 
1530 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1531 		if (m_new == NULL) {
1532 			printf("vr%d: no memory for tx list", sc->vr_unit);
1533 			return(1);
1534 		}
1535 		if (m_head->m_pkthdr.len > MHLEN) {
1536 			MCLGET(m_new, M_DONTWAIT);
1537 			if (!(m_new->m_flags & M_EXT)) {
1538 				m_freem(m_new);
1539 				printf("vr%d: no memory for tx list",
1540 						sc->vr_unit);
1541 				return(1);
1542 			}
1543 		}
1544 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1545 					mtod(m_new, caddr_t));
1546 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1547 		m_freem(m_head);
1548 		m_head = m_new;
1549 		/*
1550 		 * The Rhine chip doesn't auto-pad, so we have to make
1551 		 * sure to pad short frames out to the minimum frame length
1552 		 * ourselves.
1553 		 */
1554 		if (m_head->m_len < VR_MIN_FRAMELEN) {
1555 			m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
1556 			m_new->m_len = m_new->m_pkthdr.len;
1557 		}
1558 		f = c->vr_ptr;
1559 		f->vr_data = vtophys(mtod(m_new, caddr_t));
1560 		f->vr_ctl = total_len = m_new->m_len;
1561 		f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
1562 		f->vr_status = 0;
1563 		frag = 1;
1564 	}
1565 
1566 	c->vr_mbuf = m_head;
1567 	c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
1568 	c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
1569 
1570 	return(0);
1571 }
1572 
1573 /*
1574  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1575  * to the mbuf data regions directly in the transmit lists. We also save a
1576  * copy of the pointers since the transmit list fragment pointers are
1577  * physical addresses.
1578  */
1579 
1580 static void vr_start(ifp)
1581 	struct ifnet		*ifp;
1582 {
1583 	struct vr_softc		*sc;
1584 	struct mbuf		*m_head = NULL;
1585 	struct vr_chain		*cur_tx = NULL, *start_tx;
1586 
1587 	sc = ifp->if_softc;
1588 
1589 	if (sc->vr_autoneg) {
1590 		sc->vr_tx_pend = 1;
1591 		return;
1592 	}
1593 
1594 	/*
1595 	 * Check for an available queue slot. If there are none,
1596 	 * punt.
1597 	 */
1598 	if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
1599 		ifp->if_flags |= IFF_OACTIVE;
1600 		return;
1601 	}
1602 
1603 	start_tx = sc->vr_cdata.vr_tx_free;
1604 
1605 	while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
1606 		IF_DEQUEUE(&ifp->if_snd, m_head);
1607 		if (m_head == NULL)
1608 			break;
1609 
1610 		/* Pick a descriptor off the free list. */
1611 		cur_tx = sc->vr_cdata.vr_tx_free;
1612 		sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;
1613 
1614 		/* Pack the data into the descriptor. */
1615 		vr_encap(sc, cur_tx, m_head);
1616 
1617 		if (cur_tx != start_tx)
1618 			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1619 
1620 #if NBPFILTER > 0
1621 		/*
1622 		 * If there's a BPF listener, bounce a copy of this frame
1623 		 * to him.
1624 		 */
1625 		if (ifp->if_bpf)
1626 			bpf_mtap(ifp, cur_tx->vr_mbuf);
1627 #endif
1628 		VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1629 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
1630 	}
1631 
1632 	/*
1633 	 * If there are no frames queued, bail.
1634 	 */
1635 	if (cur_tx == NULL)
1636 		return;
1637 
1638 	sc->vr_cdata.vr_tx_tail = cur_tx;
1639 
1640 	if (sc->vr_cdata.vr_tx_head == NULL)
1641 		sc->vr_cdata.vr_tx_head = start_tx;
1642 
1643 	/*
1644 	 * Set a timeout in case the chip goes out to lunch.
1645 	 */
1646 	ifp->if_timer = 5;
1647 
1648 	return;
1649 }
1650 
/*
 * Bring the interface up: reset the chip, rebuild the RX/TX rings,
 * program the RX filter, and start the receiver and transmitter.
 * The ordering of the register accesses below is deliberate.
 */
static void vr_init(xsc)
	void			*xsc;
{
	struct vr_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	u_int16_t		phy_bmcr = 0;
	int			s;

	/* Autoneg in progress: it will re-init when it finishes. */
	if (sc->vr_autoneg)
		return;

	s = splimp();

	/* Save the PHY control register so we can restore it below. */
	if (sc->vr_pinfo != NULL)
		phy_bmcr = vr_phy_readreg(sc, PHY_BMCR);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vr_stop(sc);
	vr_reset(sc);

	/* Use store-and-forward thresholds for both RX and TX. */
	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		printf("vr%d: initialization failed: no "
			"memory for rx buffers\n", sc->vr_unit);
		vr_stop(sc);
		(void)splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	vr_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/*
	 * Program the multicast filter, if necessary.
	 */
	vr_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
				    VR_CMD_TX_ON|VR_CMD_RX_ON|
				    VR_CMD_RX_GO);

	/*
	 * NOTE(review): this re-reads BMCR directly instead of using
	 * the phy_bmcr value saved above, and does so even when
	 * vr_pinfo is NULL (the save above is guarded) — confirm this
	 * asymmetry is intentional.
	 */
	vr_setcfg(sc, vr_phy_readreg(sc, PHY_BMCR));

	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	/* Restore state of BMCR */
	if (sc->vr_pinfo != NULL)
		vr_phy_writereg(sc, PHY_BMCR, phy_bmcr);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	(void)splx(s);

	return;
}
1741 
1742 /*
1743  * Set media options.
1744  */
1745 static int vr_ifmedia_upd(ifp)
1746 	struct ifnet		*ifp;
1747 {
1748 	struct vr_softc		*sc;
1749 	struct ifmedia		*ifm;
1750 
1751 	sc = ifp->if_softc;
1752 	ifm = &sc->ifmedia;
1753 
1754 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1755 		return(EINVAL);
1756 
1757 	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1758 		vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1759 	else
1760 		vr_setmode_mii(sc, ifm->ifm_media);
1761 
1762 	return(0);
1763 }
1764 
1765 /*
1766  * Report current media status.
1767  */
static void vr_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct vr_softc		*sc;
	u_int16_t		advert = 0, ability = 0;

	sc = ifp->if_softc;

	ifmr->ifm_active = IFM_ETHER;

	/*
	 * Autoneg disabled: report the forced speed/duplex straight
	 * from the PHY control register bits.
	 */
	if (!(vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
		if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
			ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
		else
			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
		return;
	}

	/*
	 * Autoneg enabled: intersect what we advertise with what the
	 * link partner can do, reporting the best common mode in
	 * descending order of preference (100bT4 down to 10bT half).
	 */
	ability = vr_phy_readreg(sc, PHY_LPAR);
	advert = vr_phy_readreg(sc, PHY_ANAR);
	if (advert & PHY_ANAR_100BT4 &&
		ability & PHY_ANAR_100BT4) {
		ifmr->ifm_active = IFM_ETHER|IFM_100_T4;
	} else if (advert & PHY_ANAR_100BTXFULL &&
		ability & PHY_ANAR_100BTXFULL) {
		ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_FDX;
	} else if (advert & PHY_ANAR_100BTXHALF &&
		ability & PHY_ANAR_100BTXHALF) {
		ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_HDX;
	} else if (advert & PHY_ANAR_10BTFULL &&
		ability & PHY_ANAR_10BTFULL) {
		ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_FDX;
	} else if (advert & PHY_ANAR_10BTHALF &&
		ability & PHY_ANAR_10BTHALF) {
		ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_HDX;
	}

	return;
}
1812 
1813 static int vr_ioctl(ifp, command, data)
1814 	struct ifnet		*ifp;
1815 	u_long			command;
1816 	caddr_t			data;
1817 {
1818 	struct vr_softc		*sc = ifp->if_softc;
1819 	struct ifreq		*ifr = (struct ifreq *) data;
1820 	int			s, error = 0;
1821 
1822 	s = splimp();
1823 
1824 	switch(command) {
1825 	case SIOCSIFADDR:
1826 	case SIOCGIFADDR:
1827 	case SIOCSIFMTU:
1828 		error = ether_ioctl(ifp, command, data);
1829 		break;
1830 	case SIOCSIFFLAGS:
1831 		if (ifp->if_flags & IFF_UP) {
1832 			vr_init(sc);
1833 		} else {
1834 			if (ifp->if_flags & IFF_RUNNING)
1835 				vr_stop(sc);
1836 		}
1837 		error = 0;
1838 		break;
1839 	case SIOCADDMULTI:
1840 	case SIOCDELMULTI:
1841 		vr_setmulti(sc);
1842 		error = 0;
1843 		break;
1844 	case SIOCGIFMEDIA:
1845 	case SIOCSIFMEDIA:
1846 		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1847 		break;
1848 	default:
1849 		error = EINVAL;
1850 		break;
1851 	}
1852 
1853 	(void)splx(s);
1854 
1855 	return(error);
1856 }
1857 
1858 static void vr_watchdog(ifp)
1859 	struct ifnet		*ifp;
1860 {
1861 	struct vr_softc		*sc;
1862 
1863 	sc = ifp->if_softc;
1864 
1865 	if (sc->vr_autoneg) {
1866 		vr_autoneg_mii(sc, VR_FLAG_DELAYTIMEO, 1);
1867 		return;
1868 	}
1869 
1870 	ifp->if_oerrors++;
1871 	printf("vr%d: watchdog timeout\n", sc->vr_unit);
1872 
1873 	if (!(vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1874 		printf("vr%d: no carrier - transceiver cable problem?\n",
1875 								sc->vr_unit);
1876 
1877 	vr_stop(sc);
1878 	vr_reset(sc);
1879 	vr_init(sc);
1880 
1881 	if (ifp->if_snd.ifq_head != NULL)
1882 		vr_start(ifp);
1883 
1884 	return;
1885 }
1886 
1887 /*
1888  * Stop the adapter and free any mbufs allocated to the
1889  * RX and TX lists.
1890  */
1891 static void vr_stop(sc)
1892 	struct vr_softc		*sc;
1893 {
1894 	register int		i;
1895 	struct ifnet		*ifp;
1896 
1897 	ifp = &sc->arpcom.ac_if;
1898 	ifp->if_timer = 0;
1899 
1900 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1901 	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1902 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1903 	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1904 	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1905 
1906 	/*
1907 	 * Free data in the RX lists.
1908 	 */
1909 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
1910 		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1911 			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1912 			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
1913 		}
1914 	}
1915 	bzero((char *)&sc->vr_ldata->vr_rx_list,
1916 		sizeof(sc->vr_ldata->vr_rx_list));
1917 
1918 	/*
1919 	 * Free the TX list buffers.
1920 	 */
1921 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
1922 		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
1923 			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
1924 			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
1925 		}
1926 	}
1927 
1928 	bzero((char *)&sc->vr_ldata->vr_tx_list,
1929 		sizeof(sc->vr_ldata->vr_tx_list));
1930 
1931 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1932 
1933 	return;
1934 }
1935 
1936 /*
1937  * Stop all chip I/O so that the kernel's probe routines don't
1938  * get confused by errant DMAs when rebooting.
1939  */
static void vr_shutdown(howto, arg)
	int			howto;
	void			*arg;
{
	struct vr_softc		*sc;

	/* Just quiesce the chip; the system is going down anyway. */
	sc = (struct vr_softc *)arg;
	vr_stop(sc);

	return;
}
1950 
/*
 * PCI driver glue: registers the probe/attach entry points with the
 * kernel's PCI device table at link time via DATA_SET.
 */
static struct pci_device vr_device = {
	"vr",
	vr_probe,
	vr_attach,
	&vr_count,
	NULL
};
DATA_SET(pcidevice_set, vr_device);
1959