1 /*
2  * Copyright (c) 1997, 1998
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 
35 /*
36  * VIA Rhine fast ethernet PCI NIC driver
37  *
38  * Supports various network adapters based on the VIA Rhine
39  * and Rhine II PCI controllers, including the D-Link DFE530TX.
40  * Datasheets are available at http://www.via.com.tw.
41  *
42  * Written by Bill Paul <wpaul@ctr.columbia.edu>
43  * Electrical Engineering Department
44  * Columbia University, New York City
45  */
46 
47 /*
48  * The VIA Rhine controllers are similar in some respects to the
49  * DEC tulip chips, except less complicated. The controller
50  * uses an MII bus and an external physical layer interface. The
51  * receiver has a one entry perfect filter and a 64-bit hash table
52  * multicast filter. Transmit and receive descriptors are similar
53  * to the tulip.
54  *
55  * The Rhine has a serious flaw in its transmit DMA mechanism:
56  * transmit buffers must be longword aligned. Unfortunately,
57  * FreeBSD doesn't guarantee that mbufs will be filled in starting
58  * at longword boundaries, so we have to do a buffer copy before
59  * transmission.
60  */
61 
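/*
 * A rough sketch of how that copy is done in vr_encap() below: the
 * outgoing chain is flattened into a freshly allocated (and therefore
 * longword aligned) mbuf before its physical address is handed to the
 * chip:
 *
 *	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
 *	if (m_head->m_pkthdr.len > MHLEN)
 *		MCLGET(m_new, M_DONTWAIT);
 *	m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
 */
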
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/sockio.h>
65 #include <sys/mbuf.h>
66 #include <sys/malloc.h>
67 #include <sys/kernel.h>
68 #include <sys/socket.h>
69 
70 #include <net/if.h>
71 #include <net/if_arp.h>
72 #include <net/ethernet.h>
73 #include <net/if_dl.h>
74 #include <net/if_media.h>
75 
76 #include <net/bpf.h>
77 
78 #include "opt_bdg.h"
79 #ifdef BRIDGE
80 #include <net/bridge.h>
81 #endif /* BRIDGE */
82 
83 #include <vm/vm.h>              /* for vtophys */
84 #include <vm/pmap.h>            /* for vtophys */
85 #include <machine/clock.h>      /* for DELAY */
86 #include <machine/bus_pio.h>
87 #include <machine/bus_memio.h>
88 #include <machine/bus.h>
89 #include <machine/resource.h>
90 #include <sys/bus.h>
91 #include <sys/rman.h>
92 
93 #include <dev/mii/mii.h>
94 #include <dev/mii/miivar.h>
95 
96 #include <pci/pcireg.h>
97 #include <pci/pcivar.h>
98 
99 #define VR_USEIOSPACE
100 
101 #include <pci/if_vrreg.h>
102 
103 MODULE_DEPEND(vr, miibus, 1, 1, 1);
104 
105 /* "controller miibus0" required.  See GENERIC if you get errors here. */
106 #include "miibus_if.h"
107 
108 #ifndef lint
109 static const char rcsid[] =
110   "$FreeBSD$";
111 #endif
112 
113 /*
114  * Various supported device vendors/types and their names.
115  */
116 static struct vr_type vr_devs[] = {
117 	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
118 		"VIA VT3043 Rhine I 10/100BaseTX" },
119 	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
120 		"VIA VT86C100A Rhine II 10/100BaseTX" },
121 	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
122 		"Delta Electronics Rhine II 10/100BaseTX" },
123 	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
124 		"Addtron Technology Rhine II 10/100BaseTX" },
125 	{ 0, 0, NULL }
126 };
127 
128 static int vr_probe		__P((device_t));
129 static int vr_attach		__P((device_t));
130 static int vr_detach		__P((device_t));
131 
132 static int vr_newbuf		__P((struct vr_softc *,
133 					struct vr_chain_onefrag *,
134 					struct mbuf *));
135 static int vr_encap		__P((struct vr_softc *, struct vr_chain *,
136 						struct mbuf * ));
137 
138 static void vr_rxeof		__P((struct vr_softc *));
139 static void vr_rxeoc		__P((struct vr_softc *));
140 static void vr_txeof		__P((struct vr_softc *));
141 static void vr_txeoc		__P((struct vr_softc *));
142 static void vr_tick		__P((void *));
143 static void vr_intr		__P((void *));
144 static void vr_start		__P((struct ifnet *));
145 static int vr_ioctl		__P((struct ifnet *, u_long, caddr_t));
146 static void vr_init		__P((void *));
147 static void vr_stop		__P((struct vr_softc *));
148 static void vr_watchdog		__P((struct ifnet *));
149 static void vr_shutdown		__P((device_t));
150 static int vr_ifmedia_upd	__P((struct ifnet *));
151 static void vr_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
152 
153 static void vr_mii_sync		__P((struct vr_softc *));
154 static void vr_mii_send		__P((struct vr_softc *, u_int32_t, int));
155 static int vr_mii_readreg	__P((struct vr_softc *, struct vr_mii_frame *));
156 static int vr_mii_writereg	__P((struct vr_softc *, struct vr_mii_frame *));
157 static int vr_miibus_readreg	__P((device_t, int, int));
158 static int vr_miibus_writereg	__P((device_t, int, int, int));
159 static void vr_miibus_statchg	__P((device_t));
160 
161 static void vr_setcfg		__P((struct vr_softc *, int));
162 static u_int8_t vr_calchash	__P((u_int8_t *));
163 static void vr_setmulti		__P((struct vr_softc *));
164 static void vr_reset		__P((struct vr_softc *));
165 static int vr_list_rx_init	__P((struct vr_softc *));
166 static int vr_list_tx_init	__P((struct vr_softc *));
167 
168 #ifdef VR_USEIOSPACE
169 #define VR_RES			SYS_RES_IOPORT
170 #define VR_RID			VR_PCI_LOIO
171 #else
172 #define VR_RES			SYS_RES_MEMORY
173 #define VR_RID			VR_PCI_LOMEM
174 #endif
175 
176 static device_method_t vr_methods[] = {
177 	/* Device interface */
178 	DEVMETHOD(device_probe,		vr_probe),
179 	DEVMETHOD(device_attach,	vr_attach),
180 	DEVMETHOD(device_detach, 	vr_detach),
181 	DEVMETHOD(device_shutdown,	vr_shutdown),
182 
183 	/* bus interface */
184 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
185 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
186 
187 	/* MII interface */
188 	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
189 	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
190 	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),
191 
192 	{ 0, 0 }
193 };
194 
195 static driver_t vr_driver = {
196 	"vr",
197 	vr_methods,
198 	sizeof(struct vr_softc)
199 };
200 
201 static devclass_t vr_devclass;
202 
203 DRIVER_MODULE(if_vr, pci, vr_driver, vr_devclass, 0, 0);
204 DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);
205 
206 #define VR_SETBIT(sc, reg, x)				\
207 	CSR_WRITE_1(sc, reg,				\
208 		CSR_READ_1(sc, reg) | x)
209 
210 #define VR_CLRBIT(sc, reg, x)				\
211 	CSR_WRITE_1(sc, reg,				\
212 		CSR_READ_1(sc, reg) & ~x)
213 
214 #define VR_SETBIT16(sc, reg, x)				\
215 	CSR_WRITE_2(sc, reg,				\
216 		CSR_READ_2(sc, reg) | x)
217 
218 #define VR_CLRBIT16(sc, reg, x)				\
219 	CSR_WRITE_2(sc, reg,				\
220 		CSR_READ_2(sc, reg) & ~x)
221 
222 #define VR_SETBIT32(sc, reg, x)				\
223 	CSR_WRITE_4(sc, reg,				\
224 		CSR_READ_4(sc, reg) | x)
225 
226 #define VR_CLRBIT32(sc, reg, x)				\
227 	CSR_WRITE_4(sc, reg,				\
228 		CSR_READ_4(sc, reg) & ~x)
229 
230 #define SIO_SET(x)					\
231 	CSR_WRITE_1(sc, VR_MIICMD,			\
232 		CSR_READ_1(sc, VR_MIICMD) | x)
233 
234 #define SIO_CLR(x)					\
235 	CSR_WRITE_1(sc, VR_MIICMD,			\
236 		CSR_READ_1(sc, VR_MIICMD) & ~x)
237 
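/*
 * The SIO_SET()/SIO_CLR() macros above are simple read-modify-write
 * accesses to the MII command register.  For example, SIO_SET(VR_MIICMD_CLK)
 * expands to:
 *
 *	CSR_WRITE_1(sc, VR_MIICMD,
 *		CSR_READ_1(sc, VR_MIICMD) | VR_MIICMD_CLK);
 *
 * The MII routines below bit-bang the management interface by toggling
 * the clock and data bits this way, with a DELAY(1) between transitions.
 */
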
238 /*
239  * Sync the PHYs by setting data bit and strobing the clock 32 times.
240  */
241 static void vr_mii_sync(sc)
242 	struct vr_softc		*sc;
243 {
244 	register int		i;
245 
246 	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
247 
248 	for (i = 0; i < 32; i++) {
249 		SIO_SET(VR_MIICMD_CLK);
250 		DELAY(1);
251 		SIO_CLR(VR_MIICMD_CLK);
252 		DELAY(1);
253 	}
254 
255 	return;
256 }
257 
258 /*
259  * Clock a series of bits through the MII.
260  */
261 static void vr_mii_send(sc, bits, cnt)
262 	struct vr_softc		*sc;
263 	u_int32_t		bits;
264 	int			cnt;
265 {
266 	int			i;
267 
268 	SIO_CLR(VR_MIICMD_CLK);
269 
270 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
271                 if (bits & i) {
272 		if (bits & i) {
273 			SIO_SET(VR_MIICMD_DATAIN);
274 		} else {
275 			SIO_CLR(VR_MIICMD_DATAIN);
276 		}
277 		SIO_CLR(VR_MIICMD_CLK);
278 		DELAY(1);
279 		SIO_SET(VR_MIICMD_CLK);
280 	}
281 }
282 
283 /*
284  * Read a PHY register through the MII.
285  */
286 static int vr_mii_readreg(sc, frame)
287 	struct vr_softc		*sc;
288 	struct vr_mii_frame	*frame;
289 
290 {
291 	int			i, ack, s;
292 
293 	s = splimp();
294 
295 	/*
296 	 * Set up frame for RX.
297 	 */
298 	frame->mii_stdelim = VR_MII_STARTDELIM;
299 	frame->mii_opcode = VR_MII_READOP;
300 	frame->mii_turnaround = 0;
301 	frame->mii_data = 0;
302 
303 	CSR_WRITE_1(sc, VR_MIICMD, 0);
304 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
305 
306 	/*
307  	 * Turn on data xmit.
308 	 */
309 	SIO_SET(VR_MIICMD_DIR);
310 
311 	vr_mii_sync(sc);
312 
313 	/*
314 	 * Send command/address info.
315 	 */
316 	vr_mii_send(sc, frame->mii_stdelim, 2);
317 	vr_mii_send(sc, frame->mii_opcode, 2);
318 	vr_mii_send(sc, frame->mii_phyaddr, 5);
319 	vr_mii_send(sc, frame->mii_regaddr, 5);
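	/*
	 * At this point the header of a standard MII management frame has
	 * been clocked out: a 2-bit start delimiter, 2-bit read opcode,
	 * 5-bit PHY address and 5-bit register address.  The PHY drives
	 * the 16 data bits back to us during the read loop below.
	 */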
320 
321 	/* Idle bit */
322 	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
323 	DELAY(1);
324 	SIO_SET(VR_MIICMD_CLK);
325 	DELAY(1);
326 
327 	/* Turn off xmit. */
328 	SIO_CLR(VR_MIICMD_DIR);
329 
330 	/* Check for ack */
331 	SIO_CLR(VR_MIICMD_CLK);
332 	DELAY(1);
333 	SIO_SET(VR_MIICMD_CLK);
334 	DELAY(1);
335 	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
336 
337 	/*
338 	 * Now try reading data bits. If the ack failed, we still
339 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
340 	 */
341 	if (ack) {
342 		for(i = 0; i < 16; i++) {
343 			SIO_CLR(VR_MIICMD_CLK);
344 			DELAY(1);
345 			SIO_SET(VR_MIICMD_CLK);
346 			DELAY(1);
347 		}
348 		goto fail;
349 	}
350 
351 	for (i = 0x8000; i; i >>= 1) {
352 		SIO_CLR(VR_MIICMD_CLK);
353 		DELAY(1);
354 		if (!ack) {
355 			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
356 				frame->mii_data |= i;
357 			DELAY(1);
358 		}
359 		SIO_SET(VR_MIICMD_CLK);
360 		DELAY(1);
361 	}
362 
363 fail:
364 
365 	SIO_CLR(VR_MIICMD_CLK);
366 	DELAY(1);
367 	SIO_SET(VR_MIICMD_CLK);
368 	DELAY(1);
369 
370 	splx(s);
371 
372 	if (ack)
373 		return(1);
374 	return(0);
375 }
376 
377 /*
378  * Write to a PHY register through the MII.
379  */
380 static int vr_mii_writereg(sc, frame)
381 	struct vr_softc		*sc;
382 	struct vr_mii_frame	*frame;
383 
384 {
385 	int			s;
386 
387 	s = splimp();
388 
389 	CSR_WRITE_1(sc, VR_MIICMD, 0);
390 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
391 
392 	/*
393 	 * Set up frame for TX.
394 	 */
395 
396 	frame->mii_stdelim = VR_MII_STARTDELIM;
397 	frame->mii_opcode = VR_MII_WRITEOP;
398 	frame->mii_turnaround = VR_MII_TURNAROUND;
399 
400 	/*
401  	 * Turn on data output.
402 	 */
403 	SIO_SET(VR_MIICMD_DIR);
404 
405 	vr_mii_sync(sc);
406 
407 	vr_mii_send(sc, frame->mii_stdelim, 2);
408 	vr_mii_send(sc, frame->mii_opcode, 2);
409 	vr_mii_send(sc, frame->mii_phyaddr, 5);
410 	vr_mii_send(sc, frame->mii_regaddr, 5);
411 	vr_mii_send(sc, frame->mii_turnaround, 2);
412 	vr_mii_send(sc, frame->mii_data, 16);
413 
414 	/* Idle bit. */
415 	SIO_SET(VR_MIICMD_CLK);
416 	DELAY(1);
417 	SIO_CLR(VR_MIICMD_CLK);
418 	DELAY(1);
419 
420 	/*
421 	 * Turn off xmit.
422 	 */
423 	SIO_CLR(VR_MIICMD_DIR);
424 
425 	splx(s);
426 
427 	return(0);
428 }
429 
430 static int vr_miibus_readreg(dev, phy, reg)
431 	device_t		dev;
432 	int			phy, reg;
433 {
434 	struct vr_softc		*sc;
435 	struct vr_mii_frame	frame;
436 
437 	sc = device_get_softc(dev);
438 	bzero((char *)&frame, sizeof(frame));
439 
440 	frame.mii_phyaddr = phy;
441 	frame.mii_regaddr = reg;
442 	vr_mii_readreg(sc, &frame);
443 
444 	return(frame.mii_data);
445 }
446 
447 static int vr_miibus_writereg(dev, phy, reg, data)
448 	device_t		dev;
449 	int			phy, reg, data;
450 {
451 	struct vr_softc		*sc;
452 	struct vr_mii_frame	frame;
453 
454 	sc = device_get_softc(dev);
455 	bzero((char *)&frame, sizeof(frame));
456 
457 	frame.mii_phyaddr = phy;
458 	frame.mii_regaddr = reg;
459 	frame.mii_data = data;
460 
461 	vr_mii_writereg(sc, &frame);
462 
463 	return(0);
464 }
465 
466 static void vr_miibus_statchg(dev)
467 	device_t		dev;
468 {
469 	struct vr_softc		*sc;
470 	struct mii_data		*mii;
471 
472 	sc = device_get_softc(dev);
473 	mii = device_get_softc(sc->vr_miibus);
474 	vr_setcfg(sc, mii->mii_media_active);
475 
476 	return;
477 }
478 
479 /*
480  * Calculate CRC of a multicast group address, return the upper 6 bits.
481  */
482 static u_int8_t vr_calchash(addr)
483 	u_int8_t		*addr;
484 {
485 	u_int32_t		crc, carry;
486 	int			i, j;
487 	u_int8_t		c;
488 
489 	/* Compute CRC for the address value. */
490 	crc = 0xFFFFFFFF; /* initial value */
491 
492 	for (i = 0; i < 6; i++) {
493 		c = *(addr + i);
494 		for (j = 0; j < 8; j++) {
495 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
496 			crc <<= 1;
497 			c >>= 1;
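			/*
			 * This is the standard Ethernet CRC-32 (polynomial
			 * 0x04c11db7) computed bit-serially, MSB first.
			 * After the shift the low bit of crc is zero, so
			 * OR-ing in carry (always 1 on this path) supplies
			 * the low bit of the polynomial.
			 */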
498 			if (carry)
499 				crc = (crc ^ 0x04c11db6) | carry;
500 		}
501 	}
502 
503 	/* return the filter bit position */
504 	return((crc >> 26) & 0x0000003F);
505 }
506 
507 /*
508  * Program the 64-bit multicast hash filter.
509  */
510 static void vr_setmulti(sc)
511 	struct vr_softc		*sc;
512 {
513 	struct ifnet		*ifp;
514 	int			h = 0;
515 	u_int32_t		hashes[2] = { 0, 0 };
516 	struct ifmultiaddr	*ifma;
517 	u_int8_t		rxfilt;
518 	int			mcnt = 0;
519 
520 	ifp = &sc->arpcom.ac_if;
521 
522 	rxfilt = CSR_READ_1(sc, VR_RXCFG);
523 
524 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
525 		rxfilt |= VR_RXCFG_RX_MULTI;
526 		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
527 		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
528 		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
529 		return;
530 	}
531 
532 	/* first, zot all the existing hash bits */
533 	CSR_WRITE_4(sc, VR_MAR0, 0);
534 	CSR_WRITE_4(sc, VR_MAR1, 0);
535 
536 	/* now program new ones */
537 	for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
538 				ifma = ifma->ifma_link.le_next) {
539 		if (ifma->ifma_addr->sa_family != AF_LINK)
540 			continue;
541 		h = vr_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
542 		if (h < 32)
543 			hashes[0] |= (1 << h);
544 		else
545 			hashes[1] |= (1 << (h - 32));
546 		mcnt++;
547 	}
548 
549 	if (mcnt)
550 		rxfilt |= VR_RXCFG_RX_MULTI;
551 	else
552 		rxfilt &= ~VR_RXCFG_RX_MULTI;
553 
554 	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
555 	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
556 	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
557 
558 	return;
559 }
560 
561 /*
562  * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
563  * netconfig register, we first have to put the transmit and/or receive
564  * logic in the idle state.
565  */
566 static void vr_setcfg(sc, media)
567 	struct vr_softc		*sc;
568 	int			media;
569 {
570 	int			restart = 0;
571 
572 	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
573 		restart = 1;
574 		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
575 	}
576 
577 	if ((media & IFM_GMASK) == IFM_FDX)
578 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
579 	else
580 		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
581 
582 	if (restart)
583 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
584 
585 	return;
586 }
587 
588 static void vr_reset(sc)
589 	struct vr_softc		*sc;
590 {
591 	register int		i;
592 
593 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
594 
595 	for (i = 0; i < VR_TIMEOUT; i++) {
596 		DELAY(10);
597 		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
598 			break;
599 	}
600 	if (i == VR_TIMEOUT)
601 		printf("vr%d: reset never completed!\n", sc->vr_unit);
602 
603 	/* Wait a little while for the chip to get its brains in order. */
604 	DELAY(1000);
605 
606         return;
607 }
608 
609 /*
610  * Probe for a VIA Rhine chip. Check the PCI vendor and device
611  * IDs against our list and return a device name if we find a match.
612  */
613 static int vr_probe(dev)
614 	device_t		dev;
615 {
616 	struct vr_type		*t;
617 
618 	t = vr_devs;
619 
620 	while(t->vr_name != NULL) {
621 		if ((pci_get_vendor(dev) == t->vr_vid) &&
622 		    (pci_get_device(dev) == t->vr_did)) {
623 			device_set_desc(dev, t->vr_name);
624 			return(0);
625 		}
626 		t++;
627 	}
628 
629 	return(ENXIO);
630 }
631 
632 /*
633  * Attach the interface. Allocate softc structures, do ifmedia
634  * setup and ethernet/BPF attach.
635  */
636 static int vr_attach(dev)
637 	device_t		dev;
638 {
639 	int			i, s;
640 	u_char			eaddr[ETHER_ADDR_LEN];
641 	u_int32_t		command;
642 	struct vr_softc		*sc;
643 	struct ifnet		*ifp;
644 	int			unit, error = 0, rid;
645 
646 	s = splimp();
647 
648 	sc = device_get_softc(dev);
649 	unit = device_get_unit(dev);
650 	bzero(sc, sizeof(struct vr_softc));
651 
652 	/*
653 	 * Handle power management nonsense.
654 	 */
655 
656 	command = pci_read_config(dev, VR_PCI_CAPID, 4) & 0x000000FF;
657 	if (command == 0x01) {
658 
659 		command = pci_read_config(dev, VR_PCI_PWRMGMTCTRL, 4);
660 		if (command & VR_PSTATE_MASK) {
661 			u_int32_t		iobase, membase, irq;
662 
663 			/* Save important PCI config data. */
664 			iobase = pci_read_config(dev, VR_PCI_LOIO, 4);
665 			membase = pci_read_config(dev, VR_PCI_LOMEM, 4);
666 			irq = pci_read_config(dev, VR_PCI_INTLINE, 4);
667 
668 			/* Reset the power state. */
669 			printf("vr%d: chip is in D%d power mode "
670 			"-- setting to D0\n", unit, command & VR_PSTATE_MASK);
671 			command &= 0xFFFFFFFC;
672 			pci_write_config(dev, VR_PCI_PWRMGMTCTRL, command, 4);
673 
674 			/* Restore PCI config data. */
675 			pci_write_config(dev, VR_PCI_LOIO, iobase, 4);
676 			pci_write_config(dev, VR_PCI_LOMEM, membase, 4);
677 			pci_write_config(dev, VR_PCI_INTLINE, irq, 4);
678 		}
679 	}
680 
681 	/*
682 	 * Map control/status registers.
683 	 */
684 	command = pci_read_config(dev, PCI_COMMAND_STATUS_REG, 4);
685 	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
686 	pci_write_config(dev, PCI_COMMAND_STATUS_REG, command, 4);
687 	command = pci_read_config(dev, PCI_COMMAND_STATUS_REG, 4);
688 
689 #ifdef VR_USEIOSPACE
690 	if (!(command & PCIM_CMD_PORTEN)) {
691 		printf("vr%d: failed to enable I/O ports!\n", unit);
692 		error = ENXIO;
693 		goto fail;
694 	}
695 #else
696 	if (!(command & PCIM_CMD_MEMEN)) {
697 		printf("vr%d: failed to enable memory mapping!\n", unit);
698 		error = ENXIO;
699 		goto fail;
	}
700 #endif
701 
702 	rid = VR_RID;
703 	sc->vr_res = bus_alloc_resource(dev, VR_RES, &rid,
704 	    0, ~0, 1, RF_ACTIVE);
705 
706 	if (sc->vr_res == NULL) {
707 		printf("vr%d: couldn't map ports/memory\n", unit);
708 		error = ENXIO;
709 		goto fail;
710 	}
711 
712 	sc->vr_btag = rman_get_bustag(sc->vr_res);
713 	sc->vr_bhandle = rman_get_bushandle(sc->vr_res);
714 
715 	/* Allocate interrupt */
716 	rid = 0;
717 	sc->vr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
718 	    RF_SHAREABLE | RF_ACTIVE);
719 
720 	if (sc->vr_irq == NULL) {
721 		printf("vr%d: couldn't map interrupt\n", unit);
722 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
723 		error = ENXIO;
724 		goto fail;
725 	}
726 
727 	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET,
728 	    vr_intr, sc, &sc->vr_intrhand);
729 
730 	if (error) {
731 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
732 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
733 		printf("vr%d: couldn't set up irq\n", unit);
734 		goto fail;
735 	}
736 
737 	/* Reset the adapter. */
738 	vr_reset(sc);
739 
740 	/*
741 	 * Get station address. The way the Rhine chips work,
742 	 * you're not allowed to directly access the EEPROM once
743 	 * they've been programmed a special way. Consequently,
744 	 * we need to read the node address from the PAR0 and PAR1
745 	 * registers.
746 	 */
747 	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
748 	DELAY(200);
749 	for (i = 0; i < ETHER_ADDR_LEN; i++)
750 		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
751 
752 	/*
753 	 * A Rhine chip was detected. Inform the world.
754 	 */
755 	printf("vr%d: Ethernet address: %6D\n", unit, eaddr, ":");
756 
757 	sc->vr_unit = unit;
758 	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
759 
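	/*
	 * The descriptor lists are obtained with contigmalloc() so that
	 * they are physically contiguous: the chip is given plain physical
	 * addresses (via vtophys()) rather than busdma mappings, so the
	 * descriptor memory must not be scattered across pages.
	 */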
760 	sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF,
761 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
762 
763 	if (sc->vr_ldata == NULL) {
764 		printf("vr%d: no memory for list buffers!\n", unit);
765 		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
766 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
767 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
768 		error = ENXIO;
769 		goto fail;
770 	}
771 
772 	bzero(sc->vr_ldata, sizeof(struct vr_list_data));
773 
774 	ifp = &sc->arpcom.ac_if;
775 	ifp->if_softc = sc;
776 	ifp->if_unit = unit;
777 	ifp->if_name = "vr";
778 	ifp->if_mtu = ETHERMTU;
779 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
780 	ifp->if_ioctl = vr_ioctl;
781 	ifp->if_output = ether_output;
782 	ifp->if_start = vr_start;
783 	ifp->if_watchdog = vr_watchdog;
784 	ifp->if_init = vr_init;
785 	ifp->if_baudrate = 10000000;
786 	ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;
787 
788 	/*
789 	 * Do MII setup.
790 	 */
791 	if (mii_phy_probe(dev, &sc->vr_miibus,
792 	    vr_ifmedia_upd, vr_ifmedia_sts)) {
793 		printf("vr%d: MII without any phy!\n", sc->vr_unit);
794 		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
795 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
796 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
797 		contigfree(sc->vr_ldata,
798 		    sizeof(struct vr_list_data), M_DEVBUF);
799 		error = ENXIO;
800 		goto fail;
801 	}
802 
803 	callout_handle_init(&sc->vr_stat_ch);
804 
805 	/*
806 	 * Call MI attach routines.
807 	 */
808 	if_attach(ifp);
809 	ether_ifattach(ifp);
810 
811 	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
812 
813 fail:
814 	splx(s);
815 	return(error);
816 }
817 
818 static int vr_detach(dev)
819 	device_t		dev;
820 {
821 	struct vr_softc		*sc;
822 	struct ifnet		*ifp;
823 	int			s;
824 
825 	s = splimp();
826 
827 	sc = device_get_softc(dev);
828 	ifp = &sc->arpcom.ac_if;
829 
830 	vr_stop(sc);
831 	if_detach(ifp);
832 
833 	bus_generic_detach(dev);
834 	device_delete_child(dev, sc->vr_miibus);
835 
836 	bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
837 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
838 	bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
839 
840 	contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF);
841 
842 	splx(s);
843 
844 	return(0);
845 }
846 
847 /*
848  * Initialize the transmit descriptors.
849  */
850 static int vr_list_tx_init(sc)
851 	struct vr_softc		*sc;
852 {
853 	struct vr_chain_data	*cd;
854 	struct vr_list_data	*ld;
855 	int			i;
856 
857 	cd = &sc->vr_cdata;
858 	ld = sc->vr_ldata;
859 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
860 		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
861 		if (i == (VR_TX_LIST_CNT - 1))
862 			cd->vr_tx_chain[i].vr_nextdesc =
863 				&cd->vr_tx_chain[0];
864 		else
865 			cd->vr_tx_chain[i].vr_nextdesc =
866 				&cd->vr_tx_chain[i + 1];
867 	}
868 
869 	cd->vr_tx_free = &cd->vr_tx_chain[0];
870 	cd->vr_tx_tail = cd->vr_tx_head = NULL;
871 
872 	return(0);
873 }
874 
875 
876 /*
877  * Initialize the RX descriptors and allocate mbufs for them. Note that
878  * we arrange the descriptors in a closed ring, so that the last descriptor
879  * points back to the first.
880  */
881 static int vr_list_rx_init(sc)
882 	struct vr_softc		*sc;
883 {
884 	struct vr_chain_data	*cd;
885 	struct vr_list_data	*ld;
886 	int			i;
887 
888 	cd = &sc->vr_cdata;
889 	ld = sc->vr_ldata;
890 
891 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
892 		cd->vr_rx_chain[i].vr_ptr =
893 			(struct vr_desc *)&ld->vr_rx_list[i];
894 		if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS)
895 			return(ENOBUFS);
896 		if (i == (VR_RX_LIST_CNT - 1)) {
897 			cd->vr_rx_chain[i].vr_nextdesc =
898 					&cd->vr_rx_chain[0];
899 			ld->vr_rx_list[i].vr_next =
900 					vtophys(&ld->vr_rx_list[0]);
901 		} else {
902 			cd->vr_rx_chain[i].vr_nextdesc =
903 					&cd->vr_rx_chain[i + 1];
904 			ld->vr_rx_list[i].vr_next =
905 					vtophys(&ld->vr_rx_list[i + 1]);
906 		}
907 	}
908 
909 	cd->vr_rx_head = &cd->vr_rx_chain[0];
910 
911 	return(0);
912 }
913 
914 /*
915  * Initialize an RX descriptor and attach an MBUF cluster.
916  * Note: the length fields are only 11 bits wide, which means the
917  * largest size we can specify is 2047. This is important because
918  * MCLBYTES is 2048, so we have to subtract one otherwise we'll
919  * overflow the field and make a mess.
920  */
921 static int vr_newbuf(sc, c, m)
922 	struct vr_softc		*sc;
923 	struct vr_chain_onefrag	*c;
924 	struct mbuf		*m;
925 {
926 	struct mbuf		*m_new = NULL;
927 
928 	if (m == NULL) {
929 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
930 		if (m_new == NULL) {
931 			printf("vr%d: no memory for rx list "
932 			    "-- packet dropped!\n", sc->vr_unit);
933 			return(ENOBUFS);
934 		}
935 
936 		MCLGET(m_new, M_DONTWAIT);
937 		if (!(m_new->m_flags & M_EXT)) {
938 			printf("vr%d: no memory for rx list "
939 			    "-- packet dropped!\n", sc->vr_unit);
940 			m_freem(m_new);
941 			return(ENOBUFS);
942 		}
943 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
944 	} else {
945 		m_new = m;
946 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
947 		m_new->m_data = m_new->m_ext.ext_buf;
948 	}
949 
950 	m_adj(m_new, sizeof(u_int64_t));
951 
952 	c->vr_mbuf = m_new;
953 	c->vr_ptr->vr_status = VR_RXSTAT;
954 	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
955 	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
956 
957 	return(0);
958 }
959 
960 /*
961  * A frame has been uploaded: pass the resulting mbuf chain up to
962  * the higher level protocols.
963  */
964 static void vr_rxeof(sc)
965 	struct vr_softc		*sc;
966 {
967         struct ether_header	*eh;
968         struct mbuf		*m;
969         struct ifnet		*ifp;
970 	struct vr_chain_onefrag	*cur_rx;
971 	int			total_len = 0;
972 	u_int32_t		rxstat;
973 
974 	ifp = &sc->arpcom.ac_if;
975 
976 	while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
977 							VR_RXSTAT_OWN)) {
978 		struct mbuf		*m0 = NULL;
979 
980 		cur_rx = sc->vr_cdata.vr_rx_head;
981 		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
982 		m = cur_rx->vr_mbuf;
983 
984 		/*
985 		 * If an error occurs, update stats, clear the
986 		 * status word and leave the mbuf cluster in place:
987 		 * it should simply get re-used next time this descriptor
988 	 	 * comes up in the ring.
989 		 */
990 		if (rxstat & VR_RXSTAT_RXERR) {
991 			ifp->if_ierrors++;
992 			printf("vr%d: rx error: ", sc->vr_unit);
993 			switch(rxstat & 0x000000FF) {
994 			case VR_RXSTAT_CRCERR:
995 				printf("crc error\n");
996 				break;
997 			case VR_RXSTAT_FRAMEALIGNERR:
998 				printf("frame alignment error\n");
999 				break;
1000 			case VR_RXSTAT_FIFOOFLOW:
1001 				printf("FIFO overflow\n");
1002 				break;
1003 			case VR_RXSTAT_GIANT:
1004 				printf("received giant packet\n");
1005 				break;
1006 			case VR_RXSTAT_RUNT:
1007 				printf("received runt packet\n");
1008 				break;
1009 			case VR_RXSTAT_BUSERR:
1010 				printf("system bus error\n");
1011 				break;
1012 			case VR_RXSTAT_BUFFERR:
1013 				printf("rx buffer error\n");
1014 				break;
1015 			default:
1016 				printf("unknown rx error\n");
1017 				break;
1018 			}
1019 			vr_newbuf(sc, cur_rx, m);
1020 			continue;
1021 		}
1022 
1023 		/* No errors; receive the packet. */
1024 		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
1025 
1026 		/*
1027 		 * XXX The VIA Rhine chip includes the CRC with every
1028 		 * received frame, and there's no way to turn this
1029 		 * behavior off (at least, I can't find anything in
1030 	 	 * the manual that explains how to do it) so we have
1031 		 * to trim off the CRC manually.
1032 		 */
1033 		total_len -= ETHER_CRC_LEN;
1034 
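		/*
		 * Copy the received frame into a fresh mbuf chain.  Starting
		 * the copy ETHER_ALIGN (2) bytes before the packet and then
		 * trimming those bytes off again below leaves the 14-byte
		 * Ethernet header at an offset that longword aligns the
		 * payload (IP header) that follows it.
		 */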
1035 		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1036 		    total_len + ETHER_ALIGN, 0, ifp, NULL);
1037 		vr_newbuf(sc, cur_rx, m);
1038 		if (m0 == NULL) {
1039 			ifp->if_ierrors++;
1040 			continue;
1041 		}
1042 		m_adj(m0, ETHER_ALIGN);
1043 		m = m0;
1044 
1045 		ifp->if_ipackets++;
1046 		eh = mtod(m, struct ether_header *);
1047 
1048 		/*
1049 		 * Handle BPF listeners. Let the BPF user see the packet, but
1050 		 * don't pass it up to the ether_input() layer unless it's
1051 		 * a broadcast packet, multicast packet, matches our ethernet
1052 		 * address or the interface is in promiscuous mode.
1053 		 */
1054 		if (ifp->if_bpf) {
1055 			bpf_mtap(ifp, m);
1056 			if (ifp->if_flags & IFF_PROMISC &&
1057 				(bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
1058 						ETHER_ADDR_LEN) &&
1059 					(eh->ether_dhost[0] & 1) == 0)) {
1060 				m_freem(m);
1061 				continue;
1062 			}
1063 		}
1064 
1065 #ifdef BRIDGE
1066 		if (do_bridge) {
1067 			struct ifnet		*bdg_ifp;
1068 			bdg_ifp = bridge_in(m);
1069 			if (bdg_ifp != BDG_LOCAL && bdg_ifp != BDG_DROP)
1070 				bdg_forward(&m, bdg_ifp);
1071 			if (((bdg_ifp != BDG_LOCAL) && (bdg_ifp != BDG_BCAST) &&
1072 			    (bdg_ifp != BDG_MCAST)) || bdg_ifp == BDG_DROP) {
1073 				m_freem(m);
1074 				continue;
1075 			}
1076 		}
1077 #endif /* BRIDGE */
1078 
1079 		/* Remove header from mbuf and pass it on. */
1080 		m_adj(m, sizeof(struct ether_header));
1081 		ether_input(ifp, eh, m);
1082 	}
1083 
1084 	return;
1085 }
1086 
1087 static void vr_rxeoc(sc)
1088 	struct vr_softc		*sc;
1089 {
1090 
1091 	vr_rxeof(sc);
1092 	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1093 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1094 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1095 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1096 
1097 	return;
1098 }
1099 
1100 /*
1101  * A frame was downloaded to the chip. It's safe for us to clean up
1102  * the list buffers.
1103  */
1104 
1105 static void vr_txeof(sc)
1106 	struct vr_softc		*sc;
1107 {
1108 	struct vr_chain		*cur_tx;
1109 	struct ifnet		*ifp;
1110 
1111 	ifp = &sc->arpcom.ac_if;
1112 
1113 	/* Clear the timeout timer. */
1114 	ifp->if_timer = 0;
1115 
1116 	/* Sanity check. */
1117 	if (sc->vr_cdata.vr_tx_head == NULL)
1118 		return;
1119 
1120 	/*
1121 	 * Go through our tx list and free mbufs for those
1122 	 * frames that have been transmitted.
1123 	 */
1124 	while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
1125 		u_int32_t		txstat;
1126 
1127 		cur_tx = sc->vr_cdata.vr_tx_head;
1128 		txstat = cur_tx->vr_ptr->vr_status;
1129 
1130 		if (txstat & VR_TXSTAT_OWN)
1131 			break;
1132 
1133 		if (txstat & VR_TXSTAT_ERRSUM) {
1134 			ifp->if_oerrors++;
1135 			if (txstat & VR_TXSTAT_DEFER)
1136 				ifp->if_collisions++;
1137 			if (txstat & VR_TXSTAT_LATECOLL)
1138 				ifp->if_collisions++;
1139 		}
1140 
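		/*
		 * The status word carries a small counter of how many
		 * collisions this frame suffered; shift the field down and
		 * add it to the interface statistics.
		 */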
1141 		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
1142 
1143 		ifp->if_opackets++;
1144 		if (cur_tx->vr_mbuf != NULL) {
1145 			m_freem(cur_tx->vr_mbuf);
1146 			cur_tx->vr_mbuf = NULL;
1147 		}
1148 
1149 		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
1150 			sc->vr_cdata.vr_tx_head = NULL;
1151 			sc->vr_cdata.vr_tx_tail = NULL;
1152 			break;
1153 		}
1154 
1155 		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
1156 	}
1157 
1158 	return;
1159 }
1160 
1161 /*
1162  * TX 'end of channel' interrupt handler.
1163  */
1164 static void vr_txeoc(sc)
1165 	struct vr_softc		*sc;
1166 {
1167 	struct ifnet		*ifp;
1168 
1169 	ifp = &sc->arpcom.ac_if;
1170 
1171 	ifp->if_timer = 0;
1172 
1173 	if (sc->vr_cdata.vr_tx_head == NULL) {
1174 		ifp->if_flags &= ~IFF_OACTIVE;
1175 		sc->vr_cdata.vr_tx_tail = NULL;
1176 	}
1177 
1178 	return;
1179 }
1180 
1181 static void vr_tick(xsc)
1182 	void			*xsc;
1183 {
1184 	struct vr_softc		*sc;
1185 	struct mii_data		*mii;
1186 	int			s;
1187 
1188 	s = splimp();
1189 
1190 	sc = xsc;
1191 	mii = device_get_softc(sc->vr_miibus);
1192 	mii_tick(mii);
1193 
1194 	sc->vr_stat_ch = timeout(vr_tick, sc, hz);
1195 
1196 	splx(s);
1197 
1198 	return;
1199 }
1200 
1201 static void vr_intr(arg)
1202 	void			*arg;
1203 {
1204 	struct vr_softc		*sc;
1205 	struct ifnet		*ifp;
1206 	u_int16_t		status;
1207 
1208 	sc = arg;
1209 	ifp = &sc->arpcom.ac_if;
1210 
1211 	/* Suppress unwanted interrupts. */
1212 	if (!(ifp->if_flags & IFF_UP)) {
1213 		vr_stop(sc);
1214 		return;
1215 	}
1216 
1217 	/* Disable interrupts. */
1218 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1219 
1220 	for (;;) {
1221 
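		/*
		 * Read the pending interrupt bits and write the same value
		 * back to acknowledge (clear) them before deciding what
		 * work to do.
		 */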
1222 		status = CSR_READ_2(sc, VR_ISR);
1223 		if (status)
1224 			CSR_WRITE_2(sc, VR_ISR, status);
1225 
1226 		if ((status & VR_INTRS) == 0)
1227 			break;
1228 
1229 		if (status & VR_ISR_RX_OK)
1230 			vr_rxeof(sc);
1231 
1232 		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
1233 		    (status & VR_ISR_RX_OFLOW) ||
1234 		    (status & VR_ISR_RX_DROPPED)) {
1235 			vr_rxeof(sc);
1236 			vr_rxeoc(sc);
1237 		}
1238 
1239 		if (status & VR_ISR_TX_OK) {
1240 			vr_txeof(sc);
1241 			vr_txeoc(sc);
1242 		}
1243 
1244 		if ((status & VR_ISR_TX_UNDERRUN) || (status & VR_ISR_TX_ABRT)) {
1245 			ifp->if_oerrors++;
1246 			vr_txeof(sc);
1247 			if (sc->vr_cdata.vr_tx_head != NULL) {
1248 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
1249 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
1250 			}
1251 		}
1252 
1253 		if (status & VR_ISR_BUSERR) {
1254 			vr_reset(sc);
1255 			vr_init(sc);
1256 		}
1257 	}
1258 
1259 	/* Re-enable interrupts. */
1260 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1261 
1262 	if (ifp->if_snd.ifq_head != NULL) {
1263 		vr_start(ifp);
1264 	}
1265 
1266 	return;
1267 }
1268 
1269 /*
1270  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1271  * pointers to the fragment pointers.
1272  */
1273 static int vr_encap(sc, c, m_head)
1274 	struct vr_softc		*sc;
1275 	struct vr_chain		*c;
1276 	struct mbuf		*m_head;
1277 {
1278 	int			frag = 0;
1279 	struct vr_desc		*f = NULL;
1280 	int			total_len;
1281 	struct mbuf		*m;
1282 
1283 	m = m_head;
1284 	total_len = 0;
1285 
1286 	/*
1287 	 * The VIA Rhine wants packet buffers to be longword
1288 	 * aligned, but very often our mbufs aren't. Rather than
1289 	 * waste time trying to decide when to copy and when not
1290 	 * to copy, just do it all the time.
1291 	 */
1292 	if (m != NULL) {
1293 		struct mbuf		*m_new = NULL;
1294 
1295 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1296 		if (m_new == NULL) {
1297 			printf("vr%d: no memory for tx list\n", sc->vr_unit);
1298 			return(1);
1299 		}
1300 		if (m_head->m_pkthdr.len > MHLEN) {
1301 			MCLGET(m_new, M_DONTWAIT);
1302 			if (!(m_new->m_flags & M_EXT)) {
1303 				m_freem(m_new);
1304 				printf("vr%d: no memory for tx list\n",
1305 						sc->vr_unit);
1306 				return(1);
1307 			}
1308 		}
1309 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1310 					mtod(m_new, caddr_t));
1311 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1312 		m_freem(m_head);
1313 		m_head = m_new;
1314 		/*
1315 		 * The Rhine chip doesn't auto-pad, so we have to make
1316 		 * sure to pad short frames out to the minimum frame length
1317 		 * ourselves.
1318 		 */
1319 		if (m_head->m_len < VR_MIN_FRAMELEN) {
1320 			m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
1321 			m_new->m_len = m_new->m_pkthdr.len;
1322 		}
1323 		f = c->vr_ptr;
1324 		f->vr_data = vtophys(mtod(m_new, caddr_t));
1325 		f->vr_ctl = total_len = m_new->m_len;
1326 		f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
1327 		f->vr_status = 0;
1328 		frag = 1;
1329 	}
1330 
1331 	c->vr_mbuf = m_head;
1332 	c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
1333 	c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
1334 
1335 	return(0);
1336 }
1337 
1338 /*
1339  * Main transmit routine. Since the Rhine requires longword aligned
1340  * transmit buffers, vr_encap() copies each outgoing frame into a fresh,
1341  * aligned mbuf; the descriptor holds its physical address, and the mbuf
1342  * pointer is saved so the buffer can be freed after transmission.
1343  */
1344 
1345 static void vr_start(ifp)
1346 	struct ifnet		*ifp;
1347 {
1348 	struct vr_softc		*sc;
1349 	struct mbuf		*m_head = NULL;
1350 	struct vr_chain		*cur_tx = NULL, *start_tx;
1351 
1352 	sc = ifp->if_softc;
1353 
1354 	if (ifp->if_flags & IFF_OACTIVE)
1355 		return;
1356 
1357 	/*
1358 	 * Check for an available queue slot. If there are none,
1359 	 * punt.
1360 	 */
1361 	if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
1362 		ifp->if_flags |= IFF_OACTIVE;
1363 		return;
1364 	}
1365 
1366 	start_tx = sc->vr_cdata.vr_tx_free;
1367 
1368 	while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
1369 		IF_DEQUEUE(&ifp->if_snd, m_head);
1370 		if (m_head == NULL)
1371 			break;
1372 
1373 		/* Pick a descriptor off the free list. */
1374 		cur_tx = sc->vr_cdata.vr_tx_free;
1375 		sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;
1376 
1377 		/* Pack the data into the descriptor. */
1378 		vr_encap(sc, cur_tx, m_head);
1379 
1380 		if (cur_tx != start_tx)
1381 			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1382 
1383 		/*
1384 		 * If there's a BPF listener, bounce a copy of this frame
1385 		 * to him.
1386 		 */
1387 		if (ifp->if_bpf)
1388 			bpf_mtap(ifp, cur_tx->vr_mbuf);
1389 
1390 		VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1391 		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);
1392 	}
1393 
1394 	/*
1395 	 * If there are no frames queued, bail.
1396 	 */
1397 	if (cur_tx == NULL)
1398 		return;
1399 
1400 	sc->vr_cdata.vr_tx_tail = cur_tx;
1401 
1402 	if (sc->vr_cdata.vr_tx_head == NULL)
1403 		sc->vr_cdata.vr_tx_head = start_tx;
1404 
1405 	/*
1406 	 * Set a timeout in case the chip goes out to lunch.
1407 	 */
1408 	ifp->if_timer = 5;
1409 
1410 	return;
1411 }
1412 
1413 static void vr_init(xsc)
1414 	void			*xsc;
1415 {
1416 	struct vr_softc		*sc = xsc;
1417 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1418 	struct mii_data		*mii;
1419 	int			s;
1420 
1421 	s = splimp();
1422 
1423 	mii = device_get_softc(sc->vr_miibus);
1424 
1425 	/*
1426 	 * Cancel pending I/O and free all RX/TX buffers.
1427 	 */
1428 	vr_stop(sc);
1429 	vr_reset(sc);
1430 
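	/*
	 * Set both the receive and transmit FIFO thresholds to
	 * store-and-forward, i.e. a frame is only DMAed to/from the FIFO
	 * once it is complete.  This costs a little latency but avoids
	 * transmit underruns on a slow or busy PCI bus.
	 */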
1431 	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
1432 	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);
1433 
1434 	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
1435 	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
1436 
1437 	/* Init circular RX list. */
1438 	if (vr_list_rx_init(sc) == ENOBUFS) {
1439 		printf("vr%d: initialization failed: no "
1440 			"memory for rx buffers\n", sc->vr_unit);
1441 		vr_stop(sc);
1442 		(void)splx(s);
1443 		return;
1444 	}
1445 
1446 	/*
1447 	 * Init tx descriptors.
1448 	 */
1449 	vr_list_tx_init(sc);
1450 
1451 	/* If we want promiscuous mode, set the allframes bit. */
1452 	if (ifp->if_flags & IFF_PROMISC)
1453 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1454 	else
1455 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1456 
1457 	/* Set capture broadcast bit to capture broadcast frames. */
1458 	if (ifp->if_flags & IFF_BROADCAST)
1459 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1460 	else
1461 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1462 
1463 	/*
1464 	 * Program the multicast filter, if necessary.
1465 	 */
1466 	vr_setmulti(sc);
1467 
1468 	/*
1469 	 * Load the address of the RX list.
1470 	 */
1471 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1472 
1473 	/* Enable receiver and transmitter. */
1474 	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
1475 				    VR_CMD_TX_ON|VR_CMD_RX_ON|
1476 				    VR_CMD_RX_GO);
1477 
1478 	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));
1479 
1480 	/*
1481 	 * Enable interrupts.
1482 	 */
1483 	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
1484 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1485 
1486 	mii_mediachg(mii);
1487 
1488 	ifp->if_flags |= IFF_RUNNING;
1489 	ifp->if_flags &= ~IFF_OACTIVE;
1490 
1491 	(void)splx(s);
1492 
1493 	sc->vr_stat_ch = timeout(vr_tick, sc, hz);
1494 
1495 	return;
1496 }
1497 
1498 /*
1499  * Set media options.
1500  */
1501 static int vr_ifmedia_upd(ifp)
1502 	struct ifnet		*ifp;
1503 {
1504 	struct vr_softc		*sc;
1505 
1506 	sc = ifp->if_softc;
1507 
1508 	if (ifp->if_flags & IFF_UP)
1509 		vr_init(sc);
1510 
1511 	return(0);
1512 }
1513 
1514 /*
1515  * Report current media status.
1516  */
1517 static void vr_ifmedia_sts(ifp, ifmr)
1518 	struct ifnet		*ifp;
1519 	struct ifmediareq	*ifmr;
1520 {
1521 	struct vr_softc		*sc;
1522 	struct mii_data		*mii;
1523 
1524 	sc = ifp->if_softc;
1525 	mii = device_get_softc(sc->vr_miibus);
1526 	mii_pollstat(mii);
1527 	ifmr->ifm_active = mii->mii_media_active;
1528 	ifmr->ifm_status = mii->mii_media_status;
1529 
1530 	return;
1531 }
1532 
1533 static int vr_ioctl(ifp, command, data)
1534 	struct ifnet		*ifp;
1535 	u_long			command;
1536 	caddr_t			data;
1537 {
1538 	struct vr_softc		*sc = ifp->if_softc;
1539 	struct ifreq		*ifr = (struct ifreq *) data;
1540 	struct mii_data		*mii;
1541 	int			s, error = 0;
1542 
1543 	s = splimp();
1544 
1545 	switch(command) {
1546 	case SIOCSIFADDR:
1547 	case SIOCGIFADDR:
1548 	case SIOCSIFMTU:
1549 		error = ether_ioctl(ifp, command, data);
1550 		break;
1551 	case SIOCSIFFLAGS:
1552 		if (ifp->if_flags & IFF_UP) {
1553 			vr_init(sc);
1554 		} else {
1555 			if (ifp->if_flags & IFF_RUNNING)
1556 				vr_stop(sc);
1557 		}
1558 		error = 0;
1559 		break;
1560 	case SIOCADDMULTI:
1561 	case SIOCDELMULTI:
1562 		vr_setmulti(sc);
1563 		error = 0;
1564 		break;
1565 	case SIOCGIFMEDIA:
1566 	case SIOCSIFMEDIA:
1567 		mii = device_get_softc(sc->vr_miibus);
1568 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1569 		break;
1570 	default:
1571 		error = EINVAL;
1572 		break;
1573 	}
1574 
1575 	(void)splx(s);
1576 
1577 	return(error);
1578 }
1579 
1580 static void vr_watchdog(ifp)
1581 	struct ifnet		*ifp;
1582 {
1583 	struct vr_softc		*sc;
1584 
1585 	sc = ifp->if_softc;
1586 
1587 	ifp->if_oerrors++;
1588 	printf("vr%d: watchdog timeout\n", sc->vr_unit);
1589 
1590 	vr_stop(sc);
1591 	vr_reset(sc);
1592 	vr_init(sc);
1593 
1594 	if (ifp->if_snd.ifq_head != NULL)
1595 		vr_start(ifp);
1596 
1597 	return;
1598 }
1599 
1600 /*
1601  * Stop the adapter and free any mbufs allocated to the
1602  * RX and TX lists.
1603  */
1604 static void vr_stop(sc)
1605 	struct vr_softc		*sc;
1606 {
1607 	register int		i;
1608 	struct ifnet		*ifp;
1609 
1610 	ifp = &sc->arpcom.ac_if;
1611 	ifp->if_timer = 0;
1612 
1613 	untimeout(vr_tick, sc, sc->vr_stat_ch);
1614 
1615 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1616 	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1617 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1618 	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1619 	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1620 
1621 	/*
1622 	 * Free data in the RX lists.
1623 	 */
1624 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
1625 		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1626 			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1627 			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
1628 		}
1629 	}
1630 	bzero((char *)&sc->vr_ldata->vr_rx_list,
1631 		sizeof(sc->vr_ldata->vr_rx_list));
1632 
1633 	/*
1634 	 * Free the TX list buffers.
1635 	 */
1636 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
1637 		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
1638 			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
1639 			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
1640 		}
1641 	}
1642 
1643 	bzero((char *)&sc->vr_ldata->vr_tx_list,
1644 		sizeof(sc->vr_ldata->vr_tx_list));
1645 
1646 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1647 
1648 	return;
1649 }
1650 
1651 /*
1652  * Stop all chip I/O so that the kernel's probe routines don't
1653  * get confused by errant DMAs when rebooting.
1654  */
1655 static void vr_shutdown(dev)
1656 	device_t		dev;
1657 {
1658 	struct vr_softc		*sc;
1659 
1660 	sc = device_get_softc(dev);
1661 
1662 	vr_stop(sc);
1663 
1664 	return;
1665 }
1666