xref: /freebsd/sys/dev/vr/if_vr.c (revision a3e8fd0b7f663db7eafff527d5c3ca3bcfa8a537)
1 /*
2  * Copyright (c) 1997, 1998
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 
35 /*
36  * VIA Rhine fast ethernet PCI NIC driver
37  *
38  * Supports various network adapters based on the VIA Rhine
39  * and Rhine II PCI controllers, including the D-Link DFE530TX.
40  * Datasheets are available at http://www.via.com.tw.
41  *
42  * Written by Bill Paul <wpaul@ctr.columbia.edu>
43  * Electrical Engineering Department
44  * Columbia University, New York City
45  */
46 
47 /*
48  * The VIA Rhine controllers are similar in some respects to
49  * the DEC tulip chips, except less complicated. The controller
50  * uses an MII bus and an external physical layer interface. The
51  * receiver has a one entry perfect filter and a 64-bit hash table
52  * multicast filter. Transmit and receive descriptors are similar
53  * to the tulip.
54  *
55  * The Rhine has a serious flaw in its transmit DMA mechanism:
56  * transmit buffers must be longword aligned. Unfortunately,
57  * FreeBSD doesn't guarantee that mbufs will be filled in starting
58  * at longword boundaries, so we have to do a buffer copy before
59  * transmission.
60  */
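
/*
 * In this driver the alignment workaround lives in vr_encap(), which
 * copies every outgoing packet into a freshly allocated mbuf (and pads
 * runts up to VR_MIN_FRAMELEN) before handing it to the chip.
 */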
61 
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/sockio.h>
65 #include <sys/mbuf.h>
66 #include <sys/malloc.h>
67 #include <sys/kernel.h>
68 #include <sys/socket.h>
69 
70 #include <net/if.h>
71 #include <net/if_arp.h>
72 #include <net/ethernet.h>
73 #include <net/if_dl.h>
74 #include <net/if_media.h>
75 
76 #include <net/bpf.h>
77 
78 #include <vm/vm.h>              /* for vtophys */
79 #include <vm/pmap.h>            /* for vtophys */
80 #include <machine/bus_pio.h>
81 #include <machine/bus_memio.h>
82 #include <machine/bus.h>
83 #include <machine/resource.h>
84 #include <sys/bus.h>
85 #include <sys/rman.h>
86 
87 #include <dev/mii/mii.h>
88 #include <dev/mii/miivar.h>
89 
90 #include <pci/pcireg.h>
91 #include <pci/pcivar.h>
92 
93 #define VR_USEIOSPACE
94 
95 #include <pci/if_vrreg.h>
96 
97 MODULE_DEPEND(vr, miibus, 1, 1, 1);
98 
99 /* "controller miibus0" required.  See GENERIC if you get errors here. */
100 #include "miibus_if.h"
101 
102 #ifndef lint
103 static const char rcsid[] =
104   "$FreeBSD$";
105 #endif
106 
107 /*
108  * Various supported device vendors/types and their names.
109  */
110 static struct vr_type vr_devs[] = {
111 	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
112 		"VIA VT3043 Rhine I 10/100BaseTX" },
113 	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
114 		"VIA VT86C100A Rhine II 10/100BaseTX" },
115 	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
116 		"VIA VT6102 Rhine II 10/100BaseTX" },
117 	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
118 		"Delta Electronics Rhine II 10/100BaseTX" },
119 	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
120 		"Addtron Technology Rhine II 10/100BaseTX" },
121 	{ 0, 0, NULL }
122 };
123 
124 static int vr_probe		(device_t);
125 static int vr_attach		(device_t);
126 static int vr_detach		(device_t);
127 
128 static int vr_newbuf		(struct vr_softc *,
129 					struct vr_chain_onefrag *,
130 					struct mbuf *);
131 static int vr_encap		(struct vr_softc *, struct vr_chain *,
132 						struct mbuf * );
133 
134 static void vr_rxeof		(struct vr_softc *);
135 static void vr_rxeoc		(struct vr_softc *);
136 static void vr_txeof		(struct vr_softc *);
137 static void vr_txeoc		(struct vr_softc *);
138 static void vr_tick		(void *);
139 static void vr_intr		(void *);
140 static void vr_start		(struct ifnet *);
141 static int vr_ioctl		(struct ifnet *, u_long, caddr_t);
142 static void vr_init		(void *);
143 static void vr_stop		(struct vr_softc *);
144 static void vr_watchdog		(struct ifnet *);
145 static void vr_shutdown		(device_t);
146 static int vr_ifmedia_upd	(struct ifnet *);
147 static void vr_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
148 
149 static void vr_mii_sync		(struct vr_softc *);
150 static void vr_mii_send		(struct vr_softc *, u_int32_t, int);
151 static int vr_mii_readreg	(struct vr_softc *, struct vr_mii_frame *);
152 static int vr_mii_writereg	(struct vr_softc *, struct vr_mii_frame *);
153 static int vr_miibus_readreg	(device_t, int, int);
154 static int vr_miibus_writereg	(device_t, int, int, int);
155 static void vr_miibus_statchg	(device_t);
156 
157 static void vr_setcfg		(struct vr_softc *, int);
158 static u_int8_t vr_calchash	(u_int8_t *);
159 static void vr_setmulti		(struct vr_softc *);
160 static void vr_reset		(struct vr_softc *);
161 static int vr_list_rx_init	(struct vr_softc *);
162 static int vr_list_tx_init	(struct vr_softc *);
163 
164 #ifdef VR_USEIOSPACE
165 #define VR_RES			SYS_RES_IOPORT
166 #define VR_RID			VR_PCI_LOIO
167 #else
168 #define VR_RES			SYS_RES_MEMORY
169 #define VR_RID			VR_PCI_LOMEM
170 #endif
171 
172 static device_method_t vr_methods[] = {
173 	/* Device interface */
174 	DEVMETHOD(device_probe,		vr_probe),
175 	DEVMETHOD(device_attach,	vr_attach),
176 	DEVMETHOD(device_detach, 	vr_detach),
177 	DEVMETHOD(device_shutdown,	vr_shutdown),
178 
179 	/* bus interface */
180 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
181 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
182 
183 	/* MII interface */
184 	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
185 	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
186 	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),
187 
188 	{ 0, 0 }
189 };
190 
191 static driver_t vr_driver = {
192 	"vr",
193 	vr_methods,
194 	sizeof(struct vr_softc)
195 };
196 
197 static devclass_t vr_devclass;
198 
199 DRIVER_MODULE(if_vr, pci, vr_driver, vr_devclass, 0, 0);
200 DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);
201 
202 #define VR_SETBIT(sc, reg, x)				\
203 	CSR_WRITE_1(sc, reg,				\
204 		CSR_READ_1(sc, reg) | (x))
205 
206 #define VR_CLRBIT(sc, reg, x)				\
207 	CSR_WRITE_1(sc, reg,				\
208 		CSR_READ_1(sc, reg) & ~(x))
209 
210 #define VR_SETBIT16(sc, reg, x)				\
211 	CSR_WRITE_2(sc, reg,				\
212 		CSR_READ_2(sc, reg) | (x))
213 
214 #define VR_CLRBIT16(sc, reg, x)				\
215 	CSR_WRITE_2(sc, reg,				\
216 		CSR_READ_2(sc, reg) & ~(x))
217 
218 #define VR_SETBIT32(sc, reg, x)				\
219 	CSR_WRITE_4(sc, reg,				\
220 		CSR_READ_4(sc, reg) | (x))
221 
222 #define VR_CLRBIT32(sc, reg, x)				\
223 	CSR_WRITE_4(sc, reg,				\
224 		CSR_READ_4(sc, reg) & ~(x))
225 
226 #define SIO_SET(x)					\
227 	CSR_WRITE_1(sc, VR_MIICMD,			\
228 		CSR_READ_1(sc, VR_MIICMD) | (x))
229 
230 #define SIO_CLR(x)					\
231 	CSR_WRITE_1(sc, VR_MIICMD,			\
232 		CSR_READ_1(sc, VR_MIICMD) & ~(x))
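
/*
 * These macros are all simple read-modify-write helpers.  For example,
 * VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON) expands to roughly:
 *
 *	CSR_WRITE_2(sc, VR_COMMAND,
 *	    CSR_READ_2(sc, VR_COMMAND) | VR_CMD_TX_ON);
 *
 * SIO_SET() and SIO_CLR() do the same thing on the MII bit-bang
 * command register (VR_MIICMD).
 */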
233 
234 /*
235  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
236  */
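/*
 * This is in effect the 32-bit preamble of an IEEE 802.3 clause 22
 * management frame: with VR_MIICMD_DATAIN held high, each clock strobe
 * shifts another '1' bit out to the PHY.
 */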
237 static void
238 vr_mii_sync(sc)
239 	struct vr_softc		*sc;
240 {
241 	register int		i;
242 
243 	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
244 
245 	for (i = 0; i < 32; i++) {
246 		SIO_SET(VR_MIICMD_CLK);
247 		DELAY(1);
248 		SIO_CLR(VR_MIICMD_CLK);
249 		DELAY(1);
250 	}
251 
252 	return;
253 }
254 
255 /*
256  * Clock a series of bits through the MII.
257  */
258 static void
259 vr_mii_send(sc, bits, cnt)
260 	struct vr_softc		*sc;
261 	u_int32_t		bits;
262 	int			cnt;
263 {
264 	int			i;
265 
266 	SIO_CLR(VR_MIICMD_CLK);
267 
268 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
269 		if (bits & i) {
270 			SIO_SET(VR_MIICMD_DATAIN);
271 		} else {
272 			SIO_CLR(VR_MIICMD_DATAIN);
273 		}
274 		DELAY(1);
275 		SIO_CLR(VR_MIICMD_CLK);
276 		DELAY(1);
277 		SIO_SET(VR_MIICMD_CLK);
278 	}
279 }
280 
281 /*
282  * Read a PHY register through the MII.
283  */
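/*
 * The read frame is bit-banged out MSB first in the usual MII format:
 * a 2-bit start delimiter, a 2-bit read opcode, a 5-bit PHY address
 * and a 5-bit register address, followed by a turnaround cycle; the
 * PHY then drives 16 data bits back to us on VR_MIICMD_DATAOUT.
 */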
284 static int
285 vr_mii_readreg(sc, frame)
286 	struct vr_softc		*sc;
287 	struct vr_mii_frame	*frame;
288 
289 {
290 	int			i, ack;
291 
292 	VR_LOCK(sc);
293 
294 	/*
295 	 * Set up frame for RX.
296 	 */
297 	frame->mii_stdelim = VR_MII_STARTDELIM;
298 	frame->mii_opcode = VR_MII_READOP;
299 	frame->mii_turnaround = 0;
300 	frame->mii_data = 0;
301 
302 	CSR_WRITE_1(sc, VR_MIICMD, 0);
303 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
304 
305 	/*
306  	 * Turn on data xmit.
307 	 */
308 	SIO_SET(VR_MIICMD_DIR);
309 
310 	vr_mii_sync(sc);
311 
312 	/*
313 	 * Send command/address info.
314 	 */
315 	vr_mii_send(sc, frame->mii_stdelim, 2);
316 	vr_mii_send(sc, frame->mii_opcode, 2);
317 	vr_mii_send(sc, frame->mii_phyaddr, 5);
318 	vr_mii_send(sc, frame->mii_regaddr, 5);
319 
320 	/* Idle bit */
321 	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
322 	DELAY(1);
323 	SIO_SET(VR_MIICMD_CLK);
324 	DELAY(1);
325 
326 	/* Turn off xmit. */
327 	SIO_CLR(VR_MIICMD_DIR);
328 
329 	/* Check for ack */
330 	SIO_CLR(VR_MIICMD_CLK);
331 	DELAY(1);
332 	SIO_SET(VR_MIICMD_CLK);
333 	DELAY(1);
334 	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
335 
336 	/*
337 	 * Now try reading data bits. If the ack failed, we still
338 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
339 	 */
340 	if (ack) {
341 		for(i = 0; i < 16; i++) {
342 			SIO_CLR(VR_MIICMD_CLK);
343 			DELAY(1);
344 			SIO_SET(VR_MIICMD_CLK);
345 			DELAY(1);
346 		}
347 		goto fail;
348 	}
349 
350 	for (i = 0x8000; i; i >>= 1) {
351 		SIO_CLR(VR_MIICMD_CLK);
352 		DELAY(1);
353 		if (!ack) {
354 			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
355 				frame->mii_data |= i;
356 			DELAY(1);
357 		}
358 		SIO_SET(VR_MIICMD_CLK);
359 		DELAY(1);
360 	}
361 
362 fail:
363 
364 	SIO_CLR(VR_MIICMD_CLK);
365 	DELAY(1);
366 	SIO_SET(VR_MIICMD_CLK);
367 	DELAY(1);
368 
369 	VR_UNLOCK(sc);
370 
371 	if (ack)
372 		return(1);
373 	return(0);
374 }
375 
376 /*
377  * Write to a PHY register through the MII.
378  */
379 static int
380 vr_mii_writereg(sc, frame)
381 	struct vr_softc		*sc;
382 	struct vr_mii_frame	*frame;
383 
384 {
385 	VR_LOCK(sc);
386 
387 	CSR_WRITE_1(sc, VR_MIICMD, 0);
388 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
389 
390 	/*
391 	 * Set up frame for TX.
392 	 */
393 
394 	frame->mii_stdelim = VR_MII_STARTDELIM;
395 	frame->mii_opcode = VR_MII_WRITEOP;
396 	frame->mii_turnaround = VR_MII_TURNAROUND;
397 
398 	/*
399  	 * Turn on data output.
400 	 */
401 	SIO_SET(VR_MIICMD_DIR);
402 
403 	vr_mii_sync(sc);
404 
405 	vr_mii_send(sc, frame->mii_stdelim, 2);
406 	vr_mii_send(sc, frame->mii_opcode, 2);
407 	vr_mii_send(sc, frame->mii_phyaddr, 5);
408 	vr_mii_send(sc, frame->mii_regaddr, 5);
409 	vr_mii_send(sc, frame->mii_turnaround, 2);
410 	vr_mii_send(sc, frame->mii_data, 16);
411 
412 	/* Idle bit. */
413 	SIO_SET(VR_MIICMD_CLK);
414 	DELAY(1);
415 	SIO_CLR(VR_MIICMD_CLK);
416 	DELAY(1);
417 
418 	/*
419 	 * Turn off xmit.
420 	 */
421 	SIO_CLR(VR_MIICMD_DIR);
422 
423 	VR_UNLOCK(sc);
424 
425 	return(0);
426 }
427 
428 static int
429 vr_miibus_readreg(dev, phy, reg)
430 	device_t		dev;
431 	int			phy, reg;
432 {
433 	struct vr_softc		*sc;
434 	struct vr_mii_frame	frame;
435 
436 	sc = device_get_softc(dev);
437 	bzero((char *)&frame, sizeof(frame));
438 
439 	frame.mii_phyaddr = phy;
440 	frame.mii_regaddr = reg;
441 	vr_mii_readreg(sc, &frame);
442 
443 	return(frame.mii_data);
444 }
445 
446 static int
447 vr_miibus_writereg(dev, phy, reg, data)
448 	device_t		dev;
449 	int			phy, reg, data;
450 {
451 	struct vr_softc		*sc;
452 	struct vr_mii_frame	frame;
453 
454 	sc = device_get_softc(dev);
455 	bzero((char *)&frame, sizeof(frame));
456 
457 	frame.mii_phyaddr = phy;
458 	frame.mii_regaddr = reg;
459 	frame.mii_data = data;
460 
461 	vr_mii_writereg(sc, &frame);
462 
463 	return(0);
464 }
465 
466 static void
467 vr_miibus_statchg(dev)
468 	device_t		dev;
469 {
470 	struct vr_softc		*sc;
471 	struct mii_data		*mii;
472 
473 	sc = device_get_softc(dev);
474 	VR_LOCK(sc);
475 	mii = device_get_softc(sc->vr_miibus);
476 	vr_setcfg(sc, mii->mii_media_active);
477 	VR_UNLOCK(sc);
478 
479 	return;
480 }
481 
482 /*
483  * Calculate the CRC of a multicast group address and return the upper 6 bits.
484  */
485 static u_int8_t vr_calchash(addr)
486 	u_int8_t		*addr;
487 {
488 	u_int32_t		crc, carry;
489 	int			i, j;
490 	u_int8_t		c;
491 
492 	/* Compute CRC for the address value. */
493 	crc = 0xFFFFFFFF; /* initial value */
494 
495 	for (i = 0; i < 6; i++) {
496 		c = *(addr + i);
497 		for (j = 0; j < 8; j++) {
498 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
499 			crc <<= 1;
500 			c >>= 1;
501 			if (carry)
502 				crc = (crc ^ 0x04c11db6) | carry;
503 		}
504 	}
505 
506 	/* return the filter bit position */
507 	return((crc >> 26) & 0x0000003F);
508 }
509 
510 /*
511  * Program the 64-bit multicast hash filter.
512  */
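/*
 * Each address is hashed down to 6 bits by vr_calchash(); values 0-31
 * set bit h in VR_MAR0 and values 32-63 set bit (h - 32) in VR_MAR1.
 * For example, a hash of 37 sets bit 5 of VR_MAR1.
 */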
513 static void
514 vr_setmulti(sc)
515 	struct vr_softc		*sc;
516 {
517 	struct ifnet		*ifp;
518 	int			h = 0;
519 	u_int32_t		hashes[2] = { 0, 0 };
520 	struct ifmultiaddr	*ifma;
521 	u_int8_t		rxfilt;
522 	int			mcnt = 0;
523 
524 	ifp = &sc->arpcom.ac_if;
525 
526 	rxfilt = CSR_READ_1(sc, VR_RXCFG);
527 
528 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
529 		rxfilt |= VR_RXCFG_RX_MULTI;
530 		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
531 		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
532 		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
533 		return;
534 	}
535 
536 	/* first, zot all the existing hash bits */
537 	CSR_WRITE_4(sc, VR_MAR0, 0);
538 	CSR_WRITE_4(sc, VR_MAR1, 0);
539 
540 	/* now program new ones */
541 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
542 		if (ifma->ifma_addr->sa_family != AF_LINK)
543 			continue;
544 		h = vr_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
545 		if (h < 32)
546 			hashes[0] |= (1 << h);
547 		else
548 			hashes[1] |= (1 << (h - 32));
549 		mcnt++;
550 	}
551 
552 	if (mcnt)
553 		rxfilt |= VR_RXCFG_RX_MULTI;
554 	else
555 		rxfilt &= ~VR_RXCFG_RX_MULTI;
556 
557 	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
558 	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
559 	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
560 
561 	return;
562 }
563 
564 /*
565  * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
566  * netconfig register, we first have to put the transmit and/or receive
567  * logic in the idle state.
568  */
569 static void
570 vr_setcfg(sc, media)
571 	struct vr_softc		*sc;
572 	int			media;
573 {
574 	int			restart = 0;
575 
576 	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
577 		restart = 1;
578 		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
579 	}
580 
581 	if ((media & IFM_GMASK) == IFM_FDX)
582 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
583 	else
584 		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
585 
586 	if (restart)
587 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
588 
589 	return;
590 }
591 
592 static void
593 vr_reset(sc)
594 	struct vr_softc		*sc;
595 {
596 	register int		i;
597 
598 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
599 
600 	for (i = 0; i < VR_TIMEOUT; i++) {
601 		DELAY(10);
602 		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
603 			break;
604 	}
605 	if (i == VR_TIMEOUT)
606 		printf("vr%d: reset never completed!\n", sc->vr_unit);
607 
608 	/* Wait a little while for the chip to get its brains in order. */
609 	DELAY(1000);
610 
611 	return;
612 }
613 
614 /*
615  * Probe for a VIA Rhine chip. Check the PCI vendor and device
616  * IDs against our list and return a device name if we find a match.
617  */
618 static int
619 vr_probe(dev)
620 	device_t		dev;
621 {
622 	struct vr_type		*t;
623 
624 	t = vr_devs;
625 
626 	while(t->vr_name != NULL) {
627 		if ((pci_get_vendor(dev) == t->vr_vid) &&
628 		    (pci_get_device(dev) == t->vr_did)) {
629 			device_set_desc(dev, t->vr_name);
630 			return(0);
631 		}
632 		t++;
633 	}
634 
635 	return(ENXIO);
636 }
637 
638 /*
639  * Attach the interface. Allocate softc structures, do ifmedia
640  * setup and ethernet/BPF attach.
641  */
642 static int
643 vr_attach(dev)
644 	device_t		dev;
645 {
646 	int			i;
647 	u_char			eaddr[ETHER_ADDR_LEN];
648 	u_int32_t		command;
649 	struct vr_softc		*sc;
650 	struct ifnet		*ifp;
651 	int			unit, error = 0, rid;
652 
653 	sc = device_get_softc(dev);
654 	unit = device_get_unit(dev);
655 	bzero(sc, sizeof(struct vr_softc));
656 
657 	mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
658 	    MTX_DEF | MTX_RECURSE);
659 	VR_LOCK(sc);
660 
661 	/*
662 	 * Handle power management nonsense.
663 	 */
664 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
665 		u_int32_t		iobase, membase, irq;
666 
667 		/* Save important PCI config data. */
668 		iobase = pci_read_config(dev, VR_PCI_LOIO, 4);
669 		membase = pci_read_config(dev, VR_PCI_LOMEM, 4);
670 		irq = pci_read_config(dev, VR_PCI_INTLINE, 4);
671 
672 		/* Reset the power state. */
673 		printf("vr%d: chip is in D%d power mode "
674 		    "-- setting to D0\n", unit,
675 		    pci_get_powerstate(dev));
676 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
677 
678 		/* Restore PCI config data. */
679 		pci_write_config(dev, VR_PCI_LOIO, iobase, 4);
680 		pci_write_config(dev, VR_PCI_LOMEM, membase, 4);
681 		pci_write_config(dev, VR_PCI_INTLINE, irq, 4);
682 	}
683 
684 	/*
685 	 * Map control/status registers.
686 	 */
687 	pci_enable_busmaster(dev);
688 	pci_enable_io(dev, SYS_RES_IOPORT);
689 	pci_enable_io(dev, SYS_RES_MEMORY);
690 	command = pci_read_config(dev, PCIR_COMMAND, 4);
691 
692 #ifdef VR_USEIOSPACE
693 	if (!(command & PCIM_CMD_PORTEN)) {
694 		printf("vr%d: failed to enable I/O ports!\n", unit);
695 		error = ENXIO;
696 		goto fail;
697 	}
698 #else
699 	if (!(command & PCIM_CMD_MEMEN)) {
700 		printf("vr%d: failed to enable memory mapping!\n", unit);
701 		goto fail;
702 	}
703 #endif
704 
705 	rid = VR_RID;
706 	sc->vr_res = bus_alloc_resource(dev, VR_RES, &rid,
707 	    0, ~0, 1, RF_ACTIVE);
708 
709 	if (sc->vr_res == NULL) {
710 		printf("vr%d: couldn't map ports/memory\n", unit);
711 		error = ENXIO;
712 		goto fail;
713 	}
714 
715 	sc->vr_btag = rman_get_bustag(sc->vr_res);
716 	sc->vr_bhandle = rman_get_bushandle(sc->vr_res);
717 
718 	/* Allocate interrupt */
719 	rid = 0;
720 	sc->vr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
721 	    RF_SHAREABLE | RF_ACTIVE);
722 
723 	if (sc->vr_irq == NULL) {
724 		printf("vr%d: couldn't map interrupt\n", unit);
725 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
726 		error = ENXIO;
727 		goto fail;
728 	}
729 
730 	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET,
731 	    vr_intr, sc, &sc->vr_intrhand);
732 
733 	if (error) {
734 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
735 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
736 		printf("vr%d: couldn't set up irq\n", unit);
737 		goto fail;
738 	}
739 
740 	/*
741 	 * Windows may put the chip in suspend mode when it
742 	 * shuts down. Be sure to kick it in the head to wake it
743 	 * up again.
744 	 */
745 	VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
746 
747 	/* Reset the adapter. */
748 	vr_reset(sc);
749 
750 	/*
751 	 * Get station address. The way the Rhine chips work,
752 	 * you're not allowed to directly access the EEPROM once
753 	 * they've been programmed a special way. Consequently,
754 	 * we need to read the node address from the PAR0 and PAR1
755 	 * registers.
756 	 */
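	/*
	 * Setting VR_EECSR_LOAD presumably makes the chip reload the
	 * EEPROM contents into PAR0/PAR1 before we read them back.
	 */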
757 	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
758 	DELAY(200);
759 	for (i = 0; i < ETHER_ADDR_LEN; i++)
760 		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
761 
762 	/*
763 	 * A Rhine chip was detected. Inform the world.
764 	 */
765 	printf("vr%d: Ethernet address: %6D\n", unit, eaddr, ":");
766 
767 	sc->vr_unit = unit;
768 	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
769 
770 	sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF,
771 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
772 
773 	if (sc->vr_ldata == NULL) {
774 		printf("vr%d: no memory for list buffers!\n", unit);
775 		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
776 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
777 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
778 		error = ENXIO;
779 		goto fail;
780 	}
781 
782 	bzero(sc->vr_ldata, sizeof(struct vr_list_data));
783 
784 	ifp = &sc->arpcom.ac_if;
785 	ifp->if_softc = sc;
786 	ifp->if_unit = unit;
787 	ifp->if_name = "vr";
788 	ifp->if_mtu = ETHERMTU;
789 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
790 	ifp->if_ioctl = vr_ioctl;
791 	ifp->if_output = ether_output;
792 	ifp->if_start = vr_start;
793 	ifp->if_watchdog = vr_watchdog;
794 	ifp->if_init = vr_init;
795 	ifp->if_baudrate = 10000000;
796 	ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;
797 
798 	/*
799 	 * Do MII setup.
800 	 */
801 	if (mii_phy_probe(dev, &sc->vr_miibus,
802 	    vr_ifmedia_upd, vr_ifmedia_sts)) {
803 		printf("vr%d: MII without any phy!\n", sc->vr_unit);
804 		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
805 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
806 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
807 		contigfree(sc->vr_ldata,
808 		    sizeof(struct vr_list_data), M_DEVBUF);
809 		error = ENXIO;
810 		goto fail;
811 	}
812 
813 	callout_handle_init(&sc->vr_stat_ch);
814 
815 	/*
816 	 * Call MI attach routine.
817 	 */
818 	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
819 	VR_UNLOCK(sc);
820 	return(0);
821 
822 fail:
823 	VR_UNLOCK(sc);
824 	mtx_destroy(&sc->vr_mtx);
825 
826 	return(error);
827 }
828 
829 static int
830 vr_detach(dev)
831 	device_t		dev;
832 {
833 	struct vr_softc		*sc;
834 	struct ifnet		*ifp;
835 
836 	sc = device_get_softc(dev);
837 	VR_LOCK(sc);
838 	ifp = &sc->arpcom.ac_if;
839 
840 	vr_stop(sc);
841 	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
842 
843 	bus_generic_detach(dev);
844 	device_delete_child(dev, sc->vr_miibus);
845 
846 	bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
847 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
848 	bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
849 
850 	contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF);
851 
852 	VR_UNLOCK(sc);
853 	mtx_destroy(&sc->vr_mtx);
854 
855 	return(0);
856 }
857 
858 /*
859  * Initialize the transmit descriptors.
860  */
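/*
 * Only the software links (vr_nextdesc) are set up here; the physical
 * vr_next pointers in the descriptors themselves are filled in later
 * by vr_encap() as frames are queued.
 */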
861 static int
862 vr_list_tx_init(sc)
863 	struct vr_softc		*sc;
864 {
865 	struct vr_chain_data	*cd;
866 	struct vr_list_data	*ld;
867 	int			i;
868 
869 	cd = &sc->vr_cdata;
870 	ld = sc->vr_ldata;
871 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
872 		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
873 		if (i == (VR_TX_LIST_CNT - 1))
874 			cd->vr_tx_chain[i].vr_nextdesc =
875 				&cd->vr_tx_chain[0];
876 		else
877 			cd->vr_tx_chain[i].vr_nextdesc =
878 				&cd->vr_tx_chain[i + 1];
879 	}
880 
881 	cd->vr_tx_free = &cd->vr_tx_chain[0];
882 	cd->vr_tx_tail = cd->vr_tx_head = NULL;
883 
884 	return(0);
885 }
886 
887 
888 /*
889  * Initialize the RX descriptors and allocate mbufs for them. Note that
890  * we arrange the descriptors in a closed ring, so that the last descriptor
891  * points back to the first.
892  */
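/*
 * Both the software chain (vr_nextdesc) and the hardware list (the
 * physical vr_next pointers) wrap, so the last entry of vr_rx_list
 * points back at vtophys(&vr_rx_list[0]).
 */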
893 static int
894 vr_list_rx_init(sc)
895 	struct vr_softc		*sc;
896 {
897 	struct vr_chain_data	*cd;
898 	struct vr_list_data	*ld;
899 	int			i;
900 
901 	cd = &sc->vr_cdata;
902 	ld = sc->vr_ldata;
903 
904 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
905 		cd->vr_rx_chain[i].vr_ptr =
906 			(struct vr_desc *)&ld->vr_rx_list[i];
907 		if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS)
908 			return(ENOBUFS);
909 		if (i == (VR_RX_LIST_CNT - 1)) {
910 			cd->vr_rx_chain[i].vr_nextdesc =
911 					&cd->vr_rx_chain[0];
912 			ld->vr_rx_list[i].vr_next =
913 					vtophys(&ld->vr_rx_list[0]);
914 		} else {
915 			cd->vr_rx_chain[i].vr_nextdesc =
916 					&cd->vr_rx_chain[i + 1];
917 			ld->vr_rx_list[i].vr_next =
918 					vtophys(&ld->vr_rx_list[i + 1]);
919 		}
920 	}
921 
922 	cd->vr_rx_head = &cd->vr_rx_chain[0];
923 
924 	return(0);
925 }
926 
927 /*
928  * Initialize an RX descriptor and attach an MBUF cluster.
929  * Note: the length fields are only 11 bits wide, which means the
930  * largest size we can specify is 2047. This is important because
931  * MCLBYTES is 2048, so we have to subtract one otherwise we'll
932  * overflow the field and make a mess.
933  */
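/*
 * The m_adj() below trims sizeof(u_int64_t) (8) bytes off the front of
 * the cluster, leaving 2048 - 8 = 2040 usable bytes; the length that
 * actually gets programmed into the descriptor is the VR_RXLEN
 * constant from if_vrreg.h.
 */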
934 static int
935 vr_newbuf(sc, c, m)
936 	struct vr_softc		*sc;
937 	struct vr_chain_onefrag	*c;
938 	struct mbuf		*m;
939 {
940 	struct mbuf		*m_new = NULL;
941 
942 	if (m == NULL) {
943 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
944 		if (m_new == NULL)
945 			return(ENOBUFS);
946 
947 		MCLGET(m_new, M_DONTWAIT);
948 		if (!(m_new->m_flags & M_EXT)) {
949 			m_freem(m_new);
950 			return(ENOBUFS);
951 		}
952 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
953 	} else {
954 		m_new = m;
955 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
956 		m_new->m_data = m_new->m_ext.ext_buf;
957 	}
958 
959 	m_adj(m_new, sizeof(u_int64_t));
960 
961 	c->vr_mbuf = m_new;
962 	c->vr_ptr->vr_status = VR_RXSTAT;
963 	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
964 	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
965 
966 	return(0);
967 }
968 
969 /*
970  * A frame has been uploaded: pass the resulting mbuf chain up to
971  * the higher level protocols.
972  */
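/*
 * Each received frame is copied out of its DMA cluster with m_devget()
 * (offset by ETHER_ALIGN) so the cluster can be handed straight back
 * to the ring via vr_newbuf(); if the copy fails, the frame is dropped
 * and counted as an input error.
 */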
973 static void
974 vr_rxeof(sc)
975 	struct vr_softc		*sc;
976 {
977 	struct ether_header	*eh;
978 	struct mbuf		*m;
979 	struct ifnet		*ifp;
980 	struct vr_chain_onefrag	*cur_rx;
981 	int			total_len = 0;
982 	u_int32_t		rxstat;
983 
984 	ifp = &sc->arpcom.ac_if;
985 
986 	while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
987 							VR_RXSTAT_OWN)) {
988 		struct mbuf		*m0 = NULL;
989 
990 		cur_rx = sc->vr_cdata.vr_rx_head;
991 		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
992 		m = cur_rx->vr_mbuf;
993 
994 		/*
995 		 * If an error occurs, update stats, clear the
996 		 * status word and leave the mbuf cluster in place:
997 		 * it should simply get re-used next time this descriptor
998 	 	 * comes up in the ring.
999 		 */
1000 		if (rxstat & VR_RXSTAT_RXERR) {
1001 			ifp->if_ierrors++;
1002 			printf("vr%d: rx error: ", sc->vr_unit);
1003 			switch(rxstat & 0x000000FF) {
1004 			case VR_RXSTAT_CRCERR:
1005 				printf("crc error\n");
1006 				break;
1007 			case VR_RXSTAT_FRAMEALIGNERR:
1008 				printf("frame alignment error\n");
1009 				break;
1010 			case VR_RXSTAT_FIFOOFLOW:
1011 				printf("FIFO overflow\n");
1012 				break;
1013 			case VR_RXSTAT_GIANT:
1014 				printf("received giant packet\n");
1015 				break;
1016 			case VR_RXSTAT_RUNT:
1017 				printf("received runt packet\n");
1018 				break;
1019 			case VR_RXSTAT_BUSERR:
1020 				printf("system bus error\n");
1021 				break;
1022 			case VR_RXSTAT_BUFFERR:
1023 				printf("rx buffer error\n");
1024 				break;
1025 			default:
1026 				printf("unknown rx error\n");
1027 				break;
1028 			}
1029 			vr_newbuf(sc, cur_rx, m);
1030 			continue;
1031 		}
1032 
1033 		/* No errors; receive the packet. */
1034 		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
1035 
1036 		/*
1037 		 * XXX The VIA Rhine chip includes the CRC with every
1038 		 * received frame, and there's no way to turn this
1039 		 * behavior off (at least, I can't find anything in
1040 	 	 * the manual that explains how to do it) so we have
1041 		 * to trim off the CRC manually.
1042 		 */
1043 		total_len -= ETHER_CRC_LEN;
1044 
1045 		m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp,
1046 		    NULL);
1047 		vr_newbuf(sc, cur_rx, m);
1048 		if (m0 == NULL) {
1049 			ifp->if_ierrors++;
1050 			continue;
1051 		}
1052 		m = m0;
1053 
1054 		ifp->if_ipackets++;
1055 		eh = mtod(m, struct ether_header *);
1056 
1057 		/* Remove header from mbuf and pass it on. */
1058 		m_adj(m, sizeof(struct ether_header));
1059 		ether_input(ifp, eh, m);
1060 	}
1061 
1062 	return;
1063 }
1064 
1065 static void
1066 vr_rxeoc(sc)
1067 	struct vr_softc		*sc;
1068 {
1069 
1070 	vr_rxeof(sc);
1071 	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1072 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1073 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1074 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1075 
1076 	return;
1077 }
1078 
1079 /*
1080  * A frame was downloaded to the chip. It's safe for us to clean up
1081  * the list buffers.
1082  */
1083 
1084 static void
1085 vr_txeof(sc)
1086 	struct vr_softc		*sc;
1087 {
1088 	struct vr_chain		*cur_tx;
1089 	struct ifnet		*ifp;
1090 
1091 	ifp = &sc->arpcom.ac_if;
1092 
1093 	/* Reset the timeout timer; vr_txeoc() will clear it. */
1094 	ifp->if_timer = 5;
1095 
1096 	/* Sanity check. */
1097 	if (sc->vr_cdata.vr_tx_head == NULL)
1098 		return;
1099 
1100 	/*
1101 	 * Go through our tx list and free mbufs for those
1102 	 * frames that have been transmitted.
1103 	 */
1104 	while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
1105 		u_int32_t		txstat;
1106 
1107 		cur_tx = sc->vr_cdata.vr_tx_head;
1108 		txstat = cur_tx->vr_ptr->vr_status;
1109 
1110 		if ((txstat & VR_TXSTAT_ABRT) ||
1111 		    (txstat & VR_TXSTAT_UDF)) {
1112 			while (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON)
1113 				;	/* Wait for chip to shutdown */
1114 			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1115 			CSR_WRITE_4(sc, VR_TXADDR, vtophys(cur_tx->vr_ptr));
1116 			break;
1117 		}
1118 
1119 		if (txstat & VR_TXSTAT_OWN)
1120 			break;
1121 
1122 		if (txstat & VR_TXSTAT_ERRSUM) {
1123 			ifp->if_oerrors++;
1124 			if (txstat & VR_TXSTAT_DEFER)
1125 				ifp->if_collisions++;
1126 			if (txstat & VR_TXSTAT_LATECOLL)
1127 				ifp->if_collisions++;
1128 		}
1129 
1130 		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
1131 
1132 		ifp->if_opackets++;
1133 		if (cur_tx->vr_mbuf != NULL) {
1134 			m_freem(cur_tx->vr_mbuf);
1135 			cur_tx->vr_mbuf = NULL;
1136 		}
1137 
1138 		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
1139 			sc->vr_cdata.vr_tx_head = NULL;
1140 			sc->vr_cdata.vr_tx_tail = NULL;
1141 			break;
1142 		}
1143 
1144 		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
1145 	}
1146 
1147 	return;
1148 }
1149 
1150 /*
1151  * TX 'end of channel' interrupt handler.
1152  */
1153 static void
1154 vr_txeoc(sc)
1155 	struct vr_softc		*sc;
1156 {
1157 	struct ifnet		*ifp;
1158 
1159 	ifp = &sc->arpcom.ac_if;
1160 
1161 	if (sc->vr_cdata.vr_tx_head == NULL) {
1162 		ifp->if_flags &= ~IFF_OACTIVE;
1163 		sc->vr_cdata.vr_tx_tail = NULL;
1164 		ifp->if_timer = 0;
1165 	}
1166 
1167 	return;
1168 }
1169 
1170 static void
1171 vr_tick(xsc)
1172 	void			*xsc;
1173 {
1174 	struct vr_softc		*sc;
1175 	struct mii_data		*mii;
1176 
1177 	sc = xsc;
1178 	VR_LOCK(sc);
1179 	mii = device_get_softc(sc->vr_miibus);
1180 	mii_tick(mii);
1181 
1182 	sc->vr_stat_ch = timeout(vr_tick, sc, hz);
1183 
1184 	VR_UNLOCK(sc);
1185 
1186 	return;
1187 }
1188 
1189 static void
1190 vr_intr(arg)
1191 	void			*arg;
1192 {
1193 	struct vr_softc		*sc;
1194 	struct ifnet		*ifp;
1195 	u_int16_t		status;
1196 
1197 	sc = arg;
1198 	VR_LOCK(sc);
1199 	ifp = &sc->arpcom.ac_if;
1200 
1201 	/* Suppress unwanted interrupts. */
1202 	if (!(ifp->if_flags & IFF_UP)) {
1203 		vr_stop(sc);
1204 		VR_UNLOCK(sc);
1205 		return;
1206 	}
1207 
1208 	/* Disable interrupts. */
1209 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1210 
1211 	for (;;) {
1212 
1213 		status = CSR_READ_2(sc, VR_ISR);
1214 		if (status)
1215 			CSR_WRITE_2(sc, VR_ISR, status);
1216 
1217 		if ((status & VR_INTRS) == 0)
1218 			break;
1219 
1220 		if (status & VR_ISR_RX_OK)
1221 			vr_rxeof(sc);
1222 
1223 		if ((status & VR_ISR_RX_ERR) ||
1224 		    (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW) ||
1225 		    (status & VR_ISR_RX_DROPPED)) {
1226 			vr_rxeof(sc);
1227 			vr_rxeoc(sc);
1228 		}
1229 
1230 		if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
1231 			vr_reset(sc);
1232 			vr_init(sc);
1233 			break;
1234 		}
1235 
1236 		if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
1237 		    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
1238 			vr_txeof(sc);
1239 			if ((status & VR_ISR_UDFI) ||
1240 			    (status & VR_ISR_TX_ABRT2) ||
1241 			    (status & VR_ISR_TX_ABRT)) {
1242 				ifp->if_oerrors++;
1243 				if (sc->vr_cdata.vr_tx_head != NULL) {
1244 					VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
1245 					VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
1246 				}
1247 			} else
1248 				vr_txeoc(sc);
1249 		}
1250 
1251 	}
1252 
1253 	/* Re-enable interrupts. */
1254 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1255 
1256 	if (ifp->if_snd.ifq_head != NULL) {
1257 		vr_start(ifp);
1258 	}
1259 
1260 	VR_UNLOCK(sc);
1261 
1262 	return;
1263 }
1264 
1265 /*
1266  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1267  * pointers to the fragment pointers.
1268  */
1269 static int
1270 vr_encap(sc, c, m_head)
1271 	struct vr_softc		*sc;
1272 	struct vr_chain		*c;
1273 	struct mbuf		*m_head;
1274 {
1275 	int			frag = 0;
1276 	struct vr_desc		*f = NULL;
1277 	int			total_len;
1278 	struct mbuf		*m;
1279 
1280 	m = m_head;
1281 	total_len = 0;
1282 
1283 	/*
1284 	 * The VIA Rhine wants packet buffers to be longword
1285 	 * aligned, but very often our mbufs aren't. Rather than
1286 	 * waste time trying to decide when to copy and when not
1287 	 * to copy, just do it all the time.
1288 	 */
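	/*
	 * A side effect of the copy is that every packet ends up in a
	 * single contiguous buffer, so each frame occupies exactly one
	 * descriptor with both VR_TXCTL_FIRSTFRAG and VR_TXCTL_LASTFRAG
	 * set.
	 */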
1289 	if (m != NULL) {
1290 		struct mbuf		*m_new = NULL;
1291 
1292 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1293 		if (m_new == NULL) {
1294 			printf("vr%d: no memory for tx list\n", sc->vr_unit);
1295 			return(1);
1296 		}
1297 		if (m_head->m_pkthdr.len > MHLEN) {
1298 			MCLGET(m_new, M_DONTWAIT);
1299 			if (!(m_new->m_flags & M_EXT)) {
1300 				m_freem(m_new);
1301 				printf("vr%d: no memory for tx list\n",
1302 						sc->vr_unit);
1303 				return(1);
1304 			}
1305 		}
1306 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1307 					mtod(m_new, caddr_t));
1308 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1309 		m_freem(m_head);
1310 		m_head = m_new;
1311 		/*
1312 		 * The Rhine chip doesn't auto-pad, so we have to make
1313 		 * sure to pad short frames out to the minimum frame length
1314 		 * ourselves.
1315 		 */
1316 		if (m_head->m_len < VR_MIN_FRAMELEN) {
1317 			m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
1318 			m_new->m_len = m_new->m_pkthdr.len;
1319 		}
1320 		f = c->vr_ptr;
1321 		f->vr_data = vtophys(mtod(m_new, caddr_t));
1322 		f->vr_ctl = total_len = m_new->m_len;
1323 		f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
1324 		f->vr_status = 0;
1325 		frag = 1;
1326 	}
1327 
1328 	c->vr_mbuf = m_head;
1329 	c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
1330 	c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
1331 
1332 	return(0);
1333 }
1334 
1335 /*
1336  * Main transmit routine. Because the Rhine requires longword-aligned
1337  * transmit buffers, vr_encap() copies each outgoing packet into a fresh
1338  * mbuf and places that mbuf's physical address in the descriptor; we
1339  * keep a pointer to the mbuf so it can be freed once transmission completes.
1340  */
1341 
1342 static void
1343 vr_start(ifp)
1344 	struct ifnet		*ifp;
1345 {
1346 	struct vr_softc		*sc;
1347 	struct mbuf		*m_head = NULL;
1348 	struct vr_chain		*cur_tx = NULL, *start_tx;
1349 
1350 	sc = ifp->if_softc;
1351 
1352 	VR_LOCK(sc);
1353 	if (ifp->if_flags & IFF_OACTIVE) {
1354 		VR_UNLOCK(sc);
1355 		return;
1356 	}
1357 
1358 	/*
1359 	 * Check for an available queue slot. If there are none,
1360 	 * punt.
1361 	 */
1362 	if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
1363 		ifp->if_flags |= IFF_OACTIVE;
		VR_UNLOCK(sc);
1364 		return;
1365 	}
1366 
1367 	start_tx = sc->vr_cdata.vr_tx_free;
1368 
1369 	while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
1370 		IF_DEQUEUE(&ifp->if_snd, m_head);
1371 		if (m_head == NULL)
1372 			break;
1373 
1374 		/* Pick a descriptor off the free list. */
1375 		cur_tx = sc->vr_cdata.vr_tx_free;
1376 		sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;
1377 
1378 		/* Pack the data into the descriptor. */
1379 		if (vr_encap(sc, cur_tx, m_head)) {
1380 			IF_PREPEND(&ifp->if_snd, m_head);
1381 			ifp->if_flags |= IFF_OACTIVE;
1382 			cur_tx = NULL;
1383 			break;
1384 		}
1385 
1386 		if (cur_tx != start_tx)
1387 			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1388 
1389 		/*
1390 		 * If there's a BPF listener, bounce a copy of this frame
1391 		 * to him.
1392 		 */
1393 		if (ifp->if_bpf)
1394 			bpf_mtap(ifp, cur_tx->vr_mbuf);
1395 
1396 		VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1397 		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);
1398 	}
1399 
1400 	/*
1401 	 * If there are no frames queued, bail.
1402 	 */
1403 	if (cur_tx == NULL) {
1404 		VR_UNLOCK(sc);
1405 		return;
1406 	}
1407 
1408 	sc->vr_cdata.vr_tx_tail = cur_tx;
1409 
1410 	if (sc->vr_cdata.vr_tx_head == NULL)
1411 		sc->vr_cdata.vr_tx_head = start_tx;
1412 
1413 	/*
1414 	 * Set a timeout in case the chip goes out to lunch.
1415 	 */
1416 	ifp->if_timer = 5;
1417 	VR_UNLOCK(sc);
1418 
1419 	return;
1420 }
1421 
1422 static void
1423 vr_init(xsc)
1424 	void			*xsc;
1425 {
1426 	struct vr_softc		*sc = xsc;
1427 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1428 	struct mii_data		*mii;
1429 	int			i;
1430 
1431 	VR_LOCK(sc);
1432 
1433 	mii = device_get_softc(sc->vr_miibus);
1434 
1435 	/*
1436 	 * Cancel pending I/O and free all RX/TX buffers.
1437 	 */
1438 	vr_stop(sc);
1439 	vr_reset(sc);
1440 
1441 	/*
1442 	 * Set our station address.
1443 	 */
1444 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1445 		CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);
1446 
1447 	/* Set DMA size */
1448 	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
1449 	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);
1450 
1451 	/*
1452 	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
1453 	 * so we must set both.
1454 	 */
1455 	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
1456 	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESHSTORENFWD);
1457 
1458 	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
1459 	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);
1460 
1461 	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
1462 	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);
1463 
1464 	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
1465 	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
1466 
1467 	/* Init circular RX list. */
1468 	if (vr_list_rx_init(sc) == ENOBUFS) {
1469 		printf("vr%d: initialization failed: no "
1470 			"memory for rx buffers\n", sc->vr_unit);
1471 		vr_stop(sc);
1472 		VR_UNLOCK(sc);
1473 		return;
1474 	}
1475 
1476 	/*
1477 	 * Init tx descriptors.
1478 	 */
1479 	vr_list_tx_init(sc);
1480 
1481 	/* If we want promiscuous mode, set the allframes bit. */
1482 	if (ifp->if_flags & IFF_PROMISC)
1483 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1484 	else
1485 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1486 
1487 	/* Set capture broadcast bit to capture broadcast frames. */
1488 	if (ifp->if_flags & IFF_BROADCAST)
1489 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1490 	else
1491 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1492 
1493 	/*
1494 	 * Program the multicast filter, if necessary.
1495 	 */
1496 	vr_setmulti(sc);
1497 
1498 	/*
1499 	 * Load the address of the RX list.
1500 	 */
1501 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1502 
1503 	/* Enable receiver and transmitter. */
1504 	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
1505 				    VR_CMD_TX_ON|VR_CMD_RX_ON|
1506 				    VR_CMD_RX_GO);
1507 
1508 	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));
1509 
1510 	/*
1511 	 * Enable interrupts.
1512 	 */
1513 	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
1514 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1515 
1516 	mii_mediachg(mii);
1517 
1518 	ifp->if_flags |= IFF_RUNNING;
1519 	ifp->if_flags &= ~IFF_OACTIVE;
1520 
1521 	sc->vr_stat_ch = timeout(vr_tick, sc, hz);
1522 
1523 	VR_UNLOCK(sc);
1524 
1525 	return;
1526 }
1527 
1528 /*
1529  * Set media options.
1530  */
1531 static int
1532 vr_ifmedia_upd(ifp)
1533 	struct ifnet		*ifp;
1534 {
1535 	struct vr_softc		*sc;
1536 
1537 	sc = ifp->if_softc;
1538 
1539 	if (ifp->if_flags & IFF_UP)
1540 		vr_init(sc);
1541 
1542 	return(0);
1543 }
1544 
1545 /*
1546  * Report current media status.
1547  */
1548 static void
1549 vr_ifmedia_sts(ifp, ifmr)
1550 	struct ifnet		*ifp;
1551 	struct ifmediareq	*ifmr;
1552 {
1553 	struct vr_softc		*sc;
1554 	struct mii_data		*mii;
1555 
1556 	sc = ifp->if_softc;
1557 	mii = device_get_softc(sc->vr_miibus);
1558 	mii_pollstat(mii);
1559 	ifmr->ifm_active = mii->mii_media_active;
1560 	ifmr->ifm_status = mii->mii_media_status;
1561 
1562 	return;
1563 }
1564 
1565 static int
1566 vr_ioctl(ifp, command, data)
1567 	struct ifnet		*ifp;
1568 	u_long			command;
1569 	caddr_t			data;
1570 {
1571 	struct vr_softc		*sc = ifp->if_softc;
1572 	struct ifreq		*ifr = (struct ifreq *) data;
1573 	struct mii_data		*mii;
1574 	int			error = 0;
1575 
1576 	VR_LOCK(sc);
1577 
1578 	switch(command) {
1579 	case SIOCSIFADDR:
1580 	case SIOCGIFADDR:
1581 	case SIOCSIFMTU:
1582 		error = ether_ioctl(ifp, command, data);
1583 		break;
1584 	case SIOCSIFFLAGS:
1585 		if (ifp->if_flags & IFF_UP) {
1586 			vr_init(sc);
1587 		} else {
1588 			if (ifp->if_flags & IFF_RUNNING)
1589 				vr_stop(sc);
1590 		}
1591 		error = 0;
1592 		break;
1593 	case SIOCADDMULTI:
1594 	case SIOCDELMULTI:
1595 		vr_setmulti(sc);
1596 		error = 0;
1597 		break;
1598 	case SIOCGIFMEDIA:
1599 	case SIOCSIFMEDIA:
1600 		mii = device_get_softc(sc->vr_miibus);
1601 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1602 		break;
1603 	default:
1604 		error = EINVAL;
1605 		break;
1606 	}
1607 
1608 	VR_UNLOCK(sc);
1609 
1610 	return(error);
1611 }
1612 
1613 static void
1614 vr_watchdog(ifp)
1615 	struct ifnet		*ifp;
1616 {
1617 	struct vr_softc		*sc;
1618 
1619 	sc = ifp->if_softc;
1620 
1621 	VR_LOCK(sc);
1622 	ifp->if_oerrors++;
1623 	printf("vr%d: watchdog timeout\n", sc->vr_unit);
1624 
1625 	vr_stop(sc);
1626 	vr_reset(sc);
1627 	vr_init(sc);
1628 
1629 	if (ifp->if_snd.ifq_head != NULL)
1630 		vr_start(ifp);
1631 
1632 	VR_UNLOCK(sc);
1633 
1634 	return;
1635 }
1636 
1637 /*
1638  * Stop the adapter and free any mbufs allocated to the
1639  * RX and TX lists.
1640  */
1641 static void
1642 vr_stop(sc)
1643 	struct vr_softc		*sc;
1644 {
1645 	register int		i;
1646 	struct ifnet		*ifp;
1647 
1648 	VR_LOCK(sc);
1649 
1650 	ifp = &sc->arpcom.ac_if;
1651 	ifp->if_timer = 0;
1652 
1653 	untimeout(vr_tick, sc, sc->vr_stat_ch);
1654 
1655 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1656 	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1657 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1658 	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1659 	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1660 
1661 	/*
1662 	 * Free data in the RX lists.
1663 	 */
1664 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
1665 		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1666 			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1667 			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
1668 		}
1669 	}
1670 	bzero((char *)&sc->vr_ldata->vr_rx_list,
1671 		sizeof(sc->vr_ldata->vr_rx_list));
1672 
1673 	/*
1674 	 * Free the TX list buffers.
1675 	 */
1676 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
1677 		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
1678 			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
1679 			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
1680 		}
1681 	}
1682 
1683 	bzero((char *)&sc->vr_ldata->vr_tx_list,
1684 		sizeof(sc->vr_ldata->vr_tx_list));
1685 
1686 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1687 	VR_UNLOCK(sc);
1688 
1689 	return;
1690 }
1691 
1692 /*
1693  * Stop all chip I/O so that the kernel's probe routines don't
1694  * get confused by errant DMAs when rebooting.
1695  */
1696 static void
1697 vr_shutdown(dev)
1698 	device_t		dev;
1699 {
1700 	struct vr_softc		*sc;
1701 
1702 	sc = device_get_softc(dev);
1703 
1704 	vr_stop(sc);
1705 
1706 	return;
1707 }
1708