1 /*
2  * Copyright (c) 1997, 1998
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 
35 /*
36  * VIA Rhine fast ethernet PCI NIC driver
37  *
38  * Supports various network adapters based on the VIA Rhine
39  * and Rhine II PCI controllers, including the D-Link DFE530TX.
40  * Datasheets are available at http://www.via.com.tw.
41  *
42  * Written by Bill Paul <wpaul@ctr.columbia.edu>
43  * Electrical Engineering Department
44  * Columbia University, New York City
45  */
46 
47 /*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, though less complicated. The controller
50  * uses an MII bus and an external physical layer interface. The
51  * receiver has a one entry perfect filter and a 64-bit hash table
52  * multicast filter. Transmit and receive descriptors are similar
53  * to the tulip.
54  *
55  * The Rhine has a serious flaw in its transmit DMA mechanism:
56  * transmit buffers must be longword aligned. Unfortunately,
57  * FreeBSD doesn't guarantee that mbufs will be filled in starting
58  * at longword boundaries, so we have to do a buffer copy before
59  * transmission.
60  */
61 
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/sockio.h>
65 #include <sys/mbuf.h>
66 #include <sys/malloc.h>
67 #include <sys/kernel.h>
68 #include <sys/socket.h>
69 
70 #include <net/if.h>
71 #include <net/if_arp.h>
72 #include <net/ethernet.h>
73 #include <net/if_dl.h>
74 #include <net/if_media.h>
75 
76 #include <net/bpf.h>
77 
78 #include "opt_bdg.h"
79 #ifdef BRIDGE
80 #include <net/bridge.h>
81 #endif /* BRIDGE */
82 
83 #include <vm/vm.h>              /* for vtophys */
84 #include <vm/pmap.h>            /* for vtophys */
85 #include <machine/clock.h>      /* for DELAY */
86 #include <machine/bus_pio.h>
87 #include <machine/bus_memio.h>
88 #include <machine/bus.h>
89 #include <machine/resource.h>
90 #include <sys/bus.h>
91 #include <sys/rman.h>
92 
93 #include <dev/mii/mii.h>
94 #include <dev/mii/miivar.h>
95 
96 #include <pci/pcireg.h>
97 #include <pci/pcivar.h>
98 
99 #define VR_USEIOSPACE
100 
101 #include <pci/if_vrreg.h>
102 
103 /* "controller miibus0" required.  See GENERIC if you get errors here. */
104 #include "miibus_if.h"
105 
106 #ifndef lint
107 static const char rcsid[] =
108   "$FreeBSD$";
109 #endif
110 
111 /*
112  * Various supported device vendors/types and their names.
113  */
114 static struct vr_type vr_devs[] = {
115 	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
116 		"VIA VT3043 Rhine I 10/100BaseTX" },
117 	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
118 		"VIA VT86C100A Rhine II 10/100BaseTX" },
119 	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
120 		"Delta Electronics Rhine II 10/100BaseTX" },
121 	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
122 		"Addtron Technology Rhine II 10/100BaseTX" },
123 	{ 0, 0, NULL }
124 };
125 
126 static int vr_probe		__P((device_t));
127 static int vr_attach		__P((device_t));
128 static int vr_detach		__P((device_t));
129 
130 static int vr_newbuf		__P((struct vr_softc *,
131 					struct vr_chain_onefrag *,
132 					struct mbuf *));
133 static int vr_encap		__P((struct vr_softc *, struct vr_chain *,
134 						struct mbuf * ));
135 
136 static void vr_rxeof		__P((struct vr_softc *));
137 static void vr_rxeoc		__P((struct vr_softc *));
138 static void vr_txeof		__P((struct vr_softc *));
139 static void vr_txeoc		__P((struct vr_softc *));
140 static void vr_tick		__P((void *));
141 static void vr_intr		__P((void *));
142 static void vr_start		__P((struct ifnet *));
143 static int vr_ioctl		__P((struct ifnet *, u_long, caddr_t));
144 static void vr_init		__P((void *));
145 static void vr_stop		__P((struct vr_softc *));
146 static void vr_watchdog		__P((struct ifnet *));
147 static void vr_shutdown		__P((device_t));
148 static int vr_ifmedia_upd	__P((struct ifnet *));
149 static void vr_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
150 
151 static void vr_mii_sync		__P((struct vr_softc *));
152 static void vr_mii_send		__P((struct vr_softc *, u_int32_t, int));
153 static int vr_mii_readreg	__P((struct vr_softc *, struct vr_mii_frame *));
154 static int vr_mii_writereg	__P((struct vr_softc *, struct vr_mii_frame *));
155 static int vr_miibus_readreg	__P((device_t, int, int));
156 static int vr_miibus_writereg	__P((device_t, int, int, int));
157 static void vr_miibus_statchg	__P((device_t));
158 
159 static void vr_setcfg		__P((struct vr_softc *, int));
160 static u_int8_t vr_calchash	__P((u_int8_t *));
161 static void vr_setmulti		__P((struct vr_softc *));
162 static void vr_reset		__P((struct vr_softc *));
163 static int vr_list_rx_init	__P((struct vr_softc *));
164 static int vr_list_tx_init	__P((struct vr_softc *));
165 
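/*
 * Select which bus resource is used to map the chip's registers:
 * I/O port space when VR_USEIOSPACE is defined (as it is above),
 * memory space otherwise.
 */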
166 #ifdef VR_USEIOSPACE
167 #define VR_RES			SYS_RES_IOPORT
168 #define VR_RID			VR_PCI_LOIO
169 #else
170 #define VR_RES			SYS_RES_MEMORY
171 #define VR_RID			VR_PCI_LOMEM
172 #endif
173 
174 static device_method_t vr_methods[] = {
175 	/* Device interface */
176 	DEVMETHOD(device_probe,		vr_probe),
177 	DEVMETHOD(device_attach,	vr_attach),
178 	DEVMETHOD(device_detach, 	vr_detach),
179 	DEVMETHOD(device_shutdown,	vr_shutdown),
180 
181 	/* bus interface */
182 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
183 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
184 
185 	/* MII interface */
186 	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
187 	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
188 	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),
189 
190 	{ 0, 0 }
191 };
192 
193 static driver_t vr_driver = {
194 	"vr",
195 	vr_methods,
196 	sizeof(struct vr_softc)
197 };
198 
199 static devclass_t vr_devclass;
200 
201 DRIVER_MODULE(if_vr, pci, vr_driver, vr_devclass, 0, 0);
202 DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);
203 
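/*
 * Register access helpers: read-modify-write macros for setting and
 * clearing bits in 8-, 16- and 32-bit CSRs, plus SIO_SET()/SIO_CLR()
 * shorthands for twiddling bits in the MII command register when
 * bit-banging the MII serial interface.
 */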
204 #define VR_SETBIT(sc, reg, x)				\
205 	CSR_WRITE_1(sc, reg,				\
206 		CSR_READ_1(sc, reg) | x)
207 
208 #define VR_CLRBIT(sc, reg, x)				\
209 	CSR_WRITE_1(sc, reg,				\
210 		CSR_READ_1(sc, reg) & ~x)
211 
212 #define VR_SETBIT16(sc, reg, x)				\
213 	CSR_WRITE_2(sc, reg,				\
214 		CSR_READ_2(sc, reg) | x)
215 
216 #define VR_CLRBIT16(sc, reg, x)				\
217 	CSR_WRITE_2(sc, reg,				\
218 		CSR_READ_2(sc, reg) & ~x)
219 
220 #define VR_SETBIT32(sc, reg, x)				\
221 	CSR_WRITE_4(sc, reg,				\
222 		CSR_READ_4(sc, reg) | x)
223 
224 #define VR_CLRBIT32(sc, reg, x)				\
225 	CSR_WRITE_4(sc, reg,				\
226 		CSR_READ_4(sc, reg) & ~x)
227 
228 #define SIO_SET(x)					\
229 	CSR_WRITE_1(sc, VR_MIICMD,			\
230 		CSR_READ_1(sc, VR_MIICMD) | x)
231 
232 #define SIO_CLR(x)					\
233 	CSR_WRITE_1(sc, VR_MIICMD,			\
234 		CSR_READ_1(sc, VR_MIICMD) & ~x)
235 
236 /*
237  * Sync the PHYs by setting data bit and strobing the clock 32 times.
238  */
239 static void vr_mii_sync(sc)
240 	struct vr_softc		*sc;
241 {
242 	register int		i;
243 
244 	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
245 
246 	for (i = 0; i < 32; i++) {
247 		SIO_SET(VR_MIICMD_CLK);
248 		DELAY(1);
249 		SIO_CLR(VR_MIICMD_CLK);
250 		DELAY(1);
251 	}
252 
253 	return;
254 }
255 
256 /*
257  * Clock a series of bits through the MII.
258  */
259 static void vr_mii_send(sc, bits, cnt)
260 	struct vr_softc		*sc;
261 	u_int32_t		bits;
262 	int			cnt;
263 {
264 	int			i;
265 
266 	SIO_CLR(VR_MIICMD_CLK);
267 
268 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(VR_MIICMD_DATAIN);
		} else {
			SIO_CLR(VR_MIICMD_DATAIN);
		}
274 		DELAY(1);
275 		SIO_CLR(VR_MIICMD_CLK);
276 		DELAY(1);
277 		SIO_SET(VR_MIICMD_CLK);
278 	}
279 }
280 
281 /*
 * Read a PHY register through the MII.
283  */
284 static int vr_mii_readreg(sc, frame)
285 	struct vr_softc		*sc;
286 	struct vr_mii_frame	*frame;
287 
288 {
289 	int			i, ack, s;
290 
291 	s = splimp();
292 
293 	/*
294 	 * Set up frame for RX.
295 	 */
296 	frame->mii_stdelim = VR_MII_STARTDELIM;
297 	frame->mii_opcode = VR_MII_READOP;
298 	frame->mii_turnaround = 0;
299 	frame->mii_data = 0;
300 
301 	CSR_WRITE_1(sc, VR_MIICMD, 0);
302 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
303 
304 	/*
305  	 * Turn on data xmit.
306 	 */
307 	SIO_SET(VR_MIICMD_DIR);
308 
309 	vr_mii_sync(sc);
310 
311 	/*
312 	 * Send command/address info.
313 	 */
314 	vr_mii_send(sc, frame->mii_stdelim, 2);
315 	vr_mii_send(sc, frame->mii_opcode, 2);
316 	vr_mii_send(sc, frame->mii_phyaddr, 5);
317 	vr_mii_send(sc, frame->mii_regaddr, 5);
318 
319 	/* Idle bit */
320 	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
321 	DELAY(1);
322 	SIO_SET(VR_MIICMD_CLK);
323 	DELAY(1);
324 
325 	/* Turn off xmit. */
326 	SIO_CLR(VR_MIICMD_DIR);
327 
328 	/* Check for ack */
329 	SIO_CLR(VR_MIICMD_CLK);
330 	DELAY(1);
331 	SIO_SET(VR_MIICMD_CLK);
332 	DELAY(1);
333 	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
334 
335 	/*
336 	 * Now try reading data bits. If the ack failed, we still
337 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
338 	 */
339 	if (ack) {
340 		for(i = 0; i < 16; i++) {
341 			SIO_CLR(VR_MIICMD_CLK);
342 			DELAY(1);
343 			SIO_SET(VR_MIICMD_CLK);
344 			DELAY(1);
345 		}
346 		goto fail;
347 	}
348 
349 	for (i = 0x8000; i; i >>= 1) {
350 		SIO_CLR(VR_MIICMD_CLK);
351 		DELAY(1);
352 		if (!ack) {
353 			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
354 				frame->mii_data |= i;
355 			DELAY(1);
356 		}
357 		SIO_SET(VR_MIICMD_CLK);
358 		DELAY(1);
359 	}
360 
361 fail:
362 
363 	SIO_CLR(VR_MIICMD_CLK);
364 	DELAY(1);
365 	SIO_SET(VR_MIICMD_CLK);
366 	DELAY(1);
367 
368 	splx(s);
369 
370 	if (ack)
371 		return(1);
372 	return(0);
373 }
374 
375 /*
376  * Write to a PHY register through the MII.
377  */
378 static int vr_mii_writereg(sc, frame)
379 	struct vr_softc		*sc;
380 	struct vr_mii_frame	*frame;
381 
382 {
383 	int			s;
384 
385 	s = splimp();
386 
387 	CSR_WRITE_1(sc, VR_MIICMD, 0);
388 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
389 
390 	/*
391 	 * Set up frame for TX.
392 	 */
393 
394 	frame->mii_stdelim = VR_MII_STARTDELIM;
395 	frame->mii_opcode = VR_MII_WRITEOP;
396 	frame->mii_turnaround = VR_MII_TURNAROUND;
397 
398 	/*
399  	 * Turn on data output.
400 	 */
401 	SIO_SET(VR_MIICMD_DIR);
402 
403 	vr_mii_sync(sc);
404 
405 	vr_mii_send(sc, frame->mii_stdelim, 2);
406 	vr_mii_send(sc, frame->mii_opcode, 2);
407 	vr_mii_send(sc, frame->mii_phyaddr, 5);
408 	vr_mii_send(sc, frame->mii_regaddr, 5);
409 	vr_mii_send(sc, frame->mii_turnaround, 2);
410 	vr_mii_send(sc, frame->mii_data, 16);
411 
412 	/* Idle bit. */
413 	SIO_SET(VR_MIICMD_CLK);
414 	DELAY(1);
415 	SIO_CLR(VR_MIICMD_CLK);
416 	DELAY(1);
417 
418 	/*
419 	 * Turn off xmit.
420 	 */
421 	SIO_CLR(VR_MIICMD_DIR);
422 
423 	splx(s);
424 
425 	return(0);
426 }
427 
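/*
 * miibus(4) glue: wrap the low-level bit-bang routines vr_mii_readreg()
 * and vr_mii_writereg() in the read/write/statchg methods expected by
 * the generic MII layer.
 */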
428 static int vr_miibus_readreg(dev, phy, reg)
429 	device_t		dev;
430 	int			phy, reg;
431 {
432 	struct vr_softc		*sc;
433 	struct vr_mii_frame	frame;
434 
435 	sc = device_get_softc(dev);
436 	bzero((char *)&frame, sizeof(frame));
437 
438 	frame.mii_phyaddr = phy;
439 	frame.mii_regaddr = reg;
440 	vr_mii_readreg(sc, &frame);
441 
442 	return(frame.mii_data);
443 }
444 
445 static int vr_miibus_writereg(dev, phy, reg, data)
446 	device_t		dev;
447 	u_int16_t		phy, reg, data;
448 {
449 	struct vr_softc		*sc;
450 	struct vr_mii_frame	frame;
451 
452 	sc = device_get_softc(dev);
453 	bzero((char *)&frame, sizeof(frame));
454 
455 	frame.mii_phyaddr = phy;
456 	frame.mii_regaddr = reg;
457 	frame.mii_data = data;
458 
459 	vr_mii_writereg(sc, &frame);
460 
461 	return(0);
462 }
463 
464 static void vr_miibus_statchg(dev)
465 	device_t		dev;
466 {
467 	struct vr_softc		*sc;
468 	struct mii_data		*mii;
469 
470 	sc = device_get_softc(dev);
471 	mii = device_get_softc(sc->vr_miibus);
472 	vr_setcfg(sc, mii->mii_media_active);
473 
474 	return;
475 }
476 
477 /*
 * Calculate CRC of a multicast group address, return the upper 6 bits.
479  */
480 static u_int8_t vr_calchash(addr)
481 	u_int8_t		*addr;
482 {
483 	u_int32_t		crc, carry;
484 	int			i, j;
485 	u_int8_t		c;
486 
487 	/* Compute CRC for the address value. */
488 	crc = 0xFFFFFFFF; /* initial value */
489 
490 	for (i = 0; i < 6; i++) {
491 		c = *(addr + i);
492 		for (j = 0; j < 8; j++) {
493 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
494 			crc <<= 1;
495 			c >>= 1;
496 			if (carry)
497 				crc = (crc ^ 0x04c11db6) | carry;
498 		}
499 	}
500 
501 	/* return the filter bit position */
502 	return((crc >> 26) & 0x0000003F);
503 }
504 
505 /*
506  * Program the 64-bit multicast hash filter.
507  */
508 static void vr_setmulti(sc)
509 	struct vr_softc		*sc;
510 {
511 	struct ifnet		*ifp;
512 	int			h = 0;
513 	u_int32_t		hashes[2] = { 0, 0 };
514 	struct ifmultiaddr	*ifma;
515 	u_int8_t		rxfilt;
516 	int			mcnt = 0;
517 
518 	ifp = &sc->arpcom.ac_if;
519 
520 	rxfilt = CSR_READ_1(sc, VR_RXCFG);
521 
522 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
523 		rxfilt |= VR_RXCFG_RX_MULTI;
524 		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
525 		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
526 		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
527 		return;
528 	}
529 
530 	/* first, zot all the existing hash bits */
531 	CSR_WRITE_4(sc, VR_MAR0, 0);
532 	CSR_WRITE_4(sc, VR_MAR1, 0);
533 
534 	/* now program new ones */
535 	for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
536 				ifma = ifma->ifma_link.le_next) {
537 		if (ifma->ifma_addr->sa_family != AF_LINK)
538 			continue;
539 		h = vr_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
540 		if (h < 32)
541 			hashes[0] |= (1 << h);
542 		else
543 			hashes[1] |= (1 << (h - 32));
544 		mcnt++;
545 	}
546 
547 	if (mcnt)
548 		rxfilt |= VR_RXCFG_RX_MULTI;
549 	else
550 		rxfilt &= ~VR_RXCFG_RX_MULTI;
551 
552 	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
553 	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
554 	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
555 
556 	return;
557 }
558 
559 /*
 * In order to fiddle with the 'full-duplex' bit in the command
 * register, we first have to put the transmit and/or receive
 * logic in the idle state.
563  */
564 static void vr_setcfg(sc, media)
565 	struct vr_softc		*sc;
566 	int			media;
567 {
568 	int			restart = 0;
569 
570 	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
571 		restart = 1;
572 		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
573 	}
574 
575 	if ((media & IFM_GMASK) == IFM_FDX)
576 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
577 	else
578 		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
579 
580 	if (restart)
581 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
582 
583 	return;
584 }
585 
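/*
 * Issue a software reset and wait (up to VR_TIMEOUT iterations of 10us
 * each) for the reset bit to self-clear.
 */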
586 static void vr_reset(sc)
587 	struct vr_softc		*sc;
588 {
589 	register int		i;
590 
591 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
592 
593 	for (i = 0; i < VR_TIMEOUT; i++) {
594 		DELAY(10);
595 		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
596 			break;
597 	}
598 	if (i == VR_TIMEOUT)
599 		printf("vr%d: reset never completed!\n", sc->vr_unit);
600 
601 	/* Wait a little while for the chip to get its brains in order. */
602 	DELAY(1000);
603 
	return;
605 }
606 
607 /*
608  * Probe for a VIA Rhine chip. Check the PCI vendor and device
609  * IDs against our list and return a device name if we find a match.
610  */
611 static int vr_probe(dev)
612 	device_t		dev;
613 {
614 	struct vr_type		*t;
615 
616 	t = vr_devs;
617 
618 	while(t->vr_name != NULL) {
619 		if ((pci_get_vendor(dev) == t->vr_vid) &&
620 		    (pci_get_device(dev) == t->vr_did)) {
621 			device_set_desc(dev, t->vr_name);
622 			return(0);
623 		}
624 		t++;
625 	}
626 
627 	return(ENXIO);
628 }
629 
630 /*
631  * Attach the interface. Allocate softc structures, do ifmedia
632  * setup and ethernet/BPF attach.
633  */
634 static int vr_attach(dev)
635 	device_t		dev;
636 {
637 	int			i, s;
638 	u_char			eaddr[ETHER_ADDR_LEN];
639 	u_int32_t		command;
640 	struct vr_softc		*sc;
641 	struct ifnet		*ifp;
642 	int			unit, error = 0, rid;
643 
644 	s = splimp();
645 
646 	sc = device_get_softc(dev);
647 	unit = device_get_unit(dev);
	bzero(sc, sizeof(struct vr_softc));
649 
650 	/*
651 	 * Handle power management nonsense.
652 	 */
653 
654 	command = pci_read_config(dev, VR_PCI_CAPID, 4) & 0x000000FF;
655 	if (command == 0x01) {
656 
657 		command = pci_read_config(dev, VR_PCI_PWRMGMTCTRL, 4);
658 		if (command & VR_PSTATE_MASK) {
659 			u_int32_t		iobase, membase, irq;
660 
661 			/* Save important PCI config data. */
662 			iobase = pci_read_config(dev, VR_PCI_LOIO, 4);
663 			membase = pci_read_config(dev, VR_PCI_LOMEM, 4);
664 			irq = pci_read_config(dev, VR_PCI_INTLINE, 4);
665 
666 			/* Reset the power state. */
667 			printf("vr%d: chip is in D%d power mode "
668 			"-- setting to D0\n", unit, command & VR_PSTATE_MASK);
669 			command &= 0xFFFFFFFC;
670 			pci_write_config(dev, VR_PCI_PWRMGMTCTRL, command, 4);
671 
672 			/* Restore PCI config data. */
673 			pci_write_config(dev, VR_PCI_LOIO, iobase, 4);
674 			pci_write_config(dev, VR_PCI_LOMEM, membase, 4);
675 			pci_write_config(dev, VR_PCI_INTLINE, irq, 4);
676 		}
677 	}
678 
679 	/*
680 	 * Map control/status registers.
681 	 */
682 	command = pci_read_config(dev, PCI_COMMAND_STATUS_REG, 4);
683 	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
684 	pci_write_config(dev, PCI_COMMAND_STATUS_REG, command, 4);
685 	command = pci_read_config(dev, PCI_COMMAND_STATUS_REG, 4);
686 
687 #ifdef VR_USEIOSPACE
688 	if (!(command & PCIM_CMD_PORTEN)) {
689 		printf("vr%d: failed to enable I/O ports!\n", unit);
		error = ENXIO;
691 		goto fail;
692 	}
693 #else
694 	if (!(command & PCIM_CMD_MEMEN)) {
695 		printf("vr%d: failed to enable memory mapping!\n", unit);
696 		goto fail;
697 	}
698 #endif
699 
700 	rid = VR_RID;
701 	sc->vr_res = bus_alloc_resource(dev, VR_RES, &rid,
702 	    0, ~0, 1, RF_ACTIVE);
703 
704 	if (sc->vr_res == NULL) {
705 		printf("vr%d: couldn't map ports/memory\n", unit);
706 		error = ENXIO;
707 		goto fail;
708 	}
709 
710 	sc->vr_btag = rman_get_bustag(sc->vr_res);
711 	sc->vr_bhandle = rman_get_bushandle(sc->vr_res);
712 
713 	/* Allocate interrupt */
714 	rid = 0;
715 	sc->vr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
716 	    RF_SHAREABLE | RF_ACTIVE);
717 
718 	if (sc->vr_irq == NULL) {
719 		printf("vr%d: couldn't map interrupt\n", unit);
720 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
721 		error = ENXIO;
722 		goto fail;
723 	}
724 
725 	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET,
726 	    vr_intr, sc, &sc->vr_intrhand);
727 
728 	if (error) {
729 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
730 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
731 		printf("vr%d: couldn't set up irq\n", unit);
732 		goto fail;
733 	}
734 
735 	/* Reset the adapter. */
736 	vr_reset(sc);
737 
738 	/*
739 	 * Get station address. The way the Rhine chips work,
740 	 * you're not allowed to directly access the EEPROM once
741 	 * they've been programmed a special way. Consequently,
742 	 * we need to read the node address from the PAR0 and PAR1
743 	 * registers.
744 	 */
745 	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
746 	DELAY(200);
747 	for (i = 0; i < ETHER_ADDR_LEN; i++)
748 		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
749 
750 	/*
751 	 * A Rhine chip was detected. Inform the world.
752 	 */
753 	printf("vr%d: Ethernet address: %6D\n", unit, eaddr, ":");
754 
755 	sc->vr_unit = unit;
756 	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
757 
758 	sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF,
759 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
760 
761 	if (sc->vr_ldata == NULL) {
762 		printf("vr%d: no memory for list buffers!\n", unit);
763 		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
764 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
765 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
766 		error = ENXIO;
767 		goto fail;
768 	}
769 
770 	bzero(sc->vr_ldata, sizeof(struct vr_list_data));
771 
772 	ifp = &sc->arpcom.ac_if;
773 	ifp->if_softc = sc;
774 	ifp->if_unit = unit;
775 	ifp->if_name = "vr";
776 	ifp->if_mtu = ETHERMTU;
777 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
778 	ifp->if_ioctl = vr_ioctl;
779 	ifp->if_output = ether_output;
780 	ifp->if_start = vr_start;
781 	ifp->if_watchdog = vr_watchdog;
782 	ifp->if_init = vr_init;
783 	ifp->if_baudrate = 10000000;
784 	ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;
785 
786 	/*
787 	 * Do MII setup.
788 	 */
789 	if (mii_phy_probe(dev, &sc->vr_miibus,
790 	    vr_ifmedia_upd, vr_ifmedia_sts)) {
791 		printf("vr%d: MII without any phy!\n", sc->vr_unit);
792 		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
793 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
794 		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
795 		contigfree(sc->vr_ldata,
796 		    sizeof(struct vr_list_data), M_DEVBUF);
797 		error = ENXIO;
798 		goto fail;
799 	}
800 
801 	callout_handle_init(&sc->vr_stat_ch);
802 
803 	/*
804 	 * Call MI attach routines.
805 	 */
806 	if_attach(ifp);
807 	ether_ifattach(ifp);
808 
809 	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
810 
811 fail:
812 	splx(s);
813 	return(error);
814 }
815 
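/*
 * Detach the driver: stop the chip, detach the network interface,
 * delete the miibus child, tear down the interrupt handler and release
 * the bus resources and descriptor memory.
 */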
816 static int vr_detach(dev)
817 	device_t		dev;
818 {
819 	struct vr_softc		*sc;
820 	struct ifnet		*ifp;
821 	int			s;
822 
823 	s = splimp();
824 
825 	sc = device_get_softc(dev);
826 	ifp = &sc->arpcom.ac_if;
827 
828 	vr_stop(sc);
829 	if_detach(ifp);
830 
831 	bus_generic_detach(dev);
832 	device_delete_child(dev, sc->vr_miibus);
833 
834 	bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
835 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
836 	bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
837 
838 	contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF);
839 
840 	splx(s);
841 
842 	return(0);
843 }
844 
845 /*
846  * Initialize the transmit descriptors.
847  */
848 static int vr_list_tx_init(sc)
849 	struct vr_softc		*sc;
850 {
851 	struct vr_chain_data	*cd;
852 	struct vr_list_data	*ld;
853 	int			i;
854 
855 	cd = &sc->vr_cdata;
856 	ld = sc->vr_ldata;
857 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
858 		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
859 		if (i == (VR_TX_LIST_CNT - 1))
860 			cd->vr_tx_chain[i].vr_nextdesc =
861 				&cd->vr_tx_chain[0];
862 		else
863 			cd->vr_tx_chain[i].vr_nextdesc =
864 				&cd->vr_tx_chain[i + 1];
865 	}
866 
867 	cd->vr_tx_free = &cd->vr_tx_chain[0];
868 	cd->vr_tx_tail = cd->vr_tx_head = NULL;
869 
870 	return(0);
871 }
872 
873 
874 /*
875  * Initialize the RX descriptors and allocate mbufs for them. Note that
876  * we arrange the descriptors in a closed ring, so that the last descriptor
877  * points back to the first.
878  */
879 static int vr_list_rx_init(sc)
880 	struct vr_softc		*sc;
881 {
882 	struct vr_chain_data	*cd;
883 	struct vr_list_data	*ld;
884 	int			i;
885 
886 	cd = &sc->vr_cdata;
887 	ld = sc->vr_ldata;
888 
889 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
890 		cd->vr_rx_chain[i].vr_ptr =
891 			(struct vr_desc *)&ld->vr_rx_list[i];
892 		if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS)
893 			return(ENOBUFS);
894 		if (i == (VR_RX_LIST_CNT - 1)) {
895 			cd->vr_rx_chain[i].vr_nextdesc =
896 					&cd->vr_rx_chain[0];
897 			ld->vr_rx_list[i].vr_next =
898 					vtophys(&ld->vr_rx_list[0]);
899 		} else {
900 			cd->vr_rx_chain[i].vr_nextdesc =
901 					&cd->vr_rx_chain[i + 1];
902 			ld->vr_rx_list[i].vr_next =
903 					vtophys(&ld->vr_rx_list[i + 1]);
904 		}
905 	}
906 
907 	cd->vr_rx_head = &cd->vr_rx_chain[0];
908 
909 	return(0);
910 }
911 
912 /*
913  * Initialize an RX descriptor and attach an MBUF cluster.
914  * Note: the length fields are only 11 bits wide, which means the
915  * largest size we can specify is 2047. This is important because
916  * MCLBYTES is 2048, so we have to subtract one otherwise we'll
917  * overflow the field and make a mess.
918  */
919 static int vr_newbuf(sc, c, m)
920 	struct vr_softc		*sc;
921 	struct vr_chain_onefrag	*c;
922 	struct mbuf		*m;
923 {
924 	struct mbuf		*m_new = NULL;
925 
926 	if (m == NULL) {
927 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
928 		if (m_new == NULL) {
929 			printf("vr%d: no memory for rx list "
930 			    "-- packet dropped!\n", sc->vr_unit);
931 			return(ENOBUFS);
932 		}
933 
934 		MCLGET(m_new, M_DONTWAIT);
935 		if (!(m_new->m_flags & M_EXT)) {
936 			printf("vr%d: no memory for rx list "
937 			    "-- packet dropped!\n", sc->vr_unit);
938 			m_freem(m_new);
939 			return(ENOBUFS);
940 		}
941 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
942 	} else {
943 		m_new = m;
944 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
945 		m_new->m_data = m_new->m_ext.ext_buf;
946 	}
947 
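	/*
	 * Offset the start of the cluster by 8 bytes before handing it
	 * to the chip; presumably this keeps the usable buffer within
	 * the 11-bit length limit noted above and leaves a little
	 * alignment slack.
	 */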
948 	m_adj(m_new, sizeof(u_int64_t));
949 
950 	c->vr_mbuf = m_new;
951 	c->vr_ptr->vr_status = VR_RXSTAT;
952 	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
953 	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
954 
955 	return(0);
956 }
957 
958 /*
959  * A frame has been uploaded: pass the resulting mbuf chain up to
960  * the higher level protocols.
961  */
962 static void vr_rxeof(sc)
963 	struct vr_softc		*sc;
964 {
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
968 	struct vr_chain_onefrag	*cur_rx;
969 	int			total_len = 0;
970 	u_int32_t		rxstat;
971 
972 	ifp = &sc->arpcom.ac_if;
973 
974 	while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
975 							VR_RXSTAT_OWN)) {
976 		struct mbuf		*m0 = NULL;
977 
978 		cur_rx = sc->vr_cdata.vr_rx_head;
979 		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
980 		m = cur_rx->vr_mbuf;
981 
982 		/*
983 		 * If an error occurs, update stats, clear the
984 		 * status word and leave the mbuf cluster in place:
985 		 * it should simply get re-used next time this descriptor
986 	 	 * comes up in the ring.
987 		 */
988 		if (rxstat & VR_RXSTAT_RXERR) {
989 			ifp->if_ierrors++;
990 			printf("vr%d: rx error: ", sc->vr_unit);
991 			switch(rxstat & 0x000000FF) {
992 			case VR_RXSTAT_CRCERR:
993 				printf("crc error\n");
994 				break;
995 			case VR_RXSTAT_FRAMEALIGNERR:
996 				printf("frame alignment error\n");
997 				break;
998 			case VR_RXSTAT_FIFOOFLOW:
999 				printf("FIFO overflow\n");
1000 				break;
1001 			case VR_RXSTAT_GIANT:
1002 				printf("received giant packet\n");
1003 				break;
1004 			case VR_RXSTAT_RUNT:
1005 				printf("received runt packet\n");
1006 				break;
1007 			case VR_RXSTAT_BUSERR:
1008 				printf("system bus error\n");
1009 				break;
1010 			case VR_RXSTAT_BUFFERR:
1011 				printf("rx buffer error\n");
1012 				break;
1013 			default:
1014 				printf("unknown rx error\n");
1015 				break;
1016 			}
1017 			vr_newbuf(sc, cur_rx, m);
1018 			continue;
1019 		}
1020 
1021 		/* No errors; receive the packet. */
1022 		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
1023 
1024 		/*
1025 		 * XXX The VIA Rhine chip includes the CRC with every
1026 		 * received frame, and there's no way to turn this
1027 		 * behavior off (at least, I can't find anything in
1028 	 	 * the manual that explains how to do it) so we have
1029 		 * to trim off the CRC manually.
1030 		 */
1031 		total_len -= ETHER_CRC_LEN;
1032 
1033 		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1034 		    total_len + ETHER_ALIGN, 0, ifp, NULL);
1035 		vr_newbuf(sc, cur_rx, m);
1036 		if (m0 == NULL) {
1037 			ifp->if_ierrors++;
1038 			continue;
1039 		}
1040 		m_adj(m0, ETHER_ALIGN);
1041 		m = m0;
1042 
1043 		ifp->if_ipackets++;
1044 		eh = mtod(m, struct ether_header *);
1045 
1046 		/*
1047 		 * Handle BPF listeners. Let the BPF user see the packet, but
1048 		 * don't pass it up to the ether_input() layer unless it's
1049 		 * a broadcast packet, multicast packet, matches our ethernet
1050 		 * address or the interface is in promiscuous mode.
1051 		 */
1052 		if (ifp->if_bpf) {
1053 			bpf_mtap(ifp, m);
1054 			if (ifp->if_flags & IFF_PROMISC &&
1055 				(bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
1056 						ETHER_ADDR_LEN) &&
1057 					(eh->ether_dhost[0] & 1) == 0)) {
1058 				m_freem(m);
1059 				continue;
1060 			}
1061 		}
1062 
1063 #ifdef BRIDGE
1064 		if (do_bridge) {
1065 			struct ifnet		*bdg_ifp;
1066 			bdg_ifp = bridge_in(m);
1067 			if (bdg_ifp != BDG_LOCAL && bdg_ifp != BDG_DROP)
1068 				bdg_forward(&m, bdg_ifp);
1069 			if (((bdg_ifp != BDG_LOCAL) && (bdg_ifp != BDG_BCAST) &&
1070 			    (bdg_ifp != BDG_MCAST)) || bdg_ifp == BDG_DROP) {
1071 				m_freem(m);
1072 				continue;
1073 			}
1074 		}
1075 #endif /* BRIDGE */
1076 
1077 		/* Remove header from mbuf and pass it on. */
1078 		m_adj(m, sizeof(struct ether_header));
1079 		ether_input(ifp, eh, m);
1080 	}
1081 
1082 	return;
1083 }
1084 
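/*
 * RX 'end of channel' handler: drain any frames that have already
 * completed, then restart the receiver at the current head of the
 * RX ring.
 */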
static void vr_rxeoc(sc)
1086 	struct vr_softc		*sc;
1087 {
1088 
1089 	vr_rxeof(sc);
1090 	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1091 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1092 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1093 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1094 
1095 	return;
1096 }
1097 
1098 /*
1099  * A frame was downloaded to the chip. It's safe for us to clean up
1100  * the list buffers.
1101  */
1102 
1103 static void vr_txeof(sc)
1104 	struct vr_softc		*sc;
1105 {
1106 	struct vr_chain		*cur_tx;
1107 	struct ifnet		*ifp;
1108 
1109 	ifp = &sc->arpcom.ac_if;
1110 
1111 	/* Clear the timeout timer. */
1112 	ifp->if_timer = 0;
1113 
1114 	/* Sanity check. */
1115 	if (sc->vr_cdata.vr_tx_head == NULL)
1116 		return;
1117 
1118 	/*
1119 	 * Go through our tx list and free mbufs for those
1120 	 * frames that have been transmitted.
1121 	 */
1122 	while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
1123 		u_int32_t		txstat;
1124 
1125 		cur_tx = sc->vr_cdata.vr_tx_head;
1126 		txstat = cur_tx->vr_ptr->vr_status;
1127 
1128 		if (txstat & VR_TXSTAT_OWN)
1129 			break;
1130 
1131 		if (txstat & VR_TXSTAT_ERRSUM) {
1132 			ifp->if_oerrors++;
1133 			if (txstat & VR_TXSTAT_DEFER)
1134 				ifp->if_collisions++;
1135 			if (txstat & VR_TXSTAT_LATECOLL)
1136 				ifp->if_collisions++;
1137 		}
1138 
		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
1140 
1141 		ifp->if_opackets++;
1142 		if (cur_tx->vr_mbuf != NULL) {
1143 			m_freem(cur_tx->vr_mbuf);
1144 			cur_tx->vr_mbuf = NULL;
1145 		}
1146 
1147 		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
1148 			sc->vr_cdata.vr_tx_head = NULL;
1149 			sc->vr_cdata.vr_tx_tail = NULL;
1150 			break;
1151 		}
1152 
1153 		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
1154 	}
1155 
1156 	return;
1157 }
1158 
1159 /*
1160  * TX 'end of channel' interrupt handler.
1161  */
1162 static void vr_txeoc(sc)
1163 	struct vr_softc		*sc;
1164 {
1165 	struct ifnet		*ifp;
1166 
1167 	ifp = &sc->arpcom.ac_if;
1168 
1169 	ifp->if_timer = 0;
1170 
1171 	if (sc->vr_cdata.vr_tx_head == NULL) {
1172 		ifp->if_flags &= ~IFF_OACTIVE;
1173 		sc->vr_cdata.vr_tx_tail = NULL;
1174 	}
1175 
1176 	return;
1177 }
1178 
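/*
 * One-second timer: drive the MII state machine via mii_tick() and
 * reschedule ourselves.
 */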
1179 static void vr_tick(xsc)
1180 	void			*xsc;
1181 {
1182 	struct vr_softc		*sc;
1183 	struct mii_data		*mii;
1184 	int			s;
1185 
1186 	s = splimp();
1187 
1188 	sc = xsc;
1189 	mii = device_get_softc(sc->vr_miibus);
1190 	mii_tick(mii);
1191 
1192 	sc->vr_stat_ch = timeout(vr_tick, sc, hz);
1193 
1194 	splx(s);
1195 
1196 	return;
1197 }
1198 
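/*
 * Interrupt handler: acknowledge and loop on the interrupt status
 * register, dispatching RX/TX completions, restarting the RX/TX DMA
 * engines after errors and resetting the chip on a PCI bus error.
 * Interrupts are masked for the duration of the loop and the transmit
 * queue is kicked again before returning.
 */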
1199 static void vr_intr(arg)
1200 	void			*arg;
1201 {
1202 	struct vr_softc		*sc;
1203 	struct ifnet		*ifp;
1204 	u_int16_t		status;
1205 
1206 	sc = arg;
1207 	ifp = &sc->arpcom.ac_if;
1208 
	/* Suppress unwanted interrupts. */
1210 	if (!(ifp->if_flags & IFF_UP)) {
1211 		vr_stop(sc);
1212 		return;
1213 	}
1214 
1215 	/* Disable interrupts. */
1216 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1217 
1218 	for (;;) {
1219 
1220 		status = CSR_READ_2(sc, VR_ISR);
1221 		if (status)
1222 			CSR_WRITE_2(sc, VR_ISR, status);
1223 
1224 		if ((status & VR_INTRS) == 0)
1225 			break;
1226 
1227 		if (status & VR_ISR_RX_OK)
1228 			vr_rxeof(sc);
1229 
		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
		    (status & VR_ISR_RX_OFLOW) || (status & VR_ISR_RX_DROPPED)) {
1233 			vr_rxeof(sc);
1234 			vr_rxeoc(sc);
1235 		}
1236 
1237 		if (status & VR_ISR_TX_OK) {
1238 			vr_txeof(sc);
1239 			vr_txeoc(sc);
1240 		}
1241 
		if ((status & VR_ISR_TX_UNDERRUN) || (status & VR_ISR_TX_ABRT)) {
1243 			ifp->if_oerrors++;
1244 			vr_txeof(sc);
1245 			if (sc->vr_cdata.vr_tx_head != NULL) {
1246 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
1247 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
1248 			}
1249 		}
1250 
1251 		if (status & VR_ISR_BUSERR) {
1252 			vr_reset(sc);
1253 			vr_init(sc);
1254 		}
1255 	}
1256 
1257 	/* Re-enable interrupts. */
1258 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1259 
1260 	if (ifp->if_snd.ifq_head != NULL) {
1261 		vr_start(ifp);
1262 	}
1263 
1264 	return;
1265 }
1266 
1267 /*
1268  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1269  * pointers to the fragment pointers.
1270  */
1271 static int vr_encap(sc, c, m_head)
1272 	struct vr_softc		*sc;
1273 	struct vr_chain		*c;
1274 	struct mbuf		*m_head;
1275 {
1276 	int			frag = 0;
1277 	struct vr_desc		*f = NULL;
1278 	int			total_len;
1279 	struct mbuf		*m;
1280 
1281 	m = m_head;
1282 	total_len = 0;
1283 
1284 	/*
1285 	 * The VIA Rhine wants packet buffers to be longword
1286 	 * aligned, but very often our mbufs aren't. Rather than
1287 	 * waste time trying to decide when to copy and when not
1288 	 * to copy, just do it all the time.
1289 	 */
1290 	if (m != NULL) {
1291 		struct mbuf		*m_new = NULL;
1292 
1293 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1294 		if (m_new == NULL) {
			printf("vr%d: no memory for tx list\n", sc->vr_unit);
1296 			return(1);
1297 		}
1298 		if (m_head->m_pkthdr.len > MHLEN) {
1299 			MCLGET(m_new, M_DONTWAIT);
1300 			if (!(m_new->m_flags & M_EXT)) {
1301 				m_freem(m_new);
				printf("vr%d: no memory for tx list\n",
1303 						sc->vr_unit);
1304 				return(1);
1305 			}
1306 		}
1307 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1308 					mtod(m_new, caddr_t));
1309 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1310 		m_freem(m_head);
1311 		m_head = m_new;
1312 		/*
1313 		 * The Rhine chip doesn't auto-pad, so we have to make
1314 		 * sure to pad short frames out to the minimum frame length
1315 		 * ourselves.
1316 		 */
1317 		if (m_head->m_len < VR_MIN_FRAMELEN) {
1318 			m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
1319 			m_new->m_len = m_new->m_pkthdr.len;
1320 		}
1321 		f = c->vr_ptr;
1322 		f->vr_data = vtophys(mtod(m_new, caddr_t));
1323 		f->vr_ctl = total_len = m_new->m_len;
1324 		f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
1325 		f->vr_status = 0;
1326 		frag = 1;
1327 	}
1328 
1329 	c->vr_mbuf = m_head;
1330 	c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
1331 	c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
1332 
1333 	return(0);
1334 }
1335 
1336 /*
 * Main transmit routine. Since the Rhine requires longword-aligned
 * transmit buffers, vr_encap() copies each outgoing packet into a fresh
 * buffer; here we just fill in the descriptors, hand ownership to the
 * chip and remember each mbuf so it can be freed once it has been sent.
1341  */
1342 
1343 static void vr_start(ifp)
1344 	struct ifnet		*ifp;
1345 {
1346 	struct vr_softc		*sc;
1347 	struct mbuf		*m_head = NULL;
1348 	struct vr_chain		*cur_tx = NULL, *start_tx;
1349 
1350 	sc = ifp->if_softc;
1351 
1352 	if (ifp->if_flags & IFF_OACTIVE)
1353 		return;
1354 
1355 	/*
1356 	 * Check for an available queue slot. If there are none,
1357 	 * punt.
1358 	 */
1359 	if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
1360 		ifp->if_flags |= IFF_OACTIVE;
1361 		return;
1362 	}
1363 
1364 	start_tx = sc->vr_cdata.vr_tx_free;
1365 
1366 	while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
1367 		IF_DEQUEUE(&ifp->if_snd, m_head);
1368 		if (m_head == NULL)
1369 			break;
1370 
1371 		/* Pick a descriptor off the free list. */
1372 		cur_tx = sc->vr_cdata.vr_tx_free;
1373 		sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;
1374 
1375 		/* Pack the data into the descriptor. */
1376 		vr_encap(sc, cur_tx, m_head);
1377 
1378 		if (cur_tx != start_tx)
1379 			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1380 
1381 		/*
1382 		 * If there's a BPF listener, bounce a copy of this frame
1383 		 * to him.
1384 		 */
1385 		if (ifp->if_bpf)
1386 			bpf_mtap(ifp, cur_tx->vr_mbuf);
1387 
1388 		VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1389 		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);
1390 	}
1391 
1392 	/*
1393 	 * If there are no frames queued, bail.
1394 	 */
1395 	if (cur_tx == NULL)
1396 		return;
1397 
1398 	sc->vr_cdata.vr_tx_tail = cur_tx;
1399 
1400 	if (sc->vr_cdata.vr_tx_head == NULL)
1401 		sc->vr_cdata.vr_tx_head = start_tx;
1402 
1403 	/*
1404 	 * Set a timeout in case the chip goes out to lunch.
1405 	 */
1406 	ifp->if_timer = 5;
1407 
1408 	return;
1409 }
1410 
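/*
 * Bring the interface up: reset the chip, rebuild the RX and TX
 * descriptor rings, program the RX filter, load the ring addresses,
 * start the transmitter/receiver, enable interrupts and kick off the
 * one-second MII tick.
 */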
1411 static void vr_init(xsc)
1412 	void			*xsc;
1413 {
1414 	struct vr_softc		*sc = xsc;
1415 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1416 	struct mii_data		*mii;
1417 	int			s;
1418 
1419 	s = splimp();
1420 
1421 	mii = device_get_softc(sc->vr_miibus);
1422 
1423 	/*
1424 	 * Cancel pending I/O and free all RX/TX buffers.
1425 	 */
1426 	vr_stop(sc);
1427 	vr_reset(sc);
1428 
1429 	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
1430 	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);
1431 
1432 	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
1433 	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
1434 
1435 	/* Init circular RX list. */
1436 	if (vr_list_rx_init(sc) == ENOBUFS) {
1437 		printf("vr%d: initialization failed: no "
1438 			"memory for rx buffers\n", sc->vr_unit);
1439 		vr_stop(sc);
1440 		(void)splx(s);
1441 		return;
1442 	}
1443 
1444 	/*
1445 	 * Init tx descriptors.
1446 	 */
1447 	vr_list_tx_init(sc);
1448 
1449 	/* If we want promiscuous mode, set the allframes bit. */
1450 	if (ifp->if_flags & IFF_PROMISC)
1451 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1452 	else
1453 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1454 
1455 	/* Set capture broadcast bit to capture broadcast frames. */
1456 	if (ifp->if_flags & IFF_BROADCAST)
1457 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1458 	else
1459 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1460 
1461 	/*
1462 	 * Program the multicast filter, if necessary.
1463 	 */
1464 	vr_setmulti(sc);
1465 
1466 	/*
1467 	 * Load the address of the RX list.
1468 	 */
1469 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1470 
1471 	/* Enable receiver and transmitter. */
1472 	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
1473 				    VR_CMD_TX_ON|VR_CMD_RX_ON|
1474 				    VR_CMD_RX_GO);
1475 
1476 	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));
1477 
1478 	/*
1479 	 * Enable interrupts.
1480 	 */
1481 	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
1482 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1483 
1484 	mii_mediachg(mii);
1485 
1486 	ifp->if_flags |= IFF_RUNNING;
1487 	ifp->if_flags &= ~IFF_OACTIVE;
1488 
1489 	(void)splx(s);
1490 
1491 	sc->vr_stat_ch = timeout(vr_tick, sc, hz);
1492 
1493 	return;
1494 }
1495 
1496 /*
1497  * Set media options.
1498  */
1499 static int vr_ifmedia_upd(ifp)
1500 	struct ifnet		*ifp;
1501 {
1502 	struct vr_softc		*sc;
1503 
1504 	sc = ifp->if_softc;
1505 
1506 	if (ifp->if_flags & IFF_UP)
1507 		vr_init(sc);
1508 
1509 	return(0);
1510 }
1511 
1512 /*
1513  * Report current media status.
1514  */
1515 static void vr_ifmedia_sts(ifp, ifmr)
1516 	struct ifnet		*ifp;
1517 	struct ifmediareq	*ifmr;
1518 {
1519 	struct vr_softc		*sc;
1520 	struct mii_data		*mii;
1521 
1522 	sc = ifp->if_softc;
1523 	mii = device_get_softc(sc->vr_miibus);
1524 	mii_pollstat(mii);
1525 	ifmr->ifm_active = mii->mii_media_active;
1526 	ifmr->ifm_status = mii->mii_media_status;
1527 
1528 	return;
1529 }
1530 
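/*
 * Handle socket ioctls: address and MTU changes are passed to
 * ether_ioctl(), interface flag changes bring the chip up or down,
 * multicast changes reprogram the hash filter, and media ioctls are
 * forwarded to the MII layer.
 */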
1531 static int vr_ioctl(ifp, command, data)
1532 	struct ifnet		*ifp;
1533 	u_long			command;
1534 	caddr_t			data;
1535 {
1536 	struct vr_softc		*sc = ifp->if_softc;
1537 	struct ifreq		*ifr = (struct ifreq *) data;
1538 	struct mii_data		*mii;
1539 	int			s, error = 0;
1540 
1541 	s = splimp();
1542 
1543 	switch(command) {
1544 	case SIOCSIFADDR:
1545 	case SIOCGIFADDR:
1546 	case SIOCSIFMTU:
1547 		error = ether_ioctl(ifp, command, data);
1548 		break;
1549 	case SIOCSIFFLAGS:
1550 		if (ifp->if_flags & IFF_UP) {
1551 			vr_init(sc);
1552 		} else {
1553 			if (ifp->if_flags & IFF_RUNNING)
1554 				vr_stop(sc);
1555 		}
1556 		error = 0;
1557 		break;
1558 	case SIOCADDMULTI:
1559 	case SIOCDELMULTI:
1560 		vr_setmulti(sc);
1561 		error = 0;
1562 		break;
1563 	case SIOCGIFMEDIA:
1564 	case SIOCSIFMEDIA:
1565 		mii = device_get_softc(sc->vr_miibus);
1566 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1567 		break;
1568 	default:
1569 		error = EINVAL;
1570 		break;
1571 	}
1572 
1573 	(void)splx(s);
1574 
1575 	return(error);
1576 }
1577 
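/*
 * TX watchdog: the chip failed to complete a transmission within the
 * timeout set in vr_start(), so reset and reinitialize it, then kick
 * the transmit queue again.
 */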
1578 static void vr_watchdog(ifp)
1579 	struct ifnet		*ifp;
1580 {
1581 	struct vr_softc		*sc;
1582 
1583 	sc = ifp->if_softc;
1584 
1585 	ifp->if_oerrors++;
1586 	printf("vr%d: watchdog timeout\n", sc->vr_unit);
1587 
1588 	vr_stop(sc);
1589 	vr_reset(sc);
1590 	vr_init(sc);
1591 
1592 	if (ifp->if_snd.ifq_head != NULL)
1593 		vr_start(ifp);
1594 
1595 	return;
1596 }
1597 
1598 /*
1599  * Stop the adapter and free any mbufs allocated to the
1600  * RX and TX lists.
1601  */
1602 static void vr_stop(sc)
1603 	struct vr_softc		*sc;
1604 {
1605 	register int		i;
1606 	struct ifnet		*ifp;
1607 
1608 	ifp = &sc->arpcom.ac_if;
1609 	ifp->if_timer = 0;
1610 
1611 	untimeout(vr_tick, sc, sc->vr_stat_ch);
1612 
1613 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1614 	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1615 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1616 	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1617 	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1618 
1619 	/*
1620 	 * Free data in the RX lists.
1621 	 */
1622 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
1623 		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1624 			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1625 			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
1626 		}
1627 	}
1628 	bzero((char *)&sc->vr_ldata->vr_rx_list,
1629 		sizeof(sc->vr_ldata->vr_rx_list));
1630 
1631 	/*
1632 	 * Free the TX list buffers.
1633 	 */
1634 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
1635 		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
1636 			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
1637 			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
1638 		}
1639 	}
1640 
1641 	bzero((char *)&sc->vr_ldata->vr_tx_list,
1642 		sizeof(sc->vr_ldata->vr_tx_list));
1643 
1644 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1645 
1646 	return;
1647 }
1648 
1649 /*
1650  * Stop all chip I/O so that the kernel's probe routines don't
1651  * get confused by errant DMAs when rebooting.
1652  */
1653 static void vr_shutdown(dev)
1654 	device_t		dev;
1655 {
1656 	struct vr_softc		*sc;
1657 
1658 	sc = device_get_softc(dev);
1659 
1660 	vr_stop(sc);
1661 
1662 	return;
1663 }
1664