xref: /titanic_44/usr/src/uts/common/io/vr/vr.c (revision 104d3bde5b4ac46904f144d3676110fc57a69603)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/stream.h>
29 #include <sys/strsun.h>
30 #include <sys/stat.h>
31 #include <sys/pci.h>
32 #include <sys/modctl.h>
33 #include <sys/kstat.h>
34 #include <sys/ethernet.h>
35 #include <sys/devops.h>
36 #include <sys/debug.h>
37 #include <sys/conf.h>
38 #include <sys/mac.h>
39 #include <sys/mac_provider.h>
40 #include <sys/mac_ether.h>
41 #include <sys/sysmacros.h>
42 #include <sys/dditypes.h>
43 #include <sys/ddi.h>
44 #include <sys/sunddi.h>
45 #include <sys/miiregs.h>
46 #include <sys/byteorder.h>
47 #include <sys/note.h>
48 #include <sys/vlan.h>
49 
50 #include "vr.h"
51 #include "vr_impl.h"
52 
53 /*
54  * VR in a nutshell
55  * The card uses two rings of data structures to communicate with the host.
56  * These are referred to as "descriptor rings" and there is one for transmit
57  * (TX) and one for receive (RX).
58  *
59  * The driver uses a "DMA buffer" data type for mapping to those descriptor
60  * rings. This is a structure with handles and a DMA'able buffer attached to it.
61  *
62  * Receive
 * The receive ring is filled with DMA buffers. Received packets are copied
 * into newly allocated mblks and passed upstream.
65  *
66  * Transmit
67  * Each transmit descriptor has a DMA buffer attached to it. The data of TX
68  * packets is copied into the DMA buffer which is then enqueued for
69  * transmission.
70  *
71  * Reclaim of transmitted packets is done as a result of a transmit completion
72  * interrupt which is generated 3 times per ring at minimum.
73  */
74 
#if defined(DEBUG)
/* Nonzero enables VR_DEBUG() output (checked at each call site). */
uint32_t	vrdebug = 1;
/* Routes debug output through the function returned by vr_debug(). */
#define	VR_DEBUG(args)	do {				\
		if (vrdebug > 0)			\
			(*vr_debug()) args;		\
			_NOTE(CONSTANTCONDITION)	\
		} while (0)
static	void	vr_prt(const char *fmt, ...);
	void	(*vr_debug())(const char *fmt, ...);
#else
/* Non-DEBUG build: VR_DEBUG() expands to an empty statement. */
#define	VR_DEBUG(args)	do ; _NOTE(CONSTANTCONDITION) while (0)
#endif

/* Module identification string reported to the system. */
static char vr_ident[] = "VIA Rhine Ethernet v1.42";
89 
90 /*
91  * Attributes for accessing registers and memory descriptors for this device.
92  */
static ddi_device_acc_attr_t vr_dev_dma_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,	/* descriptors are little-endian structures */
	DDI_STRICTORDER_ACC	/* no reordering of register/desc accesses */
};
98 
99 /*
100  * Attributes for accessing data.
101  */
static ddi_device_acc_attr_t vr_data_dma_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,	/* packet data is a raw byte stream */
	DDI_STRICTORDER_ACC
};
107 
/*
 * DMA attributes for descriptors for communication with the device.
 * This driver assumes that all descriptors of one ring fit in one consecutive
 * memory area of max 4K (256 descriptors) that does not cross a page boundary.
 * Therefore, we request 4K alignment.
 */
static ddi_dma_attr_t vr_dev_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0,				/* low DMA address range */
	0xFFFFFFFF,			/* high DMA address range */
	0x7FFFFFFF,			/* DMA counter register */
	0x1000,				/* 4K alignment: whole ring in 1 page */
	0x7F,				/* DMA burstsizes */
	1,				/* min effective DMA size */
	0xFFFFFFFF,			/* max DMA xfer size */
	0xFFFFFFFF,			/* segment boundary */
	1,				/* s/g list length: one cookie only */
	1,				/* granularity of device */
	0				/* DMA transfer flags */
};
128 
/*
 * DMA attributes for the data moved to/from the device.
 * Note that the alignment is set to 2K so that a 1500 byte packet never
 * crosses a page boundary and thus that a DMA transfer is not split up in
 * multiple cookies with a 4K/8K pagesize.
 */
static ddi_dma_attr_t vr_data_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0,				/* low DMA address range */
	0xFFFFFFFF,			/* high DMA address range */
	0x7FFFFFFF,			/* DMA counter register */
	0x800,				/* 2K alignment: packet in 1 cookie */
	0xfff,				/* DMA burstsizes */
	1,				/* min effective DMA size */
	0xFFFFFFFF,			/* max DMA xfer size */
	0xFFFFFFFF,			/* segment boundary */
	1,				/* s/g list length: one cookie only */
	1,				/* granularity of device */
	0				/* DMA transfer flags */
};
149 
/*
 * GLDv3 MAC layer callback vector; the first field flags which of the
 * optional entry points (here setprop/getprop) are implemented.
 */
static mac_callbacks_t vr_mac_callbacks = {
	MC_SETPROP|MC_GETPROP,	/* Which optional callbacks are set */
	vr_mac_getstat,		/* Get the value of a statistic */
	vr_mac_start,		/* Start the device */
	vr_mac_stop,		/* Stop the device */
	vr_mac_set_promisc,	/* Enable or disable promiscuous mode */
	vr_mac_set_multicast,	/* Enable or disable a multicast addr */
	vr_mac_set_ether_addr,	/* Set the unicast MAC address */
	vr_mac_tx_enqueue_list,	/* Transmit a packet */
	NULL,			/* Process an unknown ioctl */
	NULL,			/* Get capability information */
	NULL,			/* Open the device */
	NULL,			/* Close the device */
	vr_mac_setprop,		/* Set properties of the device */
	vr_mac_getprop		/* Get properties of the device */
};
166 
167 /*
168  * Table with bugs and features for each incarnation of the card.
169  */
static const chip_info_t vr_chip_info [] = {
	/*
	 * Fields: revmin, revmax (matched against the PCI revision in
	 * vr_bus_config()), name, bug mask, feature mask.
	 * Entry 0 doubles as the fallback for unrecognized revisions.
	 */
	{
		0x0, 0x0,
		"VIA Rhine Fast Ethernet",
		(VR_BUG_NO_MEMIO),
		(VR_FEATURE_NONE)
	},
	{
		0x04, 0x21,
		"VIA VT86C100A Fast Ethernet",
		(VR_BUG_NEEDMODE2PCEROPT | VR_BUG_NO_TXQUEUEING |
		    VR_BUG_NEEDMODE10T | VR_BUG_TXALIGN | VR_BUG_NO_MEMIO |
		    VR_BUG_MIIPOLLSTOP),
		(VR_FEATURE_NONE)
	},
	{
		0x40, 0x41,
		"VIA VT6102-A Rhine II Fast Ethernet",
		(VR_BUG_NEEDMODE2PCEROPT),
		(VR_FEATURE_RX_PAUSE_CAP)
	},
	{
		0x42, 0x7f,
		"VIA VT6102-C Rhine II Fast Ethernet",
		(VR_BUG_NEEDMODE2PCEROPT),
		(VR_FEATURE_RX_PAUSE_CAP)
	},
	{
		0x80, 0x82,
		"VIA VT6105-A Rhine III Fast Ethernet",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
	},
	{
		0x83, 0x89,
		"VIA VT6105-B Rhine III Fast Ethernet",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
	},
	{
		0x8a, 0x8b,
		"VIA VT6105-LOM Rhine III Fast Ethernet",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
	},
	{
		0x8c, 0x8c,
		"VIA VT6107-A0 Rhine III Fast Ethernet",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
	},
	{
		0x8d, 0x8f,
		"VIA VT6107-A1 Rhine III Fast Ethernet",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
		    VR_FEATURE_MRDLNMULTIPLE)
	},
	{
		0x90, 0x93,
		"VIA VT6105M-A0 Rhine III Fast Ethernet Management Adapter",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
		    VR_FEATURE_TXCHKSUM | VR_FEATURE_RXCHKSUM |
		    VR_FEATURE_CAMSUPPORT | VR_FEATURE_VLANTAGGING |
		    VR_FEATURE_MIBCOUNTER)
	},
	{
		0x94, 0xff,
		"VIA VT6105M-B1 Rhine III Fast Ethernet Management Adapter",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
		    VR_FEATURE_TXCHKSUM | VR_FEATURE_RXCHKSUM |
		    VR_FEATURE_CAMSUPPORT | VR_FEATURE_VLANTAGGING |
		    VR_FEATURE_MIBCOUNTER)
	}
};
247 
248 /*
249  * Function prototypes
250  */
251 static	vr_result_t	vr_add_intr(vr_t *vrp);
252 static	void		vr_remove_intr(vr_t *vrp);
253 static	int32_t		vr_cam_index(vr_t *vrp, const uint8_t *maddr);
254 static	uint32_t	ether_crc_be(const uint8_t *address);
255 static	void		vr_tx_enqueue_msg(vr_t *vrp, mblk_t *mp);
256 static	void		vr_log(vr_t *vrp, int level, const char *fmt, ...);
257 static	int		vr_resume(dev_info_t *devinfo);
258 static	int		vr_suspend(dev_info_t *devinfo);
259 static	vr_result_t	vr_bus_config(vr_t *vrp);
260 static	void		vr_bus_unconfig(vr_t *vrp);
261 static	void		vr_reset(vr_t *vrp);
262 static	int		vr_start(vr_t *vrp);
263 static	int		vr_stop(vr_t *vrp);
264 static	vr_result_t	vr_rings_init(vr_t *vrp);
265 static	void		vr_rings_fini(vr_t *vrp);
266 static	vr_result_t	vr_alloc_ring(vr_t *vrp, vr_ring_t *r, size_t n);
267 static	void		vr_free_ring(vr_ring_t *r, size_t n);
268 static	vr_result_t	vr_rxring_init(vr_t *vrp);
269 static	void		vr_rxring_fini(vr_t *vrp);
270 static	vr_result_t	vr_txring_init(vr_t *vrp);
271 static	void		vr_txring_fini(vr_t *vrp);
272 static	vr_result_t	vr_alloc_dmabuf(vr_t *vrp, vr_data_dma_t *dmap,
273 			    uint_t flags);
274 static	void		vr_free_dmabuf(vr_data_dma_t *dmap);
275 static	void		vr_param_init(vr_t *vrp);
276 static	mblk_t		*vr_receive(vr_t *vrp);
277 static	void		vr_tx_reclaim(vr_t *vrp);
278 static	void		vr_periodic(void *p);
279 static	void		vr_error(vr_t *vrp);
280 static	void		vr_phy_read(vr_t *vrp, int offset, uint16_t *value);
281 static	void		vr_phy_write(vr_t *vrp, int offset, uint16_t value);
282 static	void		vr_phy_autopoll_disable(vr_t *vrp);
283 static	void		vr_phy_autopoll_enable(vr_t *vrp);
284 static	void		vr_link_init(vr_t *vrp);
285 static	void		vr_link_state(vr_t *vrp);
286 static	void		vr_kstats_init(vr_t *vrp);
287 static	int		vr_update_kstats(kstat_t *ksp, int access);
288 static	void		vr_remove_kstats(vr_t *vrp);
289 
/*
 * attach(9E) entry point.
 * Handles both resume and first-time attach. On plain attach, driver state
 * is built bottom-up (bus access, parameters, rings, kstats, interrupt,
 * locks, MAC registration) and unwound in reverse through the fail* labels,
 * which deliberately fall through into one another.
 */
static int
vr_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	vr_t		*vrp;
	mac_register_t	*macreg;

	if (cmd == DDI_RESUME)
		return (vr_resume(devinfo));
	else if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/*
	 * Attach: allocate the per-instance soft state and link it to the
	 * devinfo node.
	 */
	vrp = kmem_zalloc(sizeof (vr_t), KM_SLEEP);
	ddi_set_driver_private(devinfo, vrp);
	vrp->devinfo = devinfo;

	/*
	 * Store the name+instance of the module (used as a prefix by
	 * vr_log()).
	 */
	(void) snprintf(vrp->ifname, sizeof (vrp->ifname), "%s%d",
	    MODULENAME, ddi_get_instance(devinfo));

	/*
	 * Bus initialization: map the register sets and identify the chip.
	 */
	if (vr_bus_config(vrp) != VR_SUCCESS) {
		vr_log(vrp, CE_WARN, "vr_bus_config failed");
		goto fail0;
	}

	/*
	 * Initialize default parameters.
	 */
	vr_param_init(vrp);

	/*
	 * Setup the descriptor rings.
	 */
	if (vr_rings_init(vrp) != VR_SUCCESS) {
		vr_log(vrp, CE_WARN, "vr_rings_init failed");
		goto fail1;
	}

	/*
	 * Initialize kstats.
	 */
	vr_kstats_init(vrp);

	/*
	 * Add interrupt to the OS. This also yields intr_pri, which is
	 * needed to initialize intrlock below.
	 */
	if (vr_add_intr(vrp) != VR_SUCCESS) {
		vr_log(vrp, CE_WARN, "vr_add_intr failed in attach");
		goto fail3;
	}

	/*
	 * Add mutexes.
	 */
	mutex_init(&vrp->intrlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(vrp->intr_pri));
	mutex_init(&vrp->oplock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&vrp->tx.lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Enable interrupt.
	 */
	if (ddi_intr_enable(vrp->intr_hdl) != DDI_SUCCESS) {
		vr_log(vrp, CE_NOTE, "ddi_intr_enable failed");
		goto fail5;
	}

	/*
	 * Register with parent, mac.
	 */
	if ((macreg = mac_alloc(MAC_VERSION)) == NULL) {
		vr_log(vrp, CE_WARN, "mac_alloc failed in attach");
		goto fail6;
	}

	macreg->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macreg->m_driver = vrp;
	macreg->m_dip = devinfo;
	macreg->m_src_addr = vrp->vendor_ether_addr;
	macreg->m_callbacks = &vr_mac_callbacks;
	macreg->m_min_sdu = 0;
	macreg->m_max_sdu = ETHERMTU;
	macreg->m_margin = VLAN_TAGSZ;

	if (mac_register(macreg, &vrp->machdl) != 0) {
		vr_log(vrp, CE_WARN, "mac_register failed in attach");
		goto fail7;
	}
	mac_free(macreg);
	return (DDI_SUCCESS);

	/*
	 * Teardown: each label undoes one setup step and falls through to
	 * the labels below it, reversing the setup order above. fail2 is
	 * reached only by fall-through.
	 */
fail7:
	mac_free(macreg);
fail6:
	(void) ddi_intr_disable(vrp->intr_hdl);
fail5:
	mutex_destroy(&vrp->tx.lock);
	mutex_destroy(&vrp->oplock);
	mutex_destroy(&vrp->intrlock);
	vr_remove_intr(vrp);
fail3:
	vr_remove_kstats(vrp);
fail2:
	vr_rings_fini(vrp);
fail1:
	vr_bus_unconfig(vrp);
fail0:
	kmem_free(vrp, sizeof (vr_t));
	return (DDI_FAILURE);
}
407 
/*
 * detach(9E) entry point.
 * Refuses to detach while the chip is running or while mac_unregister()
 * fails; otherwise tears down everything vr_attach() created, in reverse
 * order.
 */
static int
vr_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	vr_t		*vrp;

	vrp = ddi_get_driver_private(devinfo);

	if (cmd == DDI_SUSPEND)
		return (vr_suspend(devinfo));
	else if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	/* The device must have been stopped before we can detach. */
	if (vrp->chip.state == CHIPSTATE_RUNNING)
		return (DDI_FAILURE);

	/*
	 * Try to un-register from the MAC layer; bail out if that fails.
	 */
	if (mac_unregister(vrp->machdl) != 0)
		return (DDI_FAILURE);

	/* Reverse of the attach sequence. */
	(void) ddi_intr_disable(vrp->intr_hdl);
	vr_remove_intr(vrp);
	mutex_destroy(&vrp->tx.lock);
	mutex_destroy(&vrp->oplock);
	mutex_destroy(&vrp->intrlock);
	vr_remove_kstats(vrp);
	vr_rings_fini(vrp);
	vr_bus_unconfig(vrp);
	kmem_free(vrp, sizeof (vr_t));
	return (DDI_SUCCESS);
}
440 
/*
 * quiesce(9E): silence the card for fast reboot.
 * Only device register pokes are performed here; no locks are taken and
 * no memory is freed, per the quiesce(9E) contract.
 */
int
vr_quiesce(dev_info_t *dev_info)
{
	vr_t	*vrp;

	vrp = (vr_t *)ddi_get_driver_private(dev_info);

	/*
	 * Stop interrupts by masking both interrupt cause registers.
	 */
	VR_PUT16(vrp->acc_reg, VR_ICR0, 0);
	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);

	/*
	 * Stop DMA so the chip no longer touches host memory.
	 */
	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_STOP);
	return (DDI_SUCCESS);
}
463 
/*
 * Add an interrupt for our device to the OS.
 * Allocates a single fixed (legacy) interrupt, attaches vr_intr() as the
 * handler and stores the interrupt priority in intr_pri (later used to
 * initialize intrlock). Each failure path unwinds exactly the steps that
 * already succeeded, in reverse order.
 */
static vr_result_t
vr_add_intr(vr_t *vrp)
{
	int	nintrs;
	int	rc;

	rc = ddi_intr_alloc(vrp->devinfo, &vrp->intr_hdl,
	    DDI_INTR_TYPE_FIXED,	/* type */
	    0,			/* number */
	    1,			/* count */
	    &nintrs,		/* actualp */
	    DDI_INTR_ALLOC_STRICT);

	if (rc != DDI_SUCCESS) {
		vr_log(vrp, CE_NOTE, "ddi_intr_alloc failed: %d", rc);
		return (VR_FAILURE);
	}

	rc = ddi_intr_add_handler(vrp->intr_hdl, vr_intr, vrp, NULL);
	if (rc != DDI_SUCCESS) {
		vr_log(vrp, CE_NOTE, "ddi_intr_add_handler failed");
		/* Unwind: free the allocated interrupt. */
		if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
			vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
		return (VR_FAILURE);
	}

	rc = ddi_intr_get_pri(vrp->intr_hdl, &vrp->intr_pri);
	if (rc != DDI_SUCCESS) {
		vr_log(vrp, CE_NOTE, "ddi_intr_get_pri failed");
		/* Unwind: remove the handler, then free the interrupt. */
		if (ddi_intr_remove_handler(vrp->intr_hdl) != DDI_SUCCESS)
			vr_log(vrp, CE_NOTE, "ddi_intr_remove_handler failed");

		if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
			vr_log(vrp, CE_NOTE, "ddi_intr_free failed");

		return (VR_FAILURE);
	}
	return (VR_SUCCESS);
}
506 
507 /*
508  * Remove our interrupt from the OS.
509  */
510 static void
511 vr_remove_intr(vr_t *vrp)
512 {
513 	if (ddi_intr_remove_handler(vrp->intr_hdl) != DDI_SUCCESS)
514 		vr_log(vrp, CE_NOTE, "ddi_intr_remove_handler failed");
515 
516 	if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
517 		vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
518 }
519 
520 /*
521  * Resume operation after suspend.
522  */
523 static int
524 vr_resume(dev_info_t *devinfo)
525 {
526 	vr_t *vrp;
527 
528 	vrp = (vr_t *)ddi_get_driver_private(devinfo);
529 	mutex_enter(&vrp->oplock);
530 	if (vrp->chip.state == CHIPSTATE_SUSPENDED_RUNNING)
531 		vr_start(vrp);
532 	mutex_exit(&vrp->oplock);
533 	return (DDI_SUCCESS);
534 }
535 
536 /*
537  * Suspend operation.
538  */
539 static int
540 vr_suspend(dev_info_t *devinfo)
541 {
542 	vr_t *vrp;
543 
544 	vrp = (vr_t *)ddi_get_driver_private(devinfo);
545 	mutex_enter(&vrp->oplock);
546 	if (vrp->chip.state == CHIPSTATE_RUNNING) {
547 		(void) vr_stop(vrp);
548 		vrp->chip.state = CHIPSTATE_SUSPENDED_RUNNING;
549 	}
550 	mutex_exit(&vrp->oplock);
551 	return (DDI_SUCCESS);
552 }
553 
/*
 * Initial bus- and device configuration during attach(9E):
 * map all PCI register sets, identify the chip revision against
 * vr_chip_info[], select the preferred access handle and read the
 * factory MAC address.
 */
static vr_result_t
vr_bus_config(vr_t *vrp)
{
	uint32_t		addr;
	int			n, nsets, rc;
	uint_t			elem;
	pci_regspec_t		*regs;

	/*
	 * Get the reg property which describes the various access methods.
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, vrp->devinfo,
	    0, "reg", (int **)&regs, &elem) != DDI_PROP_SUCCESS) {
		vr_log(vrp, CE_WARN, "Can't get reg property");
		return (VR_FAILURE);
	}
	nsets = (elem * sizeof (uint_t)) / sizeof (pci_regspec_t);

	/*
	 * Setup access to all available sets.
	 */
	vrp->nsets = nsets;
	vrp->regset = kmem_zalloc(nsets * sizeof (vr_acc_t), KM_SLEEP);
	for (n = 0; n < nsets; n++) {
		rc = ddi_regs_map_setup(vrp->devinfo, n,
		    &vrp->regset[n].addr, 0, 0,
		    &vr_dev_dma_accattr,
		    &vrp->regset[n].hdl);
		if (rc != DDI_SUCCESS) {
			vr_log(vrp, CE_NOTE,
			    "Setup of register set %d failed", n);
			/* Unmap the sets mapped so far. */
			while (--n >= 0)
				ddi_regs_map_free(&vrp->regset[n].hdl);
			kmem_free(vrp->regset, nsets * sizeof (vr_acc_t));
			ddi_prop_free(regs);
			return (VR_FAILURE);
		}
		/* Keep a copy of the regspec for address-type matching. */
		bcopy(&regs[n], &vrp->regset[n].reg, sizeof (pci_regspec_t));
	}
	ddi_prop_free(regs);

	/*
	 * Assign type-named pointers to the register sets; the first set
	 * of each address space type wins.
	 */
	for (n = 0; n < nsets; n++) {
		addr = vrp->regset[n].reg.pci_phys_hi & PCI_REG_ADDR_M;
		if (addr == PCI_ADDR_CONFIG && vrp->acc_cfg == NULL)
			vrp->acc_cfg = &vrp->regset[n];
		else if (addr == PCI_ADDR_IO && vrp->acc_io == NULL)
			vrp->acc_io = &vrp->regset[n];
		else if (addr == PCI_ADDR_MEM32 && vrp->acc_mem == NULL)
			vrp->acc_mem = &vrp->regset[n];
	}

	/*
	 * Assure there is one of each type.
	 */
	if (vrp->acc_cfg == NULL ||
	    vrp->acc_io == NULL ||
	    vrp->acc_mem == NULL) {
		for (n = 0; n < nsets; n++)
			ddi_regs_map_free(&vrp->regset[n].hdl);
		kmem_free(vrp->regset, nsets * sizeof (vr_acc_t));
		vr_log(vrp, CE_WARN,
		    "Config-, I/O- and memory sets not available");
		return (VR_FAILURE);
	}

	/*
	 * Store vendor/device/revision.
	 * NOTE(review): revision is read with a 16-bit access at the 8-bit
	 * PCI_CONF_REVID offset; this relies on the adjacent byte (class
	 * code prog-if) being zero for this device — confirm.
	 */
	vrp->chip.vendor = VR_GET16(vrp->acc_cfg, PCI_CONF_VENID);
	vrp->chip.device = VR_GET16(vrp->acc_cfg, PCI_CONF_DEVID);
	vrp->chip.revision = VR_GET16(vrp->acc_cfg, PCI_CONF_REVID);

	/*
	 * Copy the matching chip_info_t structure.
	 */
	elem = sizeof (vr_chip_info) / sizeof (chip_info_t);
	for (n = 0; n < elem; n++) {
		if (vrp->chip.revision >= vr_chip_info[n].revmin &&
		    vrp->chip.revision <= vr_chip_info[n].revmax) {
			bcopy((void*)&vr_chip_info[n],
			    (void*)&vrp->chip.info,
			    sizeof (chip_info_t));
			break;
		}
	}

	/*
	 * If we didn't find a chip_info_t for this card, copy the first
	 * entry of the info structures. This is a generic Rhine with no
	 * bugs and no features.
	 */
	if (vrp->chip.info.name == NULL) {
		bcopy((void*)&vr_chip_info[0],
		    (void*) &vrp->chip.info,
		    sizeof (chip_info_t));
	}

	/*
	 * Tell what is found.
	 */
	vr_log(vrp, CE_NOTE, "pci%d,%d,%d: %s, revision 0x%0x",
	    PCI_REG_BUS_G(vrp->acc_cfg->reg.pci_phys_hi),
	    PCI_REG_DEV_G(vrp->acc_cfg->reg.pci_phys_hi),
	    PCI_REG_FUNC_G(vrp->acc_cfg->reg.pci_phys_hi),
	    vrp->chip.info.name,
	    vrp->chip.revision);

	/*
	 * Assure that the device is prepared for memory space accesses.
	 * This should be the default as the device advertises memory
	 * access in its BARs. However, my VT6102 on a EPIA CL board doesn't
	 * and thus we explicitly enable it.
	 */
	VR_SETBIT8(vrp->acc_io, VR_CFGD, VR_CFGD_MMIOEN);

	/*
	 * Setup a handle for regular usage, prefer memory space accesses
	 * unless the chip is known to be broken for memory-mapped I/O.
	 */
	if (vrp->acc_mem != NULL &&
	    (vrp->chip.info.bugs & VR_BUG_NO_MEMIO) == 0)
		vrp->acc_reg = vrp->acc_mem;
	else
		vrp->acc_reg = vrp->acc_io;

	/*
	 * Store the vendor's MAC address, read byte-by-byte from the
	 * station address registers.
	 */
	for (n = 0; n < ETHERADDRL; n++) {
		vrp->vendor_ether_addr[n] = VR_GET8(vrp->acc_reg,
		    VR_ETHERADDR + n);
	}
	return (VR_SUCCESS);
}
693 
694 static void
695 vr_bus_unconfig(vr_t *vrp)
696 {
697 	uint_t	n;
698 
699 	/*
700 	 * Free the register access handles.
701 	 */
702 	for (n = 0; n < vrp->nsets; n++)
703 		ddi_regs_map_free(&vrp->regset[n].hdl);
704 	kmem_free(vrp->regset, vrp->nsets * sizeof (vr_acc_t));
705 }
706 
/*
 * Initialize parameter structures: build the set of link capabilities we
 * want to advertise, then mask out whatever the PHY (and potentially the
 * MAC) cannot do.
 */
static void
vr_param_init(vr_t *vrp)
{
	/*
	 * Initialize default link configuration parameters.
	 * anadv_en is the bitset of abilities we would like to advertise,
	 * seeded with bit 0 to select 802.3 autonegotiation.
	 */
	vrp->param.an_en = VR_LINK_AUTONEG_ON;
	vrp->param.anadv_en = 1; /* Select 802.3 autonegotiation */
	vrp->param.anadv_en |= MII_ABILITY_100BASE_T4;
	vrp->param.anadv_en |= MII_ABILITY_100BASE_TX_FD;
	vrp->param.anadv_en |= MII_ABILITY_100BASE_TX;
	vrp->param.anadv_en |= MII_ABILITY_10BASE_T_FD;
	vrp->param.anadv_en |= MII_ABILITY_10BASE_T;
	/* Not a PHY ability, but advertised on behalf of MAC */
	vrp->param.anadv_en |= MII_ABILITY_PAUSE;
	vrp->param.mtu = ETHERMTU;

	/*
	 * Store the PHY identity.
	 */
	vr_phy_read(vrp, MII_PHYIDH, &vrp->chip.mii.identh);
	vr_phy_read(vrp, MII_PHYIDL, &vrp->chip.mii.identl);

	/*
	 * Clear incapabilities imposed by PHY in phymask: drop each ability
	 * bit whose corresponding MII status bit is not set.
	 */
	vrp->param.an_phymask = vrp->param.anadv_en;
	vr_phy_read(vrp, MII_STATUS, &vrp->chip.mii.status);
	if ((vrp->chip.mii.status & MII_STATUS_10) == 0)
		vrp->param.an_phymask &= ~MII_ABILITY_10BASE_T;

	if ((vrp->chip.mii.status & MII_STATUS_10_FD) == 0)
		vrp->param.an_phymask &= ~MII_ABILITY_10BASE_T_FD;

	if ((vrp->chip.mii.status & MII_STATUS_100_BASEX) == 0)
		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_TX;

	if ((vrp->chip.mii.status & MII_STATUS_100_BASEX_FD) == 0)
		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_TX_FD;

	if ((vrp->chip.mii.status & MII_STATUS_100_BASE_T4) == 0)
		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_T4;

	/*
	 * Clear incapabilities imposed by MAC in macmask
	 * Note that flowcontrol (FCS?) is never masked. All of our adapters
	 * have the ability to honor incoming pause frames. Only the newer can
	 * transmit pause frames. Since there's no asym flowcontrol in 100Mbit
	 * Ethernet, we always advertise (symmetric) pause.
	 */
	vrp->param.an_macmask = vrp->param.anadv_en;

	/*
	 * Advertised capabilities is enabled minus incapable.
	 */
	vrp->chip.mii.anadv = vrp->param.anadv_en &
	    (vrp->param.an_phymask & vrp->param.an_macmask);

	/*
	 * Ensure that autoneg of the PHY matches our default: autoneg
	 * enabled, or forced 100Mbit full-duplex otherwise.
	 */
	if (vrp->param.an_en == VR_LINK_AUTONEG_ON)
		vrp->chip.mii.control = MII_CONTROL_ANE;
	else
		vrp->chip.mii.control =
		    (MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
}
777 
778 /*
779  * Setup the descriptor rings.
780  */
781 static vr_result_t
782 vr_rings_init(vr_t *vrp)
783 {
784 
785 	vrp->rx.ndesc = VR_RX_N_DESC;
786 	vrp->tx.ndesc = VR_TX_N_DESC;
787 
788 	/*
789 	 * Create a ring for receive.
790 	 */
791 	if (vr_alloc_ring(vrp, &vrp->rxring, vrp->rx.ndesc) != VR_SUCCESS)
792 		return (VR_FAILURE);
793 
794 	/*
795 	 * Create a ring for transmit.
796 	 */
797 	if (vr_alloc_ring(vrp, &vrp->txring, vrp->tx.ndesc) != VR_SUCCESS) {
798 		vr_free_ring(&vrp->rxring, vrp->rx.ndesc);
799 		return (VR_FAILURE);
800 	}
801 
802 	vrp->rx.ring = vrp->rxring.desc;
803 	vrp->tx.ring = vrp->txring.desc;
804 	return (VR_SUCCESS);
805 }
806 
807 static void
808 vr_rings_fini(vr_t *vrp)
809 {
810 	vr_free_ring(&vrp->rxring, vrp->rx.ndesc);
811 	vr_free_ring(&vrp->txring, vrp->tx.ndesc);
812 }
813 
/*
 * Allocate a descriptor ring.
 * The number of descriptor entries must fit in a single page so that the
 * whole ring fits in one consecutive space.
 *  i386:  4K page / 16 byte descriptor = 256 entries
 *  sparc: 8K page / 16 byte descriptor = 512 entries
 */
static vr_result_t
vr_alloc_ring(vr_t *vrp, vr_ring_t *ring, size_t n)
{
	ddi_dma_cookie_t	desc_dma_cookie;
	uint_t			desc_cookiecnt;
	int			i, rc;
	size_t			rbytes;

	/*
	 * Allocate a DMA handle for the chip descriptors.
	 */
	rc = ddi_dma_alloc_handle(vrp->devinfo,
	    &vr_dev_dma_attr,
	    DDI_DMA_SLEEP,
	    NULL,
	    &ring->handle);

	if (rc != DDI_SUCCESS) {
		vr_log(vrp, CE_WARN,
		    "ddi_dma_alloc_handle in vr_alloc_ring failed.");
		return (VR_FAILURE);
	}

	/*
	 * Allocate memory for the chip descriptors.
	 */
	rc = ddi_dma_mem_alloc(ring->handle,
	    n * sizeof (vr_chip_desc_t),
	    &vr_dev_dma_accattr,
	    DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    NULL,
	    (caddr_t *)&ring->cdesc,
	    &rbytes,
	    &ring->acchdl);

	if (rc != DDI_SUCCESS) {
		vr_log(vrp, CE_WARN,
		    "ddi_dma_mem_alloc in vr_alloc_ring failed.");
		ddi_dma_free_handle(&ring->handle);
		return (VR_FAILURE);
	}

	/*
	 * Map the descriptor memory. vr_dev_dma_attr's 4K alignment and
	 * s/g list length of 1 are meant to guarantee a single cookie.
	 */
	rc = ddi_dma_addr_bind_handle(ring->handle,
	    NULL,
	    (caddr_t)ring->cdesc,
	    rbytes,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    NULL,
	    &desc_dma_cookie,
	    &desc_cookiecnt);

	if (rc != DDI_DMA_MAPPED || desc_cookiecnt > 1) {
		vr_log(vrp, CE_WARN,
		    "ddi_dma_addr_bind_handle in vr_alloc_ring failed: "
		    "rc = %d, cookiecnt = %d", rc, desc_cookiecnt);
		ddi_dma_mem_free(&ring->acchdl);
		ddi_dma_free_handle(&ring->handle);
		return (VR_FAILURE);
	}
	/* Device-visible base address of the descriptor area. */
	ring->cdesc_paddr = desc_dma_cookie.dmac_address;

	/*
	 * Allocate memory for the host descriptor ring.
	 */
	ring->desc =
	    (vr_desc_t *)kmem_zalloc(n * sizeof (vr_desc_t), KM_SLEEP);

	/*
	 * Interlink the descriptors and connect host- to chip descriptors.
	 */
	for (i = 0; i < n; i++) {
		/*
		 * Connect the host descriptor to a chip descriptor.
		 */
		ring->desc[i].cdesc = &ring->cdesc[i];

		/*
		 * Store the DMA address and offset in the descriptor
		 * Offset is for ddi_dma_sync() and paddr is for ddi_get/-put().
		 */
		ring->desc[i].offset = i * sizeof (vr_chip_desc_t);
		ring->desc[i].paddr = ring->cdesc_paddr + ring->desc[i].offset;

		/*
		 * Link the previous descriptor to this one.
		 */
		if (i > 0) {
			/* Host-side link. */
			ring->desc[i-1].next = &ring->desc[i];

			/* Chip-side (device-visible) next pointer. */
			ddi_put32(ring->acchdl,
			    &ring->cdesc[i-1].next,
			    ring->desc[i].paddr);
		}
	}

	/*
	 * Make rings out of this list by pointing last to first.
	 */
	i = n - 1;
	ring->desc[i].next = &ring->desc[0];
	ddi_put32(ring->acchdl, &ring->cdesc[i].next, ring->desc[0].paddr);
	return (VR_SUCCESS);
}
931 
/*
 * Free the memory allocated for a ring.
 * The teardown order (unbind, free DMA memory, free handle) is the exact
 * reverse of the allocation order in vr_alloc_ring() and must be kept.
 */
static void
vr_free_ring(vr_ring_t *r, size_t n)
{
	/*
	 * Unmap and free the chip descriptors.
	 */
	(void) ddi_dma_unbind_handle(r->handle);
	ddi_dma_mem_free(&r->acchdl);
	ddi_dma_free_handle(&r->handle);

	/*
	 * Free the memory for storing host descriptors
	 */
	kmem_free(r->desc, n * sizeof (vr_desc_t));
}
950 
/*
 * Initialize the receive ring: attach a DMA buffer to every RX descriptor
 * and hand ownership of each descriptor to the chip.
 */
static vr_result_t
vr_rxring_init(vr_t *vrp)
{
	int		i, rc;
	vr_desc_t	*rp;

	/*
	 * Set the read pointer at the start of the ring.
	 */
	vrp->rx.rp = &vrp->rx.ring[0];

	/*
	 * Assign a DMA buffer to each receive descriptor.
	 */
	for (i = 0; i < vrp->rx.ndesc; i++) {
		rp = &vrp->rx.ring[i];
		rc = vr_alloc_dmabuf(vrp,
		    &vrp->rx.ring[i].dmabuf,
		    DDI_DMA_STREAMING | DDI_DMA_READ);

		if (rc != VR_SUCCESS) {
			/* Undo the buffers allocated so far. */
			while (--i >= 0)
				vr_free_dmabuf(&vrp->rx.ring[i].dmabuf);
			return (VR_FAILURE);
		}

		/*
		 * Store the address of the dma buffer in the chip descriptor
		 */
		ddi_put32(vrp->rxring.acchdl,
		    &rp->cdesc->data,
		    rp->dmabuf.paddr);

		/*
		 * Put the buffer length in the chip descriptor. Ensure that
		 * length fits in the 11 bits of stat1 (2047/0x7FF)
		 */
		ddi_put32(vrp->rxring.acchdl, &rp->cdesc->stat1,
		    MIN(VR_MAX_PKTSZ, rp->dmabuf.bufsz));

		/*
		 * Set descriptor ownership to the card
		 */
		ddi_put32(vrp->rxring.acchdl, &rp->cdesc->stat0, VR_RDES0_OWN);

		/*
		 * Sync the descriptor with main memory so the device sees
		 * the update.
		 */
		(void) ddi_dma_sync(vrp->rxring.handle, rp->offset,
		    sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORDEV);
	}
	return (VR_SUCCESS);
}
1007 
1008 /*
1009  * Free the DMA buffers assigned to the receive ring.
1010  */
1011 static void
1012 vr_rxring_fini(vr_t *vrp)
1013 {
1014 	int		i;
1015 
1016 	for (i = 0; i < vrp->rx.ndesc; i++)
1017 		vr_free_dmabuf(&vrp->rx.ring[i].dmabuf);
1018 }
1019 
/*
 * Initialize the transmit ring: reset the TX bookkeeping, attach a DMA
 * buffer to every TX descriptor and present a clean (host-owned)
 * descriptor ring to the device.
 */
static vr_result_t
vr_txring_init(vr_t *vrp)
{
	vr_desc_t		*wp;
	int			i, rc;

	/*
	 * Set the write- and claim pointer.
	 */
	vrp->tx.wp = &vrp->tx.ring[0];
	vrp->tx.cp = &vrp->tx.ring[0];

	/*
	 * (Re)set the TX bookkeeping.
	 */
	vrp->tx.stallticks = 0;
	vrp->tx.resched = 0;

	/*
	 * Every transmit decreases nfree. Every reclaim increases nfree.
	 */
	vrp->tx.nfree = vrp->tx.ndesc;

	/*
	 * Attach a DMA buffer to each transmit descriptor.
	 */
	for (i = 0; i < vrp->tx.ndesc; i++) {
		rc = vr_alloc_dmabuf(vrp,
		    &vrp->tx.ring[i].dmabuf,
		    DDI_DMA_STREAMING | DDI_DMA_WRITE);

		if (rc != VR_SUCCESS) {
			/* Undo the buffers allocated so far. */
			while (--i >= 0)
				vr_free_dmabuf(&vrp->tx.ring[i].dmabuf);
			return (VR_FAILURE);
		}
	}

	/*
	 * Init & sync the TX descriptors so the device sees a valid ring:
	 * clear the status words and point each descriptor at its buffer.
	 */
	for (i = 0; i < vrp->tx.ndesc; i++) {
		wp = &vrp->tx.ring[i];
		ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat0, 0);
		ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat1, 0);
		ddi_put32(vrp->txring.acchdl, &wp->cdesc->data,
		    wp->dmabuf.paddr);
		(void) ddi_dma_sync(vrp->txring.handle, wp->offset,
		    sizeof (vr_chip_desc_t),
		    DDI_DMA_SYNC_FORDEV);
	}
	return (VR_SUCCESS);
}
1073 
1074 /*
1075  * Free the DMA buffers attached to the TX ring.
1076  */
1077 static void
1078 vr_txring_fini(vr_t *vrp)
1079 {
1080 	int		i;
1081 
1082 	/*
1083 	 * Free the DMA buffers attached to the TX ring
1084 	 */
1085 	for (i = 0; i < vrp->tx.ndesc; i++)
1086 		vr_free_dmabuf(&vrp->tx.ring[i].dmabuf);
1087 }
1088 
1089 /*
1090  * Allocate a DMA buffer.
1091  */
1092 static vr_result_t
1093 vr_alloc_dmabuf(vr_t *vrp, vr_data_dma_t *dmap, uint_t dmaflags)
1094 {
1095 	ddi_dma_cookie_t	dma_cookie;
1096 	uint_t			cookiecnt;
1097 	int			rc;
1098 
1099 	/*
1100 	 * Allocate a DMA handle for the buffer
1101 	 */
1102 	rc = ddi_dma_alloc_handle(vrp->devinfo,
1103 	    &vr_data_dma_attr,
1104 	    DDI_DMA_DONTWAIT, NULL,
1105 	    &dmap->handle);
1106 
1107 	if (rc != DDI_SUCCESS) {
1108 		vr_log(vrp, CE_WARN,
1109 		    "ddi_dma_alloc_handle failed in vr_alloc_dmabuf");
1110 		return (VR_FAILURE);
1111 	}
1112 
1113 	/*
1114 	 * Allocate the buffer
1115 	 * The allocated buffer is aligned on 2K boundary. This ensures that
1116 	 * a 1500 byte frame never cross a page boundary and thus that the DMA
1117 	 * mapping can be established in 1 fragment.
1118 	 */
1119 	rc = ddi_dma_mem_alloc(dmap->handle,
1120 	    VR_DMABUFSZ,
1121 	    &vr_data_dma_accattr,
1122 	    DDI_DMA_RDWR | DDI_DMA_STREAMING,
1123 	    DDI_DMA_DONTWAIT, NULL,
1124 	    &dmap->buf,
1125 	    &dmap->bufsz,
1126 	    &dmap->acchdl);
1127 
1128 	if (rc != DDI_SUCCESS) {
1129 		vr_log(vrp, CE_WARN,
1130 		    "ddi_dma_mem_alloc failed in vr_alloc_dmabuf");
1131 		ddi_dma_free_handle(&dmap->handle);
1132 		return (VR_FAILURE);
1133 	}
1134 
1135 	/*
1136 	 * Map the memory
1137 	 */
1138 	rc = ddi_dma_addr_bind_handle(dmap->handle,
1139 	    NULL,
1140 	    (caddr_t)dmap->buf,
1141 	    dmap->bufsz,
1142 	    dmaflags,
1143 	    DDI_DMA_DONTWAIT,
1144 	    NULL,
1145 	    &dma_cookie,
1146 	    &cookiecnt);
1147 
1148 	/*
1149 	 * The cookiecount should never > 1 because we requested 2K alignment
1150 	 */
1151 	if (rc != DDI_DMA_MAPPED || cookiecnt > 1) {
1152 		vr_log(vrp, CE_WARN,
1153 		    "dma_addr_bind_handle failed in vr_alloc_dmabuf: "
1154 		    "rc = %d, cookiecnt = %d", rc, cookiecnt);
1155 		ddi_dma_mem_free(&dmap->acchdl);
1156 		ddi_dma_free_handle(&dmap->handle);
1157 		return (VR_FAILURE);
1158 	}
1159 	dmap->paddr = dma_cookie.dmac_address;
1160 	return (VR_SUCCESS);
1161 }
1162 
1163 /*
1164  * Destroy a DMA buffer.
1165  */
static void
vr_free_dmabuf(vr_data_dma_t *dmap)
{
	/*
	 * Tear down in the reverse order of vr_alloc_dmabuf: unbind the
	 * mapping first, then free the memory and finally the handle.
	 */
	(void) ddi_dma_unbind_handle(dmap->handle);
	ddi_dma_mem_free(&dmap->acchdl);
	ddi_dma_free_handle(&dmap->handle);
}
1173 
1174 /*
1175  * Interrupt service routine
1176  * When our vector is shared with another device, av_dispatch_autovect calls
1177  * all service routines for the vector until *none* of them return claimed
1178  * That means that, when sharing vectors, this routine is called at least
1179  * twice for each interrupt.
1180  */
uint_t
vr_intr(caddr_t arg1, caddr_t arg2)
{
	vr_t		*vrp;
	uint16_t	status;
	mblk_t		*lp = NULL;	/* chain of packets received here */
	uint32_t	tx_resched;	/* 1: restart stalled transmissions */
	uint32_t	link_change;	/* 1: report new link state upstream */

	tx_resched = 0;
	link_change = 0;
	vrp = (void *)arg1;
	_NOTE(ARGUNUSED(arg2))

	/*
	 * Read the status register to see if the interrupt is from our device
	 * This read also ensures that posted writes are brought to main memory.
	 */
	mutex_enter(&vrp->intrlock);
	status = VR_GET16(vrp->acc_reg, VR_ISR0) & VR_ICR0_CFG;
	if (status == 0) {
		/*
		 * Status contains no configured interrupts
		 * The interrupt was not generated by our device.
		 */
		vrp->stats.intr_unclaimed++;
		mutex_exit(&vrp->intrlock);
		return (DDI_INTR_UNCLAIMED);
	}
	vrp->stats.intr_claimed++;

	/*
	 * Acknowledge the event(s) that caused interruption.
	 */
	VR_PUT16(vrp->acc_reg, VR_ISR0, status);

	/*
	 * Receive completion.
	 */
	if ((status & (VR_ISR0_RX_DONE | VR_ISR_RX_ERR_BITS)) != 0) {
		/*
		 * Received some packets.
		 */
		lp = vr_receive(vrp);

		/*
		 * DMA stops after a conflict in the FIFO.
		 */
		if ((status & VR_ISR_RX_ERR_BITS) != 0)
			VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
		status &= ~(VR_ISR0_RX_DONE | VR_ISR_RX_ERR_BITS);
	}

	/*
	 * Transmit completion.
	 */
	if ((status & (VR_ISR0_TX_DONE | VR_ISR_TX_ERR_BITS)) != 0) {
		/*
		 * Card done with transmitting some packets
		 * TX_DONE is generated 3 times per ring but it appears
		 * more often because it is also set when an RX_DONE
		 * interrupt is generated.
		 */
		mutex_enter(&vrp->tx.lock);
		vr_tx_reclaim(vrp);
		/* Latch the reschedule request; acted upon below, unlocked. */
		tx_resched = vrp->tx.resched;
		vrp->tx.resched = 0;
		mutex_exit(&vrp->tx.lock);
		status &= ~(VR_ISR0_TX_DONE | VR_ISR_TX_ERR_BITS);
	}

	/*
	 * Link status change.
	 */
	if ((status & VR_ICR0_LINKSTATUS) != 0) {
		/*
		 * Get new link state and inform the mac layer.
		 */
		mutex_enter(&vrp->oplock);
		mutex_enter(&vrp->tx.lock);
		vr_link_state(vrp);
		mutex_exit(&vrp->tx.lock);
		mutex_exit(&vrp->oplock);
		status &= ~VR_ICR0_LINKSTATUS;
		vrp->stats.linkchanges++;
		link_change = 1;
	}

	/*
	 * Bus error.
	 */
	if ((status & VR_ISR0_BUSERR) != 0) {
		vr_log(vrp, CE_WARN, "bus error occured");
		/* Request a reset; it is executed below, after intrlock. */
		vrp->reset = 1;
		status &= ~VR_ISR0_BUSERR;
	}

	/*
	 * We must have handled all things here.
	 */
	ASSERT(status == 0);
	mutex_exit(&vrp->intrlock);

	/*
	 * Reset the device if requested
	 * The request can come from the periodic tx check or from the interrupt
	 * status.
	 */
	if (vrp->reset != 0) {
		vr_error(vrp);
		vrp->reset = 0;
	}

	/*
	 * Pass up the list with received packets.
	 * This is done after intrlock was dropped so the mac layer can
	 * re-enter the driver without deadlocking.
	 */
	if (lp != NULL)
		mac_rx(vrp->machdl, 0, lp);

	/*
	 * Inform the upper layer on the linkstatus if there was a change.
	 */
	if (link_change != 0)
		mac_link_update(vrp->machdl,
		    (link_state_t)vrp->chip.link.state);
	/*
	 * Restart transmissions if we were waiting for tx descriptors.
	 */
	if (tx_resched == 1)
		mac_tx_update(vrp->machdl);

	/*
	 * Read something from the card to ensure that all of our configuration
	 * writes are delivered to the device before the interrupt is ended.
	 */
	(void) VR_GET8(vrp->acc_reg, VR_ETHERADDR);
	return (DDI_INTR_CLAIMED);
}
1319 
1320 /*
 * Respond to an unforeseen situation by resetting the card and our bookkeeping.
1322  */
static void
vr_error(vr_t *vrp)
{
	vr_log(vrp, CE_WARN, "resetting MAC.");
	/*
	 * Lock order matters and must match the rest of the driver:
	 * intrlock before oplock before tx.lock.
	 */
	mutex_enter(&vrp->intrlock);
	mutex_enter(&vrp->oplock);
	mutex_enter(&vrp->tx.lock);
	/* Stop frees the rings, reset re-initializes the MAC, start redoes */
	/* the ring setup and chip configuration. */
	(void) vr_stop(vrp);
	vr_reset(vrp);
	(void) vr_start(vrp);
	mutex_exit(&vrp->tx.lock);
	mutex_exit(&vrp->oplock);
	mutex_exit(&vrp->intrlock);
	vrp->stats.resets++;
}
1338 
1339 /*
1340  * Collect received packets in a list.
1341  */
static mblk_t *
vr_receive(vr_t *vrp)
{
	mblk_t			*lp, *mp, *np;	/* list head, tail, new */
	vr_desc_t		*rxp;
	vr_data_dma_t		*dmap;
	uint32_t		pklen;
	uint32_t		rxstat0;
	uint32_t		n;		/* descriptors processed */

	lp = NULL;
	n = 0;
	/*
	 * Walk the ring from the current read pointer until a descriptor
	 * still owned by the card is found.
	 */
	for (rxp = vrp->rx.rp; ; rxp = rxp->next, n++) {
		/*
		 * Sync the descriptor before looking at it.
		 */
		(void) ddi_dma_sync(vrp->rxring.handle, rxp->offset,
		    sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORKERNEL);

		/*
		 * Get the status from the descriptor.
		 */
		rxstat0 = ddi_get32(vrp->rxring.acchdl, &rxp->cdesc->stat0);

		/*
		 * We're done if the descriptor is owned by the card.
		 */
		if ((rxstat0 & VR_RDES0_OWN) != 0)
			break;
		else if ((rxstat0 & VR_RDES0_RXOK) != 0) {
			/*
			 * Received a good packet
			 */
			dmap = &rxp->dmabuf;
			/*
			 * The frame length lives in the upper 16 bits of
			 * stat0; strip the FCS which the chip includes.
			 */
			pklen = (rxstat0 >> 16) - ETHERFCSL;

			/*
			 * Sync the data.
			 */
			(void) ddi_dma_sync(dmap->handle, 0,
			    pklen, DDI_DMA_SYNC_FORKERNEL);

			/*
			 * Send a new copied message upstream.
			 * Copying lets the descriptor's buffer be handed
			 * back to the card immediately below.
			 */
			np = allocb(pklen, 0);
			if (np != NULL) {
				bcopy(dmap->buf, np->b_rptr, pklen);
				np->b_wptr = np->b_rptr + pklen;

				vrp->stats.mac_stat_ipackets++;
				vrp->stats.mac_stat_rbytes += pklen;

				if ((rxstat0 & VR_RDES0_BAR) != 0)
					vrp->stats.mac_stat_brdcstrcv++;
				else if ((rxstat0 & VR_RDES0_MAR) != 0)
					vrp->stats.mac_stat_multircv++;

				/*
				 * Link this packet in the list.
				 */
				np->b_next = NULL;
				if (lp == NULL)
					lp = mp = np;
				else {
					mp->b_next = np;
					mp = np;
				}
			} else {
				/* No memory; the packet is dropped */
				vrp->stats.allocbfail++;
				vrp->stats.mac_stat_norcvbuf++;
			}

		} else {
			/*
			 * Received with errors.
			 */
			vrp->stats.mac_stat_ierrors++;
			if ((rxstat0 & VR_RDES0_FAE) != 0)
				vrp->stats.ether_stat_align_errors++;
			if ((rxstat0 & VR_RDES0_CRCERR) != 0)
				vrp->stats.ether_stat_fcs_errors++;
			if ((rxstat0 & VR_RDES0_LONG) != 0)
				vrp->stats.ether_stat_toolong_errors++;
			if ((rxstat0 & VR_RDES0_RUNT) != 0)
				vrp->stats.ether_stat_tooshort_errors++;
			if ((rxstat0 & VR_RDES0_FOV) != 0)
				vrp->stats.mac_stat_overflows++;
		}

		/*
		 * Reset descriptor ownership to the MAC.
		 */
		ddi_put32(vrp->rxring.acchdl,
		    &rxp->cdesc->stat0,
		    VR_RDES0_OWN);
		(void) ddi_dma_sync(vrp->rxring.handle,
		    rxp->offset,
		    sizeof (vr_chip_desc_t),
		    DDI_DMA_SYNC_FORDEV);
	}
	/* Remember where to resume on the next interrupt. */
	vrp->rx.rp = rxp;

	/*
	 * If we do flowcontrol and if the card can transmit pause frames,
	 * increment the "available receive descriptors" register.
	 */
	if (n > 0 && vrp->chip.link.flowctrl == VR_PAUSE_BIDIRECTIONAL) {
		/*
		 * Whenever the card moves a fragment to host memory it
		 * decrements the RXBUFCOUNT register. If the value in the
		 * register reaches a low watermark, the card transmits a pause
		 * frame. If the value in this register reaches a high
		 * watermark, the card sends a "cancel pause" frame
		 *
		 * Non-zero values written to this byte register are added
		 * by the chip to the register's contents, so we must write
		 * the number of descriptors free'd.
		 */
		VR_PUT8(vrp->acc_reg, VR_FCR0_RXBUFCOUNT, MIN(n, 0xFF));
	}
	return (lp);
}
1465 
1466 /*
1467  * Enqueue a list of packets for transmission
1468  * Return the packets not transmitted.
1469  */
1470 mblk_t *
1471 vr_mac_tx_enqueue_list(void *p, mblk_t *mp)
1472 {
1473 	vr_t		*vrp;
1474 	mblk_t		*nextp;
1475 
1476 	vrp = (vr_t *)p;
1477 	mutex_enter(&vrp->tx.lock);
1478 	do {
1479 		if (vrp->tx.nfree == 0) {
1480 			vrp->stats.ether_stat_defer_xmts++;
1481 			vrp->tx.resched = 1;
1482 			break;
1483 		}
1484 		nextp = mp->b_next;
1485 		mp->b_next = mp->b_prev = NULL;
1486 		vr_tx_enqueue_msg(vrp, mp);
1487 		mp = nextp;
1488 		vrp->tx.nfree--;
1489 	} while (mp != NULL);
1490 	mutex_exit(&vrp->tx.lock);
1491 
1492 	/*
1493 	 * Tell the chip to poll the TX ring.
1494 	 */
1495 	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1496 	return (mp);
1497 }
1498 
1499 /*
1500  * Enqueue a message for transmission.
1501  */
1502 static void
1503 vr_tx_enqueue_msg(vr_t *vrp, mblk_t *mp)
1504 {
1505 	vr_desc_t		*wp;
1506 	vr_data_dma_t		*dmap;
1507 	uint32_t		pklen;
1508 	uint32_t		nextp;
1509 	int			padlen;
1510 
1511 	if ((uchar_t)mp->b_rptr[0] == 0xff &&
1512 	    (uchar_t)mp->b_rptr[1] == 0xff &&
1513 	    (uchar_t)mp->b_rptr[2] == 0xff &&
1514 	    (uchar_t)mp->b_rptr[3] == 0xff &&
1515 	    (uchar_t)mp->b_rptr[4] == 0xff &&
1516 	    (uchar_t)mp->b_rptr[5] == 0xff)
1517 		vrp->stats.mac_stat_brdcstxmt++;
1518 	else if ((uchar_t)mp->b_rptr[0] == 1)
1519 		vrp->stats.mac_stat_multixmt++;
1520 
1521 	pklen = msgsize(mp);
1522 	wp = vrp->tx.wp;
1523 	dmap = &wp->dmabuf;
1524 
1525 	/*
1526 	 * Copy the message into the pre-mapped buffer and free mp
1527 	 */
1528 	mcopymsg(mp, dmap->buf);
1529 
1530 	/*
1531 	 * Clean padlen bytes of short packet.
1532 	 */
1533 	padlen = ETHERMIN - pklen;
1534 	if (padlen > 0) {
1535 		bzero(dmap->buf + pklen, padlen);
1536 		pklen += padlen;
1537 	}
1538 
1539 	/*
1540 	 * Most of the statistics are updated on reclaim, after the actual
1541 	 * transmit. obytes is maintained here because the length is cleared
1542 	 * after transmission
1543 	 */
1544 	vrp->stats.mac_stat_obytes += pklen;
1545 
1546 	/*
1547 	 * Sync the data so the device sees the new content too.
1548 	 */
1549 	(void) ddi_dma_sync(dmap->handle, 0, pklen, DDI_DMA_SYNC_FORDEV);
1550 
1551 	/*
1552 	 * If we have reached the TX interrupt distance, enable a TX interrupt
1553 	 * for this packet. The Interrupt Control (IC) bit in the transmit
1554 	 * descriptor doesn't have any effect on the interrupt generation
1555 	 * despite the vague statements in the datasheet. Thus, we use the
1556 	 * more obscure interrupt suppress bit which is probably part of the
1557 	 * MAC's bookkeeping for TX interrupts and fragmented packets.
1558 	 */
1559 	vrp->tx.intr_distance++;
1560 	nextp = ddi_get32(vrp->txring.acchdl, &wp->cdesc->next);
1561 	if (vrp->tx.intr_distance >= VR_TX_MAX_INTR_DISTANCE) {
1562 		/*
1563 		 * Don't suppress the interrupt for this packet.
1564 		 */
1565 		vrp->tx.intr_distance = 0;
1566 		nextp &= (~VR_TDES3_SUPPRESS_INTR);
1567 	} else {
1568 		/*
1569 		 * Suppress the interrupt for this packet.
1570 		 */
1571 		nextp |= VR_TDES3_SUPPRESS_INTR;
1572 	}
1573 
1574 	/*
1575 	 * Write and sync the chip's descriptor
1576 	 */
1577 	ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat1,
1578 	    pklen | (VR_TDES1_STP | VR_TDES1_EDP | VR_TDES1_CHN));
1579 	ddi_put32(vrp->txring.acchdl, &wp->cdesc->next, nextp);
1580 	ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat0, VR_TDES0_OWN);
1581 	(void) ddi_dma_sync(vrp->txring.handle, wp->offset,
1582 	    sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORDEV);
1583 
1584 	/*
1585 	 * The ticks counter is cleared by reclaim when it reclaimed some
1586 	 * descriptors and incremented by the periodic TX stall check.
1587 	 */
1588 	vrp->tx.stallticks = 1;
1589 	vrp->tx.wp = wp->next;
1590 }
1591 
1592 /*
1593  * Free transmitted descriptors.
1594  */
static void
vr_tx_reclaim(vr_t *vrp)
{
	vr_desc_t		*cp;
	uint32_t		stat0, stat1, freed, dirty;

	ASSERT(mutex_owned(&vrp->tx.lock));

	freed = 0;
	/* Number of descriptors handed to the card and not yet reclaimed. */
	dirty = vrp->tx.ndesc - vrp->tx.nfree;
	for (cp = vrp->tx.cp; dirty > 0; cp = cp->next) {
		/*
		 * Sync & get descriptor status.
		 */
		(void) ddi_dma_sync(vrp->txring.handle, cp->offset,
		    sizeof (vr_chip_desc_t),
		    DDI_DMA_SYNC_FORKERNEL);
		stat0 = ddi_get32(vrp->txring.acchdl, &cp->cdesc->stat0);

		/* Stop at the first descriptor the card still owns. */
		if ((stat0 & VR_TDES0_OWN) != 0)
			break;

		/*
		 * Do stats for the first descriptor in a chain.
		 */
		stat1 = ddi_get32(vrp->txring.acchdl, &cp->cdesc->stat1);
		if ((stat1 & VR_TDES1_STP) != 0) {
			if ((stat0 & VR_TDES0_TERR) != 0) {
				vrp->stats.ether_stat_macxmt_errors++;
				if ((stat0 & VR_TDES0_UDF) != 0)
					vrp->stats.mac_stat_underflows++;
				if ((stat0 & VR_TDES0_ABT) != 0)
					vrp-> stats.ether_stat_ex_collisions++;
				/*
				 * Abort and FIFO underflow stop the MAC.
				 * Packet queueing must be disabled with HD
				 * links because otherwise the MAC is also lost
				 * after a few of these events.
				 */
				VR_PUT8(vrp->acc_reg, VR_CTRL0,
				    VR_CTRL0_DMA_GO);
			} else
				vrp->stats.mac_stat_opackets++;

			/*
			 * NOTE(review): VR_TDES0_NCR appears to be the
			 * low-order collision-count field, so a masked
			 * value of 1 means exactly one collision — confirm
			 * against the bit definitions in vr.h.
			 */
			if ((stat0 & VR_TDES0_COL) != 0) {
				if ((stat0 & VR_TDES0_NCR) == 1) {
					vrp->stats.
					    ether_stat_first_collisions++;
				} else {
					vrp->stats.
					    ether_stat_multi_collisions++;
				}
				vrp->stats.mac_stat_collisions +=
				    (stat0 & VR_TDES0_NCR);
			}

			if ((stat0 & VR_TDES0_CRS) != 0)
				vrp->stats.ether_stat_carrier_errors++;

			if ((stat0 & VR_TDES0_OWC) != 0)
				vrp->stats.ether_stat_tx_late_collisions++;
		}
		freed += 1;
		dirty -= 1;
	}
	/* Resume claiming from here on the next call. */
	vrp->tx.cp = cp;

	if (freed > 0) {
		vrp->tx.nfree += freed;
		vrp->tx.stallticks = 0;
		vrp->stats.txreclaims += 1;
	} else
		vrp->stats.txreclaim0 += 1;
}
1669 
1670 /*
1671  * Check TX health every 2 seconds.
1672  */
1673 static void
1674 vr_periodic(void *p)
1675 {
1676 	vr_t		*vrp;
1677 
1678 	vrp = (vr_t *)p;
1679 	if (vrp->chip.state == CHIPSTATE_RUNNING &&
1680 	    vrp->chip.link.state == VR_LINK_STATE_UP && vrp->reset == 0) {
1681 		if (mutex_tryenter(&vrp->intrlock) != 0) {
1682 			mutex_enter(&vrp->tx.lock);
1683 			if (vrp->tx.resched == 1) {
1684 				if (vrp->tx.stallticks >= VR_MAXTXCHECKS) {
1685 					/*
1686 					 * No succesful reclaim in the last n
1687 					 * intervals. Reset the MAC.
1688 					 */
1689 					vrp->reset = 1;
1690 					vr_log(vrp, CE_WARN,
1691 					    "TX stalled, resetting MAC");
1692 				vrp->stats.txstalls++;
1693 				} else {
1694 					/*
1695 					 * Increase until we find that we've
1696 					 * waited long enough.
1697 					 */
1698 					vrp->tx.stallticks += 1;
1699 				}
1700 			}
1701 			mutex_exit(&vrp->tx.lock);
1702 			mutex_exit(&vrp->intrlock);
1703 			vrp->stats.txchecks++;
1704 		}
1705 	}
1706 	vrp->stats.cyclics++;
1707 }
1708 
1709 /*
1710  * Bring the device to our desired initial state.
1711  */
static void
vr_reset(vr_t *vrp)
{
	uint32_t	time;

	/*
	 * Reset the MAC
	 * If we don't wait long enough for the forced reset to complete,
	 * the MAC loses sync with the PHY. The result is link up, no link
	 * change interrupt and no data transfer.
	 * After 100ms of polling, escalate to a forced reset via MISC1.
	 */
	time = 0;
	VR_PUT8(vrp->acc_io, VR_CTRL1, VR_CTRL1_RESET);
	do {
		drv_usecwait(100);
		time += 100;
		if (time >= 100000) {
			VR_PUT8(vrp->acc_io, VR_MISC1, VR_MISC1_RESET);
			delay(drv_usectohz(200000));
		}
	} while ((VR_GET8(vrp->acc_io, VR_CTRL1) & VR_CTRL1_RESET) != 0);
	delay(drv_usectohz(10000));

	/*
	 * Load the PROM contents (factory MAC address etc.) into the MAC
	 * again.
	 */
	VR_SETBIT8(vrp->acc_io, VR_PROMCTL, VR_PROMCTL_RELOAD);
	delay(drv_usectohz(100000));

	/*
	 * Tell the MAC via IO space that we like to use memory space for
	 * accessing registers.
	 */
	VR_SETBIT8(vrp->acc_io, VR_CFGD, VR_CFGD_MMIOEN);
}
1747 
1748 /*
1749  * Prepare and enable the card (MAC + PHY + PCI).
1750  */
static int
vr_start(vr_t *vrp)
{
	uint8_t		pci_latency, pci_mode;

	ASSERT(mutex_owned(&vrp->oplock));

	/*
	 * Allocate DMA buffers for RX.
	 */
	if (vr_rxring_init(vrp) != VR_SUCCESS) {
		vr_log(vrp, CE_NOTE, "vr_rxring_init() failed");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA buffers for TX.
	 */
	if (vr_txring_init(vrp) != VR_SUCCESS) {
		vr_log(vrp, CE_NOTE, "vr_txring_init() failed");
		vr_rxring_fini(vrp);
		return (ENOMEM);
	}

	/*
	 * Changes of the chip specific registers as done in VIA's fet driver
	 * These bits are not in the datasheet and controlled by vr_chip_info.
	 */
	pci_mode = VR_GET8(vrp->acc_reg, VR_MODE2);
	if ((vrp->chip.info.bugs & VR_BUG_NEEDMODE10T) != 0)
		pci_mode |= VR_MODE2_MODE10T;

	if ((vrp->chip.info.bugs & VR_BUG_NEEDMODE2PCEROPT) != 0)
		pci_mode |= VR_MODE2_PCEROPT;

	if ((vrp->chip.info.features & VR_FEATURE_MRDLNMULTIPLE) != 0)
		pci_mode |= VR_MODE2_MRDPL;
	VR_PUT8(vrp->acc_reg, VR_MODE2, pci_mode);

	pci_mode = VR_GET8(vrp->acc_reg, VR_MODE3);
	if ((vrp->chip.info.bugs & VR_BUG_NEEDMIION) != 0)
		pci_mode |= VR_MODE3_MIION;
	VR_PUT8(vrp->acc_reg, VR_MODE3, pci_mode);

	/*
	 * RX: Accept broadcast packets.
	 */
	VR_SETBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTBROAD);

	/*
	 * RX: Start DMA when there are 256 bytes in the FIFO.
	 */
	VR_SETBITS8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_FIFO_THRESHOLD_BITS,
	    VR_RXCFG_FIFO_THRESHOLD_256);
	VR_SETBITS8(vrp->acc_reg, VR_BCR0, VR_BCR0_RX_FIFO_THRESHOLD_BITS,
	    VR_BCR0_RX_FIFO_THRESHOLD_256);

	/*
	 * TX: Start transmit when there are 256 bytes in the FIFO.
	 */
	VR_SETBITS8(vrp->acc_reg, VR_TXCFG, VR_TXCFG_FIFO_THRESHOLD_BITS,
	    VR_TXCFG_FIFO_THRESHOLD_256);
	VR_SETBITS8(vrp->acc_reg, VR_BCR1, VR_BCR1_TX_FIFO_THRESHOLD_BITS,
	    VR_BCR1_TX_FIFO_THRESHOLD_256);

	/*
	 * Burst transfers up to 256 bytes.
	 */
	VR_SETBITS8(vrp->acc_reg, VR_BCR0, VR_BCR0_DMABITS, VR_BCR0_DMA256);

	/*
	 * Disable TX autopolling as it is bad for RX performance
	 * I assume this is because the RX process finds the bus often occupied
	 * by the polling process.
	 */
	VR_SETBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_NOAUTOPOLL);

	/*
	 * Honor the PCI latency timer if it is reasonable.
	 */
	pci_latency = VR_GET8(vrp->acc_cfg, PCI_CONF_LATENCY_TIMER);
	if (pci_latency != 0 && pci_latency != 0xFF)
		VR_SETBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_LATENCYTIMER);
	else
		VR_CLRBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_LATENCYTIMER);

	/*
	 * Ensure that VLAN filtering is off, because this strips the tag.
	 */
	if ((vrp->chip.info.features & VR_FEATURE_VLANTAGGING) != 0) {
		VR_CLRBIT8(vrp->acc_reg, VR_BCR1, VR_BCR1_VLANFILTER);
		VR_CLRBIT8(vrp->acc_reg, VR_TXCFG, VR_TXCFG_8021PQ_EN);
	}

	/*
	 * Clear the CAM filter.
	 * First clear the multicast CAM mask, then select the VLAN CAM,
	 * write an all-zero entry at index 0 and enable only that entry.
	 */
	if ((vrp->chip.info.features & VR_FEATURE_CAMSUPPORT) != 0) {
		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_ENABLE);
		VR_PUT32(vrp->acc_reg, VR_CAM_MASK, 0);
		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);

		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
		    VR_CAM_CTRL_ENABLE|VR_CAM_CTRL_SELECT_VLAN);
		VR_PUT8(vrp->acc_reg, VR_VCAM0, 0);
		VR_PUT8(vrp->acc_reg, VR_VCAM1, 0);
		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_WRITE);
		VR_PUT32(vrp->acc_reg, VR_CAM_MASK, 1);
		drv_usecwait(2);
		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
	}

	/*
	 * Give the start addresses of the descriptor rings to the DMA
	 * controller on the MAC.
	 */
	VR_PUT32(vrp->acc_reg, VR_RXADDR, vrp->rx.rp->paddr);
	VR_PUT32(vrp->acc_reg, VR_TXADDR, vrp->tx.wp->paddr);

	/*
	 * We don't use the additionally invented interrupt ICR1 register,
	 * so make sure these are disabled.
	 */
	VR_PUT8(vrp->acc_reg, VR_ISR1, 0xFF);
	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);

	/*
	 * Enable interrupts.
	 */
	VR_PUT16(vrp->acc_reg, VR_ISR0, 0xFFFF);
	VR_PUT16(vrp->acc_reg, VR_ICR0, VR_ICR0_CFG);

	/*
	 * Enable the DMA controller.
	 */
	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);

	/*
	 * Configure the link. Rely on the link change interrupt for getting
	 * the link state into the driver.
	 */
	vr_link_init(vrp);

	/*
	 * Set the software view on the state to 'running'.
	 */
	vrp->chip.state = CHIPSTATE_RUNNING;
	return (0);
}
1900 
1901 /*
1902  * Stop DMA and interrupts.
1903  */
static int
vr_stop(vr_t *vrp)
{
	ASSERT(mutex_owned(&vrp->oplock));

	/*
	 * Stop interrupts.
	 */
	VR_PUT16(vrp->acc_reg, VR_ICR0, 0);
	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);

	/*
	 * Stop DMA.
	 */
	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_STOP);

	/*
	 * Set the software view on the state to stopped.
	 */
	vrp->chip.state = CHIPSTATE_STOPPED;

	/*
	 * Remove DMA buffers from the rings.
	 * A subsequent vr_start() re-initializes both rings.
	 */
	vr_rxring_fini(vrp);
	vr_txring_fini(vrp);
	return (0);
}
1932 
1933 int
1934 vr_mac_start(void *p)
1935 {
1936 	vr_t	*vrp;
1937 	int	rc;
1938 
1939 	vrp = (vr_t *)p;
1940 	mutex_enter(&vrp->oplock);
1941 
1942 	/*
1943 	 * Reset the card.
1944 	 */
1945 	vr_reset(vrp);
1946 
1947 	/*
1948 	 * Prepare and enable the card.
1949 	 */
1950 	rc = vr_start(vrp);
1951 
1952 	/*
1953 	 * Configure a cyclic function to keep the card & driver from diverting.
1954 	 */
1955 	vrp->periodic_id =
1956 	    ddi_periodic_add(vr_periodic, vrp, VR_CHECK_INTERVAL, DDI_IPL_0);
1957 
1958 	mutex_exit(&vrp->oplock);
1959 	return (rc);
1960 }
1961 
/*
 * GLDv3 stop entry point: stop the device and delete the TX health cyclic.
 */
void
vr_mac_stop(void *p)
{
	vr_t	*vrp = p;

	mutex_enter(&vrp->oplock);
	mutex_enter(&vrp->tx.lock);

	/*
	 * Stop the device.
	 */
	(void) vr_stop(vrp);
	mutex_exit(&vrp->tx.lock);

	/*
	 * Remove the cyclic from the system.
	 * Done without tx.lock held because vr_periodic() takes tx.lock
	 * itself; ddi_periodic_delete waits for a running invocation.
	 */
	ddi_periodic_delete(vrp->periodic_id);
	mutex_exit(&vrp->oplock);
}
1982 
1983 /*
1984  * Add or remove a multicast address to/from the filter
1985  *
1986  * From the 21143 manual:
1987  *  The 21143 can store 512 bits serving as hash bucket heads, and one physical
1988  *  48-bit Ethernet address. Incoming frames with multicast destination
1989  *  addresses are subjected to imperfect filtering. Frames with physical
1990  *  destination  addresses are checked against the single physical address.
1991  *  For any incoming frame with a multicast destination address, the 21143
1992  *  applies the standard Ethernet cyclic redundancy check (CRC) function to the
1993  *  first 6 bytes containing the destination address, then it uses the most
1994  *  significant 9 bits of the result as a bit index into the table. If the
1995  *  indexed bit is set, the frame is accepted. If the bit is cleared, the frame
1996  *  is rejected. This filtering mode is called imperfect because multicast
1997  *  frames not addressed to this station may slip through, but it still
1998  *  decreases the number of frames that the host can receive.
1999  * I assume the above is also the way the VIA chips work. There's not a single
2000  * word about the multicast filter in the datasheet.
2001  *
2002  * Another word on the CAM filter on VT6105M controllers:
2003  *  The VT6105M has content addressable memory which can be used for perfect
2004  *  filtering of 32 multicast addresses and a few VLAN id's
2005  *
2006  *  I think it works like this: When the controller receives a multicast
2007  *  address, it looks up the address using CAM. When it is found, it takes the
2008  *  matching cell address (index) and compares this to the bit position in the
2009  *  cam mask. If the bit is set, the packet is passed up. If CAM lookup does not
2010  *  result in a match, the packet is filtered using the hash based filter,
2011  *  if that matches, the packet is passed up and dropped otherwise
2012  * Also, there's not a single word in the datasheet on how this cam is supposed
2013  * to work ...
2014  */
2015 int
2016 vr_mac_set_multicast(void *p, boolean_t add, const uint8_t *mca)
2017 {
2018 	vr_t		*vrp;
2019 	uint32_t	crc_index;
2020 	int32_t		cam_index;
2021 	uint32_t	cam_mask;
2022 	boolean_t	use_hash_filter;
2023 	ether_addr_t	taddr;
2024 	uint32_t	a;
2025 
2026 	vrp = (vr_t *)p;
2027 	mutex_enter(&vrp->oplock);
2028 	mutex_enter(&vrp->intrlock);
2029 	use_hash_filter = B_FALSE;
2030 
2031 	if ((vrp->chip.info.features & VR_FEATURE_CAMSUPPORT) != 0) {
2032 		/*
2033 		 * Program the perfect filter.
2034 		 */
2035 		cam_mask = VR_GET32(vrp->acc_reg, VR_CAM_MASK);
2036 		if (add == B_TRUE) {
2037 			/*
2038 			 * Get index of first empty slot.
2039 			 */
2040 			bzero(&taddr, sizeof (taddr));
2041 			cam_index = vr_cam_index(vrp, taddr);
2042 			if (cam_index != -1) {
2043 				/*
2044 				 * Add address at cam_index.
2045 				 */
2046 				cam_mask |= (1 << cam_index);
2047 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2048 				    VR_CAM_CTRL_ENABLE);
2049 				VR_PUT8(vrp->acc_reg, VR_CAM_ADDR, cam_index);
2050 				VR_PUT32(vrp->acc_reg, VR_CAM_MASK, cam_mask);
2051 				for (a = 0; a < ETHERADDRL; a++) {
2052 					VR_PUT8(vrp->acc_reg,
2053 					    VR_MCAM0 + a, mca[a]);
2054 				}
2055 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2056 				    VR_CAM_CTRL_WRITE);
2057 				drv_usecwait(2);
2058 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2059 				    VR_CAM_CTRL_DONE);
2060 			} else {
2061 				/*
2062 				 * No free CAM slots available
2063 				 * Add mca to the imperfect filter.
2064 				 */
2065 				use_hash_filter = B_TRUE;
2066 			}
2067 		} else {
2068 			/*
2069 			 * Find the index of the entry to remove
2070 			 * If the entry was not found (-1), the addition was
2071 			 * probably done when the table was full.
2072 			 */
2073 			cam_index = vr_cam_index(vrp, mca);
2074 			if (cam_index != -1) {
2075 				/*
2076 				 * Disable the corresponding mask bit.
2077 				 */
2078 				cam_mask &= ~(1 << cam_index);
2079 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2080 				    VR_CAM_CTRL_ENABLE);
2081 				VR_PUT32(vrp->acc_reg, VR_CAM_MASK, cam_mask);
2082 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2083 				    VR_CAM_CTRL_DONE);
2084 			} else {
2085 				/*
2086 				 * The entry to be removed was not found
2087 				 * The likely cause is that the CAM was full
2088 				 * during addition. The entry is added to the
2089 				 * hash filter in that case and needs to be
2090 				 * removed there too.
2091 				 */
2092 				use_hash_filter = B_TRUE;
2093 			}
2094 		}
2095 	} else {
2096 		/*
2097 		 * No CAM in the MAC, thus we need the hash filter.
2098 		 */
2099 		use_hash_filter = B_TRUE;
2100 	}
2101 
2102 	if (use_hash_filter == B_TRUE) {
2103 		/*
2104 		 * Get the CRC-32 of the multicast address
2105 		 * The card uses the "MSB first" direction when calculating the
2106 		 * the CRC. This is odd because ethernet is "LSB first"
2107 		 * We have to use that "big endian" approach as well.
2108 		 */
2109 		crc_index = ether_crc_be(mca) >> (32 - 6);
2110 		if (add == B_TRUE) {
2111 			/*
2112 			 * Turn bit[crc_index] on.
2113 			 */
2114 			if (crc_index < 32)
2115 				vrp->mhash0 |= (1 << crc_index);
2116 			else
2117 				vrp->mhash1 |= (1 << (crc_index - 32));
2118 		} else {
2119 			/*
2120 			 * Turn bit[crc_index] off.
2121 			 */
2122 			if (crc_index < 32)
2123 				vrp->mhash0 &= ~(0 << crc_index);
2124 			else
2125 				vrp->mhash1 &= ~(0 << (crc_index - 32));
2126 		}
2127 
2128 		/*
2129 		 * When not promiscuous write the filter now. When promiscuous,
2130 		 * the filter is open and will be written when promiscuous ends.
2131 		 */
2132 		if (vrp->promisc == B_FALSE) {
2133 			VR_PUT32(vrp->acc_reg, VR_MAR0, vrp->mhash0);
2134 			VR_PUT32(vrp->acc_reg, VR_MAR1, vrp->mhash1);
2135 		}
2136 	}
2137 
2138 	/*
2139 	 * Enable/disable multicast receivements based on mcount.
2140 	 */
2141 	if (add == B_TRUE)
2142 		vrp->mcount++;
2143 	else if (vrp->mcount != 0)
2144 		vrp->mcount --;
2145 	if (vrp->mcount != 0)
2146 		VR_SETBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTMULTI);
2147 	else
2148 		VR_CLRBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTMULTI);
2149 
2150 	mutex_exit(&vrp->intrlock);
2151 	mutex_exit(&vrp->oplock);
2152 	return (0);
2153 }
2154 
2155 /*
2156  * Calculate the CRC32 for 6 bytes of multicast address in MSB(it) first order.
2157  * The MSB first order is a bit odd because Ethernet standard is LSB first
2158  */
2159 static uint32_t
2160 ether_crc_be(const uint8_t *data)
2161 {
2162 	uint32_t	crc = (uint32_t)0xFFFFFFFFU;
2163 	uint32_t	carry;
2164 	uint32_t	bit;
2165 	uint32_t	length;
2166 	uint8_t		c;
2167 
2168 	for (length = 0; length < ETHERADDRL; length++) {
2169 		c = data[length];
2170 		for (bit = 0; bit < 8; bit++) {
2171 			carry = ((crc & 0x80000000U) ? 1 : 0) ^ (c & 0x01);
2172 			crc <<= 1;
2173 			c >>= 1;
2174 			if (carry)
2175 				crc = (crc ^ 0x04C11DB6) | carry;
2176 		}
2177 	}
2178 	return (crc);
2179 }
2180 
2181 
2182 /*
2183  * Return the CAM index (base 0) of maddr or -1 if maddr is not found
2184  * If maddr is 0, return the index of an empty slot in CAM or -1 when no free
2185  * slots available.
2186  */
2187 static int32_t
2188 vr_cam_index(vr_t *vrp, const uint8_t *maddr)
2189 {
2190 	ether_addr_t	taddr;
2191 	int32_t		index;
2192 	uint32_t	mask;
2193 	uint32_t	a;
2194 
2195 	bzero(&taddr, sizeof (taddr));
2196 
2197 	/*
2198 	 * Read the CAM mask from the controller.
2199 	 */
2200 	mask = VR_GET32(vrp->acc_reg, VR_CAM_MASK);
2201 
2202 	/*
2203 	 * If maddr is 0, return the first unused slot or -1 for no unused.
2204 	 */
2205 	if (bcmp(maddr, taddr, ETHERADDRL) == 0) {
2206 		/*
2207 		 * Look for the first unused position in mask.
2208 		 */
2209 		for (index = 0; index < VR_CAM_SZ; index++) {
2210 			if (((mask >> index) & 1) == 0)
2211 				return (index);
2212 		}
2213 		return (-1);
2214 	} else {
2215 		/*
2216 		 * Look for maddr in CAM.
2217 		 */
2218 		for (index = 0; index < VR_CAM_SZ; index++) {
2219 			/* Look at enabled entries only */
2220 			if (((mask >> index) & 1) == 0)
2221 				continue;
2222 
2223 			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_ENABLE);
2224 			VR_PUT8(vrp->acc_reg, VR_CAM_ADDR, index);
2225 			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_READ);
2226 			drv_usecwait(2);
2227 			for (a = 0; a < ETHERADDRL; a++)
2228 				taddr[a] = VR_GET8(vrp->acc_reg, VR_MCAM0 + a);
2229 			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
2230 			if (bcmp(maddr, taddr, ETHERADDRL) == 0)
2231 				return (index);
2232 		}
2233 	}
2234 	return (-1);
2235 }
2236 
2237 /*
2238  * Set promiscuous mode on or off.
2239  */
2240 int
2241 vr_mac_set_promisc(void *p, boolean_t promiscflag)
2242 {
2243 	vr_t		*vrp;
2244 	uint8_t		rxcfg;
2245 
2246 	vrp = (vr_t *)p;
2247 
2248 	mutex_enter(&vrp->intrlock);
2249 	mutex_enter(&vrp->oplock);
2250 	mutex_enter(&vrp->tx.lock);
2251 
2252 	/*
2253 	 * Get current receive configuration.
2254 	 */
2255 	rxcfg = VR_GET8(vrp->acc_reg, VR_RXCFG);
2256 	vrp->promisc = promiscflag;
2257 
2258 	if (promiscflag == B_TRUE) {
2259 		/*
2260 		 * Enable promiscuous mode and open the multicast filter.
2261 		 */
2262 		rxcfg |= (VR_RXCFG_PROMISC | VR_RXCFG_ACCEPTMULTI);
2263 		VR_PUT32(vrp->acc_reg, VR_MAR0, 0xffffffff);
2264 		VR_PUT32(vrp->acc_reg, VR_MAR1, 0xffffffff);
2265 	} else {
2266 		/*
2267 		 * Restore the multicast filter and disable promiscuous mode.
2268 		 */
2269 		VR_PUT32(vrp->acc_reg, VR_MAR0, vrp->mhash0);
2270 		VR_PUT32(vrp->acc_reg, VR_MAR1, vrp->mhash1);
2271 		rxcfg &= ~VR_RXCFG_PROMISC;
2272 		if (vrp->mcount != 0)
2273 			rxcfg |= VR_RXCFG_ACCEPTMULTI;
2274 	}
2275 	VR_PUT8(vrp->acc_reg, VR_RXCFG, rxcfg);
2276 	mutex_exit(&vrp->tx.lock);
2277 	mutex_exit(&vrp->oplock);
2278 	mutex_exit(&vrp->intrlock);
2279 	return (0);
2280 }
2281 
/*
 * mc_getstat(9E) entry point: report a single MAC- or Ethernet-level
 * statistic in *val.
 * Returns 0 on success or ENOTSUP for statistics this driver does not
 * maintain.
 * Sources: ETHER_STAT_ADV_* come from our advertisement (MII ANAR copy in
 * chip.mii.anadv), ETHER_STAT_CAP_* from the PHY status register copy,
 * ETHER_STAT_LP_* from the link partner ability register copy, and the
 * error/packet counters from the soft counters in vrp->stats.
 */
int
vr_mac_getstat(void *arg, uint_t stat, uint64_t *val)
{
	vr_t		*vrp;
	uint64_t	v;

	vrp = (void *) arg;

	switch (stat) {
	default:
		return (ENOTSUP);

	/* What we advertise to the link partner (copy of MII ANAR). */
	case ETHER_STAT_ADV_CAP_100T4:
		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_T4) != 0;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_TX_FD) != 0;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_TX) != 0;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		v = (vrp->chip.mii.anadv & MII_ABILITY_10BASE_T_FD) != 0;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		v = (vrp->chip.mii.anadv & MII_ABILITY_10BASE_T) != 0;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		/* Asymmetric pause is never advertised by this driver. */
		v = 0;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		v = (vrp->chip.mii.control & MII_CONTROL_ANE) != 0;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		v = (vrp->chip.mii.anadv & MII_ABILITY_PAUSE) != 0;
		break;

	case ETHER_STAT_ADV_REMFAULT:
		v = (vrp->chip.mii.anadv & MII_AN_ADVERT_REMFAULT) != 0;
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		v = vrp->stats.ether_stat_align_errors;
		break;

	/* PHY hardware capabilities (copy of the MII status register). */
	case ETHER_STAT_CAP_100T4:
		v = (vrp->chip.mii.status & MII_STATUS_100_BASE_T4) != 0;
		break;

	case ETHER_STAT_CAP_100FDX:
		v = (vrp->chip.mii.status & MII_STATUS_100_BASEX_FD) != 0;
		break;

	case ETHER_STAT_CAP_100HDX:
		v = (vrp->chip.mii.status & MII_STATUS_100_BASEX) != 0;
		break;

	case ETHER_STAT_CAP_10FDX:
		v = (vrp->chip.mii.status & MII_STATUS_10_FD) != 0;
		break;

	case ETHER_STAT_CAP_10HDX:
		v = (vrp->chip.mii.status & MII_STATUS_10) != 0;
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		v = 0;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		v = (vrp->chip.mii.status & MII_STATUS_CANAUTONEG) != 0;
		break;

	case ETHER_STAT_CAP_PAUSE:
		/* All supported MACs can at least receive pause frames. */
		v = 1;
		break;

	case ETHER_STAT_CAP_REMFAULT:
		v = (vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		/*
		 * Number of times carrier was lost or never detected on a
		 * transmission attempt.
		 */
		v = vrp->stats.ether_stat_carrier_errors;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		return (ENOTSUP);

	case ETHER_STAT_DEFER_XMTS:
		/*
		 * Packets without collisions where first transmit attempt was
		 * delayed because the medium was busy.
		 */
		v = vrp->stats.ether_stat_defer_xmts;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		/*
		 * Frames where excess collisions occurred on transmit, causing
		 * transmit failure.
		 */
		v = vrp->stats.ether_stat_ex_collisions;
		break;

	case ETHER_STAT_FCS_ERRORS:
		/*
		 * Packets received with CRC errors.
		 */
		v = vrp->stats.ether_stat_fcs_errors;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		/*
		 * Packets successfully transmitted with exactly one collision.
		 */
		v = vrp->stats.ether_stat_first_collisions;
		break;

	case ETHER_STAT_LINK_ASMPAUSE:
		v = 0;
		break;

	case ETHER_STAT_LINK_AUTONEG:
		/* True only when autoneg is both enabled and completed. */
		v = (vrp->chip.mii.control & MII_CONTROL_ANE) != 0 &&
		    (vrp->chip.mii.status & MII_STATUS_ANDONE) != 0;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		v = vrp->chip.link.duplex;
		break;

	case ETHER_STAT_LINK_PAUSE:
		v = vrp->chip.link.flowctrl;
		break;

	/* Link partner abilities (copy of the MII ANLPAR register). */
	case ETHER_STAT_LP_CAP_100T4:
		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_T4) != 0;
		break;

	case ETHER_STAT_LP_CAP_1000FDX:
		/* Fast-ethernet hardware: gigabit never applies. */
		v = 0;
		break;

	case ETHER_STAT_LP_CAP_1000HDX:
		v = 0;
		break;

	case ETHER_STAT_LP_CAP_100FDX:
		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_TX_FD) != 0;
		break;

	case ETHER_STAT_LP_CAP_100HDX:
		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_TX) != 0;
		break;

	case ETHER_STAT_LP_CAP_10FDX:
		v = (vrp->chip.mii.lpable & MII_ABILITY_10BASE_T_FD) != 0;
		break;

	case ETHER_STAT_LP_CAP_10HDX:
		v = (vrp->chip.mii.lpable & MII_ABILITY_10BASE_T) != 0;
		break;

	case ETHER_STAT_LP_CAP_ASMPAUSE:
		v = 0;
		break;

	case ETHER_STAT_LP_CAP_AUTONEG:
		v = (vrp->chip.mii.anexp & MII_AN_EXP_LPCANAN) != 0;
		break;

	case ETHER_STAT_LP_CAP_PAUSE:
		v = (vrp->chip.mii.lpable & MII_ABILITY_PAUSE) != 0;
		break;

	case ETHER_STAT_LP_REMFAULT:
		/*
		 * NOTE(review): this reads the local status register's
		 * remote-fault bit rather than the partner's ability
		 * register; vr_link_state() uses lpable for the same
		 * condition — confirm which source is intended.
		 */
		v = (vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		/*
		 * Packets received with MAC errors, except align_errors,
		 * fcs_errors, and toolong_errors.
		 */
		v = vrp->stats.ether_stat_macrcv_errors;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		/*
		 * Packets encountering transmit MAC failures, except carrier
		 * and collision failures.
		 */
		v = vrp->stats.ether_stat_macxmt_errors;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		/*
		 * Packets successfully transmitted with multiple collisions.
		 */
		v = vrp->stats.ether_stat_multi_collisions;
		break;

	case ETHER_STAT_SQE_ERRORS:
		/*
		 * Number of times signal quality error was reported
		 * This one is reported by the PHY.
		 */
		return (ENOTSUP);

	case ETHER_STAT_TOOLONG_ERRORS:
		/*
		 * Packets received larger than the maximum permitted length.
		 */
		v = vrp->stats.ether_stat_toolong_errors;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		v = vrp->stats.ether_stat_tooshort_errors;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		/*
		 * Number of times a transmit collision occurred late
		 * (after 512 bit times).
		 */
		v = vrp->stats.ether_stat_tx_late_collisions;
		break;

	case ETHER_STAT_XCVR_ADDR:
		/*
		 * MII address in the 0 to 31 range of the physical layer
		 * device in use for a given Ethernet device.
		 */
		v = vrp->chip.phyaddr;
		break;

	case ETHER_STAT_XCVR_ID:
		/*
		 * MII transceiver manufacturer and device ID.
		 */
		v = (vrp->chip.mii.identh << 16) | vrp->chip.mii.identl;
		break;

	case ETHER_STAT_XCVR_INUSE:
		v = vrp->chip.link.mau;
		break;

	/* Soft packet/byte counters maintained by the RX/TX paths. */
	case MAC_STAT_BRDCSTRCV:
		v = vrp->stats.mac_stat_brdcstrcv;
		break;

	case MAC_STAT_BRDCSTXMT:
		v = vrp->stats.mac_stat_brdcstxmt;
		break;

	case MAC_STAT_MULTIXMT:
		v = vrp->stats.mac_stat_multixmt;
		break;

	case MAC_STAT_COLLISIONS:
		v = vrp->stats.mac_stat_collisions;
		break;

	case MAC_STAT_IERRORS:
		v = vrp->stats.mac_stat_ierrors;
		break;

	case MAC_STAT_IFSPEED:
		/* Speed is reported in bits per second. */
		if (vrp->chip.link.speed == VR_LINK_SPEED_100MBS)
			v = 100 * 1000 * 1000;
		else if (vrp->chip.link.speed == VR_LINK_SPEED_10MBS)
			v = 10 * 1000 * 1000;
		else
			v = 0;
		break;

	case MAC_STAT_IPACKETS:
		v = vrp->stats.mac_stat_ipackets;
		break;

	case MAC_STAT_MULTIRCV:
		v = vrp->stats.mac_stat_multircv;
		break;

	case MAC_STAT_NORCVBUF:
		/*
		 * The missed-packet count accumulates in a hardware tally
		 * register; fold it into the soft counter and clear the
		 * hardware register so it is not counted twice.
		 */
		vrp->stats.mac_stat_norcvbuf +=
		    VR_GET16(vrp->acc_reg, VR_TALLY_MPA);
		VR_PUT16(vrp->acc_reg, VR_TALLY_MPA, 0);
		v = vrp->stats.mac_stat_norcvbuf;
		break;

	case MAC_STAT_NOXMTBUF:
		v = vrp->stats.mac_stat_noxmtbuf;
		break;

	case MAC_STAT_OBYTES:
		v = vrp->stats.mac_stat_obytes;
		break;

	case MAC_STAT_OERRORS:
		/* Sum of all transmit-side error counters. */
		v = vrp->stats.ether_stat_macxmt_errors +
		    vrp->stats.mac_stat_underflows +
		    vrp->stats.ether_stat_align_errors +
		    vrp->stats.ether_stat_carrier_errors +
		    vrp->stats.ether_stat_fcs_errors;
		break;

	case MAC_STAT_OPACKETS:
		v = vrp->stats.mac_stat_opackets;
		break;

	case MAC_STAT_RBYTES:
		v = vrp->stats.mac_stat_rbytes;
		break;

	case MAC_STAT_UNKNOWNS:
		/*
		 * Isn't this something for the MAC layer to maintain?
		 */
		return (ENOTSUP);

	case MAC_STAT_UNDERFLOWS:
		v = vrp->stats.mac_stat_underflows;
		break;

	case MAC_STAT_OVERFLOWS:
		v = vrp->stats.mac_stat_overflows;
		break;
	}
	*val = v;
	return (0);
}
2625 
2626 int
2627 vr_mac_set_ether_addr(void *p, const uint8_t *ea)
2628 {
2629 	vr_t	*vrp;
2630 	int	i;
2631 
2632 	vrp = (vr_t *)p;
2633 	mutex_enter(&vrp->oplock);
2634 	mutex_enter(&vrp->intrlock);
2635 
2636 	/*
2637 	 * Set a new station address.
2638 	 */
2639 	for (i = 0; i < ETHERADDRL; i++)
2640 		VR_PUT8(vrp->acc_reg, VR_ETHERADDR + i, ea[i]);
2641 
2642 	mutex_exit(&vrp->intrlock);
2643 	mutex_exit(&vrp->oplock);
2644 	return (0);
2645 }
2646 
2647 /*
2648  * Configure the ethernet link according to param and chip.mii.
2649  */
2650 static void
2651 vr_link_init(vr_t *vrp)
2652 {
2653 	ASSERT(mutex_owned(&vrp->oplock));
2654 	if ((vrp->chip.mii.control & MII_CONTROL_ANE) != 0) {
2655 		/*
2656 		 * If we do autoneg, ensure restart autoneg is ON.
2657 		 */
2658 		vrp->chip.mii.control |= MII_CONTROL_RSAN;
2659 
2660 		/*
2661 		 * The advertisements are prepared by param_init.
2662 		 */
2663 		vr_phy_write(vrp, MII_AN_ADVERT, vrp->chip.mii.anadv);
2664 	} else {
2665 		/*
2666 		 * If we don't autoneg, we need speed, duplex and flowcontrol
2667 		 * to configure the link. However, dladm doesn't allow changes
2668 		 * to speed and duplex (readonly). The way this is solved
2669 		 * (ahem) is to select the highest enabled combination
2670 		 * Speed and duplex should be r/w when autoneg is off.
2671 		 */
2672 		if ((vrp->param.anadv_en &
2673 		    MII_ABILITY_100BASE_TX_FD) != 0) {
2674 			vrp->chip.mii.control |= MII_CONTROL_100MB;
2675 			vrp->chip.mii.control |= MII_CONTROL_FDUPLEX;
2676 		} else if ((vrp->param.anadv_en &
2677 		    MII_ABILITY_100BASE_TX) != 0) {
2678 			vrp->chip.mii.control |= MII_CONTROL_100MB;
2679 			vrp->chip.mii.control &= ~MII_CONTROL_FDUPLEX;
2680 		} else if ((vrp->param.anadv_en &
2681 		    MII_ABILITY_10BASE_T_FD) != 0) {
2682 			vrp->chip.mii.control |= MII_CONTROL_FDUPLEX;
2683 			vrp->chip.mii.control &= ~MII_CONTROL_100MB;
2684 		} else {
2685 			vrp->chip.mii.control &= ~MII_CONTROL_100MB;
2686 			vrp->chip.mii.control &= ~MII_CONTROL_FDUPLEX;
2687 		}
2688 	}
2689 	/*
2690 	 * Write the control register.
2691 	 */
2692 	vr_phy_write(vrp, MII_CONTROL, vrp->chip.mii.control);
2693 
2694 	/*
2695 	 * With autoneg off we cannot rely on the link_change interrupt for
2696 	 * for getting the status into the driver.
2697 	 */
2698 	if ((vrp->chip.mii.control & MII_CONTROL_ANE) == 0) {
2699 		vr_link_state(vrp);
2700 		mac_link_update(vrp->machdl,
2701 		    (link_state_t)vrp->chip.link.state);
2702 	}
2703 }
2704 
2705 /*
2706  * Get link state in the driver and configure the MAC accordingly.
2707  */
static void
vr_link_state(vr_t *vrp)
{
	uint16_t		mask;

	ASSERT(mutex_owned(&vrp->oplock));

	/*
	 * Snapshot the relevant MII registers into the soft state.
	 */
	vr_phy_read(vrp, MII_STATUS, &vrp->chip.mii.status);
	vr_phy_read(vrp, MII_CONTROL, &vrp->chip.mii.control);
	vr_phy_read(vrp, MII_AN_ADVERT, &vrp->chip.mii.anadv);
	vr_phy_read(vrp, MII_AN_LPABLE, &vrp->chip.mii.lpable);
	vr_phy_read(vrp, MII_AN_EXPANSION, &vrp->chip.mii.anexp);

	/*
	 * If we did autoneg, deduce the link type/speed by selecting the
	 * highest common denominator.
	 */
	if ((vrp->chip.mii.control & MII_CONTROL_ANE) != 0) {
		/* Abilities both ends have in common. */
		mask = vrp->chip.mii.anadv & vrp->chip.mii.lpable;
		if ((mask & MII_ABILITY_100BASE_TX_FD) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
			vrp->chip.link.mau = VR_MAU_100X;
		} else if ((mask & MII_ABILITY_100BASE_T4) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
			vrp->chip.link.mau = VR_MAU_100T4;
		} else if ((mask & MII_ABILITY_100BASE_TX) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
			vrp->chip.link.mau = VR_MAU_100X;
		} else if ((mask & MII_ABILITY_10BASE_T_FD) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
			vrp->chip.link.mau = VR_MAU_10;
		} else if ((mask & MII_ABILITY_10BASE_T) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
			vrp->chip.link.mau = VR_MAU_10;
		} else {
			vrp->chip.link.speed = VR_LINK_SPEED_UNKNOWN;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_UNKNOWN;
			vrp->chip.link.mau = VR_MAU_UNKNOWN;
		}

		/*
		 * Did we negotiate pause? Pause is only used on full
		 * duplex links.
		 */
		if ((mask & MII_ABILITY_PAUSE) != 0 &&
		    vrp->chip.link.duplex == VR_LINK_DUPLEX_FULL)
			vrp->chip.link.flowctrl = VR_PAUSE_BIDIRECTIONAL;
		else
			vrp->chip.link.flowctrl = VR_PAUSE_NONE;

		/*
		 * Did either one detect an AN fault?
		 */
		if ((vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0)
			vr_log(vrp, CE_WARN,
			    "AN remote fault reported by LP.");

		if ((vrp->chip.mii.lpable & MII_AN_ADVERT_REMFAULT) != 0)
			vr_log(vrp, CE_WARN, "AN remote fault caused for LP.");
	} else {
		/*
		 * We didn't autoneg
		 * The link type is defined by the control register.
		 */
		if ((vrp->chip.mii.control & MII_CONTROL_100MB) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
			vrp->chip.link.mau = VR_MAU_100X;
		} else {
			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
			vrp->chip.link.mau = VR_MAU_10;
		}

		/*
		 * NOTE(review): flowctrl is only reset on the half duplex
		 * path below; on a forced full duplex link the previous
		 * flowctrl value persists — confirm this is intended.
		 */
		if ((vrp->chip.mii.control & MII_CONTROL_FDUPLEX) != 0)
			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
		else {
			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
			/*
			 * No pause on HDX links.
			 */
			vrp->chip.link.flowctrl = VR_PAUSE_NONE;
		}
	}

	/*
	 * Set the duplex mode on the MAC according to that of the PHY.
	 */
	if (vrp->chip.link.duplex == VR_LINK_DUPLEX_FULL) {
		VR_SETBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_MACFULLDUPLEX);
		/*
		 * Enable packet queueing on FDX links.
		 */
		if ((vrp->chip.info.bugs & VR_BUG_NO_TXQUEUEING) == 0)
			VR_CLRBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_QPKTDIS);
	} else {
		VR_CLRBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_MACFULLDUPLEX);
		/*
		 * Disable packet queueing on HDX links. With queueing enabled,
		 * this MAC gets lost after a TX abort (too many collisions).
		 */
		VR_SETBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_QPKTDIS);
	}

	/*
	 * Set pause options on the MAC.
	 */
	if (vrp->chip.link.flowctrl == VR_PAUSE_BIDIRECTIONAL) {
		/*
		 * All of our MAC's can receive pause frames.
		 */
		VR_SETBIT8(vrp->acc_reg, VR_MISC0, VR_MISC0_FDXRFEN);

		/*
		 * VT6105 and above can transmit pause frames.
		 */
		if ((vrp->chip.info.features & VR_FEATURE_TX_PAUSE_CAP) != 0) {
			/*
			 * Set the number of available receive descriptors
			 * Non-zero values written to this register are added
			 * to the register's contents. Careful: Writing zero
			 * clears the register and thus causes a (long) pause
			 * request.
			 */
			VR_PUT8(vrp->acc_reg, VR_FCR0_RXBUFCOUNT,
			    MIN(vrp->rx.ndesc, 0xFF) -
			    VR_GET8(vrp->acc_reg,
			    VR_FCR0_RXBUFCOUNT));

			/*
			 * Request pause when we have 4 descs left.
			 */
			VR_SETBITS8(vrp->acc_reg, VR_FCR1,
			    VR_FCR1_PAUSEONBITS, VR_FCR1_PAUSEON_04);

			/*
			 * Cancel the pause when there are 24 descriptors again.
			 */
			VR_SETBITS8(vrp->acc_reg, VR_FCR1,
			    VR_FCR1_PAUSEOFFBITS, VR_FCR1_PAUSEOFF_24);

			/*
			 * Request a pause of FFFF bit-times. This long pause
			 * is cancelled when the high watermark is reached.
			 */
			VR_PUT16(vrp->acc_reg, VR_FCR2_PAUSE, 0xFFFF);

			/*
			 * Enable flow control on the MAC.
			 */
			VR_SETBIT8(vrp->acc_reg, VR_MISC0, VR_MISC0_FDXTFEN);
			VR_SETBIT8(vrp->acc_reg, VR_FCR1, VR_FCR1_FD_RX_EN |
			    VR_FCR1_FD_TX_EN | VR_FCR1_XONXOFF_EN);
		}
	} else {
		/*
		 * Turn flow control OFF.
		 */
		VR_CLRBIT8(vrp->acc_reg,
		    VR_MISC0, VR_MISC0_FDXRFEN | VR_MISC0_FDXTFEN);
		if ((vrp->chip.info.features & VR_FEATURE_TX_PAUSE_CAP) != 0) {
			VR_CLRBIT8(vrp->acc_reg, VR_FCR1,
			    VR_FCR1_FD_RX_EN | VR_FCR1_FD_TX_EN |
			    VR_FCR1_XONXOFF_EN);
		}
	}

	/*
	 * Set link state.
	 */
	if ((vrp->chip.mii.status & MII_STATUS_LINKUP) != 0)
		vrp->chip.link.state = VR_LINK_STATE_UP;
	else
		vrp->chip.link.state = VR_LINK_STATE_DOWN;
}
2885 
2886 /*
2887  * The PHY is automatically polled by the MAC once per 1024 MD clock cycles
2888  * MD is clocked once per 960ns so polling happens about every 1M ns, some
2889  * 1000 times per second
2890  * This polling process is required for the functionality of the link change
2891  * interrupt. Polling process must be disabled in order to access PHY registers
2892  * using MDIO
2893  *
2894  * Turn off PHY polling so that the PHY registers can be accessed.
2895  */
static void
vr_phy_autopoll_disable(vr_t *vrp)
{
	uint32_t	time;
	uint8_t		miicmd, miiaddr;

	/*
	 * Special procedure to stop the autopolling.
	 */
	if ((vrp->chip.info.bugs & VR_BUG_MIIPOLLSTOP) != 0) {
		/*
		 * If polling is enabled.
		 */
		miicmd = VR_GET8(vrp->acc_reg, VR_MIICMD);
		if ((miicmd & VR_MIICMD_MD_AUTO) != 0) {
			/*
			 * Wait for the end of a cycle (mdone set).
			 * NOTE(review): waits 10us per round but accounts
			 * time in VR_MMI_WAITINCR steps; if the two differ
			 * the effective timeout deviates from
			 * VR_MMI_WAITMAX — confirm intended.
			 */
			time = 0;
			do {
				drv_usecwait(10);
				if (time >= VR_MMI_WAITMAX) {
					vr_log(vrp, CE_WARN,
					    "Timeout in "
					    "disable MII polling");
					break;
				}
				time += VR_MMI_WAITINCR;
				miiaddr = VR_GET8(vrp->acc_reg, VR_MIIADDR);
			} while ((miiaddr & VR_MIIADDR_MDONE) == 0);
		}
		/*
		 * Once paused, we can disable autopolling.
		 */
		VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
	} else {
		/*
		 * Turn off MII polling.
		 */
		VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);

		/*
		 * Wait for MIDLE in MII address register.
		 */
		time = 0;
		do {
			drv_usecwait(VR_MMI_WAITINCR);
			if (time >= VR_MMI_WAITMAX) {
				vr_log(vrp, CE_WARN,
				    "Timeout in disable MII polling");
				break;
			}
			time += VR_MMI_WAITINCR;
			miiaddr = VR_GET8(vrp->acc_reg, VR_MIIADDR);
		} while ((miiaddr & VR_MIIADDR_MIDLE) == 0);
	}
}
2953 
2954 /*
2955  * Turn on PHY polling. PHY's registers cannot be accessed.
2956  */
2957 static void
2958 vr_phy_autopoll_enable(vr_t *vrp)
2959 {
2960 	uint32_t	time;
2961 
2962 	VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
2963 	VR_PUT8(vrp->acc_reg, VR_MIIADDR, MII_STATUS|VR_MIIADDR_MAUTO);
2964 	VR_PUT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_AUTO);
2965 
2966 	/*
2967 	 * Wait for the polling process to finish.
2968 	 */
2969 	time = 0;
2970 	do {
2971 		drv_usecwait(VR_MMI_WAITINCR);
2972 		if (time >= VR_MMI_WAITMAX) {
2973 			vr_log(vrp, CE_NOTE, "Timeout in enable MII polling");
2974 			break;
2975 		}
2976 		time += VR_MMI_WAITINCR;
2977 	} while ((VR_GET8(vrp->acc_reg, VR_MIIADDR) & VR_MIIADDR_MDONE) == 0);
2978 
2979 	/*
2980 	 * Initiate a polling.
2981 	 */
2982 	VR_SETBIT8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_MAUTO);
2983 }
2984 
2985 /*
2986  * Read a register from the PHY using MDIO.
2987  */
2988 static void
2989 vr_phy_read(vr_t *vrp, int offset, uint16_t *value)
2990 {
2991 	uint32_t	time;
2992 
2993 	vr_phy_autopoll_disable(vrp);
2994 
2995 	/*
2996 	 * Write the register number to the lower 5 bits of the MII address
2997 	 * register.
2998 	 */
2999 	VR_SETBITS8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_BITS, offset);
3000 
3001 	/*
3002 	 * Write a READ command to the MII control register
3003 	 * This bit will be cleared when the read is finished.
3004 	 */
3005 	VR_SETBIT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_READ);
3006 
3007 	/*
3008 	 * Wait until the read is done.
3009 	 */
3010 	time = 0;
3011 	do {
3012 		drv_usecwait(VR_MMI_WAITINCR);
3013 		if (time >= VR_MMI_WAITMAX) {
3014 			vr_log(vrp, CE_NOTE, "Timeout in MII read command");
3015 			break;
3016 		}
3017 		time += VR_MMI_WAITINCR;
3018 	} while ((VR_GET8(vrp->acc_reg, VR_MIICMD) & VR_MIICMD_MD_READ) != 0);
3019 
3020 	*value = VR_GET16(vrp->acc_reg, VR_MIIDATA);
3021 	vr_phy_autopoll_enable(vrp);
3022 }
3023 
3024 /*
3025  * Write to a PHY's register.
3026  */
3027 static void
3028 vr_phy_write(vr_t *vrp, int offset, uint16_t value)
3029 {
3030 	uint32_t	time;
3031 
3032 	vr_phy_autopoll_disable(vrp);
3033 
3034 	/*
3035 	 * Write the register number to the MII address register.
3036 	 */
3037 	VR_SETBITS8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_BITS, offset);
3038 
3039 	/*
3040 	 * Write the value to the data register.
3041 	 */
3042 	VR_PUT16(vrp->acc_reg, VR_MIIDATA, value);
3043 
3044 	/*
3045 	 * Issue the WRITE command to the command register.
3046 	 * This bit will be cleared when the write is finished.
3047 	 */
3048 	VR_SETBIT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_WRITE);
3049 
3050 	time = 0;
3051 	do {
3052 		drv_usecwait(VR_MMI_WAITINCR);
3053 		if (time >= VR_MMI_WAITMAX) {
3054 			vr_log(vrp, CE_NOTE, "Timeout in MII write command");
3055 			break;
3056 		}
3057 		time += VR_MMI_WAITINCR;
3058 	} while ((VR_GET8(vrp->acc_reg, VR_MIICMD) & VR_MIICMD_MD_WRITE) != 0);
3059 	vr_phy_autopoll_enable(vrp);
3060 }
3061 
3062 /*
3063  * Initialize and install some private kstats.
3064  */
/* Descriptor for one driver-private kstat entry. */
typedef struct {
	char		*name;	/* kstat name as shown by kstat(1M) */
	uchar_t		type;	/* KSTAT_DATA_* type of the value */
} vr_kstat_t;
3069 
/*
 * Table of driver-private statistics. The order of these entries must
 * match the order in which vr_update_kstats() fills in the values.
 */
static const vr_kstat_t vr_driver_stats [] = {
	{"allocbfail",		KSTAT_DATA_INT32},
	{"intr_claimed",	KSTAT_DATA_INT64},
	{"intr_unclaimed",	KSTAT_DATA_INT64},
	{"linkchanges",		KSTAT_DATA_INT64},
	{"txnfree",		KSTAT_DATA_INT32},
	{"txstalls",		KSTAT_DATA_INT32},
	{"resets",		KSTAT_DATA_INT32},
	{"txreclaims",		KSTAT_DATA_INT64},
	{"txreclaim0",		KSTAT_DATA_INT64},
	{"cyclics",		KSTAT_DATA_INT64},
	{"txchecks",		KSTAT_DATA_INT64},
};
3083 
3084 static void
3085 vr_kstats_init(vr_t *vrp)
3086 {
3087 	kstat_t			*ksp;
3088 	struct	kstat_named	*knp;
3089 	int			i;
3090 	int			nstats;
3091 
3092 	nstats = sizeof (vr_driver_stats) / sizeof (vr_kstat_t);
3093 
3094 	ksp = kstat_create(MODULENAME, ddi_get_instance(vrp->devinfo),
3095 	    "driver", "net", KSTAT_TYPE_NAMED, nstats, 0);
3096 
3097 	if (ksp == NULL)
3098 		vr_log(vrp, CE_WARN, "kstat_create failed");
3099 
3100 	ksp->ks_update = vr_update_kstats;
3101 	ksp->ks_private = (void*) vrp;
3102 	knp = ksp->ks_data;
3103 
3104 	for (i = 0; i < nstats; i++, knp++) {
3105 		kstat_named_init(knp, vr_driver_stats[i].name,
3106 		    vr_driver_stats[i].type);
3107 	}
3108 	kstat_install(ksp);
3109 	vrp->ksp = ksp;
3110 }
3111 
3112 static int
3113 vr_update_kstats(kstat_t *ksp, int access)
3114 {
3115 	vr_t			*vrp;
3116 	struct kstat_named	*knp;
3117 
3118 	vrp = (vr_t *)ksp->ks_private;
3119 	knp = ksp->ks_data;
3120 
3121 	if (access != KSTAT_READ)
3122 		return (EACCES);
3123 
3124 	(knp++)->value.ui32 = vrp->stats.allocbfail;
3125 	(knp++)->value.ui64 = vrp->stats.intr_claimed;
3126 	(knp++)->value.ui64 = vrp->stats.intr_unclaimed;
3127 	(knp++)->value.ui64 = vrp->stats.linkchanges;
3128 	(knp++)->value.ui32 = vrp->tx.nfree;
3129 	(knp++)->value.ui32 = vrp->stats.txstalls;
3130 	(knp++)->value.ui32 = vrp->stats.resets;
3131 	(knp++)->value.ui64 = vrp->stats.txreclaims;
3132 	(knp++)->value.ui64 = vrp->stats.txreclaim0;
3133 	(knp++)->value.ui64 = vrp->stats.cyclics;
3134 	(knp++)->value.ui64 = vrp->stats.txchecks;
3135 	return (0);
3136 }
3137 
3138 /*
3139  * Remove 'private' kstats.
3140  */
3141 static void
3142 vr_remove_kstats(vr_t *vrp)
3143 {
3144 	if (vrp->ksp != NULL)
3145 		kstat_delete(vrp->ksp);
3146 }
3147 
3148 /*
3149  * Get a property of the device/driver
3150  * Remarks:
3151  * - pr_val is always an integer of size pr_valsize
3152  * - ENABLED (EN) is what is configured via dladm
3153  * - ADVERTISED (ADV) is ENABLED minus constraints, like PHY/MAC capabilities
3154  * - DEFAULT are driver- and hardware defaults (DEFAULT is implemented as a
3155  *   flag in pr_flags instead of MAC_PROP_DEFAULT_)
3156  * - perm is the permission printed on ndd -get /.. \?
3157  */
3158 int
3159 vr_mac_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3160     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
3161 {
3162 	vr_t		*vrp;
3163 	uint32_t	err;
3164 	uint64_t	val;
3165 
3166 	/* Since we have no private properties */
3167 	_NOTE(ARGUNUSED(pr_name))
3168 
3169 	err = 0;
3170 	vrp = (vr_t *)arg;
3171 	if ((pr_flags & MAC_PROP_DEFAULT) != 0) {
3172 		/*
3173 		 * Defaults depend on the PHY/MAC's capabilities
3174 		 * All defaults are read/write, otherwise reset-linkprop fails
3175 		 * with enotsup ....
3176 		 */
3177 		*perm = MAC_PROP_PERM_RW;
3178 		switch (pr_num) {
3179 			case MAC_PROP_ADV_1000FDX_CAP:
3180 			case MAC_PROP_EN_1000FDX_CAP:
3181 			case MAC_PROP_ADV_1000HDX_CAP:
3182 			case MAC_PROP_EN_1000HDX_CAP:
3183 				val = 0;
3184 				break;
3185 
3186 			case MAC_PROP_ADV_100FDX_CAP:
3187 			case MAC_PROP_EN_100FDX_CAP:
3188 				val = (vrp->chip.mii.status &
3189 				    MII_STATUS_100_BASEX_FD) != 0;
3190 				break;
3191 
3192 			case MAC_PROP_ADV_100HDX_CAP:
3193 			case MAC_PROP_EN_100HDX_CAP:
3194 				val = (vrp->chip.mii.status &
3195 				    MII_STATUS_100_BASEX) != 0;
3196 				break;
3197 
3198 			case MAC_PROP_ADV_100T4_CAP:
3199 			case MAC_PROP_EN_100T4_CAP:
3200 				val = (vrp->chip.mii.status &
3201 				    MII_STATUS_100_BASE_T4) != 0;
3202 				break;
3203 
3204 			case MAC_PROP_ADV_10FDX_CAP:
3205 			case MAC_PROP_EN_10FDX_CAP:
3206 				val = (vrp->chip.mii.status &
3207 				    MII_STATUS_10_FD) != 0;
3208 				break;
3209 
3210 			case MAC_PROP_ADV_10HDX_CAP:
3211 			case MAC_PROP_EN_10HDX_CAP:
3212 				val = (vrp->chip.mii.status &
3213 				    MII_STATUS_10) != 0;
3214 				break;
3215 
3216 			case MAC_PROP_AUTONEG:
3217 			case MAC_PROP_EN_AUTONEG:
3218 				val = (vrp->chip.mii.status &
3219 				    MII_STATUS_CANAUTONEG) != 0;
3220 				break;
3221 
3222 			case MAC_PROP_DUPLEX:
3223 				val = VR_LINK_DUPLEX_FULL;
3224 				break;
3225 
3226 			case MAC_PROP_FLOWCTRL:
3227 				val = VR_PAUSE_BIDIRECTIONAL;
3228 				break;
3229 
3230 			case MAC_PROP_MTU:
3231 				val = ETHERMTU;
3232 				break;
3233 
3234 			case MAC_PROP_SPEED:
3235 				val = 100 * 1000 * 1000;
3236 				break;
3237 
3238 			case MAC_PROP_STATUS:
3239 				val = VR_LINK_STATE_UP;
3240 				break;
3241 
3242 			default:
3243 				return (ENOTSUP);
3244 		}
3245 	} else {
3246 		switch (pr_num) {
3247 			case MAC_PROP_ADV_1000FDX_CAP:
3248 			case MAC_PROP_ADV_1000HDX_CAP:
3249 				val = 0;
3250 				*perm = MAC_PROP_PERM_READ;
3251 				break;
3252 
3253 			case MAC_PROP_EN_1000FDX_CAP:
3254 			case MAC_PROP_EN_1000HDX_CAP:
3255 				*perm = MAC_PROP_PERM_READ;
3256 				val = 0;
3257 				break;
3258 
3259 			case MAC_PROP_ADV_100FDX_CAP:
3260 				*perm = MAC_PROP_PERM_READ;
3261 				val = (vrp->chip.mii.anadv &
3262 				    MII_ABILITY_100BASE_TX_FD) != 0;
3263 				break;
3264 
3265 			case MAC_PROP_ADV_100HDX_CAP:
3266 				*perm = MAC_PROP_PERM_READ;
3267 				val = (vrp->chip.mii.anadv &
3268 				    MII_ABILITY_100BASE_TX) != 0;
3269 				break;
3270 
3271 			case MAC_PROP_ADV_100T4_CAP:
3272 				*perm = MAC_PROP_PERM_READ;
3273 				val = (vrp->chip.mii.anadv &
3274 				    MII_ABILITY_100BASE_T4) != 0;
3275 				break;
3276 
3277 			case MAC_PROP_ADV_10FDX_CAP:
3278 				*perm = MAC_PROP_PERM_READ;
3279 				val = (vrp->chip.mii.anadv &
3280 				    MII_ABILITY_10BASE_T_FD) != 0;
3281 				break;
3282 
3283 			case MAC_PROP_ADV_10HDX_CAP:
3284 				*perm = MAC_PROP_PERM_READ;
3285 				val = (vrp->chip.mii.anadv &
3286 				    MII_ABILITY_10BASE_T) != 0;
3287 				break;
3288 
3289 			case MAC_PROP_AUTONEG:
3290 				*perm = MAC_PROP_PERM_RW;
3291 				val = (vrp->chip.mii.control &
3292 				    MII_CONTROL_ANE) != 0;
3293 				break;
3294 
3295 			case MAC_PROP_DUPLEX:
3296 				/*
3297 				 * Writability depends on autoneg.
3298 				 */
3299 				if ((vrp->chip.mii.control &
3300 				    MII_CONTROL_ANE) == 0)
3301 					*perm = MAC_PROP_PERM_RW;
3302 				else
3303 					*perm = MAC_PROP_PERM_READ;
3304 				val = vrp->chip.link.duplex;
3305 				break;
3306 
3307 			case MAC_PROP_EN_100FDX_CAP:
3308 				*perm = MAC_PROP_PERM_RW;
3309 				val = (vrp->param.anadv_en &
3310 				    MII_ABILITY_100BASE_TX_FD) != 0;
3311 				break;
3312 
3313 			case MAC_PROP_EN_100HDX_CAP:
3314 				*perm = MAC_PROP_PERM_RW;
3315 				val = (vrp->param.anadv_en &
3316 				    MII_ABILITY_100BASE_TX) != 0;
3317 				break;
3318 
3319 			case MAC_PROP_EN_100T4_CAP:
3320 				*perm = MAC_PROP_PERM_READ;
3321 				val = (vrp->param.anadv_en &
3322 				    MII_ABILITY_100BASE_T4) != 0;
3323 				break;
3324 
3325 			case MAC_PROP_EN_10FDX_CAP:
3326 				*perm = MAC_PROP_PERM_RW;
3327 				val = (vrp->param.anadv_en &
3328 				    MII_ABILITY_10BASE_T_FD) != 0;
3329 				break;
3330 
3331 			case MAC_PROP_EN_10HDX_CAP:
3332 				*perm = MAC_PROP_PERM_RW;
3333 				val = (vrp->param.anadv_en &
3334 				    MII_ABILITY_10BASE_T) != 0;
3335 				break;
3336 
3337 			case MAC_PROP_EN_AUTONEG:
3338 				*perm = MAC_PROP_PERM_RW;
3339 				val = vrp->param.an_en == VR_LINK_AUTONEG_ON;
3340 				break;
3341 
3342 			case MAC_PROP_FLOWCTRL:
3343 				*perm = MAC_PROP_PERM_RW;
3344 				val = vrp->chip.link.flowctrl;
3345 				break;
3346 
3347 			case MAC_PROP_MTU:
3348 				*perm = MAC_PROP_PERM_RW;
3349 				val = vrp->param.mtu;
3350 				break;
3351 
3352 			case MAC_PROP_SPEED:
3353 				/*
3354 				 * Writability depends on autoneg.
3355 				 */
3356 				if ((vrp->chip.mii.control &
3357 				    MII_CONTROL_ANE) == 0)
3358 					*perm = MAC_PROP_PERM_RW;
3359 				else
3360 					*perm = MAC_PROP_PERM_READ;
3361 				if (vrp->chip.link.speed ==
3362 				    VR_LINK_SPEED_100MBS)
3363 					val = 100 * 1000 * 1000;
3364 				else if (vrp->chip.link.speed ==
3365 				    VR_LINK_SPEED_10MBS)
3366 					val = 10 * 1000 * 1000;
3367 				else
3368 					val = 0;
3369 				break;
3370 
3371 			case MAC_PROP_STATUS:
3372 				val = vrp->chip.link.state;
3373 				break;
3374 
3375 			default:
3376 				err = ENOTSUP;
3377 				break;
3378 		}
3379 	}
3380 	if (err == 0 && pr_num != MAC_PROP_PRIVATE) {
3381 		if (pr_valsize == sizeof (uint64_t))
3382 			*(uint64_t *)pr_val = val;
3383 		else if (pr_valsize == sizeof (uint32_t))
3384 			*(uint32_t *)pr_val = val;
3385 		else if (pr_valsize == sizeof (uint16_t))
3386 			*(uint16_t *)pr_val = val;
3387 		else if (pr_valsize == sizeof (uint8_t))
3388 			*(uint8_t *)pr_val = val;
3389 		else
3390 			err = EINVAL;
3391 	}
3392 	return (err);
3393 }
3394 
3395 /*
3396  * Set a property of the device.
3397  */
3398 int
3399 vr_mac_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3400 	uint_t pr_valsize, const void *pr_val)
3401 {
3402 	vr_t		*vrp;
3403 	uint32_t	err;
3404 	uint64_t	val;
3405 
3406 	/* Since we have no private properties */
3407 	_NOTE(ARGUNUSED(pr_name))
3408 
3409 	err = 0;
3410 	vrp = (vr_t *)arg;
3411 	mutex_enter(&vrp->oplock);
3412 
3413 	/*
3414 	 * The current set of public property values are passed as integers
3415 	 * Private properties are passed as strings in pr_val length pr_valsize.
3416 	 */
3417 	if (pr_num != MAC_PROP_PRIVATE) {
3418 		if (pr_valsize == sizeof (uint64_t))
3419 			val = *(uint64_t *)pr_val;
3420 		else if (pr_valsize == sizeof (uint32_t))
3421 			val = *(uint32_t *)pr_val;
3422 		else if (pr_valsize == sizeof (uint16_t))
3423 			val = *(uint32_t *)pr_val;
3424 		else if (pr_valsize == sizeof (uint8_t))
3425 			val = *(uint8_t *)pr_val;
3426 		else {
3427 			mutex_exit(&vrp->oplock);
3428 			return (EINVAL);
3429 		}
3430 	}
3431 
3432 	switch (pr_num) {
3433 		case MAC_PROP_DUPLEX:
3434 			if ((vrp->chip.mii.control & MII_CONTROL_ANE) == 0) {
3435 				if (val == LINK_DUPLEX_FULL)
3436 					vrp->chip.mii.control |=
3437 					    MII_CONTROL_FDUPLEX;
3438 				else if (val == LINK_DUPLEX_HALF)
3439 					vrp->chip.mii.control &=
3440 					    ~MII_CONTROL_FDUPLEX;
3441 				else
3442 					err = EINVAL;
3443 			} else
3444 				err = EINVAL;
3445 			break;
3446 
3447 		case MAC_PROP_EN_100FDX_CAP:
3448 			if (val == 0)
3449 				vrp->param.anadv_en &=
3450 				    ~MII_ABILITY_100BASE_TX_FD;
3451 			else
3452 				vrp->param.anadv_en |=
3453 				    MII_ABILITY_100BASE_TX_FD;
3454 			break;
3455 
3456 		case MAC_PROP_EN_100HDX_CAP:
3457 			if (val == 0)
3458 				vrp->param.anadv_en &=
3459 				    ~MII_ABILITY_100BASE_TX;
3460 			else
3461 				vrp->param.anadv_en |=
3462 				    MII_ABILITY_100BASE_TX;
3463 			break;
3464 
3465 		case MAC_PROP_EN_100T4_CAP:
3466 			if (val == 0)
3467 				vrp->param.anadv_en &=
3468 				    ~MII_ABILITY_100BASE_T4;
3469 			else
3470 				vrp->param.anadv_en |=
3471 				    MII_ABILITY_100BASE_T4;
3472 			break;
3473 
3474 		case MAC_PROP_EN_10FDX_CAP:
3475 			if (val == 0)
3476 				vrp->param.anadv_en &=
3477 				    ~MII_ABILITY_10BASE_T_FD;
3478 			else
3479 				vrp->param.anadv_en |=
3480 				    MII_ABILITY_10BASE_T_FD;
3481 			break;
3482 
3483 		case MAC_PROP_EN_10HDX_CAP:
3484 			if (val == 0)
3485 				vrp->param.anadv_en &=
3486 				    ~MII_ABILITY_10BASE_T;
3487 			else
3488 				vrp->param.anadv_en |=
3489 				    MII_ABILITY_10BASE_T;
3490 			break;
3491 
3492 		case MAC_PROP_AUTONEG:
3493 		case MAC_PROP_EN_AUTONEG:
3494 			if (val == 0) {
3495 				vrp->param.an_en = VR_LINK_AUTONEG_OFF;
3496 				vrp->chip.mii.control &= ~MII_CONTROL_ANE;
3497 			} else {
3498 				vrp->param.an_en = VR_LINK_AUTONEG_ON;
3499 				if ((vrp->chip.mii.status &
3500 				    MII_STATUS_CANAUTONEG) != 0)
3501 					vrp->chip.mii.control |=
3502 					    MII_CONTROL_ANE;
3503 				else
3504 					err = EINVAL;
3505 			}
3506 			break;
3507 
3508 		case MAC_PROP_FLOWCTRL:
3509 			if (val == LINK_FLOWCTRL_NONE)
3510 				vrp->param.anadv_en &= ~MII_ABILITY_PAUSE;
3511 			else if (val == LINK_FLOWCTRL_BI)
3512 				vrp->param.anadv_en |= MII_ABILITY_PAUSE;
3513 			else
3514 				err = EINVAL;
3515 			break;
3516 
3517 		case MAC_PROP_MTU:
3518 			if (val >= ETHERMIN && val <= ETHERMTU)
3519 				vrp->param.mtu = (uint32_t)val;
3520 			else
3521 				err = EINVAL;
3522 			break;
3523 
3524 		case MAC_PROP_SPEED:
3525 			if (val == 10 * 1000 * 1000)
3526 				vrp->chip.link.speed =
3527 				    VR_LINK_SPEED_10MBS;
3528 			else if (val == 100 * 1000 * 1000)
3529 				vrp->chip.link.speed =
3530 				    VR_LINK_SPEED_100MBS;
3531 			else
3532 				err = EINVAL;
3533 			break;
3534 
3535 		default:
3536 			err = ENOTSUP;
3537 			break;
3538 	}
3539 	if (err == 0 && pr_num != MAC_PROP_PRIVATE) {
3540 		vrp->chip.mii.anadv = vrp->param.anadv_en &
3541 		    (vrp->param.an_phymask & vrp->param.an_macmask);
3542 		vr_link_init(vrp);
3543 	}
3544 	mutex_exit(&vrp->oplock);
3545 	return (err);
3546 }
3547 
3548 
3549 /*
3550  * Logging and debug functions.
3551  */
/*
 * Shared state for the logging helpers below; the mutex serializes
 * concurrent loggers and protects the other fields.
 */
static struct {
	kmutex_t mutex[1];	/* guards this structure */
	const char *ifname;	/* name prefixed to each message */
	const char *fmt;	/* cmn_err() format, e.g. "!%s: %s" */
	int level;		/* cmn_err() severity level */
} prtdata;
3558 
3559 static void
3560 vr_vprt(const char *fmt, va_list args)
3561 {
3562 	char buf[512];
3563 
3564 	ASSERT(mutex_owned(prtdata.mutex));
3565 	(void) vsnprintf(buf, sizeof (buf), fmt, args);
3566 	cmn_err(prtdata.level, prtdata.fmt, prtdata.ifname, buf);
3567 }
3568 
3569 static void
3570 vr_log(vr_t *vrp, int level, const char *fmt, ...)
3571 {
3572 	va_list args;
3573 
3574 	mutex_enter(prtdata.mutex);
3575 	prtdata.ifname = vrp->ifname;
3576 	prtdata.fmt = "!%s: %s";
3577 	prtdata.level = level;
3578 
3579 	va_start(args, fmt);
3580 	vr_vprt(fmt, args);
3581 	va_end(args);
3582 
3583 	mutex_exit(prtdata.mutex);
3584 }
3585 
3586 #if defined(DEBUG)
/*
 * Emit a debug message using the prtdata state prepared by vr_debug().
 * Note the asymmetric locking: prtdata.mutex was entered by vr_debug(),
 * which returned this function; we release it here after printing, so
 * every vr_debug()(...) invocation forms a matched enter/exit pair.
 */
static void
vr_prt(const char *fmt, ...)
{
	va_list args;

	ASSERT(mutex_owned(prtdata.mutex));

	va_start(args, fmt);
	vr_vprt(fmt, args);
	va_end(args);

	mutex_exit(prtdata.mutex);	/* entered by vr_debug() */
}
3600 
/*
 * Return a printf-like debug function.  Acquires prtdata.mutex and
 * sets up the shared prefix/format/level; the returned vr_prt() does
 * the actual printing and releases the mutex, so the lock is held
 * from this call until the returned function is invoked.
 */
void
(*vr_debug())(const char *fmt, ...)
{
	mutex_enter(prtdata.mutex);
	prtdata.ifname = MODULENAME;
	prtdata.fmt = "^%s: %s\n";
	prtdata.level = CE_CONT;

	return (vr_prt);
}
3611 #endif	/* DEBUG */
3612 
/*
 * Device operations: attach, detach and quiesce are implemented;
 * the remaining entry points are no-ops or unsupported (nulldev/nodev),
 * and the driver is marked MT-safe with D_MP.
 */
DDI_DEFINE_STREAM_OPS(vr_dev_ops, nulldev, nulldev, vr_attach, vr_detach,
nodev, NULL, D_MP, NULL, vr_quiesce);
3615 
/*
 * Linkage description for a loadable device driver module.
 */
static struct modldrv vr_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	vr_ident,		/* short description */
	&vr_dev_ops		/* driver specific ops */
};
3621 
/*
 * Module linkage: a single driver linkage element, NULL-terminated.
 */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&vr_modldrv, NULL
};
3625 
/*
 * Report module information to the modules framework.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
3631 
3632 int
3633 _init(void)
3634 {
3635 	int	status;
3636 
3637 	mac_init_ops(&vr_dev_ops, MODULENAME);
3638 	status = mod_install(&modlinkage);
3639 	if (status == DDI_SUCCESS)
3640 		mutex_init(prtdata.mutex, NULL, MUTEX_DRIVER, NULL);
3641 	else
3642 		mac_fini_ops(&vr_dev_ops);
3643 	return (status);
3644 }
3645 
3646 int
3647 _fini(void)
3648 {
3649 	int status;
3650 
3651 	status = mod_remove(&modlinkage);
3652 	if (status == 0) {
3653 		mac_fini_ops(&vr_dev_ops);
3654 		mutex_destroy(prtdata.mutex);
3655 	}
3656 	return (status);
3657 }
3658