xref: /illumos-gate/usr/src/uts/common/io/vr/vr.c (revision 6d02032db7b674f185405d42cc8bf10a46a9ab3a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/stream.h>
29 #include <sys/strsun.h>
30 #include <sys/stat.h>
31 #include <sys/pci.h>
32 #include <sys/modctl.h>
33 #include <sys/kstat.h>
34 #include <sys/ethernet.h>
35 #include <sys/devops.h>
36 #include <sys/debug.h>
37 #include <sys/conf.h>
38 #include <sys/mac.h>
39 #include <sys/mac_provider.h>
40 #include <sys/mac_ether.h>
41 #include <sys/sysmacros.h>
42 #include <sys/dditypes.h>
43 #include <sys/ddi.h>
44 #include <sys/sunddi.h>
45 #include <sys/miiregs.h>
46 #include <sys/byteorder.h>
47 #include <sys/note.h>
48 #include <sys/vlan.h>
49 
50 #include "vr.h"
51 #include "vr_impl.h"
52 
53 /*
54  * VR in a nutshell
55  * The card uses two rings of data structures to communicate with the host.
56  * These are referred to as "descriptor rings" and there is one for transmit
57  * (TX) and one for receive (RX).
58  *
59  * The driver uses a "DMA buffer" data type for mapping to those descriptor
60  * rings. This is a structure with handles and a DMA'able buffer attached to it.
61  *
62  * Receive
63  * The receive ring is filled with DMA buffers. Received packets are copied into
64  * a newly allocated mblk's and passed upstream.
65  *
66  * Transmit
67  * Each transmit descriptor has a DMA buffer attached to it. The data of TX
68  * packets is copied into the DMA buffer which is then enqueued for
69  * transmission.
70  *
71  * Reclaim of transmitted packets is done as a result of a transmit completion
72  * interrupt which is generated 3 times per ring at minimum.
73  */
74 
#if defined(DEBUG)
/*
 * Debug tracing support.
 * VR_DEBUG((...)) forwards its argument list to the function returned by
 * vr_debug() whenever vrdebug is non-zero; in non-DEBUG builds the macro
 * collapses to an empty statement.
 */
uint32_t	vrdebug = 1;
#define	VR_DEBUG(args)	do {				\
		if (vrdebug > 0)			\
			(*vr_debug()) args;		\
			_NOTE(CONSTANTCONDITION)	\
		} while (0)
static	void	vr_prt(const char *fmt, ...);
	void	(*vr_debug())(const char *fmt, ...);
#else
#define	VR_DEBUG(args)	do ; _NOTE(CONSTANTCONDITION) while (0)
#endif
87 
static char vr_ident[] = "VIA Rhine Ethernet v1.42";

/*
 * Attributes for accessing registers and memory descriptors for this device.
 * Descriptors are little-endian chip structures, hence DDI_STRUCTURE_LE_ACC.
 */
static ddi_device_acc_attr_t vr_dev_dma_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Attributes for accessing data.
 * Packet data is a plain byte stream, so no byte swapping is wanted.
 */
static ddi_device_acc_attr_t vr_data_dma_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * DMA attributes for descriptors for communication with the device
 * This driver assumes that all descriptors of one ring fit in one consecutive
 * memory area of max 4K (256 descriptors) that does not cross a page boundary.
 * Therefore, we request 4K alignment.
 */
static ddi_dma_attr_t vr_dev_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0,				/* low DMA address range */
	0xFFFFFFFF,			/* high DMA address range */
	0x7FFFFFFF,			/* DMA counter register */
	0x1000,				/* DMA address alignment */
	0x7F,				/* DMA burstsizes */
	1,				/* min effective DMA size */
	0xFFFFFFFF,			/* max DMA xfer size */
	0xFFFFFFFF,			/* segment boundary */
	1,				/* s/g list length */
	1,				/* granularity of device */
	0				/* DMA transfer flags */
};

/*
 * DMA attributes for the data moved to/from the device
 * Note that the alignment is set to 2K so that a 1500 byte packet never
 * crosses a page boundary and thus that a DMA transfer is not split up in
 * multiple cookies with a 4K/8K pagesize
 */
static ddi_dma_attr_t vr_data_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0,				/* low DMA address range */
	0xFFFFFFFF,			/* high DMA address range */
	0x7FFFFFFF,			/* DMA counter register */
	0x800,				/* DMA address alignment */
	0xfff,				/* DMA burstsizes */
	1,				/* min effective DMA size */
	0xFFFFFFFF,			/* max DMA xfer size */
	0xFFFFFFFF,			/* segment boundary */
	1,				/* s/g list length */
	1,				/* granularity of device */
	0				/* DMA transfer flags */
};
149 
/*
 * GLDv3 entry points for this driver; the first word tells mac(9E) which
 * of the optional callbacks (setprop/getprop/propinfo) are implemented.
 */
static mac_callbacks_t vr_mac_callbacks = {
	MC_SETPROP|MC_GETPROP|MC_PROPINFO, /* Which callbacks are set */
	vr_mac_getstat,		/* Get the value of a statistic */
	vr_mac_start,		/* Start the device */
	vr_mac_stop,		/* Stop the device */
	vr_mac_set_promisc,	/* Enable or disable promiscuous mode */
	vr_mac_set_multicast,	/* Enable or disable a multicast addr */
	vr_mac_set_ether_addr,	/* Set the unicast MAC address */
	vr_mac_tx_enqueue_list,	/* Transmit a packet */
	NULL,			/* Unused slot; presumably reserved --
				 * confirm against mac_callbacks_t */
	NULL,			/* Process an unknown ioctl */
	NULL,			/* Get capability information */
	NULL,			/* Open the device */
	NULL,			/* Close the device */
	vr_mac_setprop,		/* Set properties of the device */
	vr_mac_getprop,		/* Get properties of the device */
	vr_mac_propinfo		/* Get properties attributes */
};
168 
/*
 * Table with bugs and features for each incarnation of the card.
 * Each entry lists the inclusive [revmin, revmax] PCI revision range it
 * applies to, a human readable name, a bug mask and a feature mask.
 * vr_bus_config() copies the first entry whose range contains the chip's
 * revision; entry 0 doubles as the fallback for unknown revisions.
 */
static const chip_info_t vr_chip_info [] = {
	{
		0x0, 0x0,
		"VIA Rhine Fast Ethernet",
		(VR_BUG_NO_MEMIO),
		(VR_FEATURE_NONE)
	},
	{
		0x04, 0x21,
		"VIA VT86C100A Fast Ethernet",
		(VR_BUG_NEEDMODE2PCEROPT | VR_BUG_NO_TXQUEUEING |
		    VR_BUG_NEEDMODE10T | VR_BUG_TXALIGN | VR_BUG_NO_MEMIO |
		    VR_BUG_MIIPOLLSTOP),
		(VR_FEATURE_NONE)
	},
	{
		0x40, 0x41,
		"VIA VT6102-A Rhine II Fast Ethernet",
		(VR_BUG_NEEDMODE2PCEROPT),
		(VR_FEATURE_RX_PAUSE_CAP)
	},
	{
		0x42, 0x7f,
		"VIA VT6102-C Rhine II Fast Ethernet",
		(VR_BUG_NEEDMODE2PCEROPT),
		(VR_FEATURE_RX_PAUSE_CAP)
	},
	{
		0x80, 0x82,
		"VIA VT6105-A Rhine III Fast Ethernet",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
	},
	{
		0x83, 0x89,
		"VIA VT6105-B Rhine III Fast Ethernet",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
	},
	{
		0x8a, 0x8b,
		"VIA VT6105-LOM Rhine III Fast Ethernet",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
	},
	{
		0x8c, 0x8c,
		"VIA VT6107-A0 Rhine III Fast Ethernet",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
	},
	{
		0x8d, 0x8f,
		"VIA VT6107-A1 Rhine III Fast Ethernet",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
		    VR_FEATURE_MRDLNMULTIPLE)
	},
	{
		0x90, 0x93,
		"VIA VT6105M-A0 Rhine III Fast Ethernet Management Adapter",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
		    VR_FEATURE_TXCHKSUM | VR_FEATURE_RXCHKSUM |
		    VR_FEATURE_CAMSUPPORT | VR_FEATURE_VLANTAGGING |
		    VR_FEATURE_MIBCOUNTER)
	},
	{
		0x94, 0xff,
		"VIA VT6105M-B1 Rhine III Fast Ethernet Management Adapter",
		(VR_BUG_NONE),
		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
		    VR_FEATURE_TXCHKSUM | VR_FEATURE_RXCHKSUM |
		    VR_FEATURE_CAMSUPPORT | VR_FEATURE_VLANTAGGING |
		    VR_FEATURE_MIBCOUNTER)
	}
};
249 
/*
 * Function prototypes for the static helpers defined later in this file.
 */
static	vr_result_t	vr_add_intr(vr_t *vrp);
static	void		vr_remove_intr(vr_t *vrp);
static	int32_t		vr_cam_index(vr_t *vrp, const uint8_t *maddr);
static	uint32_t	ether_crc_be(const uint8_t *address);
static	void		vr_tx_enqueue_msg(vr_t *vrp, mblk_t *mp);
static	void		vr_log(vr_t *vrp, int level, const char *fmt, ...);
static	int		vr_resume(dev_info_t *devinfo);
static	int		vr_suspend(dev_info_t *devinfo);
static	vr_result_t	vr_bus_config(vr_t *vrp);
static	void		vr_bus_unconfig(vr_t *vrp);
static	void		vr_reset(vr_t *vrp);
static	int		vr_start(vr_t *vrp);
static	int		vr_stop(vr_t *vrp);
static	vr_result_t	vr_rings_init(vr_t *vrp);
static	void		vr_rings_fini(vr_t *vrp);
static	vr_result_t	vr_alloc_ring(vr_t *vrp, vr_ring_t *r, size_t n);
static	void		vr_free_ring(vr_ring_t *r, size_t n);
static	vr_result_t	vr_rxring_init(vr_t *vrp);
static	void		vr_rxring_fini(vr_t *vrp);
static	vr_result_t	vr_txring_init(vr_t *vrp);
static	void		vr_txring_fini(vr_t *vrp);
static	vr_result_t	vr_alloc_dmabuf(vr_t *vrp, vr_data_dma_t *dmap,
			    uint_t flags);
static	void		vr_free_dmabuf(vr_data_dma_t *dmap);
static	void		vr_param_init(vr_t *vrp);
static	mblk_t		*vr_receive(vr_t *vrp);
static	void		vr_tx_reclaim(vr_t *vrp);
static	void		vr_periodic(void *p);
static	void		vr_error(vr_t *vrp);
static	void		vr_phy_read(vr_t *vrp, int offset, uint16_t *value);
static	void		vr_phy_write(vr_t *vrp, int offset, uint16_t value);
static	void		vr_phy_autopoll_disable(vr_t *vrp);
static	void		vr_phy_autopoll_enable(vr_t *vrp);
static	void		vr_link_init(vr_t *vrp);
static	void		vr_link_state(vr_t *vrp);
static	void		vr_kstats_init(vr_t *vrp);
static	int		vr_update_kstats(kstat_t *ksp, int access);
static	void		vr_remove_kstats(vr_t *vrp);
291 
/*
 * attach(9E) entry point.
 * DDI_RESUME is delegated to vr_resume(); DDI_ATTACH performs the full
 * bring-up: soft state, bus/register mapping, parameters, descriptor
 * rings, kstats, interrupt, mutexes, and finally mac registration.
 * On failure the fail0..fail7 labels unwind, in reverse order, exactly
 * the steps completed so far. Note that fail3 intentionally falls
 * through into fail2; no "goto fail2" exists.
 */
static int
vr_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	vr_t		*vrp;
	mac_register_t	*macreg;

	if (cmd == DDI_RESUME)
		return (vr_resume(devinfo));
	else if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/*
	 * Attach.
	 */
	vrp = kmem_zalloc(sizeof (vr_t), KM_SLEEP);
	ddi_set_driver_private(devinfo, vrp);
	vrp->devinfo = devinfo;

	/*
	 * Store the name+instance of the module.
	 */
	(void) snprintf(vrp->ifname, sizeof (vrp->ifname), "%s%d",
	    MODULENAME, ddi_get_instance(devinfo));

	/*
	 * Bus initialization.
	 */
	if (vr_bus_config(vrp) != VR_SUCCESS) {
		vr_log(vrp, CE_WARN, "vr_bus_config failed");
		goto fail0;
	}

	/*
	 * Initialize default parameters.
	 */
	vr_param_init(vrp);

	/*
	 * Setup the descriptor rings.
	 */
	if (vr_rings_init(vrp) != VR_SUCCESS) {
		vr_log(vrp, CE_WARN, "vr_rings_init failed");
		goto fail1;
	}

	/*
	 * Initialize kstats.
	 */
	vr_kstats_init(vrp);

	/*
	 * Add interrupt to the OS.
	 */
	if (vr_add_intr(vrp) != VR_SUCCESS) {
		vr_log(vrp, CE_WARN, "vr_add_intr failed in attach");
		goto fail3;
	}

	/*
	 * Add mutexes.
	 * intrlock needs the interrupt priority that vr_add_intr() just
	 * recorded, hence the ordering of these two steps.
	 */
	mutex_init(&vrp->intrlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(vrp->intr_pri));
	mutex_init(&vrp->oplock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&vrp->tx.lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Enable interrupt.
	 */
	if (ddi_intr_enable(vrp->intr_hdl) != DDI_SUCCESS) {
		vr_log(vrp, CE_NOTE, "ddi_intr_enable failed");
		goto fail5;
	}

	/*
	 * Register with parent, mac.
	 */
	if ((macreg = mac_alloc(MAC_VERSION)) == NULL) {
		vr_log(vrp, CE_WARN, "mac_alloc failed in attach");
		goto fail6;
	}

	macreg->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macreg->m_driver = vrp;
	macreg->m_dip = devinfo;
	macreg->m_src_addr = vrp->vendor_ether_addr;
	macreg->m_callbacks = &vr_mac_callbacks;
	macreg->m_min_sdu = 0;
	macreg->m_max_sdu = ETHERMTU;
	macreg->m_margin = VLAN_TAGSZ;

	if (mac_register(macreg, &vrp->machdl) != 0) {
		vr_log(vrp, CE_WARN, "mac_register failed in attach");
		goto fail7;
	}
	mac_free(macreg);
	return (DDI_SUCCESS);

fail7:
	mac_free(macreg);
fail6:
	(void) ddi_intr_disable(vrp->intr_hdl);
fail5:
	mutex_destroy(&vrp->tx.lock);
	mutex_destroy(&vrp->oplock);
	mutex_destroy(&vrp->intrlock);
	vr_remove_intr(vrp);
fail3:
	vr_remove_kstats(vrp);
fail2:
	vr_rings_fini(vrp);
fail1:
	vr_bus_unconfig(vrp);
fail0:
	kmem_free(vrp, sizeof (vr_t));
	return (DDI_FAILURE);
}
409 
/*
 * detach(9E) entry point.
 * DDI_SUSPEND is delegated to vr_suspend(); DDI_DETACH tears down, in
 * reverse order, everything vr_attach() set up. Detach is refused while
 * the chip is still running or while mac_unregister() fails (i.e. the
 * mac layer still holds us).
 */
static int
vr_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	vr_t		*vrp;

	vrp = ddi_get_driver_private(devinfo);

	if (cmd == DDI_SUSPEND)
		return (vr_suspend(devinfo));
	else if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	if (vrp->chip.state == CHIPSTATE_RUNNING)
		return (DDI_FAILURE);

	/*
	 * Try to un-register from the MAC layer.
	 */
	if (mac_unregister(vrp->machdl) != 0)
		return (DDI_FAILURE);

	(void) ddi_intr_disable(vrp->intr_hdl);
	vr_remove_intr(vrp);
	mutex_destroy(&vrp->tx.lock);
	mutex_destroy(&vrp->oplock);
	mutex_destroy(&vrp->intrlock);
	vr_remove_kstats(vrp);
	vr_rings_fini(vrp);
	vr_bus_unconfig(vrp);
	kmem_free(vrp, sizeof (vr_t));
	return (DDI_SUCCESS);
}
442 
/*
 * quiesce(9E): silence the card for fast reboot.
 * Masks both interrupt registers and stops DMA. Only register writes
 * are done here; no locks are taken and no DDI services are called, as
 * required for a quiesce routine.
 */
int
vr_quiesce(dev_info_t *dev_info)
{
	vr_t	*vrp;

	vrp = (vr_t *)ddi_get_driver_private(dev_info);

	/*
	 * Stop interrupts.
	 */
	VR_PUT16(vrp->acc_reg, VR_ICR0, 0);
	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);

	/*
	 * Stop DMA.
	 */
	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_STOP);
	return (DDI_SUCCESS);
}
465 
/*
 * Add an interrupt for our device to the OS.
 * Allocates the single fixed (legacy) interrupt, attaches vr_intr() as
 * the handler and records the interrupt priority in vrp->intr_pri for
 * later use by mutex_init() in vr_attach(). The interrupt is NOT
 * enabled here. On any failure, the steps already done are undone and
 * VR_FAILURE is returned.
 */
static vr_result_t
vr_add_intr(vr_t *vrp)
{
	int	nintrs;
	int	rc;

	rc = ddi_intr_alloc(vrp->devinfo, &vrp->intr_hdl,
	    DDI_INTR_TYPE_FIXED,	/* type */
	    0,			/* number */
	    1,			/* count */
	    &nintrs,		/* actualp */
	    DDI_INTR_ALLOC_STRICT);

	if (rc != DDI_SUCCESS) {
		vr_log(vrp, CE_NOTE, "ddi_intr_alloc failed: %d", rc);
		return (VR_FAILURE);
	}

	rc = ddi_intr_add_handler(vrp->intr_hdl, vr_intr, vrp, NULL);
	if (rc != DDI_SUCCESS) {
		vr_log(vrp, CE_NOTE, "ddi_intr_add_handler failed");
		if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
			vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
		return (VR_FAILURE);
	}

	rc = ddi_intr_get_pri(vrp->intr_hdl, &vrp->intr_pri);
	if (rc != DDI_SUCCESS) {
		vr_log(vrp, CE_NOTE, "ddi_intr_get_pri failed");
		if (ddi_intr_remove_handler(vrp->intr_hdl) != DDI_SUCCESS)
			vr_log(vrp, CE_NOTE, "ddi_intr_remove_handler failed");

		if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
			vr_log(vrp, CE_NOTE, "ddi_intr_free failed");

		return (VR_FAILURE);
	}
	return (VR_SUCCESS);
}
508 
509 /*
510  * Remove our interrupt from the OS.
511  */
512 static void
513 vr_remove_intr(vr_t *vrp)
514 {
515 	if (ddi_intr_remove_handler(vrp->intr_hdl) != DDI_SUCCESS)
516 		vr_log(vrp, CE_NOTE, "ddi_intr_remove_handler failed");
517 
518 	if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
519 		vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
520 }
521 
522 /*
523  * Resume operation after suspend.
524  */
525 static int
526 vr_resume(dev_info_t *devinfo)
527 {
528 	vr_t *vrp;
529 
530 	vrp = (vr_t *)ddi_get_driver_private(devinfo);
531 	mutex_enter(&vrp->oplock);
532 	if (vrp->chip.state == CHIPSTATE_SUSPENDED_RUNNING)
533 		(void) vr_start(vrp);
534 	mutex_exit(&vrp->oplock);
535 	return (DDI_SUCCESS);
536 }
537 
538 /*
539  * Suspend operation.
540  */
541 static int
542 vr_suspend(dev_info_t *devinfo)
543 {
544 	vr_t *vrp;
545 
546 	vrp = (vr_t *)ddi_get_driver_private(devinfo);
547 	mutex_enter(&vrp->oplock);
548 	if (vrp->chip.state == CHIPSTATE_RUNNING) {
549 		(void) vr_stop(vrp);
550 		vrp->chip.state = CHIPSTATE_SUSPENDED_RUNNING;
551 	}
552 	mutex_exit(&vrp->oplock);
553 	return (DDI_SUCCESS);
554 }
555 
/*
 * Initial bus- and device configuration during attach(9E).
 * Maps every register set listed in the "reg" property, classifies them
 * into config/IO/memory handles, identifies the chip against the
 * vr_chip_info[] table and reads the factory MAC address. Returns
 * VR_FAILURE with all mappings released on any error.
 */
static vr_result_t
vr_bus_config(vr_t *vrp)
{
	uint32_t		addr;
	int			n, nsets, rc;
	uint_t			elem;
	pci_regspec_t		*regs;

	/*
	 * Get the reg property which describes the various access methods.
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, vrp->devinfo,
	    0, "reg", (int **)&regs, &elem) != DDI_PROP_SUCCESS) {
		vr_log(vrp, CE_WARN, "Can't get reg property");
		return (VR_FAILURE);
	}
	nsets = (elem * sizeof (uint_t)) / sizeof (pci_regspec_t);

	/*
	 * Setup access to all available sets.
	 */
	vrp->nsets = nsets;
	vrp->regset = kmem_zalloc(nsets * sizeof (vr_acc_t), KM_SLEEP);
	for (n = 0; n < nsets; n++) {
		rc = ddi_regs_map_setup(vrp->devinfo, n,
		    &vrp->regset[n].addr, 0, 0,
		    &vr_dev_dma_accattr,
		    &vrp->regset[n].hdl);
		if (rc != DDI_SUCCESS) {
			vr_log(vrp, CE_NOTE,
			    "Setup of register set %d failed", n);
			while (--n >= 0)
				ddi_regs_map_free(&vrp->regset[n].hdl);
			kmem_free(vrp->regset, nsets * sizeof (vr_acc_t));
			ddi_prop_free(regs);
			return (VR_FAILURE);
		}
		bcopy(&regs[n], &vrp->regset[n].reg, sizeof (pci_regspec_t));
	}
	ddi_prop_free(regs);

	/*
	 * Assign type-named pointers to the register sets.
	 * The first set found of each address type wins.
	 */
	for (n = 0; n < nsets; n++) {
		addr = vrp->regset[n].reg.pci_phys_hi & PCI_REG_ADDR_M;
		if (addr == PCI_ADDR_CONFIG && vrp->acc_cfg == NULL)
			vrp->acc_cfg = &vrp->regset[n];
		else if (addr == PCI_ADDR_IO && vrp->acc_io == NULL)
			vrp->acc_io = &vrp->regset[n];
		else if (addr == PCI_ADDR_MEM32 && vrp->acc_mem == NULL)
			vrp->acc_mem = &vrp->regset[n];
	}

	/*
	 * Assure there is one of each type.
	 */
	if (vrp->acc_cfg == NULL ||
	    vrp->acc_io == NULL ||
	    vrp->acc_mem == NULL) {
		for (n = 0; n < nsets; n++)
			ddi_regs_map_free(&vrp->regset[n].hdl);
		kmem_free(vrp->regset, nsets * sizeof (vr_acc_t));
		vr_log(vrp, CE_WARN,
		    "Config-, I/O- and memory sets not available");
		return (VR_FAILURE);
	}

	/*
	 * Store vendor/device/revision.
	 * NOTE(review): PCI_CONF_REVID is a one-byte config register but is
	 * read with VR_GET16 here; the table lookup below appears to work
	 * only because the adjacent byte (class code programming interface)
	 * reads as zero on these NICs -- confirm before reusing this
	 * pattern elsewhere.
	 */
	vrp->chip.vendor = VR_GET16(vrp->acc_cfg, PCI_CONF_VENID);
	vrp->chip.device = VR_GET16(vrp->acc_cfg, PCI_CONF_DEVID);
	vrp->chip.revision = VR_GET16(vrp->acc_cfg, PCI_CONF_REVID);

	/*
	 * Copy the matching chip_info_t structure.
	 */
	elem = sizeof (vr_chip_info) / sizeof (chip_info_t);
	for (n = 0; n < elem; n++) {
		if (vrp->chip.revision >= vr_chip_info[n].revmin &&
		    vrp->chip.revision <= vr_chip_info[n].revmax) {
			bcopy((void*)&vr_chip_info[n],
			    (void*)&vrp->chip.info,
			    sizeof (chip_info_t));
			break;
		}
	}

	/*
	 * If we didn't find a chip_info_t for this card, copy the first
	 * entry of the info structures. This is a generic Rhine with no
	 * bugs and no features.
	 */
	if (vrp->chip.info.name == NULL) {
		bcopy((void*)&vr_chip_info[0],
		    (void*) &vrp->chip.info,
		    sizeof (chip_info_t));
	}

	/*
	 * Tell what is found.
	 */
	vr_log(vrp, CE_NOTE, "pci%d,%d,%d: %s, revision 0x%0x",
	    PCI_REG_BUS_G(vrp->acc_cfg->reg.pci_phys_hi),
	    PCI_REG_DEV_G(vrp->acc_cfg->reg.pci_phys_hi),
	    PCI_REG_FUNC_G(vrp->acc_cfg->reg.pci_phys_hi),
	    vrp->chip.info.name,
	    vrp->chip.revision);

	/*
	 * Assure that the device is prepared for memory space accesses
	 * This should be the default as the device advertises memory
	 * access in its BARs. However, my VT6102 on a EPIA CL board doesn't
	 * and thus we explicitly enable it.
	 */
	VR_SETBIT8(vrp->acc_io, VR_CFGD, VR_CFGD_MMIOEN);

	/*
	 * Setup a handle for regular usage, prefer memory space accesses.
	 */
	if (vrp->acc_mem != NULL &&
	    (vrp->chip.info.bugs & VR_BUG_NO_MEMIO) == 0)
		vrp->acc_reg = vrp->acc_mem;
	else
		vrp->acc_reg = vrp->acc_io;

	/*
	 * Store the vendor's MAC address.
	 */
	for (n = 0; n < ETHERADDRL; n++) {
		vrp->vendor_ether_addr[n] = VR_GET8(vrp->acc_reg,
		    VR_ETHERADDR + n);
	}
	return (VR_SUCCESS);
}
695 
696 static void
697 vr_bus_unconfig(vr_t *vrp)
698 {
699 	uint_t	n;
700 
701 	/*
702 	 * Free the register access handles.
703 	 */
704 	for (n = 0; n < vrp->nsets; n++)
705 		ddi_regs_map_free(&vrp->regset[n].hdl);
706 	kmem_free(vrp->regset, vrp->nsets * sizeof (vr_acc_t));
707 }
708 
/*
 * Initialize parameter structures.
 * Builds the default link configuration: the full set of abilities the
 * driver would like to advertise (anadv_en), masked by what the PHY's
 * status register says it can do (an_phymask) and by MAC limitations
 * (an_macmask). The intersection becomes the initial autonegotiation
 * advertisement, and the PHY control default is set to match.
 */
static void
vr_param_init(vr_t *vrp)
{
	/*
	 * Initialize default link configuration parameters.
	 */
	vrp->param.an_en = VR_LINK_AUTONEG_ON;
	vrp->param.anadv_en = 1; /* Select 802.3 autonegotiation */
	vrp->param.anadv_en |= MII_ABILITY_100BASE_T4;
	vrp->param.anadv_en |= MII_ABILITY_100BASE_TX_FD;
	vrp->param.anadv_en |= MII_ABILITY_100BASE_TX;
	vrp->param.anadv_en |= MII_ABILITY_10BASE_T_FD;
	vrp->param.anadv_en |= MII_ABILITY_10BASE_T;
	/* Not a PHY ability, but advertised on behalf of MAC */
	vrp->param.anadv_en |= MII_ABILITY_PAUSE;
	vrp->param.mtu = ETHERMTU;

	/*
	 * Store the PHY identity.
	 */
	vr_phy_read(vrp, MII_PHYIDH, &vrp->chip.mii.identh);
	vr_phy_read(vrp, MII_PHYIDL, &vrp->chip.mii.identl);

	/*
	 * Clear incapabilities imposed by PHY in phymask.
	 * Each ability bit is dropped when the corresponding MII status
	 * bit says the PHY cannot do it.
	 */
	vrp->param.an_phymask = vrp->param.anadv_en;
	vr_phy_read(vrp, MII_STATUS, &vrp->chip.mii.status);
	if ((vrp->chip.mii.status & MII_STATUS_10) == 0)
		vrp->param.an_phymask &= ~MII_ABILITY_10BASE_T;

	if ((vrp->chip.mii.status & MII_STATUS_10_FD) == 0)
		vrp->param.an_phymask &= ~MII_ABILITY_10BASE_T_FD;

	if ((vrp->chip.mii.status & MII_STATUS_100_BASEX) == 0)
		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_TX;

	if ((vrp->chip.mii.status & MII_STATUS_100_BASEX_FD) == 0)
		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_TX_FD;

	if ((vrp->chip.mii.status & MII_STATUS_100_BASE_T4) == 0)
		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_T4;

	/*
	 * Clear incapabilities imposed by MAC in macmask
	 * Note that flowcontrol (FCS?) is never masked. All of our adapters
	 * have the ability to honor incoming pause frames. Only the newer can
	 * transmit pause frames. Since there's no asym flowcontrol in 100Mbit
	 * Ethernet, we always advertise (symmetric) pause.
	 */
	vrp->param.an_macmask = vrp->param.anadv_en;

	/*
	 * Advertised capabilities is enabled minus incapable.
	 */
	vrp->chip.mii.anadv = vrp->param.anadv_en &
	    (vrp->param.an_phymask & vrp->param.an_macmask);

	/*
	 * Ensure that autoneg of the PHY matches our default.
	 */
	if (vrp->param.an_en == VR_LINK_AUTONEG_ON)
		vrp->chip.mii.control = MII_CONTROL_ANE;
	else
		vrp->chip.mii.control =
		    (MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
}
779 
780 /*
781  * Setup the descriptor rings.
782  */
783 static vr_result_t
784 vr_rings_init(vr_t *vrp)
785 {
786 
787 	vrp->rx.ndesc = VR_RX_N_DESC;
788 	vrp->tx.ndesc = VR_TX_N_DESC;
789 
790 	/*
791 	 * Create a ring for receive.
792 	 */
793 	if (vr_alloc_ring(vrp, &vrp->rxring, vrp->rx.ndesc) != VR_SUCCESS)
794 		return (VR_FAILURE);
795 
796 	/*
797 	 * Create a ring for transmit.
798 	 */
799 	if (vr_alloc_ring(vrp, &vrp->txring, vrp->tx.ndesc) != VR_SUCCESS) {
800 		vr_free_ring(&vrp->rxring, vrp->rx.ndesc);
801 		return (VR_FAILURE);
802 	}
803 
804 	vrp->rx.ring = vrp->rxring.desc;
805 	vrp->tx.ring = vrp->txring.desc;
806 	return (VR_SUCCESS);
807 }
808 
809 static void
810 vr_rings_fini(vr_t *vrp)
811 {
812 	vr_free_ring(&vrp->rxring, vrp->rx.ndesc);
813 	vr_free_ring(&vrp->txring, vrp->tx.ndesc);
814 }
815 
/*
 * Allocate a descriptor ring
 * The number of descriptor entries must fit in a single page so that the
 * whole ring fits in one consecutive space.
 *  i386:  4K page / 16 byte descriptor = 256 entries
 *  sparc: 8K page / 16 byte descriptor = 512 entries
 *
 * vrp:  soft state, used for the devinfo node and error logging.
 * ring: ring bookkeeping structure to fill in.
 * n:    number of descriptors in the ring.
 *
 * On success, ring->cdesc points at DMA'able chip descriptors bound at
 * physical address ring->cdesc_paddr, ring->desc is a parallel array of
 * host descriptors, and both arrays are linked into circular lists.
 * On failure everything allocated here is released again.
 */
static vr_result_t
vr_alloc_ring(vr_t *vrp, vr_ring_t *ring, size_t n)
{
	ddi_dma_cookie_t	desc_dma_cookie;
	uint_t			desc_cookiecnt;
	int			i, rc;
	size_t			rbytes;

	/*
	 * Allocate a DMA handle for the chip descriptors.
	 */
	rc = ddi_dma_alloc_handle(vrp->devinfo,
	    &vr_dev_dma_attr,
	    DDI_DMA_SLEEP,
	    NULL,
	    &ring->handle);

	if (rc != DDI_SUCCESS) {
		vr_log(vrp, CE_WARN,
		    "ddi_dma_alloc_handle in vr_alloc_ring failed.");
		return (VR_FAILURE);
	}

	/*
	 * Allocate memory for the chip descriptors.
	 */
	rc = ddi_dma_mem_alloc(ring->handle,
	    n * sizeof (vr_chip_desc_t),
	    &vr_dev_dma_accattr,
	    DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    NULL,
	    (caddr_t *)&ring->cdesc,
	    &rbytes,
	    &ring->acchdl);

	if (rc != DDI_SUCCESS) {
		vr_log(vrp, CE_WARN,
		    "ddi_dma_mem_alloc in vr_alloc_ring failed.");
		ddi_dma_free_handle(&ring->handle);
		return (VR_FAILURE);
	}

	/*
	 * Map the descriptor memory.
	 * The 4K alignment requested in vr_dev_dma_attr means the whole
	 * ring must bind as a single cookie.
	 */
	rc = ddi_dma_addr_bind_handle(ring->handle,
	    NULL,
	    (caddr_t)ring->cdesc,
	    rbytes,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    NULL,
	    &desc_dma_cookie,
	    &desc_cookiecnt);

	if (rc != DDI_DMA_MAPPED || desc_cookiecnt > 1) {
		vr_log(vrp, CE_WARN,
		    "ddi_dma_addr_bind_handle in vr_alloc_ring failed: "
		    "rc = %d, cookiecnt = %d", rc, desc_cookiecnt);
		ddi_dma_mem_free(&ring->acchdl);
		ddi_dma_free_handle(&ring->handle);
		return (VR_FAILURE);
	}
	ring->cdesc_paddr = desc_dma_cookie.dmac_address;

	/*
	 * Allocate memory for the host descriptor ring.
	 */
	ring->desc =
	    (vr_desc_t *)kmem_zalloc(n * sizeof (vr_desc_t), KM_SLEEP);

	/*
	 * Interlink the descriptors and connect host- to chip descriptors.
	 */
	for (i = 0; i < n; i++) {
		/*
		 * Connect the host descriptor to a chip descriptor.
		 */
		ring->desc[i].cdesc = &ring->cdesc[i];

		/*
		 * Store the DMA address and offset in the descriptor
		 * Offset is for ddi_dma_sync() and paddr is for ddi_get/-put().
		 */
		ring->desc[i].offset = i * sizeof (vr_chip_desc_t);
		ring->desc[i].paddr = ring->cdesc_paddr + ring->desc[i].offset;

		/*
		 * Link the previous descriptor to this one.
		 */
		if (i > 0) {
			/* Host */
			ring->desc[i-1].next = &ring->desc[i];

			/* Chip */
			ddi_put32(ring->acchdl,
			    &ring->cdesc[i-1].next,
			    ring->desc[i].paddr);
		}
	}

	/*
	 * Make rings out of this list by pointing last to first.
	 */
	i = n - 1;
	ring->desc[i].next = &ring->desc[0];
	ddi_put32(ring->acchdl, &ring->cdesc[i].next, ring->desc[0].paddr);
	return (VR_SUCCESS);
}
933 
934 /*
935  * Free the memory allocated for a ring.
936  */
937 static void
938 vr_free_ring(vr_ring_t *r, size_t n)
939 {
940 	/*
941 	 * Unmap and free the chip descriptors.
942 	 */
943 	(void) ddi_dma_unbind_handle(r->handle);
944 	ddi_dma_mem_free(&r->acchdl);
945 	ddi_dma_free_handle(&r->handle);
946 
947 	/*
948 	 * Free the memory for storing host descriptors
949 	 */
950 	kmem_free(r->desc, n * sizeof (vr_desc_t));
951 }
952 
/*
 * Initialize the receive ring.
 * Gives every receive descriptor a freshly allocated DMA buffer, points
 * the chip descriptor at that buffer, hands descriptor ownership to the
 * card and syncs the descriptor out to memory. On failure all buffers
 * allocated so far are freed and VR_FAILURE is returned.
 */
static vr_result_t
vr_rxring_init(vr_t *vrp)
{
	int		i, rc;
	vr_desc_t	*rp;

	/*
	 * Set the read pointer at the start of the ring.
	 */
	vrp->rx.rp = &vrp->rx.ring[0];

	/*
	 * Assign a DMA buffer to each receive descriptor.
	 */
	for (i = 0; i < vrp->rx.ndesc; i++) {
		/* rp is just shorthand for the current ring slot. */
		rp = &vrp->rx.ring[i];
		rc = vr_alloc_dmabuf(vrp,
		    &vrp->rx.ring[i].dmabuf,
		    DDI_DMA_STREAMING | DDI_DMA_READ);

		if (rc != VR_SUCCESS) {
			while (--i >= 0)
				vr_free_dmabuf(&vrp->rx.ring[i].dmabuf);
			return (VR_FAILURE);
		}

		/*
		 * Store the address of the dma buffer in the chip descriptor
		 */
		ddi_put32(vrp->rxring.acchdl,
		    &rp->cdesc->data,
		    rp->dmabuf.paddr);

		/*
		 * Put the buffer length in the chip descriptor. Ensure that
		 * length fits in the 11 bits of stat1 (2047/0x7FF)
		 */
		ddi_put32(vrp->rxring.acchdl, &rp->cdesc->stat1,
		    MIN(VR_MAX_PKTSZ, rp->dmabuf.bufsz));

		/*
		 * Set descriptor ownership to the card
		 */
		ddi_put32(vrp->rxring.acchdl, &rp->cdesc->stat0, VR_RDES0_OWN);

		/*
		 * Sync the descriptor with main memory
		 */
		(void) ddi_dma_sync(vrp->rxring.handle, rp->offset,
		    sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORDEV);
	}
	return (VR_SUCCESS);
}
1009 
1010 /*
1011  * Free the DMA buffers assigned to the receive ring.
1012  */
1013 static void
1014 vr_rxring_fini(vr_t *vrp)
1015 {
1016 	int		i;
1017 
1018 	for (i = 0; i < vrp->rx.ndesc; i++)
1019 		vr_free_dmabuf(&vrp->rx.ring[i].dmabuf);
1020 }
1021 
/*
 * Initialize the transmit ring.
 * Resets the write/claim pointers and TX bookkeeping, attaches a DMA
 * buffer to every transmit descriptor, then publishes cleared (host
 * owned) descriptors to the device. On buffer allocation failure the
 * buffers allocated so far are freed and VR_FAILURE is returned.
 */
static vr_result_t
vr_txring_init(vr_t *vrp)
{
	vr_desc_t		*wp;
	int			i, rc;

	/*
	 * Set the write- and claim pointer.
	 */
	vrp->tx.wp = &vrp->tx.ring[0];
	vrp->tx.cp = &vrp->tx.ring[0];

	/*
	 * (Re)set the TX bookkeeping.
	 */
	vrp->tx.stallticks = 0;
	vrp->tx.resched = 0;

	/*
	 * Every transmit decreases nfree. Every reclaim increases nfree.
	 */
	vrp->tx.nfree = vrp->tx.ndesc;

	/*
	 * Attach a DMA buffer to each transmit descriptor.
	 */
	for (i = 0; i < vrp->tx.ndesc; i++) {
		rc = vr_alloc_dmabuf(vrp,
		    &vrp->tx.ring[i].dmabuf,
		    DDI_DMA_STREAMING | DDI_DMA_WRITE);

		if (rc != VR_SUCCESS) {
			while (--i >= 0)
				vr_free_dmabuf(&vrp->tx.ring[i].dmabuf);
			return (VR_FAILURE);
		}
	}

	/*
	 * Init & sync the TX descriptors so the device sees a valid ring.
	 */
	for (i = 0; i < vrp->tx.ndesc; i++) {
		wp = &vrp->tx.ring[i];
		ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat0, 0);
		ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat1, 0);
		ddi_put32(vrp->txring.acchdl, &wp->cdesc->data,
		    wp->dmabuf.paddr);
		(void) ddi_dma_sync(vrp->txring.handle, wp->offset,
		    sizeof (vr_chip_desc_t),
		    DDI_DMA_SYNC_FORDEV);
	}
	return (VR_SUCCESS);
}
1075 
1076 /*
1077  * Free the DMA buffers attached to the TX ring.
1078  */
1079 static void
1080 vr_txring_fini(vr_t *vrp)
1081 {
1082 	int		i;
1083 
1084 	/*
1085 	 * Free the DMA buffers attached to the TX ring
1086 	 */
1087 	for (i = 0; i < vrp->tx.ndesc; i++)
1088 		vr_free_dmabuf(&vrp->tx.ring[i].dmabuf);
1089 }
1090 
1091 /*
1092  * Allocate a DMA buffer.
1093  */
1094 static vr_result_t
1095 vr_alloc_dmabuf(vr_t *vrp, vr_data_dma_t *dmap, uint_t dmaflags)
1096 {
1097 	ddi_dma_cookie_t	dma_cookie;
1098 	uint_t			cookiecnt;
1099 	int			rc;
1100 
1101 	/*
1102 	 * Allocate a DMA handle for the buffer
1103 	 */
1104 	rc = ddi_dma_alloc_handle(vrp->devinfo,
1105 	    &vr_data_dma_attr,
1106 	    DDI_DMA_DONTWAIT, NULL,
1107 	    &dmap->handle);
1108 
1109 	if (rc != DDI_SUCCESS) {
1110 		vr_log(vrp, CE_WARN,
1111 		    "ddi_dma_alloc_handle failed in vr_alloc_dmabuf");
1112 		return (VR_FAILURE);
1113 	}
1114 
1115 	/*
1116 	 * Allocate the buffer
1117 	 * The allocated buffer is aligned on 2K boundary. This ensures that
1118 	 * a 1500 byte frame never cross a page boundary and thus that the DMA
1119 	 * mapping can be established in 1 fragment.
1120 	 */
1121 	rc = ddi_dma_mem_alloc(dmap->handle,
1122 	    VR_DMABUFSZ,
1123 	    &vr_data_dma_accattr,
1124 	    DDI_DMA_RDWR | DDI_DMA_STREAMING,
1125 	    DDI_DMA_DONTWAIT, NULL,
1126 	    &dmap->buf,
1127 	    &dmap->bufsz,
1128 	    &dmap->acchdl);
1129 
1130 	if (rc != DDI_SUCCESS) {
1131 		vr_log(vrp, CE_WARN,
1132 		    "ddi_dma_mem_alloc failed in vr_alloc_dmabuf");
1133 		ddi_dma_free_handle(&dmap->handle);
1134 		return (VR_FAILURE);
1135 	}
1136 
1137 	/*
1138 	 * Map the memory
1139 	 */
1140 	rc = ddi_dma_addr_bind_handle(dmap->handle,
1141 	    NULL,
1142 	    (caddr_t)dmap->buf,
1143 	    dmap->bufsz,
1144 	    dmaflags,
1145 	    DDI_DMA_DONTWAIT,
1146 	    NULL,
1147 	    &dma_cookie,
1148 	    &cookiecnt);
1149 
1150 	/*
1151 	 * The cookiecount should never > 1 because we requested 2K alignment
1152 	 */
1153 	if (rc != DDI_DMA_MAPPED || cookiecnt > 1) {
1154 		vr_log(vrp, CE_WARN,
1155 		    "dma_addr_bind_handle failed in vr_alloc_dmabuf: "
1156 		    "rc = %d, cookiecnt = %d", rc, cookiecnt);
1157 		ddi_dma_mem_free(&dmap->acchdl);
1158 		ddi_dma_free_handle(&dmap->handle);
1159 		return (VR_FAILURE);
1160 	}
1161 	dmap->paddr = dma_cookie.dmac_address;
1162 	return (VR_SUCCESS);
1163 }
1164 
1165 /*
1166  * Destroy a DMA buffer.
1167  */
1168 static void
1169 vr_free_dmabuf(vr_data_dma_t *dmap)
1170 {
1171 	(void) ddi_dma_unbind_handle(dmap->handle);
1172 	ddi_dma_mem_free(&dmap->acchdl);
1173 	ddi_dma_free_handle(&dmap->handle);
1174 }
1175 
1176 /*
1177  * Interrupt service routine
1178  * When our vector is shared with another device, av_dispatch_autovect calls
1179  * all service routines for the vector until *none* of them return claimed
1180  * That means that, when sharing vectors, this routine is called at least
1181  * twice for each interrupt.
1182  */
1183 uint_t
1184 vr_intr(caddr_t arg1, caddr_t arg2)
1185 {
1186 	vr_t		*vrp;
1187 	uint16_t	status;
1188 	mblk_t		*lp = NULL;
1189 	uint32_t	tx_resched;
1190 	uint32_t	link_change;
1191 
1192 	tx_resched = 0;
1193 	link_change = 0;
1194 	vrp = (void *)arg1;
1195 	_NOTE(ARGUNUSED(arg2))
1196 
1197 	/*
1198 	 * Read the status register to see if the interrupt is from our device
1199 	 * This read also ensures that posted writes are brought to main memory.
1200 	 */
1201 	mutex_enter(&vrp->intrlock);
1202 	status = VR_GET16(vrp->acc_reg, VR_ISR0) & VR_ICR0_CFG;
1203 	if (status == 0) {
1204 		/*
1205 		 * Status contains no configured interrupts
1206 		 * The interrupt was not generated by our device.
1207 		 */
1208 		vrp->stats.intr_unclaimed++;
1209 		mutex_exit(&vrp->intrlock);
1210 		return (DDI_INTR_UNCLAIMED);
1211 	}
1212 	vrp->stats.intr_claimed++;
1213 
1214 	/*
1215 	 * Acknowledge the event(s) that caused interruption.
1216 	 */
1217 	VR_PUT16(vrp->acc_reg, VR_ISR0, status);
1218 
1219 	/*
1220 	 * Receive completion.
1221 	 */
1222 	if ((status & (VR_ISR0_RX_DONE | VR_ISR_RX_ERR_BITS)) != 0) {
1223 		/*
1224 		 * Received some packets.
1225 		 */
1226 		lp = vr_receive(vrp);
1227 
1228 		/*
1229 		 * DMA stops after a conflict in the FIFO.
1230 		 */
1231 		if ((status & VR_ISR_RX_ERR_BITS) != 0)
1232 			VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1233 		status &= ~(VR_ISR0_RX_DONE | VR_ISR_RX_ERR_BITS);
1234 	}
1235 
1236 	/*
1237 	 * Transmit completion.
1238 	 */
1239 	if ((status & (VR_ISR0_TX_DONE | VR_ISR_TX_ERR_BITS)) != 0) {
1240 		/*
1241 		 * Card done with transmitting some packets
1242 		 * TX_DONE is generated 3 times per ring but it appears
1243 		 * more often because it is also set when an RX_DONE
1244 		 * interrupt is generated.
1245 		 */
1246 		mutex_enter(&vrp->tx.lock);
1247 		vr_tx_reclaim(vrp);
1248 		tx_resched = vrp->tx.resched;
1249 		vrp->tx.resched = 0;
1250 		mutex_exit(&vrp->tx.lock);
1251 		status &= ~(VR_ISR0_TX_DONE | VR_ISR_TX_ERR_BITS);
1252 	}
1253 
1254 	/*
1255 	 * Link status change.
1256 	 */
1257 	if ((status & VR_ICR0_LINKSTATUS) != 0) {
1258 		/*
1259 		 * Get new link state and inform the mac layer.
1260 		 */
1261 		mutex_enter(&vrp->oplock);
1262 		mutex_enter(&vrp->tx.lock);
1263 		vr_link_state(vrp);
1264 		mutex_exit(&vrp->tx.lock);
1265 		mutex_exit(&vrp->oplock);
1266 		status &= ~VR_ICR0_LINKSTATUS;
1267 		vrp->stats.linkchanges++;
1268 		link_change = 1;
1269 	}
1270 
1271 	/*
1272 	 * Bus error.
1273 	 */
1274 	if ((status & VR_ISR0_BUSERR) != 0) {
1275 		vr_log(vrp, CE_WARN, "bus error occured");
1276 		vrp->reset = 1;
1277 		status &= ~VR_ISR0_BUSERR;
1278 	}
1279 
1280 	/*
1281 	 * We must have handled all things here.
1282 	 */
1283 	ASSERT(status == 0);
1284 	mutex_exit(&vrp->intrlock);
1285 
1286 	/*
1287 	 * Reset the device if requested
1288 	 * The request can come from the periodic tx check or from the interrupt
1289 	 * status.
1290 	 */
1291 	if (vrp->reset != 0) {
1292 		vr_error(vrp);
1293 		vrp->reset = 0;
1294 	}
1295 
1296 	/*
1297 	 * Pass up the list with received packets.
1298 	 */
1299 	if (lp != NULL)
1300 		mac_rx(vrp->machdl, 0, lp);
1301 
1302 	/*
1303 	 * Inform the upper layer on the linkstatus if there was a change.
1304 	 */
1305 	if (link_change != 0)
1306 		mac_link_update(vrp->machdl,
1307 		    (link_state_t)vrp->chip.link.state);
1308 	/*
1309 	 * Restart transmissions if we were waiting for tx descriptors.
1310 	 */
1311 	if (tx_resched == 1)
1312 		mac_tx_update(vrp->machdl);
1313 
1314 	/*
1315 	 * Read something from the card to ensure that all of our configuration
1316 	 * writes are delivered to the device before the interrupt is ended.
1317 	 */
1318 	(void) VR_GET8(vrp->acc_reg, VR_ETHERADDR);
1319 	return (DDI_INTR_CLAIMED);
1320 }
1321 
1322 /*
1323  * Respond to an unforseen situation by resetting the card and our bookkeeping.
1324  */
1325 static void
1326 vr_error(vr_t *vrp)
1327 {
1328 	vr_log(vrp, CE_WARN, "resetting MAC.");
1329 	mutex_enter(&vrp->intrlock);
1330 	mutex_enter(&vrp->oplock);
1331 	mutex_enter(&vrp->tx.lock);
1332 	(void) vr_stop(vrp);
1333 	vr_reset(vrp);
1334 	(void) vr_start(vrp);
1335 	mutex_exit(&vrp->tx.lock);
1336 	mutex_exit(&vrp->oplock);
1337 	mutex_exit(&vrp->intrlock);
1338 	vrp->stats.resets++;
1339 }
1340 
1341 /*
1342  * Collect received packets in a list.
1343  */
1344 static mblk_t *
1345 vr_receive(vr_t *vrp)
1346 {
1347 	mblk_t			*lp, *mp, *np;
1348 	vr_desc_t		*rxp;
1349 	vr_data_dma_t		*dmap;
1350 	uint32_t		pklen;
1351 	uint32_t		rxstat0;
1352 	uint32_t		n;
1353 
1354 	lp = NULL;
1355 	n = 0;
1356 	for (rxp = vrp->rx.rp; ; rxp = rxp->next, n++) {
1357 		/*
1358 		 * Sync the descriptor before looking at it.
1359 		 */
1360 		(void) ddi_dma_sync(vrp->rxring.handle, rxp->offset,
1361 		    sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORKERNEL);
1362 
1363 		/*
1364 		 * Get the status from the descriptor.
1365 		 */
1366 		rxstat0 = ddi_get32(vrp->rxring.acchdl, &rxp->cdesc->stat0);
1367 
1368 		/*
1369 		 * We're done if the descriptor is owned by the card.
1370 		 */
1371 		if ((rxstat0 & VR_RDES0_OWN) != 0)
1372 			break;
1373 		else if ((rxstat0 & VR_RDES0_RXOK) != 0) {
1374 			/*
1375 			 * Received a good packet
1376 			 */
1377 			dmap = &rxp->dmabuf;
1378 			pklen = (rxstat0 >> 16) - ETHERFCSL;
1379 
1380 			/*
1381 			 * Sync the data.
1382 			 */
1383 			(void) ddi_dma_sync(dmap->handle, 0,
1384 			    pklen, DDI_DMA_SYNC_FORKERNEL);
1385 
1386 			/*
1387 			 * Send a new copied message upstream.
1388 			 */
1389 			np = allocb(pklen, 0);
1390 			if (np != NULL) {
1391 				bcopy(dmap->buf, np->b_rptr, pklen);
1392 				np->b_wptr = np->b_rptr + pklen;
1393 
1394 				vrp->stats.mac_stat_ipackets++;
1395 				vrp->stats.mac_stat_rbytes += pklen;
1396 
1397 				if ((rxstat0 & VR_RDES0_BAR) != 0)
1398 					vrp->stats.mac_stat_brdcstrcv++;
1399 				else if ((rxstat0 & VR_RDES0_MAR) != 0)
1400 					vrp->stats.mac_stat_multircv++;
1401 
1402 				/*
1403 				 * Link this packet in the list.
1404 				 */
1405 				np->b_next = NULL;
1406 				if (lp == NULL)
1407 					lp = mp = np;
1408 				else {
1409 					mp->b_next = np;
1410 					mp = np;
1411 				}
1412 			} else {
1413 				vrp->stats.allocbfail++;
1414 				vrp->stats.mac_stat_norcvbuf++;
1415 			}
1416 
1417 		} else {
1418 			/*
1419 			 * Received with errors.
1420 			 */
1421 			vrp->stats.mac_stat_ierrors++;
1422 			if ((rxstat0 & VR_RDES0_FAE) != 0)
1423 				vrp->stats.ether_stat_align_errors++;
1424 			if ((rxstat0 & VR_RDES0_CRCERR) != 0)
1425 				vrp->stats.ether_stat_fcs_errors++;
1426 			if ((rxstat0 & VR_RDES0_LONG) != 0)
1427 				vrp->stats.ether_stat_toolong_errors++;
1428 			if ((rxstat0 & VR_RDES0_RUNT) != 0)
1429 				vrp->stats.ether_stat_tooshort_errors++;
1430 			if ((rxstat0 & VR_RDES0_FOV) != 0)
1431 				vrp->stats.mac_stat_overflows++;
1432 		}
1433 
1434 		/*
1435 		 * Reset descriptor ownership to the MAC.
1436 		 */
1437 		ddi_put32(vrp->rxring.acchdl,
1438 		    &rxp->cdesc->stat0,
1439 		    VR_RDES0_OWN);
1440 		(void) ddi_dma_sync(vrp->rxring.handle,
1441 		    rxp->offset,
1442 		    sizeof (vr_chip_desc_t),
1443 		    DDI_DMA_SYNC_FORDEV);
1444 	}
1445 	vrp->rx.rp = rxp;
1446 
1447 	/*
1448 	 * If we do flowcontrol and if the card can transmit pause frames,
1449 	 * increment the "available receive descriptors" register.
1450 	 */
1451 	if (n > 0 && vrp->chip.link.flowctrl == VR_PAUSE_BIDIRECTIONAL) {
1452 		/*
1453 		 * Whenever the card moves a fragment to host memory it
1454 		 * decrements the RXBUFCOUNT register. If the value in the
1455 		 * register reaches a low watermark, the card transmits a pause
1456 		 * frame. If the value in this register reaches a high
1457 		 * watermark, the card sends a "cancel pause" frame
1458 		 *
1459 		 * Non-zero values written to this byte register are added
1460 		 * by the chip to the register's contents, so we must write
1461 		 * the number of descriptors free'd.
1462 		 */
1463 		VR_PUT8(vrp->acc_reg, VR_FCR0_RXBUFCOUNT, MIN(n, 0xFF));
1464 	}
1465 	return (lp);
1466 }
1467 
1468 /*
1469  * Enqueue a list of packets for transmission
1470  * Return the packets not transmitted.
1471  */
1472 mblk_t *
1473 vr_mac_tx_enqueue_list(void *p, mblk_t *mp)
1474 {
1475 	vr_t		*vrp;
1476 	mblk_t		*nextp;
1477 
1478 	vrp = (vr_t *)p;
1479 	mutex_enter(&vrp->tx.lock);
1480 	do {
1481 		if (vrp->tx.nfree == 0) {
1482 			vrp->stats.ether_stat_defer_xmts++;
1483 			vrp->tx.resched = 1;
1484 			break;
1485 		}
1486 		nextp = mp->b_next;
1487 		mp->b_next = mp->b_prev = NULL;
1488 		vr_tx_enqueue_msg(vrp, mp);
1489 		mp = nextp;
1490 		vrp->tx.nfree--;
1491 	} while (mp != NULL);
1492 	mutex_exit(&vrp->tx.lock);
1493 
1494 	/*
1495 	 * Tell the chip to poll the TX ring.
1496 	 */
1497 	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1498 	return (mp);
1499 }
1500 
1501 /*
1502  * Enqueue a message for transmission.
1503  */
1504 static void
1505 vr_tx_enqueue_msg(vr_t *vrp, mblk_t *mp)
1506 {
1507 	vr_desc_t		*wp;
1508 	vr_data_dma_t		*dmap;
1509 	uint32_t		pklen;
1510 	uint32_t		nextp;
1511 	int			padlen;
1512 
1513 	if ((uchar_t)mp->b_rptr[0] == 0xff &&
1514 	    (uchar_t)mp->b_rptr[1] == 0xff &&
1515 	    (uchar_t)mp->b_rptr[2] == 0xff &&
1516 	    (uchar_t)mp->b_rptr[3] == 0xff &&
1517 	    (uchar_t)mp->b_rptr[4] == 0xff &&
1518 	    (uchar_t)mp->b_rptr[5] == 0xff)
1519 		vrp->stats.mac_stat_brdcstxmt++;
1520 	else if ((uchar_t)mp->b_rptr[0] == 1)
1521 		vrp->stats.mac_stat_multixmt++;
1522 
1523 	pklen = msgsize(mp);
1524 	wp = vrp->tx.wp;
1525 	dmap = &wp->dmabuf;
1526 
1527 	/*
1528 	 * Copy the message into the pre-mapped buffer and free mp
1529 	 */
1530 	mcopymsg(mp, dmap->buf);
1531 
1532 	/*
1533 	 * Clean padlen bytes of short packet.
1534 	 */
1535 	padlen = ETHERMIN - pklen;
1536 	if (padlen > 0) {
1537 		bzero(dmap->buf + pklen, padlen);
1538 		pklen += padlen;
1539 	}
1540 
1541 	/*
1542 	 * Most of the statistics are updated on reclaim, after the actual
1543 	 * transmit. obytes is maintained here because the length is cleared
1544 	 * after transmission
1545 	 */
1546 	vrp->stats.mac_stat_obytes += pklen;
1547 
1548 	/*
1549 	 * Sync the data so the device sees the new content too.
1550 	 */
1551 	(void) ddi_dma_sync(dmap->handle, 0, pklen, DDI_DMA_SYNC_FORDEV);
1552 
1553 	/*
1554 	 * If we have reached the TX interrupt distance, enable a TX interrupt
1555 	 * for this packet. The Interrupt Control (IC) bit in the transmit
1556 	 * descriptor doesn't have any effect on the interrupt generation
1557 	 * despite the vague statements in the datasheet. Thus, we use the
1558 	 * more obscure interrupt suppress bit which is probably part of the
1559 	 * MAC's bookkeeping for TX interrupts and fragmented packets.
1560 	 */
1561 	vrp->tx.intr_distance++;
1562 	nextp = ddi_get32(vrp->txring.acchdl, &wp->cdesc->next);
1563 	if (vrp->tx.intr_distance >= VR_TX_MAX_INTR_DISTANCE) {
1564 		/*
1565 		 * Don't suppress the interrupt for this packet.
1566 		 */
1567 		vrp->tx.intr_distance = 0;
1568 		nextp &= (~VR_TDES3_SUPPRESS_INTR);
1569 	} else {
1570 		/*
1571 		 * Suppress the interrupt for this packet.
1572 		 */
1573 		nextp |= VR_TDES3_SUPPRESS_INTR;
1574 	}
1575 
1576 	/*
1577 	 * Write and sync the chip's descriptor
1578 	 */
1579 	ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat1,
1580 	    pklen | (VR_TDES1_STP | VR_TDES1_EDP | VR_TDES1_CHN));
1581 	ddi_put32(vrp->txring.acchdl, &wp->cdesc->next, nextp);
1582 	ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat0, VR_TDES0_OWN);
1583 	(void) ddi_dma_sync(vrp->txring.handle, wp->offset,
1584 	    sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORDEV);
1585 
1586 	/*
1587 	 * The ticks counter is cleared by reclaim when it reclaimed some
1588 	 * descriptors and incremented by the periodic TX stall check.
1589 	 */
1590 	vrp->tx.stallticks = 1;
1591 	vrp->tx.wp = wp->next;
1592 }
1593 
1594 /*
1595  * Free transmitted descriptors.
1596  */
1597 static void
1598 vr_tx_reclaim(vr_t *vrp)
1599 {
1600 	vr_desc_t		*cp;
1601 	uint32_t		stat0, stat1, freed, dirty;
1602 
1603 	ASSERT(mutex_owned(&vrp->tx.lock));
1604 
1605 	freed = 0;
1606 	dirty = vrp->tx.ndesc - vrp->tx.nfree;
1607 	for (cp = vrp->tx.cp; dirty > 0; cp = cp->next) {
1608 		/*
1609 		 * Sync & get descriptor status.
1610 		 */
1611 		(void) ddi_dma_sync(vrp->txring.handle, cp->offset,
1612 		    sizeof (vr_chip_desc_t),
1613 		    DDI_DMA_SYNC_FORKERNEL);
1614 		stat0 = ddi_get32(vrp->txring.acchdl, &cp->cdesc->stat0);
1615 
1616 		if ((stat0 & VR_TDES0_OWN) != 0)
1617 			break;
1618 
1619 		/*
1620 		 * Do stats for the first descriptor in a chain.
1621 		 */
1622 		stat1 = ddi_get32(vrp->txring.acchdl, &cp->cdesc->stat1);
1623 		if ((stat1 & VR_TDES1_STP) != 0) {
1624 			if ((stat0 & VR_TDES0_TERR) != 0) {
1625 				vrp->stats.ether_stat_macxmt_errors++;
1626 				if ((stat0 & VR_TDES0_UDF) != 0)
1627 					vrp->stats.mac_stat_underflows++;
1628 				if ((stat0 & VR_TDES0_ABT) != 0)
1629 					vrp-> stats.ether_stat_ex_collisions++;
1630 				/*
1631 				 * Abort and FIFO underflow stop the MAC.
1632 				 * Packet queueing must be disabled with HD
1633 				 * links because otherwise the MAC is also lost
1634 				 * after a few of these events.
1635 				 */
1636 				VR_PUT8(vrp->acc_reg, VR_CTRL0,
1637 				    VR_CTRL0_DMA_GO);
1638 			} else
1639 				vrp->stats.mac_stat_opackets++;
1640 
1641 			if ((stat0 & VR_TDES0_COL) != 0) {
1642 				if ((stat0 & VR_TDES0_NCR) == 1) {
1643 					vrp->stats.
1644 					    ether_stat_first_collisions++;
1645 				} else {
1646 					vrp->stats.
1647 					    ether_stat_multi_collisions++;
1648 				}
1649 				vrp->stats.mac_stat_collisions +=
1650 				    (stat0 & VR_TDES0_NCR);
1651 			}
1652 
1653 			if ((stat0 & VR_TDES0_CRS) != 0)
1654 				vrp->stats.ether_stat_carrier_errors++;
1655 
1656 			if ((stat0 & VR_TDES0_OWC) != 0)
1657 				vrp->stats.ether_stat_tx_late_collisions++;
1658 		}
1659 		freed += 1;
1660 		dirty -= 1;
1661 	}
1662 	vrp->tx.cp = cp;
1663 
1664 	if (freed > 0) {
1665 		vrp->tx.nfree += freed;
1666 		vrp->tx.stallticks = 0;
1667 		vrp->stats.txreclaims += 1;
1668 	} else
1669 		vrp->stats.txreclaim0 += 1;
1670 }
1671 
1672 /*
1673  * Check TX health every 2 seconds.
1674  */
1675 static void
1676 vr_periodic(void *p)
1677 {
1678 	vr_t		*vrp;
1679 
1680 	vrp = (vr_t *)p;
1681 	if (vrp->chip.state == CHIPSTATE_RUNNING &&
1682 	    vrp->chip.link.state == VR_LINK_STATE_UP && vrp->reset == 0) {
1683 		if (mutex_tryenter(&vrp->intrlock) != 0) {
1684 			mutex_enter(&vrp->tx.lock);
1685 			if (vrp->tx.resched == 1) {
1686 				if (vrp->tx.stallticks >= VR_MAXTXCHECKS) {
1687 					/*
1688 					 * No succesful reclaim in the last n
1689 					 * intervals. Reset the MAC.
1690 					 */
1691 					vrp->reset = 1;
1692 					vr_log(vrp, CE_WARN,
1693 					    "TX stalled, resetting MAC");
1694 				vrp->stats.txstalls++;
1695 				} else {
1696 					/*
1697 					 * Increase until we find that we've
1698 					 * waited long enough.
1699 					 */
1700 					vrp->tx.stallticks += 1;
1701 				}
1702 			}
1703 			mutex_exit(&vrp->tx.lock);
1704 			mutex_exit(&vrp->intrlock);
1705 			vrp->stats.txchecks++;
1706 		}
1707 	}
1708 	vrp->stats.cyclics++;
1709 }
1710 
1711 /*
1712  * Bring the device to our desired initial state.
1713  */
1714 static void
1715 vr_reset(vr_t *vrp)
1716 {
1717 	uint32_t	time;
1718 
1719 	/*
1720 	 * Reset the MAC
1721 	 * If we don't wait long enough for the forced reset to complete,
1722 	 * MAC looses sync with PHY. Result link up, no link change interrupt
1723 	 * and no data transfer.
1724 	 */
1725 	time = 0;
1726 	VR_PUT8(vrp->acc_io, VR_CTRL1, VR_CTRL1_RESET);
1727 	do {
1728 		drv_usecwait(100);
1729 		time += 100;
1730 		if (time >= 100000) {
1731 			VR_PUT8(vrp->acc_io, VR_MISC1, VR_MISC1_RESET);
1732 			delay(drv_usectohz(200000));
1733 		}
1734 	} while ((VR_GET8(vrp->acc_io, VR_CTRL1) & VR_CTRL1_RESET) != 0);
1735 	delay(drv_usectohz(10000));
1736 
1737 	/*
1738 	 * Load the PROM contents into the MAC again.
1739 	 */
1740 	VR_SETBIT8(vrp->acc_io, VR_PROMCTL, VR_PROMCTL_RELOAD);
1741 	delay(drv_usectohz(100000));
1742 
1743 	/*
1744 	 * Tell the MAC via IO space that we like to use memory space for
1745 	 * accessing registers.
1746 	 */
1747 	VR_SETBIT8(vrp->acc_io, VR_CFGD, VR_CFGD_MMIOEN);
1748 }
1749 
1750 /*
1751  * Prepare and enable the card (MAC + PHY + PCI).
1752  */
1753 static int
1754 vr_start(vr_t *vrp)
1755 {
1756 	uint8_t		pci_latency, pci_mode;
1757 
1758 	ASSERT(mutex_owned(&vrp->oplock));
1759 
1760 	/*
1761 	 * Allocate DMA buffers for RX.
1762 	 */
1763 	if (vr_rxring_init(vrp) != VR_SUCCESS) {
1764 		vr_log(vrp, CE_NOTE, "vr_rxring_init() failed");
1765 		return (ENOMEM);
1766 	}
1767 
1768 	/*
1769 	 * Allocate DMA buffers for TX.
1770 	 */
1771 	if (vr_txring_init(vrp) != VR_SUCCESS) {
1772 		vr_log(vrp, CE_NOTE, "vr_txring_init() failed");
1773 		vr_rxring_fini(vrp);
1774 		return (ENOMEM);
1775 	}
1776 
1777 	/*
1778 	 * Changes of the chip specific registers as done in VIA's fet driver
1779 	 * These bits are not in the datasheet and controlled by vr_chip_info.
1780 	 */
1781 	pci_mode = VR_GET8(vrp->acc_reg, VR_MODE2);
1782 	if ((vrp->chip.info.bugs & VR_BUG_NEEDMODE10T) != 0)
1783 		pci_mode |= VR_MODE2_MODE10T;
1784 
1785 	if ((vrp->chip.info.bugs & VR_BUG_NEEDMODE2PCEROPT) != 0)
1786 		pci_mode |= VR_MODE2_PCEROPT;
1787 
1788 	if ((vrp->chip.info.features & VR_FEATURE_MRDLNMULTIPLE) != 0)
1789 		pci_mode |= VR_MODE2_MRDPL;
1790 	VR_PUT8(vrp->acc_reg, VR_MODE2, pci_mode);
1791 
1792 	pci_mode = VR_GET8(vrp->acc_reg, VR_MODE3);
1793 	if ((vrp->chip.info.bugs & VR_BUG_NEEDMIION) != 0)
1794 		pci_mode |= VR_MODE3_MIION;
1795 	VR_PUT8(vrp->acc_reg, VR_MODE3, pci_mode);
1796 
1797 	/*
1798 	 * RX: Accept broadcast packets.
1799 	 */
1800 	VR_SETBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTBROAD);
1801 
1802 	/*
1803 	 * RX: Start DMA when there are 256 bytes in the FIFO.
1804 	 */
1805 	VR_SETBITS8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_FIFO_THRESHOLD_BITS,
1806 	    VR_RXCFG_FIFO_THRESHOLD_256);
1807 	VR_SETBITS8(vrp->acc_reg, VR_BCR0, VR_BCR0_RX_FIFO_THRESHOLD_BITS,
1808 	    VR_BCR0_RX_FIFO_THRESHOLD_256);
1809 
1810 	/*
1811 	 * TX: Start transmit when there are 256 bytes in the FIFO.
1812 	 */
1813 	VR_SETBITS8(vrp->acc_reg, VR_TXCFG, VR_TXCFG_FIFO_THRESHOLD_BITS,
1814 	    VR_TXCFG_FIFO_THRESHOLD_256);
1815 	VR_SETBITS8(vrp->acc_reg, VR_BCR1, VR_BCR1_TX_FIFO_THRESHOLD_BITS,
1816 	    VR_BCR1_TX_FIFO_THRESHOLD_256);
1817 
1818 	/*
1819 	 * Burst transfers up to 256 bytes.
1820 	 */
1821 	VR_SETBITS8(vrp->acc_reg, VR_BCR0, VR_BCR0_DMABITS, VR_BCR0_DMA256);
1822 
1823 	/*
1824 	 * Disable TX autopolling as it is bad for RX performance
1825 	 * I assume this is because the RX process finds the bus often occupied
1826 	 * by the polling process.
1827 	 */
1828 	VR_SETBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_NOAUTOPOLL);
1829 
1830 	/*
1831 	 * Honor the PCI latency timer if it is reasonable.
1832 	 */
1833 	pci_latency = VR_GET8(vrp->acc_cfg, PCI_CONF_LATENCY_TIMER);
1834 	if (pci_latency != 0 && pci_latency != 0xFF)
1835 		VR_SETBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_LATENCYTIMER);
1836 	else
1837 		VR_CLRBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_LATENCYTIMER);
1838 
1839 	/*
1840 	 * Ensure that VLAN filtering is off, because this strips the tag.
1841 	 */
1842 	if ((vrp->chip.info.features & VR_FEATURE_VLANTAGGING) != 0) {
1843 		VR_CLRBIT8(vrp->acc_reg, VR_BCR1, VR_BCR1_VLANFILTER);
1844 		VR_CLRBIT8(vrp->acc_reg, VR_TXCFG, VR_TXCFG_8021PQ_EN);
1845 	}
1846 
1847 	/*
1848 	 * Clear the CAM filter.
1849 	 */
1850 	if ((vrp->chip.info.features & VR_FEATURE_CAMSUPPORT) != 0) {
1851 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_ENABLE);
1852 		VR_PUT32(vrp->acc_reg, VR_CAM_MASK, 0);
1853 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
1854 
1855 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
1856 		    VR_CAM_CTRL_ENABLE|VR_CAM_CTRL_SELECT_VLAN);
1857 		VR_PUT8(vrp->acc_reg, VR_VCAM0, 0);
1858 		VR_PUT8(vrp->acc_reg, VR_VCAM1, 0);
1859 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_WRITE);
1860 		VR_PUT32(vrp->acc_reg, VR_CAM_MASK, 1);
1861 		drv_usecwait(2);
1862 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
1863 	}
1864 
1865 	/*
1866 	 * Give the start addresses of the descriptor rings to the DMA
1867 	 * controller on the MAC.
1868 	 */
1869 	VR_PUT32(vrp->acc_reg, VR_RXADDR, vrp->rx.rp->paddr);
1870 	VR_PUT32(vrp->acc_reg, VR_TXADDR, vrp->tx.wp->paddr);
1871 
1872 	/*
1873 	 * We don't use the additionally invented interrupt ICR1 register,
1874 	 * so make sure these are disabled.
1875 	 */
1876 	VR_PUT8(vrp->acc_reg, VR_ISR1, 0xFF);
1877 	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);
1878 
1879 	/*
1880 	 * Enable interrupts.
1881 	 */
1882 	VR_PUT16(vrp->acc_reg, VR_ISR0, 0xFFFF);
1883 	VR_PUT16(vrp->acc_reg, VR_ICR0, VR_ICR0_CFG);
1884 
1885 	/*
1886 	 * Enable the DMA controller.
1887 	 */
1888 	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1889 
1890 	/*
1891 	 * Configure the link. Rely on the link change interrupt for getting
1892 	 * the link state into the driver.
1893 	 */
1894 	vr_link_init(vrp);
1895 
1896 	/*
1897 	 * Set the software view on the state to 'running'.
1898 	 */
1899 	vrp->chip.state = CHIPSTATE_RUNNING;
1900 	return (0);
1901 }
1902 
1903 /*
1904  * Stop DMA and interrupts.
1905  */
1906 static int
1907 vr_stop(vr_t *vrp)
1908 {
1909 	ASSERT(mutex_owned(&vrp->oplock));
1910 
1911 	/*
1912 	 * Stop interrupts.
1913 	 */
1914 	VR_PUT16(vrp->acc_reg, VR_ICR0, 0);
1915 	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);
1916 
1917 	/*
1918 	 * Stop DMA.
1919 	 */
1920 	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_STOP);
1921 
1922 	/*
1923 	 * Set the software view on the state to stopped.
1924 	 */
1925 	vrp->chip.state = CHIPSTATE_STOPPED;
1926 
1927 	/*
1928 	 * Remove DMA buffers from the rings.
1929 	 */
1930 	vr_rxring_fini(vrp);
1931 	vr_txring_fini(vrp);
1932 	return (0);
1933 }
1934 
1935 int
1936 vr_mac_start(void *p)
1937 {
1938 	vr_t	*vrp;
1939 	int	rc;
1940 
1941 	vrp = (vr_t *)p;
1942 	mutex_enter(&vrp->oplock);
1943 
1944 	/*
1945 	 * Reset the card.
1946 	 */
1947 	vr_reset(vrp);
1948 
1949 	/*
1950 	 * Prepare and enable the card.
1951 	 */
1952 	rc = vr_start(vrp);
1953 
1954 	/*
1955 	 * Configure a cyclic function to keep the card & driver from diverting.
1956 	 */
1957 	vrp->periodic_id =
1958 	    ddi_periodic_add(vr_periodic, vrp, VR_CHECK_INTERVAL, DDI_IPL_0);
1959 
1960 	mutex_exit(&vrp->oplock);
1961 	return (rc);
1962 }
1963 
/*
 * GLDv3 mc_stop(9E) entry point: stop the device and remove the
 * periodic TX health check.
 */
void
vr_mac_stop(void *p)
{
	vr_t	*vrp = p;

	mutex_enter(&vrp->oplock);
	mutex_enter(&vrp->tx.lock);

	/*
	 * Stop the device.
	 */
	(void) vr_stop(vrp);
	mutex_exit(&vrp->tx.lock);

	/*
	 * Remove the cyclic from the system.
	 */
	ddi_periodic_delete(vrp->periodic_id);
	mutex_exit(&vrp->oplock);
}
1984 
1985 /*
1986  * Add or remove a multicast address to/from the filter
1987  *
1988  * From the 21143 manual:
1989  *  The 21143 can store 512 bits serving as hash bucket heads, and one physical
1990  *  48-bit Ethernet address. Incoming frames with multicast destination
1991  *  addresses are subjected to imperfect filtering. Frames with physical
1992  *  destination  addresses are checked against the single physical address.
1993  *  For any incoming frame with a multicast destination address, the 21143
1994  *  applies the standard Ethernet cyclic redundancy check (CRC) function to the
1995  *  first 6 bytes containing the destination address, then it uses the most
1996  *  significant 9 bits of the result as a bit index into the table. If the
1997  *  indexed bit is set, the frame is accepted. If the bit is cleared, the frame
1998  *  is rejected. This filtering mode is called imperfect because multicast
1999  *  frames not addressed to this station may slip through, but it still
2000  *  decreases the number of frames that the host can receive.
2001  * I assume the above is also the way the VIA chips work. There's not a single
2002  * word about the multicast filter in the datasheet.
2003  *
2004  * Another word on the CAM filter on VT6105M controllers:
2005  *  The VT6105M has content addressable memory which can be used for perfect
2006  *  filtering of 32 multicast addresses and a few VLAN id's
2007  *
2008  *  I think it works like this: When the controller receives a multicast
2009  *  address, it looks up the address using CAM. When it is found, it takes the
2010  *  matching cell address (index) and compares this to the bit position in the
2011  *  cam mask. If the bit is set, the packet is passed up. If CAM lookup does not
2012  *  result in a match, the packet is filtered using the hash based filter,
2013  *  if that matches, the packet is passed up and dropped otherwise
2014  * Also, there's not a single word in the datasheet on how this cam is supposed
2015  * to work ...
2016  */
2017 int
2018 vr_mac_set_multicast(void *p, boolean_t add, const uint8_t *mca)
2019 {
2020 	vr_t		*vrp;
2021 	uint32_t	crc_index;
2022 	int32_t		cam_index;
2023 	uint32_t	cam_mask;
2024 	boolean_t	use_hash_filter;
2025 	ether_addr_t	taddr;
2026 	uint32_t	a;
2027 
2028 	vrp = (vr_t *)p;
2029 	mutex_enter(&vrp->oplock);
2030 	mutex_enter(&vrp->intrlock);
2031 	use_hash_filter = B_FALSE;
2032 
2033 	if ((vrp->chip.info.features & VR_FEATURE_CAMSUPPORT) != 0) {
2034 		/*
2035 		 * Program the perfect filter.
2036 		 */
2037 		cam_mask = VR_GET32(vrp->acc_reg, VR_CAM_MASK);
2038 		if (add == B_TRUE) {
2039 			/*
2040 			 * Get index of first empty slot.
2041 			 */
2042 			bzero(&taddr, sizeof (taddr));
2043 			cam_index = vr_cam_index(vrp, taddr);
2044 			if (cam_index != -1) {
2045 				/*
2046 				 * Add address at cam_index.
2047 				 */
2048 				cam_mask |= (1 << cam_index);
2049 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2050 				    VR_CAM_CTRL_ENABLE);
2051 				VR_PUT8(vrp->acc_reg, VR_CAM_ADDR, cam_index);
2052 				VR_PUT32(vrp->acc_reg, VR_CAM_MASK, cam_mask);
2053 				for (a = 0; a < ETHERADDRL; a++) {
2054 					VR_PUT8(vrp->acc_reg,
2055 					    VR_MCAM0 + a, mca[a]);
2056 				}
2057 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2058 				    VR_CAM_CTRL_WRITE);
2059 				drv_usecwait(2);
2060 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2061 				    VR_CAM_CTRL_DONE);
2062 			} else {
2063 				/*
2064 				 * No free CAM slots available
2065 				 * Add mca to the imperfect filter.
2066 				 */
2067 				use_hash_filter = B_TRUE;
2068 			}
2069 		} else {
2070 			/*
2071 			 * Find the index of the entry to remove
2072 			 * If the entry was not found (-1), the addition was
2073 			 * probably done when the table was full.
2074 			 */
2075 			cam_index = vr_cam_index(vrp, mca);
2076 			if (cam_index != -1) {
2077 				/*
2078 				 * Disable the corresponding mask bit.
2079 				 */
2080 				cam_mask &= ~(1 << cam_index);
2081 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2082 				    VR_CAM_CTRL_ENABLE);
2083 				VR_PUT32(vrp->acc_reg, VR_CAM_MASK, cam_mask);
2084 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2085 				    VR_CAM_CTRL_DONE);
2086 			} else {
2087 				/*
2088 				 * The entry to be removed was not found
2089 				 * The likely cause is that the CAM was full
2090 				 * during addition. The entry is added to the
2091 				 * hash filter in that case and needs to be
2092 				 * removed there too.
2093 				 */
2094 				use_hash_filter = B_TRUE;
2095 			}
2096 		}
2097 	} else {
2098 		/*
2099 		 * No CAM in the MAC, thus we need the hash filter.
2100 		 */
2101 		use_hash_filter = B_TRUE;
2102 	}
2103 
2104 	if (use_hash_filter == B_TRUE) {
2105 		/*
2106 		 * Get the CRC-32 of the multicast address
2107 		 * The card uses the "MSB first" direction when calculating the
2108 		 * the CRC. This is odd because ethernet is "LSB first"
2109 		 * We have to use that "big endian" approach as well.
2110 		 */
2111 		crc_index = ether_crc_be(mca) >> (32 - 6);
2112 		if (add == B_TRUE) {
2113 			/*
2114 			 * Turn bit[crc_index] on.
2115 			 */
2116 			if (crc_index < 32)
2117 				vrp->mhash0 |= (1 << crc_index);
2118 			else
2119 				vrp->mhash1 |= (1 << (crc_index - 32));
2120 		} else {
2121 			/*
2122 			 * Turn bit[crc_index] off.
2123 			 */
2124 			if (crc_index < 32)
2125 				vrp->mhash0 &= ~(0 << crc_index);
2126 			else
2127 				vrp->mhash1 &= ~(0 << (crc_index - 32));
2128 		}
2129 
2130 		/*
2131 		 * When not promiscuous write the filter now. When promiscuous,
2132 		 * the filter is open and will be written when promiscuous ends.
2133 		 */
2134 		if (vrp->promisc == B_FALSE) {
2135 			VR_PUT32(vrp->acc_reg, VR_MAR0, vrp->mhash0);
2136 			VR_PUT32(vrp->acc_reg, VR_MAR1, vrp->mhash1);
2137 		}
2138 	}
2139 
2140 	/*
2141 	 * Enable/disable multicast receivements based on mcount.
2142 	 */
2143 	if (add == B_TRUE)
2144 		vrp->mcount++;
2145 	else if (vrp->mcount != 0)
2146 		vrp->mcount --;
2147 	if (vrp->mcount != 0)
2148 		VR_SETBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTMULTI);
2149 	else
2150 		VR_CLRBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTMULTI);
2151 
2152 	mutex_exit(&vrp->intrlock);
2153 	mutex_exit(&vrp->oplock);
2154 	return (0);
2155 }
2156 
2157 /*
2158  * Calculate the CRC32 for 6 bytes of multicast address in MSB(it) first order.
2159  * The MSB first order is a bit odd because Ethernet standard is LSB first
2160  */
2161 static uint32_t
2162 ether_crc_be(const uint8_t *data)
2163 {
2164 	uint32_t	crc = (uint32_t)0xFFFFFFFFU;
2165 	uint32_t	carry;
2166 	uint32_t	bit;
2167 	uint32_t	length;
2168 	uint8_t		c;
2169 
2170 	for (length = 0; length < ETHERADDRL; length++) {
2171 		c = data[length];
2172 		for (bit = 0; bit < 8; bit++) {
2173 			carry = ((crc & 0x80000000U) ? 1 : 0) ^ (c & 0x01);
2174 			crc <<= 1;
2175 			c >>= 1;
2176 			if (carry)
2177 				crc = (crc ^ 0x04C11DB6) | carry;
2178 		}
2179 	}
2180 	return (crc);
2181 }
2182 
2183 
2184 /*
2185  * Return the CAM index (base 0) of maddr or -1 if maddr is not found
2186  * If maddr is 0, return the index of an empty slot in CAM or -1 when no free
2187  * slots available.
2188  */
2189 static int32_t
2190 vr_cam_index(vr_t *vrp, const uint8_t *maddr)
2191 {
2192 	ether_addr_t	taddr;
2193 	int32_t		index;
2194 	uint32_t	mask;
2195 	uint32_t	a;
2196 
2197 	bzero(&taddr, sizeof (taddr));
2198 
2199 	/*
2200 	 * Read the CAM mask from the controller.
2201 	 */
2202 	mask = VR_GET32(vrp->acc_reg, VR_CAM_MASK);
2203 
2204 	/*
2205 	 * If maddr is 0, return the first unused slot or -1 for no unused.
2206 	 */
2207 	if (bcmp(maddr, taddr, ETHERADDRL) == 0) {
2208 		/*
2209 		 * Look for the first unused position in mask.
2210 		 */
2211 		for (index = 0; index < VR_CAM_SZ; index++) {
2212 			if (((mask >> index) & 1) == 0)
2213 				return (index);
2214 		}
2215 		return (-1);
2216 	} else {
2217 		/*
2218 		 * Look for maddr in CAM.
2219 		 */
2220 		for (index = 0; index < VR_CAM_SZ; index++) {
2221 			/* Look at enabled entries only */
2222 			if (((mask >> index) & 1) == 0)
2223 				continue;
2224 
2225 			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_ENABLE);
2226 			VR_PUT8(vrp->acc_reg, VR_CAM_ADDR, index);
2227 			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_READ);
2228 			drv_usecwait(2);
2229 			for (a = 0; a < ETHERADDRL; a++)
2230 				taddr[a] = VR_GET8(vrp->acc_reg, VR_MCAM0 + a);
2231 			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
2232 			if (bcmp(maddr, taddr, ETHERADDRL) == 0)
2233 				return (index);
2234 		}
2235 	}
2236 	return (-1);
2237 }
2238 
2239 /*
2240  * Set promiscuous mode on or off.
2241  */
2242 int
2243 vr_mac_set_promisc(void *p, boolean_t promiscflag)
2244 {
2245 	vr_t		*vrp;
2246 	uint8_t		rxcfg;
2247 
2248 	vrp = (vr_t *)p;
2249 
2250 	mutex_enter(&vrp->intrlock);
2251 	mutex_enter(&vrp->oplock);
2252 	mutex_enter(&vrp->tx.lock);
2253 
2254 	/*
2255 	 * Get current receive configuration.
2256 	 */
2257 	rxcfg = VR_GET8(vrp->acc_reg, VR_RXCFG);
2258 	vrp->promisc = promiscflag;
2259 
2260 	if (promiscflag == B_TRUE) {
2261 		/*
2262 		 * Enable promiscuous mode and open the multicast filter.
2263 		 */
2264 		rxcfg |= (VR_RXCFG_PROMISC | VR_RXCFG_ACCEPTMULTI);
2265 		VR_PUT32(vrp->acc_reg, VR_MAR0, 0xffffffff);
2266 		VR_PUT32(vrp->acc_reg, VR_MAR1, 0xffffffff);
2267 	} else {
2268 		/*
2269 		 * Restore the multicast filter and disable promiscuous mode.
2270 		 */
2271 		VR_PUT32(vrp->acc_reg, VR_MAR0, vrp->mhash0);
2272 		VR_PUT32(vrp->acc_reg, VR_MAR1, vrp->mhash1);
2273 		rxcfg &= ~VR_RXCFG_PROMISC;
2274 		if (vrp->mcount != 0)
2275 			rxcfg |= VR_RXCFG_ACCEPTMULTI;
2276 	}
2277 	VR_PUT8(vrp->acc_reg, VR_RXCFG, rxcfg);
2278 	mutex_exit(&vrp->tx.lock);
2279 	mutex_exit(&vrp->oplock);
2280 	mutex_exit(&vrp->intrlock);
2281 	return (0);
2282 }
2283 
2284 int
2285 vr_mac_getstat(void *arg, uint_t stat, uint64_t *val)
2286 {
2287 	vr_t		*vrp;
2288 	uint64_t	v;
2289 
2290 	vrp = (void *) arg;
2291 
2292 	switch (stat) {
2293 	default:
2294 		return (ENOTSUP);
2295 
2296 	case ETHER_STAT_ADV_CAP_100T4:
2297 		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_T4) != 0;
2298 		break;
2299 
2300 	case ETHER_STAT_ADV_CAP_100FDX:
2301 		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_TX_FD) != 0;
2302 		break;
2303 
2304 	case ETHER_STAT_ADV_CAP_100HDX:
2305 		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_TX) != 0;
2306 		break;
2307 
2308 	case ETHER_STAT_ADV_CAP_10FDX:
2309 		v = (vrp->chip.mii.anadv & MII_ABILITY_10BASE_T_FD) != 0;
2310 		break;
2311 
2312 	case ETHER_STAT_ADV_CAP_10HDX:
2313 		v = (vrp->chip.mii.anadv & MII_ABILITY_10BASE_T) != 0;
2314 		break;
2315 
2316 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
2317 		v = 0;
2318 		break;
2319 
2320 	case ETHER_STAT_ADV_CAP_AUTONEG:
2321 		v = (vrp->chip.mii.control & MII_CONTROL_ANE) != 0;
2322 		break;
2323 
2324 	case ETHER_STAT_ADV_CAP_PAUSE:
2325 		v = (vrp->chip.mii.anadv & MII_ABILITY_PAUSE) != 0;
2326 		break;
2327 
2328 	case ETHER_STAT_ADV_REMFAULT:
2329 		v = (vrp->chip.mii.anadv & MII_AN_ADVERT_REMFAULT) != 0;
2330 		break;
2331 
2332 	case ETHER_STAT_ALIGN_ERRORS:
2333 		v = vrp->stats.ether_stat_align_errors;
2334 		break;
2335 
2336 	case ETHER_STAT_CAP_100T4:
2337 		v = (vrp->chip.mii.status & MII_STATUS_100_BASE_T4) != 0;
2338 		break;
2339 
2340 	case ETHER_STAT_CAP_100FDX:
2341 		v = (vrp->chip.mii.status & MII_STATUS_100_BASEX_FD) != 0;
2342 		break;
2343 
2344 	case ETHER_STAT_CAP_100HDX:
2345 		v = (vrp->chip.mii.status & MII_STATUS_100_BASEX) != 0;
2346 		break;
2347 
2348 	case ETHER_STAT_CAP_10FDX:
2349 		v = (vrp->chip.mii.status & MII_STATUS_10_FD) != 0;
2350 		break;
2351 
2352 	case ETHER_STAT_CAP_10HDX:
2353 		v = (vrp->chip.mii.status & MII_STATUS_10) != 0;
2354 		break;
2355 
2356 	case ETHER_STAT_CAP_ASMPAUSE:
2357 		v = 0;
2358 		break;
2359 
2360 	case ETHER_STAT_CAP_AUTONEG:
2361 		v = (vrp->chip.mii.status & MII_STATUS_CANAUTONEG) != 0;
2362 		break;
2363 
2364 	case ETHER_STAT_CAP_PAUSE:
2365 		v = 1;
2366 		break;
2367 
2368 	case ETHER_STAT_CAP_REMFAULT:
2369 		v = (vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0;
2370 		break;
2371 
2372 	case ETHER_STAT_CARRIER_ERRORS:
2373 		/*
2374 		 * Number of times carrier was lost or never detected on a
2375 		 * transmission attempt.
2376 		 */
2377 		v = vrp->stats.ether_stat_carrier_errors;
2378 		break;
2379 
2380 	case ETHER_STAT_JABBER_ERRORS:
2381 		return (ENOTSUP);
2382 
2383 	case ETHER_STAT_DEFER_XMTS:
2384 		/*
2385 		 * Packets without collisions where first transmit attempt was
2386 		 * delayed because the medium was busy.
2387 		 */
2388 		v = vrp->stats.ether_stat_defer_xmts;
2389 		break;
2390 
2391 	case ETHER_STAT_EX_COLLISIONS:
2392 		/*
2393 		 * Frames where excess collisions occurred on transmit, causing
2394 		 * transmit failure.
2395 		 */
2396 		v = vrp->stats.ether_stat_ex_collisions;
2397 		break;
2398 
2399 	case ETHER_STAT_FCS_ERRORS:
2400 		/*
2401 		 * Packets received with CRC errors.
2402 		 */
2403 		v = vrp->stats.ether_stat_fcs_errors;
2404 		break;
2405 
2406 	case ETHER_STAT_FIRST_COLLISIONS:
2407 		/*
2408 		 * Packets successfully transmitted with exactly one collision.
2409 		 */
2410 		v = vrp->stats.ether_stat_first_collisions;
2411 		break;
2412 
2413 	case ETHER_STAT_LINK_ASMPAUSE:
2414 		v = 0;
2415 		break;
2416 
2417 	case ETHER_STAT_LINK_AUTONEG:
2418 		v = (vrp->chip.mii.control & MII_CONTROL_ANE) != 0 &&
2419 		    (vrp->chip.mii.status & MII_STATUS_ANDONE) != 0;
2420 		break;
2421 
2422 	case ETHER_STAT_LINK_DUPLEX:
2423 		v = vrp->chip.link.duplex;
2424 		break;
2425 
2426 	case ETHER_STAT_LINK_PAUSE:
2427 		v = vrp->chip.link.flowctrl;
2428 		break;
2429 
2430 	case ETHER_STAT_LP_CAP_100T4:
2431 		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_T4) != 0;
2432 		break;
2433 
2434 	case ETHER_STAT_LP_CAP_1000FDX:
2435 		v = 0;
2436 		break;
2437 
2438 	case ETHER_STAT_LP_CAP_1000HDX:
2439 		v = 0;
2440 		break;
2441 
2442 	case ETHER_STAT_LP_CAP_100FDX:
2443 		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_TX_FD) != 0;
2444 		break;
2445 
2446 	case ETHER_STAT_LP_CAP_100HDX:
2447 		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_TX) != 0;
2448 		break;
2449 
2450 	case ETHER_STAT_LP_CAP_10FDX:
2451 		v = (vrp->chip.mii.lpable & MII_ABILITY_10BASE_T_FD) != 0;
2452 		break;
2453 
2454 	case ETHER_STAT_LP_CAP_10HDX:
2455 		v = (vrp->chip.mii.lpable & MII_ABILITY_10BASE_T) != 0;
2456 		break;
2457 
2458 	case ETHER_STAT_LP_CAP_ASMPAUSE:
2459 		v = 0;
2460 		break;
2461 
2462 	case ETHER_STAT_LP_CAP_AUTONEG:
2463 		v = (vrp->chip.mii.anexp & MII_AN_EXP_LPCANAN) != 0;
2464 		break;
2465 
2466 	case ETHER_STAT_LP_CAP_PAUSE:
2467 		v = (vrp->chip.mii.lpable & MII_ABILITY_PAUSE) != 0;
2468 		break;
2469 
2470 	case ETHER_STAT_LP_REMFAULT:
2471 		v = (vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0;
2472 		break;
2473 
2474 	case ETHER_STAT_MACRCV_ERRORS:
2475 		/*
2476 		 * Packets received with MAC errors, except align_errors,
2477 		 * fcs_errors, and toolong_errors.
2478 		 */
2479 		v = vrp->stats.ether_stat_macrcv_errors;
2480 		break;
2481 
2482 	case ETHER_STAT_MACXMT_ERRORS:
2483 		/*
2484 		 * Packets encountering transmit MAC failures, except carrier
2485 		 * and collision failures.
2486 		 */
2487 		v = vrp->stats.ether_stat_macxmt_errors;
2488 		break;
2489 
2490 	case ETHER_STAT_MULTI_COLLISIONS:
2491 		/*
2492 		 * Packets successfully transmitted with multiple collisions.
2493 		 */
2494 		v = vrp->stats.ether_stat_multi_collisions;
2495 		break;
2496 
2497 	case ETHER_STAT_SQE_ERRORS:
2498 		/*
2499 		 * Number of times signal quality error was reported
2500 		 * This one is reported by the PHY.
2501 		 */
2502 		return (ENOTSUP);
2503 
2504 	case ETHER_STAT_TOOLONG_ERRORS:
2505 		/*
2506 		 * Packets received larger than the maximum permitted length.
2507 		 */
2508 		v = vrp->stats.ether_stat_toolong_errors;
2509 		break;
2510 
2511 	case ETHER_STAT_TOOSHORT_ERRORS:
2512 		v = vrp->stats.ether_stat_tooshort_errors;
2513 		break;
2514 
2515 	case ETHER_STAT_TX_LATE_COLLISIONS:
2516 		/*
2517 		 * Number of times a transmit collision occurred late
2518 		 * (after 512 bit times).
2519 		 */
2520 		v = vrp->stats.ether_stat_tx_late_collisions;
2521 		break;
2522 
2523 	case ETHER_STAT_XCVR_ADDR:
2524 		/*
2525 		 * MII address in the 0 to 31 range of the physical layer
2526 		 * device in use for a given Ethernet device.
2527 		 */
2528 		v = vrp->chip.phyaddr;
2529 		break;
2530 
2531 	case ETHER_STAT_XCVR_ID:
2532 		/*
2533 		 * MII transceiver manufacturer and device ID.
2534 		 */
2535 		v = (vrp->chip.mii.identh << 16) | vrp->chip.mii.identl;
2536 		break;
2537 
2538 	case ETHER_STAT_XCVR_INUSE:
2539 		v = vrp->chip.link.mau;
2540 		break;
2541 
2542 	case MAC_STAT_BRDCSTRCV:
2543 		v = vrp->stats.mac_stat_brdcstrcv;
2544 		break;
2545 
2546 	case MAC_STAT_BRDCSTXMT:
2547 		v = vrp->stats.mac_stat_brdcstxmt;
2548 		break;
2549 
2550 	case MAC_STAT_MULTIXMT:
2551 		v = vrp->stats.mac_stat_multixmt;
2552 		break;
2553 
2554 	case MAC_STAT_COLLISIONS:
2555 		v = vrp->stats.mac_stat_collisions;
2556 		break;
2557 
2558 	case MAC_STAT_IERRORS:
2559 		v = vrp->stats.mac_stat_ierrors;
2560 		break;
2561 
2562 	case MAC_STAT_IFSPEED:
2563 		if (vrp->chip.link.speed == VR_LINK_SPEED_100MBS)
2564 			v = 100 * 1000 * 1000;
2565 		else if (vrp->chip.link.speed == VR_LINK_SPEED_10MBS)
2566 			v = 10 * 1000 * 1000;
2567 		else
2568 			v = 0;
2569 		break;
2570 
2571 	case MAC_STAT_IPACKETS:
2572 		v = vrp->stats.mac_stat_ipackets;
2573 		break;
2574 
2575 	case MAC_STAT_MULTIRCV:
2576 		v = vrp->stats.mac_stat_multircv;
2577 		break;
2578 
2579 	case MAC_STAT_NORCVBUF:
2580 		vrp->stats.mac_stat_norcvbuf +=
2581 		    VR_GET16(vrp->acc_reg, VR_TALLY_MPA);
2582 		VR_PUT16(vrp->acc_reg, VR_TALLY_MPA, 0);
2583 		v = vrp->stats.mac_stat_norcvbuf;
2584 		break;
2585 
2586 	case MAC_STAT_NOXMTBUF:
2587 		v = vrp->stats.mac_stat_noxmtbuf;
2588 		break;
2589 
2590 	case MAC_STAT_OBYTES:
2591 		v = vrp->stats.mac_stat_obytes;
2592 		break;
2593 
2594 	case MAC_STAT_OERRORS:
2595 		v = vrp->stats.ether_stat_macxmt_errors +
2596 		    vrp->stats.mac_stat_underflows +
2597 		    vrp->stats.ether_stat_align_errors +
2598 		    vrp->stats.ether_stat_carrier_errors +
2599 		    vrp->stats.ether_stat_fcs_errors;
2600 		break;
2601 
2602 	case MAC_STAT_OPACKETS:
2603 		v = vrp->stats.mac_stat_opackets;
2604 		break;
2605 
2606 	case MAC_STAT_RBYTES:
2607 		v = vrp->stats.mac_stat_rbytes;
2608 		break;
2609 
2610 	case MAC_STAT_UNKNOWNS:
2611 		/*
2612 		 * Isn't this something for the MAC layer to maintain?
2613 		 */
2614 		return (ENOTSUP);
2615 
2616 	case MAC_STAT_UNDERFLOWS:
2617 		v = vrp->stats.mac_stat_underflows;
2618 		break;
2619 
2620 	case MAC_STAT_OVERFLOWS:
2621 		v = vrp->stats.mac_stat_overflows;
2622 		break;
2623 	}
2624 	*val = v;
2625 	return (0);
2626 }
2627 
2628 int
2629 vr_mac_set_ether_addr(void *p, const uint8_t *ea)
2630 {
2631 	vr_t	*vrp;
2632 	int	i;
2633 
2634 	vrp = (vr_t *)p;
2635 	mutex_enter(&vrp->oplock);
2636 	mutex_enter(&vrp->intrlock);
2637 
2638 	/*
2639 	 * Set a new station address.
2640 	 */
2641 	for (i = 0; i < ETHERADDRL; i++)
2642 		VR_PUT8(vrp->acc_reg, VR_ETHERADDR + i, ea[i]);
2643 
2644 	mutex_exit(&vrp->intrlock);
2645 	mutex_exit(&vrp->oplock);
2646 	return (0);
2647 }
2648 
2649 /*
2650  * Configure the ethernet link according to param and chip.mii.
2651  */
2652 static void
2653 vr_link_init(vr_t *vrp)
2654 {
2655 	ASSERT(mutex_owned(&vrp->oplock));
2656 	if ((vrp->chip.mii.control & MII_CONTROL_ANE) != 0) {
2657 		/*
2658 		 * If we do autoneg, ensure restart autoneg is ON.
2659 		 */
2660 		vrp->chip.mii.control |= MII_CONTROL_RSAN;
2661 
2662 		/*
2663 		 * The advertisements are prepared by param_init.
2664 		 */
2665 		vr_phy_write(vrp, MII_AN_ADVERT, vrp->chip.mii.anadv);
2666 	} else {
2667 		/*
2668 		 * If we don't autoneg, we need speed, duplex and flowcontrol
2669 		 * to configure the link. However, dladm doesn't allow changes
2670 		 * to speed and duplex (readonly). The way this is solved
2671 		 * (ahem) is to select the highest enabled combination
2672 		 * Speed and duplex should be r/w when autoneg is off.
2673 		 */
2674 		if ((vrp->param.anadv_en &
2675 		    MII_ABILITY_100BASE_TX_FD) != 0) {
2676 			vrp->chip.mii.control |= MII_CONTROL_100MB;
2677 			vrp->chip.mii.control |= MII_CONTROL_FDUPLEX;
2678 		} else if ((vrp->param.anadv_en &
2679 		    MII_ABILITY_100BASE_TX) != 0) {
2680 			vrp->chip.mii.control |= MII_CONTROL_100MB;
2681 			vrp->chip.mii.control &= ~MII_CONTROL_FDUPLEX;
2682 		} else if ((vrp->param.anadv_en &
2683 		    MII_ABILITY_10BASE_T_FD) != 0) {
2684 			vrp->chip.mii.control |= MII_CONTROL_FDUPLEX;
2685 			vrp->chip.mii.control &= ~MII_CONTROL_100MB;
2686 		} else {
2687 			vrp->chip.mii.control &= ~MII_CONTROL_100MB;
2688 			vrp->chip.mii.control &= ~MII_CONTROL_FDUPLEX;
2689 		}
2690 	}
2691 	/*
2692 	 * Write the control register.
2693 	 */
2694 	vr_phy_write(vrp, MII_CONTROL, vrp->chip.mii.control);
2695 
2696 	/*
2697 	 * With autoneg off we cannot rely on the link_change interrupt for
2698 	 * for getting the status into the driver.
2699 	 */
2700 	if ((vrp->chip.mii.control & MII_CONTROL_ANE) == 0) {
2701 		vr_link_state(vrp);
2702 		mac_link_update(vrp->machdl,
2703 		    (link_state_t)vrp->chip.link.state);
2704 	}
2705 }
2706 
2707 /*
2708  * Get link state in the driver and configure the MAC accordingly.
2709  */
static void
vr_link_state(vr_t *vrp)
{
	uint16_t		mask;

	ASSERT(mutex_owned(&vrp->oplock));

	/*
	 * Snapshot the relevant PHY registers via MDIO.
	 */
	vr_phy_read(vrp, MII_STATUS, &vrp->chip.mii.status);
	vr_phy_read(vrp, MII_CONTROL, &vrp->chip.mii.control);
	vr_phy_read(vrp, MII_AN_ADVERT, &vrp->chip.mii.anadv);
	vr_phy_read(vrp, MII_AN_LPABLE, &vrp->chip.mii.lpable);
	vr_phy_read(vrp, MII_AN_EXPANSION, &vrp->chip.mii.anexp);

	/*
	 * If we did autoneg, deduce the link type/speed by selecting the
	 * highest common denominator of our advertisement and the link
	 * partner's abilities.
	 */
	if ((vrp->chip.mii.control & MII_CONTROL_ANE) != 0) {
		mask = vrp->chip.mii.anadv & vrp->chip.mii.lpable;
		if ((mask & MII_ABILITY_100BASE_TX_FD) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
			vrp->chip.link.mau = VR_MAU_100X;
		} else if ((mask & MII_ABILITY_100BASE_T4) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
			vrp->chip.link.mau = VR_MAU_100T4;
		} else if ((mask & MII_ABILITY_100BASE_TX) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
			vrp->chip.link.mau = VR_MAU_100X;
		} else if ((mask & MII_ABILITY_10BASE_T_FD) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
			vrp->chip.link.mau = VR_MAU_10;
		} else if ((mask & MII_ABILITY_10BASE_T) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
			vrp->chip.link.mau = VR_MAU_10;
		} else {
			vrp->chip.link.speed = VR_LINK_SPEED_UNKNOWN;
			vrp->chip.link.duplex = VR_LINK_DUPLEX_UNKNOWN;
			vrp->chip.link.mau = VR_MAU_UNKNOWN;
		}

		/*
		 * Did we negotiate pause? Pause is only used on full
		 * duplex links.
		 */
		if ((mask & MII_ABILITY_PAUSE) != 0 &&
		    vrp->chip.link.duplex == VR_LINK_DUPLEX_FULL)
			vrp->chip.link.flowctrl = VR_PAUSE_BIDIRECTIONAL;
		else
			vrp->chip.link.flowctrl = VR_PAUSE_NONE;

		/*
		 * Did either one detect an AN fault?
		 */
		if ((vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0)
			vr_log(vrp, CE_WARN,
			    "AN remote fault reported by LP.");

		if ((vrp->chip.mii.lpable & MII_AN_ADVERT_REMFAULT) != 0)
			vr_log(vrp, CE_WARN, "AN remote fault caused for LP.");
	} else {
		/*
		 * We didn't autoneg
		 * The link type is defined by the control register.
		 */
		if ((vrp->chip.mii.control & MII_CONTROL_100MB) != 0) {
			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
			vrp->chip.link.mau = VR_MAU_100X;
		} else {
			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
			vrp->chip.link.mau = VR_MAU_10;
		}

		if ((vrp->chip.mii.control & MII_CONTROL_FDUPLEX) != 0)
			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
		else {
			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
			/*
			 * No pause on HDX links.
			 */
			vrp->chip.link.flowctrl = VR_PAUSE_NONE;
		}
	}

	/*
	 * Set the duplex mode on the MAC according to that of the PHY.
	 */
	if (vrp->chip.link.duplex == VR_LINK_DUPLEX_FULL) {
		VR_SETBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_MACFULLDUPLEX);
		/*
		 * Enable packet queueing on FDX links.
		 */
		if ((vrp->chip.info.bugs & VR_BUG_NO_TXQUEUEING) == 0)
			VR_CLRBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_QPKTDIS);
	} else {
		VR_CLRBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_MACFULLDUPLEX);
		/*
		 * Disable packet queueing on HDX links. With queueing enabled,
		 * this MAC gets lost after a TX abort (too many collisions).
		 */
		VR_SETBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_QPKTDIS);
	}

	/*
	 * Set pause options on the MAC.
	 */
	if (vrp->chip.link.flowctrl == VR_PAUSE_BIDIRECTIONAL) {
		/*
		 * All of our MAC's can receive pause frames.
		 */
		VR_SETBIT8(vrp->acc_reg, VR_MISC0, VR_MISC0_FDXRFEN);

		/*
		 * VT6105 and above can transmit pause frames.
		 */
		if ((vrp->chip.info.features & VR_FEATURE_TX_PAUSE_CAP) != 0) {
			/*
			 * Set the number of available receive descriptors
			 * Non-zero values written to this register are added
			 * to the register's contents. Careful: Writing zero
			 * clears the register and thus causes a (long) pause
			 * request.
			 */
			VR_PUT8(vrp->acc_reg, VR_FCR0_RXBUFCOUNT,
			    MIN(vrp->rx.ndesc, 0xFF) -
			    VR_GET8(vrp->acc_reg,
			    VR_FCR0_RXBUFCOUNT));

			/*
			 * Request pause when we have 4 descs left.
			 */
			VR_SETBITS8(vrp->acc_reg, VR_FCR1,
			    VR_FCR1_PAUSEONBITS, VR_FCR1_PAUSEON_04);

			/*
			 * Cancel the pause when there are 24 descriptors again.
			 */
			VR_SETBITS8(vrp->acc_reg, VR_FCR1,
			    VR_FCR1_PAUSEOFFBITS, VR_FCR1_PAUSEOFF_24);

			/*
			 * Request a pause of FFFF bit-times. This long pause
			 * is cancelled when the high watermark is reached.
			 */
			VR_PUT16(vrp->acc_reg, VR_FCR2_PAUSE, 0xFFFF);

			/*
			 * Enable flow control on the MAC.
			 */
			VR_SETBIT8(vrp->acc_reg, VR_MISC0, VR_MISC0_FDXTFEN);
			VR_SETBIT8(vrp->acc_reg, VR_FCR1, VR_FCR1_FD_RX_EN |
			    VR_FCR1_FD_TX_EN | VR_FCR1_XONXOFF_EN);
		}
	} else {
		/*
		 * Turn flow control OFF.
		 */
		VR_CLRBIT8(vrp->acc_reg,
		    VR_MISC0, VR_MISC0_FDXRFEN | VR_MISC0_FDXTFEN);
		if ((vrp->chip.info.features & VR_FEATURE_TX_PAUSE_CAP) != 0) {
			VR_CLRBIT8(vrp->acc_reg, VR_FCR1,
			    VR_FCR1_FD_RX_EN | VR_FCR1_FD_TX_EN |
			    VR_FCR1_XONXOFF_EN);
		}
	}

	/*
	 * Set link state.
	 */
	if ((vrp->chip.mii.status & MII_STATUS_LINKUP) != 0)
		vrp->chip.link.state = VR_LINK_STATE_UP;
	else
		vrp->chip.link.state = VR_LINK_STATE_DOWN;
}
2887 
2888 /*
2889  * The PHY is automatically polled by the MAC once per 1024 MD clock cycles
2890  * MD is clocked once per 960ns so polling happens about every 1M ns, some
2891  * 1000 times per second
2892  * This polling process is required for the functionality of the link change
2893  * interrupt. Polling process must be disabled in order to access PHY registers
2894  * using MDIO
2895  *
2896  * Turn off PHY polling so that the PHY registers can be accessed.
2897  */
static void
vr_phy_autopoll_disable(vr_t *vrp)
{
	uint32_t	time;
	uint8_t		miicmd, miiaddr;

	/*
	 * Special procedure to stop the autopolling.
	 */
	if ((vrp->chip.info.bugs & VR_BUG_MIIPOLLSTOP) != 0) {
		/*
		 * If polling is enabled.
		 */
		miicmd = VR_GET8(vrp->acc_reg, VR_MIICMD);
		if ((miicmd & VR_MIICMD_MD_AUTO) != 0) {
			/*
			 * Wait for the end of a cycle (mdone set).
			 * NOTE(review): this loop sleeps 10us per iteration
			 * but advances 'time' by VR_MMI_WAITINCR, unlike the
			 * other wait loops which sleep VR_MMI_WAITINCR; if
			 * VR_MMI_WAITINCR != 10 the effective timeout differs
			 * from VR_MMI_WAITMAX — confirm this is intentional.
			 */
			time = 0;
			do {
				drv_usecwait(10);
				if (time >= VR_MMI_WAITMAX) {
					vr_log(vrp, CE_WARN,
					    "Timeout in "
					    "disable MII polling");
					break;
				}
				time += VR_MMI_WAITINCR;
				miiaddr = VR_GET8(vrp->acc_reg, VR_MIIADDR);
			} while ((miiaddr & VR_MIIADDR_MDONE) == 0);
		}
		/*
		 * Once paused, we can disable autopolling.
		 */
		VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
	} else {
		/*
		 * Turn off MII polling.
		 */
		VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);

		/*
		 * Wait for MIDLE in MII address register.
		 */
		time = 0;
		do {
			drv_usecwait(VR_MMI_WAITINCR);
			if (time >= VR_MMI_WAITMAX) {
				vr_log(vrp, CE_WARN,
				    "Timeout in disable MII polling");
				break;
			}
			time += VR_MMI_WAITINCR;
			miiaddr = VR_GET8(vrp->acc_reg, VR_MIIADDR);
		} while ((miiaddr & VR_MIIADDR_MIDLE) == 0);
	}
}
2955 
2956 /*
2957  * Turn on PHY polling. PHY's registers cannot be accessed.
2958  */
2959 static void
2960 vr_phy_autopoll_enable(vr_t *vrp)
2961 {
2962 	uint32_t	time;
2963 
2964 	VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
2965 	VR_PUT8(vrp->acc_reg, VR_MIIADDR, MII_STATUS|VR_MIIADDR_MAUTO);
2966 	VR_PUT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_AUTO);
2967 
2968 	/*
2969 	 * Wait for the polling process to finish.
2970 	 */
2971 	time = 0;
2972 	do {
2973 		drv_usecwait(VR_MMI_WAITINCR);
2974 		if (time >= VR_MMI_WAITMAX) {
2975 			vr_log(vrp, CE_NOTE, "Timeout in enable MII polling");
2976 			break;
2977 		}
2978 		time += VR_MMI_WAITINCR;
2979 	} while ((VR_GET8(vrp->acc_reg, VR_MIIADDR) & VR_MIIADDR_MDONE) == 0);
2980 
2981 	/*
2982 	 * Initiate a polling.
2983 	 */
2984 	VR_SETBIT8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_MAUTO);
2985 }
2986 
2987 /*
2988  * Read a register from the PHY using MDIO.
2989  */
2990 static void
2991 vr_phy_read(vr_t *vrp, int offset, uint16_t *value)
2992 {
2993 	uint32_t	time;
2994 
2995 	vr_phy_autopoll_disable(vrp);
2996 
2997 	/*
2998 	 * Write the register number to the lower 5 bits of the MII address
2999 	 * register.
3000 	 */
3001 	VR_SETBITS8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_BITS, offset);
3002 
3003 	/*
3004 	 * Write a READ command to the MII control register
3005 	 * This bit will be cleared when the read is finished.
3006 	 */
3007 	VR_SETBIT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_READ);
3008 
3009 	/*
3010 	 * Wait until the read is done.
3011 	 */
3012 	time = 0;
3013 	do {
3014 		drv_usecwait(VR_MMI_WAITINCR);
3015 		if (time >= VR_MMI_WAITMAX) {
3016 			vr_log(vrp, CE_NOTE, "Timeout in MII read command");
3017 			break;
3018 		}
3019 		time += VR_MMI_WAITINCR;
3020 	} while ((VR_GET8(vrp->acc_reg, VR_MIICMD) & VR_MIICMD_MD_READ) != 0);
3021 
3022 	*value = VR_GET16(vrp->acc_reg, VR_MIIDATA);
3023 	vr_phy_autopoll_enable(vrp);
3024 }
3025 
3026 /*
3027  * Write to a PHY's register.
3028  */
3029 static void
3030 vr_phy_write(vr_t *vrp, int offset, uint16_t value)
3031 {
3032 	uint32_t	time;
3033 
3034 	vr_phy_autopoll_disable(vrp);
3035 
3036 	/*
3037 	 * Write the register number to the MII address register.
3038 	 */
3039 	VR_SETBITS8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_BITS, offset);
3040 
3041 	/*
3042 	 * Write the value to the data register.
3043 	 */
3044 	VR_PUT16(vrp->acc_reg, VR_MIIDATA, value);
3045 
3046 	/*
3047 	 * Issue the WRITE command to the command register.
3048 	 * This bit will be cleared when the write is finished.
3049 	 */
3050 	VR_SETBIT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_WRITE);
3051 
3052 	time = 0;
3053 	do {
3054 		drv_usecwait(VR_MMI_WAITINCR);
3055 		if (time >= VR_MMI_WAITMAX) {
3056 			vr_log(vrp, CE_NOTE, "Timeout in MII write command");
3057 			break;
3058 		}
3059 		time += VR_MMI_WAITINCR;
3060 	} while ((VR_GET8(vrp->acc_reg, VR_MIICMD) & VR_MIICMD_MD_WRITE) != 0);
3061 	vr_phy_autopoll_enable(vrp);
3062 }
3063 
3064 /*
3065  * Initialize and install some private kstats.
3066  */
typedef struct {
	char		*name;	/* kstat name presented to the user */
	uchar_t		type;	/* KSTAT_DATA_* type of the value */
} vr_kstat_t;

/*
 * Template for the driver-private kstats. The order of the entries in
 * this table must match the order in which vr_update_kstats() fills in
 * the values.
 */
static const vr_kstat_t vr_driver_stats [] = {
	{"allocbfail",		KSTAT_DATA_INT32},
	{"intr_claimed",	KSTAT_DATA_INT64},
	{"intr_unclaimed",	KSTAT_DATA_INT64},
	{"linkchanges",		KSTAT_DATA_INT64},
	{"txnfree",		KSTAT_DATA_INT32},
	{"txstalls",		KSTAT_DATA_INT32},
	{"resets",		KSTAT_DATA_INT32},
	{"txreclaims",		KSTAT_DATA_INT64},
	{"txreclaim0",		KSTAT_DATA_INT64},
	{"cyclics",		KSTAT_DATA_INT64},
	{"txchecks",		KSTAT_DATA_INT64},
};
3085 
3086 static void
3087 vr_kstats_init(vr_t *vrp)
3088 {
3089 	kstat_t			*ksp;
3090 	struct	kstat_named	*knp;
3091 	int			i;
3092 	int			nstats;
3093 
3094 	nstats = sizeof (vr_driver_stats) / sizeof (vr_kstat_t);
3095 
3096 	ksp = kstat_create(MODULENAME, ddi_get_instance(vrp->devinfo),
3097 	    "driver", "net", KSTAT_TYPE_NAMED, nstats, 0);
3098 
3099 	if (ksp == NULL)
3100 		vr_log(vrp, CE_WARN, "kstat_create failed");
3101 
3102 	ksp->ks_update = vr_update_kstats;
3103 	ksp->ks_private = (void*) vrp;
3104 	knp = ksp->ks_data;
3105 
3106 	for (i = 0; i < nstats; i++, knp++) {
3107 		kstat_named_init(knp, vr_driver_stats[i].name,
3108 		    vr_driver_stats[i].type);
3109 	}
3110 	kstat_install(ksp);
3111 	vrp->ksp = ksp;
3112 }
3113 
3114 static int
3115 vr_update_kstats(kstat_t *ksp, int access)
3116 {
3117 	vr_t			*vrp;
3118 	struct kstat_named	*knp;
3119 
3120 	vrp = (vr_t *)ksp->ks_private;
3121 	knp = ksp->ks_data;
3122 
3123 	if (access != KSTAT_READ)
3124 		return (EACCES);
3125 
3126 	(knp++)->value.ui32 = vrp->stats.allocbfail;
3127 	(knp++)->value.ui64 = vrp->stats.intr_claimed;
3128 	(knp++)->value.ui64 = vrp->stats.intr_unclaimed;
3129 	(knp++)->value.ui64 = vrp->stats.linkchanges;
3130 	(knp++)->value.ui32 = vrp->tx.nfree;
3131 	(knp++)->value.ui32 = vrp->stats.txstalls;
3132 	(knp++)->value.ui32 = vrp->stats.resets;
3133 	(knp++)->value.ui64 = vrp->stats.txreclaims;
3134 	(knp++)->value.ui64 = vrp->stats.txreclaim0;
3135 	(knp++)->value.ui64 = vrp->stats.cyclics;
3136 	(knp++)->value.ui64 = vrp->stats.txchecks;
3137 	return (0);
3138 }
3139 
3140 /*
3141  * Remove 'private' kstats.
3142  */
3143 static void
3144 vr_remove_kstats(vr_t *vrp)
3145 {
3146 	if (vrp->ksp != NULL)
3147 		kstat_delete(vrp->ksp);
3148 }
3149 
3150 /*
3151  * Get a property of the device/driver
3152  * Remarks:
3153  * - pr_val is always an integer of size pr_valsize
3154  * - ENABLED (EN) is what is configured via dladm
3155  * - ADVERTISED (ADV) is ENABLED minus constraints, like PHY/MAC capabilities
3156  * - DEFAULT are driver- and hardware defaults (DEFAULT is implemented as a
3157  *   flag in pr_flags instead of MAC_PROP_DEFAULT_)
3158  * - perm is the permission printed on ndd -get /.. \?
3159  */
int
vr_mac_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val)
{
	vr_t		*vrp;
	uint32_t	err;
	uint64_t	val;

	/* Since we have no private properties */
	_NOTE(ARGUNUSED(pr_name))

	err = 0;
	vrp = (vr_t *)arg;
	switch (pr_num) {
		/* Gigabit is never advertised nor enabled on this hardware */
		case MAC_PROP_ADV_1000FDX_CAP:
		case MAC_PROP_ADV_1000HDX_CAP:
		case MAC_PROP_EN_1000FDX_CAP:
		case MAC_PROP_EN_1000HDX_CAP:
			val = 0;
			break;

		/* ADV_* properties report the abilities in chip.mii.anadv */
		case MAC_PROP_ADV_100FDX_CAP:
			val = (vrp->chip.mii.anadv &
			    MII_ABILITY_100BASE_TX_FD) != 0;
			break;

		case MAC_PROP_ADV_100HDX_CAP:
			val = (vrp->chip.mii.anadv &
			    MII_ABILITY_100BASE_TX) != 0;
			break;

		case MAC_PROP_ADV_100T4_CAP:
			val = (vrp->chip.mii.anadv &
			    MII_ABILITY_100BASE_T4) != 0;
			break;

		case MAC_PROP_ADV_10FDX_CAP:
			val = (vrp->chip.mii.anadv &
			    MII_ABILITY_10BASE_T_FD) != 0;
			break;

		case MAC_PROP_ADV_10HDX_CAP:
			val = (vrp->chip.mii.anadv &
			    MII_ABILITY_10BASE_T) != 0;
			break;

		case MAC_PROP_AUTONEG:
			val = (vrp->chip.mii.control &
			    MII_CONTROL_ANE) != 0;
			break;

		case MAC_PROP_DUPLEX:
			val = vrp->chip.link.duplex;
			break;

		/* EN_* properties report what was enabled via param.anadv_en */
		case MAC_PROP_EN_100FDX_CAP:
			val = (vrp->param.anadv_en &
			    MII_ABILITY_100BASE_TX_FD) != 0;
			break;

		case MAC_PROP_EN_100HDX_CAP:
			val = (vrp->param.anadv_en &
			    MII_ABILITY_100BASE_TX) != 0;
			break;

		case MAC_PROP_EN_100T4_CAP:
			val = (vrp->param.anadv_en &
			    MII_ABILITY_100BASE_T4) != 0;
			break;

		case MAC_PROP_EN_10FDX_CAP:
			val = (vrp->param.anadv_en &
			    MII_ABILITY_10BASE_T_FD) != 0;
			break;

		case MAC_PROP_EN_10HDX_CAP:
			val = (vrp->param.anadv_en &
			    MII_ABILITY_10BASE_T) != 0;
			break;

		case MAC_PROP_EN_AUTONEG:
			val = vrp->param.an_en == VR_LINK_AUTONEG_ON;
			break;

		case MAC_PROP_FLOWCTRL:
			val = vrp->chip.link.flowctrl;
			break;

		case MAC_PROP_MTU:
			val = vrp->param.mtu;
			break;

		case MAC_PROP_SPEED:
			/* Speed is reported in bits per second */
			if (vrp->chip.link.speed ==
			    VR_LINK_SPEED_100MBS)
				val = 100 * 1000 * 1000;
			else if (vrp->chip.link.speed ==
			    VR_LINK_SPEED_10MBS)
				val = 10 * 1000 * 1000;
			else
				val = 0;
			break;

		case MAC_PROP_STATUS:
			val = vrp->chip.link.state;
			break;

		default:
			err = ENOTSUP;
			break;
	}

	/*
	 * Copy the result into the caller's buffer, dispatching on the
	 * size the caller supplied in pr_valsize.
	 */
	if (err == 0 && pr_num != MAC_PROP_PRIVATE) {
		if (pr_valsize == sizeof (uint64_t))
			*(uint64_t *)pr_val = val;
		else if (pr_valsize == sizeof (uint32_t))
			*(uint32_t *)pr_val = val;
		else if (pr_valsize == sizeof (uint16_t))
			*(uint16_t *)pr_val = val;
		else if (pr_valsize == sizeof (uint8_t))
			*(uint8_t *)pr_val = val;
		else
			err = EINVAL;
	}
	return (err);
}
3286 
3287 void
3288 vr_mac_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3289     mac_prop_info_handle_t prh)
3290 {
3291 	vr_t		*vrp = (vr_t *)arg;
3292 	uint8_t		val, perm;
3293 
3294 	/* Since we have no private properties */
3295 	_NOTE(ARGUNUSED(pr_name))
3296 
3297 	switch (pr_num) {
3298 		case MAC_PROP_ADV_1000FDX_CAP:
3299 		case MAC_PROP_ADV_1000HDX_CAP:
3300 		case MAC_PROP_EN_1000FDX_CAP:
3301 		case MAC_PROP_EN_1000HDX_CAP:
3302 		case MAC_PROP_ADV_100FDX_CAP:
3303 		case MAC_PROP_ADV_100HDX_CAP:
3304 		case MAC_PROP_ADV_100T4_CAP:
3305 		case MAC_PROP_ADV_10FDX_CAP:
3306 		case MAC_PROP_ADV_10HDX_CAP:
3307 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3308 			return;
3309 
3310 		case MAC_PROP_EN_100FDX_CAP:
3311 			val = (vrp->chip.mii.status &
3312 			    MII_STATUS_100_BASEX_FD) != 0;
3313 			break;
3314 
3315 		case MAC_PROP_EN_100HDX_CAP:
3316 			val = (vrp->chip.mii.status &
3317 			    MII_STATUS_100_BASEX) != 0;
3318 			break;
3319 
3320 		case MAC_PROP_EN_100T4_CAP:
3321 			val = (vrp->chip.mii.status &
3322 			    MII_STATUS_100_BASE_T4) != 0;
3323 			break;
3324 
3325 		case MAC_PROP_EN_10FDX_CAP:
3326 			val = (vrp->chip.mii.status &
3327 			    MII_STATUS_10_FD) != 0;
3328 			break;
3329 
3330 		case MAC_PROP_EN_10HDX_CAP:
3331 			val = (vrp->chip.mii.status &
3332 			    MII_STATUS_10) != 0;
3333 			break;
3334 
3335 		case MAC_PROP_AUTONEG:
3336 		case MAC_PROP_EN_AUTONEG:
3337 			val = (vrp->chip.mii.status &
3338 			    MII_STATUS_CANAUTONEG) != 0;
3339 			break;
3340 
3341 		case MAC_PROP_FLOWCTRL:
3342 			mac_prop_info_set_default_link_flowctrl(prh,
3343 			    LINK_FLOWCTRL_BI);
3344 			return;
3345 
3346 		case MAC_PROP_MTU:
3347 			mac_prop_info_set_range_uint32(prh,
3348 			    ETHERMTU, ETHERMTU);
3349 			return;
3350 
3351 		case MAC_PROP_DUPLEX:
3352 			/*
3353 			 * Writability depends on autoneg.
3354 			 */
3355 			perm = ((vrp->chip.mii.control &
3356 			    MII_CONTROL_ANE) == 0) ? MAC_PROP_PERM_RW :
3357 			    MAC_PROP_PERM_READ;
3358 			mac_prop_info_set_perm(prh, perm);
3359 
3360 			if (perm == MAC_PROP_PERM_RW) {
3361 				mac_prop_info_set_default_uint8(prh,
3362 				    VR_LINK_DUPLEX_FULL);
3363 			}
3364 			return;
3365 
3366 		case MAC_PROP_SPEED:
3367 			perm = ((vrp->chip.mii.control &
3368 			    MII_CONTROL_ANE) == 0) ?
3369 			    MAC_PROP_PERM_RW : MAC_PROP_PERM_READ;
3370 			mac_prop_info_set_perm(prh, perm);
3371 
3372 			if (perm == MAC_PROP_PERM_RW) {
3373 				mac_prop_info_set_default_uint64(prh,
3374 				    100 * 1000 * 1000);
3375 			}
3376 			return;
3377 
3378 		case MAC_PROP_STATUS:
3379 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3380 			return;
3381 
3382 		default:
3383 			return;
3384 		}
3385 
3386 		mac_prop_info_set_default_uint8(prh, val);
3387 }
3388 
3389 /*
3390  * Set a property of the device.
3391  */
3392 int
3393 vr_mac_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3394 	uint_t pr_valsize, const void *pr_val)
3395 {
3396 	vr_t		*vrp;
3397 	uint32_t	err;
3398 	uint64_t	val;
3399 
3400 	/* Since we have no private properties */
3401 	_NOTE(ARGUNUSED(pr_name))
3402 
3403 	err = 0;
3404 	vrp = (vr_t *)arg;
3405 	mutex_enter(&vrp->oplock);
3406 
3407 	/*
3408 	 * The current set of public property values are passed as integers
3409 	 * Private properties are passed as strings in pr_val length pr_valsize.
3410 	 */
3411 	if (pr_num != MAC_PROP_PRIVATE) {
3412 		if (pr_valsize == sizeof (uint64_t))
3413 			val = *(uint64_t *)pr_val;
3414 		else if (pr_valsize == sizeof (uint32_t))
3415 			val = *(uint32_t *)pr_val;
3416 		else if (pr_valsize == sizeof (uint16_t))
3417 			val = *(uint32_t *)pr_val;
3418 		else if (pr_valsize == sizeof (uint8_t))
3419 			val = *(uint8_t *)pr_val;
3420 		else {
3421 			mutex_exit(&vrp->oplock);
3422 			return (EINVAL);
3423 		}
3424 	}
3425 
3426 	switch (pr_num) {
3427 		case MAC_PROP_DUPLEX:
3428 			if ((vrp->chip.mii.control & MII_CONTROL_ANE) == 0) {
3429 				if (val == LINK_DUPLEX_FULL)
3430 					vrp->chip.mii.control |=
3431 					    MII_CONTROL_FDUPLEX;
3432 				else if (val == LINK_DUPLEX_HALF)
3433 					vrp->chip.mii.control &=
3434 					    ~MII_CONTROL_FDUPLEX;
3435 				else
3436 					err = EINVAL;
3437 			} else
3438 				err = EINVAL;
3439 			break;
3440 
3441 		case MAC_PROP_EN_100FDX_CAP:
3442 			if (val == 0)
3443 				vrp->param.anadv_en &=
3444 				    ~MII_ABILITY_100BASE_TX_FD;
3445 			else
3446 				vrp->param.anadv_en |=
3447 				    MII_ABILITY_100BASE_TX_FD;
3448 			break;
3449 
3450 		case MAC_PROP_EN_100HDX_CAP:
3451 			if (val == 0)
3452 				vrp->param.anadv_en &=
3453 				    ~MII_ABILITY_100BASE_TX;
3454 			else
3455 				vrp->param.anadv_en |=
3456 				    MII_ABILITY_100BASE_TX;
3457 			break;
3458 
3459 		case MAC_PROP_EN_100T4_CAP:
3460 			if (val == 0)
3461 				vrp->param.anadv_en &=
3462 				    ~MII_ABILITY_100BASE_T4;
3463 			else
3464 				vrp->param.anadv_en |=
3465 				    MII_ABILITY_100BASE_T4;
3466 			break;
3467 
3468 		case MAC_PROP_EN_10FDX_CAP:
3469 			if (val == 0)
3470 				vrp->param.anadv_en &=
3471 				    ~MII_ABILITY_10BASE_T_FD;
3472 			else
3473 				vrp->param.anadv_en |=
3474 				    MII_ABILITY_10BASE_T_FD;
3475 			break;
3476 
3477 		case MAC_PROP_EN_10HDX_CAP:
3478 			if (val == 0)
3479 				vrp->param.anadv_en &=
3480 				    ~MII_ABILITY_10BASE_T;
3481 			else
3482 				vrp->param.anadv_en |=
3483 				    MII_ABILITY_10BASE_T;
3484 			break;
3485 
3486 		case MAC_PROP_AUTONEG:
3487 		case MAC_PROP_EN_AUTONEG:
3488 			if (val == 0) {
3489 				vrp->param.an_en = VR_LINK_AUTONEG_OFF;
3490 				vrp->chip.mii.control &= ~MII_CONTROL_ANE;
3491 			} else {
3492 				vrp->param.an_en = VR_LINK_AUTONEG_ON;
3493 				if ((vrp->chip.mii.status &
3494 				    MII_STATUS_CANAUTONEG) != 0)
3495 					vrp->chip.mii.control |=
3496 					    MII_CONTROL_ANE;
3497 				else
3498 					err = EINVAL;
3499 			}
3500 			break;
3501 
3502 		case MAC_PROP_FLOWCTRL:
3503 			if (val == LINK_FLOWCTRL_NONE)
3504 				vrp->param.anadv_en &= ~MII_ABILITY_PAUSE;
3505 			else if (val == LINK_FLOWCTRL_BI)
3506 				vrp->param.anadv_en |= MII_ABILITY_PAUSE;
3507 			else
3508 				err = EINVAL;
3509 			break;
3510 
3511 		case MAC_PROP_MTU:
3512 			if (val >= ETHERMIN && val <= ETHERMTU)
3513 				vrp->param.mtu = (uint32_t)val;
3514 			else
3515 				err = EINVAL;
3516 			break;
3517 
3518 		case MAC_PROP_SPEED:
3519 			if (val == 10 * 1000 * 1000)
3520 				vrp->chip.link.speed =
3521 				    VR_LINK_SPEED_10MBS;
3522 			else if (val == 100 * 1000 * 1000)
3523 				vrp->chip.link.speed =
3524 				    VR_LINK_SPEED_100MBS;
3525 			else
3526 				err = EINVAL;
3527 			break;
3528 
3529 		default:
3530 			err = ENOTSUP;
3531 			break;
3532 	}
3533 	if (err == 0 && pr_num != MAC_PROP_PRIVATE) {
3534 		vrp->chip.mii.anadv = vrp->param.anadv_en &
3535 		    (vrp->param.an_phymask & vrp->param.an_macmask);
3536 		vr_link_init(vrp);
3537 	}
3538 	mutex_exit(&vrp->oplock);
3539 	return (err);
3540 }
3541 
3542 
3543 /*
3544  * Logging and debug functions.
3545  */
/*
 * Shared state for message formatting; every field is set and read while
 * holding prtdata.mutex (entered in vr_log()/vr_debug(), asserted in
 * vr_vprt()/vr_prt()).
 */
static struct {
	kmutex_t mutex[1];	/* serializes use of the fields below */
	const char *ifname;	/* prefix: interface or module name */
	const char *fmt;	/* cmn_err() format taking ifname and msg */
	int level;		/* cmn_err() severity (CE_*) */
} prtdata;
3552 
3553 static void
3554 vr_vprt(const char *fmt, va_list args)
3555 {
3556 	char buf[512];
3557 
3558 	ASSERT(mutex_owned(prtdata.mutex));
3559 	(void) vsnprintf(buf, sizeof (buf), fmt, args);
3560 	cmn_err(prtdata.level, prtdata.fmt, prtdata.ifname, buf);
3561 }
3562 
3563 static void
3564 vr_log(vr_t *vrp, int level, const char *fmt, ...)
3565 {
3566 	va_list args;
3567 
3568 	mutex_enter(prtdata.mutex);
3569 	prtdata.ifname = vrp->ifname;
3570 	prtdata.fmt = "!%s: %s";
3571 	prtdata.level = level;
3572 
3573 	va_start(args, fmt);
3574 	vr_vprt(fmt, args);
3575 	va_end(args);
3576 
3577 	mutex_exit(prtdata.mutex);
3578 }
3579 
3580 #if defined(DEBUG)
/*
 * Print one debug message and release the log mutex.
 * NOTE: the mutex is NOT taken here -- it was entered by vr_debug(),
 * which returns this function; the pair forms one locked sequence.
 */
static void
vr_prt(const char *fmt, ...)
{
	va_list args;

	ASSERT(mutex_owned(prtdata.mutex));

	va_start(args, fmt);
	vr_vprt(fmt, args);
	va_end(args);

	/* Drops the lock acquired in vr_debug(). */
	mutex_exit(prtdata.mutex);
}
3594 
/*
 * Return a printf-like function for debug output.
 * Acquires the log mutex and presets the prefix, format and severity;
 * the returned vr_prt() prints exactly one message and releases the
 * mutex, so each call of the form vr_debug()(fmt, ...) is one locked
 * print sequence.
 */
void
(*vr_debug())(const char *fmt, ...)
{
	mutex_enter(prtdata.mutex);
	prtdata.ifname = MODULENAME;
	prtdata.fmt = "^%s: %s\n";
	prtdata.level = CE_CONT;

	return (vr_prt);
}
3605 #endif	/* DEBUG */
3606 
/*
 * DDI device operations: attach/detach/quiesce are implemented, the
 * getinfo/probe/reset entry points are stubbed (nulldev/nodev).
 */
DDI_DEFINE_STREAM_OPS(vr_dev_ops, nulldev, nulldev, vr_attach, vr_detach,
nodev, NULL, D_MP, NULL, vr_quiesce);

static struct modldrv vr_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	vr_ident,		/* short description */
	&vr_dev_ops		/* driver specific ops */
};

/* Loadable-module linkage: a single driver module. */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&vr_modldrv, NULL
};
3619 
/*
 * Loadable module entry point: report module information.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
3625 
3626 int
3627 _init(void)
3628 {
3629 	int	status;
3630 
3631 	mac_init_ops(&vr_dev_ops, MODULENAME);
3632 	status = mod_install(&modlinkage);
3633 	if (status == DDI_SUCCESS)
3634 		mutex_init(prtdata.mutex, NULL, MUTEX_DRIVER, NULL);
3635 	else
3636 		mac_fini_ops(&vr_dev_ops);
3637 	return (status);
3638 }
3639 
3640 int
3641 _fini(void)
3642 {
3643 	int status;
3644 
3645 	status = mod_remove(&modlinkage);
3646 	if (status == 0) {
3647 		mac_fini_ops(&vr_dev_ops);
3648 		mutex_destroy(prtdata.mutex);
3649 	}
3650 	return (status);
3651 }
3652