1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright (c) 2018, Joyent, Inc.
29  */
30 
31 #include <sys/types.h>
32 #include <sys/stream.h>
33 #include <sys/strsun.h>
34 #include <sys/stat.h>
35 #include <sys/pci.h>
36 #include <sys/modctl.h>
37 #include <sys/kstat.h>
38 #include <sys/ethernet.h>
39 #include <sys/devops.h>
40 #include <sys/debug.h>
41 #include <sys/conf.h>
42 #include <sys/mac.h>
43 #include <sys/mac_provider.h>
44 #include <sys/mac_ether.h>
45 #include <sys/sysmacros.h>
46 #include <sys/dditypes.h>
47 #include <sys/ddi.h>
48 #include <sys/sunddi.h>
49 #include <sys/miiregs.h>
50 #include <sys/byteorder.h>
51 #include <sys/note.h>
52 #include <sys/vlan.h>
53 
54 #include "vr.h"
55 #include "vr_impl.h"
56 
57 /*
58  * VR in a nutshell
59  * The card uses two rings of data structures to communicate with the host.
60  * These are referred to as "descriptor rings" and there is one for transmit
61  * (TX) and one for receive (RX).
62  *
63  * The driver uses a "DMA buffer" data type for mapping to those descriptor
64  * rings. This is a structure with handles and a DMA'able buffer attached to it.
65  *
66  * Receive
67  * The receive ring is filled with DMA buffers. Received packets are copied into
68  * newly allocated mblks and passed upstream.
69  *
70  * Transmit
71  * Each transmit descriptor has a DMA buffer attached to it. The data of TX
72  * packets is copied into the DMA buffer which is then enqueued for
73  * transmission.
74  *
75  * Reclaim of transmitted packets is done as a result of a transmit completion
76  * interrupt, which is generated at least 3 times per ring.
77  */
78 
79 #if defined(DEBUG)
80 uint32_t	vrdebug = 1;
81 #define	VR_DEBUG(args)	do {				\
82 		if (vrdebug > 0)			\
83 			(*vr_debug()) args;		\
84 			_NOTE(CONSTANTCONDITION)	\
85 		} while (0)
86 static	void	vr_prt(const char *fmt, ...);
87 	void	(*vr_debug())(const char *fmt, ...);
88 #else
89 #define	VR_DEBUG(args)	do ; _NOTE(CONSTANTCONDITION) while (0)
90 #endif
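
/*
 * Usage note (illustrative only; no such call exists in this file): because
 * the macro body expands "args" directly behind the function pointer, callers
 * must pass a fully parenthesized argument list, for example:
 *
 *	VR_DEBUG(("%s: rx done, %d packets", vrp->ifname, n));
 */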
91 
92 static char vr_ident[] = "VIA Rhine Ethernet";
93 
94 /*
95  * Attributes for accessing registers and memory descriptors for this device.
96  */
97 static ddi_device_acc_attr_t vr_dev_dma_accattr = {
98 	DDI_DEVICE_ATTR_V0,
99 	DDI_STRUCTURE_LE_ACC,
100 	DDI_STRICTORDER_ACC
101 };
102 
103 /*
104  * Attributes for accessing data.
105  */
106 static ddi_device_acc_attr_t vr_data_dma_accattr = {
107 	DDI_DEVICE_ATTR_V0,
108 	DDI_NEVERSWAP_ACC,
109 	DDI_STRICTORDER_ACC
110 };
111 
112 /*
113  * DMA attributes for descriptors for communication with the device
114  * This driver assumes that all descriptors of one ring fit in one consecutive
115  * memory area of max 4K (256 descriptors) that does not cross a page boundary.
116  * Therefore, we request 4K alignment.
117  */
118 static ddi_dma_attr_t vr_dev_dma_attr = {
119 	DMA_ATTR_V0,			/* version number */
120 	0,				/* low DMA address range */
121 	0xFFFFFFFF,			/* high DMA address range */
122 	0x7FFFFFFF,			/* DMA counter register */
123 	0x1000,				/* DMA address alignment */
124 	0x7F,				/* DMA burstsizes */
125 	1,				/* min effective DMA size */
126 	0xFFFFFFFF,			/* max DMA xfer size */
127 	0xFFFFFFFF,			/* segment boundary */
128 	1,				/* s/g list length */
129 	1,				/* granularity of device */
130 	0				/* DMA transfer flags */
131 };
132 
133 /*
134  * DMA attributes for the data moved to/from the device
135  * Note that the alignment is set to 2K so that a 1500 byte packet never
136  * crosses a page boundary and thus a DMA transfer is not split up into
137  * multiple cookies with a 4K/8K pagesize.
138  */
139 static ddi_dma_attr_t vr_data_dma_attr = {
140 	DMA_ATTR_V0,			/* version number */
141 	0,				/* low DMA address range */
142 	0xFFFFFFFF,			/* high DMA address range */
143 	0x7FFFFFFF,			/* DMA counter register */
144 	0x800,				/* DMA address alignment */
145 	0xfff,				/* DMA burstsizes */
146 	1,				/* min effective DMA size */
147 	0xFFFFFFFF,			/* max DMA xfer size */
148 	0xFFFFFFFF,			/* segment boundary */
149 	1,				/* s/g list length */
150 	1,				/* granularity of device */
151 	0				/* DMA transfer flags */
152 };
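
/*
 * A worked example of the alignment reasoning above (assuming a 4K page):
 * a 2K-aligned buffer starts at page offset 0 or 2048, and a full-size
 * Ethernet frame (at most 1518 bytes including FCS, i.e. less than 2K)
 * therefore ends before the next page boundary. As a result,
 * ddi_dma_addr_bind_handle() can always map such a buffer with a single
 * cookie; the same holds for an 8K page.
 */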
153 
154 static mac_callbacks_t vr_mac_callbacks = {
155 	MC_SETPROP|MC_GETPROP|MC_PROPINFO, /* Which callbacks are set */
156 	vr_mac_getstat,		/* Get the value of a statistic */
157 	vr_mac_start,		/* Start the device */
158 	vr_mac_stop,		/* Stop the device */
159 	vr_mac_set_promisc,	/* Enable or disable promiscuous mode */
160 	vr_mac_set_multicast,	/* Enable or disable a multicast addr */
161 	vr_mac_set_ether_addr,	/* Set the unicast MAC address */
162 	vr_mac_tx_enqueue_list,	/* Transmit a packet */
163 	NULL,
164 	NULL,			/* Process an unknown ioctl */
165 	NULL,			/* Get capability information */
166 	NULL,			/* Open the device */
167 	NULL,			/* Close the device */
168 	vr_mac_setprop,		/* Set properties of the device */
169 	vr_mac_getprop,		/* Get properties of the device */
170 	vr_mac_propinfo		/* Get properties attributes */
171 };
172 
173 /*
174  * Table with bugs and features for each incarnation of the card.
175  */
176 static const chip_info_t vr_chip_info [] = {
177 	{
178 		0x0, 0x0,
179 		"VIA Rhine Fast Ethernet",
180 		(VR_BUG_NO_MEMIO),
181 		(VR_FEATURE_NONE)
182 	},
183 	{
184 		0x04, 0x21,
185 		"VIA VT86C100A Fast Ethernet",
186 		(VR_BUG_NEEDMODE2PCEROPT | VR_BUG_NO_TXQUEUEING |
187 		    VR_BUG_NEEDMODE10T | VR_BUG_TXALIGN | VR_BUG_NO_MEMIO |
188 		    VR_BUG_MIIPOLLSTOP),
189 		(VR_FEATURE_NONE)
190 	},
191 	{
192 		0x40, 0x41,
193 		"VIA VT6102-A Rhine II Fast Ethernet",
194 		(VR_BUG_NEEDMODE2PCEROPT),
195 		(VR_FEATURE_RX_PAUSE_CAP)
196 	},
197 	{
198 		0x42, 0x7f,
199 		"VIA VT6102-C Rhine II Fast Ethernet",
200 		(VR_BUG_NEEDMODE2PCEROPT),
201 		(VR_FEATURE_RX_PAUSE_CAP)
202 	},
203 	{
204 		0x80, 0x82,
205 		"VIA VT6105-A Rhine III Fast Ethernet",
206 		(VR_BUG_NONE),
207 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
208 	},
209 	{
210 		0x83, 0x89,
211 		"VIA VT6105-B Rhine III Fast Ethernet",
212 		(VR_BUG_NONE),
213 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
214 	},
215 	{
216 		0x8a, 0x8b,
217 		"VIA VT6105-LOM Rhine III Fast Ethernet",
218 		(VR_BUG_NONE),
219 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
220 	},
221 	{
222 		0x8c, 0x8c,
223 		"VIA VT6107-A0 Rhine III Fast Ethernet",
224 		(VR_BUG_NONE),
225 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
226 	},
227 	{
228 		0x8d, 0x8f,
229 		"VIA VT6107-A1 Rhine III Fast Ethernet",
230 		(VR_BUG_NONE),
231 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
232 		    VR_FEATURE_MRDLNMULTIPLE)
233 	},
234 	{
235 		0x90, 0x93,
236 		"VIA VT6105M-A0 Rhine III Fast Ethernet Management Adapter",
237 		(VR_BUG_NONE),
238 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
239 		    VR_FEATURE_TXCHKSUM | VR_FEATURE_RXCHKSUM |
240 		    VR_FEATURE_CAMSUPPORT | VR_FEATURE_VLANTAGGING |
241 		    VR_FEATURE_MIBCOUNTER)
242 	},
243 	{
244 		0x94, 0xff,
245 		"VIA VT6105M-B1 Rhine III Fast Ethernet Management Adapter",
246 		(VR_BUG_NONE),
247 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
248 		    VR_FEATURE_TXCHKSUM | VR_FEATURE_RXCHKSUM |
249 		    VR_FEATURE_CAMSUPPORT | VR_FEATURE_VLANTAGGING |
250 		    VR_FEATURE_MIBCOUNTER)
251 	}
252 };
253 
254 /*
255  * Function prototypes
256  */
257 static	vr_result_t	vr_add_intr(vr_t *vrp);
258 static	void		vr_remove_intr(vr_t *vrp);
259 static	int32_t		vr_cam_index(vr_t *vrp, const uint8_t *maddr);
260 static	uint32_t	ether_crc_be(const uint8_t *address);
261 static	void		vr_tx_enqueue_msg(vr_t *vrp, mblk_t *mp);
262 static	void		vr_log(vr_t *vrp, int level, const char *fmt, ...);
263 static	int		vr_resume(dev_info_t *devinfo);
264 static	int		vr_suspend(dev_info_t *devinfo);
265 static	vr_result_t	vr_bus_config(vr_t *vrp);
266 static	void		vr_bus_unconfig(vr_t *vrp);
267 static	void		vr_reset(vr_t *vrp);
268 static	int		vr_start(vr_t *vrp);
269 static	int		vr_stop(vr_t *vrp);
270 static	vr_result_t	vr_rings_init(vr_t *vrp);
271 static	void		vr_rings_fini(vr_t *vrp);
272 static	vr_result_t	vr_alloc_ring(vr_t *vrp, vr_ring_t *r, size_t n);
273 static	void		vr_free_ring(vr_ring_t *r, size_t n);
274 static	vr_result_t	vr_rxring_init(vr_t *vrp);
275 static	void		vr_rxring_fini(vr_t *vrp);
276 static	vr_result_t	vr_txring_init(vr_t *vrp);
277 static	void		vr_txring_fini(vr_t *vrp);
278 static	vr_result_t	vr_alloc_dmabuf(vr_t *vrp, vr_data_dma_t *dmap,
279 			    uint_t flags);
280 static	void		vr_free_dmabuf(vr_data_dma_t *dmap);
281 static	void		vr_param_init(vr_t *vrp);
282 static	mblk_t		*vr_receive(vr_t *vrp);
283 static	void		vr_tx_reclaim(vr_t *vrp);
284 static	void		vr_periodic(void *p);
285 static	void		vr_error(vr_t *vrp);
286 static	void		vr_phy_read(vr_t *vrp, int offset, uint16_t *value);
287 static	void		vr_phy_write(vr_t *vrp, int offset, uint16_t value);
288 static	void		vr_phy_autopoll_disable(vr_t *vrp);
289 static	void		vr_phy_autopoll_enable(vr_t *vrp);
290 static	void		vr_link_init(vr_t *vrp);
291 static	void		vr_link_state(vr_t *vrp);
292 static	void		vr_kstats_init(vr_t *vrp);
293 static	int		vr_update_kstats(kstat_t *ksp, int access);
294 static	void		vr_remove_kstats(vr_t *vrp);
295 
296 static int
297 vr_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
298 {
299 	vr_t		*vrp;
300 	mac_register_t	*macreg;
301 
302 	if (cmd == DDI_RESUME)
303 		return (vr_resume(devinfo));
304 	else if (cmd != DDI_ATTACH)
305 		return (DDI_FAILURE);
306 
307 	/*
308 	 * Attach.
309 	 */
310 	vrp = kmem_zalloc(sizeof (vr_t), KM_SLEEP);
311 	ddi_set_driver_private(devinfo, vrp);
312 	vrp->devinfo = devinfo;
313 
314 	/*
315 	 * Store the name+instance of the module.
316 	 */
317 	(void) snprintf(vrp->ifname, sizeof (vrp->ifname), "%s%d",
318 	    MODULENAME, ddi_get_instance(devinfo));
319 
320 	/*
321 	 * Bus initialization.
322 	 */
323 	if (vr_bus_config(vrp) != VR_SUCCESS) {
324 		vr_log(vrp, CE_WARN, "vr_bus_config failed");
325 		goto fail0;
326 	}
327 
328 	/*
329 	 * Initialize default parameters.
330 	 */
331 	vr_param_init(vrp);
332 
333 	/*
334 	 * Setup the descriptor rings.
335 	 */
336 	if (vr_rings_init(vrp) != VR_SUCCESS) {
337 		vr_log(vrp, CE_WARN, "vr_rings_init failed");
338 		goto fail1;
339 	}
340 
341 	/*
342 	 * Initialize kstats.
343 	 */
344 	vr_kstats_init(vrp);
345 
346 	/*
347 	 * Add interrupt to the OS.
348 	 */
349 	if (vr_add_intr(vrp) != VR_SUCCESS) {
350 		vr_log(vrp, CE_WARN, "vr_add_intr failed in attach");
351 		goto fail3;
352 	}
353 
354 	/*
355 	 * Add mutexes.
356 	 */
357 	mutex_init(&vrp->intrlock, NULL, MUTEX_DRIVER,
358 	    DDI_INTR_PRI(vrp->intr_pri));
359 	mutex_init(&vrp->oplock, NULL, MUTEX_DRIVER, NULL);
360 	mutex_init(&vrp->tx.lock, NULL, MUTEX_DRIVER, NULL);
361 
362 	/*
363 	 * Enable interrupt.
364 	 */
365 	if (ddi_intr_enable(vrp->intr_hdl) != DDI_SUCCESS) {
366 		vr_log(vrp, CE_NOTE, "ddi_intr_enable failed");
367 		goto fail5;
368 	}
369 
370 	/*
371 	 * Register with parent, mac.
372 	 */
373 	if ((macreg = mac_alloc(MAC_VERSION)) == NULL) {
374 		vr_log(vrp, CE_WARN, "mac_alloc failed in attach");
375 		goto fail6;
376 	}
377 
378 	macreg->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
379 	macreg->m_driver = vrp;
380 	macreg->m_dip = devinfo;
381 	macreg->m_src_addr = vrp->vendor_ether_addr;
382 	macreg->m_callbacks = &vr_mac_callbacks;
383 	macreg->m_min_sdu = 0;
384 	macreg->m_max_sdu = ETHERMTU;
385 	macreg->m_margin = VLAN_TAGSZ;
386 
387 	if (mac_register(macreg, &vrp->machdl) != 0) {
388 		vr_log(vrp, CE_WARN, "mac_register failed in attach");
389 		goto fail7;
390 	}
391 	mac_free(macreg);
392 	return (DDI_SUCCESS);
393 
394 fail7:
395 	mac_free(macreg);
396 fail6:
397 	(void) ddi_intr_disable(vrp->intr_hdl);
398 fail5:
399 	mutex_destroy(&vrp->tx.lock);
400 	mutex_destroy(&vrp->oplock);
401 	mutex_destroy(&vrp->intrlock);
402 	vr_remove_intr(vrp);
403 fail3:
404 	vr_remove_kstats(vrp);
405 	vr_rings_fini(vrp);
406 fail1:
407 	vr_bus_unconfig(vrp);
408 fail0:
409 	kmem_free(vrp, sizeof (vr_t));
410 	return (DDI_FAILURE);
411 }
412 
413 static int
414 vr_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
415 {
416 	vr_t		*vrp;
417 
418 	vrp = ddi_get_driver_private(devinfo);
419 
420 	if (cmd == DDI_SUSPEND)
421 		return (vr_suspend(devinfo));
422 	else if (cmd != DDI_DETACH)
423 		return (DDI_FAILURE);
424 
425 	if (vrp->chip.state == CHIPSTATE_RUNNING)
426 		return (DDI_FAILURE);
427 
428 	/*
429 	 * Try to un-register from the MAC layer.
430 	 */
431 	if (mac_unregister(vrp->machdl) != 0)
432 		return (DDI_FAILURE);
433 
434 	(void) ddi_intr_disable(vrp->intr_hdl);
435 	vr_remove_intr(vrp);
436 	mutex_destroy(&vrp->tx.lock);
437 	mutex_destroy(&vrp->oplock);
438 	mutex_destroy(&vrp->intrlock);
439 	vr_remove_kstats(vrp);
440 	vr_rings_fini(vrp);
441 	vr_bus_unconfig(vrp);
442 	kmem_free(vrp, sizeof (vr_t));
443 	return (DDI_SUCCESS);
444 }
445 
446 /*
447  * quiesce the card for fast reboot.
448  */
449 int
450 vr_quiesce(dev_info_t *dev_info)
451 {
452 	vr_t	*vrp;
453 
454 	vrp = (vr_t *)ddi_get_driver_private(dev_info);
455 
456 	/*
457 	 * Stop interrupts.
458 	 */
459 	VR_PUT16(vrp->acc_reg, VR_ICR0, 0);
460 	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);
461 
462 	/*
463 	 * Stop DMA.
464 	 */
465 	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_STOP);
466 	return (DDI_SUCCESS);
467 }
468 
469 /*
470  * Add an interrupt for our device to the OS.
471  */
472 static vr_result_t
473 vr_add_intr(vr_t *vrp)
474 {
475 	int	nintrs;
476 	int	rc;
477 
478 	rc = ddi_intr_alloc(vrp->devinfo, &vrp->intr_hdl,
479 	    DDI_INTR_TYPE_FIXED,	/* type */
480 	    0,			/* number */
481 	    1,			/* count */
482 	    &nintrs,		/* actualp */
483 	    DDI_INTR_ALLOC_STRICT);
484 
485 	if (rc != DDI_SUCCESS) {
486 		vr_log(vrp, CE_NOTE, "ddi_intr_alloc failed: %d", rc);
487 		return (VR_FAILURE);
488 	}
489 
490 	rc = ddi_intr_add_handler(vrp->intr_hdl, vr_intr, vrp, NULL);
491 	if (rc != DDI_SUCCESS) {
492 		vr_log(vrp, CE_NOTE, "ddi_intr_add_handler failed");
493 		if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
494 			vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
495 		return (VR_FAILURE);
496 	}
497 
498 	rc = ddi_intr_get_pri(vrp->intr_hdl, &vrp->intr_pri);
499 	if (rc != DDI_SUCCESS) {
500 		vr_log(vrp, CE_NOTE, "ddi_intr_get_pri failed");
501 		if (ddi_intr_remove_handler(vrp->intr_hdl) != DDI_SUCCESS)
502 			vr_log(vrp, CE_NOTE, "ddi_intr_remove_handler failed");
503 
504 		if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
505 			vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
506 
507 		return (VR_FAILURE);
508 	}
509 	return (VR_SUCCESS);
510 }
511 
512 /*
513  * Remove our interrupt from the OS.
514  */
515 static void
516 vr_remove_intr(vr_t *vrp)
517 {
518 	if (ddi_intr_remove_handler(vrp->intr_hdl) != DDI_SUCCESS)
519 		vr_log(vrp, CE_NOTE, "ddi_intr_remove_handler failed");
520 
521 	if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
522 		vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
523 }
524 
525 /*
526  * Resume operation after suspend.
527  */
528 static int
529 vr_resume(dev_info_t *devinfo)
530 {
531 	vr_t *vrp;
532 
533 	vrp = (vr_t *)ddi_get_driver_private(devinfo);
534 	mutex_enter(&vrp->oplock);
535 	if (vrp->chip.state == CHIPSTATE_SUSPENDED_RUNNING)
536 		(void) vr_start(vrp);
537 	mutex_exit(&vrp->oplock);
538 	return (DDI_SUCCESS);
539 }
540 
541 /*
542  * Suspend operation.
543  */
544 static int
545 vr_suspend(dev_info_t *devinfo)
546 {
547 	vr_t *vrp;
548 
549 	vrp = (vr_t *)ddi_get_driver_private(devinfo);
550 	mutex_enter(&vrp->oplock);
551 	if (vrp->chip.state == CHIPSTATE_RUNNING) {
552 		(void) vr_stop(vrp);
553 		vrp->chip.state = CHIPSTATE_SUSPENDED_RUNNING;
554 	}
555 	mutex_exit(&vrp->oplock);
556 	return (DDI_SUCCESS);
557 }
558 
559 /*
560  * Initial bus- and device configuration during attach(9E).
561  */
562 static vr_result_t
563 vr_bus_config(vr_t *vrp)
564 {
565 	uint32_t		addr;
566 	int			n, nsets, rc;
567 	uint_t			elem;
568 	pci_regspec_t		*regs;
569 
570 	/*
571 	 * Get the reg property which describes the various access methods.
572 	 */
573 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, vrp->devinfo,
574 	    0, "reg", (int **)&regs, &elem) != DDI_PROP_SUCCESS) {
575 		vr_log(vrp, CE_WARN, "Can't get reg property");
576 		return (VR_FAILURE);
577 	}
578 	nsets = (elem * sizeof (uint_t)) / sizeof (pci_regspec_t);
579 
580 	/*
581 	 * Setup access to all available sets.
582 	 */
583 	vrp->nsets = nsets;
584 	vrp->regset = kmem_zalloc(nsets * sizeof (vr_acc_t), KM_SLEEP);
585 	for (n = 0; n < nsets; n++) {
586 		rc = ddi_regs_map_setup(vrp->devinfo, n,
587 		    &vrp->regset[n].addr, 0, 0,
588 		    &vr_dev_dma_accattr,
589 		    &vrp->regset[n].hdl);
590 		if (rc != DDI_SUCCESS) {
591 			vr_log(vrp, CE_NOTE,
592 			    "Setup of register set %d failed", n);
593 			while (--n >= 0)
594 				ddi_regs_map_free(&vrp->regset[n].hdl);
595 			kmem_free(vrp->regset, nsets * sizeof (vr_acc_t));
596 			ddi_prop_free(regs);
597 			return (VR_FAILURE);
598 		}
599 		bcopy(&regs[n], &vrp->regset[n].reg, sizeof (pci_regspec_t));
600 	}
601 	ddi_prop_free(regs);
602 
603 	/*
604 	 * Assign type-named pointers to the register sets.
605 	 */
606 	for (n = 0; n < nsets; n++) {
607 		addr = vrp->regset[n].reg.pci_phys_hi & PCI_REG_ADDR_M;
608 		if (addr == PCI_ADDR_CONFIG && vrp->acc_cfg == NULL)
609 			vrp->acc_cfg = &vrp->regset[n];
610 		else if (addr == PCI_ADDR_IO && vrp->acc_io == NULL)
611 			vrp->acc_io = &vrp->regset[n];
612 		else if (addr == PCI_ADDR_MEM32 && vrp->acc_mem == NULL)
613 			vrp->acc_mem = &vrp->regset[n];
614 	}
615 
616 	/*
617 	 * Assure there is one of each type.
618 	 */
619 	if (vrp->acc_cfg == NULL ||
620 	    vrp->acc_io == NULL ||
621 	    vrp->acc_mem == NULL) {
622 		for (n = 0; n < nsets; n++)
623 			ddi_regs_map_free(&vrp->regset[n].hdl);
624 		kmem_free(vrp->regset, nsets * sizeof (vr_acc_t));
625 		vr_log(vrp, CE_WARN,
626 		    "Config-, I/O- and memory sets not available");
627 		return (VR_FAILURE);
628 	}
629 
630 	/*
631 	 * Store vendor/device/revision.
632 	 */
633 	vrp->chip.vendor = VR_GET16(vrp->acc_cfg, PCI_CONF_VENID);
634 	vrp->chip.device = VR_GET16(vrp->acc_cfg, PCI_CONF_DEVID);
635 	vrp->chip.revision = VR_GET16(vrp->acc_cfg, PCI_CONF_REVID);
636 
637 	/*
638 	 * Copy the matching chip_info_t structure.
639 	 */
640 	elem = sizeof (vr_chip_info) / sizeof (chip_info_t);
641 	for (n = 0; n < elem; n++) {
642 		if (vrp->chip.revision >= vr_chip_info[n].revmin &&
643 		    vrp->chip.revision <= vr_chip_info[n].revmax) {
644 			bcopy((void*)&vr_chip_info[n],
645 			    (void*)&vrp->chip.info,
646 			    sizeof (chip_info_t));
647 			break;
648 		}
649 	}
650 
651 	/*
652 	 * If we didn't find a chip_info_t for this card, copy the first
653  * entry of the info structures. This is a generic Rhine with no
654 	 * bugs and no features.
655 	 */
656 	if (vrp->chip.info.name[0] == '\0') {
657 		bcopy((void*)&vr_chip_info[0],
658 		    (void*) &vrp->chip.info,
659 		    sizeof (chip_info_t));
660 	}
661 
662 	/*
663 	 * Tell what is found.
664 	 */
665 	vr_log(vrp, CE_NOTE, "pci%d,%d,%d: %s, revision 0x%0x",
666 	    PCI_REG_BUS_G(vrp->acc_cfg->reg.pci_phys_hi),
667 	    PCI_REG_DEV_G(vrp->acc_cfg->reg.pci_phys_hi),
668 	    PCI_REG_FUNC_G(vrp->acc_cfg->reg.pci_phys_hi),
669 	    vrp->chip.info.name,
670 	    vrp->chip.revision);
671 
672 	/*
673 	 * Assure that the device is prepared for memory space accesses.
674 	 * This should be the default, as the device advertises memory
675 	 * access in its BARs. However, my VT6102 on an EPIA CL board doesn't,
676 	 * and thus we explicitly enable it.
677 	 */
678 	VR_SETBIT8(vrp->acc_io, VR_CFGD, VR_CFGD_MMIOEN);
679 
680 	/*
681 	 * Setup a handle for regular usage, prefer memory space accesses.
682 	 */
683 	if (vrp->acc_mem != NULL &&
684 	    (vrp->chip.info.bugs & VR_BUG_NO_MEMIO) == 0)
685 		vrp->acc_reg = vrp->acc_mem;
686 	else
687 		vrp->acc_reg = vrp->acc_io;
688 
689 	/*
690 	 * Store the vendor's MAC address.
691 	 */
692 	for (n = 0; n < ETHERADDRL; n++) {
693 		vrp->vendor_ether_addr[n] = VR_GET8(vrp->acc_reg,
694 		    VR_ETHERADDR + n);
695 	}
696 	return (VR_SUCCESS);
697 }
698 
699 static void
700 vr_bus_unconfig(vr_t *vrp)
701 {
702 	uint_t	n;
703 
704 	/*
705 	 * Free the register access handles.
706 	 */
707 	for (n = 0; n < vrp->nsets; n++)
708 		ddi_regs_map_free(&vrp->regset[n].hdl);
709 	kmem_free(vrp->regset, vrp->nsets * sizeof (vr_acc_t));
710 }
711 
712 /*
713  * Initialize parameter structures.
714  */
715 static void
716 vr_param_init(vr_t *vrp)
717 {
718 	/*
719 	 * Initialize default link configuration parameters.
720 	 */
721 	vrp->param.an_en = VR_LINK_AUTONEG_ON;
722 	vrp->param.anadv_en = 1; /* Select 802.3 autonegotiation */
723 	vrp->param.anadv_en |= MII_ABILITY_100BASE_T4;
724 	vrp->param.anadv_en |= MII_ABILITY_100BASE_TX_FD;
725 	vrp->param.anadv_en |= MII_ABILITY_100BASE_TX;
726 	vrp->param.anadv_en |= MII_ABILITY_10BASE_T_FD;
727 	vrp->param.anadv_en |= MII_ABILITY_10BASE_T;
728 	/* Not a PHY ability, but advertised on behalf of MAC */
729 	vrp->param.anadv_en |= MII_ABILITY_PAUSE;
730 	vrp->param.mtu = ETHERMTU;
731 
732 	/*
733 	 * Store the PHY identity.
734 	 */
735 	vr_phy_read(vrp, MII_PHYIDH, &vrp->chip.mii.identh);
736 	vr_phy_read(vrp, MII_PHYIDL, &vrp->chip.mii.identl);
737 
738 	/*
739 	 * Clear incapabilities imposed by PHY in phymask.
740 	 */
741 	vrp->param.an_phymask = vrp->param.anadv_en;
742 	vr_phy_read(vrp, MII_STATUS, &vrp->chip.mii.status);
743 	if ((vrp->chip.mii.status & MII_STATUS_10) == 0)
744 		vrp->param.an_phymask &= ~MII_ABILITY_10BASE_T;
745 
746 	if ((vrp->chip.mii.status & MII_STATUS_10_FD) == 0)
747 		vrp->param.an_phymask &= ~MII_ABILITY_10BASE_T_FD;
748 
749 	if ((vrp->chip.mii.status & MII_STATUS_100_BASEX) == 0)
750 		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_TX;
751 
752 	if ((vrp->chip.mii.status & MII_STATUS_100_BASEX_FD) == 0)
753 		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_TX_FD;
754 
755 	if ((vrp->chip.mii.status & MII_STATUS_100_BASE_T4) == 0)
756 		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_T4;
757 
758 	/*
759 	 * Clear incapabilities imposed by MAC in macmask
760 	 * Note that flowcontrol (FCS?) is never masked. All of our adapters
761 	 * have the ability to honor incoming pause frames. Only the newer can
762 	 * transmit pause frames. Since there's no asym flowcontrol in 100Mbit
763 	 * Ethernet, we always advertise (symmetric) pause.
764 	 */
765 	vrp->param.an_macmask = vrp->param.anadv_en;
766 
767 	/*
768 	 * Advertised capabilities is enabled minus incapable.
769 	 */
770 	vrp->chip.mii.anadv = vrp->param.anadv_en &
771 	    (vrp->param.an_phymask & vrp->param.an_macmask);
772 
773 	/*
774 	 * Ensure that autoneg of the PHY matches our default.
775 	 */
776 	if (vrp->param.an_en == VR_LINK_AUTONEG_ON)
777 		vrp->chip.mii.control = MII_CONTROL_ANE;
778 	else
779 		vrp->chip.mii.control =
780 		    (MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
781 }
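
/*
 * Illustration of the masking above (hypothetical values): if anadv_en
 * requests 100BASE-TX full duplex but the PHY's MII_STATUS register lacks
 * MII_STATUS_100_BASEX_FD, that ability is cleared from an_phymask and
 * therefore from chip.mii.anadv, so it is never advertised during
 * autonegotiation.
 */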
782 
783 /*
784  * Setup the descriptor rings.
785  */
786 static vr_result_t
787 vr_rings_init(vr_t *vrp)
788 {
789 
790 	vrp->rx.ndesc = VR_RX_N_DESC;
791 	vrp->tx.ndesc = VR_TX_N_DESC;
792 
793 	/*
794 	 * Create a ring for receive.
795 	 */
796 	if (vr_alloc_ring(vrp, &vrp->rxring, vrp->rx.ndesc) != VR_SUCCESS)
797 		return (VR_FAILURE);
798 
799 	/*
800 	 * Create a ring for transmit.
801 	 */
802 	if (vr_alloc_ring(vrp, &vrp->txring, vrp->tx.ndesc) != VR_SUCCESS) {
803 		vr_free_ring(&vrp->rxring, vrp->rx.ndesc);
804 		return (VR_FAILURE);
805 	}
806 
807 	vrp->rx.ring = vrp->rxring.desc;
808 	vrp->tx.ring = vrp->txring.desc;
809 	return (VR_SUCCESS);
810 }
811 
812 static void
813 vr_rings_fini(vr_t *vrp)
814 {
815 	vr_free_ring(&vrp->rxring, vrp->rx.ndesc);
816 	vr_free_ring(&vrp->txring, vrp->tx.ndesc);
817 }
818 
819 /*
820  * Allocate a descriptor ring
821  * The number of descriptor entries must fit in a single page so that the
822  * whole ring fits in one consecutive space.
823  *  i386:  4K page / 16 byte descriptor = 256 entries
824  *  sparc: 8K page / 16 byte descriptor = 512 entries
825  */
826 static vr_result_t
827 vr_alloc_ring(vr_t *vrp, vr_ring_t *ring, size_t n)
828 {
829 	ddi_dma_cookie_t	desc_dma_cookie;
830 	uint_t			desc_cookiecnt;
831 	int			i, rc;
832 	size_t			rbytes;
833 
834 	/*
835 	 * Allocate a DMA handle for the chip descriptors.
836 	 */
837 	rc = ddi_dma_alloc_handle(vrp->devinfo,
838 	    &vr_dev_dma_attr,
839 	    DDI_DMA_SLEEP,
840 	    NULL,
841 	    &ring->handle);
842 
843 	if (rc != DDI_SUCCESS) {
844 		vr_log(vrp, CE_WARN,
845 		    "ddi_dma_alloc_handle in vr_alloc_ring failed.");
846 		return (VR_FAILURE);
847 	}
848 
849 	/*
850 	 * Allocate memory for the chip descriptors.
851 	 */
852 	rc = ddi_dma_mem_alloc(ring->handle,
853 	    n * sizeof (vr_chip_desc_t),
854 	    &vr_dev_dma_accattr,
855 	    DDI_DMA_CONSISTENT,
856 	    DDI_DMA_SLEEP,
857 	    NULL,
858 	    (caddr_t *)&ring->cdesc,
859 	    &rbytes,
860 	    &ring->acchdl);
861 
862 	if (rc != DDI_SUCCESS) {
863 		vr_log(vrp, CE_WARN,
864 		    "ddi_dma_mem_alloc in vr_alloc_ring failed.");
865 		ddi_dma_free_handle(&ring->handle);
866 		return (VR_FAILURE);
867 	}
868 
869 	/*
870 	 * Map the descriptor memory.
871 	 */
872 	rc = ddi_dma_addr_bind_handle(ring->handle,
873 	    NULL,
874 	    (caddr_t)ring->cdesc,
875 	    rbytes,
876 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
877 	    DDI_DMA_SLEEP,
878 	    NULL,
879 	    &desc_dma_cookie,
880 	    &desc_cookiecnt);
881 
882 	if (rc != DDI_DMA_MAPPED || desc_cookiecnt > 1) {
883 		vr_log(vrp, CE_WARN,
884 		    "ddi_dma_addr_bind_handle in vr_alloc_ring failed: "
885 		    "rc = %d, cookiecnt = %d", rc, desc_cookiecnt);
886 		ddi_dma_mem_free(&ring->acchdl);
887 		ddi_dma_free_handle(&ring->handle);
888 		return (VR_FAILURE);
889 	}
890 	ring->cdesc_paddr = desc_dma_cookie.dmac_address;
891 
892 	/*
893 	 * Allocate memory for the host descriptor ring.
894 	 */
895 	ring->desc =
896 	    (vr_desc_t *)kmem_zalloc(n * sizeof (vr_desc_t), KM_SLEEP);
897 
898 	/*
899 	 * Interlink the descriptors and connect host- to chip descriptors.
900 	 */
901 	for (i = 0; i < n; i++) {
902 		/*
903 		 * Connect the host descriptor to a chip descriptor.
904 		 */
905 		ring->desc[i].cdesc = &ring->cdesc[i];
906 
907 		/*
908 		 * Store the DMA address and offset in the descriptor
909 		 * Offset is for ddi_dma_sync() and paddr is for ddi_get/-put().
910 		 */
911 		ring->desc[i].offset = i * sizeof (vr_chip_desc_t);
912 		ring->desc[i].paddr = ring->cdesc_paddr + ring->desc[i].offset;
913 
914 		/*
915 		 * Link the previous descriptor to this one.
916 		 */
917 		if (i > 0) {
918 			/* Host */
919 			ring->desc[i-1].next = &ring->desc[i];
920 
921 			/* Chip */
922 			ddi_put32(ring->acchdl,
923 			    &ring->cdesc[i-1].next,
924 			    ring->desc[i].paddr);
925 		}
926 	}
927 
928 	/*
929 	 * Make rings out of this list by pointing last to first.
930 	 */
931 	i = n - 1;
932 	ring->desc[i].next = &ring->desc[0];
933 	ddi_put32(ring->acchdl, &ring->cdesc[i].next, ring->desc[0].paddr);
934 	return (VR_SUCCESS);
935 }
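
/*
 * Sketch of the result for a small ring (n = 4, illustrative): the host
 * descriptors and the chip descriptors form two parallel rings, linked by
 * kernel pointers and DMA addresses respectively, with the last entry
 * pointing back to the first:
 *
 *	desc[0]  -> desc[1]  -> desc[2]  -> desc[3]  -> desc[0]  ...
 *	cdesc[0] -> cdesc[1] -> cdesc[2] -> cdesc[3] -> cdesc[0] ...
 */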
936 
937 /*
938  * Free the memory allocated for a ring.
939  */
940 static void
941 vr_free_ring(vr_ring_t *r, size_t n)
942 {
943 	/*
944 	 * Unmap and free the chip descriptors.
945 	 */
946 	(void) ddi_dma_unbind_handle(r->handle);
947 	ddi_dma_mem_free(&r->acchdl);
948 	ddi_dma_free_handle(&r->handle);
949 
950 	/*
951 	 * Free the memory for storing host descriptors
952 	 */
953 	kmem_free(r->desc, n * sizeof (vr_desc_t));
954 }
955 
956 /*
957  * Initialize the receive ring.
958  */
959 static vr_result_t
960 vr_rxring_init(vr_t *vrp)
961 {
962 	int		i, rc;
963 	vr_desc_t	*rp;
964 
965 	/*
966 	 * Set the read pointer at the start of the ring.
967 	 */
968 	vrp->rx.rp = &vrp->rx.ring[0];
969 
970 	/*
971 	 * Assign a DMA buffer to each receive descriptor.
972 	 */
973 	for (i = 0; i < vrp->rx.ndesc; i++) {
974 		rp = &vrp->rx.ring[i];
975 		rc = vr_alloc_dmabuf(vrp,
976 		    &vrp->rx.ring[i].dmabuf,
977 		    DDI_DMA_STREAMING | DDI_DMA_READ);
978 
979 		if (rc != VR_SUCCESS) {
980 			while (--i >= 0)
981 				vr_free_dmabuf(&vrp->rx.ring[i].dmabuf);
982 			return (VR_FAILURE);
983 		}
984 
985 		/*
986 		 * Store the address of the dma buffer in the chip descriptor
987 		 */
988 		ddi_put32(vrp->rxring.acchdl,
989 		    &rp->cdesc->data,
990 		    rp->dmabuf.paddr);
991 
992 		/*
993 		 * Put the buffer length in the chip descriptor. Ensure that
994 		 * length fits in the 11 bits of stat1 (2047/0x7FF)
995 		 */
996 		ddi_put32(vrp->rxring.acchdl, &rp->cdesc->stat1,
997 		    MIN(VR_MAX_PKTSZ, rp->dmabuf.bufsz));
998 
999 		/*
1000 		 * Set descriptor ownership to the card
1001 		 */
1002 		ddi_put32(vrp->rxring.acchdl, &rp->cdesc->stat0, VR_RDES0_OWN);
1003 
1004 		/*
1005 		 * Sync the descriptor with main memory
1006 		 */
1007 		(void) ddi_dma_sync(vrp->rxring.handle, rp->offset,
1008 		    sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORDEV);
1009 	}
1010 	return (VR_SUCCESS);
1011 }
1012 
1013 /*
1014  * Free the DMA buffers assigned to the receive ring.
1015  */
1016 static void
1017 vr_rxring_fini(vr_t *vrp)
1018 {
1019 	int		i;
1020 
1021 	for (i = 0; i < vrp->rx.ndesc; i++)
1022 		vr_free_dmabuf(&vrp->rx.ring[i].dmabuf);
1023 }
1024 
1025 static vr_result_t
1026 vr_txring_init(vr_t *vrp)
1027 {
1028 	vr_desc_t		*wp;
1029 	int			i, rc;
1030 
1031 	/*
1032 	 * Set the write- and claim pointer.
1033 	 */
1034 	vrp->tx.wp = &vrp->tx.ring[0];
1035 	vrp->tx.cp = &vrp->tx.ring[0];
1036 
1037 	/*
1038 	 * (Re)set the TX bookkeeping.
1039 	 */
1040 	vrp->tx.stallticks = 0;
1041 	vrp->tx.resched = 0;
1042 
1043 	/*
1044 	 * Every transmit decreases nfree. Every reclaim increases nfree.
1045 	 */
1046 	vrp->tx.nfree = vrp->tx.ndesc;
1047 
1048 	/*
1049 	 * Attach a DMA buffer to each transmit descriptor.
1050 	 */
1051 	for (i = 0; i < vrp->tx.ndesc; i++) {
1052 		rc = vr_alloc_dmabuf(vrp,
1053 		    &vrp->tx.ring[i].dmabuf,
1054 		    DDI_DMA_STREAMING | DDI_DMA_WRITE);
1055 
1056 		if (rc != VR_SUCCESS) {
1057 			while (--i >= 0)
1058 				vr_free_dmabuf(&vrp->tx.ring[i].dmabuf);
1059 			return (VR_FAILURE);
1060 		}
1061 	}
1062 
1063 	/*
1064 	 * Init & sync the TX descriptors so the device sees a valid ring.
1065 	 */
1066 	for (i = 0; i < vrp->tx.ndesc; i++) {
1067 		wp = &vrp->tx.ring[i];
1068 		ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat0, 0);
1069 		ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat1, 0);
1070 		ddi_put32(vrp->txring.acchdl, &wp->cdesc->data,
1071 		    wp->dmabuf.paddr);
1072 		(void) ddi_dma_sync(vrp->txring.handle, wp->offset,
1073 		    sizeof (vr_chip_desc_t),
1074 		    DDI_DMA_SYNC_FORDEV);
1075 	}
1076 	return (VR_SUCCESS);
1077 }
1078 
1079 /*
1080  * Free the DMA buffers attached to the TX ring.
1081  */
1082 static void
1083 vr_txring_fini(vr_t *vrp)
1084 {
1085 	int		i;
1086 
1087 	/*
1088 	 * Free the DMA buffers attached to the TX ring
1089 	 */
1090 	for (i = 0; i < vrp->tx.ndesc; i++)
1091 		vr_free_dmabuf(&vrp->tx.ring[i].dmabuf);
1092 }
1093 
1094 /*
1095  * Allocate a DMA buffer.
1096  */
1097 static vr_result_t
1098 vr_alloc_dmabuf(vr_t *vrp, vr_data_dma_t *dmap, uint_t dmaflags)
1099 {
1100 	ddi_dma_cookie_t	dma_cookie;
1101 	uint_t			cookiecnt;
1102 	int			rc;
1103 
1104 	/*
1105 	 * Allocate a DMA handle for the buffer
1106 	 */
1107 	rc = ddi_dma_alloc_handle(vrp->devinfo,
1108 	    &vr_data_dma_attr,
1109 	    DDI_DMA_DONTWAIT, NULL,
1110 	    &dmap->handle);
1111 
1112 	if (rc != DDI_SUCCESS) {
1113 		vr_log(vrp, CE_WARN,
1114 		    "ddi_dma_alloc_handle failed in vr_alloc_dmabuf");
1115 		return (VR_FAILURE);
1116 	}
1117 
1118 	/*
1119 	 * Allocate the buffer
1120 	 * The allocated buffer is aligned on a 2K boundary. This ensures that
1121 	 * a 1500 byte frame never crosses a page boundary and thus that the DMA
1122 	 * mapping can be established in 1 fragment.
1123 	 */
1124 	rc = ddi_dma_mem_alloc(dmap->handle,
1125 	    VR_DMABUFSZ,
1126 	    &vr_data_dma_accattr,
1127 	    DDI_DMA_RDWR | DDI_DMA_STREAMING,
1128 	    DDI_DMA_DONTWAIT, NULL,
1129 	    &dmap->buf,
1130 	    &dmap->bufsz,
1131 	    &dmap->acchdl);
1132 
1133 	if (rc != DDI_SUCCESS) {
1134 		vr_log(vrp, CE_WARN,
1135 		    "ddi_dma_mem_alloc failed in vr_alloc_dmabuf");
1136 		ddi_dma_free_handle(&dmap->handle);
1137 		return (VR_FAILURE);
1138 	}
1139 
1140 	/*
1141 	 * Map the memory
1142 	 */
1143 	rc = ddi_dma_addr_bind_handle(dmap->handle,
1144 	    NULL,
1145 	    (caddr_t)dmap->buf,
1146 	    dmap->bufsz,
1147 	    dmaflags,
1148 	    DDI_DMA_DONTWAIT,
1149 	    NULL,
1150 	    &dma_cookie,
1151 	    &cookiecnt);
1152 
1153 	/*
1154 	 * The cookie count should never be > 1 because we requested 2K alignment
1155 	 */
1156 	if (rc != DDI_DMA_MAPPED || cookiecnt > 1) {
1157 		vr_log(vrp, CE_WARN,
1158 		    "dma_addr_bind_handle failed in vr_alloc_dmabuf: "
1159 		    "rc = %d, cookiecnt = %d", rc, cookiecnt);
1160 		ddi_dma_mem_free(&dmap->acchdl);
1161 		ddi_dma_free_handle(&dmap->handle);
1162 		return (VR_FAILURE);
1163 	}
1164 	dmap->paddr = dma_cookie.dmac_address;
1165 	return (VR_SUCCESS);
1166 }
1167 
1168 /*
1169  * Destroy a DMA buffer.
1170  */
1171 static void
1172 vr_free_dmabuf(vr_data_dma_t *dmap)
1173 {
1174 	(void) ddi_dma_unbind_handle(dmap->handle);
1175 	ddi_dma_mem_free(&dmap->acchdl);
1176 	ddi_dma_free_handle(&dmap->handle);
1177 }
1178 
1179 /*
1180  * Interrupt service routine
1181  * When our vector is shared with another device, av_dispatch_autovect calls
1182  * all service routines for the vector until *none* of them return claimed
1183  * That means that, when sharing vectors, this routine is called at least
1184  * twice for each interrupt.
1185  */
1186 uint_t
1187 vr_intr(caddr_t arg1, caddr_t arg2)
1188 {
1189 	vr_t		*vrp;
1190 	uint16_t	status;
1191 	mblk_t		*lp = NULL;
1192 	uint32_t	tx_resched;
1193 	uint32_t	link_change;
1194 
1195 	tx_resched = 0;
1196 	link_change = 0;
1197 	vrp = (void *)arg1;
1198 	_NOTE(ARGUNUSED(arg2))
1199 
1200 	mutex_enter(&vrp->intrlock);
1201 	/*
1202 	 * If the driver is not in running state it is not our interrupt.
1203 	 * Shared interrupts can end up here without us being started.
1204 	 */
1205 	if (vrp->chip.state != CHIPSTATE_RUNNING) {
1206 		mutex_exit(&vrp->intrlock);
1207 		return (DDI_INTR_UNCLAIMED);
1208 	}
1209 
1210 	/*
1211 	 * Read the status register to see if the interrupt is from our device
1212 	 * This read also ensures that posted writes are brought to main memory.
1213 	 */
1214 	status = VR_GET16(vrp->acc_reg, VR_ISR0) & VR_ICR0_CFG;
1215 	if (status == 0) {
1216 		/*
1217 		 * Status contains no configured interrupts
1218 		 * The interrupt was not generated by our device.
1219 		 */
1220 		vrp->stats.intr_unclaimed++;
1221 		mutex_exit(&vrp->intrlock);
1222 		return (DDI_INTR_UNCLAIMED);
1223 	}
1224 	vrp->stats.intr_claimed++;
1225 
1226 	/*
1227 	 * Acknowledge the event(s) that caused interruption.
1228 	 */
1229 	VR_PUT16(vrp->acc_reg, VR_ISR0, status);
1230 
1231 	/*
1232 	 * Receive completion.
1233 	 */
1234 	if ((status & (VR_ISR0_RX_DONE | VR_ISR_RX_ERR_BITS)) != 0) {
1235 		/*
1236 		 * Received some packets.
1237 		 */
1238 		lp = vr_receive(vrp);
1239 
1240 		/*
1241 		 * DMA stops after a conflict in the FIFO.
1242 		 */
1243 		if ((status & VR_ISR_RX_ERR_BITS) != 0)
1244 			VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1245 		status &= ~(VR_ISR0_RX_DONE | VR_ISR_RX_ERR_BITS);
1246 	}
1247 
1248 	/*
1249 	 * Transmit completion.
1250 	 */
1251 	if ((status & (VR_ISR0_TX_DONE | VR_ISR_TX_ERR_BITS)) != 0) {
1252 		/*
1253 		 * Card done with transmitting some packets
1254 		 * TX_DONE is generated 3 times per ring but it appears
1255 		 * more often because it is also set when an RX_DONE
1256 		 * interrupt is generated.
1257 		 */
1258 		mutex_enter(&vrp->tx.lock);
1259 		vr_tx_reclaim(vrp);
1260 		tx_resched = vrp->tx.resched;
1261 		vrp->tx.resched = 0;
1262 		mutex_exit(&vrp->tx.lock);
1263 		status &= ~(VR_ISR0_TX_DONE | VR_ISR_TX_ERR_BITS);
1264 	}
1265 
1266 	/*
1267 	 * Link status change.
1268 	 */
1269 	if ((status & VR_ICR0_LINKSTATUS) != 0) {
1270 		/*
1271 		 * Get new link state and inform the mac layer.
1272 		 */
1273 		mutex_enter(&vrp->oplock);
1274 		mutex_enter(&vrp->tx.lock);
1275 		vr_link_state(vrp);
1276 		mutex_exit(&vrp->tx.lock);
1277 		mutex_exit(&vrp->oplock);
1278 		status &= ~VR_ICR0_LINKSTATUS;
1279 		vrp->stats.linkchanges++;
1280 		link_change = 1;
1281 	}
1282 
1283 	/*
1284 	 * Bus error.
1285 	 */
1286 	if ((status & VR_ISR0_BUSERR) != 0) {
1287 		vr_log(vrp, CE_WARN, "bus error occurred");
1288 		vrp->reset = 1;
1289 		status &= ~VR_ISR0_BUSERR;
1290 	}
1291 
1292 	/*
1293 	 * We must have handled all things here.
1294 	 */
1295 	ASSERT(status == 0);
1296 	mutex_exit(&vrp->intrlock);
1297 
1298 	/*
1299 	 * Reset the device if requested
1300 	 * The request can come from the periodic tx check or from the interrupt
1301 	 * status.
1302 	 */
1303 	if (vrp->reset != 0) {
1304 		vr_error(vrp);
1305 		vrp->reset = 0;
1306 	}
1307 
1308 	/*
1309 	 * Pass up the list with received packets.
1310 	 */
1311 	if (lp != NULL)
1312 		mac_rx(vrp->machdl, 0, lp);
1313 
1314 	/*
1315 	 * Inform the upper layer on the linkstatus if there was a change.
1316 	 */
1317 	if (link_change != 0)
1318 		mac_link_update(vrp->machdl,
1319 		    (link_state_t)vrp->chip.link.state);
1320 	/*
1321 	 * Restart transmissions if we were waiting for tx descriptors.
1322 	 */
1323 	if (tx_resched == 1)
1324 		mac_tx_update(vrp->machdl);
1325 
1326 	/*
1327 	 * Read something from the card to ensure that all of our configuration
1328 	 * writes are delivered to the device before the interrupt is ended.
1329 	 */
1330 	(void) VR_GET8(vrp->acc_reg, VR_ETHERADDR);
1331 	return (DDI_INTR_CLAIMED);
1332 }
1333 
1334 /*
1335  * Respond to an unforseen situation by resetting the card and our bookkeeping.
1336  */
1337 static void
1338 vr_error(vr_t *vrp)
1339 {
1340 	vr_log(vrp, CE_WARN, "resetting MAC.");
1341 	mutex_enter(&vrp->intrlock);
1342 	mutex_enter(&vrp->oplock);
1343 	mutex_enter(&vrp->tx.lock);
1344 	(void) vr_stop(vrp);
1345 	vr_reset(vrp);
1346 	(void) vr_start(vrp);
1347 	mutex_exit(&vrp->tx.lock);
1348 	mutex_exit(&vrp->oplock);
1349 	mutex_exit(&vrp->intrlock);
1350 	vrp->stats.resets++;
1351 }
1352 
1353 /*
1354  * Collect received packets in a list.
1355  */
1356 static mblk_t *
1357 vr_receive(vr_t *vrp)
1358 {
1359 	mblk_t			*lp, *mp, *np;
1360 	vr_desc_t		*rxp;
1361 	vr_data_dma_t		*dmap;
1362 	uint32_t		pklen;
1363 	uint32_t		rxstat0;
1364 	uint32_t		n;
1365 
1366 	lp = NULL;
1367 	n = 0;
1368 	for (rxp = vrp->rx.rp; ; rxp = rxp->next, n++) {
1369 		/*
1370 		 * Sync the descriptor before looking at it.
1371 		 */
1372 		(void) ddi_dma_sync(vrp->rxring.handle, rxp->offset,
1373 		    sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORKERNEL);
1374 
1375 		/*
1376 		 * Get the status from the descriptor.
1377 		 */
1378 		rxstat0 = ddi_get32(vrp->rxring.acchdl, &rxp->cdesc->stat0);
1379 
1380 		/*
1381 		 * We're done if the descriptor is owned by the card.
1382 		 */
1383 		if ((rxstat0 & VR_RDES0_OWN) != 0)
1384 			break;
1385 		else if ((rxstat0 & VR_RDES0_RXOK) != 0) {
1386 			/*
1387 			 * Received a good packet
1388 			 */
1389 			dmap = &rxp->dmabuf;
1390 			pklen = (rxstat0 >> 16) - ETHERFCSL;
1391 
1392 			/*
1393 			 * Sync the data.
1394 			 */
1395 			(void) ddi_dma_sync(dmap->handle, 0,
1396 			    pklen, DDI_DMA_SYNC_FORKERNEL);
1397 
1398 			/*
1399 			 * Send a new copied message upstream.
1400 			 */
1401 			np = allocb(pklen, 0);
1402 			if (np != NULL) {
1403 				bcopy(dmap->buf, np->b_rptr, pklen);
1404 				np->b_wptr = np->b_rptr + pklen;
1405 
1406 				vrp->stats.mac_stat_ipackets++;
1407 				vrp->stats.mac_stat_rbytes += pklen;
1408 
1409 				if ((rxstat0 & VR_RDES0_BAR) != 0)
1410 					vrp->stats.mac_stat_brdcstrcv++;
1411 				else if ((rxstat0 & VR_RDES0_MAR) != 0)
1412 					vrp->stats.mac_stat_multircv++;
1413 
1414 				/*
1415 				 * Link this packet in the list.
1416 				 */
1417 				np->b_next = NULL;
1418 				if (lp == NULL)
1419 					lp = mp = np;
1420 				else {
1421 					mp->b_next = np;
1422 					mp = np;
1423 				}
1424 			} else {
1425 				vrp->stats.allocbfail++;
1426 				vrp->stats.mac_stat_norcvbuf++;
1427 			}
1428 
1429 		} else {
1430 			/*
1431 			 * Received with errors.
1432 			 */
1433 			vrp->stats.mac_stat_ierrors++;
1434 			if ((rxstat0 & VR_RDES0_FAE) != 0)
1435 				vrp->stats.ether_stat_align_errors++;
1436 			if ((rxstat0 & VR_RDES0_CRCERR) != 0)
1437 				vrp->stats.ether_stat_fcs_errors++;
1438 			if ((rxstat0 & VR_RDES0_LONG) != 0)
1439 				vrp->stats.ether_stat_toolong_errors++;
1440 			if ((rxstat0 & VR_RDES0_RUNT) != 0)
1441 				vrp->stats.ether_stat_tooshort_errors++;
1442 			if ((rxstat0 & VR_RDES0_FOV) != 0)
1443 				vrp->stats.mac_stat_overflows++;
1444 		}
1445 
1446 		/*
1447 		 * Reset descriptor ownership to the MAC.
1448 		 */
1449 		ddi_put32(vrp->rxring.acchdl,
1450 		    &rxp->cdesc->stat0,
1451 		    VR_RDES0_OWN);
1452 		(void) ddi_dma_sync(vrp->rxring.handle,
1453 		    rxp->offset,
1454 		    sizeof (vr_chip_desc_t),
1455 		    DDI_DMA_SYNC_FORDEV);
1456 	}
1457 	vrp->rx.rp = rxp;
1458 
1459 	/*
1460 	 * If we do flowcontrol and if the card can transmit pause frames,
1461 	 * increment the "available receive descriptors" register.
1462 	 */
1463 	if (n > 0 && vrp->chip.link.flowctrl == VR_PAUSE_BIDIRECTIONAL) {
1464 		/*
1465 		 * Whenever the card moves a fragment to host memory it
1466 		 * decrements the RXBUFCOUNT register. If the value in the
1467 		 * register reaches a low watermark, the card transmits a pause
1468 		 * frame. If the value in this register reaches a high
1469 		 * watermark, the card sends a "cancel pause" frame
1470 		 *
1471 		 * Non-zero values written to this byte register are added
1472 		 * by the chip to the register's contents, so we must write
1473 		 * the number of descriptors free'd.
1474 		 */
1475 		VR_PUT8(vrp->acc_reg, VR_FCR0_RXBUFCOUNT, MIN(n, 0xFF));
1476 	}
1477 	return (lp);
1478 }
1479 
1480 /*
1481  * Enqueue a list of packets for transmission
1482  * Return the packets not transmitted.
1483  */
1484 mblk_t *
1485 vr_mac_tx_enqueue_list(void *p, mblk_t *mp)
1486 {
1487 	vr_t		*vrp;
1488 	mblk_t		*nextp;
1489 
1490 	vrp = (vr_t *)p;
1491 	mutex_enter(&vrp->tx.lock);
1492 	do {
1493 		if (vrp->tx.nfree == 0) {
1494 			vrp->stats.ether_stat_defer_xmts++;
1495 			vrp->tx.resched = 1;
1496 			break;
1497 		}
1498 		nextp = mp->b_next;
1499 		mp->b_next = mp->b_prev = NULL;
1500 		vr_tx_enqueue_msg(vrp, mp);
1501 		mp = nextp;
1502 		vrp->tx.nfree--;
1503 	} while (mp != NULL);
1504 	mutex_exit(&vrp->tx.lock);
1505 
1506 	/*
1507 	 * Tell the chip to poll the TX ring.
1508 	 */
1509 	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1510 	return (mp);
1511 }
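
/*
 * Note on the return value above (a summary, not new behaviour): when the
 * ring is full, the untransmitted remainder of the chain is returned to the
 * MAC layer, which queues it until mac_tx_update() is called. That call is
 * made from vr_intr() once vr_tx_reclaim() has freed descriptors and found
 * tx.resched set.
 */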
1512 
1513 /*
1514  * Enqueue a message for transmission.
1515  */
1516 static void
1517 vr_tx_enqueue_msg(vr_t *vrp, mblk_t *mp)
1518 {
1519 	vr_desc_t		*wp;
1520 	vr_data_dma_t		*dmap;
1521 	uint32_t		pklen;
1522 	uint32_t		nextp;
1523 	int			padlen;
1524 
1525 	if ((uchar_t)mp->b_rptr[0] == 0xff &&
1526 	    (uchar_t)mp->b_rptr[1] == 0xff &&
1527 	    (uchar_t)mp->b_rptr[2] == 0xff &&
1528 	    (uchar_t)mp->b_rptr[3] == 0xff &&
1529 	    (uchar_t)mp->b_rptr[4] == 0xff &&
1530 	    (uchar_t)mp->b_rptr[5] == 0xff)
1531 		vrp->stats.mac_stat_brdcstxmt++;
1532 	else if ((uchar_t)mp->b_rptr[0] == 1)
1533 		vrp->stats.mac_stat_multixmt++;
1534 
1535 	pklen = msgsize(mp);
1536 	wp = vrp->tx.wp;
1537 	dmap = &wp->dmabuf;
1538 
1539 	/*
1540 	 * Copy the message into the pre-mapped buffer and free mp
1541 	 */
1542 	mcopymsg(mp, dmap->buf);
1543 
1544 	/*
1545 	 * Clean padlen bytes of short packet.
1546 	 */
1547 	padlen = ETHERMIN - pklen;
1548 	if (padlen > 0) {
1549 		bzero(dmap->buf + pklen, padlen);
1550 		pklen += padlen;
1551 	}
1552 
1553 	/*
1554 	 * Most of the statistics are updated on reclaim, after the actual
1555 	 * transmit. obytes is maintained here because the length is cleared
1556 	 * after transmission
1557 	 */
1558 	vrp->stats.mac_stat_obytes += pklen;
1559 
1560 	/*
1561 	 * Sync the data so the device sees the new content too.
1562 	 */
1563 	(void) ddi_dma_sync(dmap->handle, 0, pklen, DDI_DMA_SYNC_FORDEV);
1564 
1565 	/*
1566 	 * If we have reached the TX interrupt distance, enable a TX interrupt
1567 	 * for this packet. The Interrupt Control (IC) bit in the transmit
1568 	 * descriptor doesn't have any effect on the interrupt generation
1569 	 * despite the vague statements in the datasheet. Thus, we use the
1570 	 * more obscure interrupt suppress bit which is probably part of the
1571 	 * MAC's bookkeeping for TX interrupts and fragmented packets.
1572 	 */
1573 	vrp->tx.intr_distance++;
1574 	nextp = ddi_get32(vrp->txring.acchdl, &wp->cdesc->next);
1575 	if (vrp->tx.intr_distance >= VR_TX_MAX_INTR_DISTANCE) {
1576 		/*
1577 		 * Don't suppress the interrupt for this packet.
1578 		 */
1579 		vrp->tx.intr_distance = 0;
1580 		nextp &= (~VR_TDES3_SUPPRESS_INTR);
1581 	} else {
1582 		/*
1583 		 * Suppress the interrupt for this packet.
1584 		 */
1585 		nextp |= VR_TDES3_SUPPRESS_INTR;
1586 	}
1587 
1588 	/*
1589 	 * Write and sync the chip's descriptor
1590 	 */
1591 	ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat1,
1592 	    pklen | (VR_TDES1_STP | VR_TDES1_EDP | VR_TDES1_CHN));
1593 	ddi_put32(vrp->txring.acchdl, &wp->cdesc->next, nextp);
1594 	ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat0, VR_TDES0_OWN);
1595 	(void) ddi_dma_sync(vrp->txring.handle, wp->offset,
1596 	    sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORDEV);
1597 
1598 	/*
1599 	 * The ticks counter is cleared by reclaim when it reclaimed some
1600 	 * descriptors and incremented by the periodic TX stall check.
1601 	 */
1602 	vrp->tx.stallticks = 1;
1603 	vrp->tx.wp = wp->next;
1604 }
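
/*
 * Illustration of the interrupt distance logic above: with a distance of
 * VR_TX_MAX_INTR_DISTANCE (defined elsewhere), the suppress bit is set on
 * all but every VR_TX_MAX_INTR_DISTANCE-th enqueued packet, so a burst of
 * transmits generates a bounded number of TX completion interrupts while
 * reclaim still runs often enough to keep descriptors available.
 */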
1605 
1606 /*
1607  * Free transmitted descriptors.
1608  */
1609 static void
1610 vr_tx_reclaim(vr_t *vrp)
1611 {
1612 	vr_desc_t		*cp;
1613 	uint32_t		stat0, stat1, freed, dirty;
1614 
1615 	ASSERT(mutex_owned(&vrp->tx.lock));
1616 
1617 	freed = 0;
1618 	dirty = vrp->tx.ndesc - vrp->tx.nfree;
1619 	for (cp = vrp->tx.cp; dirty > 0; cp = cp->next) {
1620 		/*
1621 		 * Sync & get descriptor status.
1622 		 */
1623 		(void) ddi_dma_sync(vrp->txring.handle, cp->offset,
1624 		    sizeof (vr_chip_desc_t),
1625 		    DDI_DMA_SYNC_FORKERNEL);
1626 		stat0 = ddi_get32(vrp->txring.acchdl, &cp->cdesc->stat0);
1627 
1628 		if ((stat0 & VR_TDES0_OWN) != 0)
1629 			break;
1630 
1631 		/*
1632 		 * Do stats for the first descriptor in a chain.
1633 		 */
1634 		stat1 = ddi_get32(vrp->txring.acchdl, &cp->cdesc->stat1);
1635 		if ((stat1 & VR_TDES1_STP) != 0) {
1636 			if ((stat0 & VR_TDES0_TERR) != 0) {
1637 				vrp->stats.ether_stat_macxmt_errors++;
1638 				if ((stat0 & VR_TDES0_UDF) != 0)
1639 					vrp->stats.mac_stat_underflows++;
1640 				if ((stat0 & VR_TDES0_ABT) != 0)
1641 					vrp->stats.ether_stat_ex_collisions++;
1642 				/*
1643 				 * Abort and FIFO underflow stop the MAC.
1644 				 * Packet queueing must be disabled with HD
1645 				 * links because otherwise the MAC is also lost
1646 				 * after a few of these events.
1647 				 */
1648 				VR_PUT8(vrp->acc_reg, VR_CTRL0,
1649 				    VR_CTRL0_DMA_GO);
1650 			} else
1651 				vrp->stats.mac_stat_opackets++;
1652 
1653 			if ((stat0 & VR_TDES0_COL) != 0) {
1654 				if ((stat0 & VR_TDES0_NCR) == 1) {
1655 					vrp->stats.
1656 					    ether_stat_first_collisions++;
1657 				} else {
1658 					vrp->stats.
1659 					    ether_stat_multi_collisions++;
1660 				}
1661 				vrp->stats.mac_stat_collisions +=
1662 				    (stat0 & VR_TDES0_NCR);
1663 			}
1664 
1665 			if ((stat0 & VR_TDES0_CRS) != 0)
1666 				vrp->stats.ether_stat_carrier_errors++;
1667 
1668 			if ((stat0 & VR_TDES0_OWC) != 0)
1669 				vrp->stats.ether_stat_tx_late_collisions++;
1670 		}
1671 		freed += 1;
1672 		dirty -= 1;
1673 	}
1674 	vrp->tx.cp = cp;
1675 
1676 	if (freed > 0) {
1677 		vrp->tx.nfree += freed;
1678 		vrp->tx.stallticks = 0;
1679 		vrp->stats.txreclaims += 1;
1680 	} else
1681 		vrp->stats.txreclaim0 += 1;
1682 }
1683 
1684 /*
1685  * Check TX health every 2 seconds.
1686  */
1687 static void
1688 vr_periodic(void *p)
1689 {
1690 	vr_t		*vrp;
1691 
1692 	vrp = (vr_t *)p;
1693 	if (vrp->chip.state == CHIPSTATE_RUNNING &&
1694 	    vrp->chip.link.state == VR_LINK_STATE_UP && vrp->reset == 0) {
1695 		if (mutex_tryenter(&vrp->intrlock) != 0) {
1696 			mutex_enter(&vrp->tx.lock);
1697 			if (vrp->tx.resched == 1) {
1698 				if (vrp->tx.stallticks >= VR_MAXTXCHECKS) {
1699 					/*
1700 					 * No successful reclaim in the last n
1701 					 * intervals. Reset the MAC.
1702 					 */
1703 					vrp->reset = 1;
1704 					vr_log(vrp, CE_WARN,
1705 					    "TX stalled, resetting MAC");
1706 					vrp->stats.txstalls++;
1707 				} else {
1708 					/*
1709 					 * Increase until we find that we've
1710 					 * waited long enough.
1711 					 */
1712 					vrp->tx.stallticks += 1;
1713 				}
1714 			}
1715 			mutex_exit(&vrp->tx.lock);
1716 			mutex_exit(&vrp->intrlock);
1717 			vrp->stats.txchecks++;
1718 		}
1719 	}
1720 	vrp->stats.cyclics++;
1721 }
1722 
1723 /*
1724  * Bring the device to our desired initial state.
1725  */
1726 static void
1727 vr_reset(vr_t *vrp)
1728 {
1729 	uint32_t	time;
1730 
1731 	/*
1732 	 * Reset the MAC
1733 	 * If we don't wait long enough for the forced reset to complete,
1734 	 * the MAC loses sync with the PHY. The result is that the link is up,
1735 	 * but there is no link change interrupt and no data transfer.
1736 	 */
1737 	time = 0;
1738 	VR_PUT8(vrp->acc_io, VR_CTRL1, VR_CTRL1_RESET);
1739 	do {
1740 		drv_usecwait(100);
1741 		time += 100;
1742 		if (time >= 100000) {
1743 			VR_PUT8(vrp->acc_io, VR_MISC1, VR_MISC1_RESET);
1744 			delay(drv_usectohz(200000));
1745 		}
1746 	} while ((VR_GET8(vrp->acc_io, VR_CTRL1) & VR_CTRL1_RESET) != 0);
1747 	delay(drv_usectohz(10000));
1748 
1749 	/*
1750 	 * Load the PROM contents into the MAC again.
1751 	 */
1752 	VR_SETBIT8(vrp->acc_io, VR_PROMCTL, VR_PROMCTL_RELOAD);
1753 	delay(drv_usectohz(100000));
1754 
1755 	/*
1756 	 * Tell the MAC via IO space that we like to use memory space for
1757 	 * accessing registers.
1758 	 */
1759 	VR_SETBIT8(vrp->acc_io, VR_CFGD, VR_CFGD_MMIOEN);
1760 }
1761 
1762 /*
1763  * Prepare and enable the card (MAC + PHY + PCI).
1764  */
1765 static int
1766 vr_start(vr_t *vrp)
1767 {
1768 	uint8_t		pci_latency, pci_mode;
1769 
1770 	ASSERT(mutex_owned(&vrp->oplock));
1771 
1772 	/*
1773 	 * Allocate DMA buffers for RX.
1774 	 */
1775 	if (vr_rxring_init(vrp) != VR_SUCCESS) {
1776 		vr_log(vrp, CE_NOTE, "vr_rxring_init() failed");
1777 		return (ENOMEM);
1778 	}
1779 
1780 	/*
1781 	 * Allocate DMA buffers for TX.
1782 	 */
1783 	if (vr_txring_init(vrp) != VR_SUCCESS) {
1784 		vr_log(vrp, CE_NOTE, "vr_txring_init() failed");
1785 		vr_rxring_fini(vrp);
1786 		return (ENOMEM);
1787 	}
1788 
1789 	/*
1790 	 * Changes of the chip specific registers as done in VIA's fet driver.
1791 	 * These bits are not in the datasheet and are controlled by vr_chip_info.
1792 	 */
1793 	pci_mode = VR_GET8(vrp->acc_reg, VR_MODE2);
1794 	if ((vrp->chip.info.bugs & VR_BUG_NEEDMODE10T) != 0)
1795 		pci_mode |= VR_MODE2_MODE10T;
1796 
1797 	if ((vrp->chip.info.bugs & VR_BUG_NEEDMODE2PCEROPT) != 0)
1798 		pci_mode |= VR_MODE2_PCEROPT;
1799 
1800 	if ((vrp->chip.info.features & VR_FEATURE_MRDLNMULTIPLE) != 0)
1801 		pci_mode |= VR_MODE2_MRDPL;
1802 	VR_PUT8(vrp->acc_reg, VR_MODE2, pci_mode);
1803 
1804 	pci_mode = VR_GET8(vrp->acc_reg, VR_MODE3);
1805 	if ((vrp->chip.info.bugs & VR_BUG_NEEDMIION) != 0)
1806 		pci_mode |= VR_MODE3_MIION;
1807 	VR_PUT8(vrp->acc_reg, VR_MODE3, pci_mode);
1808 
1809 	/*
1810 	 * RX: Accept broadcast packets.
1811 	 */
1812 	VR_SETBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTBROAD);
1813 
1814 	/*
1815 	 * RX: Start DMA when there are 256 bytes in the FIFO.
1816 	 */
1817 	VR_SETBITS8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_FIFO_THRESHOLD_BITS,
1818 	    VR_RXCFG_FIFO_THRESHOLD_256);
1819 	VR_SETBITS8(vrp->acc_reg, VR_BCR0, VR_BCR0_RX_FIFO_THRESHOLD_BITS,
1820 	    VR_BCR0_RX_FIFO_THRESHOLD_256);
1821 
1822 	/*
1823 	 * TX: Start transmit when there are 256 bytes in the FIFO.
1824 	 */
1825 	VR_SETBITS8(vrp->acc_reg, VR_TXCFG, VR_TXCFG_FIFO_THRESHOLD_BITS,
1826 	    VR_TXCFG_FIFO_THRESHOLD_256);
1827 	VR_SETBITS8(vrp->acc_reg, VR_BCR1, VR_BCR1_TX_FIFO_THRESHOLD_BITS,
1828 	    VR_BCR1_TX_FIFO_THRESHOLD_256);
1829 
1830 	/*
1831 	 * Burst transfers up to 256 bytes.
1832 	 */
1833 	VR_SETBITS8(vrp->acc_reg, VR_BCR0, VR_BCR0_DMABITS, VR_BCR0_DMA256);
1834 
1835 	/*
1836 	 * Disable TX autopolling as it is bad for RX performance.
1837 	 * I assume this is because the RX process often finds the bus occupied
1838 	 * by the polling process.
1839 	 */
1840 	VR_SETBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_NOAUTOPOLL);
1841 
1842 	/*
1843 	 * Honor the PCI latency timer if it is reasonable.
1844 	 */
1845 	pci_latency = VR_GET8(vrp->acc_cfg, PCI_CONF_LATENCY_TIMER);
1846 	if (pci_latency != 0 && pci_latency != 0xFF)
1847 		VR_SETBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_LATENCYTIMER);
1848 	else
1849 		VR_CLRBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_LATENCYTIMER);
1850 
1851 	/*
1852 	 * Ensure that VLAN filtering is off, because this strips the tag.
1853 	 */
1854 	if ((vrp->chip.info.features & VR_FEATURE_VLANTAGGING) != 0) {
1855 		VR_CLRBIT8(vrp->acc_reg, VR_BCR1, VR_BCR1_VLANFILTER);
1856 		VR_CLRBIT8(vrp->acc_reg, VR_TXCFG, VR_TXCFG_8021PQ_EN);
1857 	}
1858 
1859 	/*
1860 	 * Clear the CAM filter.
1861 	 */
1862 	if ((vrp->chip.info.features & VR_FEATURE_CAMSUPPORT) != 0) {
1863 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_ENABLE);
1864 		VR_PUT32(vrp->acc_reg, VR_CAM_MASK, 0);
1865 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
1866 
1867 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
1868 		    VR_CAM_CTRL_ENABLE|VR_CAM_CTRL_SELECT_VLAN);
1869 		VR_PUT8(vrp->acc_reg, VR_VCAM0, 0);
1870 		VR_PUT8(vrp->acc_reg, VR_VCAM1, 0);
1871 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_WRITE);
1872 		VR_PUT32(vrp->acc_reg, VR_CAM_MASK, 1);
1873 		drv_usecwait(2);
1874 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
1875 	}
1876 
1877 	/*
1878 	 * Give the start addresses of the descriptor rings to the DMA
1879 	 * controller on the MAC.
1880 	 */
1881 	VR_PUT32(vrp->acc_reg, VR_RXADDR, vrp->rx.rp->paddr);
1882 	VR_PUT32(vrp->acc_reg, VR_TXADDR, vrp->tx.wp->paddr);
1883 
1884 	/*
1885 	 * We don't use the additionally invented interrupt ICR1 register,
1886 	 * so make sure these are disabled.
1887 	 */
1888 	VR_PUT8(vrp->acc_reg, VR_ISR1, 0xFF);
1889 	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);
1890 
1891 	/*
1892 	 * Enable interrupts.
1893 	 */
1894 	VR_PUT16(vrp->acc_reg, VR_ISR0, 0xFFFF);
1895 	VR_PUT16(vrp->acc_reg, VR_ICR0, VR_ICR0_CFG);
1896 
1897 	/*
1898 	 * Enable the DMA controller.
1899 	 */
1900 	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1901 
1902 	/*
1903 	 * Configure the link. Rely on the link change interrupt for getting
1904 	 * the link state into the driver.
1905 	 */
1906 	vr_link_init(vrp);
1907 
1908 	/*
1909 	 * Set the software view on the state to 'running'.
1910 	 */
1911 	vrp->chip.state = CHIPSTATE_RUNNING;
1912 	return (0);
1913 }
1914 
1915 /*
1916  * Stop DMA and interrupts.
1917  */
1918 static int
1919 vr_stop(vr_t *vrp)
1920 {
1921 	ASSERT(mutex_owned(&vrp->oplock));
1922 
1923 	/*
1924 	 * Stop interrupts.
1925 	 */
1926 	VR_PUT16(vrp->acc_reg, VR_ICR0, 0);
1927 	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);
1928 
1929 	/*
1930 	 * Stop DMA.
1931 	 */
1932 	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_STOP);
1933 
1934 	/*
1935 	 * Set the software view on the state to stopped.
1936 	 */
1937 	vrp->chip.state = CHIPSTATE_STOPPED;
1938 
1939 	/*
1940 	 * Remove DMA buffers from the rings.
1941 	 */
1942 	vr_rxring_fini(vrp);
1943 	vr_txring_fini(vrp);
1944 	return (0);
1945 }
1946 
1947 int
1948 vr_mac_start(void *p)
1949 {
1950 	vr_t	*vrp;
1951 	int	rc;
1952 
1953 	vrp = (vr_t *)p;
1954 	mutex_enter(&vrp->oplock);
1955 
1956 	/*
1957 	 * Reset the card.
1958 	 */
1959 	vr_reset(vrp);
1960 
1961 	/*
1962 	 * Prepare and enable the card.
1963 	 */
1964 	rc = vr_start(vrp);
1965 
1966 	/*
1967 	 * Configure a cyclic function to keep the card & driver from diverging.
1968 	 */
1969 	vrp->periodic_id =
1970 	    ddi_periodic_add(vr_periodic, vrp, VR_CHECK_INTERVAL, DDI_IPL_0);
1971 
1972 	mutex_exit(&vrp->oplock);
1973 	return (rc);
1974 }
1975 
1976 void
1977 vr_mac_stop(void *p)
1978 {
1979 	vr_t	*vrp = p;
1980 
1981 	mutex_enter(&vrp->oplock);
1982 	mutex_enter(&vrp->tx.lock);
1983 
1984 	/*
1985 	 * Stop the device.
1986 	 */
1987 	(void) vr_stop(vrp);
1988 	mutex_exit(&vrp->tx.lock);
1989 
1990 	/*
1991 	 * Remove the cyclic from the system.
1992 	 */
1993 	ddi_periodic_delete(vrp->periodic_id);
1994 	mutex_exit(&vrp->oplock);
1995 }
1996 
1997 /*
1998  * Add or remove a multicast address to/from the filter
1999  *
2000  * From the 21143 manual:
2001  *  The 21143 can store 512 bits serving as hash bucket heads, and one physical
2002  *  48-bit Ethernet address. Incoming frames with multicast destination
2003  *  addresses are subjected to imperfect filtering. Frames with physical
2004  *  destination  addresses are checked against the single physical address.
2005  *  For any incoming frame with a multicast destination address, the 21143
2006  *  applies the standard Ethernet cyclic redundancy check (CRC) function to the
2007  *  first 6 bytes containing the destination address, then it uses the most
2008  *  significant 9 bits of the result as a bit index into the table. If the
2009  *  indexed bit is set, the frame is accepted. If the bit is cleared, the frame
2010  *  is rejected. This filtering mode is called imperfect because multicast
2011  *  frames not addressed to this station may slip through, but it still
2012  *  decreases the number of frames that the host can receive.
2013  * I assume the above is also the way the VIA chips work. There's not a single
2014  * word about the multicast filter in the datasheet.
2015  *
2016  * Another word on the CAM filter on VT6105M controllers:
2017  *  The VT6105M has content addressable memory which can be used for perfect
2018  *  filtering of 32 multicast addresses and a few VLAN ids.
2019  *
2020  *  I think it works like this: when the controller receives a multicast
2021  *  address, it looks the address up in the CAM. If it is found, it takes the
2022  *  matching cell address (index) and checks the corresponding bit in the CAM
2023  *  mask. If that bit is set, the packet is passed up. If the CAM lookup does
2024  *  not produce a match, the packet is filtered using the hash based filter
2025  *  and is passed up only if the hash matches.
2026  * Also, there's not a single word in the datasheet on how this CAM is
2027  * supposed to work ...
2028  */
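
/*
 * Illustrative sketch only (kept out of the build): how one multicast
 * address maps onto the 64-bit hash filter described above. The helper
 * name vr_hash_bit() is hypothetical and not part of this driver; the
 * real work is done inline in vr_mac_set_multicast() below.
 */
#if 0
static void
vr_hash_bit(const uint8_t *mca, uint32_t *mhash0, uint32_t *mhash1)
{
	uint32_t index;

	/* The top 6 bits of the big-endian CRC select one of 64 buckets. */
	index = ether_crc_be(mca) >> (32 - 6);
	if (index < 32)
		*mhash0 |= (1 << index);
	else
		*mhash1 |= (1 << (index - 32));
}
#endif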
2029 int
2030 vr_mac_set_multicast(void *p, boolean_t add, const uint8_t *mca)
2031 {
2032 	vr_t		*vrp;
2033 	uint32_t	crc_index;
2034 	int32_t		cam_index;
2035 	uint32_t	cam_mask;
2036 	boolean_t	use_hash_filter;
2037 	ether_addr_t	taddr;
2038 	uint32_t	a;
2039 
2040 	vrp = (vr_t *)p;
2041 	mutex_enter(&vrp->oplock);
2042 	mutex_enter(&vrp->intrlock);
2043 	use_hash_filter = B_FALSE;
2044 
2045 	if ((vrp->chip.info.features & VR_FEATURE_CAMSUPPORT) != 0) {
2046 		/*
2047 		 * Program the perfect filter.
2048 		 */
2049 		cam_mask = VR_GET32(vrp->acc_reg, VR_CAM_MASK);
2050 		if (add == B_TRUE) {
2051 			/*
2052 			 * Get index of first empty slot.
2053 			 */
2054 			bzero(&taddr, sizeof (taddr));
2055 			cam_index = vr_cam_index(vrp, taddr);
2056 			if (cam_index != -1) {
2057 				/*
2058 				 * Add address at cam_index.
2059 				 */
2060 				cam_mask |= (1 << cam_index);
2061 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2062 				    VR_CAM_CTRL_ENABLE);
2063 				VR_PUT8(vrp->acc_reg, VR_CAM_ADDR, cam_index);
2064 				VR_PUT32(vrp->acc_reg, VR_CAM_MASK, cam_mask);
2065 				for (a = 0; a < ETHERADDRL; a++) {
2066 					VR_PUT8(vrp->acc_reg,
2067 					    VR_MCAM0 + a, mca[a]);
2068 				}
2069 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2070 				    VR_CAM_CTRL_WRITE);
2071 				drv_usecwait(2);
2072 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2073 				    VR_CAM_CTRL_DONE);
2074 			} else {
2075 				/*
2076 				 * No free CAM slots available
2077 				 * Add mca to the imperfect filter.
2078 				 */
2079 				use_hash_filter = B_TRUE;
2080 			}
2081 		} else {
2082 			/*
2083 			 * Find the index of the entry to remove
2084 			 * If the entry was not found (-1), the addition was
2085 			 * probably done when the table was full.
2086 			 */
2087 			cam_index = vr_cam_index(vrp, mca);
2088 			if (cam_index != -1) {
2089 				/*
2090 				 * Disable the corresponding mask bit.
2091 				 */
2092 				cam_mask &= ~(1 << cam_index);
2093 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2094 				    VR_CAM_CTRL_ENABLE);
2095 				VR_PUT32(vrp->acc_reg, VR_CAM_MASK, cam_mask);
2096 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2097 				    VR_CAM_CTRL_DONE);
2098 			} else {
2099 				/*
2100 				 * The entry to be removed was not found
2101 				 * The likely cause is that the CAM was full
2102 				 * during addition. The entry is added to the
2103 				 * hash filter in that case and needs to be
2104 				 * removed there too.
2105 				 */
2106 				use_hash_filter = B_TRUE;
2107 			}
2108 		}
2109 	} else {
2110 		/*
2111 		 * No CAM in the MAC, thus we need the hash filter.
2112 		 */
2113 		use_hash_filter = B_TRUE;
2114 	}
2115 
2116 	if (use_hash_filter == B_TRUE) {
2117 		/*
2118 		 * Get the CRC-32 of the multicast address.
2119 		 * The card uses the "MSB first" direction when calculating the
2120 		 * CRC, which is odd because Ethernet is "LSB first";
2121 		 * we have to use that "big endian" approach as well.
2122 		 */
2123 		crc_index = ether_crc_be(mca) >> (32 - 6);
2124 		if (add == B_TRUE) {
2125 			/*
2126 			 * Turn bit[crc_index] on.
2127 			 */
2128 			if (crc_index < 32)
2129 				vrp->mhash0 |= (1 << crc_index);
2130 			else
2131 				vrp->mhash1 |= (1 << (crc_index - 32));
2132 		} else {
2133 			/*
2134 			 * Turn bit[crc_index] off.
2135 			 */
2136 			if (crc_index < 32)
2137 				vrp->mhash0 &= ~(1 << crc_index);
2138 			else
2139 				vrp->mhash1 &= ~(1 << (crc_index - 32));
2140 		}
2141 
2142 		/*
2143 		 * When not in promiscuous mode, write the filter now. When promiscuous,
2144 		 * the filter is fully open and will be rewritten when promiscuous ends.
2145 		 */
2146 		if (vrp->promisc == B_FALSE) {
2147 			VR_PUT32(vrp->acc_reg, VR_MAR0, vrp->mhash0);
2148 			VR_PUT32(vrp->acc_reg, VR_MAR1, vrp->mhash1);
2149 		}
2150 	}
2151 
2152 	/*
2153 	 * Enable/disable multicast reception based on mcount.
2154 	 */
2155 	if (add == B_TRUE)
2156 		vrp->mcount++;
2157 	else if (vrp->mcount != 0)
2158 		vrp->mcount--;
2159 	if (vrp->mcount != 0)
2160 		VR_SETBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTMULTI);
2161 	else
2162 		VR_CLRBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTMULTI);
2163 
2164 	mutex_exit(&vrp->intrlock);
2165 	mutex_exit(&vrp->oplock);
2166 	return (0);
2167 }
2168 
2169 /*
2170  * Calculate the CRC-32 over the 6 bytes of a multicast address, MSB(it) first.
2171  * The MSB first order is a bit odd because the Ethernet standard is LSB first.
2172  */
2173 static uint32_t
2174 ether_crc_be(const uint8_t *data)
2175 {
2176 	uint32_t	crc = (uint32_t)0xFFFFFFFFU;
2177 	uint32_t	carry;
2178 	uint32_t	bit;
2179 	uint32_t	length;
2180 	uint8_t		c;
2181 
2182 	for (length = 0; length < ETHERADDRL; length++) {
2183 		c = data[length];
2184 		for (bit = 0; bit < 8; bit++) {
2185 			carry = ((crc & 0x80000000U) ? 1 : 0) ^ (c & 0x01);
2186 			crc <<= 1;
2187 			c >>= 1;
2188 			if (carry)
2189 				crc = (crc ^ 0x04C11DB6) | carry;
2190 		}
2191 	}
2192 	return (crc);
2193 }
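
/*
 * Cross-check sketch (kept out of the build): the "(crc ^ 0x04C11DB6) | carry"
 * step above is equivalent to the conventional MSB-first CRC-32 update with
 * polynomial 0x04C11DB7, because after "crc <<= 1" bit 0 of crc is always 0.
 */
#if 0
static uint32_t
ether_crc_be_alt(const uint8_t *data)
{
	uint32_t	crc = (uint32_t)0xFFFFFFFFU;
	uint32_t	feedback;
	uint32_t	bit;
	uint32_t	length;
	uint8_t		c;

	for (length = 0; length < ETHERADDRL; length++) {
		c = data[length];
		for (bit = 0; bit < 8; bit++) {
			feedback = ((crc >> 31) ^ (c & 0x01)) & 1;
			crc <<= 1;
			c >>= 1;
			if (feedback)
				crc ^= 0x04C11DB7;
		}
	}
	return (crc);
}
#endif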
2194 
2195 
2196 /*
2197  * Return the CAM index (base 0) of maddr, or -1 if maddr is not found.
2198  * If maddr is all-zero, return the index of an empty slot in the CAM, or -1
2199  * when no free slots are available.
2200  */
2201 static int32_t
2202 vr_cam_index(vr_t *vrp, const uint8_t *maddr)
2203 {
2204 	ether_addr_t	taddr;
2205 	int32_t		index;
2206 	uint32_t	mask;
2207 	uint32_t	a;
2208 
2209 	bzero(&taddr, sizeof (taddr));
2210 
2211 	/*
2212 	 * Read the CAM mask from the controller.
2213 	 */
2214 	mask = VR_GET32(vrp->acc_reg, VR_CAM_MASK);
2215 
2216 	/*
2217 	 * If maddr is 0, return the first unused slot or -1 for no unused.
2218 	 */
2219 	if (bcmp(maddr, taddr, ETHERADDRL) == 0) {
2220 		/*
2221 		 * Look for the first unused position in mask.
2222 		 */
2223 		for (index = 0; index < VR_CAM_SZ; index++) {
2224 			if (((mask >> index) & 1) == 0)
2225 				return (index);
2226 		}
2227 		return (-1);
2228 	} else {
2229 		/*
2230 		 * Look for maddr in CAM.
2231 		 */
2232 		for (index = 0; index < VR_CAM_SZ; index++) {
2233 			/* Look at enabled entries only */
2234 			if (((mask >> index) & 1) == 0)
2235 				continue;
2236 
2237 			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_ENABLE);
2238 			VR_PUT8(vrp->acc_reg, VR_CAM_ADDR, index);
2239 			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_READ);
2240 			drv_usecwait(2);
2241 			for (a = 0; a < ETHERADDRL; a++)
2242 				taddr[a] = VR_GET8(vrp->acc_reg, VR_MCAM0 + a);
2243 			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
2244 			if (bcmp(maddr, taddr, ETHERADDRL) == 0)
2245 				return (index);
2246 		}
2247 	}
2248 	return (-1);
2249 }
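
/*
 * Usage note: vr_mac_set_multicast() calls vr_cam_index() with an all-zero
 * address to find a free CAM slot for an addition, and with the multicast
 * address itself to find the slot to clear on removal.
 */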
2250 
2251 /*
2252  * Set promiscuous mode on or off.
2253  */
2254 int
2255 vr_mac_set_promisc(void *p, boolean_t promiscflag)
2256 {
2257 	vr_t		*vrp;
2258 	uint8_t		rxcfg;
2259 
2260 	vrp = (vr_t *)p;
2261 
2262 	mutex_enter(&vrp->intrlock);
2263 	mutex_enter(&vrp->oplock);
2264 	mutex_enter(&vrp->tx.lock);
2265 
2266 	/*
2267 	 * Get current receive configuration.
2268 	 */
2269 	rxcfg = VR_GET8(vrp->acc_reg, VR_RXCFG);
2270 	vrp->promisc = promiscflag;
2271 
2272 	if (promiscflag == B_TRUE) {
2273 		/*
2274 		 * Enable promiscuous mode and open the multicast filter.
2275 		 */
2276 		rxcfg |= (VR_RXCFG_PROMISC | VR_RXCFG_ACCEPTMULTI);
2277 		VR_PUT32(vrp->acc_reg, VR_MAR0, 0xffffffff);
2278 		VR_PUT32(vrp->acc_reg, VR_MAR1, 0xffffffff);
2279 	} else {
2280 		/*
2281 		 * Restore the multicast filter and disable promiscuous mode.
2282 		 */
2283 		VR_PUT32(vrp->acc_reg, VR_MAR0, vrp->mhash0);
2284 		VR_PUT32(vrp->acc_reg, VR_MAR1, vrp->mhash1);
2285 		rxcfg &= ~VR_RXCFG_PROMISC;
2286 		if (vrp->mcount != 0)
2287 			rxcfg |= VR_RXCFG_ACCEPTMULTI;
2288 	}
2289 	VR_PUT8(vrp->acc_reg, VR_RXCFG, rxcfg);
2290 	mutex_exit(&vrp->tx.lock);
2291 	mutex_exit(&vrp->oplock);
2292 	mutex_exit(&vrp->intrlock);
2293 	return (0);
2294 }
2295 
2296 int
2297 vr_mac_getstat(void *arg, uint_t stat, uint64_t *val)
2298 {
2299 	vr_t		*vrp;
2300 	uint64_t	v;
2301 
2302 	vrp = (void *) arg;
2303 
2304 	switch (stat) {
2305 	default:
2306 		return (ENOTSUP);
2307 
2308 	case ETHER_STAT_ADV_CAP_100T4:
2309 		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_T4) != 0;
2310 		break;
2311 
2312 	case ETHER_STAT_ADV_CAP_100FDX:
2313 		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_TX_FD) != 0;
2314 		break;
2315 
2316 	case ETHER_STAT_ADV_CAP_100HDX:
2317 		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_TX) != 0;
2318 		break;
2319 
2320 	case ETHER_STAT_ADV_CAP_10FDX:
2321 		v = (vrp->chip.mii.anadv & MII_ABILITY_10BASE_T_FD) != 0;
2322 		break;
2323 
2324 	case ETHER_STAT_ADV_CAP_10HDX:
2325 		v = (vrp->chip.mii.anadv & MII_ABILITY_10BASE_T) != 0;
2326 		break;
2327 
2328 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
2329 		v = 0;
2330 		break;
2331 
2332 	case ETHER_STAT_ADV_CAP_AUTONEG:
2333 		v = (vrp->chip.mii.control & MII_CONTROL_ANE) != 0;
2334 		break;
2335 
2336 	case ETHER_STAT_ADV_CAP_PAUSE:
2337 		v = (vrp->chip.mii.anadv & MII_ABILITY_PAUSE) != 0;
2338 		break;
2339 
2340 	case ETHER_STAT_ADV_REMFAULT:
2341 		v = (vrp->chip.mii.anadv & MII_AN_ADVERT_REMFAULT) != 0;
2342 		break;
2343 
2344 	case ETHER_STAT_ALIGN_ERRORS:
2345 		v = vrp->stats.ether_stat_align_errors;
2346 		break;
2347 
2348 	case ETHER_STAT_CAP_100T4:
2349 		v = (vrp->chip.mii.status & MII_STATUS_100_BASE_T4) != 0;
2350 		break;
2351 
2352 	case ETHER_STAT_CAP_100FDX:
2353 		v = (vrp->chip.mii.status & MII_STATUS_100_BASEX_FD) != 0;
2354 		break;
2355 
2356 	case ETHER_STAT_CAP_100HDX:
2357 		v = (vrp->chip.mii.status & MII_STATUS_100_BASEX) != 0;
2358 		break;
2359 
2360 	case ETHER_STAT_CAP_10FDX:
2361 		v = (vrp->chip.mii.status & MII_STATUS_10_FD) != 0;
2362 		break;
2363 
2364 	case ETHER_STAT_CAP_10HDX:
2365 		v = (vrp->chip.mii.status & MII_STATUS_10) != 0;
2366 		break;
2367 
2368 	case ETHER_STAT_CAP_ASMPAUSE:
2369 		v = 0;
2370 		break;
2371 
2372 	case ETHER_STAT_CAP_AUTONEG:
2373 		v = (vrp->chip.mii.status & MII_STATUS_CANAUTONEG) != 0;
2374 		break;
2375 
2376 	case ETHER_STAT_CAP_PAUSE:
2377 		v = 1;
2378 		break;
2379 
2380 	case ETHER_STAT_CAP_REMFAULT:
2381 		v = (vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0;
2382 		break;
2383 
2384 	case ETHER_STAT_CARRIER_ERRORS:
2385 		/*
2386 		 * Number of times carrier was lost or never detected on a
2387 		 * transmission attempt.
2388 		 */
2389 		v = vrp->stats.ether_stat_carrier_errors;
2390 		break;
2391 
2392 	case ETHER_STAT_JABBER_ERRORS:
2393 		return (ENOTSUP);
2394 
2395 	case ETHER_STAT_DEFER_XMTS:
2396 		/*
2397 		 * Packets without collisions where first transmit attempt was
2398 		 * delayed because the medium was busy.
2399 		 */
2400 		v = vrp->stats.ether_stat_defer_xmts;
2401 		break;
2402 
2403 	case ETHER_STAT_EX_COLLISIONS:
2404 		/*
2405 		 * Frames where excess collisions occurred on transmit, causing
2406 		 * transmit failure.
2407 		 */
2408 		v = vrp->stats.ether_stat_ex_collisions;
2409 		break;
2410 
2411 	case ETHER_STAT_FCS_ERRORS:
2412 		/*
2413 		 * Packets received with CRC errors.
2414 		 */
2415 		v = vrp->stats.ether_stat_fcs_errors;
2416 		break;
2417 
2418 	case ETHER_STAT_FIRST_COLLISIONS:
2419 		/*
2420 		 * Packets successfully transmitted with exactly one collision.
2421 		 */
2422 		v = vrp->stats.ether_stat_first_collisions;
2423 		break;
2424 
2425 	case ETHER_STAT_LINK_ASMPAUSE:
2426 		v = 0;
2427 		break;
2428 
2429 	case ETHER_STAT_LINK_AUTONEG:
2430 		v = (vrp->chip.mii.control & MII_CONTROL_ANE) != 0 &&
2431 		    (vrp->chip.mii.status & MII_STATUS_ANDONE) != 0;
2432 		break;
2433 
2434 	case ETHER_STAT_LINK_DUPLEX:
2435 		v = vrp->chip.link.duplex;
2436 		break;
2437 
2438 	case ETHER_STAT_LINK_PAUSE:
2439 		v = vrp->chip.link.flowctrl;
2440 		break;
2441 
2442 	case ETHER_STAT_LP_CAP_100T4:
2443 		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_T4) != 0;
2444 		break;
2445 
2446 	case ETHER_STAT_LP_CAP_1000FDX:
2447 		v = 0;
2448 		break;
2449 
2450 	case ETHER_STAT_LP_CAP_1000HDX:
2451 		v = 0;
2452 		break;
2453 
2454 	case ETHER_STAT_LP_CAP_100FDX:
2455 		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_TX_FD) != 0;
2456 		break;
2457 
2458 	case ETHER_STAT_LP_CAP_100HDX:
2459 		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_TX) != 0;
2460 		break;
2461 
2462 	case ETHER_STAT_LP_CAP_10FDX:
2463 		v = (vrp->chip.mii.lpable & MII_ABILITY_10BASE_T_FD) != 0;
2464 		break;
2465 
2466 	case ETHER_STAT_LP_CAP_10HDX:
2467 		v = (vrp->chip.mii.lpable & MII_ABILITY_10BASE_T) != 0;
2468 		break;
2469 
2470 	case ETHER_STAT_LP_CAP_ASMPAUSE:
2471 		v = 0;
2472 		break;
2473 
2474 	case ETHER_STAT_LP_CAP_AUTONEG:
2475 		v = (vrp->chip.mii.anexp & MII_AN_EXP_LPCANAN) != 0;
2476 		break;
2477 
2478 	case ETHER_STAT_LP_CAP_PAUSE:
2479 		v = (vrp->chip.mii.lpable & MII_ABILITY_PAUSE) != 0;
2480 		break;
2481 
2482 	case ETHER_STAT_LP_REMFAULT:
2483 		v = (vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0;
2484 		break;
2485 
2486 	case ETHER_STAT_MACRCV_ERRORS:
2487 		/*
2488 		 * Packets received with MAC errors, except align_errors,
2489 		 * fcs_errors, and toolong_errors.
2490 		 */
2491 		v = vrp->stats.ether_stat_macrcv_errors;
2492 		break;
2493 
2494 	case ETHER_STAT_MACXMT_ERRORS:
2495 		/*
2496 		 * Packets encountering transmit MAC failures, except carrier
2497 		 * and collision failures.
2498 		 */
2499 		v = vrp->stats.ether_stat_macxmt_errors;
2500 		break;
2501 
2502 	case ETHER_STAT_MULTI_COLLISIONS:
2503 		/*
2504 		 * Packets successfully transmitted with multiple collisions.
2505 		 */
2506 		v = vrp->stats.ether_stat_multi_collisions;
2507 		break;
2508 
2509 	case ETHER_STAT_SQE_ERRORS:
2510 		/*
2511 		 * Number of times signal quality error was reported
2512 		 * This one is reported by the PHY.
2513 		 */
2514 		return (ENOTSUP);
2515 
2516 	case ETHER_STAT_TOOLONG_ERRORS:
2517 		/*
2518 		 * Packets received larger than the maximum permitted length.
2519 		 */
2520 		v = vrp->stats.ether_stat_toolong_errors;
2521 		break;
2522 
2523 	case ETHER_STAT_TOOSHORT_ERRORS:
2524 		v = vrp->stats.ether_stat_tooshort_errors;
2525 		break;
2526 
2527 	case ETHER_STAT_TX_LATE_COLLISIONS:
2528 		/*
2529 		 * Number of times a transmit collision occurred late
2530 		 * (after 512 bit times).
2531 		 */
2532 		v = vrp->stats.ether_stat_tx_late_collisions;
2533 		break;
2534 
2535 	case ETHER_STAT_XCVR_ADDR:
2536 		/*
2537 		 * MII address in the 0 to 31 range of the physical layer
2538 		 * device in use for a given Ethernet device.
2539 		 */
2540 		v = vrp->chip.phyaddr;
2541 		break;
2542 
2543 	case ETHER_STAT_XCVR_ID:
2544 		/*
2545 		 * MII transceiver manufacturer and device ID.
2546 		 */
2547 		v = (vrp->chip.mii.identh << 16) | vrp->chip.mii.identl;
2548 		break;
2549 
2550 	case ETHER_STAT_XCVR_INUSE:
2551 		v = vrp->chip.link.mau;
2552 		break;
2553 
2554 	case MAC_STAT_BRDCSTRCV:
2555 		v = vrp->stats.mac_stat_brdcstrcv;
2556 		break;
2557 
2558 	case MAC_STAT_BRDCSTXMT:
2559 		v = vrp->stats.mac_stat_brdcstxmt;
2560 		break;
2561 
2562 	case MAC_STAT_MULTIXMT:
2563 		v = vrp->stats.mac_stat_multixmt;
2564 		break;
2565 
2566 	case MAC_STAT_COLLISIONS:
2567 		v = vrp->stats.mac_stat_collisions;
2568 		break;
2569 
2570 	case MAC_STAT_IERRORS:
2571 		v = vrp->stats.mac_stat_ierrors;
2572 		break;
2573 
2574 	case MAC_STAT_IFSPEED:
2575 		if (vrp->chip.link.speed == VR_LINK_SPEED_100MBS)
2576 			v = 100 * 1000 * 1000;
2577 		else if (vrp->chip.link.speed == VR_LINK_SPEED_10MBS)
2578 			v = 10 * 1000 * 1000;
2579 		else
2580 			v = 0;
2581 		break;
2582 
2583 	case MAC_STAT_IPACKETS:
2584 		v = vrp->stats.mac_stat_ipackets;
2585 		break;
2586 
2587 	case MAC_STAT_MULTIRCV:
2588 		v = vrp->stats.mac_stat_multircv;
2589 		break;
2590 
2591 	case MAC_STAT_NORCVBUF:
2592 		vrp->stats.mac_stat_norcvbuf +=
2593 		    VR_GET16(vrp->acc_reg, VR_TALLY_MPA);
2594 		VR_PUT16(vrp->acc_reg, VR_TALLY_MPA, 0);
2595 		v = vrp->stats.mac_stat_norcvbuf;
2596 		break;
2597 
2598 	case MAC_STAT_NOXMTBUF:
2599 		v = vrp->stats.mac_stat_noxmtbuf;
2600 		break;
2601 
2602 	case MAC_STAT_OBYTES:
2603 		v = vrp->stats.mac_stat_obytes;
2604 		break;
2605 
2606 	case MAC_STAT_OERRORS:
2607 		v = vrp->stats.ether_stat_macxmt_errors +
2608 		    vrp->stats.mac_stat_underflows +
2609 		    vrp->stats.ether_stat_align_errors +
2610 		    vrp->stats.ether_stat_carrier_errors +
2611 		    vrp->stats.ether_stat_fcs_errors;
2612 		break;
2613 
2614 	case MAC_STAT_OPACKETS:
2615 		v = vrp->stats.mac_stat_opackets;
2616 		break;
2617 
2618 	case MAC_STAT_RBYTES:
2619 		v = vrp->stats.mac_stat_rbytes;
2620 		break;
2621 
2622 	case MAC_STAT_UNKNOWNS:
2623 		/*
2624 		 * Isn't this something for the MAC layer to maintain?
2625 		 */
2626 		return (ENOTSUP);
2627 
2628 	case MAC_STAT_UNDERFLOWS:
2629 		v = vrp->stats.mac_stat_underflows;
2630 		break;
2631 
2632 	case MAC_STAT_OVERFLOWS:
2633 		v = vrp->stats.mac_stat_overflows;
2634 		break;
2635 	}
2636 	*val = v;
2637 	return (0);
2638 }
2639 
2640 int
2641 vr_mac_set_ether_addr(void *p, const uint8_t *ea)
2642 {
2643 	vr_t	*vrp;
2644 	int	i;
2645 
2646 	vrp = (vr_t *)p;
2647 	mutex_enter(&vrp->oplock);
2648 	mutex_enter(&vrp->intrlock);
2649 
2650 	/*
2651 	 * Set a new station address.
2652 	 */
2653 	for (i = 0; i < ETHERADDRL; i++)
2654 		VR_PUT8(vrp->acc_reg, VR_ETHERADDR + i, ea[i]);
2655 
2656 	mutex_exit(&vrp->intrlock);
2657 	mutex_exit(&vrp->oplock);
2658 	return (0);
2659 }
2660 
2661 /*
2662  * Configure the ethernet link according to param and chip.mii.
2663  */
2664 static void
2665 vr_link_init(vr_t *vrp)
2666 {
2667 	ASSERT(mutex_owned(&vrp->oplock));
2668 	if ((vrp->chip.mii.control & MII_CONTROL_ANE) != 0) {
2669 		/*
2670 		 * If we do autoneg, ensure restart autoneg is ON.
2671 		 */
2672 		vrp->chip.mii.control |= MII_CONTROL_RSAN;
2673 
2674 		/*
2675 		 * The advertisements are prepared by param_init.
2676 		 */
2677 		vr_phy_write(vrp, MII_AN_ADVERT, vrp->chip.mii.anadv);
2678 	} else {
2679 		/*
2680 		 * If we don't autoneg, we need speed, duplex and flowcontrol
2681 		 * to configure the link. However, dladm doesn't allow changes
2682 		 * to speed and duplex (readonly). The way this is solved
2683 		 * (ahem) is to select the highest enabled combination
2684 		 * Speed and duplex should be r/w when autoneg is off.
2685 		 */
2686 		if ((vrp->param.anadv_en &
2687 		    MII_ABILITY_100BASE_TX_FD) != 0) {
2688 			vrp->chip.mii.control |= MII_CONTROL_100MB;
2689 			vrp->chip.mii.control |= MII_CONTROL_FDUPLEX;
2690 		} else if ((vrp->param.anadv_en &
2691 		    MII_ABILITY_100BASE_TX) != 0) {
2692 			vrp->chip.mii.control |= MII_CONTROL_100MB;
2693 			vrp->chip.mii.control &= ~MII_CONTROL_FDUPLEX;
2694 		} else if ((vrp->param.anadv_en &
2695 		    MII_ABILITY_10BASE_T_FD) != 0) {
2696 			vrp->chip.mii.control |= MII_CONTROL_FDUPLEX;
2697 			vrp->chip.mii.control &= ~MII_CONTROL_100MB;
2698 		} else {
2699 			vrp->chip.mii.control &= ~MII_CONTROL_100MB;
2700 			vrp->chip.mii.control &= ~MII_CONTROL_FDUPLEX;
2701 		}
2702 	}
2703 	/*
2704 	 * Write the control register.
2705 	 */
2706 	vr_phy_write(vrp, MII_CONTROL, vrp->chip.mii.control);
2707 
2708 	/*
2709 	 * With autoneg off we cannot rely on the link_change interrupt
2710 	 * for getting the status into the driver.
2711 	 */
2712 	if ((vrp->chip.mii.control & MII_CONTROL_ANE) == 0) {
2713 		vr_link_state(vrp);
2714 		mac_link_update(vrp->machdl,
2715 		    (link_state_t)vrp->chip.link.state);
2716 	}
2717 }
2718 
2719 /*
2720  * Get link state in the driver and configure the MAC accordingly.
2721  */
2722 static void
2723 vr_link_state(vr_t *vrp)
2724 {
2725 	uint16_t		mask;
2726 
2727 	ASSERT(mutex_owned(&vrp->oplock));
2728 
2729 	vr_phy_read(vrp, MII_STATUS, &vrp->chip.mii.status);
2730 	vr_phy_read(vrp, MII_CONTROL, &vrp->chip.mii.control);
2731 	vr_phy_read(vrp, MII_AN_ADVERT, &vrp->chip.mii.anadv);
2732 	vr_phy_read(vrp, MII_AN_LPABLE, &vrp->chip.mii.lpable);
2733 	vr_phy_read(vrp, MII_AN_EXPANSION, &vrp->chip.mii.anexp);
2734 
2735 	/*
2736 	 * If we did autoneg, deduce the link type/speed by selecting the
2737 	 * highest common denominator.
2738 	 */
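	/*
	 * For example: if we advertise 100FDX, 100HDX, 10FDX and 10HDX but
	 * the link partner only advertises 100HDX and 10FDX, the common mask
	 * resolves to 100 Mb/s half duplex in the cascade below.
	 */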
2739 	if ((vrp->chip.mii.control & MII_CONTROL_ANE) != 0) {
2740 		mask = vrp->chip.mii.anadv & vrp->chip.mii.lpable;
2741 		if ((mask & MII_ABILITY_100BASE_TX_FD) != 0) {
2742 			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
2743 			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
2744 			vrp->chip.link.mau = VR_MAU_100X;
2745 		} else if ((mask & MII_ABILITY_100BASE_T4) != 0) {
2746 			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
2747 			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
2748 			vrp->chip.link.mau = VR_MAU_100T4;
2749 		} else if ((mask & MII_ABILITY_100BASE_TX) != 0) {
2750 			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
2751 			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
2752 			vrp->chip.link.mau = VR_MAU_100X;
2753 		} else if ((mask & MII_ABILITY_10BASE_T_FD) != 0) {
2754 			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
2755 			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
2756 			vrp->chip.link.mau = VR_MAU_10;
2757 		} else if ((mask & MII_ABILITY_10BASE_T) != 0) {
2758 			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
2759 			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
2760 			vrp->chip.link.mau = VR_MAU_10;
2761 		} else {
2762 			vrp->chip.link.speed = VR_LINK_SPEED_UNKNOWN;
2763 			vrp->chip.link.duplex = VR_LINK_DUPLEX_UNKNOWN;
2764 			vrp->chip.link.mau = VR_MAU_UNKNOWN;
2765 		}
2766 
2767 		/*
2768 		 * Did we negotiate pause?
2769 		 */
2770 		if ((mask & MII_ABILITY_PAUSE) != 0 &&
2771 		    vrp->chip.link.duplex == VR_LINK_DUPLEX_FULL)
2772 			vrp->chip.link.flowctrl = VR_PAUSE_BIDIRECTIONAL;
2773 		else
2774 			vrp->chip.link.flowctrl = VR_PAUSE_NONE;
2775 
2776 		/*
2777 		 * Did either one detect an AN fault?
2778 		 */
2779 		if ((vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0)
2780 			vr_log(vrp, CE_WARN,
2781 			    "AN remote fault reported by LP.");
2782 
2783 		if ((vrp->chip.mii.lpable & MII_AN_ADVERT_REMFAULT) != 0)
2784 			vr_log(vrp, CE_WARN, "AN remote fault caused for LP.");
2785 	} else {
2786 		/*
2787 		 * We didn't autoneg
2788 		 * The link type is defined by the control register.
2789 		 */
2790 		if ((vrp->chip.mii.control & MII_CONTROL_100MB) != 0) {
2791 			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
2792 			vrp->chip.link.mau = VR_MAU_100X;
2793 		} else {
2794 			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
2795 			vrp->chip.link.mau = VR_MAU_10;
2796 		}
2797 
2798 		if ((vrp->chip.mii.control & MII_CONTROL_FDUPLEX) != 0)
2799 			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
2800 		else {
2801 			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
2802 			/*
2803 			 * No pause on HDX links.
2804 			 */
2805 			vrp->chip.link.flowctrl = VR_PAUSE_NONE;
2806 		}
2807 	}
2808 
2809 	/*
2810 	 * Set the duplex mode on the MAC according to that of the PHY.
2811 	 */
2812 	if (vrp->chip.link.duplex == VR_LINK_DUPLEX_FULL) {
2813 		VR_SETBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_MACFULLDUPLEX);
2814 		/*
2815 		 * Enable packet queueing on FDX links.
2816 		 */
2817 		if ((vrp->chip.info.bugs & VR_BUG_NO_TXQUEUEING) == 0)
2818 			VR_CLRBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_QPKTDIS);
2819 	} else {
2820 		VR_CLRBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_MACFULLDUPLEX);
2821 		/*
2822 		 * Disable packet queueing on HDX links. With queueing enabled,
2823 		 * this MAC gets lost after a TX abort (too many collisions).
2824 		 */
2825 		VR_SETBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_QPKTDIS);
2826 	}
2827 
2828 	/*
2829 	 * Set pause options on the MAC.
2830 	 */
2831 	if (vrp->chip.link.flowctrl == VR_PAUSE_BIDIRECTIONAL) {
2832 		/*
2833 		 * All of our MACs can receive pause frames.
2834 		 */
2835 		VR_SETBIT8(vrp->acc_reg, VR_MISC0, VR_MISC0_FDXRFEN);
2836 
2837 		/*
2838 		 * VT6105 and above can transmit pause frames.
2839 		 */
2840 		if ((vrp->chip.info.features & VR_FEATURE_TX_PAUSE_CAP) != 0) {
2841 			/*
2842 			 * Set the number of available receive descriptors.
2843 			 * Non-zero values written to this register are added to the
2844 			 * register's contents, hence the current contents are subtracted
2845 			 * first. Careful: writing zero clears the register and thus
2846 			 * causes a (long) pause request.
2847 			 */
2848 			VR_PUT8(vrp->acc_reg, VR_FCR0_RXBUFCOUNT,
2849 			    MIN(vrp->rx.ndesc, 0xFF) -
2850 			    VR_GET8(vrp->acc_reg,
2851 			    VR_FCR0_RXBUFCOUNT));
2852 
2853 			/*
2854 			 * Request pause when we have 4 descs left.
2855 			 */
2856 			VR_SETBITS8(vrp->acc_reg, VR_FCR1,
2857 			    VR_FCR1_PAUSEONBITS, VR_FCR1_PAUSEON_04);
2858 
2859 			/*
2860 			 * Cancel the pause when there are 24 descriptors again.
2861 			 */
2862 			VR_SETBITS8(vrp->acc_reg, VR_FCR1,
2863 			    VR_FCR1_PAUSEOFFBITS, VR_FCR1_PAUSEOFF_24);
2864 
2865 			/*
2866 			 * Request a pause of FFFF bit-times. This long pause
2867 			 * is cancelled when the high watermark is reached.
2868 			 */
2869 			VR_PUT16(vrp->acc_reg, VR_FCR2_PAUSE, 0xFFFF);
2870 
2871 			/*
2872 			 * Enable flow control on the MAC.
2873 			 */
2874 			VR_SETBIT8(vrp->acc_reg, VR_MISC0, VR_MISC0_FDXTFEN);
2875 			VR_SETBIT8(vrp->acc_reg, VR_FCR1, VR_FCR1_FD_RX_EN |
2876 			    VR_FCR1_FD_TX_EN | VR_FCR1_XONXOFF_EN);
2877 		}
2878 	} else {
2879 		/*
2880 		 * Turn flow control OFF.
2881 		 */
2882 		VR_CLRBIT8(vrp->acc_reg,
2883 		    VR_MISC0, VR_MISC0_FDXRFEN | VR_MISC0_FDXTFEN);
2884 		if ((vrp->chip.info.features & VR_FEATURE_TX_PAUSE_CAP) != 0) {
2885 			VR_CLRBIT8(vrp->acc_reg, VR_FCR1,
2886 			    VR_FCR1_FD_RX_EN | VR_FCR1_FD_TX_EN |
2887 			    VR_FCR1_XONXOFF_EN);
2888 		}
2889 	}
2890 
2891 	/*
2892 	 * Set link state.
2893 	 */
2894 	if ((vrp->chip.mii.status & MII_STATUS_LINKUP) != 0)
2895 		vrp->chip.link.state = VR_LINK_STATE_UP;
2896 	else
2897 		vrp->chip.link.state = VR_LINK_STATE_DOWN;
2898 }
2899 
2900 /*
2901  * The PHY is automatically polled by the MAC once per 1024 MD clock cycles.
2902  * MD is clocked once per 960 ns, so a poll happens roughly every millisecond
2903  * (1024 * 960 ns ~= 0.98 ms), about 1000 times per second.
2904  * This polling process is required for the functionality of the link change
2905  * interrupt, but it must be disabled in order to access PHY registers
2906  * using MDIO.
2907  *
2908  * Turn off PHY polling so that the PHY registers can be accessed.
2909  */
2910 static void
2911 vr_phy_autopoll_disable(vr_t *vrp)
2912 {
2913 	uint32_t	time;
2914 	uint8_t		miicmd, miiaddr;
2915 
2916 	/*
2917 	 * Special procedure to stop the autopolling.
2918 	 */
2919 	if ((vrp->chip.info.bugs & VR_BUG_MIIPOLLSTOP) != 0) {
2920 		/*
2921 		 * If polling is enabled.
2922 		 */
2923 		miicmd = VR_GET8(vrp->acc_reg, VR_MIICMD);
2924 		if ((miicmd & VR_MIICMD_MD_AUTO) != 0) {
2925 			/*
2926 			 * Wait for the end of a cycle (mdone set).
2927 			 */
2928 			time = 0;
2929 			do {
2930 				drv_usecwait(10);
2931 				if (time >= VR_MMI_WAITMAX) {
2932 					vr_log(vrp, CE_WARN,
2933 					    "Timeout in "
2934 					    "disable MII polling");
2935 					break;
2936 				}
2937 				time += VR_MMI_WAITINCR;
2938 				miiaddr = VR_GET8(vrp->acc_reg, VR_MIIADDR);
2939 			} while ((miiaddr & VR_MIIADDR_MDONE) == 0);
2940 		}
2941 		/*
2942 		 * Once paused, we can disable autopolling.
2943 		 */
2944 		VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
2945 	} else {
2946 		/*
2947 		 * Turn off MII polling.
2948 		 */
2949 		VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
2950 
2951 		/*
2952 		 * Wait for MIDLE in MII address register.
2953 		 */
2954 		time = 0;
2955 		do {
2956 			drv_usecwait(VR_MMI_WAITINCR);
2957 			if (time >= VR_MMI_WAITMAX) {
2958 				vr_log(vrp, CE_WARN,
2959 				    "Timeout in disable MII polling");
2960 				break;
2961 			}
2962 			time += VR_MMI_WAITINCR;
2963 			miiaddr = VR_GET8(vrp->acc_reg, VR_MIIADDR);
2964 		} while ((miiaddr & VR_MIIADDR_MIDLE) == 0);
2965 	}
2966 }
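
/*
 * Sketch only (kept out of the build): the MDIO wait loops in the routine
 * above and in the PHY access routines below all follow the same
 * "poll a bit with a bounded busy-wait" pattern; a hypothetical shared
 * helper, not part of this driver, would look like this.
 */
#if 0
static boolean_t
vr_mii_wait(vr_t *vrp, uint8_t reg, uint8_t bits, boolean_t set)
{
	uint32_t	time;
	uint8_t		val;

	for (time = 0; time < VR_MMI_WAITMAX; time += VR_MMI_WAITINCR) {
		val = VR_GET8(vrp->acc_reg, reg);
		if (((val & bits) != 0) == set)
			return (B_TRUE);
		drv_usecwait(VR_MMI_WAITINCR);
	}
	return (B_FALSE);		/* timed out */
}
#endif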
2967 
2968 /*
2969  * Turn on PHY polling. The PHY's registers cannot be accessed while it is on.
2970  */
2971 static void
2972 vr_phy_autopoll_enable(vr_t *vrp)
2973 {
2974 	uint32_t	time;
2975 
2976 	VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
2977 	VR_PUT8(vrp->acc_reg, VR_MIIADDR, MII_STATUS|VR_MIIADDR_MAUTO);
2978 	VR_PUT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_AUTO);
2979 
2980 	/*
2981 	 * Wait for the polling process to finish.
2982 	 */
2983 	time = 0;
2984 	do {
2985 		drv_usecwait(VR_MMI_WAITINCR);
2986 		if (time >= VR_MMI_WAITMAX) {
2987 			vr_log(vrp, CE_NOTE, "Timeout in enable MII polling");
2988 			break;
2989 		}
2990 		time += VR_MMI_WAITINCR;
2991 	} while ((VR_GET8(vrp->acc_reg, VR_MIIADDR) & VR_MIIADDR_MDONE) == 0);
2992 
2993 	/*
2994 	 * Initiate a polling cycle.
2995 	 */
2996 	VR_SETBIT8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_MAUTO);
2997 }
2998 
2999 /*
3000  * Read a register from the PHY using MDIO.
3001  */
3002 static void
3003 vr_phy_read(vr_t *vrp, int offset, uint16_t *value)
3004 {
3005 	uint32_t	time;
3006 
3007 	vr_phy_autopoll_disable(vrp);
3008 
3009 	/*
3010 	 * Write the register number to the lower 5 bits of the MII address
3011 	 * register.
3012 	 */
3013 	VR_SETBITS8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_BITS, offset);
3014 
3015 	/*
3016 	 * Write a READ command to the MII control register
3017 	 * This bit will be cleared when the read is finished.
3018 	 */
3019 	VR_SETBIT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_READ);
3020 
3021 	/*
3022 	 * Wait until the read is done.
3023 	 */
3024 	time = 0;
3025 	do {
3026 		drv_usecwait(VR_MMI_WAITINCR);
3027 		if (time >= VR_MMI_WAITMAX) {
3028 			vr_log(vrp, CE_NOTE, "Timeout in MII read command");
3029 			break;
3030 		}
3031 		time += VR_MMI_WAITINCR;
3032 	} while ((VR_GET8(vrp->acc_reg, VR_MIICMD) & VR_MIICMD_MD_READ) != 0);
3033 
3034 	*value = VR_GET16(vrp->acc_reg, VR_MIIDATA);
3035 	vr_phy_autopoll_enable(vrp);
3036 }
3037 
3038 /*
3039  * Write to a PHY's register.
3040  */
3041 static void
3042 vr_phy_write(vr_t *vrp, int offset, uint16_t value)
3043 {
3044 	uint32_t	time;
3045 
3046 	vr_phy_autopoll_disable(vrp);
3047 
3048 	/*
3049 	 * Write the register number to the MII address register.
3050 	 */
3051 	VR_SETBITS8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_BITS, offset);
3052 
3053 	/*
3054 	 * Write the value to the data register.
3055 	 */
3056 	VR_PUT16(vrp->acc_reg, VR_MIIDATA, value);
3057 
3058 	/*
3059 	 * Issue the WRITE command to the command register.
3060 	 * This bit will be cleared when the write is finished.
3061 	 */
3062 	VR_SETBIT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_WRITE);
3063 
3064 	time = 0;
3065 	do {
3066 		drv_usecwait(VR_MMI_WAITINCR);
3067 		if (time >= VR_MMI_WAITMAX) {
3068 			vr_log(vrp, CE_NOTE, "Timeout in MII write command");
3069 			break;
3070 		}
3071 		time += VR_MMI_WAITINCR;
3072 	} while ((VR_GET8(vrp->acc_reg, VR_MIICMD) & VR_MIICMD_MD_WRITE) != 0);
3073 	vr_phy_autopoll_enable(vrp);
3074 }
3075 
3076 /*
3077  * Initialize and install some private kstats.
3078  */
3079 typedef struct {
3080 	char		*name;
3081 	uchar_t		type;
3082 } vr_kstat_t;
3083 
3084 static const vr_kstat_t vr_driver_stats [] = {
3085 	{"allocbfail",		KSTAT_DATA_INT32},
3086 	{"intr_claimed",	KSTAT_DATA_INT64},
3087 	{"intr_unclaimed",	KSTAT_DATA_INT64},
3088 	{"linkchanges",		KSTAT_DATA_INT64},
3089 	{"txnfree",		KSTAT_DATA_INT32},
3090 	{"txstalls",		KSTAT_DATA_INT32},
3091 	{"resets",		KSTAT_DATA_INT32},
3092 	{"txreclaims",		KSTAT_DATA_INT64},
3093 	{"txreclaim0",		KSTAT_DATA_INT64},
3094 	{"cyclics",		KSTAT_DATA_INT64},
3095 	{"txchecks",		KSTAT_DATA_INT64},
3096 };
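
/*
 * Note: vr_update_kstats() below fills these entries positionally, so the
 * order and types in this table must match the order of the assignments
 * there.
 */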
3097 
3098 static void
3099 vr_kstats_init(vr_t *vrp)
3100 {
3101 	kstat_t			*ksp;
3102 	struct	kstat_named	*knp;
3103 	int			i;
3104 	int			nstats;
3105 
3106 	nstats = sizeof (vr_driver_stats) / sizeof (vr_kstat_t);
3107 
3108 	ksp = kstat_create(MODULENAME, ddi_get_instance(vrp->devinfo),
3109 	    "driver", "net", KSTAT_TYPE_NAMED, nstats, 0);
3110 
3111 	if (ksp == NULL) {
3112 		vr_log(vrp, CE_WARN, "kstat_create failed");
		return;
	}
3113 
3114 	ksp->ks_update = vr_update_kstats;
3115 	ksp->ks_private = (void*) vrp;
3116 	knp = ksp->ks_data;
3117 
3118 	for (i = 0; i < nstats; i++, knp++) {
3119 		kstat_named_init(knp, vr_driver_stats[i].name,
3120 		    vr_driver_stats[i].type);
3121 	}
3122 	kstat_install(ksp);
3123 	vrp->ksp = ksp;
3124 }
3125 
3126 static int
3127 vr_update_kstats(kstat_t *ksp, int access)
3128 {
3129 	vr_t			*vrp;
3130 	struct kstat_named	*knp;
3131 
3132 	vrp = (vr_t *)ksp->ks_private;
3133 	knp = ksp->ks_data;
3134 
3135 	if (access != KSTAT_READ)
3136 		return (EACCES);
3137 
3138 	(knp++)->value.ui32 = vrp->stats.allocbfail;
3139 	(knp++)->value.ui64 = vrp->stats.intr_claimed;
3140 	(knp++)->value.ui64 = vrp->stats.intr_unclaimed;
3141 	(knp++)->value.ui64 = vrp->stats.linkchanges;
3142 	(knp++)->value.ui32 = vrp->tx.nfree;
3143 	(knp++)->value.ui32 = vrp->stats.txstalls;
3144 	(knp++)->value.ui32 = vrp->stats.resets;
3145 	(knp++)->value.ui64 = vrp->stats.txreclaims;
3146 	(knp++)->value.ui64 = vrp->stats.txreclaim0;
3147 	(knp++)->value.ui64 = vrp->stats.cyclics;
3148 	(knp++)->value.ui64 = vrp->stats.txchecks;
3149 	return (0);
3150 }
3151 
3152 /*
3153  * Remove 'private' kstats.
3154  */
3155 static void
3156 vr_remove_kstats(vr_t *vrp)
3157 {
3158 	if (vrp->ksp != NULL)
3159 		kstat_delete(vrp->ksp);
3160 }
3161 
3162 /*
3163  * Get a property of the device/driver
3164  * Remarks:
3165  * - pr_val is always an integer of size pr_valsize
3166  * - ENABLED (EN) is what is configured via dladm
3167  * - ADVERTISED (ADV) is ENABLED minus constraints, like PHY/MAC capabilities
3168  * - DEFAULT are driver- and hardware defaults (DEFAULT is implemented as a
3169  *   flag in pr_flags instead of MAC_PROP_DEFAULT_)
3170  * - perm is the permission printed on ndd -get /.. \?
3171  */
3172 int
3173 vr_mac_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3174     uint_t pr_valsize, void *pr_val)
3175 {
3176 	vr_t		*vrp;
3177 	uint32_t	err;
3178 	uint64_t	val;
3179 
3180 	/* Since we have no private properties */
3181 	_NOTE(ARGUNUSED(pr_name))
3182 
3183 	err = 0;
3184 	vrp = (vr_t *)arg;
3185 	switch (pr_num) {
3186 		case MAC_PROP_ADV_1000FDX_CAP:
3187 		case MAC_PROP_ADV_1000HDX_CAP:
3188 		case MAC_PROP_EN_1000FDX_CAP:
3189 		case MAC_PROP_EN_1000HDX_CAP:
3190 			val = 0;
3191 			break;
3192 
3193 		case MAC_PROP_ADV_100FDX_CAP:
3194 			val = (vrp->chip.mii.anadv &
3195 			    MII_ABILITY_100BASE_TX_FD) != 0;
3196 			break;
3197 
3198 		case MAC_PROP_ADV_100HDX_CAP:
3199 			val = (vrp->chip.mii.anadv &
3200 			    MII_ABILITY_100BASE_TX) != 0;
3201 			break;
3202 
3203 		case MAC_PROP_ADV_100T4_CAP:
3204 			val = (vrp->chip.mii.anadv &
3205 			    MII_ABILITY_100BASE_T4) != 0;
3206 			break;
3207 
3208 		case MAC_PROP_ADV_10FDX_CAP:
3209 			val = (vrp->chip.mii.anadv &
3210 			    MII_ABILITY_10BASE_T_FD) != 0;
3211 			break;
3212 
3213 		case MAC_PROP_ADV_10HDX_CAP:
3214 			val = (vrp->chip.mii.anadv &
3215 			    MII_ABILITY_10BASE_T) != 0;
3216 			break;
3217 
3218 		case MAC_PROP_AUTONEG:
3219 			val = (vrp->chip.mii.control &
3220 			    MII_CONTROL_ANE) != 0;
3221 			break;
3222 
3223 		case MAC_PROP_DUPLEX:
3224 			val = vrp->chip.link.duplex;
3225 			break;
3226 
3227 		case MAC_PROP_EN_100FDX_CAP:
3228 			val = (vrp->param.anadv_en &
3229 			    MII_ABILITY_100BASE_TX_FD) != 0;
3230 			break;
3231 
3232 		case MAC_PROP_EN_100HDX_CAP:
3233 			val = (vrp->param.anadv_en &
3234 			    MII_ABILITY_100BASE_TX) != 0;
3235 			break;
3236 
3237 		case MAC_PROP_EN_100T4_CAP:
3238 			val = (vrp->param.anadv_en &
3239 			    MII_ABILITY_100BASE_T4) != 0;
3240 			break;
3241 
3242 		case MAC_PROP_EN_10FDX_CAP:
3243 			val = (vrp->param.anadv_en &
3244 			    MII_ABILITY_10BASE_T_FD) != 0;
3245 			break;
3246 
3247 		case MAC_PROP_EN_10HDX_CAP:
3248 			val = (vrp->param.anadv_en &
3249 			    MII_ABILITY_10BASE_T) != 0;
3250 			break;
3251 
3252 		case MAC_PROP_EN_AUTONEG:
3253 			val = vrp->param.an_en == VR_LINK_AUTONEG_ON;
3254 			break;
3255 
3256 		case MAC_PROP_FLOWCTRL:
3257 			val = vrp->chip.link.flowctrl;
3258 			break;
3259 
3260 		case MAC_PROP_MTU:
3261 			val = vrp->param.mtu;
3262 			break;
3263 
3264 		case MAC_PROP_SPEED:
3265 			if (vrp->chip.link.speed ==
3266 			    VR_LINK_SPEED_100MBS)
3267 				val = 100 * 1000 * 1000;
3268 			else if (vrp->chip.link.speed ==
3269 			    VR_LINK_SPEED_10MBS)
3270 				val = 10 * 1000 * 1000;
3271 			else
3272 				val = 0;
3273 			break;
3274 
3275 		case MAC_PROP_STATUS:
3276 			val = vrp->chip.link.state;
3277 			break;
3278 
3279 		default:
3280 			err = ENOTSUP;
3281 			break;
3282 	}
3283 
3284 	if (err == 0 && pr_num != MAC_PROP_PRIVATE) {
3285 		if (pr_valsize == sizeof (uint64_t))
3286 			*(uint64_t *)pr_val = val;
3287 		else if (pr_valsize == sizeof (uint32_t))
3288 			*(uint32_t *)pr_val = val;
3289 		else if (pr_valsize == sizeof (uint16_t))
3290 			*(uint16_t *)pr_val = val;
3291 		else if (pr_valsize == sizeof (uint8_t))
3292 			*(uint8_t *)pr_val = val;
3293 		else
3294 			err = EINVAL;
3295 	}
3296 	return (err);
3297 }
3298 
3299 void
3300 vr_mac_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3301     mac_prop_info_handle_t prh)
3302 {
3303 	vr_t		*vrp = (vr_t *)arg;
3304 	uint8_t		val, perm;
3305 
3306 	/* Since we have no private properties */
3307 	_NOTE(ARGUNUSED(pr_name))
3308 
3309 	switch (pr_num) {
3310 		case MAC_PROP_ADV_1000FDX_CAP:
3311 		case MAC_PROP_ADV_1000HDX_CAP:
3312 		case MAC_PROP_EN_1000FDX_CAP:
3313 		case MAC_PROP_EN_1000HDX_CAP:
3314 		case MAC_PROP_ADV_100FDX_CAP:
3315 		case MAC_PROP_ADV_100HDX_CAP:
3316 		case MAC_PROP_ADV_100T4_CAP:
3317 		case MAC_PROP_ADV_10FDX_CAP:
3318 		case MAC_PROP_ADV_10HDX_CAP:
3319 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3320 			return;
3321 
3322 		case MAC_PROP_EN_100FDX_CAP:
3323 			val = (vrp->chip.mii.status &
3324 			    MII_STATUS_100_BASEX_FD) != 0;
3325 			break;
3326 
3327 		case MAC_PROP_EN_100HDX_CAP:
3328 			val = (vrp->chip.mii.status &
3329 			    MII_STATUS_100_BASEX) != 0;
3330 			break;
3331 
3332 		case MAC_PROP_EN_100T4_CAP:
3333 			val = (vrp->chip.mii.status &
3334 			    MII_STATUS_100_BASE_T4) != 0;
3335 			break;
3336 
3337 		case MAC_PROP_EN_10FDX_CAP:
3338 			val = (vrp->chip.mii.status &
3339 			    MII_STATUS_10_FD) != 0;
3340 			break;
3341 
3342 		case MAC_PROP_EN_10HDX_CAP:
3343 			val = (vrp->chip.mii.status &
3344 			    MII_STATUS_10) != 0;
3345 			break;
3346 
3347 		case MAC_PROP_AUTONEG:
3348 		case MAC_PROP_EN_AUTONEG:
3349 			val = (vrp->chip.mii.status &
3350 			    MII_STATUS_CANAUTONEG) != 0;
3351 			break;
3352 
3353 		case MAC_PROP_FLOWCTRL:
3354 			mac_prop_info_set_default_link_flowctrl(prh,
3355 			    LINK_FLOWCTRL_BI);
3356 			return;
3357 
3358 		case MAC_PROP_MTU:
3359 			mac_prop_info_set_range_uint32(prh,
3360 			    ETHERMTU, ETHERMTU);
3361 			return;
3362 
3363 		case MAC_PROP_DUPLEX:
3364 			/*
3365 			 * Writability depends on autoneg.
3366 			 */
3367 			perm = ((vrp->chip.mii.control &
3368 			    MII_CONTROL_ANE) == 0) ? MAC_PROP_PERM_RW :
3369 			    MAC_PROP_PERM_READ;
3370 			mac_prop_info_set_perm(prh, perm);
3371 
3372 			if (perm == MAC_PROP_PERM_RW) {
3373 				mac_prop_info_set_default_uint8(prh,
3374 				    VR_LINK_DUPLEX_FULL);
3375 			}
3376 			return;
3377 
3378 		case MAC_PROP_SPEED:
3379 			perm = ((vrp->chip.mii.control &
3380 			    MII_CONTROL_ANE) == 0) ?
3381 			    MAC_PROP_PERM_RW : MAC_PROP_PERM_READ;
3382 			mac_prop_info_set_perm(prh, perm);
3383 
3384 			if (perm == MAC_PROP_PERM_RW) {
3385 				mac_prop_info_set_default_uint64(prh,
3386 				    100 * 1000 * 1000);
3387 			}
3388 			return;
3389 
3390 		case MAC_PROP_STATUS:
3391 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3392 			return;
3393 
3394 		default:
3395 			return;
3396 	}
3397 
3398 	mac_prop_info_set_default_uint8(prh, val);
3399 }
3400 
3401 /*
3402  * Set a property of the device.
3403  */
3404 int
3405 vr_mac_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3406     uint_t pr_valsize, const void *pr_val)
3407 {
3408 	vr_t		*vrp;
3409 	uint32_t	err;
3410 	uint64_t	val;
3411 
3412 	/* Since we have no private properties */
3413 	_NOTE(ARGUNUSED(pr_name))
3414 
3415 	err = 0;
3416 	vrp = (vr_t *)arg;
3417 	mutex_enter(&vrp->oplock);
3418 
3419 	/*
3420 	 * The current set of public property values is passed as integers;
3421 	 * private properties are passed as strings in pr_val, of length pr_valsize.
3422 	 */
3423 	if (pr_num != MAC_PROP_PRIVATE) {
3424 		if (pr_valsize == sizeof (uint64_t))
3425 			val = *(uint64_t *)pr_val;
3426 		else if (pr_valsize == sizeof (uint32_t))
3427 			val = *(uint32_t *)pr_val;
3428 		else if (pr_valsize == sizeof (uint16_t))
3429 			val = *(uint16_t *)pr_val;
3430 		else if (pr_valsize == sizeof (uint8_t))
3431 			val = *(uint8_t *)pr_val;
3432 		else {
3433 			mutex_exit(&vrp->oplock);
3434 			return (EINVAL);
3435 		}
3436 	}
3437 
3438 	switch (pr_num) {
3439 		case MAC_PROP_DUPLEX:
3440 			if ((vrp->chip.mii.control & MII_CONTROL_ANE) == 0) {
3441 				if (val == LINK_DUPLEX_FULL)
3442 					vrp->chip.mii.control |=
3443 					    MII_CONTROL_FDUPLEX;
3444 				else if (val == LINK_DUPLEX_HALF)
3445 					vrp->chip.mii.control &=
3446 					    ~MII_CONTROL_FDUPLEX;
3447 				else
3448 					err = EINVAL;
3449 			} else
3450 				err = EINVAL;
3451 			break;
3452 
3453 		case MAC_PROP_EN_100FDX_CAP:
3454 			if (val == 0)
3455 				vrp->param.anadv_en &=
3456 				    ~MII_ABILITY_100BASE_TX_FD;
3457 			else
3458 				vrp->param.anadv_en |=
3459 				    MII_ABILITY_100BASE_TX_FD;
3460 			break;
3461 
3462 		case MAC_PROP_EN_100HDX_CAP:
3463 			if (val == 0)
3464 				vrp->param.anadv_en &=
3465 				    ~MII_ABILITY_100BASE_TX;
3466 			else
3467 				vrp->param.anadv_en |=
3468 				    MII_ABILITY_100BASE_TX;
3469 			break;
3470 
3471 		case MAC_PROP_EN_100T4_CAP:
3472 			if (val == 0)
3473 				vrp->param.anadv_en &=
3474 				    ~MII_ABILITY_100BASE_T4;
3475 			else
3476 				vrp->param.anadv_en |=
3477 				    MII_ABILITY_100BASE_T4;
3478 			break;
3479 
3480 		case MAC_PROP_EN_10FDX_CAP:
3481 			if (val == 0)
3482 				vrp->param.anadv_en &=
3483 				    ~MII_ABILITY_10BASE_T_FD;
3484 			else
3485 				vrp->param.anadv_en |=
3486 				    MII_ABILITY_10BASE_T_FD;
3487 			break;
3488 
3489 		case MAC_PROP_EN_10HDX_CAP:
3490 			if (val == 0)
3491 				vrp->param.anadv_en &=
3492 				    ~MII_ABILITY_10BASE_T;
3493 			else
3494 				vrp->param.anadv_en |=
3495 				    MII_ABILITY_10BASE_T;
3496 			break;
3497 
3498 		case MAC_PROP_AUTONEG:
3499 		case MAC_PROP_EN_AUTONEG:
3500 			if (val == 0) {
3501 				vrp->param.an_en = VR_LINK_AUTONEG_OFF;
3502 				vrp->chip.mii.control &= ~MII_CONTROL_ANE;
3503 			} else {
3504 				vrp->param.an_en = VR_LINK_AUTONEG_ON;
3505 				if ((vrp->chip.mii.status &
3506 				    MII_STATUS_CANAUTONEG) != 0)
3507 					vrp->chip.mii.control |=
3508 					    MII_CONTROL_ANE;
3509 				else
3510 					err = EINVAL;
3511 			}
3512 			break;
3513 
3514 		case MAC_PROP_FLOWCTRL:
3515 			if (val == LINK_FLOWCTRL_NONE)
3516 				vrp->param.anadv_en &= ~MII_ABILITY_PAUSE;
3517 			else if (val == LINK_FLOWCTRL_BI)
3518 				vrp->param.anadv_en |= MII_ABILITY_PAUSE;
3519 			else
3520 				err = EINVAL;
3521 			break;
3522 
3523 		case MAC_PROP_MTU:
3524 			if (val >= ETHERMIN && val <= ETHERMTU)
3525 				vrp->param.mtu = (uint32_t)val;
3526 			else
3527 				err = EINVAL;
3528 			break;
3529 
3530 		case MAC_PROP_SPEED:
3531 			if (val == 10 * 1000 * 1000)
3532 				vrp->chip.link.speed =
3533 				    VR_LINK_SPEED_10MBS;
3534 			else if (val == 100 * 1000 * 1000)
3535 				vrp->chip.link.speed =
3536 				    VR_LINK_SPEED_100MBS;
3537 			else
3538 				err = EINVAL;
3539 			break;
3540 
3541 		default:
3542 			err = ENOTSUP;
3543 			break;
3544 	}
3545 	if (err == 0 && pr_num != MAC_PROP_PRIVATE) {
3546 		vrp->chip.mii.anadv = vrp->param.anadv_en &
3547 		    (vrp->param.an_phymask & vrp->param.an_macmask);
3548 		vr_link_init(vrp);
3549 	}
3550 	mutex_exit(&vrp->oplock);
3551 	return (err);
3552 }
3553 
3554 
3555 /*
3556  * Logging and debug functions.
3557  */
3558 static struct {
3559 	kmutex_t mutex[1];
3560 	const char *ifname;
3561 	const char *fmt;
3562 	int level;
3563 } prtdata;
3564 
3565 static void
3566 vr_vprt(const char *fmt, va_list args)
3567 {
3568 	char buf[512];
3569 
3570 	ASSERT(mutex_owned(prtdata.mutex));
3571 	(void) vsnprintf(buf, sizeof (buf), fmt, args);
3572 	cmn_err(prtdata.level, prtdata.fmt, prtdata.ifname, buf);
3573 }
3574 
3575 static void
3576 vr_log(vr_t *vrp, int level, const char *fmt, ...)
3577 {
3578 	va_list args;
3579 
3580 	mutex_enter(prtdata.mutex);
3581 	prtdata.ifname = vrp->ifname;
3582 	prtdata.fmt = "!%s: %s";
3583 	prtdata.level = level;
3584 
3585 	va_start(args, fmt);
3586 	vr_vprt(fmt, args);
3587 	va_end(args);
3588 
3589 	mutex_exit(prtdata.mutex);
3590 }
3591 
3592 #if defined(DEBUG)
3593 static void
3594 vr_prt(const char *fmt, ...)
3595 {
3596 	va_list args;
3597 
3598 	ASSERT(mutex_owned(prtdata.mutex));
3599 
3600 	va_start(args, fmt);
3601 	vr_vprt(fmt, args);
3602 	va_end(args);
3603 
3604 	mutex_exit(prtdata.mutex);
3605 }
3606 
3607 void
3608 (*vr_debug())(const char *fmt, ...)
3609 {
3610 	mutex_enter(prtdata.mutex);
3611 	prtdata.ifname = MODULENAME;
3612 	prtdata.fmt = "^%s: %s\n";
3613 	prtdata.level = CE_CONT;
3614 
3615 	return (vr_prt);
3616 }
3617 #endif	/* DEBUG */
3618 
3619 DDI_DEFINE_STREAM_OPS(vr_dev_ops, nulldev, nulldev, vr_attach, vr_detach,
3620     nodev, NULL, D_MP, NULL, vr_quiesce);
3621 
3622 static struct modldrv vr_modldrv = {
3623 	&mod_driverops,		/* Type of module. This one is a driver */
3624 	vr_ident,		/* short description */
3625 	&vr_dev_ops		/* driver specific ops */
3626 };
3627 
3628 static struct modlinkage modlinkage = {
3629 	MODREV_1, (void *)&vr_modldrv, NULL
3630 };
3631 
3632 int
3633 _info(struct modinfo *modinfop)
3634 {
3635 	return (mod_info(&modlinkage, modinfop));
3636 }
3637 
3638 int
3639 _init(void)
3640 {
3641 	int	status;
3642 
3643 	mac_init_ops(&vr_dev_ops, MODULENAME);
3644 	status = mod_install(&modlinkage);
3645 	if (status == DDI_SUCCESS)
3646 		mutex_init(prtdata.mutex, NULL, MUTEX_DRIVER, NULL);
3647 	else
3648 		mac_fini_ops(&vr_dev_ops);
3649 	return (status);
3650 }
3651 
3652 int
3653 _fini(void)
3654 {
3655 	int status;
3656 
3657 	status = mod_remove(&modlinkage);
3658 	if (status == 0) {
3659 		mac_fini_ops(&vr_dev_ops);
3660 		mutex_destroy(prtdata.mutex);
3661 	}
3662 	return (status);
3663 }
3664