xref: /titanic_41/usr/src/uts/common/io/vr/vr.c (revision 5815e35bec16e886bfd73f1badb4cc85ddcda92e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/stream.h>
29 #include <sys/strsun.h>
30 #include <sys/stat.h>
31 #include <sys/pci.h>
32 #include <sys/modctl.h>
33 #include <sys/kstat.h>
34 #include <sys/ethernet.h>
35 #include <sys/devops.h>
36 #include <sys/debug.h>
37 #include <sys/conf.h>
38 #include <sys/mac.h>
39 #include <sys/mac_provider.h>
40 #include <sys/mac_ether.h>
41 #include <sys/sysmacros.h>
42 #include <sys/dditypes.h>
43 #include <sys/ddi.h>
44 #include <sys/sunddi.h>
45 #include <sys/miiregs.h>
46 #include <sys/byteorder.h>
47 #include <sys/note.h>
48 #include <sys/vlan.h>
49 
50 #include "vr.h"
51 #include "vr_impl.h"
52 
53 /*
54  * VR in a nutshell
55  * The card uses two rings of data structures to communicate with the host.
56  * These are referred to as "descriptor rings" and there is one for transmit
57  * (TX) and one for receive (RX).
58  *
59  * The driver uses a "DMA buffer" data type for mapping to those descriptor
60  * rings. This is a structure with handles and a DMA'able buffer attached to it.
61  *
62  * Receive
63  * The receive ring is filled with DMA buffers. Received packets are copied into
64  * a newly allocated mblk's and passed upstream.
65  *
66  * Transmit
67  * Each transmit descriptor has a DMA buffer attached to it. The data of TX
68  * packets is copied into the DMA buffer which is then enqueued for
69  * transmission.
70  *
71  * Reclaim of transmitted packets is done as a result of a transmit completion
72  * interrupt which is generated 3 times per ring at minimum.
73  */
74 
75 #if defined(DEBUG)
76 uint32_t	vrdebug = 1;
77 #define	VR_DEBUG(args)	do {				\
78 		if (vrdebug > 0)			\
79 			(*vr_debug()) args;		\
80 			_NOTE(CONSTANTCONDITION)	\
81 		} while (0)
82 static	void	vr_prt(const char *fmt, ...);
83 	void	(*vr_debug())(const char *fmt, ...);
84 #else
85 #define	VR_DEBUG(args)	do ; _NOTE(CONSTANTCONDITION) while (0)
86 #endif
87 
88 static char vr_ident[] = "VIA Rhine Ethernet";
89 
90 /*
91  * Attributes for accessing registers and memory descriptors for this device.
92  */
93 static ddi_device_acc_attr_t vr_dev_dma_accattr = {
94 	DDI_DEVICE_ATTR_V0,
95 	DDI_STRUCTURE_LE_ACC,
96 	DDI_STRICTORDER_ACC
97 };
98 
99 /*
100  * Attributes for accessing data.
101  */
102 static ddi_device_acc_attr_t vr_data_dma_accattr = {
103 	DDI_DEVICE_ATTR_V0,
104 	DDI_NEVERSWAP_ACC,
105 	DDI_STRICTORDER_ACC
106 };
107 
108 /*
109  * DMA attributes for descriptors for communication with the device
110  * This driver assumes that all descriptors of one ring fit in one consequitive
111  * memory area of max 4K (256 descriptors) that does not cross a page boundary.
112  * Therefore, we request 4K alignement.
113  */
114 static ddi_dma_attr_t vr_dev_dma_attr = {
115 	DMA_ATTR_V0,			/* version number */
116 	0,				/* low DMA address range */
117 	0xFFFFFFFF,			/* high DMA address range */
118 	0x7FFFFFFF,			/* DMA counter register */
119 	0x1000,				/* DMA address alignment */
120 	0x7F,				/* DMA burstsizes */
121 	1,				/* min effective DMA size */
122 	0xFFFFFFFF,			/* max DMA xfer size */
123 	0xFFFFFFFF,			/* segment boundary */
124 	1,				/* s/g list length */
125 	1,				/* granularity of device */
126 	0				/* DMA transfer flags */
127 };
128 
129 /*
130  * DMA attributes for the data moved to/from the device
131  * Note that the alignement is set to 2K so hat a 1500 byte packet never
132  * crosses a page boundary and thus that a DMA transfer is not split up in
133  * multiple cookies with a 4K/8K pagesize
134  */
135 static ddi_dma_attr_t vr_data_dma_attr = {
136 	DMA_ATTR_V0,			/* version number */
137 	0,				/* low DMA address range */
138 	0xFFFFFFFF,			/* high DMA address range */
139 	0x7FFFFFFF,			/* DMA counter register */
140 	0x800,				/* DMA address alignment */
141 	0xfff,				/* DMA burstsizes */
142 	1,				/* min effective DMA size */
143 	0xFFFFFFFF,			/* max DMA xfer size */
144 	0xFFFFFFFF,			/* segment boundary */
145 	1,				/* s/g list length */
146 	1,				/* granularity of device */
147 	0				/* DMA transfer flags */
148 };
149 
150 static mac_callbacks_t vr_mac_callbacks = {
151 	MC_SETPROP|MC_GETPROP|MC_PROPINFO, /* Which callbacks are set */
152 	vr_mac_getstat,		/* Get the value of a statistic */
153 	vr_mac_start,		/* Start the device */
154 	vr_mac_stop,		/* Stop the device */
155 	vr_mac_set_promisc,	/* Enable or disable promiscuous mode */
156 	vr_mac_set_multicast,	/* Enable or disable a multicast addr */
157 	vr_mac_set_ether_addr,	/* Set the unicast MAC address */
158 	vr_mac_tx_enqueue_list,	/* Transmit a packet */
159 	NULL,
160 	NULL,			/* Process an unknown ioctl */
161 	NULL,			/* Get capability information */
162 	NULL,			/* Open the device */
163 	NULL,			/* Close the device */
164 	vr_mac_setprop,		/* Set properties of the device */
165 	vr_mac_getprop,		/* Get properties of the device */
166 	vr_mac_propinfo		/* Get properties attributes */
167 };
168 
169 /*
170  * Table with bugs and features for each incarnation of the card.
171  */
172 static const chip_info_t vr_chip_info [] = {
173 	{
174 		0x0, 0x0,
175 		"VIA Rhine Fast Ethernet",
176 		(VR_BUG_NO_MEMIO),
177 		(VR_FEATURE_NONE)
178 	},
179 	{
180 		0x04, 0x21,
181 		"VIA VT86C100A Fast Ethernet",
182 		(VR_BUG_NEEDMODE2PCEROPT | VR_BUG_NO_TXQUEUEING |
183 		    VR_BUG_NEEDMODE10T | VR_BUG_TXALIGN | VR_BUG_NO_MEMIO |
184 		    VR_BUG_MIIPOLLSTOP),
185 		(VR_FEATURE_NONE)
186 	},
187 	{
188 		0x40, 0x41,
189 		"VIA VT6102-A Rhine II Fast Ethernet",
190 		(VR_BUG_NEEDMODE2PCEROPT),
191 		(VR_FEATURE_RX_PAUSE_CAP)
192 	},
193 	{
194 		0x42, 0x7f,
195 		"VIA VT6102-C Rhine II Fast Ethernet",
196 		(VR_BUG_NEEDMODE2PCEROPT),
197 		(VR_FEATURE_RX_PAUSE_CAP)
198 	},
199 	{
200 		0x80, 0x82,
201 		"VIA VT6105-A Rhine III Fast Ethernet",
202 		(VR_BUG_NONE),
203 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
204 	},
205 	{
206 		0x83, 0x89,
207 		"VIA VT6105-B Rhine III Fast Ethernet",
208 		(VR_BUG_NONE),
209 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
210 	},
211 	{
212 		0x8a, 0x8b,
213 		"VIA VT6105-LOM Rhine III Fast Ethernet",
214 		(VR_BUG_NONE),
215 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
216 	},
217 	{
218 		0x8c, 0x8c,
219 		"VIA VT6107-A0 Rhine III Fast Ethernet",
220 		(VR_BUG_NONE),
221 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
222 	},
223 	{
224 		0x8d, 0x8f,
225 		"VIA VT6107-A1 Rhine III Fast Ethernet",
226 		(VR_BUG_NONE),
227 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
228 		    VR_FEATURE_MRDLNMULTIPLE)
229 	},
230 	{
231 		0x90, 0x93,
232 		"VIA VT6105M-A0 Rhine III Fast Ethernet Management Adapter",
233 		(VR_BUG_NONE),
234 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
235 		    VR_FEATURE_TXCHKSUM | VR_FEATURE_RXCHKSUM |
236 		    VR_FEATURE_CAMSUPPORT | VR_FEATURE_VLANTAGGING |
237 		    VR_FEATURE_MIBCOUNTER)
238 	},
239 	{
240 		0x94, 0xff,
241 		"VIA VT6105M-B1 Rhine III Fast Ethernet Management Adapter",
242 		(VR_BUG_NONE),
243 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
244 		    VR_FEATURE_TXCHKSUM | VR_FEATURE_RXCHKSUM |
245 		    VR_FEATURE_CAMSUPPORT | VR_FEATURE_VLANTAGGING |
246 		    VR_FEATURE_MIBCOUNTER)
247 	}
248 };
249 
250 /*
251  * Function prototypes
252  */
253 static	vr_result_t	vr_add_intr(vr_t *vrp);
254 static	void		vr_remove_intr(vr_t *vrp);
255 static	int32_t		vr_cam_index(vr_t *vrp, const uint8_t *maddr);
256 static	uint32_t	ether_crc_be(const uint8_t *address);
257 static	void		vr_tx_enqueue_msg(vr_t *vrp, mblk_t *mp);
258 static	void		vr_log(vr_t *vrp, int level, const char *fmt, ...);
259 static	int		vr_resume(dev_info_t *devinfo);
260 static	int		vr_suspend(dev_info_t *devinfo);
261 static	vr_result_t	vr_bus_config(vr_t *vrp);
262 static	void		vr_bus_unconfig(vr_t *vrp);
263 static	void		vr_reset(vr_t *vrp);
264 static	int		vr_start(vr_t *vrp);
265 static	int		vr_stop(vr_t *vrp);
266 static	vr_result_t	vr_rings_init(vr_t *vrp);
267 static	void		vr_rings_fini(vr_t *vrp);
268 static	vr_result_t	vr_alloc_ring(vr_t *vrp, vr_ring_t *r, size_t n);
269 static	void		vr_free_ring(vr_ring_t *r, size_t n);
270 static	vr_result_t	vr_rxring_init(vr_t *vrp);
271 static	void		vr_rxring_fini(vr_t *vrp);
272 static	vr_result_t	vr_txring_init(vr_t *vrp);
273 static	void		vr_txring_fini(vr_t *vrp);
274 static	vr_result_t	vr_alloc_dmabuf(vr_t *vrp, vr_data_dma_t *dmap,
275 			    uint_t flags);
276 static	void		vr_free_dmabuf(vr_data_dma_t *dmap);
277 static	void		vr_param_init(vr_t *vrp);
278 static	mblk_t		*vr_receive(vr_t *vrp);
279 static	void		vr_tx_reclaim(vr_t *vrp);
280 static	void		vr_periodic(void *p);
281 static	void		vr_error(vr_t *vrp);
282 static	void		vr_phy_read(vr_t *vrp, int offset, uint16_t *value);
283 static	void		vr_phy_write(vr_t *vrp, int offset, uint16_t value);
284 static	void		vr_phy_autopoll_disable(vr_t *vrp);
285 static	void		vr_phy_autopoll_enable(vr_t *vrp);
286 static	void		vr_link_init(vr_t *vrp);
287 static	void		vr_link_state(vr_t *vrp);
288 static	void		vr_kstats_init(vr_t *vrp);
289 static	int		vr_update_kstats(kstat_t *ksp, int access);
290 static	void		vr_remove_kstats(vr_t *vrp);
291 
292 static int
vr_attach(dev_info_t * devinfo,ddi_attach_cmd_t cmd)293 vr_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
294 {
295 	vr_t		*vrp;
296 	mac_register_t	*macreg;
297 
298 	if (cmd == DDI_RESUME)
299 		return (vr_resume(devinfo));
300 	else if (cmd != DDI_ATTACH)
301 		return (DDI_FAILURE);
302 
303 	/*
304 	 * Attach.
305 	 */
306 	vrp = kmem_zalloc(sizeof (vr_t), KM_SLEEP);
307 	ddi_set_driver_private(devinfo, vrp);
308 	vrp->devinfo = devinfo;
309 
310 	/*
311 	 * Store the name+instance of the module.
312 	 */
313 	(void) snprintf(vrp->ifname, sizeof (vrp->ifname), "%s%d",
314 	    MODULENAME, ddi_get_instance(devinfo));
315 
316 	/*
317 	 * Bus initialization.
318 	 */
319 	if (vr_bus_config(vrp) != VR_SUCCESS) {
320 		vr_log(vrp, CE_WARN, "vr_bus_config failed");
321 		goto fail0;
322 	}
323 
324 	/*
325 	 * Initialize default parameters.
326 	 */
327 	vr_param_init(vrp);
328 
329 	/*
330 	 * Setup the descriptor rings.
331 	 */
332 	if (vr_rings_init(vrp) != VR_SUCCESS) {
333 		vr_log(vrp, CE_WARN, "vr_rings_init failed");
334 		goto fail1;
335 	}
336 
337 	/*
338 	 * Initialize kstats.
339 	 */
340 	vr_kstats_init(vrp);
341 
342 	/*
343 	 * Add interrupt to the OS.
344 	 */
345 	if (vr_add_intr(vrp) != VR_SUCCESS) {
346 		vr_log(vrp, CE_WARN, "vr_add_intr failed in attach");
347 		goto fail3;
348 	}
349 
350 	/*
351 	 * Add mutexes.
352 	 */
353 	mutex_init(&vrp->intrlock, NULL, MUTEX_DRIVER,
354 	    DDI_INTR_PRI(vrp->intr_pri));
355 	mutex_init(&vrp->oplock, NULL, MUTEX_DRIVER, NULL);
356 	mutex_init(&vrp->tx.lock, NULL, MUTEX_DRIVER, NULL);
357 
358 	/*
359 	 * Enable interrupt.
360 	 */
361 	if (ddi_intr_enable(vrp->intr_hdl) != DDI_SUCCESS) {
362 		vr_log(vrp, CE_NOTE, "ddi_intr_enable failed");
363 		goto fail5;
364 	}
365 
366 	/*
367 	 * Register with parent, mac.
368 	 */
369 	if ((macreg = mac_alloc(MAC_VERSION)) == NULL) {
370 		vr_log(vrp, CE_WARN, "mac_alloc failed in attach");
371 		goto fail6;
372 	}
373 
374 	macreg->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
375 	macreg->m_driver = vrp;
376 	macreg->m_dip = devinfo;
377 	macreg->m_src_addr = vrp->vendor_ether_addr;
378 	macreg->m_callbacks = &vr_mac_callbacks;
379 	macreg->m_min_sdu = 0;
380 	macreg->m_max_sdu = ETHERMTU;
381 	macreg->m_margin = VLAN_TAGSZ;
382 
383 	if (mac_register(macreg, &vrp->machdl) != 0) {
384 		vr_log(vrp, CE_WARN, "mac_register failed in attach");
385 		goto fail7;
386 	}
387 	mac_free(macreg);
388 	return (DDI_SUCCESS);
389 
390 fail7:
391 	mac_free(macreg);
392 fail6:
393 	(void) ddi_intr_disable(vrp->intr_hdl);
394 fail5:
395 	mutex_destroy(&vrp->tx.lock);
396 	mutex_destroy(&vrp->oplock);
397 	mutex_destroy(&vrp->intrlock);
398 	vr_remove_intr(vrp);
399 fail3:
400 	vr_remove_kstats(vrp);
401 fail2:
402 	vr_rings_fini(vrp);
403 fail1:
404 	vr_bus_unconfig(vrp);
405 fail0:
406 	kmem_free(vrp, sizeof (vr_t));
407 	return (DDI_FAILURE);
408 }
409 
410 static int
vr_detach(dev_info_t * devinfo,ddi_detach_cmd_t cmd)411 vr_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
412 {
413 	vr_t		*vrp;
414 
415 	vrp = ddi_get_driver_private(devinfo);
416 
417 	if (cmd == DDI_SUSPEND)
418 		return (vr_suspend(devinfo));
419 	else if (cmd != DDI_DETACH)
420 		return (DDI_FAILURE);
421 
422 	if (vrp->chip.state == CHIPSTATE_RUNNING)
423 		return (DDI_FAILURE);
424 
425 	/*
426 	 * Try to un-register from the MAC layer.
427 	 */
428 	if (mac_unregister(vrp->machdl) != 0)
429 		return (DDI_FAILURE);
430 
431 	(void) ddi_intr_disable(vrp->intr_hdl);
432 	vr_remove_intr(vrp);
433 	mutex_destroy(&vrp->tx.lock);
434 	mutex_destroy(&vrp->oplock);
435 	mutex_destroy(&vrp->intrlock);
436 	vr_remove_kstats(vrp);
437 	vr_rings_fini(vrp);
438 	vr_bus_unconfig(vrp);
439 	kmem_free(vrp, sizeof (vr_t));
440 	return (DDI_SUCCESS);
441 }
442 
443 /*
444  * quiesce the card for fast reboot.
445  */
446 int
vr_quiesce(dev_info_t * dev_info)447 vr_quiesce(dev_info_t *dev_info)
448 {
449 	vr_t	*vrp;
450 
451 	vrp = (vr_t *)ddi_get_driver_private(dev_info);
452 
453 	/*
454 	 * Stop interrupts.
455 	 */
456 	VR_PUT16(vrp->acc_reg, VR_ICR0, 0);
457 	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);
458 
459 	/*
460 	 * Stop DMA.
461 	 */
462 	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_STOP);
463 	return (DDI_SUCCESS);
464 }
465 
466 /*
467  * Add an interrupt for our device to the OS.
468  */
469 static vr_result_t
vr_add_intr(vr_t * vrp)470 vr_add_intr(vr_t *vrp)
471 {
472 	int	nintrs;
473 	int	rc;
474 
475 	rc = ddi_intr_alloc(vrp->devinfo, &vrp->intr_hdl,
476 	    DDI_INTR_TYPE_FIXED,	/* type */
477 	    0,			/* number */
478 	    1,			/* count */
479 	    &nintrs,		/* actualp */
480 	    DDI_INTR_ALLOC_STRICT);
481 
482 	if (rc != DDI_SUCCESS) {
483 		vr_log(vrp, CE_NOTE, "ddi_intr_alloc failed: %d", rc);
484 		return (VR_FAILURE);
485 	}
486 
487 	rc = ddi_intr_add_handler(vrp->intr_hdl, vr_intr, vrp, NULL);
488 	if (rc != DDI_SUCCESS) {
489 		vr_log(vrp, CE_NOTE, "ddi_intr_add_handler failed");
490 		if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
491 			vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
492 		return (VR_FAILURE);
493 	}
494 
495 	rc = ddi_intr_get_pri(vrp->intr_hdl, &vrp->intr_pri);
496 	if (rc != DDI_SUCCESS) {
497 		vr_log(vrp, CE_NOTE, "ddi_intr_get_pri failed");
498 		if (ddi_intr_remove_handler(vrp->intr_hdl) != DDI_SUCCESS)
499 			vr_log(vrp, CE_NOTE, "ddi_intr_remove_handler failed");
500 
501 		if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
502 			vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
503 
504 		return (VR_FAILURE);
505 	}
506 	return (VR_SUCCESS);
507 }
508 
509 /*
510  * Remove our interrupt from the OS.
511  */
512 static void
vr_remove_intr(vr_t * vrp)513 vr_remove_intr(vr_t *vrp)
514 {
515 	if (ddi_intr_remove_handler(vrp->intr_hdl) != DDI_SUCCESS)
516 		vr_log(vrp, CE_NOTE, "ddi_intr_remove_handler failed");
517 
518 	if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
519 		vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
520 }
521 
522 /*
523  * Resume operation after suspend.
524  */
525 static int
vr_resume(dev_info_t * devinfo)526 vr_resume(dev_info_t *devinfo)
527 {
528 	vr_t *vrp;
529 
530 	vrp = (vr_t *)ddi_get_driver_private(devinfo);
531 	mutex_enter(&vrp->oplock);
532 	if (vrp->chip.state == CHIPSTATE_SUSPENDED_RUNNING)
533 		(void) vr_start(vrp);
534 	mutex_exit(&vrp->oplock);
535 	return (DDI_SUCCESS);
536 }
537 
538 /*
539  * Suspend operation.
540  */
541 static int
vr_suspend(dev_info_t * devinfo)542 vr_suspend(dev_info_t *devinfo)
543 {
544 	vr_t *vrp;
545 
546 	vrp = (vr_t *)ddi_get_driver_private(devinfo);
547 	mutex_enter(&vrp->oplock);
548 	if (vrp->chip.state == CHIPSTATE_RUNNING) {
549 		(void) vr_stop(vrp);
550 		vrp->chip.state = CHIPSTATE_SUSPENDED_RUNNING;
551 	}
552 	mutex_exit(&vrp->oplock);
553 	return (DDI_SUCCESS);
554 }
555 
556 /*
557  * Initial bus- and device configuration during attach(9E).
558  */
559 static vr_result_t
vr_bus_config(vr_t * vrp)560 vr_bus_config(vr_t *vrp)
561 {
562 	uint32_t		addr;
563 	int			n, nsets, rc;
564 	uint_t			elem;
565 	pci_regspec_t		*regs;
566 
567 	/*
568 	 * Get the reg property which describes the various access methods.
569 	 */
570 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, vrp->devinfo,
571 	    0, "reg", (int **)&regs, &elem) != DDI_PROP_SUCCESS) {
572 		vr_log(vrp, CE_WARN, "Can't get reg property");
573 		return (VR_FAILURE);
574 	}
575 	nsets = (elem * sizeof (uint_t)) / sizeof (pci_regspec_t);
576 
577 	/*
578 	 * Setup access to all available sets.
579 	 */
580 	vrp->nsets = nsets;
581 	vrp->regset = kmem_zalloc(nsets * sizeof (vr_acc_t), KM_SLEEP);
582 	for (n = 0; n < nsets; n++) {
583 		rc = ddi_regs_map_setup(vrp->devinfo, n,
584 		    &vrp->regset[n].addr, 0, 0,
585 		    &vr_dev_dma_accattr,
586 		    &vrp->regset[n].hdl);
587 		if (rc != DDI_SUCCESS) {
588 			vr_log(vrp, CE_NOTE,
589 			    "Setup of register set %d failed", n);
590 			while (--n >= 0)
591 				ddi_regs_map_free(&vrp->regset[n].hdl);
592 			kmem_free(vrp->regset, nsets * sizeof (vr_acc_t));
593 			ddi_prop_free(regs);
594 			return (VR_FAILURE);
595 		}
596 		bcopy(&regs[n], &vrp->regset[n].reg, sizeof (pci_regspec_t));
597 	}
598 	ddi_prop_free(regs);
599 
600 	/*
601 	 * Assign type-named pointers to the register sets.
602 	 */
603 	for (n = 0; n < nsets; n++) {
604 		addr = vrp->regset[n].reg.pci_phys_hi & PCI_REG_ADDR_M;
605 		if (addr == PCI_ADDR_CONFIG && vrp->acc_cfg == NULL)
606 			vrp->acc_cfg = &vrp->regset[n];
607 		else if (addr == PCI_ADDR_IO && vrp->acc_io == NULL)
608 			vrp->acc_io = &vrp->regset[n];
609 		else if (addr == PCI_ADDR_MEM32 && vrp->acc_mem == NULL)
610 			vrp->acc_mem = &vrp->regset[n];
611 	}
612 
613 	/*
614 	 * Assure there is one of each type.
615 	 */
616 	if (vrp->acc_cfg == NULL ||
617 	    vrp->acc_io == NULL ||
618 	    vrp->acc_mem == NULL) {
619 		for (n = 0; n < nsets; n++)
620 			ddi_regs_map_free(&vrp->regset[n].hdl);
621 		kmem_free(vrp->regset, nsets * sizeof (vr_acc_t));
622 		vr_log(vrp, CE_WARN,
623 		    "Config-, I/O- and memory sets not available");
624 		return (VR_FAILURE);
625 	}
626 
627 	/*
628 	 * Store vendor/device/revision.
629 	 */
630 	vrp->chip.vendor = VR_GET16(vrp->acc_cfg, PCI_CONF_VENID);
631 	vrp->chip.device = VR_GET16(vrp->acc_cfg, PCI_CONF_DEVID);
632 	vrp->chip.revision = VR_GET16(vrp->acc_cfg, PCI_CONF_REVID);
633 
634 	/*
635 	 * Copy the matching chip_info_t structure.
636 	 */
637 	elem = sizeof (vr_chip_info) / sizeof (chip_info_t);
638 	for (n = 0; n < elem; n++) {
639 		if (vrp->chip.revision >= vr_chip_info[n].revmin &&
640 		    vrp->chip.revision <= vr_chip_info[n].revmax) {
641 			bcopy((void*)&vr_chip_info[n],
642 			    (void*)&vrp->chip.info,
643 			    sizeof (chip_info_t));
644 			break;
645 		}
646 	}
647 
648 	/*
649 	 * If we didn't find a chip_info_t for this card, copy the first
650 	 * entry of the info structures. This is a generic Rhine whith no
651 	 * bugs and no features.
652 	 */
653 	if (vrp->chip.info.name == NULL) {
654 		bcopy((void*)&vr_chip_info[0],
655 		    (void*) &vrp->chip.info,
656 		    sizeof (chip_info_t));
657 	}
658 
659 	/*
660 	 * Tell what is found.
661 	 */
662 	vr_log(vrp, CE_NOTE, "pci%d,%d,%d: %s, revision 0x%0x",
663 	    PCI_REG_BUS_G(vrp->acc_cfg->reg.pci_phys_hi),
664 	    PCI_REG_DEV_G(vrp->acc_cfg->reg.pci_phys_hi),
665 	    PCI_REG_FUNC_G(vrp->acc_cfg->reg.pci_phys_hi),
666 	    vrp->chip.info.name,
667 	    vrp->chip.revision);
668 
669 	/*
670 	 * Assure that the device is prepared for memory space accesses
671 	 * This should be the default as the device advertises memory
672 	 * access in it's BAR's. However, my VT6102 on a EPIA CL board doesn't
673 	 * and thus we explicetely enable it.
674 	 */
675 	VR_SETBIT8(vrp->acc_io, VR_CFGD, VR_CFGD_MMIOEN);
676 
677 	/*
678 	 * Setup a handle for regular usage, prefer memory space accesses.
679 	 */
680 	if (vrp->acc_mem != NULL &&
681 	    (vrp->chip.info.bugs & VR_BUG_NO_MEMIO) == 0)
682 		vrp->acc_reg = vrp->acc_mem;
683 	else
684 		vrp->acc_reg = vrp->acc_io;
685 
686 	/*
687 	 * Store the vendor's MAC address.
688 	 */
689 	for (n = 0; n < ETHERADDRL; n++) {
690 		vrp->vendor_ether_addr[n] = VR_GET8(vrp->acc_reg,
691 		    VR_ETHERADDR + n);
692 	}
693 	return (VR_SUCCESS);
694 }
695 
696 static void
vr_bus_unconfig(vr_t * vrp)697 vr_bus_unconfig(vr_t *vrp)
698 {
699 	uint_t	n;
700 
701 	/*
702 	 * Free the register access handles.
703 	 */
704 	for (n = 0; n < vrp->nsets; n++)
705 		ddi_regs_map_free(&vrp->regset[n].hdl);
706 	kmem_free(vrp->regset, vrp->nsets * sizeof (vr_acc_t));
707 }
708 
709 /*
710  * Initialize parameter structures.
711  */
712 static void
vr_param_init(vr_t * vrp)713 vr_param_init(vr_t *vrp)
714 {
715 	/*
716 	 * Initialize default link configuration parameters.
717 	 */
718 	vrp->param.an_en = VR_LINK_AUTONEG_ON;
719 	vrp->param.anadv_en = 1; /* Select 802.3 autonegotiation */
720 	vrp->param.anadv_en |= MII_ABILITY_100BASE_T4;
721 	vrp->param.anadv_en |= MII_ABILITY_100BASE_TX_FD;
722 	vrp->param.anadv_en |= MII_ABILITY_100BASE_TX;
723 	vrp->param.anadv_en |= MII_ABILITY_10BASE_T_FD;
724 	vrp->param.anadv_en |= MII_ABILITY_10BASE_T;
725 	/* Not a PHY ability, but advertised on behalf of MAC */
726 	vrp->param.anadv_en |= MII_ABILITY_PAUSE;
727 	vrp->param.mtu = ETHERMTU;
728 
729 	/*
730 	 * Store the PHY identity.
731 	 */
732 	vr_phy_read(vrp, MII_PHYIDH, &vrp->chip.mii.identh);
733 	vr_phy_read(vrp, MII_PHYIDL, &vrp->chip.mii.identl);
734 
735 	/*
736 	 * Clear incapabilities imposed by PHY in phymask.
737 	 */
738 	vrp->param.an_phymask = vrp->param.anadv_en;
739 	vr_phy_read(vrp, MII_STATUS, &vrp->chip.mii.status);
740 	if ((vrp->chip.mii.status & MII_STATUS_10) == 0)
741 		vrp->param.an_phymask &= ~MII_ABILITY_10BASE_T;
742 
743 	if ((vrp->chip.mii.status & MII_STATUS_10_FD) == 0)
744 		vrp->param.an_phymask &= ~MII_ABILITY_10BASE_T_FD;
745 
746 	if ((vrp->chip.mii.status & MII_STATUS_100_BASEX) == 0)
747 		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_TX;
748 
749 	if ((vrp->chip.mii.status & MII_STATUS_100_BASEX_FD) == 0)
750 		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_TX_FD;
751 
752 	if ((vrp->chip.mii.status & MII_STATUS_100_BASE_T4) == 0)
753 		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_T4;
754 
755 	/*
756 	 * Clear incapabilities imposed by MAC in macmask
757 	 * Note that flowcontrol (FCS?) is never masked. All of our adapters
758 	 * have the ability to honor incoming pause frames. Only the newer can
759 	 * transmit pause frames. Since there's no asym flowcontrol in 100Mbit
760 	 * Ethernet, we always advertise (symmetric) pause.
761 	 */
762 	vrp->param.an_macmask = vrp->param.anadv_en;
763 
764 	/*
765 	 * Advertised capabilities is enabled minus incapable.
766 	 */
767 	vrp->chip.mii.anadv = vrp->param.anadv_en &
768 	    (vrp->param.an_phymask & vrp->param.an_macmask);
769 
770 	/*
771 	 * Ensure that autoneg of the PHY matches our default.
772 	 */
773 	if (vrp->param.an_en == VR_LINK_AUTONEG_ON)
774 		vrp->chip.mii.control = MII_CONTROL_ANE;
775 	else
776 		vrp->chip.mii.control =
777 		    (MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
778 }
779 
780 /*
781  * Setup the descriptor rings.
782  */
783 static vr_result_t
vr_rings_init(vr_t * vrp)784 vr_rings_init(vr_t *vrp)
785 {
786 
787 	vrp->rx.ndesc = VR_RX_N_DESC;
788 	vrp->tx.ndesc = VR_TX_N_DESC;
789 
790 	/*
791 	 * Create a ring for receive.
792 	 */
793 	if (vr_alloc_ring(vrp, &vrp->rxring, vrp->rx.ndesc) != VR_SUCCESS)
794 		return (VR_FAILURE);
795 
796 	/*
797 	 * Create a ring for transmit.
798 	 */
799 	if (vr_alloc_ring(vrp, &vrp->txring, vrp->tx.ndesc) != VR_SUCCESS) {
800 		vr_free_ring(&vrp->rxring, vrp->rx.ndesc);
801 		return (VR_FAILURE);
802 	}
803 
804 	vrp->rx.ring = vrp->rxring.desc;
805 	vrp->tx.ring = vrp->txring.desc;
806 	return (VR_SUCCESS);
807 }
808 
809 static void
vr_rings_fini(vr_t * vrp)810 vr_rings_fini(vr_t *vrp)
811 {
812 	vr_free_ring(&vrp->rxring, vrp->rx.ndesc);
813 	vr_free_ring(&vrp->txring, vrp->tx.ndesc);
814 }
815 
816 /*
817  * Allocate a descriptor ring
818  * The number of descriptor entries must fit in a single page so that the
819  * whole ring fits in one consequtive space.
820  *  i386:  4K page / 16 byte descriptor = 256 entries
821  *  sparc: 8K page / 16 byte descriptor = 512 entries
822  */
823 static vr_result_t
vr_alloc_ring(vr_t * vrp,vr_ring_t * ring,size_t n)824 vr_alloc_ring(vr_t *vrp, vr_ring_t *ring, size_t n)
825 {
826 	ddi_dma_cookie_t	desc_dma_cookie;
827 	uint_t			desc_cookiecnt;
828 	int			i, rc;
829 	size_t			rbytes;
830 
831 	/*
832 	 * Allocate a DMA handle for the chip descriptors.
833 	 */
834 	rc = ddi_dma_alloc_handle(vrp->devinfo,
835 	    &vr_dev_dma_attr,
836 	    DDI_DMA_SLEEP,
837 	    NULL,
838 	    &ring->handle);
839 
840 	if (rc != DDI_SUCCESS) {
841 		vr_log(vrp, CE_WARN,
842 		    "ddi_dma_alloc_handle in vr_alloc_ring failed.");
843 		return (VR_FAILURE);
844 	}
845 
846 	/*
847 	 * Allocate memory for the chip descriptors.
848 	 */
849 	rc = ddi_dma_mem_alloc(ring->handle,
850 	    n * sizeof (vr_chip_desc_t),
851 	    &vr_dev_dma_accattr,
852 	    DDI_DMA_CONSISTENT,
853 	    DDI_DMA_SLEEP,
854 	    NULL,
855 	    (caddr_t *)&ring->cdesc,
856 	    &rbytes,
857 	    &ring->acchdl);
858 
859 	if (rc != DDI_SUCCESS) {
860 		vr_log(vrp, CE_WARN,
861 		    "ddi_dma_mem_alloc in vr_alloc_ring failed.");
862 		ddi_dma_free_handle(&ring->handle);
863 		return (VR_FAILURE);
864 	}
865 
866 	/*
867 	 * Map the descriptor memory.
868 	 */
869 	rc = ddi_dma_addr_bind_handle(ring->handle,
870 	    NULL,
871 	    (caddr_t)ring->cdesc,
872 	    rbytes,
873 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
874 	    DDI_DMA_SLEEP,
875 	    NULL,
876 	    &desc_dma_cookie,
877 	    &desc_cookiecnt);
878 
879 	if (rc != DDI_DMA_MAPPED || desc_cookiecnt > 1) {
880 		vr_log(vrp, CE_WARN,
881 		    "ddi_dma_addr_bind_handle in vr_alloc_ring failed: "
882 		    "rc = %d, cookiecnt = %d", rc, desc_cookiecnt);
883 		ddi_dma_mem_free(&ring->acchdl);
884 		ddi_dma_free_handle(&ring->handle);
885 		return (VR_FAILURE);
886 	}
887 	ring->cdesc_paddr = desc_dma_cookie.dmac_address;
888 
889 	/*
890 	 * Allocate memory for the host descriptor ring.
891 	 */
892 	ring->desc =
893 	    (vr_desc_t *)kmem_zalloc(n * sizeof (vr_desc_t), KM_SLEEP);
894 
895 	/*
896 	 * Interlink the descriptors and connect host- to chip descriptors.
897 	 */
898 	for (i = 0; i < n; i++) {
899 		/*
900 		 * Connect the host descriptor to a chip descriptor.
901 		 */
902 		ring->desc[i].cdesc = &ring->cdesc[i];
903 
904 		/*
905 		 * Store the DMA address and offset in the descriptor
906 		 * Offset is for ddi_dma_sync() and paddr is for ddi_get/-put().
907 		 */
908 		ring->desc[i].offset = i * sizeof (vr_chip_desc_t);
909 		ring->desc[i].paddr = ring->cdesc_paddr + ring->desc[i].offset;
910 
911 		/*
912 		 * Link the previous descriptor to this one.
913 		 */
914 		if (i > 0) {
915 			/* Host */
916 			ring->desc[i-1].next = &ring->desc[i];
917 
918 			/* Chip */
919 			ddi_put32(ring->acchdl,
920 			    &ring->cdesc[i-1].next,
921 			    ring->desc[i].paddr);
922 		}
923 	}
924 
925 	/*
926 	 * Make rings out of this list by pointing last to first.
927 	 */
928 	i = n - 1;
929 	ring->desc[i].next = &ring->desc[0];
930 	ddi_put32(ring->acchdl, &ring->cdesc[i].next, ring->desc[0].paddr);
931 	return (VR_SUCCESS);
932 }
933 
934 /*
935  * Free the memory allocated for a ring.
936  */
937 static void
vr_free_ring(vr_ring_t * r,size_t n)938 vr_free_ring(vr_ring_t *r, size_t n)
939 {
940 	/*
941 	 * Unmap and free the chip descriptors.
942 	 */
943 	(void) ddi_dma_unbind_handle(r->handle);
944 	ddi_dma_mem_free(&r->acchdl);
945 	ddi_dma_free_handle(&r->handle);
946 
947 	/*
948 	 * Free the memory for storing host descriptors
949 	 */
950 	kmem_free(r->desc, n * sizeof (vr_desc_t));
951 }
952 
953 /*
954  * Initialize the receive ring.
955  */
956 static vr_result_t
vr_rxring_init(vr_t * vrp)957 vr_rxring_init(vr_t *vrp)
958 {
959 	int		i, rc;
960 	vr_desc_t	*rp;
961 
962 	/*
963 	 * Set the read pointer at the start of the ring.
964 	 */
965 	vrp->rx.rp = &vrp->rx.ring[0];
966 
967 	/*
968 	 * Assign a DMA buffer to each receive descriptor.
969 	 */
970 	for (i = 0; i < vrp->rx.ndesc; i++) {
971 		rp = &vrp->rx.ring[i];
972 		rc = vr_alloc_dmabuf(vrp,
973 		    &vrp->rx.ring[i].dmabuf,
974 		    DDI_DMA_STREAMING | DDI_DMA_READ);
975 
976 		if (rc != VR_SUCCESS) {
977 			while (--i >= 0)
978 				vr_free_dmabuf(&vrp->rx.ring[i].dmabuf);
979 			return (VR_FAILURE);
980 		}
981 
982 		/*
983 		 * Store the address of the dma buffer in the chip descriptor
984 		 */
985 		ddi_put32(vrp->rxring.acchdl,
986 		    &rp->cdesc->data,
987 		    rp->dmabuf.paddr);
988 
989 		/*
990 		 * Put the buffer length in the chip descriptor. Ensure that
991 		 * length fits in the 11 bits of stat1 (2047/0x7FF)
992 		 */
993 		ddi_put32(vrp->rxring.acchdl, &rp->cdesc->stat1,
994 		    MIN(VR_MAX_PKTSZ, rp->dmabuf.bufsz));
995 
996 		/*
997 		 * Set descriptor ownership to the card
998 		 */
999 		ddi_put32(vrp->rxring.acchdl, &rp->cdesc->stat0, VR_RDES0_OWN);
1000 
1001 		/*
1002 		 * Sync the descriptor with main memory
1003 		 */
1004 		(void) ddi_dma_sync(vrp->rxring.handle, rp->offset,
1005 		    sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORDEV);
1006 	}
1007 	return (VR_SUCCESS);
1008 }
1009 
1010 /*
1011  * Free the DMA buffers assigned to the receive ring.
1012  */
1013 static void
vr_rxring_fini(vr_t * vrp)1014 vr_rxring_fini(vr_t *vrp)
1015 {
1016 	int		i;
1017 
1018 	for (i = 0; i < vrp->rx.ndesc; i++)
1019 		vr_free_dmabuf(&vrp->rx.ring[i].dmabuf);
1020 }
1021 
1022 static vr_result_t
vr_txring_init(vr_t * vrp)1023 vr_txring_init(vr_t *vrp)
1024 {
1025 	vr_desc_t		*wp;
1026 	int			i, rc;
1027 
1028 	/*
1029 	 * Set the write- and claim pointer.
1030 	 */
1031 	vrp->tx.wp = &vrp->tx.ring[0];
1032 	vrp->tx.cp = &vrp->tx.ring[0];
1033 
1034 	/*
1035 	 * (Re)set the TX bookkeeping.
1036 	 */
1037 	vrp->tx.stallticks = 0;
1038 	vrp->tx.resched = 0;
1039 
1040 	/*
1041 	 * Every transmit decreases nfree. Every reclaim increases nfree.
1042 	 */
1043 	vrp->tx.nfree = vrp->tx.ndesc;
1044 
1045 	/*
1046 	 * Attach a DMA buffer to each transmit descriptor.
1047 	 */
1048 	for (i = 0; i < vrp->tx.ndesc; i++) {
1049 		rc = vr_alloc_dmabuf(vrp,
1050 		    &vrp->tx.ring[i].dmabuf,
1051 		    DDI_DMA_STREAMING | DDI_DMA_WRITE);
1052 
1053 		if (rc != VR_SUCCESS) {
1054 			while (--i >= 0)
1055 				vr_free_dmabuf(&vrp->tx.ring[i].dmabuf);
1056 			return (VR_FAILURE);
1057 		}
1058 	}
1059 
1060 	/*
1061 	 * Init & sync the TX descriptors so the device sees a valid ring.
1062 	 */
1063 	for (i = 0; i < vrp->tx.ndesc; i++) {
1064 		wp = &vrp->tx.ring[i];
1065 		ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat0, 0);
1066 		ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat1, 0);
1067 		ddi_put32(vrp->txring.acchdl, &wp->cdesc->data,
1068 		    wp->dmabuf.paddr);
1069 		(void) ddi_dma_sync(vrp->txring.handle, wp->offset,
1070 		    sizeof (vr_chip_desc_t),
1071 		    DDI_DMA_SYNC_FORDEV);
1072 	}
1073 	return (VR_SUCCESS);
1074 }
1075 
1076 /*
1077  * Free the DMA buffers attached to the TX ring.
1078  */
1079 static void
vr_txring_fini(vr_t * vrp)1080 vr_txring_fini(vr_t *vrp)
1081 {
1082 	int		i;
1083 
1084 	/*
1085 	 * Free the DMA buffers attached to the TX ring
1086 	 */
1087 	for (i = 0; i < vrp->tx.ndesc; i++)
1088 		vr_free_dmabuf(&vrp->tx.ring[i].dmabuf);
1089 }
1090 
1091 /*
1092  * Allocate a DMA buffer.
1093  */
1094 static vr_result_t
vr_alloc_dmabuf(vr_t * vrp,vr_data_dma_t * dmap,uint_t dmaflags)1095 vr_alloc_dmabuf(vr_t *vrp, vr_data_dma_t *dmap, uint_t dmaflags)
1096 {
1097 	ddi_dma_cookie_t	dma_cookie;
1098 	uint_t			cookiecnt;
1099 	int			rc;
1100 
1101 	/*
1102 	 * Allocate a DMA handle for the buffer
1103 	 */
1104 	rc = ddi_dma_alloc_handle(vrp->devinfo,
1105 	    &vr_data_dma_attr,
1106 	    DDI_DMA_DONTWAIT, NULL,
1107 	    &dmap->handle);
1108 
1109 	if (rc != DDI_SUCCESS) {
1110 		vr_log(vrp, CE_WARN,
1111 		    "ddi_dma_alloc_handle failed in vr_alloc_dmabuf");
1112 		return (VR_FAILURE);
1113 	}
1114 
1115 	/*
1116 	 * Allocate the buffer
1117 	 * The allocated buffer is aligned on 2K boundary. This ensures that
1118 	 * a 1500 byte frame never cross a page boundary and thus that the DMA
1119 	 * mapping can be established in 1 fragment.
1120 	 */
1121 	rc = ddi_dma_mem_alloc(dmap->handle,
1122 	    VR_DMABUFSZ,
1123 	    &vr_data_dma_accattr,
1124 	    DDI_DMA_RDWR | DDI_DMA_STREAMING,
1125 	    DDI_DMA_DONTWAIT, NULL,
1126 	    &dmap->buf,
1127 	    &dmap->bufsz,
1128 	    &dmap->acchdl);
1129 
1130 	if (rc != DDI_SUCCESS) {
1131 		vr_log(vrp, CE_WARN,
1132 		    "ddi_dma_mem_alloc failed in vr_alloc_dmabuf");
1133 		ddi_dma_free_handle(&dmap->handle);
1134 		return (VR_FAILURE);
1135 	}
1136 
1137 	/*
1138 	 * Map the memory
1139 	 */
1140 	rc = ddi_dma_addr_bind_handle(dmap->handle,
1141 	    NULL,
1142 	    (caddr_t)dmap->buf,
1143 	    dmap->bufsz,
1144 	    dmaflags,
1145 	    DDI_DMA_DONTWAIT,
1146 	    NULL,
1147 	    &dma_cookie,
1148 	    &cookiecnt);
1149 
1150 	/*
1151 	 * The cookiecount should never > 1 because we requested 2K alignment
1152 	 */
1153 	if (rc != DDI_DMA_MAPPED || cookiecnt > 1) {
1154 		vr_log(vrp, CE_WARN,
1155 		    "dma_addr_bind_handle failed in vr_alloc_dmabuf: "
1156 		    "rc = %d, cookiecnt = %d", rc, cookiecnt);
1157 		ddi_dma_mem_free(&dmap->acchdl);
1158 		ddi_dma_free_handle(&dmap->handle);
1159 		return (VR_FAILURE);
1160 	}
1161 	dmap->paddr = dma_cookie.dmac_address;
1162 	return (VR_SUCCESS);
1163 }
1164 
1165 /*
1166  * Destroy a DMA buffer.
1167  */
1168 static void
vr_free_dmabuf(vr_data_dma_t * dmap)1169 vr_free_dmabuf(vr_data_dma_t *dmap)
1170 {
1171 	(void) ddi_dma_unbind_handle(dmap->handle);
1172 	ddi_dma_mem_free(&dmap->acchdl);
1173 	ddi_dma_free_handle(&dmap->handle);
1174 }
1175 
1176 /*
1177  * Interrupt service routine
1178  * When our vector is shared with another device, av_dispatch_autovect calls
1179  * all service routines for the vector until *none* of them return claimed
1180  * That means that, when sharing vectors, this routine is called at least
1181  * twice for each interrupt.
1182  */
1183 uint_t
vr_intr(caddr_t arg1,caddr_t arg2)1184 vr_intr(caddr_t arg1, caddr_t arg2)
1185 {
1186 	vr_t		*vrp;
1187 	uint16_t	status;
1188 	mblk_t		*lp = NULL;
1189 	uint32_t	tx_resched;
1190 	uint32_t	link_change;
1191 
1192 	tx_resched = 0;
1193 	link_change = 0;
1194 	vrp = (void *)arg1;
1195 	_NOTE(ARGUNUSED(arg2))
1196 
1197 	mutex_enter(&vrp->intrlock);
1198 	/*
1199 	 * If the driver is not in running state it is not our interrupt.
1200 	 * Shared interrupts can end up here without us being started.
1201 	 */
1202 	if (vrp->chip.state != CHIPSTATE_RUNNING) {
1203 		mutex_exit(&vrp->intrlock);
1204 		return (DDI_INTR_UNCLAIMED);
1205 	}
1206 
1207 	/*
1208 	 * Read the status register to see if the interrupt is from our device
1209 	 * This read also ensures that posted writes are brought to main memory.
1210 	 */
1211 	status = VR_GET16(vrp->acc_reg, VR_ISR0) & VR_ICR0_CFG;
1212 	if (status == 0) {
1213 		/*
1214 		 * Status contains no configured interrupts
1215 		 * The interrupt was not generated by our device.
1216 		 */
1217 		vrp->stats.intr_unclaimed++;
1218 		mutex_exit(&vrp->intrlock);
1219 		return (DDI_INTR_UNCLAIMED);
1220 	}
1221 	vrp->stats.intr_claimed++;
1222 
1223 	/*
1224 	 * Acknowledge the event(s) that caused interruption.
1225 	 */
1226 	VR_PUT16(vrp->acc_reg, VR_ISR0, status);
1227 
1228 	/*
1229 	 * Receive completion.
1230 	 */
1231 	if ((status & (VR_ISR0_RX_DONE | VR_ISR_RX_ERR_BITS)) != 0) {
1232 		/*
1233 		 * Received some packets.
1234 		 */
1235 		lp = vr_receive(vrp);
1236 
1237 		/*
1238 		 * DMA stops after a conflict in the FIFO.
1239 		 */
1240 		if ((status & VR_ISR_RX_ERR_BITS) != 0)
1241 			VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1242 		status &= ~(VR_ISR0_RX_DONE | VR_ISR_RX_ERR_BITS);
1243 	}
1244 
1245 	/*
1246 	 * Transmit completion.
1247 	 */
1248 	if ((status & (VR_ISR0_TX_DONE | VR_ISR_TX_ERR_BITS)) != 0) {
1249 		/*
1250 		 * Card done with transmitting some packets
1251 		 * TX_DONE is generated 3 times per ring but it appears
1252 		 * more often because it is also set when an RX_DONE
1253 		 * interrupt is generated.
1254 		 */
1255 		mutex_enter(&vrp->tx.lock);
1256 		vr_tx_reclaim(vrp);
1257 		tx_resched = vrp->tx.resched;
1258 		vrp->tx.resched = 0;
1259 		mutex_exit(&vrp->tx.lock);
1260 		status &= ~(VR_ISR0_TX_DONE | VR_ISR_TX_ERR_BITS);
1261 	}
1262 
1263 	/*
1264 	 * Link status change.
1265 	 */
1266 	if ((status & VR_ICR0_LINKSTATUS) != 0) {
1267 		/*
1268 		 * Get new link state and inform the mac layer.
1269 		 */
1270 		mutex_enter(&vrp->oplock);
1271 		mutex_enter(&vrp->tx.lock);
1272 		vr_link_state(vrp);
1273 		mutex_exit(&vrp->tx.lock);
1274 		mutex_exit(&vrp->oplock);
1275 		status &= ~VR_ICR0_LINKSTATUS;
1276 		vrp->stats.linkchanges++;
1277 		link_change = 1;
1278 	}
1279 
1280 	/*
1281 	 * Bus error.
1282 	 */
1283 	if ((status & VR_ISR0_BUSERR) != 0) {
1284 		vr_log(vrp, CE_WARN, "bus error occured");
1285 		vrp->reset = 1;
1286 		status &= ~VR_ISR0_BUSERR;
1287 	}
1288 
1289 	/*
1290 	 * We must have handled all things here.
1291 	 */
1292 	ASSERT(status == 0);
1293 	mutex_exit(&vrp->intrlock);
1294 
1295 	/*
1296 	 * Reset the device if requested
1297 	 * The request can come from the periodic tx check or from the interrupt
1298 	 * status.
1299 	 */
1300 	if (vrp->reset != 0) {
1301 		vr_error(vrp);
1302 		vrp->reset = 0;
1303 	}
1304 
1305 	/*
1306 	 * Pass up the list with received packets.
1307 	 */
1308 	if (lp != NULL)
1309 		mac_rx(vrp->machdl, 0, lp);
1310 
1311 	/*
1312 	 * Inform the upper layer on the linkstatus if there was a change.
1313 	 */
1314 	if (link_change != 0)
1315 		mac_link_update(vrp->machdl,
1316 		    (link_state_t)vrp->chip.link.state);
1317 	/*
1318 	 * Restart transmissions if we were waiting for tx descriptors.
1319 	 */
1320 	if (tx_resched == 1)
1321 		mac_tx_update(vrp->machdl);
1322 
1323 	/*
1324 	 * Read something from the card to ensure that all of our configuration
1325 	 * writes are delivered to the device before the interrupt is ended.
1326 	 */
1327 	(void) VR_GET8(vrp->acc_reg, VR_ETHERADDR);
1328 	return (DDI_INTR_CLAIMED);
1329 }
1330 
1331 /*
1332  * Respond to an unforseen situation by resetting the card and our bookkeeping.
1333  */
1334 static void
vr_error(vr_t * vrp)1335 vr_error(vr_t *vrp)
1336 {
1337 	vr_log(vrp, CE_WARN, "resetting MAC.");
1338 	mutex_enter(&vrp->intrlock);
1339 	mutex_enter(&vrp->oplock);
1340 	mutex_enter(&vrp->tx.lock);
1341 	(void) vr_stop(vrp);
1342 	vr_reset(vrp);
1343 	(void) vr_start(vrp);
1344 	mutex_exit(&vrp->tx.lock);
1345 	mutex_exit(&vrp->oplock);
1346 	mutex_exit(&vrp->intrlock);
1347 	vrp->stats.resets++;
1348 }
1349 
1350 /*
1351  * Collect received packets in a list.
1352  */
1353 static mblk_t *
vr_receive(vr_t * vrp)1354 vr_receive(vr_t *vrp)
1355 {
1356 	mblk_t			*lp, *mp, *np;
1357 	vr_desc_t		*rxp;
1358 	vr_data_dma_t		*dmap;
1359 	uint32_t		pklen;
1360 	uint32_t		rxstat0;
1361 	uint32_t		n;
1362 
1363 	lp = NULL;
1364 	n = 0;
1365 	for (rxp = vrp->rx.rp; ; rxp = rxp->next, n++) {
1366 		/*
1367 		 * Sync the descriptor before looking at it.
1368 		 */
1369 		(void) ddi_dma_sync(vrp->rxring.handle, rxp->offset,
1370 		    sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORKERNEL);
1371 
1372 		/*
1373 		 * Get the status from the descriptor.
1374 		 */
1375 		rxstat0 = ddi_get32(vrp->rxring.acchdl, &rxp->cdesc->stat0);
1376 
1377 		/*
1378 		 * We're done if the descriptor is owned by the card.
1379 		 */
1380 		if ((rxstat0 & VR_RDES0_OWN) != 0)
1381 			break;
1382 		else if ((rxstat0 & VR_RDES0_RXOK) != 0) {
1383 			/*
1384 			 * Received a good packet
1385 			 */
1386 			dmap = &rxp->dmabuf;
1387 			pklen = (rxstat0 >> 16) - ETHERFCSL;
1388 
1389 			/*
1390 			 * Sync the data.
1391 			 */
1392 			(void) ddi_dma_sync(dmap->handle, 0,
1393 			    pklen, DDI_DMA_SYNC_FORKERNEL);
1394 
1395 			/*
1396 			 * Send a new copied message upstream.
1397 			 */
1398 			np = allocb(pklen, 0);
1399 			if (np != NULL) {
1400 				bcopy(dmap->buf, np->b_rptr, pklen);
1401 				np->b_wptr = np->b_rptr + pklen;
1402 
1403 				vrp->stats.mac_stat_ipackets++;
1404 				vrp->stats.mac_stat_rbytes += pklen;
1405 
1406 				if ((rxstat0 & VR_RDES0_BAR) != 0)
1407 					vrp->stats.mac_stat_brdcstrcv++;
1408 				else if ((rxstat0 & VR_RDES0_MAR) != 0)
1409 					vrp->stats.mac_stat_multircv++;
1410 
1411 				/*
1412 				 * Link this packet in the list.
1413 				 */
1414 				np->b_next = NULL;
1415 				if (lp == NULL)
1416 					lp = mp = np;
1417 				else {
1418 					mp->b_next = np;
1419 					mp = np;
1420 				}
1421 			} else {
1422 				vrp->stats.allocbfail++;
1423 				vrp->stats.mac_stat_norcvbuf++;
1424 			}
1425 
1426 		} else {
1427 			/*
1428 			 * Received with errors.
1429 			 */
1430 			vrp->stats.mac_stat_ierrors++;
1431 			if ((rxstat0 & VR_RDES0_FAE) != 0)
1432 				vrp->stats.ether_stat_align_errors++;
1433 			if ((rxstat0 & VR_RDES0_CRCERR) != 0)
1434 				vrp->stats.ether_stat_fcs_errors++;
1435 			if ((rxstat0 & VR_RDES0_LONG) != 0)
1436 				vrp->stats.ether_stat_toolong_errors++;
1437 			if ((rxstat0 & VR_RDES0_RUNT) != 0)
1438 				vrp->stats.ether_stat_tooshort_errors++;
1439 			if ((rxstat0 & VR_RDES0_FOV) != 0)
1440 				vrp->stats.mac_stat_overflows++;
1441 		}
1442 
1443 		/*
1444 		 * Reset descriptor ownership to the MAC.
1445 		 */
1446 		ddi_put32(vrp->rxring.acchdl,
1447 		    &rxp->cdesc->stat0,
1448 		    VR_RDES0_OWN);
1449 		(void) ddi_dma_sync(vrp->rxring.handle,
1450 		    rxp->offset,
1451 		    sizeof (vr_chip_desc_t),
1452 		    DDI_DMA_SYNC_FORDEV);
1453 	}
1454 	vrp->rx.rp = rxp;
1455 
1456 	/*
1457 	 * If we do flowcontrol and if the card can transmit pause frames,
1458 	 * increment the "available receive descriptors" register.
1459 	 */
1460 	if (n > 0 && vrp->chip.link.flowctrl == VR_PAUSE_BIDIRECTIONAL) {
1461 		/*
1462 		 * Whenever the card moves a fragment to host memory it
1463 		 * decrements the RXBUFCOUNT register. If the value in the
1464 		 * register reaches a low watermark, the card transmits a pause
1465 		 * frame. If the value in this register reaches a high
1466 		 * watermark, the card sends a "cancel pause" frame
1467 		 *
1468 		 * Non-zero values written to this byte register are added
1469 		 * by the chip to the register's contents, so we must write
1470 		 * the number of descriptors free'd.
1471 		 */
1472 		VR_PUT8(vrp->acc_reg, VR_FCR0_RXBUFCOUNT, MIN(n, 0xFF));
1473 	}
1474 	return (lp);
1475 }
1476 
1477 /*
1478  * Enqueue a list of packets for transmission
1479  * Return the packets not transmitted.
1480  */
1481 mblk_t *
vr_mac_tx_enqueue_list(void * p,mblk_t * mp)1482 vr_mac_tx_enqueue_list(void *p, mblk_t *mp)
1483 {
1484 	vr_t		*vrp;
1485 	mblk_t		*nextp;
1486 
1487 	vrp = (vr_t *)p;
1488 	mutex_enter(&vrp->tx.lock);
1489 	do {
1490 		if (vrp->tx.nfree == 0) {
1491 			vrp->stats.ether_stat_defer_xmts++;
1492 			vrp->tx.resched = 1;
1493 			break;
1494 		}
1495 		nextp = mp->b_next;
1496 		mp->b_next = mp->b_prev = NULL;
1497 		vr_tx_enqueue_msg(vrp, mp);
1498 		mp = nextp;
1499 		vrp->tx.nfree--;
1500 	} while (mp != NULL);
1501 	mutex_exit(&vrp->tx.lock);
1502 
1503 	/*
1504 	 * Tell the chip to poll the TX ring.
1505 	 */
1506 	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1507 	return (mp);
1508 }
1509 
1510 /*
1511  * Enqueue a message for transmission.
1512  */
1513 static void
vr_tx_enqueue_msg(vr_t * vrp,mblk_t * mp)1514 vr_tx_enqueue_msg(vr_t *vrp, mblk_t *mp)
1515 {
1516 	vr_desc_t		*wp;
1517 	vr_data_dma_t		*dmap;
1518 	uint32_t		pklen;
1519 	uint32_t		nextp;
1520 	int			padlen;
1521 
1522 	if ((uchar_t)mp->b_rptr[0] == 0xff &&
1523 	    (uchar_t)mp->b_rptr[1] == 0xff &&
1524 	    (uchar_t)mp->b_rptr[2] == 0xff &&
1525 	    (uchar_t)mp->b_rptr[3] == 0xff &&
1526 	    (uchar_t)mp->b_rptr[4] == 0xff &&
1527 	    (uchar_t)mp->b_rptr[5] == 0xff)
1528 		vrp->stats.mac_stat_brdcstxmt++;
1529 	else if ((uchar_t)mp->b_rptr[0] == 1)
1530 		vrp->stats.mac_stat_multixmt++;
1531 
1532 	pklen = msgsize(mp);
1533 	wp = vrp->tx.wp;
1534 	dmap = &wp->dmabuf;
1535 
1536 	/*
1537 	 * Copy the message into the pre-mapped buffer and free mp
1538 	 */
1539 	mcopymsg(mp, dmap->buf);
1540 
1541 	/*
1542 	 * Clean padlen bytes of short packet.
1543 	 */
1544 	padlen = ETHERMIN - pklen;
1545 	if (padlen > 0) {
1546 		bzero(dmap->buf + pklen, padlen);
1547 		pklen += padlen;
1548 	}
1549 
1550 	/*
1551 	 * Most of the statistics are updated on reclaim, after the actual
1552 	 * transmit. obytes is maintained here because the length is cleared
1553 	 * after transmission
1554 	 */
1555 	vrp->stats.mac_stat_obytes += pklen;
1556 
1557 	/*
1558 	 * Sync the data so the device sees the new content too.
1559 	 */
1560 	(void) ddi_dma_sync(dmap->handle, 0, pklen, DDI_DMA_SYNC_FORDEV);
1561 
1562 	/*
1563 	 * If we have reached the TX interrupt distance, enable a TX interrupt
1564 	 * for this packet. The Interrupt Control (IC) bit in the transmit
1565 	 * descriptor doesn't have any effect on the interrupt generation
1566 	 * despite the vague statements in the datasheet. Thus, we use the
1567 	 * more obscure interrupt suppress bit which is probably part of the
1568 	 * MAC's bookkeeping for TX interrupts and fragmented packets.
1569 	 */
1570 	vrp->tx.intr_distance++;
1571 	nextp = ddi_get32(vrp->txring.acchdl, &wp->cdesc->next);
1572 	if (vrp->tx.intr_distance >= VR_TX_MAX_INTR_DISTANCE) {
1573 		/*
1574 		 * Don't suppress the interrupt for this packet.
1575 		 */
1576 		vrp->tx.intr_distance = 0;
1577 		nextp &= (~VR_TDES3_SUPPRESS_INTR);
1578 	} else {
1579 		/*
1580 		 * Suppress the interrupt for this packet.
1581 		 */
1582 		nextp |= VR_TDES3_SUPPRESS_INTR;
1583 	}
1584 
1585 	/*
1586 	 * Write and sync the chip's descriptor
1587 	 */
1588 	ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat1,
1589 	    pklen | (VR_TDES1_STP | VR_TDES1_EDP | VR_TDES1_CHN));
1590 	ddi_put32(vrp->txring.acchdl, &wp->cdesc->next, nextp);
1591 	ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat0, VR_TDES0_OWN);
1592 	(void) ddi_dma_sync(vrp->txring.handle, wp->offset,
1593 	    sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORDEV);
1594 
1595 	/*
1596 	 * The ticks counter is cleared by reclaim when it reclaimed some
1597 	 * descriptors and incremented by the periodic TX stall check.
1598 	 */
1599 	vrp->tx.stallticks = 1;
1600 	vrp->tx.wp = wp->next;
1601 }
1602 
1603 /*
1604  * Free transmitted descriptors.
1605  */
1606 static void
vr_tx_reclaim(vr_t * vrp)1607 vr_tx_reclaim(vr_t *vrp)
1608 {
1609 	vr_desc_t		*cp;
1610 	uint32_t		stat0, stat1, freed, dirty;
1611 
1612 	ASSERT(mutex_owned(&vrp->tx.lock));
1613 
1614 	freed = 0;
1615 	dirty = vrp->tx.ndesc - vrp->tx.nfree;
1616 	for (cp = vrp->tx.cp; dirty > 0; cp = cp->next) {
1617 		/*
1618 		 * Sync & get descriptor status.
1619 		 */
1620 		(void) ddi_dma_sync(vrp->txring.handle, cp->offset,
1621 		    sizeof (vr_chip_desc_t),
1622 		    DDI_DMA_SYNC_FORKERNEL);
1623 		stat0 = ddi_get32(vrp->txring.acchdl, &cp->cdesc->stat0);
1624 
1625 		if ((stat0 & VR_TDES0_OWN) != 0)
1626 			break;
1627 
1628 		/*
1629 		 * Do stats for the first descriptor in a chain.
1630 		 */
1631 		stat1 = ddi_get32(vrp->txring.acchdl, &cp->cdesc->stat1);
1632 		if ((stat1 & VR_TDES1_STP) != 0) {
1633 			if ((stat0 & VR_TDES0_TERR) != 0) {
1634 				vrp->stats.ether_stat_macxmt_errors++;
1635 				if ((stat0 & VR_TDES0_UDF) != 0)
1636 					vrp->stats.mac_stat_underflows++;
1637 				if ((stat0 & VR_TDES0_ABT) != 0)
1638 					vrp-> stats.ether_stat_ex_collisions++;
1639 				/*
1640 				 * Abort and FIFO underflow stop the MAC.
1641 				 * Packet queueing must be disabled with HD
1642 				 * links because otherwise the MAC is also lost
1643 				 * after a few of these events.
1644 				 */
1645 				VR_PUT8(vrp->acc_reg, VR_CTRL0,
1646 				    VR_CTRL0_DMA_GO);
1647 			} else
1648 				vrp->stats.mac_stat_opackets++;
1649 
1650 			if ((stat0 & VR_TDES0_COL) != 0) {
1651 				if ((stat0 & VR_TDES0_NCR) == 1) {
1652 					vrp->stats.
1653 					    ether_stat_first_collisions++;
1654 				} else {
1655 					vrp->stats.
1656 					    ether_stat_multi_collisions++;
1657 				}
1658 				vrp->stats.mac_stat_collisions +=
1659 				    (stat0 & VR_TDES0_NCR);
1660 			}
1661 
1662 			if ((stat0 & VR_TDES0_CRS) != 0)
1663 				vrp->stats.ether_stat_carrier_errors++;
1664 
1665 			if ((stat0 & VR_TDES0_OWC) != 0)
1666 				vrp->stats.ether_stat_tx_late_collisions++;
1667 		}
1668 		freed += 1;
1669 		dirty -= 1;
1670 	}
1671 	vrp->tx.cp = cp;
1672 
1673 	if (freed > 0) {
1674 		vrp->tx.nfree += freed;
1675 		vrp->tx.stallticks = 0;
1676 		vrp->stats.txreclaims += 1;
1677 	} else
1678 		vrp->stats.txreclaim0 += 1;
1679 }
1680 
1681 /*
1682  * Check TX health every 2 seconds.
1683  */
1684 static void
vr_periodic(void * p)1685 vr_periodic(void *p)
1686 {
1687 	vr_t		*vrp;
1688 
1689 	vrp = (vr_t *)p;
1690 	if (vrp->chip.state == CHIPSTATE_RUNNING &&
1691 	    vrp->chip.link.state == VR_LINK_STATE_UP && vrp->reset == 0) {
1692 		if (mutex_tryenter(&vrp->intrlock) != 0) {
1693 			mutex_enter(&vrp->tx.lock);
1694 			if (vrp->tx.resched == 1) {
1695 				if (vrp->tx.stallticks >= VR_MAXTXCHECKS) {
1696 					/*
1697 					 * No succesful reclaim in the last n
1698 					 * intervals. Reset the MAC.
1699 					 */
1700 					vrp->reset = 1;
1701 					vr_log(vrp, CE_WARN,
1702 					    "TX stalled, resetting MAC");
1703 				vrp->stats.txstalls++;
1704 				} else {
1705 					/*
1706 					 * Increase until we find that we've
1707 					 * waited long enough.
1708 					 */
1709 					vrp->tx.stallticks += 1;
1710 				}
1711 			}
1712 			mutex_exit(&vrp->tx.lock);
1713 			mutex_exit(&vrp->intrlock);
1714 			vrp->stats.txchecks++;
1715 		}
1716 	}
1717 	vrp->stats.cyclics++;
1718 }
1719 
1720 /*
1721  * Bring the device to our desired initial state.
1722  */
1723 static void
vr_reset(vr_t * vrp)1724 vr_reset(vr_t *vrp)
1725 {
1726 	uint32_t	time;
1727 
1728 	/*
1729 	 * Reset the MAC
1730 	 * If we don't wait long enough for the forced reset to complete,
1731 	 * MAC looses sync with PHY. Result link up, no link change interrupt
1732 	 * and no data transfer.
1733 	 */
1734 	time = 0;
1735 	VR_PUT8(vrp->acc_io, VR_CTRL1, VR_CTRL1_RESET);
1736 	do {
1737 		drv_usecwait(100);
1738 		time += 100;
1739 		if (time >= 100000) {
1740 			VR_PUT8(vrp->acc_io, VR_MISC1, VR_MISC1_RESET);
1741 			delay(drv_usectohz(200000));
1742 		}
1743 	} while ((VR_GET8(vrp->acc_io, VR_CTRL1) & VR_CTRL1_RESET) != 0);
1744 	delay(drv_usectohz(10000));
1745 
1746 	/*
1747 	 * Load the PROM contents into the MAC again.
1748 	 */
1749 	VR_SETBIT8(vrp->acc_io, VR_PROMCTL, VR_PROMCTL_RELOAD);
1750 	delay(drv_usectohz(100000));
1751 
1752 	/*
1753 	 * Tell the MAC via IO space that we like to use memory space for
1754 	 * accessing registers.
1755 	 */
1756 	VR_SETBIT8(vrp->acc_io, VR_CFGD, VR_CFGD_MMIOEN);
1757 }
1758 
1759 /*
1760  * Prepare and enable the card (MAC + PHY + PCI).
1761  */
1762 static int
vr_start(vr_t * vrp)1763 vr_start(vr_t *vrp)
1764 {
1765 	uint8_t		pci_latency, pci_mode;
1766 
1767 	ASSERT(mutex_owned(&vrp->oplock));
1768 
1769 	/*
1770 	 * Allocate DMA buffers for RX.
1771 	 */
1772 	if (vr_rxring_init(vrp) != VR_SUCCESS) {
1773 		vr_log(vrp, CE_NOTE, "vr_rxring_init() failed");
1774 		return (ENOMEM);
1775 	}
1776 
1777 	/*
1778 	 * Allocate DMA buffers for TX.
1779 	 */
1780 	if (vr_txring_init(vrp) != VR_SUCCESS) {
1781 		vr_log(vrp, CE_NOTE, "vr_txring_init() failed");
1782 		vr_rxring_fini(vrp);
1783 		return (ENOMEM);
1784 	}
1785 
1786 	/*
1787 	 * Changes of the chip specific registers as done in VIA's fet driver
1788 	 * These bits are not in the datasheet and controlled by vr_chip_info.
1789 	 */
1790 	pci_mode = VR_GET8(vrp->acc_reg, VR_MODE2);
1791 	if ((vrp->chip.info.bugs & VR_BUG_NEEDMODE10T) != 0)
1792 		pci_mode |= VR_MODE2_MODE10T;
1793 
1794 	if ((vrp->chip.info.bugs & VR_BUG_NEEDMODE2PCEROPT) != 0)
1795 		pci_mode |= VR_MODE2_PCEROPT;
1796 
1797 	if ((vrp->chip.info.features & VR_FEATURE_MRDLNMULTIPLE) != 0)
1798 		pci_mode |= VR_MODE2_MRDPL;
1799 	VR_PUT8(vrp->acc_reg, VR_MODE2, pci_mode);
1800 
1801 	pci_mode = VR_GET8(vrp->acc_reg, VR_MODE3);
1802 	if ((vrp->chip.info.bugs & VR_BUG_NEEDMIION) != 0)
1803 		pci_mode |= VR_MODE3_MIION;
1804 	VR_PUT8(vrp->acc_reg, VR_MODE3, pci_mode);
1805 
1806 	/*
1807 	 * RX: Accept broadcast packets.
1808 	 */
1809 	VR_SETBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTBROAD);
1810 
1811 	/*
1812 	 * RX: Start DMA when there are 256 bytes in the FIFO.
1813 	 */
1814 	VR_SETBITS8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_FIFO_THRESHOLD_BITS,
1815 	    VR_RXCFG_FIFO_THRESHOLD_256);
1816 	VR_SETBITS8(vrp->acc_reg, VR_BCR0, VR_BCR0_RX_FIFO_THRESHOLD_BITS,
1817 	    VR_BCR0_RX_FIFO_THRESHOLD_256);
1818 
1819 	/*
1820 	 * TX: Start transmit when there are 256 bytes in the FIFO.
1821 	 */
1822 	VR_SETBITS8(vrp->acc_reg, VR_TXCFG, VR_TXCFG_FIFO_THRESHOLD_BITS,
1823 	    VR_TXCFG_FIFO_THRESHOLD_256);
1824 	VR_SETBITS8(vrp->acc_reg, VR_BCR1, VR_BCR1_TX_FIFO_THRESHOLD_BITS,
1825 	    VR_BCR1_TX_FIFO_THRESHOLD_256);
1826 
1827 	/*
1828 	 * Burst transfers up to 256 bytes.
1829 	 */
1830 	VR_SETBITS8(vrp->acc_reg, VR_BCR0, VR_BCR0_DMABITS, VR_BCR0_DMA256);
1831 
1832 	/*
1833 	 * Disable TX autopolling as it is bad for RX performance
1834 	 * I assume this is because the RX process finds the bus often occupied
1835 	 * by the polling process.
1836 	 */
1837 	VR_SETBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_NOAUTOPOLL);
1838 
1839 	/*
1840 	 * Honor the PCI latency timer if it is reasonable.
1841 	 */
1842 	pci_latency = VR_GET8(vrp->acc_cfg, PCI_CONF_LATENCY_TIMER);
1843 	if (pci_latency != 0 && pci_latency != 0xFF)
1844 		VR_SETBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_LATENCYTIMER);
1845 	else
1846 		VR_CLRBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_LATENCYTIMER);
1847 
1848 	/*
1849 	 * Ensure that VLAN filtering is off, because this strips the tag.
1850 	 */
1851 	if ((vrp->chip.info.features & VR_FEATURE_VLANTAGGING) != 0) {
1852 		VR_CLRBIT8(vrp->acc_reg, VR_BCR1, VR_BCR1_VLANFILTER);
1853 		VR_CLRBIT8(vrp->acc_reg, VR_TXCFG, VR_TXCFG_8021PQ_EN);
1854 	}
1855 
1856 	/*
1857 	 * Clear the CAM filter.
1858 	 */
1859 	if ((vrp->chip.info.features & VR_FEATURE_CAMSUPPORT) != 0) {
1860 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_ENABLE);
1861 		VR_PUT32(vrp->acc_reg, VR_CAM_MASK, 0);
1862 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
1863 
1864 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
1865 		    VR_CAM_CTRL_ENABLE|VR_CAM_CTRL_SELECT_VLAN);
1866 		VR_PUT8(vrp->acc_reg, VR_VCAM0, 0);
1867 		VR_PUT8(vrp->acc_reg, VR_VCAM1, 0);
1868 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_WRITE);
1869 		VR_PUT32(vrp->acc_reg, VR_CAM_MASK, 1);
1870 		drv_usecwait(2);
1871 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
1872 	}
1873 
1874 	/*
1875 	 * Give the start addresses of the descriptor rings to the DMA
1876 	 * controller on the MAC.
1877 	 */
1878 	VR_PUT32(vrp->acc_reg, VR_RXADDR, vrp->rx.rp->paddr);
1879 	VR_PUT32(vrp->acc_reg, VR_TXADDR, vrp->tx.wp->paddr);
1880 
1881 	/*
1882 	 * We don't use the additionally invented interrupt ICR1 register,
1883 	 * so make sure these are disabled.
1884 	 */
1885 	VR_PUT8(vrp->acc_reg, VR_ISR1, 0xFF);
1886 	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);
1887 
1888 	/*
1889 	 * Enable interrupts.
1890 	 */
1891 	VR_PUT16(vrp->acc_reg, VR_ISR0, 0xFFFF);
1892 	VR_PUT16(vrp->acc_reg, VR_ICR0, VR_ICR0_CFG);
1893 
1894 	/*
1895 	 * Enable the DMA controller.
1896 	 */
1897 	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1898 
1899 	/*
1900 	 * Configure the link. Rely on the link change interrupt for getting
1901 	 * the link state into the driver.
1902 	 */
1903 	vr_link_init(vrp);
1904 
1905 	/*
1906 	 * Set the software view on the state to 'running'.
1907 	 */
1908 	vrp->chip.state = CHIPSTATE_RUNNING;
1909 	return (0);
1910 }
1911 
1912 /*
1913  * Stop DMA and interrupts.
1914  */
1915 static int
vr_stop(vr_t * vrp)1916 vr_stop(vr_t *vrp)
1917 {
1918 	ASSERT(mutex_owned(&vrp->oplock));
1919 
1920 	/*
1921 	 * Stop interrupts.
1922 	 */
1923 	VR_PUT16(vrp->acc_reg, VR_ICR0, 0);
1924 	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);
1925 
1926 	/*
1927 	 * Stop DMA.
1928 	 */
1929 	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_STOP);
1930 
1931 	/*
1932 	 * Set the software view on the state to stopped.
1933 	 */
1934 	vrp->chip.state = CHIPSTATE_STOPPED;
1935 
1936 	/*
1937 	 * Remove DMA buffers from the rings.
1938 	 */
1939 	vr_rxring_fini(vrp);
1940 	vr_txring_fini(vrp);
1941 	return (0);
1942 }
1943 
1944 int
vr_mac_start(void * p)1945 vr_mac_start(void *p)
1946 {
1947 	vr_t	*vrp;
1948 	int	rc;
1949 
1950 	vrp = (vr_t *)p;
1951 	mutex_enter(&vrp->oplock);
1952 
1953 	/*
1954 	 * Reset the card.
1955 	 */
1956 	vr_reset(vrp);
1957 
1958 	/*
1959 	 * Prepare and enable the card.
1960 	 */
1961 	rc = vr_start(vrp);
1962 
1963 	/*
1964 	 * Configure a cyclic function to keep the card & driver from diverting.
1965 	 */
1966 	vrp->periodic_id =
1967 	    ddi_periodic_add(vr_periodic, vrp, VR_CHECK_INTERVAL, DDI_IPL_0);
1968 
1969 	mutex_exit(&vrp->oplock);
1970 	return (rc);
1971 }
1972 
1973 void
vr_mac_stop(void * p)1974 vr_mac_stop(void *p)
1975 {
1976 	vr_t	*vrp = p;
1977 
1978 	mutex_enter(&vrp->oplock);
1979 	mutex_enter(&vrp->tx.lock);
1980 
1981 	/*
1982 	 * Stop the device.
1983 	 */
1984 	(void) vr_stop(vrp);
1985 	mutex_exit(&vrp->tx.lock);
1986 
1987 	/*
1988 	 * Remove the cyclic from the system.
1989 	 */
1990 	ddi_periodic_delete(vrp->periodic_id);
1991 	mutex_exit(&vrp->oplock);
1992 }
1993 
1994 /*
1995  * Add or remove a multicast address to/from the filter
1996  *
1997  * From the 21143 manual:
1998  *  The 21143 can store 512 bits serving as hash bucket heads, and one physical
1999  *  48-bit Ethernet address. Incoming frames with multicast destination
2000  *  addresses are subjected to imperfect filtering. Frames with physical
2001  *  destination  addresses are checked against the single physical address.
2002  *  For any incoming frame with a multicast destination address, the 21143
2003  *  applies the standard Ethernet cyclic redundancy check (CRC) function to the
2004  *  first 6 bytes containing the destination address, then it uses the most
2005  *  significant 9 bits of the result as a bit index into the table. If the
2006  *  indexed bit is set, the frame is accepted. If the bit is cleared, the frame
2007  *  is rejected. This filtering mode is called imperfect because multicast
2008  *  frames not addressed to this station may slip through, but it still
2009  *  decreases the number of frames that the host can receive.
2010  * I assume the above is also the way the VIA chips work. There's not a single
2011  * word about the multicast filter in the datasheet.
2012  *
2013  * Another word on the CAM filter on VT6105M controllers:
2014  *  The VT6105M has content addressable memory which can be used for perfect
2015  *  filtering of 32 multicast addresses and a few VLAN id's
2016  *
2017  *  I think it works like this: When the controller receives a multicast
2018  *  address, it looks up the address using CAM. When it is found, it takes the
2019  *  matching cell address (index) and compares this to the bit position in the
2020  *  cam mask. If the bit is set, the packet is passed up. If CAM lookup does not
2021  *  result in a match, the packet is filtered using the hash based filter,
2022  *  if that matches, the packet is passed up and dropped otherwise
2023  * Also, there's not a single word in the datasheet on how this cam is supposed
2024  * to work ...
2025  */
2026 int
vr_mac_set_multicast(void * p,boolean_t add,const uint8_t * mca)2027 vr_mac_set_multicast(void *p, boolean_t add, const uint8_t *mca)
2028 {
2029 	vr_t		*vrp;
2030 	uint32_t	crc_index;
2031 	int32_t		cam_index;
2032 	uint32_t	cam_mask;
2033 	boolean_t	use_hash_filter;
2034 	ether_addr_t	taddr;
2035 	uint32_t	a;
2036 
2037 	vrp = (vr_t *)p;
2038 	mutex_enter(&vrp->oplock);
2039 	mutex_enter(&vrp->intrlock);
2040 	use_hash_filter = B_FALSE;
2041 
2042 	if ((vrp->chip.info.features & VR_FEATURE_CAMSUPPORT) != 0) {
2043 		/*
2044 		 * Program the perfect filter.
2045 		 */
2046 		cam_mask = VR_GET32(vrp->acc_reg, VR_CAM_MASK);
2047 		if (add == B_TRUE) {
2048 			/*
2049 			 * Get index of first empty slot.
2050 			 */
2051 			bzero(&taddr, sizeof (taddr));
2052 			cam_index = vr_cam_index(vrp, taddr);
2053 			if (cam_index != -1) {
2054 				/*
2055 				 * Add address at cam_index.
2056 				 */
2057 				cam_mask |= (1 << cam_index);
2058 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2059 				    VR_CAM_CTRL_ENABLE);
2060 				VR_PUT8(vrp->acc_reg, VR_CAM_ADDR, cam_index);
2061 				VR_PUT32(vrp->acc_reg, VR_CAM_MASK, cam_mask);
2062 				for (a = 0; a < ETHERADDRL; a++) {
2063 					VR_PUT8(vrp->acc_reg,
2064 					    VR_MCAM0 + a, mca[a]);
2065 				}
2066 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2067 				    VR_CAM_CTRL_WRITE);
2068 				drv_usecwait(2);
2069 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2070 				    VR_CAM_CTRL_DONE);
2071 			} else {
2072 				/*
2073 				 * No free CAM slots available
2074 				 * Add mca to the imperfect filter.
2075 				 */
2076 				use_hash_filter = B_TRUE;
2077 			}
2078 		} else {
2079 			/*
2080 			 * Find the index of the entry to remove
2081 			 * If the entry was not found (-1), the addition was
2082 			 * probably done when the table was full.
2083 			 */
2084 			cam_index = vr_cam_index(vrp, mca);
2085 			if (cam_index != -1) {
2086 				/*
2087 				 * Disable the corresponding mask bit.
2088 				 */
2089 				cam_mask &= ~(1 << cam_index);
2090 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2091 				    VR_CAM_CTRL_ENABLE);
2092 				VR_PUT32(vrp->acc_reg, VR_CAM_MASK, cam_mask);
2093 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2094 				    VR_CAM_CTRL_DONE);
2095 			} else {
2096 				/*
2097 				 * The entry to be removed was not found
2098 				 * The likely cause is that the CAM was full
2099 				 * during addition. The entry is added to the
2100 				 * hash filter in that case and needs to be
2101 				 * removed there too.
2102 				 */
2103 				use_hash_filter = B_TRUE;
2104 			}
2105 		}
2106 	} else {
2107 		/*
2108 		 * No CAM in the MAC, thus we need the hash filter.
2109 		 */
2110 		use_hash_filter = B_TRUE;
2111 	}
2112 
2113 	if (use_hash_filter == B_TRUE) {
2114 		/*
2115 		 * Get the CRC-32 of the multicast address
2116 		 * The card uses the "MSB first" direction when calculating the
2117 		 * the CRC. This is odd because ethernet is "LSB first"
2118 		 * We have to use that "big endian" approach as well.
2119 		 */
2120 		crc_index = ether_crc_be(mca) >> (32 - 6);
2121 		if (add == B_TRUE) {
2122 			/*
2123 			 * Turn bit[crc_index] on.
2124 			 */
2125 			if (crc_index < 32)
2126 				vrp->mhash0 |= (1 << crc_index);
2127 			else
2128 				vrp->mhash1 |= (1 << (crc_index - 32));
2129 		} else {
2130 			/*
2131 			 * Turn bit[crc_index] off.
2132 			 */
2133 			if (crc_index < 32)
2134 				vrp->mhash0 &= ~(0 << crc_index);
2135 			else
2136 				vrp->mhash1 &= ~(0 << (crc_index - 32));
2137 		}
2138 
2139 		/*
2140 		 * When not promiscuous write the filter now. When promiscuous,
2141 		 * the filter is open and will be written when promiscuous ends.
2142 		 */
2143 		if (vrp->promisc == B_FALSE) {
2144 			VR_PUT32(vrp->acc_reg, VR_MAR0, vrp->mhash0);
2145 			VR_PUT32(vrp->acc_reg, VR_MAR1, vrp->mhash1);
2146 		}
2147 	}
2148 
2149 	/*
2150 	 * Enable/disable multicast receivements based on mcount.
2151 	 */
2152 	if (add == B_TRUE)
2153 		vrp->mcount++;
2154 	else if (vrp->mcount != 0)
2155 		vrp->mcount --;
2156 	if (vrp->mcount != 0)
2157 		VR_SETBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTMULTI);
2158 	else
2159 		VR_CLRBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTMULTI);
2160 
2161 	mutex_exit(&vrp->intrlock);
2162 	mutex_exit(&vrp->oplock);
2163 	return (0);
2164 }
2165 
2166 /*
2167  * Calculate the CRC32 for 6 bytes of multicast address in MSB(it) first order.
2168  * The MSB first order is a bit odd because Ethernet standard is LSB first
2169  */
2170 static uint32_t
ether_crc_be(const uint8_t * data)2171 ether_crc_be(const uint8_t *data)
2172 {
2173 	uint32_t	crc = (uint32_t)0xFFFFFFFFU;
2174 	uint32_t	carry;
2175 	uint32_t	bit;
2176 	uint32_t	length;
2177 	uint8_t		c;
2178 
2179 	for (length = 0; length < ETHERADDRL; length++) {
2180 		c = data[length];
2181 		for (bit = 0; bit < 8; bit++) {
2182 			carry = ((crc & 0x80000000U) ? 1 : 0) ^ (c & 0x01);
2183 			crc <<= 1;
2184 			c >>= 1;
2185 			if (carry)
2186 				crc = (crc ^ 0x04C11DB6) | carry;
2187 		}
2188 	}
2189 	return (crc);
2190 }
2191 
2192 
2193 /*
2194  * Return the CAM index (base 0) of maddr or -1 if maddr is not found
2195  * If maddr is 0, return the index of an empty slot in CAM or -1 when no free
2196  * slots available.
2197  */
2198 static int32_t
vr_cam_index(vr_t * vrp,const uint8_t * maddr)2199 vr_cam_index(vr_t *vrp, const uint8_t *maddr)
2200 {
2201 	ether_addr_t	taddr;
2202 	int32_t		index;
2203 	uint32_t	mask;
2204 	uint32_t	a;
2205 
2206 	bzero(&taddr, sizeof (taddr));
2207 
2208 	/*
2209 	 * Read the CAM mask from the controller.
2210 	 */
2211 	mask = VR_GET32(vrp->acc_reg, VR_CAM_MASK);
2212 
2213 	/*
2214 	 * If maddr is 0, return the first unused slot or -1 for no unused.
2215 	 */
2216 	if (bcmp(maddr, taddr, ETHERADDRL) == 0) {
2217 		/*
2218 		 * Look for the first unused position in mask.
2219 		 */
2220 		for (index = 0; index < VR_CAM_SZ; index++) {
2221 			if (((mask >> index) & 1) == 0)
2222 				return (index);
2223 		}
2224 		return (-1);
2225 	} else {
2226 		/*
2227 		 * Look for maddr in CAM.
2228 		 */
2229 		for (index = 0; index < VR_CAM_SZ; index++) {
2230 			/* Look at enabled entries only */
2231 			if (((mask >> index) & 1) == 0)
2232 				continue;
2233 
2234 			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_ENABLE);
2235 			VR_PUT8(vrp->acc_reg, VR_CAM_ADDR, index);
2236 			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_READ);
2237 			drv_usecwait(2);
2238 			for (a = 0; a < ETHERADDRL; a++)
2239 				taddr[a] = VR_GET8(vrp->acc_reg, VR_MCAM0 + a);
2240 			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
2241 			if (bcmp(maddr, taddr, ETHERADDRL) == 0)
2242 				return (index);
2243 		}
2244 	}
2245 	return (-1);
2246 }
2247 
2248 /*
2249  * Set promiscuous mode on or off.
2250  */
2251 int
vr_mac_set_promisc(void * p,boolean_t promiscflag)2252 vr_mac_set_promisc(void *p, boolean_t promiscflag)
2253 {
2254 	vr_t		*vrp;
2255 	uint8_t		rxcfg;
2256 
2257 	vrp = (vr_t *)p;
2258 
2259 	mutex_enter(&vrp->intrlock);
2260 	mutex_enter(&vrp->oplock);
2261 	mutex_enter(&vrp->tx.lock);
2262 
2263 	/*
2264 	 * Get current receive configuration.
2265 	 */
2266 	rxcfg = VR_GET8(vrp->acc_reg, VR_RXCFG);
2267 	vrp->promisc = promiscflag;
2268 
2269 	if (promiscflag == B_TRUE) {
2270 		/*
2271 		 * Enable promiscuous mode and open the multicast filter.
2272 		 */
2273 		rxcfg |= (VR_RXCFG_PROMISC | VR_RXCFG_ACCEPTMULTI);
2274 		VR_PUT32(vrp->acc_reg, VR_MAR0, 0xffffffff);
2275 		VR_PUT32(vrp->acc_reg, VR_MAR1, 0xffffffff);
2276 	} else {
2277 		/*
2278 		 * Restore the multicast filter and disable promiscuous mode.
2279 		 */
2280 		VR_PUT32(vrp->acc_reg, VR_MAR0, vrp->mhash0);
2281 		VR_PUT32(vrp->acc_reg, VR_MAR1, vrp->mhash1);
2282 		rxcfg &= ~VR_RXCFG_PROMISC;
2283 		if (vrp->mcount != 0)
2284 			rxcfg |= VR_RXCFG_ACCEPTMULTI;
2285 	}
2286 	VR_PUT8(vrp->acc_reg, VR_RXCFG, rxcfg);
2287 	mutex_exit(&vrp->tx.lock);
2288 	mutex_exit(&vrp->oplock);
2289 	mutex_exit(&vrp->intrlock);
2290 	return (0);
2291 }
2292 
2293 int
vr_mac_getstat(void * arg,uint_t stat,uint64_t * val)2294 vr_mac_getstat(void *arg, uint_t stat, uint64_t *val)
2295 {
2296 	vr_t		*vrp;
2297 	uint64_t	v;
2298 
2299 	vrp = (void *) arg;
2300 
2301 	switch (stat) {
2302 	default:
2303 		return (ENOTSUP);
2304 
2305 	case ETHER_STAT_ADV_CAP_100T4:
2306 		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_T4) != 0;
2307 		break;
2308 
2309 	case ETHER_STAT_ADV_CAP_100FDX:
2310 		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_TX_FD) != 0;
2311 		break;
2312 
2313 	case ETHER_STAT_ADV_CAP_100HDX:
2314 		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_TX) != 0;
2315 		break;
2316 
2317 	case ETHER_STAT_ADV_CAP_10FDX:
2318 		v = (vrp->chip.mii.anadv & MII_ABILITY_10BASE_T_FD) != 0;
2319 		break;
2320 
2321 	case ETHER_STAT_ADV_CAP_10HDX:
2322 		v = (vrp->chip.mii.anadv & MII_ABILITY_10BASE_T) != 0;
2323 		break;
2324 
2325 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
2326 		v = 0;
2327 		break;
2328 
2329 	case ETHER_STAT_ADV_CAP_AUTONEG:
2330 		v = (vrp->chip.mii.control & MII_CONTROL_ANE) != 0;
2331 		break;
2332 
2333 	case ETHER_STAT_ADV_CAP_PAUSE:
2334 		v = (vrp->chip.mii.anadv & MII_ABILITY_PAUSE) != 0;
2335 		break;
2336 
2337 	case ETHER_STAT_ADV_REMFAULT:
2338 		v = (vrp->chip.mii.anadv & MII_AN_ADVERT_REMFAULT) != 0;
2339 		break;
2340 
2341 	case ETHER_STAT_ALIGN_ERRORS:
2342 		v = vrp->stats.ether_stat_align_errors;
2343 		break;
2344 
2345 	case ETHER_STAT_CAP_100T4:
2346 		v = (vrp->chip.mii.status & MII_STATUS_100_BASE_T4) != 0;
2347 		break;
2348 
2349 	case ETHER_STAT_CAP_100FDX:
2350 		v = (vrp->chip.mii.status & MII_STATUS_100_BASEX_FD) != 0;
2351 		break;
2352 
2353 	case ETHER_STAT_CAP_100HDX:
2354 		v = (vrp->chip.mii.status & MII_STATUS_100_BASEX) != 0;
2355 		break;
2356 
2357 	case ETHER_STAT_CAP_10FDX:
2358 		v = (vrp->chip.mii.status & MII_STATUS_10_FD) != 0;
2359 		break;
2360 
2361 	case ETHER_STAT_CAP_10HDX:
2362 		v = (vrp->chip.mii.status & MII_STATUS_10) != 0;
2363 		break;
2364 
2365 	case ETHER_STAT_CAP_ASMPAUSE:
2366 		v = 0;
2367 		break;
2368 
2369 	case ETHER_STAT_CAP_AUTONEG:
2370 		v = (vrp->chip.mii.status & MII_STATUS_CANAUTONEG) != 0;
2371 		break;
2372 
2373 	case ETHER_STAT_CAP_PAUSE:
2374 		v = 1;
2375 		break;
2376 
2377 	case ETHER_STAT_CAP_REMFAULT:
2378 		v = (vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0;
2379 		break;
2380 
2381 	case ETHER_STAT_CARRIER_ERRORS:
2382 		/*
2383 		 * Number of times carrier was lost or never detected on a
2384 		 * transmission attempt.
2385 		 */
2386 		v = vrp->stats.ether_stat_carrier_errors;
2387 		break;
2388 
2389 	case ETHER_STAT_JABBER_ERRORS:
2390 		return (ENOTSUP);
2391 
2392 	case ETHER_STAT_DEFER_XMTS:
2393 		/*
2394 		 * Packets without collisions where first transmit attempt was
2395 		 * delayed because the medium was busy.
2396 		 */
2397 		v = vrp->stats.ether_stat_defer_xmts;
2398 		break;
2399 
2400 	case ETHER_STAT_EX_COLLISIONS:
2401 		/*
2402 		 * Frames where excess collisions occurred on transmit, causing
2403 		 * transmit failure.
2404 		 */
2405 		v = vrp->stats.ether_stat_ex_collisions;
2406 		break;
2407 
2408 	case ETHER_STAT_FCS_ERRORS:
2409 		/*
2410 		 * Packets received with CRC errors.
2411 		 */
2412 		v = vrp->stats.ether_stat_fcs_errors;
2413 		break;
2414 
2415 	case ETHER_STAT_FIRST_COLLISIONS:
2416 		/*
2417 		 * Packets successfully transmitted with exactly one collision.
2418 		 */
2419 		v = vrp->stats.ether_stat_first_collisions;
2420 		break;
2421 
2422 	case ETHER_STAT_LINK_ASMPAUSE:
2423 		v = 0;
2424 		break;
2425 
2426 	case ETHER_STAT_LINK_AUTONEG:
2427 		v = (vrp->chip.mii.control & MII_CONTROL_ANE) != 0 &&
2428 		    (vrp->chip.mii.status & MII_STATUS_ANDONE) != 0;
2429 		break;
2430 
2431 	case ETHER_STAT_LINK_DUPLEX:
2432 		v = vrp->chip.link.duplex;
2433 		break;
2434 
2435 	case ETHER_STAT_LINK_PAUSE:
2436 		v = vrp->chip.link.flowctrl;
2437 		break;
2438 
2439 	case ETHER_STAT_LP_CAP_100T4:
2440 		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_T4) != 0;
2441 		break;
2442 
2443 	case ETHER_STAT_LP_CAP_1000FDX:
2444 		v = 0;
2445 		break;
2446 
2447 	case ETHER_STAT_LP_CAP_1000HDX:
2448 		v = 0;
2449 		break;
2450 
2451 	case ETHER_STAT_LP_CAP_100FDX:
2452 		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_TX_FD) != 0;
2453 		break;
2454 
2455 	case ETHER_STAT_LP_CAP_100HDX:
2456 		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_TX) != 0;
2457 		break;
2458 
2459 	case ETHER_STAT_LP_CAP_10FDX:
2460 		v = (vrp->chip.mii.lpable & MII_ABILITY_10BASE_T_FD) != 0;
2461 		break;
2462 
2463 	case ETHER_STAT_LP_CAP_10HDX:
2464 		v = (vrp->chip.mii.lpable & MII_ABILITY_10BASE_T) != 0;
2465 		break;
2466 
2467 	case ETHER_STAT_LP_CAP_ASMPAUSE:
2468 		v = 0;
2469 		break;
2470 
2471 	case ETHER_STAT_LP_CAP_AUTONEG:
2472 		v = (vrp->chip.mii.anexp & MII_AN_EXP_LPCANAN) != 0;
2473 		break;
2474 
2475 	case ETHER_STAT_LP_CAP_PAUSE:
2476 		v = (vrp->chip.mii.lpable & MII_ABILITY_PAUSE) != 0;
2477 		break;
2478 
2479 	case ETHER_STAT_LP_REMFAULT:
2480 		v = (vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0;
2481 		break;
2482 
2483 	case ETHER_STAT_MACRCV_ERRORS:
2484 		/*
2485 		 * Packets received with MAC errors, except align_errors,
2486 		 * fcs_errors, and toolong_errors.
2487 		 */
2488 		v = vrp->stats.ether_stat_macrcv_errors;
2489 		break;
2490 
2491 	case ETHER_STAT_MACXMT_ERRORS:
2492 		/*
2493 		 * Packets encountering transmit MAC failures, except carrier
2494 		 * and collision failures.
2495 		 */
2496 		v = vrp->stats.ether_stat_macxmt_errors;
2497 		break;
2498 
2499 	case ETHER_STAT_MULTI_COLLISIONS:
2500 		/*
2501 		 * Packets successfully transmitted with multiple collisions.
2502 		 */
2503 		v = vrp->stats.ether_stat_multi_collisions;
2504 		break;
2505 
2506 	case ETHER_STAT_SQE_ERRORS:
2507 		/*
2508 		 * Number of times signal quality error was reported
2509 		 * This one is reported by the PHY.
2510 		 */
2511 		return (ENOTSUP);
2512 
2513 	case ETHER_STAT_TOOLONG_ERRORS:
2514 		/*
2515 		 * Packets received larger than the maximum permitted length.
2516 		 */
2517 		v = vrp->stats.ether_stat_toolong_errors;
2518 		break;
2519 
2520 	case ETHER_STAT_TOOSHORT_ERRORS:
2521 		v = vrp->stats.ether_stat_tooshort_errors;
2522 		break;
2523 
2524 	case ETHER_STAT_TX_LATE_COLLISIONS:
2525 		/*
2526 		 * Number of times a transmit collision occurred late
2527 		 * (after 512 bit times).
2528 		 */
2529 		v = vrp->stats.ether_stat_tx_late_collisions;
2530 		break;
2531 
2532 	case ETHER_STAT_XCVR_ADDR:
2533 		/*
2534 		 * MII address in the 0 to 31 range of the physical layer
2535 		 * device in use for a given Ethernet device.
2536 		 */
2537 		v = vrp->chip.phyaddr;
2538 		break;
2539 
2540 	case ETHER_STAT_XCVR_ID:
2541 		/*
2542 		 * MII transceiver manufacturer and device ID.
2543 		 */
2544 		v = (vrp->chip.mii.identh << 16) | vrp->chip.mii.identl;
2545 		break;
2546 
2547 	case ETHER_STAT_XCVR_INUSE:
2548 		v = vrp->chip.link.mau;
2549 		break;
2550 
2551 	case MAC_STAT_BRDCSTRCV:
2552 		v = vrp->stats.mac_stat_brdcstrcv;
2553 		break;
2554 
2555 	case MAC_STAT_BRDCSTXMT:
2556 		v = vrp->stats.mac_stat_brdcstxmt;
2557 		break;
2558 
2559 	case MAC_STAT_MULTIXMT:
2560 		v = vrp->stats.mac_stat_multixmt;
2561 		break;
2562 
2563 	case MAC_STAT_COLLISIONS:
2564 		v = vrp->stats.mac_stat_collisions;
2565 		break;
2566 
2567 	case MAC_STAT_IERRORS:
2568 		v = vrp->stats.mac_stat_ierrors;
2569 		break;
2570 
2571 	case MAC_STAT_IFSPEED:
2572 		if (vrp->chip.link.speed == VR_LINK_SPEED_100MBS)
2573 			v = 100 * 1000 * 1000;
2574 		else if (vrp->chip.link.speed == VR_LINK_SPEED_10MBS)
2575 			v = 10 * 1000 * 1000;
2576 		else
2577 			v = 0;
2578 		break;
2579 
2580 	case MAC_STAT_IPACKETS:
2581 		v = vrp->stats.mac_stat_ipackets;
2582 		break;
2583 
2584 	case MAC_STAT_MULTIRCV:
2585 		v = vrp->stats.mac_stat_multircv;
2586 		break;
2587 
2588 	case MAC_STAT_NORCVBUF:
2589 		vrp->stats.mac_stat_norcvbuf +=
2590 		    VR_GET16(vrp->acc_reg, VR_TALLY_MPA);
2591 		VR_PUT16(vrp->acc_reg, VR_TALLY_MPA, 0);
2592 		v = vrp->stats.mac_stat_norcvbuf;
2593 		break;
2594 
2595 	case MAC_STAT_NOXMTBUF:
2596 		v = vrp->stats.mac_stat_noxmtbuf;
2597 		break;
2598 
2599 	case MAC_STAT_OBYTES:
2600 		v = vrp->stats.mac_stat_obytes;
2601 		break;
2602 
2603 	case MAC_STAT_OERRORS:
2604 		v = vrp->stats.ether_stat_macxmt_errors +
2605 		    vrp->stats.mac_stat_underflows +
2606 		    vrp->stats.ether_stat_align_errors +
2607 		    vrp->stats.ether_stat_carrier_errors +
2608 		    vrp->stats.ether_stat_fcs_errors;
2609 		break;
2610 
2611 	case MAC_STAT_OPACKETS:
2612 		v = vrp->stats.mac_stat_opackets;
2613 		break;
2614 
2615 	case MAC_STAT_RBYTES:
2616 		v = vrp->stats.mac_stat_rbytes;
2617 		break;
2618 
2619 	case MAC_STAT_UNKNOWNS:
2620 		/*
2621 		 * Isn't this something for the MAC layer to maintain?
2622 		 */
2623 		return (ENOTSUP);
2624 
2625 	case MAC_STAT_UNDERFLOWS:
2626 		v = vrp->stats.mac_stat_underflows;
2627 		break;
2628 
2629 	case MAC_STAT_OVERFLOWS:
2630 		v = vrp->stats.mac_stat_overflows;
2631 		break;
2632 	}
2633 	*val = v;
2634 	return (0);
2635 }
2636 
2637 int
vr_mac_set_ether_addr(void * p,const uint8_t * ea)2638 vr_mac_set_ether_addr(void *p, const uint8_t *ea)
2639 {
2640 	vr_t	*vrp;
2641 	int	i;
2642 
2643 	vrp = (vr_t *)p;
2644 	mutex_enter(&vrp->oplock);
2645 	mutex_enter(&vrp->intrlock);
2646 
2647 	/*
2648 	 * Set a new station address.
2649 	 */
2650 	for (i = 0; i < ETHERADDRL; i++)
2651 		VR_PUT8(vrp->acc_reg, VR_ETHERADDR + i, ea[i]);
2652 
2653 	mutex_exit(&vrp->intrlock);
2654 	mutex_exit(&vrp->oplock);
2655 	return (0);
2656 }
2657 
2658 /*
2659  * Configure the ethernet link according to param and chip.mii.
2660  */
2661 static void
vr_link_init(vr_t * vrp)2662 vr_link_init(vr_t *vrp)
2663 {
2664 	ASSERT(mutex_owned(&vrp->oplock));
2665 	if ((vrp->chip.mii.control & MII_CONTROL_ANE) != 0) {
2666 		/*
2667 		 * If we do autoneg, ensure restart autoneg is ON.
2668 		 */
2669 		vrp->chip.mii.control |= MII_CONTROL_RSAN;
2670 
2671 		/*
2672 		 * The advertisements are prepared by param_init.
2673 		 */
2674 		vr_phy_write(vrp, MII_AN_ADVERT, vrp->chip.mii.anadv);
2675 	} else {
2676 		/*
2677 		 * If we don't autoneg, we need speed, duplex and flowcontrol
2678 		 * to configure the link. However, dladm doesn't allow changes
2679 		 * to speed and duplex (readonly). The way this is solved
2680 		 * (ahem) is to select the highest enabled combination
2681 		 * Speed and duplex should be r/w when autoneg is off.
2682 		 */
2683 		if ((vrp->param.anadv_en &
2684 		    MII_ABILITY_100BASE_TX_FD) != 0) {
2685 			vrp->chip.mii.control |= MII_CONTROL_100MB;
2686 			vrp->chip.mii.control |= MII_CONTROL_FDUPLEX;
2687 		} else if ((vrp->param.anadv_en &
2688 		    MII_ABILITY_100BASE_TX) != 0) {
2689 			vrp->chip.mii.control |= MII_CONTROL_100MB;
2690 			vrp->chip.mii.control &= ~MII_CONTROL_FDUPLEX;
2691 		} else if ((vrp->param.anadv_en &
2692 		    MII_ABILITY_10BASE_T_FD) != 0) {
2693 			vrp->chip.mii.control |= MII_CONTROL_FDUPLEX;
2694 			vrp->chip.mii.control &= ~MII_CONTROL_100MB;
2695 		} else {
2696 			vrp->chip.mii.control &= ~MII_CONTROL_100MB;
2697 			vrp->chip.mii.control &= ~MII_CONTROL_FDUPLEX;
2698 		}
2699 	}
2700 	/*
2701 	 * Write the control register.
2702 	 */
2703 	vr_phy_write(vrp, MII_CONTROL, vrp->chip.mii.control);
2704 
2705 	/*
2706 	 * With autoneg off we cannot rely on the link_change interrupt for
2707 	 * for getting the status into the driver.
2708 	 */
2709 	if ((vrp->chip.mii.control & MII_CONTROL_ANE) == 0) {
2710 		vr_link_state(vrp);
2711 		mac_link_update(vrp->machdl,
2712 		    (link_state_t)vrp->chip.link.state);
2713 	}
2714 }
2715 
2716 /*
2717  * Get link state in the driver and configure the MAC accordingly.
2718  */
2719 static void
vr_link_state(vr_t * vrp)2720 vr_link_state(vr_t *vrp)
2721 {
2722 	uint16_t		mask;
2723 
2724 	ASSERT(mutex_owned(&vrp->oplock));
2725 
2726 	vr_phy_read(vrp, MII_STATUS, &vrp->chip.mii.status);
2727 	vr_phy_read(vrp, MII_CONTROL, &vrp->chip.mii.control);
2728 	vr_phy_read(vrp, MII_AN_ADVERT, &vrp->chip.mii.anadv);
2729 	vr_phy_read(vrp, MII_AN_LPABLE, &vrp->chip.mii.lpable);
2730 	vr_phy_read(vrp, MII_AN_EXPANSION, &vrp->chip.mii.anexp);
2731 
2732 	/*
2733 	 * If we did autongeg, deduce the link type/speed by selecting the
2734 	 * highest common denominator.
2735 	 */
2736 	if ((vrp->chip.mii.control & MII_CONTROL_ANE) != 0) {
2737 		mask = vrp->chip.mii.anadv & vrp->chip.mii.lpable;
2738 		if ((mask & MII_ABILITY_100BASE_TX_FD) != 0) {
2739 			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
2740 			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
2741 			vrp->chip.link.mau = VR_MAU_100X;
2742 		} else if ((mask & MII_ABILITY_100BASE_T4) != 0) {
2743 			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
2744 			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
2745 			vrp->chip.link.mau = VR_MAU_100T4;
2746 		} else if ((mask & MII_ABILITY_100BASE_TX) != 0) {
2747 			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
2748 			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
2749 			vrp->chip.link.mau = VR_MAU_100X;
2750 		} else if ((mask & MII_ABILITY_10BASE_T_FD) != 0) {
2751 			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
2752 			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
2753 			vrp->chip.link.mau = VR_MAU_10;
2754 		} else if ((mask & MII_ABILITY_10BASE_T) != 0) {
2755 			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
2756 			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
2757 			vrp->chip.link.mau = VR_MAU_10;
2758 		} else {
2759 			vrp->chip.link.speed = VR_LINK_SPEED_UNKNOWN;
2760 			vrp->chip.link.duplex = VR_LINK_DUPLEX_UNKNOWN;
2761 			vrp->chip.link.mau = VR_MAU_UNKNOWN;
2762 		}
2763 
2764 		/*
2765 		 * Did we negotiate pause?
2766 		 */
2767 		if ((mask & MII_ABILITY_PAUSE) != 0 &&
2768 		    vrp->chip.link.duplex == VR_LINK_DUPLEX_FULL)
2769 			vrp->chip.link.flowctrl = VR_PAUSE_BIDIRECTIONAL;
2770 		else
2771 			vrp->chip.link.flowctrl = VR_PAUSE_NONE;
2772 
2773 		/*
2774 		 * Did either one detect a AN fault?
2775 		 */
2776 		if ((vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0)
2777 			vr_log(vrp, CE_WARN,
2778 			    "AN remote fault reported by LP.");
2779 
2780 		if ((vrp->chip.mii.lpable & MII_AN_ADVERT_REMFAULT) != 0)
2781 			vr_log(vrp, CE_WARN, "AN remote fault caused for LP.");
2782 	} else {
2783 		/*
2784 		 * We didn't autoneg
2785 		 * The link type is defined by the control register.
2786 		 */
2787 		if ((vrp->chip.mii.control & MII_CONTROL_100MB) != 0) {
2788 			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
2789 			vrp->chip.link.mau = VR_MAU_100X;
2790 		} else {
2791 			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
2792 			vrp->chip.link.mau = VR_MAU_10;
2793 		}
2794 
2795 		if ((vrp->chip.mii.control & MII_CONTROL_FDUPLEX) != 0)
2796 			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
2797 		else {
2798 			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
2799 			/*
2800 			 * No pause on HDX links.
2801 			 */
2802 			vrp->chip.link.flowctrl = VR_PAUSE_NONE;
2803 		}
2804 	}
2805 
2806 	/*
2807 	 * Set the duplex mode on the MAC according to that of the PHY.
2808 	 */
2809 	if (vrp->chip.link.duplex == VR_LINK_DUPLEX_FULL) {
2810 		VR_SETBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_MACFULLDUPLEX);
2811 		/*
2812 		 * Enable packet queueing on FDX links.
2813 		 */
2814 		if ((vrp->chip.info.bugs & VR_BUG_NO_TXQUEUEING) == 0)
2815 			VR_CLRBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_QPKTDIS);
2816 	} else {
2817 		VR_CLRBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_MACFULLDUPLEX);
2818 		/*
2819 		 * Disable packet queueing on HDX links. With queueing enabled,
2820 		 * this MAC get's lost after a TX abort (too many colisions).
2821 		 */
2822 		VR_SETBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_QPKTDIS);
2823 	}
2824 
2825 	/*
2826 	 * Set pause options on the MAC.
2827 	 */
2828 	if (vrp->chip.link.flowctrl == VR_PAUSE_BIDIRECTIONAL) {
2829 		/*
2830 		 * All of our MAC's can receive pause frames.
2831 		 */
2832 		VR_SETBIT8(vrp->acc_reg, VR_MISC0, VR_MISC0_FDXRFEN);
2833 
2834 		/*
2835 		 * VT6105 and above can transmit pause frames.
2836 		 */
2837 		if ((vrp->chip.info.features & VR_FEATURE_TX_PAUSE_CAP) != 0) {
2838 			/*
2839 			 * Set the number of available receive descriptors
2840 			 * Non-zero values written to this register are added
2841 			 * to the register's contents. Careful: Writing zero
2842 			 * clears the register and thus causes a (long) pause
2843 			 * request.
2844 			 */
2845 			VR_PUT8(vrp->acc_reg, VR_FCR0_RXBUFCOUNT,
2846 			    MIN(vrp->rx.ndesc, 0xFF) -
2847 			    VR_GET8(vrp->acc_reg,
2848 			    VR_FCR0_RXBUFCOUNT));
2849 
2850 			/*
2851 			 * Request pause when we have 4 descs left.
2852 			 */
2853 			VR_SETBITS8(vrp->acc_reg, VR_FCR1,
2854 			    VR_FCR1_PAUSEONBITS, VR_FCR1_PAUSEON_04);
2855 
2856 			/*
2857 			 * Cancel the pause when there are 24 descriptors again.
2858 			 */
2859 			VR_SETBITS8(vrp->acc_reg, VR_FCR1,
2860 			    VR_FCR1_PAUSEOFFBITS, VR_FCR1_PAUSEOFF_24);
2861 
2862 			/*
2863 			 * Request a pause of FFFF bit-times. This long pause
2864 			 * is cancelled when the high watermark is reached.
2865 			 */
2866 			VR_PUT16(vrp->acc_reg, VR_FCR2_PAUSE, 0xFFFF);
2867 
2868 			/*
2869 			 * Enable flow control on the MAC.
2870 			 */
2871 			VR_SETBIT8(vrp->acc_reg, VR_MISC0, VR_MISC0_FDXTFEN);
2872 			VR_SETBIT8(vrp->acc_reg, VR_FCR1, VR_FCR1_FD_RX_EN |
2873 			    VR_FCR1_FD_TX_EN | VR_FCR1_XONXOFF_EN);
2874 		}
2875 	} else {
2876 		/*
2877 		 * Turn flow control OFF.
2878 		 */
2879 		VR_CLRBIT8(vrp->acc_reg,
2880 		    VR_MISC0, VR_MISC0_FDXRFEN | VR_MISC0_FDXTFEN);
2881 		if ((vrp->chip.info.features & VR_FEATURE_TX_PAUSE_CAP) != 0) {
2882 			VR_CLRBIT8(vrp->acc_reg, VR_FCR1,
2883 			    VR_FCR1_FD_RX_EN | VR_FCR1_FD_TX_EN |
2884 			    VR_FCR1_XONXOFF_EN);
2885 		}
2886 	}
2887 
2888 	/*
2889 	 * Set link state.
2890 	 */
2891 	if ((vrp->chip.mii.status & MII_STATUS_LINKUP) != 0)
2892 		vrp->chip.link.state = VR_LINK_STATE_UP;
2893 	else
2894 		vrp->chip.link.state = VR_LINK_STATE_DOWN;
2895 }
2896 
2897 /*
2898  * The PHY is automatically polled by the MAC once per 1024 MD clock cycles
2899  * MD is clocked once per 960ns so polling happens about every 1M ns, some
2900  * 1000 times per second
2901  * This polling process is required for the functionality of the link change
2902  * interrupt. Polling process must be disabled in order to access PHY registers
2903  * using MDIO
2904  *
2905  * Turn off PHY polling so that the PHY registers can be accessed.
2906  */
2907 static void
vr_phy_autopoll_disable(vr_t * vrp)2908 vr_phy_autopoll_disable(vr_t *vrp)
2909 {
2910 	uint32_t	time;
2911 	uint8_t		miicmd, miiaddr;
2912 
2913 	/*
2914 	 * Special procedure to stop the autopolling.
2915 	 */
2916 	if ((vrp->chip.info.bugs & VR_BUG_MIIPOLLSTOP) != 0) {
2917 		/*
2918 		 * If polling is enabled.
2919 		 */
2920 		miicmd = VR_GET8(vrp->acc_reg, VR_MIICMD);
2921 		if ((miicmd & VR_MIICMD_MD_AUTO) != 0) {
2922 			/*
2923 			 * Wait for the end of a cycle (mdone set).
2924 			 */
2925 			time = 0;
2926 			do {
2927 				drv_usecwait(10);
2928 				if (time >= VR_MMI_WAITMAX) {
2929 					vr_log(vrp, CE_WARN,
2930 					    "Timeout in "
2931 					    "disable MII polling");
2932 					break;
2933 				}
2934 				time += VR_MMI_WAITINCR;
2935 				miiaddr = VR_GET8(vrp->acc_reg, VR_MIIADDR);
2936 			} while ((miiaddr & VR_MIIADDR_MDONE) == 0);
2937 		}
2938 		/*
2939 		 * Once paused, we can disable autopolling.
2940 		 */
2941 		VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
2942 	} else {
2943 		/*
2944 		 * Turn off MII polling.
2945 		 */
2946 		VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
2947 
2948 		/*
2949 		 * Wait for MIDLE in MII address register.
2950 		 */
2951 		time = 0;
2952 		do {
2953 			drv_usecwait(VR_MMI_WAITINCR);
2954 			if (time >= VR_MMI_WAITMAX) {
2955 				vr_log(vrp, CE_WARN,
2956 				    "Timeout in disable MII polling");
2957 				break;
2958 			}
2959 			time += VR_MMI_WAITINCR;
2960 			miiaddr = VR_GET8(vrp->acc_reg, VR_MIIADDR);
2961 		} while ((miiaddr & VR_MIIADDR_MIDLE) == 0);
2962 	}
2963 }
2964 
2965 /*
2966  * Turn on PHY polling. PHY's registers cannot be accessed.
2967  */
2968 static void
vr_phy_autopoll_enable(vr_t * vrp)2969 vr_phy_autopoll_enable(vr_t *vrp)
2970 {
2971 	uint32_t	time;
2972 
2973 	VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
2974 	VR_PUT8(vrp->acc_reg, VR_MIIADDR, MII_STATUS|VR_MIIADDR_MAUTO);
2975 	VR_PUT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_AUTO);
2976 
2977 	/*
2978 	 * Wait for the polling process to finish.
2979 	 */
2980 	time = 0;
2981 	do {
2982 		drv_usecwait(VR_MMI_WAITINCR);
2983 		if (time >= VR_MMI_WAITMAX) {
2984 			vr_log(vrp, CE_NOTE, "Timeout in enable MII polling");
2985 			break;
2986 		}
2987 		time += VR_MMI_WAITINCR;
2988 	} while ((VR_GET8(vrp->acc_reg, VR_MIIADDR) & VR_MIIADDR_MDONE) == 0);
2989 
2990 	/*
2991 	 * Initiate a polling.
2992 	 */
2993 	VR_SETBIT8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_MAUTO);
2994 }
2995 
2996 /*
2997  * Read a register from the PHY using MDIO.
2998  */
2999 static void
vr_phy_read(vr_t * vrp,int offset,uint16_t * value)3000 vr_phy_read(vr_t *vrp, int offset, uint16_t *value)
3001 {
3002 	uint32_t	time;
3003 
3004 	vr_phy_autopoll_disable(vrp);
3005 
3006 	/*
3007 	 * Write the register number to the lower 5 bits of the MII address
3008 	 * register.
3009 	 */
3010 	VR_SETBITS8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_BITS, offset);
3011 
3012 	/*
3013 	 * Write a READ command to the MII control register
3014 	 * This bit will be cleared when the read is finished.
3015 	 */
3016 	VR_SETBIT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_READ);
3017 
3018 	/*
3019 	 * Wait until the read is done.
3020 	 */
3021 	time = 0;
3022 	do {
3023 		drv_usecwait(VR_MMI_WAITINCR);
3024 		if (time >= VR_MMI_WAITMAX) {
3025 			vr_log(vrp, CE_NOTE, "Timeout in MII read command");
3026 			break;
3027 		}
3028 		time += VR_MMI_WAITINCR;
3029 	} while ((VR_GET8(vrp->acc_reg, VR_MIICMD) & VR_MIICMD_MD_READ) != 0);
3030 
3031 	*value = VR_GET16(vrp->acc_reg, VR_MIIDATA);
3032 	vr_phy_autopoll_enable(vrp);
3033 }
3034 
3035 /*
3036  * Write to a PHY's register.
3037  */
3038 static void
vr_phy_write(vr_t * vrp,int offset,uint16_t value)3039 vr_phy_write(vr_t *vrp, int offset, uint16_t value)
3040 {
3041 	uint32_t	time;
3042 
3043 	vr_phy_autopoll_disable(vrp);
3044 
3045 	/*
3046 	 * Write the register number to the MII address register.
3047 	 */
3048 	VR_SETBITS8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_BITS, offset);
3049 
3050 	/*
3051 	 * Write the value to the data register.
3052 	 */
3053 	VR_PUT16(vrp->acc_reg, VR_MIIDATA, value);
3054 
3055 	/*
3056 	 * Issue the WRITE command to the command register.
3057 	 * This bit will be cleared when the write is finished.
3058 	 */
3059 	VR_SETBIT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_WRITE);
3060 
3061 	time = 0;
3062 	do {
3063 		drv_usecwait(VR_MMI_WAITINCR);
3064 		if (time >= VR_MMI_WAITMAX) {
3065 			vr_log(vrp, CE_NOTE, "Timeout in MII write command");
3066 			break;
3067 		}
3068 		time += VR_MMI_WAITINCR;
3069 	} while ((VR_GET8(vrp->acc_reg, VR_MIICMD) & VR_MIICMD_MD_WRITE) != 0);
3070 	vr_phy_autopoll_enable(vrp);
3071 }
3072 
3073 /*
3074  * Initialize and install some private kstats.
3075  */
3076 typedef struct {
3077 	char		*name;
3078 	uchar_t		type;
3079 } vr_kstat_t;
3080 
3081 static const vr_kstat_t vr_driver_stats [] = {
3082 	{"allocbfail",		KSTAT_DATA_INT32},
3083 	{"intr_claimed",	KSTAT_DATA_INT64},
3084 	{"intr_unclaimed",	KSTAT_DATA_INT64},
3085 	{"linkchanges",		KSTAT_DATA_INT64},
3086 	{"txnfree",		KSTAT_DATA_INT32},
3087 	{"txstalls",		KSTAT_DATA_INT32},
3088 	{"resets",		KSTAT_DATA_INT32},
3089 	{"txreclaims",		KSTAT_DATA_INT64},
3090 	{"txreclaim0",		KSTAT_DATA_INT64},
3091 	{"cyclics",		KSTAT_DATA_INT64},
3092 	{"txchecks",		KSTAT_DATA_INT64},
3093 };
3094 
3095 static void
vr_kstats_init(vr_t * vrp)3096 vr_kstats_init(vr_t *vrp)
3097 {
3098 	kstat_t			*ksp;
3099 	struct	kstat_named	*knp;
3100 	int			i;
3101 	int			nstats;
3102 
3103 	nstats = sizeof (vr_driver_stats) / sizeof (vr_kstat_t);
3104 
3105 	ksp = kstat_create(MODULENAME, ddi_get_instance(vrp->devinfo),
3106 	    "driver", "net", KSTAT_TYPE_NAMED, nstats, 0);
3107 
3108 	if (ksp == NULL)
3109 		vr_log(vrp, CE_WARN, "kstat_create failed");
3110 
3111 	ksp->ks_update = vr_update_kstats;
3112 	ksp->ks_private = (void*) vrp;
3113 	knp = ksp->ks_data;
3114 
3115 	for (i = 0; i < nstats; i++, knp++) {
3116 		kstat_named_init(knp, vr_driver_stats[i].name,
3117 		    vr_driver_stats[i].type);
3118 	}
3119 	kstat_install(ksp);
3120 	vrp->ksp = ksp;
3121 }
3122 
3123 static int
vr_update_kstats(kstat_t * ksp,int access)3124 vr_update_kstats(kstat_t *ksp, int access)
3125 {
3126 	vr_t			*vrp;
3127 	struct kstat_named	*knp;
3128 
3129 	vrp = (vr_t *)ksp->ks_private;
3130 	knp = ksp->ks_data;
3131 
3132 	if (access != KSTAT_READ)
3133 		return (EACCES);
3134 
3135 	(knp++)->value.ui32 = vrp->stats.allocbfail;
3136 	(knp++)->value.ui64 = vrp->stats.intr_claimed;
3137 	(knp++)->value.ui64 = vrp->stats.intr_unclaimed;
3138 	(knp++)->value.ui64 = vrp->stats.linkchanges;
3139 	(knp++)->value.ui32 = vrp->tx.nfree;
3140 	(knp++)->value.ui32 = vrp->stats.txstalls;
3141 	(knp++)->value.ui32 = vrp->stats.resets;
3142 	(knp++)->value.ui64 = vrp->stats.txreclaims;
3143 	(knp++)->value.ui64 = vrp->stats.txreclaim0;
3144 	(knp++)->value.ui64 = vrp->stats.cyclics;
3145 	(knp++)->value.ui64 = vrp->stats.txchecks;
3146 	return (0);
3147 }
3148 
3149 /*
3150  * Remove 'private' kstats.
3151  */
3152 static void
vr_remove_kstats(vr_t * vrp)3153 vr_remove_kstats(vr_t *vrp)
3154 {
3155 	if (vrp->ksp != NULL)
3156 		kstat_delete(vrp->ksp);
3157 }
3158 
3159 /*
3160  * Get a property of the device/driver
3161  * Remarks:
3162  * - pr_val is always an integer of size pr_valsize
3163  * - ENABLED (EN) is what is configured via dladm
3164  * - ADVERTISED (ADV) is ENABLED minus constraints, like PHY/MAC capabilities
3165  * - DEFAULT are driver- and hardware defaults (DEFAULT is implemented as a
3166  *   flag in pr_flags instead of MAC_PROP_DEFAULT_)
3167  * - perm is the permission printed on ndd -get /.. \?
3168  */
3169 int
vr_mac_getprop(void * arg,const char * pr_name,mac_prop_id_t pr_num,uint_t pr_valsize,void * pr_val)3170 vr_mac_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3171     uint_t pr_valsize, void *pr_val)
3172 {
3173 	vr_t		*vrp;
3174 	uint32_t	err;
3175 	uint64_t	val;
3176 
3177 	/* Since we have no private properties */
3178 	_NOTE(ARGUNUSED(pr_name))
3179 
3180 	err = 0;
3181 	vrp = (vr_t *)arg;
3182 	switch (pr_num) {
3183 		case MAC_PROP_ADV_1000FDX_CAP:
3184 		case MAC_PROP_ADV_1000HDX_CAP:
3185 		case MAC_PROP_EN_1000FDX_CAP:
3186 		case MAC_PROP_EN_1000HDX_CAP:
3187 			val = 0;
3188 			break;
3189 
3190 		case MAC_PROP_ADV_100FDX_CAP:
3191 			val = (vrp->chip.mii.anadv &
3192 			    MII_ABILITY_100BASE_TX_FD) != 0;
3193 			break;
3194 
3195 		case MAC_PROP_ADV_100HDX_CAP:
3196 			val = (vrp->chip.mii.anadv &
3197 			    MII_ABILITY_100BASE_TX) != 0;
3198 			break;
3199 
3200 		case MAC_PROP_ADV_100T4_CAP:
3201 			val = (vrp->chip.mii.anadv &
3202 			    MII_ABILITY_100BASE_T4) != 0;
3203 			break;
3204 
3205 		case MAC_PROP_ADV_10FDX_CAP:
3206 			val = (vrp->chip.mii.anadv &
3207 			    MII_ABILITY_10BASE_T_FD) != 0;
3208 			break;
3209 
3210 		case MAC_PROP_ADV_10HDX_CAP:
3211 			val = (vrp->chip.mii.anadv &
3212 			    MII_ABILITY_10BASE_T) != 0;
3213 			break;
3214 
3215 		case MAC_PROP_AUTONEG:
3216 			val = (vrp->chip.mii.control &
3217 			    MII_CONTROL_ANE) != 0;
3218 			break;
3219 
3220 		case MAC_PROP_DUPLEX:
3221 			val = vrp->chip.link.duplex;
3222 			break;
3223 
3224 		case MAC_PROP_EN_100FDX_CAP:
3225 			val = (vrp->param.anadv_en &
3226 			    MII_ABILITY_100BASE_TX_FD) != 0;
3227 			break;
3228 
3229 		case MAC_PROP_EN_100HDX_CAP:
3230 			val = (vrp->param.anadv_en &
3231 			    MII_ABILITY_100BASE_TX) != 0;
3232 			break;
3233 
3234 		case MAC_PROP_EN_100T4_CAP:
3235 			val = (vrp->param.anadv_en &
3236 			    MII_ABILITY_100BASE_T4) != 0;
3237 			break;
3238 
3239 		case MAC_PROP_EN_10FDX_CAP:
3240 			val = (vrp->param.anadv_en &
3241 			    MII_ABILITY_10BASE_T_FD) != 0;
3242 			break;
3243 
3244 		case MAC_PROP_EN_10HDX_CAP:
3245 			val = (vrp->param.anadv_en &
3246 			    MII_ABILITY_10BASE_T) != 0;
3247 			break;
3248 
3249 		case MAC_PROP_EN_AUTONEG:
3250 			val = vrp->param.an_en == VR_LINK_AUTONEG_ON;
3251 			break;
3252 
3253 		case MAC_PROP_FLOWCTRL:
3254 			val = vrp->chip.link.flowctrl;
3255 			break;
3256 
3257 		case MAC_PROP_MTU:
3258 			val = vrp->param.mtu;
3259 			break;
3260 
3261 		case MAC_PROP_SPEED:
3262 			if (vrp->chip.link.speed ==
3263 			    VR_LINK_SPEED_100MBS)
3264 				val = 100 * 1000 * 1000;
3265 			else if (vrp->chip.link.speed ==
3266 			    VR_LINK_SPEED_10MBS)
3267 				val = 10 * 1000 * 1000;
3268 			else
3269 				val = 0;
3270 			break;
3271 
3272 		case MAC_PROP_STATUS:
3273 			val = vrp->chip.link.state;
3274 			break;
3275 
3276 		default:
3277 			err = ENOTSUP;
3278 			break;
3279 	}
3280 
3281 	if (err == 0 && pr_num != MAC_PROP_PRIVATE) {
3282 		if (pr_valsize == sizeof (uint64_t))
3283 			*(uint64_t *)pr_val = val;
3284 		else if (pr_valsize == sizeof (uint32_t))
3285 			*(uint32_t *)pr_val = val;
3286 		else if (pr_valsize == sizeof (uint16_t))
3287 			*(uint16_t *)pr_val = val;
3288 		else if (pr_valsize == sizeof (uint8_t))
3289 			*(uint8_t *)pr_val = val;
3290 		else
3291 			err = EINVAL;
3292 	}
3293 	return (err);
3294 }
3295 
3296 void
vr_mac_propinfo(void * arg,const char * pr_name,mac_prop_id_t pr_num,mac_prop_info_handle_t prh)3297 vr_mac_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3298     mac_prop_info_handle_t prh)
3299 {
3300 	vr_t		*vrp = (vr_t *)arg;
3301 	uint8_t		val, perm;
3302 
3303 	/* Since we have no private properties */
3304 	_NOTE(ARGUNUSED(pr_name))
3305 
3306 	switch (pr_num) {
3307 		case MAC_PROP_ADV_1000FDX_CAP:
3308 		case MAC_PROP_ADV_1000HDX_CAP:
3309 		case MAC_PROP_EN_1000FDX_CAP:
3310 		case MAC_PROP_EN_1000HDX_CAP:
3311 		case MAC_PROP_ADV_100FDX_CAP:
3312 		case MAC_PROP_ADV_100HDX_CAP:
3313 		case MAC_PROP_ADV_100T4_CAP:
3314 		case MAC_PROP_ADV_10FDX_CAP:
3315 		case MAC_PROP_ADV_10HDX_CAP:
3316 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3317 			return;
3318 
3319 		case MAC_PROP_EN_100FDX_CAP:
3320 			val = (vrp->chip.mii.status &
3321 			    MII_STATUS_100_BASEX_FD) != 0;
3322 			break;
3323 
3324 		case MAC_PROP_EN_100HDX_CAP:
3325 			val = (vrp->chip.mii.status &
3326 			    MII_STATUS_100_BASEX) != 0;
3327 			break;
3328 
3329 		case MAC_PROP_EN_100T4_CAP:
3330 			val = (vrp->chip.mii.status &
3331 			    MII_STATUS_100_BASE_T4) != 0;
3332 			break;
3333 
3334 		case MAC_PROP_EN_10FDX_CAP:
3335 			val = (vrp->chip.mii.status &
3336 			    MII_STATUS_10_FD) != 0;
3337 			break;
3338 
3339 		case MAC_PROP_EN_10HDX_CAP:
3340 			val = (vrp->chip.mii.status &
3341 			    MII_STATUS_10) != 0;
3342 			break;
3343 
3344 		case MAC_PROP_AUTONEG:
3345 		case MAC_PROP_EN_AUTONEG:
3346 			val = (vrp->chip.mii.status &
3347 			    MII_STATUS_CANAUTONEG) != 0;
3348 			break;
3349 
3350 		case MAC_PROP_FLOWCTRL:
3351 			mac_prop_info_set_default_link_flowctrl(prh,
3352 			    LINK_FLOWCTRL_BI);
3353 			return;
3354 
3355 		case MAC_PROP_MTU:
3356 			mac_prop_info_set_range_uint32(prh,
3357 			    ETHERMTU, ETHERMTU);
3358 			return;
3359 
3360 		case MAC_PROP_DUPLEX:
3361 			/*
3362 			 * Writability depends on autoneg.
3363 			 */
3364 			perm = ((vrp->chip.mii.control &
3365 			    MII_CONTROL_ANE) == 0) ? MAC_PROP_PERM_RW :
3366 			    MAC_PROP_PERM_READ;
3367 			mac_prop_info_set_perm(prh, perm);
3368 
3369 			if (perm == MAC_PROP_PERM_RW) {
3370 				mac_prop_info_set_default_uint8(prh,
3371 				    VR_LINK_DUPLEX_FULL);
3372 			}
3373 			return;
3374 
3375 		case MAC_PROP_SPEED:
3376 			perm = ((vrp->chip.mii.control &
3377 			    MII_CONTROL_ANE) == 0) ?
3378 			    MAC_PROP_PERM_RW : MAC_PROP_PERM_READ;
3379 			mac_prop_info_set_perm(prh, perm);
3380 
3381 			if (perm == MAC_PROP_PERM_RW) {
3382 				mac_prop_info_set_default_uint64(prh,
3383 				    100 * 1000 * 1000);
3384 			}
3385 			return;
3386 
3387 		case MAC_PROP_STATUS:
3388 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3389 			return;
3390 
3391 		default:
3392 			return;
3393 		}
3394 
3395 		mac_prop_info_set_default_uint8(prh, val);
3396 }
3397 
3398 /*
3399  * Set a property of the device.
3400  */
3401 int
vr_mac_setprop(void * arg,const char * pr_name,mac_prop_id_t pr_num,uint_t pr_valsize,const void * pr_val)3402 vr_mac_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3403 	uint_t pr_valsize, const void *pr_val)
3404 {
3405 	vr_t		*vrp;
3406 	uint32_t	err;
3407 	uint64_t	val;
3408 
3409 	/* Since we have no private properties */
3410 	_NOTE(ARGUNUSED(pr_name))
3411 
3412 	err = 0;
3413 	vrp = (vr_t *)arg;
3414 	mutex_enter(&vrp->oplock);
3415 
3416 	/*
3417 	 * The current set of public property values are passed as integers
3418 	 * Private properties are passed as strings in pr_val length pr_valsize.
3419 	 */
3420 	if (pr_num != MAC_PROP_PRIVATE) {
3421 		if (pr_valsize == sizeof (uint64_t))
3422 			val = *(uint64_t *)pr_val;
3423 		else if (pr_valsize == sizeof (uint32_t))
3424 			val = *(uint32_t *)pr_val;
3425 		else if (pr_valsize == sizeof (uint16_t))
3426 			val = *(uint32_t *)pr_val;
3427 		else if (pr_valsize == sizeof (uint8_t))
3428 			val = *(uint8_t *)pr_val;
3429 		else {
3430 			mutex_exit(&vrp->oplock);
3431 			return (EINVAL);
3432 		}
3433 	}
3434 
3435 	switch (pr_num) {
3436 		case MAC_PROP_DUPLEX:
3437 			if ((vrp->chip.mii.control & MII_CONTROL_ANE) == 0) {
3438 				if (val == LINK_DUPLEX_FULL)
3439 					vrp->chip.mii.control |=
3440 					    MII_CONTROL_FDUPLEX;
3441 				else if (val == LINK_DUPLEX_HALF)
3442 					vrp->chip.mii.control &=
3443 					    ~MII_CONTROL_FDUPLEX;
3444 				else
3445 					err = EINVAL;
3446 			} else
3447 				err = EINVAL;
3448 			break;
3449 
3450 		case MAC_PROP_EN_100FDX_CAP:
3451 			if (val == 0)
3452 				vrp->param.anadv_en &=
3453 				    ~MII_ABILITY_100BASE_TX_FD;
3454 			else
3455 				vrp->param.anadv_en |=
3456 				    MII_ABILITY_100BASE_TX_FD;
3457 			break;
3458 
3459 		case MAC_PROP_EN_100HDX_CAP:
3460 			if (val == 0)
3461 				vrp->param.anadv_en &=
3462 				    ~MII_ABILITY_100BASE_TX;
3463 			else
3464 				vrp->param.anadv_en |=
3465 				    MII_ABILITY_100BASE_TX;
3466 			break;
3467 
3468 		case MAC_PROP_EN_100T4_CAP:
3469 			if (val == 0)
3470 				vrp->param.anadv_en &=
3471 				    ~MII_ABILITY_100BASE_T4;
3472 			else
3473 				vrp->param.anadv_en |=
3474 				    MII_ABILITY_100BASE_T4;
3475 			break;
3476 
3477 		case MAC_PROP_EN_10FDX_CAP:
3478 			if (val == 0)
3479 				vrp->param.anadv_en &=
3480 				    ~MII_ABILITY_10BASE_T_FD;
3481 			else
3482 				vrp->param.anadv_en |=
3483 				    MII_ABILITY_10BASE_T_FD;
3484 			break;
3485 
3486 		case MAC_PROP_EN_10HDX_CAP:
3487 			if (val == 0)
3488 				vrp->param.anadv_en &=
3489 				    ~MII_ABILITY_10BASE_T;
3490 			else
3491 				vrp->param.anadv_en |=
3492 				    MII_ABILITY_10BASE_T;
3493 			break;
3494 
3495 		case MAC_PROP_AUTONEG:
3496 		case MAC_PROP_EN_AUTONEG:
3497 			if (val == 0) {
3498 				vrp->param.an_en = VR_LINK_AUTONEG_OFF;
3499 				vrp->chip.mii.control &= ~MII_CONTROL_ANE;
3500 			} else {
3501 				vrp->param.an_en = VR_LINK_AUTONEG_ON;
3502 				if ((vrp->chip.mii.status &
3503 				    MII_STATUS_CANAUTONEG) != 0)
3504 					vrp->chip.mii.control |=
3505 					    MII_CONTROL_ANE;
3506 				else
3507 					err = EINVAL;
3508 			}
3509 			break;
3510 
3511 		case MAC_PROP_FLOWCTRL:
3512 			if (val == LINK_FLOWCTRL_NONE)
3513 				vrp->param.anadv_en &= ~MII_ABILITY_PAUSE;
3514 			else if (val == LINK_FLOWCTRL_BI)
3515 				vrp->param.anadv_en |= MII_ABILITY_PAUSE;
3516 			else
3517 				err = EINVAL;
3518 			break;
3519 
3520 		case MAC_PROP_MTU:
3521 			if (val >= ETHERMIN && val <= ETHERMTU)
3522 				vrp->param.mtu = (uint32_t)val;
3523 			else
3524 				err = EINVAL;
3525 			break;
3526 
3527 		case MAC_PROP_SPEED:
3528 			if (val == 10 * 1000 * 1000)
3529 				vrp->chip.link.speed =
3530 				    VR_LINK_SPEED_10MBS;
3531 			else if (val == 100 * 1000 * 1000)
3532 				vrp->chip.link.speed =
3533 				    VR_LINK_SPEED_100MBS;
3534 			else
3535 				err = EINVAL;
3536 			break;
3537 
3538 		default:
3539 			err = ENOTSUP;
3540 			break;
3541 	}
3542 	if (err == 0 && pr_num != MAC_PROP_PRIVATE) {
3543 		vrp->chip.mii.anadv = vrp->param.anadv_en &
3544 		    (vrp->param.an_phymask & vrp->param.an_macmask);
3545 		vr_link_init(vrp);
3546 	}
3547 	mutex_exit(&vrp->oplock);
3548 	return (err);
3549 }
3550 
3551 
3552 /*
3553  * Logging and debug functions.
3554  */
3555 static struct {
3556 	kmutex_t mutex[1];
3557 	const char *ifname;
3558 	const char *fmt;
3559 	int level;
3560 } prtdata;
3561 
3562 static void
vr_vprt(const char * fmt,va_list args)3563 vr_vprt(const char *fmt, va_list args)
3564 {
3565 	char buf[512];
3566 
3567 	ASSERT(mutex_owned(prtdata.mutex));
3568 	(void) vsnprintf(buf, sizeof (buf), fmt, args);
3569 	cmn_err(prtdata.level, prtdata.fmt, prtdata.ifname, buf);
3570 }
3571 
3572 static void
vr_log(vr_t * vrp,int level,const char * fmt,...)3573 vr_log(vr_t *vrp, int level, const char *fmt, ...)
3574 {
3575 	va_list args;
3576 
3577 	mutex_enter(prtdata.mutex);
3578 	prtdata.ifname = vrp->ifname;
3579 	prtdata.fmt = "!%s: %s";
3580 	prtdata.level = level;
3581 
3582 	va_start(args, fmt);
3583 	vr_vprt(fmt, args);
3584 	va_end(args);
3585 
3586 	mutex_exit(prtdata.mutex);
3587 }
3588 
3589 #if defined(DEBUG)
3590 static void
vr_prt(const char * fmt,...)3591 vr_prt(const char *fmt, ...)
3592 {
3593 	va_list args;
3594 
3595 	ASSERT(mutex_owned(prtdata.mutex));
3596 
3597 	va_start(args, fmt);
3598 	vr_vprt(fmt, args);
3599 	va_end(args);
3600 
3601 	mutex_exit(prtdata.mutex);
3602 }
3603 
3604 void
vr_debug()3605 (*vr_debug())(const char *fmt, ...)
3606 {
3607 	mutex_enter(prtdata.mutex);
3608 	prtdata.ifname = MODULENAME;
3609 	prtdata.fmt = "^%s: %s\n";
3610 	prtdata.level = CE_CONT;
3611 
3612 	return (vr_prt);
3613 }
3614 #endif	/* DEBUG */
3615 
3616 DDI_DEFINE_STREAM_OPS(vr_dev_ops, nulldev, nulldev, vr_attach, vr_detach,
3617 nodev, NULL, D_MP, NULL, vr_quiesce);
3618 
3619 static struct modldrv vr_modldrv = {
3620 	&mod_driverops,		/* Type of module. This one is a driver */
3621 	vr_ident,		/* short description */
3622 	&vr_dev_ops		/* driver specific ops */
3623 };
3624 
3625 static struct modlinkage modlinkage = {
3626 	MODREV_1, (void *)&vr_modldrv, NULL
3627 };
3628 
3629 int
_info(struct modinfo * modinfop)3630 _info(struct modinfo *modinfop)
3631 {
3632 	return (mod_info(&modlinkage, modinfop));
3633 }
3634 
3635 int
_init(void)3636 _init(void)
3637 {
3638 	int	status;
3639 
3640 	mac_init_ops(&vr_dev_ops, MODULENAME);
3641 	status = mod_install(&modlinkage);
3642 	if (status == DDI_SUCCESS)
3643 		mutex_init(prtdata.mutex, NULL, MUTEX_DRIVER, NULL);
3644 	else
3645 		mac_fini_ops(&vr_dev_ops);
3646 	return (status);
3647 }
3648 
3649 int
_fini(void)3650 _fini(void)
3651 {
3652 	int status;
3653 
3654 	status = mod_remove(&modlinkage);
3655 	if (status == 0) {
3656 		mac_fini_ops(&vr_dev_ops);
3657 		mutex_destroy(prtdata.mutex);
3658 	}
3659 	return (status);
3660 }
3661