xref: /titanic_41/usr/src/uts/intel/io/dnet/dnet.c (revision bbb1277b6ec1b0daad4e3ed1a2b891d3e2ece2eb)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 /*
29  * dnet -- DEC 21x4x
30  *
31  * Currently supports:
32  *	21040, 21041, 21140, 21142, 21143
33  *	SROM versions 1, 3, 3.03, 4
34  *	TP, AUI, BNC, 100BASETX, 100BASET4
35  *
36  * XXX NEEDSWORK
37  *	All media SHOULD work, FX is untested
38  *
39  * Depends on the Generic LAN Driver utility functions in /kernel/misc/mac
40  */
41 
42 #define	BUG_4010796	/* See 4007871, 4010796 */
43 
44 #include <sys/types.h>
45 #include <sys/errno.h>
46 #include <sys/param.h>
47 #include <sys/stropts.h>
48 #include <sys/stream.h>
49 #include <sys/kmem.h>
50 #include <sys/conf.h>
51 #include <sys/devops.h>
52 #include <sys/ksynch.h>
53 #include <sys/stat.h>
54 #include <sys/modctl.h>
55 #include <sys/debug.h>
56 #include <sys/dlpi.h>
57 #include <sys/ethernet.h>
58 #include <sys/vlan.h>
59 #include <sys/mac.h>
60 #include <sys/mac_ether.h>
61 #include <sys/mac_provider.h>
62 #include <sys/pci.h>
63 #include <sys/ddi.h>
64 #include <sys/sunddi.h>
65 #include <sys/strsun.h>
66 
67 #include "dnet_mii.h"
68 #include "dnet.h"
69 
70 /*
71  *	Declarations and Module Linkage
72  */
73 
74 #define	IDENT	"DNET 21x4x"
75 
76 /*
77  * #define	DNET_NOISY
78  * #define	SROMDEBUG
79  * #define	SROMDUMPSTRUCTURES
80  */
81 
82 #ifdef DNETDEBUG
83 #ifdef DNET_NOISY
84 int	dnetdebug = -1;
85 #else
86 int	dnetdebug = 0;
87 #endif
88 #endif
89 
90 /* used for message allocated using desballoc() */
91 struct free_ptr {
92 	struct free_rtn	free_rtn;
93 	caddr_t buf;
94 };
95 
96 struct rbuf_list {
97 	struct rbuf_list	*rbuf_next;	/* next in the list */
98 	caddr_t			rbuf_vaddr;	/* virtual addr of the buf */
99 	uint32_t		rbuf_paddr;	/* physical addr of the buf */
100 	uint32_t		rbuf_endpaddr;	/* physical addr at the end */
101 	ddi_dma_handle_t	rbuf_dmahdl;	/* dma handle */
102 	ddi_acc_handle_t	rbuf_acchdl;	/* handle for DDI functions */
103 };
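
/*
 * dma_attr_rb (below) allows up to two DMA cookies per receive buffer, so a
 * buffer may straddle a page boundary.  rbuf_paddr and rbuf_endpaddr record
 * the physical addresses of the first and last bytes of the buffer; the
 * receive path compares their page frames and, when they differ, splits the
 * buffer across buffer1/buffer2 of the receive descriptor.
 */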
104 
105 /* Required system entry points */
106 static int dnet_probe(dev_info_t *);
107 static int dnet_attach(dev_info_t *, ddi_attach_cmd_t);
108 static int dnet_detach(dev_info_t *, ddi_detach_cmd_t);
109 static int dnet_quiesce(dev_info_t *);
110 
111 /* Required driver entry points for GLDv3 */
112 static int dnet_m_start(void *);
113 static void dnet_m_stop(void *);
114 static int dnet_m_getstat(void *, uint_t, uint64_t *);
115 static int dnet_m_setpromisc(void *, boolean_t);
116 static int dnet_m_multicst(void *, boolean_t, const uint8_t *);
117 static int dnet_m_unicst(void *, const uint8_t *);
118 static mblk_t *dnet_m_tx(void *, mblk_t *);
119 
120 static uint_t dnet_intr(caddr_t);
121 
122 /* Internal functions used by the above entry points */
123 static void write_gpr(struct dnetinstance *dnetp, uint32_t val);
124 static void dnet_reset_board(struct dnetinstance *);
125 static void dnet_init_board(struct dnetinstance *);
126 static void dnet_chip_init(struct dnetinstance *);
127 static uint32_t hashindex(const uint8_t *);
128 static int dnet_start(struct dnetinstance *);
129 static int dnet_set_addr(struct dnetinstance *);
130 
131 static boolean_t dnet_send(struct dnetinstance *, mblk_t *);
132 
133 static void dnet_getp(struct dnetinstance *);
134 static void update_rx_stats(struct dnetinstance *, int);
135 static void update_tx_stats(struct dnetinstance *, int);
136 
137 /* Media Selection Setup Routines */
138 static void set_gpr(struct dnetinstance *);
139 static void set_opr(struct dnetinstance *);
140 static void set_sia(struct dnetinstance *);
141 
142 /* Buffer Management Routines */
143 static int dnet_alloc_bufs(struct dnetinstance *);
144 static void dnet_free_bufs(struct dnetinstance *);
145 static void dnet_init_txrx_bufs(struct dnetinstance *);
146 static int alloc_descriptor(struct dnetinstance *);
147 static void dnet_reclaim_Tx_desc(struct dnetinstance *);
148 static int dnet_rbuf_init(dev_info_t *, int);
149 static int dnet_rbuf_destroy();
150 static struct rbuf_list *dnet_rbuf_alloc(dev_info_t *, int);
151 static void dnet_rbuf_free(caddr_t);
152 static void dnet_freemsg_buf(struct free_ptr *);
153 
154 static void setup_block(struct dnetinstance *);
155 
156 /* SROM read functions */
157 static int dnet_read_srom(dev_info_t *, int, ddi_acc_handle_t, caddr_t,
158     uchar_t *, int);
159 static void dnet_read21040addr(dev_info_t *, ddi_acc_handle_t, caddr_t,
160     uchar_t *, int *);
161 static void dnet_read21140srom(ddi_acc_handle_t, caddr_t, uchar_t *, int);
162 static int get_alternative_srom_image(dev_info_t *, uchar_t *, int);
163 static void dnet_print_srom(SROM_FORMAT *sr);
164 static void dnet_dump_leaf(LEAF_FORMAT *leaf);
165 static void dnet_dump_block(media_block_t *block);
166 #ifdef BUG_4010796
167 static void set_alternative_srom_image(dev_info_t *, uchar_t *, int);
168 static int dnet_hack(dev_info_t *);
169 #endif
170 
171 static int dnet_hack_interrupts(struct dnetinstance *, int);
172 static int dnet_detach_hacked_interrupt(dev_info_t *devinfo);
173 static void enable_interrupts(struct dnetinstance *);
174 
175 /* SROM parsing functions */
176 static void dnet_parse_srom(struct dnetinstance *dnetp, SROM_FORMAT *sr,
177     uchar_t *vi);
178 static void parse_controller_leaf(struct dnetinstance *dnetp, LEAF_FORMAT *leaf,
179     uchar_t *vi);
180 static uchar_t *parse_media_block(struct dnetinstance *dnetp,
181     media_block_t *block, uchar_t *vi);
182 static int check_srom_valid(uchar_t *);
183 static void dnet_dumpbin(char *msg, uchar_t *, int size, int len);
184 static void setup_legacy_blocks();
185 /* Active Media Determination Routines */
186 static void find_active_media(struct dnetinstance *);
187 static int send_test_packet(struct dnetinstance *);
188 static int dnet_link_sense(struct dnetinstance *);
189 
190 /* PHY MII Routines */
191 static ushort_t dnet_mii_read(dev_info_t *dip, int phy_addr, int reg_num);
192 static void dnet_mii_write(dev_info_t *dip, int phy_addr, int reg_num,
193 			int reg_dat);
194 static void write_mii(struct dnetinstance *, uint32_t, int);
195 static void mii_tristate(struct dnetinstance *);
196 static void do_phy(struct dnetinstance *);
197 static void dnet_mii_link_cb(dev_info_t *, int, enum mii_phy_state);
198 static void set_leaf(SROM_FORMAT *sr, LEAF_FORMAT *leaf);
199 
200 #ifdef DNETDEBUG
201 uint32_t dnet_usecelapsed(struct dnetinstance *dnetp);
202 void dnet_timestamp(struct dnetinstance *, char *);
203 void dnet_usectimeout(struct dnetinstance *, uint32_t, int, timercb_t);
204 #endif
205 static char *media_str[] = {
206 	"10BaseT",
207 	"10Base2",
208 	"10Base5",
209 	"100BaseTX",
210 	"10BaseT FD",
211 	"100BaseTX FD",
212 	"100BaseT4",
213 	"100BaseFX",
214 	"100BaseFX FD",
215 	"MII"
216 };
217 
218 /* default SROM info for cards with no SROMs */
219 static LEAF_FORMAT leaf_default_100;
220 static LEAF_FORMAT leaf_asante;
221 static LEAF_FORMAT leaf_phylegacy;
222 static LEAF_FORMAT leaf_cogent_100;
223 static LEAF_FORMAT leaf_21041;
224 static LEAF_FORMAT leaf_21040;
225 
226 /* rx buffer size (rounded up to a multiple of 4) */
227 int rx_buf_size = (ETHERMAX + ETHERFCSL + VLAN_TAGSZ + 3) & ~3;
228 
229 int max_rx_desc_21040 = MAX_RX_DESC_21040;
230 int max_rx_desc_21140 = MAX_RX_DESC_21140;
231 int max_tx_desc = MAX_TX_DESC;
232 int dnet_xmit_threshold = MAX_TX_DESC >> 2;	/* XXX need tuning? */
233 
234 static kmutex_t dnet_rbuf_lock;		/* mutex to protect rbuf_list data */
235 
236 /* used for buffers allocated by ddi_dma_mem_alloc() */
237 static ddi_dma_attr_t dma_attr = {
238 	DMA_ATTR_V0,		/* dma_attr version */
239 	0,			/* dma_attr_addr_lo */
240 	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
241 	0x7FFFFFFF,		/* dma_attr_count_max */
242 	4,			/* dma_attr_align */
243 	0x3F,			/* dma_attr_burstsizes */
244 	1,			/* dma_attr_minxfer */
245 	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
246 	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
247 	1,			/* dma_attr_sgllen */
248 	1,			/* dma_attr_granular */
249 	0,			/* dma_attr_flags */
250 };
251 
252 /* used for buffers allocated for rbuf, allow 2 cookies */
253 static ddi_dma_attr_t dma_attr_rb = {
254 	DMA_ATTR_V0,		/* dma_attr version */
255 	0,			/* dma_attr_addr_lo */
256 	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
257 	0x7FFFFFFF,		/* dma_attr_count_max */
258 	4,			/* dma_attr_align */
259 	0x3F,			/* dma_attr_burstsizes */
260 	1,			/* dma_attr_minxfer */
261 	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
262 	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
263 	2,			/* dma_attr_sgllen */
264 	1,			/* dma_attr_granular */
265 	0,			/* dma_attr_flags */
266 };
267 /* used for buffers which are NOT from ddi_dma_mem_alloc() - xmit side */
268 static ddi_dma_attr_t dma_attr_tx = {
269 	DMA_ATTR_V0,		/* dma_attr version */
270 	0,			/* dma_attr_addr_lo */
271 	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
272 	0x7FFFFFFF,		/* dma_attr_count_max */
273 	1,			/* dma_attr_align */
274 	0x3F,			/* dma_attr_burstsizes */
275 	1,			/* dma_attr_minxfer */
276 	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
277 	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
278 	0x7FFF,			/* dma_attr_sgllen */
279 	1,			/* dma_attr_granular */
280 	0,			/* dma_attr_flags */
281 };
282 
283 static ddi_device_acc_attr_t accattr = {
284 	DDI_DEVICE_ATTR_V0,
285 	DDI_NEVERSWAP_ACC,
286 	DDI_STRICTORDER_ACC,
287 };
288 
289 uchar_t dnet_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
290 
291 /* Standard Module linkage initialization for a Streams driver */
292 extern struct mod_ops mod_driverops;
293 
294 DDI_DEFINE_STREAM_OPS(dnet_devops, nulldev, dnet_probe, dnet_attach,
295     dnet_detach, nodev, NULL, D_MP, NULL, dnet_quiesce);
296 
297 static struct modldrv dnet_modldrv = {
298 	&mod_driverops,		/* Type of module.  This one is a driver */
299 	IDENT,			/* short description */
300 	&dnet_devops		/* driver specific ops */
301 };
302 
303 static struct modlinkage dnet_modlinkage = {
304 	MODREV_1,		/* ml_rev */
305 	{ &dnet_modldrv, NULL }	/* ml_linkage */
306 };
307 
308 static mac_callbacks_t dnet_m_callbacks = {
309 	0,			/* mc_callbacks */
310 	dnet_m_getstat,		/* mc_getstat */
311 	dnet_m_start,		/* mc_start */
312 	dnet_m_stop,		/* mc_stop */
313 	dnet_m_setpromisc,	/* mc_setpromisc */
314 	dnet_m_multicst,	/* mc_multicst */
315 	dnet_m_unicst,		/* mc_unicst */
316 	dnet_m_tx,		/* mc_tx */
317 	NULL,			/* mc_ioctl */
318 	NULL,			/* mc_getcapab */
319 	NULL,			/* mc_open */
320 	NULL			/* mc_close */
321 };
322 
323 /*
324  * Passed to the hacked interrupt for multiport Cogent and ZNYX cards with
325  * dodgy interrupt routing
326  */
327 #define	MAX_INST 8 /* Maximum instances on a multiport adapter. */
328 struct hackintr_inf
329 {
330 	struct dnetinstance *dnetps[MAX_INST]; /* dnetps for each port */
331 	dev_info_t *devinfo;		    /* Devinfo of the primary device */
332 	kmutex_t lock;
333 		/* Ensures the interrupt doesn't get called while detaching */
334 };
335 static char hackintr_propname[] = "InterruptData";
336 static char macoffset_propname[] = "MAC_offset";
337 static char speed_propname[] = "speed";
338 static char ofloprob_propname[] = "dmaworkaround";
339 static char duplex_propname[] = "full-duplex"; /* Must agree with MII */
340 static char printsrom_propname[] = "print-srom";
341 
342 static uint_t dnet_hack_intr(struct hackintr_inf *);
343 
344 int
345 _init(void)
346 {
347 	int i;
348 
349 	/* Configure fake sroms for legacy cards */
350 	mutex_init(&dnet_rbuf_lock, NULL, MUTEX_DRIVER, NULL);
351 	setup_legacy_blocks();
352 
353 	mac_init_ops(&dnet_devops, "dnet");
354 
355 	if ((i = mod_install(&dnet_modlinkage)) != 0) {
356 		mac_fini_ops(&dnet_devops);
357 		mutex_destroy(&dnet_rbuf_lock);
358 	}
359 	return (i);
360 }
361 
362 int
363 _fini(void)
364 {
365 	int i;
366 
367 	if ((i = mod_remove(&dnet_modlinkage)) == 0) {
368 		mac_fini_ops(&dnet_devops);
369 
370 		/* loop until all the receive buffers are freed */
371 		while (dnet_rbuf_destroy() != 0) {
372 			delay(drv_usectohz(100000));
373 #ifdef DNETDEBUG
374 			if (dnetdebug & DNETDDI)
375 				cmn_err(CE_WARN, "dnet _fini delay");
376 #endif
377 		}
378 		mutex_destroy(&dnet_rbuf_lock);
379 	}
380 	return (i);
381 }
382 
383 int
384 _info(struct modinfo *modinfop)
385 {
386 	return (mod_info(&dnet_modlinkage, modinfop));
387 }
388 
389 /*
390  * probe(9E) -- Determine if a device is present
391  */
392 static int
393 dnet_probe(dev_info_t *devinfo)
394 {
395 	ddi_acc_handle_t handle;
396 	uint16_t	vendorid;
397 	uint16_t	deviceid;
398 
399 	if (pci_config_setup(devinfo, &handle) != DDI_SUCCESS)
400 		return (DDI_PROBE_FAILURE);
401 
402 	vendorid = pci_config_get16(handle, PCI_CONF_VENID);
403 
404 	if (vendorid != DEC_VENDOR_ID) {
405 		pci_config_teardown(&handle);
406 		return (DDI_PROBE_FAILURE);
407 	}
408 
409 	deviceid = pci_config_get16(handle, PCI_CONF_DEVID);
410 	switch (deviceid) {
411 	case DEVICE_ID_21040:
412 	case DEVICE_ID_21041:
413 	case DEVICE_ID_21140:
414 	case DEVICE_ID_21143: /* And 142 */
415 		break;
416 	default:
417 		pci_config_teardown(&handle);
418 		return (DDI_PROBE_FAILURE);
419 	}
420 
421 	pci_config_teardown(&handle);
422 #ifndef BUG_4010796
423 	return (DDI_PROBE_SUCCESS);
424 #else
425 	return (dnet_hack(devinfo));
426 #endif
427 }
428 
429 #ifdef BUG_4010796
430 /*
431  * If we have a device, but we cannot presently access its SROM data,
432  * then we return DDI_PROBE_PARTIAL and hope that sometime later we
433  * will be able to get at the SROM data.  This can only happen if we
434  * are a secondary port with no SROM, and the bootstrap failed to set
435  * our DNET_SROM property, and our primary sibling has not yet probed.
436  */
437 static int
438 dnet_hack(dev_info_t *devinfo)
439 {
440 	uchar_t 	vendor_info[SROM_SIZE];
441 	uint32_t	csr;
442 	uint16_t	deviceid;
443 	ddi_acc_handle_t handle;
444 	uint32_t	retval;
445 	int		secondary;
446 	ddi_acc_handle_t io_handle;
447 	caddr_t		io_reg;
448 
449 #define	DNET_PCI_RNUMBER	1
450 
451 	if (pci_config_setup(devinfo, &handle) != DDI_SUCCESS)
452 		return (DDI_PROBE_FAILURE);
453 
454 	deviceid = pci_config_get16(handle, PCI_CONF_DEVID);
455 
456 	/*
457 	 * Turn on Master Enable and IO Enable bits.
458 	 */
459 	csr = pci_config_get32(handle, PCI_CONF_COMM);
460 	pci_config_put32(handle, PCI_CONF_COMM, (csr |PCI_COMM_ME|PCI_COMM_IO));
461 
462 	pci_config_teardown(&handle);
463 
464 	/* Now map I/O register */
465 	if (ddi_regs_map_setup(devinfo, DNET_PCI_RNUMBER,
466 	    &io_reg, 0, 0, &accattr, &io_handle) != DDI_SUCCESS) {
467 		return (DDI_PROBE_FAILURE);
468 	}
469 
470 	/*
471 	 * Reset the chip
472 	 */
473 	ddi_put32(io_handle, REG32(io_reg, BUS_MODE_REG), SW_RESET);
474 	drv_usecwait(3);
475 	ddi_put32(io_handle, REG32(io_reg, BUS_MODE_REG), 0);
476 	drv_usecwait(8);
477 
478 	secondary = dnet_read_srom(devinfo, deviceid, io_handle,
479 	    io_reg, vendor_info, sizeof (vendor_info));
480 
481 	switch (secondary) {
482 	case -1:
483 		/* We can't access our SROM data! */
484 		retval = DDI_PROBE_PARTIAL;
485 		break;
486 	case 0:
487 		retval = DDI_PROBE_SUCCESS;
488 		break;
489 	default:
490 		retval = DDI_PROBE_SUCCESS;
491 	}
492 
493 	ddi_regs_map_free(&io_handle);
494 	return (retval);
495 }
496 #endif /* BUG_4010796 */
497 
498 /*
499  * attach(9E) -- Attach a device to the system
500  *
501  * Called once for each board successfully probed.
502  */
503 static int
504 dnet_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
505 {
506 	uint16_t revid;
507 	struct dnetinstance 	*dnetp;		/* Our private device info */
508 	mac_register_t		*macp;
509 	uchar_t 		vendor_info[SROM_SIZE];
510 	uint32_t		csr;
511 	uint16_t		deviceid;
512 	ddi_acc_handle_t 	handle;
513 	int			secondary;
514 
515 #define	DNET_PCI_RNUMBER	1
516 
517 	switch (cmd) {
518 	case DDI_ATTACH:
519 		break;
520 
521 	case DDI_RESUME:
522 		/* Get the driver private (dnetinstance) structure */
523 		dnetp = ddi_get_driver_private(devinfo);
524 
525 		mutex_enter(&dnetp->intrlock);
526 		mutex_enter(&dnetp->txlock);
527 		dnet_reset_board(dnetp);
528 		dnet_init_board(dnetp);
529 		dnetp->suspended = B_FALSE;
530 
531 		if (dnetp->running) {
532 			dnetp->need_tx_update = B_FALSE;
533 			mutex_exit(&dnetp->txlock);
534 			(void) dnet_start(dnetp);
535 			mutex_exit(&dnetp->intrlock);
536 			mac_tx_update(dnetp->mac_handle);
537 		} else {
538 			mutex_exit(&dnetp->txlock);
539 			mutex_exit(&dnetp->intrlock);
540 		}
541 		return (DDI_SUCCESS);
542 	default:
543 		return (DDI_FAILURE);
544 	}
545 
546 	if (pci_config_setup(devinfo, &handle) != DDI_SUCCESS)
547 		return (DDI_FAILURE);
548 
549 	deviceid = pci_config_get16(handle, PCI_CONF_DEVID);
550 	switch (deviceid) {
551 	case DEVICE_ID_21040:
552 	case DEVICE_ID_21041:
553 	case DEVICE_ID_21140:
554 	case DEVICE_ID_21143: /* And 142 */
555 		break;
556 	default:
557 		pci_config_teardown(&handle);
558 		return (DDI_FAILURE);
559 	}
560 
561 	/*
562 	 * Turn on Master Enable and IO Enable bits.
563 	 */
564 	csr = pci_config_get32(handle, PCI_CONF_COMM);
565 	pci_config_put32(handle, PCI_CONF_COMM, (csr |PCI_COMM_ME|PCI_COMM_IO));
566 
567 	/* Make sure the device is not asleep */
568 	csr = pci_config_get32(handle, PCI_DNET_CONF_CFDD);
569 	pci_config_put32(handle, PCI_DNET_CONF_CFDD,
570 	    csr &  ~(CFDD_SLEEP|CFDD_SNOOZE));
571 
572 	revid = pci_config_get8(handle, PCI_CONF_REVID);
573 	pci_config_teardown(&handle);
574 
575 	dnetp = kmem_zalloc(sizeof (struct dnetinstance), KM_SLEEP);
576 	ddi_set_driver_private(devinfo, dnetp);
577 
578 	/* Now map I/O register */
579 	if (ddi_regs_map_setup(devinfo, DNET_PCI_RNUMBER, &dnetp->io_reg,
580 	    0, 0, &accattr, &dnetp->io_handle) != DDI_SUCCESS) {
581 		kmem_free(dnetp, sizeof (struct dnetinstance));
582 		return (DDI_FAILURE);
583 	}
584 
585 	dnetp->devinfo = devinfo;
586 	dnetp->board_type = deviceid;
587 
588 	/*
589 	 * Get the iblock cookie with which to initialize the mutexes.
590 	 */
591 	if (ddi_get_iblock_cookie(devinfo, 0, &dnetp->icookie)
592 	    != DDI_SUCCESS)
593 		goto fail;
594 
595 	/*
596 	 * Initialize mutexes for this device.
597 	 * Do this before registering the interrupt handler to avoid the
598 	 * condition where the interrupt handler could try to use an
599 	 * uninitialized mutex.
600 	 * Lock ordering rules: always lock intrlock first before
601 	 * txlock if both are required.
602 	 */
603 	mutex_init(&dnetp->txlock, NULL, MUTEX_DRIVER, dnetp->icookie);
604 	mutex_init(&dnetp->intrlock, NULL, MUTEX_DRIVER, dnetp->icookie);
605 
606 	/*
607 	 * Get the BNC/TP indicator from the conf file for 21040
608 	 */
609 	dnetp->bnc_indicator =
610 	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
611 	    "bncaui", -1);
612 
613 	/*
614 	 * For 21140 check the data rate set in the conf file. Default is
615 	 * 100Mb/s. Disallow connections at settings that would conflict
616 	 * with what's in the conf file
617 	 */
618 	dnetp->speed =
619 	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
620 	    speed_propname, 0);
621 	dnetp->full_duplex =
622 	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
623 	    duplex_propname, -1);
624 
625 	if (dnetp->speed == 100) {
626 		dnetp->disallowed_media |= (1UL<<MEDIA_TP) | (1UL<<MEDIA_TP_FD);
627 	} else if (dnetp->speed == 10) {
628 		dnetp->disallowed_media |=
629 		    (1UL<<MEDIA_SYM_SCR) | (1UL<<MEDIA_SYM_SCR_FD);
630 	}
631 
632 	if (dnetp->full_duplex == 1) {
633 		dnetp->disallowed_media |=
634 		    (1UL<<MEDIA_TP) | (1UL<<MEDIA_SYM_SCR);
635 	} else if (dnetp->full_duplex == 0) {
636 		dnetp->disallowed_media |=
637 		    (1UL<<MEDIA_TP_FD) | (1UL<<MEDIA_SYM_SCR_FD);
638 	}
639 
640 	if (dnetp->bnc_indicator == 0) /* Disable BNC and AUI media */
641 		dnetp->disallowed_media |= (1UL<<MEDIA_BNC) | (1UL<<MEDIA_AUI);
642 	else if (dnetp->bnc_indicator == 1) /* Force BNC only */
643 		dnetp->disallowed_media =  (uint32_t)~(1U<<MEDIA_BNC);
644 	else if (dnetp->bnc_indicator == 2) /* Force AUI only */
645 		dnetp->disallowed_media = (uint32_t)~(1U<<MEDIA_AUI);
646 
647 	dnet_reset_board(dnetp);
648 
649 	secondary = dnet_read_srom(devinfo, dnetp->board_type, dnetp->io_handle,
650 	    dnetp->io_reg, vendor_info, sizeof (vendor_info));
651 
652 	if (secondary == -1) /* ASSERT (vendor_info not big enough) */
653 		goto fail1;
654 
655 	dnet_parse_srom(dnetp, &dnetp->sr, vendor_info);
656 
657 	if (ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
658 	    printsrom_propname, 0))
659 		dnet_print_srom(&dnetp->sr);
660 
661 	dnetp->sr.netaddr[ETHERADDRL-1] += secondary;	/* unique ether addr */
662 
663 	BCOPY((caddr_t)dnetp->sr.netaddr,
664 	    (caddr_t)dnetp->vendor_addr, ETHERADDRL);
665 
666 	BCOPY((caddr_t)dnetp->sr.netaddr,
667 	    (caddr_t)dnetp->curr_macaddr, ETHERADDRL);
668 
669 	/*
670 	 * determine whether to implement workaround from DEC
671 	 * for DMA overrun errata.
672 	 */
673 	dnetp->overrun_workaround =
674 	    ((dnetp->board_type == DEVICE_ID_21140 && revid >= 0x20) ||
675 	    (dnetp->board_type == DEVICE_ID_21143 && revid <= 0x30)) ? 1 : 0;
676 
677 	dnetp->overrun_workaround =
678 	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
679 	    ofloprob_propname, dnetp->overrun_workaround);
680 
681 	/*
682 	 * Add the interrupt handler if dnet_hack_interrupts() returns 0.
683 	 * Otherwise dnet_hack_interrupts() itself adds the handler.
684 	 */
685 	if (!dnet_hack_interrupts(dnetp, secondary)) {
686 		(void) ddi_add_intr(devinfo, 0, NULL,
687 		    NULL, dnet_intr, (caddr_t)dnetp);
688 	}
689 
690 	dnetp->max_tx_desc = max_tx_desc;
691 	dnetp->max_rx_desc = max_rx_desc_21040;
692 	if (dnetp->board_type != DEVICE_ID_21040 &&
693 	    dnetp->board_type != DEVICE_ID_21041 &&
694 	    dnetp->speed != 10)
695 		dnetp->max_rx_desc = max_rx_desc_21140;
696 
697 	/* Allocate the TX and RX descriptors/buffers. */
698 	if (dnet_alloc_bufs(dnetp) == FAILURE) {
699 		cmn_err(CE_WARN, "DNET: Not enough DMA memory for buffers.");
700 		goto fail2;
701 	}
702 
703 	/*
704 	 *	Register ourselves with the GLDv3 interface
705 	 */
706 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
707 		goto fail2;
708 
709 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
710 	macp->m_driver = dnetp;
711 	macp->m_dip = devinfo;
712 	macp->m_src_addr = dnetp->curr_macaddr;
713 	macp->m_callbacks = &dnet_m_callbacks;
714 	macp->m_min_sdu = 0;
715 	macp->m_max_sdu = ETHERMTU;
716 	macp->m_margin = VLAN_TAGSZ;
717 
718 	if (mac_register(macp, &dnetp->mac_handle) == 0) {
719 		mac_free(macp);
720 
721 		mutex_enter(&dnetp->intrlock);
722 
723 		dnetp->phyaddr = -1;
724 		if (dnetp->board_type == DEVICE_ID_21140 ||
725 		    dnetp->board_type == DEVICE_ID_21143)
726 			do_phy(dnetp);	/* Initialize the PHY, if any */
727 		find_active_media(dnetp);
728 
729 		/* if the chosen media is non-MII, stop the port monitor */
730 		if (dnetp->selected_media_block->media_code != MEDIA_MII &&
731 		    dnetp->mii != NULL) {
732 			mii_destroy(dnetp->mii);
733 			dnetp->mii = NULL;
734 			dnetp->phyaddr = -1;
735 		}
736 
737 #ifdef DNETDEBUG
738 		if (dnetdebug & DNETSENSE)
739 			cmn_err(CE_NOTE, "dnet: link configured : %s",
740 			    media_str[dnetp->selected_media_block->media_code]);
741 #endif
742 		bzero(dnetp->setup_buf_vaddr, SETUPBUF_SIZE);
743 
744 		dnet_reset_board(dnetp);
745 		dnet_init_board(dnetp);
746 
747 		mutex_exit(&dnetp->intrlock);
748 
749 		(void) dnet_m_unicst(dnetp, dnetp->curr_macaddr);
750 		(void) dnet_m_multicst(dnetp, B_TRUE, dnet_broadcastaddr);
751 
752 		return (DDI_SUCCESS);
753 	}
754 
755 	mac_free(macp);
756 fail2:
757 	/* XXX function return value ignored */
758 	/*
759 	 * dnet_detach_hacked_interrupt() will remove
760 	 * interrupt for the non-hacked case also.
761 	 */
762 	(void) dnet_detach_hacked_interrupt(devinfo);
763 	dnet_free_bufs(dnetp);
764 fail1:
765 	mutex_destroy(&dnetp->txlock);
766 	mutex_destroy(&dnetp->intrlock);
767 fail:
768 	ddi_regs_map_free(&dnetp->io_handle);
769 	kmem_free(dnetp, sizeof (struct dnetinstance));
770 	return (DDI_FAILURE);
771 }
772 
773 /*
774  * detach(9E) -- Detach a device from the system
775  */
776 static int
777 dnet_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
778 {
779 	int32_t rc;
780 	struct dnetinstance *dnetp;		/* Our private device info */
781 	int32_t		proplen;
782 
783 	/* Get the driver private (dnetinstance) structure */
784 	dnetp = ddi_get_driver_private(devinfo);
785 
786 	switch (cmd) {
787 	case DDI_DETACH:
788 		break;
789 
790 	case DDI_SUSPEND:
791 		/*
792 		 * NB: dnetp->suspended can only be modified (marked true)
793 		 * if both intrlock and txlock are held.  This keeps both
794 		 * tx and rx code paths excluded.
795 		 */
796 		mutex_enter(&dnetp->intrlock);
797 		mutex_enter(&dnetp->txlock);
798 		dnetp->suspended = B_TRUE;
799 		dnet_reset_board(dnetp);
800 		mutex_exit(&dnetp->txlock);
801 		mutex_exit(&dnetp->intrlock);
802 		return (DDI_SUCCESS);
803 
804 	default:
805 		return (DDI_FAILURE);
806 	}
807 
808 	/*
809 	 *	Unregister ourselves from the GLDv3 interface
810 	 */
811 	if (mac_unregister(dnetp->mac_handle) != 0)
812 		return (DDI_FAILURE);
813 
814 	/* stop the board if it is running */
815 	dnet_reset_board(dnetp);
816 
817 	if ((rc = dnet_detach_hacked_interrupt(devinfo)) != DDI_SUCCESS)
818 		return (rc);
819 
820 	if (dnetp->mii != NULL)
821 		mii_destroy(dnetp->mii);
822 
823 	/* Free leaf information */
824 	set_leaf(&dnetp->sr, NULL);
825 
826 	ddi_regs_map_free(&dnetp->io_handle);
827 	dnet_free_bufs(dnetp);
828 	mutex_destroy(&dnetp->txlock);
829 	mutex_destroy(&dnetp->intrlock);
830 	kmem_free(dnetp, sizeof (struct dnetinstance));
831 
832 #ifdef BUG_4010796
833 	if (ddi_getproplen(DDI_DEV_T_ANY, devinfo, 0,
834 	    "DNET_HACK", &proplen) != DDI_PROP_SUCCESS)
835 		return (DDI_SUCCESS);
836 
837 	/*
838 	 * We must remove the properties we added, because if we leave
839 	 * them in the devinfo nodes and the driver is unloaded, when
840 	 * the driver is reloaded the info will still be there, causing
841 	 * nodes which had returned PROBE_PARTIAL the first time to
842 	 * instead return PROBE_SUCCESS, in turn causing the nodes to be
843 	 * attached in a different order, causing their PPA numbers to
844 	 * be different the second time around, which is undesirable.
845 	 */
846 	(void) ddi_prop_remove(DDI_DEV_T_NONE, devinfo, "DNET_HACK");
847 	(void) ddi_prop_remove(DDI_DEV_T_NONE, ddi_get_parent(devinfo),
848 	    "DNET_SROM");
849 	(void) ddi_prop_remove(DDI_DEV_T_NONE, ddi_get_parent(devinfo),
850 	    "DNET_DEVNUM");
851 #endif
852 
853 	return (DDI_SUCCESS);
854 }
855 
856 int
857 dnet_quiesce(dev_info_t *dip)
858 {
859 	struct dnetinstance *dnetp = ddi_get_driver_private(dip);
860 
861 	/*
862 	 * Reset chip (disables interrupts).
863 	 */
864 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, INT_MASK_REG), 0);
865 	ddi_put32(dnetp->io_handle,
866 	    REG32(dnetp->io_reg, BUS_MODE_REG), SW_RESET);
867 
868 	return (DDI_SUCCESS);
869 }
870 
871 static void
872 dnet_reset_board(struct dnetinstance *dnetp)
873 {
874 	uint32_t	val;
875 
876 	/*
877 	 * Before initializing, the dnet should be in the STOP state.
878 	 */
879 	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
880 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG),
881 	    val & ~(START_TRANSMIT | START_RECEIVE));
882 
883 	/*
884 	 * Reset the chip
885 	 */
886 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, INT_MASK_REG), 0);
887 	ddi_put32(dnetp->io_handle,
888 	    REG32(dnetp->io_reg, BUS_MODE_REG), SW_RESET);
889 	drv_usecwait(5);
890 }
891 
892 /*
893  * dnet_init_board() -- initialize the specified network board short of
894  * actually starting the board.  Call after dnet_reset_board().
895  * Called with intrlock held.
896  */
897 static void
898 dnet_init_board(struct dnetinstance *dnetp)
899 {
900 	set_opr(dnetp);
901 	set_gpr(dnetp);
902 	set_sia(dnetp);
903 	dnet_chip_init(dnetp);
904 }
905 
906 /* dnet_chip_init() - called with intrlock held */
907 static void
908 dnet_chip_init(struct dnetinstance *dnetp)
909 {
910 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, BUS_MODE_REG),
911 	    CACHE_ALIGN | BURST_SIZE);		/* CSR0 */
912 
913 	/*
914 	 * Initialize the TX and RX descriptors/buffers
915 	 */
916 	dnet_init_txrx_bufs(dnetp);
917 
918 	/*
919 	 * Set the base address of the Rx descriptor list in CSR3
920 	 */
921 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, RX_BASE_ADDR_REG),
922 	    dnetp->rx_desc_paddr);
923 
924 	/*
925 	 * Set the base address of the Tx descriptor list in CSR4
926 	 */
927 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, TX_BASE_ADDR_REG),
928 	    dnetp->tx_desc_paddr);
929 
930 	dnetp->tx_current_desc = dnetp->rx_current_desc = 0;
931 	dnetp->transmitted_desc = 0;
932 	dnetp->free_desc = dnetp->max_tx_desc;
933 	enable_interrupts(dnetp);
934 }
935 
936 /*
937  *	dnet_start() -- start the board receiving and allow transmits.
938  *  Called with intrlock held.
939  */
940 static int
941 dnet_start(struct dnetinstance *dnetp)
942 {
943 	uint32_t val;
944 
945 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
946 	/*
947 	 * start the board and enable receiving
948 	 */
949 	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
950 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG),
951 	    val | START_TRANSMIT);
952 	(void) dnet_set_addr(dnetp);
953 	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
954 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG),
955 	    val | START_RECEIVE);
956 	enable_interrupts(dnetp);
957 	return (0);
958 }
959 
960 static int
961 dnet_m_start(void *arg)
962 {
963 	struct dnetinstance *dnetp = arg;
964 
965 	mutex_enter(&dnetp->intrlock);
966 	dnetp->running = B_TRUE;
967 	/*
968 	 * start the board and enable receiving
969 	 */
970 	if (!dnetp->suspended)
971 		(void) dnet_start(dnetp);
972 	mutex_exit(&dnetp->intrlock);
973 	return (0);
974 }
975 
976 static void
977 dnet_m_stop(void *arg)
978 {
979 	struct dnetinstance *dnetp = arg;
980 	uint32_t val;
981 
982 	/*
983 	 * stop the board and disable transmit/receive
984 	 */
985 	mutex_enter(&dnetp->intrlock);
986 	if (!dnetp->suspended) {
987 		val = ddi_get32(dnetp->io_handle,
988 		    REG32(dnetp->io_reg, OPN_MODE_REG));
989 		ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG),
990 		    val & ~(START_TRANSMIT | START_RECEIVE));
991 	}
992 	mac_link_update(dnetp->mac_handle, LINK_STATE_UNKNOWN);
993 	dnetp->running = B_FALSE;
994 	mutex_exit(&dnetp->intrlock);
995 }
996 
997 /*
998  *	dnet_set_addr() -- set the physical network address on the board
999  *  Called with intrlock held.
1000  */
1001 static int
1002 dnet_set_addr(struct dnetinstance *dnetp)
1003 {
1004 	struct tx_desc_type *desc;
1005 	int 		current_desc;
1006 	uint32_t	val;
1007 
1008 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
1009 
1010 	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
1011 	if (!(val & START_TRANSMIT))
1012 		return (0);
1013 
1014 	current_desc = dnetp->tx_current_desc;
1015 	desc = &dnetp->tx_desc[current_desc];
1016 
1017 	mutex_enter(&dnetp->txlock);
1018 	dnetp->need_saddr = 0;
1019 	mutex_exit(&dnetp->txlock);
1020 
1021 	if ((alloc_descriptor(dnetp)) == FAILURE) {
1022 		mutex_enter(&dnetp->txlock);
1023 		dnetp->need_saddr = 1;
1024 		mutex_exit(&dnetp->txlock);
1025 #ifdef DNETDEBUG
1026 		if (dnetdebug & DNETTRACE)
1027 			cmn_err(CE_WARN, "DNET saddr:alloc descriptor failure");
1028 #endif
1029 		return (0);
1030 	}
1031 
1032 	desc->buffer1			= dnetp->setup_buf_paddr;
1033 	desc->buffer2			= 0;
1034 	desc->desc1.buffer_size1 	= SETUPBUF_SIZE;
1035 	desc->desc1.buffer_size2 	= 0;
1036 	desc->desc1.setup_packet	= 1;
1037 	desc->desc1.first_desc		= 0;
1038 	desc->desc1.last_desc 		= 0;
1039 	desc->desc1.filter_type0 	= 1;
1040 	desc->desc1.filter_type1 	= 1;
1041 	desc->desc1.int_on_comp		= 1;
1042 
1043 	desc->desc0.own = 1;
1044 	ddi_put8(dnetp->io_handle, REG8(dnetp->io_reg, TX_POLL_REG),
1045 	    TX_POLL_DEMAND);
1046 	return (0);
1047 }
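
/*
 * Note on the mechanism above: the 21x4x has no station-address register
 * that the driver writes directly.  The address filter is instead loaded by
 * queueing a special "setup frame" on the transmit ring: the descriptor
 * built in dnet_set_addr() points at setup_buf_paddr, has setup_packet set,
 * and is handed to the chip with a transmit poll demand.  The chip consumes
 * this frame internally rather than transmitting it on the wire.
 */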
1048 
1049 static int
1050 dnet_m_unicst(void *arg, const uint8_t *macaddr)
1051 {
1052 	struct dnetinstance *dnetp = arg;
1053 	uint32_t	index;
1054 	uint32_t	*hashp;
1055 
1056 	mutex_enter(&dnetp->intrlock);
1057 
1058 	bcopy(macaddr, dnetp->curr_macaddr, ETHERADDRL);
1059 
1060 	/*
1061 	 * As we are using Imperfect filtering, the broadcast address has to
1062 	 * be set explicitly in the 512 bit hash table.  Hence the index into
1063 	 * the hash table is calculated and the bit set to enable reception
1064 	 * of broadcast packets.
1065 	 *
1066 	 * We also use HASH_ONLY mode, without using the perfect filter for
1067 	 * our station address, because there appears to be a bug in the
1068 	 * 21140 where it fails to receive the specified perfect filter
1069 	 * address.
1070 	 *
1071 	 * Since dlsdmult comes through here, it doesn't matter that the count
1072 	 * is wrong for the two bits that correspond to the cases below. The
1073 	 * worst that could happen is that we'd leave on a bit for an old
1074 	 * macaddr, in the case where the macaddr gets changed, which is rare.
1075 	 * Since filtering is imperfect, it is OK if that happens.
1076 	 */
1077 	hashp = (uint32_t *)dnetp->setup_buf_vaddr;
1078 	index = hashindex((uint8_t *)dnet_broadcastaddr);
1079 	hashp[ index / 16 ] |= 1 << (index % 16);
1080 
1081 	index = hashindex((uint8_t *)dnetp->curr_macaddr);
1082 	hashp[ index / 16 ] |= 1 << (index % 16);
1083 
1084 	if (!dnetp->suspended)
1085 		(void) dnet_set_addr(dnetp);
1086 	mutex_exit(&dnetp->intrlock);
1087 	return (0);
1088 }
1089 
1090 static int
1091 dnet_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
1092 {
1093 	struct dnetinstance *dnetp = arg;
1094 	uint32_t	index;
1095 	uint32_t	*hashp;
1096 	uint32_t	retval;
1097 
1098 	mutex_enter(&dnetp->intrlock);
1099 	index = hashindex(macaddr);
1100 	hashp = (uint32_t *)dnetp->setup_buf_vaddr;
1101 	if (add) {
1102 		if (dnetp->multicast_cnt[index]++) {
1103 			mutex_exit(&dnetp->intrlock);
1104 			return (0);
1105 		}
1106 		hashp[ index / 16 ] |= 1 << (index % 16);
1107 	} else {
1108 		if (--dnetp->multicast_cnt[index]) {
1109 			mutex_exit(&dnetp->intrlock);
1110 			return (0);
1111 		}
1112 		hashp[ index / 16 ] &= ~ (1 << (index % 16));
1113 	}
1114 	if (!dnetp->suspended)
1115 		retval = dnet_set_addr(dnetp);
1116 	else
1117 		retval = 0;
1118 	mutex_exit(&dnetp->intrlock);
1119 	return (retval);
1120 }
1121 
1122 /*
1123  * A hashing function used for setting the
1124  * node address or a multicast address
1125  */
1126 static uint32_t
1127 hashindex(const uint8_t *address)
1128 {
1129 	uint32_t	crc = (uint32_t)HASH_CRC;
1130 	uint32_t const 	POLY = HASH_POLY;
1131 	uint32_t	msb;
1132 	int32_t 	byteslength;
1133 	uint8_t 	currentbyte;
1134 	uint32_t 	index;
1135 	int32_t 	bit;
1136 	int32_t		shift;
1137 
1138 	for (byteslength = 0; byteslength < ETHERADDRL; byteslength++) {
1139 		currentbyte = address[byteslength];
1140 		for (bit = 0; bit < 8; bit++) {
1141 			msb = crc >> 31;
1142 			crc <<= 1;
1143 			if (msb ^ (currentbyte & 1)) {
1144 				crc ^= POLY;
1145 				crc |= 0x00000001;
1146 			}
1147 			currentbyte >>= 1;
1148 		}
1149 	}
1150 
1151 	for (index = 0, bit = 23, shift = 8; shift >= 0; bit++, shift--) {
1152 		index |= (((crc >> bit) & 1) << shift);
1153 	}
1154 	return (index);
1155 }
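
/*
 * The 9-bit value returned above indexes the 512-bit hash table kept in the
 * setup buffer.  Callers turn it into a word offset and bit position; a
 * sketch of that use (as in dnet_m_unicst() and dnet_m_multicst() above):
 *
 *	uint32_t *hashp = (uint32_t *)dnetp->setup_buf_vaddr;
 *	hashp[index / 16] |= 1 << (index % 16);
 *
 * Only the low 16 bits of each 32-bit setup-buffer word carry filter data,
 * which is why the divisor is 16 rather than 32.
 */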
1156 
1157 static int
1158 dnet_m_setpromisc(void *arg, boolean_t on)
1159 {
1160 	struct dnetinstance *dnetp = arg;
1161 	uint32_t val;
1162 
1163 	mutex_enter(&dnetp->intrlock);
1164 	if (dnetp->promisc == on) {
1165 		mutex_exit(&dnetp->intrlock);
1166 		return (0);
1167 	}
1168 	dnetp->promisc = on;
1169 
1170 	if (!dnetp->suspended) {
1171 		val = ddi_get32(dnetp->io_handle,
1172 		    REG32(dnetp->io_reg, OPN_MODE_REG));
1173 		if (on)
1174 			ddi_put32(dnetp->io_handle,
1175 			    REG32(dnetp->io_reg, OPN_MODE_REG),
1176 			    val | PROM_MODE);
1177 		else
1178 			ddi_put32(dnetp->io_handle,
1179 			    REG32(dnetp->io_reg, OPN_MODE_REG),
1180 			    val & (~PROM_MODE));
1181 	}
1182 	mutex_exit(&dnetp->intrlock);
1183 	return (0);
1184 }
1185 
1186 static int
1187 dnet_m_getstat(void *arg, uint_t stat, uint64_t *val)
1188 {
1189 	struct dnetinstance *dnetp = arg;
1190 
1191 	switch (stat) {
1192 	case MAC_STAT_IFSPEED:
1193 		if (!dnetp->running) {
1194 			*val = 0;
1195 		} else {
1196 			*val = (dnetp->mii_up ?
1197 			    dnetp->mii_speed : dnetp->speed) * 1000000;
1198 		}
1199 		break;
1200 
1201 	case MAC_STAT_NORCVBUF:
1202 		*val = dnetp->stat_norcvbuf;
1203 		break;
1204 
1205 	case MAC_STAT_IERRORS:
1206 		*val = dnetp->stat_errrcv;
1207 		break;
1208 
1209 	case MAC_STAT_OERRORS:
1210 		*val = dnetp->stat_errxmt;
1211 		break;
1212 
1213 	case MAC_STAT_COLLISIONS:
1214 		*val = dnetp->stat_collisions;
1215 		break;
1216 
1217 	case ETHER_STAT_DEFER_XMTS:
1218 		*val = dnetp->stat_defer;
1219 		break;
1220 
1221 	case ETHER_STAT_CARRIER_ERRORS:
1222 		*val = dnetp->stat_nocarrier;
1223 		break;
1224 
1225 	case ETHER_STAT_TOOSHORT_ERRORS:
1226 		*val = dnetp->stat_short;
1227 		break;
1228 
1229 	case ETHER_STAT_LINK_DUPLEX:
1230 		if (!dnetp->running) {
1231 			*val = LINK_DUPLEX_UNKNOWN;
1232 
1233 		} else if (dnetp->mii_up) {
1234 			*val = dnetp->mii_duplex ?
1235 			    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
1236 		} else {
1237 			*val = dnetp->full_duplex ?
1238 			    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
1239 		}
1240 		break;
1241 
1242 	case ETHER_STAT_TX_LATE_COLLISIONS:
1243 		*val = dnetp->stat_xmtlatecoll;
1244 		break;
1245 
1246 	case ETHER_STAT_EX_COLLISIONS:
1247 		*val = dnetp->stat_excoll;
1248 		break;
1249 
1250 	case MAC_STAT_OVERFLOWS:
1251 		*val = dnetp->stat_overflow;
1252 		break;
1253 
1254 	case MAC_STAT_UNDERFLOWS:
1255 		*val = dnetp->stat_underflow;
1256 		break;
1257 
1258 	default:
1259 		return (ENOTSUP);
1260 	}
1261 
1262 	return (0);
1263 }
1264 
1265 #define	NextTXIndex(index) (((index)+1) % dnetp->max_tx_desc)
1266 #define	PrevTXIndex(index) (((index)-1) < 0 ? dnetp->max_tx_desc - 1: (index)-1)
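
/*
 * Both macros walk the transmit ring circularly: with max_tx_desc of, say,
 * 4, NextTXIndex(3) wraps to 0 and PrevTXIndex(0) wraps back to 3.
 * dnet_send() uses NextTXIndex to claim descriptors in order and
 * PrevTXIndex to walk backwards when setting the own bits, so that the
 * first descriptor is handed to the chip last.
 */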
1267 
1268 static mblk_t *
1269 dnet_m_tx(void *arg, mblk_t *mp)
1270 {
1271 	struct dnetinstance *dnetp = arg;
1272 
1273 	mutex_enter(&dnetp->txlock);
1274 
1275 	/* if suspended, drop the packet on the floor, we missed it */
1276 	if (dnetp->suspended) {
1277 		mutex_exit(&dnetp->txlock);
1278 		freemsg(mp);
1279 		return (NULL);
1280 	}
1281 
1282 	if (dnetp->need_saddr) {
1283 		/* XXX function return value ignored */
1284 		mutex_exit(&dnetp->txlock);
1285 		mutex_enter(&dnetp->intrlock);
1286 		(void) dnet_set_addr(dnetp);
1287 		mutex_exit(&dnetp->intrlock);
1288 		mutex_enter(&dnetp->txlock);
1289 	}
1290 
1291 	while (mp != NULL) {
1292 		if (!dnet_send(dnetp, mp)) {
1293 			mutex_exit(&dnetp->txlock);
1294 			return (mp);
1295 		}
1296 		mp = mp->b_next;
1297 	}
1298 
1299 	mutex_exit(&dnetp->txlock);
1300 
1301 	/*
1302 	 * Enable xmit interrupt in case we are running out of xmit descriptors
1303 	 * or there are more packets on the queue waiting to be transmitted.
1304 	 */
1305 	mutex_enter(&dnetp->intrlock);
1306 
1307 	enable_interrupts(dnetp);
1308 
1309 	/*
1310 	 * Kick the transmitter
1311 	 */
1312 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, TX_POLL_REG),
1313 	    TX_POLL_DEMAND);
1314 
1315 	mutex_exit(&dnetp->intrlock);
1316 
1317 	return (NULL);
1318 }
1319 
1320 static boolean_t
1321 dnet_send(struct dnetinstance *dnetp, mblk_t *mp)
1322 {
1323 	struct tx_desc_type	*ring = dnetp->tx_desc;
1324 	int		mblen, totlen;
1325 	int		index, end_index, start_index;
1326 	int		avail;
1327 	int		error;
1328 	int		bufn;
1329 	int		retval;
1330 	mblk_t		*bp;
1331 
1332 	ASSERT(MUTEX_HELD(&dnetp->txlock));
1333 
1334 	/* reclaim any xmit descriptors completed */
1335 	dnet_reclaim_Tx_desc(dnetp);
1336 
1337 	/*
1338 	 * Use the data buffers from the message and construct the
1339 	 * scatter/gather list by calling ddi_dma_addr_bind_handle().
1340 	 */
1341 	error = 0;
1342 	totlen = 0;
1343 	bp = mp;
1344 	bufn = 0;
1345 	index = start_index = dnetp->tx_current_desc;
1346 	avail = dnetp->free_desc;
1347 	while (bp != NULL) {
1348 		uint_t ncookies;
1349 		ddi_dma_cookie_t dma_cookie;
1350 
1351 		mblen = MBLKL(bp);
1352 
1353 		if (!mblen) {	/* skip zero-length message blocks */
1354 			bp = bp->b_cont;
1355 			continue;
1356 		}
1357 
1358 		retval = ddi_dma_addr_bind_handle(dnetp->dma_handle_tx, NULL,
1359 		    (caddr_t)bp->b_rptr, mblen,
1360 		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, 0,
1361 		    &dma_cookie, &ncookies);
1362 
1363 		switch (retval) {
1364 		case DDI_DMA_MAPPED:
1365 			break;		/* everything's fine */
1366 
1367 		case DDI_DMA_NORESOURCES:
1368 			error = 1;	/* allow retry by gld */
1369 			break;
1370 
1371 		case DDI_DMA_NOMAPPING:
1372 		case DDI_DMA_INUSE:
1373 		case DDI_DMA_TOOBIG:
1374 		default:
1375 			error = 2;	/* error, no retry */
1376 			break;
1377 		}
1378 
1379 		/*
1380 		 * we can use two cookies per descriptor (i.e. buffer1 and
1381 		 * buffer2) so we need at least (ncookies+1)/2 descriptors.
1382 		 */
1383 		if (((ncookies + 1) >> 1) > dnetp->free_desc) {
1384 			(void) ddi_dma_unbind_handle(dnetp->dma_handle_tx);
1385 			error = 1;
1386 			break;
1387 		}
1388 
1389 		/* setup the descriptors for this data buffer */
1390 		while (ncookies) {
1391 			end_index = index;
1392 			if (bufn % 2) {
1393 				ring[index].buffer2 =
1394 				    (uint32_t)dma_cookie.dmac_address;
1395 				ring[index].desc1.buffer_size2 =
1396 				    dma_cookie.dmac_size;
1397 				index = NextTXIndex(index); /* goto next desc */
1398 			} else {
1399 				/* initialize the descriptor */
1400 				ASSERT(ring[index].desc0.own == 0);
1401 				*(uint32_t *)&ring[index].desc0 = 0;
1402 				*(uint32_t *)&ring[index].desc1 &=
1403 				    DNET_END_OF_RING;
1404 				ring[index].buffer1 =
1405 				    (uint32_t)dma_cookie.dmac_address;
1406 				ring[index].desc1.buffer_size1 =
1407 				    dma_cookie.dmac_size;
1408 				ring[index].buffer2 = (uint32_t)(0);
1409 				dnetp->free_desc--;
1410 				ASSERT(dnetp->free_desc >= 0);
1411 			}
1412 			totlen += dma_cookie.dmac_size;
1413 			bufn++;
1414 			if (--ncookies)
1415 				ddi_dma_nextcookie(dnetp->dma_handle_tx,
1416 				    &dma_cookie);
1417 		}
1418 		(void) ddi_dma_unbind_handle(dnetp->dma_handle_tx);
1419 		bp = bp->b_cont;
1420 	}
1421 
1422 	if (error == 1) {
1423 		dnetp->stat_defer++;
1424 		dnetp->free_desc = avail;
1425 		dnetp->need_tx_update = B_TRUE;
1426 		return (B_FALSE);
1427 	} else if (error) {
1428 		dnetp->free_desc = avail;
1429 		freemsg(mp);
1430 		return (B_TRUE);	/* Drop packet, don't retry */
1431 	}
1432 
1433 	if (totlen > ETHERMAX + VLAN_TAGSZ) {
1434 		cmn_err(CE_WARN, "DNET: tried to send large %d packet", totlen);
1435 		dnetp->free_desc = avail;
1436 		freemsg(mp);
1437 		return (B_TRUE);	/* Don't repeat this attempt */
1438 	}
1439 
1440 	/*
1441 	 * Remember the message buffer pointer to do freemsg() at xmit
1442 	 * interrupt time.
1443 	 */
1444 	dnetp->tx_msgbufp[end_index] = mp;
1445 
1446 	/*
1447 	 * Now set the first/last buffer and own bits
1448 	 * Since the 21040 looks for these bits set in the
1449 	 * first buffer, work backwards in multiple buffers.
1450 	 */
1451 	ring[end_index].desc1.last_desc = 1;
1452 	ring[end_index].desc1.int_on_comp = 1;
1453 	for (index = end_index; index != start_index;
1454 	    index = PrevTXIndex(index))
1455 		ring[index].desc0.own = 1;
1456 	ring[start_index].desc1.first_desc = 1;
1457 	ring[start_index].desc0.own = 1;
1458 
1459 	dnetp->tx_current_desc = NextTXIndex(end_index);
1460 
1461 	/*
1462 	 * Safety check: make sure end-of-ring is set in last desc.
1463 	 */
1464 	ASSERT(ring[dnetp->max_tx_desc-1].desc1.end_of_ring != 0);
1465 
1466 	return (B_TRUE);
1467 }
1468 
1469 /*
1470  *	dnet_intr() -- interrupt from board to inform us that a receive or
1471  *	transmit has completed.
1472  */
1473 static uint_t
1474 dnet_intr(caddr_t arg)
1475 {
1476 	struct dnetinstance *dnetp = (struct dnetinstance *)arg;
1477 	uint32_t int_status;
1478 
1479 	mutex_enter(&dnetp->intrlock);
1480 
1481 	if (dnetp->suspended) {
1482 		mutex_exit(&dnetp->intrlock);
1483 		return (DDI_INTR_UNCLAIMED);
1484 	}
1485 
1486 	int_status = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg,
1487 	    STATUS_REG));
1488 
1489 	/*
1490 	 * If interrupt was not from this board
1491 	 */
1492 	if (!(int_status & (NORMAL_INTR_SUMM | ABNORMAL_INTR_SUMM))) {
1493 		mutex_exit(&dnetp->intrlock);
1494 		return (DDI_INTR_UNCLAIMED);
1495 	}
1496 
1497 	dnetp->stat_intr++;
1498 
1499 	if (int_status & GPTIMER_INTR) {
1500 		ddi_put32(dnetp->io_handle,
1501 		    REG32(dnetp->io_reg, STATUS_REG), GPTIMER_INTR);
1502 		if (dnetp->timer.cb)
1503 			dnetp->timer.cb(dnetp);
1504 		else
1505 			cmn_err(CE_WARN, "dnet: unhandled timer interrupt");
1506 	}
1507 
1508 	if (int_status & TX_INTR) {
1509 		ddi_put32(dnetp->io_handle,
1510 		    REG32(dnetp->io_reg, STATUS_REG), TX_INTR);
1511 		mutex_enter(&dnetp->txlock);
1512 		if (dnetp->need_tx_update) {
1513 			mutex_exit(&dnetp->txlock);
1514 			mutex_exit(&dnetp->intrlock);
1515 			mac_tx_update(dnetp->mac_handle);
1516 			mutex_enter(&dnetp->intrlock);
1517 			mutex_enter(&dnetp->txlock);
1518 			dnetp->need_tx_update = B_FALSE;
1519 		}
1520 		/* reclaim any xmit descriptors that are completed */
1521 		dnet_reclaim_Tx_desc(dnetp);
1522 		mutex_exit(&dnetp->txlock);
1523 	}
1524 
1525 	/*
1526 	 * Check if receive interrupt bit is set
1527 	 */
1528 	if (int_status & (RX_INTR | RX_UNAVAIL_INTR)) {
1529 		ddi_put32(dnetp->io_handle,
1530 		    REG32(dnetp->io_reg, STATUS_REG),
1531 		    int_status & (RX_INTR | RX_UNAVAIL_INTR));
1532 		dnet_getp(dnetp);
1533 	}
1534 
1535 	if (int_status & ABNORMAL_INTR_SUMM) {
1536 		/*
1537 		 * Check for system error
1538 		 */
1539 		if (int_status & SYS_ERR) {
1540 			if ((int_status & SYS_ERR_BITS) == MASTER_ABORT)
1541 				cmn_err(CE_WARN, "DNET: Bus Master Abort");
1542 			if ((int_status & SYS_ERR_BITS) == TARGET_ABORT)
1543 				cmn_err(CE_WARN, "DNET: Bus Target Abort");
1544 			if ((int_status & SYS_ERR_BITS) == PARITY_ERROR)
1545 				cmn_err(CE_WARN, "DNET: Parity error");
1546 		}
1547 
1548 		/*
1549 		 * If the jabber has timed out then reset the chip
1550 		 */
1551 		if (int_status & TX_JABBER_TIMEOUT)
1552 			cmn_err(CE_WARN, "DNET: Jabber timeout.");
1553 
1554 		/*
1555 		 * If an underflow has occurred, reset the chip
1556 		 */
1557 		if (int_status & TX_UNDERFLOW)
1558 			cmn_err(CE_WARN, "DNET: Tx Underflow.");
1559 
1560 #ifdef DNETDEBUG
1561 		if (dnetdebug & DNETINT)
1562 			cmn_err(CE_NOTE, "Trying to reset...");
1563 #endif
1564 		dnet_reset_board(dnetp);
1565 		dnet_init_board(dnetp);
1566 		/* XXX function return value ignored */
1567 		(void) dnet_start(dnetp);
1568 	}
1569 
1570 	/*
1571 	 * Enable the interrupts. Enable xmit interrupt in case we are
1572 	 * running out of free descriptors or if there are packets
1573 	 * in the queue waiting to be transmitted.
1574 	 */
1575 	enable_interrupts(dnetp);
1576 	mutex_exit(&dnetp->intrlock);
1577 	return (DDI_INTR_CLAIMED);	/* Indicate it was our interrupt */
1578 }
1579 
1580 static void
1581 dnet_getp(struct dnetinstance *dnetp)
1582 {
1583 	int packet_length, index;
1584 	mblk_t	*mp;
1585 	caddr_t 	virtual_address;
1586 	struct	rx_desc_type *desc = dnetp->rx_desc;
1587 	int marker = dnetp->rx_current_desc;
1588 	int misses;
1589 
1590 	if (!dnetp->overrun_workaround) {
1591 		/*
1592 		 * If the workaround is not in place, we must still update
1593 		 * the missed frame statistic from the on-chip counter.
1594 		 */
1595 		misses = ddi_get32(dnetp->io_handle,
1596 		    REG32(dnetp->io_reg, MISSED_FRAME_REG));
1597 		dnetp->stat_missed += (misses & MISSED_FRAME_MASK);
1598 	}
1599 
1600 	/* While host owns the current descriptor */
1601 	while (!(desc[dnetp->rx_current_desc].desc0.own)) {
1602 		struct free_ptr *frp;
1603 		caddr_t newbuf;
1604 		struct rbuf_list *rp;
1605 
1606 		index = dnetp->rx_current_desc;
1607 		ASSERT(desc[index].desc0.first_desc != 0);
1608 
1609 		/*
1610 		 * DMA overrun errata from DEC: avoid possible bus hangs
1611 		 * and data corruption
1612 		 */
1613 		if (dnetp->overrun_workaround &&
1614 		    marker == dnetp->rx_current_desc) {
1615 			int opn;
1616 			do {
1617 				marker = (marker+1) % dnetp->max_rx_desc;
1618 			} while (!(dnetp->rx_desc[marker].desc0.own) &&
1619 			    marker != index);
1620 
1621 			misses = ddi_get32(dnetp->io_handle,
1622 			    REG32(dnetp->io_reg, MISSED_FRAME_REG));
1623 			dnetp->stat_missed +=
1624 			    (misses & MISSED_FRAME_MASK);
1625 			if (misses & OVERFLOW_COUNTER_MASK) {
1626 				/*
1627 				 * Overflow(s) have occurred : stop receiver,
1628 				 * and wait until in stopped state
1629 				 */
1630 				opn = ddi_get32(dnetp->io_handle,
1631 				    REG32(dnetp->io_reg, OPN_MODE_REG));
1632 				ddi_put32(dnetp->io_handle,
1633 				    REG32(dnetp->io_reg, OPN_MODE_REG),
1634 				    opn & ~(START_RECEIVE));
1635 
1636 				do {
1637 					drv_usecwait(10);
1638 				} while ((ddi_get32(dnetp->io_handle,
1639 				    REG32(dnetp->io_reg, STATUS_REG)) &
1640 				    RECEIVE_PROCESS_STATE) != 0);
1641 #ifdef DNETDEBUG
1642 				if (dnetdebug & DNETRECV)
1643 					cmn_err(CE_CONT, "^*");
1644 #endif
1645 				/* Discard probably corrupt frames */
1646 				while (!(dnetp->rx_desc[index].desc0.own)) {
1647 					dnetp->rx_desc[index].desc0.own = 1;
1648 					index = (index+1) % dnetp->max_rx_desc;
1649 					dnetp->stat_missed++;
1650 				}
1651 
1652 				/* restart the receiver */
1653 				opn = ddi_get32(dnetp->io_handle,
1654 				    REG32(dnetp->io_reg, OPN_MODE_REG));
1655 				ddi_put32(dnetp->io_handle,
1656 				    REG32(dnetp->io_reg, OPN_MODE_REG),
1657 				    opn | START_RECEIVE);
1658 				marker = dnetp->rx_current_desc = index;
1659 				continue;
1660 			}
1661 			/*
1662 			 * At this point, we know that all packets before
1663 			 * "marker" were received before a dma overrun occurred
1664 			 */
1665 		}
1666 
1667 		/*
1668 		 * If we get an oversized packet it could span multiple
1669 		 * descriptors.  If this happens an error bit should be set.
1670 		 */
1671 		while (desc[index].desc0.last_desc == 0) {
1672 			index = (index + 1) % dnetp->max_rx_desc;
1673 			if (desc[index].desc0.own)
1674 				return;	/* not done receiving large packet */
1675 		}
1676 		while (dnetp->rx_current_desc != index) {
1677 			desc[dnetp->rx_current_desc].desc0.own = 1;
1678 			dnetp->rx_current_desc =
1679 			    (dnetp->rx_current_desc + 1) % dnetp->max_rx_desc;
1680 #ifdef DNETDEBUG
1681 			if (dnetdebug & DNETRECV)
1682 				cmn_err(CE_WARN, "dnet: received large packet");
1683 #endif
1684 		}
1685 
1686 		packet_length = desc[index].desc0.frame_len;
1687 
1688 		/*
1689 		 * Remove CRC from received data. This is an artefact of the
1690 		 * 21x4x chip and should not be passed higher up the network
1691 		 * stack.
1692 		 */
1693 		packet_length -= ETHERFCSL;
1694 
1695 		/* get the virtual address of the packet received */
1696 		virtual_address =
1697 		    dnetp->rx_buf_vaddr[index];
1698 
1699 		/*
1700 		 * If no packet errors then do:
1701 		 * 	1. Allocate a new receive buffer so that we can
1702 		 *	   use the current buffer as streams buffer to
1703 		 *	   avoid bcopy.
1704 		 *	2. If we got a new receive buffer then allocate
1705 		 *	   an mblk using desballoc().
1706 		 *	3. Otherwise use the mblk from allocb() and do
1707 		 *	   the bcopy.
1708 		 */
1709 		frp = NULL;
1710 		rp = NULL;
1711 		newbuf = NULL;
1712 		mp = NULL;
1713 		if (!desc[index].desc0.err_summary ||
1714 		    (desc[index].desc0.frame2long &&
1715 		    packet_length < rx_buf_size)) {
1716 			ASSERT(packet_length < rx_buf_size);
1717 			/*
1718 			 * Allocate another receive buffer for this descriptor.
1719 			 * If we fail to allocate then we do the normal bcopy.
1720 			 */
1721 			rp = dnet_rbuf_alloc(dnetp->devinfo, 0);
1722 			if (rp != NULL) {
1723 				newbuf = rp->rbuf_vaddr;
1724 				frp = kmem_zalloc(sizeof (*frp), KM_NOSLEEP);
1725 				if (frp != NULL) {
1726 					frp->free_rtn.free_func =
1727 					    dnet_freemsg_buf;
1728 					frp->free_rtn.free_arg = (char *)frp;
1729 					frp->buf = virtual_address;
1730 					mp = desballoc(
1731 					    (uchar_t *)virtual_address,
1732 					    packet_length, 0, &frp->free_rtn);
1733 					if (mp == NULL) {
1734 						kmem_free(frp, sizeof (*frp));
1735 						dnet_rbuf_free((caddr_t)newbuf);
1736 						frp = NULL;
1737 						newbuf = NULL;
1738 					}
1739 				}
1740 			}
1741 			if (mp == NULL) {
1742 				if (newbuf != NULL)
1743 					dnet_rbuf_free((caddr_t)newbuf);
1744 				mp = allocb(packet_length, 0);
1745 			}
1746 		}
1747 
1748 		if ((desc[index].desc0.err_summary &&
1749 		    packet_length >= rx_buf_size) || mp == NULL) {
1750 
1751 			/* Update gld statistics */
1752 			if (desc[index].desc0.err_summary)
1753 				update_rx_stats(dnetp, index);
1754 			else
1755 				dnetp->stat_norcvbuf++;
1756 
1757 			/*
1758 			 * Reset ownership of the descriptor.
1759 			 */
1760 			desc[index].desc0.own = 1;
1761 			dnetp->rx_current_desc =
1762 			    (dnetp->rx_current_desc+1) % dnetp->max_rx_desc;
1763 
1764 			/* Demand receive polling by the chip */
1765 			ddi_put32(dnetp->io_handle,
1766 			    REG32(dnetp->io_reg, RX_POLL_REG), RX_POLL_DEMAND);
1767 
1768 			continue;
1769 		}
1770 
1771 		if (newbuf != NULL) {
1772 			uint32_t end_paddr;
1773 			/* attach the new buffer to the rx descriptor */
1774 			dnetp->rx_buf_vaddr[index] = newbuf;
1775 			dnetp->rx_buf_paddr[index] = rp->rbuf_paddr;
1776 			desc[index].buffer1 = rp->rbuf_paddr;
1777 			desc[index].desc1.buffer_size1 = rx_buf_size;
1778 			desc[index].desc1.buffer_size2 = 0;
1779 			end_paddr = rp->rbuf_endpaddr;
1780 			if ((desc[index].buffer1 & ~dnetp->pgmask) !=
1781 			    (end_paddr & ~dnetp->pgmask)) {
1782 				/* discontiguous */
1783 				desc[index].buffer2 = end_paddr&~dnetp->pgmask;
1784 				desc[index].desc1.buffer_size2 =
1785 				    (end_paddr & dnetp->pgmask) + 1;
1786 				desc[index].desc1.buffer_size1 =
1787 				    rx_buf_size-desc[index].desc1.buffer_size2;
1788 			}
1789 		} else {
1790 			/* couldn't allocate another buffer; copy the data */
1791 			BCOPY((caddr_t)virtual_address, (caddr_t)mp->b_wptr,
1792 			    packet_length);
1793 		}
1794 
1795 		mp->b_wptr += packet_length;
1796 
1797 		desc[dnetp->rx_current_desc].desc0.own = 1;
1798 
1799 		/*
1800 		 * Increment receive desc index. This is for the scan of
1801 		 * next packet
1802 		 */
1803 		dnetp->rx_current_desc =
1804 		    (dnetp->rx_current_desc+1) % dnetp->max_rx_desc;
1805 
1806 		/* Demand polling by chip */
1807 		ddi_put32(dnetp->io_handle,
1808 		    REG32(dnetp->io_reg, RX_POLL_REG), RX_POLL_DEMAND);
1809 
1810 		/* send the packet upstream */
1811 		mutex_exit(&dnetp->intrlock);
1812 		mac_rx(dnetp->mac_handle, NULL, mp);
1813 		mutex_enter(&dnetp->intrlock);
1814 	}
1815 }
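
/*
 * Receive buffer loaning in dnet_getp() above: when a replacement buffer can
 * be allocated, the just-filled buffer is sent upstream zero-copy via
 * desballoc(), with dnet_freemsg_buf() registered as the free routine so the
 * loaned buffer can be returned to the rbuf free list when the stack is done
 * with it; otherwise the data is bcopy'd into an allocb() mblk and the
 * original buffer stays on the receive ring.
 */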
1816 /*
1817  * Function to update receive statistics
1818  */
1819 static void
1820 update_rx_stats(struct dnetinstance *dnetp, int index)
1821 {
1822 	struct rx_desc_type *descp = &(dnetp->rx_desc[index]);
1823 
1824 	/*
1825 	 * Update gld statistics
1826 	 */
1827 	dnetp->stat_errrcv++;
1828 
1829 	if (descp->desc0.overflow)	{
1830 		/* FIFO Overrun */
1831 		dnetp->stat_overflow++;
1832 	}
1833 
1834 	if (descp->desc0.collision) {
1835 		/*EMPTY*/
1836 		/* Late Collision on receive */
1837 		/* no appropriate counter */
1838 	}
1839 
1840 	if (descp->desc0.crc) {
1841 		/* CRC Error */
1842 		dnetp->stat_crc++;
1843 	}
1844 
1845 	if (descp->desc0.runt_frame) {
1846 		/* Runt Error */
1847 		dnetp->stat_short++;
1848 	}
1849 
1850 	if (descp->desc0.desc_err) {
1851 		/*EMPTY*/
1852 		/* Not enough receive descriptors */
1853 		/* This condition is accounted in dnet_intr() */
1854 	}
1855 
1856 	if (descp->desc0.frame2long) {
1857 		dnetp->stat_frame++;
1858 	}
1859 }
1860 
1861 /*
1862  * Function to update transmit statistics
1863  */
1864 static void
1865 update_tx_stats(struct dnetinstance *dnetp, int index)
1866 {
1867 	struct tx_desc_type *descp = &(dnetp->tx_desc[index]);
1868 	int	fd;
1869 	media_block_t	*block = dnetp->selected_media_block;
1870 
1871 
1872 	/* Update gld statistics */
1873 	dnetp->stat_errxmt++;
1874 
1875 	/* If we're in full-duplex don't count collisions or carrier loss. */
1876 	if (dnetp->mii_up) {
1877 		fd = dnetp->mii_duplex;
1878 	} else {
1879 		/* Rely on media code */
1880 		fd = block->media_code == MEDIA_TP_FD ||
1881 		    block->media_code == MEDIA_SYM_SCR_FD;
1882 	}
1883 
1884 	if (descp->desc0.collision_count && !fd) {
1885 		dnetp->stat_collisions += descp->desc0.collision_count;
1886 	}
1887 
1888 	if (descp->desc0.late_collision && !fd) {
1889 		dnetp->stat_xmtlatecoll++;
1890 	}
1891 
1892 	if (descp->desc0.excess_collision && !fd) {
1893 		dnetp->stat_excoll++;
1894 	}
1895 
1896 	if (descp->desc0.underflow) {
1897 		dnetp->stat_underflow++;
1898 	}
1899 
1900 #if 0
1901 	if (descp->desc0.tx_jabber_to) {
1902 		/* no appropriate counter */
1903 	}
1904 #endif
1905 
1906 	if (descp->desc0.carrier_loss && !fd) {
1907 		dnetp->stat_nocarrier++;
1908 	}
1909 
1910 	if (descp->desc0.no_carrier && !fd) {
1911 		dnetp->stat_nocarrier++;
1912 	}
1913 }
1914 
1915 /*
1916  *	========== Media Selection Setup Routines ==========
1917  */
1918 
1919 
1920 static void
1921 write_gpr(struct dnetinstance *dnetp, uint32_t val)
1922 {
1923 #ifdef DEBUG
1924 	if (dnetdebug & DNETREGCFG)
1925 		cmn_err(CE_NOTE, "GPR: %x", val);
1926 #endif
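	/*
	 * GPR_CONTROL_WRITE appears to be a driver-internal flag; translate
	 * it here into the chip-specific control-write-enable bit
	 * (CWE_21143 or CWE_21140) before the value reaches the register.
	 */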
1927 	switch (dnetp->board_type) {
1928 	case DEVICE_ID_21143:
1929 		/* Set the correct bit for a control write */
1930 		if (val & GPR_CONTROL_WRITE)
1931 			val |= CWE_21143, val &= ~GPR_CONTROL_WRITE;
1932 		/* Write to upper half of CSR15 */
1933 		dnetp->gprsia = (dnetp->gprsia & 0xffff) | (val << 16);
1934 		ddi_put32(dnetp->io_handle,
1935 		    REG32(dnetp->io_reg, SIA_GENERAL_REG), dnetp->gprsia);
1936 		break;
1937 	default:
1938 		/* Set the correct bit for a control write */
1939 		if (val & GPR_CONTROL_WRITE)
1940 			val |= CWE_21140, val &= ~GPR_CONTROL_WRITE;
1941 		ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, GP_REG), val);
1942 		break;
1943 	}
1944 }
1945 
1946 static uint32_t
1947 read_gpr(struct dnetinstance *dnetp)
1948 {
1949 	switch (dnetp->board_type) {
1950 	case DEVICE_ID_21143:
1951 		/* Read upper half of CSR15 */
1952 		return (ddi_get32(dnetp->io_handle,
1953 		    REG32(dnetp->io_reg, SIA_GENERAL_REG)) >> 16);
1954 	default:
1955 		return (ddi_get32(dnetp->io_handle,
1956 		    REG32(dnetp->io_reg, GP_REG)));
1957 	}
1958 }
1959 
1960 static void
1961 set_gpr(struct dnetinstance *dnetp)
1962 {
1963 	uint32_t *sequence;
1964 	int len;
1965 	LEAF_FORMAT *leaf = &dnetp->sr.leaf[dnetp->leaf];
1966 	media_block_t *block = dnetp->selected_media_block;
1967 	int i;
1968 
1969 	if (ddi_getlongprop(DDI_DEV_T_ANY, dnetp->devinfo,
1970 	    DDI_PROP_DONTPASS, "gpr-sequence", (caddr_t)&sequence,
1971 	    &len) == DDI_PROP_SUCCESS) {
1972 		for (i = 0; i < len / sizeof (uint32_t); i++)
1973 			write_gpr(dnetp, sequence[i]);
1974 		kmem_free(sequence, len);
1975 	} else {
1976 		/*
1977 		 * Write the reset sequence if this is the first time this
1978 		 * block has been selected.
1979 		 */
1980 		if (block->rstseqlen) {
1981 			for (i = 0; i < block->rstseqlen; i++)
1982 				write_gpr(dnetp, block->rstseq[i]);
1983 			/*
1984 			 * XXX Legacy blocks do not have reset sequences, so the
1985 			 * static blocks will never be modified by this
1986 			 */
1987 			block->rstseqlen = 0;
1988 		}
1989 		if (leaf->gpr)
1990 			write_gpr(dnetp, leaf->gpr | GPR_CONTROL_WRITE);
1991 
1992 		/* write GPR sequence each time */
1993 		for (i = 0; i < block->gprseqlen; i++)
1994 			write_gpr(dnetp, block->gprseq[i]);
1995 	}
1996 
1997 	/* This has possibly caused a PHY to reset.  Let MII know */
1998 	if (dnetp->phyaddr != -1)
1999 		/* XXX function return value ignored */
2000 		(void) mii_sync(dnetp->mii, dnetp->phyaddr);
2001 	drv_usecwait(5);
2002 }
2003 
2004 /* set_opr() - must be called with intrlock held */
2005 
2006 static void
2007 set_opr(struct dnetinstance *dnetp)
2008 {
2009 	uint32_t fd, mb1, sf;
2010 
2011 	int 		opnmode_len;
2012 	uint32_t val;
2013 	media_block_t *block = dnetp->selected_media_block;
2014 
2015 	ASSERT(block);
2016 
2017 	/* Check for custom "opnmode_reg" property */
2018 	opnmode_len = sizeof (val);
2019 	if (ddi_prop_op(DDI_DEV_T_ANY, dnetp->devinfo,
2020 	    PROP_LEN_AND_VAL_BUF, DDI_PROP_DONTPASS, "opnmode_reg",
2021 	    (caddr_t)&val, &opnmode_len) != DDI_PROP_SUCCESS)
2022 		opnmode_len = 0;
2023 
2024 	/* Some bits exist only on 21140 and greater */
2025 	if (dnetp->board_type != DEVICE_ID_21040 &&
2026 	    dnetp->board_type != DEVICE_ID_21041) {
2027 		mb1 = OPN_REG_MB1;
2028 		sf = STORE_AND_FORWARD;
2029 	} else {
2030 		mb1 = sf = 0;
2031 		mb1 = OPN_REG_MB1; /* Needed for 21040? */
2032 	}
2033 
2034 	if (opnmode_len) {
2035 		ddi_put32(dnetp->io_handle,
2036 		    REG32(dnetp->io_reg, OPN_MODE_REG), val);
2037 		dnet_reset_board(dnetp);
2038 		ddi_put32(dnetp->io_handle,
2039 		    REG32(dnetp->io_reg, OPN_MODE_REG), val);
2040 		return;
2041 	}
2042 
2043 	/*
2044 	 * Set each bit in CSR6 that we want
2045 	 */
2046 
2047 	/* Always want these bits set */
2048 	val = HASH_FILTERING | HASH_ONLY | TX_THRESHOLD_160 | mb1 | sf;
2049 
2050 	/* Promiscuous mode */
2051 	val |= dnetp->promisc ? PROM_MODE : 0;
2052 
2053 	/* Scrambler for SYM style media */
2054 	val |= ((block->command & CMD_SCR) && !dnetp->disable_scrambler) ?
2055 	    SCRAMBLER_MODE : 0;
2056 
2057 	/* Full duplex */
2058 	if (dnetp->mii_up) {
2059 		fd = dnetp->mii_duplex;
2060 	} else {
2061 		/* Rely on media code */
2062 		fd = block->media_code == MEDIA_TP_FD ||
2063 		    block->media_code == MEDIA_SYM_SCR_FD;
2064 	}
2065 
2066 	/* Port select (and therefore, heartbeat disable) */
2067 	val |= block->command & CMD_PS ? (PORT_SELECT | HEARTBEAT_DISABLE) : 0;
2068 
2069 	/* PCS function */
2070 	val |= (block->command) & CMD_PCS ? PCS_FUNCTION : 0;
2071 	val |= fd ? FULL_DUPLEX : 0;
2072 
2073 #ifdef DNETDEBUG
2074 	if (dnetdebug & DNETREGCFG)
2075 		cmn_err(CE_NOTE, "OPN: %x", val);
2076 #endif
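	/*
	 * The operating mode is written both before and after the board
	 * reset, presumably so that the new mode (port select in
	 * particular) is in effect on both sides of the reset.
	 */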
2077 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG), val);
2078 	dnet_reset_board(dnetp);
2079 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG), val);
2080 }
2081 
2082 static void
2083 set_sia(struct dnetinstance *dnetp)
2084 {
2085 	media_block_t *block = dnetp->selected_media_block;
2086 
2087 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
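	/*
	 * A type 2 block is an SIA media block carrying explicit CSR13-15
	 * values from the SROM; for other block types the SIA registers are
	 * simply cleared below (except on the 21140, which has no SIA).
	 */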
2088 	if (block->type == 2) {
2089 		int sia_delay;
2090 #ifdef DNETDEBUG
2091 		if (dnetdebug & DNETREGCFG)
2092 			cmn_err(CE_NOTE,
2093 			    "SIA: CSR13: %x, CSR14: %x, CSR15: %x",
2094 			    block->un.sia.csr13,
2095 			    block->un.sia.csr14,
2096 			    block->un.sia.csr15);
2097 #endif
2098 		sia_delay = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
2099 		    DDI_PROP_DONTPASS, "sia-delay", 10000);
2100 
2101 		ddi_put32(dnetp->io_handle,
2102 		    REG32(dnetp->io_reg, SIA_CONNECT_REG), 0);
2103 
2104 		ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, SIA_TXRX_REG),
2105 		    block->un.sia.csr14);
2106 
2107 		/*
2108 		 * For '143, we need to write through a copy of the register
2109 		 * to keep the GP half intact
2110 		 */
2111 		dnetp->gprsia = (dnetp->gprsia&0xffff0000)|block->un.sia.csr15;
2112 		ddi_put32(dnetp->io_handle,
2113 		    REG32(dnetp->io_reg, SIA_GENERAL_REG),
2114 		    dnetp->gprsia);
2115 
2116 		ddi_put32(dnetp->io_handle,
2117 		    REG32(dnetp->io_reg, SIA_CONNECT_REG),
2118 		    block->un.sia.csr13);
2119 
2120 		drv_usecwait(sia_delay);
2121 
2122 	} else if (dnetp->board_type != DEVICE_ID_21140) {
2123 		ddi_put32(dnetp->io_handle,
2124 		    REG32(dnetp->io_reg, SIA_CONNECT_REG), 0);
2125 		ddi_put32(dnetp->io_handle,
2126 		    REG32(dnetp->io_reg, SIA_TXRX_REG), 0);
2127 	}
2128 }
2129 
2130 /*
2131  * This function (re)allocates the receive and transmit buffers and
2132  * descriptors.  It can be called more than once per instance, though
2133  * currently it is only called from attach.  It should only be called
2134  * while the device is reset.
2135  */
2136 static int
2137 dnet_alloc_bufs(struct dnetinstance *dnetp)
2138 {
2139 	int i;
2140 	size_t len;
2141 	int page_size;
2142 	int realloc = 0;
2143 	int nrecv_desc_old = 0;
2144 	ddi_dma_cookie_t cookie;
2145 	uint_t ncookies;
2146 
2147 	/*
2148 	 * check if we are trying to reallocate with different xmit/recv
2149 	 * descriptor ring sizes.
2150 	 */
2151 	if ((dnetp->tx_desc != NULL) &&
2152 	    (dnetp->nxmit_desc != dnetp->max_tx_desc))
2153 		realloc = 1;
2154 
2155 	if ((dnetp->rx_desc != NULL) &&
2156 	    (dnetp->nrecv_desc != dnetp->max_rx_desc))
2157 		realloc = 1;
2158 
2159 	/* free up the old buffers if we are reallocating them */
2160 	if (realloc) {
2161 		nrecv_desc_old = dnetp->nrecv_desc;
2162 		dnet_free_bufs(dnetp); /* free the old buffers */
2163 	}
2164 
2165 	if (dnetp->dma_handle == NULL)
2166 		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr,
2167 		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle) != DDI_SUCCESS)
2168 			return (FAILURE);
2169 
2170 	if (dnetp->dma_handle_tx == NULL)
2171 		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr_tx,
2172 		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle_tx) != DDI_SUCCESS)
2173 			return (FAILURE);
2174 
2175 	if (dnetp->dma_handle_txdesc == NULL)
2176 		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr,
2177 		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle_txdesc) != DDI_SUCCESS)
2178 			return (FAILURE);
2179 
2180 	if (dnetp->dma_handle_setbuf == NULL)
2181 		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr,
2182 		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle_setbuf) != DDI_SUCCESS)
2183 			return (FAILURE);
2184 
2185 	page_size = ddi_ptob(dnetp->devinfo, 1);
2186 
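	/*
	 * pgmask is used when filling in rx descriptors to detect receive
	 * buffers that cross a page boundary and must be split across the
	 * descriptor's two buffer pointers.
	 */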
2187 	dnetp->pgmask = page_size - 1;
2188 
2189 	/* allocate setup buffer if necessary */
2190 	if (dnetp->setup_buf_vaddr == NULL) {
2191 		if (ddi_dma_mem_alloc(dnetp->dma_handle_setbuf,
2192 		    SETUPBUF_SIZE, &accattr, DDI_DMA_STREAMING,
2193 		    DDI_DMA_DONTWAIT, 0, (caddr_t *)&dnetp->setup_buf_vaddr,
2194 		    &len, &dnetp->setup_buf_acchdl) != DDI_SUCCESS)
2195 			return (FAILURE);
2196 
2197 		if (ddi_dma_addr_bind_handle(dnetp->dma_handle_setbuf,
2198 		    NULL, dnetp->setup_buf_vaddr, SETUPBUF_SIZE,
2199 		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
2200 		    NULL, &cookie, &ncookies) != DDI_DMA_MAPPED)
2201 			return (FAILURE);
2202 
2203 		dnetp->setup_buf_paddr = cookie.dmac_address;
2204 		bzero(dnetp->setup_buf_vaddr, len);
2205 	}
2206 
2207 	/* allocate xmit descriptor array of size dnetp->max_tx_desc */
2208 	if (dnetp->tx_desc == NULL) {
2209 		if (ddi_dma_mem_alloc(dnetp->dma_handle_txdesc,
2210 		    sizeof (struct tx_desc_type) * dnetp->max_tx_desc,
2211 		    &accattr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
2212 		    (caddr_t *)&dnetp->tx_desc, &len,
2213 		    &dnetp->tx_desc_acchdl) != DDI_SUCCESS)
2214 			return (FAILURE);
2215 
2216 		if (ddi_dma_addr_bind_handle(dnetp->dma_handle_txdesc,
2217 		    NULL, (caddr_t)dnetp->tx_desc,
2218 		    sizeof (struct tx_desc_type) * dnetp->max_tx_desc,
2219 		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
2220 		    NULL, &cookie, &ncookies) != DDI_DMA_MAPPED)
2221 			return (FAILURE);
2222 		dnetp->tx_desc_paddr = cookie.dmac_address;
2223 		bzero(dnetp->tx_desc, len);
2224 		dnetp->nxmit_desc = dnetp->max_tx_desc;
2225 
2226 		dnetp->tx_msgbufp =
2227 		    kmem_zalloc(dnetp->max_tx_desc * sizeof (mblk_t **),
2228 		    KM_SLEEP);
2229 	}
2230 
2231 	/* allocate receive descriptor array of size dnetp->max_rx_desc */
2232 	if (dnetp->rx_desc == NULL) {
2233 		int ndesc;
2234 
2235 		if (ddi_dma_mem_alloc(dnetp->dma_handle,
2236 		    sizeof (struct rx_desc_type) * dnetp->max_rx_desc,
2237 		    &accattr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
2238 		    (caddr_t *)&dnetp->rx_desc, &len,
2239 		    &dnetp->rx_desc_acchdl) != DDI_SUCCESS)
2240 			return (FAILURE);
2241 
2242 		if (ddi_dma_addr_bind_handle(dnetp->dma_handle,
2243 		    NULL, (caddr_t)dnetp->rx_desc,
2244 		    sizeof (struct rx_desc_type) * dnetp->max_rx_desc,
2245 		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
2246 		    NULL, &cookie, &ncookies) != DDI_DMA_MAPPED)
2247 			return (FAILURE);
2248 
2249 		dnetp->rx_desc_paddr = cookie.dmac_address;
2250 		bzero(dnetp->rx_desc, len);
2251 		dnetp->nrecv_desc = dnetp->max_rx_desc;
2252 
2253 		dnetp->rx_buf_vaddr =
2254 		    kmem_zalloc(dnetp->max_rx_desc * sizeof (caddr_t),
2255 		    KM_SLEEP);
2256 		dnetp->rx_buf_paddr =
2257 		    kmem_zalloc(dnetp->max_rx_desc * sizeof (uint32_t),
2258 		    KM_SLEEP);
2259 		/*
2260 		 * Allocate or add to the pool of receive buffers.  The pool
2261 		 * is shared among all instances of dnet.
2262 		 *
2263 		 * XXX NEEDSWORK
2264 		 *
2265 		 * We arbitrarily allocate twice as many receive buffers as
2266 		 * receive descriptors because we use the buffers for streams
2267 		 * messages to pass the packets up the stream.  We should
2268 		 * instead have initialized constants reflecting
2269 		 * MAX_RX_BUF_2104x and MAX_RX_BUF_2114x, and we should also
2270 		 * probably have a total maximum for the free pool, so that we
2271 		 * don't get out of hand when someone puts in an 8-port board.
2272 		 * The maximum for the entire pool should be the total number
2273 		 * of descriptors for all attached instances together, plus the
2274 		 * total maximum for the free pool.  This maximum would only be
2275 		 * reached after some number of instances allocate buffers:
2276 		 * each instance would add (max_rx_buf-max_rx_desc) to the free
2277 		 * pool.
2278 		 */
2279 		ndesc = dnetp->max_rx_desc - nrecv_desc_old;
2280 		if ((ndesc > 0) &&
2281 		    (dnet_rbuf_init(dnetp->devinfo, ndesc * 2) != 0))
2282 			return (FAILURE);
2283 
2284 		for (i = 0; i < dnetp->max_rx_desc; i++) {
2285 			struct rbuf_list *rp;
2286 
2287 			rp = dnet_rbuf_alloc(dnetp->devinfo, 1);
2288 			if (rp == NULL)
2289 				return (FAILURE);
2290 			dnetp->rx_buf_vaddr[i] = rp->rbuf_vaddr;
2291 			dnetp->rx_buf_paddr[i] = rp->rbuf_paddr;
2292 		}
2293 	}
2294 
2295 	return (SUCCESS);
2296 }
2297 /*
2298  * free descriptors/buffers allocated for this device instance.  This routine
2299  * should only be called while the device is reset.
2300  */
2301 static void
2302 dnet_free_bufs(struct dnetinstance *dnetp)
2303 {
2304 	int i;
2305 	/* free up any xmit descriptors/buffers */
2306 	if (dnetp->tx_desc != NULL) {
2307 		ddi_dma_mem_free(&dnetp->tx_desc_acchdl);
2308 		dnetp->tx_desc = NULL;
2309 		/* we use streams buffers for DMA in xmit process */
2310 		if (dnetp->tx_msgbufp != NULL) {
2311 			/* free up any streams message buffers unclaimed */
2312 			for (i = 0; i < dnetp->nxmit_desc; i++) {
2313 				if (dnetp->tx_msgbufp[i] != NULL) {
2314 					freemsg(dnetp->tx_msgbufp[i]);
2315 				}
2316 			}
2317 			kmem_free(dnetp->tx_msgbufp,
2318 			    dnetp->nxmit_desc * sizeof (mblk_t **));
2319 			dnetp->tx_msgbufp = NULL;
2320 		}
2321 		dnetp->nxmit_desc = 0;
2322 	}
2323 
2324 	/* free up any receive descriptors/buffers */
2325 	if (dnetp->rx_desc != NULL) {
2326 		ddi_dma_mem_free(&dnetp->rx_desc_acchdl);
2327 		dnetp->rx_desc = NULL;
2328 		if (dnetp->rx_buf_vaddr != NULL) {
2329 			/* free up the attached rbufs if any */
2330 			for (i = 0; i < dnetp->nrecv_desc; i++) {
2331 				if (dnetp->rx_buf_vaddr[i])
2332 					dnet_rbuf_free(
2333 					    (caddr_t)dnetp->rx_buf_vaddr[i]);
2334 			}
2335 			kmem_free(dnetp->rx_buf_vaddr,
2336 			    dnetp->nrecv_desc * sizeof (caddr_t));
2337 			kmem_free(dnetp->rx_buf_paddr,
2338 			    dnetp->nrecv_desc * sizeof (uint32_t));
2339 			dnetp->rx_buf_vaddr = NULL;
2340 			dnetp->rx_buf_paddr = NULL;
2341 		}
2342 		dnetp->nrecv_desc = 0;
2343 	}
2344 
2345 	if (dnetp->setup_buf_vaddr != NULL) {
2346 		ddi_dma_mem_free(&dnetp->setup_buf_acchdl);
2347 		dnetp->setup_buf_vaddr = NULL;
2348 	}
2349 
2350 	if (dnetp->dma_handle != NULL) {
2351 		(void) ddi_dma_unbind_handle(dnetp->dma_handle);
2352 		ddi_dma_free_handle(&dnetp->dma_handle);
2353 		dnetp->dma_handle = NULL;
2354 	}
2355 
2356 	if (dnetp->dma_handle_tx != NULL) {
2357 		(void) ddi_dma_unbind_handle(dnetp->dma_handle_tx);
2358 		ddi_dma_free_handle(&dnetp->dma_handle_tx);
2359 		dnetp->dma_handle_tx = NULL;
2360 	}
2361 
2362 	if (dnetp->dma_handle_txdesc != NULL) {
2363 		(void) ddi_dma_unbind_handle(dnetp->dma_handle_txdesc);
2364 		ddi_dma_free_handle(&dnetp->dma_handle_txdesc);
2365 		dnetp->dma_handle_txdesc = NULL;
2366 	}
2367 
2368 	if (dnetp->dma_handle_setbuf != NULL) {
2369 		(void) ddi_dma_unbind_handle(dnetp->dma_handle_setbuf);
2370 		ddi_dma_free_handle(&dnetp->dma_handle_setbuf);
2371 		dnetp->dma_handle_setbuf = NULL;
2372 	}
2373 
2374 }
2375 
2376 /*
2377  * Initialize transmit and receive descriptors.
2378  */
2379 static void
2380 dnet_init_txrx_bufs(struct dnetinstance *dnetp)
2381 {
2382 	int		i;
2383 
2384 	/*
2385 	 * Initialize all the Tx descriptors
2386 	 */
2387 	for (i = 0; i < dnetp->nxmit_desc; i++) {
2388 		/*
2389 		 * We may be resetting the device due to errors,
2390 		 * so free up any streams message buffer unclaimed.
2391 		 */
2392 		if (dnetp->tx_msgbufp[i] != NULL) {
2393 			freemsg(dnetp->tx_msgbufp[i]);
2394 			dnetp->tx_msgbufp[i] = NULL;
2395 		}
2396 		*(uint32_t *)&dnetp->tx_desc[i].desc0 = 0;
2397 		*(uint32_t *)&dnetp->tx_desc[i].desc1 = 0;
2398 		dnetp->tx_desc[i].buffer1 = 0;
2399 		dnetp->tx_desc[i].buffer2 = 0;
2400 	}
2401 	dnetp->tx_desc[i - 1].desc1.end_of_ring = 1;
2402 
2403 	/*
2404 	 * Initialize the Rx descriptors
2405 	 */
2406 	for (i = 0; i < dnetp->nrecv_desc; i++) {
2407 		uint32_t end_paddr;
2408 		*(uint32_t *)&dnetp->rx_desc[i].desc0 = 0;
2409 		*(uint32_t *)&dnetp->rx_desc[i].desc1 = 0;
2410 		dnetp->rx_desc[i].desc0.own = 1;
2411 		dnetp->rx_desc[i].desc1.buffer_size1 = rx_buf_size;
2412 		dnetp->rx_desc[i].buffer1 = dnetp->rx_buf_paddr[i];
2413 		dnetp->rx_desc[i].buffer2 = 0;
2414 		end_paddr = dnetp->rx_buf_paddr[i]+rx_buf_size-1;
2415 
2416 		if ((dnetp->rx_desc[i].buffer1 & ~dnetp->pgmask) !=
2417 		    (end_paddr & ~dnetp->pgmask)) {
2418 			/* discontiguous */
2419 			dnetp->rx_desc[i].buffer2 = end_paddr&~dnetp->pgmask;
2420 			dnetp->rx_desc[i].desc1.buffer_size2 =
2421 			    (end_paddr & dnetp->pgmask) + 1;
2422 			dnetp->rx_desc[i].desc1.buffer_size1 =
2423 			    rx_buf_size-dnetp->rx_desc[i].desc1.buffer_size2;
2424 		}
2425 	}
2426 	dnetp->rx_desc[i - 1].desc1.end_of_ring = 1;
2427 }
2428 
2429 static int
2430 alloc_descriptor(struct dnetinstance *dnetp)
2431 {
2432 	int index;
2433 	struct tx_desc_type    *ring = dnetp->tx_desc;
2434 
2435 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
2436 alloctop:
2437 	mutex_enter(&dnetp->txlock);
2438 	index = dnetp->tx_current_desc;
2439 
2440 	dnet_reclaim_Tx_desc(dnetp);
2441 
2442 	/* we do have free descriptors, right? */
2443 	if (dnetp->free_desc <= 0) {
2444 #ifdef DNETDEBUG
2445 		if (dnetdebug & DNETRECV)
2446 			cmn_err(CE_NOTE, "dnet: Ring buffer is full");
2447 #endif
2448 		mutex_exit(&dnetp->txlock);
2449 		return (FAILURE);
2450 	}
2451 
2452 	/* sanity, make sure the next descriptor is free for use (should be) */
2453 	if (ring[index].desc0.own) {
2454 #ifdef DNETDEBUG
2455 		if (dnetdebug & DNETRECV)
2456 			cmn_err(CE_WARN,
2457 			    "dnet: next descriptor is not free for use");
2458 #endif
2459 		mutex_exit(&dnetp->txlock);
2460 		return (FAILURE);
2461 	}
2462 	if (dnetp->need_saddr) {
2463 		mutex_exit(&dnetp->txlock);
2464 		/* XXX function return value ignored */
2465 		if (!dnetp->suspended)
2466 			(void) dnet_set_addr(dnetp);
2467 		goto alloctop;
2468 	}
2469 
2470 	*(uint32_t *)&ring[index].desc0 = 0;  /* init descs */
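	/* clear desc1, but keep the end-of-ring bit on the last descriptor */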
2471 	*(uint32_t *)&ring[index].desc1 &= DNET_END_OF_RING;
2472 
2473 	/* hardware will own this descriptor when poll activated */
2474 	dnetp->free_desc--;
2475 
2476 	/* point to next free descriptor to be used */
2477 	dnetp->tx_current_desc = NextTXIndex(index);
2478 
2479 #ifdef DNET_NOISY
2480 	cmn_err(CE_WARN, "sfree 0x%x, transmitted 0x%x, tx_current 0x%x",
2481 	    dnetp->free_desc, dnetp->transmitted_desc, dnetp->tx_current_desc);
2482 #endif
2483 	mutex_exit(&dnetp->txlock);
2484 	return (SUCCESS);
2485 }
2486 
2487 /*
2488  * dnet_reclaim_Tx_desc() - called with txlock held.
2489  */
2490 static void
2491 dnet_reclaim_Tx_desc(struct dnetinstance *dnetp)
2492 {
2493 	struct tx_desc_type	*desc = dnetp->tx_desc;
2494 	int index;
2495 
2496 	ASSERT(MUTEX_HELD(&dnetp->txlock));
2497 
2498 	index = dnetp->transmitted_desc;
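	/*
	 * Walk forward from the last reclaimed descriptor, releasing every
	 * descriptor the chip has handed back (own bit clear), until we
	 * catch up with the driver's current transmit index.
	 */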
2499 	while (((dnetp->free_desc == 0) || (index != dnetp->tx_current_desc)) &&
2500 	    !(desc[index].desc0.own)) {
2501 		/*
2502 		 * Check for Tx Error that gets set
2503 		 * in the last desc.
2504 		 */
2505 		if (desc[index].desc1.setup_packet == 0 &&
2506 		    desc[index].desc1.last_desc &&
2507 		    desc[index].desc0.err_summary)
2508 			update_tx_stats(dnetp, index);
2509 
2510 		/*
2511 		 * If we have used the streams message buffer for this
2512 		 * descriptor then free up the message now.
2513 		 */
2514 		if (dnetp->tx_msgbufp[index] != NULL) {
2515 			freemsg(dnetp->tx_msgbufp[index]);
2516 			dnetp->tx_msgbufp[index] = NULL;
2517 		}
2518 		dnetp->free_desc++;
2519 		index = (index+1) % dnetp->max_tx_desc;
2520 	}
2521 
2522 	dnetp->transmitted_desc = index;
2523 }
2524 
2525 /*
2526  * Receive buffer allocation/freeing routines.
2527  *
2528  * There is a common pool of receive buffers shared by all dnet instances.
2529  *
2530  * XXX NEEDSWORK
2531  *
2532  * We arbitrarily allocate twice as many receive buffers as
2533  * receive descriptors because we use the buffers for streams
2534  * messages to pass the packets up the stream.  We should
2535  * instead have initialized constants reflecting
2536  * MAX_RX_BUF_2104x and MAX_RX_BUF_2114x, and we should also
2537  * probably have a total maximum for the free pool, so that we
2538  * don't get out of hand when someone puts in an 8-port board.
2539  * The maximum for the entire pool should be the total number
2540  * of descriptors for all attached instances together, plus the
2541  * total maximum for the free pool.  This maximum would only be
2542  * reached after some number of instances allocate buffers:
2543  * each instance would add (max_rx_buf-max_rx_desc) to the free
2544  * pool.
2545  */
2546 
2547 static struct rbuf_list *rbuf_usedlist_head;
2548 static struct rbuf_list *rbuf_freelist_head;
2549 static struct rbuf_list *rbuf_usedlist_end;	/* last buffer allocated */
2550 
2551 static int rbuf_freebufs;	/* no. of free buffers in the pool */
2552 static int rbuf_pool_size;	/* total no. of buffers in the pool */
2553 
2554 /* initialize/add 'nbufs' buffers to the rbuf pool */
2555 /* ARGSUSED */
2556 static int
2557 dnet_rbuf_init(dev_info_t *dip, int nbufs)
2558 {
2559 	int i;
2560 	struct rbuf_list *rp;
2561 	ddi_dma_cookie_t cookie;
2562 	uint_t ncookies;
2563 	size_t len;
2564 
2565 	mutex_enter(&dnet_rbuf_lock);
2566 
2567 	/* allocate buffers and add them to the pool */
2568 	for (i = 0; i < nbufs; i++) {
2569 		/* allocate rbuf_list element */
2570 		rp = kmem_zalloc(sizeof (struct rbuf_list), KM_SLEEP);
2571 		if (ddi_dma_alloc_handle(dip, &dma_attr_rb, DDI_DMA_SLEEP,
2572 		    0, &rp->rbuf_dmahdl) != DDI_SUCCESS)
2573 			goto fail_kfree;
2574 
2575 		/* allocate dma memory for the buffer */
2576 		if (ddi_dma_mem_alloc(rp->rbuf_dmahdl, rx_buf_size, &accattr,
2577 		    DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
2578 		    &rp->rbuf_vaddr, &len,
2579 		    &rp->rbuf_acchdl) != DDI_SUCCESS)
2580 			goto fail_freehdl;
2581 
2582 		if (ddi_dma_addr_bind_handle(rp->rbuf_dmahdl, NULL,
2583 		    rp->rbuf_vaddr, len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
2584 		    DDI_DMA_SLEEP, NULL, &cookie,
2585 		    &ncookies) != DDI_DMA_MAPPED)
2586 			goto fail_free;
2587 
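		/*
		 * A buffer may straddle a page boundary, so the bind can
		 * legitimately return two cookies; remember both the
		 * physical start and end so the rx descriptor can later be
		 * split across the two pages.
		 */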
2588 		if (ncookies > 2)
2589 			goto fail_unbind;
2590 		if (ncookies == 1) {
2591 			rp->rbuf_endpaddr =
2592 			    cookie.dmac_address + rx_buf_size - 1;
2593 		} else {
2594 			ddi_dma_nextcookie(rp->rbuf_dmahdl, &cookie);
2595 			rp->rbuf_endpaddr =
2596 			    cookie.dmac_address + cookie.dmac_size - 1;
2597 		}
2598 		rp->rbuf_paddr = cookie.dmac_address;
2599 
2600 		rp->rbuf_next = rbuf_freelist_head;
2601 		rbuf_freelist_head = rp;
2602 		rbuf_pool_size++;
2603 		rbuf_freebufs++;
2604 	}
2605 
2606 	mutex_exit(&dnet_rbuf_lock);
2607 	return (0);
2608 fail_unbind:
2609 	(void) ddi_dma_unbind_handle(rp->rbuf_dmahdl);
2610 fail_free:
2611 	ddi_dma_mem_free(&rp->rbuf_acchdl);
2612 fail_freehdl:
2613 	ddi_dma_free_handle(&rp->rbuf_dmahdl);
2614 fail_kfree:
2615 	kmem_free(rp, sizeof (struct rbuf_list));
2616 
2617 	mutex_exit(&dnet_rbuf_lock);
2618 	return (-1);
2619 }
2620 
2621 /*
2622  * Try to free up all the rbufs in the pool. Returns 0 if it frees up all
2623  * buffers. The buffers in the used list are considered busy so these
2624  * buffers are not freed.
2625  */
2626 static int
2627 dnet_rbuf_destroy()
2628 {
2629 	struct rbuf_list *rp, *next;
2630 
2631 	mutex_enter(&dnet_rbuf_lock);
2632 
2633 	for (rp = rbuf_freelist_head; rp; rp = next) {
2634 		next = rp->rbuf_next;
2635 		ddi_dma_mem_free(&rp->rbuf_acchdl);
2636 		(void) ddi_dma_unbind_handle(rp->rbuf_dmahdl);
2637 		kmem_free(rp, sizeof (struct rbuf_list));
2638 		rbuf_pool_size--;
2639 		rbuf_freebufs--;
2640 	}
2641 	rbuf_freelist_head = NULL;
2642 
2643 	if (rbuf_pool_size) { /* pool is still not empty */
2644 		mutex_exit(&dnet_rbuf_lock);
2645 		return (-1);
2646 	}
2647 	mutex_exit(&dnet_rbuf_lock);
2648 	return (0);
2649 }
2650 static struct rbuf_list *
2651 dnet_rbuf_alloc(dev_info_t *dip, int cansleep)
2652 {
2653 	struct rbuf_list *rp;
2654 	size_t len;
2655 	ddi_dma_cookie_t cookie;
2656 	uint_t ncookies;
2657 
2658 	mutex_enter(&dnet_rbuf_lock);
2659 
2660 	if (rbuf_freelist_head == NULL) {
2661 
2662 		if (!cansleep) {
2663 			mutex_exit(&dnet_rbuf_lock);
2664 			return (NULL);
2665 		}
2666 
2667 		/* allocate rbuf_list element */
2668 		rp = kmem_zalloc(sizeof (struct rbuf_list), KM_SLEEP);
2669 		if (ddi_dma_alloc_handle(dip, &dma_attr_rb, DDI_DMA_SLEEP,
2670 		    0, &rp->rbuf_dmahdl) != DDI_SUCCESS)
2671 			goto fail_kfree;
2672 
2673 		/* allocate dma memory for the buffer */
2674 		if (ddi_dma_mem_alloc(rp->rbuf_dmahdl, rx_buf_size, &accattr,
2675 		    DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
2676 		    &rp->rbuf_vaddr, &len,
2677 		    &rp->rbuf_acchdl) != DDI_SUCCESS)
2678 			goto fail_freehdl;
2679 
2680 		if (ddi_dma_addr_bind_handle(rp->rbuf_dmahdl, NULL,
2681 		    rp->rbuf_vaddr, len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
2682 		    DDI_DMA_SLEEP, NULL, &cookie,
2683 		    &ncookies) != DDI_DMA_MAPPED)
2684 			goto fail_free;
2685 
2686 		if (ncookies > 2)
2687 			goto fail_unbind;
2688 		if (ncookies == 1) {
2689 			rp->rbuf_endpaddr =
2690 			    cookie.dmac_address + rx_buf_size - 1;
2691 		} else {
2692 			ddi_dma_nextcookie(rp->rbuf_dmahdl, &cookie);
2693 			rp->rbuf_endpaddr =
2694 			    cookie.dmac_address + cookie.dmac_size - 1;
2695 		}
2696 		rp->rbuf_paddr = cookie.dmac_address;
2697 
2698 		rbuf_freelist_head = rp;
2699 		rbuf_pool_size++;
2700 		rbuf_freebufs++;
2701 	}
2702 
2703 	/* take the buffer from the head of the free list */
2704 	rp = rbuf_freelist_head;
2705 	rbuf_freelist_head = rbuf_freelist_head->rbuf_next;
2706 
2707 	/* update the used list; put the entry at the end */
2708 	if (rbuf_usedlist_head == NULL)
2709 		rbuf_usedlist_head = rp;
2710 	else
2711 		rbuf_usedlist_end->rbuf_next = rp;
2712 	rp->rbuf_next = NULL;
2713 	rbuf_usedlist_end = rp;
2714 	rbuf_freebufs--;
2715 
2716 	mutex_exit(&dnet_rbuf_lock);
2717 
2718 	return (rp);
2719 fail_unbind:
2720 	(void) ddi_dma_unbind_handle(rp->rbuf_dmahdl);
2721 fail_free:
2722 	ddi_dma_mem_free(&rp->rbuf_acchdl);
2723 fail_freehdl:
2724 	ddi_dma_free_handle(&rp->rbuf_dmahdl);
2725 fail_kfree:
2726 	kmem_free(rp, sizeof (struct rbuf_list));
2727 	mutex_exit(&dnet_rbuf_lock);
2728 	return (NULL);
2729 }
2730 
2731 static void
2732 dnet_rbuf_free(caddr_t vaddr)
2733 {
2734 	struct rbuf_list *rp, *prev;
2735 
2736 	ASSERT(vaddr != NULL);
2737 	ASSERT(rbuf_usedlist_head != NULL);
2738 
2739 	mutex_enter(&dnet_rbuf_lock);
2740 
2741 	/* find the entry in the used list */
2742 	for (prev = rp = rbuf_usedlist_head; rp; rp = rp->rbuf_next) {
2743 		if (rp->rbuf_vaddr == vaddr)
2744 			break;
2745 		prev = rp;
2746 	}
2747 
2748 	if (rp == NULL) {
2749 		cmn_err(CE_WARN, "DNET: rbuf_free: bad addr 0x%p",
2750 		    (void *)vaddr);
2751 		mutex_exit(&dnet_rbuf_lock);
2752 		return;
2753 	}
2754 
2755 	/* update the used list and put the buffer back in the free list */
2756 	if (rbuf_usedlist_head != rp) {
2757 		prev->rbuf_next = rp->rbuf_next;
2758 		if (rbuf_usedlist_end == rp)
2759 			rbuf_usedlist_end = prev;
2760 	} else {
2761 		rbuf_usedlist_head = rp->rbuf_next;
2762 		if (rbuf_usedlist_end == rp)
2763 			rbuf_usedlist_end = NULL;
2764 	}
2765 	rp->rbuf_next = rbuf_freelist_head;
2766 	rbuf_freelist_head = rp;
2767 	rbuf_freebufs++;
2768 
2769 	mutex_exit(&dnet_rbuf_lock);
2770 }
2771 
2772 /*
2773  * Free the receive buffer used in a stream's message block allocated
2774  * thru desballoc().
2775  */
2776 static void
2777 dnet_freemsg_buf(struct free_ptr *frp)
2778 {
2779 	dnet_rbuf_free((caddr_t)frp->buf); /* buffer goes back to the pool */
2780 	kmem_free(frp, sizeof (*frp)); /* free up the free_rtn structure */
2781 }
2782 
2783 /*
2784  *	========== SROM Read Routines ==========
2785  */
2786 
2787 /*
2788  * The following code gets the SROM information, either by reading it
2789  * from the device or, failing that, by reading a property.
2790  */
2791 static int
2792 dnet_read_srom(dev_info_t *devinfo, int board_type, ddi_acc_handle_t io_handle,
2793     caddr_t io_reg, uchar_t *vi, int maxlen)
2794 {
2795 	int all_ones, zerocheck, i;
2796 
2797 	/*
2798 	 * Load SROM into vendor_info
2799 	 */
2800 	if (board_type == DEVICE_ID_21040)
2801 		dnet_read21040addr(devinfo, io_handle, io_reg, vi, &maxlen);
2802 	else
2803 		/* 21041/21140 serial rom */
2804 		dnet_read21140srom(io_handle, io_reg, vi, maxlen);
2805 	/*
2806 	 * If the dumpsrom property is present in the conf file, print
2807 	 * the contents of the SROM to the console
2808 	 */
2809 	if (ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
2810 	    "dumpsrom", 0))
2811 		dnet_dumpbin("SROM", vi, 1, maxlen);
2812 
2813 	for (zerocheck = i = 0, all_ones = 0xff; i < maxlen; i++) {
2814 		zerocheck |= vi[i];
2815 		all_ones &= vi[i];
2816 	}
2817 	if (zerocheck == 0 || all_ones == 0xff) {
2818 		return (get_alternative_srom_image(devinfo, vi, maxlen));
2819 	} else {
2820 #ifdef BUG_4010796
2821 		set_alternative_srom_image(devinfo, vi, maxlen);
2822 #endif
2823 		return (0);	/* Primary */
2824 	}
2825 }
2826 
2827 /*
2828  * The function reads the ethernet address of the 21040 adapter
2829  */
2830 static void
2831 dnet_read21040addr(dev_info_t *dip, ddi_acc_handle_t io_handle, caddr_t io_reg,
2832     uchar_t *addr, int *len)
2833 {
2834 	uint32_t	val;
2835 	int		i;
2836 
2837 	/* No point reading more than the ethernet address */
2838 	*len = ddi_getprop(DDI_DEV_T_ANY, dip,
2839 	    DDI_PROP_DONTPASS, macoffset_propname, 0) + ETHERADDRL;
2840 
2841 	/* Reset ROM pointer */
2842 	ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 0);
2843 	for (i = 0; i < *len; i++) {
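		/* spin while bit 31 (apparently a not-ready flag) is set */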
2844 		do {
2845 			val = ddi_get32(io_handle,
2846 			    REG32(io_reg, ETHER_ROM_REG));
2847 		} while (val & 0x80000000);
2848 		addr[i] = val & 0xFF;
2849 	}
2850 }
2851 
2852 #define	drv_nsecwait(x)	drv_usecwait(((x)+999)/1000) /* XXX */
2853 
2854 /*
2855  * The function reads the SROM of the 21140 adapter
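 * (It appears to bit-bang a Microwire/93C46-style serial EEPROM through
 * CSR9: select the ROM, clock in the READ opcode and the word address
 * MSB-first, then clock out 16 data bits per word.)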
2856  */
2857 static void
2858 dnet_read21140srom(ddi_acc_handle_t io_handle, caddr_t io_reg, uchar_t *addr,
2859     int maxlen)
2860 {
2861 	uint32_t 	i, j;
2862 	uint32_t	dout;
2863 	uint16_t	word;
2864 	uint8_t		rom_addr;
2865 	uint8_t		bit;
2866 
2867 
2868 	rom_addr = 0;
2869 	for (i = 0; i <	maxlen; i += 2) {
2870 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2871 		    READ_OP | SEL_ROM);
2872 		drv_nsecwait(30);
2873 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2874 		    READ_OP | SEL_ROM | SEL_CHIP);
2875 		drv_nsecwait(50);
2876 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2877 		    READ_OP | SEL_ROM | SEL_CHIP | SEL_CLK);
2878 		drv_nsecwait(250);
2879 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2880 		    READ_OP | SEL_ROM | SEL_CHIP);
2881 		drv_nsecwait(100);
2882 
2883 		/* command */
2884 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2885 		    READ_OP | SEL_ROM | SEL_CHIP | DATA_IN);
2886 		drv_nsecwait(150);
2887 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2888 		    READ_OP | SEL_ROM | SEL_CHIP | DATA_IN | SEL_CLK);
2889 		drv_nsecwait(250);
2890 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2891 		    READ_OP | SEL_ROM | SEL_CHIP | DATA_IN);
2892 		drv_nsecwait(250);
2893 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2894 		    READ_OP | SEL_ROM | SEL_CHIP | DATA_IN | SEL_CLK);
2895 		drv_nsecwait(250);
2896 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2897 		    READ_OP | SEL_ROM | SEL_CHIP | DATA_IN);
2898 		drv_nsecwait(100);
2899 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2900 		    READ_OP | SEL_ROM | SEL_CHIP);
2901 		drv_nsecwait(150);
2902 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2903 		    READ_OP | SEL_ROM | SEL_CHIP | SEL_CLK);
2904 		drv_nsecwait(250);
2905 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2906 		    READ_OP | SEL_ROM | SEL_CHIP);
2907 		drv_nsecwait(100);
2908 
2909 		/* Address */
2910 		for (j = HIGH_ADDRESS_BIT; j >= 1; j >>= 1) {
2911 			bit = (rom_addr & j) ? DATA_IN : 0;
2912 			ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2913 			    READ_OP | SEL_ROM | SEL_CHIP | bit);
2914 			drv_nsecwait(150);
2915 			ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2916 			    READ_OP | SEL_ROM | SEL_CHIP | bit | SEL_CLK);
2917 			drv_nsecwait(250);
2918 			ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2919 			    READ_OP | SEL_ROM | SEL_CHIP | bit);
2920 			drv_nsecwait(100);
2921 		}
2922 		drv_nsecwait(150);
2923 
2924 		/* Data */
2925 		word = 0;
2926 		for (j = 0x8000; j >= 1; j >>= 1) {
2927 			ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2928 			    READ_OP | SEL_ROM | SEL_CHIP | SEL_CLK);
2929 			drv_nsecwait(100);
2930 			dout = ddi_get32(io_handle,
2931 			    REG32(io_reg, ETHER_ROM_REG));
2932 			drv_nsecwait(150);
2933 			if (dout & DATA_OUT)
2934 				word |= j;
2935 			ddi_put32(io_handle,
2936 			    REG32(io_reg, ETHER_ROM_REG),
2937 			    READ_OP | SEL_ROM | SEL_CHIP);
2938 			drv_nsecwait(250);
2939 		}
2940 		addr[i] = (word & 0x0000FF);
2941 		addr[i + 1] = (word >> 8);
2942 		rom_addr++;
2943 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2944 		    READ_OP | SEL_ROM);
2945 		drv_nsecwait(100);
2946 	}
2947 }
2948 
2949 
2950 /*
2951  * XXX NEEDSWORK
2952  *
2953  * Some lame multiport cards have only one SROM, which can be accessed
2954  * only from the "first" 21x4x chip, whichever that one is.  If we can't
2955  * get at our SROM, we look for its contents in a property instead, which
2956  * we rely on the bootstrap to have properly set.
2957  * #ifdef BUG_4010796
2958  * We also have a hack to try to set it ourselves, when the "first" port
2959  * attaches, if it has not already been properly set.  However, this method
2960  * is not reliable, since it makes the unwarranted assumption that the
2961  * "first" port will attach first.
2962  * #endif
2963  */
2964 
2965 static int
2966 get_alternative_srom_image(dev_info_t *devinfo, uchar_t *vi, int len)
2967 {
2968 	int	l = len;
2969 
2970 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
2971 	    "DNET_SROM", (caddr_t)vi, &len) != DDI_PROP_SUCCESS &&
2972 	    (len = l) && ddi_getlongprop_buf(DDI_DEV_T_ANY,
2973 	    ddi_get_parent(devinfo), DDI_PROP_DONTPASS, "DNET_SROM",
2974 	    (caddr_t)vi, &len) != DDI_PROP_SUCCESS)
2975 		return (-1);	/* Can't find it! */
2976 
2977 	/*
2978 	 * The return value from this routine specifies which port number
2979 	 * we are.  The primary port is denoted port 0.  On a QUAD card we
2980 	 * should return 1, 2, and 3 from this routine.  The return value
2981 	 * is used to modify the ethernet address from the SROM data.
2982 	 */
2983 
2984 #ifdef BUG_4010796
2985 	{
2986 	/*
2987 	 * For the present, we remember the device number of our primary
2988 	 * sibling and hope we and our other siblings are consecutively
2989 	 * numbered up from there.  In the future perhaps the bootstrap
2990 	 * will pass us the necessary information telling us which physical
2991 	 * port we really are.
2992 	 */
2993 	pci_regspec_t	*assignp;
2994 	int		assign_len;
2995 	int 		devnum;
2996 	int		primary_devnum;
2997 
2998 	primary_devnum = ddi_getprop(DDI_DEV_T_ANY, devinfo, 0,
2999 	    "DNET_DEVNUM", -1);
3000 	if (primary_devnum == -1)
3001 		return (1);	/* XXX NEEDSWORK -- We have no better idea */
3002 
3003 	if ((ddi_getlongprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
3004 	    "assigned-addresses", (caddr_t)&assignp,
3005 	    &assign_len)) != DDI_PROP_SUCCESS)
3006 		return (1);	/* XXX NEEDSWORK -- We have no better idea */
3007 
3008 	devnum = PCI_REG_DEV_G(assignp->pci_phys_hi);
3009 	kmem_free(assignp, assign_len);
3010 	return (devnum - primary_devnum);
3011 	}
3012 #else
3013 	return (1);	/* XXX NEEDSWORK -- We have no better idea */
3014 #endif
3015 }
3016 
3017 
3018 #ifdef BUG_4010796
3019 static void
3020 set_alternative_srom_image(dev_info_t *devinfo, uchar_t *vi, int len)
3021 {
3022 	int 		proplen;
3023 	pci_regspec_t	*assignp;
3024 	int		assign_len;
3025 	int 		devnum;
3026 
3027 	if (ddi_getproplen(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
3028 	    "DNET_SROM", &proplen) == DDI_PROP_SUCCESS ||
3029 	    ddi_getproplen(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
3030 	    DDI_PROP_DONTPASS, "DNET_SROM", &proplen) == DDI_PROP_SUCCESS)
3031 		return;		/* Already done! */
3032 
3033 	/* function return value ignored */
3034 	(void) ddi_prop_update_byte_array(DDI_DEV_T_NONE,
3035 	    ddi_get_parent(devinfo), "DNET_SROM", (uchar_t *)vi, len);
3036 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devinfo,
3037 	    "DNET_HACK", "hack");
3038 
3039 	if ((ddi_getlongprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
3040 	    "assigned-addresses", (caddr_t)&assignp,
3041 	    &assign_len)) == DDI_PROP_SUCCESS) {
3042 		devnum = PCI_REG_DEV_G(assignp->pci_phys_hi);
3043 		kmem_free(assignp, assign_len);
3044 		/* function return value ignored */
3045 		(void) ddi_prop_update_int(DDI_DEV_T_NONE,
3046 		    ddi_get_parent(devinfo), "DNET_DEVNUM", devnum);
3047 	}
3048 }
3049 #endif
3050 
3051 /*
3052  *	========== SROM Parsing Routines ==========
3053  */
3054 
3055 static int
3056 check_srom_valid(uchar_t *vi)
3057 {
3058 	int		word, bit;
3059 	uint8_t		crc;
3060 	uint16_t	*wvi;		/* word16 pointer to vendor info */
3061 	uint16_t	bitval;
3062 
3063 	/* verify that the number of controllers on the card is within range */
3064 	if (vi[SROM_ADAPTER_CNT] < 1 || vi[SROM_ADAPTER_CNT] > MAX_ADAPTERS)
3065 		return (0);
3066 
3067 	/*
3068 	 * for version 1 and 3 SROMs the driver never checked the ID block CRC
3069 	 * value, and this can't be changed without retesting every supported card
3070 	 *
3071 	 * however version 4 of the SROM can have this test applied
3072 	 * without fear of breaking something that used to work.
3073 	 * the CRC algorithm is taken from the Intel document
3074 	 *	"21x4 Serial ROM Format"
3075 	 *	version 4.09
3076 	 *	3-Mar-1999
3077 	 */
3078 
3079 	switch (vi[SROM_VERSION]) {
3080 	case 1:
3081 		/* fallthru */
3082 	case 3:
3083 		return (vi[SROM_MBZ] == 0 &&	/* must be zero */
3084 		    vi[SROM_MBZ2] == 0 &&	/* must be zero */
3085 		    vi[SROM_MBZ3] == 0);	/* must be zero */
3086 
3087 	case 4:
3088 		wvi = (uint16_t *)vi;
3089 		crc = 0xff;
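		/*
		 * Bitwise CRC-8 (polynomial x^8 + x^2 + x + 1, i.e. 0x07)
		 * over the ID block, compared against the stored CRC byte
		 * once the loop reaches it.
		 */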
3090 		for (word = 0; word < 9; word++)
3091 			for (bit = 15; bit >= 0; bit--) {
3092 				if (word == 8 && bit == 7)
3093 					return (crc == vi[16]);
3094 				bitval =
3095 				    ((wvi[word] >> bit) & 1) ^ ((crc >> 7) & 1);
3096 				crc <<= 1;
3097 				if (bitval == 1) {
3098 					crc ^= 7;
3099 				}
3100 			}
3101 
3102 	default:
3103 		return (0);
3104 	}
3105 }
3106 
3107 /*
3108  *	========== Active Media Determination Routines ==========
3109  */
3110 
3111 /* This routine is also called for V3 Compact and extended type 0 SROMs */
3112 static int
3113 is_fdmedia(int media)
3114 {
3115 	if (media == MEDIA_TP_FD || media == MEDIA_SYM_SCR_FD)
3116 		return (1);
3117 	else
3118 		return (0);
3119 }
3120 
3121 /*
3122  * "Linkset" is used to merge media that use the same link test check. So,
3123  * if the TP link is added to the linkset, so is the TP Full duplex link.
3124  * Used to avoid checking the same link status twice.
3125  */
3126 static void
3127 linkset_add(uint32_t *set, int media)
3128 {
3129 	if (media == MEDIA_TP_FD || media == MEDIA_TP)
3130 		*set |= (1UL<<MEDIA_TP_FD) | (1UL<<MEDIA_TP);
3131 	else if (media == MEDIA_SYM_SCR_FD || media == MEDIA_SYM_SCR)
3132 		*set |= (1UL<<MEDIA_SYM_SCR_FD) | (1UL<<MEDIA_SYM_SCR);
3133 	else *set |= 1UL<<media;
3134 }
3135 static int
3136 linkset_isset(uint32_t linkset, int media)
3137 {
3138 	return (((1UL << media) & linkset) ? 1 : 0);
3139 }
3140 
3141 /*
3142  * The following code detects which Media is connected for 21041/21140
3143  * Expect to change this code to support new 21140 variants.
3144  * find_active_media() - called with intrlock held.
3145  */
3146 static void
3147 find_active_media(struct dnetinstance *dnetp)
3148 {
3149 	int i;
3150 	media_block_t *block;
3151 	media_block_t *best_allowed = NULL;
3152 	media_block_t *hd_found = NULL;
3153 	media_block_t *fd_found = NULL;
3154 	LEAF_FORMAT *leaf = &dnetp->sr.leaf[dnetp->leaf];
3155 	uint32_t checked = 0, links_up = 0;
3156 
3157 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3158 
3159 	dnetp->selected_media_block = leaf->default_block;
3160 
3161 	if (dnetp->phyaddr != -1) {
3162 		dnetp->selected_media_block = leaf->mii_block;
3163 		setup_block(dnetp);
3164 
3165 		if (ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
3166 		    DDI_PROP_DONTPASS, "portmon", 1)) {
3167 			/* XXX return value ignored */
3168 			(void) mii_start_portmon(dnetp->mii, dnet_mii_link_cb,
3169 			    &dnetp->intrlock);
3170 			/*
3171 			 * If the port monitor detects the link is already
3172 			 * up, there is no point going through the rest of the
3173 			 * link sense
3174 			 */
3175 			if (dnetp->mii_up) {
3176 				return;
3177 			}
3178 		}
3179 	}
3180 
3181 	/*
3182 	 * Media is searched for in order of precedence.  The DEC SROM spec
3183 	 * tells us that the first media entry in the SROM has the lowest
3184 	 * precedence and should be checked last, which is why we go to the
3185 	 * last media block and work back to the beginning.
3186 	 *
3187 	 * However, some older SROMs (Cogent EM110's etc.) have this the wrong
3188 	 * way around. As a result, following the SROM spec would result in a
3189 	 * 10 link being chosen over a 100 link if both media are available.
3190 	 * So we continue trying the media until we have at least tried the
3191 	 * DEFAULT media.
3192 	 */
3193 
3194 	/* Search for an active medium, and select it */
3195 	for (block = leaf->block + leaf->block_count  - 1;
3196 	    block >= leaf->block; block--) {
3197 		int media = block->media_code;
3198 
3199 		/* User settings disallow selection of this block */
3200 		if (dnetp->disallowed_media & (1UL<<media))
3201 			continue;
3202 
3203 		/* We may not be able to pick the default */
3204 		if (best_allowed == NULL || block == leaf->default_block)
3205 			best_allowed = block;
3206 #ifdef DEBUG
3207 		if (dnetdebug & DNETSENSE)
3208 			cmn_err(CE_NOTE, "Testing %s medium (block type %d)",
3209 			    media_str[media], block->type);
3210 #endif
3211 
3212 		dnetp->selected_media_block = block;
3213 		switch (block->type) {
3214 
3215 		case 2: /* SIA Media block: Best we can do is send a packet */
3216 			setup_block(dnetp);
3217 			if (send_test_packet(dnetp)) {
3218 				if (!is_fdmedia(media))
3219 					return;
3220 				if (!fd_found)
3221 					fd_found = block;
3222 			}
3223 			break;
3224 
3225 		/* SYM/SCR or TP block: Use the link-sense bits */
3226 		case 0:
3227 			if (!linkset_isset(checked, media)) {
3228 				linkset_add(&checked, media);
3229 				if (((media == MEDIA_BNC ||
3230 				    media == MEDIA_AUI) &&
3231 				    send_test_packet(dnetp)) ||
3232 				    dnet_link_sense(dnetp))
3233 					linkset_add(&links_up, media);
3234 			}
3235 
3236 			if (linkset_isset(links_up, media)) {
3237 				/*
3238 				 * Half Duplex is *always* the favoured media.
3239 				 * Full Duplex can be set and forced via the
3240 				 * conf file.
3241 				 */
3242 				if (!is_fdmedia(media) &&
3243 				    dnetp->selected_media_block ==
3244 				    leaf->default_block) {
3245 					/*
3246 					 * Cogent cards have the media in
3247 					 * opposite order to the spec.,
3248 					 * this code forces the media test to
3249 					 * keep going until the default media
3250 					 * is tested.
3251 					 *
3252 					 * In the Cogent case the order is 10,
3253 					 * 10FD, 100FD, 100; 100 is the default,
3254 					 * but 10 could have been detected and
3255 					 * chosen first, so we force the test to
3256 					 * continue through to 100.
3257 					 */
3258 					setup_block(dnetp);
3259 					return;
3260 				} else if (!is_fdmedia(media)) {
3261 					/*
3262 					 * This allows all the others to work
3263 					 * properly by remembering the media
3264 					 * that works and not defaulting to
3265 					 * a FD link.
3266 					 */
3267 					if (hd_found == NULL)
3268 						hd_found = block;
3269 				} else if (fd_found == NULL) {
3270 					/*
3271 					 * No other media has been found so
3272 					 * far; this FD medium works, so
3273 					 * remember it and use it if nothing
3274 					 * else is detected.
3275 					 */
3276 					fd_found = block;
3277 				}
3278 			}
3279 			break;
3280 
3281 		/*
3282 		 * MII block: May take up to a second or so to settle if
3283 		 * setup causes a PHY reset
3284 		 */
3285 		case 1: case 3:
3286 			setup_block(dnetp);
3287 			for (i = 0; ; i++) {
3288 				if (mii_linkup(dnetp->mii, dnetp->phyaddr)) {
3289 					/* XXX function return value ignored */
3290 					(void) mii_getspeed(dnetp->mii,
3291 					    dnetp->phyaddr,
3292 					    &dnetp->mii_speed,
3293 					    &dnetp->mii_duplex);
3294 					dnetp->mii_up = 1;
3295 					leaf->mii_block = block;
3296 					return;
3297 				}
3298 				if (i == 10)
3299 					break;
3300 				delay(drv_usectohz(150000));
3301 			}
3302 			dnetp->mii_up = 0;
3303 			break;
3304 		}
3305 	} /* for loop */
3306 	if (hd_found) {
3307 		dnetp->selected_media_block = hd_found;
3308 	} else if (fd_found) {
3309 		dnetp->selected_media_block = fd_found;
3310 	} else {
3311 		if (best_allowed == NULL)
3312 			best_allowed = leaf->default_block;
3313 		dnetp->selected_media_block = best_allowed;
3314 		cmn_err(CE_WARN, "!dnet: Default media selected\n");
3315 	}
3316 	setup_block(dnetp);
3317 }
3318 
3319 /*
3320  * Do anything necessary to select the selected_media_block.
3321  * setup_block() - called with intrlock held.
3322  */
3323 static void
3324 setup_block(struct dnetinstance *dnetp)
3325 {
3326 	dnet_reset_board(dnetp);
3327 	dnet_init_board(dnetp);
3328 	/* XXX function return value ignored */
3329 	(void) dnet_start(dnetp);
3330 }
3331 
3332 /* dnet_link_sense() - called with intrlock held */
3333 static int
3334 dnet_link_sense(struct dnetinstance *dnetp)
3335 {
3336 	/*
3337 	 * This routine makes use of the command word from the srom config.
3338 	 * Details of the auto-sensing information contained in this can
3339 	 * be found in the "Digital Semiconductor 21X4 Serial ROM Format v3.03"
3340 	 * spec. Section 4.3.2.1, and 4.5.2.1.3
3341 	 */
3342 	media_block_t *block = dnetp->selected_media_block;
3343 	uint32_t link, status, mask, polarity;
3344 	int settletime, stabletime, waittime, upsamples;
3345 	int delay_100, delay_10;
3346 
3347 
3348 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3349 	/* Don't autosense if the medium does not support it */
3350 	if (block->command & (1 << 15)) {
3351 		/* This should be the default block */
3352 		if (block->command & (1UL<<14))
3353 			dnetp->sr.leaf[dnetp->leaf].default_block = block;
3354 		return (0);
3355 	}
3356 
3357 	delay_100 = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
3358 	    DDI_PROP_DONTPASS, "autosense-delay-100", 2000);
3359 
3360 	delay_10 = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
3361 	    DDI_PROP_DONTPASS, "autosense-delay-10", 400);
3362 
3363 	/*
3364 	 * Scrambler may need to be disabled for link sensing
3365 	 * to work
3366 	 */
3367 	dnetp->disable_scrambler = 1;
3368 	setup_block(dnetp);
3369 	dnetp->disable_scrambler = 0;
3370 
3371 	if (block->media_code == MEDIA_TP || block->media_code == MEDIA_TP_FD)
3372 		settletime = delay_10;
3373 	else
3374 		settletime = delay_100;
3375 	stabletime = settletime / 4;
3376 
3377 	mask = 1 << ((block->command & CMD_MEDIABIT_MASK) >> 1);
3378 	polarity = block->command & CMD_POL ? 0xffffffff : 0;
3379 
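	/*
	 * Sample the link-sense bit in the GPR every stabletime/8 ms; the
	 * link is declared up only after 8 consecutive good samples within
	 * the settle window.
	 */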
3380 	for (waittime = 0, upsamples = 0;
3381 	    waittime <= settletime + stabletime && upsamples < 8;
3382 	    waittime += stabletime/8) {
3383 		delay(drv_usectohz(stabletime*1000 / 8));
3384 		status = read_gpr(dnetp);
3385 		link = (status^polarity) & mask;
3386 		if (link)
3387 			upsamples++;
3388 		else
3389 			upsamples = 0;
3390 	}
3391 #ifdef DNETDEBUG
3392 	if (dnetdebug & DNETSENSE)
3393 		cmn_err(CE_NOTE, "%s upsamples:%d stat:%x polarity:%x "
3394 		    "mask:%x link:%x",
3395 		    upsamples == 8 ? "UP":"DOWN",
3396 		    upsamples, status, polarity, mask, link);
3397 #endif
3398 	if (upsamples == 8)
3399 		return (1);
3400 	return (0);
3401 }
3402 
3403 static int
3404 send_test_packet(struct dnetinstance *dnetp)
3405 {
3406 	int packet_delay;
3407 	struct tx_desc_type *desc;
3408 	int bufindex;
3409 	int media_code = dnetp->selected_media_block->media_code;
3410 	uint32_t del;
3411 
3412 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3413 	/*
3414 	 * For a successful test packet, the card must have settled into
3415 	 * its current setting.  Almost all cards we've tested manage to
3416 	 * do this with all media within 50ms.  However, the SMC 8432
3417 	 * requires 300ms to settle into BNC mode.  We now only do this
3418 	 * from attach, and we do sleeping delay() instead of drv_usecwait()
3419 	 * so we hope this 0.2 second delay won't cause too much suffering.
3420 	 * ALSO: with an autonegotiating hub, an additional 1 second delay is
3421 	 * required.  This is done if the media type is TP.
3422 	 */
3423 	if (media_code == MEDIA_TP || media_code == MEDIA_TP_FD) {
3424 		packet_delay = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
3425 		    DDI_PROP_DONTPASS, "test_packet_delay_tp", 1300000);
3426 	} else {
3427 		packet_delay = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
3428 		    DDI_PROP_DONTPASS, "test_packet_delay", 300000);
3429 	}
3430 	delay(drv_usectohz(packet_delay));
3431 
3432 	desc = dnetp->tx_desc;
3433 
3434 	bufindex = dnetp->tx_current_desc;
3435 	if (alloc_descriptor(dnetp) == FAILURE) {
3436 		cmn_err(CE_WARN, "DNET: send_test_packet: alloc_descriptor"
3437 		    " failed");
3438 		return (0);
3439 	}
3440 
3441 	/*
3442 	 * use setup buffer as the buffer for the test packet
3443 	 * instead of allocating one.
3444 	 */
3445 
3446 	ASSERT(dnetp->setup_buf_vaddr != NULL);
3447 	/* Put something decent in dest address so we don't annoy other cards */
3448 	BCOPY((caddr_t)dnetp->curr_macaddr,
3449 	    (caddr_t)dnetp->setup_buf_vaddr, ETHERADDRL);
3450 	BCOPY((caddr_t)dnetp->curr_macaddr,
3451 	    (caddr_t)dnetp->setup_buf_vaddr+ETHERADDRL, ETHERADDRL);
3452 
3453 	desc[bufindex].buffer1 = dnetp->setup_buf_paddr;
3454 	desc[bufindex].desc1.buffer_size1 = SETUPBUF_SIZE;
3455 	desc[bufindex].buffer2 = (uint32_t)(0);
3456 	desc[bufindex].desc1.first_desc = 1;
3457 	desc[bufindex].desc1.last_desc = 1;
3458 	desc[bufindex].desc1.int_on_comp = 1;
3459 	desc[bufindex].desc0.own = 1;
3460 
3461 	ddi_put8(dnetp->io_handle, REG8(dnetp->io_reg, TX_POLL_REG),
3462 	    TX_POLL_DEMAND);
3463 
3464 	/*
3465 	 * Give enough time for the chip to transmit the packet
3466 	 */
3467 #if 1
3468 	del = 1000;
3469 	while (desc[bufindex].desc0.own && --del)
3470 		drv_usecwait(10);	/* quickly wait up to 10ms */
3471 	if (desc[bufindex].desc0.own)
3472 		delay(drv_usectohz(200000));	/* nicely wait a longer time */
3473 #else
3474 	del = 0x10000;
3475 	while (desc[bufindex].desc0.own && --del)
3476 		drv_usecwait(10);
3477 #endif
3478 
3479 #ifdef DNETDEBUG
3480 	if (dnetdebug & DNETSENSE)
3481 		cmn_err(CE_NOTE, "desc0 bits = %u, %u, %u, %u, %u, %u",
3482 		    desc[bufindex].desc0.own,
3483 		    desc[bufindex].desc0.err_summary,
3484 		    desc[bufindex].desc0.carrier_loss,
3485 		    desc[bufindex].desc0.no_carrier,
3486 		    desc[bufindex].desc0.late_collision,
3487 		    desc[bufindex].desc0.link_fail);
3488 #endif
3489 	if (desc[bufindex].desc0.own) /* it shouldn't take this long, error */
3490 		return (0);
3491 
3492 	return (!desc[bufindex].desc0.err_summary);
3493 }
3494 
3495 /* enable_interrupts - called with intrlock held */
3496 static void
3497 enable_interrupts(struct dnetinstance *dnetp)
3498 {
3499 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3500 	/* Don't enable interrupts if they have been forced off */
3501 	if (dnetp->interrupts_disabled)
3502 		return;
3503 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, INT_MASK_REG),
3504 	    ABNORMAL_INTR_MASK | NORMAL_INTR_MASK | SYSTEM_ERROR_MASK |
3505 	    (dnetp->timer.cb ? GPTIMER_INTR : 0) |
3506 	    RX_INTERRUPT_MASK |
3507 	    TX_INTERRUPT_MASK | TX_JABBER_MASK | TX_UNDERFLOW_MASK);
3508 }
3509 
3510 /*
3511  * Some older multiport cards are non-PCI compliant in their interrupt routing.
3512  * Second and subsequent devices are incorrectly configured by the BIOS
3513  * (either in their ILINE configuration or the MP Configuration Table for PC+MP
3514  * systems).
3515  * The hack stops registering the interrupt routine for the FIRST
3516  * device on the adapter, and registers its own. It builds up a table
3517  * of dnetp structures for each device, and the new interrupt routine
3518  * calls dnet_intr for each of them.
3519  * Known cards that suffer from this problem are:
3520  *	All Cogent multiport cards;
3521  * 	Znyx 314;
3522  *	Znyx 315.
3523  *
3524  * XXX NEEDSWORK -- see comments above get_alternative_srom_image(). This
3525  * hack relies on the fact that the offending cards will have only one SROM.
3526  * It uses this fact to identify devices that are on the same multiport
3527  * adapter, as opposed to multiple devices from the same vendor (as
3528  * indicated by "secondary")
3529  */
3530 static int
3531 dnet_hack_interrupts(struct dnetinstance *dnetp, int secondary)
3532 {
3533 	int i;
3534 	struct hackintr_inf *hackintr_inf;
3535 	dev_info_t *devinfo = dnetp->devinfo;
3536 	uint32_t oui = 0;	/* Organizationally Unique ID */
3537 
3538 	if (ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
3539 	    "no_INTA_workaround", 0) != 0)
3540 		return (0);
3541 
3542 	for (i = 0; i < 3; i++)
3543 		oui = (oui << 8) | dnetp->vendor_addr[i];
3544 
3545 	/* Check whether or not we need to implement the hack */
3546 
3547 	switch (oui) {
3548 	case ZNYX_ETHER:
3549 		/* Znyx multiport 21040 cards <<==>> ZX314 or ZX315 */
3550 		if (dnetp->board_type != DEVICE_ID_21040)
3551 			return (0);
3552 		break;
3553 
3554 	case COGENT_ETHER:
3555 		/* All known Cogent multiport cards */
3556 		break;
3557 
3558 	case ADAPTEC_ETHER:
3559 		/* Adaptec multiport cards */
3560 		break;
3561 
3562 	default:
3563 		/* Other cards work correctly */
3564 		return (0);
3565 	}
3566 
3567 	/* card is (probably) non-PCI compliant in its interrupt routing */
3568 
3569 
3570 	if (!secondary) {
3571 
3572 		/*
3573 		 * If we have already registered a hacked interrupt, and
3574 		 * this is also a 'primary' adapter, then this is NOT part of
3575 		 * a multiport card, but a second card on the same PCI bus.
3576 		 * BUGID: 4057747
3577 		 */
3578 		if (ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
3579 		    DDI_PROP_DONTPASS, hackintr_propname, 0) != 0)
3580 			return (0);
3581 				/* ... Primary not part of a multiport device */
3582 
3583 #ifdef DNETDEBUG
3584 		if (dnetdebug & DNETTRACE)
3585 			cmn_err(CE_NOTE, "dnet: Implementing hardware "
3586 			    "interrupt flaw workaround");
3587 #endif
3588 		dnetp->hackintr_inf = hackintr_inf =
3589 		    kmem_zalloc(sizeof (struct hackintr_inf), KM_SLEEP);
3590 		if (hackintr_inf == NULL)
3591 			goto fail;
3592 
3593 		hackintr_inf->dnetps[0] = dnetp;
3594 		hackintr_inf->devinfo = devinfo;
3595 
3596 		/*
3597 		 * Add a property to allow successive attaches to find the
3598 		 * table
3599 		 */
3600 
3601 		if (ddi_prop_update_byte_array(DDI_DEV_T_NONE,
3602 		    ddi_get_parent(devinfo), hackintr_propname,
3603 		    (uchar_t *)&dnetp->hackintr_inf,
3604 		    sizeof (void *)) != DDI_PROP_SUCCESS)
3605 			goto fail;
3606 
3607 
3608 		/* Register our hacked interrupt routine */
3609 		if (ddi_add_intr(devinfo, 0, &dnetp->icookie, NULL,
3610 		    (uint_t (*)(char *))dnet_hack_intr,
3611 		    (caddr_t)hackintr_inf) != DDI_SUCCESS) {
3612 			/* XXX function return value ignored */
3613 			(void) ddi_prop_remove(DDI_DEV_T_NONE,
3614 			    ddi_get_parent(devinfo),
3615 			    hackintr_propname);
3616 			goto fail;
3617 		}
3618 
3619 		/*
3620 		 * Mutex required to ensure interrupt routine has completed
3621 		 * when detaching devices
3622 		 */
3623 		mutex_init(&hackintr_inf->lock, NULL, MUTEX_DRIVER,
3624 		    dnetp->icookie);
3625 
3626 		/* Stop GLD registering an interrupt */
3627 		return (-1);
3628 	} else {
3629 
3630 		/* Add the dnetp for this secondary device to the table */
3631 
3632 		hackintr_inf = (struct hackintr_inf *)(uintptr_t)
3633 		    ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
3634 		    DDI_PROP_DONTPASS, hackintr_propname, 0);
3635 
3636 		if (hackintr_inf == NULL)
3637 			goto fail;
3638 
3639 		/* Find an empty slot */
3640 		for (i = 0; i < MAX_INST; i++)
3641 			if (hackintr_inf->dnetps[i] == NULL)
3642 				break;
3643 
3644 		/* More than 8 ports on adapter ?! */
3645 		if (i == MAX_INST)
3646 			goto fail;
3647 
3648 		hackintr_inf->dnetps[i] = dnetp;
3649 
3650 		/*
3651 		 * Allow GLD to register a handler for this
3652 		 * device. If the card is actually broken, as we suspect, this
3653 		 * handler will never get called. However, by registering the
3654 		 * interrupt handler, we can cope gracefully with new multiport
3655 		 * Cogent cards that decide to fix the hardware problem.
3656 		 */
3657 		return (0);
3658 	}
3659 
3660 fail:
3661 	cmn_err(CE_WARN, "dnet: Could not work around hardware interrupt"
3662 	    " routing problem");
3663 	return (0);
3664 }
3665 
3666 /*
3667  * Call dnet_intr for all adapters on a multiport card
3668  */
3669 static uint_t
3670 dnet_hack_intr(struct hackintr_inf *hackintr_inf)
3671 {
3672 	int i;
3673 	int claimed = DDI_INTR_UNCLAIMED;
3674 
3675 	/* Stop detaches while processing interrupts */
3676 	mutex_enter(&hackintr_inf->lock);
3677 
3678 	for (i = 0; i < MAX_INST; i++) {
3679 		if (hackintr_inf->dnetps[i] &&
3680 		    dnet_intr((caddr_t)hackintr_inf->dnetps[i]) ==
3681 		    DDI_INTR_CLAIMED) {
3682 			claimed = DDI_INTR_CLAIMED;
3683 		}
3684 	}
3685 	mutex_exit(&hackintr_inf->lock);
3686 	return (claimed);
3687 }
3688 
3689 /*
3690  * This removes the detaching device from the table processed by the hacked
3691  * interrupt routine. Because the interrupts from all devices come in to the
3692  * same interrupt handler, ALL devices must stop interrupting once the
3693  * primary device detaches. This isn't a problem at present, because all
3694  * instances of a device are detached when the driver is unloaded.
3695  */
3696 static int
3697 dnet_detach_hacked_interrupt(dev_info_t *devinfo)
3698 {
3699 	int i;
3700 	struct hackintr_inf *hackintr_inf;
3701 	struct dnetinstance *altdnetp, *dnetp =
3702 	    ddi_get_driver_private(devinfo);
3703 
3704 	hackintr_inf = (struct hackintr_inf *)(uintptr_t)
3705 	    ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
3706 	    DDI_PROP_DONTPASS, hackintr_propname, 0);
3707 
3708 	/*
3709 	 * No hackintr_inf implies hack was not required or the primary has
3710 	 * detached, and our interrupts are already disabled
3711 	 */
3712 	if (!hackintr_inf) {
3713 		/* remove the interrupt for the non-hacked case */
3714 		ddi_remove_intr(devinfo, 0, dnetp->icookie);
3715 		return (DDI_SUCCESS);
3716 	}
3717 
3718 	/* Remove this device from the handled table */
3719 	mutex_enter(&hackintr_inf->lock);
3720 	for (i = 0; i < MAX_INST; i++) {
3721 		if (hackintr_inf->dnetps[i] == dnetp) {
3722 			hackintr_inf->dnetps[i] = NULL;
3723 			break;
3724 		}
3725 	}
3726 
3727 	mutex_exit(&hackintr_inf->lock);
3728 
3729 	/* Not the primary card, we are done */
3730 	if (devinfo != hackintr_inf->devinfo)
3731 		return (DDI_SUCCESS);
3732 
3733 	/*
3734 	 * This is the primary card. All remaining adapters on this device
3735 	 * must have their interrupts disabled before we remove the handler
3736 	 */
3737 	for (i = 0; i < MAX_INST; i++) {
3738 		if ((altdnetp = hackintr_inf->dnetps[i]) != NULL) {
3739 			altdnetp->interrupts_disabled = 1;
3740 			ddi_put32(altdnetp->io_handle,
3741 			    REG32(altdnetp->io_reg, INT_MASK_REG), 0);
3742 		}
3743 	}
3744 
3745 	/* It should now be safe to remove the interrupt handler */
3746 
3747 	ddi_remove_intr(devinfo, 0, dnetp->icookie);
3748 	mutex_destroy(&hackintr_inf->lock);
3749 	/* XXX function return value ignored */
3750 	(void) ddi_prop_remove(DDI_DEV_T_NONE, ddi_get_parent(devinfo),
3751 	    hackintr_propname);
3752 	kmem_free(hackintr_inf, sizeof (struct hackintr_inf));
3753 	return (DDI_SUCCESS);
3754 }
3755 
3756 /* do_phy() - called with intrlock held */
3757 static void
3758 do_phy(struct dnetinstance *dnetp)
3759 {
3760 	dev_info_t *dip;
3761 	LEAF_FORMAT *leaf = dnetp->sr.leaf + dnetp->leaf;
3762 	media_block_t *block;
3763 	int phy;
3764 
3765 	dip = dnetp->devinfo;
3766 
3767 	/*
3768 	 * Find and configure the PHY media block. If NO PHY blocks are
3769 	 * found on the SROM, but a PHY device is present, we assume the card
3770 	 * is a legacy device, and that there is ONLY a PHY interface on the
3771 	 * card (i.e., no BNC or AUI, and 10BaseT is implemented by the PHY).
3772 	 */
3773 
3774 	for (block = leaf->block + leaf->block_count - 1;
3775 	    block >= leaf->block; block--) {
3776 		if (block->type == 3 || block->type == 1) {
3777 			leaf->mii_block = block;
3778 			break;
3779 		}
3780 	}
3781 
3782 	/*
3783 	 * If no MII block, select default, and hope this configuration will
3784 	 * allow the phy to be read/written if it is present
3785 	 */
3786 	dnetp->selected_media_block = leaf->mii_block ?
3787 	    leaf->mii_block : leaf->default_block;
3788 
3789 	setup_block(dnetp);
3790 	/* XXX function return value ignored */
3791 	(void) mii_create(dip, dnet_mii_write, dnet_mii_read, &dnetp->mii);
3792 
3793 	/*
3794 	 * We try PHY 0 LAST because it is less likely to be connected
3795 	 */
3796 	for (phy = 1; phy < 33; phy++)
3797 		if (mii_probe_phy(dnetp->mii, phy % 32) == MII_SUCCESS &&
3798 		    mii_init_phy(dnetp->mii, phy % 32) == MII_SUCCESS) {
3799 #ifdef DNETDEBUG
3800 			if (dnetdebug & DNETSENSE)
3801 				cmn_err(CE_NOTE, "dnet: "
3802 				    "PHY at address %d", phy % 32);
3803 #endif
3804 			dnetp->phyaddr = phy % 32;
3805 			if (!leaf->mii_block) {
3806 				/* Legacy card, change the leaf node */
3807 				set_leaf(&dnetp->sr, &leaf_phylegacy);
3808 			}
3809 			return;
3810 		}
3811 #ifdef DNETDEBUG
3812 	if (dnetdebug & DNETSENSE)
3813 		cmn_err(CE_NOTE, "dnet: No PHY found");
3814 #endif
3815 }
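
/*
 * Illustrative sketch only (added for clarity): the probe loop in do_phy()
 * above walks "phy" from 1 to 32 and probes (phy % 32), which yields the
 * probe order 1, 2, ..., 31, 0 -- address 0 is tried last because it is the
 * least likely to be connected.  The example_* name and the DNET_EXAMPLES
 * guard below are hypothetical and not part of the driver.
 */
#ifdef DNET_EXAMPLES
static void
example_phy_probe_order(int order[32])
{
	int phy;

	for (phy = 1; phy < 33; phy++)
		order[phy - 1] = phy % 32;	/* 1, 2, ..., 31, 0 */
}
#endif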
3816 
3817 static ushort_t
3818 dnet_mii_read(dev_info_t *dip, int phy_addr, int reg_num)
3819 {
3820 	struct dnetinstance *dnetp;
3821 
3822 	uint32_t command_word;
3823 	uint32_t tmp;
3824 	uint32_t data = 0;
3825 	int i;
3826 	int bits_in_ushort = ((sizeof (ushort_t))*8);
3827 	int turned_around = 0;
3828 
3829 	dnetp = ddi_get_driver_private(dip);
3830 
3831 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3832 	/* Write Preamble */
3833 	write_mii(dnetp, MII_PRE, 2*bits_in_ushort);
3834 
3835 	/* Prepare command word */
3836 	command_word = (uint32_t)phy_addr << MII_PHY_ADDR_ALIGN;
3837 	command_word |= (uint32_t)reg_num << MII_REG_ADDR_ALIGN;
3838 	command_word |= MII_READ_FRAME;
3839 
3840 	write_mii(dnetp, command_word, bits_in_ushort-2);
3841 
3842 	mii_tristate(dnetp);
3843 
3844 	/* Check that the PHY generated a zero bit on the 2nd clock */
3845 	tmp = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, ETHER_ROM_REG));
3846 
3847 	turned_around = (tmp & MII_DATA_IN) ? 0 : 1;
3848 
3849 	/* read data WORD */
3850 	for (i = 0; i < bits_in_ushort; i++) {
3851 		ddi_put32(dnetp->io_handle,
3852 		    REG32(dnetp->io_reg, ETHER_ROM_REG), MII_READ);
3853 		drv_usecwait(MII_DELAY);
3854 		ddi_put32(dnetp->io_handle,
3855 		    REG32(dnetp->io_reg, ETHER_ROM_REG), MII_READ | MII_CLOCK);
3856 		drv_usecwait(MII_DELAY);
3857 		tmp = ddi_get32(dnetp->io_handle,
3858 		    REG32(dnetp->io_reg, ETHER_ROM_REG));
3859 		drv_usecwait(MII_DELAY);
3860 		data = (data << 1) | (tmp >> MII_DATA_IN_POSITION) & 0x0001;
3861 	}
3862 
3863 	mii_tristate(dnetp);
3864 	return (turned_around ? data: -1);
3865 }
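
/*
 * Illustrative sketch only: dnet_mii_read() above bit-bangs an IEEE 802.3
 * clause-22 MDIO read frame -- 32 preamble bits, start bits 01, read
 * opcode 10, a 5-bit PHY address, a 5-bit register address, a turnaround
 * cycle during which the PHY drives a zero (checked via "turned_around"),
 * then 16 data bits clocked in MSB first.  The helper below packs the 14
 * command bits into the low bits of a 16-bit word; the driver instead keeps
 * them left-aligned in its 32-bit command_word because write_mii() shifts
 * bits out starting from bit 31.  The DNET_EXAMPLES guard and example_*
 * name are hypothetical.
 */
#ifdef DNET_EXAMPLES
static uint16_t
example_mdio_read_cmd(int phy_addr, int reg_num)
{
	uint16_t cmd;

	cmd = 0x6 << 10;			/* ST = 01, OP = 10 (read) */
	cmd |= (phy_addr & 0x1f) << 5;		/* 5-bit PHY address */
	cmd |= (reg_num & 0x1f);		/* 5-bit register address */
	return (cmd);
}
#endif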
3866 
3867 static void
3868 dnet_mii_write(dev_info_t *dip, int phy_addr, int reg_num, int reg_dat)
3869 {
3870 	struct dnetinstance *dnetp;
3871 	uint32_t command_word;
3872 	int bits_in_ushort = ((sizeof (ushort_t))*8);
3873 
3874 	dnetp = ddi_get_driver_private(dip);
3875 
3876 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3877 	write_mii(dnetp, MII_PRE, 2*bits_in_ushort);
3878 
3879 	/* Prepare command word */
3880 	command_word = ((uint32_t)phy_addr << MII_PHY_ADDR_ALIGN);
3881 	command_word |= ((uint32_t)reg_num << MII_REG_ADDR_ALIGN);
3882 	command_word |= (MII_WRITE_FRAME | (uint32_t)reg_dat);
3883 
3884 	write_mii(dnetp, command_word, 2*bits_in_ushort);
3885 	mii_tristate(dnetp);
3886 }
3887 
3888 /*
3889  * Write data size bits from mii_data to the MII control lines.
3890  */
3891 static void
3892 write_mii(struct dnetinstance *dnetp, uint32_t mii_data, int data_size)
3893 {
3894 	int i;
3895 	uint32_t dbit;
3896 
3897 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3898 	for (i = data_size; i > 0; i--) {
3899 		dbit = ((mii_data >>
3900 		    (31 - MII_WRITE_DATA_POSITION)) & MII_WRITE_DATA);
3901 		ddi_put32(dnetp->io_handle,
3902 		    REG32(dnetp->io_reg, ETHER_ROM_REG),
3903 		    MII_WRITE | dbit);
3904 		drv_usecwait(MII_DELAY);
3905 		ddi_put32(dnetp->io_handle,
3906 		    REG32(dnetp->io_reg, ETHER_ROM_REG),
3907 		    MII_WRITE | MII_CLOCK | dbit);
3908 		drv_usecwait(MII_DELAY);
3909 		mii_data <<= 1;
3910 	}
3911 }
3912 
3913 /*
3914  * Put the MDIO port in tri-state for the turn around bits
3915  * in MII read and at end of MII management sequence.
3916  */
3917 static void
3918 mii_tristate(struct dnetinstance *dnetp)
3919 {
3920 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3921 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, ETHER_ROM_REG),
3922 	    MII_WRITE_TS);
3923 	drv_usecwait(MII_DELAY);
3924 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, ETHER_ROM_REG),
3925 	    MII_WRITE_TS | MII_CLOCK);
3926 	drv_usecwait(MII_DELAY);
3927 }
3928 
3929 
3930 static void
3931 set_leaf(SROM_FORMAT *sr, LEAF_FORMAT *leaf)
3932 {
3933 	if (sr->leaf && !sr->leaf->is_static)
3934 		kmem_free(sr->leaf, sr->adapters * sizeof (LEAF_FORMAT));
3935 	sr->leaf = leaf;
3936 }
3937 
3938 /*
3939  * Callback from MII module. Makes sure that the CSR registers are
3940  * configured properly if the PHY changes mode.
3941  */
3942 /* ARGSUSED */
3943 /* dnet_mii_link_cb - called with intrlock held */
3944 static void
3945 dnet_mii_link_cb(dev_info_t *dip, int phy, enum mii_phy_state state)
3946 {
3947 	struct dnetinstance *dnetp = ddi_get_driver_private(dip);
3948 	LEAF_FORMAT *leaf;
3949 
3950 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3951 
3952 	leaf = dnetp->sr.leaf + dnetp->leaf;
3953 	if (state == phy_state_linkup) {
3954 		dnetp->mii_up = 1;
3955 
3956 		(void) mii_getspeed(dnetp->mii, dnetp->phyaddr,
3957 		    &dnetp->mii_speed, &dnetp->mii_duplex);
3958 
3959 		dnetp->selected_media_block = leaf->mii_block;
3960 		setup_block(dnetp);
3961 	} else {
3962 		/* NEEDSWORK: Probably can call find_active_media here */
3963 		dnetp->mii_up = 0;
3964 
3965 		if (leaf->default_block->media_code == MEDIA_MII)
3966 			dnetp->selected_media_block = leaf->default_block;
3967 		setup_block(dnetp);
3968 	}
3969 
3970 	if (dnetp->running) {
3971 		mac_link_update(dnetp->mac_handle,
3972 		    (dnetp->mii_up ? LINK_STATE_UP : LINK_STATE_DOWN));
3973 	}
3974 }
3975 
3976 /*
3977  * SROM parsing routines.
3978  * Refer to the Digital 3.03 SROM spec while reading this! (references refer
3979  * to this document)
3980  * Where possible ALL vendor specific changes should be localised here. The
3981  * SROM data should be capable of describing any programmatic irregularities
3982  * of DNET cards (via SIA or GP registers, in particular), so vendor specific
3983  * code elsewhere should not be required
3984  */
3985 static void
3986 dnet_parse_srom(struct dnetinstance *dnetp, SROM_FORMAT *sr, uchar_t *vi)
3987 {
3988 	uint32_t ether_mfg = 0;
3989 	int i;
3990 	uchar_t *p;
3991 
3992 	if (!ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
3993 	    DDI_PROP_DONTPASS, "no_sromconfig", 0))
3994 		dnetp->sr.init_from_srom = check_srom_valid(vi);
3995 
3996 	if (dnetp->sr.init_from_srom && dnetp->board_type != DEVICE_ID_21040) {
3997 		/* Section 2/3: General SROM Format/ ID Block */
3998 		p = vi+18;
3999 		sr->version = *p++;
4000 		sr->adapters = *p++;
4001 
4002 		sr->leaf =
4003 		    kmem_zalloc(sr->adapters * sizeof (LEAF_FORMAT), KM_SLEEP);
4004 		for (i = 0; i < 6; i++)
4005 			sr->netaddr[i] = *p++;
4006 
4007 		for (i = 0; i < sr->adapters; i++) {
4008 			uchar_t devno = *p++;
4009 			uint16_t offset = *p++;
4010 			offset |= *p++ << 8;
4011 			sr->leaf[i].device_number = devno;
4012 			parse_controller_leaf(dnetp, sr->leaf+i, vi+offset);
4013 		}
4014 		/*
4015 		 * 'Orrible hack for cogent cards. The 6911A board seems to
4016 		 * have an incorrect SROM. (From the OEMDEMO program
4017 		 * supplied by cogent, it seems that the ROM matches a setup
4018 		 * for a board with a QSI or ICS PHY.)
4019 		 */
4020 		for (i = 0; i < 3; i++)
4021 			ether_mfg = (ether_mfg << 8) | sr->netaddr[i];
4022 
4023 		if (ether_mfg == ADAPTEC_ETHER) {
4024 			static uint16_t cogent_gprseq[] = {0x821, 0};
4025 			switch (vi[COGENT_SROM_ID]) {
4026 			case COGENT_ANA6911A_C:
4027 			case COGENT_ANA6911AC_C:
4028 #ifdef DNETDEBUG
4029 				if (dnetdebug & DNETTRACE)
4030 					cmn_err(CE_WARN,
4031 					    "Suspected bad GPR sequence."
4032 					    " Making a guess (821,0)");
4033 #endif
4034 
4035 				/* XXX function return value ignored */
4036 				(void) ddi_prop_update_byte_array(
4037 				    DDI_DEV_T_NONE, dnetp->devinfo,
4038 				    "gpr-sequence", (uchar_t *)cogent_gprseq,
4039 				    sizeof (cogent_gprseq));
4040 				break;
4041 			}
4042 		}
4043 	} else {
4044 		/*
4045 		 * Ad hoc SROM; check for some cards which need special handling.
4046 		 * Assume the vendor info contains the ether address in its first six bytes.
4047 		 */
4048 
4049 		uchar_t *mac = vi + ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
4050 		    DDI_PROP_DONTPASS, macoffset_propname, 0);
4051 
4052 		for (i = 0; i < 6; i++)
4053 			sr->netaddr[i] = mac[i];
4054 
4055 		if (dnetp->board_type == DEVICE_ID_21140) {
4056 			for (i = 0; i < 3; i++)
4057 				ether_mfg = (ether_mfg << 8) | mac[i];
4058 
4059 			switch (ether_mfg) {
4060 			case ASANTE_ETHER:
4061 				dnetp->vendor_21140 = ASANTE_TYPE;
4062 				dnetp->vendor_revision = 0;
4063 				set_leaf(sr, &leaf_asante);
4064 				sr->adapters = 1;
4065 				break;
4066 
4067 			case COGENT_ETHER:
4068 			case ADAPTEC_ETHER:
4069 				dnetp->vendor_21140 = COGENT_EM_TYPE;
4070 				dnetp->vendor_revision =
4071 				    vi[VENDOR_REVISION_OFFSET];
4072 				set_leaf(sr, &leaf_cogent_100);
4073 				sr->adapters = 1;
4074 				break;
4075 
4076 			default:
4077 				dnetp->vendor_21140 = DEFAULT_TYPE;
4078 				dnetp->vendor_revision = 0;
4079 				set_leaf(sr, &leaf_default_100);
4080 				sr->adapters = 1;
4081 				break;
4082 			}
4083 		} else if (dnetp->board_type == DEVICE_ID_21041) {
4084 			set_leaf(sr, &leaf_21041);
4085 		} else if (dnetp->board_type == DEVICE_ID_21040) {
4086 			set_leaf(sr, &leaf_21040);
4087 		}
4088 	}
4089 }
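
/*
 * Illustrative sketch only: for a valid non-21040 SROM the code above walks
 * the ID block -- format version at byte 18, adapter count at byte 19, the
 * 6-byte station address at bytes 20-25, then one (device number,
 * little-endian 16-bit leaf offset) pair per adapter.  A stand-alone decode
 * of just that header; the DNET_EXAMPLES guard and example_* name are
 * hypothetical.
 */
#ifdef DNET_EXAMPLES
static void
example_srom_id_block(const uchar_t *vi, uchar_t mac[6])
{
	const uchar_t *p = vi + 18;
	uchar_t version = *p++;
	uchar_t adapters = *p++;
	int i;

	for (i = 0; i < 6; i++)
		mac[i] = *p++;

	for (i = 0; i < adapters; i++) {
		uchar_t devno = *p++;
		uint16_t leaf_offset = p[0] | (p[1] << 8);

		p += 2;
		/* The leaf for device "devno" starts at vi + leaf_offset */
		(void) devno;
		(void) leaf_offset;
	}
	(void) version;
}
#endif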
4090 
4091 /* Section 4.2, 4.3, 4.4, 4.5 */
4092 static void
4093 parse_controller_leaf(struct dnetinstance *dnetp, LEAF_FORMAT *leaf,
4094 	uchar_t *vi)
4095 {
4096 	int i;
4097 
4098 	leaf->selected_contype = *vi++;
4099 	leaf->selected_contype |= *vi++ << 8;
4100 
4101 	if (dnetp->board_type == DEVICE_ID_21140) /* Sect. 4.3 */
4102 		leaf->gpr = *vi++;
4103 
4104 	leaf->block_count = *vi++;
4105 
4106 	if (leaf->block_count > MAX_MEDIA) {
4107 		cmn_err(CE_WARN, "dnet: Too many media in SROM!");
4108 		leaf->block_count = 1;
4109 	}
4110 	for (i = 0; i < leaf->block_count; i++) {
4111 		vi = parse_media_block(dnetp, leaf->block + i, vi);
4112 		if (leaf->block[i].command & CMD_DEFAULT_MEDIUM)
4113 			leaf->default_block = leaf->block+i;
4114 	}
4115 	/* No explicit default block: use last in the ROM */
4116 	if (leaf->default_block == NULL)
4117 		leaf->default_block = leaf->block + leaf->block_count - 1;
4118 
4119 }
4120 
4121 static uchar_t *
4122 parse_media_block(struct dnetinstance *dnetp, media_block_t *block, uchar_t *vi)
4123 {
4124 	int i;
4125 
4126 	/*
4127 	 * There are three kinds of media block we need to worry about:
4128 	 * The 21041 blocks.
4129 	 * 21140 blocks from a version 1 SROM
4130 	 * 2114[023] block from a version 3 SROM
4131 	 */
4132 
4133 	if (dnetp->board_type == DEVICE_ID_21041) {
4134 		/* Section 4.2 */
4135 		block->media_code = *vi & 0x3f;
4136 		block->type = 2;
4137 		if (*vi++ & 0x40) {
4138 			block->un.sia.csr13 = *vi++;
4139 			block->un.sia.csr13 |= *vi++ << 8;
4140 			block->un.sia.csr14 = *vi++;
4141 			block->un.sia.csr14 |= *vi++ << 8;
4142 			block->un.sia.csr15 = *vi++;
4143 			block->un.sia.csr15 |= *vi++ << 8;
4144 		} else {
4145 			/* No media data (csrs 13,14,15). Insert defaults */
4146 			switch (block->media_code) {
4147 			case MEDIA_TP:
4148 				block->un.sia.csr13 = 0xef01;
4149 				block->un.sia.csr14 = 0x7f3f;
4150 				block->un.sia.csr15 = 0x0008;
4151 				break;
4152 			case MEDIA_TP_FD:
4153 				block->un.sia.csr13 = 0xef01;
4154 				block->un.sia.csr14 = 0x7f3d;
4155 				block->un.sia.csr15 = 0x0008;
4156 				break;
4157 			case MEDIA_BNC:
4158 				block->un.sia.csr13 = 0xef09;
4159 				block->un.sia.csr14 = 0x0705;
4160 				block->un.sia.csr15 = 0x0006;
4161 				break;
4162 			case MEDIA_AUI:
4163 				block->un.sia.csr13 = 0xef09;
4164 				block->un.sia.csr14 = 0x0705;
4165 				block->un.sia.csr15 = 0x000e;
4166 				break;
4167 			}
4168 		}
4169 	} else if (*vi & 0x80) {  /* Extended format: Section 4.3.2.2 */
4170 		int blocklen = *vi++ & 0x7f;
4171 		block->type = *vi++;
4172 		switch (block->type) {
4173 		case 0: /* "non-MII": Section 4.3.2.2.1 */
4174 			block->media_code = (*vi++) & 0x3f;
4175 			block->gprseqlen = 1;
4176 			block->gprseq[0] = *vi++;
4177 			block->command = *vi++;
4178 			block->command |= *vi++ << 8;
4179 			break;
4180 
4181 		case 1: /* MII/PHY: Section 4.3.2.2.2 */
4182 			block->command = CMD_PS;
4183 			block->media_code = MEDIA_MII;
4184 				/* This is what's needed in CSR6 */
4185 
4186 			block->un.mii.phy_num = *vi++;
4187 			block->gprseqlen = *vi++;
4188 
4189 			for (i = 0; i < block->gprseqlen; i++)
4190 				block->gprseq[i] = *vi++;
4191 			block->rstseqlen = *vi++;
4192 			for (i = 0; i < block->rstseqlen; i++)
4193 				block->rstseq[i] = *vi++;
4194 
4195 			block->un.mii.mediacaps = *vi++;
4196 			block->un.mii.mediacaps |= *vi++ << 8;
4197 			block->un.mii.nwayadvert = *vi++;
4198 			block->un.mii.nwayadvert |= *vi++ << 8;
4199 			block->un.mii.fdxmask = *vi++;
4200 			block->un.mii.fdxmask |= *vi++ << 8;
4201 			block->un.mii.ttmmask = *vi++;
4202 			block->un.mii.ttmmask |= *vi++ << 8;
4203 			break;
4204 
4205 		case 2: /* SIA Media: Section 4.4.2.1.1 */
4206 			block->media_code = *vi & 0x3f;
4207 			if (*vi++ & 0x40) {
4208 				block->un.sia.csr13 = *vi++;
4209 				block->un.sia.csr13 |= *vi++ << 8;
4210 				block->un.sia.csr14 = *vi++;
4211 				block->un.sia.csr14 |= *vi++ << 8;
4212 				block->un.sia.csr15 = *vi++;
4213 				block->un.sia.csr15 |= *vi++ << 8;
4214 			} else {
4215 				/*
4216 				 * SIA values not provided by SROM; provide
4217 				 * defaults. See appendix D of 2114[23] manuals.
4218 				 */
4219 				switch (block->media_code) {
4220 				case MEDIA_BNC:
4221 					block->un.sia.csr13 = 0x0009;
4222 					block->un.sia.csr14 = 0x0705;
4223 					block->un.sia.csr15 = 0x0000;
4224 					break;
4225 				case MEDIA_AUI:
4226 					block->un.sia.csr13 = 0x0009;
4227 					block->un.sia.csr14 = 0x0705;
4228 					block->un.sia.csr15 = 0x0008;
4229 					break;
4230 				case MEDIA_TP:
4231 					block->un.sia.csr13 = 0x0001;
4232 					block->un.sia.csr14 = 0x7f3f;
4233 					block->un.sia.csr15 = 0x0000;
4234 					break;
4235 				case MEDIA_TP_FD:
4236 					block->un.sia.csr13 = 0x0001;
4237 					block->un.sia.csr14 = 0x7f3d;
4238 					block->un.sia.csr15 = 0x0000;
4239 					break;
4240 				default:
4241 					block->un.sia.csr13 = 0x0000;
4242 					block->un.sia.csr14 = 0x0000;
4243 					block->un.sia.csr15 = 0x0000;
4244 				}
4245 			}
4246 
4247 			/* Treat GP control/data as a GPR sequence */
4248 			block->gprseqlen = 2;
4249 			block->gprseq[0] = *vi++;
4250 			block->gprseq[0] |= *vi++ << 8;
4251 			block->gprseq[0] |= GPR_CONTROL_WRITE;
4252 			block->gprseq[1] = *vi++;
4253 			block->gprseq[1] |= *vi++ << 8;
4254 			break;
4255 
4256 		case 3: /* MII/PHY : Section 4.4.2.1.2 */
4257 			block->command = CMD_PS;
4258 			block->media_code = MEDIA_MII;
4259 			block->un.mii.phy_num = *vi++;
4260 
4261 			block->gprseqlen = *vi++;
4262 			for (i = 0; i < block->gprseqlen; i++) {
4263 				block->gprseq[i] = *vi++;
4264 				block->gprseq[i] |= *vi++ << 8;
4265 			}
4266 
4267 			block->rstseqlen = *vi++;
4268 			for (i = 0; i < block->rstseqlen; i++) {
4269 				block->rstseq[i] = *vi++;
4270 				block->rstseq[i] |= *vi++ << 8;
4271 			}
4272 			block->un.mii.mediacaps = *vi++;
4273 			block->un.mii.mediacaps |= *vi++ << 8;
4274 			block->un.mii.nwayadvert = *vi++;
4275 			block->un.mii.nwayadvert |= *vi++ << 8;
4276 			block->un.mii.fdxmask = *vi++;
4277 			block->un.mii.fdxmask |= *vi++ << 8;
4278 			block->un.mii.ttmmask = *vi++;
4279 			block->un.mii.ttmmask |= *vi++ << 8;
4280 			block->un.mii.miiintr |= *vi++;
4281 			break;
4282 
4283 		case 4: /* SYM Media: 4.5.2.1.3 */
4284 			block->media_code = *vi++ & 0x3f;
4285 			/* Treat GP control and data as a GPR sequence */
4286 			block->gprseqlen = 2;
4287 			block->gprseq[0] = *vi++;
4288 			block->gprseq[0] |= *vi++ << 8;
4289 			block->gprseq[0] |= GPR_CONTROL_WRITE;
4290 			block->gprseq[1]  = *vi++;
4291 			block->gprseq[1] |= *vi++ << 8;
4292 			block->command = *vi++;
4293 			block->command |= *vi++ << 8;
4294 			break;
4295 
4296 		case 5: /* GPR reset sequence:  Section 4.5.2.1.4 */
4297 			block->rstseqlen = *vi++;
4298 			for (i = 0; i < block->rstseqlen; i++)
4299 				block->rstseq[i] = *vi++;
4300 			break;
4301 
4302 		default: /* Unknown media block. Skip it. */
4303 			cmn_err(CE_WARN, "dnet: Unsupported SROM block.");
4304 			vi += blocklen;
4305 			break;
4306 		}
4307 	} else { /* Compact format (or V1 SROM): Section 4.3.2.1 */
4308 		block->type = 0;
4309 		block->media_code = *vi++ & 0x3f;
4310 		block->gprseqlen = 1;
4311 		block->gprseq[0] = *vi++;
4312 		block->command = *vi++;
4313 		block->command |= (*vi++) << 8;
4314 	}
4315 	return (vi);
4316 }
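
/*
 * Illustrative sketch only: the compact-format (version 1 SROM, section
 * 4.3.2.1) case handled by the final "else" above is a four-byte block --
 * media code in the low six bits of byte 0, one GPR byte, and a
 * little-endian 16-bit command word.  The DNET_EXAMPLES guard and
 * example_* name are hypothetical.
 */
#ifdef DNET_EXAMPLES
static void
example_decode_compact_block(const uchar_t vi[4], media_block_t *block)
{
	block->type = 0;
	block->media_code = vi[0] & 0x3f;
	block->gprseqlen = 1;
	block->gprseq[0] = vi[1];
	block->command = vi[2] | (vi[3] << 8);
}
#endif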
4317 
4318 
4319 /*
4320  * An alternative to doing this would be to store the legacy ROMs in binary
4321  * format in the conf file, and in read_srom, pick out the data. This would
4322  * then allow the parser to continue on as normal. This makes it a little
4323  * easier to read.
4324  */
4325 static void
4326 setup_legacy_blocks()
4327 {
4328 	LEAF_FORMAT *leaf;
4329 	media_block_t *block;
4330 
4331 	/* Default FAKE SROM */
4332 	leaf = &leaf_default_100;
4333 	leaf->is_static = 1;
4334 	leaf->default_block = &leaf->block[3];
4335 	leaf->block_count = 4; /* 100 cards are highly unlikely to have BNC */
4336 	block = leaf->block;
4337 	block->media_code = MEDIA_TP_FD;
4338 	block->type = 0;
4339 	block->command = 0x8e;  /* PCS, PS off, media sense: bit7, pol=1 */
4340 	block++;
4341 	block->media_code = MEDIA_TP;
4342 	block->type = 0;
4343 	block->command = 0x8e;  /* PCS, PS off, media sense: bit7, pol=1 */
4344 	block++;
4345 	block->media_code = MEDIA_SYM_SCR_FD;
4346 	block->type = 0;
4347 	block->command = 0x6d;  /* PCS, PS, SCR on, media sense: bit6, pol=0 */
4348 	block++;
4349 	block->media_code = MEDIA_SYM_SCR;
4350 	block->type = 0;
4351 	block->command = 0x406d; /* PCS, PS, SCR on, media sense: bit6, pol=0 */
4352 
4353 	/* COGENT FAKE SROM */
4354 	leaf = &leaf_cogent_100;
4355 	leaf->is_static = 1;
4356 	leaf->default_block = &leaf->block[4];
4357 	leaf->block_count = 5; /* 100TX, 100TX-FD, 10T 10T-FD, BNC */
4358 	block = leaf->block; /* BNC */
4359 	block->media_code = MEDIA_BNC;
4360 	block->type = 0;
4361 	block->command =  0x8000; /* No media sense, PCS, SCR, PS all off */
4362 	block->gprseqlen = 2;
4363 	block->rstseqlen = 0;
4364 	block->gprseq[0] = 0x13f;
4365 	block->gprseq[1] = 1;
4366 
4367 	block++;
4368 	block->media_code = MEDIA_TP_FD;
4369 	block->type = 0;
4370 	block->command = 0x8e;  /* PCS, PS off, media sense: bit7, pol=1 */
4371 	block->gprseqlen = 2;
4372 	block->rstseqlen = 0;
4373 	block->gprseq[0] = 0x13f;
4374 	block->gprseq[1] = 0x26;
4375 
4376 	block++; /* 10BaseT */
4377 	block->media_code = MEDIA_TP;
4378 	block->type = 0;
4379 	block->command = 0x8e;  /* PCS, PS off, media sense: bit7, pol=1 */
4380 	block->gprseqlen = 2;
4381 	block->rstseqlen = 0;
4382 	block->gprseq[0] = 0x13f;
4383 	block->gprseq[1] = 0x3e;
4384 
4385 	block++; /* 100BaseTX-FD */
4386 	block->media_code = MEDIA_SYM_SCR_FD;
4387 	block->type = 0;
4388 	block->command = 0x6d;  /* PCS, PS, SCR on, media sense: bit6, pol=0 */
4389 	block->gprseqlen = 2;
4390 	block->rstseqlen = 0;
4391 	block->gprseq[0] = 0x13f;
4392 	block->gprseq[1] = 1;
4393 
4394 	block++; /* 100BaseTX */
4395 	block->media_code = MEDIA_SYM_SCR;
4396 	block->type = 0;
4397 	block->command = 0x406d; /* PCS, PS, SCR on, media sense: bit6, pol=0 */
4398 	block->gprseqlen = 2;
4399 	block->rstseqlen = 0;
4400 	block->gprseq[0] = 0x13f;
4401 	block->gprseq[1] = 1;
4402 
4403 	/* Generic legacy card with a PHY. */
4404 	leaf = &leaf_phylegacy;
4405 	leaf->block_count = 1;
4406 	leaf->mii_block = leaf->block;
4407 	leaf->default_block = &leaf->block[0];
4408 	leaf->is_static = 1;
4409 	block = leaf->block;
4410 	block->media_code = MEDIA_MII;
4411 	block->type = 1; /* MII Block type 1 */
4412 	block->command = 1; /* Port select */
4413 	block->gprseqlen = 0;
4414 	block->rstseqlen = 0;
4415 
4416 	/* ASANTE FAKE SROM */
4417 	leaf = &leaf_asante;
4418 	leaf->is_static = 1;
4419 	leaf->default_block = &leaf->block[0];
4420 	leaf->block_count = 1;
4421 	block = leaf->block;
4422 	block->media_code = MEDIA_MII;
4423 	block->type = 1; /* MII Block type 1 */
4424 	block->command = 1; /* Port select */
4425 	block->gprseqlen = 3;
4426 	block->rstseqlen = 0;
4427 	block->gprseq[0] = 0x180;
4428 	block->gprseq[1] = 0x80;
4429 	block->gprseq[2] = 0x0;
4430 
4431 	/* LEGACY 21041 card FAKE SROM */
4432 	leaf = &leaf_21041;
4433 	leaf->is_static = 1;
4434 	leaf->block_count = 4;  /* SIA Blocks for TP, TPfd, BNC, AUI */
4435 	leaf->default_block = &leaf->block[3];
4436 
4437 	block = leaf->block;
4438 	block->media_code = MEDIA_AUI;
4439 	block->type = 2;
4440 	block->un.sia.csr13 = 0xef09;
4441 	block->un.sia.csr14 = 0x0705;
4442 	block->un.sia.csr15 = 0x000e;
4443 
4444 	block++;
4445 	block->media_code = MEDIA_TP_FD;
4446 	block->type = 2;
4447 	block->un.sia.csr13 = 0xef01;
4448 	block->un.sia.csr14 = 0x7f3d;
4449 	block->un.sia.csr15 = 0x0008;
4450 
4451 	block++;
4452 	block->media_code = MEDIA_BNC;
4453 	block->type = 2;
4454 	block->un.sia.csr13 = 0xef09;
4455 	block->un.sia.csr14 = 0x0705;
4456 	block->un.sia.csr15 = 0x0006;
4457 
4458 	block++;
4459 	block->media_code = MEDIA_TP;
4460 	block->type = 2;
4461 	block->un.sia.csr13 = 0xef01;
4462 	block->un.sia.csr14 = 0x7f3f;
4463 	block->un.sia.csr15 = 0x0008;
4464 
4465 	/* LEGACY 21040 card FAKE SROM */
4466 	leaf = &leaf_21040;
4467 	leaf->is_static = 1;
4468 	leaf->block_count = 4;  /* SIA Blocks for TP, TPfd, BNC, AUI */
4469 	block = leaf->block;
4470 	block->media_code = MEDIA_AUI;
4471 	block->type = 2;
4472 	block->un.sia.csr13 = 0x8f09;
4473 	block->un.sia.csr14 = 0x0705;
4474 	block->un.sia.csr15 = 0x000e;
4475 	block++;
4476 	block->media_code = MEDIA_TP_FD;
4477 	block->type = 2;
4478 	block->un.sia.csr13 = 0x0f01;
4479 	block->un.sia.csr14 = 0x7f3d;
4480 	block->un.sia.csr15 = 0x0008;
4481 	block++;
4482 	block->media_code = MEDIA_BNC;
4483 	block->type = 2;
4484 	block->un.sia.csr13 = 0xef09;
4485 	block->un.sia.csr14 = 0x0705;
4486 	block->un.sia.csr15 = 0x0006;
4487 	block++;
4488 	block->media_code = MEDIA_TP;
4489 	block->type = 2;
4490 	block->un.sia.csr13 = 0x8f01;
4491 	block->un.sia.csr14 = 0x7f3f;
4492 	block->un.sia.csr15 = 0x0008;
4493 }
4494 
4495 static void
4496 dnet_print_srom(SROM_FORMAT *sr)
4497 {
4498 	int i;
4499 	uchar_t *a = sr->netaddr;
4500 	cmn_err(CE_NOTE, "SROM Dump: %d. ver %d, Num adapters %d,"
4501 	    " Addr:%x:%x:%x:%x:%x:%x",
4502 	    sr->init_from_srom, sr->version, sr->adapters,
4503 	    a[0], a[1], a[2], a[3], a[4], a[5]);
4504 
4505 	for (i = 0; i < sr->adapters; i++)
4506 		dnet_dump_leaf(sr->leaf+i);
4507 }
4508 
4509 static void
4510 dnet_dump_leaf(LEAF_FORMAT *leaf)
4511 {
4512 	int i;
4513 	cmn_err(CE_NOTE, "Leaf: Device %d, block_count %d, gpr: %x",
4514 	    leaf->device_number, leaf->block_count, leaf->gpr);
4515 	for (i = 0; i < leaf->block_count; i++)
4516 		dnet_dump_block(leaf->block+i);
4517 }
4518 
4519 static void
4520 dnet_dump_block(media_block_t *block)
4521 {
4522 	cmn_err(CE_NOTE, "Block(%p): type %x, media %s, command: %x ",
4523 	    (void *)block,
4524 	    block->type, media_str[block->media_code], block->command);
4525 	dnet_dumpbin("\tGPR Seq", (uchar_t *)block->gprseq, 2,
4526 	    block->gprseqlen *2);
4527 	dnet_dumpbin("\tGPR Reset", (uchar_t *)block->rstseq, 2,
4528 	    block->rstseqlen *2);
4529 	switch (block->type) {
4530 	case 1: case 3:
4531 		cmn_err(CE_NOTE, "\tMII Info: phy %d, nway %x, fdx"
4532 		    " %x, ttm %x, mediacap %x",
4533 		    block->un.mii.phy_num, block->un.mii.nwayadvert,
4534 		    block->un.mii.fdxmask, block->un.mii.ttmmask,
4535 		    block->un.mii.mediacaps);
4536 		break;
4537 	case 2:
4538 		cmn_err(CE_NOTE, "\tSIA Regs: CSR13:%x, CSR14:%x, CSR15:%x",
4539 		    block->un.sia.csr13, block->un.sia.csr14,
4540 		    block->un.sia.csr15);
4541 		break;
4542 	}
4543 }
4544 
4545 
4546 /* Utility to print out binary info dumps. Handy for SROMs, etc */
4547 
4548 static int
4549 hexcode(unsigned val)
4550 {
4551 	if (val <= 9)
4552 		return (val +'0');
4553 	if (val <= 15)
4554 		return (val + 'a' - 10);
4555 	return (-1);
4556 }
4557 
4558 static void
4559 dnet_dumpbin(char *msg, unsigned char *data, int size, int len)
4560 {
4561 	char hex[128], *p = hex;
4562 	char ascii[128], *q = ascii;
4563 	int i, j;
4564 
4565 	if (!len)
4566 		return;
4567 
4568 	for (i = 0; i < len; i += size) {
4569 		for (j = size - 1; j >= 0; j--) { /* PORTABILITY: byte order */
4570 			*p++ = hexcode(data[i+j] >> 4);
4571 			*p++ = hexcode(data[i+j] & 0xf);
4572 			*q++ = (data[i+j] < 32 || data[i+j] > 127) ?
4573 			    '.' : data[i+j];
4574 		}
4575 		*p++ = ' ';
4576 		if (q-ascii >= 8) {
4577 			*p = *q = 0;
4578 			cmn_err(CE_NOTE, "%s: %s\t%s", msg, hex, ascii);
4579 			p = hex;
4580 			q = ascii;
4581 		}
4582 	}
4583 	if (p != hex) {
4584 		while ((p - hex) < 8*3)
4585 			*p++ = ' ';
4586 		*p = *q = 0;
4587 		cmn_err(CE_NOTE, "%s: %s\t%s", msg, hex, ascii);
4588 	}
4589 }
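
/*
 * Usage sketch only: "size" is the width of each item in bytes (items are
 * printed most-significant byte first) and "len" is the total number of
 * bytes, so dnet_dump_block() above passes 2 and gprseqlen * 2 to dump
 * 16-bit GPR sequences.  The DNET_EXAMPLES guard and example_* name are
 * hypothetical.
 */
#ifdef DNET_EXAMPLES
static void
example_dumpbin_usage(void)
{
	uint16_t gprseq[2] = { 0x013f, 0x0026 };

	/* dumps two 16-bit items: hex columns plus an ASCII column */
	dnet_dumpbin("GPR Seq", (uchar_t *)gprseq, 2, 2 * 2);
}
#endif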
4590 
4591 #ifdef DNETDEBUG
4592 void
4593 dnet_usectimeout(struct dnetinstance *dnetp, uint32_t usecs, int contin,
4594     timercb_t cback)
4595 {
4596 	mutex_enter(&dnetp->intrlock);
4597 	dnetp->timer.start_ticks = (usecs * 100) / 8192;
4598 	dnetp->timer.cb = cback;
4599 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, GP_TIMER_REG),
4600 	    dnetp->timer.start_ticks | (contin ? GPTIMER_CONT : 0));
4601 	if (dnetp->timer.cb)
4602 		enable_interrupts(dnetp);
4603 	mutex_exit(&dnetp->intrlock);
4604 }
4605 
4606 uint32_t
4607 dnet_usecelapsed(struct dnetinstance *dnetp)
4608 {
4609 	uint32_t ticks = dnetp->timer.start_ticks -
4610 	    (ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, GP_TIMER_REG)) &
4611 	    0xffff);
4612 	return ((ticks * 8192) / 100);
4613 }
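
/*
 * Worked example for the two helpers above: one general-purpose timer tick
 * is treated as 8192/100 = 81.92 usec, so a round trip through
 * usecs -> ticks -> usecs truncates to a multiple of that tick.  Requesting
 * 1000000 usec programs 1000000 * 100 / 8192 = 12207 ticks, which
 * dnet_usecelapsed() reads back as 12207 * 8192 / 100 = 999997 usec once
 * the timer has counted down to zero.
 */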
4614 
4615 /* ARGSUSED */
4616 void
4617 dnet_timestamp(struct dnetinstance *dnetp,  char *buf)
4618 {
4619 	uint32_t elapsed = dnet_usecelapsed(dnetp);
4620 	char loc[32], *p = loc;
4621 	int firstdigit = 1;
4622 	uint32_t divisor;
4623 
4624 	while (*p++ = *buf++)
4625 		;
4626 	p--;
4627 
4628 	for (divisor = 1000000000; divisor /= 10; ) {
4629 		int digit = (elapsed / divisor);
4630 		elapsed -= digit * divisor;
4631 		if (!firstdigit || digit) {
4632 			*p++ = digit + '0';
4633 			firstdigit = 0;
4634 		}
4635 
4636 	}
4637 
4638 	/* Actual zero, output it */
4639 	if (firstdigit)
4640 		*p++ = '0';
4641 
4642 	*p++ = '-';
4643 	*p++ = '>';
4644 	*p++ = 0;
4645 
4646 	printf(loc);
4647 	dnet_usectimeout(dnetp, 1000000, 0, 0);
4648 }
4649 
4650 #endif
4651