xref: /titanic_51/usr/src/uts/intel/io/dnet/dnet.c (revision 67dbe2be0c0f1e2eb428b89088bb5667e8f0b9f6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 /*
29  * dnet -- DEC 21x4x
30  *
31  * Currently supports:
32  *	21040, 21041, 21140, 21142, 21143
33  *	SROM versions 1, 3, 3.03, 4
34  *	TP, AUI, BNC, 100BASETX, 100BASET4
35  *
36  * XXX NEEDSWORK
37  *	All media SHOULD work, FX is untested
38  *
39  * Depends on the Generic LAN Driver utility functions in /kernel/misc/mac
40  */
41 
42 #define	BUG_4010796	/* See 4007871, 4010796 */
43 
44 #include <sys/types.h>
45 #include <sys/errno.h>
46 #include <sys/param.h>
47 #include <sys/stropts.h>
48 #include <sys/stream.h>
49 #include <sys/kmem.h>
50 #include <sys/conf.h>
51 #include <sys/devops.h>
52 #include <sys/ksynch.h>
53 #include <sys/stat.h>
54 #include <sys/modctl.h>
55 #include <sys/debug.h>
56 #include <sys/dlpi.h>
57 #include <sys/ethernet.h>
58 #include <sys/vlan.h>
59 #include <sys/mac.h>
60 #include <sys/mac_ether.h>
61 #include <sys/mac_provider.h>
62 #include <sys/pci.h>
63 #include <sys/ddi.h>
64 #include <sys/sunddi.h>
65 #include <sys/strsun.h>
66 
67 #include "dnet_mii.h"
68 #include "dnet.h"
69 
70 /*
71  *	Declarations and Module Linkage
72  */
73 
74 #define	IDENT	"DNET 21x4x"
75 
76 /*
77  * #define	DNET_NOISY
78  * #define	SROMDEBUG
79  * #define	SROMDUMPSTRUCTURES
80  */
81 
82 #ifdef DNETDEBUG
83 #ifdef DNET_NOISY
84 int	dnetdebug = -1;
85 #else
86 int	dnetdebug = 0;
87 #endif
88 #endif
89 
90 /* used for messages allocated using desballoc() */
91 struct free_ptr {
92 	struct free_rtn	free_rtn;
93 	caddr_t buf;
94 };
95 
96 struct rbuf_list {
97 	struct rbuf_list	*rbuf_next;	/* next in the list */
98 	caddr_t			rbuf_vaddr;	/* virtual addr of the buf */
99 	uint32_t		rbuf_paddr;	/* physical addr of the buf */
100 	uint32_t		rbuf_endpaddr;	/* physical addr at the end */
101 	ddi_dma_handle_t	rbuf_dmahdl;	/* dma handle */
102 	ddi_acc_handle_t	rbuf_acchdl;	/* handle for DDI functions */
103 };
104 
105 /* Required system entry points */
106 static int dnet_probe(dev_info_t *);
107 static int dnet_attach(dev_info_t *, ddi_attach_cmd_t);
108 static int dnet_detach(dev_info_t *, ddi_detach_cmd_t);
109 static int dnet_quiesce(dev_info_t *);
110 
111 /* Required driver entry points for GLDv3 */
112 static int dnet_m_start(void *);
113 static void dnet_m_stop(void *);
114 static int dnet_m_getstat(void *, uint_t, uint64_t *);
115 static int dnet_m_setpromisc(void *, boolean_t);
116 static int dnet_m_multicst(void *, boolean_t, const uint8_t *);
117 static int dnet_m_unicst(void *, const uint8_t *);
118 static mblk_t *dnet_m_tx(void *, mblk_t *);
119 
120 static uint_t dnet_intr(caddr_t);
121 
122 /* Internal functions used by the above entry points */
123 static void write_gpr(struct dnetinstance *dnetp, uint32_t val);
124 static void dnet_reset_board(struct dnetinstance *);
125 static void dnet_init_board(struct dnetinstance *);
126 static void dnet_chip_init(struct dnetinstance *);
127 static uint32_t hashindex(const uint8_t *);
128 static int dnet_start(struct dnetinstance *);
129 static int dnet_set_addr(struct dnetinstance *);
130 
131 static boolean_t dnet_send(struct dnetinstance *, mblk_t *);
132 
133 static void dnet_getp(struct dnetinstance *);
134 static void update_rx_stats(struct dnetinstance *, int);
135 static void update_tx_stats(struct dnetinstance *, int);
136 
137 /* Media Selection Setup Routines */
138 static void set_gpr(struct dnetinstance *);
139 static void set_opr(struct dnetinstance *);
140 static void set_sia(struct dnetinstance *);
141 
142 /* Buffer Management Routines */
143 static int dnet_alloc_bufs(struct dnetinstance *);
144 static void dnet_free_bufs(struct dnetinstance *);
145 static void dnet_init_txrx_bufs(struct dnetinstance *);
146 static int alloc_descriptor(struct dnetinstance *);
147 static void dnet_reclaim_Tx_desc(struct dnetinstance *);
148 static int dnet_rbuf_init(dev_info_t *, int);
149 static int dnet_rbuf_destroy();
150 static struct rbuf_list *dnet_rbuf_alloc(dev_info_t *, int);
151 static void dnet_rbuf_free(caddr_t);
152 static void dnet_freemsg_buf(struct free_ptr *);
153 
154 static void setup_block(struct dnetinstance *);
155 
156 /* SROM read functions */
157 static int dnet_read_srom(dev_info_t *, int, ddi_acc_handle_t, caddr_t,
158     uchar_t *, int);
159 static void dnet_read21040addr(dev_info_t *, ddi_acc_handle_t, caddr_t,
160     uchar_t *, int *);
161 static void dnet_read21140srom(ddi_acc_handle_t, caddr_t, uchar_t *, int);
162 static int get_alternative_srom_image(dev_info_t *, uchar_t *, int);
163 static void dnet_print_srom(SROM_FORMAT *sr);
164 static void dnet_dump_leaf(LEAF_FORMAT *leaf);
165 static void dnet_dump_block(media_block_t *block);
166 #ifdef BUG_4010796
167 static void set_alternative_srom_image(dev_info_t *, uchar_t *, int);
168 static int dnet_hack(dev_info_t *);
169 #endif
170 
171 static int dnet_hack_interrupts(struct dnetinstance *, int);
172 static int dnet_detach_hacked_interrupt(dev_info_t *devinfo);
173 static void enable_interrupts(struct dnetinstance *);
174 
175 /* SROM parsing functions */
176 static void dnet_parse_srom(struct dnetinstance *dnetp, SROM_FORMAT *sr,
177     uchar_t *vi);
178 static void parse_controller_leaf(struct dnetinstance *dnetp, LEAF_FORMAT *leaf,
179     uchar_t *vi);
180 static uchar_t *parse_media_block(struct dnetinstance *dnetp,
181     media_block_t *block, uchar_t *vi);
182 static int check_srom_valid(uchar_t *);
183 static void dnet_dumpbin(char *msg, uchar_t *, int size, int len);
184 static void setup_legacy_blocks();
185 /* Active Media Determination Routines */
186 static void find_active_media(struct dnetinstance *);
187 static int send_test_packet(struct dnetinstance *);
188 static int dnet_link_sense(struct dnetinstance *);
189 
190 /* PHY MII Routines */
191 static ushort_t dnet_mii_read(dev_info_t *dip, int phy_addr, int reg_num);
192 static void dnet_mii_write(dev_info_t *dip, int phy_addr, int reg_num,
193 			int reg_dat);
194 static void write_mii(struct dnetinstance *, uint32_t, int);
195 static void mii_tristate(struct dnetinstance *);
196 static void do_phy(struct dnetinstance *);
197 static void dnet_mii_link_cb(dev_info_t *, int, enum mii_phy_state);
198 static void dnet_mii_link_up(struct dnetinstance *);
199 static void set_leaf(SROM_FORMAT *sr, LEAF_FORMAT *leaf);
200 
201 #ifdef DNETDEBUG
202 uint32_t dnet_usecelapsed(struct dnetinstance *dnetp);
203 void dnet_timestamp(struct dnetinstance *, char *);
204 void dnet_usectimeout(struct dnetinstance *, uint32_t, int, timercb_t);
205 #endif
206 static char *media_str[] = {
207 	"10BaseT",
208 	"10Base2",
209 	"10Base5",
210 	"100BaseTX",
211 	"10BaseT FD",
212 	"100BaseTX FD",
213 	"100BaseT4",
214 	"100BaseFX",
215 	"100BaseFX FD",
216 	"MII"
217 };
218 
219 /* default SROM info for cards with no SROMs */
220 static LEAF_FORMAT leaf_default_100;
221 static LEAF_FORMAT leaf_asante;
222 static LEAF_FORMAT leaf_phylegacy;
223 static LEAF_FORMAT leaf_cogent_100;
224 static LEAF_FORMAT leaf_21041;
225 static LEAF_FORMAT leaf_21040;
226 
227 /* rx buffer size (rounded up to a multiple of 4) */
228 int rx_buf_size = (ETHERMAX + ETHERFCSL + VLAN_TAGSZ + 3) & ~3;
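/*
 * For illustration, assuming the usual values ETHERMAX = 1514, ETHERFCSL = 4
 * and VLAN_TAGSZ = 4, this works out to (1522 + 3) & ~3 = 1524 bytes per
 * receive buffer.
 */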
229 
230 int max_rx_desc_21040 = MAX_RX_DESC_21040;
231 int max_rx_desc_21140 = MAX_RX_DESC_21140;
232 int max_tx_desc = MAX_TX_DESC;
233 int dnet_xmit_threshold = MAX_TX_DESC >> 2;	/* XXX need tuning? */
234 
235 static kmutex_t dnet_rbuf_lock;		/* mutex to protect rbuf_list data */
236 
237 /* used for buffers allocated by ddi_dma_mem_alloc() */
238 static ddi_dma_attr_t dma_attr = {
239 	DMA_ATTR_V0,		/* dma_attr version */
240 	0,			/* dma_attr_addr_lo */
241 	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
242 	0x7FFFFFFF,		/* dma_attr_count_max */
243 	4,			/* dma_attr_align */
244 	0x3F,			/* dma_attr_burstsizes */
245 	1,			/* dma_attr_minxfer */
246 	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
247 	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
248 	1,			/* dma_attr_sgllen */
249 	1,			/* dma_attr_granular */
250 	0,			/* dma_attr_flags */
251 };
252 
253 /* used for buffers allocated for rbuf, allow 2 cookies */
254 static ddi_dma_attr_t dma_attr_rb = {
255 	DMA_ATTR_V0,		/* dma_attr version */
256 	0,			/* dma_attr_addr_lo */
257 	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
258 	0x7FFFFFFF,		/* dma_attr_count_max */
259 	4,			/* dma_attr_align */
260 	0x3F,			/* dma_attr_burstsizes */
261 	1,			/* dma_attr_minxfer */
262 	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
263 	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
264 	2,			/* dma_attr_sgllen */
265 	1,			/* dma_attr_granular */
266 	0,			/* dma_attr_flags */
267 };
268 /* used for buffers which are NOT from ddi_dma_mem_alloc() - xmit side */
269 static ddi_dma_attr_t dma_attr_tx = {
270 	DMA_ATTR_V0,		/* dma_attr version */
271 	0,			/* dma_attr_addr_lo */
272 	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
273 	0x7FFFFFFF,		/* dma_attr_count_max */
274 	1,			/* dma_attr_align */
275 	0x3F,			/* dma_attr_burstsizes */
276 	1,			/* dma_attr_minxfer */
277 	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
278 	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
279 	0x7FFF,			/* dma_attr_sgllen */
280 	1,			/* dma_attr_granular */
281 	0,			/* dma_attr_flags */
282 };
283 
284 static ddi_device_acc_attr_t accattr = {
285 	DDI_DEVICE_ATTR_V0,
286 	DDI_NEVERSWAP_ACC,
287 	DDI_STRICTORDER_ACC,
288 };
289 
290 uchar_t dnet_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
291 
292 /* Standard Module linkage initialization for a Streams driver */
293 extern struct mod_ops mod_driverops;
294 
295 DDI_DEFINE_STREAM_OPS(dnet_devops, nulldev, dnet_probe, dnet_attach,
296     dnet_detach, nodev, NULL, D_MP, NULL, dnet_quiesce);
297 
298 static struct modldrv dnet_modldrv = {
299 	&mod_driverops,		/* Type of module.  This one is a driver */
300 	IDENT,			/* short description */
301 	&dnet_devops		/* driver specific ops */
302 };
303 
304 static struct modlinkage dnet_modlinkage = {
305 	MODREV_1,		/* ml_rev */
306 	{ &dnet_modldrv, NULL }	/* ml_linkage */
307 };
308 
309 static mac_callbacks_t dnet_m_callbacks = {
310 	0,			/* mc_callbacks */
311 	dnet_m_getstat,		/* mc_getstat */
312 	dnet_m_start,		/* mc_start */
313 	dnet_m_stop,		/* mc_stop */
314 	dnet_m_setpromisc,	/* mc_setpromisc */
315 	dnet_m_multicst,	/* mc_multicst */
316 	dnet_m_unicst,		/* mc_unicst */
317 	dnet_m_tx,		/* mc_tx */
318 	NULL,			/* mc_ioctl */
319 	NULL,			/* mc_getcapab */
320 	NULL,			/* mc_open */
321 	NULL			/* mc_close */
322 };
323 
324 /*
325  * Passed to the hacked interrupt for multiport Cogent and ZNYX cards with
326  * dodgy interrupt routing
327  */
328 #define	MAX_INST 8 /* Maximum instances on a multiport adapter. */
329 struct hackintr_inf
330 {
331 	struct dnetinstance *dnetps[MAX_INST]; /* dnetps for each port */
332 	dev_info_t *devinfo;		    /* Devinfo of the primary device */
333 	kmutex_t lock;
334 		/* Ensures the interrupt doesn't get called while detaching */
335 };
336 static char hackintr_propname[] = "InterruptData";
337 static char macoffset_propname[] = "MAC_offset";
338 static char speed_propname[] = "speed";
339 static char ofloprob_propname[] = "dmaworkaround";
340 static char duplex_propname[] = "full-duplex"; /* Must agree with MII */
341 static char printsrom_propname[] = "print-srom";
342 
343 static uint_t dnet_hack_intr(struct hackintr_inf *);
344 
345 int
346 _init(void)
347 {
348 	int i;
349 
350 	/* Configure fake sroms for legacy cards */
351 	mutex_init(&dnet_rbuf_lock, NULL, MUTEX_DRIVER, NULL);
352 	setup_legacy_blocks();
353 
354 	mac_init_ops(&dnet_devops, "dnet");
355 
356 	if ((i = mod_install(&dnet_modlinkage)) != 0) {
357 		mac_fini_ops(&dnet_devops);
358 		mutex_destroy(&dnet_rbuf_lock);
359 	}
360 	return (i);
361 }
362 
363 int
364 _fini(void)
365 {
366 	int i;
367 
368 	if ((i = mod_remove(&dnet_modlinkage)) == 0) {
369 		mac_fini_ops(&dnet_devops);
370 
371 		/* loop until all the receive buffers are freed */
372 		while (dnet_rbuf_destroy() != 0) {
373 			delay(drv_usectohz(100000));
374 #ifdef DNETDEBUG
375 			if (dnetdebug & DNETDDI)
376 				cmn_err(CE_WARN, "dnet _fini delay");
377 #endif
378 		}
379 		mutex_destroy(&dnet_rbuf_lock);
380 	}
381 	return (i);
382 }
383 
384 int
385 _info(struct modinfo *modinfop)
386 {
387 	return (mod_info(&dnet_modlinkage, modinfop));
388 }
389 
390 /*
391  * probe(9E) -- Determine if a device is present
392  */
393 static int
394 dnet_probe(dev_info_t *devinfo)
395 {
396 	ddi_acc_handle_t handle;
397 	uint16_t	vendorid;
398 	uint16_t	deviceid;
399 
400 	if (pci_config_setup(devinfo, &handle) != DDI_SUCCESS)
401 		return (DDI_PROBE_FAILURE);
402 
403 	vendorid = pci_config_get16(handle, PCI_CONF_VENID);
404 
405 	if (vendorid != DEC_VENDOR_ID) {
406 		pci_config_teardown(&handle);
407 		return (DDI_PROBE_FAILURE);
408 	}
409 
410 	deviceid = pci_config_get16(handle, PCI_CONF_DEVID);
411 	switch (deviceid) {
412 	case DEVICE_ID_21040:
413 	case DEVICE_ID_21041:
414 	case DEVICE_ID_21140:
415 	case DEVICE_ID_21143: /* And 142 */
416 		break;
417 	default:
418 		pci_config_teardown(&handle);
419 		return (DDI_PROBE_FAILURE);
420 	}
421 
422 	pci_config_teardown(&handle);
423 #ifndef BUG_4010796
424 	return (DDI_PROBE_SUCCESS);
425 #else
426 	return (dnet_hack(devinfo));
427 #endif
428 }
429 
430 #ifdef BUG_4010796
431 /*
432  * If we have a device, but we cannot presently access its SROM data,
433  * then we return DDI_PROBE_PARTIAL and hope that sometime later we
434  * will be able to get at the SROM data.  This can only happen if we
435  * are a secondary port with no SROM, and the bootstrap failed to set
436  * our DNET_SROM property, and our primary sibling has not yet probed.
437  */
438 static int
439 dnet_hack(dev_info_t *devinfo)
440 {
441 	uchar_t 	vendor_info[SROM_SIZE];
442 	uint32_t	csr;
443 	uint16_t	deviceid;
444 	ddi_acc_handle_t handle;
445 	uint32_t	retval;
446 	int		secondary;
447 	ddi_acc_handle_t io_handle;
448 	caddr_t		io_reg;
449 
450 #define	DNET_PCI_RNUMBER	1
451 
452 	if (pci_config_setup(devinfo, &handle) != DDI_SUCCESS)
453 		return (DDI_PROBE_FAILURE);
454 
455 	deviceid = pci_config_get16(handle, PCI_CONF_DEVID);
456 
457 	/*
458 	 * Turn on Master Enable and IO Enable bits.
459 	 */
460 	csr = pci_config_get32(handle, PCI_CONF_COMM);
461 	pci_config_put32(handle, PCI_CONF_COMM, (csr |PCI_COMM_ME|PCI_COMM_IO));
462 
463 	pci_config_teardown(&handle);
464 
465 	/* Now map I/O register */
466 	if (ddi_regs_map_setup(devinfo, DNET_PCI_RNUMBER,
467 	    &io_reg, 0, 0, &accattr, &io_handle) != DDI_SUCCESS) {
468 		return (DDI_PROBE_FAILURE);
469 	}
470 
471 	/*
472 	 * Reset the chip
473 	 */
474 	ddi_put32(io_handle, REG32(io_reg, BUS_MODE_REG), SW_RESET);
475 	drv_usecwait(3);
476 	ddi_put32(io_handle, REG32(io_reg, BUS_MODE_REG), 0);
477 	drv_usecwait(8);
478 
479 	secondary = dnet_read_srom(devinfo, deviceid, io_handle,
480 	    io_reg, vendor_info, sizeof (vendor_info));
481 
482 	switch (secondary) {
483 	case -1:
484 		/* We can't access our SROM data! */
485 		retval = DDI_PROBE_PARTIAL;
486 		break;
487 	case 0:
488 		retval = DDI_PROBE_SUCCESS;
489 		break;
490 	default:
491 		retval = DDI_PROBE_SUCCESS;
492 	}
493 
494 	ddi_regs_map_free(&io_handle);
495 	return (retval);
496 }
497 #endif /* BUG_4010796 */
498 
499 /*
500  * attach(9E) -- Attach a device to the system
501  *
502  * Called once for each board successfully probed.
503  */
504 static int
505 dnet_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
506 {
507 	uint16_t revid;
508 	struct dnetinstance 	*dnetp;		/* Our private device info */
509 	mac_register_t		*macp;
510 	uchar_t 		vendor_info[SROM_SIZE];
511 	uint32_t		csr;
512 	uint16_t		deviceid;
513 	ddi_acc_handle_t 	handle;
514 	int			secondary;
515 
516 #define	DNET_PCI_RNUMBER	1
517 
518 	switch (cmd) {
519 	case DDI_ATTACH:
520 		break;
521 
522 	case DDI_RESUME:
523 		/* Get the driver private (dnetinstance) structure */
524 		dnetp = ddi_get_driver_private(devinfo);
525 
526 		mutex_enter(&dnetp->intrlock);
527 		mutex_enter(&dnetp->txlock);
528 		dnet_reset_board(dnetp);
529 		dnet_init_board(dnetp);
530 		dnetp->suspended = B_FALSE;
531 
532 		if (dnetp->running) {
533 			dnetp->need_tx_update = B_FALSE;
534 			mutex_exit(&dnetp->txlock);
535 			(void) dnet_start(dnetp);
536 			mutex_exit(&dnetp->intrlock);
537 			mac_tx_update(dnetp->mac_handle);
538 		} else {
539 			mutex_exit(&dnetp->txlock);
540 			mutex_exit(&dnetp->intrlock);
541 		}
542 		return (DDI_SUCCESS);
543 	default:
544 		return (DDI_FAILURE);
545 	}
546 
547 	if (pci_config_setup(devinfo, &handle) != DDI_SUCCESS)
548 		return (DDI_FAILURE);
549 
550 	deviceid = pci_config_get16(handle, PCI_CONF_DEVID);
551 	switch (deviceid) {
552 	case DEVICE_ID_21040:
553 	case DEVICE_ID_21041:
554 	case DEVICE_ID_21140:
555 	case DEVICE_ID_21143: /* And 142 */
556 		break;
557 	default:
558 		pci_config_teardown(&handle);
559 		return (DDI_FAILURE);
560 	}
561 
562 	/*
563 	 * Turn on Master Enable and IO Enable bits.
564 	 */
565 	csr = pci_config_get32(handle, PCI_CONF_COMM);
566 	pci_config_put32(handle, PCI_CONF_COMM, (csr |PCI_COMM_ME|PCI_COMM_IO));
567 
568 	/* Make sure the device is not asleep */
569 	csr = pci_config_get32(handle, PCI_DNET_CONF_CFDD);
570 	pci_config_put32(handle, PCI_DNET_CONF_CFDD,
571 	    csr &  ~(CFDD_SLEEP|CFDD_SNOOZE));
572 
573 	revid = pci_config_get8(handle, PCI_CONF_REVID);
574 	pci_config_teardown(&handle);
575 
576 	dnetp = kmem_zalloc(sizeof (struct dnetinstance), KM_SLEEP);
577 	ddi_set_driver_private(devinfo, dnetp);
578 
579 	/* Now map I/O register */
580 	if (ddi_regs_map_setup(devinfo, DNET_PCI_RNUMBER, &dnetp->io_reg,
581 	    0, 0, &accattr, &dnetp->io_handle) != DDI_SUCCESS) {
582 		kmem_free(dnetp, sizeof (struct dnetinstance));
583 		return (DDI_FAILURE);
584 	}
585 
586 	dnetp->devinfo = devinfo;
587 	dnetp->board_type = deviceid;
588 
589 	/*
590 	 * Get the iblock cookie with which to initialize the mutexes.
591 	 */
592 	if (ddi_get_iblock_cookie(devinfo, 0, &dnetp->icookie)
593 	    != DDI_SUCCESS)
594 		goto fail;
595 
596 	/*
597 	 * Initialize mutexes for this device.
598 	 * Do this before registering the interrupt handler, to avoid a
599 	 * window in which the interrupt handler could use an uninitialized
600 	 * mutex.
601 	 * Lock ordering rules: always lock intrlock first before
602 	 * txlock if both are required.
603 	 */
604 	mutex_init(&dnetp->txlock, NULL, MUTEX_DRIVER, dnetp->icookie);
605 	mutex_init(&dnetp->intrlock, NULL, MUTEX_DRIVER, dnetp->icookie);
606 
607 	/*
608 	 * Get the BNC/TP indicator from the conf file for 21040
609 	 */
610 	dnetp->bnc_indicator =
611 	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
612 	    "bncaui", -1);
613 
614 	/*
615 	 * For the 21140, check the data rate set in the conf file.  The
616 	 * default is 100Mb/s.  Disallow connections at settings that would
617 	 * conflict with what's in the conf file.
618 	 */
619 	dnetp->speed =
620 	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
621 	    speed_propname, 0);
622 	dnetp->full_duplex =
623 	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
624 	    duplex_propname, -1);
625 
626 	if (dnetp->speed == 100) {
627 		dnetp->disallowed_media |= (1UL<<MEDIA_TP) | (1UL<<MEDIA_TP_FD);
628 	} else if (dnetp->speed == 10) {
629 		dnetp->disallowed_media |=
630 		    (1UL<<MEDIA_SYM_SCR) | (1UL<<MEDIA_SYM_SCR_FD);
631 	}
632 
633 	if (dnetp->full_duplex == 1) {
634 		dnetp->disallowed_media |=
635 		    (1UL<<MEDIA_TP) | (1UL<<MEDIA_SYM_SCR);
636 	} else if (dnetp->full_duplex == 0) {
637 		dnetp->disallowed_media |=
638 		    (1UL<<MEDIA_TP_FD) | (1UL<<MEDIA_SYM_SCR_FD);
639 	}
640 
641 	if (dnetp->bnc_indicator == 0) /* Disable BNC and AUI media */
642 		dnetp->disallowed_media |= (1UL<<MEDIA_BNC) | (1UL<<MEDIA_AUI);
643 	else if (dnetp->bnc_indicator == 1) /* Force BNC only */
644 		dnetp->disallowed_media =  (uint32_t)~(1U<<MEDIA_BNC);
645 	else if (dnetp->bnc_indicator == 2) /* Force AUI only */
646 		dnetp->disallowed_media = (uint32_t)~(1U<<MEDIA_AUI);
647 
648 	dnet_reset_board(dnetp);
649 
650 	secondary = dnet_read_srom(devinfo, dnetp->board_type, dnetp->io_handle,
651 	    dnetp->io_reg, vendor_info, sizeof (vendor_info));
652 
653 	if (secondary == -1) /* ASSERT (vendor_info not big enough) */
654 		goto fail1;
655 
656 	dnet_parse_srom(dnetp, &dnetp->sr, vendor_info);
657 
658 	if (ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
659 	    printsrom_propname, 0))
660 		dnet_print_srom(&dnetp->sr);
661 
662 	dnetp->sr.netaddr[ETHERADDRL-1] += secondary;	/* unique ether addr */
663 
664 	BCOPY((caddr_t)dnetp->sr.netaddr,
665 	    (caddr_t)dnetp->vendor_addr, ETHERADDRL);
666 
667 	BCOPY((caddr_t)dnetp->sr.netaddr,
668 	    (caddr_t)dnetp->curr_macaddr, ETHERADDRL);
669 
670 	/*
671 	 * determine whether to implement workaround from DEC
672 	 * for DMA overrun errata.
673 	 */
674 	dnetp->overrun_workaround =
675 	    ((dnetp->board_type == DEVICE_ID_21140 && revid >= 0x20) ||
676 	    (dnetp->board_type == DEVICE_ID_21143 && revid <= 0x30)) ? 1 : 0;
677 
678 	dnetp->overrun_workaround =
679 	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
680 	    ofloprob_propname, dnetp->overrun_workaround);
681 
682 	/*
683 	 * Add the interrupt handler if dnet_hack_interrupts() returns 0.
684 	 * Otherwise dnet_hack_interrupts() itself adds the handler.
685 	 */
686 	if (!dnet_hack_interrupts(dnetp, secondary)) {
687 		(void) ddi_add_intr(devinfo, 0, NULL,
688 		    NULL, dnet_intr, (caddr_t)dnetp);
689 	}
690 
691 	dnetp->max_tx_desc = max_tx_desc;
692 	dnetp->max_rx_desc = max_rx_desc_21040;
693 	if (dnetp->board_type != DEVICE_ID_21040 &&
694 	    dnetp->board_type != DEVICE_ID_21041 &&
695 	    dnetp->speed != 10)
696 		dnetp->max_rx_desc = max_rx_desc_21140;
697 
698 	/* Allocate the TX and RX descriptors/buffers. */
699 	if (dnet_alloc_bufs(dnetp) == FAILURE) {
700 		cmn_err(CE_WARN, "DNET: Not enough DMA memory for buffers.");
701 		goto fail2;
702 	}
703 
704 	/*
705 	 *	Register ourselves with the GLDv3 interface
706 	 */
707 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
708 		goto fail2;
709 
710 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
711 	macp->m_driver = dnetp;
712 	macp->m_dip = devinfo;
713 	macp->m_src_addr = dnetp->curr_macaddr;
714 	macp->m_callbacks = &dnet_m_callbacks;
715 	macp->m_min_sdu = 0;
716 	macp->m_max_sdu = ETHERMTU;
717 	macp->m_margin = VLAN_TAGSZ;
718 
719 	if (mac_register(macp, &dnetp->mac_handle) == 0) {
720 		mac_free(macp);
721 
722 		mutex_enter(&dnetp->intrlock);
723 
724 		dnetp->phyaddr = -1;
725 		if (dnetp->board_type == DEVICE_ID_21140 ||
726 		    dnetp->board_type == DEVICE_ID_21143)
727 			do_phy(dnetp);	/* Initialize the PHY, if any */
728 		find_active_media(dnetp);
729 
730 		/* if the chosen media is non-MII, stop the port monitor */
731 		if (dnetp->selected_media_block->media_code != MEDIA_MII &&
732 		    dnetp->mii != NULL) {
733 			mii_destroy(dnetp->mii);
734 			dnetp->mii = NULL;
735 			dnetp->phyaddr = -1;
736 		}
737 
738 #ifdef DNETDEBUG
739 		if (dnetdebug & DNETSENSE)
740 			cmn_err(CE_NOTE, "dnet: link configured : %s",
741 			    media_str[dnetp->selected_media_block->media_code]);
742 #endif
743 		bzero(dnetp->setup_buf_vaddr, SETUPBUF_SIZE);
744 
745 		dnet_reset_board(dnetp);
746 		dnet_init_board(dnetp);
747 
748 		mutex_exit(&dnetp->intrlock);
749 
750 		(void) dnet_m_unicst(dnetp, dnetp->curr_macaddr);
751 		(void) dnet_m_multicst(dnetp, B_TRUE, dnet_broadcastaddr);
752 
753 		return (DDI_SUCCESS);
754 	}
755 
756 	mac_free(macp);
757 fail2:
758 	/* XXX function return value ignored */
759 	/*
760 	 * dnet_detach_hacked_interrupt() will remove
761 	 * interrupt for the non-hacked case also.
762 	 */
763 	(void) dnet_detach_hacked_interrupt(devinfo);
764 	dnet_free_bufs(dnetp);
765 fail1:
766 	mutex_destroy(&dnetp->txlock);
767 	mutex_destroy(&dnetp->intrlock);
768 fail:
769 	ddi_regs_map_free(&dnetp->io_handle);
770 	kmem_free(dnetp, sizeof (struct dnetinstance));
771 	return (DDI_FAILURE);
772 }
773 
774 /*
775  * detach(9E) -- Detach a device from the system
776  */
777 static int
778 dnet_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
779 {
780 	int32_t rc;
781 	struct dnetinstance *dnetp;		/* Our private device info */
782 	int32_t		proplen;
783 
784 	/* Get the driver private (dnetinstance) structure */
785 	dnetp = ddi_get_driver_private(devinfo);
786 
787 	switch (cmd) {
788 	case DDI_DETACH:
789 		break;
790 
791 	case DDI_SUSPEND:
792 		/*
793 		 * NB: dnetp->suspended can only be modified (marked true)
794 		 * if both intrlock and txlock are held.  This keeps both
795 		 * tx and rx code paths excluded.
796 		 */
797 		mutex_enter(&dnetp->intrlock);
798 		mutex_enter(&dnetp->txlock);
799 		dnetp->suspended = B_TRUE;
800 		dnet_reset_board(dnetp);
801 		mutex_exit(&dnetp->txlock);
802 		mutex_exit(&dnetp->intrlock);
803 		return (DDI_SUCCESS);
804 
805 	default:
806 		return (DDI_FAILURE);
807 	}
808 
809 	/*
810 	 *	Unregister ourselves from the GLDv3 interface
811 	 */
812 	if (mac_unregister(dnetp->mac_handle) != 0)
813 		return (DDI_FAILURE);
814 
815 	/* stop the board if it is running */
816 	dnet_reset_board(dnetp);
817 
818 	if ((rc = dnet_detach_hacked_interrupt(devinfo)) != DDI_SUCCESS)
819 		return (rc);
820 
821 	if (dnetp->mii != NULL)
822 		mii_destroy(dnetp->mii);
823 
824 	/* Free leaf information */
825 	set_leaf(&dnetp->sr, NULL);
826 
827 	ddi_regs_map_free(&dnetp->io_handle);
828 	dnet_free_bufs(dnetp);
829 	mutex_destroy(&dnetp->txlock);
830 	mutex_destroy(&dnetp->intrlock);
831 	kmem_free(dnetp, sizeof (struct dnetinstance));
832 
833 #ifdef BUG_4010796
834 	if (ddi_getproplen(DDI_DEV_T_ANY, devinfo, 0,
835 	    "DNET_HACK", &proplen) != DDI_PROP_SUCCESS)
836 		return (DDI_SUCCESS);
837 
838 	/*
839 	 * We must remove the properties we added, because if we leave
840 	 * them in the devinfo nodes and the driver is unloaded, when
841 	 * the driver is reloaded the info will still be there, causing
842 	 * nodes which had returned PROBE_PARTIAL the first time to
843 	 * instead return PROBE_SUCCESS, in turn causing the nodes to be
844 	 * attached in a different order, causing their PPA numbers to
845 	 * be different the second time around, which is undesirable.
846 	 */
847 	(void) ddi_prop_remove(DDI_DEV_T_NONE, devinfo, "DNET_HACK");
848 	(void) ddi_prop_remove(DDI_DEV_T_NONE, ddi_get_parent(devinfo),
849 	    "DNET_SROM");
850 	(void) ddi_prop_remove(DDI_DEV_T_NONE, ddi_get_parent(devinfo),
851 	    "DNET_DEVNUM");
852 #endif
853 
854 	return (DDI_SUCCESS);
855 }
856 
857 int
858 dnet_quiesce(dev_info_t *dip)
859 {
860 	struct dnetinstance *dnetp = ddi_get_driver_private(dip);
861 
862 	/*
863 	 * Reset chip (disables interrupts).
864 	 */
865 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, INT_MASK_REG), 0);
866 	ddi_put32(dnetp->io_handle,
867 	    REG32(dnetp->io_reg, BUS_MODE_REG), SW_RESET);
868 
869 	return (DDI_SUCCESS);
870 }
871 
872 static void
873 dnet_reset_board(struct dnetinstance *dnetp)
874 {
875 	uint32_t	val;
876 
877 	/*
878 	 * Before initializing, the dnet chip should be in the STOP state.
879 	 */
880 	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
881 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG),
882 	    val & ~(START_TRANSMIT | START_RECEIVE));
883 
884 	/*
885 	 * Reset the chip
886 	 */
887 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, INT_MASK_REG), 0);
888 	ddi_put32(dnetp->io_handle,
889 	    REG32(dnetp->io_reg, BUS_MODE_REG), SW_RESET);
890 	drv_usecwait(5);
891 }
892 
893 /*
894  * dnet_init_board() -- initialize the specified network board short of
895  * actually starting the board.  Call after dnet_reset_board().
896  * Called with intrlock held.
897  */
898 static void
899 dnet_init_board(struct dnetinstance *dnetp)
900 {
901 	set_opr(dnetp);
902 	set_gpr(dnetp);
903 	set_sia(dnetp);
904 	dnet_chip_init(dnetp);
905 }
906 
907 /* dnet_chip_init() - called with intrlock held */
908 static void
909 dnet_chip_init(struct dnetinstance *dnetp)
910 {
911 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, BUS_MODE_REG),
912 	    CACHE_ALIGN | BURST_SIZE);		/* CSR0 */
913 
914 	/*
915 	 * Initialize the TX and RX descriptors/buffers
916 	 */
917 	dnet_init_txrx_bufs(dnetp);
918 
919 	/*
920 	 * Set the base address of the Rx descriptor list in CSR3
921 	 */
922 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, RX_BASE_ADDR_REG),
923 	    dnetp->rx_desc_paddr);
924 
925 	/*
926 	 * Set the base address of the Tx descriptor list in CSR4
927 	 */
928 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, TX_BASE_ADDR_REG),
929 	    dnetp->tx_desc_paddr);
930 
931 	dnetp->tx_current_desc = dnetp->rx_current_desc = 0;
932 	dnetp->transmitted_desc = 0;
933 	dnetp->free_desc = dnetp->max_tx_desc;
934 	enable_interrupts(dnetp);
935 }
936 
937 /*
938  *	dnet_start() -- start the board receiving and allow transmits.
939  *  Called with intrlock held.
940  */
941 static int
942 dnet_start(struct dnetinstance *dnetp)
943 {
944 	uint32_t val;
945 
946 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
947 	/*
948 	 * start the board and enable receiving
949 	 */
950 	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
951 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG),
952 	    val | START_TRANSMIT);
953 	(void) dnet_set_addr(dnetp);
954 	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
955 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG),
956 	    val | START_RECEIVE);
957 	enable_interrupts(dnetp);
958 	return (0);
959 }
960 
961 static int
962 dnet_m_start(void *arg)
963 {
964 	struct dnetinstance *dnetp = arg;
965 
966 	mutex_enter(&dnetp->intrlock);
967 	dnetp->running = B_TRUE;
968 	/*
969 	 * start the board and enable receiving
970 	 */
971 	if (!dnetp->suspended)
972 		(void) dnet_start(dnetp);
973 	dnet_mii_link_up(dnetp);
974 	mutex_exit(&dnetp->intrlock);
975 	return (0);
976 }
977 
978 static void
979 dnet_m_stop(void *arg)
980 {
981 	struct dnetinstance *dnetp = arg;
982 	uint32_t val;
983 
984 	/*
985 	 * stop the board and disable transmit/receive
986 	 */
987 	mutex_enter(&dnetp->intrlock);
988 	if (!dnetp->suspended) {
989 		if (dnetp->mii_up) {
990 			dnetp->mii_up = 0;
991 			dnetp->mii_speed = 0;
992 			dnetp->mii_duplex = 0;
993 		}
994 		mac_link_update(dnetp->mac_handle, LINK_STATE_UNKNOWN);
995 		val = ddi_get32(dnetp->io_handle,
996 		    REG32(dnetp->io_reg, OPN_MODE_REG));
997 		ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG),
998 		    val & ~(START_TRANSMIT | START_RECEIVE));
999 	}
1000 	dnetp->running = B_FALSE;
1001 	mutex_exit(&dnetp->intrlock);
1002 }
1003 
1004 /*
1005  *	dnet_set_addr() -- set the physical network address on the board
1006  *  Called with intrlock held.
1007  */
1008 static int
1009 dnet_set_addr(struct dnetinstance *dnetp)
1010 {
1011 	struct tx_desc_type *desc;
1012 	int 		current_desc;
1013 	uint32_t	val;
1014 
1015 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
1016 
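	/*
	 * If the transmitter has not been started yet, defer; the setup
	 * frame carrying the address filter is sent when dnet_start()
	 * later calls this routine.
	 */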
1017 	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
1018 	if (!(val & START_TRANSMIT))
1019 		return (0);
1020 
1021 	current_desc = dnetp->tx_current_desc;
1022 	desc = &dnetp->tx_desc[current_desc];
1023 
1024 	mutex_enter(&dnetp->txlock);
1025 	dnetp->need_saddr = 0;
1026 	mutex_exit(&dnetp->txlock);
1027 
1028 	if ((alloc_descriptor(dnetp)) == FAILURE) {
1029 		mutex_enter(&dnetp->txlock);
1030 		dnetp->need_saddr = 1;
1031 		mutex_exit(&dnetp->txlock);
1032 #ifdef DNETDEBUG
1033 		if (dnetdebug & DNETTRACE)
1034 			cmn_err(CE_WARN, "DNET saddr:alloc descriptor failure");
1035 #endif
1036 		return (0);
1037 	}
1038 
1039 	desc->buffer1			= dnetp->setup_buf_paddr;
1040 	desc->buffer2			= 0;
1041 	desc->desc1.buffer_size1 	= SETUPBUF_SIZE;
1042 	desc->desc1.buffer_size2 	= 0;
1043 	desc->desc1.setup_packet	= 1;
1044 	desc->desc1.first_desc		= 0;
1045 	desc->desc1.last_desc 		= 0;
1046 	desc->desc1.filter_type0 	= 1;
1047 	desc->desc1.filter_type1 	= 1;
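	/*
	 * filter_type1:filter_type0 == 1:1 requests hash-only (imperfect)
	 * filtering, matching the HASH_ONLY note in dnet_m_unicst().
	 */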
1048 	desc->desc1.int_on_comp		= 1;
1049 
1050 	desc->desc0.own = 1;
1051 	ddi_put8(dnetp->io_handle, REG8(dnetp->io_reg, TX_POLL_REG),
1052 	    TX_POLL_DEMAND);
1053 	return (0);
1054 }
1055 
1056 static int
1057 dnet_m_unicst(void *arg, const uint8_t *macaddr)
1058 {
1059 	struct dnetinstance *dnetp = arg;
1060 	uint32_t	index;
1061 	uint32_t	*hashp;
1062 
1063 	mutex_enter(&dnetp->intrlock);
1064 
1065 	bcopy(macaddr, dnetp->curr_macaddr, ETHERADDRL);
1066 
1067 	/*
1068 	 * As we are using Imperfect filtering, the broadcast address has to
1069 	 * be set explicitly in the 512 bit hash table.  Hence the index into
1070 	 * the hash table is calculated and the bit set to enable reception
1071 	 * of broadcast packets.
1072 	 *
1073 	 * We also use HASH_ONLY mode, without using the perfect filter for
1074 	 * our station address, because there appears to be a bug in the
1075 	 * 21140 where it fails to receive the specified perfect filter
1076 	 * address.
1077 	 *
1078 	 * Since dlsdmult comes through here, it doesn't matter that the count
1079 	 * is wrong for the two bits that correspond to the cases below. The
1080 	 * worst that could happen is that we'd leave on a bit for an old
1081 	 * macaddr, in the case where the macaddr gets changed, which is rare.
1082 	 * Since filtering is imperfect, it is OK if that happens.
1083 	 */
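	/*
	 * The setup frame stores the 512-bit hash table as 32 longwords,
	 * of which only the low 16 bits are significant; hence the
	 * divide/modulo by 16 below.
	 */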
1084 	hashp = (uint32_t *)dnetp->setup_buf_vaddr;
1085 	index = hashindex((uint8_t *)dnet_broadcastaddr);
1086 	hashp[ index / 16 ] |= 1 << (index % 16);
1087 
1088 	index = hashindex((uint8_t *)dnetp->curr_macaddr);
1089 	hashp[ index / 16 ] |= 1 << (index % 16);
1090 
1091 	if (!dnetp->suspended)
1092 		(void) dnet_set_addr(dnetp);
1093 	mutex_exit(&dnetp->intrlock);
1094 	return (0);
1095 }
1096 
1097 static int
1098 dnet_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
1099 {
1100 	struct dnetinstance *dnetp = arg;
1101 	uint32_t	index;
1102 	uint32_t	*hashp;
1103 	uint32_t	retval;
1104 
1105 	mutex_enter(&dnetp->intrlock);
1106 	index = hashindex(macaddr);
1107 	hashp = (uint32_t *)dnetp->setup_buf_vaddr;
1108 	if (add) {
1109 		if (dnetp->multicast_cnt[index]++) {
1110 			mutex_exit(&dnetp->intrlock);
1111 			return (0);
1112 		}
1113 		hashp[ index / 16 ] |= 1 << (index % 16);
1114 	} else {
1115 		if (--dnetp->multicast_cnt[index]) {
1116 			mutex_exit(&dnetp->intrlock);
1117 			return (0);
1118 		}
1119 		hashp[ index / 16 ] &= ~ (1 << (index % 16));
1120 	}
1121 	if (!dnetp->suspended)
1122 		retval = dnet_set_addr(dnetp);
1123 	else
1124 		retval = 0;
1125 	mutex_exit(&dnetp->intrlock);
1126 	return (retval);
1127 }
1128 
1129 /*
1130  * A hashing function used for setting the
1131  * node address or a multicast address
1132  */
1133 static uint32_t
1134 hashindex(const uint8_t *address)
1135 {
1136 	uint32_t	crc = (uint32_t)HASH_CRC;
1137 	uint32_t const 	POLY = HASH_POLY;
1138 	uint32_t	msb;
1139 	int32_t 	byteslength;
1140 	uint8_t 	currentbyte;
1141 	uint32_t 	index;
1142 	int32_t 	bit;
1143 	int32_t		shift;
1144 
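	/*
	 * Compute a CRC-style hash over the 6-byte address, feeding each
	 * byte LSB first, seeded with HASH_CRC and using the generator
	 * polynomial HASH_POLY.
	 */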
1145 	for (byteslength = 0; byteslength < ETHERADDRL; byteslength++) {
1146 		currentbyte = address[byteslength];
1147 		for (bit = 0; bit < 8; bit++) {
1148 			msb = crc >> 31;
1149 			crc <<= 1;
1150 			if (msb ^ (currentbyte & 1)) {
1151 				crc ^= POLY;
1152 				crc |= 0x00000001;
1153 			}
1154 			currentbyte >>= 1;
1155 		}
1156 	}
1157 
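	/*
	 * Form the 9-bit table index (0-511) from CRC bits 23..31 in
	 * reversed order: CRC bit 23 becomes the most significant index
	 * bit and CRC bit 31 the least significant.
	 */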
1158 	for (index = 0, bit = 23, shift = 8; shift >= 0; bit++, shift--) {
1159 		index |= (((crc >> bit) & 1) << shift);
1160 	}
1161 	return (index);
1162 }
1163 
1164 static int
1165 dnet_m_setpromisc(void *arg, boolean_t on)
1166 {
1167 	struct dnetinstance *dnetp = arg;
1168 	uint32_t val;
1169 
1170 	mutex_enter(&dnetp->intrlock);
1171 	if (dnetp->promisc == on) {
1172 		mutex_exit(&dnetp->intrlock);
1173 		return (0);
1174 	}
1175 	dnetp->promisc = on;
1176 
1177 	if (!dnetp->suspended) {
1178 		val = ddi_get32(dnetp->io_handle,
1179 		    REG32(dnetp->io_reg, OPN_MODE_REG));
1180 		if (on)
1181 			ddi_put32(dnetp->io_handle,
1182 			    REG32(dnetp->io_reg, OPN_MODE_REG),
1183 			    val | PROM_MODE);
1184 		else
1185 			ddi_put32(dnetp->io_handle,
1186 			    REG32(dnetp->io_reg, OPN_MODE_REG),
1187 			    val & (~PROM_MODE));
1188 	}
1189 	mutex_exit(&dnetp->intrlock);
1190 	return (0);
1191 }
1192 
1193 static int
1194 dnet_m_getstat(void *arg, uint_t stat, uint64_t *val)
1195 {
1196 	struct dnetinstance *dnetp = arg;
1197 
1198 	switch (stat) {
1199 	case MAC_STAT_IFSPEED:
1200 		*val = (dnetp->mii_up ?
1201 		    dnetp->mii_speed : dnetp->speed) * 1000000;
1202 		break;
1203 
1204 	case MAC_STAT_NORCVBUF:
1205 		*val = dnetp->stat_norcvbuf;
1206 		break;
1207 
1208 	case MAC_STAT_IERRORS:
1209 		*val = dnetp->stat_errrcv;
1210 		break;
1211 
1212 	case MAC_STAT_OERRORS:
1213 		*val = dnetp->stat_errxmt;
1214 		break;
1215 
1216 	case MAC_STAT_COLLISIONS:
1217 		*val = dnetp->stat_collisions;
1218 		break;
1219 
1220 	case ETHER_STAT_DEFER_XMTS:
1221 		*val = dnetp->stat_defer;
1222 		break;
1223 
1224 	case ETHER_STAT_CARRIER_ERRORS:
1225 		*val = dnetp->stat_nocarrier;
1226 		break;
1227 
1228 	case ETHER_STAT_TOOSHORT_ERRORS:
1229 		*val = dnetp->stat_short;
1230 		break;
1231 
1232 	case ETHER_STAT_LINK_DUPLEX:
1233 		if (!dnetp->running) {
1234 			*val = LINK_DUPLEX_UNKNOWN;
1235 			break;
1236 		}
1237 
1238 		if (dnetp->mii_up) {
1239 			*val = dnetp->mii_duplex ?
1240 			    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
1241 		} else {
1242 			*val = dnetp->full_duplex ?
1243 			    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
1244 		}
1245 		break;
1246 
1247 	case ETHER_STAT_TX_LATE_COLLISIONS:
1248 		*val = dnetp->stat_xmtlatecoll;
1249 		break;
1250 
1251 	case ETHER_STAT_EX_COLLISIONS:
1252 		*val = dnetp->stat_excoll;
1253 		break;
1254 
1255 	case MAC_STAT_OVERFLOWS:
1256 		*val = dnetp->stat_overflow;
1257 		break;
1258 
1259 	case MAC_STAT_UNDERFLOWS:
1260 		*val = dnetp->stat_underflow;
1261 		break;
1262 
1263 	default:
1264 		return (ENOTSUP);
1265 	}
1266 
1267 	return (0);
1268 }
1269 
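/*
 * Transmit-ring index helpers: with a ring of max_tx_desc descriptors,
 * NextTXIndex(max_tx_desc - 1) wraps around to 0 and PrevTXIndex(0)
 * wraps to max_tx_desc - 1.
 */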
1270 #define	NextTXIndex(index) (((index)+1) % dnetp->max_tx_desc)
1271 #define	PrevTXIndex(index) (((index)-1) < 0 ? dnetp->max_tx_desc - 1: (index)-1)
1272 
1273 static mblk_t *
1274 dnet_m_tx(void *arg, mblk_t *mp)
1275 {
1276 	struct dnetinstance *dnetp = arg;
1277 
1278 	mutex_enter(&dnetp->txlock);
1279 
1280 	/* if suspended, drop the packet on the floor, we missed it */
1281 	if (dnetp->suspended) {
1282 		mutex_exit(&dnetp->txlock);
1283 		freemsg(mp);
1284 		return (NULL);
1285 	}
1286 
1287 	if (dnetp->need_saddr) {
1288 		/* XXX function return value ignored */
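		/*
		 * Honor the lock ordering rule (intrlock before txlock):
		 * drop txlock, take intrlock for dnet_set_addr(), then
		 * reacquire txlock.
		 */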
1289 		mutex_exit(&dnetp->txlock);
1290 		mutex_enter(&dnetp->intrlock);
1291 		(void) dnet_set_addr(dnetp);
1292 		mutex_exit(&dnetp->intrlock);
1293 		mutex_enter(&dnetp->txlock);
1294 	}
1295 
1296 	while (mp != NULL) {
1297 		if (!dnet_send(dnetp, mp)) {
1298 			mutex_exit(&dnetp->txlock);
1299 			return (mp);
1300 		}
1301 		mp = mp->b_next;
1302 	}
1303 
1304 	mutex_exit(&dnetp->txlock);
1305 
1306 	/*
1307 	 * Enable xmit interrupt in case we are running out of xmit descriptors
1308 	 * or there are more packets on the queue waiting to be transmitted.
1309 	 */
1310 	mutex_enter(&dnetp->intrlock);
1311 
1312 	enable_interrupts(dnetp);
1313 
1314 	/*
1315 	 * Kick the transmitter
1316 	 */
1317 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, TX_POLL_REG),
1318 	    TX_POLL_DEMAND);
1319 
1320 	mutex_exit(&dnetp->intrlock);
1321 
1322 	return (NULL);
1323 }
1324 
1325 static boolean_t
1326 dnet_send(struct dnetinstance *dnetp, mblk_t *mp)
1327 {
1328 	struct tx_desc_type	*ring = dnetp->tx_desc;
1329 	int		mblen, totlen;
1330 	int		index, end_index, start_index;
1331 	int		avail;
1332 	int		error;
1333 	int		bufn;
1334 	int		retval;
1335 	mblk_t		*bp;
1336 
1337 	ASSERT(MUTEX_HELD(&dnetp->txlock));
1338 
1339 	/* reclaim any xmit descriptors completed */
1340 	dnet_reclaim_Tx_desc(dnetp);
1341 
1342 	/*
1343 	 * Use the data buffers from the message and construct the
1344 	 * scatter/gather list by calling ddi_dma_addr_bind_handle().
1345 	 */
1346 	error = 0;
1347 	totlen = 0;
1348 	bp = mp;
1349 	bufn = 0;
1350 	index = start_index = dnetp->tx_current_desc;
1351 	avail = dnetp->free_desc;
1352 	while (bp != NULL) {
1353 		uint_t ncookies;
1354 		ddi_dma_cookie_t dma_cookie;
1355 
1356 		mblen = MBLKL(bp);
1357 
1358 		if (!mblen) {	/* skip zero-length message blocks */
1359 			bp = bp->b_cont;
1360 			continue;
1361 		}
1362 
1363 		retval = ddi_dma_addr_bind_handle(dnetp->dma_handle_tx, NULL,
1364 		    (caddr_t)bp->b_rptr, mblen,
1365 		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, 0,
1366 		    &dma_cookie, &ncookies);
1367 
1368 		switch (retval) {
1369 		case DDI_DMA_MAPPED:
1370 			break;		/* everything's fine */
1371 
1372 		case DDI_DMA_NORESOURCES:
1373 			error = 1;	/* allow retry by gld */
1374 			break;
1375 
1376 		case DDI_DMA_NOMAPPING:
1377 		case DDI_DMA_INUSE:
1378 		case DDI_DMA_TOOBIG:
1379 		default:
1380 			error = 2;	/* error, no retry */
1381 			break;
1382 		}
1383 
1384 		/*
1385 		 * We can use two cookies per descriptor (i.e., buffer1 and
1386 		 * buffer2), so we need at least (ncookies + 1) / 2 descriptors.
1387 		 */
1388 		if (((ncookies + 1) >> 1) > dnetp->free_desc) {
1389 			(void) ddi_dma_unbind_handle(dnetp->dma_handle_tx);
1390 			error = 1;
1391 			break;
1392 		}
1393 
1394 		/* setup the descriptors for this data buffer */
1395 		while (ncookies) {
1396 			end_index = index;
1397 			if (bufn % 2) {
1398 				ring[index].buffer2 =
1399 				    (uint32_t)dma_cookie.dmac_address;
1400 				ring[index].desc1.buffer_size2 =
1401 				    dma_cookie.dmac_size;
1402 				index = NextTXIndex(index); /* goto next desc */
1403 			} else {
1404 				/* initialize the descriptor */
1405 				ASSERT(ring[index].desc0.own == 0);
1406 				*(uint32_t *)&ring[index].desc0 = 0;
1407 				*(uint32_t *)&ring[index].desc1 &=
1408 				    DNET_END_OF_RING;
1409 				ring[index].buffer1 =
1410 				    (uint32_t)dma_cookie.dmac_address;
1411 				ring[index].desc1.buffer_size1 =
1412 				    dma_cookie.dmac_size;
1413 				ring[index].buffer2 = (uint32_t)(0);
1414 				dnetp->free_desc--;
1415 				ASSERT(dnetp->free_desc >= 0);
1416 			}
1417 			totlen += dma_cookie.dmac_size;
1418 			bufn++;
1419 			if (--ncookies)
1420 				ddi_dma_nextcookie(dnetp->dma_handle_tx,
1421 				    &dma_cookie);
1422 		}
1423 		(void) ddi_dma_unbind_handle(dnetp->dma_handle_tx);
1424 		bp = bp->b_cont;
1425 	}
1426 
1427 	if (error == 1) {
1428 		dnetp->stat_defer++;
1429 		dnetp->free_desc = avail;
1430 		dnetp->need_tx_update = B_TRUE;
1431 		return (B_FALSE);
1432 	} else if (error) {
1433 		dnetp->free_desc = avail;
1434 		freemsg(mp);
1435 		return (B_TRUE);	/* Drop packet, don't retry */
1436 	}
1437 
1438 	if (totlen > ETHERMAX + VLAN_TAGSZ) {
1439 		cmn_err(CE_WARN, "DNET: tried to send large %d packet", totlen);
1440 		dnetp->free_desc = avail;
1441 		freemsg(mp);
1442 		return (B_TRUE);	/* Don't repeat this attempt */
1443 	}
1444 
1445 	/*
1446 	 * Remember the message buffer pointer so that freemsg() can be
1447 	 * done at transmit-interrupt time.
1448 	 */
1449 	dnetp->tx_msgbufp[end_index] = mp;
1450 
1451 	/*
1452 	 * Now set the first/last buffer and own bits
1453 	 * Since the 21040 looks for these bits set in the
1454 	 * first buffer, work backwards in multiple buffers.
1455 	 */
1456 	ring[end_index].desc1.last_desc = 1;
1457 	ring[end_index].desc1.int_on_comp = 1;
1458 	for (index = end_index; index != start_index;
1459 	    index = PrevTXIndex(index))
1460 		ring[index].desc0.own = 1;
1461 	ring[start_index].desc1.first_desc = 1;
1462 	ring[start_index].desc0.own = 1;
1463 
1464 	dnetp->tx_current_desc = NextTXIndex(end_index);
1465 
1466 	/*
1467 	 * Safety check: make sure end-of-ring is set in last desc.
1468 	 */
1469 	ASSERT(ring[dnetp->max_tx_desc-1].desc1.end_of_ring != 0);
1470 
1471 	return (B_TRUE);
1472 }
1473 
1474 /*
1475  *	dnet_intr() -- interrupt from board to inform us that a receive or
1476  *	transmit has completed.
1477  */
1478 static uint_t
1479 dnet_intr(caddr_t arg)
1480 {
1481 	struct dnetinstance *dnetp = (struct dnetinstance *)arg;
1482 	uint32_t int_status;
1483 
1484 	mutex_enter(&dnetp->intrlock);
1485 
1486 	if (dnetp->suspended) {
1487 		mutex_exit(&dnetp->intrlock);
1488 		return (DDI_INTR_UNCLAIMED);
1489 	}
1490 
1491 	int_status = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg,
1492 	    STATUS_REG));
1493 
1494 	/*
1495 	 * If the interrupt was not from this board, ignore it
1496 	 */
1497 	if (!(int_status & (NORMAL_INTR_SUMM | ABNORMAL_INTR_SUMM))) {
1498 		mutex_exit(&dnetp->intrlock);
1499 		return (DDI_INTR_UNCLAIMED);
1500 	}
1501 
1502 	dnetp->stat_intr++;
1503 
1504 	if (int_status & GPTIMER_INTR) {
1505 		ddi_put32(dnetp->io_handle,
1506 		    REG32(dnetp->io_reg, STATUS_REG), GPTIMER_INTR);
1507 		if (dnetp->timer.cb)
1508 			dnetp->timer.cb(dnetp);
1509 		else
1510 			cmn_err(CE_WARN, "dnet: unhandled timer interrupt");
1511 	}
1512 
1513 	if (int_status & TX_INTR) {
1514 		ddi_put32(dnetp->io_handle,
1515 		    REG32(dnetp->io_reg, STATUS_REG), TX_INTR);
1516 		mutex_enter(&dnetp->txlock);
1517 		if (dnetp->need_tx_update) {
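			/*
			 * mac_tx_update() is called with no locks held so
			 * that the framework may re-enter the transmit path;
			 * intrlock and txlock are then retaken in the
			 * required order.
			 */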
1518 			mutex_exit(&dnetp->txlock);
1519 			mutex_exit(&dnetp->intrlock);
1520 			mac_tx_update(dnetp->mac_handle);
1521 			mutex_enter(&dnetp->intrlock);
1522 			mutex_enter(&dnetp->txlock);
1523 			dnetp->need_tx_update = B_FALSE;
1524 		}
1525 		/* reclaim any xmit descriptors that are completed */
1526 		dnet_reclaim_Tx_desc(dnetp);
1527 		mutex_exit(&dnetp->txlock);
1528 	}
1529 
1530 	/*
1531 	 * Check if receive interrupt bit is set
1532 	 */
1533 	if (int_status & (RX_INTR | RX_UNAVAIL_INTR)) {
1534 		ddi_put32(dnetp->io_handle,
1535 		    REG32(dnetp->io_reg, STATUS_REG),
1536 		    int_status & (RX_INTR | RX_UNAVAIL_INTR));
1537 		dnet_getp(dnetp);
1538 	}
1539 
1540 	if (int_status & ABNORMAL_INTR_SUMM) {
1541 		/*
1542 		 * Check for system error
1543 		 */
1544 		if (int_status & SYS_ERR) {
1545 			if ((int_status & SYS_ERR_BITS) == MASTER_ABORT)
1546 				cmn_err(CE_WARN, "DNET: Bus Master Abort");
1547 			if ((int_status & SYS_ERR_BITS) == TARGET_ABORT)
1548 				cmn_err(CE_WARN, "DNET: Bus Target Abort");
1549 			if ((int_status & SYS_ERR_BITS) == PARITY_ERROR)
1550 				cmn_err(CE_WARN, "DNET: Parity error");
1551 		}
1552 
1553 		/*
1554 		 * If the jabber has timed out then reset the chip
1555 		 */
1556 		if (int_status & TX_JABBER_TIMEOUT)
1557 			cmn_err(CE_WARN, "DNET: Jabber timeout.");
1558 
1559 		/*
1560 		 * If an underflow has occurred, reset the chip
1561 		 */
1562 		if (int_status & TX_UNDERFLOW)
1563 			cmn_err(CE_WARN, "DNET: Tx Underflow.");
1564 
1565 #ifdef DNETDEBUG
1566 		if (dnetdebug & DNETINT)
1567 			cmn_err(CE_NOTE, "Trying to reset...");
1568 #endif
1569 		dnet_reset_board(dnetp);
1570 		dnet_init_board(dnetp);
1571 		/* XXX function return value ignored */
1572 		(void) dnet_start(dnetp);
1573 	}
1574 
1575 	/*
1576 	 * Enable the interrupts. Enable xmit interrupt in case we are
1577 	 * running out of free descriptors or if there are packets
1578 	 * in the queue waiting to be transmitted.
1579 	 */
1580 	enable_interrupts(dnetp);
1581 	mutex_exit(&dnetp->intrlock);
1582 	return (DDI_INTR_CLAIMED);	/* Indicate it was our interrupt */
1583 }
1584 
1585 static void
1586 dnet_getp(struct dnetinstance *dnetp)
1587 {
1588 	int packet_length, index;
1589 	mblk_t	*mp;
1590 	caddr_t 	virtual_address;
1591 	struct	rx_desc_type *desc = dnetp->rx_desc;
1592 	int marker = dnetp->rx_current_desc;
1593 	int misses;
1594 
1595 	if (!dnetp->overrun_workaround) {
1596 		/*
1597 		 * If the workaround is not in place, we must still update
1598 		 * the missed frame statistic from the on-chip counter.
1599 		 */
1600 		misses = ddi_get32(dnetp->io_handle,
1601 		    REG32(dnetp->io_reg, MISSED_FRAME_REG));
1602 		dnetp->stat_missed += (misses & MISSED_FRAME_MASK);
1603 	}
1604 
1605 	/* While host owns the current descriptor */
1606 	while (!(desc[dnetp->rx_current_desc].desc0.own)) {
1607 		struct free_ptr *frp;
1608 		caddr_t newbuf;
1609 		struct rbuf_list *rp;
1610 
1611 		index = dnetp->rx_current_desc;
1612 		ASSERT(desc[index].desc0.first_desc != 0);
1613 
1614 		/*
1615 		 * DMA overrun errata from DEC: avoid possible bus hangs
1616 		 * and data corruption
1617 		 */
1618 		if (dnetp->overrun_workaround &&
1619 		    marker == dnetp->rx_current_desc) {
1620 			int opn;
1621 			do {
1622 				marker = (marker+1) % dnetp->max_rx_desc;
1623 			} while (!(dnetp->rx_desc[marker].desc0.own) &&
1624 			    marker != index);
1625 
1626 			misses = ddi_get32(dnetp->io_handle,
1627 			    REG32(dnetp->io_reg, MISSED_FRAME_REG));
1628 			dnetp->stat_missed +=
1629 			    (misses & MISSED_FRAME_MASK);
1630 			if (misses & OVERFLOW_COUNTER_MASK) {
1631 				/*
1632 				 * Overflow(s) have occurred: stop the receiver
1633 				 * and wait until it is in the stopped state.
1634 				 */
1635 				opn = ddi_get32(dnetp->io_handle,
1636 				    REG32(dnetp->io_reg, OPN_MODE_REG));
1637 				ddi_put32(dnetp->io_handle,
1638 				    REG32(dnetp->io_reg, OPN_MODE_REG),
1639 				    opn & ~(START_RECEIVE));
1640 
1641 				do {
1642 					drv_usecwait(10);
1643 				} while ((ddi_get32(dnetp->io_handle,
1644 				    REG32(dnetp->io_reg, STATUS_REG)) &
1645 				    RECEIVE_PROCESS_STATE) != 0);
1646 #ifdef DNETDEBUG
1647 				if (dnetdebug & DNETRECV)
1648 					cmn_err(CE_CONT, "^*");
1649 #endif
1650 				/* Discard probably corrupt frames */
1651 				while (!(dnetp->rx_desc[index].desc0.own)) {
1652 					dnetp->rx_desc[index].desc0.own = 1;
1653 					index = (index+1) % dnetp->max_rx_desc;
1654 					dnetp->stat_missed++;
1655 				}
1656 
1657 				/* restart the receiver */
1658 				opn = ddi_get32(dnetp->io_handle,
1659 				    REG32(dnetp->io_reg, OPN_MODE_REG));
1660 				ddi_put32(dnetp->io_handle,
1661 				    REG32(dnetp->io_reg, OPN_MODE_REG),
1662 				    opn | START_RECEIVE);
1663 				marker = dnetp->rx_current_desc = index;
1664 				continue;
1665 			}
1666 			/*
1667 			 * At this point, we know that all packets before
1668 			 * "marker" were received before a dma overrun occurred
1669 			 */
1670 		}
1671 
1672 		/*
1673 		 * If we get an oversized packet it could span multiple
1674 		 * descriptors.  If this happens an error bit should be set.
1675 		 */
1676 		while (desc[index].desc0.last_desc == 0) {
1677 			index = (index + 1) % dnetp->max_rx_desc;
1678 			if (desc[index].desc0.own)
1679 				return;	/* not done receiving large packet */
1680 		}
1681 		while (dnetp->rx_current_desc != index) {
1682 			desc[dnetp->rx_current_desc].desc0.own = 1;
1683 			dnetp->rx_current_desc =
1684 			    (dnetp->rx_current_desc + 1) % dnetp->max_rx_desc;
1685 #ifdef DNETDEBUG
1686 			if (dnetdebug & DNETRECV)
1687 				cmn_err(CE_WARN, "dnet: received large packet");
1688 #endif
1689 		}
1690 
1691 		packet_length = desc[index].desc0.frame_len;
1692 
1693 		/*
1694 		 * Remove CRC from received data. This is an artefact of the
1695 		 * 21x4x chip and should not be passed higher up the network
1696 		 * stack.
1697 		 */
1698 		packet_length -= ETHERFCSL;
1699 
1700 		/* get the virtual address of the packet received */
1701 		virtual_address =
1702 		    dnetp->rx_buf_vaddr[index];
1703 
1704 		/*
1705 		 * If no packet errors then do:
1706 		 * 	1. Allocate a new receive buffer so that we can
1707 		 *	   use the current buffer as streams buffer to
1708 		 *	   use the current buffer as a streams buffer to
1709 		 *	2. If we got a new receive buffer then allocate
1710 		 *	   an mblk using desballoc().
1711 		 *	3. Otherwise use the mblk from allocb() and do
1712 		 *	   the bcopy.
1713 		 */
1714 		frp = NULL;
1715 		rp = NULL;
1716 		newbuf = NULL;
1717 		mp = NULL;
1718 		if (!desc[index].desc0.err_summary ||
1719 		    (desc[index].desc0.frame2long &&
1720 		    packet_length < rx_buf_size)) {
1721 			ASSERT(packet_length < rx_buf_size);
1722 			/*
1723 			 * Allocate another receive buffer for this descriptor.
1724 			 * If we fail to allocate then we do the normal bcopy.
1725 			 */
1726 			rp = dnet_rbuf_alloc(dnetp->devinfo, 0);
1727 			if (rp != NULL) {
1728 				newbuf = rp->rbuf_vaddr;
1729 				frp = kmem_zalloc(sizeof (*frp), KM_NOSLEEP);
1730 				if (frp != NULL) {
1731 					frp->free_rtn.free_func =
1732 					    dnet_freemsg_buf;
1733 					frp->free_rtn.free_arg = (char *)frp;
1734 					frp->buf = virtual_address;
1735 					mp = desballoc(
1736 					    (uchar_t *)virtual_address,
1737 					    packet_length, 0, &frp->free_rtn);
1738 					if (mp == NULL) {
1739 						kmem_free(frp, sizeof (*frp));
1740 						dnet_rbuf_free((caddr_t)newbuf);
1741 						frp = NULL;
1742 						newbuf = NULL;
1743 					}
1744 				}
1745 			}
1746 			if (mp == NULL) {
1747 				if (newbuf != NULL)
1748 					dnet_rbuf_free((caddr_t)newbuf);
1749 				mp = allocb(packet_length, 0);
1750 			}
1751 		}
1752 
1753 		if ((desc[index].desc0.err_summary &&
1754 		    packet_length >= rx_buf_size) || mp == NULL) {
1755 
1756 			/* Update gld statistics */
1757 			if (desc[index].desc0.err_summary)
1758 				update_rx_stats(dnetp, index);
1759 			else
1760 				dnetp->stat_norcvbuf++;
1761 
1762 			/*
1763 			 * Reset ownership of the descriptor.
1764 			 */
1765 			desc[index].desc0.own = 1;
1766 			dnetp->rx_current_desc =
1767 			    (dnetp->rx_current_desc+1) % dnetp->max_rx_desc;
1768 
1769 			/* Demand receive polling by the chip */
1770 			ddi_put32(dnetp->io_handle,
1771 			    REG32(dnetp->io_reg, RX_POLL_REG), RX_POLL_DEMAND);
1772 
1773 			continue;
1774 		}
1775 
1776 		if (newbuf != NULL) {
1777 			uint32_t end_paddr;
1778 			/* attach the new buffer to the rx descriptor */
1779 			dnetp->rx_buf_vaddr[index] = newbuf;
1780 			dnetp->rx_buf_paddr[index] = rp->rbuf_paddr;
1781 			desc[index].buffer1 = rp->rbuf_paddr;
1782 			desc[index].desc1.buffer_size1 = rx_buf_size;
1783 			desc[index].desc1.buffer_size2 = 0;
1784 			end_paddr = rp->rbuf_endpaddr;
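			/*
			 * If the replacement buffer crosses a page boundary
			 * (dma_attr_rb allows two cookies), split it across
			 * buffer1 and buffer2 of the descriptor.
			 */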
1785 			if ((desc[index].buffer1 & ~dnetp->pgmask) !=
1786 			    (end_paddr & ~dnetp->pgmask)) {
1787 				/* discontiguous */
1788 				desc[index].buffer2 = end_paddr&~dnetp->pgmask;
1789 				desc[index].desc1.buffer_size2 =
1790 				    (end_paddr & dnetp->pgmask) + 1;
1791 				desc[index].desc1.buffer_size1 =
1792 				    rx_buf_size-desc[index].desc1.buffer_size2;
1793 			}
1794 		} else {
1795 			/* couldn't allocate another buffer; copy the data */
1796 			BCOPY((caddr_t)virtual_address, (caddr_t)mp->b_wptr,
1797 			    packet_length);
1798 		}
1799 
1800 		mp->b_wptr += packet_length;
1801 
1802 		desc[dnetp->rx_current_desc].desc0.own = 1;
1803 
1804 		/*
1805 		 * Increment the receive descriptor index, ready for the scan
1806 		 * of the next packet.
1807 		 */
1808 		dnetp->rx_current_desc =
1809 		    (dnetp->rx_current_desc+1) % dnetp->max_rx_desc;
1810 
1811 		/* Demand polling by chip */
1812 		ddi_put32(dnetp->io_handle,
1813 		    REG32(dnetp->io_reg, RX_POLL_REG), RX_POLL_DEMAND);
1814 
1815 		/* send the packet upstream */
1816 		mutex_exit(&dnetp->intrlock);
1817 		mac_rx(dnetp->mac_handle, NULL, mp);
1818 		mutex_enter(&dnetp->intrlock);
1819 	}
1820 }
1821 /*
1822  * Function to update receive statistics
1823  */
1824 static void
1825 update_rx_stats(struct dnetinstance *dnetp, int index)
1826 {
1827 	struct rx_desc_type *descp = &(dnetp->rx_desc[index]);
1828 
1829 	/*
1830 	 * Update gld statistics
1831 	 */
1832 	dnetp->stat_errrcv++;
1833 
1834 	if (descp->desc0.overflow)	{
1835 		/* FIFO Overrun */
1836 		dnetp->stat_overflow++;
1837 	}
1838 
1839 	if (descp->desc0.collision) {
1840 		/*EMPTY*/
1841 		/* Late Collision on receive */
1842 		/* no appropriate counter */
1843 	}
1844 
1845 	if (descp->desc0.crc) {
1846 		/* CRC Error */
1847 		dnetp->stat_crc++;
1848 	}
1849 
1850 	if (descp->desc0.runt_frame) {
1851 		/* Runt Error */
1852 		dnetp->stat_short++;
1853 	}
1854 
1855 	if (descp->desc0.desc_err) {
1856 		/*EMPTY*/
1857 		/* Not enough receive descriptors */
1858 		/* This condition is accounted in dnet_intr() */
1859 	}
1860 
1861 	if (descp->desc0.frame2long) {
1862 		dnetp->stat_frame++;
1863 	}
1864 }
1865 
1866 /*
1867  * Function to update transmit statistics
1868  */
1869 static void
1870 update_tx_stats(struct dnetinstance *dnetp, int index)
1871 {
1872 	struct tx_desc_type *descp = &(dnetp->tx_desc[index]);
1873 	int	fd;
1874 	media_block_t	*block = dnetp->selected_media_block;
1875 
1876 
1877 	/* Update gld statistics */
1878 	dnetp->stat_errxmt++;
1879 
1880 	/* If we're in full-duplex don't count collisions or carrier loss. */
1881 	if (dnetp->mii_up) {
1882 		fd = dnetp->mii_duplex;
1883 	} else {
1884 		/* Rely on media code */
1885 		fd = block->media_code == MEDIA_TP_FD ||
1886 		    block->media_code == MEDIA_SYM_SCR_FD;
1887 	}
1888 
1889 	if (descp->desc0.collision_count && !fd) {
1890 		dnetp->stat_collisions += descp->desc0.collision_count;
1891 	}
1892 
1893 	if (descp->desc0.late_collision && !fd) {
1894 		dnetp->stat_xmtlatecoll++;
1895 	}
1896 
1897 	if (descp->desc0.excess_collision && !fd) {
1898 		dnetp->stat_excoll++;
1899 	}
1900 
1901 	if (descp->desc0.underflow) {
1902 		dnetp->stat_underflow++;
1903 	}
1904 
1905 #if 0
1906 	if (descp->desc0.tx_jabber_to) {
1907 		/* no appropriate counter */
1908 	}
1909 #endif
1910 
1911 	if (descp->desc0.carrier_loss && !fd) {
1912 		dnetp->stat_nocarrier++;
1913 	}
1914 
1915 	if (descp->desc0.no_carrier && !fd) {
1916 		dnetp->stat_nocarrier++;
1917 	}
1918 }
1919 
1920 /*
1921  *	========== Media Selection Setup Routines ==========
1922  */
1923 
1924 
1925 static void
1926 write_gpr(struct dnetinstance *dnetp, uint32_t val)
1927 {
1928 #ifdef DEBUG
1929 	if (dnetdebug & DNETREGCFG)
1930 		cmn_err(CE_NOTE, "GPR: %x", val);
1931 #endif
1932 	switch (dnetp->board_type) {
1933 	case DEVICE_ID_21143:
1934 		/* Set the correct bit for a control write */
1935 		if (val & GPR_CONTROL_WRITE)
1936 			val |= CWE_21143, val &= ~GPR_CONTROL_WRITE;
1937 		/* Write to upper half of CSR15 */
1938 		dnetp->gprsia = (dnetp->gprsia & 0xffff) | (val << 16);
1939 		ddi_put32(dnetp->io_handle,
1940 		    REG32(dnetp->io_reg, SIA_GENERAL_REG), dnetp->gprsia);
1941 		break;
1942 	default:
1943 		/* Set the correct bit for a control write */
1944 		if (val & GPR_CONTROL_WRITE)
1945 			val |= CWE_21140, val &= ~GPR_CONTROL_WRITE;
1946 		ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, GP_REG), val);
1947 		break;
1948 	}
1949 }
1950 
1951 static uint32_t
1952 read_gpr(struct dnetinstance *dnetp)
1953 {
1954 	switch (dnetp->board_type) {
1955 	case DEVICE_ID_21143:
1956 		/* Read upper half of CSR15 */
1957 		return (ddi_get32(dnetp->io_handle,
1958 		    REG32(dnetp->io_reg, SIA_GENERAL_REG)) >> 16);
1959 	default:
1960 		return (ddi_get32(dnetp->io_handle,
1961 		    REG32(dnetp->io_reg, GP_REG)));
1962 	}
1963 }
1964 
1965 static void
1966 set_gpr(struct dnetinstance *dnetp)
1967 {
1968 	uint32_t *sequence;
1969 	int len;
1970 	LEAF_FORMAT *leaf = &dnetp->sr.leaf[dnetp->leaf];
1971 	media_block_t *block = dnetp->selected_media_block;
1972 	int i;
1973 
1974 	if (ddi_getlongprop(DDI_DEV_T_ANY, dnetp->devinfo,
1975 	    DDI_PROP_DONTPASS, "gpr-sequence", (caddr_t)&sequence,
1976 	    &len) == DDI_PROP_SUCCESS) {
1977 		for (i = 0; i < len / sizeof (uint32_t); i++)
1978 			write_gpr(dnetp, sequence[i]);
1979 		kmem_free(sequence, len);
1980 	} else {
1981 		/*
1982 		 * Write the reset sequence if this is the first time this
1983 		 * block has been selected.
1984 		 */
1985 		if (block->rstseqlen) {
1986 			for (i = 0; i < block->rstseqlen; i++)
1987 				write_gpr(dnetp, block->rstseq[i]);
1988 			/*
1989 			 * XXX Legacy blocks do not have reset sequences, so the
1990 			 * static blocks will never be modified by this
1991 			 */
1992 			block->rstseqlen = 0;
1993 		}
1994 		if (leaf->gpr)
1995 			write_gpr(dnetp, leaf->gpr | GPR_CONTROL_WRITE);
1996 
1997 		/* write GPR sequence each time */
1998 		for (i = 0; i < block->gprseqlen; i++)
1999 			write_gpr(dnetp, block->gprseq[i]);
2000 	}
2001 
2002 	/* This has possibly caused a PHY to reset.  Let MII know */
2003 	if (dnetp->phyaddr != -1)
2004 		/* XXX function return value ignored */
2005 		(void) mii_sync(dnetp->mii, dnetp->phyaddr);
2006 	drv_usecwait(5);
2007 }
2008 
2009 /* set_opr() - must be called with intrlock held */
2010 
2011 static void
2012 set_opr(struct dnetinstance *dnetp)
2013 {
2014 	uint32_t fd, mb1, sf;
2015 
2016 	int 		opnmode_len;
2017 	uint32_t val;
2018 	media_block_t *block = dnetp->selected_media_block;
2019 
2020 	ASSERT(block);
2021 
2022 	/* Check for custom "opnmode_reg" property */
2023 	opnmode_len = sizeof (val);
2024 	if (ddi_prop_op(DDI_DEV_T_ANY, dnetp->devinfo,
2025 	    PROP_LEN_AND_VAL_BUF, DDI_PROP_DONTPASS, "opnmode_reg",
2026 	    (caddr_t)&val, &opnmode_len) != DDI_PROP_SUCCESS)
2027 		opnmode_len = 0;
2028 
2029 	/* Some bits exist only on 21140 and greater */
2030 	if (dnetp->board_type != DEVICE_ID_21040 &&
2031 	    dnetp->board_type != DEVICE_ID_21041) {
2032 		mb1 = OPN_REG_MB1;
2033 		sf = STORE_AND_FORWARD;
2034 	} else {
2035 		mb1 = sf = 0;
2036 		mb1 = OPN_REG_MB1; /* Needed for 21040? */
2037 	}
2038 
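	/*
	 * If the conf file supplies an "opnmode_reg" value, write it
	 * verbatim to CSR6 (once before and once after a board reset) and
	 * skip the normal bit-building below.
	 */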
2039 	if (opnmode_len) {
2040 		ddi_put32(dnetp->io_handle,
2041 		    REG32(dnetp->io_reg, OPN_MODE_REG), val);
2042 		dnet_reset_board(dnetp);
2043 		ddi_put32(dnetp->io_handle,
2044 		    REG32(dnetp->io_reg, OPN_MODE_REG), val);
2045 		return;
2046 	}
2047 
2048 	/*
2049 	 * Set each bit in CSR6 that we want
2050 	 */
2051 
2052 	/* Always want these bits set */
2053 	val = HASH_FILTERING | HASH_ONLY | TX_THRESHOLD_160 | mb1 | sf;
2054 
2055 	/* Promiscuous mode */
2056 	val |= dnetp->promisc ? PROM_MODE : 0;
2057 
2058 	/* Scrambler for SYM style media */
2059 	val |= ((block->command & CMD_SCR) && !dnetp->disable_scrambler) ?
2060 	    SCRAMBLER_MODE : 0;
2061 
2062 	/* Full duplex */
2063 	if (dnetp->mii_up) {
2064 		fd = dnetp->mii_duplex;
2065 	} else {
2066 		/* Rely on media code */
2067 		fd = block->media_code == MEDIA_TP_FD ||
2068 		    block->media_code == MEDIA_SYM_SCR_FD;
2069 	}
2070 
2071 	/* Port select (and therefore, heartbeat disable) */
2072 	val |= block->command & CMD_PS ? (PORT_SELECT | HEARTBEAT_DISABLE) : 0;
2073 
2074 	/* PCS function */
2075 	val |= (block->command) & CMD_PCS ? PCS_FUNCTION : 0;
2076 	val |= fd ? FULL_DUPLEX : 0;
2077 
2078 #ifdef DNETDEBUG
2079 	if (dnetdebug & DNETREGCFG)
2080 		cmn_err(CE_NOTE, "OPN: %x", val);
2081 #endif
2082 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG), val);
2083 	dnet_reset_board(dnetp);
2084 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG), val);
2085 }
2086 
2087 static void
2088 set_sia(struct dnetinstance *dnetp)
2089 {
2090 	media_block_t *block = dnetp->selected_media_block;
2091 
2092 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
2093 	if (block->type == 2) {
2094 		int sia_delay;
2095 #ifdef DNETDEBUG
2096 		if (dnetdebug & DNETREGCFG)
2097 			cmn_err(CE_NOTE,
2098 			    "SIA: CSR13: %x, CSR14: %x, CSR15: %x",
2099 			    block->un.sia.csr13,
2100 			    block->un.sia.csr14,
2101 			    block->un.sia.csr15);
2102 #endif
2103 		sia_delay = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
2104 		    DDI_PROP_DONTPASS, "sia-delay", 10000);
2105 
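		/*
		 * Program the SIA: write 0 to CSR13 first, load CSR14 and
		 * CSR15, then write CSR13 last, presumably so the new
		 * configuration takes effect as a unit; then wait sia_delay
		 * microseconds for the SIA to settle.
		 */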
2106 		ddi_put32(dnetp->io_handle,
2107 		    REG32(dnetp->io_reg, SIA_CONNECT_REG), 0);
2108 
2109 		ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, SIA_TXRX_REG),
2110 		    block->un.sia.csr14);
2111 
2112 		/*
2113 		 * For '143, we need to write through a copy of the register
2114 		 * to keep the GP half intact
2115 		 */
2116 		dnetp->gprsia = (dnetp->gprsia&0xffff0000)|block->un.sia.csr15;
2117 		ddi_put32(dnetp->io_handle,
2118 		    REG32(dnetp->io_reg, SIA_GENERAL_REG),
2119 		    dnetp->gprsia);
2120 
2121 		ddi_put32(dnetp->io_handle,
2122 		    REG32(dnetp->io_reg, SIA_CONNECT_REG),
2123 		    block->un.sia.csr13);
2124 
2125 		drv_usecwait(sia_delay);
2126 
2127 	} else if (dnetp->board_type != DEVICE_ID_21140) {
2128 		ddi_put32(dnetp->io_handle,
2129 		    REG32(dnetp->io_reg, SIA_CONNECT_REG), 0);
2130 		ddi_put32(dnetp->io_handle,
2131 		    REG32(dnetp->io_reg, SIA_TXRX_REG), 0);
2132 	}
2133 }
2134 
2135 /*
2136  * This function (re)allocates the receive and transmit buffers and
2137  * descriptors.  It can be called more than once per instance, though
2138  * currently it is only called from attach.  It should only be called
2139  * while the device is reset.
2140  */
2141 static int
2142 dnet_alloc_bufs(struct dnetinstance *dnetp)
2143 {
2144 	int i;
2145 	size_t len;
2146 	int page_size;
2147 	int realloc = 0;
2148 	int nrecv_desc_old = 0;
2149 	ddi_dma_cookie_t cookie;
2150 	uint_t ncookies;
2151 
2152 	/*
2153 	 * check if we are trying to reallocate with different xmit/recv
2154 	 * descriptor ring sizes.
2155 	 */
2156 	if ((dnetp->tx_desc != NULL) &&
2157 	    (dnetp->nxmit_desc != dnetp->max_tx_desc))
2158 		realloc = 1;
2159 
2160 	if ((dnetp->rx_desc != NULL) &&
2161 	    (dnetp->nrecv_desc != dnetp->max_rx_desc))
2162 		realloc = 1;
2163 
2164 	/* free up the old buffers if we are reallocating them */
2165 	if (realloc) {
2166 		nrecv_desc_old = dnetp->nrecv_desc;
2167 		dnet_free_bufs(dnetp); /* free the old buffers */
2168 	}
2169 
2170 	if (dnetp->dma_handle == NULL)
2171 		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr,
2172 		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle) != DDI_SUCCESS)
2173 			return (FAILURE);
2174 
2175 	if (dnetp->dma_handle_tx == NULL)
2176 		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr_tx,
2177 		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle_tx) != DDI_SUCCESS)
2178 			return (FAILURE);
2179 
2180 	if (dnetp->dma_handle_txdesc == NULL)
2181 		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr,
2182 		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle_txdesc) != DDI_SUCCESS)
2183 			return (FAILURE);
2184 
2185 	if (dnetp->dma_handle_setbuf == NULL)
2186 		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr,
2187 		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle_setbuf) != DDI_SUCCESS)
2188 			return (FAILURE);
2189 
2190 	page_size = ddi_ptob(dnetp->devinfo, 1);
2191 
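	/*
	 * pgmask is used by the descriptor setup code to detect receive
	 * buffers that span a page boundary and must be split across a
	 * descriptor's two buffer pointers.
	 */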
2192 	dnetp->pgmask = page_size - 1;
2193 
2194 	/* allocate setup buffer if necessary */
2195 	if (dnetp->setup_buf_vaddr == NULL) {
2196 		if (ddi_dma_mem_alloc(dnetp->dma_handle_setbuf,
2197 		    SETUPBUF_SIZE, &accattr, DDI_DMA_STREAMING,
2198 		    DDI_DMA_DONTWAIT, 0, (caddr_t *)&dnetp->setup_buf_vaddr,
2199 		    &len, &dnetp->setup_buf_acchdl) != DDI_SUCCESS)
2200 			return (FAILURE);
2201 
2202 		if (ddi_dma_addr_bind_handle(dnetp->dma_handle_setbuf,
2203 		    NULL, dnetp->setup_buf_vaddr, SETUPBUF_SIZE,
2204 		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
2205 		    NULL, &cookie, &ncookies) != DDI_DMA_MAPPED)
2206 			return (FAILURE);
2207 
2208 		dnetp->setup_buf_paddr = cookie.dmac_address;
2209 		bzero(dnetp->setup_buf_vaddr, len);
2210 	}
2211 
2212 	/* allocate xmit descriptor array of size dnetp->max_tx_desc */
2213 	if (dnetp->tx_desc == NULL) {
2214 		if (ddi_dma_mem_alloc(dnetp->dma_handle_txdesc,
2215 		    sizeof (struct tx_desc_type) * dnetp->max_tx_desc,
2216 		    &accattr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
2217 		    (caddr_t *)&dnetp->tx_desc, &len,
2218 		    &dnetp->tx_desc_acchdl) != DDI_SUCCESS)
2219 			return (FAILURE);
2220 
2221 		if (ddi_dma_addr_bind_handle(dnetp->dma_handle_txdesc,
2222 		    NULL, (caddr_t)dnetp->tx_desc,
2223 		    sizeof (struct tx_desc_type) * dnetp->max_tx_desc,
2224 		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
2225 		    NULL, &cookie, &ncookies) != DDI_DMA_MAPPED)
2226 			return (FAILURE);
2227 		dnetp->tx_desc_paddr = cookie.dmac_address;
2228 		bzero(dnetp->tx_desc, len);
2229 		dnetp->nxmit_desc = dnetp->max_tx_desc;
2230 
2231 		dnetp->tx_msgbufp =
2232 		    kmem_zalloc(dnetp->max_tx_desc * sizeof (mblk_t **),
2233 		    KM_SLEEP);
2234 	}
2235 
2236 	/* allocate receive descriptor array of size dnetp->max_rx_desc */
2237 	if (dnetp->rx_desc == NULL) {
2238 		int ndesc;
2239 
2240 		if (ddi_dma_mem_alloc(dnetp->dma_handle,
2241 		    sizeof (struct rx_desc_type) * dnetp->max_rx_desc,
2242 		    &accattr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
2243 		    (caddr_t *)&dnetp->rx_desc, &len,
2244 		    &dnetp->rx_desc_acchdl) != DDI_SUCCESS)
2245 			return (FAILURE);
2246 
2247 		if (ddi_dma_addr_bind_handle(dnetp->dma_handle,
2248 		    NULL, (caddr_t)dnetp->rx_desc,
2249 		    sizeof (struct rx_desc_type) * dnetp->max_rx_desc,
2250 		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
2251 		    NULL, &cookie, &ncookies) != DDI_DMA_MAPPED)
2252 			return (FAILURE);
2253 
2254 		dnetp->rx_desc_paddr = cookie.dmac_address;
2255 		bzero(dnetp->rx_desc, len);
2256 		dnetp->nrecv_desc = dnetp->max_rx_desc;
2257 
2258 		dnetp->rx_buf_vaddr =
2259 		    kmem_zalloc(dnetp->max_rx_desc * sizeof (caddr_t),
2260 		    KM_SLEEP);
2261 		dnetp->rx_buf_paddr =
2262 		    kmem_zalloc(dnetp->max_rx_desc * sizeof (uint32_t),
2263 		    KM_SLEEP);
2264 		/*
2265 		 * Allocate or add to the pool of receive buffers.  The pool
2266 		 * is shared among all instances of dnet.
2267 		 *
2268 		 * XXX NEEDSWORK
2269 		 *
2270 		 * We arbitrarily allocate twice as many receive buffers as
2271 		 * receive descriptors because we use the buffers for streams
2272 		 * messages to pass the packets up the stream.  We should
2273 		 * instead have initialized constants reflecting
2274 		 * MAX_RX_BUF_2104x and MAX_RX_BUF_2114x, and we should also
2275 		 * probably have a total maximum for the free pool, so that we
2276 		 * don't get out of hand when someone puts in an 8-port board.
2277 		 * The maximum for the entire pool should be the total number
2278 		 * of descriptors for all attached instances together, plus the
2279 		 * total maximum for the free pool.  This maximum would only be
2280 		 * reached after some number of instances allocate buffers:
2281 		 * each instance would add (max_rx_buf-max_rx_desc) to the free
2282 		 * pool.
2283 		 */
2284 		ndesc = dnetp->max_rx_desc - nrecv_desc_old;
2285 		if ((ndesc > 0) &&
2286 		    (dnet_rbuf_init(dnetp->devinfo, ndesc * 2) != 0))
2287 			return (FAILURE);
2288 
2289 		for (i = 0; i < dnetp->max_rx_desc; i++) {
2290 			struct rbuf_list *rp;
2291 
2292 			rp = dnet_rbuf_alloc(dnetp->devinfo, 1);
2293 			if (rp == NULL)
2294 				return (FAILURE);
2295 			dnetp->rx_buf_vaddr[i] = rp->rbuf_vaddr;
2296 			dnetp->rx_buf_paddr[i] = rp->rbuf_paddr;
2297 		}
2298 	}
2299 
2300 	return (SUCCESS);
2301 }
2302 /*
2303  * free descriptors/buffers allocated for this device instance.  This routine
2304  * should only be called while the device is reset.
2305  */
2306 static void
2307 dnet_free_bufs(struct dnetinstance *dnetp)
2308 {
2309 	int i;
2310 	/* free up any xmit descriptors/buffers */
2311 	if (dnetp->tx_desc != NULL) {
2312 		ddi_dma_mem_free(&dnetp->tx_desc_acchdl);
2313 		dnetp->tx_desc = NULL;
2314 		/* we use streams buffers for DMA in xmit process */
2315 		if (dnetp->tx_msgbufp != NULL) {
2316 			/* free up any streams message buffers unclaimed */
2317 			for (i = 0; i < dnetp->nxmit_desc; i++) {
2318 				if (dnetp->tx_msgbufp[i] != NULL) {
2319 					freemsg(dnetp->tx_msgbufp[i]);
2320 				}
2321 			}
2322 			kmem_free(dnetp->tx_msgbufp,
2323 			    dnetp->nxmit_desc * sizeof (mblk_t **));
2324 			dnetp->tx_msgbufp = NULL;
2325 		}
2326 		dnetp->nxmit_desc = 0;
2327 	}
2328 
2329 	/* free up any receive descriptors/buffers */
2330 	if (dnetp->rx_desc != NULL) {
2331 		ddi_dma_mem_free(&dnetp->rx_desc_acchdl);
2332 		dnetp->rx_desc = NULL;
2333 		if (dnetp->rx_buf_vaddr != NULL) {
2334 			/* free up the attached rbufs if any */
2335 			for (i = 0; i < dnetp->nrecv_desc; i++) {
2336 				if (dnetp->rx_buf_vaddr[i])
2337 					dnet_rbuf_free(
2338 					    (caddr_t)dnetp->rx_buf_vaddr[i]);
2339 			}
2340 			kmem_free(dnetp->rx_buf_vaddr,
2341 			    dnetp->nrecv_desc * sizeof (caddr_t));
2342 			kmem_free(dnetp->rx_buf_paddr,
2343 			    dnetp->nrecv_desc * sizeof (uint32_t));
2344 			dnetp->rx_buf_vaddr = NULL;
2345 			dnetp->rx_buf_paddr = NULL;
2346 		}
2347 		dnetp->nrecv_desc = 0;
2348 	}
2349 
2350 	if (dnetp->setup_buf_vaddr != NULL) {
2351 		ddi_dma_mem_free(&dnetp->setup_buf_acchdl);
2352 		dnetp->setup_buf_vaddr = NULL;
2353 	}
2354 
2355 	if (dnetp->dma_handle != NULL) {
2356 		(void) ddi_dma_unbind_handle(dnetp->dma_handle);
2357 		ddi_dma_free_handle(&dnetp->dma_handle);
2358 		dnetp->dma_handle = NULL;
2359 	}
2360 
2361 	if (dnetp->dma_handle_tx != NULL) {
2362 		(void) ddi_dma_unbind_handle(dnetp->dma_handle_tx);
2363 		ddi_dma_free_handle(&dnetp->dma_handle_tx);
2364 		dnetp->dma_handle_tx = NULL;
2365 	}
2366 
2367 	if (dnetp->dma_handle_txdesc != NULL) {
2368 		(void) ddi_dma_unbind_handle(dnetp->dma_handle_txdesc);
2369 		ddi_dma_free_handle(&dnetp->dma_handle_txdesc);
2370 		dnetp->dma_handle_txdesc = NULL;
2371 	}
2372 
2373 	if (dnetp->dma_handle_setbuf != NULL) {
2374 		(void) ddi_dma_unbind_handle(dnetp->dma_handle_setbuf);
2375 		ddi_dma_free_handle(&dnetp->dma_handle_setbuf);
2376 		dnetp->dma_handle_setbuf = NULL;
2377 	}
2378 
2379 }
2380 
2381 /*
2382  * Initialize transmit and receive descriptors.
2383  */
2384 static void
2385 dnet_init_txrx_bufs(struct dnetinstance *dnetp)
2386 {
2387 	int		i;
2388 
2389 	/*
2390 	 * Initialize all the Tx descriptors
2391 	 */
2392 	for (i = 0; i < dnetp->nxmit_desc; i++) {
2393 		/*
2394 		 * We may be resetting the device due to errors,
2395 		 * so free up any streams message buffer unclaimed.
2396 		 */
2397 		if (dnetp->tx_msgbufp[i] != NULL) {
2398 			freemsg(dnetp->tx_msgbufp[i]);
2399 			dnetp->tx_msgbufp[i] = NULL;
2400 		}
2401 		*(uint32_t *)&dnetp->tx_desc[i].desc0 = 0;
2402 		*(uint32_t *)&dnetp->tx_desc[i].desc1 = 0;
2403 		dnetp->tx_desc[i].buffer1 = 0;
2404 		dnetp->tx_desc[i].buffer2 = 0;
2405 	}
2406 	dnetp->tx_desc[i - 1].desc1.end_of_ring = 1;
2407 
2408 	/*
2409 	 * Initialize the Rx descriptors
2410 	 */
2411 	for (i = 0; i < dnetp->nrecv_desc; i++) {
2412 		uint32_t end_paddr;
2413 		*(uint32_t *)&dnetp->rx_desc[i].desc0 = 0;
2414 		*(uint32_t *)&dnetp->rx_desc[i].desc1 = 0;
2415 		dnetp->rx_desc[i].desc0.own = 1;
2416 		dnetp->rx_desc[i].desc1.buffer_size1 = rx_buf_size;
2417 		dnetp->rx_desc[i].buffer1 = dnetp->rx_buf_paddr[i];
2418 		dnetp->rx_desc[i].buffer2 = 0;
2419 		end_paddr = dnetp->rx_buf_paddr[i]+rx_buf_size-1;
2420 
2421 		if ((dnetp->rx_desc[i].buffer1 & ~dnetp->pgmask) !=
2422 		    (end_paddr & ~dnetp->pgmask)) {
2423 			/* discontiguous */
2424 			dnetp->rx_desc[i].buffer2 = end_paddr&~dnetp->pgmask;
2425 			dnetp->rx_desc[i].desc1.buffer_size2 =
2426 			    (end_paddr & dnetp->pgmask) + 1;
2427 			dnetp->rx_desc[i].desc1.buffer_size1 =
2428 			    rx_buf_size-dnetp->rx_desc[i].desc1.buffer_size2;
2429 		}
2430 	}
2431 	dnetp->rx_desc[i - 1].desc1.end_of_ring = 1;
2432 }
2433 
2434 static int
2435 alloc_descriptor(struct dnetinstance *dnetp)
2436 {
2437 	int index;
2438 	struct tx_desc_type    *ring = dnetp->tx_desc;
2439 
2440 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
2441 alloctop:
2442 	mutex_enter(&dnetp->txlock);
2443 	index = dnetp->tx_current_desc;
2444 
2445 	dnet_reclaim_Tx_desc(dnetp);
2446 
2447 	/* we do have free descriptors, right? */
2448 	if (dnetp->free_desc <= 0) {
2449 #ifdef DNETDEBUG
2450 		if (dnetdebug & DNETRECV)
2451 			cmn_err(CE_NOTE, "dnet: Ring buffer is full");
2452 #endif
2453 		mutex_exit(&dnetp->txlock);
2454 		return (FAILURE);
2455 	}
2456 
2457 	/* sanity, make sure the next descriptor is free for use (should be) */
2458 	if (ring[index].desc0.own) {
2459 #ifdef DNETDEBUG
2460 		if (dnetdebug & DNETRECV)
2461 			cmn_err(CE_WARN,
2462 			    "dnet: next descriptor is not free for use");
2463 #endif
2464 		mutex_exit(&dnetp->txlock);
2465 		return (FAILURE);
2466 	}
2467 	if (dnetp->need_saddr) {
2468 		mutex_exit(&dnetp->txlock);
2469 		/* XXX function return value ignored */
2470 		if (!dnetp->suspended)
2471 			(void) dnet_set_addr(dnetp);
2472 		goto alloctop;
2473 	}
2474 
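	/*
	 * Clear the status word and all of desc1 except the end_of_ring
	 * bit, which must stay set on the last descriptor of the ring.
	 */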
2475 	*(uint32_t *)&ring[index].desc0 = 0;  /* init descs */
2476 	*(uint32_t *)&ring[index].desc1 &= DNET_END_OF_RING;
2477 
2478 	/* hardware will own this descriptor when poll activated */
2479 	dnetp->free_desc--;
2480 
2481 	/* point to next free descriptor to be used */
2482 	dnetp->tx_current_desc = NextTXIndex(index);
2483 
2484 #ifdef DNET_NOISY
2485 	cmn_err(CE_WARN, "sfree 0x%x, transmitted 0x%x, tx_current 0x%x",
2486 	    dnetp->free_desc, dnetp->transmitted_desc, dnetp->tx_current_desc);
2487 #endif
2488 	mutex_exit(&dnetp->txlock);
2489 	return (SUCCESS);
2490 }
2491 
2492 /*
2493  * dnet_reclaim_Tx_desc() - called with txlock held.
2494  */
2495 static void
2496 dnet_reclaim_Tx_desc(struct dnetinstance *dnetp)
2497 {
2498 	struct tx_desc_type	*desc = dnetp->tx_desc;
2499 	int index;
2500 
2501 	ASSERT(MUTEX_HELD(&dnetp->txlock));
2502 
2503 	index = dnetp->transmitted_desc;
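	/*
	 * Walk forward from the oldest unreclaimed descriptor, freeing each
	 * one the chip has handed back (own bit clear) until we catch up
	 * with tx_current_desc.
	 */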
2504 	while (((dnetp->free_desc == 0) || (index != dnetp->tx_current_desc)) &&
2505 	    !(desc[index].desc0.own)) {
2506 		/*
2507 		 * Check for Tx Error that gets set
2508 		 * in the last desc.
2509 		 */
2510 		if (desc[index].desc1.setup_packet == 0 &&
2511 		    desc[index].desc1.last_desc &&
2512 		    desc[index].desc0.err_summary)
2513 			update_tx_stats(dnetp, index);
2514 
2515 		/*
2516 		 * If we have used the streams message buffer for this
2517 		 * descriptor then free up the message now.
2518 		 */
2519 		if (dnetp->tx_msgbufp[index] != NULL) {
2520 			freemsg(dnetp->tx_msgbufp[index]);
2521 			dnetp->tx_msgbufp[index] = NULL;
2522 		}
2523 		dnetp->free_desc++;
2524 		index = (index+1) % dnetp->max_tx_desc;
2525 	}
2526 
2527 	dnetp->transmitted_desc = index;
2528 }
2529 
2530 /*
2531  * Receive buffer allocation/freeing routines.
2532  *
2533  * There is a common pool of receive buffers shared by all dnet instances.
2534  *
2535  * XXX NEEDSWORK
2536  *
2537  * We arbitrarily allocate twice as many receive buffers as
2538  * receive descriptors because we use the buffers for streams
2539  * messages to pass the packets up the stream.  We should
2540  * instead have initialized constants reflecting
2541  * MAX_RX_BUF_2104x and MAX_RX_BUF_2114x, and we should also
2542  * probably have a total maximum for the free pool, so that we
2543  * don't get out of hand when someone puts in an 8-port board.
2544  * The maximum for the entire pool should be the total number
2545  * of descriptors for all attached instances together, plus the
2546  * total maximum for the free pool.  This maximum would only be
2547  * reached after some number of instances allocate buffers:
2548  * each instance would add (max_rx_buf-max_rx_desc) to the free
2549  * pool.
2550  */
2551 
2552 static struct rbuf_list *rbuf_usedlist_head;
2553 static struct rbuf_list *rbuf_freelist_head;
2554 static struct rbuf_list *rbuf_usedlist_end;	/* last buffer allocated */
2555 
2556 static int rbuf_freebufs;	/* no. of free buffers in the pool */
2557 static int rbuf_pool_size;	/* total no. of buffers in the pool */
2558 
2559 /* initialize/add 'nbufs' buffers to the rbuf pool */
2560 /* ARGSUSED */
2561 static int
2562 dnet_rbuf_init(dev_info_t *dip, int nbufs)
2563 {
2564 	int i;
2565 	struct rbuf_list *rp;
2566 	ddi_dma_cookie_t cookie;
2567 	uint_t ncookies;
2568 	size_t len;
2569 
2570 	mutex_enter(&dnet_rbuf_lock);
2571 
2572 	/* allocate buffers and add them to the pool */
2573 	for (i = 0; i < nbufs; i++) {
2574 		/* allocate rbuf_list element */
2575 		rp = kmem_zalloc(sizeof (struct rbuf_list), KM_SLEEP);
2576 		if (ddi_dma_alloc_handle(dip, &dma_attr_rb, DDI_DMA_SLEEP,
2577 		    0, &rp->rbuf_dmahdl) != DDI_SUCCESS)
2578 			goto fail_kfree;
2579 
2580 		/* allocate dma memory for the buffer */
2581 		if (ddi_dma_mem_alloc(rp->rbuf_dmahdl, rx_buf_size, &accattr,
2582 		    DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
2583 		    &rp->rbuf_vaddr, &len,
2584 		    &rp->rbuf_acchdl) != DDI_SUCCESS)
2585 			goto fail_freehdl;
2586 
2587 		if (ddi_dma_addr_bind_handle(rp->rbuf_dmahdl, NULL,
2588 		    rp->rbuf_vaddr, len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
2589 		    DDI_DMA_SLEEP, NULL, &cookie,
2590 		    &ncookies) != DDI_DMA_MAPPED)
2591 			goto fail_free;
2592 
2593 		if (ncookies > 2)
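		/*
		 * The buffer is expected to map to at most two DMA cookies;
		 * anything more is rejected.  Record the physical start and
		 * end addresses so the descriptor setup can split a buffer
		 * that crosses a page boundary.
		 */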
2594 			goto fail_unbind;
2595 		if (ncookies == 1) {
2596 			rp->rbuf_endpaddr =
2597 			    cookie.dmac_address + rx_buf_size - 1;
2598 		} else {
2599 			ddi_dma_nextcookie(rp->rbuf_dmahdl, &cookie);
2600 			rp->rbuf_endpaddr =
2601 			    cookie.dmac_address + cookie.dmac_size - 1;
2602 		}
2603 		rp->rbuf_paddr = cookie.dmac_address;
2604 
2605 		rp->rbuf_next = rbuf_freelist_head;
2606 		rbuf_freelist_head = rp;
2607 		rbuf_pool_size++;
2608 		rbuf_freebufs++;
2609 	}
2610 
2611 	mutex_exit(&dnet_rbuf_lock);
2612 	return (0);
2613 fail_unbind:
2614 	(void) ddi_dma_unbind_handle(rp->rbuf_dmahdl);
2615 fail_free:
2616 	ddi_dma_mem_free(&rp->rbuf_acchdl);
2617 fail_freehdl:
2618 	ddi_dma_free_handle(&rp->rbuf_dmahdl);
2619 fail_kfree:
2620 	kmem_free(rp, sizeof (struct rbuf_list));
2621 
2622 	mutex_exit(&dnet_rbuf_lock);
2623 	return (-1);
2624 }
2625 
2626 /*
2627  * Try to free up all the rbufs in the pool. Returns 0 if it frees up all
2628  * buffers. The buffers in the used list are considered busy so these
2629  * buffers are not freed.
2630  */
2631 static int
2632 dnet_rbuf_destroy()
2633 {
2634 	struct rbuf_list *rp, *next;
2635 
2636 	mutex_enter(&dnet_rbuf_lock);
2637 
2638 	for (rp = rbuf_freelist_head; rp; rp = next) {
2639 		next = rp->rbuf_next;
2640 		(void) ddi_dma_unbind_handle(rp->rbuf_dmahdl);
2641 		ddi_dma_mem_free(&rp->rbuf_acchdl);
2642 		kmem_free(rp, sizeof (struct rbuf_list));
2643 		rbuf_pool_size--;
2644 		rbuf_freebufs--;
2645 	}
2646 	rbuf_freelist_head = NULL;
2647 
2648 	if (rbuf_pool_size) { /* pool is still not empty */
2649 		mutex_exit(&dnet_rbuf_lock);
2650 		return (-1);
2651 	}
2652 	mutex_exit(&dnet_rbuf_lock);
2653 	return (0);
2654 }
2655 static struct rbuf_list *
2656 dnet_rbuf_alloc(dev_info_t *dip, int cansleep)
2657 {
2658 	struct rbuf_list *rp;
2659 	size_t len;
2660 	ddi_dma_cookie_t cookie;
2661 	uint_t ncookies;
2662 
2663 	mutex_enter(&dnet_rbuf_lock);
2664 
2665 	if (rbuf_freelist_head == NULL) {
2666 
2667 		if (!cansleep) {
2668 			mutex_exit(&dnet_rbuf_lock);
2669 			return (NULL);
2670 		}
2671 
2672 		/* allocate rbuf_list element */
2673 		rp = kmem_zalloc(sizeof (struct rbuf_list), KM_SLEEP);
2674 		if (ddi_dma_alloc_handle(dip, &dma_attr_rb, DDI_DMA_SLEEP,
2675 		    0, &rp->rbuf_dmahdl) != DDI_SUCCESS)
2676 			goto fail_kfree;
2677 
2678 		/* allocate dma memory for the buffer */
2679 		if (ddi_dma_mem_alloc(rp->rbuf_dmahdl, rx_buf_size, &accattr,
2680 		    DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
2681 		    &rp->rbuf_vaddr, &len,
2682 		    &rp->rbuf_acchdl) != DDI_SUCCESS)
2683 			goto fail_freehdl;
2684 
2685 		if (ddi_dma_addr_bind_handle(rp->rbuf_dmahdl, NULL,
2686 		    rp->rbuf_vaddr, len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
2687 		    DDI_DMA_SLEEP, NULL, &cookie,
2688 		    &ncookies) != DDI_DMA_MAPPED)
2689 			goto fail_free;
2690 
2691 		if (ncookies > 2)
2692 			goto fail_unbind;
2693 		if (ncookies == 1) {
2694 			rp->rbuf_endpaddr =
2695 			    cookie.dmac_address + rx_buf_size - 1;
2696 		} else {
2697 			ddi_dma_nextcookie(rp->rbuf_dmahdl, &cookie);
2698 			rp->rbuf_endpaddr =
2699 			    cookie.dmac_address + cookie.dmac_size - 1;
2700 		}
2701 		rp->rbuf_paddr = cookie.dmac_address;
2702 
2703 		rbuf_freelist_head = rp;
2704 		rbuf_pool_size++;
2705 		rbuf_freebufs++;
2706 	}
2707 
2708 	/* take the buffer from the head of the free list */
2709 	rp = rbuf_freelist_head;
2710 	rbuf_freelist_head = rbuf_freelist_head->rbuf_next;
2711 
2712 	/* update the used list; put the entry at the end */
2713 	if (rbuf_usedlist_head == NULL)
2714 		rbuf_usedlist_head = rp;
2715 	else
2716 		rbuf_usedlist_end->rbuf_next = rp;
2717 	rp->rbuf_next = NULL;
2718 	rbuf_usedlist_end = rp;
2719 	rbuf_freebufs--;
2720 
2721 	mutex_exit(&dnet_rbuf_lock);
2722 
2723 	return (rp);
2724 fail_unbind:
2725 	(void) ddi_dma_unbind_handle(rp->rbuf_dmahdl);
2726 fail_free:
2727 	ddi_dma_mem_free(&rp->rbuf_acchdl);
2728 fail_freehdl:
2729 	ddi_dma_free_handle(&rp->rbuf_dmahdl);
2730 fail_kfree:
2731 	kmem_free(rp, sizeof (struct rbuf_list));
2732 	mutex_exit(&dnet_rbuf_lock);
2733 	return (NULL);
2734 }
2735 
2736 static void
2737 dnet_rbuf_free(caddr_t vaddr)
2738 {
2739 	struct rbuf_list *rp, *prev;
2740 
2741 	ASSERT(vaddr != NULL);
2742 	ASSERT(rbuf_usedlist_head != NULL);
2743 
2744 	mutex_enter(&dnet_rbuf_lock);
2745 
2746 	/* find the entry in the used list */
2747 	for (prev = rp = rbuf_usedlist_head; rp; rp = rp->rbuf_next) {
2748 		if (rp->rbuf_vaddr == vaddr)
2749 			break;
2750 		prev = rp;
2751 	}
2752 
2753 	if (rp == NULL) {
2754 		cmn_err(CE_WARN, "DNET: rbuf_free: bad addr 0x%p",
2755 		    (void *)vaddr);
2756 		mutex_exit(&dnet_rbuf_lock);
2757 		return;
2758 	}
2759 
2760 	/* update the used list and put the buffer back in the free list */
2761 	if (rbuf_usedlist_head != rp) {
2762 		prev->rbuf_next = rp->rbuf_next;
2763 		if (rbuf_usedlist_end == rp)
2764 			rbuf_usedlist_end = prev;
2765 	} else {
2766 		rbuf_usedlist_head = rp->rbuf_next;
2767 		if (rbuf_usedlist_end == rp)
2768 			rbuf_usedlist_end = NULL;
2769 	}
2770 	rp->rbuf_next = rbuf_freelist_head;
2771 	rbuf_freelist_head = rp;
2772 	rbuf_freebufs++;
2773 
2774 	mutex_exit(&dnet_rbuf_lock);
2775 }
2776 
2777 /*
2778  * Free the receive buffer used in a stream's message block allocated
2779  * thru desballoc().
2780  */
2781 static void
2782 dnet_freemsg_buf(struct free_ptr *frp)
2783 {
2784 	dnet_rbuf_free((caddr_t)frp->buf); /* buffer goes back to the pool */
2785 	kmem_free(frp, sizeof (*frp)); /* free up the free_rtn structure */
2786 }
2787 
2788 /*
2789  *	========== SROM Read Routines ==========
2790  */
2791 
2792 /*
2793  * The following code gets the SROM information, either by reading it
2794  * from the device or, failing that, by reading a property.
2795  */
2796 static int
2797 dnet_read_srom(dev_info_t *devinfo, int board_type, ddi_acc_handle_t io_handle,
2798     caddr_t io_reg, uchar_t *vi, int maxlen)
2799 {
2800 	int all_ones, zerocheck, i;
2801 
2802 	/*
2803 	 * Load SROM into vendor_info
2804 	 */
2805 	if (board_type == DEVICE_ID_21040)
2806 		dnet_read21040addr(devinfo, io_handle, io_reg, vi, &maxlen);
2807 	else
2808 		/* 21041/21140 serial rom */
2809 		dnet_read21140srom(io_handle, io_reg, vi, maxlen);
2810 	/*
2811 	 * If the dumpsrom property is present in the conf file, print
2812 	 * the contents of the SROM to the console
2813 	 */
2814 	if (ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
2815 	    "dumpsrom", 0))
2816 		dnet_dumpbin("SROM", vi, 1, maxlen);
2817 
2818 	for (zerocheck = i = 0, all_ones = 0xff; i < maxlen; i++) {
2819 		zerocheck |= vi[i];
2820 		all_ones &= vi[i];
2821 	}
2822 	if (zerocheck == 0 || all_ones == 0xff) {
2823 		return (get_alternative_srom_image(devinfo, vi, maxlen));
2824 	} else {
2825 #ifdef BUG_4010796
2826 		set_alternative_srom_image(devinfo, vi, maxlen);
2827 #endif
2828 		return (0);	/* Primary */
2829 	}
2830 }
2831 
2832 /*
2833  * The function reads the ethernet address of the 21040 adapter
2834  */
2835 static void
2836 dnet_read21040addr(dev_info_t *dip, ddi_acc_handle_t io_handle, caddr_t io_reg,
2837     uchar_t *addr, int *len)
2838 {
2839 	uint32_t	val;
2840 	int		i;
2841 
2842 	/* No point reading more than the ethernet address */
2843 	*len = ddi_getprop(DDI_DEV_T_ANY, dip,
2844 	    DDI_PROP_DONTPASS, macoffset_propname, 0) + ETHERADDRL;
2845 
2846 	/* Reset ROM pointer */
2847 	ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 0);
2848 	for (i = 0; i < *len; i++) {
2849 		do {
2850 			val = ddi_get32(io_handle,
2851 			    REG32(io_reg, ETHER_ROM_REG));
2852 		} while (val & 0x80000000);
2853 		addr[i] = val & 0xFF;
2854 	}
2855 }
2856 
2857 #define	drv_nsecwait(x)	drv_usecwait(((x)+999)/1000) /* XXX */
2858 
2859 /*
2860  * The function reads the SROM of the 21140 adapter
2861  */
2862 static void
2863 dnet_read21140srom(ddi_acc_handle_t io_handle, caddr_t io_reg, uchar_t *addr,
2864     int maxlen)
2865 {
2866 	uint32_t 	i, j;
2867 	uint32_t	dout;
2868 	uint16_t	word;
2869 	uint8_t		rom_addr;
2870 	uint8_t		bit;
2871 
2872 
2873 	rom_addr = 0;
2874 	for (i = 0; i <	maxlen; i += 2) {
2875 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2876 		    READ_OP | SEL_ROM);
2877 		drv_nsecwait(30);
2878 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2879 		    READ_OP | SEL_ROM | SEL_CHIP);
2880 		drv_nsecwait(50);
2881 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2882 		    READ_OP | SEL_ROM | SEL_CHIP | SEL_CLK);
2883 		drv_nsecwait(250);
2884 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2885 		    READ_OP | SEL_ROM | SEL_CHIP);
2886 		drv_nsecwait(100);
2887 
2888 		/* command */
2889 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2890 		    READ_OP | SEL_ROM | SEL_CHIP | DATA_IN);
2891 		drv_nsecwait(150);
2892 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2893 		    READ_OP | SEL_ROM | SEL_CHIP | DATA_IN | SEL_CLK);
2894 		drv_nsecwait(250);
2895 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2896 		    READ_OP | SEL_ROM | SEL_CHIP | DATA_IN);
2897 		drv_nsecwait(250);
2898 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2899 		    READ_OP | SEL_ROM | SEL_CHIP | DATA_IN | SEL_CLK);
2900 		drv_nsecwait(250);
2901 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2902 		    READ_OP | SEL_ROM | SEL_CHIP | DATA_IN);
2903 		drv_nsecwait(100);
2904 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2905 		    READ_OP | SEL_ROM | SEL_CHIP);
2906 		drv_nsecwait(150);
2907 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2908 		    READ_OP | SEL_ROM | SEL_CHIP | SEL_CLK);
2909 		drv_nsecwait(250);
2910 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2911 		    READ_OP | SEL_ROM | SEL_CHIP);
2912 		drv_nsecwait(100);
2913 
2914 		/* Address */
2915 		for (j = HIGH_ADDRESS_BIT; j >= 1; j >>= 1) {
2916 			bit = (rom_addr & j) ? DATA_IN : 0;
2917 			ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2918 			    READ_OP | SEL_ROM | SEL_CHIP | bit);
2919 			drv_nsecwait(150);
2920 			ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2921 			    READ_OP | SEL_ROM | SEL_CHIP | bit | SEL_CLK);
2922 			drv_nsecwait(250);
2923 			ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2924 			    READ_OP | SEL_ROM | SEL_CHIP | bit);
2925 			drv_nsecwait(100);
2926 		}
2927 		drv_nsecwait(150);
2928 
2929 		/* Data */
2930 		word = 0;
2931 		for (j = 0x8000; j >= 1; j >>= 1) {
2932 			ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2933 			    READ_OP | SEL_ROM | SEL_CHIP | SEL_CLK);
2934 			drv_nsecwait(100);
2935 			dout = ddi_get32(io_handle,
2936 			    REG32(io_reg, ETHER_ROM_REG));
2937 			drv_nsecwait(150);
2938 			if (dout & DATA_OUT)
2939 				word |= j;
2940 			ddi_put32(io_handle,
2941 			    REG32(io_reg, ETHER_ROM_REG),
2942 			    READ_OP | SEL_ROM | SEL_CHIP);
2943 			drv_nsecwait(250);
2944 		}
2945 		addr[i] = (word & 0x0000FF);
2946 		addr[i + 1] = (word >> 8);
2947 		rom_addr++;
2948 		ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG),
2949 		    READ_OP | SEL_ROM);
2950 		drv_nsecwait(100);
2951 	}
2952 }
2953 
2954 
2955 /*
2956  * XXX NEEDSWORK
2957  *
2958  * Some lame multiport cards have only one SROM, which can be accessed
2959  * only from the "first" 21x4x chip, whichever that one is.  If we can't
2960  * get at our SROM, we look for its contents in a property instead, which
2961  * we rely on the bootstrap to have properly set.
2962  * #ifdef BUG_4010796
2963  * We also have a hack to try to set it ourselves, when the "first" port
2964  * attaches, if it has not already been properly set.  However, this method
2965  * is not reliable, since it makes the unwarranted assumption that the
2966  * "first" port will attach first.
2967  * #endif
2968  */
2969 
2970 static int
2971 get_alternative_srom_image(dev_info_t *devinfo, uchar_t *vi, int len)
2972 {
2973 	int	l = len;
2974 
2975 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
2976 	    "DNET_SROM", (caddr_t)vi, &len) != DDI_PROP_SUCCESS &&
2977 	    (len = l) && ddi_getlongprop_buf(DDI_DEV_T_ANY,
2978 	    ddi_get_parent(devinfo), DDI_PROP_DONTPASS, "DNET_SROM",
2979 	    (caddr_t)vi, &len) != DDI_PROP_SUCCESS)
2980 		return (-1);	/* Can't find it! */
2981 
2982 	/*
2983 	 * The return value from this routine specifies which port number
2984 	 * we are.  The primary port is denoted port 0.  On a QUAD card we
2985 	 * should return 1, 2, and 3 from this routine.  The return value
2986 	 * is used to modify the ethernet address from the SROM data.
2987 	 */
2988 
2989 #ifdef BUG_4010796
2990 	{
2991 	/*
2992 	 * For the present, we remember the device number of our primary
2993 	 * sibling and hope we and our other siblings are consecutively
2994 	 * numbered up from there.  In the future perhaps the bootstrap
2995 	 * will pass us the necessary information telling us which physical
2996 	 * port we really are.
2997 	 */
2998 	pci_regspec_t	*assignp;
2999 	int		assign_len;
3000 	int 		devnum;
3001 	int		primary_devnum;
3002 
3003 	primary_devnum = ddi_getprop(DDI_DEV_T_ANY, devinfo, 0,
3004 	    "DNET_DEVNUM", -1);
3005 	if (primary_devnum == -1)
3006 		return (1);	/* XXX NEEDSWORK -- We have no better idea */
3007 
3008 	if ((ddi_getlongprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
3009 	    "assigned-addresses", (caddr_t)&assignp,
3010 	    &assign_len)) != DDI_PROP_SUCCESS)
3011 		return (1);	/* XXX NEEDSWORK -- We have no better idea */
3012 
3013 	devnum = PCI_REG_DEV_G(assignp->pci_phys_hi);
3014 	kmem_free(assignp, assign_len);
3015 	return (devnum - primary_devnum);
3016 	}
3017 #else
3018 	return (1);	/* XXX NEEDSWORK -- We have no better idea */
3019 #endif
3020 }
3021 
3022 
3023 #ifdef BUG_4010796
3024 static void
3025 set_alternative_srom_image(dev_info_t *devinfo, uchar_t *vi, int len)
3026 {
3027 	int 		proplen;
3028 	pci_regspec_t	*assignp;
3029 	int		assign_len;
3030 	int 		devnum;
3031 
3032 	if (ddi_getproplen(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
3033 	    "DNET_SROM", &proplen) == DDI_PROP_SUCCESS ||
3034 	    ddi_getproplen(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
3035 	    DDI_PROP_DONTPASS, "DNET_SROM", &proplen) == DDI_PROP_SUCCESS)
3036 		return;		/* Already done! */
3037 
3038 	/* function return value ignored */
3039 	(void) ddi_prop_update_byte_array(DDI_DEV_T_NONE,
3040 	    ddi_get_parent(devinfo), "DNET_SROM", (uchar_t *)vi, len);
3041 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devinfo,
3042 	    "DNET_HACK", "hack");
3043 
3044 	if ((ddi_getlongprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
3045 	    "assigned-addresses", (caddr_t)&assignp,
3046 	    &assign_len)) == DDI_PROP_SUCCESS) {
3047 		devnum = PCI_REG_DEV_G(assignp->pci_phys_hi);
3048 		kmem_free(assignp, assign_len);
3049 		/* function return value ignored */
3050 		(void) ddi_prop_update_int(DDI_DEV_T_NONE,
3051 		    ddi_get_parent(devinfo), "DNET_DEVNUM", devnum);
3052 	}
3053 }
3054 #endif
3055 
3056 /*
3057  *	========== SROM Parsing Routines ==========
3058  */
3059 
3060 static int
3061 check_srom_valid(uchar_t *vi)
3062 {
3063 	int		word, bit;
3064 	uint8_t		crc;
3065 	uint16_t	*wvi;		/* word16 pointer to vendor info */
3066 	uint16_t	bitval;
3067 
3068 	/* verify that the number of controllers on the card is within range */
3069 	if (vi[SROM_ADAPTER_CNT] < 1 || vi[SROM_ADAPTER_CNT] > MAX_ADAPTERS)
3070 		return (0);
3071 
3072 	/*
3073 	 * for version 1 and 3 SROMs the driver never checked the ID block CRC
3074 	 * value; that can't be changed without retesting every supported card
3075 	 *
3076 	 * however version 4 of the SROM can have this test applied
3077 	 * without fear of breaking something that used to work.
3078 	 * the CRC algorithm is taken from the Intel document
3079 	 *	"21x4 Serial ROM Format"
3080 	 *	version 4.09
3081 	 *	3-Mar-1999
3082 	 */
3083 
3084 	switch (vi[SROM_VERSION]) {
3085 	case 1:
3086 		/* fallthru */
3087 	case 3:
3088 		return (vi[SROM_MBZ] == 0 &&	/* must be zero */
3089 		    vi[SROM_MBZ2] == 0 &&	/* must be zero */
3090 		    vi[SROM_MBZ3] == 0);	/* must be zero */
3091 
3092 	case 4:
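		/*
		 * Version 4 SROMs carry an ID-block CRC (see the Intel
		 * document cited above).  Compute the CRC-8 (polynomial
		 * 0x07) bit-serially over the leading words and compare it
		 * with the stored CRC byte at offset 16.
		 */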
3093 		wvi = (uint16_t *)vi;
3094 		crc = 0xff;
3095 		for (word = 0; word < 9; word++)
3096 			for (bit = 15; bit >= 0; bit--) {
3097 				if (word == 8 && bit == 7)
3098 					return (crc == vi[16]);
3099 				bitval =
3100 				    ((wvi[word] >> bit) & 1) ^ ((crc >> 7) & 1);
3101 				crc <<= 1;
3102 				if (bitval == 1) {
3103 					crc ^= 7;
3104 				}
3105 			}
3106 
3107 	default:
3108 		return (0);
3109 	}
3110 }
3111 
3112 /*
3113  *	========== Active Media Determination Routines ==========
3114  */
3115 
3116 /* This routine is also called for V3 Compact and extended type 0 SROMs */
3117 static int
3118 is_fdmedia(int media)
3119 {
3120 	if (media == MEDIA_TP_FD || media == MEDIA_SYM_SCR_FD)
3121 		return (1);
3122 	else
3123 		return (0);
3124 }
3125 
3126 /*
3127  * "Linkset" is used to merge media that use the same link test check. So,
3128  * if the TP link is added to the linkset, so is the TP Full duplex link.
3129  * Used to avoid checking the same link status twice.
3130  */
3131 static void
3132 linkset_add(uint32_t *set, int media)
3133 {
3134 	if (media == MEDIA_TP_FD || media == MEDIA_TP)
3135 		*set |= (1UL<<MEDIA_TP_FD) | (1UL<<MEDIA_TP);
3136 	else if (media == MEDIA_SYM_SCR_FD || media == MEDIA_SYM_SCR)
3137 		*set |= (1UL<<MEDIA_SYM_SCR_FD) | (1UL<<MEDIA_SYM_SCR);
3138 	else *set |= 1UL<<media;
3139 }
3140 static int
3141 linkset_isset(uint32_t linkset, int media)
3142 {
3143 	return (((1UL<<media)  & linkset) ? 1:0);
3144 }
3145 
3146 /*
3147  * The following code detects which Media is connected for 21041/21140
3148  * Expect to change this code to support new 21140 variants.
3149  * find_active_media() - called with intrlock held.
3150  */
3151 static void
3152 find_active_media(struct dnetinstance *dnetp)
3153 {
3154 	int i;
3155 	media_block_t *block;
3156 	media_block_t *best_allowed = NULL;
3157 	media_block_t *hd_found = NULL;
3158 	media_block_t *fd_found = NULL;
3159 	LEAF_FORMAT *leaf = &dnetp->sr.leaf[dnetp->leaf];
3160 	uint32_t checked = 0, links_up = 0;
3161 
3162 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3163 
3164 	dnetp->selected_media_block = leaf->default_block;
3165 
3166 	if (dnetp->phyaddr != -1) {
3167 		dnetp->selected_media_block = leaf->mii_block;
3168 		setup_block(dnetp);
3169 
3170 		if (ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
3171 		    DDI_PROP_DONTPASS, "portmon", 1)) {
3172 			/* XXX return value ignored */
3173 			(void) mii_start_portmon(dnetp->mii, dnet_mii_link_cb,
3174 			    &dnetp->intrlock);
3175 			/*
3176 			 * If the port monitor detects the link is already
3177 			 * up, there is no point going through the rest of the
3178 			 * link sense
3179 			 */
3180 			if (dnetp->mii_up) {
3181 				return;
3182 			}
3183 		}
3184 	}
3185 
3186 	/*
3187 	 * Media is searched for in order of precedence. The DEC SROM spec
3188 	 * tells us that the first media entry in the SROM is the lowest
3189 	 * precedence and should be checked last. This is why we go to the last
3190 	 * Media block and work back to the beginning.
3191 	 *
3192 	 * However, some older SROMs (Cogent EM110's etc.) have this the wrong
3193 	 * way around. As a result, following the SROM spec would result in a
3194 	 * 10 link being chosen over a 100 link if both media are available.
3195 	 * So we continue trying the media until we have at least tried the
3196 	 * DEFAULT media.
3197 	 */
3198 
3199 	/* Search for an active medium, and select it */
3200 	for (block = leaf->block + leaf->block_count  - 1;
3201 	    block >= leaf->block; block--) {
3202 		int media = block->media_code;
3203 
3204 		/* User settings disallow selection of this block */
3205 		if (dnetp->disallowed_media & (1UL<<media))
3206 			continue;
3207 
3208 		/* We may not be able to pick the default */
3209 		if (best_allowed == NULL || block == leaf->default_block)
3210 			best_allowed = block;
3211 #ifdef DEBUG
3212 		if (dnetdebug & DNETSENSE)
3213 			cmn_err(CE_NOTE, "Testing %s medium (block type %d)",
3214 			    media_str[media], block->type);
3215 #endif
3216 
3217 		dnetp->selected_media_block = block;
3218 		switch (block->type) {
3219 
3220 		case 2: /* SIA Media block: Best we can do is send a packet */
3221 			setup_block(dnetp);
3222 			if (send_test_packet(dnetp)) {
3223 				if (!is_fdmedia(media))
3224 					return;
3225 				if (!fd_found)
3226 					fd_found = block;
3227 			}
3228 			break;
3229 
3230 		/* SYM/SCR or TP block: Use the link-sense bits */
3231 		case 0:
3232 			if (!linkset_isset(checked, media)) {
3233 				linkset_add(&checked, media);
3234 				if (((media == MEDIA_BNC ||
3235 				    media == MEDIA_AUI) &&
3236 				    send_test_packet(dnetp)) ||
3237 				    dnet_link_sense(dnetp))
3238 					linkset_add(&links_up, media);
3239 			}
3240 
3241 			if (linkset_isset(links_up, media)) {
3242 				/*
3243 				 * Half Duplex is *always* the favoured media.
3244 				 * Full Duplex can be set and forced via the
3245 				 * conf file.
3246 				 */
3247 				if (!is_fdmedia(media) &&
3248 				    dnetp->selected_media_block ==
3249 				    leaf->default_block) {
3250 					/*
3251 					 * Cogent cards have the media in
3252 					 * opposite order to the spec.,
3253 					 * this code forces the media test to
3254 					 * keep going until the default media
3255 					 * is tested.
3256 					 *
3257 					 * In the Cogent case the order is
3258 					 * 10, 10FD, 100FD, 100; 100 is the
3259 					 * default, but 10 may be detected
3260 					 * and chosen first, so we keep
3261 					 * testing until 100 has been tried.
3262 					 */
3263 					setup_block(dnetp);
3264 					return;
3265 				} else if (!is_fdmedia(media)) {
3266 					/*
3267 					 * This allows all the others to work
3268 					 * properly by remembering the media
3269 					 * that works and not defaulting to
3270 					 * a FD link.
3271 					 */
3272 						if (hd_found == NULL)
3273 							hd_found = block;
3274 				} else if (fd_found == NULL) {
3275 					/*
3276 					 * Nothing has been found so far;
3277 					 * this FD medium works, so remember
3278 					 * it and use it if no other medium
3279 					 * is detected.
3280 					 */
3281 					fd_found = block;
3282 				}
3283 			}
3284 			break;
3285 
3286 		/*
3287 		 * MII block: May take up to a second or so to settle if
3288 		 * setup causes a PHY reset
3289 		 */
3290 		case 1: case 3:
3291 			setup_block(dnetp);
3292 			for (i = 0; ; i++) {
3293 				if (mii_linkup(dnetp->mii, dnetp->phyaddr)) {
3294 					/* XXX function return value ignored */
3295 					(void) mii_getspeed(dnetp->mii,
3296 					    dnetp->phyaddr,
3297 					    &dnetp->mii_speed,
3298 					    &dnetp->mii_duplex);
3299 					dnetp->mii_up = 1;
3300 					leaf->mii_block = block;
3301 					return;
3302 				}
3303 				if (i == 10)
3304 					break;
3305 				delay(drv_usectohz(150000));
3306 			}
3307 			dnetp->mii_up = 0;
3308 			break;
3309 		}
3310 	} /* for loop */
3311 	if (hd_found) {
3312 		dnetp->selected_media_block = hd_found;
3313 	} else if (fd_found) {
3314 		dnetp->selected_media_block = fd_found;
3315 	} else {
3316 		if (best_allowed == NULL)
3317 			best_allowed = leaf->default_block;
3318 		dnetp->selected_media_block = best_allowed;
3319 		cmn_err(CE_WARN, "!dnet: Default media selected\n");
3320 	}
3321 	setup_block(dnetp);
3322 }
3323 
3324 /*
3325  * Do anything necessary to select the selected_media_block.
3326  * setup_block() - called with intrlock held.
3327  */
3328 static void
3329 setup_block(struct dnetinstance *dnetp)
3330 {
3331 	dnet_reset_board(dnetp);
3332 	dnet_init_board(dnetp);
3333 	/* XXX function return value ignored */
3334 	(void) dnet_start(dnetp);
3335 }
3336 
3337 /* dnet_link_sense() - called with intrlock held */
3338 static int
3339 dnet_link_sense(struct dnetinstance *dnetp)
3340 {
3341 	/*
3342 	 * This routine makes use of the command word from the srom config.
3343 	 * Details of the auto-sensing information contained in this can
3344 	 * be found in the "Digital Semiconductor 21X4 Serial ROM Format v3.03"
3345 	 * spec. Section 4.3.2.1, and 4.5.2.1.3
3346 	 */
3347 	media_block_t *block = dnetp->selected_media_block;
3348 	uint32_t link, status, mask, polarity;
3349 	int settletime, stabletime, waittime, upsamples;
3350 	int delay_100, delay_10;
3351 
3352 
3353 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3354 	/* Don't autosense if the medium does not support it */
3355 	if (block->command & (1 << 15)) {
3356 		/* This should be the default block */
3357 		if (block->command & (1UL<<14))
3358 			dnetp->sr.leaf[dnetp->leaf].default_block = block;
3359 		return (0);
3360 	}
3361 
3362 	delay_100 = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
3363 	    DDI_PROP_DONTPASS, "autosense-delay-100", 2000);
3364 
3365 	delay_10 = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
3366 	    DDI_PROP_DONTPASS, "autosense-delay-10", 400);
3367 
3368 	/*
3369 	 * Scrambler may need to be disabled for link sensing
3370 	 * to work
3371 	 */
3372 	dnetp->disable_scrambler = 1;
3373 	setup_block(dnetp);
3374 	dnetp->disable_scrambler = 0;
3375 
3376 	if (block->media_code == MEDIA_TP || block->media_code == MEDIA_TP_FD)
3377 		settletime = delay_10;
3378 	else
3379 		settletime = delay_100;
3380 	stabletime = settletime / 4;
3381 
3382 	mask = 1 << ((block->command & CMD_MEDIABIT_MASK) >> 1);
3383 	polarity = block->command & CMD_POL ? 0xffffffff : 0;
3384 
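	/*
	 * Sample the medium's link-status bit in the GP register every
	 * stabletime/8 milliseconds; the link is declared up only after
	 * eight consecutive good samples within the settle window.
	 */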
3385 	for (waittime = 0, upsamples = 0;
3386 	    waittime <= settletime + stabletime && upsamples < 8;
3387 	    waittime += stabletime/8) {
3388 		delay(drv_usectohz(stabletime*1000 / 8));
3389 		status = read_gpr(dnetp);
3390 		link = (status^polarity) & mask;
3391 		if (link)
3392 			upsamples++;
3393 		else
3394 			upsamples = 0;
3395 	}
3396 #ifdef DNETDEBUG
3397 	if (dnetdebug & DNETSENSE)
3398 		cmn_err(CE_NOTE, "%s upsamples:%d stat:%x polarity:%x "
3399 		    "mask:%x link:%x",
3400 		    upsamples == 8 ? "UP":"DOWN",
3401 		    upsamples, status, polarity, mask, link);
3402 #endif
3403 	if (upsamples == 8)
3404 		return (1);
3405 	return (0);
3406 }
3407 
3408 static int
3409 send_test_packet(struct dnetinstance *dnetp)
3410 {
3411 	int packet_delay;
3412 	struct tx_desc_type *desc;
3413 	int bufindex;
3414 	int media_code = dnetp->selected_media_block->media_code;
3415 	uint32_t del;
3416 
3417 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3418 	/*
3419 	 * For a successful test packet, the card must have settled into
3420 	 * its current setting.  Almost all cards we've tested manage to
3421 	 * do this with all media within 50ms.  However, the SMC 8432
3422 	 * requires 300ms to settle into BNC mode.  We now only do this
3423 	 * from attach, and we do sleeping delay() instead of drv_usecwait()
3424 	 * so we hope this .2 second delay won't cause too much suffering.
3425 	 * ALSO: with an autonegotiating hub, an additional 1 second delay is
3426 	 * required. This is done if the media type is TP
3427 	 */
3428 	if (media_code == MEDIA_TP || media_code == MEDIA_TP_FD) {
3429 		packet_delay = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
3430 		    DDI_PROP_DONTPASS, "test_packet_delay_tp", 1300000);
3431 	} else {
3432 		packet_delay = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
3433 		    DDI_PROP_DONTPASS, "test_packet_delay", 300000);
3434 	}
3435 	delay(drv_usectohz(packet_delay));
3436 
3437 	desc = dnetp->tx_desc;
3438 
3439 	bufindex = dnetp->tx_current_desc;
3440 	if (alloc_descriptor(dnetp) == FAILURE) {
3441 		cmn_err(CE_WARN, "DNET: send_test_packet: alloc_descriptor"
3442 		    "failed");
3443 		return (0);
3444 	}
3445 
3446 	/*
3447 	 * use setup buffer as the buffer for the test packet
3448 	 * instead of allocating one.
3449 	 */
3450 
3451 	ASSERT(dnetp->setup_buf_vaddr != NULL);
3452 	/* Put something decent in dest address so we don't annoy other cards */
3453 	BCOPY((caddr_t)dnetp->curr_macaddr,
3454 	    (caddr_t)dnetp->setup_buf_vaddr, ETHERADDRL);
3455 	BCOPY((caddr_t)dnetp->curr_macaddr,
3456 	    (caddr_t)dnetp->setup_buf_vaddr+ETHERADDRL, ETHERADDRL);
3457 
3458 	desc[bufindex].buffer1 = dnetp->setup_buf_paddr;
3459 	desc[bufindex].desc1.buffer_size1 = SETUPBUF_SIZE;
3460 	desc[bufindex].buffer2 = (uint32_t)(0);
3461 	desc[bufindex].desc1.first_desc = 1;
3462 	desc[bufindex].desc1.last_desc = 1;
3463 	desc[bufindex].desc1.int_on_comp = 1;
3464 	desc[bufindex].desc0.own = 1;
3465 
3466 	ddi_put8(dnetp->io_handle, REG8(dnetp->io_reg, TX_POLL_REG),
3467 	    TX_POLL_DEMAND);
3468 
3469 	/*
3470 	 * Give enough time for the chip to transmit the packet
3471 	 */
3472 #if 1
3473 	del = 1000;
3474 	while (desc[bufindex].desc0.own && --del)
3475 		drv_usecwait(10);	/* quickly wait up to 10ms */
3476 	if (desc[bufindex].desc0.own)
3477 		delay(drv_usectohz(200000));	/* nicely wait a longer time */
3478 #else
3479 	del = 0x10000;
3480 	while (desc[bufindex].desc0.own && --del)
3481 		drv_usecwait(10);
3482 #endif
3483 
3484 #ifdef DNETDEBUG
3485 	if (dnetdebug & DNETSENSE)
3486 		cmn_err(CE_NOTE, "desc0 bits = %u, %u, %u, %u, %u, %u",
3487 		    desc[bufindex].desc0.own,
3488 		    desc[bufindex].desc0.err_summary,
3489 		    desc[bufindex].desc0.carrier_loss,
3490 		    desc[bufindex].desc0.no_carrier,
3491 		    desc[bufindex].desc0.late_collision,
3492 		    desc[bufindex].desc0.link_fail);
3493 #endif
3494 	if (desc[bufindex].desc0.own) /* it shouldn't take this long, error */
3495 		return (0);
3496 
3497 	return (!desc[bufindex].desc0.err_summary);
3498 }
3499 
3500 /* enable_interrupts - called with intrlock held */
3501 static void
3502 enable_interrupts(struct dnetinstance *dnetp)
3503 {
3504 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3505 	/* Don't enable interrupts if they have been forced off */
3506 	if (dnetp->interrupts_disabled)
3507 		return;
3508 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, INT_MASK_REG),
3509 	    ABNORMAL_INTR_MASK | NORMAL_INTR_MASK | SYSTEM_ERROR_MASK |
3510 	    (dnetp->timer.cb ? GPTIMER_INTR : 0) |
3511 	    RX_INTERRUPT_MASK |
3512 	    TX_INTERRUPT_MASK | TX_JABBER_MASK | TX_UNDERFLOW_MASK);
3513 }
3514 
3515 /*
3516  * Some older multiport cards are non-PCI compliant in their interrupt routing.
3517  * Second and subsequent devices are incorrectly configured by the BIOS
3518  * (either in their ILINE configuration or the MP Configuration Table for PC+MP
3519  * systems).
3520  * The hack stops registering the interrupt routine for the FIRST
3521  * device on the adapter, and registers its own. It builds up a table
3522  * of dnetp structures for each device, and the new interrupt routine
3523  * calls dnet_intr for each of them.
3524  * Known cards that suffer from this problem are:
3525  *	All Cogent multiport cards;
3526  * 	Znyx 314;
3527  *	Znyx 315.
3528  *
3529  * XXX NEEDSWORK -- see comments above get_alternative_srom_image(). This
3530  * hack relies on the fact that the offending cards will have only one SROM.
3531  * It uses this fact to identify devices that are on the same multiport
3532  * adapter, as opposed to multiple devices from the same vendor (as
3533  * indicated by "secondary")
3534  */
3535 static int
3536 dnet_hack_interrupts(struct dnetinstance *dnetp, int secondary)
3537 {
3538 	int i;
3539 	struct hackintr_inf *hackintr_inf;
3540 	dev_info_t *devinfo = dnetp->devinfo;
3541 	uint32_t oui = 0;	/* Organizationally Unique ID */
3542 
3543 	if (ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
3544 	    "no_INTA_workaround", 0) != 0)
3545 		return (0);
3546 
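	/* The OUI is the first three bytes of the vendor MAC address */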
3547 	for (i = 0; i < 3; i++)
3548 		oui = (oui << 8) | dnetp->vendor_addr[i];
3549 
3550 	/* Check whether or not we need to implement the hack */
3551 
3552 	switch (oui) {
3553 	case ZNYX_ETHER:
3554 		/* Znyx multiport 21040 cards <<==>> ZX314 or ZX315 */
3555 		if (dnetp->board_type != DEVICE_ID_21040)
3556 			return (0);
3557 		break;
3558 
3559 	case COGENT_ETHER:
3560 		/* All known Cogent multiport cards */
3561 		break;
3562 
3563 	case ADAPTEC_ETHER:
3564 		/* Adaptec multiport cards */
3565 		break;
3566 
3567 	default:
3568 		/* Other cards work correctly */
3569 		return (0);
3570 	}
3571 
3572 	/* card is (probably) non-PCI compliant in its interrupt routing */
3573 
3574 
3575 	if (!secondary) {
3576 
3577 		/*
3578 		 * If we have already registered a hacked interrupt, and
3579 		 * this is also a 'primary' adapter, then this is NOT part of
3580 		 * a multiport card, but a second card on the same PCI bus.
3581 		 * BUGID: 4057747
3582 		 */
3583 		if (ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
3584 		    DDI_PROP_DONTPASS, hackintr_propname, 0) != 0)
3585 			return (0);
3586 				/* ... Primary not part of a multiport device */
3587 
3588 #ifdef DNETDEBUG
3589 		if (dnetdebug & DNETTRACE)
3590 			cmn_err(CE_NOTE, "dnet: Implementing hardware "
3591 			    "interrupt flaw workaround");
3592 #endif
3593 		dnetp->hackintr_inf = hackintr_inf =
3594 		    kmem_zalloc(sizeof (struct hackintr_inf), KM_SLEEP);
3595 		if (hackintr_inf == NULL)
3596 			goto fail;
3597 
3598 		hackintr_inf->dnetps[0] = dnetp;
3599 		hackintr_inf->devinfo = devinfo;
3600 
3601 		/*
3602 		 * Add a property to allow successive attaches to find the
3603 		 * table
3604 		 */
3605 
3606 		if (ddi_prop_update_byte_array(DDI_DEV_T_NONE,
3607 		    ddi_get_parent(devinfo), hackintr_propname,
3608 		    (uchar_t *)&dnetp->hackintr_inf,
3609 		    sizeof (void *)) != DDI_PROP_SUCCESS)
3610 			goto fail;
3611 
3612 
3613 		/* Register our hacked interrupt routine */
3614 		if (ddi_add_intr(devinfo, 0, &dnetp->icookie, NULL,
3615 		    (uint_t (*)(char *))dnet_hack_intr,
3616 		    (caddr_t)hackintr_inf) != DDI_SUCCESS) {
3617 			/* XXX function return value ignored */
3618 			(void) ddi_prop_remove(DDI_DEV_T_NONE,
3619 			    ddi_get_parent(devinfo),
3620 			    hackintr_propname);
3621 			goto fail;
3622 		}
3623 
3624 		/*
3625 		 * Mutex required to ensure interrupt routine has completed
3626 		 * when detaching devices
3627 		 */
3628 		mutex_init(&hackintr_inf->lock, NULL, MUTEX_DRIVER,
3629 		    dnetp->icookie);
3630 
3631 		/* Stop GLD registering an interrupt */
3632 		return (-1);
3633 	} else {
3634 
3635 		/* Add the dnetp for this secondary device to the table */
3636 
3637 		hackintr_inf = (struct hackintr_inf *)(uintptr_t)
3638 		    ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
3639 		    DDI_PROP_DONTPASS, hackintr_propname, 0);
3640 
3641 		if (hackintr_inf == NULL)
3642 			goto fail;
3643 
3644 		/* Find an empty slot */
3645 		for (i = 0; i < MAX_INST; i++)
3646 			if (hackintr_inf->dnetps[i] == NULL)
3647 				break;
3648 
3649 		/* More than 8 ports on adapter ?! */
3650 		if (i == MAX_INST)
3651 			goto fail;
3652 
3653 		hackintr_inf->dnetps[i] = dnetp;
3654 
3655 		/*
3656 		 * Allow GLD to register a handler for this
3657 		 * device. If the card is actually broken, as we suspect, this
3658 		 * handler will never get called. However, by registering the
3659 		 * interrupt handler, we can cope gracefully with new multiport
3660 		 * Cogent cards that decide to fix the hardware problem
3661 		 */
3662 		return (0);
3663 	}
3664 
3665 fail:
3666 	cmn_err(CE_WARN, "dnet: Could not work around hardware interrupt"
3667 	    " routing problem");
3668 	return (0);
3669 }
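/*
 * Note (added): the primary port stores a pointer to the shared
 * hackintr_inf table in a byte-array property on the parent nexus node so
 * that sibling attaches can find it, and returns -1 so the caller skips
 * the normal interrupt registration.  Secondary ports simply add
 * themselves to the table and return 0.
 */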
3670 
3671 /*
3672  * Call dnet_intr for all adapters on a multiport card
3673  */
3674 static uint_t
3675 dnet_hack_intr(struct hackintr_inf *hackintr_inf)
3676 {
3677 	int i;
3678 	int claimed = DDI_INTR_UNCLAIMED;
3679 
3680 	/* Stop detaches while processing interrupts */
3681 	mutex_enter(&hackintr_inf->lock);
3682 
3683 	for (i = 0; i < MAX_INST; i++) {
3684 		if (hackintr_inf->dnetps[i] &&
3685 		    dnet_intr((caddr_t)hackintr_inf->dnetps[i]) ==
3686 		    DDI_INTR_CLAIMED) {
3687 			claimed = DDI_INTR_CLAIMED;
3688 		}
3689 	}
3690 	mutex_exit(&hackintr_inf->lock);
3691 	return (claimed);
3692 }
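/*
 * Note (added): the wrapper above claims the interrupt if any port on the
 * adapter claims it; DDI_INTR_UNCLAIMED is returned only when no port had
 * work pending, which keeps spurious-interrupt accounting sane for the
 * shared line.
 */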
3693 
3694 /*
3695  * This removes the detaching device from the table processed by the hacked
3696  * interrupt routine. Because the interrupts from all devices come in to the
3697  * same interrupt handler, ALL devices must stop interrupting once the
3698  * primary device detaches. This isn't a problem at present, because all
3699  * instances of a device are detached when the driver is unloaded.
3700  */
3701 static int
3702 dnet_detach_hacked_interrupt(dev_info_t *devinfo)
3703 {
3704 	int i;
3705 	struct hackintr_inf *hackintr_inf;
3706 	struct dnetinstance *altdnetp, *dnetp =
3707 	    ddi_get_driver_private(devinfo);
3708 
3709 	hackintr_inf = (struct hackintr_inf *)(uintptr_t)
3710 	    ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
3711 	    DDI_PROP_DONTPASS, hackintr_propname, 0);
3712 
3713 	/*
3714 	 * No hackintr_inf implies hack was not required or the primary has
3715 	 * detached, and our interrupts are already disabled
3716 	 */
3717 	if (!hackintr_inf) {
3718 		/* remove the interrupt for the non-hacked case */
3719 		ddi_remove_intr(devinfo, 0, dnetp->icookie);
3720 		return (DDI_SUCCESS);
3721 	}
3722 
3723 	/* Remove this device from the handled table */
3724 	mutex_enter(&hackintr_inf->lock);
3725 	for (i = 0; i < MAX_INST; i++) {
3726 		if (hackintr_inf->dnetps[i] == dnetp) {
3727 			hackintr_inf->dnetps[i] = NULL;
3728 			break;
3729 		}
3730 	}
3731 
3732 	mutex_exit(&hackintr_inf->lock);
3733 
3734 	/* Not the primary card, we are done */
3735 	if (devinfo != hackintr_inf->devinfo)
3736 		return (DDI_SUCCESS);
3737 
3738 	/*
3739 	 * This is the primary card. All remaining adapters on this device
3740 	 * must have their interrupts disabled before we remove the handler
3741 	 */
3742 	for (i = 0; i < MAX_INST; i++) {
3743 		if ((altdnetp = hackintr_inf->dnetps[i]) != NULL) {
3744 			altdnetp->interrupts_disabled = 1;
3745 			ddi_put32(altdnetp->io_handle,
3746 			    REG32(altdnetp->io_reg, INT_MASK_REG), 0);
3747 		}
3748 	}
3749 
3750 	/* It should now be safe to remove the interrupt handler */
3751 
3752 	ddi_remove_intr(devinfo, 0, dnetp->icookie);
3753 	mutex_destroy(&hackintr_inf->lock);
3754 	/* XXX function return value ignored */
3755 	(void) ddi_prop_remove(DDI_DEV_T_NONE, ddi_get_parent(devinfo),
3756 	    hackintr_propname);
3757 	kmem_free(hackintr_inf, sizeof (struct hackintr_inf));
3758 	return (DDI_SUCCESS);
3759 }
3760 
3761 /* do_phy() - called with intrlock held */
3762 static void
3763 do_phy(struct dnetinstance *dnetp)
3764 {
3765 	dev_info_t *dip;
3766 	LEAF_FORMAT *leaf = dnetp->sr.leaf + dnetp->leaf;
3767 	media_block_t *block;
3768 	int phy;
3769 
3770 	dip = dnetp->devinfo;
3771 
3772 	/*
3773 	 * Find and configure the PHY media block. If NO PHY blocks are
3774 	 * found on the SROM, but a PHY device is present, we assume the card
3775 	 * is a legacy device, and that there is ONLY a PHY interface on the
3776 	 * card (ie, no BNC or AUI, and 10BaseT is implemented by the PHY
3777 	 */
3778 
3779 	for (block = leaf->block + leaf->block_count -1;
3780 	    block >= leaf->block; block --) {
3781 		if (block->type == 3 || block->type == 1) {
3782 			leaf->mii_block = block;
3783 			break;
3784 		}
3785 	}
3786 
3787 	/*
3788 	 * If no MII block, select default, and hope this configuration will
3789 	 * allow the phy to be read/written if it is present
3790 	 */
3791 	dnetp->selected_media_block = leaf->mii_block ?
3792 	    leaf->mii_block : leaf->default_block;
3793 
3794 	setup_block(dnetp);
3795 	/* XXX function return value ignored */
3796 	(void) mii_create(dip, dnet_mii_write, dnet_mii_read, &dnetp->mii);
3797 
3798 	/*
3799 	 * We try PHY 0 LAST because it is less likely to be connected
3800 	 */
3801 	for (phy = 1; phy < 33; phy++)
3802 		if (mii_probe_phy(dnetp->mii, phy % 32) == MII_SUCCESS &&
3803 		    mii_init_phy(dnetp->mii, phy % 32) == MII_SUCCESS) {
3804 #ifdef DNETDEBUG
3805 			if (dnetdebug & DNETSENSE)
3806 				cmn_err(CE_NOTE, "dnet: "
3807 				    "PHY at address %d", phy % 32);
3808 #endif
3809 			dnetp->phyaddr = phy % 32;
3810 			if (!leaf->mii_block) {
3811 				/* Legacy card, change the leaf node */
3812 				set_leaf(&dnetp->sr, &leaf_phylegacy);
3813 			}
3814 			return;
3815 		}
3816 #ifdef DNETDEBUG
3817 	if (dnetdebug & DNETSENSE)
3818 		cmn_err(CE_NOTE, "dnet: No PHY found");
3819 #endif
3820 }
3821 
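/*
 * Note (added): dnet_mii_read()/dnet_mii_write() below bit-bang an
 * IEEE 802.3 clause 22 MDIO frame through the ETHER_ROM_REG serial
 * management bits.  A read frame looks roughly like this (MSB first):
 *
 *	<32x1 preamble> <01 start> <10 read op> <5-bit phy> <5-bit reg>
 *	<turnaround: PHY drives 0> <16 data bits>
 *
 * If the PHY does not drive the turnaround bit low, turned_around stays 0
 * and dnet_mii_read() returns -1 to indicate that no PHY responded.
 */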
3822 static ushort_t
3823 dnet_mii_read(dev_info_t *dip, int phy_addr, int reg_num)
3824 {
3825 	struct dnetinstance *dnetp;
3826 
3827 	uint32_t command_word;
3828 	uint32_t tmp;
3829 	uint32_t data = 0;
3830 	int i;
3831 	int bits_in_ushort = ((sizeof (ushort_t))*8);
3832 	int turned_around = 0;
3833 
3834 	dnetp = ddi_get_driver_private(dip);
3835 
3836 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3837 	/* Write Preamble */
3838 	write_mii(dnetp, MII_PRE, 2*bits_in_ushort);
3839 
3840 	/* Prepare command word */
3841 	command_word = (uint32_t)phy_addr << MII_PHY_ADDR_ALIGN;
3842 	command_word |= (uint32_t)reg_num << MII_REG_ADDR_ALIGN;
3843 	command_word |= MII_READ_FRAME;
3844 
3845 	write_mii(dnetp, command_word, bits_in_ushort-2);
3846 
3847 	mii_tristate(dnetp);
3848 
3849 	/* Check that the PHY generated a zero bit the 2nd clock */
3850 	tmp = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, ETHER_ROM_REG));
3851 
3852 	turned_around = (tmp & MII_DATA_IN) ? 0 : 1;
3853 
3854 	/* read data WORD */
3855 	for (i = 0; i < bits_in_ushort; i++) {
3856 		ddi_put32(dnetp->io_handle,
3857 		    REG32(dnetp->io_reg, ETHER_ROM_REG), MII_READ);
3858 		drv_usecwait(MII_DELAY);
3859 		ddi_put32(dnetp->io_handle,
3860 		    REG32(dnetp->io_reg, ETHER_ROM_REG), MII_READ | MII_CLOCK);
3861 		drv_usecwait(MII_DELAY);
3862 		tmp = ddi_get32(dnetp->io_handle,
3863 		    REG32(dnetp->io_reg, ETHER_ROM_REG));
3864 		drv_usecwait(MII_DELAY);
3865 		data = (data << 1) | (tmp >> MII_DATA_IN_POSITION) & 0x0001;
3866 	}
3867 
3868 	mii_tristate(dnetp);
3869 	return (turned_around ? data: -1);
3870 }
3871 
3872 static void
3873 dnet_mii_write(dev_info_t *dip, int phy_addr, int reg_num, int reg_dat)
3874 {
3875 	struct dnetinstance *dnetp;
3876 	uint32_t command_word;
3877 	int bits_in_ushort = ((sizeof (ushort_t))*8);
3878 
3879 	dnetp = ddi_get_driver_private(dip);
3880 
3881 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3882 	write_mii(dnetp, MII_PRE, 2*bits_in_ushort);
3883 
3884 	/* Prepare command word */
3885 	command_word = ((uint32_t)phy_addr << MII_PHY_ADDR_ALIGN);
3886 	command_word |= ((uint32_t)reg_num << MII_REG_ADDR_ALIGN);
3887 	command_word |= (MII_WRITE_FRAME | (uint32_t)reg_dat);
3888 
3889 	write_mii(dnetp, command_word, 2*bits_in_ushort);
3890 	mii_tristate(dnetp);
3891 }
3892 
3893 /*
3894  * Write data size bits from mii_data to the MII control lines.
3895  */
3896 static void
3897 write_mii(struct dnetinstance *dnetp, uint32_t mii_data, int data_size)
3898 {
3899 	int i;
3900 	uint32_t dbit;
3901 
3902 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3903 	for (i = data_size; i > 0; i--) {
3904 		dbit = ((mii_data >>
3905 		    (31 - MII_WRITE_DATA_POSITION)) & MII_WRITE_DATA);
3906 		ddi_put32(dnetp->io_handle,
3907 		    REG32(dnetp->io_reg, ETHER_ROM_REG),
3908 		    MII_WRITE | dbit);
3909 		drv_usecwait(MII_DELAY);
3910 		ddi_put32(dnetp->io_handle,
3911 		    REG32(dnetp->io_reg, ETHER_ROM_REG),
3912 		    MII_WRITE | MII_CLOCK | dbit);
3913 		drv_usecwait(MII_DELAY);
3914 		mii_data <<= 1;
3915 	}
3916 }
3917 
3918 /*
3919  * Put the MDIO port in tri-state for the turn around bits
3920  * in MII read and at end of MII management sequence.
3921  */
3922 static void
3923 mii_tristate(struct dnetinstance *dnetp)
3924 {
3925 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3926 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, ETHER_ROM_REG),
3927 	    MII_WRITE_TS);
3928 	drv_usecwait(MII_DELAY);
3929 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, ETHER_ROM_REG),
3930 	    MII_WRITE_TS | MII_CLOCK);
3931 	drv_usecwait(MII_DELAY);
3932 }
3933 
3934 
3935 static void
3936 set_leaf(SROM_FORMAT *sr, LEAF_FORMAT *leaf)
3937 {
3938 	if (sr->leaf && !sr->leaf->is_static)
3939 		kmem_free(sr->leaf, sr->adapters * sizeof (LEAF_FORMAT));
3940 	sr->leaf = leaf;
3941 }
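/*
 * Note (added): set_leaf() above frees a leaf array that dnet_parse_srom()
 * allocated with kmem_zalloc(), but never the statically defined legacy
 * leaves (is_static, built in setup_legacy_blocks()), so switching to one
 * of the fake SROM leaves does not leak memory.
 */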
3942 
3943 /*
3944  * Callback from MII module. Makes sure that the CSR registers are
3945  * configured properly if the PHY changes mode.
3946  */
3947 /* ARGSUSED */
3948 /* dnet_mii_link_cb - called with intrlock held */
3949 static void
3950 dnet_mii_link_cb(dev_info_t *dip, int phy, enum mii_phy_state state)
3951 {
3952 	struct dnetinstance *dnetp = ddi_get_driver_private(dip);
3953 	LEAF_FORMAT *leaf;
3954 
3955 	ASSERT(MUTEX_HELD(&dnetp->intrlock));
3956 
3957 	leaf = dnetp->sr.leaf + dnetp->leaf;
3958 	if (state == phy_state_linkup) {
3959 		dnetp->mii_up = 1;
3960 
3961 		(void) mii_getspeed(dnetp->mii, dnetp->phyaddr,
3962 		    &dnetp->mii_speed, &dnetp->mii_duplex);
3963 
3964 		dnetp->selected_media_block = leaf->mii_block;
3965 		setup_block(dnetp);
3966 	} else {
3967 		/* NEEDSWORK: Probably can call find_active_media here */
3968 		dnetp->mii_up = 0;
3969 
3970 		if (leaf->default_block->media_code == MEDIA_MII)
3971 			dnetp->selected_media_block = leaf->default_block;
3972 		setup_block(dnetp);
3973 	}
3974 	dnet_mii_link_up(dnetp);
3975 }
3976 
3977 static void
3978 dnet_mii_link_up(struct dnetinstance *dnetp)
3979 {
3980 	if (!dnetp->running) {
3981 		return;
3982 	}
3983 
3984 	if (dnetp->mii_up) {
3985 		(void) mii_getspeed(dnetp->mii, dnetp->phyaddr,
3986 		    &dnetp->mii_speed, &dnetp->mii_duplex);
3987 
3988 		mac_link_update(dnetp->mac_handle, LINK_STATE_UP);
3989 
3990 	} else {
3991 		dnetp->mii_speed = 0;
3992 		dnetp->mii_duplex = 0;
3993 
3994 		mac_link_update(dnetp->mac_handle, LINK_STATE_DOWN);
3995 	}
3996 }
3997 
3998 /*
3999  * SROM parsing routines.
4000  * Refer to the Digital 3.03 SROM spec while reading this! (references refer
4001  * to this document)
4002  * Where possible ALL vendor specific changes should be localised here. The
4003  * SROM data should be capable of describing any programmatic irregularities
4004  * of DNET cards (via SIA or GP registers, in particular), so vendor specific
4005  * code elsewhere should not be required
4006  */
4007 static void
4008 dnet_parse_srom(struct dnetinstance *dnetp, SROM_FORMAT *sr, uchar_t *vi)
4009 {
4010 	uint32_t ether_mfg = 0;
4011 	int i;
4012 	uchar_t *p;
4013 
4014 	if (!ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
4015 	    DDI_PROP_DONTPASS, "no_sromconfig", 0))
4016 		dnetp->sr.init_from_srom = check_srom_valid(vi);
4017 
4018 	if (dnetp->sr.init_from_srom && dnetp->board_type != DEVICE_ID_21040) {
4019 		/* Section 2/3: General SROM Format/ ID Block */
4020 		p = vi+18;
4021 		sr->version = *p++;
4022 		sr->adapters = *p++;
4023 
4024 		sr->leaf =
4025 		    kmem_zalloc(sr->adapters * sizeof (LEAF_FORMAT), KM_SLEEP);
4026 		for (i = 0; i < 6; i++)
4027 			sr->netaddr[i] = *p++;
4028 
4029 		for (i = 0; i < sr->adapters; i++) {
4030 			uchar_t devno = *p++;
4031 			uint16_t offset = *p++;
4032 			offset |= *p++ << 8;
4033 			sr->leaf[i].device_number = devno;
4034 			parse_controller_leaf(dnetp, sr->leaf+i, vi+offset);
4035 		}
4036 		/*
4037 		 * 'Orrible hack for cogent cards. The 6911A board seems to
4038 		 * have an incorrect SROM. (From the OEMDEMO program
4039 		 * supplied by cogent, it seems that the ROM matches the setup
4040 		 * for a board with a QSI or ICS PHY.)
4041 		 */
4042 		for (i = 0; i < 3; i++)
4043 			ether_mfg = (ether_mfg << 8) | sr->netaddr[i];
4044 
4045 		if (ether_mfg == ADAPTEC_ETHER) {
4046 			static uint16_t cogent_gprseq[] = {0x821, 0};
4047 			switch (vi[COGENT_SROM_ID]) {
4048 			case COGENT_ANA6911A_C:
4049 			case COGENT_ANA6911AC_C:
4050 #ifdef DNETDEBUG
4051 				if (dnetdebug & DNETTRACE)
4052 					cmn_err(CE_WARN,
4053 					    "Suspected bad GPR sequence."
4054 					    " Making a guess (821,0)");
4055 #endif
4056 
4057 				/* XXX function return value ignored */
4058 				(void) ddi_prop_update_byte_array(
4059 				    DDI_DEV_T_NONE, dnetp->devinfo,
4060 				    "gpr-sequence", (uchar_t *)cogent_gprseq,
4061 				    sizeof (cogent_gprseq));
4062 				break;
4063 			}
4064 		}
4065 	} else {
4066 		/*
4067 		 * Adhoc SROM, check for some cards which need special handling
4068 		 * Assume vendor info contains ether address in first six bytes
4069 		 */
4070 
4071 		uchar_t *mac = vi + ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
4072 		    DDI_PROP_DONTPASS, macoffset_propname, 0);
4073 
4074 		for (i = 0; i < 6; i++)
4075 			sr->netaddr[i] = mac[i];
4076 
4077 		if (dnetp->board_type == DEVICE_ID_21140) {
4078 			for (i = 0; i < 3; i++)
4079 				ether_mfg = (ether_mfg << 8) | mac[i];
4080 
4081 			switch (ether_mfg) {
4082 			case ASANTE_ETHER:
4083 				dnetp->vendor_21140 = ASANTE_TYPE;
4084 				dnetp->vendor_revision = 0;
4085 				set_leaf(sr, &leaf_asante);
4086 				sr->adapters = 1;
4087 				break;
4088 
4089 			case COGENT_ETHER:
4090 			case ADAPTEC_ETHER:
4091 				dnetp->vendor_21140 = COGENT_EM_TYPE;
4092 				dnetp->vendor_revision =
4093 				    vi[VENDOR_REVISION_OFFSET];
4094 				set_leaf(sr, &leaf_cogent_100);
4095 				sr->adapters = 1;
4096 				break;
4097 
4098 			default:
4099 				dnetp->vendor_21140 = DEFAULT_TYPE;
4100 				dnetp->vendor_revision = 0;
4101 				set_leaf(sr, &leaf_default_100);
4102 				sr->adapters = 1;
4103 				break;
4104 			}
4105 		} else if (dnetp->board_type == DEVICE_ID_21041) {
4106 			set_leaf(sr, &leaf_21041);
4107 		} else if (dnetp->board_type == DEVICE_ID_21040) {
4108 			set_leaf(sr, &leaf_21040);
4109 		}
4110 	}
4111 }
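/*
 * Note (added): for reference, the SROM ID block parsed above is laid out
 * as follows (byte offsets into the SROM image, as read by the code):
 *
 *	18	format version
 *	19	number of adapters (leaves)
 *	20-25	ethernet address
 *	26..	one 3-byte record per adapter: device number, then a
 *		little-endian 16-bit offset to that adapter's leaf
 */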
4112 
4113 /* Section 4.2, 4.3, 4.4, 4.5 */
4114 static void
4115 parse_controller_leaf(struct dnetinstance *dnetp, LEAF_FORMAT *leaf,
4116 	uchar_t *vi)
4117 {
4118 	int i;
4119 
4120 	leaf->selected_contype = *vi++;
4121 	leaf->selected_contype |= *vi++ << 8;
4122 
4123 	if (dnetp->board_type == DEVICE_ID_21140) /* Sect. 4.3 */
4124 		leaf->gpr = *vi++;
4125 
4126 	leaf->block_count = *vi++;
4127 
4128 	if (leaf->block_count > MAX_MEDIA) {
4129 		cmn_err(CE_WARN, "dnet: Too many media in SROM!");
4130 		leaf->block_count = 1;
4131 	}
4132 	for (i = 0; i <= leaf->block_count; i++) {
4133 		vi = parse_media_block(dnetp, leaf->block + i, vi);
4134 		if (leaf->block[i].command & CMD_DEFAULT_MEDIUM)
4135 			leaf->default_block = leaf->block+i;
4136 	}
4137 	/* No explicit default block: use last in the ROM */
4138 	if (leaf->default_block == NULL)
4139 		leaf->default_block = leaf->block + leaf->block_count -1;
4140 
4141 }
4142 
4143 static uchar_t *
4144 parse_media_block(struct dnetinstance *dnetp, media_block_t *block, uchar_t *vi)
4145 {
4146 	int i;
4147 
4148 	/*
4149 	 * There are three kinds of media block we need to worry about:
4150 	 * The 21041 blocks.
4151 	 * 21140 blocks from a version 1 SROM
4152 	 * 2114[023] block from a version 3 SROM
4153 	 */
4154 
4155 	if (dnetp->board_type == DEVICE_ID_21041) {
4156 		/* Section 4.2 */
4157 		block->media_code = *vi & 0x3f;
4158 		block->type = 2;
4159 		if (*vi++ & 0x40) {
4160 			block->un.sia.csr13 = *vi++;
4161 			block->un.sia.csr13 |= *vi++ << 8;
4162 			block->un.sia.csr14 = *vi++;
4163 			block->un.sia.csr14 |= *vi++ << 8;
4164 			block->un.sia.csr15 = *vi++;
4165 			block->un.sia.csr15 |= *vi++ << 8;
4166 		} else {
4167 			/* No media data (csrs 13,14,15). Insert defaults */
4168 			switch (block->media_code) {
4169 			case MEDIA_TP:
4170 				block->un.sia.csr13 = 0xef01;
4171 				block->un.sia.csr14 = 0x7f3f;
4172 				block->un.sia.csr15 = 0x0008;
4173 				break;
4174 			case MEDIA_TP_FD:
4175 				block->un.sia.csr13 = 0xef01;
4176 				block->un.sia.csr14 = 0x7f3d;
4177 				block->un.sia.csr15 = 0x0008;
4178 				break;
4179 			case MEDIA_BNC:
4180 				block->un.sia.csr13 = 0xef09;
4181 				block->un.sia.csr14 = 0x0705;
4182 				block->un.sia.csr15 = 0x0006;
4183 				break;
4184 			case MEDIA_AUI:
4185 				block->un.sia.csr13 = 0xef09;
4186 				block->un.sia.csr14 = 0x0705;
4187 				block->un.sia.csr15 = 0x000e;
4188 				break;
4189 			}
4190 		}
4191 	} else  if (*vi & 0x80) {  /* Extended format: Section 4.3.2.2 */
4192 		int blocklen = *vi++ & 0x7f;
4193 		block->type = *vi++;
4194 		switch (block->type) {
4195 		case 0: /* "non-MII": Section 4.3.2.2.1 */
4196 			block->media_code = (*vi++) & 0x3f;
4197 			block->gprseqlen = 1;
4198 			block->gprseq[0] = *vi++;
4199 			block->command = *vi++;
4200 			block->command |= *vi++ << 8;
4201 			break;
4202 
4203 		case 1: /* MII/PHY: Section 4.3.2.2.2 */
4204 			block->command = CMD_PS;
4205 			block->media_code = MEDIA_MII;
4206 				/* This is whats needed in CSR6 */
4207 
4208 			block->un.mii.phy_num = *vi++;
4209 			block->gprseqlen = *vi++;
4210 
4211 			for (i = 0; i < block->gprseqlen; i++)
4212 				block->gprseq[i] = *vi++;
4213 			block->rstseqlen = *vi++;
4214 			for (i = 0; i < block->rstseqlen; i++)
4215 				block->rstseq[i] = *vi++;
4216 
4217 			block->un.mii.mediacaps = *vi++;
4218 			block->un.mii.mediacaps |= *vi++ << 8;
4219 			block->un.mii.nwayadvert = *vi++;
4220 			block->un.mii.nwayadvert |= *vi++ << 8;
4221 			block->un.mii.fdxmask = *vi++;
4222 			block->un.mii.fdxmask |= *vi++ << 8;
4223 			block->un.mii.ttmmask = *vi++;
4224 			block->un.mii.ttmmask |= *vi++ << 8;
4225 			break;
4226 
4227 		case 2: /* SIA Media: Section 4.4.2.1.1 */
4228 			block->media_code = *vi & 0x3f;
4229 			if (*vi++ & 0x40) {
4230 				block->un.sia.csr13 = *vi++;
4231 				block->un.sia.csr13 |= *vi++ << 8;
4232 				block->un.sia.csr14 = *vi++;
4233 				block->un.sia.csr14 |= *vi++ << 8;
4234 				block->un.sia.csr15 = *vi++;
4235 				block->un.sia.csr15 |= *vi++ << 8;
4236 			} else {
4237 				/*
4238 				 * SIA values not provided by SROM; provide
4239 				 * defaults. See appendix D of 2114[23] manuals.
4240 				 */
4241 				switch (block->media_code) {
4242 				case MEDIA_BNC:
4243 					block->un.sia.csr13 = 0x0009;
4244 					block->un.sia.csr14 = 0x0705;
4245 					block->un.sia.csr15 = 0x0000;
4246 					break;
4247 				case MEDIA_AUI:
4248 					block->un.sia.csr13 = 0x0009;
4249 					block->un.sia.csr14 = 0x0705;
4250 					block->un.sia.csr15 = 0x0008;
4251 					break;
4252 				case MEDIA_TP:
4253 					block->un.sia.csr13 = 0x0001;
4254 					block->un.sia.csr14 = 0x7f3f;
4255 					block->un.sia.csr15 = 0x0000;
4256 					break;
4257 				case MEDIA_TP_FD:
4258 					block->un.sia.csr13 = 0x0001;
4259 					block->un.sia.csr14 = 0x7f3d;
4260 					block->un.sia.csr15 = 0x0000;
4261 					break;
4262 				default:
4263 					block->un.sia.csr13 = 0x0000;
4264 					block->un.sia.csr14 = 0x0000;
4265 					block->un.sia.csr15 = 0x0000;
4266 				}
4267 			}
4268 
4269 			/* Treat GP control/data as a GPR sequence */
4270 			block->gprseqlen = 2;
4271 			block->gprseq[0] = *vi++;
4272 			block->gprseq[0] |= *vi++ << 8;
4273 			block->gprseq[0] |= GPR_CONTROL_WRITE;
4274 			block->gprseq[1] = *vi++;
4275 			block->gprseq[1] |= *vi++ << 8;
4276 			break;
4277 
4278 		case 3: /* MII/PHY : Section 4.4.2.1.2 */
4279 			block->command = CMD_PS;
4280 			block->media_code = MEDIA_MII;
4281 			block->un.mii.phy_num = *vi++;
4282 
4283 			block->gprseqlen = *vi++;
4284 			for (i = 0; i < block->gprseqlen; i++) {
4285 				block->gprseq[i] = *vi++;
4286 				block->gprseq[i] |= *vi++ << 8;
4287 			}
4288 
4289 			block->rstseqlen = *vi++;
4290 			for (i = 0; i < block->rstseqlen; i++) {
4291 				block->rstseq[i] = *vi++;
4292 				block->rstseq[i] |= *vi++ << 8;
4293 			}
4294 			block->un.mii.mediacaps = *vi++;
4295 			block->un.mii.mediacaps |= *vi++ << 8;
4296 			block->un.mii.nwayadvert = *vi++;
4297 			block->un.mii.nwayadvert |= *vi++ << 8;
4298 			block->un.mii.fdxmask = *vi++;
4299 			block->un.mii.fdxmask |= *vi++ << 8;
4300 			block->un.mii.ttmmask = *vi++;
4301 			block->un.mii.ttmmask |= *vi++ << 8;
4302 			block->un.mii.miiintr |= *vi++;
4303 			break;
4304 
4305 		case 4: /* SYM Media: 4.5.2.1.3 */
4306 			block->media_code = *vi++ & 0x3f;
4307 			/* Treat GP control and data as a GPR sequence */
4308 			block->gprseqlen = 2;
4309 			block->gprseq[0] = *vi++;
4310 			block->gprseq[0] |= *vi++ << 8;
4311 			block->gprseq[0] |= GPR_CONTROL_WRITE;
4312 			block->gprseq[1]  = *vi++;
4313 			block->gprseq[1] |= *vi++ << 8;
4314 			block->command = *vi++;
4315 			block->command |= *vi++ << 8;
4316 			break;
4317 
4318 		case 5: /* GPR reset sequence:  Section 4.5.2.1.4 */
4319 			block->rstseqlen = *vi++;
4320 			for (i = 0; i < block->rstseqlen; i++)
4321 				block->rstseq[i] = *vi++;
4322 			break;
4323 
4324 		default: /* Unknown media block. Skip it. */
4325 			cmn_err(CE_WARN, "dnet: Unsupported SROM block.");
4326 			vi += blocklen;
4327 			break;
4328 		}
4329 	} else { /* Compact format (or V1 SROM): Section 4.3.2.1 */
4330 		block->type = 0;
4331 		block->media_code = *vi++ & 0x3f;
4332 		block->gprseqlen = 1;
4333 		block->gprseq[0] = *vi++;
4334 		block->command = *vi++;
4335 		block->command |= (*vi++) << 8;
4336 	}
4337 	return (vi);
4338 }
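/*
 * Worked example (added; hypothetical compact-format bytes): the sequence
 * 0x00 0x08 0x8e 0x00 decodes above to media_code 0 (10BASE-T),
 * gprseq[0] = 0x08 and command = 0x008e.  Extended-format blocks instead
 * start with a length byte (bit 7 set) followed by the block type.
 */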
4339 
4340 
4341 /*
4342  * An alternative to doing this would be to store the legacy ROMs in binary
4343  * format in the conf file, and in read_srom, pick out the data. This would
4344  * then allow the parser to continue on as normal. This makes it a little
4345  * easier to read.
4346  */
4347 static void
4348 setup_legacy_blocks()
4349 {
4350 	LEAF_FORMAT *leaf;
4351 	media_block_t *block;
4352 
4353 	/* Default FAKE SROM */
4354 	leaf = &leaf_default_100;
4355 	leaf->is_static = 1;
4356 	leaf->default_block = &leaf->block[3];
4357 	leaf->block_count = 4; /* 100 cards are highly unlikely to have BNC */
4358 	block = leaf->block;
4359 	block->media_code = MEDIA_TP_FD;
4360 	block->type = 0;
4361 	block->command = 0x8e;  /* PCS, PS off, media sense: bit7, pol=1 */
4362 	block++;
4363 	block->media_code = MEDIA_TP;
4364 	block->type = 0;
4365 	block->command = 0x8e;  /* PCS, PS off, media sense: bit7, pol=1 */
4366 	block++;
4367 	block->media_code = MEDIA_SYM_SCR_FD;
4368 	block->type = 0;
4369 	block->command = 0x6d;  /* PCS, PS, SCR on, media sense: bit6, pol=0 */
4370 	block++;
4371 	block->media_code = MEDIA_SYM_SCR;
4372 	block->type = 0;
4373 	block->command = 0x406d; /* PCS, PS, SCR on, media sense: bit6, pol=0 */
4374 
4375 	/* COGENT FAKE SROM */
4376 	leaf = &leaf_cogent_100;
4377 	leaf->is_static = 1;
4378 	leaf->default_block = &leaf->block[4];
4379 	leaf->block_count = 5; /* 100TX, 100TX-FD, 10T, 10T-FD, BNC */
4380 	block = leaf->block; /* BNC */
4381 	block->media_code = MEDIA_BNC;
4382 	block->type = 0;
4383 	block->command =  0x8000; /* No media sense, PCS, SCR, PS all off */
4384 	block->gprseqlen = 2;
4385 	block->rstseqlen = 0;
4386 	block->gprseq[0] = 0x13f;
4387 	block->gprseq[1] = 1;
4388 
4389 	block++;
4390 	block->media_code = MEDIA_TP_FD;
4391 	block->type = 0;
4392 	block->command = 0x8e;  /* PCS, PS off, media sense: bit7, pol=1 */
4393 	block->gprseqlen = 2;
4394 	block->rstseqlen = 0;
4395 	block->gprseq[0] = 0x13f;
4396 	block->gprseq[1] = 0x26;
4397 
4398 	block++; /* 10BaseT */
4399 	block->media_code = MEDIA_TP;
4400 	block->type = 0;
4401 	block->command = 0x8e;  /* PCS, PS off, media sense: bit7, pol=1 */
4402 	block->gprseqlen = 2;
4403 	block->rstseqlen = 0;
4404 	block->gprseq[0] = 0x13f;
4405 	block->gprseq[1] = 0x3e;
4406 
4407 	block++; /* 100BaseTX-FD */
4408 	block->media_code = MEDIA_SYM_SCR_FD;
4409 	block->type = 0;
4410 	block->command = 0x6d;  /* PCS, PS, SCR on, media sense: bit6, pol=0 */
4411 	block->gprseqlen = 2;
4412 	block->rstseqlen = 0;
4413 	block->gprseq[0] = 0x13f;
4414 	block->gprseq[1] = 1;
4415 
4416 	block++; /* 100BaseTX */
4417 	block->media_code = MEDIA_SYM_SCR;
4418 	block->type = 0;
4419 	block->command = 0x406d; /* PCS, PS, SCR on, media sense: bit6, pol=0 */
4420 	block->gprseqlen = 2;
4421 	block->rstseqlen = 0;
4422 	block->gprseq[0] = 0x13f;
4423 	block->gprseq[1] = 1;
4424 
4425 	/* Generic legacy card with a PHY. */
4426 	leaf = &leaf_phylegacy;
4427 	leaf->block_count = 1;
4428 	leaf->mii_block = leaf->block;
4429 	leaf->default_block = &leaf->block[0];
4430 	leaf->is_static = 1;
4431 	block = leaf->block;
4432 	block->media_code = MEDIA_MII;
4433 	block->type = 1; /* MII Block type 1 */
4434 	block->command = 1; /* Port select */
4435 	block->gprseqlen = 0;
4436 	block->rstseqlen = 0;
4437 
4438 	/* ASANTE FAKE SROM */
4439 	leaf = &leaf_asante;
4440 	leaf->is_static = 1;
4441 	leaf->default_block = &leaf->block[0];
4442 	leaf->block_count = 1;
4443 	block = leaf->block;
4444 	block->media_code = MEDIA_MII;
4445 	block->type = 1; /* MII Block type 1 */
4446 	block->command = 1; /* Port select */
4447 	block->gprseqlen = 3;
4448 	block->rstseqlen = 0;
4449 	block->gprseq[0] = 0x180;
4450 	block->gprseq[1] = 0x80;
4451 	block->gprseq[2] = 0x0;
4452 
4453 	/* LEGACY 21041 card FAKE SROM */
4454 	leaf = &leaf_21041;
4455 	leaf->is_static = 1;
4456 	leaf->block_count = 4;  /* SIA Blocks for TP, TPfd, BNC, AUI */
4457 	leaf->default_block = &leaf->block[3];
4458 
4459 	block = leaf->block;
4460 	block->media_code = MEDIA_AUI;
4461 	block->type = 2;
4462 	block->un.sia.csr13 = 0xef09;
4463 	block->un.sia.csr14 = 0x0705;
4464 	block->un.sia.csr15 = 0x000e;
4465 
4466 	block++;
4467 	block->media_code = MEDIA_TP_FD;
4468 	block->type = 2;
4469 	block->un.sia.csr13 = 0xef01;
4470 	block->un.sia.csr14 = 0x7f3d;
4471 	block->un.sia.csr15 = 0x0008;
4472 
4473 	block++;
4474 	block->media_code = MEDIA_BNC;
4475 	block->type = 2;
4476 	block->un.sia.csr13 = 0xef09;
4477 	block->un.sia.csr14 = 0x0705;
4478 	block->un.sia.csr15 = 0x0006;
4479 
4480 	block++;
4481 	block->media_code = MEDIA_TP;
4482 	block->type = 2;
4483 	block->un.sia.csr13 = 0xef01;
4484 	block->un.sia.csr14 = 0x7f3f;
4485 	block->un.sia.csr15 = 0x0008;
4486 
4487 	/* LEGACY 21040 card FAKE SROM */
4488 	leaf = &leaf_21040;
4489 	leaf->is_static = 1;
4490 	leaf->block_count = 4;  /* SIA Blocks for TP, TPfd, BNC, AUI */
4491 	block = leaf->block;
4492 	block->media_code = MEDIA_AUI;
4493 	block->type = 2;
4494 	block->un.sia.csr13 = 0x8f09;
4495 	block->un.sia.csr14 = 0x0705;
4496 	block->un.sia.csr15 = 0x000e;
4497 	block++;
4498 	block->media_code = MEDIA_TP_FD;
4499 	block->type = 2;
4500 	block->un.sia.csr13 = 0x0f01;
4501 	block->un.sia.csr14 = 0x7f3d;
4502 	block->un.sia.csr15 = 0x0008;
4503 	block++;
4504 	block->media_code = MEDIA_BNC;
4505 	block->type = 2;
4506 	block->un.sia.csr13 = 0xef09;
4507 	block->un.sia.csr14 = 0x0705;
4508 	block->un.sia.csr15 = 0x0006;
4509 	block++;
4510 	block->media_code = MEDIA_TP;
4511 	block->type = 2;
4512 	block->un.sia.csr13 = 0x8f01;
4513 	block->un.sia.csr14 = 0x7f3f;
4514 	block->un.sia.csr15 = 0x0008;
4515 }
4516 
4517 static void
4518 dnet_print_srom(SROM_FORMAT *sr)
4519 {
4520 	int i;
4521 	uchar_t *a = sr->netaddr;
4522 	cmn_err(CE_NOTE, "SROM Dump: %d. ver %d, Num adapters %d,"
4523 	    "Addr:%x:%x:%x:%x:%x:%x",
4524 	    sr->init_from_srom, sr->version, sr->adapters,
4525 	    a[0], a[1], a[2], a[3], a[4], a[5]);
4526 
4527 	for (i = 0; i < sr->adapters; i++)
4528 		dnet_dump_leaf(sr->leaf+i);
4529 }
4530 
4531 static void
4532 dnet_dump_leaf(LEAF_FORMAT *leaf)
4533 {
4534 	int i;
4535 	cmn_err(CE_NOTE, "Leaf: Device %d, block_count %d, gpr: %x",
4536 	    leaf->device_number, leaf->block_count, leaf->gpr);
4537 	for (i = 0; i < leaf->block_count; i++)
4538 		dnet_dump_block(leaf->block+i);
4539 }
4540 
4541 static void
4542 dnet_dump_block(media_block_t *block)
4543 {
4544 	cmn_err(CE_NOTE, "Block(%p): type %x, media %s, command: %x ",
4545 	    (void *)block,
4546 	    block->type, media_str[block->media_code], block->command);
4547 	dnet_dumpbin("\tGPR Seq", (uchar_t *)block->gprseq, 2,
4548 	    block->gprseqlen *2);
4549 	dnet_dumpbin("\tGPR Reset", (uchar_t *)block->rstseq, 2,
4550 	    block->rstseqlen *2);
4551 	switch (block->type) {
4552 	case 1: case 3:
4553 		cmn_err(CE_NOTE, "\tMII Info: phy %d, nway %x, fdx"
4554 		    "%x, ttm %x, mediacap %x",
4555 		    block->un.mii.phy_num, block->un.mii.nwayadvert,
4556 		    block->un.mii.fdxmask, block->un.mii.ttmmask,
4557 		    block->un.mii.mediacaps);
4558 		break;
4559 	case 2:
4560 		cmn_err(CE_NOTE, "\tSIA Regs: CSR13:%x, CSR14:%x, CSR15:%x",
4561 		    block->un.sia.csr13, block->un.sia.csr14,
4562 		    block->un.sia.csr15);
4563 		break;
4564 	}
4565 }
4566 
4567 
4568 /* Utility to print out binary info dumps. Handy for SROMs, etc */
4569 
4570 static int
4571 hexcode(unsigned val)
4572 {
4573 	if (val <= 9)
4574 		return (val +'0');
4575 	if (val <= 15)
4576 		return (val + 'a' - 10);
4577 	return (-1);
4578 }
4579 
4580 static void
4581 dnet_dumpbin(char *msg, unsigned char *data, int size, int len)
4582 {
4583 	char hex[128], *p = hex;
4584 	char ascii[128], *q = ascii;
4585 	int i, j;
4586 
4587 	if (!len)
4588 		return;
4589 
4590 	for (i = 0; i < len; i += size) {
4591 		for (j = size - 1; j >= 0; j--) { /* PORTABILITY: byte order */
4592 			*p++ = hexcode(data[i+j] >> 4);
4593 			*p++ = hexcode(data[i+j] & 0xf);
4594 			*q++ = (data[i+j] < 32 || data[i+j] > 127) ?
4595 			    '.' : data[i+j];
4596 		}
4597 		*p++ = ' ';
4598 		if (q-ascii >= 8) {
4599 			*p = *q = 0;
4600 			cmn_err(CE_NOTE, "%s: %s\t%s", msg, hex, ascii);
4601 			p = hex;
4602 			q = ascii;
4603 		}
4604 	}
4605 	if (p != hex) {
4606 		while ((p - hex) < 8*3)
4607 			*p++ = ' ';
4608 		*p = *q = 0;
4609 		cmn_err(CE_NOTE, "%s: %s\t%s", msg, hex, ascii);
4610 	}
4611 }
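/*
 * Note (added): dnet_dumpbin() above prints the buffer in groups of 'size'
 * bytes, emitting the bytes of each group in reverse order (see the
 * byte-order note in the loop) so that little-endian words read naturally,
 * and flushes a hex/ASCII line after every eight bytes.
 */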
4612 
4613 #ifdef DNETDEBUG
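/*
 * Note (added): debug-only timer helpers.  The general-purpose timer is
 * programmed in ticks, and the conversions below assume one tick is
 * 8192/100 = 81.92 usec; the same constant is used in both directions, so
 * elapsed-time math stays self-consistent even if the true hardware tick
 * period differs.
 */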
4614 void
4615 dnet_usectimeout(struct dnetinstance *dnetp, uint32_t usecs, int contin,
4616     timercb_t cback)
4617 {
4618 	mutex_enter(&dnetp->intrlock);
4619 	dnetp->timer.start_ticks = (usecs * 100) / 8192;
4620 	dnetp->timer.cb = cback;
4621 	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, GP_TIMER_REG),
4622 	    dnetp->timer.start_ticks | (contin ? GPTIMER_CONT : 0));
4623 	if (dnetp->timer.cb)
4624 		enable_interrupts(dnetp);
4625 	mutex_exit(&dnetp->intrlock);
4626 }
4627 
4628 uint32_t
4629 dnet_usecelapsed(struct dnetinstance *dnetp)
4630 {
4631 	uint32_t ticks = dnetp->timer.start_ticks -
4632 	    (ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, GP_TIMER_REG)) &
4633 	    0xffff);
4634 	return ((ticks * 8192) / 100);
4635 }
4636 
4637 /* ARGSUSED */
4638 void
4639 dnet_timestamp(struct dnetinstance *dnetp,  char *buf)
4640 {
4641 	uint32_t elapsed = dnet_usecelapsed(dnetp);
4642 	char loc[32], *p = loc;
4643 	int firstdigit = 1;
4644 	uint32_t divisor;
4645 
4646 	while (*p++ = *buf++)
4647 		;
4648 	p--;
4649 
4650 	for (divisor = 1000000000; divisor /= 10; ) {
4651 		int digit = (elapsed / divisor);
4652 		elapsed -= digit * divisor;
4653 		if (!firstdigit || digit) {
4654 			*p++ = digit + '0';
4655 			firstdigit = 0;
4656 		}
4657 
4658 	}
4659 
4660 	/* Actual zero, output it */
4661 	if (firstdigit)
4662 		*p++ = '0';
4663 
4664 	*p++ = '-';
4665 	*p++ = '>';
4666 	*p++ = 0;
4667 
4668 	printf(loc);
4669 	dnet_usectimeout(dnetp, 1000000, 0, 0);
4670 }
4671 
4672 #endif
4673