xref: /titanic_50/usr/src/uts/common/io/hme/hme.c (revision b56bf881a9655cb27b53cba1468312f7c6dfb0a2)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 
27 /*
28  * SunOS MT STREAMS FEPS(SBus)/Cheerio(PCI) 10/100Mb Ethernet Device Driver
29  */
30 
31 #include	<sys/types.h>
32 #include	<sys/debug.h>
33 #include	<sys/stream.h>
34 #include	<sys/cmn_err.h>
35 #include	<sys/kmem.h>
36 #include	<sys/crc32.h>
37 #include	<sys/modctl.h>
38 #include	<sys/conf.h>
39 #include	<sys/strsun.h>
40 #include	<sys/kstat.h>
41 #include	<sys/pattr.h>
42 #include	<sys/dlpi.h>
43 #include	<sys/strsubr.h>
44 #include	<sys/mac_provider.h>
45 #include	<sys/mac_ether.h>
46 #include	<sys/mii.h>
47 #include	<sys/ethernet.h>
48 #include	<sys/vlan.h>
49 #include	<sys/pci.h>
50 #include	<sys/policy.h>
51 #include	<sys/ddi.h>
52 #include	<sys/sunddi.h>
53 #include	<sys/byteorder.h>
54 #include	"hme_phy.h"
55 #include	"hme_mac.h"
56 #include	"hme.h"
57 
58 typedef void	(*fptrv_t)();
59 
60 typedef enum {
61 	NO_MSG		= 0,
62 	AUTOCONFIG_MSG,
63 	DISPLAY_MSG,
64 	INIT_MSG,
65 	UNINIT_MSG,
66 	CONFIG_MSG,
67 	MII_MSG,
68 	FATAL_ERR_MSG,
69 	NFATAL_ERR_MSG,
70 	XCVR_MSG,
71 	NOXCVR_MSG,
72 	ERX_MSG,
73 	DDI_MSG,
74 } msg_t;
75 
76 msg_t	hme_debug_level =	NO_MSG;
77 
78 static char	*msg_string[] = {
79 	"NONE       ",
80 	"AUTOCONFIG ",
81 	"DISPLAY	"
82 	"INIT       ",
83 	"UNINIT		",
84 	"CONFIG	",
85 	"MII	",
86 	"FATAL_ERR	",
87 	"NFATAL_ERR	",
88 	"XCVR	",
89 	"NOXCVR	",
90 	"ERX	",
91 	"DDI	",
92 };
93 
94 #define	SEVERITY_NONE	0
95 #define	SEVERITY_LOW	0
96 #define	SEVERITY_MID	1
97 #define	SEVERITY_HIGH	2
98 #define	SEVERITY_UNKNOWN 99
99 
100 #define	FEPS_URUN_BUG
101 #define	HME_CODEVIOL_BUG
102 
103 #define	KIOIP	KSTAT_INTR_PTR(hmep->hme_intrstats)
104 
105 /*
106  * The following variables are used for checking fixes in Sbus/FEPS 2.0
107  */
108 static	int	hme_urun_fix = 0;	/* Bug fixed in Sbus/FEPS 2.0 */
109 
110 /*
111  * The following variables are used for configuring various features
112  */
113 static	int	hme_64bit_enable =	1;	/* Use 64-bit sbus transfers */
114 static	int	hme_reject_own =	1;	/* Reject packets with own SA */
115 static	int	hme_ngu_enable =	0;	/* Never Give Up mode */
116 
117 char *hme_priv_prop[] = {
118 	"_ipg0",
119 	"_ipg1",
120 	"_ipg2",
121 	"_lance_mode",
122 	NULL
123 };
124 
125 static	int	hme_lance_mode =	1;	/* to enable lance mode */
126 static	int	hme_ipg0 =		16;
127 static	int	hme_ipg1 =		8;
128 static	int	hme_ipg2 =		4;
129 
130 /*
131  * The following parameters may be configured by the user. If they are not
132  * configured by the user, the values will be based on the capabilities of
133  * the transceiver.
134  * The value "HME_NOTUSR" is ORed with the parameter value to indicate values
135  * which are NOT configured by the user.
136  */
137 
138 #define	HME_NOTUSR	0x0f000000
139 #define	HME_MASK_1BIT	0x1
140 #define	HME_MASK_5BIT	0x1f
141 #define	HME_MASK_8BIT	0xff
142 
143 /*
144  * All strings used by hme messaging functions
145  */
146 
147 static	char *no_xcvr_msg =
148 	"No transceiver found.";
149 
150 static	char *burst_size_msg =
151 	"Could not identify the burst size";
152 
153 static	char *unk_rx_ringsz_msg =
154 	"Unknown receive RINGSZ";
155 
156 static  char *add_intr_fail_msg =
157 	"ddi_add_intr(9F) failed";
158 
159 static  char *mregs_4global_reg_fail_msg =
160 	"ddi_regs_map_setup(9F) for global reg failed";
161 
162 static	char *mregs_4etx_reg_fail_msg =
163 	"ddi_map_regs for etx reg failed";
164 
165 static	char *mregs_4erx_reg_fail_msg =
166 	"ddi_map_regs for erx reg failed";
167 
168 static	char *mregs_4bmac_reg_fail_msg =
169 	"ddi_map_regs for bmac reg failed";
170 
171 static	char *mregs_4mif_reg_fail_msg =
172 	"ddi_map_regs for mif reg failed";
173 
174 static	char *init_fail_gen_msg =
175 	"Failed to initialize hardware/driver";
176 
177 static	char *ddi_nregs_fail_msg =
178 	"ddi_dev_nregs failed(9F), returned %d";
179 
180 static	char *bad_num_regs_msg =
181 	"Invalid number of registers.";
182 
183 
184 /* FATAL ERR msgs */
185 /*
186  * Function prototypes.
187  */
188 /* these two are global so that qfe can use them */
189 int hmeattach(dev_info_t *, ddi_attach_cmd_t);
190 int hmedetach(dev_info_t *, ddi_detach_cmd_t);
191 int hmequiesce(dev_info_t *);
192 static	boolean_t hmeinit_xfer_params(struct hme *);
193 static	uint_t hmestop(struct hme *);
194 static	void hmestatinit(struct hme *);
195 static	int hmeallocthings(struct hme *);
196 static	void hmefreethings(struct hme *);
197 static	int hmeallocbuf(struct hme *, hmebuf_t *, int);
198 static	int hmeallocbufs(struct hme *);
199 static	void hmefreebufs(struct hme *);
200 static	void hmeget_hm_rev_property(struct hme *);
201 static	boolean_t hmestart(struct hme *, mblk_t *);
202 static	uint_t hmeintr(caddr_t);
203 static	void hmereclaim(struct hme *);
204 static	int hmeinit(struct hme *);
205 static	void hmeuninit(struct hme *hmep);
206 static 	mblk_t *hmeread(struct hme *, hmebuf_t *, uint32_t);
207 static	void hmesavecntrs(struct hme *);
208 static	void hme_fatal_err(struct hme *, uint_t);
209 static	void hme_nonfatal_err(struct hme *, uint_t);
210 static	int hmeburstsizes(struct hme *);
211 static	void send_bit(struct hme *, uint16_t);
212 static	uint16_t get_bit_std(uint8_t, struct hme *);
213 static	uint16_t hme_bb_mii_read(struct hme *, uint8_t, uint8_t);
214 static	void hme_bb_mii_write(struct hme *, uint8_t, uint8_t, uint16_t);
215 static	void hme_bb_force_idle(struct hme *);
216 static	uint16_t hme_mii_read(void *, uint8_t, uint8_t);
217 static	void hme_mii_write(void *, uint8_t, uint8_t, uint16_t);
218 static	void hme_setup_mac_address(struct hme *, dev_info_t *);
219 static	void hme_mii_notify(void *, link_state_t);
220 
221 static void hme_fault_msg(struct hme *, uint_t, msg_t, char *, ...);
222 
223 static void hme_check_acc_handle(char *, uint_t, struct hme *,
224     ddi_acc_handle_t);
225 
226 /*
227  * Nemo (GLDv3) Functions.
228  */
229 static int	hme_m_stat(void *, uint_t, uint64_t *);
230 static int	hme_m_start(void *);
231 static void	hme_m_stop(void *);
232 static int	hme_m_promisc(void *, boolean_t);
233 static int	hme_m_multicst(void *, boolean_t, const uint8_t *);
234 static int	hme_m_unicst(void *, const uint8_t *);
235 static mblk_t	*hme_m_tx(void *, mblk_t *);
236 static boolean_t	hme_m_getcapab(void *, mac_capab_t, void *);
237 static int hme_m_getprop(void *, const char *, mac_prop_id_t, uint_t, void *);
238 static void hme_m_propinfo(void *, const char *, mac_prop_id_t,
239     mac_prop_info_handle_t);
240 static int hme_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
241     const void *);
242 
243 static mii_ops_t hme_mii_ops = {
244 	MII_OPS_VERSION,
245 	hme_mii_read,
246 	hme_mii_write,
247 	hme_mii_notify,
248 	NULL
249 };
250 
251 static mac_callbacks_t hme_m_callbacks = {
252 	MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
253 	hme_m_stat,
254 	hme_m_start,
255 	hme_m_stop,
256 	hme_m_promisc,
257 	hme_m_multicst,
258 	hme_m_unicst,
259 	hme_m_tx,
260 	NULL,
261 	NULL,
262 	hme_m_getcapab,
263 	NULL,
264 	NULL,
265 	hme_m_setprop,
266 	hme_m_getprop,
267 	hme_m_propinfo
268 };
269 
270 DDI_DEFINE_STREAM_OPS(hme_dev_ops, nulldev, nulldev, hmeattach, hmedetach,
271     nodev, NULL, D_MP, NULL, hmequiesce);
272 
273 #define	HME_FAULT_MSG1(p, s, t, f) \
274     hme_fault_msg((p), (s), (t), (f));
275 
276 #define	HME_FAULT_MSG2(p, s, t, f, a) \
277     hme_fault_msg((p), (s), (t), (f), (a));
278 
279 #define	HME_FAULT_MSG3(p, s, t, f, a, b) \
280     hme_fault_msg((p), (s), (t), (f), (a), (b));
281 
282 #define	HME_FAULT_MSG4(p, s, t, f, a, b, c) \
283     hme_fault_msg((p), (s), (t), (f), (a), (b), (c));
284 
285 #define	CHECK_MIFREG() \
286 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_mifregh)
287 #define	CHECK_ETXREG() \
288 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_etxregh)
289 #define	CHECK_ERXREG() \
290 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_erxregh)
291 #define	CHECK_MACREG() \
292 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_bmacregh)
293 #define	CHECK_GLOBREG() \
294 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_globregh)
295 
296 /*
297  * Claim the device is ultra-capable of burst in the beginning.  Use
298  * the value returned by ddi_dma_burstsizes() to actually set the HME
299  * global configuration register later.
300  *
301  * Sbus/FEPS supports burst sizes of 16, 32 and 64 bytes. Also, it supports
302  * 32-bit and 64-bit Sbus transfers. Hence the dlim_burstsizes field contains
303  * the the burstsizes in both the lo and hi words.
304  */
305 #define	HMELIMADDRLO	((uint64_t)0x00000000)
306 #define	HMELIMADDRHI	((uint64_t)0xffffffff)
307 
308 /*
309  * Note that rx and tx data buffers can be arbitrarily aligned, but
310  * that the descriptor rings need to be aligned on 2K boundaries, per
311  * the spec.
312  */
313 static ddi_dma_attr_t hme_dma_attr = {
314 	DMA_ATTR_V0,		/* version number. */
315 	(uint64_t)HMELIMADDRLO,	/* low address */
316 	(uint64_t)HMELIMADDRHI,	/* high address */
317 	(uint64_t)0x00ffffff,	/* address counter max */
318 	(uint64_t)HME_HMDALIGN,	/* alignment */
319 	(uint_t)0x00700070,	/* dlim_burstsizes for 32 and 64 bit xfers */
320 	(uint32_t)0x1,		/* minimum transfer size */
321 	(uint64_t)0x7fffffff,	/* maximum transfer size */
322 	(uint64_t)0x00ffffff,	/* maximum segment size */
323 	1,			/* scatter/gather list length */
324 	512,			/* granularity */
325 	0			/* attribute flags */
326 };
327 
328 static ddi_device_acc_attr_t hme_buf_attr = {
329 	DDI_DEVICE_ATTR_V0,
330 	DDI_NEVERSWAP_ACC,
331 	DDI_STRICTORDER_ACC,	/* probably could allow merging & caching */
332 	DDI_DEFAULT_ACC,
333 };
334 
335 static uchar_t pci_latency_timer = 0;
336 
337 /*
338  * Module linkage information for the kernel.
339  */
340 static struct modldrv modldrv = {
341 	&mod_driverops,	/* Type of module.  This one is a driver */
342 	"Sun HME 10/100 Mb Ethernet",
343 	&hme_dev_ops,	/* driver ops */
344 };
345 
346 static struct modlinkage modlinkage = {
347 	MODREV_1, &modldrv, NULL
348 };
349 
350 /* <<<<<<<<<<<<<<<<<<<<<<  Register operations >>>>>>>>>>>>>>>>>>>>> */
351 
352 #define	GET_MIFREG(reg) \
353 	ddi_get32(hmep->hme_mifregh, (uint32_t *)&hmep->hme_mifregp->reg)
354 #define	PUT_MIFREG(reg, value) \
355 	ddi_put32(hmep->hme_mifregh, (uint32_t *)&hmep->hme_mifregp->reg, value)
356 
357 #define	GET_ETXREG(reg) \
358 	ddi_get32(hmep->hme_etxregh, (uint32_t *)&hmep->hme_etxregp->reg)
359 #define	PUT_ETXREG(reg, value) \
360 	ddi_put32(hmep->hme_etxregh, (uint32_t *)&hmep->hme_etxregp->reg, value)
361 #define	GET_ERXREG(reg) \
362 	ddi_get32(hmep->hme_erxregh, (uint32_t *)&hmep->hme_erxregp->reg)
363 #define	PUT_ERXREG(reg, value) \
364 	ddi_put32(hmep->hme_erxregh, (uint32_t *)&hmep->hme_erxregp->reg, value)
365 #define	GET_MACREG(reg) \
366 	ddi_get32(hmep->hme_bmacregh, (uint32_t *)&hmep->hme_bmacregp->reg)
367 #define	PUT_MACREG(reg, value) \
368 	ddi_put32(hmep->hme_bmacregh, \
369 		(uint32_t *)&hmep->hme_bmacregp->reg, value)
370 #define	GET_GLOBREG(reg) \
371 	ddi_get32(hmep->hme_globregh, (uint32_t *)&hmep->hme_globregp->reg)
372 #define	PUT_GLOBREG(reg, value) \
373 	ddi_put32(hmep->hme_globregh, \
374 		(uint32_t *)&hmep->hme_globregp->reg, value)
375 #define	PUT_TMD(ptr, paddr, len, flags)					\
376 	ddi_put32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_addr, paddr); \
377 	ddi_put32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_flags,	\
378 	    len | flags)
379 #define	GET_TMD_FLAGS(ptr)					\
380 	ddi_get32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_flags)
381 #define	PUT_RMD(ptr, paddr) \
382 	ddi_put32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_addr, paddr); \
383 	ddi_put32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_flags,	\
384 	    (uint32_t)(HMEBUFSIZE << HMERMD_BUFSIZE_SHIFT) | HMERMD_OWN)
385 #define	GET_RMD_FLAGS(ptr)					\
386 	ddi_get32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_flags)
387 
388 #define	GET_ROM8(offset) \
389 	ddi_get8((hmep->hme_romh), (offset))
390 
391 /*
392  * Ether_copy is not endian-correct. Define an endian-correct version.
393  */
394 #define	ether_bcopy(a, b) (bcopy(a, b, 6))
395 
396 /*
397  * Ether-type is specifically big-endian, but data region is unknown endian
398  */
399 #define	get_ether_type(ptr) \
400 	(((((uint8_t *)ptr)[12] << 8) | (((uint8_t *)ptr)[13])))
401 
402 /* <<<<<<<<<<<<<<<<<<<<<<  Configuration Parameters >>>>>>>>>>>>>>>>>>>>> */
403 
404 #define	BMAC_DEFAULT_JAMSIZE	(0x04)		/* jamsize equals 4 */
405 #define	BMAC_LONG_JAMSIZE	(0x10)		/* jamsize equals 0x10 */
406 static	int 	jamsize = BMAC_DEFAULT_JAMSIZE;
407 
408 
409 /*
410  * Calculate the bit in the multicast address filter that selects the given
411  * address.
412  */
413 
414 static uint32_t
415 hmeladrf_bit(const uint8_t *addr)
416 {
417 	uint32_t crc;
418 
419 	CRC32(crc, addr, ETHERADDRL, -1U, crc32_table);
420 
421 	/*
422 	 * Just want the 6 most significant bits.
423 	 */
424 	return (crc >> 26);
425 }
426 
427 /* <<<<<<<<<<<<<<<<<<<<<<<<  Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
428 
429 static void
430 send_bit(struct hme *hmep, uint16_t x)
431 {
432 	PUT_MIFREG(mif_bbdata, x);
433 	PUT_MIFREG(mif_bbclk, HME_BBCLK_LOW);
434 	PUT_MIFREG(mif_bbclk, HME_BBCLK_HIGH);
435 }
436 
437 
438 /*
439  * To read the MII register bits according to the IEEE Standard
440  */
441 static uint16_t
442 get_bit_std(uint8_t phyad, struct hme *hmep)
443 {
444 	uint16_t	x;
445 
446 	PUT_MIFREG(mif_bbclk, HME_BBCLK_LOW);
447 	drv_usecwait(1);	/* wait for  >330 ns for stable data */
448 	if (phyad == HME_INTERNAL_PHYAD)
449 		x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM0) ? 1 : 0;
450 	else
451 		x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM1) ? 1 : 0;
452 	PUT_MIFREG(mif_bbclk, HME_BBCLK_HIGH);
453 	return (x);
454 }
455 
456 #define	SEND_BIT(x)		send_bit(hmep, x)
457 #define	GET_BIT_STD(phyad, x)	x = get_bit_std(phyad, hmep)
458 
459 
460 static void
461 hme_bb_mii_write(struct hme *hmep, uint8_t phyad, uint8_t regad, uint16_t data)
462 {
463 	int	i;
464 
465 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
466 	(void) hme_bb_force_idle(hmep);
467 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
468 	SEND_BIT(0); SEND_BIT(1);	/* <OP> */
469 
470 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
471 		SEND_BIT((phyad >> i) & 1);
472 	}
473 
474 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
475 		SEND_BIT((regad >> i) & 1);
476 	}
477 
478 	SEND_BIT(1); SEND_BIT(0);	/* <TA> */
479 
480 	for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
481 		SEND_BIT((data >> i) & 1);
482 	}
483 
484 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
485 	CHECK_MIFREG();
486 }
487 
488 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
489 static uint16_t
490 hme_bb_mii_read(struct hme *hmep, uint8_t phyad, uint8_t regad)
491 {
492 	int		i;
493 	uint32_t	x;
494 	uint16_t	data = 0;
495 
496 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
497 	(void) hme_bb_force_idle(hmep);
498 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
499 	SEND_BIT(1); SEND_BIT(0);	/* <OP> */
500 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
501 		SEND_BIT((phyad >> i) & 1);
502 	}
503 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
504 		SEND_BIT((regad >> i) & 1);
505 	}
506 
507 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
508 
509 	GET_BIT_STD(phyad, x);
510 	GET_BIT_STD(phyad, x);		/* <TA> */
511 	for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
512 		GET_BIT_STD(phyad, x);
513 		data += (x << i);
514 	}
515 	/*
516 	 * Kludge to get the Transceiver out of hung mode
517 	 */
518 	GET_BIT_STD(phyad, x);
519 	GET_BIT_STD(phyad, x);
520 	GET_BIT_STD(phyad, x);
521 	CHECK_MIFREG();
522 	return (data);
523 }
524 
525 
526 static void
527 hme_bb_force_idle(struct hme *hmep)
528 {
529 	int	i;
530 
531 	for (i = 0; i < 33; i++) {
532 		SEND_BIT(1);
533 	}
534 }
535 
536 /* <<<<<<<<<<<<<<<<<<<<End of Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
537 
538 
539 /* <<<<<<<<<<<<< Frame Register used for MII operations >>>>>>>>>>>>>>>>>>>> */
540 
541 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
542 static uint16_t
543 hme_mii_read(void *arg, uint8_t phyad, uint8_t regad)
544 {
545 	struct hme	*hmep = arg;
546 	uint32_t	frame;
547 	uint32_t	tmp_mif;
548 	uint32_t	tmp_xif;
549 
550 	tmp_mif = GET_MIFREG(mif_cfg);
551 	tmp_xif = GET_MACREG(xifc);
552 
553 	switch (phyad) {
554 	case HME_EXTERNAL_PHYAD:
555 		PUT_MIFREG(mif_cfg, tmp_mif | HME_MIF_CFGPS);
556 		PUT_MACREG(xifc, tmp_xif | BMAC_XIFC_MIIBUFDIS);
557 		break;
558 	case HME_INTERNAL_PHYAD:
559 		PUT_MIFREG(mif_cfg, tmp_mif & ~(HME_MIF_CFGPS));
560 		PUT_MACREG(xifc, tmp_xif & ~(BMAC_XIFC_MIIBUFDIS));
561 		break;
562 	default:
563 		return (0xffff);
564 	}
565 
566 	if (!hmep->hme_frame_enable) {
567 		frame = (hme_bb_mii_read(hmep, phyad, regad));
568 		PUT_MACREG(xifc, tmp_xif);
569 		PUT_MIFREG(mif_cfg, tmp_mif);
570 		return (frame & 0xffff);
571 	}
572 
573 	PUT_MIFREG(mif_frame,
574 	    HME_MIF_FRREAD | (phyad << HME_MIF_FRPHYAD_SHIFT) |
575 	    (regad << HME_MIF_FRREGAD_SHIFT));
576 /*
577  *	HMEDELAY((*framerp & HME_MIF_FRTA0), HMEMAXRSTDELAY);
578  */
579 	HMEDELAY((GET_MIFREG(mif_frame) & HME_MIF_FRTA0), 300);
580 	frame = GET_MIFREG(mif_frame);
581 	CHECK_MIFREG();
582 
583 	PUT_MACREG(xifc, tmp_xif);
584 	PUT_MIFREG(mif_cfg, tmp_mif);
585 
586 	if ((frame & HME_MIF_FRTA0) == 0) {
587 
588 
589 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, MII_MSG,
590 		    "MIF Read failure");
591 		return (0xffff);
592 	}
593 	return ((uint16_t)(frame & HME_MIF_FRDATA));
594 }
595 
596 static void
597 hme_mii_write(void *arg, uint8_t phyad, uint8_t regad, uint16_t data)
598 {
599 	struct hme *hmep = arg;
600 	uint32_t frame;
601 	uint32_t tmp_mif;
602 	uint32_t tmp_xif;
603 
604 	tmp_mif = GET_MIFREG(mif_cfg);
605 	tmp_xif = GET_MACREG(xifc);
606 
607 	switch (phyad) {
608 	case HME_EXTERNAL_PHYAD:
609 		PUT_MIFREG(mif_cfg, tmp_mif | HME_MIF_CFGPS);
610 		PUT_MACREG(xifc, tmp_xif | BMAC_XIFC_MIIBUFDIS);
611 		break;
612 	case HME_INTERNAL_PHYAD:
613 		PUT_MIFREG(mif_cfg, tmp_mif & ~(HME_MIF_CFGPS));
614 		PUT_MACREG(xifc, tmp_xif & ~(BMAC_XIFC_MIIBUFDIS));
615 		break;
616 	default:
617 		return;
618 	}
619 
620 	if (!hmep->hme_frame_enable) {
621 		hme_bb_mii_write(hmep, phyad, regad, data);
622 		PUT_MACREG(xifc, tmp_xif);
623 		PUT_MIFREG(mif_cfg, tmp_mif);
624 		return;
625 	}
626 
627 	PUT_MIFREG(mif_frame,
628 	    HME_MIF_FRWRITE | (phyad << HME_MIF_FRPHYAD_SHIFT) |
629 	    (regad << HME_MIF_FRREGAD_SHIFT) | data);
630 /*
631  *	HMEDELAY((*framerp & HME_MIF_FRTA0), HMEMAXRSTDELAY);
632  */
633 	HMEDELAY((GET_MIFREG(mif_frame) & HME_MIF_FRTA0), 300);
634 	frame = GET_MIFREG(mif_frame);
635 	PUT_MACREG(xifc, tmp_xif);
636 	PUT_MIFREG(mif_cfg, tmp_mif);
637 	CHECK_MIFREG();
638 	if ((frame & HME_MIF_FRTA0) == 0) {
639 		HME_FAULT_MSG1(hmep, SEVERITY_MID, MII_MSG,
640 		    "MIF Write failure");
641 	}
642 }
643 
644 static void
645 hme_mii_notify(void *arg, link_state_t link)
646 {
647 	struct hme *hmep = arg;
648 
649 	if (link == LINK_STATE_UP) {
650 		(void) hmeinit(hmep);
651 	}
652 	mac_link_update(hmep->hme_mh, link);
653 }
654 
655 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<  LOADABLE ENTRIES  >>>>>>>>>>>>>>>>>>>>>>> */
656 
657 int
658 _init(void)
659 {
660 	int	status;
661 
662 	mac_init_ops(&hme_dev_ops, "hme");
663 	if ((status = mod_install(&modlinkage)) != 0) {
664 		mac_fini_ops(&hme_dev_ops);
665 	}
666 	return (status);
667 }
668 
669 int
670 _fini(void)
671 {
672 	int	status;
673 
674 	if ((status = mod_remove(&modlinkage)) == 0) {
675 		mac_fini_ops(&hme_dev_ops);
676 	}
677 	return (status);
678 }
679 
680 int
681 _info(struct modinfo *modinfop)
682 {
683 	return (mod_info(&modlinkage, modinfop));
684 }
685 
686 /*
687  * ddi_dma_sync() a TMD or RMD descriptor.
688  */
689 #define	HMESYNCRMD(num, who)				\
690 	(void) ddi_dma_sync(hmep->hme_rmd_dmah,		\
691 	    (num * sizeof (struct hme_rmd)),		\
692 	    sizeof (struct hme_rmd),			\
693 	    who)
694 
695 #define	HMESYNCTMD(num, who)				\
696 	(void) ddi_dma_sync(hmep->hme_tmd_dmah,		\
697 	    (num * sizeof (struct hme_tmd)),		\
698 	    sizeof (struct hme_tmd),			\
699 	    who)
700 
701 /*
702  * Ethernet broadcast address definition.
703  */
704 static	struct ether_addr	etherbroadcastaddr = {
705 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
706 };
707 
708 /*
709  * MIB II broadcast/multicast packets
710  */
711 #define	IS_BROADCAST(pkt) (bcmp(pkt, &etherbroadcastaddr, ETHERADDRL) == 0)
712 #define	IS_MULTICAST(pkt) ((pkt[0] & 01) == 1)
713 #define	BUMP_InNUcast(hmep, pkt) \
714 	if (IS_MULTICAST(pkt)) {			       \
715 		if (IS_BROADCAST(pkt)) {		       \
716 			hmep->hme_brdcstrcv++;		       \
717 		} else {				       \
718 			hmep->hme_multircv++;		       \
719 		}					       \
720 	}
721 #define	BUMP_OutNUcast(hmep, pkt) \
722 	if (IS_MULTICAST(pkt)) {			       \
723 		if (IS_BROADCAST(pkt)) {		       \
724 			hmep->hme_brdcstxmt++;		       \
725 		} else {				       \
726 			hmep->hme_multixmt++;		       \
727 		}					       \
728 	}
729 
730 static int
731 hme_create_prop_from_kw(dev_info_t *dip, char *vpdname, char *vpdstr)
732 {
733 	char propstr[80];
734 	int i, needprop = 0;
735 	struct ether_addr local_mac;
736 
737 	if (strcmp(vpdname, "NA") == 0) {
738 		(void) strcpy(propstr, "local-mac-address");
739 		needprop = 1;
740 	} else if (strcmp(vpdname, "Z0") == 0) {
741 		(void) strcpy(propstr, "model");
742 		needprop = 1;
743 	} else if (strcmp(vpdname, "Z1") == 0) {
744 		(void) strcpy(propstr, "board-model");
745 		needprop = 1;
746 	}
747 
748 	if (needprop == 1) {
749 
750 		if (strcmp(propstr, "local-mac-address") == 0) {
751 			for (i = 0; i < ETHERADDRL; i++)
752 				local_mac.ether_addr_octet[i] =
753 				    (uchar_t)vpdstr[i];
754 			if (ddi_prop_create(DDI_DEV_T_NONE, dip,
755 			    DDI_PROP_CANSLEEP, propstr,
756 			    (char *)local_mac.ether_addr_octet, ETHERADDRL)
757 			    != DDI_SUCCESS) {
758 				return (DDI_FAILURE);
759 			}
760 		} else {
761 			if (ddi_prop_create(DDI_DEV_T_NONE, dip,
762 			    DDI_PROP_CANSLEEP, propstr, vpdstr,
763 			    strlen(vpdstr)+1) != DDI_SUCCESS) {
764 				return (DDI_FAILURE);
765 			}
766 		}
767 	}
768 	return (0);
769 }
770 
771 /*
772  * Get properties from old VPD
773  * for PCI cards
774  */
775 static int
776 hme_get_oldvpd_props(dev_info_t *dip, int vpd_base)
777 {
778 	struct hme *hmep;
779 	int vpd_start, vpd_len, kw_start, kw_len, kw_ptr;
780 	char kw_namestr[3];
781 	char kw_fieldstr[256];
782 	int i;
783 
784 	hmep = ddi_get_driver_private(dip);
785 
786 	vpd_start = vpd_base;
787 
788 	if ((GET_ROM8(&hmep->hme_romp[vpd_start]) & 0xff) != 0x90) {
789 		return (1); /* error */
790 	} else {
791 		vpd_len = 9;
792 	}
793 
794 	/* Get local-mac-address */
795 	kw_start = vpd_start + 3; /* Location of 1st keyword */
796 	kw_ptr = kw_start;
797 	while ((kw_ptr - kw_start) < vpd_len) { /* Get all keywords */
798 		kw_namestr[0] = GET_ROM8(&hmep->hme_romp[kw_ptr]);
799 		kw_namestr[1] = GET_ROM8(&hmep->hme_romp[kw_ptr+1]);
800 		kw_namestr[2] = '\0';
801 		kw_len = (int)(GET_ROM8(&hmep->hme_romp[kw_ptr+2]) & 0xff);
802 		for (i = 0, kw_ptr += 3; i < kw_len; i++)
803 			kw_fieldstr[i] = GET_ROM8(&hmep->hme_romp[kw_ptr+i]);
804 		kw_fieldstr[i] = '\0';
805 		if (hme_create_prop_from_kw(dip, kw_namestr, kw_fieldstr)) {
806 			return (DDI_FAILURE);
807 		}
808 		kw_ptr += kw_len;
809 	} /* next keyword */
810 
811 	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, "model",
812 	    "SUNW,cheerio", strlen("SUNW,cheerio")+1) != DDI_SUCCESS) {
813 		return (DDI_FAILURE);
814 	}
815 	return (0);
816 }
817 
818 
819 /*
820  * Get properties from new VPD
821  * for CompactPCI cards
822  */
823 static int
824 hme_get_newvpd_props(dev_info_t *dip, int vpd_base)
825 {
826 	struct hme *hmep;
827 	int vpd_start, vpd_len, kw_start, kw_len, kw_ptr;
828 	char kw_namestr[3];
829 	char kw_fieldstr[256];
830 	int maxvpdsize, i;
831 
832 	hmep = ddi_get_driver_private(dip);
833 
834 	maxvpdsize = 1024; /* Real size not known until after it is read */
835 
836 	vpd_start = (int)((GET_ROM8(&(hmep->hme_romp[vpd_base+1])) & 0xff) |
837 	    ((GET_ROM8(&hmep->hme_romp[vpd_base+2]) & 0xff) << 8)) +3;
838 	vpd_start = vpd_base + vpd_start;
839 	while (vpd_start < (vpd_base + maxvpdsize)) { /* Get all VPDs */
840 		if ((GET_ROM8(&hmep->hme_romp[vpd_start]) & 0xff) != 0x90) {
841 			break; /* no VPD found */
842 		} else {
843 			vpd_len = (int)((GET_ROM8(&hmep->hme_romp[vpd_start
844 			    + 1]) & 0xff) | (GET_ROM8(&hmep->hme_romp[vpd_start
845 			    + 2]) & 0xff) << 8);
846 		}
847 		/* Get all keywords in this VPD */
848 		kw_start = vpd_start + 3; /* Location of 1st keyword */
849 		kw_ptr = kw_start;
850 		while ((kw_ptr - kw_start) < vpd_len) { /* Get all keywords */
851 			kw_namestr[0] = GET_ROM8(&hmep->hme_romp[kw_ptr]);
852 			kw_namestr[1] = GET_ROM8(&hmep->hme_romp[kw_ptr+1]);
853 			kw_namestr[2] = '\0';
854 			kw_len =
855 			    (int)(GET_ROM8(&hmep->hme_romp[kw_ptr+2]) & 0xff);
856 			for (i = 0, kw_ptr += 3; i < kw_len; i++)
857 				kw_fieldstr[i] =
858 				    GET_ROM8(&hmep->hme_romp[kw_ptr+i]);
859 			kw_fieldstr[i] = '\0';
860 			if (hme_create_prop_from_kw(dip, kw_namestr,
861 			    kw_fieldstr)) {
862 				return (DDI_FAILURE);
863 			}
864 			kw_ptr += kw_len;
865 		} /* next keyword */
866 		vpd_start += (vpd_len + 3);
867 	} /* next VPD */
868 	return (0);
869 }
870 
871 
872 /*
873  * Get properties from VPD
874  */
875 static int
876 hme_get_vpd_props(dev_info_t *dip)
877 {
878 	struct hme *hmep;
879 	int v0, v1, vpd_base;
880 	int i, epromsrchlimit;
881 
882 
883 	hmep = ddi_get_driver_private(dip);
884 
885 	v0 = (int)(GET_ROM8(&(hmep->hme_romp[0])));
886 	v1 = (int)(GET_ROM8(&(hmep->hme_romp[1])));
887 	v0 = ((v0 & 0xff) << 8 | v1);
888 
889 	if ((v0 & 0xffff) != 0x55aa) {
890 		cmn_err(CE_NOTE, " Valid pci prom not found \n");
891 		return (1);
892 	}
893 
894 	epromsrchlimit = 4096;
895 	for (i = 2; i < epromsrchlimit; i++) {
896 		/* "PCIR" */
897 		if (((GET_ROM8(&(hmep->hme_romp[i])) & 0xff) == 'P') &&
898 		    ((GET_ROM8(&(hmep->hme_romp[i+1])) & 0xff) == 'C') &&
899 		    ((GET_ROM8(&(hmep->hme_romp[i+2])) & 0xff) == 'I') &&
900 		    ((GET_ROM8(&(hmep->hme_romp[i+3])) & 0xff) == 'R')) {
901 			vpd_base =
902 			    (int)((GET_ROM8(&(hmep->hme_romp[i+8])) & 0xff) |
903 			    (GET_ROM8(&(hmep->hme_romp[i+9])) & 0xff) << 8);
904 			break; /* VPD pointer found */
905 		}
906 	}
907 
908 	/* No VPD found */
909 	if (vpd_base == 0) {
910 		cmn_err(CE_NOTE, " Vital Product Data pointer not found \n");
911 		return (1);
912 	}
913 
914 	v0 = (int)(GET_ROM8(&(hmep->hme_romp[vpd_base])));
915 	if (v0 == 0x82) {
916 		if (hme_get_newvpd_props(dip, vpd_base))
917 			return (1);
918 		return (0);
919 	} else if (v0 == 0x90) {
920 		/* If we are are SUNW,qfe card, look for the Nth "NA" descr */
921 		if ((GET_ROM8(&hmep->hme_romp[vpd_base + 12])  != 0x79) &&
922 		    GET_ROM8(&hmep->hme_romp[vpd_base + 4 * 12]) == 0x79) {
923 			vpd_base += hmep->hme_devno * 12;
924 		}
925 		if (hme_get_oldvpd_props(dip, vpd_base))
926 			return (1);
927 		return (0);
928 	} else
929 		return (1);	/* unknown start byte in VPD */
930 }
931 
932 /*
933  * For x86, the BIOS doesn't map the PCI Rom register for the qfe
934  * cards, so we have to extract it from the ebus bridge that is
935  * function zero of the same device.  This is a bit of an ugly hack.
936  * (The ebus bridge leaves the entire ROM mapped at base address
937  * register 0x10.)
938  */
939 
940 typedef struct {
941 	struct hme 		*hmep;
942 	dev_info_t		*parent;
943 	uint8_t			bus, dev;
944 	ddi_acc_handle_t	acch;
945 	caddr_t			romp;
946 } ebus_rom_t;
947 
948 static int
949 hme_mapebusrom(dev_info_t *dip, void *arg)
950 {
951 	int		*regs;
952 	unsigned	nregs;
953 	int		reg;
954 	ebus_rom_t	*rom = arg;
955 	struct hme	*hmep = rom->hmep;
956 
957 	/*
958 	 * We only want to look at our peers.  Skip our parent.
959 	 */
960 	if (dip == rom->parent) {
961 		return (DDI_WALK_PRUNESIB);
962 	}
963 
964 	if (ddi_get_parent(dip) != rom->parent)
965 		return (DDI_WALK_CONTINUE);
966 
967 	if ((ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0,
968 	    "reg", &regs, &nregs)) != DDI_PROP_SUCCESS) {
969 		return (DDI_WALK_PRUNECHILD);
970 	}
971 
972 	if (nregs < 1) {
973 		ddi_prop_free(regs);
974 		return (DDI_WALK_PRUNECHILD);
975 	}
976 	reg = regs[0];
977 	ddi_prop_free(regs);
978 
979 	/*
980 	 * Look for function 0 on our bus and device.  If the device doesn't
981 	 * match, it might be an alternate peer, in which case we don't want
982 	 * to examine any of its children.
983 	 */
984 	if ((PCI_REG_BUS_G(reg) != rom->bus) ||
985 	    (PCI_REG_DEV_G(reg) != rom->dev) ||
986 	    (PCI_REG_FUNC_G(reg) != 0)) {
987 		return (DDI_WALK_PRUNECHILD);
988 	}
989 
990 	(void) ddi_regs_map_setup(dip, 1, &rom->romp, 0, 0, &hmep->hme_dev_attr,
991 	    &rom->acch);
992 	/*
993 	 * If we can't map the registers, the caller will notice that
994 	 * the acch is NULL.
995 	 */
996 	return (DDI_WALK_TERMINATE);
997 }
998 
999 static int
1000 hmeget_promebus(dev_info_t *dip)
1001 {
1002 	ebus_rom_t	rom;
1003 	int		*regs;
1004 	unsigned	nregs;
1005 	struct hme	*hmep;
1006 
1007 	hmep = ddi_get_driver_private(dip);
1008 
1009 	bzero(&rom, sizeof (rom));
1010 
1011 	/*
1012 	 * For x86, the BIOS doesn't map the PCI Rom register for the qfe
1013 	 * cards, so we have to extract it from the eBus bridge that is
1014 	 * function zero.  This is a bit of an ugly hack.
1015 	 */
1016 	if ((ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0,
1017 	    "reg", &regs, &nregs)) != DDI_PROP_SUCCESS) {
1018 		return (DDI_FAILURE);
1019 	}
1020 
1021 	if (nregs < 5) {
1022 		ddi_prop_free(regs);
1023 		return (DDI_FAILURE);
1024 	}
1025 	rom.hmep = hmep;
1026 	rom.bus = PCI_REG_BUS_G(regs[0]);
1027 	rom.dev = PCI_REG_DEV_G(regs[0]);
1028 	hmep->hme_devno = rom.dev;
1029 	rom.parent = ddi_get_parent(dip);
1030 
1031 	/*
1032 	 * The implementation of ddi_walk_devs says that we must not
1033 	 * be called during autoconfiguration.  However, it turns out
1034 	 * that it is safe to call this during our attach routine,
1035 	 * because we are not a nexus device.
1036 	 *
1037 	 * Previously we rooted our search at our immediate parent,
1038 	 * but this triggered an assertion panic in debug kernels.
1039 	 */
1040 	ddi_walk_devs(ddi_root_node(), hme_mapebusrom, &rom);
1041 
1042 	if (rom.acch) {
1043 		hmep->hme_romh = rom.acch;
1044 		hmep->hme_romp = (unsigned char *)rom.romp;
1045 		return (DDI_SUCCESS);
1046 	}
1047 	return (DDI_FAILURE);
1048 }
1049 
1050 static int
1051 hmeget_promprops(dev_info_t *dip)
1052 {
1053 	struct hme *hmep;
1054 	int rom_bar;
1055 	ddi_acc_handle_t cfg_handle;
1056 	struct {
1057 		uint16_t vendorid;
1058 		uint16_t devid;
1059 		uint16_t command;
1060 		uint16_t status;
1061 		uint32_t junk1;
1062 		uint8_t cache_line;
1063 		uint8_t latency;
1064 		uint8_t header;
1065 		uint8_t bist;
1066 		uint32_t base;
1067 		uint32_t base14;
1068 		uint32_t base18;
1069 		uint32_t base1c;
1070 		uint32_t base20;
1071 		uint32_t base24;
1072 		uint32_t base28;
1073 		uint32_t base2c;
1074 		uint32_t base30;
1075 	} *cfg_ptr;
1076 
1077 	hmep = ddi_get_driver_private(dip);
1078 
1079 
1080 	/*
1081 	 * map configuration space
1082 	 */
1083 	if (ddi_regs_map_setup(hmep->dip, 0, (caddr_t *)&cfg_ptr,
1084 	    0, 0, &hmep->hme_dev_attr, &cfg_handle)) {
1085 		return (DDI_FAILURE);
1086 	}
1087 
1088 	/*
1089 	 * Enable bus-master and memory accesses
1090 	 */
1091 	ddi_put16(cfg_handle, &cfg_ptr->command,
1092 	    PCI_COMM_SERR_ENABLE | PCI_COMM_PARITY_DETECT |
1093 	    PCI_COMM_MAE | PCI_COMM_ME);
1094 
1095 	/*
1096 	 * Enable rom accesses
1097 	 */
1098 	rom_bar = ddi_get32(cfg_handle, &cfg_ptr->base30);
1099 	ddi_put32(cfg_handle, &cfg_ptr->base30, rom_bar | 1);
1100 
1101 
1102 	if ((ddi_regs_map_setup(dip, 2, (caddr_t *)&(hmep->hme_romp), 0, 0,
1103 	    &hmep->hme_dev_attr, &hmep->hme_romh) != DDI_SUCCESS) &&
1104 	    (hmeget_promebus(dip) != DDI_SUCCESS)) {
1105 
1106 		if (cfg_ptr)
1107 			ddi_regs_map_free(&cfg_handle);
1108 		return (DDI_FAILURE);
1109 	} else {
1110 		if (hme_get_vpd_props(dip))
1111 			return (DDI_FAILURE);
1112 	}
1113 	if (hmep->hme_romp)
1114 		ddi_regs_map_free(&hmep->hme_romh);
1115 	if (cfg_ptr)
1116 		ddi_regs_map_free(&cfg_handle);
1117 	return (DDI_SUCCESS);
1118 
1119 }
1120 
1121 static void
1122 hmeget_hm_rev_property(struct hme *hmep)
1123 {
1124 	int	hm_rev;
1125 
1126 
1127 	hm_rev = hmep->asic_rev;
1128 	switch (hm_rev) {
1129 	case HME_2P1_REVID:
1130 	case HME_2P1_REVID_OBP:
1131 		HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
1132 		    "SBus 2.1 Found (Rev Id = %x)", hm_rev);
1133 		hmep->hme_frame_enable = 1;
1134 		break;
1135 
1136 	case HME_2P0_REVID:
1137 		HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
1138 		    "SBus 2.0 Found (Rev Id = %x)", hm_rev);
1139 		break;
1140 
1141 	case HME_1C0_REVID:
1142 		HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
1143 		    "PCI IO 1.0 Found (Rev Id = %x)", hm_rev);
1144 		break;
1145 
1146 	default:
1147 		HME_FAULT_MSG3(hmep, SEVERITY_NONE, DISPLAY_MSG,
1148 		    "%s (Rev Id = %x) Found",
1149 		    (hm_rev == HME_2C0_REVID) ? "PCI IO 2.0" : "Sbus", hm_rev);
1150 		hmep->hme_frame_enable = 1;
1151 		hmep->hme_lance_mode_enable = 1;
1152 		hmep->hme_rxcv_enable = 1;
1153 		break;
1154 	}
1155 }
1156 
1157 /*
1158  * Interface exists: make available by filling in network interface
1159  * record.  System will initialize the interface when it is ready
1160  * to accept packets.
1161  */
1162 int
1163 hmeattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1164 {
1165 	struct hme *hmep;
1166 	mac_register_t *macp = NULL;
1167 	int 	regno;
1168 	int hm_rev = 0;
1169 	int prop_len = sizeof (int);
1170 	ddi_acc_handle_t cfg_handle;
1171 	struct {
1172 		uint16_t vendorid;
1173 		uint16_t devid;
1174 		uint16_t command;
1175 		uint16_t status;
1176 		uint8_t revid;
1177 		uint8_t j1;
1178 		uint16_t j2;
1179 	} *cfg_ptr;
1180 
1181 	switch (cmd) {
1182 	case DDI_ATTACH:
1183 		break;
1184 
1185 	case DDI_RESUME:
1186 		if ((hmep = ddi_get_driver_private(dip)) == NULL)
1187 			return (DDI_FAILURE);
1188 
1189 		hmep->hme_flags &= ~HMESUSPENDED;
1190 
1191 		mii_resume(hmep->hme_mii);
1192 
1193 		if (hmep->hme_started)
1194 			(void) hmeinit(hmep);
1195 		return (DDI_SUCCESS);
1196 
1197 	default:
1198 		return (DDI_FAILURE);
1199 	}
1200 
1201 	/*
1202 	 * Allocate soft device data structure
1203 	 */
1204 	hmep = kmem_zalloc(sizeof (*hmep), KM_SLEEP);
1205 
1206 	/*
1207 	 * Might as well set up elements of data structure
1208 	 */
1209 	hmep->dip =		dip;
1210 	hmep->instance = 	ddi_get_instance(dip);
1211 	hmep->pagesize =	ddi_ptob(dip, (ulong_t)1); /* IOMMU PSize */
1212 
1213 	/*
1214 	 *  Might as well setup the driver private
1215 	 * structure as part of the dip.
1216 	 */
1217 	ddi_set_driver_private(dip, hmep);
1218 
1219 	/*
1220 	 * Reject this device if it's in a slave-only slot.
1221 	 */
1222 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
1223 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1224 		    "Dev not used - dev in slave only slot");
1225 		goto error_state;
1226 	}
1227 
1228 	/*
1229 	 * Map in the device registers.
1230 	 *
1231 	 * Reg # 0 is the Global register set
1232 	 * Reg # 1 is the ETX register set
1233 	 * Reg # 2 is the ERX register set
1234 	 * Reg # 3 is the BigMAC register set.
1235 	 * Reg # 4 is the MIF register set
1236 	 */
1237 	if (ddi_dev_nregs(dip, &regno) != (DDI_SUCCESS)) {
1238 		HME_FAULT_MSG2(hmep, SEVERITY_HIGH, INIT_MSG,
1239 		    ddi_nregs_fail_msg, regno);
1240 		goto error_state;
1241 	}
1242 
1243 	switch (regno) {
1244 	case 5:
1245 		hmep->hme_cheerio_mode = 0;
1246 		break;
1247 	case 2:
1248 	case 3: /* for hot swap/plug, there will be 3 entries in "reg" prop */
1249 		hmep->hme_cheerio_mode = 1;
1250 		break;
1251 	default:
1252 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
1253 		    bad_num_regs_msg);
1254 		goto error_state;
1255 	}
1256 
1257 	/* Initialize device attributes structure */
1258 	hmep->hme_dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1259 
1260 	if (hmep->hme_cheerio_mode)
1261 		hmep->hme_dev_attr.devacc_attr_endian_flags =
1262 		    DDI_STRUCTURE_LE_ACC;
1263 	else
1264 		hmep->hme_dev_attr.devacc_attr_endian_flags =
1265 		    DDI_STRUCTURE_BE_ACC;
1266 
1267 	hmep->hme_dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1268 
1269 	if (hmep->hme_cheerio_mode) {
1270 		uint8_t		oldLT;
1271 		uint8_t		newLT = 0;
1272 		dev_info_t	*pdip;
1273 		const char	*pdrvname;
1274 
1275 		/*
1276 		 * Map the PCI config space
1277 		 */
1278 		if (pci_config_setup(dip, &hmep->pci_config_handle) !=
1279 		    DDI_SUCCESS) {
1280 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1281 			    "pci_config_setup() failed..");
1282 			goto error_state;
1283 		}
1284 
1285 		if (ddi_regs_map_setup(dip, 1,
1286 		    (caddr_t *)&(hmep->hme_globregp), 0, 0,
1287 		    &hmep->hme_dev_attr, &hmep->hme_globregh)) {
1288 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1289 			    mregs_4global_reg_fail_msg);
1290 			goto error_unmap;
1291 		}
1292 		hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
1293 		    hmep->hme_mifregh = hmep->hme_globregh;
1294 
1295 		hmep->hme_etxregp =
1296 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x2000);
1297 		hmep->hme_erxregp =
1298 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x4000);
1299 		hmep->hme_bmacregp =
1300 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x6000);
1301 		hmep->hme_mifregp =
1302 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x7000);
1303 
1304 		/*
1305 		 * Get parent pci bridge info.
1306 		 */
1307 		pdip = ddi_get_parent(dip);
1308 		pdrvname = ddi_driver_name(pdip);
1309 
1310 		oldLT = pci_config_get8(hmep->pci_config_handle,
1311 		    PCI_CONF_LATENCY_TIMER);
1312 		/*
1313 		 * Honor value set in /etc/system
1314 		 * "set hme:pci_latency_timer=0xYY"
1315 		 */
1316 		if (pci_latency_timer)
1317 			newLT = pci_latency_timer;
1318 		/*
1319 		 * Modify LT for simba
1320 		 */
1321 		else if (strcmp("simba", pdrvname) == 0)
1322 			newLT = 0xf0;
1323 		/*
1324 		 * Ensure minimum cheerio latency timer of 0x50
1325 		 * Usually OBP or pci bridge should set this value
1326 		 * based on cheerio
1327 		 * min_grant * 8(33MHz) = 0x50 = 0xa * 0x8
1328 		 * Some system set cheerio LT at 0x40
1329 		 */
1330 		else if (oldLT < 0x40)
1331 			newLT = 0x50;
1332 
1333 		/*
1334 		 * Now program cheerio's pci latency timer with newLT
1335 		 */
1336 		if (newLT)
1337 			pci_config_put8(hmep->pci_config_handle,
1338 			    PCI_CONF_LATENCY_TIMER, (uchar_t)newLT);
1339 	} else { /* Map register sets */
1340 		if (ddi_regs_map_setup(dip, 0,
1341 		    (caddr_t *)&(hmep->hme_globregp), 0, 0,
1342 		    &hmep->hme_dev_attr, &hmep->hme_globregh)) {
1343 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1344 			    mregs_4global_reg_fail_msg);
1345 			goto error_state;
1346 		}
1347 		if (ddi_regs_map_setup(dip, 1,
1348 		    (caddr_t *)&(hmep->hme_etxregp), 0, 0,
1349 		    &hmep->hme_dev_attr, &hmep->hme_etxregh)) {
1350 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1351 			    mregs_4etx_reg_fail_msg);
1352 			goto error_unmap;
1353 		}
1354 		if (ddi_regs_map_setup(dip, 2,
1355 		    (caddr_t *)&(hmep->hme_erxregp), 0, 0,
1356 		    &hmep->hme_dev_attr, &hmep->hme_erxregh)) {
1357 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1358 			    mregs_4erx_reg_fail_msg);
1359 			goto error_unmap;
1360 		}
1361 		if (ddi_regs_map_setup(dip, 3,
1362 		    (caddr_t *)&(hmep->hme_bmacregp), 0, 0,
1363 		    &hmep->hme_dev_attr, &hmep->hme_bmacregh)) {
1364 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1365 			    mregs_4bmac_reg_fail_msg);
1366 			goto error_unmap;
1367 		}
1368 
1369 		if (ddi_regs_map_setup(dip, 4,
1370 		    (caddr_t *)&(hmep->hme_mifregp), 0, 0,
1371 		    &hmep->hme_dev_attr, &hmep->hme_mifregh)) {
1372 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1373 			    mregs_4mif_reg_fail_msg);
1374 			goto error_unmap;
1375 		}
1376 	} /* Endif cheerio_mode */
1377 
1378 	/*
1379 	 * Based on the hm-rev, set some capabilities
1380 	 * Set up default capabilities for HM 2.0
1381 	 */
1382 	hmep->hme_frame_enable = 0;
1383 	hmep->hme_lance_mode_enable = 0;
1384 	hmep->hme_rxcv_enable = 0;
1385 
1386 	/* NEW routine to get the properties */
1387 
1388 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, hmep->dip, 0, "hm-rev",
1389 	    (caddr_t)&hm_rev, &prop_len) == DDI_PROP_SUCCESS) {
1390 
1391 		hmep->asic_rev = hm_rev;
1392 		hmeget_hm_rev_property(hmep);
1393 	} else {
1394 		/*
1395 		 * hm_rev property not found so, this is
1396 		 * case of hot insertion of card without interpreting fcode.
1397 		 * Get it from revid in config space after mapping it.
1398 		 */
1399 		if (ddi_regs_map_setup(hmep->dip, 0, (caddr_t *)&cfg_ptr,
1400 		    0, 0, &hmep->hme_dev_attr, &cfg_handle)) {
1401 			return (DDI_FAILURE);
1402 		}
1403 		/*
1404 		 * Since this is cheerio-based PCI card, we write 0xC in the
1405 		 * top 4 bits(4-7) of hm-rev and retain the bottom(0-3) bits
1406 		 * for Cheerio version(1.0 or 2.0 = 0xC0 or 0xC1)
1407 		 */
1408 		hm_rev = ddi_get8(cfg_handle, &cfg_ptr->revid);
1409 		hm_rev = HME_1C0_REVID | (hm_rev & HME_REV_VERS_MASK);
1410 		hmep->asic_rev = hm_rev;
1411 		if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
1412 		    "hm-rev", (caddr_t)&hm_rev, sizeof (hm_rev)) !=
1413 		    DDI_SUCCESS) {
1414 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, AUTOCONFIG_MSG,
1415 			    "ddi_prop_create error for hm_rev");
1416 		}
1417 		ddi_regs_map_free(&cfg_handle);
1418 
1419 		hmeget_hm_rev_property(hmep);
1420 
1421 		/* get info via VPD */
1422 		if (hmeget_promprops(dip) != DDI_SUCCESS) {
1423 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, AUTOCONFIG_MSG,
1424 			    "no promprops");
1425 		}
1426 	}
1427 
1428 	if (ddi_intr_hilevel(dip, 0)) {
1429 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, NFATAL_ERR_MSG,
1430 		    " high-level interrupts are not supported");
1431 		goto error_unmap;
1432 	}
1433 
1434 	/*
1435 	 * Get intr. block cookie so that mutex locks can be initialized.
1436 	 */
1437 	if (ddi_get_iblock_cookie(dip, 0, &hmep->hme_cookie) != DDI_SUCCESS)
1438 		goto error_unmap;
1439 
1440 	/*
1441 	 * Initialize mutex's for this device.
1442 	 */
1443 	mutex_init(&hmep->hme_xmitlock, NULL, MUTEX_DRIVER, hmep->hme_cookie);
1444 	mutex_init(&hmep->hme_intrlock, NULL, MUTEX_DRIVER, hmep->hme_cookie);
1445 
1446 	/*
1447 	 * Quiesce the hardware.
1448 	 */
1449 	(void) hmestop(hmep);
1450 
1451 	/*
1452 	 * Add interrupt to system
1453 	 */
1454 	if (ddi_add_intr(dip, 0, (ddi_iblock_cookie_t *)NULL,
1455 	    (ddi_idevice_cookie_t *)NULL, hmeintr, (caddr_t)hmep)) {
1456 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1457 		    add_intr_fail_msg);
1458 		goto error_mutex;
1459 	}
1460 
1461 	/*
1462 	 * Set up the ethernet mac address.
1463 	 */
1464 	hme_setup_mac_address(hmep, dip);
1465 
1466 	if (!hmeinit_xfer_params(hmep))
1467 		goto error_intr;
1468 
1469 	if (hmeburstsizes(hmep) == DDI_FAILURE) {
1470 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG, burst_size_msg);
1471 		goto error_intr;
1472 	}
1473 
1474 	if (hmeallocthings(hmep) != DDI_SUCCESS) {
1475 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
1476 		    "resource allocation failed");
1477 		goto error_intr;
1478 	}
1479 
1480 	if (hmeallocbufs(hmep) != DDI_SUCCESS) {
1481 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
1482 		    "buffer allocation failed");
1483 		goto error_intr;
1484 	}
1485 
1486 	hmestatinit(hmep);
1487 
1488 	/* our external (preferred) PHY is at address 0 */
1489 	(void) ddi_prop_update_int(DDI_DEV_T_NONE, dip, "first-phy", 0);
1490 
1491 	hmep->hme_mii = mii_alloc(hmep, dip, &hme_mii_ops);
1492 	if (hmep->hme_mii == NULL) {
1493 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
1494 		    "mii_alloc failed");
1495 		goto error_intr;
1496 	}
1497 	/* force a probe for the PHY */
1498 	mii_probe(hmep->hme_mii);
1499 
1500 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
1501 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
1502 		    "mac_alloc failed");
1503 		goto error_intr;
1504 	}
1505 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
1506 	macp->m_driver = hmep;
1507 	macp->m_dip = dip;
1508 	macp->m_src_addr = hmep->hme_ouraddr.ether_addr_octet;
1509 	macp->m_callbacks = &hme_m_callbacks;
1510 	macp->m_min_sdu = 0;
1511 	macp->m_max_sdu = ETHERMTU;
1512 	macp->m_margin = VLAN_TAGSZ;
1513 	macp->m_priv_props = hme_priv_prop;
1514 	if (mac_register(macp, &hmep->hme_mh) != 0) {
1515 		mac_free(macp);
1516 		goto error_intr;
1517 	}
1518 
1519 	mac_free(macp);
1520 
1521 	ddi_report_dev(dip);
1522 	return (DDI_SUCCESS);
1523 
1524 	/*
1525 	 * Failure Exit
1526 	 */
1527 
1528 error_intr:
1529 	if (hmep->hme_cookie)
1530 		ddi_remove_intr(dip, 0, (ddi_iblock_cookie_t)0);
1531 
1532 	if (hmep->hme_mii)
1533 		mii_free(hmep->hme_mii);
1534 
1535 error_mutex:
1536 	mutex_destroy(&hmep->hme_xmitlock);
1537 	mutex_destroy(&hmep->hme_intrlock);
1538 
1539 error_unmap:
1540 	if (hmep->hme_globregh)
1541 		ddi_regs_map_free(&hmep->hme_globregh);
1542 	if (hmep->hme_cheerio_mode == 0) {
1543 		if (hmep->hme_etxregh)
1544 			ddi_regs_map_free(&hmep->hme_etxregh);
1545 		if (hmep->hme_erxregh)
1546 			ddi_regs_map_free(&hmep->hme_erxregh);
1547 		if (hmep->hme_bmacregh)
1548 			ddi_regs_map_free(&hmep->hme_bmacregh);
1549 		if (hmep->hme_mifregh)
1550 			ddi_regs_map_free(&hmep->hme_mifregh);
1551 	} else {
1552 		if (hmep->pci_config_handle)
1553 			(void) pci_config_teardown(&hmep->pci_config_handle);
1554 		hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
1555 		    hmep->hme_mifregh = hmep->hme_globregh = NULL;
1556 	}
1557 
1558 error_state:
1559 	hmefreethings(hmep);
1560 	hmefreebufs(hmep);
1561 
1562 	if (hmep) {
1563 		kmem_free((caddr_t)hmep, sizeof (*hmep));
1564 		ddi_set_driver_private(dip, NULL);
1565 	}
1566 
1567 	return (DDI_FAILURE);
1568 }
1569 
1570 int
1571 hmedetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1572 {
1573 	struct hme *hmep;
1574 
1575 	if ((hmep = ddi_get_driver_private(dip)) == NULL)
1576 		return (DDI_FAILURE);
1577 
1578 	switch (cmd) {
1579 	case DDI_DETACH:
1580 		break;
1581 
1582 	case DDI_SUSPEND:
1583 		mii_suspend(hmep->hme_mii);
1584 		hmep->hme_flags |= HMESUSPENDED;
1585 		hmeuninit(hmep);
1586 		return (DDI_SUCCESS);
1587 
1588 	default:
1589 		return (DDI_FAILURE);
1590 	}
1591 
1592 
1593 	if (mac_unregister(hmep->hme_mh) != 0) {
1594 		return (DDI_FAILURE);
1595 	}
1596 
1597 	/*
1598 	 * Make driver quiescent, we don't want to prevent the
1599 	 * detach on failure.  Note that this should be redundant,
1600 	 * since mac_stop should already have called hmeuninit().
1601 	 */
1602 	if (!(hmep->hme_flags & HMESUSPENDED)) {
1603 		(void) hmestop(hmep);
1604 	}
1605 
1606 	if (hmep->hme_mii)
1607 		mii_free(hmep->hme_mii);
1608 
1609 	/*
1610 	 * Remove instance of the intr
1611 	 */
1612 	ddi_remove_intr(dip, 0, (ddi_iblock_cookie_t)0);
1613 
1614 	/*
1615 	 * Unregister kstats.
1616 	 */
1617 	if (hmep->hme_ksp != NULL)
1618 		kstat_delete(hmep->hme_ksp);
1619 	if (hmep->hme_intrstats != NULL)
1620 		kstat_delete(hmep->hme_intrstats);
1621 
1622 	hmep->hme_ksp = NULL;
1623 	hmep->hme_intrstats = NULL;
1624 
1625 	/*
1626 	 * Destroy all mutexes and data structures allocated during
1627 	 * attach time.
1628 	 *
1629 	 * Note: at this time we should be the only thread accessing
1630 	 * the structures for this instance.
1631 	 */
1632 
1633 	if (hmep->hme_globregh)
1634 		ddi_regs_map_free(&hmep->hme_globregh);
1635 	if (hmep->hme_cheerio_mode == 0) {
1636 		if (hmep->hme_etxregh)
1637 			ddi_regs_map_free(&hmep->hme_etxregh);
1638 		if (hmep->hme_erxregh)
1639 			ddi_regs_map_free(&hmep->hme_erxregh);
1640 		if (hmep->hme_bmacregh)
1641 			ddi_regs_map_free(&hmep->hme_bmacregh);
1642 		if (hmep->hme_mifregh)
1643 			ddi_regs_map_free(&hmep->hme_mifregh);
1644 	} else {
1645 		if (hmep->pci_config_handle)
1646 			(void) pci_config_teardown(&hmep->pci_config_handle);
1647 		hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
1648 		    hmep->hme_mifregh = hmep->hme_globregh = NULL;
1649 	}
1650 
1651 	mutex_destroy(&hmep->hme_xmitlock);
1652 	mutex_destroy(&hmep->hme_intrlock);
1653 
1654 	hmefreethings(hmep);
1655 	hmefreebufs(hmep);
1656 
1657 	ddi_set_driver_private(dip, NULL);
1658 	kmem_free(hmep, sizeof (struct hme));
1659 
1660 	return (DDI_SUCCESS);
1661 }
1662 
1663 int
1664 hmequiesce(dev_info_t *dip)
1665 {
1666 	struct hme *hmep;
1667 
1668 	if ((hmep = ddi_get_driver_private(dip)) == NULL)
1669 		return (DDI_FAILURE);
1670 
1671 	(void) hmestop(hmep);
1672 	return (DDI_SUCCESS);
1673 }
1674 
1675 static boolean_t
1676 hmeinit_xfer_params(struct hme *hmep)
1677 {
1678 	int hme_ipg1_conf, hme_ipg2_conf;
1679 	int hme_ipg0_conf, hme_lance_mode_conf;
1680 	int prop_len = sizeof (int);
1681 	dev_info_t *dip;
1682 
1683 	dip = hmep->dip;
1684 
1685 	/*
1686 	 * Set up the start-up values for user-configurable parameters
1687 	 * Get the values from the global variables first.
1688 	 * Use the MASK to limit the value to allowed maximum.
1689 	 */
1690 	hmep->hme_ipg1 = hme_ipg1 & HME_MASK_8BIT;
1691 	hmep->hme_ipg2 = hme_ipg2 & HME_MASK_8BIT;
1692 	hmep->hme_ipg0 = hme_ipg0 & HME_MASK_5BIT;
1693 
1694 	/*
1695 	 * Get the parameter values configured in .conf file.
1696 	 */
1697 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg1",
1698 	    (caddr_t)&hme_ipg1_conf, &prop_len) == DDI_PROP_SUCCESS) {
1699 		hmep->hme_ipg1 = hme_ipg1_conf & HME_MASK_8BIT;
1700 	}
1701 
1702 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg2",
1703 	    (caddr_t)&hme_ipg2_conf, &prop_len) == DDI_PROP_SUCCESS) {
1704 		hmep->hme_ipg2 = hme_ipg2_conf & HME_MASK_8BIT;
1705 	}
1706 
1707 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg0",
1708 	    (caddr_t)&hme_ipg0_conf, &prop_len) == DDI_PROP_SUCCESS) {
1709 		hmep->hme_ipg0 = hme_ipg0_conf & HME_MASK_5BIT;
1710 	}
1711 
1712 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "lance_mode",
1713 	    (caddr_t)&hme_lance_mode_conf, &prop_len) == DDI_PROP_SUCCESS) {
1714 		hmep->hme_lance_mode = hme_lance_mode_conf & HME_MASK_1BIT;
1715 	}
1716 
1717 	return (B_TRUE);
1718 }
1719 
1720 /*
1721  * Return 0 upon success, 1 on failure.
1722  */
1723 static uint_t
1724 hmestop(struct hme *hmep)
1725 {
1726 	/*
1727 	 * Disable the Tx dma engine.
1728 	 */
1729 	PUT_ETXREG(config, (GET_ETXREG(config) & ~HMET_CONFIG_TXDMA_EN));
1730 	HMEDELAY(((GET_ETXREG(state_mach) & 0x1f) == 0x1), HMEMAXRSTDELAY);
1731 
1732 	/*
1733 	 * Disable the Rx dma engine.
1734 	 */
1735 	PUT_ERXREG(config, (GET_ERXREG(config) & ~HMER_CONFIG_RXDMA_EN));
1736 	HMEDELAY(((GET_ERXREG(state_mach) & 0x3f) == 0), HMEMAXRSTDELAY);
1737 
1738 	/*
1739 	 * By this time all things should be quiet, so hit the
1740 	 * chip with a reset.
1741 	 */
1742 	PUT_GLOBREG(reset, HMEG_RESET_GLOBAL);
1743 
1744 	HMEDELAY((GET_GLOBREG(reset) == 0), HMEMAXRSTDELAY);
1745 	if (GET_GLOBREG(reset)) {
1746 		return (1);
1747 	}
1748 
1749 	CHECK_GLOBREG();
1750 	return (0);
1751 }
1752 
1753 static int
1754 hmestat_kstat_update(kstat_t *ksp, int rw)
1755 {
1756 	struct hme *hmep;
1757 	struct hmekstat *hkp;
1758 
1759 	hmep = (struct hme *)ksp->ks_private;
1760 	hkp = (struct hmekstat *)ksp->ks_data;
1761 
1762 	if (rw != KSTAT_READ)
1763 		return (EACCES);
1764 
1765 	/*
1766 	 * Update all the stats by reading all the counter registers.
1767 	 * Counter register stats are not updated till they overflow
1768 	 * and interrupt.
1769 	 */
1770 
1771 	mutex_enter(&hmep->hme_xmitlock);
1772 	if (hmep->hme_flags & HMERUNNING) {
1773 		hmereclaim(hmep);
1774 		hmesavecntrs(hmep);
1775 	}
1776 	mutex_exit(&hmep->hme_xmitlock);
1777 
1778 	hkp->hk_cvc.value.ul		= hmep->hme_cvc;
1779 	hkp->hk_lenerr.value.ul		= hmep->hme_lenerr;
1780 	hkp->hk_buff.value.ul		= hmep->hme_buff;
1781 	hkp->hk_missed.value.ul		= hmep->hme_missed;
1782 	hkp->hk_allocbfail.value.ul	= hmep->hme_allocbfail;
1783 	hkp->hk_babl.value.ul		= hmep->hme_babl;
1784 	hkp->hk_tmder.value.ul		= hmep->hme_tmder;
1785 	hkp->hk_txlaterr.value.ul	= hmep->hme_txlaterr;
1786 	hkp->hk_rxlaterr.value.ul	= hmep->hme_rxlaterr;
1787 	hkp->hk_slvparerr.value.ul	= hmep->hme_slvparerr;
1788 	hkp->hk_txparerr.value.ul	= hmep->hme_txparerr;
1789 	hkp->hk_rxparerr.value.ul	= hmep->hme_rxparerr;
1790 	hkp->hk_slverrack.value.ul	= hmep->hme_slverrack;
1791 	hkp->hk_txerrack.value.ul	= hmep->hme_txerrack;
1792 	hkp->hk_rxerrack.value.ul	= hmep->hme_rxerrack;
1793 	hkp->hk_txtagerr.value.ul	= hmep->hme_txtagerr;
1794 	hkp->hk_rxtagerr.value.ul	= hmep->hme_rxtagerr;
1795 	hkp->hk_eoperr.value.ul		= hmep->hme_eoperr;
1796 	hkp->hk_notmds.value.ul		= hmep->hme_notmds;
1797 	hkp->hk_notbufs.value.ul	= hmep->hme_notbufs;
1798 	hkp->hk_norbufs.value.ul	= hmep->hme_norbufs;
1799 
1800 	/*
1801 	 * Debug kstats
1802 	 */
1803 	hkp->hk_inits.value.ul		= hmep->inits;
1804 	hkp->hk_phyfail.value.ul	= hmep->phyfail;
1805 
1806 	/*
1807 	 * xcvr kstats
1808 	 */
1809 	hkp->hk_asic_rev.value.ul	= hmep->asic_rev;
1810 
1811 	return (0);
1812 }
1813 
1814 static void
1815 hmestatinit(struct hme *hmep)
1816 {
1817 	struct	kstat	*ksp;
1818 	struct	hmekstat	*hkp;
1819 	const char *driver;
1820 	int	instance;
1821 	char	buf[16];
1822 
1823 	instance = hmep->instance;
1824 	driver = ddi_driver_name(hmep->dip);
1825 
1826 	if ((ksp = kstat_create(driver, instance,
1827 	    "driver_info", "net", KSTAT_TYPE_NAMED,
1828 	    sizeof (struct hmekstat) / sizeof (kstat_named_t), 0)) == NULL) {
1829 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, INIT_MSG,
1830 		    "kstat_create failed");
1831 		return;
1832 	}
1833 
1834 	(void) snprintf(buf, sizeof (buf), "%sc%d", driver, instance);
1835 	hmep->hme_intrstats = kstat_create(driver, instance, buf, "controller",
1836 	    KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
1837 	if (hmep->hme_intrstats)
1838 		kstat_install(hmep->hme_intrstats);
1839 
1840 	hmep->hme_ksp = ksp;
1841 	hkp = (struct hmekstat *)ksp->ks_data;
1842 	kstat_named_init(&hkp->hk_cvc,			"code_violations",
1843 	    KSTAT_DATA_ULONG);
1844 	kstat_named_init(&hkp->hk_lenerr,		"len_errors",
1845 	    KSTAT_DATA_ULONG);
1846 	kstat_named_init(&hkp->hk_buff,			"buff",
1847 	    KSTAT_DATA_ULONG);
1848 	kstat_named_init(&hkp->hk_missed,		"missed",
1849 	    KSTAT_DATA_ULONG);
1850 	kstat_named_init(&hkp->hk_nocanput,		"nocanput",
1851 	    KSTAT_DATA_ULONG);
1852 	kstat_named_init(&hkp->hk_allocbfail,		"allocbfail",
1853 	    KSTAT_DATA_ULONG);
1854 	kstat_named_init(&hkp->hk_babl,			"babble",
1855 	    KSTAT_DATA_ULONG);
1856 	kstat_named_init(&hkp->hk_tmder,		"tmd_error",
1857 	    KSTAT_DATA_ULONG);
1858 	kstat_named_init(&hkp->hk_txlaterr,		"tx_late_error",
1859 	    KSTAT_DATA_ULONG);
1860 	kstat_named_init(&hkp->hk_rxlaterr,		"rx_late_error",
1861 	    KSTAT_DATA_ULONG);
1862 	kstat_named_init(&hkp->hk_slvparerr,		"slv_parity_error",
1863 	    KSTAT_DATA_ULONG);
1864 	kstat_named_init(&hkp->hk_txparerr,		"tx_parity_error",
1865 	    KSTAT_DATA_ULONG);
1866 	kstat_named_init(&hkp->hk_rxparerr,		"rx_parity_error",
1867 	    KSTAT_DATA_ULONG);
1868 	kstat_named_init(&hkp->hk_slverrack,		"slv_error_ack",
1869 	    KSTAT_DATA_ULONG);
1870 	kstat_named_init(&hkp->hk_txerrack,		"tx_error_ack",
1871 	    KSTAT_DATA_ULONG);
1872 	kstat_named_init(&hkp->hk_rxerrack,		"rx_error_ack",
1873 	    KSTAT_DATA_ULONG);
1874 	kstat_named_init(&hkp->hk_txtagerr,		"tx_tag_error",
1875 	    KSTAT_DATA_ULONG);
1876 	kstat_named_init(&hkp->hk_rxtagerr,		"rx_tag_error",
1877 	    KSTAT_DATA_ULONG);
1878 	kstat_named_init(&hkp->hk_eoperr,		"eop_error",
1879 	    KSTAT_DATA_ULONG);
1880 	kstat_named_init(&hkp->hk_notmds,		"no_tmds",
1881 	    KSTAT_DATA_ULONG);
1882 	kstat_named_init(&hkp->hk_notbufs,		"no_tbufs",
1883 	    KSTAT_DATA_ULONG);
1884 	kstat_named_init(&hkp->hk_norbufs,		"no_rbufs",
1885 	    KSTAT_DATA_ULONG);
1886 
1887 	/*
1888 	 * Debugging kstats
1889 	 */
1890 	kstat_named_init(&hkp->hk_inits,		"inits",
1891 	    KSTAT_DATA_ULONG);
1892 	kstat_named_init(&hkp->hk_phyfail,		"phy_failures",
1893 	    KSTAT_DATA_ULONG);
1894 
1895 	/*
1896 	 * xcvr kstats
1897 	 */
1898 	kstat_named_init(&hkp->hk_asic_rev,		"asic_rev",
1899 	    KSTAT_DATA_ULONG);
1900 
1901 	ksp->ks_update = hmestat_kstat_update;
1902 	ksp->ks_private = (void *) hmep;
1903 	kstat_install(ksp);
1904 }
1905 
1906 int
1907 hme_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
1908     void *val)
1909 {
1910 	struct hme *hmep = arg;
1911 	int value;
1912 	int rv;
1913 
1914 	rv = mii_m_getprop(hmep->hme_mii, name, num, sz, val);
1915 	if (rv != ENOTSUP)
1916 		return (rv);
1917 
1918 	switch (num) {
1919 	case MAC_PROP_PRIVATE:
1920 		break;
1921 	default:
1922 		return (ENOTSUP);
1923 	}
1924 
1925 	if (strcmp(name, "_ipg0") == 0) {
1926 		value = hmep->hme_ipg0;
1927 	} else if (strcmp(name, "_ipg1") == 0) {
1928 		value = hmep->hme_ipg1;
1929 	} else if (strcmp(name, "_ipg2") == 0) {
1930 		value = hmep->hme_ipg2;
1931 	} else if (strcmp(name, "_lance_mode") == 0) {
1932 		value = hmep->hme_lance_mode;
1933 	} else {
1934 		return (ENOTSUP);
1935 	}
1936 	(void) snprintf(val, sz, "%d", value);
1937 	return (0);
1938 }
1939 
1940 static void
1941 hme_m_propinfo(void *arg, const char *name, mac_prop_id_t num,
1942     mac_prop_info_handle_t mph)
1943 {
1944 	struct hme *hmep = arg;
1945 
1946 	mii_m_propinfo(hmep->hme_mii, name, num, mph);
1947 
1948 	switch (num) {
1949 	case MAC_PROP_PRIVATE: {
1950 		char valstr[64];
1951 		int default_val;
1952 
1953 		if (strcmp(name, "_ipg0") == 0) {
1954 			default_val = hme_ipg0;
1955 		} else if (strcmp(name, "_ipg1") == 0) {
1956 			default_val = hme_ipg1;
1957 		} else if (strcmp(name, "_ipg2") == 0) {
1958 			default_val = hme_ipg2;
1959 		} if (strcmp(name, "_lance_mode") == 0) {
1960 			default_val = hme_lance_mode;
1961 		} else {
1962 			return;
1963 		}
1964 
1965 		(void) snprintf(valstr, sizeof (valstr), "%d", default_val);
1966 		mac_prop_info_set_default_str(mph, valstr);
1967 		break;
1968 	}
1969 	}
1970 }
1971 
1972 int
1973 hme_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
1974     const void *val)
1975 {
1976 	struct hme *hmep = arg;
1977 	int rv;
1978 	long lval;
1979 	boolean_t init = B_FALSE;
1980 
1981 	rv = mii_m_setprop(hmep->hme_mii, name, num, sz, val);
1982 	if (rv != ENOTSUP)
1983 		return (rv);
1984 	rv = 0;
1985 
1986 	switch (num) {
1987 	case MAC_PROP_PRIVATE:
1988 		break;
1989 	default:
1990 		return (ENOTSUP);
1991 	}
1992 
1993 	(void) ddi_strtol(val, NULL, 0, &lval);
1994 
1995 	if (strcmp(name, "_ipg1") == 0) {
1996 		if ((lval >= 0) && (lval <= 255)) {
1997 			hmep->hme_ipg1 = lval & 0xff;
1998 			init = B_TRUE;
1999 		} else {
2000 			return (EINVAL);
2001 		}
2002 
2003 	} else if (strcmp(name, "_ipg2") == 0) {
2004 		if ((lval >= 0) && (lval <= 255)) {
2005 			hmep->hme_ipg2 = lval & 0xff;
2006 			init = B_TRUE;
2007 		} else {
2008 			return (EINVAL);
2009 		}
2010 
2011 	} else if (strcmp(name, "_ipg0") == 0) {
2012 		if ((lval >= 0) && (lval <= 31)) {
2013 			hmep->hme_ipg0 = lval & 0xff;
2014 			init = B_TRUE;
2015 		} else {
2016 			return (EINVAL);
2017 		}
2018 	} else if (strcmp(name, "_lance_mode") == 0) {
2019 		if ((lval >= 0) && (lval <= 1)) {
2020 			hmep->hme_lance_mode = lval & 0xff;
2021 			init = B_TRUE;
2022 		} else {
2023 			return (EINVAL);
2024 		}
2025 
2026 	} else {
2027 		rv = ENOTSUP;
2028 	}
2029 
2030 	if (init) {
2031 		(void) hmeinit(hmep);
2032 	}
2033 	return (rv);
2034 }
2035 
2036 
2037 /*ARGSUSED*/
2038 static boolean_t
2039 hme_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2040 {
2041 	switch (cap) {
2042 	case MAC_CAPAB_HCKSUM:
2043 		*(uint32_t *)cap_data = HCKSUM_INET_PARTIAL;
2044 		return (B_TRUE);
2045 	default:
2046 		return (B_FALSE);
2047 	}
2048 }
2049 
2050 static int
2051 hme_m_promisc(void *arg, boolean_t on)
2052 {
2053 	struct hme *hmep = arg;
2054 
2055 	hmep->hme_promisc = on;
2056 	(void) hmeinit(hmep);
2057 	return (0);
2058 }
2059 
2060 static int
2061 hme_m_unicst(void *arg, const uint8_t *macaddr)
2062 {
2063 	struct hme *hmep = arg;
2064 
2065 	/*
2066 	 * Set new interface local address and re-init device.
2067 	 * This is destructive to any other streams attached
2068 	 * to this device.
2069 	 */
2070 	mutex_enter(&hmep->hme_intrlock);
2071 	bcopy(macaddr, &hmep->hme_ouraddr, ETHERADDRL);
2072 	mutex_exit(&hmep->hme_intrlock);
2073 	(void) hmeinit(hmep);
2074 	return (0);
2075 }
2076 
2077 static int
2078 hme_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
2079 {
2080 	struct hme	*hmep = arg;
2081 	uint32_t	ladrf_bit;
2082 	boolean_t	doinit = B_FALSE;
2083 
2084 	/*
2085 	 * If this address's bit was not already set in the local address
2086 	 * filter, add it and re-initialize the Hardware.
2087 	 */
2088 	ladrf_bit = hmeladrf_bit(macaddr);
2089 
2090 	mutex_enter(&hmep->hme_intrlock);
2091 	if (add) {
2092 		hmep->hme_ladrf_refcnt[ladrf_bit]++;
2093 		if (hmep->hme_ladrf_refcnt[ladrf_bit] == 1) {
2094 			hmep->hme_ladrf[ladrf_bit >> 4] |=
2095 			    1 << (ladrf_bit & 0xf);
2096 			hmep->hme_multi++;
2097 			doinit = B_TRUE;
2098 		}
2099 	} else {
2100 		hmep->hme_ladrf_refcnt[ladrf_bit]--;
2101 		if (hmep->hme_ladrf_refcnt[ladrf_bit] == 0) {
2102 			hmep->hme_ladrf[ladrf_bit >> 4] &=
2103 			    ~(1 << (ladrf_bit & 0xf));
2104 			doinit = B_TRUE;
2105 		}
2106 	}
2107 	mutex_exit(&hmep->hme_intrlock);
2108 
2109 	if (doinit) {
2110 		(void) hmeinit(hmep);
2111 	}
2112 
2113 	return (0);
2114 }
2115 
2116 static int
2117 hme_m_start(void *arg)
2118 {
2119 	struct hme *hmep = arg;
2120 
2121 	if (hmeinit(hmep) != 0) {
2122 		/* initialization failed -- really want DL_INITFAILED */
2123 		return (EIO);
2124 	} else {
2125 		hmep->hme_started = B_TRUE;
2126 		mii_start(hmep->hme_mii);
2127 		return (0);
2128 	}
2129 }
2130 
2131 static void
2132 hme_m_stop(void *arg)
2133 {
2134 	struct hme *hmep = arg;
2135 
2136 	mii_stop(hmep->hme_mii);
2137 	hmep->hme_started = B_FALSE;
2138 	hmeuninit(hmep);
2139 }
2140 
2141 static int
2142 hme_m_stat(void *arg, uint_t stat, uint64_t *val)
2143 {
2144 	struct hme	*hmep = arg;
2145 
2146 	mutex_enter(&hmep->hme_xmitlock);
2147 	if (hmep->hme_flags & HMERUNNING) {
2148 		hmereclaim(hmep);
2149 		hmesavecntrs(hmep);
2150 	}
2151 	mutex_exit(&hmep->hme_xmitlock);
2152 
2153 
2154 	if (mii_m_getstat(hmep->hme_mii, stat, val) == 0) {
2155 		return (0);
2156 	}
2157 	switch (stat) {
2158 	case MAC_STAT_IPACKETS:
2159 		*val = hmep->hme_ipackets;
2160 		break;
2161 	case MAC_STAT_RBYTES:
2162 		*val = hmep->hme_rbytes;
2163 		break;
2164 	case MAC_STAT_IERRORS:
2165 		*val = hmep->hme_ierrors;
2166 		break;
2167 	case MAC_STAT_OPACKETS:
2168 		*val = hmep->hme_opackets;
2169 		break;
2170 	case MAC_STAT_OBYTES:
2171 		*val = hmep->hme_obytes;
2172 		break;
2173 	case MAC_STAT_OERRORS:
2174 		*val = hmep->hme_oerrors;
2175 		break;
2176 	case MAC_STAT_MULTIRCV:
2177 		*val = hmep->hme_multircv;
2178 		break;
2179 	case MAC_STAT_MULTIXMT:
2180 		*val = hmep->hme_multixmt;
2181 		break;
2182 	case MAC_STAT_BRDCSTRCV:
2183 		*val = hmep->hme_brdcstrcv;
2184 		break;
2185 	case MAC_STAT_BRDCSTXMT:
2186 		*val = hmep->hme_brdcstxmt;
2187 		break;
2188 	case MAC_STAT_UNDERFLOWS:
2189 		*val = hmep->hme_uflo;
2190 		break;
2191 	case MAC_STAT_OVERFLOWS:
2192 		*val = hmep->hme_oflo;
2193 		break;
2194 	case MAC_STAT_COLLISIONS:
2195 		*val = hmep->hme_coll;
2196 		break;
2197 	case MAC_STAT_NORCVBUF:
2198 		*val = hmep->hme_norcvbuf;
2199 		break;
2200 	case MAC_STAT_NOXMTBUF:
2201 		*val = hmep->hme_noxmtbuf;
2202 		break;
2203 	case ETHER_STAT_LINK_DUPLEX:
2204 		*val = hmep->hme_duplex;
2205 		break;
2206 	case ETHER_STAT_ALIGN_ERRORS:
2207 		*val = hmep->hme_align_errors;
2208 		break;
2209 	case ETHER_STAT_FCS_ERRORS:
2210 		*val = hmep->hme_fcs_errors;
2211 		break;
2212 	case ETHER_STAT_EX_COLLISIONS:
2213 		*val = hmep->hme_excol;
2214 		break;
2215 	case ETHER_STAT_DEFER_XMTS:
2216 		*val = hmep->hme_defer_xmts;
2217 		break;
2218 	case ETHER_STAT_SQE_ERRORS:
2219 		*val = hmep->hme_sqe_errors;
2220 		break;
2221 	case ETHER_STAT_FIRST_COLLISIONS:
2222 		*val = hmep->hme_fstcol;
2223 		break;
2224 	case ETHER_STAT_TX_LATE_COLLISIONS:
2225 		*val = hmep->hme_tlcol;
2226 		break;
2227 	case ETHER_STAT_TOOLONG_ERRORS:
2228 		*val = hmep->hme_toolong_errors;
2229 		break;
2230 	case ETHER_STAT_TOOSHORT_ERRORS:
2231 		*val = hmep->hme_runt;
2232 		break;
2233 	case ETHER_STAT_CARRIER_ERRORS:
2234 		*val = hmep->hme_carrier_errors;
2235 		break;
2236 	default:
2237 		return (EINVAL);
2238 	}
2239 	return (0);
2240 }
2241 
2242 static mblk_t *
2243 hme_m_tx(void *arg, mblk_t *mp)
2244 {
2245 	struct hme *hmep = arg;
2246 	mblk_t *next;
2247 
2248 	while (mp != NULL) {
2249 		next = mp->b_next;
2250 		mp->b_next = NULL;
2251 		if (!hmestart(hmep, mp)) {
2252 			mp->b_next = next;
2253 			break;
2254 		}
2255 		mp = next;
2256 	}
2257 	return (mp);
2258 }
2259 
2260 /*
2261  * Software IP checksum, for the edge cases that the
2262  * hardware can't handle.  See hmestart for more info.
2263  */
2264 static uint16_t
2265 hme_cksum(void *data, int len)
2266 {
2267 	uint16_t	*words = data;
2268 	int		i, nwords = len / 2;
2269 	uint32_t	sum = 0;
2270 
2271 	/* just add up the words */
2272 	for (i = 0; i < nwords; i++) {
2273 		sum += *words++;
2274 	}
2275 
2276 	/* pick up residual byte ... assume even half-word allocations */
2277 	if (len % 2) {
2278 		sum += (*words & htons(0xff00));
2279 	}
2280 
2281 	sum = (sum >> 16) + (sum & 0xffff);
2282 	sum = (sum >> 16) + (sum & 0xffff);
2283 
2284 	return (~(sum & 0xffff));
2285 }
2286 
2287 static boolean_t
2288 hmestart(struct hme *hmep, mblk_t *mp)
2289 {
2290 	uint32_t	len;
2291 	boolean_t	retval = B_TRUE;
2292 	hmebuf_t	*tbuf;
2293 	uint32_t	txptr;
2294 
2295 	uint32_t	csflags = 0;
2296 	uint32_t	flags;
2297 	uint32_t	start_offset;
2298 	uint32_t	stuff_offset;
2299 
2300 	mac_hcksum_get(mp, &start_offset, &stuff_offset, NULL, NULL, &flags);
2301 
2302 	if (flags & HCK_PARTIALCKSUM) {
2303 		if (get_ether_type(mp->b_rptr) == ETHERTYPE_VLAN) {
2304 			start_offset += sizeof (struct ether_header) + 4;
2305 			stuff_offset += sizeof (struct ether_header) + 4;
2306 		} else {
2307 			start_offset += sizeof (struct ether_header);
2308 			stuff_offset += sizeof (struct ether_header);
2309 		}
2310 		csflags = HMETMD_CSENABL |
2311 		    (start_offset << HMETMD_CSSTART_SHIFT) |
2312 		    (stuff_offset << HMETMD_CSSTUFF_SHIFT);
2313 	}
2314 
2315 	mutex_enter(&hmep->hme_xmitlock);
2316 
2317 	if (hmep->hme_flags & HMESUSPENDED) {
2318 		hmep->hme_carrier_errors++;
2319 		hmep->hme_oerrors++;
2320 		goto bad;
2321 	}
2322 
2323 	if (hmep->hme_txindex != hmep->hme_txreclaim) {
2324 		hmereclaim(hmep);
2325 	}
2326 	if ((hmep->hme_txindex - HME_TMDMAX) == hmep->hme_txreclaim)
2327 		goto notmds;
2328 	txptr = hmep->hme_txindex % HME_TMDMAX;
2329 	tbuf = &hmep->hme_tbuf[txptr];
2330 
2331 	/*
2332 	 * Note that for checksum offload, the hardware cannot
2333 	 * generate correct checksums if the packet is smaller than
2334 	 * 64-bytes.  In such a case, we bcopy the packet and use
2335 	 * a software checksum.
2336 	 */
2337 
2338 	len = msgsize(mp);
2339 	if (len < 64) {
2340 		/* zero fill the padding */
2341 		bzero(tbuf->kaddr, 64);
2342 	}
2343 	mcopymsg(mp, tbuf->kaddr);
2344 
2345 	if ((csflags != 0) && (len < 64)) {
2346 		uint16_t sum;
2347 		sum = hme_cksum(tbuf->kaddr + start_offset,
2348 		    len - start_offset);
2349 		bcopy(&sum, tbuf->kaddr + stuff_offset, sizeof (sum));
2350 		csflags = 0;
2351 	}
2352 
2353 	if (ddi_dma_sync(tbuf->dmah, 0, len, DDI_DMA_SYNC_FORDEV) ==
2354 	    DDI_FAILURE) {
2355 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, DDI_MSG,
2356 		    "ddi_dma_sync failed");
2357 	}
2358 
2359 	/*
2360 	 * update MIB II statistics
2361 	 */
2362 	BUMP_OutNUcast(hmep, tbuf->kaddr);
2363 
2364 	PUT_TMD(txptr, tbuf->paddr, len,
2365 	    HMETMD_OWN | HMETMD_SOP | HMETMD_EOP | csflags);
2366 
2367 	HMESYNCTMD(txptr, DDI_DMA_SYNC_FORDEV);
2368 	hmep->hme_txindex++;
2369 
2370 	PUT_ETXREG(txpend, HMET_TXPEND_TDMD);
2371 	CHECK_ETXREG();
2372 
2373 	mutex_exit(&hmep->hme_xmitlock);
2374 
2375 	hmep->hme_starts++;
2376 	return (B_TRUE);
2377 
2378 bad:
2379 	mutex_exit(&hmep->hme_xmitlock);
2380 	freemsg(mp);
2381 	return (B_TRUE);
2382 
2383 notmds:
2384 	hmep->hme_notmds++;
2385 	hmep->hme_wantw = B_TRUE;
2386 	hmereclaim(hmep);
2387 	retval = B_FALSE;
2388 done:
2389 	mutex_exit(&hmep->hme_xmitlock);
2390 
2391 	return (retval);
2392 }
2393 
2394 /*
2395  * Initialize channel.
2396  * Return 0 on success, nonzero on error.
2397  *
2398  * The recommended sequence for initialization is:
2399  * 1. Issue a Global Reset command to the Ethernet Channel.
2400  * 2. Poll the Global_Reset bits until the execution of the reset has been
2401  *    completed.
2402  * 2(a). Use the MIF Frame/Output register to reset the transceiver.
2403  *	 Poll Register 0 to till the Resetbit is 0.
2404  * 2(b). Use the MIF Frame/Output register to set the PHY in in Normal-Op,
2405  *	 100Mbps and Non-Isolated mode. The main point here is to bring the
2406  *	 PHY out of Isolate mode so that it can generate the rx_clk and tx_clk
2407  *	 to the MII interface so that the Bigmac core can correctly reset
2408  *	 upon a software reset.
2409  * 2(c).  Issue another Global Reset command to the Ethernet Channel and poll
2410  *	  the Global_Reset bits till completion.
2411  * 3. Set up all the data structures in the host memory.
2412  * 4. Program the TX_MAC registers/counters (excluding the TX_MAC Configuration
2413  *    Register).
2414  * 5. Program the RX_MAC registers/counters (excluding the RX_MAC Configuration
2415  *    Register).
2416  * 6. Program the Transmit Descriptor Ring Base Address in the ETX.
2417  * 7. Program the Receive Descriptor Ring Base Address in the ERX.
2418  * 8. Program the Global Configuration and the Global Interrupt Mask Registers.
2419  * 9. Program the ETX Configuration register (enable the Transmit DMA channel).
2420  * 10. Program the ERX Configuration register (enable the Receive DMA channel).
2421  * 11. Program the XIF Configuration Register (enable the XIF).
2422  * 12. Program the RX_MAC Configuration Register (Enable the RX_MAC).
2423  * 13. Program the TX_MAC Configuration Register (Enable the TX_MAC).
2424  */
2425 
2426 
2427 #ifdef FEPS_URUN_BUG
2428 static int hme_palen = 32;
2429 #endif
2430 
2431 static int
2432 hmeinit(struct hme *hmep)
2433 {
2434 	uint32_t		i;
2435 	int			ret;
2436 	boolean_t		fdx;
2437 	int			phyad;
2438 
2439 	/*
2440 	 * Lock sequence:
2441 	 *	hme_intrlock, hme_xmitlock.
2442 	 */
2443 	mutex_enter(&hmep->hme_intrlock);
2444 
2445 	/*
2446 	 * Don't touch the hardware if we are suspended.  But don't
2447 	 * fail either.  Some time later we may be resumed, and then
2448 	 * we'll be back here to program the device using the settings
2449 	 * in the soft state.
2450 	 */
2451 	if (hmep->hme_flags & HMESUSPENDED) {
2452 		mutex_exit(&hmep->hme_intrlock);
2453 		return (0);
2454 	}
2455 
2456 	/*
2457 	 * This should prevent us from clearing any interrupts that
2458 	 * may occur by temporarily stopping interrupts from occurring
2459 	 * for a short time.  We need to update the interrupt mask
2460 	 * later in this function.
2461 	 */
2462 	PUT_GLOBREG(intmask, ~HMEG_MASK_MIF_INTR);
2463 
2464 
2465 	/*
2466 	 * Rearranged the mutex acquisition order to solve the deadlock
2467 	 * situation as described in bug ID 4065896.
2468 	 */
2469 
2470 	mutex_enter(&hmep->hme_xmitlock);
2471 
2472 	hmep->hme_flags = 0;
2473 	hmep->hme_wantw = B_FALSE;
2474 
2475 	if (hmep->inits)
2476 		hmesavecntrs(hmep);
2477 
2478 	/*
2479 	 * Perform Global reset of the Sbus/FEPS ENET channel.
2480 	 */
2481 	(void) hmestop(hmep);
2482 
2483 	/*
2484 	 * Clear all descriptors.
2485 	 */
2486 	bzero(hmep->hme_rmdp, HME_RMDMAX * sizeof (struct hme_rmd));
2487 	bzero(hmep->hme_tmdp, HME_TMDMAX * sizeof (struct hme_tmd));
2488 
2489 	/*
2490 	 * Hang out receive buffers.
2491 	 */
2492 	for (i = 0; i < HME_RMDMAX; i++) {
2493 		PUT_RMD(i, hmep->hme_rbuf[i].paddr);
2494 	}
2495 
2496 	/*
2497 	 * DMA sync descriptors.
2498 	 */
2499 	(void) ddi_dma_sync(hmep->hme_rmd_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
2500 	(void) ddi_dma_sync(hmep->hme_tmd_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
2501 
2502 	/*
2503 	 * Reset RMD and TMD 'walking' pointers.
2504 	 */
2505 	hmep->hme_rxindex = 0;
2506 	hmep->hme_txindex = hmep->hme_txreclaim = 0;
2507 
2508 	/*
2509 	 * This is the right place to initialize MIF !!!
2510 	 */
2511 
2512 	PUT_MIFREG(mif_imask, HME_MIF_INTMASK);	/* mask all interrupts */
2513 
2514 	if (!hmep->hme_frame_enable)
2515 		PUT_MIFREG(mif_cfg, GET_MIFREG(mif_cfg) | HME_MIF_CFGBB);
2516 	else
2517 		PUT_MIFREG(mif_cfg, GET_MIFREG(mif_cfg) & ~HME_MIF_CFGBB);
2518 						/* enable frame mode */
2519 
2520 	/*
2521 	 * Depending on the transceiver detected, select the source
2522 	 * of the clocks for the MAC. Without the clocks, TX_MAC does
2523 	 * not reset. When the Global Reset is issued to the Sbus/FEPS
2524 	 * ASIC, it selects Internal by default.
2525 	 */
2526 
2527 	switch ((phyad = mii_get_addr(hmep->hme_mii))) {
2528 	case -1:
2529 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, XCVR_MSG, no_xcvr_msg);
2530 		goto init_fail;	/* abort initialization */
2531 
2532 	case HME_INTERNAL_PHYAD:
2533 		PUT_MACREG(xifc, 0);
2534 		break;
2535 	case HME_EXTERNAL_PHYAD:
2536 		/* Isolate the Int. xcvr */
2537 		PUT_MACREG(xifc, BMAC_XIFC_MIIBUFDIS);
2538 		break;
2539 	}
2540 
2541 	hmep->inits++;
2542 
2543 	/*
2544 	 * Initialize BigMAC registers.
2545 	 * First set the tx enable bit in tx config reg to 0 and poll on
2546 	 * it till it turns to 0. Same for rx config, hash and address
2547 	 * filter reg.
2548 	 * Here is the sequence per the spec.
2549 	 * MADD2 - MAC Address 2
2550 	 * MADD1 - MAC Address 1
2551 	 * MADD0 - MAC Address 0
2552 	 * HASH3, HASH2, HASH1, HASH0 for group address
2553 	 * AFR2, AFR1, AFR0 and AFMR for address filter mask
2554 	 * Program RXMIN and RXMAX for packet length if not 802.3
2555 	 * RXCFG - Rx config for not stripping CRC
2556 	 * XXX Anything else to hme configured in RXCFG
2557 	 * IPG1, IPG2, ALIMIT, SLOT, PALEN, PAPAT, TXSFD, JAM, TXMAX, TXMIN
2558 	 * if not 802.3 compliant
2559 	 * XIF register for speed selection
2560 	 * MASK  - Interrupt mask
2561 	 * Set bit 0 of TXCFG
2562 	 * Set bit 0 of RXCFG
2563 	 */
2564 
2565 	/*
2566 	 * Initialize the TX_MAC registers
2567 	 * Initialization of jamsize to work around rx crc bug
2568 	 */
2569 	PUT_MACREG(jam, jamsize);
2570 
2571 #ifdef	FEPS_URUN_BUG
2572 	if (hme_urun_fix)
2573 		PUT_MACREG(palen, hme_palen);
2574 #endif
2575 
2576 	PUT_MACREG(ipg1, hmep->hme_ipg1);
2577 	PUT_MACREG(ipg2, hmep->hme_ipg2);
2578 
2579 	PUT_MACREG(rseed,
2580 	    ((hmep->hme_ouraddr.ether_addr_octet[0] << 8) & 0x3) |
2581 	    hmep->hme_ouraddr.ether_addr_octet[1]);
2582 
2583 	/* Initialize the RX_MAC registers */
2584 
2585 	/*
2586 	 * Program BigMAC with local individual ethernet address.
2587 	 */
2588 	PUT_MACREG(madd2, (hmep->hme_ouraddr.ether_addr_octet[4] << 8) |
2589 	    hmep->hme_ouraddr.ether_addr_octet[5]);
2590 	PUT_MACREG(madd1, (hmep->hme_ouraddr.ether_addr_octet[2] << 8) |
2591 	    hmep->hme_ouraddr.ether_addr_octet[3]);
2592 	PUT_MACREG(madd0, (hmep->hme_ouraddr.ether_addr_octet[0] << 8) |
2593 	    hmep->hme_ouraddr.ether_addr_octet[1]);
2594 
2595 	/*
2596 	 * Set up multicast address filter by passing all multicast
2597 	 * addresses through a crc generator, and then using the
2598 	 * low order 6 bits as a index into the 64 bit logical
2599 	 * address filter. The high order three bits select the word,
2600 	 * while the rest of the bits select the bit within the word.
2601 	 */
2602 	PUT_MACREG(hash0, hmep->hme_ladrf[0]);
2603 	PUT_MACREG(hash1, hmep->hme_ladrf[1]);
2604 	PUT_MACREG(hash2, hmep->hme_ladrf[2]);
2605 	PUT_MACREG(hash3, hmep->hme_ladrf[3]);
2606 
2607 	/*
2608 	 * Configure parameters to support VLAN.  (VLAN encapsulation adds
2609 	 * four bytes.)
2610 	 */
2611 	PUT_MACREG(txmax, ETHERMAX + ETHERFCSL + 4);
2612 	PUT_MACREG(rxmax, ETHERMAX + ETHERFCSL + 4);
2613 
2614 	/*
2615 	 * Initialize HME Global registers, ETX registers and ERX registers.
2616 	 */
2617 
2618 	PUT_ETXREG(txring, hmep->hme_tmd_paddr);
2619 	PUT_ERXREG(rxring, hmep->hme_rmd_paddr);
2620 
2621 	/*
2622 	 * ERX registers can be written only if they have even no. of bits set.
2623 	 * So, if the value written is not read back, set the lsb and write
2624 	 * again.
2625 	 * static	int	hme_erx_fix = 1;   : Use the fix for erx bug
2626 	 */
2627 	{
2628 		uint32_t temp;
2629 		temp  = hmep->hme_rmd_paddr;
2630 
2631 		if (GET_ERXREG(rxring) != temp)
2632 			PUT_ERXREG(rxring, (temp | 4));
2633 	}
2634 
2635 	PUT_GLOBREG(config, (hmep->hme_config |
2636 	    (hmep->hme_64bit_xfer << HMEG_CONFIG_64BIT_SHIFT)));
2637 
2638 	/*
2639 	 * Significant performance improvements can be achieved by
2640 	 * disabling transmit interrupt. Thus TMD's are reclaimed only
2641 	 * when we run out of them in hmestart().
2642 	 */
2643 	PUT_GLOBREG(intmask,
2644 	    HMEG_MASK_INTR | HMEG_MASK_TINT | HMEG_MASK_TX_ALL);
2645 
2646 	PUT_ETXREG(txring_size, ((HME_TMDMAX -1)>> HMET_RINGSZ_SHIFT));
2647 	PUT_ETXREG(config, (GET_ETXREG(config) | HMET_CONFIG_TXDMA_EN
2648 	    | HMET_CONFIG_TXFIFOTH));
2649 	/* get the rxring size bits */
2650 	switch (HME_RMDMAX) {
2651 	case 32:
2652 		i = HMER_CONFIG_RXRINGSZ32;
2653 		break;
2654 	case 64:
2655 		i = HMER_CONFIG_RXRINGSZ64;
2656 		break;
2657 	case 128:
2658 		i = HMER_CONFIG_RXRINGSZ128;
2659 		break;
2660 	case 256:
2661 		i = HMER_CONFIG_RXRINGSZ256;
2662 		break;
2663 	default:
2664 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2665 		    unk_rx_ringsz_msg);
2666 		goto init_fail;
2667 	}
2668 	i |= (HME_FSTBYTE_OFFSET << HMER_CONFIG_FBO_SHIFT)
2669 	    | HMER_CONFIG_RXDMA_EN;
2670 
2671 	/* h/w checks start offset in half words */
2672 	i |= ((sizeof (struct ether_header) / 2) << HMER_RX_CSSTART_SHIFT);
2673 
2674 	PUT_ERXREG(config, i);
2675 
2676 	/*
2677 	 * Bug related to the parity handling in ERX. When erxp-config is
2678 	 * read back.
2679 	 * Sbus/FEPS drives the parity bit. This value is used while
2680 	 * writing again.
2681 	 * This fixes the RECV problem in SS5.
2682 	 * static	int	hme_erx_fix = 1;   : Use the fix for erx bug
2683 	 */
2684 	{
2685 		uint32_t temp;
2686 		temp = GET_ERXREG(config);
2687 		PUT_ERXREG(config, i);
2688 
2689 		if (GET_ERXREG(config) != i)
2690 			HME_FAULT_MSG4(hmep, SEVERITY_UNKNOWN, ERX_MSG,
2691 			    "error:temp = %x erxp->config = %x, should be %x",
2692 			    temp, GET_ERXREG(config), i);
2693 	}
2694 
2695 	/*
2696 	 * Set up the rxconfig, txconfig and seed register without enabling
2697 	 * them the former two at this time
2698 	 *
2699 	 * BigMAC strips the CRC bytes by default. Since this is
2700 	 * contrary to other pieces of hardware, this bit needs to
2701 	 * enabled to tell BigMAC not to strip the CRC bytes.
2702 	 * Do not filter this node's own packets.
2703 	 */
2704 
2705 	if (hme_reject_own) {
2706 		PUT_MACREG(rxcfg,
2707 		    ((hmep->hme_promisc ? BMAC_RXCFG_PROMIS : 0) |
2708 		    BMAC_RXCFG_MYOWN | BMAC_RXCFG_HASH));
2709 	} else {
2710 		PUT_MACREG(rxcfg,
2711 		    ((hmep->hme_promisc ? BMAC_RXCFG_PROMIS : 0) |
2712 		    BMAC_RXCFG_HASH));
2713 	}
2714 
2715 	drv_usecwait(10);	/* wait after setting Hash Enable bit */
2716 
2717 	fdx = (mii_get_duplex(hmep->hme_mii) == LINK_DUPLEX_FULL);
2718 
2719 	if (hme_ngu_enable)
2720 		PUT_MACREG(txcfg, (fdx ? BMAC_TXCFG_FDX : 0) |
2721 		    BMAC_TXCFG_NGU);
2722 	else
2723 		PUT_MACREG(txcfg, (fdx ? BMAC_TXCFG_FDX: 0));
2724 
2725 	i = 0;
2726 	if ((hmep->hme_lance_mode) && (hmep->hme_lance_mode_enable))
2727 		i = ((hmep->hme_ipg0 & HME_MASK_5BIT) << BMAC_XIFC_IPG0_SHIFT)
2728 		    | BMAC_XIFC_LANCE_ENAB;
2729 	if (phyad == HME_INTERNAL_PHYAD)
2730 		PUT_MACREG(xifc, i | (BMAC_XIFC_ENAB));
2731 	else
2732 		PUT_MACREG(xifc, i | (BMAC_XIFC_ENAB | BMAC_XIFC_MIIBUFDIS));
2733 
2734 	PUT_MACREG(rxcfg, GET_MACREG(rxcfg) | BMAC_RXCFG_ENAB);
2735 	PUT_MACREG(txcfg, GET_MACREG(txcfg) | BMAC_TXCFG_ENAB);
2736 
2737 	hmep->hme_flags |= (HMERUNNING | HMEINITIALIZED);
2738 	/*
2739 	 * Update the interrupt mask : this will re-allow interrupts to occur
2740 	 */
2741 	PUT_GLOBREG(intmask, HMEG_MASK_INTR);
2742 	mac_tx_update(hmep->hme_mh);
2743 
2744 init_fail:
2745 	/*
2746 	 * Release the locks in reverse order
2747 	 */
2748 	mutex_exit(&hmep->hme_xmitlock);
2749 	mutex_exit(&hmep->hme_intrlock);
2750 
2751 	ret = !(hmep->hme_flags & HMERUNNING);
2752 	if (ret) {
2753 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2754 		    init_fail_gen_msg);
2755 	}
2756 
2757 	/*
2758 	 * Hardware checks.
2759 	 */
2760 	CHECK_GLOBREG();
2761 	CHECK_MIFREG();
2762 	CHECK_MACREG();
2763 	CHECK_ERXREG();
2764 	CHECK_ETXREG();
2765 
2766 init_exit:
2767 	return (ret);
2768 }
2769 
2770 /*
2771  * Calculate the dvma burstsize by setting up a dvma temporarily.  Return
2772  * 0 as burstsize upon failure as it signifies no burst size.
2773  * Requests for 64-bit transfer setup, if the platform supports it.
2774  * NOTE: Do not use ddi_dma_alloc_handle(9f) then ddi_dma_burstsize(9f),
2775  * sun4u Ultra-2 incorrectly returns a 32bit transfer.
2776  */
2777 static int
2778 hmeburstsizes(struct hme *hmep)
2779 {
2780 	int burstsizes;
2781 	ddi_dma_handle_t handle;
2782 
2783 	if (ddi_dma_alloc_handle(hmep->dip, &hme_dma_attr,
2784 	    DDI_DMA_DONTWAIT, NULL, &handle)) {
2785 		return (0);
2786 	}
2787 
2788 	hmep->hme_burstsizes = burstsizes = ddi_dma_burstsizes(handle);
2789 	ddi_dma_free_handle(&handle);
2790 
2791 	/*
2792 	 * Use user-configurable parameter for enabling 64-bit transfers
2793 	 */
2794 	burstsizes = (hmep->hme_burstsizes >> 16);
2795 	if (burstsizes)
2796 		hmep->hme_64bit_xfer = hme_64bit_enable; /* user config value */
2797 	else
2798 		burstsizes = hmep->hme_burstsizes;
2799 
2800 	if (hmep->hme_cheerio_mode)
2801 		hmep->hme_64bit_xfer = 0; /* Disable for cheerio */
2802 
2803 	if (burstsizes & 0x40)
2804 		hmep->hme_config = HMEG_CONFIG_BURST64;
2805 	else if (burstsizes & 0x20)
2806 		hmep->hme_config = HMEG_CONFIG_BURST32;
2807 	else
2808 		hmep->hme_config = HMEG_CONFIG_BURST16;
2809 
2810 	return (DDI_SUCCESS);
2811 }
2812 
2813 static int
2814 hmeallocbuf(struct hme *hmep, hmebuf_t *buf, int dir)
2815 {
2816 	ddi_dma_cookie_t	dmac;
2817 	size_t			len;
2818 	unsigned		ccnt;
2819 
2820 	if (ddi_dma_alloc_handle(hmep->dip, &hme_dma_attr,
2821 	    DDI_DMA_DONTWAIT, NULL, &buf->dmah) != DDI_SUCCESS) {
2822 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2823 		    "cannot allocate buf dma handle - failed");
2824 		return (DDI_FAILURE);
2825 	}
2826 
2827 	if (ddi_dma_mem_alloc(buf->dmah, ROUNDUP(HMEBUFSIZE, 512),
2828 	    &hme_buf_attr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
2829 	    &buf->kaddr, &len, &buf->acch) != DDI_SUCCESS) {
2830 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2831 		    "cannot allocate buf memory - failed");
2832 		return (DDI_FAILURE);
2833 	}
2834 
2835 	if (ddi_dma_addr_bind_handle(buf->dmah, NULL, buf->kaddr,
2836 	    len, dir | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
2837 	    &dmac, &ccnt) != DDI_DMA_MAPPED) {
2838 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2839 		    "cannot map buf for dma - failed");
2840 		return (DDI_FAILURE);
2841 	}
2842 	buf->paddr = dmac.dmac_address;
2843 
2844 	/* apparently they don't handle multiple cookies */
2845 	if (ccnt > 1) {
2846 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2847 		    "too many buf dma cookies");
2848 		return (DDI_FAILURE);
2849 	}
2850 	return (DDI_SUCCESS);
2851 }
2852 
2853 static int
2854 hmeallocbufs(struct hme *hmep)
2855 {
2856 	hmep->hme_tbuf = kmem_zalloc(HME_TMDMAX * sizeof (hmebuf_t), KM_SLEEP);
2857 	hmep->hme_rbuf = kmem_zalloc(HME_RMDMAX * sizeof (hmebuf_t), KM_SLEEP);
2858 
2859 	/* Alloc RX buffers. */
2860 	for (int i = 0; i < HME_RMDMAX; i++) {
2861 		if (hmeallocbuf(hmep, &hmep->hme_rbuf[i], DDI_DMA_READ) !=
2862 		    DDI_SUCCESS) {
2863 			return (DDI_FAILURE);
2864 		}
2865 	}
2866 
2867 	/* Alloc TX buffers. */
2868 	for (int i = 0; i < HME_TMDMAX; i++) {
2869 		if (hmeallocbuf(hmep, &hmep->hme_tbuf[i], DDI_DMA_WRITE) !=
2870 		    DDI_SUCCESS) {
2871 			return (DDI_FAILURE);
2872 		}
2873 	}
2874 	return (DDI_SUCCESS);
2875 }
2876 
2877 static void
2878 hmefreebufs(struct hme *hmep)
2879 {
2880 	int i;
2881 
2882 	if (hmep->hme_rbuf == NULL)
2883 		return;
2884 
2885 	/*
2886 	 * Free and unload pending xmit and recv buffers.
2887 	 * Maintaining the 1-to-1 ordered sequence of
2888 	 * We have written the routine to be idempotent.
2889 	 */
2890 
2891 	for (i = 0; i < HME_TMDMAX; i++) {
2892 		hmebuf_t *tbuf = &hmep->hme_tbuf[i];
2893 		if (tbuf->paddr) {
2894 			(void) ddi_dma_unbind_handle(tbuf->dmah);
2895 		}
2896 		if (tbuf->kaddr) {
2897 			ddi_dma_mem_free(&tbuf->acch);
2898 		}
2899 		if (tbuf->dmah) {
2900 			ddi_dma_free_handle(&tbuf->dmah);
2901 		}
2902 	}
2903 	for (i = 0; i < HME_RMDMAX; i++) {
2904 		hmebuf_t *rbuf = &hmep->hme_rbuf[i];
2905 		if (rbuf->paddr) {
2906 			(void) ddi_dma_unbind_handle(rbuf->dmah);
2907 		}
2908 		if (rbuf->kaddr) {
2909 			ddi_dma_mem_free(&rbuf->acch);
2910 		}
2911 		if (rbuf->dmah) {
2912 			ddi_dma_free_handle(&rbuf->dmah);
2913 		}
2914 	}
2915 	kmem_free(hmep->hme_rbuf, HME_RMDMAX * sizeof (hmebuf_t));
2916 	kmem_free(hmep->hme_tbuf, HME_TMDMAX * sizeof (hmebuf_t));
2917 }
2918 
2919 /*
2920  * Un-initialize (STOP) HME channel.
2921  */
2922 static void
2923 hmeuninit(struct hme *hmep)
2924 {
2925 	/*
2926 	 * Allow up to 'HMEDRAINTIME' for pending xmit's to complete.
2927 	 */
2928 	HMEDELAY((hmep->hme_txindex == hmep->hme_txreclaim), HMEDRAINTIME);
2929 
2930 	mutex_enter(&hmep->hme_intrlock);
2931 	mutex_enter(&hmep->hme_xmitlock);
2932 
2933 	hmep->hme_flags &= ~HMERUNNING;
2934 
2935 	(void) hmestop(hmep);
2936 
2937 	mutex_exit(&hmep->hme_xmitlock);
2938 	mutex_exit(&hmep->hme_intrlock);
2939 }
2940 
2941 /*
2942  * Allocate CONSISTENT memory for rmds and tmds with appropriate alignment and
2943  * map it in IO space. Allocate space for transmit and receive ddi_dma_handle
2944  * structures to use the DMA interface.
2945  */
2946 static int
2947 hmeallocthings(struct hme *hmep)
2948 {
2949 	int			size;
2950 	int			rval;
2951 	size_t			real_len;
2952 	uint_t			cookiec;
2953 	ddi_dma_cookie_t	dmac;
2954 	dev_info_t		*dip = hmep->dip;
2955 
2956 	/*
2957 	 * Allocate the TMD and RMD descriptors and extra for page alignment.
2958 	 */
2959 
2960 	rval = ddi_dma_alloc_handle(dip, &hme_dma_attr, DDI_DMA_DONTWAIT, NULL,
2961 	    &hmep->hme_rmd_dmah);
2962 	if (rval != DDI_SUCCESS) {
2963 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2964 		    "cannot allocate rmd handle - failed");
2965 		return (DDI_FAILURE);
2966 	}
2967 	size = HME_RMDMAX * sizeof (struct hme_rmd);
2968 	rval = ddi_dma_mem_alloc(hmep->hme_rmd_dmah, size,
2969 	    &hmep->hme_dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
2970 	    &hmep->hme_rmd_kaddr, &real_len, &hmep->hme_rmd_acch);
2971 	if (rval != DDI_SUCCESS) {
2972 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2973 		    "cannot allocate rmd dma mem - failed");
2974 		return (DDI_FAILURE);
2975 	}
2976 	hmep->hme_rmdp = (void *)(hmep->hme_rmd_kaddr);
2977 	rval = ddi_dma_addr_bind_handle(hmep->hme_rmd_dmah, NULL,
2978 	    hmep->hme_rmd_kaddr, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2979 	    DDI_DMA_DONTWAIT, NULL, &dmac, &cookiec);
2980 	if (rval != DDI_DMA_MAPPED) {
2981 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2982 		    "cannot allocate rmd dma - failed");
2983 		return (DDI_FAILURE);
2984 	}
2985 	hmep->hme_rmd_paddr = dmac.dmac_address;
2986 	if (cookiec != 1) {
2987 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2988 		    "too many rmd cookies - failed");
2989 		return (DDI_FAILURE);
2990 	}
2991 
2992 	rval = ddi_dma_alloc_handle(dip, &hme_dma_attr, DDI_DMA_DONTWAIT, NULL,
2993 	    &hmep->hme_tmd_dmah);
2994 	if (rval != DDI_SUCCESS) {
2995 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2996 		    "cannot allocate tmd handle - failed");
2997 		return (DDI_FAILURE);
2998 	}
2999 	size = HME_TMDMAX * sizeof (struct hme_rmd);
3000 	rval = ddi_dma_mem_alloc(hmep->hme_tmd_dmah, size,
3001 	    &hmep->hme_dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
3002 	    &hmep->hme_tmd_kaddr, &real_len, &hmep->hme_tmd_acch);
3003 	if (rval != DDI_SUCCESS) {
3004 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
3005 		    "cannot allocate tmd dma mem - failed");
3006 		return (DDI_FAILURE);
3007 	}
3008 	hmep->hme_tmdp = (void *)(hmep->hme_tmd_kaddr);
3009 	rval = ddi_dma_addr_bind_handle(hmep->hme_tmd_dmah, NULL,
3010 	    hmep->hme_tmd_kaddr, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3011 	    DDI_DMA_DONTWAIT, NULL, &dmac, &cookiec);
3012 	if (rval != DDI_DMA_MAPPED) {
3013 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
3014 		    "cannot allocate tmd dma - failed");
3015 		return (DDI_FAILURE);
3016 	}
3017 	hmep->hme_tmd_paddr = dmac.dmac_address;
3018 	if (cookiec != 1) {
3019 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
3020 		    "too many tmd cookies - failed");
3021 		return (DDI_FAILURE);
3022 	}
3023 
3024 	return (DDI_SUCCESS);
3025 }
3026 
3027 static void
3028 hmefreethings(struct hme *hmep)
3029 {
3030 	if (hmep->hme_rmd_paddr) {
3031 		(void) ddi_dma_unbind_handle(hmep->hme_rmd_dmah);
3032 		hmep->hme_rmd_paddr = 0;
3033 	}
3034 	if (hmep->hme_rmd_acch)
3035 		ddi_dma_mem_free(&hmep->hme_rmd_acch);
3036 	if (hmep->hme_rmd_dmah)
3037 		ddi_dma_free_handle(&hmep->hme_rmd_dmah);
3038 
3039 	if (hmep->hme_tmd_paddr) {
3040 		(void) ddi_dma_unbind_handle(hmep->hme_tmd_dmah);
3041 		hmep->hme_tmd_paddr = 0;
3042 	}
3043 	if (hmep->hme_tmd_acch)
3044 		ddi_dma_mem_free(&hmep->hme_tmd_acch);
3045 	if (hmep->hme_tmd_dmah)
3046 		ddi_dma_free_handle(&hmep->hme_tmd_dmah);
3047 }
3048 
3049 /*
3050  *	First check to see if it our device interrupting.
3051  */
3052 static uint_t
3053 hmeintr(caddr_t arg)
3054 {
3055 	struct hme	*hmep = (void *)arg;
3056 	uint32_t	hmesbits;
3057 	uint32_t	serviced = DDI_INTR_UNCLAIMED;
3058 	uint32_t	num_reads = 0;
3059 	uint32_t	rflags;
3060 	mblk_t		*mp, *head, **tail;
3061 
3062 
3063 	head = NULL;
3064 	tail = &head;
3065 
3066 	mutex_enter(&hmep->hme_intrlock);
3067 
3068 	/*
3069 	 * The status register auto-clears on read except for
3070 	 * MIF Interrupt bit
3071 	 */
3072 	hmesbits = GET_GLOBREG(status);
3073 	CHECK_GLOBREG();
3074 
3075 	/*
3076 	 * Note: TINT is sometimes enabled in thr hmereclaim()
3077 	 */
3078 
3079 	/*
3080 	 * Bugid 1227832 - to handle spurious interrupts on fusion systems.
3081 	 * Claim the first interrupt after initialization
3082 	 */
3083 	if (hmep->hme_flags & HMEINITIALIZED) {
3084 		hmep->hme_flags &= ~HMEINITIALIZED;
3085 		serviced = DDI_INTR_CLAIMED;
3086 	}
3087 
3088 	if ((hmesbits & (HMEG_STATUS_INTR | HMEG_STATUS_TINT)) == 0) {
3089 						/* No interesting interrupt */
3090 		if (hmep->hme_intrstats) {
3091 			if (serviced == DDI_INTR_UNCLAIMED)
3092 				KIOIP->intrs[KSTAT_INTR_SPURIOUS]++;
3093 			else
3094 				KIOIP->intrs[KSTAT_INTR_HARD]++;
3095 		}
3096 		mutex_exit(&hmep->hme_intrlock);
3097 		return (serviced);
3098 	}
3099 
3100 	serviced = DDI_INTR_CLAIMED;
3101 
3102 	if (!(hmep->hme_flags & HMERUNNING)) {
3103 		if (hmep->hme_intrstats)
3104 			KIOIP->intrs[KSTAT_INTR_HARD]++;
3105 		mutex_exit(&hmep->hme_intrlock);
3106 		hmeuninit(hmep);
3107 		return (serviced);
3108 	}
3109 
3110 	if (hmesbits & (HMEG_STATUS_FATAL_ERR | HMEG_STATUS_NONFATAL_ERR)) {
3111 		if (hmesbits & HMEG_STATUS_FATAL_ERR) {
3112 
3113 			if (hmep->hme_intrstats)
3114 				KIOIP->intrs[KSTAT_INTR_HARD]++;
3115 			hme_fatal_err(hmep, hmesbits);
3116 
3117 			mutex_exit(&hmep->hme_intrlock);
3118 			(void) hmeinit(hmep);
3119 			return (serviced);
3120 		}
3121 		hme_nonfatal_err(hmep, hmesbits);
3122 	}
3123 
3124 	if (hmesbits & (HMEG_STATUS_TX_ALL | HMEG_STATUS_TINT)) {
3125 		mutex_enter(&hmep->hme_xmitlock);
3126 
3127 		hmereclaim(hmep);
3128 		mutex_exit(&hmep->hme_xmitlock);
3129 	}
3130 
3131 	if (hmesbits & HMEG_STATUS_RINT) {
3132 
3133 		/*
3134 		 * This dummy PIO is required to flush the SBus
3135 		 * Bridge buffers in QFE.
3136 		 */
3137 		(void) GET_GLOBREG(config);
3138 
3139 		/*
3140 		 * Loop through each RMD no more than once.
3141 		 */
3142 		while (num_reads++ < HME_RMDMAX) {
3143 			hmebuf_t *rbuf;
3144 			int rxptr;
3145 
3146 			rxptr = hmep->hme_rxindex % HME_RMDMAX;
3147 			HMESYNCRMD(rxptr, DDI_DMA_SYNC_FORKERNEL);
3148 
3149 			rflags = GET_RMD_FLAGS(rxptr);
3150 			if (rflags & HMERMD_OWN) {
3151 				/*
3152 				 * Chip still owns it.  We're done.
3153 				 */
3154 				break;
3155 			}
3156 
3157 			/*
3158 			 * Retrieve the packet.
3159 			 */
3160 			rbuf = &hmep->hme_rbuf[rxptr];
3161 			mp = hmeread(hmep, rbuf, rflags);
3162 
3163 			/*
3164 			 * Return ownership of the RMD.
3165 			 */
3166 			PUT_RMD(rxptr, rbuf->paddr);
3167 			HMESYNCRMD(rxptr, DDI_DMA_SYNC_FORDEV);
3168 
3169 			if (mp != NULL) {
3170 				*tail = mp;
3171 				tail = &mp->b_next;
3172 			}
3173 
3174 			/*
3175 			 * Advance to the next RMD.
3176 			 */
3177 			hmep->hme_rxindex++;
3178 		}
3179 	}
3180 
3181 	if (hmep->hme_intrstats)
3182 		KIOIP->intrs[KSTAT_INTR_HARD]++;
3183 
3184 	mutex_exit(&hmep->hme_intrlock);
3185 
3186 	if (head != NULL)
3187 		mac_rx(hmep->hme_mh, NULL, head);
3188 
3189 	return (serviced);
3190 }
3191 
3192 /*
3193  * Transmit completion reclaiming.
3194  */
3195 static void
3196 hmereclaim(struct hme *hmep)
3197 {
3198 	boolean_t	reclaimed = B_FALSE;
3199 
3200 	/*
3201 	 * Loop through each TMD.
3202 	 */
3203 	while (hmep->hme_txindex > hmep->hme_txreclaim) {
3204 
3205 		int		reclaim;
3206 		uint32_t	flags;
3207 
3208 		reclaim = hmep->hme_txreclaim % HME_TMDMAX;
3209 		HMESYNCTMD(reclaim, DDI_DMA_SYNC_FORKERNEL);
3210 
3211 		flags = GET_TMD_FLAGS(reclaim);
3212 		if (flags & HMETMD_OWN) {
3213 			/*
3214 			 * Chip still owns it.  We're done.
3215 			 */
3216 			break;
3217 		}
3218 
3219 		/*
3220 		 * Count a chained packet only once.
3221 		 */
3222 		if (flags & HMETMD_SOP) {
3223 			hmep->hme_opackets++;
3224 		}
3225 
3226 		/*
3227 		 * MIB II
3228 		 */
3229 		hmep->hme_obytes += flags & HMETMD_BUFSIZE;
3230 
3231 		reclaimed = B_TRUE;
3232 		hmep->hme_txreclaim++;
3233 	}
3234 
3235 	if (reclaimed) {
3236 		/*
3237 		 * we could reclaim some TMDs so turn off interrupts
3238 		 */
3239 		if (hmep->hme_wantw) {
3240 			PUT_GLOBREG(intmask,
3241 			    HMEG_MASK_INTR | HMEG_MASK_TINT |
3242 			    HMEG_MASK_TX_ALL);
3243 			hmep->hme_wantw = B_FALSE;
3244 			mac_tx_update(hmep->hme_mh);
3245 		}
3246 	} else {
3247 		/*
3248 		 * enable TINTS: so that even if there is no further activity
3249 		 * hmereclaim will get called
3250 		 */
3251 		if (hmep->hme_wantw)
3252 			PUT_GLOBREG(intmask,
3253 			    GET_GLOBREG(intmask) & ~HMEG_MASK_TX_ALL);
3254 	}
3255 	CHECK_GLOBREG();
3256 }
3257 
3258 /*
3259  * Handle interrupts for fatal errors
3260  * Need reinitialization of the ENET channel.
3261  */
3262 static void
3263 hme_fatal_err(struct hme *hmep, uint_t hmesbits)
3264 {
3265 
3266 	if (hmesbits & HMEG_STATUS_SLV_PAR_ERR) {
3267 		hmep->hme_slvparerr++;
3268 	}
3269 
3270 	if (hmesbits & HMEG_STATUS_SLV_ERR_ACK) {
3271 		hmep->hme_slverrack++;
3272 	}
3273 
3274 	if (hmesbits & HMEG_STATUS_TX_TAG_ERR) {
3275 		hmep->hme_txtagerr++;
3276 		hmep->hme_oerrors++;
3277 	}
3278 
3279 	if (hmesbits & HMEG_STATUS_TX_PAR_ERR) {
3280 		hmep->hme_txparerr++;
3281 		hmep->hme_oerrors++;
3282 	}
3283 
3284 	if (hmesbits & HMEG_STATUS_TX_LATE_ERR) {
3285 		hmep->hme_txlaterr++;
3286 		hmep->hme_oerrors++;
3287 	}
3288 
3289 	if (hmesbits & HMEG_STATUS_TX_ERR_ACK) {
3290 		hmep->hme_txerrack++;
3291 		hmep->hme_oerrors++;
3292 	}
3293 
3294 	if (hmesbits & HMEG_STATUS_EOP_ERR) {
3295 		hmep->hme_eoperr++;
3296 	}
3297 
3298 	if (hmesbits & HMEG_STATUS_RX_TAG_ERR) {
3299 		hmep->hme_rxtagerr++;
3300 		hmep->hme_ierrors++;
3301 	}
3302 
3303 	if (hmesbits & HMEG_STATUS_RX_PAR_ERR) {
3304 		hmep->hme_rxparerr++;
3305 		hmep->hme_ierrors++;
3306 	}
3307 
3308 	if (hmesbits & HMEG_STATUS_RX_LATE_ERR) {
3309 		hmep->hme_rxlaterr++;
3310 		hmep->hme_ierrors++;
3311 	}
3312 
3313 	if (hmesbits & HMEG_STATUS_RX_ERR_ACK) {
3314 		hmep->hme_rxerrack++;
3315 		hmep->hme_ierrors++;
3316 	}
3317 }
3318 
3319 /*
3320  * Handle interrupts regarding non-fatal errors.
3321  */
3322 static void
3323 hme_nonfatal_err(struct hme *hmep, uint_t hmesbits)
3324 {
3325 
3326 	if (hmesbits & HMEG_STATUS_RX_DROP) {
3327 		hmep->hme_missed++;
3328 		hmep->hme_ierrors++;
3329 	}
3330 
3331 	if (hmesbits & HMEG_STATUS_DEFTIMR_EXP) {
3332 		hmep->hme_defer_xmts++;
3333 	}
3334 
3335 	if (hmesbits & HMEG_STATUS_FSTCOLC_EXP) {
3336 		hmep->hme_fstcol += 256;
3337 	}
3338 
3339 	if (hmesbits & HMEG_STATUS_LATCOLC_EXP) {
3340 		hmep->hme_tlcol += 256;
3341 		hmep->hme_oerrors += 256;
3342 	}
3343 
3344 	if (hmesbits & HMEG_STATUS_EXCOLC_EXP) {
3345 		hmep->hme_excol += 256;
3346 		hmep->hme_oerrors += 256;
3347 	}
3348 
3349 	if (hmesbits & HMEG_STATUS_NRMCOLC_EXP) {
3350 		hmep->hme_coll += 256;
3351 	}
3352 
3353 	if (hmesbits & HMEG_STATUS_MXPKTSZ_ERR) {
3354 		hmep->hme_babl++;
3355 		hmep->hme_oerrors++;
3356 	}
3357 
3358 	/*
3359 	 * This error is fatal and the board needs to
3360 	 * be reinitialized. Comments?
3361 	 */
3362 	if (hmesbits & HMEG_STATUS_TXFIFO_UNDR) {
3363 		hmep->hme_uflo++;
3364 		hmep->hme_oerrors++;
3365 	}
3366 
3367 	if (hmesbits & HMEG_STATUS_SQE_TST_ERR) {
3368 		hmep->hme_sqe_errors++;
3369 	}
3370 
3371 	if (hmesbits & HMEG_STATUS_RCV_CNT_EXP) {
3372 		if (hmep->hme_rxcv_enable) {
3373 			hmep->hme_cvc += 256;
3374 		}
3375 	}
3376 
3377 	if (hmesbits & HMEG_STATUS_RXFIFO_OVFL) {
3378 		hmep->hme_oflo++;
3379 		hmep->hme_ierrors++;
3380 	}
3381 
3382 	if (hmesbits & HMEG_STATUS_LEN_CNT_EXP) {
3383 		hmep->hme_lenerr += 256;
3384 		hmep->hme_ierrors += 256;
3385 	}
3386 
3387 	if (hmesbits & HMEG_STATUS_ALN_CNT_EXP) {
3388 		hmep->hme_align_errors += 256;
3389 		hmep->hme_ierrors += 256;
3390 	}
3391 
3392 	if (hmesbits & HMEG_STATUS_CRC_CNT_EXP) {
3393 		hmep->hme_fcs_errors += 256;
3394 		hmep->hme_ierrors += 256;
3395 	}
3396 }
3397 
3398 static mblk_t *
3399 hmeread(struct hme *hmep, hmebuf_t *rbuf, uint32_t rflags)
3400 {
3401 	mblk_t		*bp;
3402 	uint32_t	len;
3403 	t_uscalar_t	type;
3404 
3405 	len = (rflags & HMERMD_BUFSIZE) >> HMERMD_BUFSIZE_SHIFT;
3406 
3407 	/*
3408 	 * Check for short packet
3409 	 * and check for overflow packet also. The processing is the
3410 	 * same for both the cases - reuse the buffer. Update the Buffer
3411 	 * overflow counter.
3412 	 */
3413 	if ((len < ETHERMIN) || (rflags & HMERMD_OVFLOW) ||
3414 	    (len > (ETHERMAX + 4))) {
3415 		if (len < ETHERMIN)
3416 			hmep->hme_runt++;
3417 
3418 		else {
3419 			hmep->hme_buff++;
3420 			hmep->hme_toolong_errors++;
3421 		}
3422 		hmep->hme_ierrors++;
3423 		return (NULL);
3424 	}
3425 
3426 	/*
3427 	 * Sync the received buffer before looking at it.
3428 	 */
3429 
3430 	(void) ddi_dma_sync(rbuf->dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL);
3431 
3432 	/*
3433 	 * copy the packet data and then recycle the descriptor.
3434 	 */
3435 
3436 	if ((bp = allocb(len + HME_FSTBYTE_OFFSET, BPRI_HI)) == NULL) {
3437 
3438 		hmep->hme_allocbfail++;
3439 		hmep->hme_norcvbuf++;
3440 
3441 		return (NULL);
3442 	}
3443 
3444 	bcopy(rbuf->kaddr, bp->b_rptr, len + HME_FSTBYTE_OFFSET);
3445 
3446 	hmep->hme_ipackets++;
3447 
3448 	/*  Add the First Byte offset to the b_rptr and copy */
3449 	bp->b_rptr += HME_FSTBYTE_OFFSET;
3450 	bp->b_wptr = bp->b_rptr + len;
3451 
3452 	/*
3453 	 * update MIB II statistics
3454 	 */
3455 	BUMP_InNUcast(hmep, bp->b_rptr);
3456 	hmep->hme_rbytes += len;
3457 
3458 	type = get_ether_type(bp->b_rptr);
3459 
3460 	/*
3461 	 * TCP partial checksum in hardware
3462 	 */
3463 	if (type == ETHERTYPE_IP || type == ETHERTYPE_IPV6) {
3464 		uint16_t cksum = ~rflags & HMERMD_CKSUM;
3465 		uint_t end = len - sizeof (struct ether_header);
3466 		mac_hcksum_set(bp, 0, 0, end, htons(cksum), HCK_PARTIALCKSUM);
3467 	}
3468 
3469 	return (bp);
3470 }
3471 
3472 /*VARARGS*/
3473 static void
3474 hme_fault_msg(struct hme *hmep, uint_t severity, msg_t type, char *fmt, ...)
3475 {
3476 	char	msg_buffer[255];
3477 	va_list	ap;
3478 
3479 	va_start(ap, fmt);
3480 	(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
3481 
3482 	if (hmep == NULL) {
3483 		cmn_err(CE_NOTE, "hme : %s", msg_buffer);
3484 
3485 	} else if (type == DISPLAY_MSG) {
3486 		cmn_err(CE_CONT, "?%s%d : %s\n", ddi_driver_name(hmep->dip),
3487 		    hmep->instance, msg_buffer);
3488 	} else if (severity == SEVERITY_HIGH) {
3489 		cmn_err(CE_WARN, "%s%d : %s, SEVERITY_HIGH, %s\n",
3490 		    ddi_driver_name(hmep->dip), hmep->instance,
3491 		    msg_buffer, msg_string[type]);
3492 	} else {
3493 		cmn_err(CE_CONT, "%s%d : %s\n", ddi_driver_name(hmep->dip),
3494 		    hmep->instance, msg_buffer);
3495 	}
3496 	va_end(ap);
3497 }
3498 
3499 /*
3500  * if this is the first init do not bother to save the
3501  * counters. They should be 0, but do not count on it.
3502  */
3503 static void
3504 hmesavecntrs(struct hme *hmep)
3505 {
3506 	uint32_t fecnt, aecnt, lecnt, rxcv;
3507 	uint32_t ltcnt, excnt;
3508 
3509 	/* XXX What all gets added in ierrors and oerrors? */
3510 	fecnt = GET_MACREG(fecnt);
3511 	PUT_MACREG(fecnt, 0);
3512 
3513 	aecnt = GET_MACREG(aecnt);
3514 	hmep->hme_align_errors += aecnt;
3515 	PUT_MACREG(aecnt, 0);
3516 
3517 	lecnt = GET_MACREG(lecnt);
3518 	hmep->hme_lenerr += lecnt;
3519 	PUT_MACREG(lecnt, 0);
3520 
3521 	rxcv = GET_MACREG(rxcv);
3522 #ifdef HME_CODEVIOL_BUG
3523 	/*
3524 	 * Ignore rxcv errors for Sbus/FEPS 2.1 or earlier
3525 	 */
3526 	if (!hmep->hme_rxcv_enable) {
3527 		rxcv = 0;
3528 	}
3529 #endif
3530 	hmep->hme_cvc += rxcv;
3531 	PUT_MACREG(rxcv, 0);
3532 
3533 	ltcnt = GET_MACREG(ltcnt);
3534 	hmep->hme_tlcol += ltcnt;
3535 	PUT_MACREG(ltcnt, 0);
3536 
3537 	excnt = GET_MACREG(excnt);
3538 	hmep->hme_excol += excnt;
3539 	PUT_MACREG(excnt, 0);
3540 
3541 	hmep->hme_fcs_errors += fecnt;
3542 	hmep->hme_ierrors += (fecnt + aecnt + lecnt);
3543 	hmep->hme_oerrors += (ltcnt + excnt);
3544 	hmep->hme_coll += (GET_MACREG(nccnt) + ltcnt);
3545 
3546 	PUT_MACREG(nccnt, 0);
3547 	CHECK_MACREG();
3548 }
3549 
3550 /*
3551  * To set up the mac address for the network interface:
3552  * The adapter card may support a local mac address which is published
3553  * in a device node property "local-mac-address". This mac address is
3554  * treated as the factory-installed mac address for DLPI interface.
3555  * If the adapter firmware has used the device for diskless boot
3556  * operation it publishes a property called "mac-address" for use by
3557  * inetboot and the device driver.
3558  * If "mac-address" is not found, the system options property
3559  * "local-mac-address" is used to select the mac-address. If this option
3560  * is set to "true", and "local-mac-address" has been found, then
3561  * local-mac-address is used; otherwise the system mac address is used
3562  * by calling the "localetheraddr()" function.
3563  */
3564 static void
3565 hme_setup_mac_address(struct hme *hmep, dev_info_t *dip)
3566 {
3567 	char	*prop;
3568 	int	prop_len = sizeof (int);
3569 
3570 	hmep->hme_addrflags = 0;
3571 
3572 	/*
3573 	 * Check if it is an adapter with its own local mac address
3574 	 * If it is present, save it as the "factory-address"
3575 	 * for this adapter.
3576 	 */
3577 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
3578 	    "local-mac-address",
3579 	    (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
3580 		if (prop_len == ETHERADDRL) {
3581 			hmep->hme_addrflags = HME_FACTADDR_PRESENT;
3582 			ether_bcopy(prop, &hmep->hme_factaddr);
3583 			HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
3584 			    "Local Ethernet address = %s",
3585 			    ether_sprintf(&hmep->hme_factaddr));
3586 		}
3587 		kmem_free(prop, prop_len);
3588 	}
3589 
3590 	/*
3591 	 * Check if the adapter has published "mac-address" property.
3592 	 * If it is present, use it as the mac address for this device.
3593 	 */
3594 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
3595 	    "mac-address", (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
3596 		if (prop_len >= ETHERADDRL) {
3597 			ether_bcopy(prop, &hmep->hme_ouraddr);
3598 			kmem_free(prop, prop_len);
3599 			return;
3600 		}
3601 		kmem_free(prop, prop_len);
3602 	}
3603 
3604 #ifdef	__sparc
3605 	/*
3606 	 * On sparc, we might be able to use the mac address from the
3607 	 * system.  However, on all other systems, we need to use the
3608 	 * address from the PROM.
3609 	 */
3610 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 0, "local-mac-address?",
3611 	    (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
3612 		if ((strncmp("true", prop, prop_len) == 0) &&
3613 		    (hmep->hme_addrflags & HME_FACTADDR_PRESENT)) {
3614 			hmep->hme_addrflags |= HME_FACTADDR_USE;
3615 			ether_bcopy(&hmep->hme_factaddr, &hmep->hme_ouraddr);
3616 			kmem_free(prop, prop_len);
3617 			HME_FAULT_MSG1(hmep, SEVERITY_NONE, DISPLAY_MSG,
3618 			    "Using local MAC address");
3619 			return;
3620 		}
3621 		kmem_free(prop, prop_len);
3622 	}
3623 
3624 	/*
3625 	 * Get the system ethernet address.
3626 	 */
3627 	(void) localetheraddr((struct ether_addr *)NULL, &hmep->hme_ouraddr);
3628 #else
3629 	ether_bcopy(&hmep->hme_factaddr, &hmep->hme_ouraddr);
3630 #endif
3631 }
3632 
3633 /* ARGSUSED */
3634 static void
3635 hme_check_acc_handle(char *file, uint_t line, struct hme *hmep,
3636     ddi_acc_handle_t handle)
3637 {
3638 }
3639