xref: /titanic_52/usr/src/uts/common/io/hme/hme.c (revision 500b1e787b108592a37e3d54dc9b5e676de5386d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 
27 /*
28  * SunOS MT STREAMS FEPS(SBus)/Cheerio(PCI) 10/100Mb Ethernet Device Driver
29  */
30 
31 #include	<sys/types.h>
32 #include	<sys/debug.h>
33 #include	<sys/stream.h>
34 #include	<sys/cmn_err.h>
35 #include	<sys/kmem.h>
36 #include	<sys/crc32.h>
37 #include	<sys/modctl.h>
38 #include	<sys/conf.h>
39 #include	<sys/strsun.h>
40 #include	<sys/kstat.h>
41 #include	<sys/pattr.h>
42 #include	<sys/dlpi.h>
43 #include	<sys/strsubr.h>
44 #include	<sys/mac_provider.h>
45 #include	<sys/mac_ether.h>
46 #include	<sys/mii.h>
47 #include	<sys/ethernet.h>
48 #include	<sys/vlan.h>
49 #include	<sys/pci.h>
50 #include	<sys/policy.h>
51 #include	<sys/ddi.h>
52 #include	<sys/sunddi.h>
53 #include	"hme_phy.h"
54 #include	"hme_mac.h"
55 #include	"hme.h"
56 
57 typedef void	(*fptrv_t)();
58 
59 typedef enum {
60 	NO_MSG		= 0,
61 	AUTOCONFIG_MSG,
62 	DISPLAY_MSG,
63 	INIT_MSG,
64 	UNINIT_MSG,
65 	CONFIG_MSG,
66 	MII_MSG,
67 	FATAL_ERR_MSG,
68 	NFATAL_ERR_MSG,
69 	XCVR_MSG,
70 	NOXCVR_MSG,
71 	ERX_MSG,
72 	DDI_MSG,
73 } msg_t;
74 
75 msg_t	hme_debug_level =	NO_MSG;
76 
77 static char	*msg_string[] = {
78 	"NONE       ",
79 	"AUTOCONFIG ",
80 	"DISPLAY    ",
81 	"INIT       ",
82 	"UNINIT     ",
83 	"CONFIG     ",
84 	"MII        ",
85 	"FATAL_ERR  ",
86 	"NFATAL_ERR ",
87 	"XCVR       ",
88 	"NOXCVR     ",
89 	"ERX        ",
90 	"DDI        ",
91 };
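
/*
 * Note: msg_string[] must remain in one-to-one correspondence with the
 * msg_t enumeration above; a missing comma between entries would make
 * adjacent string literals concatenate and silently shift every
 * following message name.
 */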
92 
93 #define	SEVERITY_NONE	0
94 #define	SEVERITY_LOW	0
95 #define	SEVERITY_MID	1
96 #define	SEVERITY_HIGH	2
97 #define	SEVERITY_UNKNOWN 99
98 
99 #define	FEPS_URUN_BUG
100 #define	HME_CODEVIOL_BUG
101 
102 #define	KIOIP	KSTAT_INTR_PTR(hmep->hme_intrstats)
103 
104 /*
105  * The following variables are used for checking fixes in Sbus/FEPS 2.0
106  */
107 static	int	hme_urun_fix = 0;	/* Bug fixed in Sbus/FEPS 2.0 */
108 
109 /*
110  * The following variables are used for configuring various features
111  */
112 static	int	hme_64bit_enable =	1;	/* Use 64-bit sbus transfers */
113 static	int	hme_reject_own =	1;	/* Reject packets with own SA */
114 static	int	hme_ngu_enable =	0;	/* Never Give Up mode */
115 
116 mac_priv_prop_t hme_priv_prop[] = {
117 	{	"_ipg0",	MAC_PROP_PERM_RW	},
118 	{	"_ipg1",	MAC_PROP_PERM_RW	},
119 	{	"_ipg2",	MAC_PROP_PERM_RW	},
120 	{	"_lance_mode",	MAC_PROP_PERM_RW	},
121 };
122 
123 static	int	hme_lance_mode =	1;	/* to enable lance mode */
124 static	int	hme_ipg0 =		16;
125 static	int	hme_ipg1 =		8;
126 static	int	hme_ipg2 =		4;
127 
128 /*
129  * The following parameters may be configured by the user. If they are not
130  * configured by the user, the values will be based on the capabilities of
131  * the transceiver.
132  * The value "HME_NOTUSR" is ORed with the parameter value to indicate values
133  * which are NOT configured by the user.
134  */
135 
136 #define	HME_NOTUSR	0x0f000000
137 #define	HME_MASK_1BIT	0x1
138 #define	HME_MASK_5BIT	0x1f
139 #define	HME_MASK_8BIT	0xff
140 
141 /*
142  * All strings used by hme messaging functions
143  */
144 
145 static	char *no_xcvr_msg =
146 	"No transceiver found.";
147 
148 static	char *burst_size_msg =
149 	"Could not identify the burst size";
150 
151 static	char *unk_rx_ringsz_msg =
152 	"Unknown receive RINGSZ";
153 
154 static  char *add_intr_fail_msg =
155 	"ddi_add_intr(9F) failed";
156 
157 static  char *mregs_4global_reg_fail_msg =
158 	"ddi_regs_map_setup(9F) for global reg failed";
159 
160 static	char *mregs_4etx_reg_fail_msg =
161 	"ddi_map_regs for etx reg failed";
162 
163 static	char *mregs_4erx_reg_fail_msg =
164 	"ddi_map_regs for erx reg failed";
165 
166 static	char *mregs_4bmac_reg_fail_msg =
167 	"ddi_map_regs for bmac reg failed";
168 
169 static	char *mregs_4mif_reg_fail_msg =
170 	"ddi_map_regs for mif reg failed";
171 
172 static	char *init_fail_gen_msg =
173 	"Failed to initialize hardware/driver";
174 
175 static	char *ddi_nregs_fail_msg =
176 	"ddi_dev_nregs(9F) failed, returned %d";
177 
178 static	char *bad_num_regs_msg =
179 	"Invalid number of registers.";
180 
181 
182 
183 /*
184  * Function prototypes.
185  */
186 /* these two are global so that qfe can use them */
187 int hmeattach(dev_info_t *, ddi_attach_cmd_t);
188 int hmedetach(dev_info_t *, ddi_detach_cmd_t);
189 int hmequiesce(dev_info_t *);
190 static	boolean_t hmeinit_xfer_params(struct hme *);
191 static	uint_t hmestop(struct hme *);
192 static	void hmestatinit(struct hme *);
193 static	int hmeallocthings(struct hme *);
194 static	void hmefreethings(struct hme *);
195 static	int hmeallocbuf(struct hme *, hmebuf_t *, int);
196 static	int hmeallocbufs(struct hme *);
197 static	void hmefreebufs(struct hme *);
198 static	void hmeget_hm_rev_property(struct hme *);
199 static	boolean_t hmestart(struct hme *, mblk_t *);
200 static	uint_t hmeintr(caddr_t);
201 static	void hmereclaim(struct hme *);
202 static	int hmeinit(struct hme *);
203 static	void hmeuninit(struct hme *hmep);
204 static 	mblk_t *hmeread(struct hme *, hmebuf_t *, uint32_t);
205 static	void hmesavecntrs(struct hme *);
206 static	void hme_fatal_err(struct hme *, uint_t);
207 static	void hme_nonfatal_err(struct hme *, uint_t);
208 static	int hmeburstsizes(struct hme *);
209 static	void send_bit(struct hme *, uint16_t);
210 static	uint16_t get_bit_std(uint8_t, struct hme *);
211 static	uint16_t hme_bb_mii_read(struct hme *, uint8_t, uint8_t);
212 static	void hme_bb_mii_write(struct hme *, uint8_t, uint8_t, uint16_t);
213 static	void hme_bb_force_idle(struct hme *);
214 static	uint16_t hme_mii_read(void *, uint8_t, uint8_t);
215 static	void hme_mii_write(void *, uint8_t, uint8_t, uint16_t);
216 static	void hme_setup_mac_address(struct hme *, dev_info_t *);
217 static	void hme_mii_notify(void *, link_state_t);
218 
219 static void hme_fault_msg(struct hme *, uint_t, msg_t, char *, ...);
220 
221 static void hme_check_acc_handle(char *, uint_t, struct hme *,
222     ddi_acc_handle_t);
223 
224 /*
225  * Nemo (GLDv3) Functions.
226  */
227 static int	hme_m_stat(void *, uint_t, uint64_t *);
228 static int	hme_m_start(void *);
229 static void	hme_m_stop(void *);
230 static int	hme_m_promisc(void *, boolean_t);
231 static int	hme_m_multicst(void *, boolean_t, const uint8_t *);
232 static int	hme_m_unicst(void *, const uint8_t *);
233 static mblk_t	*hme_m_tx(void *, mblk_t *);
234 static boolean_t	hme_m_getcapab(void *, mac_capab_t, void *);
235 static int hme_m_getprop(void *, const char *, mac_prop_id_t, uint_t,
236     uint_t, void *, uint_t *);
237 static int hme_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
238     const void *);
239 
240 static mii_ops_t hme_mii_ops = {
241 	MII_OPS_VERSION,
242 	hme_mii_read,
243 	hme_mii_write,
244 	hme_mii_notify,
245 	NULL
246 };
247 
248 static mac_callbacks_t hme_m_callbacks = {
249 	MC_GETCAPAB | MC_SETPROP | MC_GETPROP,
250 	hme_m_stat,
251 	hme_m_start,
252 	hme_m_stop,
253 	hme_m_promisc,
254 	hme_m_multicst,
255 	hme_m_unicst,
256 	hme_m_tx,
257 	NULL,
258 	hme_m_getcapab,
259 	NULL,
260 	NULL,
261 	hme_m_setprop,
262 	hme_m_getprop,
263 };
264 
265 DDI_DEFINE_STREAM_OPS(hme_dev_ops, nulldev, nulldev, hmeattach, hmedetach,
266     nodev, NULL, D_MP, NULL, hmequiesce);
267 
268 #define	HME_FAULT_MSG1(p, s, t, f) \
269     hme_fault_msg((p), (s), (t), (f));
270 
271 #define	HME_FAULT_MSG2(p, s, t, f, a) \
272     hme_fault_msg((p), (s), (t), (f), (a));
273 
274 #define	HME_FAULT_MSG3(p, s, t, f, a, b) \
275     hme_fault_msg((p), (s), (t), (f), (a), (b));
276 
277 #define	HME_FAULT_MSG4(p, s, t, f, a, b, c) \
278     hme_fault_msg((p), (s), (t), (f), (a), (b), (c));
279 
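/*
 * (HME_FAULT_MSG1 through MSG4 exist because this code predates C99
 * variadic macros; each variant simply forwards a fixed number of
 * arguments to hme_fault_msg().)
 */
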
280 #define	CHECK_MIFREG() \
281 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_mifregh)
282 #define	CHECK_ETXREG() \
283 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_etxregh)
284 #define	CHECK_ERXREG() \
285 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_erxregh)
286 #define	CHECK_MACREG() \
287 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_bmacregh)
288 #define	CHECK_GLOBREG() \
289 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_globregh)
290 
291 /*
292  * Claim the device is ultra-capable of burst in the beginning.  Use
293  * the value returned by ddi_dma_burstsizes() to actually set the HME
294  * global configuration register later.
295  *
296  * Sbus/FEPS supports burst sizes of 16, 32 and 64 bytes. Also, it supports
297  * 32-bit and 64-bit Sbus transfers. Hence the dlim_burstsizes field contains
298  * the burstsizes in both the lo and hi words.
299  */
300 #define	HMELIMADDRLO	((uint64_t)0x00000000)
301 #define	HMELIMADDRHI	((uint64_t)0xffffffff)
302 
303 /*
304  * Note that rx and tx data buffers can be arbitrarily aligned, but
305  * that the descriptor rings need to be aligned on 2K boundaries, per
306  * the spec.
307  */
308 static ddi_dma_attr_t hme_dma_attr = {
309 	DMA_ATTR_V0,		/* version number. */
310 	(uint64_t)HMELIMADDRLO,	/* low address */
311 	(uint64_t)HMELIMADDRHI,	/* high address */
312 	(uint64_t)0x00ffffff,	/* address counter max */
313 	(uint64_t)HME_HMDALIGN,	/* alignment */
314 	(uint_t)0x00700070,	/* dlim_burstsizes for 32 and 64 bit xfers */
315 	(uint32_t)0x1,		/* minimum transfer size */
316 	(uint64_t)0x7fffffff,	/* maximum transfer size */
317 	(uint64_t)0x00ffffff,	/* maximum segment size */
318 	1,			/* scatter/gather list length */
319 	512,			/* granularity */
320 	0			/* attribute flags */
321 };
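
/*
 * Decoding note (standard ddi_dma_attr_t burst encoding): bit n set in
 * the burst-size mask permits 2^n-byte bursts, so 0x70 in each halfword
 * above allows the 16-, 32- and 64-byte bursts mentioned earlier, with
 * the low and high halves covering 32-bit and 64-bit SBus transfer modes.
 */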
322 
323 static ddi_device_acc_attr_t hme_buf_attr = {
324 	DDI_DEVICE_ATTR_V0,
325 	DDI_NEVERSWAP_ACC,
326 	DDI_STRICTORDER_ACC,	/* probably could allow merging & caching */
327 	DDI_DEFAULT_ACC,
328 };
329 
330 static uchar_t pci_latency_timer = 0;
331 
332 /*
333  * Module linkage information for the kernel.
334  */
335 static struct modldrv modldrv = {
336 	&mod_driverops,	/* Type of module.  This one is a driver */
337 	"Sun HME 10/100 Mb Ethernet",
338 	&hme_dev_ops,	/* driver ops */
339 };
340 
341 static struct modlinkage modlinkage = {
342 	MODREV_1, &modldrv, NULL
343 };
344 
345 /* <<<<<<<<<<<<<<<<<<<<<<  Register operations >>>>>>>>>>>>>>>>>>>>> */
346 
347 #define	GET_MIFREG(reg) \
348 	ddi_get32(hmep->hme_mifregh, (uint32_t *)&hmep->hme_mifregp->reg)
349 #define	PUT_MIFREG(reg, value) \
350 	ddi_put32(hmep->hme_mifregh, (uint32_t *)&hmep->hme_mifregp->reg, value)
351 
352 #define	GET_ETXREG(reg) \
353 	ddi_get32(hmep->hme_etxregh, (uint32_t *)&hmep->hme_etxregp->reg)
354 #define	PUT_ETXREG(reg, value) \
355 	ddi_put32(hmep->hme_etxregh, (uint32_t *)&hmep->hme_etxregp->reg, value)
356 #define	GET_ERXREG(reg) \
357 	ddi_get32(hmep->hme_erxregh, (uint32_t *)&hmep->hme_erxregp->reg)
358 #define	PUT_ERXREG(reg, value) \
359 	ddi_put32(hmep->hme_erxregh, (uint32_t *)&hmep->hme_erxregp->reg, value)
360 #define	GET_MACREG(reg) \
361 	ddi_get32(hmep->hme_bmacregh, (uint32_t *)&hmep->hme_bmacregp->reg)
362 #define	PUT_MACREG(reg, value) \
363 	ddi_put32(hmep->hme_bmacregh, \
364 		(uint32_t *)&hmep->hme_bmacregp->reg, value)
365 #define	GET_GLOBREG(reg) \
366 	ddi_get32(hmep->hme_globregh, (uint32_t *)&hmep->hme_globregp->reg)
367 #define	PUT_GLOBREG(reg, value) \
368 	ddi_put32(hmep->hme_globregh, \
369 		(uint32_t *)&hmep->hme_globregp->reg, value)
370 #define	PUT_TMD(ptr, paddr, len, flags)					\
371 	ddi_put32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_addr, paddr); \
372 	ddi_put32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_flags,	\
373 	    len | flags)
374 #define	GET_TMD_FLAGS(ptr)					\
375 	ddi_get32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_flags)
376 #define	PUT_RMD(ptr, paddr) \
377 	ddi_put32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_addr, paddr); \
378 	ddi_put32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_flags,	\
379 	    (uint32_t)(HMEBUFSIZE << HMERMD_BUFSIZE_SHIFT) | HMERMD_OWN)
380 #define	GET_RMD_FLAGS(ptr)					\
381 	ddi_get32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_flags)
382 
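/*
 * Ownership handshake (as used by the rx/tx paths): PUT_RMD() hands a
 * receive descriptor to the chip by setting HMERMD_OWN, and the host
 * touches the buffer again only after GET_RMD_FLAGS() shows the chip
 * has cleared that bit; the TMD macros follow the same convention,
 * with ownership carried in tmd_flags.
 */
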
383 #define	GET_ROM8(offset) \
384 	ddi_get8((hmep->hme_romh), (offset))
385 
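/*
 * Routing every register and ROM access through DDI access handles
 * (ddi_get8, ddi_put32 and friends) is what lets the CHECK_*REG macros
 * above ask hme_check_acc_handle() whether any access on that handle
 * has faulted.
 */
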
386 /*
387  * Ether_copy is not endian-correct. Define an endian-correct version.
388  */
389 #define	ether_bcopy(a, b) (bcopy(a, b, 6))
390 
391 /*
392  * Ether-type is specifically big-endian, but data region is unknown endian
393  */
394 #define	get_ether_type(ptr) \
395 	(((((uint8_t *)ptr)[12] << 8) | (((uint8_t *)ptr)[13])))
396 
397 /* <<<<<<<<<<<<<<<<<<<<<<  Configuration Parameters >>>>>>>>>>>>>>>>>>>>> */
398 
399 #define	BMAC_DEFAULT_JAMSIZE	(0x04)		/* jamsize equals 4 */
400 #define	BMAC_LONG_JAMSIZE	(0x10)		/* jamsize equals 0x10 */
401 static	int 	jamsize = BMAC_DEFAULT_JAMSIZE;
402 
403 
404 /*
405  * Calculate the bit in the multicast address filter that selects the given
406  * address.
407  */
408 
409 static uint32_t
410 hmeladrf_bit(const uint8_t *addr)
411 {
412 	uint32_t crc;
413 
414 	CRC32(crc, addr, ETHERADDRL, -1U, crc32_table);
415 
416 	/*
417 	 * Just want the 6 most significant bits.
418 	 */
419 	return (crc >> 26);
420 }
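
/*
 * A sketch of intended use (the ladrf[] layout here is illustrative,
 * not the literal register layout): the 6-bit result picks one bit of
 * a 64-bit multicast hash filter spread across four 16-bit registers,
 * e.g.
 *
 *	uint32_t bit = hmeladrf_bit(mca);
 *	ladrf[bit >> 4] |= 1 << (bit & 0xf);
 */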
421 
422 /* <<<<<<<<<<<<<<<<<<<<<<<<  Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
423 
424 static void
425 send_bit(struct hme *hmep, uint16_t x)
426 {
427 	PUT_MIFREG(mif_bbdata, x);
428 	PUT_MIFREG(mif_bbclk, HME_BBCLK_LOW);
429 	PUT_MIFREG(mif_bbclk, HME_BBCLK_HIGH);
430 }
431 
432 
433 /*
434  * To read the MII register bits according to the IEEE Standard
435  */
436 static uint16_t
437 get_bit_std(uint8_t phyad, struct hme *hmep)
438 {
439 	uint16_t	x;
440 
441 	PUT_MIFREG(mif_bbclk, HME_BBCLK_LOW);
442 	drv_usecwait(1);	/* wait >330 ns for stable data */
443 	if (phyad == HME_INTERNAL_PHYAD)
444 		x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM0) ? 1 : 0;
445 	else
446 		x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM1) ? 1 : 0;
447 	PUT_MIFREG(mif_bbclk, HME_BBCLK_HIGH);
448 	return (x);
449 }
450 
451 #define	SEND_BIT(x)		send_bit(hmep, x)
452 #define	GET_BIT_STD(phyad, x)	x = get_bit_std(phyad, hmep)
453 
454 
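/*
 * The routines below bit-bang IEEE 802.3 clause 22 MII management
 * frames.  A write frame, as driven by hme_bb_mii_write(), looks like:
 *
 *	<preamble> <ST=01> <OP=01> <AAAAA> <RRRRR> <TA=10> <16 data bits>
 *
 * and a read frame uses OP=10, with the turnaround and data bits
 * sampled from the PHY instead of driven.  hme_bb_force_idle()
 * supplies the preamble of idle ones.
 */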
455 static void
456 hme_bb_mii_write(struct hme *hmep, uint8_t phyad, uint8_t regad, uint16_t data)
457 {
458 	int	i;
459 
460 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
461 	hme_bb_force_idle(hmep);
462 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
463 	SEND_BIT(0); SEND_BIT(1);	/* <OP> */
464 
465 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
466 		SEND_BIT((phyad >> i) & 1);
467 	}
468 
469 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
470 		SEND_BIT((regad >> i) & 1);
471 	}
472 
473 	SEND_BIT(1); SEND_BIT(0);	/* <TA> */
474 
475 	for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
476 		SEND_BIT((data >> i) & 1);
477 	}
478 
479 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
480 	CHECK_MIFREG();
481 }
482 
483 /* Bit-bang read of an MII register; returns the 16 data bits read. */
484 static uint16_t
485 hme_bb_mii_read(struct hme *hmep, uint8_t phyad, uint8_t regad)
486 {
487 	int		i;
488 	uint32_t	x;
489 	uint16_t	data = 0;
490 
491 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
492 	hme_bb_force_idle(hmep);
493 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
494 	SEND_BIT(1); SEND_BIT(0);	/* <OP> */
495 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
496 		SEND_BIT((phyad >> i) & 1);
497 	}
498 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
499 		SEND_BIT((regad >> i) & 1);
500 	}
501 
502 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
503 
504 	GET_BIT_STD(phyad, x);
505 	GET_BIT_STD(phyad, x);		/* <TA> */
506 	for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
507 		GET_BIT_STD(phyad, x);
508 		data += (x << i);
509 	}
510 	/*
511 	 * Kludge to get the Transceiver out of hung mode
512 	 */
513 	GET_BIT_STD(phyad, x);
514 	GET_BIT_STD(phyad, x);
515 	GET_BIT_STD(phyad, x);
516 	CHECK_MIFREG();
517 	return (data);
518 }
519 
520 
521 static void
522 hme_bb_force_idle(struct hme *hmep)
523 {
524 	int	i;
525 
526 	for (i = 0; i < 33; i++) {
527 		SEND_BIT(1);
528 	}
529 }
530 
531 /* <<<<<<<<<<<<<<<<<<<<End of Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
532 
533 
534 /* <<<<<<<<<<<<< Frame Register used for MII operations >>>>>>>>>>>>>>>>>>>> */
535 
536 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
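/*
 * On revisions where hmeget_hm_rev_property() sets hme_frame_enable,
 * the MIF frame register shifts an entire clause 22 frame in hardware;
 * older silicon falls back to the bit-bang routines above.
 */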
537 /* Returns register data, or 0xffff if the transceiver does not respond. */
538 hme_mii_read(void *arg, uint8_t phyad, uint8_t regad)
539 {
540 	struct hme	*hmep = arg;
541 	uint32_t	frame;
542 	uint32_t	tmp_mif;
543 	uint32_t	tmp_xif;
544 
545 	tmp_mif = GET_MIFREG(mif_cfg);
546 	tmp_xif = GET_MACREG(xifc);
547 
548 	switch (phyad) {
549 	case HME_EXTERNAL_PHYAD:
550 		PUT_MIFREG(mif_cfg, tmp_mif | HME_MIF_CFGPS);
551 		PUT_MACREG(xifc, tmp_xif | BMAC_XIFC_MIIBUFDIS);
552 		break;
553 	case HME_INTERNAL_PHYAD:
554 		PUT_MIFREG(mif_cfg, tmp_mif & ~(HME_MIF_CFGPS));
555 		PUT_MACREG(xifc, tmp_xif & ~(BMAC_XIFC_MIIBUFDIS));
556 		break;
557 	default:
558 		return (0xffff);
559 	}
560 
561 	if (!hmep->hme_frame_enable) {
562 		frame = (hme_bb_mii_read(hmep, phyad, regad));
563 		PUT_MACREG(xifc, tmp_xif);
564 		PUT_MIFREG(mif_cfg, tmp_mif);
565 		return (frame & 0xffff);
566 	}
567 
568 	PUT_MIFREG(mif_frame,
569 	    HME_MIF_FRREAD | (phyad << HME_MIF_FRPHYAD_SHIFT) |
570 	    (regad << HME_MIF_FRREGAD_SHIFT));
571 /*
572  *	HMEDELAY((*framerp & HME_MIF_FRTA0), HMEMAXRSTDELAY);
573  */
574 	HMEDELAY((GET_MIFREG(mif_frame) & HME_MIF_FRTA0), 300);
575 	frame = GET_MIFREG(mif_frame);
576 	CHECK_MIFREG();
577 
578 	PUT_MACREG(xifc, tmp_xif);
579 	PUT_MIFREG(mif_cfg, tmp_mif);
580 
581 	if ((frame & HME_MIF_FRTA0) == 0) {
584 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, MII_MSG,
585 		    "MIF Read failure");
586 		return (0xffff);
587 	}
588 	return ((uint16_t)(frame & HME_MIF_FRDATA));
589 }
590 
591 static void
592 hme_mii_write(void *arg, uint8_t phyad, uint8_t regad, uint16_t data)
593 {
594 	struct hme *hmep = arg;
595 	uint32_t frame;
596 	uint32_t tmp_mif;
597 	uint32_t tmp_xif;
598 
599 	tmp_mif = GET_MIFREG(mif_cfg);
600 	tmp_xif = GET_MACREG(xifc);
601 
602 	switch (phyad) {
603 	case HME_EXTERNAL_PHYAD:
604 		PUT_MIFREG(mif_cfg, tmp_mif | HME_MIF_CFGPS);
605 		PUT_MACREG(xifc, tmp_xif | BMAC_XIFC_MIIBUFDIS);
606 		break;
607 	case HME_INTERNAL_PHYAD:
608 		PUT_MIFREG(mif_cfg, tmp_mif & ~(HME_MIF_CFGPS));
609 		PUT_MACREG(xifc, tmp_xif & ~(BMAC_XIFC_MIIBUFDIS));
610 		break;
611 	default:
612 		return;
613 	}
614 
615 	if (!hmep->hme_frame_enable) {
616 		hme_bb_mii_write(hmep, phyad, regad, data);
617 		PUT_MACREG(xifc, tmp_xif);
618 		PUT_MIFREG(mif_cfg, tmp_mif);
619 		return;
620 	}
621 
622 	PUT_MIFREG(mif_frame,
623 	    HME_MIF_FRWRITE | (phyad << HME_MIF_FRPHYAD_SHIFT) |
624 	    (regad << HME_MIF_FRREGAD_SHIFT) | data);
625 /*
626  *	HMEDELAY((*framerp & HME_MIF_FRTA0), HMEMAXRSTDELAY);
627  */
628 	HMEDELAY((GET_MIFREG(mif_frame) & HME_MIF_FRTA0), 300);
629 	frame = GET_MIFREG(mif_frame);
630 	PUT_MACREG(xifc, tmp_xif);
631 	PUT_MIFREG(mif_cfg, tmp_mif);
632 	CHECK_MIFREG();
633 	if ((frame & HME_MIF_FRTA0) == 0) {
634 		HME_FAULT_MSG1(hmep, SEVERITY_MID, MII_MSG,
635 		    "MIF Write failure");
636 	}
637 }
638 
639 static void
640 hme_mii_notify(void *arg, link_state_t link)
641 {
642 	struct hme *hmep = arg;
643 
644 	if (link == LINK_STATE_UP) {
645 		(void) hmeinit(hmep);
646 	}
647 	mac_link_update(hmep->hme_mh, link);
648 }
649 
650 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<  LOADABLE ENTRIES  >>>>>>>>>>>>>>>>>>>>>>> */
651 
652 int
653 _init(void)
654 {
655 	int	status;
656 
657 	mac_init_ops(&hme_dev_ops, "hme");
658 	if ((status = mod_install(&modlinkage)) != 0) {
659 		mac_fini_ops(&hme_dev_ops);
660 	}
661 	return (status);
662 }
663 
664 int
665 _fini(void)
666 {
667 	int	status;
668 
669 	if ((status = mod_remove(&modlinkage)) == 0) {
670 		mac_fini_ops(&hme_dev_ops);
671 	}
672 	return (status);
673 }
674 
675 int
676 _info(struct modinfo *modinfop)
677 {
678 	return (mod_info(&modlinkage, modinfop));
679 }
680 
681 /*
682  * ddi_dma_sync() a TMD or RMD descriptor.
683  */
684 #define	HMESYNCRMD(num, who)				\
685 	(void) ddi_dma_sync(hmep->hme_rmd_dmah,		\
686 	    (num * sizeof (struct hme_rmd)),		\
687 	    sizeof (struct hme_rmd),			\
688 	    who)
689 
690 #define	HMESYNCTMD(num, who)				\
691 	(void) ddi_dma_sync(hmep->hme_tmd_dmah,		\
692 	    (num * sizeof (struct hme_tmd)),		\
693 	    sizeof (struct hme_tmd),			\
694 	    who)
695 
696 /*
697  * Ethernet broadcast address definition.
698  */
699 static	struct ether_addr	etherbroadcastaddr = {
700 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
701 };
702 
703 /*
704  * MIB II broadcast/multicast packets
705  */
706 #define	IS_BROADCAST(pkt) (bcmp(pkt, &etherbroadcastaddr, ETHERADDRL) == 0)
707 #define	IS_MULTICAST(pkt) ((pkt[0] & 01) == 1)
708 #define	BUMP_InNUcast(hmep, pkt) \
709 	if (IS_MULTICAST(pkt)) {			       \
710 		if (IS_BROADCAST(pkt)) {		       \
711 			hmep->hme_brdcstrcv++;		       \
712 		} else {				       \
713 			hmep->hme_multircv++;		       \
714 		}					       \
715 	}
716 #define	BUMP_OutNUcast(hmep, pkt) \
717 	if (IS_MULTICAST(pkt)) {			       \
718 		if (IS_BROADCAST(pkt)) {		       \
719 			hmep->hme_brdcstxmt++;		       \
720 		} else {				       \
721 			hmep->hme_multixmt++;		       \
722 		}					       \
723 	}
724 
725 static int
726 hme_create_prop_from_kw(dev_info_t *dip, char *vpdname, char *vpdstr)
727 {
728 	char propstr[80];
729 	int i, needprop = 0;
730 	struct ether_addr local_mac;
731 
732 	if (strcmp(vpdname, "NA") == 0) {
733 		(void) strcpy(propstr, "local-mac-address");
734 		needprop = 1;
735 	} else if (strcmp(vpdname, "Z0") == 0) {
736 		(void) strcpy(propstr, "model");
737 		needprop = 1;
738 	} else if (strcmp(vpdname, "Z1") == 0) {
739 		(void) strcpy(propstr, "board-model");
740 		needprop = 1;
741 	}
742 
743 	if (needprop == 1) {
744 
745 		if (strcmp(propstr, "local-mac-address") == 0) {
746 			for (i = 0; i < ETHERADDRL; i++)
747 				local_mac.ether_addr_octet[i] =
748 				    (uchar_t)vpdstr[i];
749 			if (ddi_prop_create(DDI_DEV_T_NONE, dip,
750 			    DDI_PROP_CANSLEEP, propstr,
751 			    (char *)local_mac.ether_addr_octet, ETHERADDRL)
752 			    != DDI_SUCCESS) {
753 				return (DDI_FAILURE);
754 			}
755 		} else {
756 			if (ddi_prop_create(DDI_DEV_T_NONE, dip,
757 			    DDI_PROP_CANSLEEP, propstr, vpdstr,
758 			    strlen(vpdstr)+1) != DDI_SUCCESS) {
759 				return (DDI_FAILURE);
760 			}
761 		}
762 	}
763 	return (0);
764 }
765 
766 /*
767  * Get properties from old VPD
768  * for PCI cards
769  */
770 static int
771 hme_get_oldvpd_props(dev_info_t *dip, int vpd_base)
772 {
773 	struct hme *hmep;
774 	int vpd_start, vpd_len, kw_start, kw_len, kw_ptr;
775 	char kw_namestr[3];
776 	char kw_fieldstr[256];
777 	int i;
778 
779 	hmep = ddi_get_driver_private(dip);
780 
781 	vpd_start = vpd_base;
782 
783 	if ((GET_ROM8(&hmep->hme_romp[vpd_start]) & 0xff) != 0x90) {
784 		return (1); /* error */
785 	} else {
786 		vpd_len = 9;
787 	}
788 
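	/*
	 * Each keyword record in the old-style VPD is two name bytes,
	 * a one-byte length, and then <length> bytes of data; the loop
	 * below simply walks those records.
	 */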
789 	/* Get local-mac-address */
790 	kw_start = vpd_start + 3; /* Location of 1st keyword */
791 	kw_ptr = kw_start;
792 	while ((kw_ptr - kw_start) < vpd_len) { /* Get all keywords */
793 		kw_namestr[0] = GET_ROM8(&hmep->hme_romp[kw_ptr]);
794 		kw_namestr[1] = GET_ROM8(&hmep->hme_romp[kw_ptr+1]);
795 		kw_namestr[2] = '\0';
796 		kw_len = (int)(GET_ROM8(&hmep->hme_romp[kw_ptr+2]) & 0xff);
797 		for (i = 0, kw_ptr += 3; i < kw_len; i++)
798 			kw_fieldstr[i] = GET_ROM8(&hmep->hme_romp[kw_ptr+i]);
799 		kw_fieldstr[i] = '\0';
800 		if (hme_create_prop_from_kw(dip, kw_namestr, kw_fieldstr)) {
801 			return (DDI_FAILURE);
802 		}
803 		kw_ptr += kw_len;
804 	} /* next keyword */
805 
806 	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, "model",
807 	    "SUNW,cheerio", strlen("SUNW,cheerio")+1) != DDI_SUCCESS) {
808 		return (DDI_FAILURE);
809 	}
810 	return (0);
811 }
812 
813 
814 /*
815  * Get properties from new VPD
816  * for CompactPCI cards
817  */
818 static int
819 hme_get_newvpd_props(dev_info_t *dip, int vpd_base)
820 {
821 	struct hme *hmep;
822 	int vpd_start, vpd_len, kw_start, kw_len, kw_ptr;
823 	char kw_namestr[3];
824 	char kw_fieldstr[256];
825 	int maxvpdsize, i;
826 
827 	hmep = ddi_get_driver_private(dip);
828 
829 	maxvpdsize = 1024; /* Real size not known until after it is read */
830 
831 	vpd_start = (int)((GET_ROM8(&(hmep->hme_romp[vpd_base+1])) & 0xff) |
832 	    ((GET_ROM8(&hmep->hme_romp[vpd_base+2]) & 0xff) << 8)) +3;
833 	vpd_start = vpd_base + vpd_start;
834 	while (vpd_start < (vpd_base + maxvpdsize)) { /* Get all VPDs */
835 		if ((GET_ROM8(&hmep->hme_romp[vpd_start]) & 0xff) != 0x90) {
836 			break; /* no VPD found */
837 		} else {
838 			vpd_len = (int)((GET_ROM8(&hmep->hme_romp[vpd_start
839 			    + 1]) & 0xff) | (GET_ROM8(&hmep->hme_romp[vpd_start
840 			    + 2]) & 0xff) << 8);
841 		}
842 		/* Get all keywords in this VPD */
843 		kw_start = vpd_start + 3; /* Location of 1st keyword */
844 		kw_ptr = kw_start;
845 		while ((kw_ptr - kw_start) < vpd_len) { /* Get all keywords */
846 			kw_namestr[0] = GET_ROM8(&hmep->hme_romp[kw_ptr]);
847 			kw_namestr[1] = GET_ROM8(&hmep->hme_romp[kw_ptr+1]);
848 			kw_namestr[2] = '\0';
849 			kw_len =
850 			    (int)(GET_ROM8(&hmep->hme_romp[kw_ptr+2]) & 0xff);
851 			for (i = 0, kw_ptr += 3; i < kw_len; i++)
852 				kw_fieldstr[i] =
853 				    GET_ROM8(&hmep->hme_romp[kw_ptr+i]);
854 			kw_fieldstr[i] = '\0';
855 			if (hme_create_prop_from_kw(dip, kw_namestr,
856 			    kw_fieldstr)) {
857 				return (DDI_FAILURE);
858 			}
859 			kw_ptr += kw_len;
860 		} /* next keyword */
861 		vpd_start += (vpd_len + 3);
862 	} /* next VPD */
863 	return (0);
864 }
865 
866 
867 /*
868  * Get properties from VPD
869  */
870 static int
871 hme_get_vpd_props(dev_info_t *dip)
872 {
873 	struct hme *hmep;
874 	int v0, v1, vpd_base = 0;
875 	int i, epromsrchlimit;
876 
877 
878 	hmep = ddi_get_driver_private(dip);
879 
880 	v0 = (int)(GET_ROM8(&(hmep->hme_romp[0])));
881 	v1 = (int)(GET_ROM8(&(hmep->hme_romp[1])));
882 	v0 = ((v0 & 0xff) << 8 | v1);
883 
884 	if ((v0 & 0xffff) != 0x55aa) {
885 		cmn_err(CE_NOTE, "valid PCI PROM not found");
886 		return (1);
887 	}
888 
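	/*
	 * 0x55aa is the PCI expansion ROM header signature.  The search
	 * below locates the "PCIR" data structure, whose bytes at
	 * offsets 8-9 hold the pointer to the Vital Product Data (per
	 * the PCI Local Bus spec).
	 */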
889 	epromsrchlimit = 4096;
890 	for (i = 2; i < epromsrchlimit; i++) {
891 		/* "PCIR" */
892 		if (((GET_ROM8(&(hmep->hme_romp[i])) & 0xff) == 'P') &&
893 		    ((GET_ROM8(&(hmep->hme_romp[i+1])) & 0xff) == 'C') &&
894 		    ((GET_ROM8(&(hmep->hme_romp[i+2])) & 0xff) == 'I') &&
895 		    ((GET_ROM8(&(hmep->hme_romp[i+3])) & 0xff) == 'R')) {
896 			vpd_base =
897 			    (int)((GET_ROM8(&(hmep->hme_romp[i+8])) & 0xff) |
898 			    (GET_ROM8(&(hmep->hme_romp[i+9])) & 0xff) << 8);
899 			break; /* VPD pointer found */
900 		}
901 	}
902 
903 	/* No VPD found */
904 	if (vpd_base == 0) {
905 		cmn_err(CE_NOTE, "Vital Product Data pointer not found");
906 		return (1);
907 	}
908 
909 	v0 = (int)(GET_ROM8(&(hmep->hme_romp[vpd_base])));
910 	if (v0 == 0x82) {
911 		if (hme_get_newvpd_props(dip, vpd_base))
912 			return (1);
913 		return (0);
914 	} else if (v0 == 0x90) {
915 		/* If we are a SUNW,qfe card, look for the Nth "NA" descr */
916 		if ((GET_ROM8(&hmep->hme_romp[vpd_base + 12]) != 0x79) &&
917 		    GET_ROM8(&hmep->hme_romp[vpd_base + 4 * 12]) == 0x79) {
918 			vpd_base += hmep->hme_devno * 12;
919 		}
920 		if (hme_get_oldvpd_props(dip, vpd_base))
921 			return (1);
922 		return (0);
923 	} else
924 		return (1);	/* unknown start byte in VPD */
925 }
926 
927 /*
928  * For x86, the BIOS doesn't map the PCI Rom register for the qfe
929  * cards, so we have to extract it from the ebus bridge that is
930  * function zero of the same device.  This is a bit of an ugly hack.
931  * (The ebus bridge leaves the entire ROM mapped at base address
932  * register 0x10.)
933  */
934 
935 typedef struct {
936 	struct hme 		*hmep;
937 	dev_info_t		*parent;
938 	uint8_t			bus, dev;
939 	ddi_acc_handle_t	acch;
940 	caddr_t			romp;
941 } ebus_rom_t;
942 
943 static int
944 hme_mapebusrom(dev_info_t *dip, void *arg)
945 {
946 	int		*regs;
947 	unsigned	nregs;
948 	int		reg;
949 	ebus_rom_t	*rom = arg;
950 	struct hme	*hmep = rom->hmep;
951 
952 	/*
953 	 * We only want to look at our peers.  Skip our parent.
954 	 */
955 	if (dip == rom->parent) {
956 		return (DDI_WALK_PRUNESIB);
957 	}
958 
959 	if (ddi_get_parent(dip) != rom->parent)
960 		return (DDI_WALK_CONTINUE);
961 
962 	if ((ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0,
963 	    "reg", &regs, &nregs)) != DDI_PROP_SUCCESS) {
964 		return (DDI_WALK_PRUNECHILD);
965 	}
966 
967 	if (nregs < 1) {
968 		ddi_prop_free(regs);
969 		return (DDI_WALK_PRUNECHILD);
970 	}
971 	reg = regs[0];
972 	ddi_prop_free(regs);
973 
974 	/*
975 	 * Look for function 0 on our bus and device.  If the device doesn't
976 	 * match, it might be an alternate peer, in which case we don't want
977 	 * to examine any of its children.
978 	 */
979 	if ((PCI_REG_BUS_G(reg) != rom->bus) ||
980 	    (PCI_REG_DEV_G(reg) != rom->dev) ||
981 	    (PCI_REG_FUNC_G(reg) != 0)) {
982 		return (DDI_WALK_PRUNECHILD);
983 	}
984 
985 	(void) ddi_regs_map_setup(dip, 1, &rom->romp, 0, 0, &hmep->hme_dev_attr,
986 	    &rom->acch);
987 	/*
988 	 * If we can't map the registers, the caller will notice that
989 	 * the acch is NULL.
990 	 */
991 	return (DDI_WALK_TERMINATE);
992 }
993 
994 static int
995 hmeget_promebus(dev_info_t *dip)
996 {
997 	ebus_rom_t	rom;
998 	int		*regs;
999 	unsigned	nregs;
1000 	struct hme	*hmep;
1001 
1002 	hmep = ddi_get_driver_private(dip);
1003 
1004 	bzero(&rom, sizeof (rom));
1005 
1006 	/*
1007 	 * For x86, the BIOS doesn't map the PCI Rom register for the qfe
1008 	 * cards, so we have to extract it from the eBus bridge that is
1009 	 * function zero.  This is a bit of an ugly hack.
1010 	 */
1011 	if ((ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0,
1012 	    "reg", &regs, &nregs)) != DDI_PROP_SUCCESS) {
1013 		return (DDI_FAILURE);
1014 	}
1015 
1016 	if (nregs < 5) {
1017 		ddi_prop_free(regs);
1018 		return (DDI_FAILURE);
1019 	}
1020 	rom.hmep = hmep;
1021 	rom.bus = PCI_REG_BUS_G(regs[0]);
1022 	rom.dev = PCI_REG_DEV_G(regs[0]);
1023 	hmep->hme_devno = rom.dev;
1024 	rom.parent = ddi_get_parent(dip);
1025 
1026 	/*
1027 	 * The implementation of ddi_walk_devs says that we must not
1028 	 * be called during autoconfiguration.  However, it turns out
1029 	 * that it is safe to call this during our attach routine,
1030 	 * because we are not a nexus device.
1031 	 *
1032 	 * Previously we rooted our search at our immediate parent,
1033 	 * but this triggered an assertion panic in debug kernels.
1034 	 */
1035 	ddi_walk_devs(ddi_root_node(), hme_mapebusrom, &rom);
1036 
1037 	if (rom.acch) {
1038 		hmep->hme_romh = rom.acch;
1039 		hmep->hme_romp = (unsigned char *)rom.romp;
1040 		return (DDI_SUCCESS);
1041 	}
1042 	return (DDI_FAILURE);
1043 }
1044 
1045 static int
1046 hmeget_promprops(dev_info_t *dip)
1047 {
1048 	struct hme *hmep;
1049 	int rom_bar;
1050 	ddi_acc_handle_t cfg_handle;
1051 	struct {
1052 		uint16_t vendorid;
1053 		uint16_t devid;
1054 		uint16_t command;
1055 		uint16_t status;
1056 		uint32_t junk1;
1057 		uint8_t cache_line;
1058 		uint8_t latency;
1059 		uint8_t header;
1060 		uint8_t bist;
1061 		uint32_t base;
1062 		uint32_t base14;
1063 		uint32_t base18;
1064 		uint32_t base1c;
1065 		uint32_t base20;
1066 		uint32_t base24;
1067 		uint32_t base28;
1068 		uint32_t base2c;
1069 		uint32_t base30;
1070 	} *cfg_ptr;
1071 
1072 	hmep = ddi_get_driver_private(dip);
1073 
1074 
1075 	/*
1076 	 * map configuration space
1077 	 */
1078 	if (ddi_regs_map_setup(hmep->dip, 0, (caddr_t *)&cfg_ptr,
1079 	    0, 0, &hmep->hme_dev_attr, &cfg_handle)) {
1080 		return (DDI_FAILURE);
1081 	}
1082 
1083 	/*
1084 	 * Enable bus-master and memory accesses
1085 	 */
1086 	ddi_put16(cfg_handle, &cfg_ptr->command,
1087 	    PCI_COMM_SERR_ENABLE | PCI_COMM_PARITY_DETECT |
1088 	    PCI_COMM_MAE | PCI_COMM_ME);
1089 
1090 	/*
1091 	 * Enable rom accesses
1092 	 */
1093 	rom_bar = ddi_get32(cfg_handle, &cfg_ptr->base30);
1094 	ddi_put32(cfg_handle, &cfg_ptr->base30, rom_bar | 1);
1095 
1096 
1097 	if ((ddi_regs_map_setup(dip, 2, (caddr_t *)&(hmep->hme_romp), 0, 0,
1098 	    &hmep->hme_dev_attr, &hmep->hme_romh) != DDI_SUCCESS) &&
1099 	    (hmeget_promebus(dip) != DDI_SUCCESS)) {
1100 
1101 		if (cfg_ptr)
1102 			ddi_regs_map_free(&cfg_handle);
1103 		return (DDI_FAILURE);
1104 	} else {
1105 		if (hme_get_vpd_props(dip))
1106 			return (DDI_FAILURE);
1107 	}
1108 	if (hmep->hme_romp)
1109 		ddi_regs_map_free(&hmep->hme_romh);
1110 	if (cfg_ptr)
1111 		ddi_regs_map_free(&cfg_handle);
1112 	return (DDI_SUCCESS);
1113 
1114 }
1115 
1116 static void
1117 hmeget_hm_rev_property(struct hme *hmep)
1118 {
1119 	int	hm_rev;
1120 
1121 
1122 	hm_rev = hmep->asic_rev;
1123 	switch (hm_rev) {
1124 	case HME_2P1_REVID:
1125 	case HME_2P1_REVID_OBP:
1126 		HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
1127 		    "SBus 2.1 Found (Rev Id = %x)", hm_rev);
1128 		hmep->hme_frame_enable = 1;
1129 		break;
1130 
1131 	case HME_2P0_REVID:
1132 		HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
1133 		    "SBus 2.0 Found (Rev Id = %x)", hm_rev);
1134 		break;
1135 
1136 	case HME_1C0_REVID:
1137 		HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
1138 		    "PCI IO 1.0 Found (Rev Id = %x)", hm_rev);
1139 		break;
1140 
1141 	default:
1142 		HME_FAULT_MSG3(hmep, SEVERITY_NONE, DISPLAY_MSG,
1143 		    "%s (Rev Id = %x) Found",
1144 		    (hm_rev == HME_2C0_REVID) ? "PCI IO 2.0" : "Sbus", hm_rev);
1145 		hmep->hme_frame_enable = 1;
1146 		hmep->hme_lance_mode_enable = 1;
1147 		hmep->hme_rxcv_enable = 1;
1148 		break;
1149 	}
1150 }
1151 
1152 /*
1153  * Interface exists: make available by filling in network interface
1154  * record.  System will initialize the interface when it is ready
1155  * to accept packets.
1156  */
1157 int
1158 hmeattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1159 {
1160 	struct hme *hmep;
1161 	mac_register_t *macp = NULL;
1162 	int 	regno;
1163 	int hm_rev = 0;
1164 	int prop_len = sizeof (int);
1165 	ddi_acc_handle_t cfg_handle;
1166 	struct {
1167 		uint16_t vendorid;
1168 		uint16_t devid;
1169 		uint16_t command;
1170 		uint16_t status;
1171 		uint8_t revid;
1172 		uint8_t j1;
1173 		uint16_t j2;
1174 	} *cfg_ptr;
1175 
1176 	switch (cmd) {
1177 	case DDI_ATTACH:
1178 		break;
1179 
1180 	case DDI_RESUME:
1181 		if ((hmep = ddi_get_driver_private(dip)) == NULL)
1182 			return (DDI_FAILURE);
1183 
1184 		hmep->hme_flags &= ~HMESUSPENDED;
1185 
1186 		mii_resume(hmep->hme_mii);
1187 
1188 		if (hmep->hme_started)
1189 			(void) hmeinit(hmep);
1190 		return (DDI_SUCCESS);
1191 
1192 	default:
1193 		return (DDI_FAILURE);
1194 	}
1195 
1196 	/*
1197 	 * Allocate soft device data structure
1198 	 */
1199 	hmep = kmem_zalloc(sizeof (*hmep), KM_SLEEP);
1200 
1201 	/*
1202 	 * Might as well set up elements of data structure
1203 	 */
1204 	hmep->dip =		dip;
1205 	hmep->instance = 	ddi_get_instance(dip);
1206 	hmep->pagesize =	ddi_ptob(dip, (ulong_t)1); /* IOMMU PSize */
1207 
1208 	/*
1209 	 * Might as well set up the driver private
1210 	 * structure as part of the dip.
1211 	 */
1212 	ddi_set_driver_private(dip, hmep);
1213 
1214 	/*
1215 	 * Reject this device if it's in a slave-only slot.
1216 	 */
1217 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
1218 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1219 		    "Dev not used - dev in slave only slot");
1220 		goto error_state;
1221 	}
1222 
1223 	/*
1224 	 * Map in the device registers.
1225 	 *
1226 	 * Reg # 0 is the Global register set
1227 	 * Reg # 1 is the ETX register set
1228 	 * Reg # 2 is the ERX register set
1229 	 * Reg # 3 is the BigMAC register set.
1230 	 * Reg # 4 is the MIF register set
1231 	 */
1232 	if (ddi_dev_nregs(dip, &regno) != (DDI_SUCCESS)) {
1233 		HME_FAULT_MSG2(hmep, SEVERITY_HIGH, INIT_MSG,
1234 		    ddi_nregs_fail_msg, regno);
1235 		goto error_state;
1236 	}
1237 
1238 	switch (regno) {
1239 	case 5:
1240 		hmep->hme_cheerio_mode = 0;
1241 		break;
1242 	case 2:
1243 	case 3: /* for hot swap/plug, there will be 3 entries in "reg" prop */
1244 		hmep->hme_cheerio_mode = 1;
1245 		break;
1246 	default:
1247 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
1248 		    bad_num_regs_msg);
1249 		goto error_state;
1250 	}
1251 
1252 	/* Initialize device attributes structure */
1253 	hmep->hme_dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1254 
1255 	if (hmep->hme_cheerio_mode)
1256 		hmep->hme_dev_attr.devacc_attr_endian_flags =
1257 		    DDI_STRUCTURE_LE_ACC;
1258 	else
1259 		hmep->hme_dev_attr.devacc_attr_endian_flags =
1260 		    DDI_STRUCTURE_BE_ACC;
1261 
1262 	hmep->hme_dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1263 
1264 	if (hmep->hme_cheerio_mode) {
1265 		uint8_t		oldLT;
1266 		uint8_t		newLT = 0;
1267 		dev_info_t	*pdip;
1268 		const char	*pdrvname;
1269 
1270 		/*
1271 		 * Map the PCI config space
1272 		 */
1273 		if (pci_config_setup(dip, &hmep->pci_config_handle) !=
1274 		    DDI_SUCCESS) {
1275 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1276 			    "pci_config_setup() failed");
1277 			goto error_state;
1278 		}
1279 
1280 		if (ddi_regs_map_setup(dip, 1,
1281 		    (caddr_t *)&(hmep->hme_globregp), 0, 0,
1282 		    &hmep->hme_dev_attr, &hmep->hme_globregh)) {
1283 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1284 			    mregs_4global_reg_fail_msg);
1285 			goto error_unmap;
1286 		}
1287 		hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
1288 		    hmep->hme_mifregh = hmep->hme_globregh;
1289 
1290 		hmep->hme_etxregp =
1291 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x2000);
1292 		hmep->hme_erxregp =
1293 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x4000);
1294 		hmep->hme_bmacregp =
1295 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x6000);
1296 		hmep->hme_mifregp =
1297 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x7000);
1298 
1299 		/*
1300 		 * Get parent pci bridge info.
1301 		 */
1302 		pdip = ddi_get_parent(dip);
1303 		pdrvname = ddi_driver_name(pdip);
1304 
1305 		oldLT = pci_config_get8(hmep->pci_config_handle,
1306 		    PCI_CONF_LATENCY_TIMER);
1307 		/*
1308 		 * Honor value set in /etc/system
1309 		 * "set hme:pci_latency_timer=0xYY"
1310 		 */
1311 		if (pci_latency_timer)
1312 			newLT = pci_latency_timer;
1313 		/*
1314 		 * Modify LT for simba
1315 		 */
1316 		else if (strcmp("simba", pdrvname) == 0)
1317 			newLT = 0xf0;
1318 		/*
1319 		 * Ensure a minimum cheerio latency timer of 0x50.
1320 		 * OBP or the PCI bridge normally derives this value
1321 		 * from cheerio's min_grant:
1322 		 * min_grant * 8 (at 33MHz) = 0xa * 0x8 = 0x50.
1323 		 * Some systems set the cheerio LT to only 0x40.
1324 		 */
1325 		else if (oldLT < 0x40)
1326 			newLT = 0x50;
1327 
1328 		/*
1329 		 * Now program cheerio's pci latency timer with newLT
1330 		 */
1331 		if (newLT)
1332 			pci_config_put8(hmep->pci_config_handle,
1333 			    PCI_CONF_LATENCY_TIMER, (uchar_t)newLT);
1334 	} else { /* Map register sets */
1335 		if (ddi_regs_map_setup(dip, 0,
1336 		    (caddr_t *)&(hmep->hme_globregp), 0, 0,
1337 		    &hmep->hme_dev_attr, &hmep->hme_globregh)) {
1338 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1339 			    mregs_4global_reg_fail_msg);
1340 			goto error_state;
1341 		}
1342 		if (ddi_regs_map_setup(dip, 1,
1343 		    (caddr_t *)&(hmep->hme_etxregp), 0, 0,
1344 		    &hmep->hme_dev_attr, &hmep->hme_etxregh)) {
1345 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1346 			    mregs_4etx_reg_fail_msg);
1347 			goto error_unmap;
1348 		}
1349 		if (ddi_regs_map_setup(dip, 2,
1350 		    (caddr_t *)&(hmep->hme_erxregp), 0, 0,
1351 		    &hmep->hme_dev_attr, &hmep->hme_erxregh)) {
1352 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1353 			    mregs_4erx_reg_fail_msg);
1354 			goto error_unmap;
1355 		}
1356 		if (ddi_regs_map_setup(dip, 3,
1357 		    (caddr_t *)&(hmep->hme_bmacregp), 0, 0,
1358 		    &hmep->hme_dev_attr, &hmep->hme_bmacregh)) {
1359 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1360 			    mregs_4bmac_reg_fail_msg);
1361 			goto error_unmap;
1362 		}
1363 
1364 		if (ddi_regs_map_setup(dip, 4,
1365 		    (caddr_t *)&(hmep->hme_mifregp), 0, 0,
1366 		    &hmep->hme_dev_attr, &hmep->hme_mifregh)) {
1367 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1368 			    mregs_4mif_reg_fail_msg);
1369 			goto error_unmap;
1370 		}
1371 	} /* Endif cheerio_mode */
1372 
1373 	/*
1374 	 * Based on the hm-rev, set some capabilities
1375 	 * Set up default capabilities for HM 2.0
1376 	 */
1377 	hmep->hme_frame_enable = 0;
1378 	hmep->hme_lance_mode_enable = 0;
1379 	hmep->hme_rxcv_enable = 0;
1380 
1381 	/* NEW routine to get the properties */
1382 
1383 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, hmep->dip, 0, "hm-rev",
1384 	    (caddr_t)&hm_rev, &prop_len) == DDI_PROP_SUCCESS) {
1385 
1386 		hmep->asic_rev = hm_rev;
1387 		hmeget_hm_rev_property(hmep);
1388 	} else {
1389 		/*
1390 		 * The hm-rev property was not found, so this is a
1391 		 * hot-inserted card whose FCode was never interpreted.
1392 		 * Get the revision from revid in config space after mapping.
1393 		 */
1394 		if (ddi_regs_map_setup(hmep->dip, 0, (caddr_t *)&cfg_ptr,
1395 		    0, 0, &hmep->hme_dev_attr, &cfg_handle)) {
1396 			return (DDI_FAILURE);
1397 		}
1398 		/*
1399 		 * Since this is a cheerio-based PCI card, we write 0xC in
1400 		 * the top 4 bits (4-7) of hm-rev and retain the bottom 4
1401 		 * bits (0-3) for the cheerio version (1.0/2.0 = 0xC0/0xC1).
1402 		 */
1403 		hm_rev = ddi_get8(cfg_handle, &cfg_ptr->revid);
1404 		hm_rev = HME_1C0_REVID | (hm_rev & HME_REV_VERS_MASK);
1405 		hmep->asic_rev = hm_rev;
1406 		if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
1407 		    "hm-rev", (caddr_t)&hm_rev, sizeof (hm_rev)) !=
1408 		    DDI_SUCCESS) {
1409 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, AUTOCONFIG_MSG,
1410 			    "ddi_prop_create error for hm_rev");
1411 		}
1412 		ddi_regs_map_free(&cfg_handle);
1413 
1414 		hmeget_hm_rev_property(hmep);
1415 
1416 		/* get info via VPD */
1417 		if (hmeget_promprops(dip) != DDI_SUCCESS) {
1418 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, AUTOCONFIG_MSG,
1419 			    "no promprops");
1420 		}
1421 	}
1422 
1423 	if (ddi_intr_hilevel(dip, 0)) {
1424 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, NFATAL_ERR_MSG,
1425 		    "high-level interrupts are not supported");
1426 		goto error_unmap;
1427 	}
1428 
1429 	/*
1430 	 * Get intr. block cookie so that mutex locks can be initialized.
1431 	 */
1432 	if (ddi_get_iblock_cookie(dip, 0, &hmep->hme_cookie) != DDI_SUCCESS)
1433 		goto error_unmap;
1434 
1435 	/*
1436 	 * Initialize mutexes for this device.
1437 	 */
1438 	mutex_init(&hmep->hme_xmitlock, NULL, MUTEX_DRIVER, hmep->hme_cookie);
1439 	mutex_init(&hmep->hme_intrlock, NULL, MUTEX_DRIVER, hmep->hme_cookie);
1440 
1441 	/*
1442 	 * Quiesce the hardware.
1443 	 */
1444 	(void) hmestop(hmep);
1445 
1446 	/*
1447 	 * Add interrupt to system
1448 	 */
1449 	if (ddi_add_intr(dip, 0, (ddi_iblock_cookie_t *)NULL,
1450 	    (ddi_idevice_cookie_t *)NULL, hmeintr, (caddr_t)hmep)) {
1451 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1452 		    add_intr_fail_msg);
1453 		goto error_mutex;
1454 	}
1455 
1456 	/*
1457 	 * Set up the ethernet mac address.
1458 	 */
1459 	hme_setup_mac_address(hmep, dip);
1460 
1461 	if (!hmeinit_xfer_params(hmep))
1462 		goto error_intr;
1463 
1464 	if (hmeburstsizes(hmep) == DDI_FAILURE) {
1465 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG, burst_size_msg);
1466 		goto error_intr;
1467 	}
1468 
1469 	if (hmeallocthings(hmep) != DDI_SUCCESS) {
1470 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
1471 		    "resource allocation failed");
1472 		goto error_intr;
1473 	}
1474 
1475 	if (hmeallocbufs(hmep) != DDI_SUCCESS) {
1476 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
1477 		    "buffer allocation failed");
1478 		goto error_intr;
1479 	}
1480 
1481 	hmestatinit(hmep);
1482 
1483 	/* our external (preferred) PHY is at address 0 */
1484 	(void) ddi_prop_update_int(DDI_DEV_T_NONE, dip, "first-phy", 0);
1485 
1486 	hmep->hme_mii = mii_alloc(hmep, dip, &hme_mii_ops);
1487 	if (hmep->hme_mii == NULL) {
1488 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
1489 		    "mii_alloc failed");
1490 		goto error_intr;
1491 	}
1492 	/* force a probe for the PHY */
1493 	mii_probe(hmep->hme_mii);
1494 
1495 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
1496 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
1497 		    "mac_alloc failed");
1498 		goto error_intr;
1499 	}
1500 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
1501 	macp->m_driver = hmep;
1502 	macp->m_dip = dip;
1503 	macp->m_src_addr = hmep->hme_ouraddr.ether_addr_octet;
1504 	macp->m_callbacks = &hme_m_callbacks;
1505 	macp->m_min_sdu = 0;
1506 	macp->m_max_sdu = ETHERMTU;
1507 	macp->m_margin = VLAN_TAGSZ;
1508 	macp->m_priv_props = hme_priv_prop;
1509 	macp->m_priv_prop_count =
1510 	    sizeof (hme_priv_prop) / sizeof (hme_priv_prop[0]);
1511 	if (mac_register(macp, &hmep->hme_mh) != 0) {
1512 		mac_free(macp);
1513 		goto error_intr;
1514 	}
1515 
1516 	mac_free(macp);
1517 
1518 	ddi_report_dev(dip);
1519 	return (DDI_SUCCESS);
1520 
1521 	/*
1522 	 * Failure Exit
1523 	 */
1524 
1525 error_intr:
1526 	if (hmep->hme_cookie)
1527 		ddi_remove_intr(dip, 0, (ddi_iblock_cookie_t)0);
1528 
1529 	if (hmep->hme_mii)
1530 		mii_free(hmep->hme_mii);
1531 
1532 error_mutex:
1533 	mutex_destroy(&hmep->hme_xmitlock);
1534 	mutex_destroy(&hmep->hme_intrlock);
1535 
1536 error_unmap:
1537 	if (hmep->hme_globregh)
1538 		ddi_regs_map_free(&hmep->hme_globregh);
1539 	if (hmep->hme_cheerio_mode == 0) {
1540 		if (hmep->hme_etxregh)
1541 			ddi_regs_map_free(&hmep->hme_etxregh);
1542 		if (hmep->hme_erxregh)
1543 			ddi_regs_map_free(&hmep->hme_erxregh);
1544 		if (hmep->hme_bmacregh)
1545 			ddi_regs_map_free(&hmep->hme_bmacregh);
1546 		if (hmep->hme_mifregh)
1547 			ddi_regs_map_free(&hmep->hme_mifregh);
1548 	} else {
1549 		if (hmep->pci_config_handle)
1550 			(void) pci_config_teardown(&hmep->pci_config_handle);
1551 		hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
1552 		    hmep->hme_mifregh = hmep->hme_globregh = NULL;
1553 	}
1554 
1555 error_state:
1556 	hmefreethings(hmep);
1557 	hmefreebufs(hmep);
1558 
1559 	if (hmep) {
1560 		kmem_free((caddr_t)hmep, sizeof (*hmep));
1561 		ddi_set_driver_private(dip, NULL);
1562 	}
1563 
1564 	return (DDI_FAILURE);
1565 }
1566 
1567 int
1568 hmedetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1569 {
1570 	struct hme *hmep;
1571 
1572 	if ((hmep = ddi_get_driver_private(dip)) == NULL)
1573 		return (DDI_FAILURE);
1574 
1575 	switch (cmd) {
1576 	case DDI_DETACH:
1577 		break;
1578 
1579 	case DDI_SUSPEND:
1580 		mii_suspend(hmep->hme_mii);
1581 		hmep->hme_flags |= HMESUSPENDED;
1582 		hmeuninit(hmep);
1583 		return (DDI_SUCCESS);
1584 
1585 	default:
1586 		return (DDI_FAILURE);
1587 	}
1588 
1589 
1590 	if (mac_unregister(hmep->hme_mh) != 0) {
1591 		return (DDI_FAILURE);
1592 	}
1593 
1594 	/*
1595 	 * Make the driver quiescent; we don't want a failure here
1596 	 * to prevent the detach.  Note that this should be redundant,
1597 	 * since mac_stop should already have called hmeuninit().
1598 	 */
1599 	if (!(hmep->hme_flags & HMESUSPENDED)) {
1600 		(void) hmestop(hmep);
1601 	}
1602 
1603 	if (hmep->hme_mii)
1604 		mii_free(hmep->hme_mii);
1605 
1606 	/*
1607 	 * Remove instance of the intr
1608 	 */
1609 	ddi_remove_intr(dip, 0, (ddi_iblock_cookie_t)0);
1610 
1611 	/*
1612 	 * Unregister kstats.
1613 	 */
1614 	if (hmep->hme_ksp != NULL)
1615 		kstat_delete(hmep->hme_ksp);
1616 	if (hmep->hme_intrstats != NULL)
1617 		kstat_delete(hmep->hme_intrstats);
1618 
1619 	hmep->hme_ksp = NULL;
1620 	hmep->hme_intrstats = NULL;
1621 
1622 	/*
1623 	 * Destroy all mutexes and data structures allocated during
1624 	 * attach time.
1625 	 *
1626 	 * Note: at this time we should be the only thread accessing
1627 	 * the structures for this instance.
1628 	 */
1629 
1630 	if (hmep->hme_globregh)
1631 		ddi_regs_map_free(&hmep->hme_globregh);
1632 	if (hmep->hme_cheerio_mode == 0) {
1633 		if (hmep->hme_etxregh)
1634 			ddi_regs_map_free(&hmep->hme_etxregh);
1635 		if (hmep->hme_erxregh)
1636 			ddi_regs_map_free(&hmep->hme_erxregh);
1637 		if (hmep->hme_bmacregh)
1638 			ddi_regs_map_free(&hmep->hme_bmacregh);
1639 		if (hmep->hme_mifregh)
1640 			ddi_regs_map_free(&hmep->hme_mifregh);
1641 	} else {
1642 		if (hmep->pci_config_handle)
1643 			(void) pci_config_teardown(&hmep->pci_config_handle);
1644 		hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
1645 		    hmep->hme_mifregh = hmep->hme_globregh = NULL;
1646 	}
1647 
1648 	mutex_destroy(&hmep->hme_xmitlock);
1649 	mutex_destroy(&hmep->hme_intrlock);
1650 
1651 	hmefreethings(hmep);
1652 	hmefreebufs(hmep);
1653 
1654 	ddi_set_driver_private(dip, NULL);
1655 	kmem_free(hmep, sizeof (struct hme));
1656 
1657 	return (DDI_SUCCESS);
1658 }
1659 
1660 int
1661 hmequiesce(dev_info_t *dip)
1662 {
1663 	struct hme *hmep;
1664 
1665 	if ((hmep = ddi_get_driver_private(dip)) == NULL)
1666 		return (DDI_FAILURE);
1667 
1668 	(void) hmestop(hmep);
1669 	return (DDI_SUCCESS);
1670 }
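
/*
 * Note: quiesce(9E) is invoked single-threaded with interrupts
 * disabled (e.g. during fast reboot), so the hmestop() call above is
 * expected to busy-wait on device registers rather than sleep.
 */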
1671 
1672 static boolean_t
1673 hmeinit_xfer_params(struct hme *hmep)
1674 {
1675 	int hme_ipg1_conf, hme_ipg2_conf;
1676 	int hme_ipg0_conf, hme_lance_mode_conf;
1677 	int prop_len = sizeof (int);
1678 	dev_info_t *dip;
1679 
1680 	dip = hmep->dip;
1681 
1682 	/*
1683 	 * Set up the start-up values for user-configurable parameters
1684 	 * Get the values from the global variables first.
1685 	 * Use the MASK to limit the value to allowed maximum.
1686 	 */
1687 	hmep->hme_ipg1 = hme_ipg1 & HME_MASK_8BIT;
1688 	hmep->hme_ipg2 = hme_ipg2 & HME_MASK_8BIT;
1689 	hmep->hme_ipg0 = hme_ipg0 & HME_MASK_5BIT;
1690 
1691 	/*
1692 	 * Get the parameter values configured in .conf file.
1693 	 */
1694 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg1",
1695 	    (caddr_t)&hme_ipg1_conf, &prop_len) == DDI_PROP_SUCCESS) {
1696 		hmep->hme_ipg1 = hme_ipg1_conf & HME_MASK_8BIT;
1697 	}
1698 
1699 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg2",
1700 	    (caddr_t)&hme_ipg2_conf, &prop_len) == DDI_PROP_SUCCESS) {
1701 		hmep->hme_ipg2 = hme_ipg2_conf & HME_MASK_8BIT;
1702 	}
1703 
1704 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg0",
1705 	    (caddr_t)&hme_ipg0_conf, &prop_len) == DDI_PROP_SUCCESS) {
1706 		hmep->hme_ipg0 = hme_ipg0_conf & HME_MASK_5BIT;
1707 	}
1708 
1709 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "lance_mode",
1710 	    (caddr_t)&hme_lance_mode_conf, &prop_len) == DDI_PROP_SUCCESS) {
1711 		hmep->hme_lance_mode = hme_lance_mode_conf & HME_MASK_1BIT;
1712 	}
1713 
1714 	return (B_TRUE);
1715 }
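
/*
 * For illustration, the .conf properties read above could be supplied
 * by hypothetical driver.conf(4) entries such as:
 *
 *	ipg0=16; ipg1=8; ipg2=4; lance_mode=1;
 */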
1716 
1717 /*
1718  * Return 0 upon success, 1 on failure.
1719  */
1720 static uint_t
1721 hmestop(struct hme *hmep)
1722 {
1723 	/*
1724 	 * Disable the Tx dma engine.
1725 	 */
1726 	PUT_ETXREG(config, (GET_ETXREG(config) & ~HMET_CONFIG_TXDMA_EN));
1727 	HMEDELAY(((GET_ETXREG(state_mach) & 0x1f) == 0x1), HMEMAXRSTDELAY);
1728 
1729 	/*
1730 	 * Disable the Rx dma engine.
1731 	 */
1732 	PUT_ERXREG(config, (GET_ERXREG(config) & ~HMER_CONFIG_RXDMA_EN));
1733 	HMEDELAY(((GET_ERXREG(state_mach) & 0x3f) == 0), HMEMAXRSTDELAY);
1734 
1735 	/*
1736 	 * By this time all things should be quiet, so hit the
1737 	 * chip with a reset.
1738 	 */
1739 	PUT_GLOBREG(reset, HMEG_RESET_GLOBAL);
1740 
1741 	HMEDELAY((GET_GLOBREG(reset) == 0), HMEMAXRSTDELAY);
1742 	if (GET_GLOBREG(reset)) {
1743 		return (1);
1744 	}
1745 
1746 	CHECK_GLOBREG();
1747 	return (0);
1748 }
1749 
1750 static int
1751 hmestat_kstat_update(kstat_t *ksp, int rw)
1752 {
1753 	struct hme *hmep;
1754 	struct hmekstat *hkp;
1755 
1756 	hmep = (struct hme *)ksp->ks_private;
1757 	hkp = (struct hmekstat *)ksp->ks_data;
1758 
1759 	if (rw != KSTAT_READ)
1760 		return (EACCES);
1761 
1762 	/*
1763 	 * Update all the stats by reading all the counter registers.
1764 	 * Counter register stats are not updated till they overflow
1765 	 * and interrupt.
1766 	 */
1767 
1768 	mutex_enter(&hmep->hme_xmitlock);
1769 	if (hmep->hme_flags & HMERUNNING) {
1770 		hmereclaim(hmep);
1771 		hmesavecntrs(hmep);
1772 	}
1773 	mutex_exit(&hmep->hme_xmitlock);
1774 
1775 	hkp->hk_cvc.value.ul		= hmep->hme_cvc;
1776 	hkp->hk_lenerr.value.ul		= hmep->hme_lenerr;
1777 	hkp->hk_buff.value.ul		= hmep->hme_buff;
1778 	hkp->hk_missed.value.ul		= hmep->hme_missed;
1779 	hkp->hk_allocbfail.value.ul	= hmep->hme_allocbfail;
1780 	hkp->hk_babl.value.ul		= hmep->hme_babl;
1781 	hkp->hk_tmder.value.ul		= hmep->hme_tmder;
1782 	hkp->hk_txlaterr.value.ul	= hmep->hme_txlaterr;
1783 	hkp->hk_rxlaterr.value.ul	= hmep->hme_rxlaterr;
1784 	hkp->hk_slvparerr.value.ul	= hmep->hme_slvparerr;
1785 	hkp->hk_txparerr.value.ul	= hmep->hme_txparerr;
1786 	hkp->hk_rxparerr.value.ul	= hmep->hme_rxparerr;
1787 	hkp->hk_slverrack.value.ul	= hmep->hme_slverrack;
1788 	hkp->hk_txerrack.value.ul	= hmep->hme_txerrack;
1789 	hkp->hk_rxerrack.value.ul	= hmep->hme_rxerrack;
1790 	hkp->hk_txtagerr.value.ul	= hmep->hme_txtagerr;
1791 	hkp->hk_rxtagerr.value.ul	= hmep->hme_rxtagerr;
1792 	hkp->hk_eoperr.value.ul		= hmep->hme_eoperr;
1793 	hkp->hk_notmds.value.ul		= hmep->hme_notmds;
1794 	hkp->hk_notbufs.value.ul	= hmep->hme_notbufs;
1795 	hkp->hk_norbufs.value.ul	= hmep->hme_norbufs;
1796 
1797 	/*
1798 	 * Debug kstats
1799 	 */
1800 	hkp->hk_inits.value.ul		= hmep->inits;
1801 	hkp->hk_phyfail.value.ul	= hmep->phyfail;
1802 
1803 	/*
1804 	 * xcvr kstats
1805 	 */
1806 	hkp->hk_asic_rev.value.ul	= hmep->asic_rev;
1807 
1808 	return (0);
1809 }
1810 
1811 static void
1812 hmestatinit(struct hme *hmep)
1813 {
1814 	struct	kstat	*ksp;
1815 	struct	hmekstat	*hkp;
1816 	const char *driver;
1817 	int	instance;
1818 	char	buf[16];
1819 
1820 	instance = hmep->instance;
1821 	driver = ddi_driver_name(hmep->dip);
1822 
1823 	if ((ksp = kstat_create(driver, instance,
1824 	    "driver_info", "net", KSTAT_TYPE_NAMED,
1825 	    sizeof (struct hmekstat) / sizeof (kstat_named_t), 0)) == NULL) {
1826 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, INIT_MSG,
1827 		    "kstat_create failed");
1828 		return;
1829 	}
1830 
1831 	(void) snprintf(buf, sizeof (buf), "%sc%d", driver, instance);
1832 	hmep->hme_intrstats = kstat_create(driver, instance, buf, "controller",
1833 	    KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
1834 	if (hmep->hme_intrstats)
1835 		kstat_install(hmep->hme_intrstats);
1836 
1837 	hmep->hme_ksp = ksp;
1838 	hkp = (struct hmekstat *)ksp->ks_data;
1839 	kstat_named_init(&hkp->hk_cvc,			"code_violations",
1840 	    KSTAT_DATA_ULONG);
1841 	kstat_named_init(&hkp->hk_lenerr,		"len_errors",
1842 	    KSTAT_DATA_ULONG);
1843 	kstat_named_init(&hkp->hk_buff,			"buff",
1844 	    KSTAT_DATA_ULONG);
1845 	kstat_named_init(&hkp->hk_missed,		"missed",
1846 	    KSTAT_DATA_ULONG);
1847 	kstat_named_init(&hkp->hk_nocanput,		"nocanput",
1848 	    KSTAT_DATA_ULONG);
1849 	kstat_named_init(&hkp->hk_allocbfail,		"allocbfail",
1850 	    KSTAT_DATA_ULONG);
1851 	kstat_named_init(&hkp->hk_babl,			"babble",
1852 	    KSTAT_DATA_ULONG);
1853 	kstat_named_init(&hkp->hk_tmder,		"tmd_error",
1854 	    KSTAT_DATA_ULONG);
1855 	kstat_named_init(&hkp->hk_txlaterr,		"tx_late_error",
1856 	    KSTAT_DATA_ULONG);
1857 	kstat_named_init(&hkp->hk_rxlaterr,		"rx_late_error",
1858 	    KSTAT_DATA_ULONG);
1859 	kstat_named_init(&hkp->hk_slvparerr,		"slv_parity_error",
1860 	    KSTAT_DATA_ULONG);
1861 	kstat_named_init(&hkp->hk_txparerr,		"tx_parity_error",
1862 	    KSTAT_DATA_ULONG);
1863 	kstat_named_init(&hkp->hk_rxparerr,		"rx_parity_error",
1864 	    KSTAT_DATA_ULONG);
1865 	kstat_named_init(&hkp->hk_slverrack,		"slv_error_ack",
1866 	    KSTAT_DATA_ULONG);
1867 	kstat_named_init(&hkp->hk_txerrack,		"tx_error_ack",
1868 	    KSTAT_DATA_ULONG);
1869 	kstat_named_init(&hkp->hk_rxerrack,		"rx_error_ack",
1870 	    KSTAT_DATA_ULONG);
1871 	kstat_named_init(&hkp->hk_txtagerr,		"tx_tag_error",
1872 	    KSTAT_DATA_ULONG);
1873 	kstat_named_init(&hkp->hk_rxtagerr,		"rx_tag_error",
1874 	    KSTAT_DATA_ULONG);
1875 	kstat_named_init(&hkp->hk_eoperr,		"eop_error",
1876 	    KSTAT_DATA_ULONG);
1877 	kstat_named_init(&hkp->hk_notmds,		"no_tmds",
1878 	    KSTAT_DATA_ULONG);
1879 	kstat_named_init(&hkp->hk_notbufs,		"no_tbufs",
1880 	    KSTAT_DATA_ULONG);
1881 	kstat_named_init(&hkp->hk_norbufs,		"no_rbufs",
1882 	    KSTAT_DATA_ULONG);
1883 
1884 	/*
1885 	 * Debugging kstats
1886 	 */
1887 	kstat_named_init(&hkp->hk_inits,		"inits",
1888 	    KSTAT_DATA_ULONG);
1889 	kstat_named_init(&hkp->hk_phyfail,		"phy_failures",
1890 	    KSTAT_DATA_ULONG);
1891 
1892 	/*
1893 	 * xcvr kstats
1894 	 */
1895 	kstat_named_init(&hkp->hk_asic_rev,		"asic_rev",
1896 	    KSTAT_DATA_ULONG);
1897 
1898 	ksp->ks_update = hmestat_kstat_update;
1899 	ksp->ks_private = (void *) hmep;
1900 	kstat_install(ksp);
1901 }
1902 
1903 int
1904 hme_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t flags,
1905     uint_t sz, void *val, uint_t *perm)
1906 {
1907 	struct hme *hmep = arg;
1908 	int value;
1909 	boolean_t is_default;
1910 	int rv;
1911 
1912 	rv = mii_m_getprop(hmep->hme_mii, name, num, flags, sz, val, perm);
1913 	if (rv != ENOTSUP)
1914 		return (rv);
1915 
1916 	switch (num) {
1917 	case MAC_PROP_PRIVATE:
1918 		break;
1919 	default:
1920 		return (ENOTSUP);
1921 	}
1922 
1923 	*perm = MAC_PROP_PERM_RW;
1924 
1925 	is_default = (flags & MAC_PROP_DEFAULT) ? B_TRUE : B_FALSE;
1926 	if (strcmp(name, "_ipg0") == 0) {
1927 		value = is_default ? hme_ipg0 : hmep->hme_ipg0;
1928 
1929 	} else if (strcmp(name, "_ipg1") == 0) {
1930 		value = is_default ? hme_ipg1 : hmep->hme_ipg1;
1931 	} else if (strcmp(name, "_ipg2") == 0) {
1932 		value = is_default ? hme_ipg2 : hmep->hme_ipg2;
1933 	} else if (strcmp(name, "_lance_mode") == 0) {
1934 		value = is_default ? hme_lance_mode : hmep->hme_lance_mode;
1935 	} else {
1936 		return (ENOTSUP);
1937 	}
1938 	(void) snprintf(val, sz, "%d", value);
1939 	return (0);
1940 }
1941 
1942 int
1943 hme_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
1944     const void *val)
1945 {
1946 	struct hme *hmep = arg;
1947 	int rv;
1948 	long lval;
1949 	boolean_t init = B_FALSE;
1950 
1951 	rv = mii_m_setprop(hmep->hme_mii, name, num, sz, val);
1952 	if (rv != ENOTSUP)
1953 		return (rv);
1954 	rv = 0;
1955 
1956 	switch (num) {
1957 	case MAC_PROP_PRIVATE:
1958 		break;
1959 	default:
1960 		return (ENOTSUP);
1961 	}
1962 
1963 	(void) ddi_strtol(val, NULL, 0, &lval);
1964 
1965 	if (strcmp(name, "_ipg1") == 0) {
1966 		if ((lval >= 0) && (lval <= 255)) {
1967 			hmep->hme_ipg1 = lval & 0xff;
1968 			init = B_TRUE;
1969 		} else {
1970 			return (EINVAL);
1971 		}
1972 
1973 	} else if (strcmp(name, "_ipg2") == 0) {
1974 		if ((lval >= 0) && (lval <= 255)) {
1975 			hmep->hme_ipg2 = lval & 0xff;
1976 			init = B_TRUE;
1977 		} else {
1978 			return (EINVAL);
1979 		}
1980 
1981 	} else if (strcmp(name, "_ipg0") == 0) {
1982 		if ((lval >= 0) && (lval <= 31)) {
1983 			hmep->hme_ipg0 = lval & 0xff;
1984 			init = B_TRUE;
1985 		} else {
1986 			return (EINVAL);
1987 		}
1988 	} else if (strcmp(name, "_lance_mode") == 0) {
1989 		if ((lval >= 0) && (lval <= 1)) {
1990 			hmep->hme_lance_mode = lval & 0xff;
1991 			init = B_TRUE;
1992 		} else {
1993 			return (EINVAL);
1994 		}
1995 
1996 	} else {
1997 		rv = ENOTSUP;
1998 	}
1999 
2000 	if (init) {
2001 		(void) hmeinit(hmep);
2002 	}
2003 	return (rv);
2004 }
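
/*
 * Illustrative usage (not driver code): with the usual GLDv3 plumbing,
 * the private properties handled above are reachable through dladm(1M),
 * e.g. "dladm show-linkprop -p _ipg1 hme0" to read a value and
 * "dladm set-linkprop -p _ipg1=8 hme0" to change it; the link name
 * "hme0" here is hypothetical.
 */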
2005 
2006 
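/*
 * Only partial (16-bit one's complement) checksum offload is
 * advertised; hmestart() falls back to a software checksum for frames
 * shorter than 64 bytes, which the hardware cannot checksum correctly.
 */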
2007 /*ARGSUSED*/
2008 static boolean_t
2009 hme_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2010 {
2011 	switch (cap) {
2012 	case MAC_CAPAB_HCKSUM:
2013 		*(uint32_t *)cap_data = HCKSUM_INET_PARTIAL;
2014 		return (B_TRUE);
2015 	default:
2016 		return (B_FALSE);
2017 	}
2018 }
2019 
2020 static int
2021 hme_m_promisc(void *arg, boolean_t on)
2022 {
2023 	struct hme *hmep = arg;
2024 
2025 	hmep->hme_promisc = on;
2026 	(void) hmeinit(hmep);
2027 	return (0);
2028 }
2029 
2030 static int
2031 hme_m_unicst(void *arg, const uint8_t *macaddr)
2032 {
2033 	struct hme *hmep = arg;
2034 
2035 	/*
2036 	 * Set new interface local address and re-init device.
2037 	 * This is destructive to any other streams attached
2038 	 * to this device.
2039 	 */
2040 	mutex_enter(&hmep->hme_intrlock);
2041 	bcopy(macaddr, &hmep->hme_ouraddr, ETHERADDRL);
2042 	mutex_exit(&hmep->hme_intrlock);
2043 	(void) hmeinit(hmep);
2044 	return (0);
2045 }
2046 
2047 static int
2048 hme_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
2049 {
2050 	struct hme	*hmep = arg;
2051 	uint32_t	ladrf_bit;
2052 	boolean_t	doinit = B_FALSE;
2053 
2054 	/*
2055 	 * If this address's bit was not already set in the local address
2056 	 * filter, add it and re-initialize the Hardware.
2057 	 */
2058 	ladrf_bit = hmeladrf_bit(macaddr);
2059 
2060 	mutex_enter(&hmep->hme_intrlock);
2061 	if (add) {
2062 		hmep->hme_ladrf_refcnt[ladrf_bit]++;
2063 		if (hmep->hme_ladrf_refcnt[ladrf_bit] == 1) {
2064 			hmep->hme_ladrf[ladrf_bit >> 4] |=
2065 			    1 << (ladrf_bit & 0xf);
2066 			hmep->hme_multi++;
2067 			doinit = B_TRUE;
2068 		}
2069 	} else {
2070 		hmep->hme_ladrf_refcnt[ladrf_bit]--;
2071 		if (hmep->hme_ladrf_refcnt[ladrf_bit] == 0) {
2072 			hmep->hme_ladrf[ladrf_bit >> 4] &=
2073 			    ~(1 << (ladrf_bit & 0xf));
2074 			doinit = B_TRUE;
2075 		}
2076 	}
2077 	mutex_exit(&hmep->hme_intrlock);
2078 
2079 	if (doinit) {
2080 		(void) hmeinit(hmep);
2081 	}
2082 
2083 	return (0);
2084 }
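
/*
 * For reference, a minimal sketch (an assumption, not verbatim code) of
 * the filter-bit computation that hme_m_multicst() relies on; the real
 * hmeladrf_bit() is defined elsewhere in this file.  The sketch assumes
 * the hash is taken from 6 bits of the Ethernet CRC-32 of the address
 * (shown here as the 6 most significant bits):
 *
 *	static uint32_t
 *	hmeladrf_bit(const uint8_t *addr)
 *	{
 *		uint32_t crc;
 *
 *		CRC32(crc, addr, ETHERADDRL, -1U, crc32_table);
 *		return (crc >> 26);
 *	}
 *
 * The 6-bit result selects word (bit >> 4) and bit (bit & 0xf) of the
 * 4 x 16-bit logical address filter programmed into the hash registers
 * by hmeinit().
 */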
2085 
2086 static int
2087 hme_m_start(void *arg)
2088 {
2089 	struct hme *hmep = arg;
2090 
2091 	if (hmeinit(hmep) != 0) {
2092 		/* initialization failed -- really want DL_INITFAILED */
2093 		return (EIO);
2094 	} else {
2095 		hmep->hme_started = B_TRUE;
2096 		mii_start(hmep->hme_mii);
2097 		return (0);
2098 	}
2099 }
2100 
2101 static void
2102 hme_m_stop(void *arg)
2103 {
2104 	struct hme *hmep = arg;
2105 
2106 	mii_stop(hmep->hme_mii);
2107 	hmep->hme_started = B_FALSE;
2108 	hmeuninit(hmep);
2109 }
2110 
2111 static int
2112 hme_m_stat(void *arg, uint_t stat, uint64_t *val)
2113 {
2114 	struct hme	*hmep = arg;
2115 
2116 	mutex_enter(&hmep->hme_xmitlock);
2117 	if (hmep->hme_flags & HMERUNNING) {
2118 		hmereclaim(hmep);
2119 		hmesavecntrs(hmep);
2120 	}
2121 	mutex_exit(&hmep->hme_xmitlock);
2122 
2124 	if (mii_m_getstat(hmep->hme_mii, stat, val) == 0) {
2125 		return (0);
2126 	}
2127 	switch (stat) {
2128 	case MAC_STAT_IPACKETS:
2129 		*val = hmep->hme_ipackets;
2130 		break;
2131 	case MAC_STAT_RBYTES:
2132 		*val = hmep->hme_rbytes;
2133 		break;
2134 	case MAC_STAT_IERRORS:
2135 		*val = hmep->hme_ierrors;
2136 		break;
2137 	case MAC_STAT_OPACKETS:
2138 		*val = hmep->hme_opackets;
2139 		break;
2140 	case MAC_STAT_OBYTES:
2141 		*val = hmep->hme_obytes;
2142 		break;
2143 	case MAC_STAT_OERRORS:
2144 		*val = hmep->hme_oerrors;
2145 		break;
2146 	case MAC_STAT_MULTIRCV:
2147 		*val = hmep->hme_multircv;
2148 		break;
2149 	case MAC_STAT_MULTIXMT:
2150 		*val = hmep->hme_multixmt;
2151 		break;
2152 	case MAC_STAT_BRDCSTRCV:
2153 		*val = hmep->hme_brdcstrcv;
2154 		break;
2155 	case MAC_STAT_BRDCSTXMT:
2156 		*val = hmep->hme_brdcstxmt;
2157 		break;
2158 	case MAC_STAT_UNDERFLOWS:
2159 		*val = hmep->hme_uflo;
2160 		break;
2161 	case MAC_STAT_OVERFLOWS:
2162 		*val = hmep->hme_oflo;
2163 		break;
2164 	case MAC_STAT_COLLISIONS:
2165 		*val = hmep->hme_coll;
2166 		break;
2167 	case MAC_STAT_NORCVBUF:
2168 		*val = hmep->hme_norcvbuf;
2169 		break;
2170 	case MAC_STAT_NOXMTBUF:
2171 		*val = hmep->hme_noxmtbuf;
2172 		break;
2173 	case ETHER_STAT_LINK_DUPLEX:
2174 		*val = hmep->hme_duplex;
2175 		break;
2176 	case ETHER_STAT_ALIGN_ERRORS:
2177 		*val = hmep->hme_align_errors;
2178 		break;
2179 	case ETHER_STAT_FCS_ERRORS:
2180 		*val = hmep->hme_fcs_errors;
2181 		break;
2182 	case ETHER_STAT_EX_COLLISIONS:
2183 		*val = hmep->hme_excol;
2184 		break;
2185 	case ETHER_STAT_DEFER_XMTS:
2186 		*val = hmep->hme_defer_xmts;
2187 		break;
2188 	case ETHER_STAT_SQE_ERRORS:
2189 		*val = hmep->hme_sqe_errors;
2190 		break;
2191 	case ETHER_STAT_FIRST_COLLISIONS:
2192 		*val = hmep->hme_fstcol;
2193 		break;
2194 	case ETHER_STAT_TX_LATE_COLLISIONS:
2195 		*val = hmep->hme_tlcol;
2196 		break;
2197 	case ETHER_STAT_TOOLONG_ERRORS:
2198 		*val = hmep->hme_toolong_errors;
2199 		break;
2200 	case ETHER_STAT_TOOSHORT_ERRORS:
2201 		*val = hmep->hme_runt;
2202 		break;
2203 	case ETHER_STAT_CARRIER_ERRORS:
2204 		*val = hmep->hme_carrier_errors;
2205 		break;
2206 	default:
2207 		return (EINVAL);
2208 	}
2209 	return (0);
2210 }
2211 
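/*
 * MAC transmit entry point.  Packets are consumed until hmestart()
 * reports backpressure (out of transmit descriptors); the unsent
 * remainder is handed back to the MAC layer, which will resubmit it
 * after the driver calls mac_tx_update() from hmereclaim().
 */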
2212 static mblk_t *
2213 hme_m_tx(void *arg, mblk_t *mp)
2214 {
2215 	struct hme *hmep = arg;
2216 	mblk_t *next;
2217 
2218 	while (mp != NULL) {
2219 		next = mp->b_next;
2220 		mp->b_next = NULL;
2221 		if (!hmestart(hmep, mp)) {
2222 			mp->b_next = next;
2223 			break;
2224 		}
2225 		mp = next;
2226 	}
2227 	return (mp);
2228 }
2229 
2230 /*
2231  * Software IP checksum, for the edge cases that the
2232  * hardware can't handle.  See hmestart for more info.
2233  */
2234 static uint16_t
2235 hme_cksum(void *data, int len)
2236 {
2237 	uint16_t	*words = data;
2238 	int		i, nwords = len / 2;
2239 	uint32_t	sum = 0;
2240 
2241 	/* just add up the words */
2242 	for (i = 0; i < nwords; i++) {
2243 		sum += *words++;
2244 	}
2245 
2246 	/* pick up residual byte ... assume even half-word allocations */
2247 	if (len % 2) {
2248 		sum += (*words & htons(0xff00));
2249 	}
2250 
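	/*
	 * Fold the 32-bit accumulator into 16 bits; folding twice
	 * captures any carry produced by the first fold.
	 */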
2251 	sum = (sum >> 16) + (sum & 0xffff);
2252 	sum = (sum >> 16) + (sum & 0xffff);
2253 
2254 	return (~(sum & 0xffff));
2255 }
2256 
2257 static boolean_t
2258 hmestart(struct hme *hmep, mblk_t *mp)
2259 {
2260 	uint32_t	len;
2261 	boolean_t	retval = B_TRUE;
2262 	hmebuf_t	*tbuf;
2263 	uint32_t	txptr;
2264 
2265 	uint32_t	csflags = 0;
2266 	uint32_t	flags;
2267 	uint32_t	start_offset;
2268 	uint32_t	stuff_offset;
2269 
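	/*
	 * Ask the stack whether partial checksum offload was requested
	 * for this packet.  The offsets returned exclude the link-layer
	 * header, so the code below adds the Ethernet (and, if present,
	 * VLAN) header length that the hardware will see.
	 */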
2270 	hcksum_retrieve(mp, NULL, NULL, &start_offset, &stuff_offset,
2271 	    NULL, NULL, &flags);
2272 
2273 	if (flags & HCK_PARTIALCKSUM) {
2274 		if (get_ether_type(mp->b_rptr) == ETHERTYPE_VLAN) {
2275 			start_offset += sizeof (struct ether_header) + 4;
2276 			stuff_offset += sizeof (struct ether_header) + 4;
2277 		} else {
2278 			start_offset += sizeof (struct ether_header);
2279 			stuff_offset += sizeof (struct ether_header);
2280 		}
2281 		csflags = HMETMD_CSENABL |
2282 		    (start_offset << HMETMD_CSSTART_SHIFT) |
2283 		    (stuff_offset << HMETMD_CSSTUFF_SHIFT);
2284 	}
2285 
2286 	mutex_enter(&hmep->hme_xmitlock);
2287 
2288 	if (hmep->hme_flags & HMESUSPENDED) {
2289 		hmep->hme_carrier_errors++;
2290 		hmep->hme_oerrors++;
2291 		goto bad;
2292 	}
2293 
2294 	if (hmep->hme_txindex != hmep->hme_txreclaim) {
2295 		hmereclaim(hmep);
2296 	}
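	/*
	 * hme_txindex and hme_txreclaim are free-running counters: the
	 * ring is full when they are exactly HME_TMDMAX apart, and the
	 * descriptor slot in use is the counter modulo the ring size.
	 */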
2297 	if ((hmep->hme_txindex - HME_TMDMAX) == hmep->hme_txreclaim)
2298 		goto notmds;
2299 	txptr = hmep->hme_txindex % HME_TMDMAX;
2300 	tbuf = &hmep->hme_tbuf[txptr];
2301 
2302 	/*
2303 	 * Note that for checksum offload, the hardware cannot
2304 	 * generate correct checksums if the packet is smaller than
2305 	 * 64-bytes.  In such a case, we bcopy the packet and use
2306 	 * a software checksum.
2307 	 */
2308 
2309 	len = msgsize(mp);
2310 	if (len < 64) {
2311 		/* zero fill the padding */
2312 		bzero(tbuf->kaddr, 64);
2313 	}
2314 	mcopymsg(mp, tbuf->kaddr);
2315 
2316 	if ((csflags != 0) && (len < 64)) {
2317 		uint16_t sum;
2318 		sum = hme_cksum(tbuf->kaddr + start_offset,
2319 		    len - start_offset);
2320 		bcopy(&sum, tbuf->kaddr + stuff_offset, sizeof (sum));
2321 		csflags = 0;
2322 	}
2323 
2324 	if (ddi_dma_sync(tbuf->dmah, 0, len, DDI_DMA_SYNC_FORDEV) ==
2325 	    DDI_FAILURE) {
2326 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, DDI_MSG,
2327 		    "ddi_dma_sync failed");
2328 	}
2329 
2330 	/*
2331 	 * update MIB II statistics
2332 	 */
2333 	BUMP_OutNUcast(hmep, tbuf->kaddr);
2334 
2335 	PUT_TMD(txptr, tbuf->paddr, len,
2336 	    HMETMD_OWN | HMETMD_SOP | HMETMD_EOP | csflags);
2337 
2338 	HMESYNCTMD(txptr, DDI_DMA_SYNC_FORDEV);
2339 	hmep->hme_txindex++;
2340 
2341 	PUT_ETXREG(txpend, HMET_TXPEND_TDMD);
2342 	CHECK_ETXREG();
2343 
2344 	mutex_exit(&hmep->hme_xmitlock);
2345 
2346 	hmep->hme_starts++;
2347 	return (B_TRUE);
2348 
2349 bad:
2350 	mutex_exit(&hmep->hme_xmitlock);
2351 	freemsg(mp);
2352 	return (B_TRUE);
2353 
2354 notmds:
2355 	hmep->hme_notmds++;
2356 	hmep->hme_wantw = B_TRUE;
2357 	hmereclaim(hmep);
2358 	retval = B_FALSE;
2359 done:
2360 	mutex_exit(&hmep->hme_xmitlock);
2361 
2362 	return (retval);
2363 }
2364 
2365 /*
2366  * Initialize channel.
2367  * Return 0 on success, nonzero on error.
2368  *
2369  * The recommended sequence for initialization is:
2370  * 1. Issue a Global Reset command to the Ethernet Channel.
2371  * 2. Poll the Global_Reset bits until the execution of the reset has been
2372  *    completed.
2373  * 2(a). Use the MIF Frame/Output register to reset the transceiver.
2374  *	 Poll Register 0 until the Reset bit is 0.
2375  * 2(b). Use the MIF Frame/Output register to set the PHY in Normal-Op,
2376  *	 100Mbps and Non-Isolated mode. The main point here is to bring the
2377  *	 PHY out of Isolate mode so that it can generate the rx_clk and tx_clk
2378  *	 to the MII interface so that the Bigmac core can correctly reset
2379  *	 upon a software reset.
2380  * 2(c).  Issue another Global Reset command to the Ethernet Channel and poll
2381  *	  the Global_Reset bits until completion.
2382  * 3. Set up all the data structures in the host memory.
2383  * 4. Program the TX_MAC registers/counters (excluding the TX_MAC Configuration
2384  *    Register).
2385  * 5. Program the RX_MAC registers/counters (excluding the RX_MAC Configuration
2386  *    Register).
2387  * 6. Program the Transmit Descriptor Ring Base Address in the ETX.
2388  * 7. Program the Receive Descriptor Ring Base Address in the ERX.
2389  * 8. Program the Global Configuration and the Global Interrupt Mask Registers.
2390  * 9. Program the ETX Configuration register (enable the Transmit DMA channel).
2391  * 10. Program the ERX Configuration register (enable the Receive DMA channel).
2392  * 11. Program the XIF Configuration Register (enable the XIF).
2393  * 12. Program the RX_MAC Configuration Register (Enable the RX_MAC).
2394  * 13. Program the TX_MAC Configuration Register (Enable the TX_MAC).
2395  */
2396 
2397 
2398 #ifdef FEPS_URUN_BUG
2399 static int hme_palen = 32;
2400 #endif
2401 
2402 static int
2403 hmeinit(struct hme *hmep)
2404 {
2405 	uint32_t		i;
2406 	int			ret;
2407 	boolean_t		fdx;
2408 	int			phyad;
2409 
2410 	/*
2411 	 * Lock sequence:
2412 	 *	hme_intrlock, hme_xmitlock.
2413 	 */
2414 	mutex_enter(&hmep->hme_intrlock);
2415 
2416 	/*
2417 	 * Don't touch the hardware if we are suspended.  But don't
2418 	 * fail either.  Some time later we may be resumed, and then
2419 	 * we'll be back here to program the device using the settings
2420 	 * in the soft state.
2421 	 */
2422 	if (hmep->hme_flags & HMESUSPENDED) {
2423 		mutex_exit(&hmep->hme_intrlock);
2424 		return (0);
2425 	}
2426 
2427 	/*
2428 	 * Temporarily mask all interrupts except the MIF interrupt, so
2429 	 * that we do not clear or lose interrupts that occur while the
2430 	 * device is being initialized.  The interrupt mask is updated
2431 	 * again near the end of this function.
2432 	 */
2433 	PUT_GLOBREG(intmask, ~HMEG_MASK_MIF_INTR);
2434 
2436 	/*
2437 	 * Rearranged the mutex acquisition order to solve the deadlock
2438 	 * situation as described in bug ID 4065896.
2439 	 */
2440 
2441 	mutex_enter(&hmep->hme_xmitlock);
2442 
2443 	hmep->hme_flags = 0;
2444 	hmep->hme_wantw = B_FALSE;
2445 
2446 	if (hmep->inits)
2447 		hmesavecntrs(hmep);
2448 
2449 	/*
2450 	 * Perform Global reset of the Sbus/FEPS ENET channel.
2451 	 */
2452 	(void) hmestop(hmep);
2453 
2454 	/*
2455 	 * Clear all descriptors.
2456 	 */
2457 	bzero(hmep->hme_rmdp, HME_RMDMAX * sizeof (struct hme_rmd));
2458 	bzero(hmep->hme_tmdp, HME_TMDMAX * sizeof (struct hme_tmd));
2459 
2460 	/*
2461 	 * Hang out receive buffers.
2462 	 */
2463 	for (i = 0; i < HME_RMDMAX; i++) {
2464 		PUT_RMD(i, hmep->hme_rbuf[i].paddr);
2465 	}
2466 
2467 	/*
2468 	 * DMA sync descriptors.
2469 	 */
2470 	(void) ddi_dma_sync(hmep->hme_rmd_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
2471 	(void) ddi_dma_sync(hmep->hme_tmd_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
2472 
2473 	/*
2474 	 * Reset RMD and TMD 'walking' pointers.
2475 	 */
2476 	hmep->hme_rxindex = 0;
2477 	hmep->hme_txindex = hmep->hme_txreclaim = 0;
2478 
2479 	/*
2480 	 * This is the right place to initialize the MIF.
2481 	 */
2482 
2483 	PUT_MIFREG(mif_imask, HME_MIF_INTMASK);	/* mask all interrupts */
2484 
2485 	if (!hmep->hme_frame_enable)
2486 		PUT_MIFREG(mif_cfg, GET_MIFREG(mif_cfg) | HME_MIF_CFGBB);
2487 	else
2488 		PUT_MIFREG(mif_cfg, GET_MIFREG(mif_cfg) & ~HME_MIF_CFGBB);
2489 						/* enable frame mode */
2490 
2491 	/*
2492 	 * Depending on the transceiver detected, select the source
2493 	 * of the clocks for the MAC. Without the clocks, TX_MAC does
2494 	 * not reset. When the Global Reset is issued to the Sbus/FEPS
2495 	 * ASIC, it selects Internal by default.
2496 	 */
2497 
2498 	switch ((phyad = mii_get_addr(hmep->hme_mii))) {
2499 	case -1:
2500 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, XCVR_MSG, no_xcvr_msg);
2501 		goto init_fail;	/* abort initialization */
2502 
2503 	case HME_INTERNAL_PHYAD:
2504 		PUT_MACREG(xifc, 0);
2505 		break;
2506 	case HME_EXTERNAL_PHYAD:
2507 		/* Isolate the Int. xcvr */
2508 		PUT_MACREG(xifc, BMAC_XIFC_MIIBUFDIS);
2509 		break;
2510 	}
2511 
2512 	hmep->inits++;
2513 
2514 	/*
2515 	 * Initialize BigMAC registers.
2516 	 * First set the tx enable bit in tx config reg to 0 and poll on
2517 	 * it till it turns to 0. Same for rx config, hash and address
2518 	 * filter reg.
2519 	 * Here is the sequence per the spec.
2520 	 * MADD2 - MAC Address 2
2521 	 * MADD1 - MAC Address 1
2522 	 * MADD0 - MAC Address 0
2523 	 * HASH3, HASH2, HASH1, HASH0 for group address
2524 	 * AFR2, AFR1, AFR0 and AFMR for address filter mask
2525 	 * Program RXMIN and RXMAX for packet length if not 802.3
2526 	 * RXCFG - Rx config for not stripping CRC
2527 	 * XXX Anything else to hme configured in RXCFG
2528 	 * IPG1, IPG2, ALIMIT, SLOT, PALEN, PAPAT, TXSFD, JAM, TXMAX, TXMIN
2529 	 * if not 802.3 compliant
2530 	 * XIF register for speed selection
2531 	 * MASK  - Interrupt mask
2532 	 * Set bit 0 of TXCFG
2533 	 * Set bit 0 of RXCFG
2534 	 */
2535 
2536 	/*
2537 	 * Initialize the TX_MAC registers
2538 	 * Initialization of jamsize to work around rx crc bug
2539 	 */
2540 	PUT_MACREG(jam, jamsize);
2541 
2542 #ifdef	FEPS_URUN_BUG
2543 	if (hme_urun_fix)
2544 		PUT_MACREG(palen, hme_palen);
2545 #endif
2546 
2547 	PUT_MACREG(ipg1, hmep->hme_ipg1);
2548 	PUT_MACREG(ipg2, hmep->hme_ipg2);
2549 
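	/*
	 * Seed the BigMAC random number generator (presumably used for
	 * collision backoff) from the low bytes of our MAC address; the
	 * exact shift and mask below are inherited from the legacy code.
	 */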
2550 	PUT_MACREG(rseed,
2551 	    ((hmep->hme_ouraddr.ether_addr_octet[0] << 8) & 0x3) |
2552 	    hmep->hme_ouraddr.ether_addr_octet[1]);
2553 
2554 	/* Initialize the RX_MAC registers */
2555 
2556 	/*
2557 	 * Program BigMAC with local individual ethernet address.
2558 	 */
2559 	PUT_MACREG(madd2, (hmep->hme_ouraddr.ether_addr_octet[4] << 8) |
2560 	    hmep->hme_ouraddr.ether_addr_octet[5]);
2561 	PUT_MACREG(madd1, (hmep->hme_ouraddr.ether_addr_octet[2] << 8) |
2562 	    hmep->hme_ouraddr.ether_addr_octet[3]);
2563 	PUT_MACREG(madd0, (hmep->hme_ouraddr.ether_addr_octet[0] << 8) |
2564 	    hmep->hme_ouraddr.ether_addr_octet[1]);
2565 
2566 	/*
2567 	 * Set up multicast address filter by passing all multicast
2568 	 * addresses through a crc generator, and then using the
2569 	 * low order 6 bits as an index into the 64-bit logical
2570 	 * address filter. The high order three bits select the word,
2571 	 * while the rest of the bits select the bit within the word.
2572 	 */
2573 	PUT_MACREG(hash0, hmep->hme_ladrf[0]);
2574 	PUT_MACREG(hash1, hmep->hme_ladrf[1]);
2575 	PUT_MACREG(hash2, hmep->hme_ladrf[2]);
2576 	PUT_MACREG(hash3, hmep->hme_ladrf[3]);
2577 
2578 	/*
2579 	 * Configure parameters to support VLAN.  (VLAN encapsulation adds
2580 	 * four bytes.)
2581 	 */
2582 	PUT_MACREG(txmax, ETHERMAX + ETHERFCSL + 4);
2583 	PUT_MACREG(rxmax, ETHERMAX + ETHERFCSL + 4);
2584 
2585 	/*
2586 	 * Initialize HME Global registers, ETX registers and ERX registers.
2587 	 */
2588 
2589 	PUT_ETXREG(txring, hmep->hme_tmd_paddr);
2590 	PUT_ERXREG(rxring, hmep->hme_rmd_paddr);
2591 
2592 	/*
2593 	 * ERX registers can be written only if they have an even number
2594 	 * of bits set.  So, if the value written is not read back, set
2595 	 * the lsb and write again.
2596 	 * static int hme_erx_fix = 1; -- use the fix for the erx bug.
2597 	 */
2598 	{
2599 		uint32_t temp;
2600 		temp  = hmep->hme_rmd_paddr;
2601 
2602 		if (GET_ERXREG(rxring) != temp)
2603 			PUT_ERXREG(rxring, (temp | 4));
2604 	}
2605 
2606 	PUT_GLOBREG(config, (hmep->hme_config |
2607 	    (hmep->hme_64bit_xfer << HMEG_CONFIG_64BIT_SHIFT)));
2608 
2609 	/*
2610 	 * Significant performance improvements can be achieved by
2611 	 * disabling transmit interrupt. Thus TMD's are reclaimed only
2612 	 * when we run out of them in hmestart().
2613 	 */
2614 	PUT_GLOBREG(intmask,
2615 	    HMEG_MASK_INTR | HMEG_MASK_TINT | HMEG_MASK_TX_ALL);
2616 
2617 	PUT_ETXREG(txring_size, ((HME_TMDMAX - 1) >> HMET_RINGSZ_SHIFT));
2618 	PUT_ETXREG(config, (GET_ETXREG(config) | HMET_CONFIG_TXDMA_EN
2619 	    | HMET_CONFIG_TXFIFOTH));
2620 	/* get the rxring size bits */
2621 	switch (HME_RMDMAX) {
2622 	case 32:
2623 		i = HMER_CONFIG_RXRINGSZ32;
2624 		break;
2625 	case 64:
2626 		i = HMER_CONFIG_RXRINGSZ64;
2627 		break;
2628 	case 128:
2629 		i = HMER_CONFIG_RXRINGSZ128;
2630 		break;
2631 	case 256:
2632 		i = HMER_CONFIG_RXRINGSZ256;
2633 		break;
2634 	default:
2635 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2636 		    unk_rx_ringsz_msg);
2637 		goto init_fail;
2638 	}
2639 	i |= (HME_FSTBYTE_OFFSET << HMER_CONFIG_FBO_SHIFT)
2640 	    | HMER_CONFIG_RXDMA_EN;
2641 
2642 	/* h/w checks start offset in half words */
2643 	i |= ((sizeof (struct ether_header) / 2) << HMER_RX_CSSTART_SHIFT);
2644 
2645 	PUT_ERXREG(config, i);
2646 
2647 	/*
2648 	 * Bug related to the parity handling in ERX: when erxp->config
2649 	 * is read back, Sbus/FEPS drives the parity bit, and that
2650 	 * value is then used when the register is written again.
2651 	 * This fixes the RECV problem on the SS5.
2652 	 *
2653 	 * static int hme_erx_fix = 1; -- use the fix for the erx bug.
2654 	 */
2655 	{
2656 		uint32_t temp;
2657 		temp = GET_ERXREG(config);
2658 		PUT_ERXREG(config, i);
2659 
2660 		if (GET_ERXREG(config) != i)
2661 			HME_FAULT_MSG4(hmep, SEVERITY_UNKNOWN, ERX_MSG,
2662 			    "error:temp = %x erxp->config = %x, should be %x",
2663 			    temp, GET_ERXREG(config), i);
2664 	}
2665 
2666 	/*
2667 	 * Set up the rxconfig, txconfig and seed registers, without
2668 	 * enabling the former two at this time.
2669 	 *
2670 	 * BigMAC strips the CRC bytes by default.  Since this is
2671 	 * contrary to other pieces of hardware, this bit needs to be
2672 	 * enabled to tell BigMAC not to strip the CRC bytes.
2673 	 * Do not filter this node's own packets.
2674 	 */
2675 
2676 	if (hme_reject_own) {
2677 		PUT_MACREG(rxcfg,
2678 		    ((hmep->hme_promisc ? BMAC_RXCFG_PROMIS : 0) |
2679 		    BMAC_RXCFG_MYOWN | BMAC_RXCFG_HASH));
2680 	} else {
2681 		PUT_MACREG(rxcfg,
2682 		    ((hmep->hme_promisc ? BMAC_RXCFG_PROMIS : 0) |
2683 		    BMAC_RXCFG_HASH));
2684 	}
2685 
2686 	drv_usecwait(10);	/* wait after setting Hash Enable bit */
2687 
2688 	fdx = (mii_get_duplex(hmep->hme_mii) == LINK_DUPLEX_FULL);
2689 
2690 	if (hme_ngu_enable)
2691 		PUT_MACREG(txcfg, (fdx ? BMAC_TXCFG_FDX : 0) |
2692 		    BMAC_TXCFG_NGU);
2693 	else
2694 		PUT_MACREG(txcfg, (fdx ? BMAC_TXCFG_FDX : 0));
2695 
2696 	i = 0;
2697 	if ((hmep->hme_lance_mode) && (hmep->hme_lance_mode_enable))
2698 		i = ((hmep->hme_ipg0 & HME_MASK_5BIT) << BMAC_XIFC_IPG0_SHIFT)
2699 		    | BMAC_XIFC_LANCE_ENAB;
2700 	if (phyad == HME_INTERNAL_PHYAD)
2701 		PUT_MACREG(xifc, i | (BMAC_XIFC_ENAB));
2702 	else
2703 		PUT_MACREG(xifc, i | (BMAC_XIFC_ENAB | BMAC_XIFC_MIIBUFDIS));
2704 
2705 	PUT_MACREG(rxcfg, GET_MACREG(rxcfg) | BMAC_RXCFG_ENAB);
2706 	PUT_MACREG(txcfg, GET_MACREG(txcfg) | BMAC_TXCFG_ENAB);
2707 
2708 	hmep->hme_flags |= (HMERUNNING | HMEINITIALIZED);
2709 	/*
2710 	 * Update the interrupt mask : this will re-allow interrupts to occur
2711 	 */
2712 	PUT_GLOBREG(intmask, HMEG_MASK_INTR);
2713 	mac_tx_update(hmep->hme_mh);
2714 
2715 init_fail:
2716 	/*
2717 	 * Release the locks in reverse order
2718 	 */
2719 	mutex_exit(&hmep->hme_xmitlock);
2720 	mutex_exit(&hmep->hme_intrlock);
2721 
2722 	ret = !(hmep->hme_flags & HMERUNNING);
2723 	if (ret) {
2724 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2725 		    init_fail_gen_msg);
2726 	}
2727 
2728 	/*
2729 	 * Hardware checks.
2730 	 */
2731 	CHECK_GLOBREG();
2732 	CHECK_MIFREG();
2733 	CHECK_MACREG();
2734 	CHECK_ERXREG();
2735 	CHECK_ETXREG();
2736 
2737 init_exit:
2738 	return (ret);
2739 }
2740 
2741 /*
2742  * Calculate the dvma burstsize by setting up a dvma temporarily.  Return
2743  * 0 as burstsize upon failure as it signifies no burst size.
2744  * Requests 64-bit transfer setup, if the platform supports it.
2745  * NOTE: Do not use ddi_dma_alloc_handle(9F) then ddi_dma_burstsizes(9F);
2746  * sun4u Ultra-2 incorrectly returns a 32-bit transfer.
2747  */
2748 static int
2749 hmeburstsizes(struct hme *hmep)
2750 {
2751 	int burstsizes;
2752 	ddi_dma_handle_t handle;
2753 
2754 	if (ddi_dma_alloc_handle(hmep->dip, &hme_dma_attr,
2755 	    DDI_DMA_DONTWAIT, NULL, &handle)) {
2756 		return (0);
2757 	}
2758 
2759 	hmep->hme_burstsizes = burstsizes = ddi_dma_burstsizes(handle);
2760 	ddi_dma_free_handle(&handle);
2761 
2762 	/*
2763 	 * Use user-configurable parameter for enabling 64-bit transfers
2764 	 */
2765 	burstsizes = (hmep->hme_burstsizes >> 16);
2766 	if (burstsizes)
2767 		hmep->hme_64bit_xfer = hme_64bit_enable; /* user config value */
2768 	else
2769 		burstsizes = hmep->hme_burstsizes;
2770 
2771 	if (hmep->hme_cheerio_mode)
2772 		hmep->hme_64bit_xfer = 0; /* Disable for cheerio */
2773 
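	/*
	 * Pick the largest burst the parent nexus supports: bit 6
	 * (0x40) means 64-byte bursts and bit 5 (0x20) means 32-byte
	 * bursts; otherwise fall back to 16-byte bursts.
	 */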
2774 	if (burstsizes & 0x40)
2775 		hmep->hme_config = HMEG_CONFIG_BURST64;
2776 	else if (burstsizes & 0x20)
2777 		hmep->hme_config = HMEG_CONFIG_BURST32;
2778 	else
2779 		hmep->hme_config = HMEG_CONFIG_BURST16;
2780 
2781 	return (DDI_SUCCESS);
2782 }
2783 
2784 static int
2785 hmeallocbuf(struct hme *hmep, hmebuf_t *buf, int dir)
2786 {
2787 	ddi_dma_cookie_t	dmac;
2788 	size_t			len;
2789 	unsigned		ccnt;
2790 
2791 	if (ddi_dma_alloc_handle(hmep->dip, &hme_dma_attr,
2792 	    DDI_DMA_DONTWAIT, NULL, &buf->dmah) != DDI_SUCCESS) {
2793 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2794 		    "cannot allocate buf dma handle - failed");
2795 		return (DDI_FAILURE);
2796 	}
2797 
2798 	if (ddi_dma_mem_alloc(buf->dmah, ROUNDUP(HMEBUFSIZE, 512),
2799 	    &hme_buf_attr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
2800 	    &buf->kaddr, &len, &buf->acch) != DDI_SUCCESS) {
2801 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2802 		    "cannot allocate buf memory - failed");
2803 		return (DDI_FAILURE);
2804 	}
2805 
2806 	if (ddi_dma_addr_bind_handle(buf->dmah, NULL, buf->kaddr,
2807 	    len, dir | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
2808 	    &dmac, &ccnt) != DDI_DMA_MAPPED) {
2809 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2810 		    "cannot map buf for dma - failed");
2811 		return (DDI_FAILURE);
2812 	}
2813 	buf->paddr = dmac.dmac_address;
2814 
2815 	/* apparently they don't handle multiple cookies */
2816 	if (ccnt > 1) {
2817 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2818 		    "too many buf dma cookies");
2819 		return (DDI_FAILURE);
2820 	}
2821 	return (DDI_SUCCESS);
2822 }
2823 
2824 static int
2825 hmeallocbufs(struct hme *hmep)
2826 {
2827 	hmep->hme_tbuf = kmem_zalloc(HME_TMDMAX * sizeof (hmebuf_t), KM_SLEEP);
2828 	hmep->hme_rbuf = kmem_zalloc(HME_RMDMAX * sizeof (hmebuf_t), KM_SLEEP);
2829 
2830 	/* Alloc RX buffers. */
2831 	for (int i = 0; i < HME_RMDMAX; i++) {
2832 		if (hmeallocbuf(hmep, &hmep->hme_rbuf[i], DDI_DMA_READ) !=
2833 		    DDI_SUCCESS) {
2834 			return (DDI_FAILURE);
2835 		}
2836 	}
2837 
2838 	/* Alloc TX buffers. */
2839 	for (int i = 0; i < HME_TMDMAX; i++) {
2840 		if (hmeallocbuf(hmep, &hmep->hme_tbuf[i], DDI_DMA_WRITE) !=
2841 		    DDI_SUCCESS) {
2842 			return (DDI_FAILURE);
2843 		}
2844 	}
2845 	return (DDI_SUCCESS);
2846 }
2847 
2848 static void
2849 hmefreebufs(struct hme *hmep)
2850 {
2851 	int i;
2852 
2853 	if (hmep->hme_rbuf == NULL)
2854 		return;
2855 
2856 	/*
2857 	 * Free and unload pending xmit and recv buffers.
2858 	 * The routine is written to be idempotent, so it is safe to
2859 	 * call even if only some of the buffers were allocated.
2860 	 */
2861 
2862 	for (i = 0; i < HME_TMDMAX; i++) {
2863 		hmebuf_t *tbuf = &hmep->hme_tbuf[i];
2864 		if (tbuf->paddr) {
2865 			(void) ddi_dma_unbind_handle(tbuf->dmah);
2866 		}
2867 		if (tbuf->kaddr) {
2868 			ddi_dma_mem_free(&tbuf->acch);
2869 		}
2870 		if (tbuf->dmah) {
2871 			ddi_dma_free_handle(&tbuf->dmah);
2872 		}
2873 	}
2874 	for (i = 0; i < HME_RMDMAX; i++) {
2875 		hmebuf_t *rbuf = &hmep->hme_rbuf[i];
2876 		if (rbuf->paddr) {
2877 			(void) ddi_dma_unbind_handle(rbuf->dmah);
2878 		}
2879 		if (rbuf->kaddr) {
2880 			ddi_dma_mem_free(&rbuf->acch);
2881 		}
2882 		if (rbuf->dmah) {
2883 			ddi_dma_free_handle(&rbuf->dmah);
2884 		}
2885 	}
2886 	kmem_free(hmep->hme_rbuf, HME_RMDMAX * sizeof (hmebuf_t));
2887 	kmem_free(hmep->hme_tbuf, HME_TMDMAX * sizeof (hmebuf_t));
2888 }
2889 
2890 /*
2891  * Un-initialize (STOP) HME channel.
2892  */
2893 static void
2894 hmeuninit(struct hme *hmep)
2895 {
2896 	/*
2897 	 * Allow up to 'HMEDRAINTIME' for pending xmit's to complete.
2898 	 */
2899 	HMEDELAY((hmep->hme_txindex == hmep->hme_txreclaim), HMEDRAINTIME);
2900 
2901 	mutex_enter(&hmep->hme_intrlock);
2902 	mutex_enter(&hmep->hme_xmitlock);
2903 
2904 	hmep->hme_flags &= ~HMERUNNING;
2905 
2906 	(void) hmestop(hmep);
2907 
2908 	mutex_exit(&hmep->hme_xmitlock);
2909 	mutex_exit(&hmep->hme_intrlock);
2910 }
2911 
2912 /*
2913  * Allocate CONSISTENT memory for rmds and tmds with appropriate alignment and
2914  * map it in IO space. Allocate space for transmit and receive ddi_dma_handle
2915  * structures to use the DMA interface.
2916  */
2917 static int
2918 hmeallocthings(struct hme *hmep)
2919 {
2920 	int			size;
2921 	int			rval;
2922 	size_t			real_len;
2923 	uint_t			cookiec;
2924 	ddi_dma_cookie_t	dmac;
2925 	dev_info_t		*dip = hmep->dip;
2926 
2927 	/*
2928 	 * Allocate the TMD and RMD descriptors and extra for page alignment.
2929 	 */
2930 
2931 	rval = ddi_dma_alloc_handle(dip, &hme_dma_attr, DDI_DMA_DONTWAIT, NULL,
2932 	    &hmep->hme_rmd_dmah);
2933 	if (rval != DDI_SUCCESS) {
2934 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2935 		    "cannot allocate rmd handle - failed");
2936 		return (DDI_FAILURE);
2937 	}
2938 	size = HME_RMDMAX * sizeof (struct hme_rmd);
2939 	rval = ddi_dma_mem_alloc(hmep->hme_rmd_dmah, size,
2940 	    &hmep->hme_dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
2941 	    &hmep->hme_rmd_kaddr, &real_len, &hmep->hme_rmd_acch);
2942 	if (rval != DDI_SUCCESS) {
2943 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2944 		    "cannot allocate rmd dma mem - failed");
2945 		return (DDI_FAILURE);
2946 	}
2947 	hmep->hme_rmdp = (void *)(hmep->hme_rmd_kaddr);
2948 	rval = ddi_dma_addr_bind_handle(hmep->hme_rmd_dmah, NULL,
2949 	    hmep->hme_rmd_kaddr, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2950 	    DDI_DMA_DONTWAIT, NULL, &dmac, &cookiec);
2951 	if (rval != DDI_DMA_MAPPED) {
2952 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2953 		    "cannot allocate rmd dma - failed");
2954 		return (DDI_FAILURE);
2955 	}
2956 	hmep->hme_rmd_paddr = dmac.dmac_address;
2957 	if (cookiec != 1) {
2958 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2959 		    "too many rmd cookies - failed");
2960 		return (DDI_FAILURE);
2961 	}
2962 
2963 	rval = ddi_dma_alloc_handle(dip, &hme_dma_attr, DDI_DMA_DONTWAIT, NULL,
2964 	    &hmep->hme_tmd_dmah);
2965 	if (rval != DDI_SUCCESS) {
2966 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2967 		    "cannot allocate tmd handle - failed");
2968 		return (DDI_FAILURE);
2969 	}
2970 	size = HME_TMDMAX * sizeof (struct hme_tmd);
2971 	rval = ddi_dma_mem_alloc(hmep->hme_tmd_dmah, size,
2972 	    &hmep->hme_dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
2973 	    &hmep->hme_tmd_kaddr, &real_len, &hmep->hme_tmd_acch);
2974 	if (rval != DDI_SUCCESS) {
2975 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2976 		    "cannot allocate tmd dma mem - failed");
2977 		return (DDI_FAILURE);
2978 	}
2979 	hmep->hme_tmdp = (void *)(hmep->hme_tmd_kaddr);
2980 	rval = ddi_dma_addr_bind_handle(hmep->hme_tmd_dmah, NULL,
2981 	    hmep->hme_tmd_kaddr, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2982 	    DDI_DMA_DONTWAIT, NULL, &dmac, &cookiec);
2983 	if (rval != DDI_DMA_MAPPED) {
2984 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2985 		    "cannot allocate tmd dma - failed");
2986 		return (DDI_FAILURE);
2987 	}
2988 	hmep->hme_tmd_paddr = dmac.dmac_address;
2989 	if (cookiec != 1) {
2990 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2991 		    "too many tmd cookies - failed");
2992 		return (DDI_FAILURE);
2993 	}
2994 
2995 	return (DDI_SUCCESS);
2996 }
2997 
2998 static void
2999 hmefreethings(struct hme *hmep)
3000 {
3001 	if (hmep->hme_rmd_paddr) {
3002 		(void) ddi_dma_unbind_handle(hmep->hme_rmd_dmah);
3003 		hmep->hme_rmd_paddr = 0;
3004 	}
3005 	if (hmep->hme_rmd_acch)
3006 		ddi_dma_mem_free(&hmep->hme_rmd_acch);
3007 	if (hmep->hme_rmd_dmah)
3008 		ddi_dma_free_handle(&hmep->hme_rmd_dmah);
3009 
3010 	if (hmep->hme_tmd_paddr) {
3011 		(void) ddi_dma_unbind_handle(hmep->hme_tmd_dmah);
3012 		hmep->hme_tmd_paddr = 0;
3013 	}
3014 	if (hmep->hme_tmd_acch)
3015 		ddi_dma_mem_free(&hmep->hme_tmd_acch);
3016 	if (hmep->hme_tmd_dmah)
3017 		ddi_dma_free_handle(&hmep->hme_tmd_dmah);
3018 }
3019 
3020 /*
3021  *	First check to see if it is our device interrupting.
3022  */
3023 static uint_t
3024 hmeintr(caddr_t arg)
3025 {
3026 	struct hme	*hmep = (void *)arg;
3027 	uint32_t	hmesbits;
3028 	uint32_t	serviced = DDI_INTR_UNCLAIMED;
3029 	uint32_t	num_reads = 0;
3030 	uint32_t	rflags;
3031 	mblk_t		*mp, *head, **tail;
3032 
3034 	head = NULL;
3035 	tail = &head;
3036 
3037 	mutex_enter(&hmep->hme_intrlock);
3038 
3039 	/*
3040 	 * The status register auto-clears on read except for
3041 	 * the MIF Interrupt bit.
3042 	 */
3043 	hmesbits = GET_GLOBREG(status);
3044 	CHECK_GLOBREG();
3045 
3046 	/*
3047 	 * Note: TINT is sometimes enabled by hmereclaim()
3048 	 */
3049 
3050 	/*
3051 	 * Bugid 1227832 - to handle spurious interrupts on fusion systems.
3052 	 * Claim the first interrupt after initialization
3053 	 */
3054 	if (hmep->hme_flags & HMEINITIALIZED) {
3055 		hmep->hme_flags &= ~HMEINITIALIZED;
3056 		serviced = DDI_INTR_CLAIMED;
3057 	}
3058 
3059 	if ((hmesbits & (HMEG_STATUS_INTR | HMEG_STATUS_TINT)) == 0) {
3060 						/* No interesting interrupt */
3061 		if (hmep->hme_intrstats) {
3062 			if (serviced == DDI_INTR_UNCLAIMED)
3063 				KIOIP->intrs[KSTAT_INTR_SPURIOUS]++;
3064 			else
3065 				KIOIP->intrs[KSTAT_INTR_HARD]++;
3066 		}
3067 		mutex_exit(&hmep->hme_intrlock);
3068 		return (serviced);
3069 	}
3070 
3071 	serviced = DDI_INTR_CLAIMED;
3072 
3073 	if (!(hmep->hme_flags & HMERUNNING)) {
3074 		if (hmep->hme_intrstats)
3075 			KIOIP->intrs[KSTAT_INTR_HARD]++;
3076 		mutex_exit(&hmep->hme_intrlock);
3077 		hmeuninit(hmep);
3078 		return (serviced);
3079 	}
3080 
3081 	if (hmesbits & (HMEG_STATUS_FATAL_ERR | HMEG_STATUS_NONFATAL_ERR)) {
3082 		if (hmesbits & HMEG_STATUS_FATAL_ERR) {
3083 
3084 			if (hmep->hme_intrstats)
3085 				KIOIP->intrs[KSTAT_INTR_HARD]++;
3086 			hme_fatal_err(hmep, hmesbits);
3087 
3088 			mutex_exit(&hmep->hme_intrlock);
3089 			(void) hmeinit(hmep);
3090 			return (serviced);
3091 		}
3092 		hme_nonfatal_err(hmep, hmesbits);
3093 	}
3094 
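	/*
	 * TX completion interrupts are normally masked (see hmeinit());
	 * we get here chiefly after hmereclaim() has unmasked them to
	 * recover from a descriptor shortage.
	 */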
3095 	if (hmesbits & (HMEG_STATUS_TX_ALL | HMEG_STATUS_TINT)) {
3096 		mutex_enter(&hmep->hme_xmitlock);
3097 
3098 		hmereclaim(hmep);
3099 		mutex_exit(&hmep->hme_xmitlock);
3100 	}
3101 
3102 	if (hmesbits & HMEG_STATUS_RINT) {
3103 
3104 		/*
3105 		 * This dummy PIO is required to flush the SBus
3106 		 * Bridge buffers in QFE.
3107 		 */
3108 		(void) GET_GLOBREG(config);
3109 
3110 		/*
3111 		 * Loop through each RMD no more than once.
3112 		 */
3113 		while (num_reads++ < HME_RMDMAX) {
3114 			hmebuf_t *rbuf;
3115 			int rxptr;
3116 
3117 			rxptr = hmep->hme_rxindex % HME_RMDMAX;
3118 			HMESYNCRMD(rxptr, DDI_DMA_SYNC_FORKERNEL);
3119 
3120 			rflags = GET_RMD_FLAGS(rxptr);
3121 			if (rflags & HMERMD_OWN) {
3122 				/*
3123 				 * Chip still owns it.  We're done.
3124 				 */
3125 				break;
3126 			}
3127 
3128 			/*
3129 			 * Retrieve the packet.
3130 			 */
3131 			rbuf = &hmep->hme_rbuf[rxptr];
3132 			mp = hmeread(hmep, rbuf, rflags);
3133 
3134 			/*
3135 			 * Return ownership of the RMD.
3136 			 */
3137 			PUT_RMD(rxptr, rbuf->paddr);
3138 			HMESYNCRMD(rxptr, DDI_DMA_SYNC_FORDEV);
3139 
3140 			if (mp != NULL) {
3141 				*tail = mp;
3142 				tail = &mp->b_next;
3143 			}
3144 
3145 			/*
3146 			 * Advance to the next RMD.
3147 			 */
3148 			hmep->hme_rxindex++;
3149 		}
3150 	}
3151 
3152 	if (hmep->hme_intrstats)
3153 		KIOIP->intrs[KSTAT_INTR_HARD]++;
3154 
3155 	mutex_exit(&hmep->hme_intrlock);
3156 
3157 	if (head != NULL)
3158 		mac_rx(hmep->hme_mh, NULL, head);
3159 
3160 	return (serviced);
3161 }
3162 
3163 /*
3164  * Transmit completion reclaiming.
3165  */
3166 static void
3167 hmereclaim(struct hme *hmep)
3168 {
3169 	boolean_t	reclaimed = B_FALSE;
3170 
3171 	/*
3172 	 * Loop through each TMD.
3173 	 */
3174 	while (hmep->hme_txindex > hmep->hme_txreclaim) {
3175 
3176 		int		reclaim;
3177 		uint32_t	flags;
3178 
3179 		reclaim = hmep->hme_txreclaim % HME_TMDMAX;
3180 		HMESYNCTMD(reclaim, DDI_DMA_SYNC_FORKERNEL);
3181 
3182 		flags = GET_TMD_FLAGS(reclaim);
3183 		if (flags & HMETMD_OWN) {
3184 			/*
3185 			 * Chip still owns it.  We're done.
3186 			 */
3187 			break;
3188 		}
3189 
3190 		/*
3191 		 * Count a chained packet only once.
3192 		 */
3193 		if (flags & HMETMD_SOP) {
3194 			hmep->hme_opackets++;
3195 		}
3196 
3197 		/*
3198 		 * MIB II
3199 		 */
3200 		hmep->hme_obytes += flags & HMETMD_BUFSIZE;
3201 
3202 		reclaimed = B_TRUE;
3203 		hmep->hme_txreclaim++;
3204 	}
3205 
3206 	if (reclaimed) {
3207 		/*
3208 		 * We reclaimed some TMDs, so mask TX interrupts again.
3209 		 */
3210 		if (hmep->hme_wantw) {
3211 			PUT_GLOBREG(intmask,
3212 			    HMEG_MASK_INTR | HMEG_MASK_TINT |
3213 			    HMEG_MASK_TX_ALL);
3214 			hmep->hme_wantw = B_FALSE;
3215 			mac_tx_update(hmep->hme_mh);
3216 		}
3217 	} else {
3218 		/*
3219 		 * Enable TX interrupts so that hmereclaim() gets called
3220 		 * even if there is no further transmit activity.
3221 		 */
3222 		if (hmep->hme_wantw)
3223 			PUT_GLOBREG(intmask,
3224 			    GET_GLOBREG(intmask) & ~HMEG_MASK_TX_ALL);
3225 	}
3226 	CHECK_GLOBREG();
3227 }
3228 
3229 /*
3230  * Handle interrupts for fatal errors.
3231  * These require reinitialization of the ENET channel.
3232  */
3233 static void
3234 hme_fatal_err(struct hme *hmep, uint_t hmesbits)
3235 {
3236 
3237 	if (hmesbits & HMEG_STATUS_SLV_PAR_ERR) {
3238 		hmep->hme_slvparerr++;
3239 	}
3240 
3241 	if (hmesbits & HMEG_STATUS_SLV_ERR_ACK) {
3242 		hmep->hme_slverrack++;
3243 	}
3244 
3245 	if (hmesbits & HMEG_STATUS_TX_TAG_ERR) {
3246 		hmep->hme_txtagerr++;
3247 		hmep->hme_oerrors++;
3248 	}
3249 
3250 	if (hmesbits & HMEG_STATUS_TX_PAR_ERR) {
3251 		hmep->hme_txparerr++;
3252 		hmep->hme_oerrors++;
3253 	}
3254 
3255 	if (hmesbits & HMEG_STATUS_TX_LATE_ERR) {
3256 		hmep->hme_txlaterr++;
3257 		hmep->hme_oerrors++;
3258 	}
3259 
3260 	if (hmesbits & HMEG_STATUS_TX_ERR_ACK) {
3261 		hmep->hme_txerrack++;
3262 		hmep->hme_oerrors++;
3263 	}
3264 
3265 	if (hmesbits & HMEG_STATUS_EOP_ERR) {
3266 		hmep->hme_eoperr++;
3267 	}
3268 
3269 	if (hmesbits & HMEG_STATUS_RX_TAG_ERR) {
3270 		hmep->hme_rxtagerr++;
3271 		hmep->hme_ierrors++;
3272 	}
3273 
3274 	if (hmesbits & HMEG_STATUS_RX_PAR_ERR) {
3275 		hmep->hme_rxparerr++;
3276 		hmep->hme_ierrors++;
3277 	}
3278 
3279 	if (hmesbits & HMEG_STATUS_RX_LATE_ERR) {
3280 		hmep->hme_rxlaterr++;
3281 		hmep->hme_ierrors++;
3282 	}
3283 
3284 	if (hmesbits & HMEG_STATUS_RX_ERR_ACK) {
3285 		hmep->hme_rxerrack++;
3286 		hmep->hme_ierrors++;
3287 	}
3288 }
3289 
3290 /*
3291  * Handle interrupts regarding non-fatal errors.
3292  */
3293 static void
3294 hme_nonfatal_err(struct hme *hmep, uint_t hmesbits)
3295 {
3296 
3297 	if (hmesbits & HMEG_STATUS_RX_DROP) {
3298 		hmep->hme_missed++;
3299 		hmep->hme_ierrors++;
3300 	}
3301 
3302 	if (hmesbits & HMEG_STATUS_DEFTIMR_EXP) {
3303 		hmep->hme_defer_xmts++;
3304 	}
3305 
3306 	if (hmesbits & HMEG_STATUS_FSTCOLC_EXP) {
3307 		hmep->hme_fstcol += 256;
3308 	}
3309 
3310 	if (hmesbits & HMEG_STATUS_LATCOLC_EXP) {
3311 		hmep->hme_tlcol += 256;
3312 		hmep->hme_oerrors += 256;
3313 	}
3314 
3315 	if (hmesbits & HMEG_STATUS_EXCOLC_EXP) {
3316 		hmep->hme_excol += 256;
3317 		hmep->hme_oerrors += 256;
3318 	}
3319 
3320 	if (hmesbits & HMEG_STATUS_NRMCOLC_EXP) {
3321 		hmep->hme_coll += 256;
3322 	}
3323 
3324 	if (hmesbits & HMEG_STATUS_MXPKTSZ_ERR) {
3325 		hmep->hme_babl++;
3326 		hmep->hme_oerrors++;
3327 	}
3328 
3329 	/*
3330 	 * This error is fatal and the board may need to be
3331 	 * reinitialized, but currently we only count it.
3332 	 */
3333 	if (hmesbits & HMEG_STATUS_TXFIFO_UNDR) {
3334 		hmep->hme_uflo++;
3335 		hmep->hme_oerrors++;
3336 	}
3337 
3338 	if (hmesbits & HMEG_STATUS_SQE_TST_ERR) {
3339 		hmep->hme_sqe_errors++;
3340 	}
3341 
3342 	if (hmesbits & HMEG_STATUS_RCV_CNT_EXP) {
3343 		if (hmep->hme_rxcv_enable) {
3344 			hmep->hme_cvc += 256;
3345 		}
3346 	}
3347 
3348 	if (hmesbits & HMEG_STATUS_RXFIFO_OVFL) {
3349 		hmep->hme_oflo++;
3350 		hmep->hme_ierrors++;
3351 	}
3352 
3353 	if (hmesbits & HMEG_STATUS_LEN_CNT_EXP) {
3354 		hmep->hme_lenerr += 256;
3355 		hmep->hme_ierrors += 256;
3356 	}
3357 
3358 	if (hmesbits & HMEG_STATUS_ALN_CNT_EXP) {
3359 		hmep->hme_align_errors += 256;
3360 		hmep->hme_ierrors += 256;
3361 	}
3362 
3363 	if (hmesbits & HMEG_STATUS_CRC_CNT_EXP) {
3364 		hmep->hme_fcs_errors += 256;
3365 		hmep->hme_ierrors += 256;
3366 	}
3367 }
3368 
3369 static mblk_t *
3370 hmeread(struct hme *hmep, hmebuf_t *rbuf, uint32_t rflags)
3371 {
3372 	mblk_t		*bp;
3373 	uint32_t	len;
3374 	t_uscalar_t	type;
3375 
3376 	len = (rflags & HMERMD_BUFSIZE) >> HMERMD_BUFSIZE_SHIFT;
3377 
3378 	/*
3379 	 * Check for a short packet, and also for an overflow or
3380 	 * over-length packet; the processing is the same in all
3381 	 * cases: drop the frame, reuse the buffer, and update the
3382 	 * appropriate error counters.
3383 	 */
3384 	if ((len < ETHERMIN) || (rflags & HMERMD_OVFLOW) ||
3385 	    (len > (ETHERMAX + 4))) {
3386 		if (len < ETHERMIN)
3387 			hmep->hme_runt++;
3388 
3389 		else {
3390 			hmep->hme_buff++;
3391 			hmep->hme_toolong_errors++;
3392 		}
3393 		hmep->hme_ierrors++;
3394 		return (NULL);
3395 	}
3396 
3397 	/*
3398 	 * Sync the received buffer before looking at it.
3399 	 */
3400 
3401 	(void) ddi_dma_sync(rbuf->dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL);
3402 
3403 	/*
3404 	 * copy the packet data and then recycle the descriptor.
3405 	 */
3406 
3407 	if ((bp = allocb(len + HME_FSTBYTE_OFFSET, BPRI_HI)) == NULL) {
3408 
3409 		hmep->hme_allocbfail++;
3410 		hmep->hme_norcvbuf++;
3411 
3412 		return (NULL);
3413 	}
3414 
3415 	bcopy(rbuf->kaddr, bp->b_rptr, len + HME_FSTBYTE_OFFSET);
3416 
3417 	hmep->hme_ipackets++;
3418 
3419 	/* Advance b_rptr past the first-byte offset */
3420 	bp->b_rptr += HME_FSTBYTE_OFFSET;
3421 	bp->b_wptr = bp->b_rptr + len;
3422 
3423 	/*
3424 	 * update MIB II statistics
3425 	 */
3426 	BUMP_InNUcast(hmep, bp->b_rptr);
3427 	hmep->hme_rbytes += len;
3428 
3429 	type = get_ether_type(bp->b_rptr);
3430 
3431 	/*
3432 	 * TCP partial checksum in hardware
3433 	 */
3434 	if (type == ETHERTYPE_IP || type == ETHERTYPE_IPV6) {
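		/*
		 * The descriptor carries the complement of the 16-bit
		 * partial sum; undo the inversion and pass the sum to
		 * the stack as HCK_PARTIALCKSUM covering everything
		 * after the Ethernet header.
		 */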
3435 		uint16_t cksum = ~rflags & HMERMD_CKSUM;
3436 		uint_t end = len - sizeof (struct ether_header);
3437 		(void) hcksum_assoc(bp, NULL, NULL, 0,
3438 		    0, end, htons(cksum), HCK_PARTIALCKSUM, 0);
3439 	}
3440 
3441 	return (bp);
3442 }
3443 
3444 /*VARARGS*/
3445 static void
3446 hme_fault_msg(struct hme *hmep, uint_t severity, msg_t type, char *fmt, ...)
3447 {
3448 	char	msg_buffer[255];
3449 	va_list	ap;
3450 
3451 	va_start(ap, fmt);
3452 	(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
3453 
3454 	if (hmep == NULL) {
3455 		cmn_err(CE_NOTE, "hme : %s", msg_buffer);
3456 
3457 	} else if (type == DISPLAY_MSG) {
3458 		cmn_err(CE_CONT, "?%s%d : %s\n", ddi_driver_name(hmep->dip),
3459 		    hmep->instance, msg_buffer);
3460 	} else if (severity == SEVERITY_HIGH) {
3461 		cmn_err(CE_WARN, "%s%d : %s, SEVERITY_HIGH, %s\n",
3462 		    ddi_driver_name(hmep->dip), hmep->instance,
3463 		    msg_buffer, msg_string[type]);
3464 	} else {
3465 		cmn_err(CE_CONT, "%s%d : %s\n", ddi_driver_name(hmep->dip),
3466 		    hmep->instance, msg_buffer);
3467 	}
3468 	va_end(ap);
3469 }
3470 
3471 /*
3472  * If this is the first init, do not bother to save the
3473  * counters.  They should be 0, but do not count on it.
3474  */
3475 static void
3476 hmesavecntrs(struct hme *hmep)
3477 {
3478 	uint32_t fecnt, aecnt, lecnt, rxcv;
3479 	uint32_t ltcnt, excnt;
3480 
3481 	/* XXX What all gets added in ierrors and oerrors? */
3482 	fecnt = GET_MACREG(fecnt);
3483 	PUT_MACREG(fecnt, 0);
3484 
3485 	aecnt = GET_MACREG(aecnt);
3486 	hmep->hme_align_errors += aecnt;
3487 	PUT_MACREG(aecnt, 0);
3488 
3489 	lecnt = GET_MACREG(lecnt);
3490 	hmep->hme_lenerr += lecnt;
3491 	PUT_MACREG(lecnt, 0);
3492 
3493 	rxcv = GET_MACREG(rxcv);
3494 #ifdef HME_CODEVIOL_BUG
3495 	/*
3496 	 * Ignore rxcv errors for Sbus/FEPS 2.1 or earlier
3497 	 */
3498 	if (!hmep->hme_rxcv_enable) {
3499 		rxcv = 0;
3500 	}
3501 #endif
3502 	hmep->hme_cvc += rxcv;
3503 	PUT_MACREG(rxcv, 0);
3504 
3505 	ltcnt = GET_MACREG(ltcnt);
3506 	hmep->hme_tlcol += ltcnt;
3507 	PUT_MACREG(ltcnt, 0);
3508 
3509 	excnt = GET_MACREG(excnt);
3510 	hmep->hme_excol += excnt;
3511 	PUT_MACREG(excnt, 0);
3512 
3513 	hmep->hme_fcs_errors += fecnt;
3514 	hmep->hme_ierrors += (fecnt + aecnt + lecnt);
3515 	hmep->hme_oerrors += (ltcnt + excnt);
3516 	hmep->hme_coll += (GET_MACREG(nccnt) + ltcnt);
3517 
3518 	PUT_MACREG(nccnt, 0);
3519 	CHECK_MACREG();
3520 }
3521 
3522 /*
3523  * To set up the mac address for the network interface:
3524  * The adapter card may support a local mac address which is published
3525  * in a device node property "local-mac-address". This mac address is
3526  * treated as the factory-installed mac address for the DLPI interface.
3527  * If the adapter firmware has used the device for diskless boot
3528  * operation it publishes a property called "mac-address" for use by
3529  * inetboot and the device driver.
3530  * If "mac-address" is not found, the system options property
3531  * "local-mac-address" is used to select the mac-address. If this option
3532  * is set to "true", and "local-mac-address" has been found, then
3533  * local-mac-address is used; otherwise the system mac address is used
3534  * by calling the "localetheraddr()" function.
3535  */
3536 static void
3537 hme_setup_mac_address(struct hme *hmep, dev_info_t *dip)
3538 {
3539 	char	*prop;
3540 	int	prop_len = sizeof (int);
3541 
3542 	hmep->hme_addrflags = 0;
3543 
3544 	/*
3545 	 * Check if it is an adapter with its own local mac address
3546 	 * If it is present, save it as the "factory-address"
3547 	 * for this adapter.
3548 	 */
3549 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
3550 	    "local-mac-address",
3551 	    (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
3552 		if (prop_len == ETHERADDRL) {
3553 			hmep->hme_addrflags = HME_FACTADDR_PRESENT;
3554 			ether_bcopy(prop, &hmep->hme_factaddr);
3555 			HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
3556 			    "Local Ethernet address = %s",
3557 			    ether_sprintf(&hmep->hme_factaddr));
3558 		}
3559 		kmem_free(prop, prop_len);
3560 	}
3561 
3562 	/*
3563 	 * Check if the adapter has published "mac-address" property.
3564 	 * If it is present, use it as the mac address for this device.
3565 	 */
3566 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
3567 	    "mac-address", (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
3568 		if (prop_len >= ETHERADDRL) {
3569 			ether_bcopy(prop, &hmep->hme_ouraddr);
3570 			kmem_free(prop, prop_len);
3571 			return;
3572 		}
3573 		kmem_free(prop, prop_len);
3574 	}
3575 
3576 #ifdef	__sparc
3577 	/*
3578 	 * On sparc, we might be able to use the mac address from the
3579 	 * system.  However, on all other systems, we need to use the
3580 	 * address from the PROM.
3581 	 */
3582 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 0, "local-mac-address?",
3583 	    (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
3584 		if ((strncmp("true", prop, prop_len) == 0) &&
3585 		    (hmep->hme_addrflags & HME_FACTADDR_PRESENT)) {
3586 			hmep->hme_addrflags |= HME_FACTADDR_USE;
3587 			ether_bcopy(&hmep->hme_factaddr, &hmep->hme_ouraddr);
3588 			kmem_free(prop, prop_len);
3589 			HME_FAULT_MSG1(hmep, SEVERITY_NONE, DISPLAY_MSG,
3590 			    "Using local MAC address");
3591 			return;
3592 		}
3593 		kmem_free(prop, prop_len);
3594 	}
3595 
3596 	/*
3597 	 * Get the system ethernet address.
3598 	 */
3599 	(void) localetheraddr((struct ether_addr *)NULL, &hmep->hme_ouraddr);
3600 #else
3601 	ether_bcopy(&hmep->hme_factaddr, &hmep->hme_ouraddr);
3602 #endif
3603 }
3604 
3605 /* ARGSUSED */
3606 static void
3607 hme_check_acc_handle(char *file, uint_t line, struct hme *hmep,
3608     ddi_acc_handle_t handle)
3609 {
3610 }
3611