xref: /titanic_44/usr/src/uts/common/io/hme/hme.c (revision 613a2f6ba31e891e3d947a356daf5e563d43c1ce)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 
27 /*
28  * SunOS MT STREAMS FEPS(SBus)/Cheerio(PCI) 10/100Mb Ethernet Device Driver
29  */
30 
31 #include	<sys/types.h>
32 #include	<sys/debug.h>
33 #include	<sys/stream.h>
34 #include	<sys/cmn_err.h>
35 #include	<sys/kmem.h>
36 #include	<sys/crc32.h>
37 #include	<sys/modctl.h>
38 #include	<sys/conf.h>
39 #include	<sys/strsun.h>
40 #include	<sys/kstat.h>
41 #include	<inet/common.h>
42 #include	<inet/mi.h>
43 #include	<inet/nd.h>
44 #include	<sys/pattr.h>
45 #include	<sys/dlpi.h>
46 #include	<sys/strsubr.h>
47 #include	<sys/mac_provider.h>
48 #include	<sys/mac_ether.h>
49 #include	<sys/ethernet.h>
50 #include	<sys/vlan.h>
51 #include	<sys/pci.h>
52 #include	<sys/policy.h>
53 #include	<sys/ddi.h>
54 #include	<sys/sunddi.h>
55 #include	"hme_phy.h"
56 #include	"hme_mac.h"
57 #include	"hme.h"
58 
59 typedef void	(*fptrv_t)();
60 
61 typedef enum {
62 	NO_MSG		= 0,
63 	AUTOCONFIG_MSG	= 1,
64 	STREAMS_MSG	= 2,
65 	IOCTL_MSG	= 3,
66 	PROTO_MSG	= 4,
67 	INIT_MSG	= 5,
68 	TX_MSG		= 6,
69 	RX_MSG		= 7,
70 	INTR_MSG	= 8,
71 	UNINIT_MSG	= 9,
72 	CONFIG_MSG	= 10,
73 	PROP_MSG	= 11,
74 	ENTER_MSG	= 12,
75 	RESUME_MSG	= 13,
76 	AUTONEG_MSG	= 14,
77 	NAUTONEG_MSG	= 15,
78 	FATAL_ERR_MSG	= 16,
79 	NFATAL_ERR_MSG	= 17,
80 	NDD_MSG		= 18,
81 	PHY_MSG		= 19,
82 	XCVR_MSG	= 20,
83 	NOXCVR_MSG	= 21,
84 	NSUPPORT_MSG	= 22,
85 	ERX_MSG		= 23,
86 	FREE_MSG	= 24,
87 	IPG_MSG		= 25,
88 	DDI_MSG		= 26,
89 	DEFAULT_MSG	= 27,
90 	DISPLAY_MSG	= 28,
91 	LATECOLL_MSG	= 29,
92 	MIFPOLL_MSG	= 30,
93 	LINKPULSE_MSG	= 31
94 } msg_t;
95 
96 msg_t	hme_debug_level =	NO_MSG;
97 
98 static char	*msg_string[] = {
99 	"NONE       ",
100 	"AUTOCONFIG ",
101 	"STREAMS    ",
102 	"IOCTL      ",
103 	"PROTO      ",
104 	"INIT       ",
105 	"TX         ",
106 	"RX         ",
107 	"INTR       ",
108 	"UNINIT		",
109 	"CONFIG	",
110 	"PROP	",
111 	"ENTER	",
112 	"RESUME	",
113 	"AUTONEG	",
114 	"NAUTONEG	",
115 	"FATAL_ERR	",
116 	"NFATAL_ERR	",
117 	"NDD	",
118 	"PHY	",
119 	"XCVR	",
120 	"NOXCVR	",
121 	"NSUPPOR	",
122 	"ERX	",
123 	"FREE	",
124 	"IPG	",
125 	"DDI	",
126 	"DEFAULT	",
127 	"DISPLAY	"
128 	"LATECOLL_MSG	",
129 	"MIFPOLL_MSG	",
130 	"LINKPULSE_MSG	"
131 };
132 
133 #define	SEVERITY_NONE	0
134 #define	SEVERITY_LOW	0
135 #define	SEVERITY_MID	1
136 #define	SEVERITY_HIGH	2
137 #define	SEVERITY_UNKNOWN 99
138 
139 #define	FEPS_URUN_BUG
140 #define	HME_CODEVIOL_BUG
141 
142 #define	KIOIP	KSTAT_INTR_PTR(hmep->hme_intrstats)
143 
144 /*
145  * The following variables are used for checking fixes in Sbus/FEPS 2.0
146  */
147 static	int	hme_urun_fix = 0;	/* Bug fixed in Sbus/FEPS 2.0 */
148 
149 /*
150  * The following variables are used for configuring various features
151  */
152 static	int	hme_64bit_enable =	1;	/* Use 64-bit sbus transfers */
153 static	int	hme_reject_own =	1;	/* Reject packets with own SA */
154 static	int	hme_autoneg_enable =	1;	/* Enable auto-negotiation */
155 
156 static	int	hme_ngu_enable =	1; /* to enable Never Give Up mode */
157 static	int	hme_mifpoll_enable =	1; /* to enable mif poll */
158 
159 /*
160  * The following variables are used for configuring link-operation.
161  * Later these parameters may be changed per interface using "ndd" command
162  * These parameters may also be specified as properties using the .conf
163  * file mechanism for each interface.
164  */
165 
166 static	int	hme_lance_mode =	1;	/* to enable lance mode */
167 static	int	hme_ipg0 =		16;
168 static	int	hme_ipg1 =		8;
169 static	int	hme_ipg2 =		4;
170 static	int	hme_use_int_xcvr =	0;
171 static	int	hme_pace_size =		0;	/* Do not use pacing */
172 
173 /*
174  * The following variable value will be overridden by "link-pulse-disabled"
175  * property which may be created by OBP or hme.conf file.
176  */
177 static	int	hme_link_pulse_disabled = 0;	/* link pulse disabled */
178 
179 /*
180  * The following parameters may be configured by the user. If they are not
181  * configured by the user, the values will be based on the capabilities of
182  * the transceiver.
183  * The value "HME_NOTUSR" is ORed with the parameter value to indicate values
184  * which are NOT configured by the user.
185  */
186 
187 #define	HME_NOTUSR	0x0f000000
188 #define	HME_MASK_1BIT	0x1
189 #define	HME_MASK_5BIT	0x1f
190 #define	HME_MASK_8BIT	0xff
191 
192 static	int	hme_adv_autoneg_cap = HME_NOTUSR | 0;
193 static	int	hme_adv_100T4_cap = HME_NOTUSR | 0;
194 static	int	hme_adv_100fdx_cap = HME_NOTUSR | 0;
195 static	int	hme_adv_100hdx_cap = HME_NOTUSR | 0;
196 static	int	hme_adv_10fdx_cap = HME_NOTUSR | 0;
197 static	int	hme_adv_10hdx_cap = HME_NOTUSR | 0;
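
/*
 * For illustration only (not driver code): a parameter still carrying
 * the HME_NOTUSR marker was never configured by the user, so the
 * driver can test for user configuration with a simple mask and
 * recover the configured value with the HME_MASK_* constants above:
 *
 *	int user_set = ((hme_adv_autoneg_cap & HME_NOTUSR) == 0);
 *	int value    = hme_adv_autoneg_cap & HME_MASK_1BIT;
 *
 * hme_setup_link_default() below applies a test of this form to the
 * per-instance hme_param_* values.
 */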
198 
199 /*
200  * PHY_IDR1 and PHY_IDR2 values to identify National Semiconductor's DP83840
201  * Rev C chip which needs some work-arounds.
202  */
203 #define	HME_NSIDR1	0x2000
204 #define	HME_NSIDR2	0x5c00 /* IDR2 register value for revision no. 0 */
205 
206 /*
207  * PHY_IDR1 and PHY_IDR2 values to identify Quality Semiconductor's QS6612
208  * chip which needs some work-arounds.
209  * Added by the Interface Technologies Group (NPG), 8/28/1997.
210  */
211 #define	HME_QSIDR1	0x0181
212 #define	HME_QSIDR2	0x4400 /* IDR2 register value for revision no. 0 */
213 
214 /*
215  * The least significant 4 bits of HME_NSIDR2 represent the revision
216  * no. of the DP83840 chip. For Rev-C of DP83840, the rev. no. is 0.
217  * The next revision of the chip is called DP83840A and the value of
218  * HME_NSIDR2 is 0x5c01 for this new chip. All the workarounds specific
219  * to DP83840 chip are valid for both the revisions of the chip.
220  * Assuming that these workarounds are valid for the future revisions
221  * also, we will apply these workarounds independent of the revision no.
222  * Hence we mask out the last 4 bits of the IDR2 register and compare
223  * with 0x5c00 value.
224  */
225 
226 #define	HME_DP83840	((hmep->hme_idr1 == HME_NSIDR1) && \
227 			((hmep->hme_idr2 & 0xfff0) == HME_NSIDR2))
228 /*
229  * Likewise for the QSI 6612 Fast ethernet phy.
230  * Added by the Interface Technologies Group (NPG), 8/28/1997.
231  */
232 #define	HME_QS6612	((hmep->hme_idr1 == HME_QSIDR1) && \
233 			((hmep->hme_idr2 & 0xfff0) == HME_QSIDR2))
234 /*
235  * All strings used by hme messaging functions
236  */
237 
238 static	char *par_detect_msg =
239 	"Parallel detection fault.";
240 
241 static	char *xcvr_no_mii_msg =
242 	"Transceiver does not talk MII.";
243 
244 static	char *xcvr_isolate_msg =
245 	"Transceiver isolate failed.";
246 
247 static	char *int_xcvr_msg =
248 	"Internal Transceiver Selected.";
249 
250 static	char *ext_xcvr_msg =
251 	"External Transceiver Selected.";
252 
253 static	char *no_xcvr_msg =
254 	"No transceiver found.";
255 
256 static	char *burst_size_msg =
257 	"Could not identify the burst size";
258 
259 static	char *unk_rx_ringsz_msg =
260 	"Unknown receive RINGSZ";
261 
262 static  char *add_intr_fail_msg =
263 	"ddi_add_intr(9F) failed";
264 
265 static  char *mregs_4global_reg_fail_msg =
266 	"ddi_regs_map_setup(9F) for global reg failed";
267 
268 static	char *mregs_4etx_reg_fail_msg =
269 	"ddi_map_regs for etx reg failed";
270 
271 static	char *mregs_4erx_reg_fail_msg =
272 	"ddi_map_regs for erx reg failed";
273 
274 static	char *mregs_4bmac_reg_fail_msg =
275 	"ddi_map_regs for bmac reg failed";
276 
277 static	char *mregs_4mif_reg_fail_msg =
278 	"ddi_map_regs for mif reg failed";
279 
280 static  char *param_reg_fail_msg =
281 	"parameter register error";
282 
283 static	char *init_fail_gen_msg =
284 	"Failed to initialize hardware/driver";
285 
286 static	char *ddi_nregs_fail_msg =
287 	"ddi_dev_nregs failed(9F), returned %d";
288 
289 static	char *bad_num_regs_msg =
290 	"Invalid number of registers.";
291 
292 static	char *anar_not_set_msg =
293 	"External Transceiver: anar not set with speed selection";
294 
295 static	char *par_detect_anar_not_set_msg =
296 	"External Transceiver: anar not set with speed selection";
297 
298 
299 /* FATAL ERR msgs */
300 /*
301  * Function prototypes.
302  */
303 /* these are global so that qfe can use them */
304 int hmeattach(dev_info_t *, ddi_attach_cmd_t);
305 int hmedetach(dev_info_t *, ddi_detach_cmd_t);
306 int hmequiesce(dev_info_t *);
307 static	boolean_t hmeinit_xfer_params(struct hme *);
308 static	uint_t hmestop(struct hme *);
309 static	void hmestatinit(struct hme *);
310 static	int hmeallocthings(struct hme *);
311 static	void hmefreethings(struct hme *);
312 static	int hmeallocbuf(struct hme *, hmebuf_t *, int);
313 static	int hmeallocbufs(struct hme *);
314 static	void hmefreebufs(struct hme *);
315 static	void hmeget_hm_rev_property(struct hme *);
316 static	boolean_t hmestart(struct hme *, mblk_t *);
317 static	uint_t hmeintr(caddr_t);
318 static	void hmereclaim(struct hme *);
319 static	int hmeinit(struct hme *);
320 static	void hmeuninit(struct hme *hmep);
321 static	mblk_t *hmeread(struct hme *, hmebuf_t *, uint32_t);
322 static	void hmesavecntrs(struct hme *);
323 static	void hme_fatal_err(struct hme *, uint_t);
324 static	void hme_nonfatal_err(struct hme *, uint_t);
325 static	int hmeburstsizes(struct hme *);
326 static	void hme_start_mifpoll(struct hme *);
327 static	void hme_stop_mifpoll(struct hme *);
328 static	void hme_param_cleanup(struct hme *);
329 static	int hme_param_get(queue_t *q, mblk_t *mp, caddr_t cp);
330 static	int hme_param_register(struct hme *, hmeparam_t *, int);
331 static	int hme_param_set(queue_t *, mblk_t *, char *, caddr_t);
332 static	void send_bit(struct hme *, uint_t);
333 static	uint_t get_bit(struct hme *);
334 static	uint_t get_bit_std(struct hme *);
335 static	uint_t hme_bb_mii_read(struct hme *, uchar_t, uint16_t *);
336 static	void hme_bb_mii_write(struct hme *, uchar_t, uint16_t);
337 static	void hme_bb_force_idle(struct hme *);
338 static	uint_t hme_mii_read(struct hme *, uchar_t, uint16_t *);
339 static	void hme_mii_write(struct hme *, uchar_t, uint16_t);
340 static	void hme_stop_timer(struct hme *);
341 static	void hme_start_timer(struct hme *, fptrv_t, int);
342 static	int hme_select_speed(struct hme *, int);
343 static	void hme_reset_transceiver(struct hme *);
344 static	void hme_check_transceiver(struct hme *);
345 static	void hme_setup_link_default(struct hme *);
346 static	void hme_setup_link_status(struct hme *);
347 static	void hme_setup_link_control(struct hme *);
348 static	int hme_check_txhung(struct hme *hmep);
349 static	void hme_check_link(void *);
350 
351 static	void hme_init_xcvr_info(struct hme *);
352 static	void hme_disable_link_pulse(struct hme *);
353 static	void hme_force_speed(void *);
354 static	void hme_get_autoinfo(struct hme *);
355 static	int hme_try_auto_negotiation(struct hme *);
356 static	void hme_try_speed(void *);
357 static	void hme_link_now_up(struct hme *);
358 static	void hme_setup_mac_address(struct hme *, dev_info_t *);
359 
360 static	void hme_nd_free(caddr_t *nd_pparam);
361 static	int hme_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp);
362 static	boolean_t hme_nd_load(caddr_t *nd_pparam, char *name,
363     pfi_t get_pfi, pfi_t set_pfi, caddr_t data);
364 
365 static void hme_fault_msg(struct hme *, uint_t, msg_t, char *, ...);
366 
367 static void hme_check_acc_handle(char *, uint_t, struct hme *,
368     ddi_acc_handle_t);
369 
370 /*
371  * Nemo (GLDv3) Functions.
372  */
373 static int	hme_m_stat(void *, uint_t, uint64_t *);
374 static int	hme_m_start(void *);
375 static void	hme_m_stop(void *);
376 static int	hme_m_promisc(void *, boolean_t);
377 static int	hme_m_multicst(void *, boolean_t, const uint8_t *);
378 static int	hme_m_unicst(void *, const uint8_t *);
379 static mblk_t	*hme_m_tx(void *, mblk_t *);
380 static void	hme_m_ioctl(void *, queue_t *, mblk_t *);
381 static boolean_t	hme_m_getcapab(void *, mac_capab_t, void *);
382 
383 static mac_callbacks_t hme_m_callbacks = {
384 	MC_IOCTL | MC_GETCAPAB,
385 	hme_m_stat,
386 	hme_m_start,
387 	hme_m_stop,
388 	hme_m_promisc,
389 	hme_m_multicst,
390 	hme_m_unicst,
391 	hme_m_tx,
392 	hme_m_ioctl,
393 	hme_m_getcapab,
394 };
395 
396 DDI_DEFINE_STREAM_OPS(hme_dev_ops, nulldev, nulldev, hmeattach, hmedetach,
397     nodev, NULL, D_MP, NULL, hmequiesce);
398 
399 #define	HME_FAULT_MSG1(p, s, t, f) \
400     hme_fault_msg((p), (s), (t), (f));
401 
402 #define	HME_FAULT_MSG2(p, s, t, f, a) \
403     hme_fault_msg((p), (s), (t), (f), (a));
404 
405 #define	HME_FAULT_MSG3(p, s, t, f, a, b) \
406     hme_fault_msg((p), (s), (t), (f), (a), (b));
407 
408 #define	HME_FAULT_MSG4(p, s, t, f, a, b, c) \
409     hme_fault_msg((p), (s), (t), (f), (a), (b), (c));
410 
411 #define	CHECK_MIFREG() \
412 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_mifregh)
413 #define	CHECK_ETXREG() \
414 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_etxregh)
415 #define	CHECK_ERXREG() \
416 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_erxregh)
417 #define	CHECK_MACREG() \
418 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_bmacregh)
419 #define	CHECK_GLOBREG() \
420 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_globregh)
421 
422 /*
423  * Initially claim that the device is capable of all burst sizes.  Use
424  * the value returned by ddi_dma_burstsizes() to actually set the HME
425  * global configuration register later.
426  *
427  * Sbus/FEPS supports burst sizes of 16, 32 and 64 bytes. Also, it supports
428  * 32-bit and 64-bit Sbus transfers. Hence the dlim_burstsizes field contains
429  * the burstsizes in both the lo and hi words.
430  */
431 #define	HMELIMADDRLO	((uint64_t)0x00000000)
432 #define	HMELIMADDRHI	((uint64_t)0xffffffff)
433 
434 /*
435  * Note that rx and tx data buffers can be arbitrarily aligned, but
436  * that the descriptor rings need to be aligned on 2K boundaries, per
437  * the spec.
438  */
439 static ddi_dma_attr_t hme_dma_attr = {
440 	DMA_ATTR_V0,		/* version number. */
441 	(uint64_t)HMELIMADDRLO,	/* low address */
442 	(uint64_t)HMELIMADDRHI,	/* high address */
443 	(uint64_t)0x00ffffff,	/* address counter max */
444 	(uint64_t)HME_HMDALIGN,	/* alignment */
445 	(uint_t)0x00700070,	/* dlim_burstsizes for 32 and 64 bit xfers */
446 	(uint32_t)0x1,		/* minimum transfer size */
447 	(uint64_t)0x7fffffff,	/* maximum transfer size */
448 	(uint64_t)0x00ffffff,	/* maximum segment size */
449 	1,			/* scatter/gather list length */
450 	512,			/* granularity */
451 	0			/* attribute flags */
452 };
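
/*
 * A worked note on the burstsizes encoding above: each set bit i in a
 * burstsizes word means a burst of 2^i bytes is supported, so
 *
 *	0x0070 = (1 << 4) | (1 << 5) | (1 << 6)
 *
 * selects 16-, 32- and 64-byte bursts, and 0x00700070 repeats that
 * pattern in both the low word (32-bit Sbus transfers) and the high
 * word (64-bit Sbus transfers).
 */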
453 
454 static ddi_device_acc_attr_t hme_buf_attr = {
455 	DDI_DEVICE_ATTR_V0,
456 	DDI_NEVERSWAP_ACC,
457 	DDI_STRICTORDER_ACC,	/* probably could allow merging & caching */
458 	DDI_DEFAULT_ACC,
459 };
460 
461 static uchar_t pci_latency_timer = 0;
462 
463 /*
464  * Module linkage information for the kernel.
465  */
466 static struct modldrv modldrv = {
467 	&mod_driverops,	/* Type of module.  This one is a driver */
468 	"Sun HME 10/100 Mb Ethernet",
469 	&hme_dev_ops,	/* driver ops */
470 };
471 
472 static struct modlinkage modlinkage = {
473 	MODREV_1, &modldrv, NULL
474 };
475 
476 /*
477  * Internal PHY Id:
478  */
479 
480 #define	HME_BB1	0x15	/* Babybac1, Rev 1.5 */
481 #define	HME_BB2 0x20	/* Babybac2, Rev 0 */
482 
483 /* <<<<<<<<<<<<<<<<<<<<<<  Register operations >>>>>>>>>>>>>>>>>>>>> */
484 
485 #define	GET_MIFREG(reg) \
486 	ddi_get32(hmep->hme_mifregh, (uint32_t *)&hmep->hme_mifregp->reg)
487 #define	PUT_MIFREG(reg, value) \
488 	ddi_put32(hmep->hme_mifregh, (uint32_t *)&hmep->hme_mifregp->reg, value)
489 
490 #define	GET_ETXREG(reg) \
491 	ddi_get32(hmep->hme_etxregh, (uint32_t *)&hmep->hme_etxregp->reg)
492 #define	PUT_ETXREG(reg, value) \
493 	ddi_put32(hmep->hme_etxregh, (uint32_t *)&hmep->hme_etxregp->reg, value)
494 #define	GET_ERXREG(reg) \
495 	ddi_get32(hmep->hme_erxregh, (uint32_t *)&hmep->hme_erxregp->reg)
496 #define	PUT_ERXREG(reg, value) \
497 	ddi_put32(hmep->hme_erxregh, (uint32_t *)&hmep->hme_erxregp->reg, value)
498 #define	GET_MACREG(reg) \
499 	ddi_get32(hmep->hme_bmacregh, (uint32_t *)&hmep->hme_bmacregp->reg)
500 #define	PUT_MACREG(reg, value) \
501 	ddi_put32(hmep->hme_bmacregh, \
502 		(uint32_t *)&hmep->hme_bmacregp->reg, value)
503 #define	GET_GLOBREG(reg) \
504 	ddi_get32(hmep->hme_globregh, (uint32_t *)&hmep->hme_globregp->reg)
505 #define	PUT_GLOBREG(reg, value) \
506 	ddi_put32(hmep->hme_globregh, \
507 		(uint32_t *)&hmep->hme_globregp->reg, value)
508 #define	PUT_TMD(ptr, paddr, len, flags)					\
509 	ddi_put32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_addr, paddr); \
510 	ddi_put32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_flags,	\
511 	    len | flags)
512 #define	GET_TMD_FLAGS(ptr)					\
513 	ddi_get32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_flags)
514 #define	PUT_RMD(ptr, paddr) \
515 	ddi_put32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_addr, paddr); \
516 	ddi_put32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_flags,	\
517 	    (uint32_t)(HMEBUFSIZE << HMERMD_BUFSIZE_SHIFT) | HMERMD_OWN)
518 #define	GET_RMD_FLAGS(ptr)					\
519 	ddi_get32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_flags)
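
/*
 * A usage sketch for the descriptor macros above (the DMA-address
 * variable name is illustrative): to hand receive slot 'i' back to the
 * chip, only the buffer's DMA address is needed, since PUT_RMD fills
 * in the buffer size and sets HMERMD_OWN to pass ownership to the
 * hardware:
 *
 *	PUT_RMD(i, rbuf_dma_addr);
 *
 * On completion, GET_RMD_FLAGS(i) with HMERMD_OWN clear means the
 * descriptor belongs to software again.
 */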
520 
521 #define	GET_ROM8(offset) \
522 	ddi_get8((hmep->hme_romh), (offset))
523 
524 /*
525  * Ether_copy is not endian-correct. Define an endian-correct version.
526  */
527 #define	ether_bcopy(a, b) (bcopy(a, b, 6))
528 
529 /*
530  * Ether-type is specifically big-endian, but data region is unknown endian
531  */
532 #define	get_ether_type(ptr) \
533 	(((((uint8_t *)ptr)[12] << 8) | (((uint8_t *)ptr)[13])))
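
/*
 * A usage sketch, assuming an mblk whose b_rptr points at the start of
 * an Ethernet header: the extracted type field can be compared against
 * the well-known values from <sys/ethernet.h>, e.g.
 *
 *	if (get_ether_type(mp->b_rptr) == ETHERTYPE_IP)
 *		... frame carries an IPv4 payload ...
 */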
534 
535 /* <<<<<<<<<<<<<<<<<<<<<<  Configuration Parameters >>>>>>>>>>>>>>>>>>>>> */
536 
537 #define	BMAC_DEFAULT_JAMSIZE	(0x04)		/* jamsize equals 4 */
538 #define	BMAC_LONG_JAMSIZE	(0x10)		/* jamsize equals 0x10 */
539 static	int 	jamsize = BMAC_DEFAULT_JAMSIZE;
540 
541 
542 /*
543  * Calculate the bit in the multicast address filter that selects the given
544  * address.
545  */
546 
547 static uint32_t
548 hmeladrf_bit(const uint8_t *addr)
549 {
550 	uint32_t crc;
551 
552 	CRC32(crc, addr, ETHERADDRL, -1U, crc32_table);
553 
554 	/*
555 	 * Just want the 6 most significant bits.
556 	 */
557 	return (crc >> 26);
558 }
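
/*
 * A sketch of how the 6-bit result is typically consumed (the array
 * name is illustrative): the 64 filter bits live in four 16-bit
 * logical-address-filter registers, so the upper 2 bits select the
 * register and the lower 4 bits select the bit within it:
 *
 *	uint32_t bit = hmeladrf_bit(addr);
 *	ladrf[bit >> 4] |= (1 << (bit & 0xf));
 */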
559 
560 /* <<<<<<<<<<<<<<<<<<<<<<<<  Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
561 
562 static int hme_internal_phy_id = HME_BB2;	/* Internal PHY is Babybac2  */
563 
564 
565 static void
566 send_bit(struct hme *hmep, uint32_t x)
567 {
568 	PUT_MIFREG(mif_bbdata, x);
569 	PUT_MIFREG(mif_bbclk, HME_BBCLK_LOW);
570 	PUT_MIFREG(mif_bbclk, HME_BBCLK_HIGH);
571 }
572 
573 /*
574  * To read the MII register bits from the Babybac1 transceiver
575  */
576 static uint32_t
577 get_bit(struct hme *hmep)
578 {
579 	uint32_t	x;
580 
581 	PUT_MIFREG(mif_bbclk, HME_BBCLK_LOW);
582 	PUT_MIFREG(mif_bbclk, HME_BBCLK_HIGH);
583 	if (hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER)
584 		x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM0) ? 1 : 0;
585 	else
586 		x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM1) ? 1 : 0;
587 	return (x);
588 }
589 
590 
591 /*
592  * To read the MII register bits according to the IEEE Standard
593  */
594 static uint32_t
595 get_bit_std(struct hme *hmep)
596 {
597 	uint32_t	x;
598 
599 	PUT_MIFREG(mif_bbclk, HME_BBCLK_LOW);
600 	drv_usecwait(1);	/* wait >330 ns for stable data */
601 	if (hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER)
602 		x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM0) ? 1 : 0;
603 	else
604 		x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM1) ? 1 : 0;
605 	PUT_MIFREG(mif_bbclk, HME_BBCLK_HIGH);
606 	return (x);
607 }
608 
609 #define	SEND_BIT(x)		send_bit(hmep, x)
610 #define	GET_BIT(x)		x = get_bit(hmep)
611 #define	GET_BIT_STD(x)		x = get_bit_std(hmep)
612 
613 
614 static void
615 hme_bb_mii_write(struct hme *hmep, uint8_t regad, uint16_t data)
616 {
617 	uint8_t	phyad;
618 	int	i;
619 
620 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
621 	phyad = hmep->hme_phyad;
622 	(void) hme_bb_force_idle(hmep);
623 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
624 	SEND_BIT(0); SEND_BIT(1);	/* <OP> */
625 
626 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
627 		SEND_BIT((phyad >> i) & 1);
628 	}
629 
630 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
631 		SEND_BIT((regad >> i) & 1);
632 	}
633 
634 	SEND_BIT(1); SEND_BIT(0);	/* <TA> */
635 
636 	for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
637 		SEND_BIT((data >> i) & 1);
638 	}
639 
640 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
641 	CHECK_MIFREG();
642 }
643 
644 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
645 static uint_t
646 hme_bb_mii_read(struct hme *hmep, uint8_t regad, uint16_t *datap)
647 {
648 	uint8_t		phyad;
649 	int		i;
650 	uint32_t	x;
651 	uint32_t	y;
652 
653 	*datap = 0;
654 
655 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
656 	phyad = hmep->hme_phyad;
657 	(void) hme_bb_force_idle(hmep);
658 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
659 	SEND_BIT(1); SEND_BIT(0);	/* <OP> */
660 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
661 		SEND_BIT((phyad >> i) & 1);
662 	}
663 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
664 		SEND_BIT((regad >> i) & 1);
665 	}
666 
667 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
668 
669 	if ((hme_internal_phy_id == HME_BB2) ||
670 	    (hmep->hme_transceiver == HME_EXTERNAL_TRANSCEIVER)) {
671 		GET_BIT_STD(x);
672 		GET_BIT_STD(y);		/* <TA> */
673 		for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
674 			GET_BIT_STD(x);
675 			*datap += (x << i);
676 		}
677 		/*
678 		 * Kludge to get the Transceiver out of hung mode
679 		 */
680 		GET_BIT_STD(x);
681 		GET_BIT_STD(x);
682 		GET_BIT_STD(x);
683 	} else {
684 		GET_BIT(x);
685 		GET_BIT(y);		/* <TA> */
686 		for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
687 			GET_BIT(x);
688 			*datap += (x << i);
689 		}
690 		/*
691 		 * Kludge to get the Transceiver out of hung mode
692 		 */
693 		GET_BIT(x);
694 		GET_BIT(x);
695 		GET_BIT(x);
696 	}
697 	CHECK_MIFREG();
698 	return (y);
699 }
700 
701 
702 static void
703 hme_bb_force_idle(struct hme *hmep)
704 {
705 	int	i;
706 
707 	for (i = 0; i < 33; i++) {
708 		SEND_BIT(1);
709 	}
710 }
711 
712 /* <<<<<<<<<<<<<<<<<<<<End of Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
713 
714 
715 /* <<<<<<<<<<<<< Frame Register used for MII operations >>>>>>>>>>>>>>>>>>>> */
716 
717 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
718 static uint_t
719 hme_mii_read(struct hme *hmep, uchar_t regad, uint16_t *datap)
720 {
721 	uint32_t	frame;
722 	uint8_t		phyad;
723 
724 	if (hmep->hme_transceiver == HME_NO_TRANSCEIVER)
725 		return (1);	/* No transceiver present */
726 
727 	if (!hmep->hme_frame_enable)
728 		return (hme_bb_mii_read(hmep, regad, datap));
729 
730 	phyad = hmep->hme_phyad;
731 
732 	PUT_MIFREG(mif_frame,
733 	    HME_MIF_FRREAD | (phyad << HME_MIF_FRPHYAD_SHIFT) |
734 	    (regad << HME_MIF_FRREGAD_SHIFT));
735 /*
736  *	HMEDELAY((*framerp & HME_MIF_FRTA0), HMEMAXRSTDELAY);
737  */
738 	HMEDELAY((GET_MIFREG(mif_frame) & HME_MIF_FRTA0), 300);
739 	frame = GET_MIFREG(mif_frame);
740 	CHECK_MIFREG();
741 	if ((frame & HME_MIF_FRTA0) == 0) {
742 
743 
744 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, NAUTONEG_MSG,
745 		    "MIF Read failure");
746 		return (1);
747 	} else {
748 		*datap = (uint16_t)(frame & HME_MIF_FRDATA);
749 		return (0);
750 	}
751 
752 }
753 
754 static void
755 hme_mii_write(struct hme *hmep, uint8_t regad, uint16_t data)
756 {
757 	uint32_t frame;
758 	uint8_t	phyad;
759 
760 	if (!hmep->hme_frame_enable) {
761 		hme_bb_mii_write(hmep, regad, data);
762 		return;
763 	}
764 
765 	phyad = hmep->hme_phyad;
766 
767 	PUT_MIFREG(mif_frame,
768 	    HME_MIF_FRWRITE | (phyad << HME_MIF_FRPHYAD_SHIFT) |
769 	    (regad << HME_MIF_FRREGAD_SHIFT) | data);
770 /*
771  *	HMEDELAY((*framerp & HME_MIF_FRTA0), HMEMAXRSTDELAY);
772  */
773 	HMEDELAY((GET_MIFREG(mif_frame) & HME_MIF_FRTA0), 300);
774 	frame = GET_MIFREG(mif_frame);
775 	CHECK_MIFREG();
776 	if ((frame & HME_MIF_FRTA0) == 0) {
777 		HME_FAULT_MSG1(hmep, SEVERITY_MID, NAUTONEG_MSG,
778 		    "MIF Write failure");
779 	}
780 }
781 
782 /*
783  * The hme_stop_timer function is called before doing any link-related
784  * processing. It acquires "hme_linklock" to protect the link-related data
785  * structures. This lock will be subsequently released in hme_start_timer().
786  */
787 static void
788 hme_stop_timer(struct hme *hmep)
789 {
790 	timeout_id_t	tid;
791 
792 	mutex_enter(&hmep->hme_linklock);
793 
794 	if (hmep->hme_timerid) {
795 		tid = hmep->hme_timerid;
796 		hmep->hme_timerid = 0;
797 		mutex_exit(&hmep->hme_linklock);
798 		(void) untimeout(tid);
799 		mutex_enter(&hmep->hme_linklock);
800 	}
801 }
802 
803 static void
804 hme_start_timer(struct hme *hmep, fptrv_t func, int msec)
805 {
806 	hmep->hme_timerid = timeout(func, hmep, drv_usectohz(1000 * msec));
807 
808 	mutex_exit(&hmep->hme_linklock);
809 }
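
/*
 * The two functions above bracket all link-related processing; a
 * minimal sketch of the expected calling pattern (see hme_check_link()
 * and hme_force_speed() below for real uses):
 *
 *	hme_stop_timer(hmep);		-- acquires hme_linklock
 *	... examine or update link state ...
 *	hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
 *					-- re-arms timeout, drops the lock
 */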
810 
811 /*
812  * hme_select_speed is required only when auto-negotiation is not supported.
813  * It should be used only for the Internal Transceiver and not the External
814  * transceiver because we wouldn't know how to generate Link Down state on
815  * the wire.
816  * Currently it is required to support Electron 1.1 Build machines. When all
817  * these machines are upgraded to 1.2 or better, remove this function.
818  *
819  * Returns 1 if the link is up, 0 otherwise.
820  */
821 
822 static int
823 hme_select_speed(struct hme *hmep, int speed)
824 {
825 	uint16_t	stat;
826 	uint16_t	fdx;
827 
828 	if (hmep->hme_linkup_cnt)  /* not first time */
829 		goto read_status;
830 
831 	if (hmep->hme_fdx)
832 		fdx = PHY_BMCR_FDX;
833 	else
834 		fdx = 0;
835 
836 	switch (speed) {
837 	case HME_SPEED_100:
838 
839 		switch (hmep->hme_transceiver) {
840 		case HME_INTERNAL_TRANSCEIVER:
841 			hme_mii_write(hmep, HME_PHY_BMCR, fdx | PHY_BMCR_100M);
842 			break;
843 		case HME_EXTERNAL_TRANSCEIVER:
844 			if (hmep->hme_delay == 0) {
845 				hme_mii_write(hmep, HME_PHY_BMCR,
846 				    fdx | PHY_BMCR_100M);
847 			}
848 			break;
849 		default:
850 			break;
851 		}
852 		break;
853 	case HME_SPEED_10:
854 		switch (hmep->hme_transceiver) {
855 		case HME_INTERNAL_TRANSCEIVER:
856 			hme_mii_write(hmep, HME_PHY_BMCR, fdx);
857 			break;
858 		case HME_EXTERNAL_TRANSCEIVER:
859 			if (hmep->hme_delay == 0) {
860 				hme_mii_write(hmep, HME_PHY_BMCR, fdx);
861 			}
862 			break;
863 		default:
864 			break;
865 		}
866 		break;
867 	default:
868 		return (0);
869 	}
870 
871 	if (!hmep->hme_linkup_cnt) {  /* first time; select speed */
872 		(void) hme_mii_read(hmep, HME_PHY_BMSR, &stat);
873 		hmep->hme_linkup_cnt++;
874 		return (0);
875 	}
876 
877 read_status:
878 	hmep->hme_linkup_cnt++;
879 	(void) hme_mii_read(hmep, HME_PHY_BMSR, &stat);
880 	if (stat & PHY_BMSR_LNKSTS)
881 		return (1);
882 	else
883 		return (0);
884 }
885 
886 
887 #define	HME_PHYRST_PERIOD 600	/* 600 milliseconds, instead of 500 */
888 #define	HME_PDOWN_PERIOD 256	/* 256 millisecond power-down period to */
889 				/* ensure a good reset of the QSI PHY */
890 
891 static void
892 hme_reset_transceiver(struct hme *hmep)
893 {
894 	uint32_t	cfg;
895 	uint16_t	stat;
896 	uint16_t	anar;
897 	uint16_t	control;
898 	uint16_t	csc;
899 	int		n;
900 
901 	cfg = GET_MIFREG(mif_cfg);
902 
903 	if (hmep->hme_transceiver == HME_EXTERNAL_TRANSCEIVER) {
904 		/* Isolate the Internal Transceiver */
905 		PUT_MIFREG(mif_cfg, (cfg & ~HME_MIF_CFGPS));
906 		hmep->hme_phyad = HME_INTERNAL_PHYAD;
907 		hmep->hme_transceiver = HME_INTERNAL_TRANSCEIVER;
908 		hme_mii_write(hmep, HME_PHY_BMCR, (PHY_BMCR_ISOLATE |
909 		    PHY_BMCR_PWRDN | PHY_BMCR_LPBK));
910 		if (hme_mii_read(hmep, HME_PHY_BMCR, &control) == 1)
911 			goto start_again;
912 
913 		/* select the External transceiver */
914 		PUT_MIFREG(mif_cfg, (cfg | HME_MIF_CFGPS));
915 		hmep->hme_transceiver = HME_EXTERNAL_TRANSCEIVER;
916 		hmep->hme_phyad = HME_EXTERNAL_PHYAD;
917 
918 	} else if (cfg & HME_MIF_CFGM1) {
919 		/* Isolate the External transceiver, if present */
920 		PUT_MIFREG(mif_cfg, (cfg | HME_MIF_CFGPS));
921 		hmep->hme_phyad = HME_EXTERNAL_PHYAD;
922 		hmep->hme_transceiver = HME_EXTERNAL_TRANSCEIVER;
923 		hme_mii_write(hmep, HME_PHY_BMCR, (PHY_BMCR_ISOLATE |
924 		    PHY_BMCR_PWRDN | PHY_BMCR_LPBK));
925 		if (hme_mii_read(hmep, HME_PHY_BMCR, &control) == 1)
926 			goto start_again;
927 
928 		/* select the Internal transceiver */
929 		PUT_MIFREG(mif_cfg, (cfg & ~HME_MIF_CFGPS));
930 		hmep->hme_transceiver = HME_INTERNAL_TRANSCEIVER;
931 		hmep->hme_phyad = HME_INTERNAL_PHYAD;
932 	}
933 
934 	hme_mii_write(hmep, HME_PHY_BMCR, PHY_BMCR_PWRDN);
935 	drv_usecwait((clock_t)HME_PDOWN_PERIOD);
936 
937 	/*
938 	 * Now reset the transceiver.
939 	 */
940 	hme_mii_write(hmep, HME_PHY_BMCR, PHY_BMCR_RESET);
941 
942 	/*
943 	 * Check for transceiver reset completion.
944 	 */
945 	n = HME_PHYRST_PERIOD / HMEWAITPERIOD;
946 
947 	while (--n > 0) {
948 		if (hme_mii_read(hmep, HME_PHY_BMCR, &control) == 1) {
949 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, XCVR_MSG,
950 			    xcvr_no_mii_msg);
951 			goto start_again;
952 		}
953 		if ((control & PHY_BMCR_RESET) == 0)
954 			goto reset_issued;
955 		if (hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER)
956 			drv_usecwait((clock_t)HMEWAITPERIOD);
957 		else
958 			drv_usecwait((clock_t)(500 * HMEWAITPERIOD));
959 	}
960 	/*
961 	 * phy reset failure
962 	 */
963 	hmep->phyfail++;
964 	goto start_again;
965 
966 reset_issued:
967 
968 	/*
969 	 * Get the PHY id registers. We need this to implement work-arounds
970 	 * for bugs in transceivers which use the National DP83840 PHY chip.
971 	 * National should fix this in the next release.
972 	 */
973 
974 	(void) hme_mii_read(hmep, HME_PHY_BMSR, &stat);
975 	(void) hme_mii_read(hmep, HME_PHY_IDR1, &hmep->hme_idr1);
976 	(void) hme_mii_read(hmep, HME_PHY_IDR2, &hmep->hme_idr2);
977 	(void) hme_mii_read(hmep, HME_PHY_ANAR, &anar);
978 
979 	hme_init_xcvr_info(hmep);
980 
981 	hmep->hme_bmcr = control;
982 	hmep->hme_anar = anar;
983 	hmep->hme_bmsr = stat;
984 
985 	/*
986 	 * The strapping of AN0 and AN1 pins on DP83840 cannot select
987 	 * 10FDX, 100FDX and Auto-negotiation. So select it here for the
988 	 * Internal Transceiver.
989 	 */
990 	if (hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER) {
991 		anar = (PHY_ANAR_TXFDX | PHY_ANAR_10FDX |
992 		    PHY_ANAR_TX | PHY_ANAR_10 | PHY_SELECTOR);
993 	}
994 	/*
995 	 * Modify control and bmsr based on anar for Rev-C of DP83840.
996 	 */
997 	if (HME_DP83840) {
998 		n = 0;
999 		if (anar & PHY_ANAR_TXFDX) {
1000 			stat |= PHY_BMSR_100FDX;
1001 			n++;
1002 		} else
1003 			stat &= ~PHY_BMSR_100FDX;
1004 
1005 		if (anar & PHY_ANAR_TX) {
1006 			stat |= PHY_BMSR_100HDX;
1007 			n++;
1008 		} else
1009 			stat &= ~PHY_BMSR_100HDX;
1010 
1011 		if (anar & PHY_ANAR_10FDX) {
1012 			stat |= PHY_BMSR_10FDX;
1013 			n++;
1014 		} else
1015 			stat &= ~PHY_BMSR_10FDX;
1016 
1017 		if (anar & PHY_ANAR_10) {
1018 			stat |= PHY_BMSR_10HDX;
1019 			n++;
1020 		} else
1021 			stat &= ~PHY_BMSR_10HDX;
1022 
1023 		if (n == 1) { 	/* only one mode. disable auto-negotiation */
1024 			stat &= ~PHY_BMSR_ACFG;
1025 			control &= ~PHY_BMCR_ANE;
1026 		}
1027 		if (n) {
1028 			hmep->hme_bmsr = stat;
1029 			hmep->hme_bmcr = control;
1030 		}
1031 	}
1032 	hme_setup_link_default(hmep);
1033 	hme_setup_link_status(hmep);
1034 
1035 
1036 	/*
1037 	 * Place the Transceiver in normal operation mode
1038 	 */
1039 	hme_mii_write(hmep, HME_PHY_BMCR, (control & ~PHY_BMCR_ISOLATE));
1040 
1041 	/*
1042 	 * check if the transceiver is not in Isolate mode
1043 	 */
1044 	n = HME_PHYRST_PERIOD / HMEWAITPERIOD;
1045 
1046 	while (--n > 0) {
1047 		if (hme_mii_read(hmep, HME_PHY_BMCR, &control) == 1) {
1048 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, XCVR_MSG,
1049 			    xcvr_no_mii_msg);
1050 			goto start_again; /* Transceiver does not talk MII */
1051 		}
1052 		if ((control & PHY_BMCR_ISOLATE) == 0)
1053 			goto setconn;
1054 		drv_usecwait(HMEWAITPERIOD);
1055 	}
1056 	HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, XCVR_MSG,
1057 	    xcvr_isolate_msg);
1058 	goto start_again;	/* transceiver reset failure */
1059 
1060 setconn:
1061 
1062 	/*
1063 	 * Work-around for the late-collision problem with 100m cables.
1064 	 * National should fix this in the next release !
1065 	 */
1066 	if (HME_DP83840) {
1067 		(void) hme_mii_read(hmep, HME_PHY_CSC, &csc);
1068 
1069 		hme_mii_write(hmep, HME_PHY_CSC, (csc | PHY_CSCR_FCONN));
1070 	}
1071 
1072 	hmep->hme_linkcheck =		0;
1073 	hmep->hme_linkup =		0;
1074 	hme_setup_link_status(hmep);
1075 	hmep->hme_autoneg =		HME_HWAN_TRY;
1076 	hmep->hme_force_linkdown =	HME_FORCE_LINKDOWN;
1077 	hmep->hme_linkup_cnt =		0;
1078 	hmep->hme_delay =		0;
1079 	hme_setup_link_control(hmep);
1080 	hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
1081 
1082 	if (hmep->hme_mode == HME_FORCE_SPEED)
1083 		hme_force_speed(hmep);
1084 	else {
1085 		hmep->hme_linkup_10 = 	0;
1086 		hmep->hme_tryspeed =	HME_SPEED_100;
1087 		hmep->hme_ntries =	HME_NTRIES_LOW;
1088 		hmep->hme_nlasttries =	HME_NTRIES_LOW;
1089 		hme_try_speed(hmep);
1090 	}
1091 	return;
1092 
1093 start_again:
1094 	hme_start_timer(hmep, hme_check_link, HME_TICKS);
1095 }
1096 
1097 static void
1098 hme_check_transceiver(struct hme *hmep)
1099 {
1100 	uint32_t	cfgsav;
1101 	uint32_t 	cfg;
1102 	uint32_t 	stat;
1103 
1104 	/*
1105 	 * If MIF Polling is ON and the Internal transceiver is in use, just
1106 	 * check for the presence of the External Transceiver.
1107 	 * Otherwise:
1108 	 * First check to see what transceivers are out there.
1109 	 * If an external transceiver is present,
1110 	 * then use it, regardless of whether there is an Internal transceiver.
1111 	 * If an Internal transceiver is present and no external transceiver,
1112 	 * then use the Internal transceiver.
1113 	 * If there is no external transceiver and no Internal transceiver,
1114 	 * then something is wrong, so print an error message.
1115 	 */
1116 
1117 	cfgsav = GET_MIFREG(mif_cfg);
1118 
1119 	if (hmep->hme_polling_on) {
1120 
1121 		if (hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER) {
1122 			if ((cfgsav & HME_MIF_CFGM1) && !hme_param_use_intphy) {
1123 				hme_stop_mifpoll(hmep);
1124 				hmep->hme_phyad = HME_EXTERNAL_PHYAD;
1125 				hmep->hme_transceiver =
1126 				    HME_EXTERNAL_TRANSCEIVER;
1127 				PUT_MIFREG(mif_cfg, ((cfgsav & ~HME_MIF_CFGPE)
1128 				    | HME_MIF_CFGPS));
1129 			}
1130 		} else if (hmep->hme_transceiver == HME_EXTERNAL_TRANSCEIVER) {
1131 			stat = (GET_MIFREG(mif_bsts) >> 16);
1132 			if ((stat == 0x00) || (hme_param_use_intphy)) {
1133 
1134 				hme_stop_mifpoll(hmep);
1135 				hmep->hme_phyad = HME_INTERNAL_PHYAD;
1136 				hmep->hme_transceiver =
1137 				    HME_INTERNAL_TRANSCEIVER;
1138 				PUT_MIFREG(mif_cfg,
1139 				    (GET_MIFREG(mif_cfg) & ~HME_MIF_CFGPS));
1140 			}
1141 		}
1142 		CHECK_MIFREG();
1143 		return;
1144 	}
1145 
1146 	cfg = GET_MIFREG(mif_cfg);
1147 	if ((cfg & HME_MIF_CFGM1) && !hme_param_use_intphy) {
1148 		PUT_MIFREG(mif_cfg, (cfgsav | HME_MIF_CFGPS));
1149 		hmep->hme_phyad = HME_EXTERNAL_PHYAD;
1150 		hmep->hme_transceiver = HME_EXTERNAL_TRANSCEIVER;
1151 
1152 	} else if (cfg & HME_MIF_CFGM0) {  /* Internal Transceiver OK */
1153 		PUT_MIFREG(mif_cfg, (cfgsav & ~HME_MIF_CFGPS));
1154 		hmep->hme_phyad = HME_INTERNAL_PHYAD;
1155 		hmep->hme_transceiver = HME_INTERNAL_TRANSCEIVER;
1156 
1157 	} else {
1158 		hmep->hme_transceiver = HME_NO_TRANSCEIVER;
1159 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, XCVR_MSG, no_xcvr_msg);
1160 	}
1161 	CHECK_MIFREG();
1162 }
1163 
1164 static void
1165 hme_setup_link_default(struct hme *hmep)
1166 {
1167 	uint16_t	bmsr;
1168 
1169 	bmsr = hmep->hme_bmsr;
1170 	if (hme_param_autoneg & HME_NOTUSR)
1171 		hme_param_autoneg = HME_NOTUSR |
1172 		    ((bmsr & PHY_BMSR_ACFG) ? 1 : 0);
1173 	if (hme_param_anar_100T4 & HME_NOTUSR)
1174 		hme_param_anar_100T4 = HME_NOTUSR |
1175 		    ((bmsr & PHY_BMSR_100T4) ? 1 : 0);
1176 	if (hme_param_anar_100fdx & HME_NOTUSR)
1177 		hme_param_anar_100fdx = HME_NOTUSR |
1178 		    ((bmsr & PHY_BMSR_100FDX) ? 1 : 0);
1179 	if (hme_param_anar_100hdx & HME_NOTUSR)
1180 		hme_param_anar_100hdx = HME_NOTUSR |
1181 		    ((bmsr & PHY_BMSR_100HDX) ? 1 : 0);
1182 	if (hme_param_anar_10fdx & HME_NOTUSR)
1183 		hme_param_anar_10fdx = HME_NOTUSR |
1184 		    ((bmsr & PHY_BMSR_10FDX) ? 1 : 0);
1185 	if (hme_param_anar_10hdx & HME_NOTUSR)
1186 		hme_param_anar_10hdx = HME_NOTUSR |
1187 		    ((bmsr & PHY_BMSR_10HDX) ? 1 : 0);
1188 }
1189 
1190 static void
1191 hme_setup_link_status(struct hme *hmep)
1192 {
1193 	uint16_t	tmp;
1194 
1195 	if (hmep->hme_transceiver == HME_EXTERNAL_TRANSCEIVER)
1196 		hme_param_transceiver = 1;
1197 	else
1198 		hme_param_transceiver = 0;
1199 
1200 	tmp = hmep->hme_bmsr;
1201 	if (tmp & PHY_BMSR_ACFG)
1202 		hme_param_bmsr_ancap = 1;
1203 	else
1204 		hme_param_bmsr_ancap = 0;
1205 	if (tmp & PHY_BMSR_100T4)
1206 		hme_param_bmsr_100T4 = 1;
1207 	else
1208 		hme_param_bmsr_100T4 = 0;
1209 	if (tmp & PHY_BMSR_100FDX)
1210 		hme_param_bmsr_100fdx = 1;
1211 	else
1212 		hme_param_bmsr_100fdx = 0;
1213 	if (tmp & PHY_BMSR_100HDX)
1214 		hme_param_bmsr_100hdx = 1;
1215 	else
1216 		hme_param_bmsr_100hdx = 0;
1217 	if (tmp & PHY_BMSR_10FDX)
1218 		hme_param_bmsr_10fdx = 1;
1219 	else
1220 		hme_param_bmsr_10fdx = 0;
1221 	if (tmp & PHY_BMSR_10HDX)
1222 		hme_param_bmsr_10hdx = 1;
1223 	else
1224 		hme_param_bmsr_10hdx = 0;
1225 
1226 	if (hmep->hme_link_pulse_disabled) {
1227 		hme_param_linkup =	1;
1228 		hme_param_speed =	0;
1229 		hme_param_mode =	0;
1230 		hmep->hme_duplex =	LINK_DUPLEX_HALF;
1231 		mac_link_update(hmep->hme_mh, LINK_STATE_UP);
1232 		return;
1233 	}
1234 
1235 	if (!hmep->hme_linkup) {
1236 		hme_param_linkup =	0;
1237 		hmep->hme_duplex = LINK_DUPLEX_UNKNOWN;
1238 		mac_link_update(hmep->hme_mh, LINK_STATE_DOWN);
1239 		return;
1240 	}
1241 
1242 	hme_param_linkup = 1;
1243 
1244 	if (hmep->hme_fdx == HME_FULL_DUPLEX) {
1245 		hme_param_mode = 1;
1246 		hmep->hme_duplex = LINK_DUPLEX_FULL;
1247 	} else {
1248 		hme_param_mode = 0;
1249 		hmep->hme_duplex = LINK_DUPLEX_HALF;
1250 	}
1251 
1252 	mac_link_update(hmep->hme_mh, LINK_STATE_UP);
1253 
1254 	if (hmep->hme_mode == HME_FORCE_SPEED) {
1255 		if (hmep->hme_forcespeed == HME_SPEED_100)
1256 			hme_param_speed = 1;
1257 		else
1258 			hme_param_speed = 0;
1259 		return;
1260 	}
1261 	if (hmep->hme_tryspeed == HME_SPEED_100)
1262 		hme_param_speed = 1;
1263 	else
1264 		hme_param_speed = 0;
1265 
1266 
1267 	if (!(hmep->hme_aner & PHY_ANER_LPNW)) {
1268 		hme_param_aner_lpancap =	0;
1269 		hme_param_anlpar_100T4 =	0;
1270 		hme_param_anlpar_100fdx =	0;
1271 		hme_param_anlpar_100hdx =	0;
1272 		hme_param_anlpar_10fdx =	0;
1273 		hme_param_anlpar_10hdx =	0;
1274 		return;
1275 	}
1276 	hme_param_aner_lpancap = 1;
1277 	tmp = hmep->hme_anlpar;
1278 	if (tmp & PHY_ANLPAR_T4)
1279 		hme_param_anlpar_100T4 = 1;
1280 	else
1281 		hme_param_anlpar_100T4 = 0;
1282 	if (tmp & PHY_ANLPAR_TXFDX)
1283 		hme_param_anlpar_100fdx = 1;
1284 	else
1285 		hme_param_anlpar_100fdx = 0;
1286 	if (tmp & PHY_ANLPAR_TX)
1287 		hme_param_anlpar_100hdx = 1;
1288 	else
1289 		hme_param_anlpar_100hdx = 0;
1290 	if (tmp & PHY_ANLPAR_10FDX)
1291 		hme_param_anlpar_10fdx = 1;
1292 	else
1293 		hme_param_anlpar_10fdx = 0;
1294 	if (tmp & PHY_ANLPAR_10)
1295 		hme_param_anlpar_10hdx = 1;
1296 	else
1297 		hme_param_anlpar_10hdx = 0;
1298 }
1299 
1300 static void
1301 hme_setup_link_control(struct hme *hmep)
1302 {
1303 	uint16_t anar = PHY_SELECTOR;
1304 	uint32_t autoneg = ~HME_NOTUSR & hme_param_autoneg;
1305 	uint32_t anar_100T4 = ~HME_NOTUSR & hme_param_anar_100T4;
1306 	uint32_t anar_100fdx = ~HME_NOTUSR & hme_param_anar_100fdx;
1307 	uint32_t anar_100hdx = ~HME_NOTUSR & hme_param_anar_100hdx;
1308 	uint32_t anar_10fdx = ~HME_NOTUSR & hme_param_anar_10fdx;
1309 	uint32_t anar_10hdx = ~HME_NOTUSR & hme_param_anar_10hdx;
1310 
1311 	if (autoneg) {
1312 		hmep->hme_mode = HME_AUTO_SPEED;
1313 		hmep->hme_tryspeed = HME_SPEED_100;
1314 		if (anar_100T4)
1315 			anar |= PHY_ANAR_T4;
1316 		if (anar_100fdx)
1317 			anar |= PHY_ANAR_TXFDX;
1318 		if (anar_100hdx)
1319 			anar |= PHY_ANAR_TX;
1320 		if (anar_10fdx)
1321 			anar |= PHY_ANAR_10FDX;
1322 		if (anar_10hdx)
1323 			anar |= PHY_ANAR_10;
1324 		hmep->hme_anar = anar;
1325 	} else {
1326 		hmep->hme_mode = HME_FORCE_SPEED;
1327 		if (anar_100T4) {
1328 			hmep->hme_forcespeed = HME_SPEED_100;
1329 			hmep->hme_fdx = HME_HALF_DUPLEX;
1330 
1331 		} else if (anar_100fdx) {
1332 			/* 100fdx needs to be checked first for 100BaseFX */
1333 			hmep->hme_forcespeed = HME_SPEED_100;
1334 			hmep->hme_fdx = HME_FULL_DUPLEX;
1335 
1336 		} else if (anar_100hdx) {
1337 			hmep->hme_forcespeed = HME_SPEED_100;
1338 			hmep->hme_fdx = HME_HALF_DUPLEX;
1339 		} else if (anar_10hdx) {
1340 			/* 10hdx needs to be checked first for MII-AUI */
1341 			/* MII-AUI BugIds 1252776,4032280,4035106,4028558 */
1342 			hmep->hme_forcespeed = HME_SPEED_10;
1343 			hmep->hme_fdx = HME_HALF_DUPLEX;
1344 
1345 		} else if (anar_10fdx) {
1346 			hmep->hme_forcespeed = HME_SPEED_10;
1347 			hmep->hme_fdx = HME_FULL_DUPLEX;
1348 
1349 		} else {
1350 			hmep->hme_forcespeed = HME_SPEED_10;
1351 			hmep->hme_fdx = HME_HALF_DUPLEX;
1352 		}
1353 	}
1354 }
1355 
1356 /* Decide if transmitter went dead and reinitialize everything */
1357 static int hme_txhung_limit = 3;
1358 static int
1359 hme_check_txhung(struct hme *hmep)
1360 {
1361 	boolean_t status;
1362 
1363 	mutex_enter(&hmep->hme_xmitlock);
1364 	if (hmep->hme_flags & HMERUNNING)
1365 		hmereclaim(hmep);
1366 
1367 	/* Something needs to be sent out but it is not going out */
1368 	if ((hmep->hme_txindex != hmep->hme_txreclaim) &&
1369 	    (hmep->hme_opackets == hmep->hmesave.hme_opackets))
1370 		hmep->hme_txhung++;
1371 	else
1372 		hmep->hme_txhung = 0;
1373 
1374 	hmep->hmesave.hme_opackets = hmep->hme_opackets;
1375 
1376 	status = hmep->hme_txhung >= hme_txhung_limit;
1377 	mutex_exit(&hmep->hme_xmitlock);
1378 
1379 	return (status);
1380 }
1381 
1382 /*
1383  * 	hme_check_link()
1384  * Called as a result of HME_LINKCHECK_TIMER timeout, to poll for Transceiver
1385  * change or when a transceiver change has been detected by the hme_try_speed
1386  * function.
1387  * This function will also be called from the interrupt handler when polled mode
1388  * is used. Before calling this function the interrupt lock should be released
1389  * so that hmeinit() may be called.
1390  * Note that the hmeinit() function calls hme_select_speed() to set the link
1391  * speed and check for link status.
1392  */
1393 
1394 static void
1395 hme_check_link(void *arg)
1396 {
1397 	struct hme *hmep = arg;
1398 	uint16_t	stat;
1399 	uint_t 	temp;
1400 
1401 	hme_stop_timer(hmep);	/* acquire hme_linklock */
1402 
1403 	/*
1404 	 * This condition was added to work around
1405 	 * a problem with the Synoptics/Bay 28115 switch,
1406 	 * where the link is up but no packets
1407 	 * are being received. This can be checked using
1408 	 * ipackets, which in case of reception will
1409 	 * continue to increment after 'hmep->hme_iipackets'
1410 	 * has been made equal to it and the 'hme_check_link'
1411 	 * timer has expired. Note that this can also trigger
1412 	 * when there is simply no traffic on the net.
1413 	 * 'hmep->hme_ipackets' is incremented in hmeread()
1414 	 * for successfully received packets.
1415 	 */
1416 	if ((hmep->hme_flags & HMERUNNING) && (hmep->hme_linkup)) {
1417 		if (hmep->hme_ipackets != hmep->hme_iipackets)
1418 			/*
1419 			 * Receptions are occurring; set 'hmep->hme_iipackets'
1420 			 * to 'hmep->hme_ipackets' to monitor if receptions
1421 			 * occur during the next timeout interval.
1422 			 */
1423 			hmep->hme_iipackets = hmep->hme_ipackets;
1424 		else
1425 			/*
1426 			 * Receptions not occurring could be due to the
1427 			 * Synoptics problem; try switching off data
1428 			 * scrambling. That should bring up the link.
1429 			 */
1430 			hme_link_now_up(hmep);
1431 	}
1432 
1433 	if ((hmep->hme_flags & HMERUNNING) &&
1434 	    (hmep->hme_linkup) && (hme_check_txhung(hmep))) {
1435 
1436 		hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
1437 		(void) hmeinit(hmep);	/* To reset the transceiver and */
1438 					/* to init the interface */
1439 		return;
1440 	}
1441 
1442 	/*
1443 	 * check if the transceiver is the same.
1444 	 * init to be done if the external transceiver is
1445 	 * connected/disconnected
1446 	 */
1447 	temp = hmep->hme_transceiver; /* save the transceiver type */
1448 	hme_check_transceiver(hmep);
1449 	if ((temp != hmep->hme_transceiver) || (hmep->hme_linkup == 0)) {
1450 		if (temp != hmep->hme_transceiver) {
1451 			if (hmep->hme_transceiver == HME_EXTERNAL_TRANSCEIVER) {
1452 				HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN,
1453 				    XCVR_MSG, ext_xcvr_msg);
1454 			} else {
1455 				HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN,
1456 				    XCVR_MSG, int_xcvr_msg);
1457 			}
1458 		}
1459 		hmep->hme_linkcheck = 0;
1460 		hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
1461 		(void) hmeinit(hmep); /* To reset xcvr and init interface */
1462 		return;
1463 	}
1464 
1465 
1466 	if (hmep->hme_mifpoll_enable) {
1467 		stat = (GET_MIFREG(mif_bsts) >> 16);
1468 
1469 		CHECK_MIFREG(); /* Verify */
1470 
1471 		if (!hmep->hme_mifpoll_flag) {
1472 			if (stat & PHY_BMSR_LNKSTS) {
1473 				hme_start_timer(hmep, hme_check_link,
1474 				    HME_LINKCHECK_TIMER);
1475 				return;
1476 			}
1477 			hme_stop_mifpoll(hmep);
1478 
1479 			temp = (GET_MIFREG(mif_bsts) >> 16);
1480 		} else {
1481 			hmep->hme_mifpoll_flag = 0;
1482 		}
1483 	} else {
1484 		if (hme_mii_read(hmep, HME_PHY_BMSR, &stat) == 1) {
1485 		/* Transceiver does not talk mii */
1486 			hme_start_timer(hmep, hme_check_link,
1487 			    HME_LINKCHECK_TIMER);
1488 			return;
1489 		}
1490 
1491 		if (stat & PHY_BMSR_LNKSTS) {
1492 			hme_start_timer(hmep, hme_check_link,
1493 			    HME_LINKCHECK_TIMER);
1494 			return;
1495 		}
1496 	}
1497 
1498 	(void) hme_mii_read(hmep, HME_PHY_BMSR, &stat);
1499 
1500 	/*
1501 	 * The PHY may have automatically renegotiated link speed and mode.
1502 	 * Get the new link speed and mode.
1503 	 */
1504 	if ((stat & PHY_BMSR_LNKSTS) && hme_autoneg_enable) {
1505 		if (hmep->hme_mode == HME_AUTO_SPEED) {
1506 			(void) hme_get_autoinfo(hmep);
1507 			hme_setup_link_status(hmep);
1508 			hme_start_mifpoll(hmep);
1509 			if (hmep->hme_fdx != hmep->hme_macfdx) {
1510 				hme_start_timer(hmep, hme_check_link,
1511 				    HME_LINKCHECK_TIMER);
1512 				(void) hmeinit(hmep);
1513 				return;
1514 			}
1515 		}
1516 		hme_start_mifpoll(hmep);
1517 		hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
1518 		return;
1519 	}
1520 	/* Reset the PHY and bring up the link */
1521 	hme_reset_transceiver(hmep);
1522 }
1523 
1524 static void
1525 hme_init_xcvr_info(struct hme *hmep)
1526 {
1527 	uint16_t phy_id1, phy_id2;
1528 
1529 	(void) hme_mii_read(hmep, HME_PHY_IDR1, &phy_id1);
1530 	(void) hme_mii_read(hmep, HME_PHY_IDR2, &phy_id2);
1531 }
1532 
1533 /*
1534  * Disable link pulses for the Internal Transceiver
1535  */
1536 
1537 static void
1538 hme_disable_link_pulse(struct hme *hmep)
1539 {
1540 	uint16_t	nicr;
1541 
1542 	hme_mii_write(hmep, HME_PHY_BMCR, 0); /* force 10 Mbps */
1543 	(void) hme_mii_read(hmep, HME_PHY_NICR, &nicr);
1544 
1545 	hme_mii_write(hmep, HME_PHY_NICR, (nicr & ~PHY_NICR_LD));
1546 
1547 	hmep->hme_linkup = 1;
1548 	hmep->hme_linkcheck = 1;
1549 	hme_setup_link_status(hmep);
1550 	hme_start_mifpoll(hmep);
1551 	hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
1552 }
1553 
1554 static void
1555 hme_force_speed(void *arg)
1556 {
1557 	struct hme	*hmep = arg;
1558 	int		linkup;
1559 	uint_t		temp;
1560 	uint16_t	csc;
1561 
1562 	hme_stop_timer(hmep);
1563 	if (hmep->hme_fdx != hmep->hme_macfdx) {
1564 		hme_start_timer(hmep, hme_check_link, HME_TICKS*5);
1565 		return;
1566 	}
1567 	temp = hmep->hme_transceiver; /* save the transceiver type */
1568 	hme_check_transceiver(hmep);
1569 	if (temp != hmep->hme_transceiver) {
1570 		if (hmep->hme_transceiver == HME_EXTERNAL_TRANSCEIVER) {
1571 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, XCVR_MSG,
1572 			    ext_xcvr_msg);
1573 		} else {
1574 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, XCVR_MSG,
1575 			    int_xcvr_msg);
1576 		}
1577 		hme_start_timer(hmep, hme_check_link, HME_TICKS * 10);
1578 		return;
1579 	}
1580 
1581 	if ((hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER) &&
1582 	    (hmep->hme_link_pulse_disabled)) {
1583 		hmep->hme_forcespeed = HME_SPEED_10;
1584 		hme_disable_link_pulse(hmep);
1585 		return;
1586 	}
1587 
1588 	/*
1589 	 * To interoperate with auto-negotiation-capable systems
1590 	 * the link should be brought down for 1 second.
1591 	 * How to do this using only standard registers?
1592 	 */
1593 	if (HME_DP83840) {
1594 		if (hmep->hme_force_linkdown == HME_FORCE_LINKDOWN) {
1595 			hmep->hme_force_linkdown = HME_LINKDOWN_STARTED;
1596 			hme_mii_write(hmep, HME_PHY_BMCR, PHY_BMCR_100M);
1597 			(void) hme_mii_read(hmep, HME_PHY_CSC, &csc);
1598 			hme_mii_write(hmep, HME_PHY_CSC,
1599 			    (csc | PHY_CSCR_TXOFF));
1600 			hme_start_timer(hmep, hme_force_speed, 10 * HME_TICKS);
1601 			return;
1602 		} else if (hmep->hme_force_linkdown == HME_LINKDOWN_STARTED) {
1603 			(void) hme_mii_read(hmep, HME_PHY_CSC, &csc);
1604 			hme_mii_write(hmep, HME_PHY_CSC,
1605 			    (csc & ~PHY_CSCR_TXOFF));
1606 			hmep->hme_force_linkdown = HME_LINKDOWN_DONE;
1607 		}
1608 	} else {
1609 		if (hmep->hme_force_linkdown == HME_FORCE_LINKDOWN) {
1610 			hmep->hme_force_linkdown = HME_LINKDOWN_STARTED;
1611 			hme_mii_write(hmep, HME_PHY_BMCR, PHY_BMCR_LPBK);
1612 			hme_start_timer(hmep, hme_force_speed, 10 * HME_TICKS);
1613 			return;
1614 		} else if (hmep->hme_force_linkdown == HME_LINKDOWN_STARTED) {
1615 			hmep->hme_force_linkdown = HME_LINKDOWN_DONE;
1616 		}
1617 	}
1618 
1619 
1620 	linkup = hme_select_speed(hmep, hmep->hme_forcespeed);
1621 	if (hmep->hme_linkup_cnt == 1) {
1622 		hme_start_timer(hmep, hme_force_speed, SECOND(4));
1623 		return;
1624 	}
1625 	if (linkup) {
1626 
1627 		hmep->hme_linkup = 1;
1628 		hmep->hme_linkcheck = 1;
1629 		hmep->hme_ifspeed = hmep->hme_forcespeed;
1630 		hme_link_now_up(hmep);
1631 		hme_setup_link_status(hmep);
1632 		hme_start_mifpoll(hmep);
1633 		hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
1634 	} else {
1635 		hme_start_timer(hmep, hme_force_speed, HME_TICKS);
1636 	}
1637 }
1638 
1639 static void
1640 hme_get_autoinfo(struct hme *hmep)
1641 {
1642 	uint16_t	anar;
1643 	uint16_t	aner;
1644 	uint16_t	anlpar;
1645 	uint16_t	tmp;
1646 	uint16_t	ar;
1647 
1648 	(void) hme_mii_read(hmep, HME_PHY_ANER, &aner);
1649 	(void) hme_mii_read(hmep, HME_PHY_ANLPAR, &anlpar);
1650 	(void) hme_mii_read(hmep, HME_PHY_ANAR, &anar);
1651 
1652 	hmep->hme_anlpar = anlpar;
1653 	hmep->hme_aner = aner;
1654 
1655 	if (aner & PHY_ANER_LPNW) {
1656 
1657 		tmp = anar & anlpar;
1658 		if (tmp & PHY_ANAR_TXFDX) {
1659 			hmep->hme_tryspeed = HME_SPEED_100;
1660 			hmep->hme_fdx = HME_FULL_DUPLEX;
1661 		} else if (tmp & PHY_ANAR_TX) {
1662 			hmep->hme_tryspeed = HME_SPEED_100;
1663 			hmep->hme_fdx = HME_HALF_DUPLEX;
1664 		} else if (tmp & PHY_ANLPAR_10FDX) {
1665 			hmep->hme_tryspeed = HME_SPEED_10;
1666 			hmep->hme_fdx = HME_FULL_DUPLEX;
1667 		} else if (tmp & PHY_ANLPAR_10) {
1668 			hmep->hme_tryspeed = HME_SPEED_10;
1669 			hmep->hme_fdx = HME_HALF_DUPLEX;
1670 		} else {
1671 			if (HME_DP83840) {
1672 
1673 				hmep->hme_fdx = HME_HALF_DUPLEX;
1674 				(void) hme_mii_read(hmep, HME_PHY_AR, &ar);
1675 
1676 				if (ar & PHY_AR_SPEED10)
1677 					hmep->hme_tryspeed = HME_SPEED_10;
1678 				else
1679 					hmep->hme_tryspeed = HME_SPEED_100;
1680 			} else
1681 				HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN,
1682 				    AUTONEG_MSG, anar_not_set_msg);
1683 		}
1684 	} else {
1685 		hmep->hme_fdx = HME_HALF_DUPLEX;
1686 		if (anlpar & PHY_ANLPAR_TX)
1687 			hmep->hme_tryspeed = HME_SPEED_100;
1688 		else if (anlpar & PHY_ANLPAR_10)
1689 			hmep->hme_tryspeed = HME_SPEED_10;
1690 		else {
1691 			if (HME_DP83840) {
1692 
1693 				(void) hme_mii_read(hmep, HME_PHY_AR, &ar);
1694 
1695 				if (ar & PHY_AR_SPEED10)
1696 					hmep->hme_tryspeed = HME_SPEED_10;
1697 				else
1698 					hmep->hme_tryspeed = HME_SPEED_100;
1699 			} else
1700 				HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN,
1701 				    AUTONEG_MSG, par_detect_anar_not_set_msg);
1702 		}
1703 	}
1704 
1705 	hmep->hme_linkup = 1;
1706 	hmep->hme_linkcheck = 1;
1707 	hmep->hme_ifspeed = hmep->hme_tryspeed;
1708 	hme_link_now_up(hmep);
1709 }
1710 
1711 /*
1712  * Return 1 if the link is up or auto-negotiation is being tried, 0 otherwise.
1713  */
1714 
1715 static int
1716 hme_try_auto_negotiation(struct hme *hmep)
1717 {
1718 	uint16_t	stat;
1719 	uint16_t	aner;
1720 
1721 	if (hmep->hme_autoneg == HME_HWAN_TRY) {
1722 		/* auto negotiation not initiated */
1723 		(void) hme_mii_read(hmep, HME_PHY_BMSR, &stat);
1724 		if (hme_mii_read(hmep, HME_PHY_BMSR, &stat) == 1) {
1725 			/*
1726 			 * Transceiver does not talk mii
1727 			 */
1728 			goto hme_anfail;
1729 		}
1730 		if ((stat & PHY_BMSR_ACFG) == 0) { /* auto neg. not supported */
1731 
1732 			return (hmep->hme_autoneg = HME_HWAN_FAILED);
1733 		}
1734 
1735 		/*
1736 		 * Read ANER to clear status from previous operations.
1737 		 */
1738 		if (hme_mii_read(hmep, HME_PHY_ANER, &aner) == 1) {
1739 			/*
1740 			 * Transceiver does not talk mii
1741 			 */
1742 			goto hme_anfail;
1743 		}
1744 
1745 		hme_mii_write(hmep, HME_PHY_ANAR, hmep->hme_anar);
1746 		hme_mii_write(hmep, HME_PHY_BMCR, PHY_BMCR_ANE | PHY_BMCR_RAN);
1747 		/*
1748 		 * auto-negotiation initiated
1749 		 */
1750 		hmep->hme_delay = 0;
1751 		hme_start_timer(hmep, hme_try_speed, HME_TICKS);
1752 		return (hmep->hme_autoneg = HME_HWAN_INPROGRESS);
1753 		/*
1754 		 * auto-negotiation in progress
1755 		 */
1756 	}
1757 
1758 	/*
1759 	 * Auto-negotiation has been in progress. Wait for at
1760 	 * least 3000 ms.
1761 	 * Changed 8/28/97 to fix bug ID 4070989.
1762 	 */
1763 	if (hmep->hme_delay < 30) {
1764 		hmep->hme_delay++;
1765 		hme_start_timer(hmep, hme_try_speed, HME_TICKS);
1766 		return (hmep->hme_autoneg = HME_HWAN_INPROGRESS);
1767 	}
1768 
1769 	(void) hme_mii_read(hmep, HME_PHY_BMSR, &stat);
1770 	if (hme_mii_read(hmep, HME_PHY_BMSR, &stat) == 1) {
1771 		/*
1772 		 * Transceiver does not talk mii
1773 		 */
1774 		goto hme_anfail;
1775 	}
1776 
1777 	if ((stat & PHY_BMSR_ANC) == 0) {
1778 		/*
1779 		 * wait for a maximum of 5 seconds
1780 		 */
1781 		if (hmep->hme_delay < 50) {
1782 			hmep->hme_delay++;
1783 			hme_start_timer(hmep, hme_try_speed, HME_TICKS);
1784 			return (hmep->hme_autoneg = HME_HWAN_INPROGRESS);
1785 		}
1786 		if (HME_DP83840) {
1787 			(void) hme_mii_read(hmep, HME_PHY_ANER, &aner);
1788 			if (aner & PHY_ANER_MLF) {
1789 
1790 				return (hmep->hme_autoneg = HME_HWAN_FAILED);
1791 			}
1792 		}
1793 
1794 		goto hme_anfail;
1795 	}
1796 
1797 	(void) hme_mii_read(hmep, HME_PHY_ANER, &aner);
1798 	if (aner & PHY_ANER_MLF) {
1799 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, AUTONEG_MSG,
1800 		    par_detect_msg);
1801 		goto hme_anfail;
1802 	}
1803 
1804 	if (!(stat & PHY_BMSR_LNKSTS)) {
1805 		/*
1806 		 * wait for a maximum of 10 seconds
1807 		 */
1808 		if (hmep->hme_delay < 100) {
1809 			hmep->hme_delay++;
1810 			hme_start_timer(hmep, hme_try_speed, HME_TICKS);
1811 			return (hmep->hme_autoneg = HME_HWAN_INPROGRESS);
1812 		}
1813 		goto hme_anfail;
1814 	} else {
1815 		hmep->hme_bmsr |= (PHY_BMSR_LNKSTS);
1816 		hme_get_autoinfo(hmep);
1817 		hmep->hme_force_linkdown = HME_LINKDOWN_DONE;
1818 		hme_setup_link_status(hmep);
1819 		hme_start_mifpoll(hmep);
1820 		hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
1821 		if (hmep->hme_fdx != hmep->hme_macfdx) {
1822 			(void) hmeinit(hmep);
1823 		}
1824 		return (hmep->hme_autoneg = HME_HWAN_SUCCESFUL);
1825 	}
1826 
1827 hme_anfail:
1828 	hme_start_timer(hmep, hme_try_speed, HME_TICKS);
1829 	return (hmep->hme_autoneg = HME_HWAN_TRY);
1830 }
1831 
1832 /*
1833  * This function performs automatic speed detection.
1834  * The internal transceiver, based on the National DP83840 PHY,
1835  * supports auto-negotiation.  Some external transceivers do not;
1836  * in that case the software performs the speed detection itself.
1837  * The software brings the link down for about 2 seconds to force
1838  * the link partner to notice the speed change.
1839  * The software speed detection favors 100 Mbps: it tries 100 Mbps
1840  * for a longer duration (5 seconds) than 10 Mbps (2 seconds), and
1841  * even after the link has come up at 10 Mbps once, 100 Mbps is
1842  * tried again.  Only if the link does not come up at 100 Mbps is
1843  * the 10 Mbps speed retried.
1845  */
1846 static void
1847 hme_try_speed(void *arg)
1848 {
1849 	struct hme	*hmep = arg;
1850 	int		linkup;
1851 	uint_t		temp;
1852 
1853 	hme_stop_timer(hmep);
1854 	temp = hmep->hme_transceiver; /* save the transceiver type */
1855 	hme_check_transceiver(hmep);
1856 	if (temp != hmep->hme_transceiver) {
1857 		if (hmep->hme_transceiver == HME_EXTERNAL_TRANSCEIVER) {
1858 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, XCVR_MSG,
1859 			    ext_xcvr_msg);
1860 		} else {
1861 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, XCVR_MSG,
1862 			    int_xcvr_msg);
1863 		}
1864 		hme_start_timer(hmep, hme_check_link, 10 * HME_TICKS);
1865 		return;
1866 	}
1867 
1868 	if ((hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER) &&
1869 	    (hmep->hme_link_pulse_disabled)) {
1870 		hmep->hme_tryspeed = HME_SPEED_10;
1871 		hme_disable_link_pulse(hmep);
1872 		return;
1873 	}
1874 
1875 	if (hme_autoneg_enable && (hmep->hme_autoneg != HME_HWAN_FAILED)) {
1876 		if (hme_try_auto_negotiation(hmep) != HME_HWAN_FAILED)
1877 			return;	/* auto negotiation successful or being tried */
1878 	}
1879 
1880 	linkup = hme_select_speed(hmep, hmep->hme_tryspeed);
1881 	if (hmep->hme_linkup_cnt == 1) {
1882 		hme_start_timer(hmep, hme_try_speed, SECOND(1));
1883 		return;
1884 	}
1885 	if (linkup) {
1886 		switch (hmep->hme_tryspeed) {
1887 		case HME_SPEED_100:
1888 			if (hmep->hme_linkup_cnt == 4) {
1889 				hmep->hme_ntries =	HME_NTRIES_LOW;
1890 				hmep->hme_nlasttries =	HME_NTRIES_LOW;
1891 				hmep->hme_linkup = 1;
1892 				hmep->hme_linkcheck = 1;
1893 				hme_link_now_up(hmep);
1894 				hme_setup_link_status(hmep);
1895 				hme_start_mifpoll(hmep);
1896 				hme_start_timer(hmep, hme_check_link,
1897 				    HME_LINKCHECK_TIMER);
1898 				if (hmep->hme_fdx != hmep->hme_macfdx) {
1899 					(void) hmeinit(hmep);
1900 				}
1901 			} else
1902 				hme_start_timer(hmep, hme_try_speed, HME_TICKS);
1903 			break;
1904 		case HME_SPEED_10:
1905 			if (hmep->hme_linkup_cnt == 4) {
1906 				if (hmep->hme_linkup_10) {
1907 					hmep->hme_linkup_10 = 0;
1908 					hmep->hme_ntries = HME_NTRIES_LOW;
1909 					hmep->hme_nlasttries = HME_NTRIES_LOW;
1910 					hmep->hme_linkup = 1;
1911 					hmep->hme_linkcheck = 1;
1912 					hmep->hme_ifspeed = HME_SPEED_10;
1913 					hme_setup_link_status(hmep);
1914 					hme_start_mifpoll(hmep);
1915 					hme_start_timer(hmep, hme_check_link,
1916 					    HME_LINKCHECK_TIMER);
1917 					if (hmep->hme_fdx != hmep->hme_macfdx) {
1918 						(void) hmeinit(hmep);
1919 					}
1920 				} else {
1921 					hmep->hme_linkup_10 = 1;
1922 					hmep->hme_tryspeed = HME_SPEED_100;
1923 					hmep->hme_force_linkdown =
1924 					    HME_FORCE_LINKDOWN;
1925 					hmep->hme_linkup_cnt = 0;
1926 					hmep->hme_ntries = HME_NTRIES_LOW;
1927 					hmep->hme_nlasttries = HME_NTRIES_LOW;
1928 					hme_start_timer(hmep,
1929 					    hme_try_speed, HME_TICKS);
1930 				}
1931 
1932 			} else
1933 				hme_start_timer(hmep, hme_try_speed, HME_TICKS);
1934 			break;
1935 		default:
1936 			break;
1937 		}
1938 		return;
1939 	}
1940 
1941 	hmep->hme_ntries--;
1942 	hmep->hme_linkup_cnt = 0;
1943 	if (hmep->hme_ntries == 0) {
1944 		hmep->hme_force_linkdown = HME_FORCE_LINKDOWN;
1945 		switch (hmep->hme_tryspeed) {
1946 		case HME_SPEED_100:
1947 			hmep->hme_tryspeed = HME_SPEED_10;
1948 			hmep->hme_ntries = HME_NTRIES_LOW_10;
1949 			break;
1950 		case HME_SPEED_10:
1951 			hmep->hme_ntries = HME_NTRIES_LOW;
1952 			hmep->hme_tryspeed = HME_SPEED_100;
1953 			break;
1954 		default:
1955 			break;
1956 		}
1957 	}
1958 	hme_start_timer(hmep, hme_try_speed, HME_TICKS);
1959 }
1960 
1961 static void
1962 hme_link_now_up(struct hme *hmep)
1963 {
1964 	uint16_t	btxpc;
1965 	/*
1966 	 * Work-around for the scramble problem with QSI
1967 	 * chip and Synoptics 28115 switch.
1968 	 * Added by Interface Technologies Group (NPG) 8/28/1997.
1969 	 */
1970 	if ((HME_QS6612) && ((hmep->hme_tryspeed == HME_SPEED_100) ||
1971 	    (hmep->hme_forcespeed == HME_SPEED_100))) {
1972 		/*
1973 		 * The check of 'hmep->hme_forcespeed' is needed when
1974 		 * auto-negotiation has been disabled via the 'hme.conf'
1975 		 * file: in that case hmep->hme_tryspeed is not
1976 		 * initialized, so without this check the workaround
1977 		 * would not be applied.
1978 		 */
1979 		if (hme_mii_read(hmep, HME_PHY_BTXPC, &btxpc) == 0) {
1980 			hme_mii_write(hmep, HME_PHY_BTXPC,
1981 			    (btxpc | PHY_BTXPC_DSCRAM));
1982 			drv_usecwait(20);
1983 			hme_mii_write(hmep, HME_PHY_BTXPC, btxpc);
1984 		}
1985 	}
1986 }
1987 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<  LOADABLE ENTRIES  >>>>>>>>>>>>>>>>>>>>>>> */
1988 
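/*
 * Standard loadable module entry points.  mac_init_ops() must be called
 * before mod_install() and undone with mac_fini_ops() if the install
 * fails (or once mod_remove() succeeds in _fini()).
 */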
1989 int
1990 _init(void)
1991 {
1992 	int	status;
1993 
1994 	mac_init_ops(&hme_dev_ops, "hme");
1995 	if ((status = mod_install(&modlinkage)) != 0) {
1996 		mac_fini_ops(&hme_dev_ops);
1997 	}
1998 	return (status);
1999 }
2000 
2001 int
2002 _fini(void)
2003 {
2004 	int	status;
2005 
2006 	if ((status = mod_remove(&modlinkage)) == 0) {
2007 		mac_fini_ops(&hme_dev_ops);
2008 	}
2009 	return (status);
2010 }
2011 
2012 int
2013 _info(struct modinfo *modinfop)
2014 {
2015 	return (mod_info(&modlinkage, modinfop));
2016 }
2017 
2018 /*
2019  * ddi_dma_sync() a TMD or RMD descriptor.
2020  */
2021 #define	HMESYNCRMD(num, who)				\
2022 	(void) ddi_dma_sync(hmep->hme_rmd_dmah,		\
2023 	    (num * sizeof (struct hme_rmd)),		\
2024 	    sizeof (struct hme_rmd),			\
2025 	    who)
2026 
2027 #define	HMESYNCTMD(num, who)				\
2028 	(void) ddi_dma_sync(hmep->hme_tmd_dmah,		\
2029 	    (num * sizeof (struct hme_tmd)),		\
2030 	    sizeof (struct hme_tmd),			\
2031 	    who)
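/*
 * Illustrative usage: sync a receive descriptor before the CPU examines
 * it, e.g. HMESYNCRMD(rxptr, DDI_DMA_SYNC_FORKERNEL), and sync a
 * transmit descriptor after the CPU fills it in, e.g.
 * HMESYNCTMD(txptr, DDI_DMA_SYNC_FORDEV).
 */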
2032 
2033 /*
2034  * Ethernet broadcast address definition.
2035  */
2036 static	struct ether_addr	etherbroadcastaddr = {
2037 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2038 };
2039 
2040 /*
2041  * MIB II broadcast/multicast packets
2042  */
2043 #define	IS_BROADCAST(pkt) (bcmp(pkt, &etherbroadcastaddr, ETHERADDRL) == 0)
2044 #define	IS_MULTICAST(pkt) ((pkt[0] & 01) == 1)
2045 #define	BUMP_InNUcast(hmep, pkt) \
2046 	if (IS_MULTICAST(pkt)) {			       \
2047 		if (IS_BROADCAST(pkt)) {		       \
2048 			hmep->hme_brdcstrcv++;		       \
2049 		} else {				       \
2050 			hmep->hme_multircv++;		       \
2051 		}					       \
2052 	}
2053 #define	BUMP_OutNUcast(hmep, pkt) \
2054 	if (IS_MULTICAST(pkt)) {			       \
2055 		if (IS_BROADCAST(pkt)) {		       \
2056 			hmep->hme_brdcstxmt++;		       \
2057 		} else {				       \
2058 			hmep->hme_multixmt++;		       \
2059 		}					       \
2060 	}
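/*
 * Note that these macros expand to bare if statements, so an invocation
 * must not be followed directly by an else clause; wrapping the bodies
 * in the usual do { ... } while (0) idiom would make them
 * statement-safe.
 */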
2061 
2062 static int
2063 hme_create_prop_from_kw(dev_info_t *dip, char *vpdname, char *vpdstr)
2064 {
2065 	char propstr[80];
2066 	int i, needprop = 0;
2067 	struct ether_addr local_mac;
2068 
2069 	if (strcmp(vpdname, "NA") == 0) {
2070 		(void) strcpy(propstr, "local-mac-address");
2071 		needprop = 1;
2072 	} else if (strcmp(vpdname, "Z0") == 0) {
2073 		(void) strcpy(propstr, "model");
2074 		needprop = 1;
2075 	} else if (strcmp(vpdname, "Z1") == 0) {
2076 		(void) strcpy(propstr, "board-model");
2077 		needprop = 1;
2078 	}
2079 
2080 	if (needprop == 1) {
2081 
2082 		if (strcmp(propstr, "local-mac-address") == 0) {
2083 			for (i = 0; i < ETHERADDRL; i++)
2084 				local_mac.ether_addr_octet[i] =
2085 				    (uchar_t)vpdstr[i];
2086 			if (ddi_prop_create(DDI_DEV_T_NONE, dip,
2087 			    DDI_PROP_CANSLEEP, propstr,
2088 			    (char *)local_mac.ether_addr_octet, ETHERADDRL)
2089 			    != DDI_SUCCESS) {
2090 				return (DDI_FAILURE);
2091 			}
2092 		} else {
2093 			if (ddi_prop_create(DDI_DEV_T_NONE, dip,
2094 			    DDI_PROP_CANSLEEP, propstr, vpdstr,
2095 			    strlen(vpdstr)+1) != DDI_SUCCESS) {
2096 				return (DDI_FAILURE);
2097 			}
2098 		}
2099 	}
2100 	return (0);
2101 }
2102 
2103 /*
2104  * Get properties from old VPD
2105  * for PCI cards
2106  */
2107 static int
2108 hme_get_oldvpd_props(dev_info_t *dip, int vpd_base)
2109 {
2110 	struct hme *hmep;
2111 	int vpd_start, vpd_len, kw_start, kw_len, kw_ptr;
2112 	char kw_namestr[3];
2113 	char kw_fieldstr[256];
2114 	int i;
2115 
2116 	hmep = ddi_get_driver_private(dip);
2117 
2118 	vpd_start = vpd_base;
2119 
2120 	if ((GET_ROM8(&hmep->hme_romp[vpd_start]) & 0xff) != 0x90) {
2121 		return (1); /* error */
2122 	} else {
2123 		vpd_len = 9;
2124 	}
2125 
2126 	/* Get local-mac-address and the other keyword properties */
2127 	kw_start = vpd_start + 3; /* Location of 1st keyword */
2128 	kw_ptr = kw_start;
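	/*
	 * Each keyword entry is laid out as a two-character name, a
	 * one-byte data length, and then that many data bytes.
	 */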
2129 	while ((kw_ptr - kw_start) < vpd_len) { /* Get all keywords */
2130 		kw_namestr[0] = GET_ROM8(&hmep->hme_romp[kw_ptr]);
2131 		kw_namestr[1] = GET_ROM8(&hmep->hme_romp[kw_ptr+1]);
2132 		kw_namestr[2] = '\0';
2133 		kw_len = (int)(GET_ROM8(&hmep->hme_romp[kw_ptr+2]) & 0xff);
2134 		for (i = 0, kw_ptr += 3; i < kw_len; i++)
2135 			kw_fieldstr[i] = GET_ROM8(&hmep->hme_romp[kw_ptr+i]);
2136 		kw_fieldstr[i] = '\0';
2137 		if (hme_create_prop_from_kw(dip, kw_namestr, kw_fieldstr)) {
2138 			return (DDI_FAILURE);
2139 		}
2140 		kw_ptr += kw_len;
2141 	} /* next keyword */
2142 
2143 	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, "model",
2144 	    "SUNW,cheerio", strlen("SUNW,cheerio")+1) != DDI_SUCCESS) {
2145 		return (DDI_FAILURE);
2146 	}
2147 	return (0);
2148 }
2149 
2151 /*
2152  * Get properties from new VPD
2153  * for CompactPCI cards
2154  */
2155 static int
2156 hme_get_newvpd_props(dev_info_t *dip, int vpd_base)
2157 {
2158 	struct hme *hmep;
2159 	int vpd_start, vpd_len, kw_start, kw_len, kw_ptr;
2160 	char kw_namestr[3];
2161 	char kw_fieldstr[256];
2162 	int maxvpdsize, i;
2163 
2164 	hmep = ddi_get_driver_private(dip);
2165 
2166 	maxvpdsize = 1024; /* Real size not known until after it is read */
2167 
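	/*
	 * Each VPD resource begins with a 0x90 tag byte followed by a
	 * 16-bit little-endian length; the keyword entries inside use
	 * the same name/length/data layout as the old-style VPD.
	 */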
2168 	vpd_start = (int)((GET_ROM8(&(hmep->hme_romp[vpd_base+1])) & 0xff) |
2169 	    ((GET_ROM8(&hmep->hme_romp[vpd_base+2]) & 0xff) << 8)) +3;
2170 	vpd_start = vpd_base + vpd_start;
2171 	while (vpd_start < (vpd_base + maxvpdsize)) { /* Get all VPDs */
2172 		if ((GET_ROM8(&hmep->hme_romp[vpd_start]) & 0xff) != 0x90) {
2173 			break; /* no VPD found */
2174 		} else {
2175 			vpd_len = (int)((GET_ROM8(&hmep->hme_romp[vpd_start
2176 			    + 1]) & 0xff) | (GET_ROM8(&hmep->hme_romp[vpd_start
2177 			    + 2]) & 0xff) << 8);
2178 		}
2179 		/* Get all keywords in this VPD */
2180 		kw_start = vpd_start + 3; /* Location of 1st keyword */
2181 		kw_ptr = kw_start;
2182 		while ((kw_ptr - kw_start) < vpd_len) { /* Get all keywords */
2183 			kw_namestr[0] = GET_ROM8(&hmep->hme_romp[kw_ptr]);
2184 			kw_namestr[1] = GET_ROM8(&hmep->hme_romp[kw_ptr+1]);
2185 			kw_namestr[2] = '\0';
2186 			kw_len =
2187 			    (int)(GET_ROM8(&hmep->hme_romp[kw_ptr+2]) & 0xff);
2188 			for (i = 0, kw_ptr += 3; i < kw_len; i++)
2189 				kw_fieldstr[i] =
2190 				    GET_ROM8(&hmep->hme_romp[kw_ptr+i]);
2191 			kw_fieldstr[i] = '\0';
2192 			if (hme_create_prop_from_kw(dip, kw_namestr,
2193 			    kw_fieldstr)) {
2194 				return (DDI_FAILURE);
2195 			}
2196 			kw_ptr += kw_len;
2197 		} /* next keyword */
2198 		vpd_start += (vpd_len + 3);
2199 	} /* next VPD */
2200 	return (0);
2201 }
2202 
2204 /*
2205  * Get properties from VPD
2206  */
2207 static int
2208 hme_get_vpd_props(dev_info_t *dip)
2209 {
2210 	struct hme *hmep;
2211 	int v0, v1, vpd_base = 0;	/* remains 0 if "PCIR" is not found */
2212 	int i, epromsrchlimit;
2213 
2215 	hmep = ddi_get_driver_private(dip);
2216 
2217 	v0 = (int)(GET_ROM8(&(hmep->hme_romp[0])));
2218 	v1 = (int)(GET_ROM8(&(hmep->hme_romp[1])));
2219 	v0 = ((v0 & 0xff) << 8 | v1);
2220 
2221 	if ((v0 & 0xffff) != 0x55aa) {
2222 		cmn_err(CE_NOTE, "valid PCI prom not found");
2223 		return (1);
2224 	}
2225 
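	/*
	 * Scan the expansion ROM for the "PCIR" data structure; its
	 * 16-bit VPD pointer lives at offsets 8-9 of that structure.
	 */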
2226 	epromsrchlimit = 4096;
2227 	for (i = 2; i < epromsrchlimit; i++) {
2228 		/* "PCIR" */
2229 		if (((GET_ROM8(&(hmep->hme_romp[i])) & 0xff) == 'P') &&
2230 		    ((GET_ROM8(&(hmep->hme_romp[i+1])) & 0xff) == 'C') &&
2231 		    ((GET_ROM8(&(hmep->hme_romp[i+2])) & 0xff) == 'I') &&
2232 		    ((GET_ROM8(&(hmep->hme_romp[i+3])) & 0xff) == 'R')) {
2233 			vpd_base =
2234 			    (int)((GET_ROM8(&(hmep->hme_romp[i+8])) & 0xff) |
2235 			    (GET_ROM8(&(hmep->hme_romp[i+9])) & 0xff) << 8);
2236 			break; /* VPD pointer found */
2237 		}
2238 	}
2239 
2240 	/* No VPD found */
2241 	if (vpd_base == 0) {
2242 		cmn_err(CE_NOTE, "Vital Product Data pointer not found");
2243 		return (1);
2244 	}
2245 
2246 	v0 = (int)(GET_ROM8(&(hmep->hme_romp[vpd_base])));
2247 	if (v0 == 0x82) {
2248 		if (hme_get_newvpd_props(dip, vpd_base))
2249 			return (1);
2250 		return (0);
2251 	} else if (v0 == 0x90) {
2252 		/* If we are a SUNW,qfe card, look for the Nth "NA" descr */
2253 		if ((GET_ROM8(&hmep->hme_romp[vpd_base + 12])  != 0x79) &&
2254 		    GET_ROM8(&hmep->hme_romp[vpd_base + 4 * 12]) == 0x79) {
2255 			vpd_base += hmep->hme_devno * 12;
2256 		}
2257 		if (hme_get_oldvpd_props(dip, vpd_base))
2258 			return (1);
2259 		return (0);
2260 	} else
2261 		return (1);	/* unknown start byte in VPD */
2262 }
2263 
2264 /*
2265  * For x86, the BIOS doesn't map the PCI Rom register for the qfe
2266  * cards, so we have to extract it from the ebus bridge that is
2267  * function zero of the same device.  This is a bit of an ugly hack.
2268  * (The ebus bridge leaves the entire ROM mapped at base address
2269  * register 0x10.)
2270  */
2271 
2272 typedef struct {
2273 	struct hme 		*hmep;
2274 	dev_info_t		*parent;
2275 	uint8_t			bus, dev;
2276 	ddi_acc_handle_t	acch;
2277 	caddr_t			romp;
2278 } ebus_rom_t;
2279 
2280 static int
2281 hme_mapebusrom(dev_info_t *dip, void *arg)
2282 {
2283 	int		*regs;
2284 	unsigned	nregs;
2285 	int		reg;
2286 	ebus_rom_t	*rom = arg;
2287 	struct hme	*hmep = rom->hmep;
2288 
2289 	/*
2290 	 * We only want to look at our peers.  Skip our parent.
2291 	 */
2292 	if (dip == rom->parent) {
2293 		return (DDI_WALK_PRUNESIB);
2294 	}
2295 
2296 	if ((ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0,
2297 	    "reg", &regs, &nregs)) != DDI_PROP_SUCCESS) {
2298 		return (DDI_WALK_PRUNECHILD);
2299 	}
2300 
2301 	if (nregs < 1) {
2302 		ddi_prop_free(regs);
2303 		return (DDI_WALK_PRUNECHILD);
2304 	}
2305 	reg = regs[0];
2306 	ddi_prop_free(regs);
2307 
2308 	/*
2309 	 * Look for function 0 on our bus and device.  If the device doesn't
2310 	 * match, it might be an alternate peer, in which case we don't want
2311 	 * to examine any of its children.
2312 	 */
2313 	if ((PCI_REG_BUS_G(reg) != rom->bus) ||
2314 	    (PCI_REG_DEV_G(reg) != rom->dev) ||
2315 	    (PCI_REG_FUNC_G(reg) != 0)) {
2316 		return (DDI_WALK_PRUNECHILD);
2317 	}
2318 
2319 	(void) ddi_regs_map_setup(dip, 1, &rom->romp, 0, 0, &hmep->hme_dev_attr,
2320 	    &rom->acch);
2321 	/*
2322 	 * If we can't map the registers, the caller will notice that
2323 	 * the acch is NULL.
2324 	 */
2325 	return (DDI_WALK_TERMINATE);
2326 }
2327 
2328 static int
2329 hmeget_promebus(dev_info_t *dip)
2330 {
2331 	ebus_rom_t	rom;
2332 	int		*regs;
2333 	unsigned	nregs;
2334 	struct hme	*hmep;
2335 
2336 	hmep = ddi_get_driver_private(dip);
2337 
2338 	bzero(&rom, sizeof (rom));
2339 
2340 	/*
2341 	 * For x86, the BIOS doesn't map the PCI Rom register for the qfe
2342 	 * cards, so we have to extract it from the eBus bridge that is
2343 	 * function zero.  This is a bit of an ugly hack.
2344 	 */
2345 	if ((ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0,
2346 	    "reg", &regs, &nregs)) != DDI_PROP_SUCCESS) {
2347 		return (DDI_FAILURE);
2348 	}
2349 
2350 	if (nregs < 5) {
2351 		ddi_prop_free(regs);
2352 		return (DDI_FAILURE);
2353 	}
2354 	rom.hmep = hmep;
2355 	rom.bus = PCI_REG_BUS_G(regs[0]);
2356 	rom.dev = PCI_REG_DEV_G(regs[0]);
2357 	hmep->hme_devno = rom.dev;
2358 	rom.parent = ddi_get_parent(dip);
2359 
2360 	/*
2361 	 * The implementation of ddi_walk_devs says that we must not
2362 	 * be called during autoconfiguration.  However, upon close
2363 	 * examination, one will find the following is true:
2364 	 *
2365 	 * 1) since we're called at attach time,
2366 	 *    DEVI_BUSY_OWNED(ddi_get_parent(dip)) is implicitly true.
2367 	 *
2368 	 * 2) we carefully ensure that we prune siblings for all cases
2369 	 *    except our own device, so we can't wind up walking down
2370 	 *    a changing sibling pointer.
2371 	 *
2372 	 * 3) since we are attaching, our peers will already have their
2373 	 *    dev_info nodes on the tree... hence our own sibling pointer
2374 	 *    (and those of our siblings) will be stable.
2375 	 *
2376 	 * 4) also, because of #3, our parent's child pointer will be
2377 	 *    stable.
2378 	 *
2379 	 * So it should be safe to do this, because of our carefully
2380 	 * constructed restrictions.
2381 	 */
2382 	ddi_walk_devs(ddi_get_parent(dip), hme_mapebusrom, &rom);
2383 
2384 	if (rom.acch) {
2385 		hmep->hme_romh = rom.acch;
2386 		hmep->hme_romp = (unsigned char *)rom.romp;
2387 		return (DDI_SUCCESS);
2388 	}
2389 	return (DDI_FAILURE);
2390 }
2391 
2392 static int
2393 hmeget_promprops(dev_info_t *dip)
2394 {
2395 	struct hme *hmep;
2396 	int rom_bar;
2397 	ddi_acc_handle_t cfg_handle;
2398 	struct {
2399 		uint16_t vendorid;
2400 		uint16_t devid;
2401 		uint16_t command;
2402 		uint16_t status;
2403 		uint32_t junk1;
2404 		uint8_t cache_line;
2405 		uint8_t latency;
2406 		uint8_t header;
2407 		uint8_t bist;
2408 		uint32_t base;
2409 		uint32_t base14;
2410 		uint32_t base18;
2411 		uint32_t base1c;
2412 		uint32_t base20;
2413 		uint32_t base24;
2414 		uint32_t base28;
2415 		uint32_t base2c;
2416 		uint32_t base30;
2417 	} *cfg_ptr;
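	/*
	 * The struct above mirrors the start of a type 0 PCI
	 * configuration header; base30 corresponds to the expansion
	 * ROM base address register at config offset 0x30.
	 */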
2418 
2419 	hmep = ddi_get_driver_private(dip);
2420 
2422 	/*
2423 	 * map configuration space
2424 	 */
2425 	if (ddi_regs_map_setup(hmep->dip, 0, (caddr_t *)&cfg_ptr,
2426 	    0, 0, &hmep->hme_dev_attr, &cfg_handle)) {
2427 		return (DDI_FAILURE);
2428 	}
2429 
2430 	/*
2431 	 * Enable bus-master and memory accesses
2432 	 */
2433 	ddi_put16(cfg_handle, &cfg_ptr->command,
2434 	    PCI_COMM_SERR_ENABLE | PCI_COMM_PARITY_DETECT |
2435 	    PCI_COMM_MAE | PCI_COMM_ME);
2436 
2437 	/*
2438 	 * Enable rom accesses
2439 	 */
2440 	rom_bar = ddi_get32(cfg_handle, &cfg_ptr->base30);
2441 	ddi_put32(cfg_handle, &cfg_ptr->base30, rom_bar | 1);
2442 
2444 	if ((ddi_regs_map_setup(dip, 2, (caddr_t *)&(hmep->hme_romp), 0, 0,
2445 	    &hmep->hme_dev_attr, &hmep->hme_romh) != DDI_SUCCESS) &&
2446 	    (hmeget_promebus(dip) != DDI_SUCCESS)) {
2447 
2448 		if (cfg_ptr)
2449 			ddi_regs_map_free(&cfg_handle);
2450 		return (DDI_FAILURE);
2451 	} else {
2452 		if (hme_get_vpd_props(dip)) {
2453 			/* unwind the mappings taken above */
2454 			if (hmep->hme_romp)
				ddi_regs_map_free(&hmep->hme_romh);
			if (cfg_ptr)
				ddi_regs_map_free(&cfg_handle);
			return (DDI_FAILURE);
		}
	}
2455 	if (hmep->hme_romp)
2456 		ddi_regs_map_free(&hmep->hme_romh);
2457 	if (cfg_ptr)
2458 		ddi_regs_map_free(&cfg_handle);
2459 	return (DDI_SUCCESS);
2461 }
2462 
2463 static void
2464 hmeget_hm_rev_property(struct hme *hmep)
2465 {
2466 	int	hm_rev;
2467 
2469 	hm_rev = hmep->asic_rev;
2470 	switch (hm_rev) {
2471 	case HME_2P1_REVID:
2472 	case HME_2P1_REVID_OBP:
2473 		HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
2474 		    "SBus 2.1 Found (Rev Id = %x)", hm_rev);
2475 		hmep->hme_mifpoll_enable = 1;
2476 		hmep->hme_frame_enable = 1;
2477 		break;
2478 
2479 	case HME_2P0_REVID:
2480 		HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
2481 		    "SBus 2.0 Found (Rev Id = %x)", hm_rev);
2482 		break;
2483 
2484 	case HME_1C0_REVID:
2485 		HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
2486 		    "PCI IO 1.0 Found (Rev Id = %x)", hm_rev);
2487 		break;
2488 
2489 	default:
2490 		HME_FAULT_MSG3(hmep, SEVERITY_HIGH, DISPLAY_MSG,
2491 		    "%s (Rev Id = %x) Found",
2492 		    (hm_rev == HME_2C0_REVID) ? "PCI IO 2.0" : "Sbus", hm_rev);
2493 		hmep->hme_mifpoll_enable = 1;
2494 		hmep->hme_frame_enable = 1;
2495 		hmep->hme_lance_mode_enable = 1;
2496 		hmep->hme_rxcv_enable = 1;
2497 		break;
2498 	}
2499 }
2500 
2501 /*
2502  * Interface exists: make available by filling in network interface
2503  * record.  System will initialize the interface when it is ready
2504  * to accept packets.
2505  */
2506 int
2507 hmeattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2508 {
2509 	struct hme *hmep;
2510 	mac_register_t *macp = NULL;
2511 	int 	regno;
2512 	int hm_rev = 0;
2513 	int prop_len = sizeof (int);
2514 	ddi_acc_handle_t cfg_handle;
2515 	struct {
2516 		uint16_t vendorid;
2517 		uint16_t devid;
2518 		uint16_t command;
2519 		uint16_t status;
2520 		uint8_t revid;
2521 		uint8_t j1;
2522 		uint16_t j2;
2523 	} *cfg_ptr;
2524 
2525 	switch (cmd) {
2526 	case DDI_ATTACH:
2527 		break;
2528 
2529 	case DDI_RESUME:
2530 		if ((hmep = ddi_get_driver_private(dip)) == NULL)
2531 			return (DDI_FAILURE);
2532 
2533 		hmep->hme_flags &= ~HMESUSPENDED;
2534 		hmep->hme_linkcheck = 0;
2535 
2536 		if (hmep->hme_started)
2537 			(void) hmeinit(hmep);
2538 		return (DDI_SUCCESS);
2539 
2540 	default:
2541 		return (DDI_FAILURE);
2542 	}
2543 
2544 	/*
2545 	 * Allocate soft device data structure
2546 	 */
2547 	hmep = kmem_zalloc(sizeof (*hmep), KM_SLEEP);
2548 
2549 	/*
2550 	 * Might as well set up elements of data structure
2551 	 */
2552 	hmep->dip =		dip;
2553 	hmep->instance = 	ddi_get_instance(dip);
2554 	hmep->pagesize =	ddi_ptob(dip, (ulong_t)1); /* IOMMU PSize */
2555 
2556 	/*
2557 	 *  Might as well setup the driver private
2558 	 * structure as part of the dip.
2559 	 */
2560 	ddi_set_driver_private(dip, hmep);
2561 
2562 	/*
2563 	 * Reject this device if it's in a slave-only slot.
2564 	 */
2565 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
2566 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
2567 		    "Dev not used - dev in slave only slot");
2568 		goto error_state;
2569 	}
2570 
2571 	/*
2572 	 * Map in the device registers.
2573 	 *
2574 	 * Reg # 0 is the Global register set
2575 	 * Reg # 1 is the ETX register set
2576 	 * Reg # 2 is the ERX register set
2577 	 * Reg # 3 is the BigMAC register set.
2578 	 * Reg # 4 is the MIF register set
2579 	 */
2580 	if (ddi_dev_nregs(dip, &regno) != (DDI_SUCCESS)) {
2581 		HME_FAULT_MSG2(hmep, SEVERITY_HIGH, INIT_MSG,
2582 		    ddi_nregs_fail_msg, regno);
2583 		goto error_state;
2584 	}
2585 
2586 	switch (regno) {
2587 	case 5:
2588 		hmep->hme_cheerio_mode = 0;
2589 		break;
2590 	case 2:
2591 	case 3: /* for hot swap/plug, there will be 3 entries in "reg" prop */
2592 		hmep->hme_cheerio_mode = 1;
2593 		break;
2594 	default:
2595 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2596 		    bad_num_regs_msg);
2597 		goto error_state;
2598 	}
2599 
2600 	/* Initialize device attributes structure */
2601 	hmep->hme_dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2602 
2603 	if (hmep->hme_cheerio_mode)
2604 		hmep->hme_dev_attr.devacc_attr_endian_flags =
2605 		    DDI_STRUCTURE_LE_ACC;
2606 	else
2607 		hmep->hme_dev_attr.devacc_attr_endian_flags =
2608 		    DDI_STRUCTURE_BE_ACC;
2609 
2610 	hmep->hme_dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2611 
2612 	if (hmep->hme_cheerio_mode) {
2613 		uint8_t		oldLT;
2614 		uint8_t		newLT = 0;
2615 		dev_info_t	*pdip;
2616 		const char	*pdrvname;
2617 
2618 		/*
2619 		 * Map the PCI config space
2620 		 */
2621 		if (pci_config_setup(dip, &hmep->pci_config_handle) !=
2622 		    DDI_SUCCESS) {
2623 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
2624 			    "pci_config_setup() failed..");
2625 			goto error_state;
2626 		}
2627 
2628 		if (ddi_regs_map_setup(dip, 1,
2629 		    (caddr_t *)&(hmep->hme_globregp), 0, 0,
2630 		    &hmep->hme_dev_attr, &hmep->hme_globregh)) {
2631 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
2632 			    mregs_4global_reg_fail_msg);
2633 			goto error_unmap;
2634 		}
2635 		hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
2636 		    hmep->hme_mifregh = hmep->hme_globregh;
2637 
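		/*
		 * The PCI (Cheerio) variant exposes every register set
		 * through a single BAR; the ETX, ERX, BigMAC and MIF
		 * blocks sit at fixed offsets from the global registers.
		 */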
2638 		hmep->hme_etxregp =
2639 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x2000);
2640 		hmep->hme_erxregp =
2641 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x4000);
2642 		hmep->hme_bmacregp =
2643 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x6000);
2644 		hmep->hme_mifregp =
2645 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x7000);
2646 
2647 		/*
2648 		 * Get parent pci bridge info.
2649 		 */
2650 		pdip = ddi_get_parent(dip);
2651 		pdrvname = ddi_driver_name(pdip);
2652 
2653 		oldLT = pci_config_get8(hmep->pci_config_handle,
2654 		    PCI_CONF_LATENCY_TIMER);
2655 		/*
2656 		 * Honor value set in /etc/system
2657 		 * "set hme:pci_latency_timer=0xYY"
2658 		 */
2659 		if (pci_latency_timer)
2660 			newLT = pci_latency_timer;
2661 		/*
2662 		 * Modify LT for simba
2663 		 */
2664 		else if (strcmp("simba", pdrvname) == 0)
2665 			newLT = 0xf0;
2666 		/*
2667 		 * Ensure a minimum cheerio latency timer of 0x50.
2668 		 * OBP or the PCI bridge should normally set this value
2669 		 * based on cheerio's min_grant: at 33 MHz,
2670 		 * min_grant * 8 = 0xa * 0x8 = 0x50.
2671 		 * Some systems set the cheerio LT to 0x40.
2672 		 */
2673 		else if (oldLT < 0x40)
2674 			newLT = 0x50;
2675 
2676 		/*
2677 		 * Now program cheerio's pci latency timer with newLT
2678 		 */
2679 		if (newLT)
2680 			pci_config_put8(hmep->pci_config_handle,
2681 			    PCI_CONF_LATENCY_TIMER, (uchar_t)newLT);
2682 	} else { /* Map register sets */
2683 		if (ddi_regs_map_setup(dip, 0,
2684 		    (caddr_t *)&(hmep->hme_globregp), 0, 0,
2685 		    &hmep->hme_dev_attr, &hmep->hme_globregh)) {
2686 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
2687 			    mregs_4global_reg_fail_msg);
2688 			goto error_state;
2689 		}
2690 		if (ddi_regs_map_setup(dip, 1,
2691 		    (caddr_t *)&(hmep->hme_etxregp), 0, 0,
2692 		    &hmep->hme_dev_attr, &hmep->hme_etxregh)) {
2693 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
2694 			    mregs_4etx_reg_fail_msg);
2695 			goto error_unmap;
2696 		}
2697 		if (ddi_regs_map_setup(dip, 2,
2698 		    (caddr_t *)&(hmep->hme_erxregp), 0, 0,
2699 		    &hmep->hme_dev_attr, &hmep->hme_erxregh)) {
2700 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
2701 			    mregs_4erx_reg_fail_msg);
2702 			goto error_unmap;
2703 		}
2704 		if (ddi_regs_map_setup(dip, 3,
2705 		    (caddr_t *)&(hmep->hme_bmacregp), 0, 0,
2706 		    &hmep->hme_dev_attr, &hmep->hme_bmacregh)) {
2707 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
2708 			    mregs_4bmac_reg_fail_msg);
2709 			goto error_unmap;
2710 		}
2711 
2712 		if (ddi_regs_map_setup(dip, 4,
2713 		    (caddr_t *)&(hmep->hme_mifregp), 0, 0,
2714 		    &hmep->hme_dev_attr, &hmep->hme_mifregh)) {
2715 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
2716 			    mregs_4mif_reg_fail_msg);
2717 			goto error_unmap;
2718 		}
2719 	} /* Endif cheerio_mode */
2720 
2721 	/*
2722 	 * Based on the hm-rev, set some capabilities
2723 	 * Set up default capabilities for HM 2.0
2724 	 */
2725 	hmep->hme_mifpoll_enable = 0;
2726 	hmep->hme_frame_enable = 0;
2727 	hmep->hme_lance_mode_enable = 0;
2728 	hmep->hme_rxcv_enable = 0;
2729 
2730 	/* NEW routine to get the properties */
2731 
2732 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, hmep->dip, 0, "hm-rev",
2733 	    (caddr_t)&hm_rev, &prop_len) == DDI_PROP_SUCCESS) {
2734 
2735 		hmep->asic_rev = hm_rev;
2736 		hmeget_hm_rev_property(hmep);
2737 	} else {
2738 		 * The hm-rev property was not found, so this is a case of hot
2739 		 * insertion of a card whose fcode was never interpreted.  Get
2740 		 * the revision from the revid in config space after mapping it.
2741 		 * Get it from revid in config space after mapping it.
2742 		 */
2743 		if (ddi_regs_map_setup(hmep->dip, 0, (caddr_t *)&cfg_ptr,
2744 		    0, 0, &hmep->hme_dev_attr, &cfg_handle)) {
2745 			goto error_unmap;
2746 		}
2747 		/*
2748 		 * Since this is a cheerio-based PCI card, we write 0xC in the
2749 		 * top 4 bits (4-7) of hm-rev and retain the bottom 4 bits (0-3)
2750 		 * for the Cheerio version (1.0 or 2.0 = 0xC0 or 0xC1).
2751 		 */
2752 		hm_rev = ddi_get8(cfg_handle, &cfg_ptr->revid);
2753 		hm_rev = HME_1C0_REVID | (hm_rev & HME_REV_VERS_MASK);
2754 		hmep->asic_rev = hm_rev;
2755 		if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
2756 		    "hm-rev", (caddr_t)&hm_rev, sizeof (hm_rev)) !=
2757 		    DDI_SUCCESS) {
2758 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, AUTOCONFIG_MSG,
2759 			    "hmeattach: ddi_prop_create error for hm_rev");
2760 		}
2761 		ddi_regs_map_free(&cfg_handle);
2762 
2763 		hmeget_hm_rev_property(hmep);
2764 
2765 		/* get info via VPD */
2766 		if (hmeget_promprops(dip) != DDI_SUCCESS) {
2767 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, AUTOCONFIG_MSG,
2768 			    "hmeattach: no promprops");
2769 		}
2770 	}
2771 
2772 	if (!hme_mifpoll_enable)
2773 		hmep->hme_mifpoll_enable = 0;
2774 
2775 	if (ddi_intr_hilevel(dip, 0)) {
2776 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, NFATAL_ERR_MSG,
2777 		    " high-level interrupts are not supported");
2778 		goto error_unmap;
2779 	}
2780 
2781 	/*
2782 	 * Get intr. block cookie so that mutex locks can be initialized.
2783 	 */
2784 	if (ddi_get_iblock_cookie(dip, 0, &hmep->hme_cookie) != DDI_SUCCESS)
2785 		goto error_unmap;
2786 
2787 	/*
2788 	 * Initialize mutex's for this device.
2789 	 */
2790 	mutex_init(&hmep->hme_xmitlock, NULL, MUTEX_DRIVER, hmep->hme_cookie);
2791 	mutex_init(&hmep->hme_intrlock, NULL, MUTEX_DRIVER, hmep->hme_cookie);
2792 	mutex_init(&hmep->hme_linklock, NULL, MUTEX_DRIVER, hmep->hme_cookie);
2793 
2794 	/*
2795 	 * Quiesce the hardware.
2796 	 */
2797 	(void) hmestop(hmep);
2798 
2799 	/*
2800 	 * Add interrupt to system
2801 	 */
2802 	if (ddi_add_intr(dip, 0, (ddi_iblock_cookie_t *)NULL,
2803 	    (ddi_idevice_cookie_t *)NULL, hmeintr, (caddr_t)hmep)) {
2804 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
2805 		    add_intr_fail_msg);
2806 		goto error_mutex;
2807 	}
2808 
2809 	/*
2810 	 * Set up the ethernet mac address.
2811 	 */
2812 	hme_setup_mac_address(hmep, dip);
2813 
2814 	if (!hmeinit_xfer_params(hmep))
2815 		goto error_intr;
2816 
2817 	if (hmeburstsizes(hmep) == DDI_FAILURE) {
2818 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG, burst_size_msg);
2819 		goto error_intr;
2820 	}
2821 
2822 	if (hmeallocthings(hmep) != DDI_SUCCESS) {
2823 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
2824 		    "resource allocation failed");
2825 		goto error_intr;
2826 	}
2827 
2828 	if (hmeallocbufs(hmep) != DDI_SUCCESS) {
2829 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
2830 		    "buffer allocation failed");
2831 		goto error_intr;
2832 	}
2833 
2834 	hmestatinit(hmep);
2835 
2836 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
2837 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
2838 		    "mac_alloc failed");
2839 		goto error_intr;
2840 	}
2841 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2842 	macp->m_driver = hmep;
2843 	macp->m_dip = dip;
2844 	macp->m_src_addr = hmep->hme_ouraddr.ether_addr_octet;
2845 	macp->m_callbacks = &hme_m_callbacks;
2846 	macp->m_min_sdu = 0;
2847 	macp->m_max_sdu = ETHERMTU;
2848 	macp->m_margin = VLAN_TAGSZ;
2849 	if (mac_register(macp, &hmep->hme_mh) != 0) {
2850 		mac_free(macp);
2851 		goto error_intr;
2852 	}
2853 
2854 	mac_free(macp);
2855 
2856 	ddi_report_dev(dip);
2857 	return (DDI_SUCCESS);
2858 
2859 	/*
2860 	 * Failure Exit
2861 	 */
2862 
2863 error_intr:
2864 	if (hmep->hme_cookie)
2865 		ddi_remove_intr(dip, 0, (ddi_iblock_cookie_t)0);
2866 
2867 error_mutex:
2868 	mutex_destroy(&hmep->hme_xmitlock);
2869 	mutex_destroy(&hmep->hme_intrlock);
2870 	mutex_destroy(&hmep->hme_linklock);
2871 
2872 error_unmap:
2873 	if (hmep->hme_globregh)
2874 		ddi_regs_map_free(&hmep->hme_globregh);
2875 	if (hmep->hme_cheerio_mode == 0) {
2876 		if (hmep->hme_etxregh)
2877 			ddi_regs_map_free(&hmep->hme_etxregh);
2878 		if (hmep->hme_erxregh)
2879 			ddi_regs_map_free(&hmep->hme_erxregh);
2880 		if (hmep->hme_bmacregh)
2881 			ddi_regs_map_free(&hmep->hme_bmacregh);
2882 		if (hmep->hme_mifregh)
2883 			ddi_regs_map_free(&hmep->hme_mifregh);
2884 	} else {
2885 		if (hmep->pci_config_handle)
2886 			(void) pci_config_teardown(&hmep->pci_config_handle);
2887 		hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
2888 		    hmep->hme_mifregh = hmep->hme_globregh = NULL;
2889 	}
2890 
2891 error_state:
2892 	hmefreethings(hmep);
2893 	hmefreebufs(hmep);
2894 
2895 	if (hmep) {
2896 		kmem_free((caddr_t)hmep, sizeof (*hmep));
2897 		ddi_set_driver_private(dip, NULL);
2898 	}
2899 
2900 	return (DDI_FAILURE);
2901 }
2902 
2903 int
2904 hmedetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2905 {
2906 	struct hme *hmep;
2907 
2908 	if ((hmep = ddi_get_driver_private(dip)) == NULL)
2909 		return (DDI_FAILURE);
2910 
2911 	switch (cmd) {
2912 	case DDI_DETACH:
2913 		break;
2914 
2915 	case DDI_SUSPEND:
2916 		hmep->hme_flags |= HMESUSPENDED;
2917 		hmeuninit(hmep);
2918 		return (DDI_SUCCESS);
2919 
2920 	default:
2921 		return (DDI_FAILURE);
2922 	}
2923 
2925 	if (mac_unregister(hmep->hme_mh) != 0) {
2926 		return (DDI_FAILURE);
2927 	}
2928 
2929 	/*
2930 	 * Make the driver quiescent; we don't want a failure here to
2931 	 * prevent the detach.  Note that this should be redundant,
2932 	 * since mac_stop should already have called hmeuninit().
2933 	 */
2934 	if (!(hmep->hme_flags & HMESUSPENDED)) {
2935 		(void) hmestop(hmep);
2936 	}
2937 
2938 	/*
2939 	 * Remove instance of the intr
2940 	 */
2941 	ddi_remove_intr(dip, 0, (ddi_iblock_cookie_t)0);
2942 
2943 	/*
2944 	 * Unregister kstats.
2945 	 */
2946 	if (hmep->hme_ksp != NULL)
2947 		kstat_delete(hmep->hme_ksp);
2948 	if (hmep->hme_intrstats != NULL)
2949 		kstat_delete(hmep->hme_intrstats);
2950 
2951 	hmep->hme_ksp = NULL;
2952 	hmep->hme_intrstats = NULL;
2953 
2954 	/*
2955 	 * Stop asynchronous timer events.
2956 	 */
2957 	hme_stop_timer(hmep);
2958 	mutex_exit(&hmep->hme_linklock);
2959 
2960 	/*
2961 	 * Destroy all mutexes and data structures allocated during
2962 	 * attach time.
2963 	 *
2964 	 * Note: at this time we should be the only thread accessing
2965 	 * the structures for this instance.
2966 	 */
2967 
2968 	if (hmep->hme_globregh)
2969 		ddi_regs_map_free(&hmep->hme_globregh);
2970 	if (hmep->hme_cheerio_mode == 0) {
2971 		if (hmep->hme_etxregh)
2972 			ddi_regs_map_free(&hmep->hme_etxregh);
2973 		if (hmep->hme_erxregh)
2974 			ddi_regs_map_free(&hmep->hme_erxregh);
2975 		if (hmep->hme_bmacregh)
2976 			ddi_regs_map_free(&hmep->hme_bmacregh);
2977 		if (hmep->hme_mifregh)
2978 			ddi_regs_map_free(&hmep->hme_mifregh);
2979 	} else {
2980 		if (hmep->pci_config_handle)
2981 			(void) pci_config_teardown(&hmep->pci_config_handle);
2982 		hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
2983 		    hmep->hme_mifregh = hmep->hme_globregh = NULL;
2984 	}
2985 
2986 	mutex_destroy(&hmep->hme_xmitlock);
2987 	mutex_destroy(&hmep->hme_intrlock);
2988 	mutex_destroy(&hmep->hme_linklock);
2989 
2990 	hmefreethings(hmep);
2991 	hmefreebufs(hmep);
2992 
2993 	hme_param_cleanup(hmep);
2994 
2995 	ddi_set_driver_private(dip, NULL);
2996 	kmem_free(hmep, sizeof (struct hme));
2997 
2998 	return (DDI_SUCCESS);
2999 }
3000 
3001 int
3002 hmequiesce(dev_info_t *dip)
3003 {
3004 	struct hme *hmep;
3005 
3006 	if ((hmep = ddi_get_driver_private(dip)) == NULL)
3007 		return (DDI_FAILURE);
3008 
3009 	hme_stop_mifpoll(hmep);
3010 	(void) hmestop(hmep);
3011 	return (DDI_SUCCESS);
3012 }
3013 
3014 static boolean_t
3015 hmeinit_xfer_params(struct hme *hmep)
3016 {
3017 	int i;
3018 	int hme_ipg1_conf, hme_ipg2_conf;
3019 	int hme_use_int_xcvr_conf, hme_pace_count_conf;
3020 	int hme_autoneg_conf;
3021 	int hme_anar_100T4_conf;
3022 	int hme_anar_100fdx_conf, hme_anar_100hdx_conf;
3023 	int hme_anar_10fdx_conf, hme_anar_10hdx_conf;
3024 	int hme_ipg0_conf, hme_lance_mode_conf;
3025 	int prop_len = sizeof (int);
3026 	dev_info_t *dip;
3027 
3028 	dip = hmep->dip;
3029 
3030 	for (i = 0; i < A_CNT(hme_param_arr); i++)
3031 		hmep->hme_param_arr[i] = hme_param_arr[i];
3032 
3033 	if (!hmep->hme_g_nd && !hme_param_register(hmep, hmep->hme_param_arr,
3034 	    A_CNT(hme_param_arr))) {
3035 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, NDD_MSG,
3036 		    param_reg_fail_msg);
3037 		return (B_FALSE);
3038 	}
3039 
3040 	/*
3041 	 * Set up the start-up values for user-configurable parameters
3042 	 * Get the values from the global variables first.
3043 	 * Use the MASK to limit the value to allowed maximum.
3044 	 */
3045 	hme_param_ipg1 = hme_ipg1 & HME_MASK_8BIT;
3046 	hme_param_ipg2 = hme_ipg2 & HME_MASK_8BIT;
3047 	hme_param_use_intphy = hme_use_int_xcvr & HME_MASK_1BIT;
3048 	hme_param_pace_count = hme_pace_size & HME_MASK_8BIT;
3049 	hme_param_autoneg = hme_adv_autoneg_cap;
3050 	hme_param_anar_100T4 = hme_adv_100T4_cap;
3051 	hme_param_anar_100fdx = hme_adv_100fdx_cap;
3052 	hme_param_anar_100hdx = hme_adv_100hdx_cap;
3053 	hme_param_anar_10fdx = hme_adv_10fdx_cap;
3054 	hme_param_anar_10hdx = hme_adv_10hdx_cap;
3055 	hme_param_ipg0 = hme_ipg0 & HME_MASK_5BIT;
3056 	hme_param_lance_mode = hme_lance_mode & HME_MASK_1BIT;
3057 
3058 	/*
3059 	 * The link speed may be forced to either 10 Mbps or 100 Mbps using the
3060 	 * property "transfer-speed". This may be done in OBP by using the
3061 	 * command "apply transfer-speed=<speed> <device>". The speed may be
3062 	 * either 10 or 100.
3063 	 */
3064 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0,
3065 	    "transfer-speed", (caddr_t)&i, &prop_len) == DDI_PROP_SUCCESS) {
3066 		hme_param_autoneg = 0;	/* force speed */
3067 		hme_param_anar_100T4 = 0;
3068 		hme_param_anar_100fdx = 0;
3069 		hme_param_anar_10fdx = 0;
3070 		if (i == 10) {
3071 			hme_param_anar_10hdx = 1;
3072 			hme_param_anar_100hdx = 0;
3073 		} else {
3074 			hme_param_anar_10hdx = 0;
3075 			hme_param_anar_100hdx = 1;
3076 		}
3077 	}
3078 
3079 	/*
3080 	 * Get the parameter values configured in .conf file.
3081 	 */
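	/*
	 * For illustration only (standard driver.conf syntax), a line in
	 * hme.conf such as
	 *	adv_autoneg_cap=0 ipg1=8 ipg2=4;
	 * would disable auto-negotiation and set the inter-packet gaps.
	 */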
3082 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg1",
3083 	    (caddr_t)&hme_ipg1_conf, &prop_len) == DDI_PROP_SUCCESS) {
3084 		hme_param_ipg1 = hme_ipg1_conf & HME_MASK_8BIT;
3085 	}
3086 
3087 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg2",
3088 	    (caddr_t)&hme_ipg2_conf, &prop_len) == DDI_PROP_SUCCESS) {
3089 		hme_param_ipg2 = hme_ipg2_conf & HME_MASK_8BIT;
3090 	}
3091 
3092 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "use_int_xcvr",
3093 	    (caddr_t)&hme_use_int_xcvr_conf, &prop_len) == DDI_PROP_SUCCESS) {
3094 		hme_param_use_intphy = hme_use_int_xcvr_conf & HME_MASK_1BIT;
3095 	}
3096 
3097 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "pace_size",
3098 	    (caddr_t)&hme_pace_count_conf, &prop_len) == DDI_PROP_SUCCESS) {
3099 		hme_param_pace_count = hme_pace_count_conf & HME_MASK_8BIT;
3100 	}
3101 
3102 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "adv_autoneg_cap",
3103 	    (caddr_t)&hme_autoneg_conf, &prop_len) == DDI_PROP_SUCCESS) {
3104 		hme_param_autoneg = hme_autoneg_conf & HME_MASK_1BIT;
3105 	}
3106 
3107 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "adv_100T4_cap",
3108 	    (caddr_t)&hme_anar_100T4_conf, &prop_len) == DDI_PROP_SUCCESS) {
3109 		hme_param_anar_100T4 = hme_anar_100T4_conf & HME_MASK_1BIT;
3110 	}
3111 
3112 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "adv_100fdx_cap",
3113 	    (caddr_t)&hme_anar_100fdx_conf, &prop_len) == DDI_PROP_SUCCESS) {
3114 		hme_param_anar_100fdx = hme_anar_100fdx_conf & HME_MASK_1BIT;
3115 	}
3116 
3117 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "adv_100hdx_cap",
3118 	    (caddr_t)&hme_anar_100hdx_conf, &prop_len) == DDI_PROP_SUCCESS) {
3119 		hme_param_anar_100hdx = hme_anar_100hdx_conf & HME_MASK_1BIT;
3120 	}
3121 
3122 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "adv_10fdx_cap",
3123 	    (caddr_t)&hme_anar_10fdx_conf, &prop_len) == DDI_PROP_SUCCESS) {
3124 		hme_param_anar_10fdx = hme_anar_10fdx_conf & HME_MASK_1BIT;
3125 	}
3126 
3127 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "adv_10hdx_cap",
3128 	    (caddr_t)&hme_anar_10hdx_conf, &prop_len) == DDI_PROP_SUCCESS) {
3129 		hme_param_anar_10hdx = hme_anar_10hdx_conf & HME_MASK_1BIT;
3130 	}
3131 
3132 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg0",
3133 	    (caddr_t)&hme_ipg0_conf, &prop_len) == DDI_PROP_SUCCESS) {
3134 		hme_param_ipg0 = hme_ipg0_conf & HME_MASK_5BIT;
3135 	}
3136 
3137 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "lance_mode",
3138 	    (caddr_t)&hme_lance_mode_conf, &prop_len) == DDI_PROP_SUCCESS) {
3139 		hme_param_lance_mode = hme_lance_mode_conf & HME_MASK_1BIT;
3140 	}
3141 
3142 	if (hme_link_pulse_disabled)
3143 		hmep->hme_link_pulse_disabled = 1;
3144 	else if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0,
3145 	    "link-pulse-disabled", (caddr_t)&i, &prop_len)
3146 	    == DDI_PROP_SUCCESS) {
3147 		hmep->hme_link_pulse_disabled = 1;
3148 	}
3149 	return (B_TRUE);
3150 }
3151 
3152 /*
3153  * Return 0 upon success, 1 on failure.
3154  */
3155 static uint_t
3156 hmestop(struct hme *hmep)
3157 {
3158 	/*
3159 	 * Disable the Tx dma engine.
3160 	 */
3161 	PUT_ETXREG(config, (GET_ETXREG(config) & ~HMET_CONFIG_TXDMA_EN));
3162 	HMEDELAY(((GET_ETXREG(state_mach) & 0x1f) == 0x1), HMEMAXRSTDELAY);
3163 
3164 	/*
3165 	 * Disable the Rx dma engine.
3166 	 */
3167 	PUT_ERXREG(config, (GET_ERXREG(config) & ~HMER_CONFIG_RXDMA_EN));
3168 	HMEDELAY(((GET_ERXREG(state_mach) & 0x3f) == 0), HMEMAXRSTDELAY);
3169 
3170 	/*
3171 	 * By this time all things should be quiet, so hit the
3172 	 * chip with a reset.
3173 	 */
3174 	PUT_GLOBREG(reset, HMEG_RESET_GLOBAL);
3175 
3176 	HMEDELAY((GET_GLOBREG(reset) == 0), HMEMAXRSTDELAY);
3177 	if (GET_GLOBREG(reset)) {
3178 		return (1);
3179 	}
3180 
3181 	CHECK_GLOBREG();
3182 	return (0);
3183 }
3184 
3185 static int
3186 hmestat_kstat_update(kstat_t *ksp, int rw)
3187 {
3188 	struct hme *hmep;
3189 	struct hmekstat *hkp;
3190 
3191 	hmep = (struct hme *)ksp->ks_private;
3192 	hkp = (struct hmekstat *)ksp->ks_data;
3193 
3194 	if (rw != KSTAT_READ)
3195 		return (EACCES);
3196 
3197 	/*
3198 	 * Update all the stats by reading all the counter registers;
3199 	 * they are otherwise folded into the soft stats only when a
3200 	 * counter overflows and interrupts.
3201 	 */
3202 
3203 	mutex_enter(&hmep->hme_xmitlock);
3204 	if (hmep->hme_flags & HMERUNNING) {
3205 		hmereclaim(hmep);
3206 		hmesavecntrs(hmep);
3207 	}
3208 	mutex_exit(&hmep->hme_xmitlock);
3209 
3210 	hkp->hk_cvc.value.ul		= hmep->hme_cvc;
3211 	hkp->hk_lenerr.value.ul		= hmep->hme_lenerr;
3212 	hkp->hk_buff.value.ul		= hmep->hme_buff;
3213 	hkp->hk_missed.value.ul		= hmep->hme_missed;
3214 	hkp->hk_allocbfail.value.ul	= hmep->hme_allocbfail;
3215 	hkp->hk_babl.value.ul		= hmep->hme_babl;
3216 	hkp->hk_tmder.value.ul		= hmep->hme_tmder;
3217 	hkp->hk_txlaterr.value.ul	= hmep->hme_txlaterr;
3218 	hkp->hk_rxlaterr.value.ul	= hmep->hme_rxlaterr;
3219 	hkp->hk_slvparerr.value.ul	= hmep->hme_slvparerr;
3220 	hkp->hk_txparerr.value.ul	= hmep->hme_txparerr;
3221 	hkp->hk_rxparerr.value.ul	= hmep->hme_rxparerr;
3222 	hkp->hk_slverrack.value.ul	= hmep->hme_slverrack;
3223 	hkp->hk_txerrack.value.ul	= hmep->hme_txerrack;
3224 	hkp->hk_rxerrack.value.ul	= hmep->hme_rxerrack;
3225 	hkp->hk_txtagerr.value.ul	= hmep->hme_txtagerr;
3226 	hkp->hk_rxtagerr.value.ul	= hmep->hme_rxtagerr;
3227 	hkp->hk_eoperr.value.ul		= hmep->hme_eoperr;
3228 	hkp->hk_notmds.value.ul		= hmep->hme_notmds;
3229 	hkp->hk_notbufs.value.ul	= hmep->hme_notbufs;
3230 	hkp->hk_norbufs.value.ul	= hmep->hme_norbufs;
3231 
3232 	/*
3233 	 * Debug kstats
3234 	 */
3235 	hkp->hk_inits.value.ul		= hmep->inits;
3236 	hkp->hk_phyfail.value.ul	= hmep->phyfail;
3237 
3238 	/*
3239 	 * xcvr kstats
3240 	 */
3241 	hkp->hk_asic_rev.value.ul	= hmep->asic_rev;
3242 
3243 	return (0);
3244 }
3245 
3246 static void
3247 hmestatinit(struct hme *hmep)
3248 {
3249 	struct	kstat	*ksp;
3250 	struct	hmekstat	*hkp;
3251 	const char *driver;
3252 	int	instance;
3253 	char	buf[16];
3254 
3255 	instance = hmep->instance;
3256 	driver = ddi_driver_name(hmep->dip);
3257 
3258 	if ((ksp = kstat_create(driver, instance,
3259 	    "driver_info", "net", KSTAT_TYPE_NAMED,
3260 	    sizeof (struct hmekstat) / sizeof (kstat_named_t), 0)) == NULL) {
3261 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, INIT_MSG,
3262 		    "kstat_create failed");
3263 		return;
3264 	}
3265 
3266 	(void) snprintf(buf, sizeof (buf), "%sc%d", driver, instance);
3267 	hmep->hme_intrstats = kstat_create(driver, instance, buf, "controller",
3268 	    KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
3269 	if (hmep->hme_intrstats)
3270 		kstat_install(hmep->hme_intrstats);
3271 
3272 	hmep->hme_ksp = ksp;
3273 	hkp = (struct hmekstat *)ksp->ks_data;
3274 	kstat_named_init(&hkp->hk_cvc,			"code_violations",
3275 	    KSTAT_DATA_ULONG);
3276 	kstat_named_init(&hkp->hk_lenerr,		"len_errors",
3277 	    KSTAT_DATA_ULONG);
3278 	kstat_named_init(&hkp->hk_buff,			"buff",
3279 	    KSTAT_DATA_ULONG);
3280 	kstat_named_init(&hkp->hk_missed,		"missed",
3281 	    KSTAT_DATA_ULONG);
3282 	kstat_named_init(&hkp->hk_nocanput,		"nocanput",
3283 	    KSTAT_DATA_ULONG);
3284 	kstat_named_init(&hkp->hk_allocbfail,		"allocbfail",
3285 	    KSTAT_DATA_ULONG);
3286 	kstat_named_init(&hkp->hk_babl,			"babble",
3287 	    KSTAT_DATA_ULONG);
3288 	kstat_named_init(&hkp->hk_tmder,		"tmd_error",
3289 	    KSTAT_DATA_ULONG);
3290 	kstat_named_init(&hkp->hk_txlaterr,		"tx_late_error",
3291 	    KSTAT_DATA_ULONG);
3292 	kstat_named_init(&hkp->hk_rxlaterr,		"rx_late_error",
3293 	    KSTAT_DATA_ULONG);
3294 	kstat_named_init(&hkp->hk_slvparerr,		"slv_parity_error",
3295 	    KSTAT_DATA_ULONG);
3296 	kstat_named_init(&hkp->hk_txparerr,		"tx_parity_error",
3297 	    KSTAT_DATA_ULONG);
3298 	kstat_named_init(&hkp->hk_rxparerr,		"rx_parity_error",
3299 	    KSTAT_DATA_ULONG);
3300 	kstat_named_init(&hkp->hk_slverrack,		"slv_error_ack",
3301 	    KSTAT_DATA_ULONG);
3302 	kstat_named_init(&hkp->hk_txerrack,		"tx_error_ack",
3303 	    KSTAT_DATA_ULONG);
3304 	kstat_named_init(&hkp->hk_rxerrack,		"rx_error_ack",
3305 	    KSTAT_DATA_ULONG);
3306 	kstat_named_init(&hkp->hk_txtagerr,		"tx_tag_error",
3307 	    KSTAT_DATA_ULONG);
3308 	kstat_named_init(&hkp->hk_rxtagerr,		"rx_tag_error",
3309 	    KSTAT_DATA_ULONG);
3310 	kstat_named_init(&hkp->hk_eoperr,		"eop_error",
3311 	    KSTAT_DATA_ULONG);
3312 	kstat_named_init(&hkp->hk_notmds,		"no_tmds",
3313 	    KSTAT_DATA_ULONG);
3314 	kstat_named_init(&hkp->hk_notbufs,		"no_tbufs",
3315 	    KSTAT_DATA_ULONG);
3316 	kstat_named_init(&hkp->hk_norbufs,		"no_rbufs",
3317 	    KSTAT_DATA_ULONG);
3318 
3319 	/*
3320 	 * Debugging kstats
3321 	 */
3322 	kstat_named_init(&hkp->hk_inits,		"inits",
3323 	    KSTAT_DATA_ULONG);
3324 	kstat_named_init(&hkp->hk_phyfail,		"phy_failures",
3325 	    KSTAT_DATA_ULONG);
3326 
3327 	/*
3328 	 * I/O bus kstats
3329 	 * kstat_named_init(&hkp->hk_pci_speed,		"pci_bus_speed",
3330 	 *		KSTAT_DATA_ULONG);
3331 	 * kstat_named_init(&hkp->hk_pci_size,		"pci_bus_width",
3332 	 *		KSTAT_DATA_ULONG);
3333 	 */
3334 
3335 	/*
3336 	 * xcvr kstats
3337 	 */
3338 	kstat_named_init(&hkp->hk_asic_rev,		"asic_rev",
3339 	    KSTAT_DATA_ULONG);
3340 
3341 	ksp->ks_update = hmestat_kstat_update;
3342 	ksp->ks_private = (void *) hmep;
3343 	kstat_install(ksp);
3344 }
3345 
3346 static void
3347 hme_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
3348 {
3349 	struct	hme	*hmep = arg;
3350 	struct	iocblk	*iocp = (void *)mp->b_rptr;
3351 	uint32_t old_ipg1, old_ipg2, old_use_int_xcvr, old_autoneg;
3352 	uint32_t old_100T4;
3353 	uint32_t old_100fdx, old_100hdx, old_10fdx, old_10hdx;
3354 	uint32_t old_ipg0, old_lance_mode;
3355 
3356 	switch (iocp->ioc_cmd) {
3357 
3358 	case HME_ND_GET:
3359 
3360 		old_autoneg = hme_param_autoneg;
3361 		old_100T4 = hme_param_anar_100T4;
3362 		old_100fdx = hme_param_anar_100fdx;
3363 		old_100hdx = hme_param_anar_100hdx;
3364 		old_10fdx = hme_param_anar_10fdx;
3365 		old_10hdx = hme_param_anar_10hdx;
3366 
3367 		hme_param_autoneg = old_autoneg & ~HME_NOTUSR;
3368 		hme_param_anar_100T4 = old_100T4 & ~HME_NOTUSR;
3369 		hme_param_anar_100fdx = old_100fdx & ~HME_NOTUSR;
3370 		hme_param_anar_100hdx = old_100hdx & ~HME_NOTUSR;
3371 		hme_param_anar_10fdx = old_10fdx & ~HME_NOTUSR;
3372 		hme_param_anar_10hdx = old_10hdx & ~HME_NOTUSR;
3373 
3374 		if (!hme_nd_getset(wq, hmep->hme_g_nd, mp)) {
3375 			hme_param_autoneg = old_autoneg;
3376 			hme_param_anar_100T4 = old_100T4;
3377 			hme_param_anar_100fdx = old_100fdx;
3378 			hme_param_anar_100hdx = old_100hdx;
3379 			hme_param_anar_10fdx = old_10fdx;
3380 			hme_param_anar_10hdx = old_10hdx;
3381 			miocnak(wq, mp, 0, EINVAL);
3382 			return;
3383 		}
3384 		hme_param_autoneg = old_autoneg;
3385 		hme_param_anar_100T4 = old_100T4;
3386 		hme_param_anar_100fdx = old_100fdx;
3387 		hme_param_anar_100hdx = old_100hdx;
3388 		hme_param_anar_10fdx = old_10fdx;
3389 		hme_param_anar_10hdx = old_10hdx;
3390 
3391 		qreply(wq, mp);
3392 		break;
3393 
3394 	case HME_ND_SET:
3395 		old_ipg0 = hme_param_ipg0;
3396 		old_lance_mode = hme_param_lance_mode;
3397 		old_ipg1 = hme_param_ipg1;
3398 		old_ipg2 = hme_param_ipg2;
3399 		old_use_int_xcvr = hme_param_use_intphy;
3400 		old_autoneg = hme_param_autoneg;
3401 		hme_param_autoneg = 0xff;
3402 
3403 		if (!hme_nd_getset(wq, hmep->hme_g_nd, mp)) {
3404 			hme_param_autoneg = old_autoneg;
3405 			miocnak(wq, mp, 0, EINVAL);
3406 			return;
3407 		}
3408 
3409 		qreply(wq, mp);
3410 
3411 		if (hme_param_autoneg != 0xff) {
3412 			hmep->hme_linkcheck = 0;
3413 			(void) hmeinit(hmep);
3414 		} else {
3415 			hme_param_autoneg = old_autoneg;
3416 			if (old_use_int_xcvr != hme_param_use_intphy) {
3417 				hmep->hme_linkcheck = 0;
3418 				(void) hmeinit(hmep);
3419 			} else if ((old_ipg1 != hme_param_ipg1) ||
3420 			    (old_ipg2 != hme_param_ipg2) ||
3421 			    (old_ipg0 != hme_param_ipg0) ||
3422 			    (old_lance_mode != hme_param_lance_mode)) {
3423 				(void) hmeinit(hmep);
3424 			}
3425 		}
3426 		break;
3427 
3428 	default:
3429 		miocnak(wq, mp, 0, EINVAL);
3430 		break;
3431 	}
3432 }
3433 
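/*
 * The controller computes only a partial (16-bit ones'-complement)
 * checksum, so advertise HCKSUM_INET_PARTIAL; small packets fall back
 * to the software hme_cksum() in hmestart().
 */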
3434 /*ARGSUSED*/
3435 static boolean_t
3436 hme_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3437 {
3438 	switch (cap) {
3439 	case MAC_CAPAB_HCKSUM:
3440 		*(uint32_t *)cap_data = HCKSUM_INET_PARTIAL;
3441 		return (B_TRUE);
3442 	default:
3443 		return (B_FALSE);
3444 	}
3445 }
3446 
3447 static int
3448 hme_m_promisc(void *arg, boolean_t on)
3449 {
3450 	struct hme *hmep = arg;
3451 
3452 	hmep->hme_promisc = on;
3453 	(void) hmeinit(hmep);
3454 	return (0);
3455 }
3456 
3457 static int
3458 hme_m_unicst(void *arg, const uint8_t *macaddr)
3459 {
3460 	struct hme *hmep = arg;
3461 
3462 	/*
3463 	 * Set new interface local address and re-init device.
3464 	 * This is destructive to any other streams attached
3465 	 * to this device.
3466 	 */
3467 	mutex_enter(&hmep->hme_intrlock);
3468 	bcopy(macaddr, &hmep->hme_ouraddr, ETHERADDRL);
3469 	mutex_exit(&hmep->hme_intrlock);
3470 	(void) hmeinit(hmep);
3471 	return (0);
3472 }
3473 
3474 static int
3475 hme_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
3476 {
3477 	struct hme	*hmep = arg;
3478 	uint32_t	ladrf_bit;
3479 	boolean_t	doinit = B_FALSE;
3480 
3481 	/*
3482 	 * If this address's bit was not already set in the local address
3483 	 * filter, add it and re-initialize the Hardware.
3484 	 */
3485 	ladrf_bit = hmeladrf_bit(macaddr);
3486 
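	/*
	 * The logical address filter is kept as an array of 16-bit
	 * words: the upper bits of ladrf_bit select the word and the
	 * low four bits select the bit within it.  The per-bit
	 * reference count lets several multicast addresses share a
	 * single hash bit.
	 */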
3487 	mutex_enter(&hmep->hme_intrlock);
3488 	if (add) {
3489 		hmep->hme_ladrf_refcnt[ladrf_bit]++;
3490 		if (hmep->hme_ladrf_refcnt[ladrf_bit] == 1) {
3491 			hmep->hme_ladrf[ladrf_bit >> 4] |=
3492 			    1 << (ladrf_bit & 0xf);
3493 			hmep->hme_multi++;
3494 			doinit = B_TRUE;
3495 		}
3496 	} else {
3497 		hmep->hme_ladrf_refcnt[ladrf_bit]--;
3498 		if (hmep->hme_ladrf_refcnt[ladrf_bit] == 0) {
3499 			hmep->hme_ladrf[ladrf_bit >> 4] &=
3500 			    ~(1 << (ladrf_bit & 0xf));
3501 			doinit = B_TRUE;
3502 		}
3503 	}
3504 	mutex_exit(&hmep->hme_intrlock);
3505 
3506 	if (doinit) {
3507 		(void) hmeinit(hmep);
3508 	}
3509 
3510 	return (0);
3511 }
3512 
3513 static int
3514 hme_m_start(void *arg)
3515 {
3516 	struct hme *hmep = arg;
3517 
3518 	if (hmeinit(hmep) != 0) {
3519 		/* initialization failed -- really want DL_INITFAILED */
3520 		return (EIO);
3521 	} else {
3522 		hmep->hme_started = B_TRUE;
3523 		return (0);
3524 	}
3525 }
3526 
3527 static void
3528 hme_m_stop(void *arg)
3529 {
3530 	struct hme *hmep = arg;
3531 
3532 	hmep->hme_started = B_FALSE;
3533 	hmeuninit(hmep);
3534 }
3535 
3536 static int
3537 hme_m_stat(void *arg, uint_t stat, uint64_t *val)
3538 {
3539 	struct hme	*hmep = arg;
3540 
3541 	mutex_enter(&hmep->hme_xmitlock);
3542 	if (hmep->hme_flags & HMERUNNING) {
3543 		hmereclaim(hmep);
3544 		hmesavecntrs(hmep);
3545 	}
3546 	mutex_exit(&hmep->hme_xmitlock);
3547 
3549 	switch (stat) {
3550 	case MAC_STAT_IFSPEED:
3551 		*val = hmep->hme_ifspeed * 1000000;
3552 		break;
3553 	case MAC_STAT_IPACKETS:
3554 		*val = hmep->hme_ipackets;
3555 		break;
3556 	case MAC_STAT_RBYTES:
3557 		*val = hmep->hme_rbytes;
3558 		break;
3559 	case MAC_STAT_IERRORS:
3560 		*val = hmep->hme_ierrors;
3561 		break;
3562 	case MAC_STAT_OPACKETS:
3563 		*val = hmep->hme_opackets;
3564 		break;
3565 	case MAC_STAT_OBYTES:
3566 		*val = hmep->hme_obytes;
3567 		break;
3568 	case MAC_STAT_OERRORS:
3569 		*val = hmep->hme_oerrors;
3570 		break;
3571 	case MAC_STAT_MULTIRCV:
3572 		*val = hmep->hme_multircv;
3573 		break;
3574 	case MAC_STAT_MULTIXMT:
3575 		*val = hmep->hme_multixmt;
3576 		break;
3577 	case MAC_STAT_BRDCSTRCV:
3578 		*val = hmep->hme_brdcstrcv;
3579 		break;
3580 	case MAC_STAT_BRDCSTXMT:
3581 		*val = hmep->hme_brdcstxmt;
3582 		break;
3583 	case MAC_STAT_UNDERFLOWS:
3584 		*val = hmep->hme_uflo;
3585 		break;
3586 	case MAC_STAT_OVERFLOWS:
3587 		*val = hmep->hme_oflo;
3588 		break;
3589 	case MAC_STAT_COLLISIONS:
3590 		*val = hmep->hme_coll;
3591 		break;
3592 	case MAC_STAT_NORCVBUF:
3593 		*val = hmep->hme_norcvbuf;
3594 		break;
3595 	case MAC_STAT_NOXMTBUF:
3596 		*val = hmep->hme_noxmtbuf;
3597 		break;
3598 	case ETHER_STAT_LINK_DUPLEX:
3599 		*val = hmep->hme_duplex;
3600 		break;
3601 	case ETHER_STAT_ALIGN_ERRORS:
3602 		*val = hmep->hme_align_errors;
3603 		break;
3604 	case ETHER_STAT_FCS_ERRORS:
3605 		*val = hmep->hme_fcs_errors;
3606 		break;
3607 	case ETHER_STAT_EX_COLLISIONS:
3608 		*val = hmep->hme_excol;
3609 		break;
3610 	case ETHER_STAT_DEFER_XMTS:
3611 		*val = hmep->hme_defer_xmts;
3612 		break;
3613 	case ETHER_STAT_SQE_ERRORS:
3614 		*val = hmep->hme_sqe_errors;
3615 		break;
3616 	case ETHER_STAT_FIRST_COLLISIONS:
3617 		*val = hmep->hme_fstcol;
3618 		break;
3619 	case ETHER_STAT_TX_LATE_COLLISIONS:
3620 		*val = hmep->hme_tlcol;
3621 		break;
3622 	case ETHER_STAT_TOOLONG_ERRORS:
3623 		*val = hmep->hme_toolong_errors;
3624 		break;
3625 	case ETHER_STAT_TOOSHORT_ERRORS:
3626 		*val = hmep->hme_runt;
3627 		break;
3628 	case ETHER_STAT_CARRIER_ERRORS:
3629 		*val = hmep->hme_carrier_errors;
3630 		break;
3631 	case ETHER_STAT_XCVR_ADDR:
3632 		*val = hmep->hme_phyad;
3633 		break;
3634 	case ETHER_STAT_XCVR_ID:
3635 		*val = (hmep->hme_idr1 << 16U) | (hmep->hme_idr2);
3636 		break;
3637 	case ETHER_STAT_XCVR_INUSE:
3638 		switch (hmep->hme_transceiver) {
3639 		case HME_INTERNAL_TRANSCEIVER:
3640 			*val = XCVR_100X;
3641 			break;
3642 		case HME_NO_TRANSCEIVER:
3643 			*val = XCVR_NONE;
3644 			break;
3645 		default:
3646 			*val = XCVR_UNDEFINED;
3647 			break;
3648 		}
3649 		break;
3650 	case ETHER_STAT_CAP_100T4:
3651 		*val = hme_param_bmsr_100T4;
3652 		break;
3653 	case ETHER_STAT_ADV_CAP_100T4:
3654 		*val = hme_param_anar_100T4 & ~HME_NOTUSR;
3655 		break;
3656 	case ETHER_STAT_LP_CAP_100T4:
3657 		*val = hme_param_anlpar_100T4;
3658 		break;
3659 	case ETHER_STAT_CAP_100FDX:
3660 		*val = hme_param_bmsr_100fdx;
3661 		break;
3662 	case ETHER_STAT_ADV_CAP_100FDX:
3663 		*val = hme_param_anar_100fdx & ~HME_NOTUSR;
3664 		break;
3665 	case ETHER_STAT_LP_CAP_100FDX:
3666 		*val = hme_param_anlpar_100fdx;
3667 		break;
3668 	case ETHER_STAT_CAP_100HDX:
3669 		*val = hme_param_bmsr_100hdx;
3670 		break;
3671 	case ETHER_STAT_ADV_CAP_100HDX:
3672 		*val = hme_param_anar_100hdx & ~HME_NOTUSR;
3673 		break;
3674 	case ETHER_STAT_LP_CAP_100HDX:
3675 		*val = hme_param_anlpar_100hdx;
3676 		break;
3677 	case ETHER_STAT_CAP_10FDX:
3678 		*val = hme_param_bmsr_10fdx;
3679 		break;
3680 	case ETHER_STAT_ADV_CAP_10FDX:
3681 		*val = hme_param_anar_10fdx & ~HME_NOTUSR;
3682 		break;
3683 	case ETHER_STAT_LP_CAP_10FDX:
3684 		*val = hme_param_anlpar_10fdx;
3685 		break;
3686 	case ETHER_STAT_CAP_10HDX:
3687 		*val = hme_param_bmsr_10hdx;
3688 		break;
3689 	case ETHER_STAT_ADV_CAP_10HDX:
3690 		*val = hme_param_anar_10hdx & ~HME_NOTUSR;
3691 		break;
3692 	case ETHER_STAT_LP_CAP_10HDX:
3693 		*val = hme_param_anlpar_10hdx;
3694 		break;
3695 	case ETHER_STAT_CAP_AUTONEG:
3696 		*val = hme_param_bmsr_ancap;
3697 		break;
3698 	case ETHER_STAT_ADV_CAP_AUTONEG:
3699 		*val = hme_param_autoneg & ~HME_NOTUSR;
3700 		break;
3701 	case ETHER_STAT_LP_CAP_AUTONEG:
3702 		*val = hme_param_aner_lpancap;
3703 		break;
3704 	default:
3705 		return (EINVAL);
3706 	}
3707 	return (0);
3708 }
3709 
3710 static mblk_t *
3711 hme_m_tx(void *arg, mblk_t *mp)
3712 {
3713 	struct hme *hmep = arg;
3714 	mblk_t *next;
3715 
3716 	while (mp != NULL) {
3717 		next = mp->b_next;
3718 		mp->b_next = NULL;
3719 		if (!hmestart(hmep, mp)) {
3720 			mp->b_next = next;
3721 			break;
3722 		}
3723 		mp = next;
3724 	}
3725 	return (mp);
3726 }
3727 
3728 /*
3729  * Software IP checksum, for the edge cases that the
3730  * hardware can't handle.  See hmestart for more info.
3731  */
3732 static uint16_t
3733 hme_cksum(void *data, int len)
3734 {
3735 	uint16_t	*words = data;
3736 	int		i, nwords = len / 2;
3737 	uint32_t	sum = 0;
3738 
3739 	/* just add up the words */
3740 	for (i = 0; i < nwords; i++) {
3741 		sum += *words++;
3742 	}
3743 
3744 	/* pick up residual byte ... assume even half-word allocations */
3745 	if (len % 2) {
3746 		sum += (*words & htons(0xff00));
3747 	}
3748 
3749 	sum = (sum >> 16) + (sum & 0xffff);
3750 	sum = (sum >> 16) + (sum & 0xffff);
3751 
3752 	return (~(sum & 0xffff));
3753 }
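
/*
 * Illustrative sketch, not part of the original driver: a guarded
 * harness showing hme_cksum() on an odd-length buffer.  The trailing
 * pad byte honors the "even half-word allocations" assumption above;
 * the data values and the HME_CKSUM_EXAMPLE guard are hypothetical.
 */
#ifdef HME_CKSUM_EXAMPLE
static void
hme_cksum_example(void)
{
	/* three aligned half-words; only the first 5 bytes are data */
	static uint16_t pkt[3] = { 0x1234, 0x5678, 0x9a00 };
	uint16_t sum;

	/* odd length: the final byte read is allocated padding */
	sum = hme_cksum(pkt, 5);
	cmn_err(CE_CONT, "example checksum = 0x%x\n", sum);
}
#endif	/* HME_CKSUM_EXAMPLE */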
3754 
3755 static boolean_t
3756 hmestart(struct hme *hmep, mblk_t *mp)
3757 {
3758 	uint32_t	len;
3759 	boolean_t	retval = B_TRUE;
3760 	hmebuf_t	*tbuf;
3761 	uint32_t	txptr;
3762 
3763 	uint32_t	csflags = 0;
3764 	uint32_t	flags;
3765 	uint32_t	start_offset;
3766 	uint32_t	stuff_offset;
3767 
3768 	hcksum_retrieve(mp, NULL, NULL, &start_offset, &stuff_offset,
3769 	    NULL, NULL, &flags);
3770 
3771 	if (flags & HCK_PARTIALCKSUM) {
3772 		if (get_ether_type(mp->b_rptr) == ETHERTYPE_VLAN) {
3773 			start_offset += sizeof (struct ether_header) + 4;
3774 			stuff_offset += sizeof (struct ether_header) + 4;
3775 		} else {
3776 			start_offset += sizeof (struct ether_header);
3777 			stuff_offset += sizeof (struct ether_header);
3778 		}
3779 		csflags = HMETMD_CSENABL |
3780 		    (start_offset << HMETMD_CSSTART_SHIFT) |
3781 		    (stuff_offset << HMETMD_CSSTUFF_SHIFT);
3782 	}
3783 
3784 	mutex_enter(&hmep->hme_xmitlock);
3785 
3786 	if (hmep->hme_flags & HMESUSPENDED) {
3787 		hmep->hme_carrier_errors++;
3788 		hmep->hme_oerrors++;
3789 		goto bad;
3790 	}
3791 
3792 	if (hmep->hme_txindex != hmep->hme_txreclaim) {
3793 		hmereclaim(hmep);
3794 	}
3795 	if ((hmep->hme_txindex - HME_TMDMAX) == hmep->hme_txreclaim)
3796 		goto notmds;
3797 	txptr = hmep->hme_txindex % HME_TMDMAX;
3798 	tbuf = &hmep->hme_tbuf[txptr];
3799 
3800 	/*
3801 	 * Note that for checksum offload, the hardware cannot
3802 	 * generate correct checksums if the packet is smaller than
3803 	 * 64 bytes.  In such a case, we bcopy the packet and use
3804 	 * a software checksum.
3805 	 */
3806 
3807 	len = msgsize(mp);
3808 	if (len < 64) {
3809 		/* zero fill the padding */
3810 		bzero(tbuf->kaddr, 64);
3811 	}
3812 	mcopymsg(mp, tbuf->kaddr);
3813 
3814 	if ((csflags != 0) && (len < 64)) {
3815 		uint16_t sum;
3816 		sum = hme_cksum(tbuf->kaddr + start_offset,
3817 		    len - start_offset);
3818 		bcopy(&sum, tbuf->kaddr + stuff_offset, sizeof (sum));
3819 		csflags = 0;
3820 	}
3821 
3822 	if (ddi_dma_sync(tbuf->dmah, 0, len, DDI_DMA_SYNC_FORDEV) ==
3823 	    DDI_FAILURE) {
3824 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, DDI_MSG,
3825 		    "ddi_dma_sync failed");
3826 	}
3827 
3828 	/*
3829 	 * update MIB II statistics
3830 	 */
3831 	BUMP_OutNUcast(hmep, tbuf->kaddr);
3832 
3833 	PUT_TMD(txptr, tbuf->paddr, len,
3834 	    HMETMD_OWN | HMETMD_SOP | HMETMD_EOP | csflags);
3835 
3836 	HMESYNCTMD(txptr, DDI_DMA_SYNC_FORDEV);
3837 	hmep->hme_txindex++;
3838 
3839 	PUT_ETXREG(txpend, HMET_TXPEND_TDMD);
3840 	CHECK_ETXREG();
3841 
3842 	mutex_exit(&hmep->hme_xmitlock);
3843 
3844 	hmep->hme_starts++;
3845 	return (B_TRUE);
3846 
3847 bad:
3848 	mutex_exit(&hmep->hme_xmitlock);
3849 	freemsg(mp);
3850 	return (B_TRUE);
3851 
3852 notmds:
3853 	hmep->hme_notmds++;
3854 	hmep->hme_wantw = B_TRUE;
3855 	hmereclaim(hmep);
3856 	retval = B_FALSE;
3857 done:
3858 	mutex_exit(&hmep->hme_xmitlock);
3859 
3860 	return (retval);
3861 }
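
/*
 * A minimal sketch, not driver code, of the descriptor-ring arithmetic
 * used in hmestart() above: hme_txindex and hme_txreclaim are
 * free-running counters, so the ring is full exactly when they differ
 * by HME_TMDMAX, and a counter maps to a slot by modulo HME_TMDMAX.
 * The HME_RING_EXAMPLE guard is hypothetical.
 */
#ifdef HME_RING_EXAMPLE
static boolean_t
hme_tx_ring_full(uint32_t txindex, uint32_t txreclaim)
{
	/* unsigned subtraction yields the occupancy, 0..HME_TMDMAX */
	return (((txindex - txreclaim) == HME_TMDMAX) ? B_TRUE : B_FALSE);
}
#endif	/* HME_RING_EXAMPLE */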
3862 
3863 /*
3864  * Initialize channel.
3865  * Return 0 on success, nonzero on error.
3866  *
3867  * The recommended sequence for initialization is:
3868  * 1. Issue a Global Reset command to the Ethernet Channel.
3869  * 2. Poll the Global_Reset bits until the execution of the reset has been
3870  *    completed.
3871  * 2(a). Use the MIF Frame/Output register to reset the transceiver.
3872  *	 Poll Register 0 until the Reset bit is 0.
3873  * 2(b). Use the MIF Frame/Output register to put the PHY in Normal-Op,
3874  *	 100Mbps and Non-Isolated mode. The main point here is to bring the
3875  *	 PHY out of Isolate mode so that it can generate the rx_clk and tx_clk
3876  *	 to the MII interface so that the Bigmac core can correctly reset
3877  *	 upon a software reset.
3878  * 2(c).  Issue another Global Reset command to the Ethernet Channel and poll
3879  *	  the Global_Reset bits until completion.
3880  * 3. Set up all the data structures in the host memory.
3881  * 4. Program the TX_MAC registers/counters (excluding the TX_MAC Configuration
3882  *    Register).
3883  * 5. Program the RX_MAC registers/counters (excluding the RX_MAC Configuration
3884  *    Register).
3885  * 6. Program the Transmit Descriptor Ring Base Address in the ETX.
3886  * 7. Program the Receive Descriptor Ring Base Address in the ERX.
3887  * 8. Program the Global Configuration and the Global Interrupt Mask Registers.
3888  * 9. Program the ETX Configuration register (enable the Transmit DMA channel).
3889  * 10. Program the ERX Configuration register (enable the Receive DMA channel).
3890  * 11. Program the XIF Configuration Register (enable the XIF).
3891  * 12. Program the RX_MAC Configuration Register (Enable the RX_MAC).
3892  * 13. Program the TX_MAC Configuration Register (Enable the TX_MAC).
3893  */
3894 
3895 
3896 #ifdef FEPS_URUN_BUG
3897 static int hme_palen = 32;
3898 #endif
3899 
3900 static int
3901 hmeinit(struct hme *hmep)
3902 {
3903 	uint32_t		i;
3904 	int			ret;
3905 
3906 	/*
3907 	 * Lock sequence:
3908 	 *	hme_intrlock, hme_xmitlock.
3909 	 */
3910 	mutex_enter(&hmep->hme_intrlock);
3911 
3912 	/*
3913 	 * Don't touch the hardware if we are suspended.  But don't
3914 	 * fail either.  Some time later we may be resumed, and then
3915 	 * we'll be back here to program the device using the settings
3916 	 * in the soft state.
3917 	 */
3918 	if (hmep->hme_flags & HMESUSPENDED) {
3919 		mutex_exit(&hmep->hme_intrlock);
3920 		return (0);
3921 	}
3922 
3923 	/*
3924 	 * Temporarily mask the interrupts so that none are generated
3925 	 * (and inadvertently cleared) while the channel is being
3926 	 * programmed.  We need to update the interrupt mask
3927 	 * later in this function.
3928 	 */
3929 	PUT_GLOBREG(intmask, ~HMEG_MASK_MIF_INTR);
3930 
3931 
3932 	/*
3933 	 * Rearranged the mutex acquisition order to solve the deadlock
3934 	 * situation as described in bug ID 4065896.
3935 	 */
3936 
3937 	hme_stop_timer(hmep);	/* acquire hme_linklock */
3938 	mutex_enter(&hmep->hme_xmitlock);
3939 
3940 	hmep->hme_flags = 0;
3941 	hmep->hme_wantw = B_FALSE;
3942 	hmep->hme_txhung = 0;
3943 
3944 	/*
3945 	 * Initializing 'hmep->hme_iipackets' to match current
3946 	 * number of received packets.
3947 	 */
3948 	hmep->hme_iipackets = hmep->hme_ipackets;
3949 
3950 	if (hmep->inits)
3951 		hmesavecntrs(hmep);
3952 
3953 	hme_stop_mifpoll(hmep);
3954 
3955 	/*
3956 	 * Perform Global reset of the Sbus/FEPS ENET channel.
3957 	 */
3958 	(void) hmestop(hmep);
3959 
3960 	/*
3961 	 * Clear all descriptors.
3962 	 */
3963 	bzero(hmep->hme_rmdp, HME_RMDMAX * sizeof (struct hme_rmd));
3964 	bzero(hmep->hme_tmdp, HME_TMDMAX * sizeof (struct hme_tmd));
3965 
3966 	/*
3967 	 * Hang out receive buffers.
3968 	 */
3969 	for (i = 0; i < HME_RMDMAX; i++) {
3970 		PUT_RMD(i, hmep->hme_rbuf[i].paddr);
3971 	}
3972 
3973 	/*
3974 	 * DMA sync descriptors.
3975 	 */
3976 	(void) ddi_dma_sync(hmep->hme_rmd_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
3977 	(void) ddi_dma_sync(hmep->hme_tmd_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
3978 
3979 	/*
3980 	 * Reset RMD and TMD 'walking' pointers.
3981 	 */
3982 	hmep->hme_rxindex = 0;
3983 	hmep->hme_txindex = hmep->hme_txreclaim = 0;
3984 
3985 	/*
3986 	 * This is the right place to initialize MIF !!!
3987 	 */
3988 
3989 	PUT_MIFREG(mif_imask, HME_MIF_INTMASK);	/* mask all interrupts */
3990 
3991 	if (!hmep->hme_frame_enable)
3992 		PUT_MIFREG(mif_cfg, GET_MIFREG(mif_cfg) | HME_MIF_CFGBB);
3993 	else
3994 		PUT_MIFREG(mif_cfg, GET_MIFREG(mif_cfg) & ~HME_MIF_CFGBB);
3995 						/* enable frame mode */
3996 
3997 	/*
3998 	 * Depending on the transceiver detected, select the source
3999 	 * of the clocks for the MAC. Without the clocks, TX_MAC does
4000 	 * not reset. When the Global Reset is issued to the Sbus/FEPS
4001 	 * ASIC, it selects Internal by default.
4002 	 */
4003 
4004 	hme_check_transceiver(hmep);
4005 	if (hmep->hme_transceiver == HME_NO_TRANSCEIVER) {
4006 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, XCVR_MSG, no_xcvr_msg);
4007 		hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
4008 		goto init_fail;	/* abort initialization */
4009 
4010 	} else if (hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER)
4011 		PUT_MACREG(xifc, 0);
4012 	else
4013 		PUT_MACREG(xifc, BMAC_XIFC_MIIBUFDIS);
4014 				/* Isolate the Int. xcvr */
4015 	/*
4016 	 * Perform transceiver reset and speed selection only if
4017 	 * the link is down.
4018 	 */
4019 	if (!hmep->hme_linkcheck)
4020 		/*
4021 		 * Reset the PHY and bring up the link
4022 		 * If it fails we will then increment a kstat.
4023 		 */
4024 		hme_reset_transceiver(hmep);
4025 	else {
4026 		if (hmep->hme_linkup)
4027 			hme_start_mifpoll(hmep);
4028 		hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
4029 	}
4030 	hmep->inits++;
4031 
4032 	/*
4033 	 * Initialize BigMAC registers.
4034 	 * First set the tx enable bit in tx config reg to 0 and poll on
4035 	 * it until it turns to 0. Same for rx config, hash and address
4036 	 * filter reg.
4037 	 * Here is the sequence per the spec.
4038 	 * MADD2 - MAC Address 2
4039 	 * MADD1 - MAC Address 1
4040 	 * MADD0 - MAC Address 0
4041 	 * HASH3, HASH2, HASH1, HASH0 for group address
4042 	 * AFR2, AFR1, AFR0 and AFMR for address filter mask
4043 	 * Program RXMIN and RXMAX for packet length if not 802.3
4044 	 * RXCFG - Rx config for not stripping CRC
4045 	 * XXX Anything else to be configured in RXCFG for hme
4046 	 * IPG1, IPG2, ALIMIT, SLOT, PALEN, PAPAT, TXSFD, JAM, TXMAX, TXMIN
4047 	 * if not 802.3 compliant
4048 	 * XIF register for speed selection
4049 	 * MASK  - Interrupt mask
4050 	 * Set bit 0 of TXCFG
4051 	 * Set bit 0 of RXCFG
4052 	 */
4053 
4054 	/*
4055 	 * Initialize the TX_MAC registers
4056 	 * Initialization of jamsize to work around rx crc bug
4057 	 */
4058 	PUT_MACREG(jam, jamsize);
4059 
4060 #ifdef	FEPS_URUN_BUG
4061 	if (hme_urun_fix)
4062 		PUT_MACREG(palen, hme_palen);
4063 #endif
4064 
4065 	PUT_MACREG(ipg1, hme_param_ipg1);
4066 	PUT_MACREG(ipg2, hme_param_ipg2);
4067 
4068 	PUT_MACREG(rseed,
4069 	    ((hmep->hme_ouraddr.ether_addr_octet[0] & 0x3) << 8) |
4070 	    hmep->hme_ouraddr.ether_addr_octet[1]);
4071 
4072 	/* Initialize the RX_MAC registers */
4073 
4074 	/*
4075 	 * Program BigMAC with local individual ethernet address.
4076 	 */
4077 	PUT_MACREG(madd2, (hmep->hme_ouraddr.ether_addr_octet[4] << 8) |
4078 	    hmep->hme_ouraddr.ether_addr_octet[5]);
4079 	PUT_MACREG(madd1, (hmep->hme_ouraddr.ether_addr_octet[2] << 8) |
4080 	    hmep->hme_ouraddr.ether_addr_octet[3]);
4081 	PUT_MACREG(madd0, (hmep->hme_ouraddr.ether_addr_octet[0] << 8) |
4082 	    hmep->hme_ouraddr.ether_addr_octet[1]);
4083 
4084 	/*
4085 	 * Set up multicast address filter by passing all multicast
4086 	 * addresses through a crc generator, and then using the
4087 	 * low order 6 bits as an index into the 64 bit logical
4088 	 * address filter. The high order two bits select the word,
4089 	 * while the remaining four bits select the bit within the word.
4090 	 */
4091 	PUT_MACREG(hash0, hmep->hme_ladrf[0]);
4092 	PUT_MACREG(hash1, hmep->hme_ladrf[1]);
4093 	PUT_MACREG(hash2, hmep->hme_ladrf[2]);
4094 	PUT_MACREG(hash3, hmep->hme_ladrf[3]);
4095 
4096 	/*
4097 	 * Configure parameters to support VLAN.  (VLAN encapsulation adds
4098 	 * four bytes.)
4099 	 */
4100 	PUT_MACREG(txmax, ETHERMAX + ETHERFCSL + 4);
4101 	PUT_MACREG(rxmax, ETHERMAX + ETHERFCSL + 4);
4102 
4103 	/*
4104 	 * Initialize HME Global registers, ETX registers and ERX registers.
4105 	 */
4106 
4107 	PUT_ETXREG(txring, hmep->hme_tmd_paddr);
4108 	PUT_ERXREG(rxring, hmep->hme_rmd_paddr);
4109 
4110 	/*
4111 	 * ERX registers can be written reliably only if the value has an
4112 	 * even number of bits set. So, if the value written is not read
4113 	 * back, set a low-order bit and write again.
4114 	 * static	int	hme_erx_fix = 1;   : Use the fix for erx bug
4115 	 */
4116 	{
4117 		uint32_t temp;
4118 		temp  = hmep->hme_rmd_paddr;
4119 
4120 		if (GET_ERXREG(rxring) != temp)
4121 			PUT_ERXREG(rxring, (temp | 4));
4122 	}
4123 
4124 	PUT_GLOBREG(config, (hmep->hme_config |
4125 	    (hmep->hme_64bit_xfer << HMEG_CONFIG_64BIT_SHIFT)));
4126 
4127 	/*
4128 	 * Significant performance improvements can be achieved by
4129 	 * disabling transmit interrupts. Thus TMDs are reclaimed only
4130 	 * when we run out of them in hmestart().
4131 	 */
4132 	PUT_GLOBREG(intmask,
4133 	    HMEG_MASK_INTR | HMEG_MASK_TINT | HMEG_MASK_TX_ALL);
4134 
4135 	PUT_ETXREG(txring_size, ((HME_TMDMAX - 1) >> HMET_RINGSZ_SHIFT));
4136 	PUT_ETXREG(config, (GET_ETXREG(config) | HMET_CONFIG_TXDMA_EN
4137 	    | HMET_CONFIG_TXFIFOTH));
4138 	/* get the rxring size bits */
4139 	switch (HME_RMDMAX) {
4140 	case 32:
4141 		i = HMER_CONFIG_RXRINGSZ32;
4142 		break;
4143 	case 64:
4144 		i = HMER_CONFIG_RXRINGSZ64;
4145 		break;
4146 	case 128:
4147 		i = HMER_CONFIG_RXRINGSZ128;
4148 		break;
4149 	case 256:
4150 		i = HMER_CONFIG_RXRINGSZ256;
4151 		break;
4152 	default:
4153 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4154 		    unk_rx_ringsz_msg);
4155 		goto init_fail;
4156 	}
4157 	i |= (HME_FSTBYTE_OFFSET << HMER_CONFIG_FBO_SHIFT)
4158 	    | HMER_CONFIG_RXDMA_EN;
4159 
4160 	/* h/w checks start offset in half words */
4161 	i |= ((sizeof (struct ether_header) / 2) << HMER_RX_CSSTART_SHIFT);
4162 
4163 	PUT_ERXREG(config, i);
4164 
4165 	/*
4166 	 * Bug related to the parity handling in ERX: when erxp->config is
4167 	 * read back, the Sbus/FEPS drives the parity bit, and this value
4168 	 * is used when writing again.
4170 	 * This fixes the RECV problem in SS5.
4171 	 * static	int	hme_erx_fix = 1;   : Use the fix for erx bug
4172 	 */
4173 	{
4174 		uint32_t temp;
4175 		temp = GET_ERXREG(config);
4176 		PUT_ERXREG(config, i);
4177 
4178 		if (GET_ERXREG(config) != i)
4179 			HME_FAULT_MSG4(hmep, SEVERITY_UNKNOWN, ERX_MSG,
4180 			    "error:temp = %x erxp->config = %x, should be %x",
4181 			    temp, GET_ERXREG(config), i);
4182 	}
4183 
4184 	/*
4185 	 * Set up the rxconfig, txconfig and seed registers, without
4186 	 * enabling the former two at this time.
4187 	 *
4188 	 * BigMAC strips the CRC bytes by default. Since this is
4189 	 * contrary to other pieces of hardware, this bit needs to be
4190 	 * enabled to tell BigMAC not to strip the CRC bytes.
4191 	 * Do not filter this node's own packets.
4192 	 */
4193 
4194 	if (hme_reject_own) {
4195 		PUT_MACREG(rxcfg,
4196 		    ((hmep->hme_promisc ? BMAC_RXCFG_PROMIS : 0) |
4197 		    BMAC_RXCFG_MYOWN | BMAC_RXCFG_HASH));
4198 	} else {
4199 		PUT_MACREG(rxcfg,
4200 		    ((hmep->hme_promisc ? BMAC_RXCFG_PROMIS : 0) |
4201 		    BMAC_RXCFG_HASH));
4202 	}
4203 
4204 	drv_usecwait(10);	/* wait after setting Hash Enable bit */
4205 
4206 	if (hme_ngu_enable)
4207 		PUT_MACREG(txcfg, (hmep->hme_fdx ? BMAC_TXCFG_FDX: 0) |
4208 		    BMAC_TXCFG_NGU);
4209 	else
4210 		PUT_MACREG(txcfg, (hmep->hme_fdx ? BMAC_TXCFG_FDX: 0));
4211 	hmep->hme_macfdx = hmep->hme_fdx;
4212 
4213 
4214 	i = 0;
4215 	if ((hme_param_lance_mode) && (hmep->hme_lance_mode_enable))
4216 		i = ((hme_param_ipg0 & HME_MASK_5BIT) << BMAC_XIFC_IPG0_SHIFT)
4217 		    | BMAC_XIFC_LANCE_ENAB;
4218 	if (hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER)
4219 		PUT_MACREG(xifc, i | (BMAC_XIFC_ENAB));
4220 	else
4221 		PUT_MACREG(xifc, i | (BMAC_XIFC_ENAB | BMAC_XIFC_MIIBUFDIS));
4222 
4223 	PUT_MACREG(rxcfg, GET_MACREG(rxcfg) | BMAC_RXCFG_ENAB);
4224 	PUT_MACREG(txcfg, GET_MACREG(txcfg) | BMAC_TXCFG_ENAB);
4225 
4226 	hmep->hme_flags |= (HMERUNNING | HMEINITIALIZED);
4227 	/*
4228 	 * Update the interrupt mask: this will re-allow interrupts to occur
4229 	 */
4230 	PUT_GLOBREG(intmask, HMEG_MASK_INTR);
4231 	mac_tx_update(hmep->hme_mh);
4232 
4233 init_fail:
4234 	/*
4235 	 * Release the locks in reverse order
4236 	 */
4237 	mutex_exit(&hmep->hme_xmitlock);
4238 	mutex_exit(&hmep->hme_intrlock);
4239 
4240 	ret = !(hmep->hme_flags & HMERUNNING);
4241 	if (ret) {
4242 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4243 		    init_fail_gen_msg);
4244 	}
4245 
4246 	/*
4247 	 * Hardware checks.
4248 	 */
4249 	CHECK_GLOBREG();
4250 	CHECK_MIFREG();
4251 	CHECK_MACREG();
4252 	CHECK_ERXREG();
4253 	CHECK_ETXREG();
4254 
4255 init_exit:
4256 	return (ret);
4257 }
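
/*
 * A hedged sketch, not from the original source, of the multicast
 * filter mapping described in hmeinit() above: six bits of the
 * Ethernet CRC of the address select one of the 64 ladrf bits; the
 * upper two of those six bits pick the 16-bit hash word and the lower
 * four pick the bit.  Which end of the CRC supplies the six bits is an
 * assumption here and must match the chip; the driver's multicast
 * entry point is authoritative.  CRC32() and crc32_table are from
 * <sys/crc32.h>, already included by this file.
 */
#ifdef HME_HASH_EXAMPLE
static void
hme_hash_example(const uint8_t *macaddr, uint16_t ladrf[4])
{
	uint32_t crc;

	CRC32(crc, macaddr, ETHERADDRL, -1U, crc32_table);
	crc >>= 26;				/* keep six bits: 0..63 */
	ladrf[crc >> 4] |= 1 << (crc & 0xf);	/* word select, bit select */
}
#endif	/* HME_HASH_EXAMPLE */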
4258 
4259 /*
4260  * Calculate the dvma burstsize by setting up a dvma temporarily.  Return
4261  * 0 as the burstsize upon failure, as it signifies no burst size.
4262  * Requests 64-bit transfer setup, if the platform supports it.
4263  * NOTE: Do not use ddi_dma_alloc_handle(9F) and then ddi_dma_burstsizes(9F);
4264  * sun4u Ultra-2 incorrectly returns a 32-bit transfer.
4265  */
4266 static int
4267 hmeburstsizes(struct hme *hmep)
4268 {
4269 	int burstsizes;
4270 	ddi_dma_handle_t handle;
4271 
4272 	if (ddi_dma_alloc_handle(hmep->dip, &hme_dma_attr,
4273 	    DDI_DMA_DONTWAIT, NULL, &handle)) {
4274 		return (0);
4275 	}
4276 
4277 	hmep->hme_burstsizes = burstsizes = ddi_dma_burstsizes(handle);
4278 	ddi_dma_free_handle(&handle);
4279 
4280 	/*
4281 	 * Use user-configurable parameter for enabling 64-bit transfers
4282 	 */
4283 	burstsizes = (hmep->hme_burstsizes >> 16);
4284 	if (burstsizes)
4285 		hmep->hme_64bit_xfer = hme_64bit_enable; /* user config value */
4286 	else
4287 		burstsizes = hmep->hme_burstsizes;
4288 
4289 	if (hmep->hme_cheerio_mode)
4290 		hmep->hme_64bit_xfer = 0; /* Disable for cheerio */
4291 
4292 	if (burstsizes & 0x40)
4293 		hmep->hme_config = HMEG_CONFIG_BURST64;
4294 	else if (burstsizes & 0x20)
4295 		hmep->hme_config = HMEG_CONFIG_BURST32;
4296 	else
4297 		hmep->hme_config = HMEG_CONFIG_BURST16;
4298 
4299 	return (DDI_SUCCESS);
4300 }
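
/*
 * For reference: ddi_dma_burstsizes(9F) returns a binary-encoded mask
 * in which bit n being set means bursts of 2^n bytes are supported,
 * with the upper 16 bits describing the burst sizes available for
 * 64-bit transfers.  Hence the tests above: 0x40 selects 64-byte
 * bursts and 0x20 selects 32-byte bursts.
 */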
4301 
4302 static int
4303 hmeallocbuf(struct hme *hmep, hmebuf_t *buf, int dir)
4304 {
4305 	ddi_dma_cookie_t	dmac;
4306 	size_t			len;
4307 	unsigned		ccnt;
4308 
4309 	if (ddi_dma_alloc_handle(hmep->dip, &hme_dma_attr,
4310 	    DDI_DMA_DONTWAIT, NULL, &buf->dmah) != DDI_SUCCESS) {
4311 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4312 		    "cannot allocate buf dma handle - failed");
4313 		return (DDI_FAILURE);
4314 	}
4315 
4316 	if (ddi_dma_mem_alloc(buf->dmah, ROUNDUP(HMEBUFSIZE, 512),
4317 	    &hme_buf_attr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
4318 	    &buf->kaddr, &len, &buf->acch) != DDI_SUCCESS) {
4319 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4320 		    "cannot allocate buf memory - failed");
4321 		return (DDI_FAILURE);
4322 	}
4323 
4324 	if (ddi_dma_addr_bind_handle(buf->dmah, NULL, buf->kaddr,
4325 	    len, dir | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
4326 	    &dmac, &ccnt) != DDI_DMA_MAPPED) {
4327 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4328 		    "cannot map buf for dma - failed");
4329 		return (DDI_FAILURE);
4330 	}
4331 	buf->paddr = dmac.dmac_address;
4332 
4333 	/* apparently they don't handle multiple cookies */
4334 	if (ccnt > 1) {
4335 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4336 		    "too many buf dma cookies");
4337 		return (DDI_FAILURE);
4338 	}
4339 	return (DDI_SUCCESS);
4340 }
4341 
4342 static int
4343 hmeallocbufs(struct hme *hmep)
4344 {
4345 	hmep->hme_tbuf = kmem_zalloc(HME_TMDMAX * sizeof (hmebuf_t), KM_SLEEP);
4346 	hmep->hme_rbuf = kmem_zalloc(HME_RMDMAX * sizeof (hmebuf_t), KM_SLEEP);
4347 
4348 	/* Alloc RX buffers. */
4349 	for (int i = 0; i < HME_RMDMAX; i++) {
4350 		if (hmeallocbuf(hmep, &hmep->hme_rbuf[i], DDI_DMA_READ) !=
4351 		    DDI_SUCCESS) {
4352 			return (DDI_FAILURE);
4353 		}
4354 	}
4355 
4356 	/* Alloc TX buffers. */
4357 	for (int i = 0; i < HME_TMDMAX; i++) {
4358 		if (hmeallocbuf(hmep, &hmep->hme_tbuf[i], DDI_DMA_WRITE) !=
4359 		    DDI_SUCCESS) {
4360 			return (DDI_FAILURE);
4361 		}
4362 	}
4363 	return (DDI_SUCCESS);
4364 }
4365 
4366 static void
4367 hmefreebufs(struct hme *hmep)
4368 {
4369 	int i;
4370 
4371 	if (hmep->hme_rbuf == NULL)
4372 		return;
4373 
4374 	/*
4375 	 * Free and unload pending xmit and recv buffers.
4376 	 * We have written the routine to be idempotent, so it is
4377 	 * safe to call more than once.
4378 	 */
4379 
4380 	for (i = 0; i < HME_TMDMAX; i++) {
4381 		hmebuf_t *tbuf = &hmep->hme_tbuf[i];
4382 		if (tbuf->paddr) {
4383 			(void) ddi_dma_unbind_handle(tbuf->dmah);
4384 		}
4385 		if (tbuf->kaddr) {
4386 			ddi_dma_mem_free(&tbuf->acch);
4387 		}
4388 		if (tbuf->dmah) {
4389 			ddi_dma_free_handle(&tbuf->dmah);
4390 		}
4391 	}
4392 	for (i = 0; i < HME_RMDMAX; i++) {
4393 		hmebuf_t *rbuf = &hmep->hme_rbuf[i];
4394 		if (rbuf->paddr) {
4395 			(void) ddi_dma_unbind_handle(rbuf->dmah);
4396 		}
4397 		if (rbuf->kaddr) {
4398 			ddi_dma_mem_free(&rbuf->acch);
4399 		}
4400 		if (rbuf->dmah) {
4401 			ddi_dma_free_handle(&rbuf->dmah);
4402 		}
4403 	}
4404 	kmem_free(hmep->hme_rbuf, HME_RMDMAX * sizeof (hmebuf_t));
4405 	kmem_free(hmep->hme_tbuf, HME_TMDMAX * sizeof (hmebuf_t));
4406 }
4407 
4408 /*
4409  * hme_start_mifpoll() - Enables the polling of the BMSR register of the PHY.
4410  * After enabling the poll, delay for at least 62 us for one poll to be done.
4411  * Then read the MIF status register to auto-clear the MIF status field.
4412  * Then program the MIF interrupt mask register to enable interrupts for the
4413  * LINK_STATUS and JABBER_DETECT bits.
4414  */
4415 
4416 static void
4417 hme_start_mifpoll(struct hme *hmep)
4418 {
4419 	uint32_t cfg;
4420 
4421 	if (!hmep->hme_mifpoll_enable)
4422 		return;
4423 
4424 	cfg = (GET_MIFREG(mif_cfg) & ~(HME_MIF_CFGPD | HME_MIF_CFGPR));
4425 	PUT_MIFREG(mif_cfg,
4426 	    (cfg = (cfg | (hmep->hme_phyad << HME_MIF_CFGPD_SHIFT) |
4427 	    (HME_PHY_BMSR << HME_MIF_CFGPR_SHIFT) | HME_MIF_CFGPE)));
4428 
4429 	drv_usecwait(HME_MIF_POLL_DELAY);
4430 	hmep->hme_polling_on =		1;
4431 	hmep->hme_mifpoll_flag =	0;
4432 	hmep->hme_mifpoll_data =	(GET_MIFREG(mif_bsts) >> 16);
4433 
4434 	/* Do not poll for Jabber Detect for 100 Mbps speed */
4435 	if (((hmep->hme_mode == HME_AUTO_SPEED) &&
4436 	    (hmep->hme_tryspeed == HME_SPEED_100)) ||
4437 	    ((hmep->hme_mode == HME_FORCE_SPEED) &&
4438 	    (hmep->hme_forcespeed == HME_SPEED_100)))
4439 		PUT_MIFREG(mif_imask, ((uint16_t)~(PHY_BMSR_LNKSTS)));
4440 	else
4441 		PUT_MIFREG(mif_imask,
4442 		    (uint16_t)~(PHY_BMSR_LNKSTS | PHY_BMSR_JABDET));
4443 
4444 	CHECK_MIFREG();
4445 }
4446 
4447 static void
4448 hme_stop_mifpoll(struct hme *hmep)
4449 {
4450 	if ((!hmep->hme_mifpoll_enable) || (!hmep->hme_polling_on))
4451 		return;
4452 
4453 	PUT_MIFREG(mif_imask, 0xffff);	/* mask interrupts */
4454 	PUT_MIFREG(mif_cfg, (GET_MIFREG(mif_cfg) & ~HME_MIF_CFGPE));
4455 
4456 	hmep->hme_polling_on = 0;
4457 	drv_usecwait(HME_MIF_POLL_DELAY);
4458 	CHECK_MIFREG();
4459 }
4460 
4461 /*
4462  * Un-initialize (STOP) HME channel.
4463  */
4464 static void
4465 hmeuninit(struct hme *hmep)
4466 {
4467 	/*
4468 	 * Allow up to 'HMEDRAINTIME' for pending xmit's to complete.
4469 	 */
4470 	HMEDELAY((hmep->hme_txindex == hmep->hme_txreclaim), HMEDRAINTIME);
4471 
4472 	hme_stop_timer(hmep);   /* acquire hme_linklock */
4473 	mutex_exit(&hmep->hme_linklock);
4474 
4475 	mutex_enter(&hmep->hme_intrlock);
4476 	mutex_enter(&hmep->hme_xmitlock);
4477 
4478 	hme_stop_mifpoll(hmep);
4479 
4480 	hmep->hme_flags &= ~HMERUNNING;
4481 
4482 	(void) hmestop(hmep);
4483 
4484 	mutex_exit(&hmep->hme_xmitlock);
4485 	mutex_exit(&hmep->hme_intrlock);
4486 }
4487 
4488 /*
4489  * Allocate CONSISTENT memory for rmds and tmds with appropriate alignment and
4490  * map it in IO space. Allocate space for transmit and receive ddi_dma_handle
4491  * structures to use the DMA interface.
4492  */
4493 static int
4494 hmeallocthings(struct hme *hmep)
4495 {
4496 	int			size;
4497 	int			rval;
4498 	size_t			real_len;
4499 	uint_t			cookiec;
4500 	ddi_dma_cookie_t	dmac;
4501 	dev_info_t		*dip = hmep->dip;
4502 
4503 	/*
4504 	 * Allocate the TMD and RMD descriptors, with extra for page alignment.
4505 	 */
4506 
4507 	rval = ddi_dma_alloc_handle(dip, &hme_dma_attr, DDI_DMA_DONTWAIT, NULL,
4508 	    &hmep->hme_rmd_dmah);
4509 	if (rval != DDI_SUCCESS) {
4510 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4511 		    "cannot allocate rmd handle - failed");
4512 		return (DDI_FAILURE);
4513 	}
4514 	size = HME_RMDMAX * sizeof (struct hme_rmd);
4515 	rval = ddi_dma_mem_alloc(hmep->hme_rmd_dmah, size,
4516 	    &hmep->hme_dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
4517 	    &hmep->hme_rmd_kaddr, &real_len, &hmep->hme_rmd_acch);
4518 	if (rval != DDI_SUCCESS) {
4519 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4520 		    "cannot allocate rmd dma mem - failed");
4521 		return (DDI_FAILURE);
4522 	}
4523 	hmep->hme_rmdp = (void *)(hmep->hme_rmd_kaddr);
4524 	rval = ddi_dma_addr_bind_handle(hmep->hme_rmd_dmah, NULL,
4525 	    hmep->hme_rmd_kaddr, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
4526 	    DDI_DMA_DONTWAIT, NULL, &dmac, &cookiec);
4527 	if (rval != DDI_DMA_MAPPED) {
4528 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4529 		    "cannot allocate rmd dma - failed");
4530 		return (DDI_FAILURE);
4531 	}
4532 	hmep->hme_rmd_paddr = dmac.dmac_address;
4533 	if (cookiec != 1) {
4534 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4535 		    "too many rmd cookies - failed");
4536 		return (DDI_FAILURE);
4537 	}
4538 
4539 	rval = ddi_dma_alloc_handle(dip, &hme_dma_attr, DDI_DMA_DONTWAIT, NULL,
4540 	    &hmep->hme_tmd_dmah);
4541 	if (rval != DDI_SUCCESS) {
4542 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4543 		    "cannot allocate tmd handle - failed");
4544 		return (DDI_FAILURE);
4545 	}
4546 	size = HME_TMDMAX * sizeof (struct hme_tmd);
4547 	rval = ddi_dma_mem_alloc(hmep->hme_tmd_dmah, size,
4548 	    &hmep->hme_dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
4549 	    &hmep->hme_tmd_kaddr, &real_len, &hmep->hme_tmd_acch);
4550 	if (rval != DDI_SUCCESS) {
4551 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4552 		    "cannot allocate tmd dma mem - failed");
4553 		return (DDI_FAILURE);
4554 	}
4555 	hmep->hme_tmdp = (void *)(hmep->hme_tmd_kaddr);
4556 	rval = ddi_dma_addr_bind_handle(hmep->hme_tmd_dmah, NULL,
4557 	    hmep->hme_tmd_kaddr, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
4558 	    DDI_DMA_DONTWAIT, NULL, &dmac, &cookiec);
4559 	if (rval != DDI_DMA_MAPPED) {
4560 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4561 		    "cannot allocate tmd dma - failed");
4562 		return (DDI_FAILURE);
4563 	}
4564 	hmep->hme_tmd_paddr = dmac.dmac_address;
4565 	if (cookiec != 1) {
4566 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4567 		    "too many tmd cookies - failed");
4568 		return (DDI_FAILURE);
4569 	}
4570 
4571 	return (DDI_SUCCESS);
4572 }
4573 
4574 static void
4575 hmefreethings(struct hme *hmep)
4576 {
4577 	if (hmep->hme_rmd_paddr) {
4578 		(void) ddi_dma_unbind_handle(hmep->hme_rmd_dmah);
4579 		hmep->hme_rmd_paddr = 0;
4580 	}
4581 	if (hmep->hme_rmd_acch)
4582 		ddi_dma_mem_free(&hmep->hme_rmd_acch);
4583 	if (hmep->hme_rmd_dmah)
4584 		ddi_dma_free_handle(&hmep->hme_rmd_dmah);
4585 
4586 	if (hmep->hme_tmd_paddr) {
4587 		(void) ddi_dma_unbind_handle(hmep->hme_tmd_dmah);
4588 		hmep->hme_tmd_paddr = 0;
4589 	}
4590 	if (hmep->hme_tmd_acch)
4591 		ddi_dma_mem_free(&hmep->hme_tmd_acch);
4592 	if (hmep->hme_tmd_dmah)
4593 		ddi_dma_free_handle(&hmep->hme_tmd_dmah);
4594 }
4595 
4596 /*
4597  *	First check to see if it is our device interrupting.
4598  */
4599 static uint_t
4600 hmeintr(caddr_t arg)
4601 {
4602 	struct hme	*hmep = (void *)arg;
4603 	uint32_t	hmesbits;
4604 	uint32_t	mif_status;
4605 	uint32_t	serviced = DDI_INTR_UNCLAIMED;
4606 	uint32_t	num_reads = 0;
4607 	uint32_t	rflags;
4608 	mblk_t		*mp, *head, **tail;
4609 
4610 
4611 	head = NULL;
4612 	tail = &head;
4613 
4614 	mutex_enter(&hmep->hme_intrlock);
4615 
4616 	/*
4617 	 * The status register auto-clears on read except for
4618 	 * the MIF Interrupt bit.
4619 	 */
4620 	hmesbits = GET_GLOBREG(status);
4621 	CHECK_GLOBREG();
4622 
4623 	/*
4624 	 * Note: TINT is sometimes enabled in hmereclaim()
4625 	 */
4626 
4627 	/*
4628 	 * Bugid 1227832 - to handle spurious interrupts on fusion systems.
4629 	 * Claim the first interrupt after initialization
4630 	 */
4631 	if (hmep->hme_flags & HMEINITIALIZED) {
4632 		hmep->hme_flags &= ~HMEINITIALIZED;
4633 		serviced = DDI_INTR_CLAIMED;
4634 	}
4635 
4636 	if ((hmesbits & (HMEG_STATUS_INTR | HMEG_STATUS_TINT)) == 0) {
4637 						/* No interesting interrupt */
4638 		if (hmep->hme_intrstats) {
4639 			if (serviced == DDI_INTR_UNCLAIMED)
4640 				KIOIP->intrs[KSTAT_INTR_SPURIOUS]++;
4641 			else
4642 				KIOIP->intrs[KSTAT_INTR_HARD]++;
4643 		}
4644 		mutex_exit(&hmep->hme_intrlock);
4645 		return (serviced);
4646 	}
4647 
4648 	serviced = DDI_INTR_CLAIMED;
4649 
4650 	if (!(hmep->hme_flags & HMERUNNING)) {
4651 		if (hmep->hme_intrstats)
4652 			KIOIP->intrs[KSTAT_INTR_HARD]++;
4653 		mutex_exit(&hmep->hme_intrlock);
4654 		hmeuninit(hmep);
4655 		return (serviced);
4656 	}
4657 
4658 	if (hmesbits & (HMEG_STATUS_FATAL_ERR | HMEG_STATUS_NONFATAL_ERR)) {
4659 		if (hmesbits & HMEG_STATUS_FATAL_ERR) {
4660 
4661 			if (hmep->hme_intrstats)
4662 				KIOIP->intrs[KSTAT_INTR_HARD]++;
4663 			hme_fatal_err(hmep, hmesbits);
4664 
4665 			mutex_exit(&hmep->hme_intrlock);
4666 			(void) hmeinit(hmep);
4667 			return (serviced);
4668 		}
4669 		hme_nonfatal_err(hmep, hmesbits);
4670 	}
4671 
4672 	if (hmesbits & HMEG_STATUS_MIF_INTR) {
4673 		mif_status = (GET_MIFREG(mif_bsts) >> 16);
4674 		if (!(mif_status & PHY_BMSR_LNKSTS)) {
4675 
4676 			if (hmep->hme_intrstats)
4677 				KIOIP->intrs[KSTAT_INTR_HARD]++;
4678 
4679 			hme_stop_mifpoll(hmep);
4680 			hmep->hme_mifpoll_flag = 1;
4681 			mutex_exit(&hmep->hme_intrlock);
4682 			hme_stop_timer(hmep);
4683 			hme_start_timer(hmep, hme_check_link, MSECOND(1));
4684 			return (serviced);
4685 		}
4686 		/*
4687 		 *
4688 		 * BugId 1261889 EscId 50699 ftp hangs @ 10 Mbps
4689 		 *
4690 		 * Here could be one cause:
4691 		 * national PHY sees jabber, goes into "Jabber function",
4692 		 * (see section 3.7.6 in PHY specs.), disables transmitter,
4693 		 * and waits for internal transmit enable to be de-asserted
4694 		 * for at least 750ms (the "unjab" time).  Also, the PHY
4695 		 * has asserted COL, the collision detect signal.
4696 		 *
4697 		 * In the meantime, the Sbus/FEPS, in never-give-up mode,
4698 		 * continually retries, backs off 16 times as per spec,
4699 		 * and restarts the transmission, so TX_EN is never
4700 		 * deasserted long enough, in particular TX_EN is turned
4701 		 * on approximately once every 4 microseconds on the
4702 		 * average.  PHY and MAC are deadlocked.
4703 		 *
4704 		 * Here is part of the fix:
4705 		 * On seeing the jabber, treat it like a hme_fatal_err
4706 		 * and reset both the Sbus/FEPS and the PHY.
4707 		 */
4708 
4709 		if (mif_status & (PHY_BMSR_JABDET)) {
4710 
4711 			/* national phy only defines this at 10 Mbps */
4712 			if (hme_param_speed == 0) { /* 10 Mbps speed ? */
4713 				hmep->hme_jab++;
4714 
4715 				/* treat jabber like a fatal error */
4716 				hmep->hme_linkcheck = 0; /* force PHY reset */
4717 				mutex_exit(&hmep->hme_intrlock);
4718 				(void) hmeinit(hmep);
4719 
4720 				return (serviced);
4721 			}
4722 		}
4723 		hme_start_mifpoll(hmep);
4724 	}
4725 
4726 	if (hmesbits & (HMEG_STATUS_TX_ALL | HMEG_STATUS_TINT)) {
4727 		mutex_enter(&hmep->hme_xmitlock);
4728 
4729 		hmereclaim(hmep);
4730 		mutex_exit(&hmep->hme_xmitlock);
4731 	}
4732 
4733 	if (hmesbits & HMEG_STATUS_RINT) {
4734 
4735 		/*
4736 		 * This dummy PIO is required to flush the SBus
4737 		 * Bridge buffers in QFE.
4738 		 */
4739 		(void) GET_GLOBREG(config);
4740 
4741 		/*
4742 		 * Loop through each RMD no more than once.
4743 		 */
4744 		while (num_reads++ < HME_RMDMAX) {
4745 			hmebuf_t *rbuf;
4746 			int rxptr;
4747 
4748 			rxptr = hmep->hme_rxindex % HME_RMDMAX;
4749 			HMESYNCRMD(rxptr, DDI_DMA_SYNC_FORKERNEL);
4750 
4751 			rflags = GET_RMD_FLAGS(rxptr);
4752 			if (rflags & HMERMD_OWN) {
4753 				/*
4754 				 * Chip still owns it.  We're done.
4755 				 */
4756 				break;
4757 			}
4758 
4759 			/*
4760 			 * Retrieve the packet.
4761 			 */
4762 			rbuf = &hmep->hme_rbuf[rxptr];
4763 			mp = hmeread(hmep, rbuf, rflags);
4764 
4765 			/*
4766 			 * Return ownership of the RMD.
4767 			 */
4768 			PUT_RMD(rxptr, rbuf->paddr);
4769 			HMESYNCRMD(rxptr, DDI_DMA_SYNC_FORDEV);
4770 
4771 			if (mp != NULL) {
4772 				*tail = mp;
4773 				tail = &mp->b_next;
4774 			}
4775 
4776 			/*
4777 			 * Advance to the next RMD.
4778 			 */
4779 			hmep->hme_rxindex++;
4780 		}
4781 	}
4782 
4783 	if (hmep->hme_intrstats)
4784 		KIOIP->intrs[KSTAT_INTR_HARD]++;
4785 
4786 	mutex_exit(&hmep->hme_intrlock);
4787 
4788 	if (head != NULL)
4789 		mac_rx(hmep->hme_mh, NULL, head);
4790 
4791 	return (serviced);
4792 }
4793 
4794 /*
4795  * Transmit completion reclaiming.
4796  */
4797 static void
4798 hmereclaim(struct hme *hmep)
4799 {
4800 	boolean_t	reclaimed = B_FALSE;
4801 
4802 	/*
4803 	 * Loop through each TMD.
4804 	 */
4805 	while (hmep->hme_txindex > hmep->hme_txreclaim) {
4806 
4807 		int		reclaim;
4808 		uint32_t	flags;
4809 
4810 		reclaim = hmep->hme_txreclaim % HME_TMDMAX;
4811 		HMESYNCTMD(reclaim, DDI_DMA_SYNC_FORKERNEL);
4812 
4813 		flags = GET_TMD_FLAGS(reclaim);
4814 		if (flags & HMETMD_OWN) {
4815 			/*
4816 			 * Chip still owns it.  We're done.
4817 			 */
4818 			break;
4819 		}
4820 
4821 		/*
4822 		 * Count a chained packet only once.
4823 		 */
4824 		if (flags & HMETMD_SOP) {
4825 			hmep->hme_opackets++;
4826 		}
4827 
4828 		/*
4829 		 * MIB II
4830 		 */
4831 		hmep->hme_obytes += flags & HMETMD_BUFSIZE;
4832 
4833 		reclaimed = B_TRUE;
4834 		hmep->hme_txreclaim++;
4835 	}
4836 
4837 	if (reclaimed) {
4838 		/*
4839 		 * We reclaimed some TMDs, so mask transmit interrupts again.
4840 		 */
4841 		if (hmep->hme_wantw) {
4842 			PUT_GLOBREG(intmask,
4843 			    HMEG_MASK_INTR | HMEG_MASK_TINT |
4844 			    HMEG_MASK_TX_ALL);
4845 			hmep->hme_wantw = B_FALSE;
4846 			mac_tx_update(hmep->hme_mh);
4847 		}
4848 	} else {
4849 		/*
4850 		 * Enable TINT so that hmereclaim will get called even if
4851 		 * there is no further activity.
4852 		 */
4853 		if (hmep->hme_wantw)
4854 			PUT_GLOBREG(intmask,
4855 			    GET_GLOBREG(intmask) & ~HMEG_MASK_TX_ALL);
4856 	}
4857 	CHECK_GLOBREG();
4858 }
4859 
4860 /*
4861  * Handle interrupts for fatal errors
4862  * Need reinitialization of the ENET channel.
4863  */
4864 static void
4865 hme_fatal_err(struct hme *hmep, uint_t hmesbits)
4866 {
4867 
4868 	if (hmesbits & HMEG_STATUS_SLV_PAR_ERR) {
4869 		hmep->hme_slvparerr++;
4870 	}
4871 
4872 	if (hmesbits & HMEG_STATUS_SLV_ERR_ACK) {
4873 		hmep->hme_slverrack++;
4874 	}
4875 
4876 	if (hmesbits & HMEG_STATUS_TX_TAG_ERR) {
4877 		hmep->hme_txtagerr++;
4878 		hmep->hme_oerrors++;
4879 	}
4880 
4881 	if (hmesbits & HMEG_STATUS_TX_PAR_ERR) {
4882 		hmep->hme_txparerr++;
4883 		hmep->hme_oerrors++;
4884 	}
4885 
4886 	if (hmesbits & HMEG_STATUS_TX_LATE_ERR) {
4887 		hmep->hme_txlaterr++;
4888 		hmep->hme_oerrors++;
4889 	}
4890 
4891 	if (hmesbits & HMEG_STATUS_TX_ERR_ACK) {
4892 		hmep->hme_txerrack++;
4893 		hmep->hme_oerrors++;
4894 	}
4895 
4896 	if (hmesbits & HMEG_STATUS_EOP_ERR) {
4897 		hmep->hme_eoperr++;
4898 	}
4899 
4900 	if (hmesbits & HMEG_STATUS_RX_TAG_ERR) {
4901 		hmep->hme_rxtagerr++;
4902 		hmep->hme_ierrors++;
4903 	}
4904 
4905 	if (hmesbits & HMEG_STATUS_RX_PAR_ERR) {
4906 		hmep->hme_rxparerr++;
4907 		hmep->hme_ierrors++;
4908 	}
4909 
4910 	if (hmesbits & HMEG_STATUS_RX_LATE_ERR) {
4911 		hmep->hme_rxlaterr++;
4912 		hmep->hme_ierrors++;
4913 	}
4914 
4915 	if (hmesbits & HMEG_STATUS_RX_ERR_ACK) {
4916 		hmep->hme_rxerrack++;
4917 		hmep->hme_ierrors++;
4918 	}
4919 }
4920 
4921 /*
4922  * Handle interrupts regarding non-fatal errors.
4923  */
4924 static void
4925 hme_nonfatal_err(struct hme *hmep, uint_t hmesbits)
4926 {
4927 
4928 	if (hmesbits & HMEG_STATUS_RX_DROP) {
4929 		hmep->hme_missed++;
4930 		hmep->hme_ierrors++;
4931 	}
4932 
4933 	if (hmesbits & HMEG_STATUS_DEFTIMR_EXP) {
4934 		hmep->hme_defer_xmts++;
4935 	}
4936 
4937 	if (hmesbits & HMEG_STATUS_FSTCOLC_EXP) {
4938 		hmep->hme_fstcol += 256;
4939 	}
4940 
4941 	if (hmesbits & HMEG_STATUS_LATCOLC_EXP) {
4942 		hmep->hme_tlcol += 256;
4943 		hmep->hme_oerrors += 256;
4944 	}
4945 
4946 	if (hmesbits & HMEG_STATUS_EXCOLC_EXP) {
4947 		hmep->hme_excol += 256;
4948 		hmep->hme_oerrors += 256;
4949 	}
4950 
4951 	if (hmesbits & HMEG_STATUS_NRMCOLC_EXP) {
4952 		hmep->hme_coll += 256;
4953 	}
4954 
4955 	if (hmesbits & HMEG_STATUS_MXPKTSZ_ERR) {
4956 		hmep->hme_babl++;
4957 		hmep->hme_oerrors++;
4958 	}
4959 
4960 	/*
4961 	 * This error is fatal and the board needs to
4962 	 * be reinitialized. Comments?
4963 	 */
4964 	if (hmesbits & HMEG_STATUS_TXFIFO_UNDR) {
4965 		hmep->hme_uflo++;
4966 		hmep->hme_oerrors++;
4967 	}
4968 
4969 	if (hmesbits & HMEG_STATUS_SQE_TST_ERR) {
4970 		hmep->hme_sqe_errors++;
4971 	}
4972 
4973 	if (hmesbits & HMEG_STATUS_RCV_CNT_EXP) {
4974 		if (hmep->hme_rxcv_enable) {
4975 			hmep->hme_cvc += 256;
4976 		}
4977 	}
4978 
4979 	if (hmesbits & HMEG_STATUS_RXFIFO_OVFL) {
4980 		hmep->hme_oflo++;
4981 		hmep->hme_ierrors++;
4982 	}
4983 
4984 	if (hmesbits & HMEG_STATUS_LEN_CNT_EXP) {
4985 		hmep->hme_lenerr += 256;
4986 		hmep->hme_ierrors += 256;
4987 	}
4988 
4989 	if (hmesbits & HMEG_STATUS_ALN_CNT_EXP) {
4990 		hmep->hme_align_errors += 256;
4991 		hmep->hme_ierrors += 256;
4992 	}
4993 
4994 	if (hmesbits & HMEG_STATUS_CRC_CNT_EXP) {
4995 		hmep->hme_fcs_errors += 256;
4996 		hmep->hme_ierrors += 256;
4997 	}
4998 }
4999 
5000 static mblk_t *
5001 hmeread(struct hme *hmep, hmebuf_t *rbuf, uint32_t rflags)
5002 {
5003 	mblk_t		*bp;
5004 	uint32_t	len;
5005 	t_uscalar_t	type;
5006 
5007 	len = (rflags & HMERMD_BUFSIZE) >> HMERMD_BUFSIZE_SHIFT;
5008 
5009 	/*
5010 	 * Check for short packets and for overflow packets. The
5011 	 * processing is the same for both cases: reuse the buffer
5012 	 * and update the buffer overflow counter.
5014 	 */
5015 	if ((len < ETHERMIN) || (rflags & HMERMD_OVFLOW) ||
5016 	    (len > (ETHERMAX + 4))) {
5017 		if (len < ETHERMIN)
5018 			hmep->hme_runt++;
5019 
5020 		else {
5021 			hmep->hme_buff++;
5022 			hmep->hme_toolong_errors++;
5023 		}
5024 		hmep->hme_ierrors++;
5025 		return (NULL);
5026 	}
5027 
5028 	/*
5029 	 * Sync the received buffer before looking at it.
5030 	 */
5031 
5032 	(void) ddi_dma_sync(rbuf->dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL);
5033 
5034 	/*
5035 	 * copy the packet data and then recycle the descriptor.
5036 	 */
5037 
5038 	if ((bp = allocb(len + HME_FSTBYTE_OFFSET, BPRI_HI)) == NULL) {
5039 
5040 		hmep->hme_allocbfail++;
5041 		hmep->hme_norcvbuf++;
5042 
5043 		return (NULL);
5044 	}
5045 
5046 	bcopy(rbuf->kaddr, bp->b_rptr, len + HME_FSTBYTE_OFFSET);
5047 
5048 	hmep->hme_ipackets++;
5049 
5050 	/* Add the First Byte offset to the b_rptr */
5051 	bp->b_rptr += HME_FSTBYTE_OFFSET;
5052 	bp->b_wptr = bp->b_rptr + len;
5053 
5054 	/*
5055 	 * update MIB II statistics
5056 	 */
5057 	BUMP_InNUcast(hmep, bp->b_rptr);
5058 	hmep->hme_rbytes += len;
5059 
5060 	type = get_ether_type(bp->b_rptr);
5061 
5062 	/*
5063 	 * TCP partial checksum in hardware
5064 	 */
5065 	if (type == ETHERTYPE_IP || type == ETHERTYPE_IPV6) {
5066 		uint16_t cksum = ~rflags & HMERMD_CKSUM;
5067 		uint_t end = len - sizeof (struct ether_header);
5068 		(void) hcksum_assoc(bp, NULL, NULL, 0,
5069 		    0, end, htons(cksum), HCK_PARTIALCKSUM, 0);
5070 	}
5071 
5072 	return (bp);
5073 }
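
/*
 * Note on the checksum handling above: the RMD carries a 16-bit
 * hardware checksum in HMERMD_CKSUM; its complement is handed to the
 * stack as the partial ones'-complement sum over the range starting at
 * sizeof (struct ether_header), matching the CSSTART offset programmed
 * into the ERX config register in hmeinit().
 */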
5074 
5075 /*VARARGS*/
5076 static void
5077 hme_fault_msg(struct hme *hmep, uint_t severity, msg_t type, char *fmt, ...)
5078 {
5079 	char	msg_buffer[255];
5080 	va_list	ap;
5081 
5082 	va_start(ap, fmt);
5083 	(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
5084 
5085 	if (hmep == NULL) {
5086 		cmn_err(CE_NOTE, "hme : %s", msg_buffer);
5087 
5088 	} else if (type == DISPLAY_MSG) {
5089 		cmn_err(CE_CONT, "?%s%d : %s\n", ddi_driver_name(hmep->dip),
5090 		    hmep->instance, msg_buffer);
5091 	} else if (severity == SEVERITY_HIGH) {
5092 		cmn_err(CE_WARN, "%s%d : %s, SEVERITY_HIGH, %s\n",
5093 		    ddi_driver_name(hmep->dip), hmep->instance,
5094 		    msg_buffer, msg_string[type]);
5095 	} else {
5096 		cmn_err(CE_CONT, "%s%d : %s\n", ddi_driver_name(hmep->dip),
5097 		    hmep->instance, msg_buffer);
5098 	}
5099 	va_end(ap);
5100 }
5101 
5102 /*
5103  * if this is the first init do not bother to save the
5104  * counters. They should be 0, but do not count on it.
5105  */
5106 static void
5107 hmesavecntrs(struct hme *hmep)
5108 {
5109 	uint32_t fecnt, aecnt, lecnt, rxcv;
5110 	uint32_t ltcnt, excnt;
5111 
5112 	/* XXX What all gets added in ierrors and oerrors? */
5113 	fecnt = GET_MACREG(fecnt);
5114 	PUT_MACREG(fecnt, 0);
5115 
5116 	aecnt = GET_MACREG(aecnt);
5117 	hmep->hme_align_errors += aecnt;
5118 	PUT_MACREG(aecnt, 0);
5119 
5120 	lecnt = GET_MACREG(lecnt);
5121 	hmep->hme_lenerr += lecnt;
5122 	PUT_MACREG(lecnt, 0);
5123 
5124 	rxcv = GET_MACREG(rxcv);
5125 #ifdef HME_CODEVIOL_BUG
5126 	/*
5127 	 * Ignore rxcv errors for Sbus/FEPS 2.1 or earlier
5128 	 */
5129 	if (!hmep->hme_rxcv_enable) {
5130 		rxcv = 0;
5131 	}
5132 #endif
5133 	hmep->hme_cvc += rxcv;
5134 	PUT_MACREG(rxcv, 0);
5135 
5136 	ltcnt = GET_MACREG(ltcnt);
5137 	hmep->hme_tlcol += ltcnt;
5138 	PUT_MACREG(ltcnt, 0);
5139 
5140 	excnt = GET_MACREG(excnt);
5141 	hmep->hme_excol += excnt;
5142 	PUT_MACREG(excnt, 0);
5143 
5144 	hmep->hme_fcs_errors += fecnt;
5145 	hmep->hme_ierrors += (fecnt + aecnt + lecnt);
5146 	hmep->hme_oerrors += (ltcnt + excnt);
5147 	hmep->hme_coll += (GET_MACREG(nccnt) + ltcnt);
5148 
5149 	PUT_MACREG(nccnt, 0);
5150 	CHECK_MACREG();
5151 }
5152 
5153 /*
5154  * ndd support functions to get/set parameters
5155  */
5156 /* Free the Named Dispatch Table by calling hme_nd_free */
5157 static void
5158 hme_param_cleanup(struct hme *hmep)
5159 {
5160 	if (hmep->hme_g_nd)
5161 		(void) hme_nd_free(&hmep->hme_g_nd);
5162 }
5163 
5164 /*
5165  * Extracts the value from the hme parameter array and prints the
5166  * parameter value. cp points to the required parameter.
5167  */
5168 /* ARGSUSED */
5169 static int
5170 hme_param_get(queue_t *q, mblk_t *mp, caddr_t cp)
5171 {
5172 	hmeparam_t *hmepa = (void *)cp;
5173 
5174 	(void) mi_mpprintf(mp, "%d", hmepa->hme_param_val);
5175 	return (0);
5176 }
5177 
5178 /*
5179  * Register each element of the parameter array with the
5180  * named dispatch handler. Each element is loaded using
5181  * hme_nd_load()
5182  */
5183 /* ARGSUSED */
5184 static int
5185 hme_param_register(struct hme *hmep, hmeparam_t *hmepa, int cnt)
5186 {
5187 	int i;
5188 
5189 	/* First 4 elements are read-only */
5190 	for (i = 0; i < 4; i++, hmepa++)
5191 		if (!hme_nd_load(&hmep->hme_g_nd, hmepa->hme_param_name,
5192 		    (pfi_t)hme_param_get, (pfi_t)0, (caddr_t)hmepa)) {
5193 			(void) hme_nd_free(&hmep->hme_g_nd);
5194 			return (B_FALSE);
5195 		}
5196 	/* Next 10 elements are read and write */
5197 	for (i = 0; i < 10; i++, hmepa++)
5198 		if (hmepa->hme_param_name && hmepa->hme_param_name[0]) {
5199 			if (!hme_nd_load(&hmep->hme_g_nd,
5200 			    hmepa->hme_param_name, (pfi_t)hme_param_get,
5201 			    (pfi_t)hme_param_set, (caddr_t)hmepa)) {
5202 				(void) hme_nd_free(&hmep->hme_g_nd);
5203 				return (B_FALSE);
5204 
5205 			}
5206 		}
5207 	/* Next 12 elements are read-only */
5208 	for (i = 0; i < 12; i++, hmepa++)
5209 		if (!hme_nd_load(&hmep->hme_g_nd, hmepa->hme_param_name,
5210 		    (pfi_t)hme_param_get, (pfi_t)0, (caddr_t)hmepa)) {
5211 			(void) hme_nd_free(&hmep->hme_g_nd);
5212 			return (B_FALSE);
5213 		}
5214 	/* Next 3 elements are read and write */
5215 	for (i = 0; i < 3; i++, hmepa++)
5216 		if (hmepa->hme_param_name && hmepa->hme_param_name[0]) {
5217 			if (!hme_nd_load(&hmep->hme_g_nd,
5218 			    hmepa->hme_param_name, (pfi_t)hme_param_get,
5219 			    (pfi_t)hme_param_set, (caddr_t)hmepa)) {
5220 				(void) hme_nd_free(&hmep->hme_g_nd);
5221 				return (B_FALSE);
5222 			}
5223 		}
5224 
5225 	return (B_TRUE);
5226 }
5227 
5228 /*
5229  * Validates the new value and sets the hme parameter pointed to by cp;
5230  * called for ND_SET requests dispatched through the named dispatch table.
5231  */
5232 /* ARGSUSED */
5233 static int
5234 hme_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp)
5235 {
5236 	char *end;
5237 	size_t new_value;
5238 	hmeparam_t *hmepa = (void *)cp;
5239 
5240 	new_value = mi_strtol(value, &end, 10);
5241 	if (end == value || new_value < hmepa->hme_param_min ||
5242 	    new_value > hmepa->hme_param_max) {
5243 		return (EINVAL);
5244 	}
5245 	hmepa->hme_param_val = (uint32_t)new_value;
5246 	return (0);
5247 
5248 }
5249 
5250 /* Free the table pointed to by 'nd_pparam' */
5251 static void
5252 hme_nd_free(caddr_t *nd_pparam)
5253 {
5254 	ND	*nd;
5255 
5256 	if ((nd = (void *)(*nd_pparam)) != NULL) {
5257 		if (nd->nd_tbl)
5258 			mi_free((char *)nd->nd_tbl);
5259 		mi_free((char *)nd);
5260 		*nd_pparam = NULL;
5261 	}
5262 }
5263 
5264 static int
5265 hme_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp)
5266 {
5267 	int	err;
5268 	IOCP	iocp;
5269 	MBLKP	mp1;
5270 	ND	*nd;
5271 	NDE	*nde;
5272 	char	*valp;
5273 	size_t	avail;
5274 
5275 	if (!nd_param)
5276 		return (B_FALSE);
5277 
5278 	nd = (void *)nd_param;
5279 	iocp = (void *)mp->b_rptr;
5280 	if ((iocp->ioc_count == 0) || !(mp1 = mp->b_cont)) {
5281 		mp->b_datap->db_type = M_IOCACK;
5282 		iocp->ioc_count = 0;
5283 		iocp->ioc_error = EINVAL;
5284 		return (B_TRUE);
5285 	}
5286 
5287 	/*
5288 	 * NOTE - logic throughout nd_xxx assumes single data block for ioctl.
5289 	 *	However, existing code sends in some big buffers.
5290 	 */
5291 	avail = iocp->ioc_count;
5292 	if (mp1->b_cont) {
5293 		freemsg(mp1->b_cont);
5294 		mp1->b_cont = NULL;
5295 	}
5296 
5297 	mp1->b_datap->db_lim[-1] = '\0';	/* Force null termination */
5298 	valp = (char *)mp1->b_rptr;
5299 	for (nde = nd->nd_tbl; /* */; nde++) {
5300 		if (!nde->nde_name)
5301 			return (B_FALSE);
5302 		if (mi_strcmp(nde->nde_name, valp) == 0)
5303 			break;
5304 	}
5305 
5306 	err = EINVAL;
5307 	while (*valp++)
5308 		;
5309 	if (!*valp || valp >= (char *)mp1->b_wptr)
5310 		valp = NULL;
5311 	switch (iocp->ioc_cmd) {
5312 	case ND_GET:
5313 /*
5314  * (temporary) hack: "*valp" is size of user buffer for copyout. If result
5315  * of action routine is too big, free excess and return ioc_rval as buffer
5316  * size needed.  Return as many mblocks as will fit, free the rest.  For
5317  * backward compatibility, assume size of original ioctl buffer if "*valp"
5318  * bad or not given.
5319  */
5320 		if (valp)
5321 			avail = mi_strtol(valp, (char **)0, 10);
5322 		/* We overwrite the name/value with the reply data */
5323 		{
5324 			mblk_t *mp2 = mp1;
5325 
5326 			while (mp2) {
5327 				mp2->b_wptr = mp2->b_rptr;
5328 				mp2 = mp2->b_cont;
5329 			}
5330 		}
5331 		err = (*nde->nde_get_pfi)(q, mp1, nde->nde_data, iocp->ioc_cr);
5332 		if (!err) {
5333 			size_t	size_out;
5334 			ssize_t	excess;
5335 
5336 			iocp->ioc_rval = 0;
5337 
5338 			/* Tack on the null */
5339 			(void) mi_mpprintf_putc((char *)mp1, '\0');
5340 			size_out = msgdsize(mp1);
5341 			excess = size_out - avail;
5342 			if (excess > 0) {
5343 				iocp->ioc_rval = (int)size_out;
5344 				size_out -= excess;
5345 				(void) adjmsg(mp1, -(excess + 1));
5346 				(void) mi_mpprintf_putc((char *)mp1, '\0');
5347 			}
5348 			iocp->ioc_count = size_out;
5349 		}
5350 		break;
5351 
5352 	case ND_SET:
5353 		if (valp) {
5354 			if ((iocp->ioc_cr != NULL) &&
5355 			    ((err = secpolicy_net_config(iocp->ioc_cr, B_FALSE))
5356 			    == 0)) {
5357 				err = (*nde->nde_set_pfi)(q, mp1, valp,
5358 				    nde->nde_data, iocp->ioc_cr);
5359 			}
5360 			iocp->ioc_count = 0;
5361 			freemsg(mp1);
5362 			mp->b_cont = NULL;
5363 		}
5364 		break;
5365 
5366 	default:
5367 		break;
5368 	}
5369 
5370 	iocp->ioc_error = err;
5371 	mp->b_datap->db_type = M_IOCACK;
5372 	return (B_TRUE);
5373 }
5374 
5375 /*
5376  * Load 'name' into the named dispatch table pointed to by 'ndp'.
5377  * 'ndp' should be the address of a char pointer cell.  If the table
5378  * does not exist (*ndp == 0), a new table is allocated and 'ndp'
5379  * is stuffed.  If there is not enough space in the table for a new
5380  * entry, more space is allocated.
5381  */
5382 static boolean_t
5383 hme_nd_load(caddr_t *nd_pparam, char *name, pfi_t get_pfi,
5384     pfi_t set_pfi, caddr_t data)
5385 {
5386 	ND	*nd;
5387 	NDE	*nde;
5388 
5389 	if (!nd_pparam)
5390 		return (B_FALSE);
5391 
5392 	if ((nd = (void *)(*nd_pparam)) == NULL) {
5393 		if ((nd = (void *)mi_alloc(sizeof (ND), BPRI_MED)) == NULL)
5394 			return (B_FALSE);
5395 		bzero(nd, sizeof (ND));
5396 		*nd_pparam = (caddr_t)nd;
5397 	}
5398 
5399 	if (nd->nd_tbl) {
5400 		for (nde = nd->nd_tbl; nde->nde_name; nde++) {
5401 			if (mi_strcmp(name, nde->nde_name) == 0)
5402 				goto fill_it;
5403 		}
5404 	}
5405 
5406 	if (nd->nd_free_count <= 1) {
5407 		if ((nde = (NDE *)mi_alloc(nd->nd_size +
5408 		    NDE_ALLOC_SIZE, BPRI_MED)) == NULL)
5409 			return (B_FALSE);
5410 		bzero(nde, nd->nd_size + NDE_ALLOC_SIZE);
5411 		nd->nd_free_count += NDE_ALLOC_COUNT;
5412 		if (nd->nd_tbl) {
5413 			bcopy(nd->nd_tbl, nde, nd->nd_size);
5414 			mi_free((char *)nd->nd_tbl);
5415 		} else {
5416 			nd->nd_free_count--;
5417 			nde->nde_name = "?";
5418 			nde->nde_get_pfi = nd_get_names;
5419 			nde->nde_set_pfi = nd_set_default;
5420 		}
5421 		nde->nde_data = (caddr_t)nd;
5422 		nd->nd_tbl = nde;
5423 		nd->nd_size += NDE_ALLOC_SIZE;
5424 	}
5425 
5426 	for (nde = nd->nd_tbl; nde->nde_name; nde++)
5427 		;
5428 	nd->nd_free_count--;
5429 fill_it:
5430 	nde->nde_name = name;
5431 	nde->nde_get_pfi = get_pfi ? get_pfi : nd_get_default;
5432 	nde->nde_set_pfi = set_pfi ? set_pfi : nd_set_default;
5433 	nde->nde_data = data;
5434 	return (B_TRUE);
5435 }
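
/*
 * A minimal usage sketch, an assumption rather than original source:
 * registering one read/write parameter with hme_nd_load(), the way
 * hme_param_register() above does for the real parameter table.  The
 * hmeparam_t field names follow hme_param_get()/hme_param_set(); the
 * HME_ND_EXAMPLE guard and the parameter itself are hypothetical.
 */
#ifdef HME_ND_EXAMPLE
static boolean_t
hme_nd_example(struct hme *hmep)
{
	static hmeparam_t example_param;

	example_param.hme_param_min = 0;
	example_param.hme_param_max = 100;
	example_param.hme_param_val = 50;
	example_param.hme_param_name = "example_param";

	/* readable via hme_param_get, writable via hme_param_set */
	return (hme_nd_load(&hmep->hme_g_nd, example_param.hme_param_name,
	    (pfi_t)hme_param_get, (pfi_t)hme_param_set,
	    (caddr_t)&example_param));
}
#endif	/* HME_ND_EXAMPLE */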
5436 
5437 /*
5438  * To set up the mac address for the network interface:
5439  * The adapter card may support a local mac address which is published
5440  * in a device node property "local-mac-address". This mac address is
5441  * treated as the factory-installed mac address for the DLPI interface.
5442  * If the adapter firmware has used the device for diskless boot
5443  * operation, it publishes a property called "mac-address" for use by
5444  * inetboot and the device driver.
5445  * If "mac-address" is not found, the system options property
5446  * "local-mac-address?" is used to select the mac address. If this option
5447  * is set to "true", and "local-mac-address" has been found, then
5448  * local-mac-address is used; otherwise the system mac address is used
5449  * by calling the "localetheraddr()" function.
5450  */
5451 static void
5452 hme_setup_mac_address(struct hme *hmep, dev_info_t *dip)
5453 {
5454 	char	*prop;
5455 	int	prop_len = sizeof (int);
5456 
5457 	hmep->hme_addrflags = 0;
5458 
5459 	/*
5460 	 * Check if it is an adapter with its own local mac address
5461 	 * If it is present, save it as the "factory-address"
5462 	 * for this adapter.
5463 	 */
5464 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5465 	    "local-mac-address",
5466 	    (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
5467 		if (prop_len == ETHERADDRL) {
5468 			hmep->hme_addrflags = HME_FACTADDR_PRESENT;
5469 			ether_bcopy(prop, &hmep->hme_factaddr);
5470 			HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
5471 			    "Local Ethernet address = %s",
5472 			    ether_sprintf(&hmep->hme_factaddr));
5473 		}
5474 		kmem_free(prop, prop_len);
5475 	}
5476 
5477 	/*
5478 	 * Check if the adapter has published "mac-address" property.
5479 	 * If it is present, use it as the mac address for this device.
5480 	 */
5481 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5482 	    "mac-address", (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
5483 		if (prop_len >= ETHERADDRL) {
5484 			ether_bcopy(prop, &hmep->hme_ouraddr);
5485 			kmem_free(prop, prop_len);
5486 			return;
5487 		}
5488 		kmem_free(prop, prop_len);
5489 	}
5490 
5491 #ifdef	__sparc
5492 	/*
5493 	 * On sparc, we might be able to use the mac address from the
5494 	 * system.  However, on all other systems, we need to use the
5495 	 * address from the PROM.
5496 	 */
5497 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 0, "local-mac-address?",
5498 	    (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
5499 		if ((strncmp("true", prop, prop_len) == 0) &&
5500 		    (hmep->hme_addrflags & HME_FACTADDR_PRESENT)) {
5501 			hmep->hme_addrflags |= HME_FACTADDR_USE;
5502 			ether_bcopy(&hmep->hme_factaddr, &hmep->hme_ouraddr);
5503 			kmem_free(prop, prop_len);
5504 			HME_FAULT_MSG1(hmep, SEVERITY_NONE, DISPLAY_MSG,
5505 			    "Using local MAC address");
5506 			return;
5507 		}
5508 		kmem_free(prop, prop_len);
5509 	}
5510 
5511 	/*
5512 	 * Get the system ethernet address.
5513 	 */
5514 	(void) localetheraddr((struct ether_addr *)NULL, &hmep->hme_ouraddr);
5515 #else
5516 	ether_bcopy(&hmep->hme_factaddr, &hmep->hme_ouraddr);
5517 #endif
5518 }
5519 
5520 /* ARGSUSED */
5521 static void
5522 hme_check_acc_handle(char *file, uint_t line, struct hme *hmep,
5523     ddi_acc_handle_t handle)
5524 {
5525 }
5526