/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2001-2006 Advanced Micro Devices, Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * + Redistributions of source code must retain the above copyright notice,
 * + this list of conditions and the following disclaimer.
 *
 * + Redistributions in binary form must reproduce the above copyright
 * + notice, this list of conditions and the following disclaimer in the
 * + documentation and/or other materials provided with the distribution.
 *
 * + Neither the name of Advanced Micro Devices, Inc. nor the names of its
 * + contributors may be used to endorse or promote products derived from
 * + this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Import/Export/Re-Export/Use/Release/Transfer Restrictions and
 * Compliance with Applicable Laws.  Notice is hereby given that
 * the software may be subject to restrictions on use, release,
 * transfer, importation, exportation and/or re-exportation under
 * the laws and regulations of the United States or other
 * countries ("Applicable Laws"), which include but are not
 * limited to U.S. export control laws such as the Export
 * Administration Regulations and national security controls as
 * defined thereunder, as well as State Department controls under
 * the U.S. Munitions List.  Permission to use and/or
 * redistribute the software is conditioned upon compliance with
 * all Applicable Laws, including U.S. export control laws
 * regarding specifically designated persons, countries and
 * nationals of countries subject to national security controls.
 */

/* include files */
#include <sys/disp.h>
#include <sys/atomic.h>
#include <sys/vlan.h>
#include "amd8111s_main.h"

/* Global macro definitions */
#define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define	INTERFACE_NAME "amd8111s"
#define	AMD8111S_SPLIT	128
#define	AMD8111S_SEND_MAX	64
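/*
 * ROUNDUP rounds x up to the next multiple of a; it assumes a is a power
 * of two, e.g. ROUNDUP(100, 16) == 112 and ROUNDUP(128, 16) == 128.
 */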

static char ident[] = "AMD8111 10/100M Ethernet";

/*
 * Driver Entry Points
 */
static int amd8111s_attach(dev_info_t *, ddi_attach_cmd_t);
static int amd8111s_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * GLD entry point prototypes
 */
static int amd8111s_m_unicst(void *, const uint8_t *);
static int amd8111s_m_promisc(void *, boolean_t);
static int amd8111s_m_stat(void *, uint_t, uint64_t *);
static void amd8111s_m_ioctl(void *, queue_t *, mblk_t *);
static int amd8111s_m_multicst(void *, boolean_t, const uint8_t *addr);
static int amd8111s_m_start(void *);
static void amd8111s_m_stop(void *);
static mblk_t *amd8111s_m_tx(void *, mblk_t *mp);
static uint_t amd8111s_intr(caddr_t);

static int amd8111s_unattach(dev_info_t *, struct LayerPointers *);

static boolean_t amd8111s_allocate_buffers(struct LayerPointers *);
static int amd8111s_odlInit(struct LayerPointers *);
static boolean_t amd8111s_allocate_descriptors(struct LayerPointers *);
static void amd8111s_free_descriptors(struct LayerPointers *);
static boolean_t amd8111s_alloc_dma_ringbuf(struct LayerPointers *,
		struct amd8111s_dma_ringbuf *, uint32_t, uint32_t);
static void amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *);


static void amd8111s_log(struct LayerPointers *adapter, int level,
    char *fmt, ...);

static struct cb_ops amd8111s_cb_ops = {
	nulldev,
	nulldev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nochpoll,
	ddi_prop_op,
	NULL,
	D_NEW | D_MP,
	CB_REV,		/* cb_rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};

static struct dev_ops amd8111s_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	amd8111s_attach,	/* devo_attach */
	amd8111s_detach,	/* devo_detach */
	nodev,			/* devo_reset */
	&amd8111s_cb_ops,	/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	nodev,			/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

struct modldrv amd8111s_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	ident,			/* short description */
	&amd8111s_dev_ops	/* driver specific ops */
};

struct modlinkage amd8111s_modlinkage = {
	MODREV_1, (void *)&amd8111s_modldrv, NULL
};

/*
 * Global Variables
 */
struct LayerPointers *amd8111sadapter;

static ddi_dma_attr_t pcn_buff_dma_attr_t = {
	DMA_ATTR_V0,	/* dma_attr_version */
	(uint64_t)0,		/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint64_t)1,		/* dma_attr_align */
	(uint_t)0x7F,		/* dma_attr_burstsizes */
	(uint32_t)1,		/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* granularity */
	(uint_t)0		/* dma_attr_flags */
};

static ddi_dma_attr_t pcn_desc_dma_attr_t = {
	DMA_ATTR_V0,		/* dma_attr_version */
	(uint64_t)0,		/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint64_t)0x7FFFFFFF,	/* dma_attr_count_max */
	(uint64_t)0x10,		/* dma_attr_align */
	(uint_t)0xFFFFFFFFU,	/* dma_attr_burstsizes */
	(uint32_t)1,		/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* granularity */
	(uint_t)0		/* dma_attr_flags */
};

/* PIO access attributes for registers */
static ddi_device_acc_attr_t pcn_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};


static mac_callbacks_t amd8111s_m_callbacks = {
	MC_IOCTL,
	amd8111s_m_stat,
	amd8111s_m_start,
	amd8111s_m_stop,
	amd8111s_m_promisc,
	amd8111s_m_multicst,
	amd8111s_m_unicst,
	amd8111s_m_tx,
	amd8111s_m_ioctl
};
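
/*
 * MC_IOCTL in mc_callbacks advertises that the optional mc_ioctl entry
 * point (amd8111s_m_ioctl) is implemented; the mandatory entry points
 * need no flag.
 */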

/*
 * Standard Driver Load Entry Point
 * Called when the driver is loaded.
 */
int
_init()
{
	int status;
	mac_init_ops(&amd8111s_dev_ops, "amd8111s");

	status = mod_install(&amd8111s_modlinkage);
	if (status != DDI_SUCCESS) {
		mac_fini_ops(&amd8111s_dev_ops);
	}

	return (status);
}

/*
 * Standard Driver Entry Point for Query.
 * Called at any time to obtain driver information.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&amd8111s_modlinkage, modinfop));
}

/*
 *	Standard Driver Entry Point for Unload.
 *	Called when the driver is unloaded.
 */
int
_fini()
{
	int status;

	status = mod_remove(&amd8111s_modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&amd8111s_dev_ops);
	}

	return (status);
}

/*
 * Loopback Support
 */
static lb_property_t loopmodes[] = {
	{ normal,	"normal",	AMD8111S_LB_NONE		},
	{ external,	"100Mbps",	AMD8111S_LB_EXTERNAL_100	},
	{ external,	"10Mbps",	AMD8111S_LB_EXTERNAL_10		},
	{ internal,	"MAC",		AMD8111S_LB_INTERNAL_MAC	}
};
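
/*
 * Each entry pairs a loopback class and user-visible name with the
 * driver-private mode value passed to amd8111s_set_loop_mode() below.
 */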

static void
amd8111s_set_loop_mode(struct LayerPointers *adapter, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == adapter->pOdl->loopback_mode)
		return;

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
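	/*
	 * A note on the CMD register writes below (inferred from their use
	 * throughout this file): the command registers are strobed, i.e. a
	 * control bit is set only when written together with its VALx bit
	 * and cleared when written with VALx off.  That is why leaving
	 * loopback writes INLOOP/EXLOOP without VAL0, while entering it
	 * writes VAL0 | INLOOP/EXLOOP.
	 */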
	switch (mode) {
	default:
		return;

	case AMD8111S_LB_NONE:
		mdlStopChip(adapter);
		if (adapter->pOdl->loopback_mode == AMD8111S_LB_INTERNAL_MAC) {
			cmn_err(CE_NOTE, "LB_NONE restored from Internal LB");
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
			    INLOOP);
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
			    FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);
		} else {
			cmn_err(CE_NOTE, "LB_NONE restored from External LB");
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
			    EXLOOP);
		}

		amd8111s_reset(adapter);
		adapter->pOdl->LinkStatus = LINK_STATE_DOWN;
		adapter->pOdl->rx_fcs_stripped = B_FALSE;
		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_EXTERNAL_100:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_100");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_100);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | EXLOOP);
		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->Speed = 100;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_EXTERNAL_10:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_10");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_10);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | EXLOOP);
		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->Speed = 10;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_INTERNAL_MAC:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_INTERNAL_MAC");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		/* Disable Port Manager */
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
		    EN_PMGR);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | INLOOP);

		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
		    VAL1 | FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);

		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;
	}

	/*
	 * All OK; record the new mode.
	 */
	adapter->pOdl->loopback_mode = mode;
}

static enum ioc_reply
amd8111s_loopback_ioctl(struct LayerPointers *adapter, struct iocblk *iocp,
    mblk_t *mp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		amd8111s_log(adapter, CE_NOTE,
		    "amd8111s_loopback_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "wrong LB_GET_INFO_SIZE size");
			return (IOC_INVAL);
		}
		lbsp = (void *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		break;

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes)) {
			amd8111s_log(adapter, CE_NOTE,
			    "wrong LB_GET_INFO size");
			return (IOC_INVAL);
		}
		lbpp = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		break;

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "wrong LB_GET_MODE size");
			return (IOC_INVAL);
		}
		lbmp = (void *)mp->b_cont->b_rptr;
		*lbmp = adapter->pOdl->loopback_mode;
		break;

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "wrong LB_SET_MODE size");
			return (IOC_INVAL);
		}
		lbmp = (void *)mp->b_cont->b_rptr;
		amd8111s_set_loop_mode(adapter, *lbmp);
		break;
	}
	return (IOC_REPLY);
}

static void
amd8111s_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;
	struct LayerPointers *adapter;
	enum ioc_reply status;

	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;
	adapter = arg;

	ASSERT(adapter);
	if (adapter == NULL) {
		miocnak(q, mp, 0, EINVAL);
		return;
	}

	switch (iocp->ioc_cmd) {

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = amd8111s_loopback_ioctl(adapter, iocp, mp);
		break;

	default:
		status = IOC_INVAL;
		break;
	}

	/*
	 * Decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(q, mp, 0, 0);
		break;

	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(q, mp);
		break;
	}
}

/*
 * Copy one packet from DMA memory to an mblk, then advance the DMA
 * descriptor pointer.
 */
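/*
 * Descriptor ownership protocol: the controller owns a receive descriptor
 * while Rx_OWN is 1 and clears the bit once it has written a frame into
 * the buffer; the driver only examines descriptors with Rx_OWN == 0 and
 * hands them back to the hardware by setting Rx_OWN to 1 again.
 */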
static boolean_t
amd8111s_recv_copy(struct LayerPointers *pLayerPointers, mblk_t **last_mp)
{
	int length = 0;
	mblk_t *mp;
	struct rx_desc *descriptor;
	struct odl *pOdl = pLayerPointers->pOdl;
	struct amd8111s_statistics *statistics = &pOdl->statistics;
	struct nonphysical *pNonphysical = pLayerPointers->pMil
	    ->pNonphysical;

	mutex_enter(&pOdl->mdlRcvLock);
	descriptor = pNonphysical->RxBufDescQRead->descriptor;
	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle,
	    (pNonphysical->RxBufDescQRead->descriptor -
	    pNonphysical->RxBufDescQStart->descriptor) *
	    sizeof (struct rx_desc),
	    sizeof (struct rx_desc), DDI_DMA_SYNC_FORCPU);
	if ((descriptor->Rx_OWN) == 0) {
		/*
		 * If the frame is received with errors, then set MCNT
		 * of that pkt in ReceiveArray to 0. This packet would
		 * be discarded later and not indicated to OS.
		 */
		if (descriptor->Rx_ERR) {
			statistics->rx_desc_err++;
			descriptor->Rx_ERR = 0;
			if (descriptor->Rx_FRAM == 1) {
				statistics->rx_desc_err_FRAM++;
				descriptor->Rx_FRAM = 0;
			}
			if (descriptor->Rx_OFLO == 1) {
				statistics->rx_desc_err_OFLO++;
				descriptor->Rx_OFLO = 0;
				pOdl->rx_overflow_counter++;
				if ((pOdl->rx_overflow_counter > 5) &&
				    (pOdl->pause_interval == 0)) {
					statistics->rx_double_overflow++;
					mdlSendPause(pLayerPointers);
					pOdl->rx_overflow_counter = 0;
					pOdl->pause_interval = 25;
				}
			}
			if (descriptor->Rx_CRC == 1) {
				statistics->rx_desc_err_CRC++;
				descriptor->Rx_CRC = 0;
			}
			if (descriptor->Rx_BUFF == 1) {
				statistics->rx_desc_err_BUFF++;
				descriptor->Rx_BUFF = 0;
			}
			goto Next_Descriptor;
		}

		/* Length of incoming packet */
		if (pOdl->rx_fcs_stripped) {
			length = descriptor->Rx_MCNT - 4;
		} else {
			length = descriptor->Rx_MCNT;
		}
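		/*
		 * Frames shorter than 62 bytes are counted as undersized
		 * here; presumably a runt heuristic, since a minimum
		 * Ethernet frame is 60 bytes before the 4-byte FCS.
		 */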
		if (length < 62) {
			statistics->rx_error_zerosize++;
		}

		if ((mp = allocb(length, BPRI_MED)) == NULL) {
			statistics->rx_allocfail++;
			goto failed;
		}
		/* Copy from virtual address of incoming packet */
		bcopy((long *)*(pNonphysical->RxBufDescQRead->USpaceMap),
		    mp->b_rptr, length);
		mp->b_wptr = mp->b_rptr + length;
		statistics->rx_ok_packets++;
		if (*last_mp == NULL) {
			*last_mp = mp;
		} else {
			(*last_mp)->b_next = mp;
			*last_mp = mp;
		}

Next_Descriptor:
		descriptor->Rx_MCNT = 0;
		descriptor->Rx_SOP = 0;
		descriptor->Rx_EOP = 0;
		descriptor->Rx_PAM = 0;
		descriptor->Rx_BAM = 0;
		descriptor->TT = 0;
		descriptor->Rx_OWN = 1;
		pNonphysical->RxBufDescQRead->descriptor++;
		pNonphysical->RxBufDescQRead->USpaceMap++;
		if (pNonphysical->RxBufDescQRead->descriptor >
		    pNonphysical->RxBufDescQEnd->descriptor) {
			pNonphysical->RxBufDescQRead->descriptor =
			    pNonphysical->RxBufDescQStart->descriptor;
			pNonphysical->RxBufDescQRead->USpaceMap =
			    pNonphysical->RxBufDescQStart->USpaceMap;
		}
		mutex_exit(&pOdl->mdlRcvLock);

		return (B_TRUE);
	}

failed:
	mutex_exit(&pOdl->mdlRcvLock);
	return (B_FALSE);
}

/*
 * Get the received packets from NIC card and send them to GLD.
 */
static void
amd8111s_receive(struct LayerPointers *pLayerPointers)
{
	int numOfPkts = 0;
	struct odl *pOdl;
	mblk_t *ret_mp = NULL, *last_mp = NULL;

	pOdl = pLayerPointers->pOdl;

	rw_enter(&pOdl->chip_lock, RW_READER);
	if (!pLayerPointers->run) {
		rw_exit(&pOdl->chip_lock);
		return;
	}

	if (pOdl->pause_interval > 0)
		pOdl->pause_interval--;

	while (numOfPkts < RX_RING_SIZE) {

		if (!amd8111s_recv_copy(pLayerPointers, &last_mp)) {
			break;
		}
		if (ret_mp == NULL)
			ret_mp = last_mp;
		numOfPkts++;
	}

	if (ret_mp) {
		mac_rx(pOdl->mh, NULL, ret_mp);
	}

	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	mdlReceive(pLayerPointers);

	rw_exit(&pOdl->chip_lock);
}

/*
 * Print a message from the release-version driver.
 */
static void
amd8111s_log(struct LayerPointers *adapter, int level, char *fmt, ...)
{
	char name[32];
	char buf[256];
	va_list ap;

	if (adapter != NULL) {
		(void) sprintf(name, "amd8111s%d",
		    ddi_get_instance(adapter->pOdl->devinfo));
	} else {
		(void) sprintf(name, "amd8111s");
	}
	va_start(ap, fmt);
	(void) vsprintf(buf, fmt, ap);
	va_end(ap);
	cmn_err(level, "%s: %s", name, buf);
}

/*
 * Allocate and initialize all resources.
 * Called by amd8111s_attach().
 */
static int
amd8111s_odlInit(struct LayerPointers *pLayerPointers)
{
	unsigned long mem_req_array[MEM_REQ_MAX];
	unsigned long mem_set_array[MEM_REQ_MAX];
	unsigned long *pmem_req_array;
	unsigned long *pmem_set_array;
	int i, size;

	for (i = 0; i < MEM_REQ_MAX; i++) {
		mem_req_array[i] = 0;
		mem_set_array[i] = 0;
	}

	milRequestResources(mem_req_array);

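	/*
	 * mem_req_array is a zero-terminated list of (type, size) entries
	 * filled in by the mil layer; for each VIRTUAL entry we allocate
	 * the requested memory, so mem_set_array ends up holding
	 * (type, size, address) triples that milSetResources() consumes.
	 */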
	pmem_req_array = mem_req_array;
	pmem_set_array = mem_set_array;
	while (*pmem_req_array) {
		switch (*pmem_req_array) {
		case VIRTUAL:
			*pmem_set_array = VIRTUAL;
			pmem_req_array++;
			pmem_set_array++;
			*(pmem_set_array) = *(pmem_req_array);
			pmem_set_array++;
			*(pmem_set_array) = (unsigned long) kmem_zalloc(
			    *(pmem_req_array), KM_NOSLEEP);
			if (*pmem_set_array == NULL)
				goto odl_init_failure;
			break;
		}
		pmem_req_array++;
		pmem_set_array++;
	}

	/*
	 * Initialize memory on lower layers
	 */
	milSetResources(pLayerPointers, mem_set_array);

	/* Allocate Rx/Tx descriptors */
	if (amd8111s_allocate_descriptors(pLayerPointers) != B_TRUE) {
		*pmem_set_array = NULL;
		goto odl_init_failure;
	}

	/*
	 * Allocate Rx buffer for each Rx descriptor. Then call mil layer
	 * routine to fill physical address of Rx buffer into Rx descriptor.
	 */
	if (amd8111s_allocate_buffers(pLayerPointers) == B_FALSE) {
		amd8111s_free_descriptors(pLayerPointers);
		*pmem_set_array = NULL;
		goto odl_init_failure;
	}
	milInitGlbds(pLayerPointers);

	return (0);

odl_init_failure:
	/*
	 * Free all memory allocated so far
	 */
	pmem_req_array = mem_set_array;
	while ((*pmem_req_array) && (pmem_req_array != pmem_set_array)) {
		switch (*pmem_req_array) {
		case VIRTUAL:
			pmem_req_array++;	/* Size */
			size = *(pmem_req_array);
			pmem_req_array++;	/* Virtual Address */
			if (*pmem_req_array == NULL)
				return (1);
			kmem_free((void *)*pmem_req_array, size);
			break;
		}
		pmem_req_array++;
	}
	return (1);
}

/*
 * Allocate and initialize Tx/Rx descriptors
 */
static boolean_t
amd8111s_allocate_descriptors(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;
	struct mil *pMil = pLayerPointers->pMil;
	dev_info_t *devinfo = pOdl->devinfo;
	uint_t length, count, i;
	size_t real_length;

	/*
	 * Allocate Rx descriptors
	 */
	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
	    NULL, &pOdl->rx_desc_dma_handle) != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_alloc_handle for Rx desc failed");
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}

	length = sizeof (struct rx_desc) * RX_RING_SIZE + ALIGNMENT;
	if (ddi_dma_mem_alloc(pOdl->rx_desc_dma_handle, length,
	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&pMil->Rx_desc_original, &real_length,
	    &pOdl->rx_desc_acc_handle) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_alloc for Rx desc failed");
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}

	if (ddi_dma_addr_bind_handle(pOdl->rx_desc_dma_handle,
	    NULL, (caddr_t)pMil->Rx_desc_original, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &pOdl->rx_desc_dma_cookie,
	    &count) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_addr_bind_handle for Rx desc failed");
		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}
	ASSERT(count == 1);

	/* Initialize Rx descriptors related variables */
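	/*
	 * ALIGNMENT is used as a mask here: adding it and ANDing with its
	 * complement rounds the address up to the next aligned boundary,
	 * which assumes ALIGNMENT is a power of two minus one (e.g. 0x0f
	 * for 16-byte alignment, matching dma_attr_align of 0x10 above).
	 */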
	pMil->Rx_desc = (struct rx_desc *)
	    ((pMil->Rx_desc_original + ALIGNMENT) & ~ALIGNMENT);
	pMil->Rx_desc_pa = (unsigned int)
	    ((pOdl->rx_desc_dma_cookie.dmac_laddress + ALIGNMENT) & ~ALIGNMENT);

	pLayerPointers->pMdl->init_blk->RDRA = pMil->Rx_desc_pa;

	/*
	 * Allocate Tx descriptors
	 */
	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
	    NULL, &pOdl->tx_desc_dma_handle) != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_alloc_handle for Tx desc failed");
		goto allocate_desc_fail;
	}

	length = sizeof (struct tx_desc) * TX_RING_SIZE + ALIGNMENT;
	if (ddi_dma_mem_alloc(pOdl->tx_desc_dma_handle, length,
	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&pMil->Tx_desc_original, &real_length,
	    &pOdl->tx_desc_acc_handle) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_alloc for Tx desc failed");
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		goto allocate_desc_fail;
	}

	if (ddi_dma_addr_bind_handle(pOdl->tx_desc_dma_handle,
	    NULL, (caddr_t)pMil->Tx_desc_original, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &pOdl->tx_desc_dma_cookie,
	    &count) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_addr_bind_handle for Tx desc failed");
		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		goto allocate_desc_fail;
	}
	ASSERT(count == 1);
	/* Set the DMA area to all zeros */
	bzero((caddr_t)pMil->Tx_desc_original, length);

	/* Initialize Tx descriptors related variables */
	pMil->Tx_desc = (struct tx_desc *)
	    ((pMil->Tx_desc_original + ALIGNMENT) & ~ALIGNMENT);
	pMil->pNonphysical->TxDescQRead = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQWrite = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQStart = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQEnd = &(pMil->Tx_desc[TX_RING_SIZE - 1]);

	/* Physical Addr of Tx_desc_original & Tx_desc */
	pLayerPointers->pMil->Tx_desc_pa =
	    ((pOdl->tx_desc_dma_cookie.dmac_laddress + ALIGNMENT) &
	    ~ALIGNMENT);

	/* Setting the reserved bits in the tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		pMil->pNonphysical->TxDescQWrite->Tx_RES0 = 0x0f;
		pMil->pNonphysical->TxDescQWrite->Tx_OWN = 0;
		pMil->pNonphysical->TxDescQWrite++;
	}
	pMil->pNonphysical->TxDescQWrite = pMil->pNonphysical->TxDescQStart;

	pLayerPointers->pMdl->init_blk->TDRA = pMil->Tx_desc_pa;

	return (B_TRUE);

allocate_desc_fail:
	pOdl->tx_desc_dma_handle = NULL;
	(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
	ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
	ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
	pOdl->rx_desc_dma_handle = NULL;
	return (B_FALSE);
}

/*
 * Free Tx/Rx descriptors
 */
static void
amd8111s_free_descriptors(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	/* Free Rx descriptors */
	if (pOdl->rx_desc_dma_handle) {
		(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
	}

	/* Free Tx descriptors */
	if (pOdl->tx_desc_dma_handle) {
		(void) ddi_dma_unbind_handle(pOdl->tx_desc_dma_handle);
		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		pOdl->tx_desc_dma_handle = NULL;
	}
}

/*
 * Allocate Tx/Rx Ring buffer
 */
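/*
 * The ring is carved into AMD8111S_SPLIT "trunks": each trunk gets its own
 * DMA handle, memory allocation and binding, and the fixed-size message
 * buffers are laid out back to back inside the trunks, so msg_buf[] still
 * presents one logical ring while individual DMA allocations stay small.
 */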
static boolean_t
amd8111s_alloc_dma_ringbuf(struct LayerPointers *pLayerPointers,
			struct amd8111s_dma_ringbuf *pRing,
			uint32_t ring_size, uint32_t msg_size)
{
	uint32_t idx, msg_idx = 0, msg_acc;
	dev_info_t *devinfo = pLayerPointers->pOdl->devinfo;
	size_t real_length;
	uint_t count = 0;

	ASSERT(pcn_buff_dma_attr_t.dma_attr_align == 1);
	pRing->dma_buf_sz = msg_size;
	pRing->ring_size = ring_size;
	pRing->trunk_num = AMD8111S_SPLIT;
	pRing->buf_sz = msg_size * ring_size;
	if (ring_size < pRing->trunk_num)
		pRing->trunk_num = ring_size;
	ASSERT((pRing->buf_sz % pRing->trunk_num) == 0);

	pRing->trunk_sz = pRing->buf_sz / pRing->trunk_num;
	ASSERT((pRing->trunk_sz % pRing->dma_buf_sz) == 0);

	pRing->msg_buf = kmem_zalloc(sizeof (struct amd8111s_msgbuf) *
	    ring_size, KM_NOSLEEP);
	pRing->dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->dma_cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->trunk_addr = kmem_zalloc(sizeof (caddr_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	if (pRing->msg_buf == NULL || pRing->dma_hdl == NULL ||
	    pRing->acc_hdl == NULL || pRing->trunk_addr == NULL ||
	    pRing->dma_cookie == NULL) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "kmem_zalloc failed");
		goto failed;
	}

	for (idx = 0; idx < pRing->trunk_num; ++idx) {
		if (ddi_dma_alloc_handle(devinfo, &pcn_buff_dma_attr_t,
		    DDI_DMA_SLEEP, NULL, &(pRing->dma_hdl[idx]))
		    != DDI_SUCCESS) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_alloc_handle failed");
			goto failed;
		} else if (ddi_dma_mem_alloc(pRing->dma_hdl[idx],
		    pRing->trunk_sz, &pcn_acc_attr, DDI_DMA_STREAMING,
		    DDI_DMA_SLEEP, NULL,
		    (caddr_t *)&(pRing->trunk_addr[idx]),
		    (size_t *)(&real_length), &pRing->acc_hdl[idx])
		    != DDI_SUCCESS) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_mem_alloc failed");
			goto failed;
		} else if (real_length != pRing->trunk_sz) {
			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_mem_alloc returned short length");
			goto failed;
		} else if (ddi_dma_addr_bind_handle(pRing->dma_hdl[idx],
		    NULL, (caddr_t)pRing->trunk_addr[idx], real_length,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
		    &pRing->dma_cookie[idx], &count) != DDI_DMA_MAPPED) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_addr_bind_handle failed");
			goto failed;
		} else {
			for (msg_acc = 0;
			    msg_acc < pRing->trunk_sz / pRing->dma_buf_sz;
			    ++msg_acc) {
				pRing->msg_buf[msg_idx].offset =
				    msg_acc * pRing->dma_buf_sz;
				pRing->msg_buf[msg_idx].vir_addr =
				    pRing->trunk_addr[idx] +
				    pRing->msg_buf[msg_idx].offset;
				pRing->msg_buf[msg_idx].phy_addr =
				    pRing->dma_cookie[idx].dmac_laddress +
				    pRing->msg_buf[msg_idx].offset;
				pRing->msg_buf[msg_idx].p_hdl =
				    pRing->dma_hdl[idx];
				msg_idx++;
			}
		}
	}

	pRing->free = pRing->msg_buf;
	pRing->next = pRing->msg_buf;
	pRing->curr = pRing->msg_buf;

	return (B_TRUE);
failed:
	amd8111s_free_dma_ringbuf(pRing);
	return (B_FALSE);
}

/*
 * Free Tx/Rx ring buffer
 */
static void
amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *pRing)
{
	int idx;

	if (pRing->dma_cookie != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
			if (pRing->dma_cookie[idx].dmac_laddress == 0) {
				break;
			}
			(void) ddi_dma_unbind_handle(pRing->dma_hdl[idx]);
		}
		kmem_free(pRing->dma_cookie,
		    sizeof (ddi_dma_cookie_t) * pRing->trunk_num);
	}

	if (pRing->acc_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
			if (pRing->acc_hdl[idx] == NULL)
				break;
			ddi_dma_mem_free(&pRing->acc_hdl[idx]);
		}
		kmem_free(pRing->acc_hdl,
		    sizeof (ddi_acc_handle_t) * pRing->trunk_num);
	}

	if (pRing->dma_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
			if (pRing->dma_hdl[idx] == 0) {
				break;
			}
			ddi_dma_free_handle(&pRing->dma_hdl[idx]);
		}
		kmem_free(pRing->dma_hdl,
		    sizeof (ddi_dma_handle_t) * pRing->trunk_num);
	}

	if (pRing->msg_buf != NULL) {
		kmem_free(pRing->msg_buf,
		    sizeof (struct amd8111s_msgbuf) * pRing->ring_size);
	}

	if (pRing->trunk_addr != NULL) {
		kmem_free(pRing->trunk_addr,
		    sizeof (caddr_t) * pRing->trunk_num);
	}

	bzero(pRing, sizeof (*pRing));
}


/*
 * Allocate all Tx buffers.
 * Allocate a Rx buffer for each Rx descriptor. Then
 * call mil routine to fill physical address of Rx
 * buffer into Rx descriptors.
 */
static boolean_t
amd8111s_allocate_buffers(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	/*
	 * Allocate Rx buffers
	 */
	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->rx_buf,
	    RX_RING_SIZE, RX_BUF_SIZE) == B_FALSE) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "amd8111s_alloc_dma_ringbuf for rx failed");
		goto allocate_buf_fail;
	}

	/*
	 * Allocate Tx buffers
	 */
	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->tx_buf,
	    TX_COALESC_SIZE, TX_BUF_SIZE) == B_FALSE) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "amd8111s_alloc_dma_ringbuf for tx failed");
		goto allocate_buf_fail;
	}

	/*
	 * Initialize the mil Queues
	 */
	milInitGlbds(pLayerPointers);

	milInitRxQ(pLayerPointers);

	return (B_TRUE);

allocate_buf_fail:

	amd8111s_log(pLayerPointers, CE_WARN,
	    "amd8111s_allocate_buffers failed");
	return (B_FALSE);
}

/*
 * Free all Rx/Tx buffers
 */

static void
amd8111s_free_buffers(struct LayerPointers *pLayerPointers)
{
	/* Free Tx buffers */
	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->tx_buf);

	/* Free Rx buffers */
	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->rx_buf);
}

/*
 * Try to recycle all the descriptors and Tx buffers
 * which are already freed by hardware.
 */
static int
amd8111s_recycle_tx(struct LayerPointers *pLayerPointers)
{
	struct nonphysical *pNonphysical;
	uint32_t count = 0;

	pNonphysical = pLayerPointers->pMil->pNonphysical;
	while (pNonphysical->TxDescQRead->Tx_OWN == 0 &&
	    pNonphysical->TxDescQRead != pNonphysical->TxDescQWrite) {
		pLayerPointers->pOdl->tx_buf.free =
		    NEXT(pLayerPointers->pOdl->tx_buf, free);
		pNonphysical->TxDescQRead++;
		if (pNonphysical->TxDescQRead > pNonphysical->TxDescQEnd) {
			pNonphysical->TxDescQRead = pNonphysical->TxDescQStart;
		}
		count++;
	}

	if (pLayerPointers->pMil->tx_reschedule)
		ddi_trigger_softintr(pLayerPointers->pOdl->drain_id);

	return (count);
}

/*
 * Get packets in the Tx buffer, then copy them to the send buffer.
 * Trigger hardware to send out packets.
 */
static void
amd8111s_send_serial(struct LayerPointers *pLayerPointers)
{
	struct nonphysical *pNonphysical;
	uint32_t count;

	pNonphysical = pLayerPointers->pMil->pNonphysical;

	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);

	for (count = 0; count < AMD8111S_SEND_MAX; count++) {
		if (pLayerPointers->pOdl->tx_buf.curr ==
		    pLayerPointers->pOdl->tx_buf.next) {
			break;
		}
		/*
		 * The ring is full when the descriptor following
		 * TxDescQWrite (with wrap-around) is TxDescQRead; in
		 * that case try to recycle completed descriptors first.
		 */
		if (((pNonphysical->TxDescQWrite + 1 >
		    pNonphysical->TxDescQEnd) ? pNonphysical->TxDescQStart :
		    (pNonphysical->TxDescQWrite + 1)) ==
		    pNonphysical->TxDescQRead)
			if (amd8111s_recycle_tx(pLayerPointers) == 0) {
				pLayerPointers->pOdl
				    ->statistics.tx_no_descriptor++;
				break;
			}

		/* Fill packet length */
		pNonphysical->TxDescQWrite->Tx_BCNT = (uint16_t)pLayerPointers
		    ->pOdl->tx_buf.curr->msg_size;

		/* Fill physical buffer address */
		pNonphysical->TxDescQWrite->Tx_Base_Addr = (unsigned int)
		    pLayerPointers->pOdl->tx_buf.curr->phy_addr;

		pNonphysical->TxDescQWrite->Tx_SOP = 1;
		pNonphysical->TxDescQWrite->Tx_EOP = 1;
		pNonphysical->TxDescQWrite->Tx_ADD_FCS = 1;
		pNonphysical->TxDescQWrite->Tx_LTINT = 1;
		pNonphysical->TxDescQWrite->Tx_USPACE = 0;
		pNonphysical->TxDescQWrite->Tx_OWN = 1;

		pNonphysical->TxDescQWrite++;
		if (pNonphysical->TxDescQWrite > pNonphysical->TxDescQEnd) {
			pNonphysical->TxDescQWrite = pNonphysical->TxDescQStart;
		}

		pLayerPointers->pOdl->tx_buf.curr =
		    NEXT(pLayerPointers->pOdl->tx_buf, curr);
	}

	pLayerPointers->pOdl->statistics.tx_ok_packets += count;

	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);

	/* Call mdlTransmit to send the pkt out on the network */
	mdlTransmit(pLayerPointers);
}

/*
 * Softintr entry point: try to send out packets in the Tx buffer.
 * If a reschedule is pending, call mac_tx_update to re-enable
 * transmission.
 */
static uint_t
amd8111s_send_drain(caddr_t arg)
{
	struct LayerPointers *pLayerPointers = (void *)arg;

	amd8111s_send_serial(pLayerPointers);

	if (pLayerPointers->pMil->tx_reschedule &&
	    NEXT(pLayerPointers->pOdl->tx_buf, next) !=
	    pLayerPointers->pOdl->tx_buf.free) {
		mac_tx_update(pLayerPointers->pOdl->mh);
		pLayerPointers->pMil->tx_reschedule = B_FALSE;
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Get a Tx buffer
 */
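/*
 * tx_buf is a ring of message buffers: "next" is where the next outgoing
 * packet is staged, "curr" is the next staged buffer to be handed to a Tx
 * descriptor, and "free" trails behind, marking buffers the hardware has
 * finished with.  The ring is full when advancing "next" would collide
 * with "free".
 */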
static struct amd8111s_msgbuf *
amd8111s_getTxbuf(struct LayerPointers *pLayerPointers)
{
	struct amd8111s_msgbuf *tmp, *next;

	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);
	next = NEXT(pLayerPointers->pOdl->tx_buf, next);
	if (next == pLayerPointers->pOdl->tx_buf.free) {
		tmp = NULL;
	} else {
		tmp = pLayerPointers->pOdl->tx_buf.next;
		pLayerPointers->pOdl->tx_buf.next = next;
	}
	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);

	return (tmp);
}

static boolean_t
amd8111s_send(struct LayerPointers *pLayerPointers, mblk_t *mp)
{
	struct odl *pOdl;
	size_t frag_len;
	mblk_t *tmp;
	struct amd8111s_msgbuf *txBuf;
	uint8_t *pMsg;

	pOdl = pLayerPointers->pOdl;

	/* alloc send buffer */
	txBuf = amd8111s_getTxbuf(pLayerPointers);
	if (txBuf == NULL) {
		pOdl->statistics.tx_no_buffer++;
		pLayerPointers->pMil->tx_reschedule = B_TRUE;
		amd8111s_send_serial(pLayerPointers);
		return (B_FALSE);
	}

	/* copy packet to send buffer */
	txBuf->msg_size = 0;
	pMsg = (uint8_t *)txBuf->vir_addr;
	for (tmp = mp; tmp; tmp = tmp->b_cont) {
		frag_len = MBLKL(tmp);
		bcopy(tmp->b_rptr, pMsg, frag_len);
		txBuf->msg_size += frag_len;
		pMsg += frag_len;
	}
	freemsg(mp);

	amd8111s_send_serial(pLayerPointers);

	return (B_TRUE);
}

/*
 * (GLD Entry Point) Send the message block to lower layer
 */
static mblk_t *
amd8111s_m_tx(void *arg, mblk_t *mp)
{
	struct LayerPointers *pLayerPointers = arg;
	mblk_t *next;

	rw_enter(&pLayerPointers->pOdl->chip_lock, RW_READER);
	if (!pLayerPointers->run) {
		pLayerPointers->pOdl->statistics.tx_afterunplumb++;
		freemsgchain(mp);
		mp = NULL;
	}

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;
		if (!amd8111s_send(pLayerPointers, mp)) {
			/* Send fail */
			mp->b_next = next;
			break;
		}
		mp = next;
	}

	rw_exit(&pLayerPointers->pOdl->chip_lock);
	return (mp);
}

/*
 * (GLD Entry Point) Interrupt Service Routine
 */
static uint_t
amd8111s_intr(caddr_t arg)
{
	unsigned int intrCauses;
	struct LayerPointers *pLayerPointers = (void *)arg;

	/* Read the interrupt status from mdl */
	intrCauses = mdlReadInterrupt(pLayerPointers);

	if (intrCauses == 0) {
		pLayerPointers->pOdl->statistics.intr_OTHER++;
		return (DDI_INTR_UNCLAIMED);
	}

	if (intrCauses & LCINT) {
		if (mdlReadLink(pLayerPointers) == LINK_UP) {
			mdlGetActiveMediaInfo(pLayerPointers);
			/* Link status changed */
			if (pLayerPointers->pOdl->LinkStatus !=
			    LINK_STATE_UP) {
				pLayerPointers->pOdl->LinkStatus =
				    LINK_STATE_UP;
				mac_link_update(pLayerPointers->pOdl->mh,
				    LINK_STATE_UP);
			}
		} else {
			if (pLayerPointers->pOdl->LinkStatus !=
			    LINK_STATE_DOWN) {
				pLayerPointers->pOdl->LinkStatus =
				    LINK_STATE_DOWN;
				mac_link_update(pLayerPointers->pOdl->mh,
				    LINK_STATE_DOWN);
			}
		}
	}
	/*
	 * RINT0: Receive Interrupt is set by the controller after the last
	 * descriptor of a receive frame for this ring has been updated by
	 * writing a 0 to the OWNership bit.
	 */
	if (intrCauses & RINT0) {
		pLayerPointers->pOdl->statistics.intr_RINT0++;
		amd8111s_receive(pLayerPointers);
	}

	/*
	 * TINT0: Transmit Interrupt is set by the controller after the OWN bit
	 * in the last descriptor of a transmit frame in this particular ring
	 * has been cleared to indicate the frame has been copied to the
	 * transmit FIFO.
	 */
	if (intrCauses & TINT0) {
		pLayerPointers->pOdl->statistics.intr_TINT0++;
		/*
		 * if desc ring is NULL and tx buf is not NULL, it should
		 * drain tx buffer
		 */
		amd8111s_send_serial(pLayerPointers);
	}

	if (intrCauses & STINT) {
		pLayerPointers->pOdl->statistics.intr_STINT++;
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Re-initialize data structures.
 */
static void
amd8111s_sw_reset(struct LayerPointers *pLayerPointers)
{
	/* Reset all Tx/Rx queues and descriptors */
	milResetTxQ(pLayerPointers);
	milInitRxQ(pLayerPointers);
}

/*
 * Send all pending tx packets
 */
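/*
 * Poll the Tx descriptor ring until the hardware has released every
 * descriptor (Tx_OWN == 0), waiting at most 30 x 1 ms; the number of 1 ms
 * intervals actually spent is recorded in the tx_draintime statistic.
 */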
static void
amd8111s_tx_drain(struct LayerPointers *adapter)
{
	struct tx_desc *pTx_desc = adapter->pMil->pNonphysical->TxDescQStart;
	int i, desc_count = 0;

	for (i = 0; i < 30; i++) {
		while ((pTx_desc->Tx_OWN == 0) && (desc_count < TX_RING_SIZE)) {
			/* This packet has been transmitted */
			pTx_desc++;
			desc_count++;
		}
		if (desc_count == TX_RING_SIZE) {
			break;
		}
		/* Wait 1 ms */
		drv_usecwait(1000);
	}
	adapter->pOdl->statistics.tx_draintime = i;
}

/*
 * (GLD Entry Point) Start the card; called at
 * ifconfig plumb time.
 */
static int
amd8111s_m_start(void *arg)
{
	struct LayerPointers *pLayerPointers = arg;
	struct odl *pOdl = pLayerPointers->pOdl;

	amd8111s_sw_reset(pLayerPointers);
	mdlHWReset(pLayerPointers);
	rw_enter(&pOdl->chip_lock, RW_WRITER);
	pLayerPointers->run = B_TRUE;
	rw_exit(&pOdl->chip_lock);
	return (0);
}

/*
 * (GLD Entry Point) Stop the card; called at
 * ifconfig unplumb time.
 */
static void
amd8111s_m_stop(void *arg)
{
	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;
	struct odl *pOdl = pLayerPointers->pOdl;

	/* Ensure all pending tx packets are sent */
	amd8111s_tx_drain(pLayerPointers);
	/*
	 * Stop the controller and disable the controller interrupt
	 */
	rw_enter(&pOdl->chip_lock, RW_WRITER);
	mdlStopChip(pLayerPointers);
	pLayerPointers->run = B_FALSE;
	rw_exit(&pOdl->chip_lock);
}

/*
 *	Clean up all allocated resources.
 */
static void
amd8111s_free_resource(struct LayerPointers *pLayerPointers)
{
	unsigned long mem_free_array[100];
	unsigned long *pmem_free_array, size;

	/* Free Rx/Tx descriptors */
	amd8111s_free_descriptors(pLayerPointers);

	/* Free memory on lower layers */
	milFreeResources(pLayerPointers, mem_free_array);
	pmem_free_array = mem_free_array;
	while (*pmem_free_array) {
		switch (*pmem_free_array) {
		case VIRTUAL:
			size = *(++pmem_free_array);
			pmem_free_array++;
			kmem_free((void *)*(pmem_free_array), size);
			break;
		}
		pmem_free_array++;
	}

	amd8111s_free_buffers(pLayerPointers);
}

/*
 * (GLD Entry Point) Add/delete multicast addresses
 */
static int
amd8111s_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
{
	struct LayerPointers *pLayerPointers = arg;

	if (add) {
		/* Add a multicast entry */
		mdlAddMulticastAddress(pLayerPointers, (UCHAR *)addr);
	} else {
		/* Delete a multicast entry */
		mdlDeleteMulticastAddress(pLayerPointers, (UCHAR *)addr);
	}

	return (0);
}

#ifdef AMD8111S_DEBUG
/*
 * The size of MIB registers is only 32 bits. Dump them before one
 * of them overflows.
 */
static void
amd8111s_dump_mib(struct LayerPointers *pLayerPointers)
{
	struct amd8111s_statistics *adapterStat;

	adapterStat = &pLayerPointers->pOdl->statistics;

	adapterStat->mib_dump_counter++;

	/*
	 * Rx Counters
	 */
	adapterStat->rx_mib_unicst_packets +=
	    mdlReadMib(pLayerPointers, RcvUniCastPkts);
	adapterStat->rx_mib_multicst_packets +=
	    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
	adapterStat->rx_mib_broadcst_packets +=
	    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
	adapterStat->rx_mib_macctrl_packets +=
	    mdlReadMib(pLayerPointers, RcvMACCtrl);
	adapterStat->rx_mib_flowctrl_packets +=
	    mdlReadMib(pLayerPointers, RcvFlowCtrl);

	adapterStat->rx_mib_bytes +=
	    mdlReadMib(pLayerPointers, RcvOctets);
	adapterStat->rx_mib_good_bytes +=
	    mdlReadMib(pLayerPointers, RcvGoodOctets);

	adapterStat->rx_mib_undersize_packets +=
	    mdlReadMib(pLayerPointers, RcvUndersizePkts);
	adapterStat->rx_mib_oversize_packets +=
	    mdlReadMib(pLayerPointers, RcvOversizePkts);

	adapterStat->rx_mib_drop_packets +=
	    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
	adapterStat->rx_mib_align_err_packets +=
	    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
	adapterStat->rx_mib_fcs_err_packets +=
	    mdlReadMib(pLayerPointers, RcvFCSErrors);
	adapterStat->rx_mib_symbol_err_packets +=
	    mdlReadMib(pLayerPointers, RcvSymbolErrors);
	adapterStat->rx_mib_miss_packets +=
	    mdlReadMib(pLayerPointers, RcvMissPkts);

	/*
	 * Tx Counters
	 */
	adapterStat->tx_mib_packets +=
	    mdlReadMib(pLayerPointers, XmtPackets);
	adapterStat->tx_mib_multicst_packets +=
	    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
	adapterStat->tx_mib_broadcst_packets +=
	    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
	adapterStat->tx_mib_flowctrl_packets +=
	    mdlReadMib(pLayerPointers, XmtFlowCtrl);

	adapterStat->tx_mib_bytes +=
	    mdlReadMib(pLayerPointers, XmtOctets);

	adapterStat->tx_mib_defer_trans_packets +=
	    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
	adapterStat->tx_mib_collision_packets +=
	    mdlReadMib(pLayerPointers, XmtCollisions);
	adapterStat->tx_mib_one_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtOneCollision);
	adapterStat->tx_mib_multi_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtMultipleCollision);
	adapterStat->tx_mib_late_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtLateCollision);
	adapterStat->tx_mib_ex_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtExcessiveCollision);

	/* Clear all MIB registers */
	WRITE_REG16(pLayerPointers, pLayerPointers->pMdl->Mem_Address
	    + MIB_ADDR, MIB_CLEAR);
}
#endif

/*
 * (GLD Entry Point) Set/unset promiscuous mode
 */
static int
amd8111s_m_promisc(void *arg, boolean_t on)
{
	struct LayerPointers *pLayerPointers = arg;

	if (on) {
		mdlSetPromiscuous(pLayerPointers);
	} else {
		mdlDisablePromiscuous(pLayerPointers);
	}

	return (0);
}

/*
 * (GLD Entry Point) Change the MAC address of the card
 */
static int
amd8111s_m_unicst(void *arg, const uint8_t *macaddr)
{
	struct LayerPointers *pLayerPointers = arg;

	mdlDisableInterrupt(pLayerPointers);
	mdlSetMacAddress(pLayerPointers, (unsigned char *)macaddr);
	mdlEnableInterrupt(pLayerPointers);

	return (0);
}

/*
 * Reset the card
 */
void
amd8111s_reset(struct LayerPointers *pLayerPointers)
{
	amd8111s_sw_reset(pLayerPointers);
	mdlHWReset(pLayerPointers);
}

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board after it has been successfully probed.
 * It will:
 *	a. create a minor device node for the instance.
 *	b. allocate & initialize the four layers (call odlInit)
 *	c. get the MAC address
 *	d. initialize pLayerPointers as the gld private pointer
 *	e. register with GLD
 * If any action fails it cleans up & returns DDI_FAILURE,
 * else it returns DDI_SUCCESS.
 */
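/*
 * Each successfully completed attach step sets a bit in attach_progress;
 * amd8111s_unattach() consults those bits so that a failed attach (or a
 * later detach) tears down only what was actually set up.
 */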
static int
amd8111s_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	mac_register_t *macp;
	struct LayerPointers *pLayerPointers;
	struct odl *pOdl;
	ddi_acc_handle_t *pci_handle;
	ddi_device_acc_attr_t dev_attr;
	caddr_t addrp = NULL;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	pLayerPointers = (struct LayerPointers *)
	    kmem_zalloc(sizeof (struct LayerPointers), KM_SLEEP);
	amd8111sadapter = pLayerPointers;

	/* Get device instance number */
	pLayerPointers->instance = ddi_get_instance(devinfo);
	ddi_set_driver_private(devinfo, (caddr_t)pLayerPointers);

	pOdl = (struct odl *)kmem_zalloc(sizeof (struct odl), KM_SLEEP);
	pLayerPointers->pOdl = pOdl;

	pOdl->devinfo = devinfo;

	/*
	 * Here, we only allocate memory for struct odl and initialize it.
	 * All other memory allocation & initialization will be done in
	 * odlInit later on in this routine.
	 */
	if (ddi_get_iblock_cookie(devinfo, 0, &pLayerPointers->pOdl->iblock)
	    != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "attach: get iblock cookies failed");
		goto attach_failure;
	}

	rw_init(&pOdl->chip_lock, NULL, RW_DRIVER, (void *)pOdl->iblock);
	mutex_init(&pOdl->mdlSendLock, "amd8111s Send Protection Lock",
	    MUTEX_DRIVER, (void *)pOdl->iblock);
	mutex_init(&pOdl->mdlRcvLock, "amd8111s Rcv Protection Lock",
	    MUTEX_DRIVER, (void *)pOdl->iblock);

	/* Setup PCI space */
	if (pci_config_setup(devinfo, &pOdl->pci_handle) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	pLayerPointers->attach_progress = AMD8111S_ATTACH_PCI;
	pci_handle = &pOdl->pci_handle;

	pOdl->vendor_id = pci_config_get16(*pci_handle, PCI_CONF_VENID);
	pOdl->device_id = pci_config_get16(*pci_handle, PCI_CONF_DEVID);

	/*
	 * Allocate and initialize all resource and map device registers.
	 * If failed, it returns a non-zero value.
	 */
	if (amd8111s_odlInit(pLayerPointers) != 0) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESOURCE;

	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	if (ddi_regs_map_setup(devinfo, 1, &addrp, 0, 4096, &dev_attr,
	    &(pLayerPointers->pOdl->MemBasehandle)) != 0) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "attach: ddi_regs_map_setup failed");
		goto attach_failure;
	}
	pLayerPointers->pMdl->Mem_Address = (unsigned long)addrp;

	/* Initialize HW */
	mdlOpen(pLayerPointers);
	mdlGetActiveMediaInfo(pLayerPointers);
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_REGS;

	/*
	 * Setup the interrupt
	 */
	if (ddi_add_intr(devinfo, 0, &pOdl->iblock, 0, amd8111s_intr,
	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_INTRADDED;

	/*
	 * Setup soft intr
	 */
	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &pOdl->drain_id,
	    NULL, NULL, amd8111s_send_drain,
	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESCHED;

	/*
	 * Initialize the mac structure
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_failure;

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = pLayerPointers;
	macp->m_dip = devinfo;
	/* Get MAC address */
	mdlGetMacAddress(pLayerPointers, (unsigned char *)pOdl->MacAddress);
	macp->m_src_addr = pOdl->MacAddress;
	macp->m_callbacks = &amd8111s_m_callbacks;
	macp->m_min_sdu = 0;
	/* 1518 - 14 (ether header) - 4 (CRC) */
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're ready to start.
	 */
	if (mac_register(macp, &pOdl->mh) != DDI_SUCCESS) {
		mac_free(macp);
		goto attach_failure;
	}
	mac_free(macp);

	pLayerPointers->attach_progress |= AMD8111S_ATTACH_MACREGED;

	return (DDI_SUCCESS);

attach_failure:
	(void) amd8111s_unattach(devinfo, pLayerPointers);
	return (DDI_FAILURE);
}

/*
 * detach(9E) -- Detach a device from the system
 *
 * It is called for each device instance when the system is preparing to
 * unload a dynamically unloadable driver.
 * It will:
 *	a. check if any driver buffers are held by the OS.
 *	b. clean up all allocated memory if it is not in use by the OS.
 *	c. unregister with GLD
 *	d. return DDI_SUCCESS on successful free & unregister,
 *	else DDI_FAILURE
 */
static int
amd8111s_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	struct LayerPointers *pLayerPointers;

	switch (cmd) {
	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	/*
	 * Get the driver private (struct LayerPointers *) structure
	 */
	if ((pLayerPointers = (struct LayerPointers *)ddi_get_driver_private
	    (devinfo)) == NULL) {
		return (DDI_FAILURE);
	}

	return (amd8111s_unattach(devinfo, pLayerPointers));
}
1843 
1844 static int
1845 amd8111s_unattach(dev_info_t *devinfo, struct LayerPointers *pLayerPointers)
1846 {
1847 	struct odl *pOdl = pLayerPointers->pOdl;
1848 
1849 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_MACREGED) {
1850 		/* Unregister driver from the GLD interface */
1851 		if (mac_unregister(pOdl->mh) != DDI_SUCCESS) {
1852 			return (DDI_FAILURE);
1853 		}
1854 	}
1855 
1856 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_INTRADDED) {
1857 		ddi_remove_intr(devinfo, 0, pOdl->iblock);
1858 	}
1859 
1860 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESCHED) {
1861 		ddi_remove_softintr(pOdl->drain_id);
1862 	}
1863 
1864 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_REGS) {
1865 		/* Stop HW */
1866 		mdlStopChip(pLayerPointers);
1867 		ddi_regs_map_free(&(pOdl->MemBasehandle));
1868 	}
1869 
1870 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESOURCE) {
1871 		/* Free all allocated memory */
1872 		amd8111s_free_resource(pLayerPointers);
1873 	}
1874 
1875 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_PCI) {
1876 		pci_config_teardown(&pOdl->pci_handle);
1877 		mutex_destroy(&pOdl->mdlSendLock);
1878 		mutex_destroy(&pOdl->mdlRcvLock);
1879 		rw_destroy(&pOdl->chip_lock);
1880 	}
1881 
1882 	kmem_free(pOdl, sizeof (struct odl));
1883 	kmem_free(pLayerPointers, sizeof (struct LayerPointers));
1884 
1885 	return (DDI_SUCCESS);
1886 }
1887 
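/*
 * Illustrative sketch (not part of the original source): extending the
 * attach_progress pattern.  A new attach stage records a flag on success,
 * and amd8111s_unattach() tears the stage down only if that flag is set,
 * so a partially completed attach unwinds cleanly.  The flag and the
 * setup/teardown routines below are hypothetical names.
 *
 * In amd8111s_attach():
 *	if (new_stage_setup(pLayerPointers) != DDI_SUCCESS)
 *		goto attach_failure;
 *	pLayerPointers->attach_progress |= AMD8111S_ATTACH_NEWSTAGE;
 *
 * In amd8111s_unattach():
 *	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_NEWSTAGE)
 *		new_stage_teardown(pLayerPointers);
 */
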
1888 /*
1889  * (GLD Entry Point) The GLD calls this entry point periodically to
1890  * collect driver statistics.
1891  */
1892 static int
1893 amd8111s_m_stat(void *arg, uint_t stat, uint64_t *val)
1894 {
1895 	struct LayerPointers *pLayerPointers = arg;
1896 	struct amd8111s_statistics *adapterStat;
1897 
1898 	adapterStat = &pLayerPointers->pOdl->statistics;
1899 
1900 	switch (stat) {
1901 
1902 	/*
1903 	 * Current Status
1904 	 */
1905 	case MAC_STAT_IFSPEED:
1906 		*val = pLayerPointers->pMdl->Speed * 1000000;
1907 		break;
1908 
1909 	case ETHER_STAT_LINK_DUPLEX:
1910 		if (pLayerPointers->pMdl->FullDuplex) {
1911 			*val = LINK_DUPLEX_FULL;
1912 		} else {
1913 			*val = LINK_DUPLEX_HALF;
1914 		}
1915 		break;
1916 
1917 	/*
1918 	 * Capabilities
1919 	 */
1920 	case ETHER_STAT_CAP_1000FDX:
1921 		*val = 0;
1922 		break;
1923 
1924 	case ETHER_STAT_CAP_1000HDX:
1925 		*val = 0;
1926 		break;
1927 
1928 	case ETHER_STAT_CAP_100FDX:
1929 		*val = 1;
1930 		break;
1931 
1932 	case ETHER_STAT_CAP_100HDX:
1933 		*val = 1;
1934 		break;
1935 
1936 	case ETHER_STAT_CAP_10FDX:
1937 		*val = 1;
1938 		break;
1939 
1940 	case ETHER_STAT_CAP_10HDX:
1941 		*val = 1;
1942 		break;
1943 
1944 	case ETHER_STAT_CAP_ASMPAUSE:
1945 		*val = 1;
1946 		break;
1947 
1948 	case ETHER_STAT_CAP_PAUSE:
1949 		*val = 1;
1950 		break;
1951 
1952 	case ETHER_STAT_CAP_AUTONEG:
1953 		*val = 1;
1954 		break;
1955 
1956 	case ETHER_STAT_ADV_CAP_1000FDX:
1957 		*val = 0;
1958 		break;
1959 
1960 	case ETHER_STAT_ADV_CAP_1000HDX:
1961 		*val = 0;
1962 		break;
1963 
1964 	case ETHER_STAT_ADV_CAP_100FDX:
1965 		*val = 1;
1966 		break;
1967 
1968 	case ETHER_STAT_ADV_CAP_100HDX:
1969 		*val = 1;
1970 		break;
1971 
1972 	case ETHER_STAT_ADV_CAP_10FDX:
1973 		*val = 1;
1974 		break;
1975 
1976 	case ETHER_STAT_ADV_CAP_10HDX:
1977 		*val = 1;
1978 		break;
1979 
1980 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
1981 		*val = 1;
1982 		break;
1983 
1984 	case ETHER_STAT_ADV_CAP_PAUSE:
1985 		*val = 1;
1986 		break;
1987 
1988 	case ETHER_STAT_ADV_CAP_AUTONEG:
1989 		*val = 1;
1990 		break;
1991 
1992 	/*
1993 	 * Rx Counters
1994 	 */
1995 	case MAC_STAT_IPACKETS:
1996 		*val = adapterStat->rx_mib_unicst_packets +
1997 		    adapterStat->rx_mib_multicst_packets +
1998 		    adapterStat->rx_mib_broadcst_packets +
1999 		    mdlReadMib(pLayerPointers, RcvUniCastPkts) +
2000 		    mdlReadMib(pLayerPointers, RcvMultiCastPkts) +
2001 		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
2002 		break;
2003 
2004 	case MAC_STAT_RBYTES:
2005 		*val = adapterStat->rx_mib_bytes +
2006 		    mdlReadMib(pLayerPointers, RcvOctets);
2007 		break;
2008 
2009 	case MAC_STAT_MULTIRCV:
2010 		*val = adapterStat->rx_mib_multicst_packets +
2011 		    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
2012 		break;
2013 
2014 	case MAC_STAT_BRDCSTRCV:
2015 		*val = adapterStat->rx_mib_broadcst_packets +
2016 		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
2017 		break;
2018 
2019 	case MAC_STAT_NORCVBUF:
2020 		*val = adapterStat->rx_allocfail +
2021 		    adapterStat->rx_mib_drop_packets +
2022 		    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
2023 		break;
2024 
2025 	case MAC_STAT_IERRORS:
2026 		*val = adapterStat->rx_mib_align_err_packets +
2027 		    adapterStat->rx_mib_fcs_err_packets +
2028 		    adapterStat->rx_mib_symbol_err_packets +
2029 		    mdlReadMib(pLayerPointers, RcvAlignmentErrors) +
2030 		    mdlReadMib(pLayerPointers, RcvFCSErrors) +
2031 		    mdlReadMib(pLayerPointers, RcvSymbolErrors);
2032 		break;
2033 
2034 	case ETHER_STAT_ALIGN_ERRORS:
2035 		*val = adapterStat->rx_mib_align_err_packets +
2036 		    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
2037 		break;
2038 
2039 	case ETHER_STAT_FCS_ERRORS:
2040 		*val = adapterStat->rx_mib_fcs_err_packets +
2041 		    mdlReadMib(pLayerPointers, RcvFCSErrors);
2042 		break;
2043 
2044 	/*
2045 	 * Tx Counters
2046 	 */
2047 	case MAC_STAT_OPACKETS:
2048 		*val = adapterStat->tx_mib_packets +
2049 		    mdlReadMib(pLayerPointers, XmtPackets);
2050 		break;
2051 
2052 	case MAC_STAT_OBYTES:
2053 		*val = adapterStat->tx_mib_bytes +
2054 		    mdlReadMib(pLayerPointers, XmtOctets);
2055 		break;
2056 
2057 	case MAC_STAT_MULTIXMT:
2058 		*val = adapterStat->tx_mib_multicst_packets +
2059 		    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
2060 		break;
2061 
2062 	case MAC_STAT_BRDCSTXMT:
2063 		*val = adapterStat->tx_mib_broadcst_packets +
2064 		    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
2065 		break;
2066 
2067 	case MAC_STAT_NOXMTBUF:
2068 		*val = adapterStat->tx_no_descriptor;
2069 		break;
2070 
2071 	case MAC_STAT_OERRORS:
2072 		*val = adapterStat->tx_mib_ex_coll_packets +
2073 		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
2074 		break;
2075 
2076 	case MAC_STAT_COLLISIONS:
2077 		*val = adapterStat->tx_mib_ex_coll_packets +
2078 		    mdlReadMib(pLayerPointers, XmtCollisions);
2079 		break;
2080 
2081 	case ETHER_STAT_FIRST_COLLISIONS:
2082 		*val = adapterStat->tx_mib_one_coll_packets +
2083 		    mdlReadMib(pLayerPointers, XmtOneCollision);
2084 		break;
2085 
2086 	case ETHER_STAT_MULTI_COLLISIONS:
2087 		*val = adapterStat->tx_mib_multi_coll_packets +
2088 		    mdlReadMib(pLayerPointers, XmtMultipleCollision);
2089 		break;
2090 
2091 	case ETHER_STAT_EX_COLLISIONS:
2092 		*val = adapterStat->tx_mib_ex_coll_packets +
2093 		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
2094 		break;
2095 
2096 	case ETHER_STAT_TX_LATE_COLLISIONS:
2097 		*val = adapterStat->tx_mib_late_coll_packets +
2098 		    mdlReadMib(pLayerPointers, XmtLateCollision);
2099 		break;
2100 
2101 	case ETHER_STAT_DEFER_XMTS:
2102 		*val = adapterStat->tx_mib_defer_trans_packets +
2103 		    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
2104 		break;
2105 
2106 	default:
2107 		return (ENOTSUP);
2108 	}
2109 	return (0);
2110 }
2111 
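/*
 * Illustrative sketch (not part of the original source): the MAC layer
 * reaches this function through the mc_getstat member of
 * amd8111s_m_callbacks.  A direct call would look like:
 *
 *	uint64_t speed;
 *
 *	if (amd8111s_m_stat(pLayerPointers, MAC_STAT_IFSPEED, &speed) == 0)
 *		cmn_err(CE_NOTE, "link speed: %llu bps",
 *		    (unsigned long long)speed);
 */
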
2112 /*
2113  *	Memory-mapped accessors used by the MDL to read/write card registers.
2114  */
2115 unsigned char
2116 READ_REG8(struct LayerPointers *pLayerPointers, long x)
2117 {
2118 	return (ddi_get8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)x));
2119 }
2120 
2121 int
2122 READ_REG16(struct LayerPointers *pLayerPointers, long x)
2123 {
2124 	return (ddi_get16(pLayerPointers->pOdl->MemBasehandle,
2125 	    (uint16_t *)(x)));
2126 }
2127 
2128 long
2129 READ_REG32(struct LayerPointers *pLayerPointers, long x)
2130 {
2131 	return (ddi_get32(pLayerPointers->pOdl->MemBasehandle,
2132 	    (uint32_t *)(x)));
2133 }
2134 
2135 void
2136 WRITE_REG8(struct LayerPointers *pLayerPointers, long x, int y)
2137 {
2138 	ddi_put8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)(x), y);
2139 }
2140 
2141 void
2142 WRITE_REG16(struct LayerPointers *pLayerPointers, long x, int y)
2143 {
2144 	ddi_put16(pLayerPointers->pOdl->MemBasehandle, (uint16_t *)(x), y);
2145 }
2146 
2147 void
2148 WRITE_REG32(struct LayerPointers *pLayerPointers, long x, int y)
2149 {
2150 	ddi_put32(pLayerPointers->pOdl->MemBasehandle, (uint32_t *)(x), y);
2151 }
2152 
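/*
 * Writes the eight bytes of y to consecutive device registers starting
 * at address x, one byte at a time.
 */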
2153 void
2154 WRITE_REG64(struct LayerPointers *pLayerPointers, long x, char *y)
2155 {
2156 	int i;
2157 	for (i = 0; i < 8; i++) {
2158 		WRITE_REG8(pLayerPointers, (x + i), y[i]);
2159 	}
2160 }
2161
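/*
 * Illustrative sketch (not part of the original source): MDL code is
 * expected to form register addresses by adding a register offset to the
 * mapped base recorded in pMdl->Mem_Address during attach.
 * EXAMPLE_REG_OFFSET is a hypothetical offset, shown only to demonstrate
 * the calling pattern:
 *
 *	unsigned long base = pLayerPointers->pMdl->Mem_Address;
 *	long reg = (long)(base + EXAMPLE_REG_OFFSET);
 *	long v;
 *
 *	v = READ_REG32(pLayerPointers, reg);
 *	WRITE_REG32(pLayerPointers, reg, (int)(v | 0x1));
 */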