1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Copyright (c) 2001-2006 Advanced Micro Devices, Inc.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are met:
34  *
35  * + Redistributions of source code must retain the above copyright notice,
36  * + this list of conditions and the following disclaimer.
37  *
38  * + Redistributions in binary form must reproduce the above copyright
39  * + notice, this list of conditions and the following disclaimer in the
40  * + documentation and/or other materials provided with the distribution.
41  *
42  * + Neither the name of Advanced Micro Devices, Inc. nor the names of its
43  * + contributors may be used to endorse or promote products derived from
44  * + this software without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
47  * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
48  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
49  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
50  * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. OR
51  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
52  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
53  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
54  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
56  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
57  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
58  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59  *
60  * Import/Export/Re-Export/Use/Release/Transfer Restrictions and
61  * Compliance with Applicable Laws.  Notice is hereby given that
62  * the software may be subject to restrictions on use, release,
63  * transfer, importation, exportation and/or re-exportation under
64  * the laws and regulations of the United States or other
65  * countries ("Applicable Laws"), which include but are not
66  * limited to U.S. export control laws such as the Export
67  * Administration Regulations and national security controls as
68  * defined thereunder, as well as State Department controls under
69  * the U.S. Munitions List.  Permission to use and/or
70  * redistribute the software is conditioned upon compliance with
71  * all Applicable Laws, including U.S. export control laws
72  * regarding specifically designated persons, countries and
73  * nationals of countries subject to national security controls.
74  */
75 
76 
77 #pragma ident "@(#)$RCSfile: solaris_odl.c,v $ $Revision: 1.3 $ " \
78 " $Date: 2004/04/22 15:22:54 $ AMD"
79 
80 
81 /* include files */
82 #include <sys/disp.h>
83 #include <sys/atomic.h>
84 #include "amd8111s_main.h"
85 
86 /* Global macro definitions */
87 #define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
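/*
 * Note: ROUNDUP() assumes 'a' is a power of two; e.g. ROUNDUP(13, 8) == 16
 * and ROUNDUP(16, 8) == 16.
 */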
88 #define	INTERFACE_NAME "amd8111s"
89 #define	AMD8111S_SPLIT	128
90 #define	AMD8111S_SEND_MAX	64
91 
92 static char ident[] = "AMD8111 10/100M Ethernet 1.0";
93 
94 /*
95  * Driver Entry Points
96  */
97 static int amd8111s_attach(dev_info_t *, ddi_attach_cmd_t);
98 static int amd8111s_detach(dev_info_t *, ddi_detach_cmd_t);
99 
100 /*
101  * GLD Entry points prototype
102  */
103 static int amd8111s_m_unicst(void *, const uint8_t *);
104 static int amd8111s_m_promisc(void *, boolean_t);
105 static int amd8111s_m_stat(void *, uint_t, uint64_t *);
106 static void amd8111s_m_resources(void *arg);
107 static void amd8111s_m_ioctl(void *, queue_t *, mblk_t *);
108 static int amd8111s_m_multicst(void *, boolean_t, const uint8_t *addr);
109 static int amd8111s_m_start(void *);
110 static void amd8111s_m_stop(void *);
111 static mblk_t *amd8111s_m_tx(void *, mblk_t *mp);
112 static uint_t amd8111s_intr(caddr_t);
113 
114 static int amd8111s_unattach(dev_info_t *, struct LayerPointers *);
115 
116 static boolean_t amd8111s_allocate_buffers(struct LayerPointers *);
117 static int amd8111s_odlInit(struct LayerPointers *);
118 static boolean_t amd8111s_allocate_descriptors(struct LayerPointers *);
119 static void amd8111s_free_descriptors(struct LayerPointers *);
120 static boolean_t amd8111s_alloc_dma_ringbuf(struct LayerPointers *,
121 		struct amd8111s_dma_ringbuf *, uint32_t, uint32_t);
122 static void amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *);
123 
124 
125 static void amd8111s_log(struct LayerPointers *adapter, int level,
126     char *fmt, ...);
127 
128 static struct cb_ops amd8111s_cb_ops = {
129 	nulldev,
130 	nulldev,
131 	nodev,
132 	nodev,
133 	nodev,
134 	nodev,
135 	nodev,
136 	nodev,
137 	nodev,
138 	nodev,
139 	nodev,
140 	nochpoll,
141 	ddi_prop_op,
142 	NULL,
143 	D_NEW | D_MP,
144 	CB_REV,		/* cb_rev */
145 	nodev,		/* cb_aread */
146 	nodev		/* cb_awrite */
147 };
148 
149 static struct dev_ops amd8111s_dev_ops = {
150 	DEVO_REV,		/* devo_rev */
151 	0,			/* devo_refcnt */
152 	NULL,			/* devo_getinfo */
153 	nulldev,		/* devo_identify */
154 	nulldev,		/* devo_probe */
155 	amd8111s_attach,	/* devo_attach */
156 	amd8111s_detach,	/* devo_detach */
157 	nodev,			/* devo_reset */
158 	&amd8111s_cb_ops,	/* devo_cb_ops */
159 	NULL,			/* devo_bus_ops */
160 	nodev			/* devo_power */
161 };
162 
163 struct modldrv amd8111s_modldrv = {
164 	&mod_driverops,		/* Type of module. This one is a driver */
165 	ident,			/* short description */
166 	&amd8111s_dev_ops	/* driver specific ops */
167 };
168 
169 struct modlinkage amd8111s_modlinkage = {
170 	MODREV_1, (void *)&amd8111s_modldrv, NULL
171 };
172 
173 /*
174  * Global Variables
175  */
176 struct LayerPointers *amd8111sadapter;
177 
178 static ddi_dma_attr_t pcn_buff_dma_attr_t = {
179 	DMA_ATTR_V0,	/* dma_attr_version */
180 	(uint64_t)0,		/* dma_attr_addr_lo */
181 	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
182 	(uint64_t)0xFFFFFFFF,	/* dma_attr_count_max */
183 	(uint64_t)1,		/* dma_attr_align */
184 	(uint_t)0x7F,		/* dma_attr_burstsizes */
185 	(uint32_t)1,		/* dma_attr_minxfer */
186 	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
187 	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
188 	(int)1,			/* dma_attr_sgllen */
189 	(uint32_t)1,		/* dma_attr_granular */
190 	(uint_t)0		/* dma_attr_flags */
191 };
192 
193 static ddi_dma_attr_t pcn_desc_dma_attr_t = {
194 	DMA_ATTR_V0,		/* dma_attr_version */
195 	(uint64_t)0,		/* dma_attr_addr_lo */
196 	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
197 	(uint64_t)0x7FFFFFFF,	/* dma_attr_count_max */
198 	(uint64_t)0x10,		/* dma_attr_align */
199 	(uint_t)0xFFFFFFFFU,	/* dma_attr_burstsizes */
200 	(uint32_t)1,		/* dma_attr_minxfer */
201 	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
202 	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
203 	(int)1,			/* dma_attr_sgllen */
204 	(uint32_t)1,		/* dma_attr_granular */
205 	(uint_t)0		/* dma_attr_flags */
206 };
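/*
 * Note that both attribute structures above limit DMA addresses to the low
 * 4 GB (dma_attr_addr_hi == 0xFFFFFFFF), consistent with the 32-bit buffer
 * addresses stored in the Tx/Rx descriptors below.
 */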
207 
208 /* PIO access attributes for registers */
209 static ddi_device_acc_attr_t pcn_acc_attr = {
210 	DDI_DEVICE_ATTR_V0,
211 	DDI_STRUCTURE_LE_ACC,
212 	DDI_STRICTORDER_ACC
213 };
214 
215 #define	AMD8111S_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL)
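/*
 * MC_RESOURCES and MC_IOCTL advertise to the MAC layer that the optional
 * m_resources and m_ioctl entries in the callback table below are valid.
 */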
216 
217 
218 static mac_callbacks_t amd8111s_m_callbacks = {
219 	AMD8111S_M_CALLBACK_FLAGS,
220 	amd8111s_m_stat,
221 	amd8111s_m_start,
222 	amd8111s_m_stop,
223 	amd8111s_m_promisc,
224 	amd8111s_m_multicst,
225 	amd8111s_m_unicst,
226 	amd8111s_m_tx,
227 	amd8111s_m_resources,
228 	amd8111s_m_ioctl
229 };
230 
231 
232 /*
233  * Standard driver load entry point.
234  * Called when the driver module is loaded.
235  */
236 int
237 _init()
238 {
239 	int status;
240 	mac_init_ops(&amd8111s_dev_ops, "amd8111s");
241 
242 	status = mod_install(&amd8111s_modlinkage);
243 	if (status != DDI_SUCCESS) {
244 		mac_fini_ops(&amd8111s_dev_ops);
245 	}
246 
247 	return (status);
248 }
249 
250 /*
251  * Standard driver entry point for queries.
252  * May be called at any time to obtain driver information.
253  */
254 int
255 _info(struct modinfo *modinfop)
256 {
257 	return (mod_info(&amd8111s_modlinkage, modinfop));
258 }
259 
260 /*
261  *	Standard driver unload entry point.
262  *	Called when the driver module is unloaded.
263  */
264 int
265 _fini()
266 {
267 	int status;
268 
269 	status = mod_remove(&amd8111s_modlinkage);
270 	if (status == DDI_SUCCESS) {
271 		mac_fini_ops(&amd8111s_dev_ops);
272 	}
273 
274 	return (status);
275 }
276 
277 /* Blanking callback for adjusting interrupt coalescing; currently a no-op */
278 static void
279 amd8111s_m_blank(void *arg, time_t ticks, uint32_t count)
280 {
281 	_NOTE(ARGUNUSED(arg, ticks, count));
282 }
283 
284 static void
285 amd8111s_m_resources(void *arg)
286 {
287 	struct LayerPointers *adapter = arg;
288 	mac_rx_fifo_t mrf;
289 
290 	mrf.mrf_type = MAC_RX_FIFO;
291 	mrf.mrf_blank = amd8111s_m_blank;
292 	mrf.mrf_arg = (void *)adapter;
293 	mrf.mrf_normal_blank_time = 128;
294 	mrf.mrf_normal_pkt_count = 8;
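	/*
	 * The two values above are interrupt-blanking hints for the MAC
	 * layer (time in ticks and packet count); note that the blank
	 * callback registered here, amd8111s_m_blank(), is a no-op.
	 */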
295 
296 	adapter->pOdl->mrh = mac_resource_add(adapter->pOdl->mh,
297 	    (mac_resource_t *)&mrf);
298 }
299 
300 /*
301  * Loopback Support
302  */
303 static lb_property_t loopmodes[] = {
304 	{ normal,	"normal",	AMD8111S_LB_NONE		},
305 	{ external,	"100Mbps",	AMD8111S_LB_EXTERNAL_100	},
306 	{ external,	"10Mbps",	AMD8111S_LB_EXTERNAL_10		},
307 	{ internal,	"MAC",		AMD8111S_LB_INTERNAL_MAC	}
308 };
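/*
 * Each entry above pairs a loopback class and display name with the
 * driver-private mode value consumed by amd8111s_set_loop_mode().
 */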
309 
310 static void
311 amd8111s_set_loop_mode(struct LayerPointers *adapter, uint32_t mode)
312 {
313 
314 	/*
315 	 * If the mode isn't being changed, there's nothing to do ...
316 	 */
317 	if (mode == adapter->pOdl->loopback_mode)
318 		return;
319 
320 	/*
321 	 * Validate the requested mode and prepare a suitable message
322 	 * to explain the link down/up cycle that the change will
323 	 * probably induce ...
324 	 */
325 	switch (mode) {
326 	default:
327 		return;
328 
329 	case AMD8111S_LB_NONE:
330 		mdlStopChip(adapter);
331 		if (adapter->pOdl->loopback_mode == AMD8111S_LB_INTERNAL_MAC) {
332 			cmn_err(CE_NOTE, "LB_NONE restored from Internal LB");
333 			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
334 			    INLOOP);
335 			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
336 			    FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);
337 		} else {
338 			cmn_err(CE_NOTE, "LB_NONE restored from External LB");
339 			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
340 			    EXLOOP);
341 		}
342 
343 		amd8111s_reset(adapter);
344 		adapter->pOdl->LinkStatus = LINK_STATE_DOWN;
345 		adapter->pOdl->rx_fcs_stripped = B_FALSE;
346 		mdlStartChip(adapter);
347 		break;
348 
349 	case AMD8111S_LB_EXTERNAL_100:
350 		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_100");
351 		mdlStopChip(adapter);
352 		amd8111s_reset(adapter);
353 		SetIntrCoalesc(adapter, B_FALSE);
354 		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_100);
355 		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
356 		    VAL0 | EXLOOP);
357 		adapter->pOdl->LinkStatus = LINK_STATE_UP;
358 		adapter->pMdl->Speed = 100;
359 		adapter->pMdl->FullDuplex = B_TRUE;
360 		/* Tell GLD the state of the physical link. */
361 		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);
362 
363 		adapter->pOdl->rx_fcs_stripped = B_TRUE;
364 
365 		mdlStartChip(adapter);
366 		break;
367 
368 	case AMD8111S_LB_EXTERNAL_10:
369 		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_10");
370 		mdlStopChip(adapter);
371 		amd8111s_reset(adapter);
372 		SetIntrCoalesc(adapter, B_FALSE);
373 		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_10);
374 		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
375 		    VAL0 | EXLOOP);
376 		adapter->pOdl->LinkStatus = LINK_STATE_UP;
377 		adapter->pMdl->Speed = 10;
378 		adapter->pMdl->FullDuplex = B_TRUE;
379 		/* Tell GLD the state of the physical link. */
380 		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);
381 
382 		adapter->pOdl->rx_fcs_stripped = B_TRUE;
383 
384 		mdlStartChip(adapter);
385 		break;
386 
387 	case AMD8111S_LB_INTERNAL_MAC:
388 		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_INTERNAL_MAC");
389 		mdlStopChip(adapter);
390 		amd8111s_reset(adapter);
391 		SetIntrCoalesc(adapter, B_FALSE);
392 		/* Disable Port Manager */
393 		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
394 		    EN_PMGR);
395 		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
396 		    VAL0 | INLOOP);
397 
398 		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
399 		    VAL1 | FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);
400 
401 		adapter->pOdl->LinkStatus = LINK_STATE_UP;
402 		adapter->pMdl->FullDuplex = B_TRUE;
403 		/* Tell GLD the state of the physical link. */
404 		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);
405 
406 		adapter->pOdl->rx_fcs_stripped = B_TRUE;
407 
408 		mdlStartChip(adapter);
409 		break;
410 	}
411 
412 	/*
413 	 * All OK; tell the caller to reprogram
414 	 * the PHY and/or MAC for the new mode ...
415 	 */
416 	adapter->pOdl->loopback_mode = mode;
417 }
418 
419 static enum ioc_reply
420 amd8111s_loopback_ioctl(struct LayerPointers *adapter, struct iocblk *iocp,
421     mblk_t *mp)
422 {
423 	lb_info_sz_t *lbsp;
424 	lb_property_t *lbpp;
425 	uint32_t *lbmp;
426 	int cmd;
427 
428 	/*
429 	 * Validate format of ioctl
430 	 */
431 	if (mp->b_cont == NULL)
432 		return (IOC_INVAL);
433 
434 	cmd = iocp->ioc_cmd;
435 	switch (cmd) {
436 	default:
438 		amd8111s_log(adapter, CE_NOTE,
439 		    "amd8111s_loop_ioctl: invalid cmd 0x%x", cmd);
440 		return (IOC_INVAL);
441 
442 	case LB_GET_INFO_SIZE:
443 		if (iocp->ioc_count != sizeof (lb_info_sz_t)) {
444 			amd8111s_log(adapter, CE_NOTE,
445 			    "wrong LB_GET_INFO_SIZE size");
446 			return (IOC_INVAL);
447 		}
448 		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
449 		*lbsp = sizeof (loopmodes);
450 		break;
451 
452 	case LB_GET_INFO:
453 		if (iocp->ioc_count != sizeof (loopmodes)) {
454 			amd8111s_log(adapter, CE_NOTE,
455 			    "Wrong LB_GET_INFO size");
456 			return (IOC_INVAL);
457 		}
458 		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
459 		bcopy(loopmodes, lbpp, sizeof (loopmodes));
460 		break;
461 
462 	case LB_GET_MODE:
463 		if (iocp->ioc_count != sizeof (uint32_t)) {
464 			amd8111s_log(adapter, CE_NOTE,
465 			    "Wrong LB_GET_MODE size");
466 			return (IOC_INVAL);
467 		}
468 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
469 		*lbmp = adapter->pOdl->loopback_mode;
470 		break;
471 
472 	case LB_SET_MODE:
473 		if (iocp->ioc_count != sizeof (uint32_t)) {
474 			amd8111s_log(adapter, CE_NOTE,
475 			    "Wrong LB_SET_MODE size");
476 			return (IOC_INVAL);
477 		}
478 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
479 		amd8111s_set_loop_mode(adapter, *lbmp);
480 		break;
481 	}
482 	return (IOC_REPLY);
483 }
484 
485 static void
486 amd8111s_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
487 {
488 	struct iocblk *iocp;
489 	struct LayerPointers *adapter;
490 	enum ioc_reply status;
491 
492 	iocp = (struct iocblk *)mp->b_rptr;
493 	iocp->ioc_error = 0;
494 	adapter = (struct LayerPointers *)arg;
495 
496 	ASSERT(adapter);
497 	if (adapter == NULL) {
498 		miocnak(q, mp, 0, EINVAL);
499 		return;
500 	}
501 
502 	switch (iocp->ioc_cmd) {
503 
504 	case LB_GET_INFO_SIZE:
505 	case LB_GET_INFO:
506 	case LB_GET_MODE:
507 	case LB_SET_MODE:
508 		status = amd8111s_loopback_ioctl(adapter, iocp, mp);
509 		break;
510 
511 	default:
512 		status = IOC_INVAL;
513 		break;
514 	}
515 
516 	/*
517 	 * Decide how to reply
518 	 */
519 	switch (status) {
520 	default:
521 	case IOC_INVAL:
522 		/*
523 		 * Error, reply with a NAK and EINVAL or the specified error
524 		 */
525 		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
526 		    EINVAL : iocp->ioc_error);
527 		break;
528 
529 	case IOC_DONE:
530 		/*
531 		 * OK, reply already sent
532 		 */
533 		break;
534 
535 	case IOC_ACK:
536 		/*
537 		 * OK, reply with an ACK
538 		 */
539 		miocack(q, mp, 0, 0);
540 		break;
541 
542 	case IOC_REPLY:
543 		/*
544 		 * OK, send prepared reply as ACK or NAK
545 		 */
546 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
547 		    M_IOCACK : M_IOCNAK;
548 		qreply(q, mp);
549 		break;
550 	}
551 }
552 
553 /*
554  * Copy one packet from DMA memory to an mblk; advance the descriptor pointer.
555  */
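/*
 * Descriptor ownership here follows the usual Lance/PCnet-style convention:
 * Rx_OWN == 1 means the controller owns the descriptor; Rx_OWN == 0 means
 * it holds a completed frame for the host, which returns the descriptor to
 * the controller by setting Rx_OWN back to 1.
 */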
556 static boolean_t
557 amd8111s_recv_copy(struct LayerPointers *pLayerPointers, mblk_t **last_mp)
558 {
559 	int length = 0;
560 	mblk_t *mp;
561 	struct rx_desc *descriptor;
562 	struct odl *pOdl = pLayerPointers->pOdl;
563 	struct amd8111s_statistics *statistics = &pOdl->statistics;
564 	struct nonphysical *pNonphysical = pLayerPointers->pMil
565 	    ->pNonphysical;
566 
567 	mutex_enter(&pOdl->mdlRcvLock);
568 	descriptor = pNonphysical->RxBufDescQRead->descriptor;
569 	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle,
570 	    pNonphysical->RxBufDescQRead->descriptor -
571 	    pNonphysical->RxBufDescQStart->descriptor,
572 	    sizeof (struct rx_desc), DDI_DMA_SYNC_FORCPU);
573 	if ((descriptor->Rx_OWN) == 0) {
574 		/*
575 		 * If the frame was received with errors, set the MCNT
576 		 * of that packet in the receive array to 0. The packet
577 		 * will be discarded later and not indicated to the OS.
578 		 */
579 		if (descriptor->Rx_ERR) {
580 			statistics->rx_desc_err ++;
581 			descriptor->Rx_ERR = 0;
582 			if (descriptor->Rx_FRAM == 1) {
583 				statistics->rx_desc_err_FRAM ++;
584 				descriptor->Rx_FRAM = 0;
585 			}
586 			if (descriptor->Rx_OFLO == 1) {
587 				statistics->rx_desc_err_OFLO ++;
588 				descriptor->Rx_OFLO = 0;
589 				pOdl->rx_overflow_counter ++;
590 				if ((pOdl->rx_overflow_counter > 5) &&
591 				    (pOdl->pause_interval == 0)) {
592 					statistics->rx_double_overflow ++;
593 					mdlSendPause(pLayerPointers);
594 					pOdl->rx_overflow_counter = 0;
595 					pOdl->pause_interval = 25;
596 				}
597 			}
598 			if (descriptor->Rx_CRC == 1) {
599 				statistics->rx_desc_err_CRC ++;
600 				descriptor->Rx_CRC = 0;
601 			}
602 			if (descriptor->Rx_BUFF == 1) {
603 				statistics->rx_desc_err_BUFF ++;
604 				descriptor->Rx_BUFF = 0;
605 			}
606 			goto Next_Descriptor;
607 		}
608 
609 		/* Length of incoming packet */
610 		if (pOdl->rx_fcs_stripped) {
611 			length = descriptor->Rx_MCNT - 4;
612 		} else {
613 			length = descriptor->Rx_MCNT;
614 		}
615 		if (length < 62) {
616 			statistics->rx_error_zerosize ++;
617 		}
618 
619 		if ((mp = allocb(length, BPRI_MED)) == NULL) {
620 			statistics->rx_allocfail ++;
621 			goto failed;
622 		}
623 		/* Copy from virtual address of incoming packet */
624 		bcopy((long *)*(pNonphysical->RxBufDescQRead->USpaceMap),
625 		    mp->b_rptr, length);
626 		mp->b_wptr = mp->b_rptr + length;
627 		statistics->rx_ok_packets ++;
628 		if (*last_mp == NULL) {
629 			*last_mp = mp;
630 		} else {
631 			(*last_mp)->b_next = mp;
632 			*last_mp = mp;
633 		}
634 
635 Next_Descriptor:
636 		descriptor->Rx_MCNT = 0;
637 		descriptor->Rx_SOP = 0;
638 		descriptor->Rx_EOP = 0;
639 		descriptor->Rx_PAM = 0;
640 		descriptor->Rx_BAM = 0;
641 		descriptor->TT = 0;
642 		descriptor->Rx_OWN = 1;
643 		pNonphysical->RxBufDescQRead->descriptor++;
644 		pNonphysical->RxBufDescQRead->USpaceMap++;
645 		if (pNonphysical->RxBufDescQRead->descriptor >
646 		    pNonphysical->RxBufDescQEnd->descriptor) {
647 			pNonphysical->RxBufDescQRead->descriptor =
648 			    pNonphysical->RxBufDescQStart->descriptor;
649 			pNonphysical->RxBufDescQRead->USpaceMap =
650 			    pNonphysical->RxBufDescQStart->USpaceMap;
651 		}
652 		mutex_exit(&pOdl->mdlRcvLock);
653 
654 		return (B_TRUE);
655 	}
656 
657 failed:
658 	mutex_exit(&pOdl->mdlRcvLock);
659 	return (B_FALSE);
660 }
661 
662 /*
663  * Get the received packets from NIC card and send them to GLD.
664  */
665 static void
666 amd8111s_receive(struct LayerPointers *pLayerPointers)
667 {
668 	int numOfPkts = 0;
669 	struct odl *pOdl;
670 	mblk_t *ret_mp = NULL, *last_mp = NULL;
671 
672 	pOdl = pLayerPointers->pOdl;
673 
674 	rw_enter(&pOdl->chip_lock, RW_READER);
675 	if (!pLayerPointers->run) {
676 		rw_exit(&pOdl->chip_lock);
677 		return;
678 	}
679 
680 	if (pOdl->pause_interval > 0)
681 		pOdl->pause_interval --;
682 
683 	while (numOfPkts < RX_RING_SIZE) {
684 
685 		if (!amd8111s_recv_copy(pLayerPointers, &last_mp)) {
686 			break;
687 		}
688 		if (ret_mp == NULL)
689 			ret_mp = last_mp;
690 		numOfPkts++;
691 	}
692 
693 	if (ret_mp) {
694 		mac_rx(pOdl->mh, pOdl->mrh, ret_mp);
695 	}
696 
697 	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle, 0, 0,
698 	    DDI_DMA_SYNC_FORDEV);
699 
700 	mdlReceive(pLayerPointers);
701 
702 	rw_exit(&pOdl->chip_lock);
703 
704 }
705 
706 /*
707  * Print messages in the release-version driver.
708  */
709 static void
710 amd8111s_log(struct LayerPointers *adapter, int level, char *fmt, ...)
711 {
712 	char name[32];
713 	char buf[256];
714 	va_list ap;
715 
716 	if (adapter != NULL) {
717 		(void) snprintf(name, sizeof (name), "amd8111s%d",
718 		    ddi_get_instance(adapter->pOdl->devinfo));
719 	} else {
720 		(void) snprintf(name, sizeof (name), "amd8111s");
721 	}
722 	va_start(ap, fmt);
723 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
724 	va_end(ap);
725 	cmn_err(level, "%s: %s", name, buf);
726 }
727 
728 /*
729  * Allocate and initialize all resources.
730  * Called by amd8111s_attach().
731  */
732 static int
733 amd8111s_odlInit(struct LayerPointers *pLayerPointers)
734 {
735 	unsigned long mem_req_array[MEM_REQ_MAX];
736 	unsigned long mem_set_array[MEM_REQ_MAX];
737 	unsigned long *pmem_req_array;
738 	unsigned long *pmem_set_array;
739 	int i, size;
740 
741 	for (i = 0; i < MEM_REQ_MAX; i++) {
742 		mem_req_array[i] = 0;
743 		mem_set_array[i] = 0;
744 	}
745 
746 	milRequestResources(mem_req_array);
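	/*
	 * As the loop below implies, the request array is a sequence of
	 * (type, size) pairs terminated by a zero entry; for each VIRTUAL
	 * request, mem_set_array receives the matching (type, size,
	 * address) triple.
	 */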
747 
748 	pmem_req_array = mem_req_array;
749 	pmem_set_array = mem_set_array;
750 	while (*pmem_req_array) {
751 		switch (*pmem_req_array) {
752 		case VIRTUAL:
753 			*pmem_set_array = VIRTUAL;
754 			pmem_req_array++;
755 			pmem_set_array++;
756 			*(pmem_set_array) = *(pmem_req_array);
757 			pmem_set_array++;
758 			*(pmem_set_array) = (unsigned long) kmem_zalloc(
759 			    *(pmem_req_array), KM_NOSLEEP);
760 			if (*pmem_set_array == NULL)
761 				goto odl_init_failure;
762 			break;
763 		}
764 		pmem_req_array++;
765 		pmem_set_array++;
766 	}
767 
768 	/*
769 	 * Initialize memory on the lower layers
770 	 */
771 	milSetResources(pLayerPointers, mem_set_array);
772 
773 	/* Allocate Rx/Tx descriptors */
774 	if (amd8111s_allocate_descriptors(pLayerPointers) != B_TRUE) {
775 		*pmem_set_array = NULL;
776 		goto odl_init_failure;
777 	}
778 
779 	/*
780 	 * Allocate an Rx buffer for each Rx descriptor. Then call the mil
781 	 * layer routine to fill each buffer's physical address into its Rx descriptor.
782 	 */
783 	if (amd8111s_allocate_buffers(pLayerPointers) == B_FALSE) {
784 		amd8111s_free_descriptors(pLayerPointers);
785 		*pmem_set_array = NULL;
786 		goto odl_init_failure;
787 	}
788 	milInitGlbds(pLayerPointers);
789 
790 	return (0);
791 
792 odl_init_failure:
793 	/*
794 	 * Free All memory allocated so far
795 	 */
796 	pmem_req_array = mem_set_array;
797 	while ((*pmem_req_array) && (pmem_req_array != pmem_set_array)) {
798 		switch (*pmem_req_array) {
799 		case VIRTUAL:
800 			pmem_req_array++;	/* Size */
801 			size = *(pmem_req_array);
802 			pmem_req_array++;	/* Virtual Address */
803 			if (pmem_req_array == NULL)
804 				return (1);
805 			kmem_free((int *)*pmem_req_array, size);
806 			break;
807 		}
808 		pmem_req_array++;
809 	}
810 	return (1);
811 }
812 
813 /*
814  * Allocate and initialize Tx/Rx descriptors
815  */
816 static boolean_t
817 amd8111s_allocate_descriptors(struct LayerPointers *pLayerPointers)
818 {
819 	struct odl *pOdl = pLayerPointers->pOdl;
820 	struct mil *pMil = pLayerPointers->pMil;
821 	dev_info_t *devinfo = pOdl->devinfo;
822 	uint_t length, count, i;
823 	size_t real_length;
824 
825 	/*
826 	 * Allocate Rx descriptors
827 	 */
828 	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
829 	    NULL, &pOdl->rx_desc_dma_handle) != DDI_SUCCESS) {
830 		amd8111s_log(pLayerPointers, CE_WARN,
831 		    "ddi_dma_alloc_handle for Rx desc failed");
832 		pOdl->rx_desc_dma_handle = NULL;
833 		return (B_FALSE);
834 	}
835 
836 	length = sizeof (struct rx_desc) * RX_RING_SIZE + ALIGNMENT;
837 	if (ddi_dma_mem_alloc(pOdl->rx_desc_dma_handle, length,
838 	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
839 	    NULL, (caddr_t *)&pMil->Rx_desc_original, &real_length,
840 	    &pOdl->rx_desc_acc_handle) != DDI_SUCCESS) {
841 
842 		amd8111s_log(pLayerPointers, CE_WARN,
843 		    "ddi_dma_mem_handle for Rx desc failed");
844 		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
845 		pOdl->rx_desc_dma_handle = NULL;
846 		return (B_FALSE);
847 	}
848 
849 	if (ddi_dma_addr_bind_handle(pOdl->rx_desc_dma_handle,
850 	    NULL, (caddr_t)pMil->Rx_desc_original, real_length,
851 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
852 	    NULL, &pOdl->rx_desc_dma_cookie,
853 	    &count) != DDI_SUCCESS) {
854 
855 		amd8111s_log(pLayerPointers, CE_WARN,
856 		    "ddi_dma_addr_bind_handle for Rx desc failed");
857 		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
858 		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
859 		pOdl->rx_desc_dma_handle = NULL;
860 		return (B_FALSE);
861 	}
862 	ASSERT(count == 1);
863 
864 	/* Initialize Rx descriptors related variables */
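	/*
	 * The (addr + ALIGNMENT) & ~ALIGNMENT arithmetic below assumes
	 * ALIGNMENT is defined as a mask (e.g. 0x0f), yielding a
	 * 16-byte-aligned descriptor ring.
	 */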
865 	pMil->Rx_desc = (struct rx_desc *)
866 	    ((pMil->Rx_desc_original + ALIGNMENT) & ~ALIGNMENT);
867 	pMil->Rx_desc_pa = (unsigned int)
868 	    ((pOdl->rx_desc_dma_cookie.dmac_laddress + ALIGNMENT) & ~ALIGNMENT);
869 
870 	pLayerPointers->pMdl->init_blk->RDRA = pMil->Rx_desc_pa;
871 
872 
873 	/*
874 	 * Allocate Tx descriptors
875 	 */
876 	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
877 	    NULL, &pOdl->tx_desc_dma_handle) != DDI_SUCCESS) {
878 		amd8111s_log(pLayerPointers, CE_WARN,
879 		    "ddi_dma_alloc_handle for Tx desc failed");
880 		goto allocate_desc_fail;
881 	}
882 
883 	length = sizeof (struct tx_desc) * TX_RING_SIZE + ALIGNMENT;
884 	if (ddi_dma_mem_alloc(pOdl->tx_desc_dma_handle, length,
885 	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
886 	    NULL, (caddr_t *)&pMil->Tx_desc_original, &real_length,
887 	    &pOdl->tx_desc_acc_handle) != DDI_SUCCESS) {
888 
889 		amd8111s_log(pLayerPointers, CE_WARN,
890 		    "ddi_dma_mem_handle for Tx desc failed");
891 		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
892 		goto allocate_desc_fail;
893 	}
894 
895 	if (ddi_dma_addr_bind_handle(pOdl->tx_desc_dma_handle,
896 	    NULL, (caddr_t)pMil->Tx_desc_original, real_length,
897 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
898 	    NULL, &pOdl->tx_desc_dma_cookie,
899 	    &count) != DDI_SUCCESS) {
900 
901 		amd8111s_log(pLayerPointers, CE_WARN,
902 		    "ddi_dma_addr_bind_handle for Tx desc failed");
903 		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
904 		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
905 		goto allocate_desc_fail;
906 	}
907 	ASSERT(count == 1);
908 	/* Set the DMA area to all zeros */
909 	bzero((caddr_t)pMil->Tx_desc_original, length);
910 
911 	/* Initialize Tx descriptors related variables */
912 	pMil->Tx_desc = (struct tx_desc *)
913 	    ((pMil->Tx_desc_original + ALIGNMENT) & ~ALIGNMENT);
914 	pMil->pNonphysical->TxDescQRead = pMil->Tx_desc;
915 	pMil->pNonphysical->TxDescQWrite = pMil->Tx_desc;
916 	pMil->pNonphysical->TxDescQStart = pMil->Tx_desc;
917 	pMil->pNonphysical->TxDescQEnd = &(pMil->Tx_desc[TX_RING_SIZE -1]);
918 
919 	/* Physical Addr of Tx_desc_original & Tx_desc */
920 	pLayerPointers->pMil->Tx_desc_pa =
921 	    ((pOdl->tx_desc_dma_cookie.dmac_laddress + ALIGNMENT) &
922 	    ~ALIGNMENT);
923 
924 	/* Setting the reserved bits in the tx descriptors */
925 	for (i = 0; i < TX_RING_SIZE; i++) {
926 		pMil->pNonphysical->TxDescQWrite->Tx_RES0 = 0x0f;
927 		pMil->pNonphysical->TxDescQWrite->Tx_OWN = 0;
928 		pMil->pNonphysical->TxDescQWrite++;
929 	}
930 	pMil->pNonphysical->TxDescQWrite = pMil->pNonphysical->TxDescQStart;
931 
932 	pLayerPointers->pMdl->init_blk->TDRA = pMil->Tx_desc_pa;
933 
934 	return (B_TRUE);
935 
936 allocate_desc_fail:
937 	pOdl->tx_desc_dma_handle = NULL;
938 	(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
939 	ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
940 	ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
941 	pOdl->rx_desc_dma_handle = NULL;
942 	return (B_FALSE);
943 }
944 
945 /*
946  * Free Tx/Rx descriptors
947  */
948 static void
949 amd8111s_free_descriptors(struct LayerPointers *pLayerPointers)
950 {
951 	struct odl *pOdl = pLayerPointers->pOdl;
952 
953 	/* Free Rx descriptors */
954 	if (pOdl->rx_desc_dma_handle) {
955 		(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
956 		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
957 		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
958 		pOdl->rx_desc_dma_handle = NULL;
959 	}
960 
961 	/* Free Tx descriptors */
962 	if (pOdl->tx_desc_dma_handle) {
963 		(void) ddi_dma_unbind_handle(pOdl->tx_desc_dma_handle);
964 		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
965 		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
966 		pOdl->tx_desc_dma_handle = NULL;
967 	}
968 }
969 
970 /*
971  * Allocate Tx/Rx Ring buffer
972  */
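/*
 * The ring of ring_size messages (msg_size bytes each) is carved out of
 * at most AMD8111S_SPLIT separately allocated and DMA-bound "trunks";
 * msg_buf[] records each message's virtual and physical address within
 * its trunk.
 */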
973 static boolean_t
974 amd8111s_alloc_dma_ringbuf(struct LayerPointers *pLayerPointers,
975 			struct amd8111s_dma_ringbuf *pRing,
976 			uint32_t ring_size, uint32_t msg_size)
977 {
978 	uint32_t idx, msg_idx = 0, msg_acc;
979 	dev_info_t *devinfo = pLayerPointers->pOdl->devinfo;
980 	size_t real_length;
981 	uint_t count = 0;
982 
983 	ASSERT(pcn_buff_dma_attr_t.dma_attr_align == 1);
984 	pRing->dma_buf_sz = msg_size;
985 	pRing->ring_size = ring_size;
986 	pRing->trunk_num = AMD8111S_SPLIT;
987 	pRing->buf_sz = msg_size * ring_size;
988 	if (ring_size < pRing->trunk_num)
989 		pRing->trunk_num = ring_size;
990 	ASSERT((pRing->buf_sz % pRing->trunk_num) == 0);
991 
992 	pRing->trunk_sz = pRing->buf_sz / pRing->trunk_num;
993 	ASSERT((pRing->trunk_sz % pRing->dma_buf_sz) == 0);
994 
995 	pRing->msg_buf = kmem_zalloc(sizeof (struct amd8111s_msgbuf) *
996 	    ring_size, KM_NOSLEEP);
997 	pRing->dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
998 	    pRing->trunk_num, KM_NOSLEEP);
999 	pRing->acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
1000 	    pRing->trunk_num, KM_NOSLEEP);
1001 	pRing->dma_cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) *
1002 	    pRing->trunk_num, KM_NOSLEEP);
1003 	pRing->trunk_addr = kmem_zalloc(sizeof (caddr_t) *
1004 	    pRing->trunk_num, KM_NOSLEEP);
1005 	if (pRing->msg_buf == NULL || pRing->dma_hdl == NULL ||
1006 	    pRing->acc_hdl == NULL || pRing->trunk_addr == NULL ||
1007 	    pRing->dma_cookie == NULL) {
1008 		amd8111s_log(pLayerPointers, CE_NOTE,
1009 		    "kmem_zalloc failed");
1010 		goto failed;
1011 	}
1012 
1013 	for (idx = 0; idx < pRing->trunk_num; ++idx) {
1014 		if (ddi_dma_alloc_handle(devinfo, &pcn_buff_dma_attr_t,
1015 		    DDI_DMA_SLEEP, NULL, &(pRing->dma_hdl[idx]))
1016 		    != DDI_SUCCESS) {
1017 
1018 			amd8111s_log(pLayerPointers, CE_WARN,
1019 			    "ddi_dma_alloc_handle failed");
1020 			goto failed;
1021 		} else if (ddi_dma_mem_alloc(pRing->dma_hdl[idx],
1022 		    pRing->trunk_sz, &pcn_acc_attr, DDI_DMA_STREAMING,
1023 		    DDI_DMA_SLEEP, NULL,
1024 		    (caddr_t *)&(pRing->trunk_addr[idx]),
1025 		    (size_t *)(&real_length), &pRing->acc_hdl[idx])
1026 		    != DDI_SUCCESS) {
1027 
1028 			amd8111s_log(pLayerPointers, CE_WARN,
1029 			    "ddi_dma_mem_alloc failed");
1030 			goto failed;
1031 		} else if (real_length != pRing->trunk_sz) {
1032 			amd8111s_log(pLayerPointers, CE_WARN,
1033 			    "ddi_dma_mem_alloc failed");
1034 			goto failed;
1035 		} else if (ddi_dma_addr_bind_handle(pRing->dma_hdl[idx],
1036 		    NULL, (caddr_t)pRing->trunk_addr[idx], real_length,
1037 		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
1038 		    &pRing->dma_cookie[idx], &count) != DDI_DMA_MAPPED) {
1039 
1040 			amd8111s_log(pLayerPointers, CE_WARN,
1041 			    "ddi_dma_addr_bind_handle failed");
1042 			goto failed;
1043 		} else {
1044 			for (msg_acc = 0;
1045 			    msg_acc < pRing->trunk_sz / pRing->dma_buf_sz;
1046 			    ++ msg_acc) {
1047 				pRing->msg_buf[msg_idx].offset =
1048 				    msg_acc * pRing->dma_buf_sz;
1049 				pRing->msg_buf[msg_idx].vir_addr =
1050 				    pRing->trunk_addr[idx] +
1051 				    pRing->msg_buf[msg_idx].offset;
1052 				pRing->msg_buf[msg_idx].phy_addr =
1053 				    pRing->dma_cookie[idx].dmac_laddress +
1054 				    pRing->msg_buf[msg_idx].offset;
1055 				pRing->msg_buf[msg_idx].p_hdl =
1056 				    pRing->dma_hdl[idx];
1057 				msg_idx ++;
1058 			}
1059 		}
1060 	}
1061 
1062 	pRing->free = pRing->msg_buf;
1063 	pRing->next = pRing->msg_buf;
1064 	pRing->curr = pRing->msg_buf;
1065 
1066 	return (B_TRUE);
1067 failed:
1068 	amd8111s_free_dma_ringbuf(pRing);
1069 	return (B_FALSE);
1070 }
1071 
1072 /*
1073  * Free Tx/Rx ring buffer
1074  */
1075 static void
1076 amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *pRing)
1077 {
1078 	int idx;
1079 
1080 	if (pRing->dma_cookie != NULL) {
1081 		for (idx = 0; idx < pRing->trunk_num; idx ++) {
1082 			if (pRing->dma_cookie[idx].dmac_laddress == 0) {
1083 				break;
1084 			}
1085 			(void) ddi_dma_unbind_handle(pRing->dma_hdl[idx]);
1086 		}
1087 		kmem_free(pRing->dma_cookie,
1088 		    sizeof (ddi_dma_cookie_t) * pRing->trunk_num);
1089 	}
1090 
1091 	if (pRing->acc_hdl != NULL) {
1092 		for (idx = 0; idx < pRing->trunk_num; idx ++) {
1093 			if (pRing->acc_hdl[idx] == NULL)
1094 				break;
1095 			ddi_dma_mem_free(&pRing->acc_hdl[idx]);
1096 		}
1097 		kmem_free(pRing->acc_hdl,
1098 		    sizeof (ddi_acc_handle_t) * pRing->trunk_num);
1099 	}
1100 
1101 	if (pRing->dma_hdl != NULL) {
1102 		for (idx = 0; idx < pRing->trunk_num; idx ++) {
1103 			if (pRing->dma_hdl[idx] == 0) {
1104 				break;
1105 			}
1106 			ddi_dma_free_handle(&pRing->dma_hdl[idx]);
1107 		}
1108 		kmem_free(pRing->dma_hdl,
1109 		    sizeof (ddi_dma_handle_t) * pRing->trunk_num);
1110 	}
1111 
1112 	if (pRing->msg_buf != NULL) {
1113 		kmem_free(pRing->msg_buf,
1114 		    sizeof (struct amd8111s_msgbuf) * pRing->ring_size);
1115 	}
1116 
1117 	if (pRing->trunk_addr != NULL) {
1118 		kmem_free(pRing->trunk_addr,
1119 		    sizeof (caddr_t) * pRing->trunk_num);
1120 	}
1121 
1122 	bzero(pRing, sizeof (*pRing));
1123 }
1124 
1125 
1126 /*
1127  * Allocate all Tx buffers.
1128  * Allocate an Rx buffer for each Rx descriptor, then
1129  * call the mil routine to fill each Rx buffer's physical
1130  * address into the Rx descriptors.
1131  */
1132 static boolean_t
1133 amd8111s_allocate_buffers(struct LayerPointers *pLayerPointers)
1134 {
1135 	struct odl *pOdl = pLayerPointers->pOdl;
1136 
1137 	/*
1138 	 * Allocate rx Buffers
1139 	 */
1140 	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->rx_buf,
1141 	    RX_RING_SIZE, RX_BUF_SIZE) == B_FALSE) {
1142 		amd8111s_log(pLayerPointers, CE_WARN,
1143 		    "amd8111s_alloc_dma_ringbuf for tx failed");
1144 		goto allocate_buf_fail;
1145 	}
1146 
1147 	/*
1148 	 * Allocate Tx buffers
1149 	 */
1150 	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->tx_buf,
1151 	    TX_COALESC_SIZE, TX_BUF_SIZE) == B_FALSE) {
1152 		amd8111s_log(pLayerPointers, CE_WARN,
1153 		    "amd8111s_alloc_dma_ringbuf for tx failed");
1154 		goto allocate_buf_fail;
1155 	}
1156 
1157 	/*
1158 	 * Initialize the mil Queues
1159 	 */
1160 	milInitGlbds(pLayerPointers);
1161 
1162 	milInitRxQ(pLayerPointers);
1163 
1164 	return (B_TRUE);
1165 
1166 allocate_buf_fail:
1167 
1168 	amd8111s_log(pLayerPointers, CE_WARN,
1169 	    "amd8111s_allocate_buffers failed");
1170 	return (B_FALSE);
1171 }
1172 
1173 /*
1174  * Free all Rx/Tx buffer
1175  */
1176 
1177 static void
1178 amd8111s_free_buffers(struct LayerPointers *pLayerPointers)
1179 {
1180 	/* Free Tx buffers */
1181 	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->tx_buf);
1182 
1183 	/* Free Rx Buffers */
1184 	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->rx_buf);
1185 }
1186 
1187 /*
1188  * Try to recycle all the descriptors and Tx buffers
1189  * which are already freed by hardware.
1190  */
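/*
 * NEXT() (from amd8111s_main.h) presumably advances a ring-buffer pointer
 * with wraparound; here the 'free' pointer chases the hardware, reclaiming
 * buffers whose descriptors have Tx_OWN cleared.
 */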
1191 static int
1192 amd8111s_recycle_tx(struct LayerPointers *pLayerPointers)
1193 {
1194 	struct nonphysical *pNonphysical;
1195 	uint32_t count = 0;
1196 
1197 	pNonphysical = pLayerPointers->pMil->pNonphysical;
1198 	while (pNonphysical->TxDescQRead->Tx_OWN == 0 &&
1199 	    pNonphysical->TxDescQRead != pNonphysical->TxDescQWrite) {
1200 		pLayerPointers->pOdl->tx_buf.free =
1201 		    NEXT(pLayerPointers->pOdl->tx_buf, free);
1202 		pNonphysical->TxDescQRead++;
1203 		if (pNonphysical->TxDescQRead > pNonphysical->TxDescQEnd) {
1204 			pNonphysical->TxDescQRead = pNonphysical->TxDescQStart;
1205 		}
1206 		count ++;
1207 	}
1208 
1209 	if (pLayerPointers->pMil->tx_reschedule)
1210 		ddi_trigger_softintr(pLayerPointers->pOdl->drain_id);
1211 
1212 	return (count);
1213 }
1214 
1215 /*
1216  * Take packets queued in the Tx buffer, fill Tx descriptors with
1217  * their lengths and physical addresses, then trigger the hardware.
1218  */
1219 static void
1220 amd8111s_send_serial(struct LayerPointers *pLayerPointers)
1221 {
1222 	struct nonphysical *pNonphysical;
1223 	uint32_t count;
1224 
1225 	pNonphysical = pLayerPointers->pMil->pNonphysical;
1226 
1227 	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);
1228 
1229 	for (count = 0; count < AMD8111S_SEND_MAX; count ++) {
1230 		if (pLayerPointers->pOdl->tx_buf.curr ==
1231 		    pLayerPointers->pOdl->tx_buf.next) {
1232 			break;
1233 		}
1234 		/* If the Tx descriptor ring is full, try to reclaim descriptors */
1235 		if (((pNonphysical->TxDescQWrite + 1 >
1236 		    pNonphysical->TxDescQEnd) ? pNonphysical->TxDescQStart :
1237 		    (pNonphysical->TxDescQWrite + 1)) ==
1238 		    pNonphysical->TxDescQRead)
1239 			if (amd8111s_recycle_tx(pLayerPointers) == 0) {
1240 				pLayerPointers->pOdl
1241 				    ->statistics.tx_no_descriptor ++;
1242 				break;
1243 			}
1244 
1245 		/* Fill packet length */
1246 		pNonphysical->TxDescQWrite->Tx_BCNT = (uint16_t)pLayerPointers
1247 		    ->pOdl->tx_buf.curr->msg_size;
1248 
1249 		/* Fill physical buffer address */
1250 		pNonphysical->TxDescQWrite->Tx_Base_Addr = (unsigned int)
1251 		    pLayerPointers->pOdl->tx_buf.curr->phy_addr;
1252 
1253 		pNonphysical->TxDescQWrite->Tx_SOP = 1;
1254 		pNonphysical->TxDescQWrite->Tx_EOP = 1;
1255 		pNonphysical->TxDescQWrite->Tx_ADD_FCS = 1;
1256 		pNonphysical->TxDescQWrite->Tx_LTINT = 1;
1257 		pNonphysical->TxDescQWrite->Tx_USPACE = 0;
1258 		pNonphysical->TxDescQWrite->Tx_OWN = 1;
1259 
1260 		pNonphysical->TxDescQWrite++;
1261 		if (pNonphysical->TxDescQWrite > pNonphysical->TxDescQEnd) {
1262 			pNonphysical->TxDescQWrite = pNonphysical->TxDescQStart;
1263 		}
1264 
1265 		pLayerPointers->pOdl->tx_buf.curr =
1266 		    NEXT(pLayerPointers->pOdl->tx_buf, curr);
1267 
1268 	}
1269 
1270 	pLayerPointers->pOdl->statistics.tx_ok_packets += count;
1271 
1272 	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);
1273 
1274 	/* Call mdlTransmit to send the pkt out on the network */
1275 	mdlTransmit(pLayerPointers);
1276 
1277 }
1278 
1279 /*
1280  * Softintr entry point: try to send out the packets in the Tx buffer.
1281  * If tx_reschedule is set, call mac_tx_update() to re-enable
1282  * transmission.
1283  */
1284 static uint_t
1285 amd8111s_send_drain(caddr_t arg)
1286 {
1287 	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;
1288 
1289 	amd8111s_send_serial(pLayerPointers);
1290 
1291 	if (pLayerPointers->pMil->tx_reschedule &&
1292 	    NEXT(pLayerPointers->pOdl->tx_buf, next) !=
1293 	    pLayerPointers->pOdl->tx_buf.free) {
1294 		mac_tx_update(pLayerPointers->pOdl->mh);
1295 		pLayerPointers->pMil->tx_reschedule = B_FALSE;
1296 	}
1297 
1298 	return (DDI_INTR_CLAIMED);
1299 }
1300 
1301 /*
1302  * Get a Tx buffer
1303  */
1304 static struct amd8111s_msgbuf *
1305 amd8111s_getTxbuf(struct LayerPointers *pLayerPointers)
1306 {
1307 	struct amd8111s_msgbuf *tmp, *next;
1308 
1309 	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);
1310 	next = NEXT(pLayerPointers->pOdl->tx_buf, next);
1311 	if (next == pLayerPointers->pOdl->tx_buf.free) {
1312 		tmp = NULL;
1313 	} else {
1314 		tmp = pLayerPointers->pOdl->tx_buf.next;
1315 		pLayerPointers->pOdl->tx_buf.next = next;
1316 	}
1317 	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);
1318 
1319 	return (tmp);
1320 }
1321 
1322 static boolean_t
1323 amd8111s_send(struct LayerPointers *pLayerPointers, mblk_t *mp)
1324 {
1325 	struct odl *pOdl;
1326 	size_t frag_len;
1327 	mblk_t *tmp;
1328 	struct amd8111s_msgbuf *txBuf;
1329 	uint8_t *pMsg;
1330 
1331 	pOdl = pLayerPointers->pOdl;
1332 
1333 	/* alloc send buffer */
1334 	txBuf = amd8111s_getTxbuf(pLayerPointers);
1335 	if (txBuf == NULL) {
1336 		pOdl->statistics.tx_no_buffer ++;
1337 		pLayerPointers->pMil->tx_reschedule = B_TRUE;
1338 		amd8111s_send_serial(pLayerPointers);
1339 		return (B_FALSE);
1340 	}
1341 
1342 	/* copy packet to send buffer */
1343 	txBuf->msg_size = 0;
1344 	pMsg = (uint8_t *)txBuf->vir_addr;
1345 	for (tmp = mp; tmp; tmp = tmp->b_cont) {
1346 		frag_len = MBLKL(tmp);
1347 		bcopy(tmp->b_rptr, pMsg, frag_len);
1348 		txBuf->msg_size += frag_len;
1349 		pMsg += frag_len;
1350 	}
1351 	freemsg(mp);
1352 
1353 	amd8111s_send_serial(pLayerPointers);
1354 
1355 	return (B_TRUE);
1356 }
1357 
1358 /*
1359  * (GLD Entry Point) Send the message block to lower layer
1360  */
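/*
 * Per the GLDv3 mc_tx contract, the unsent remainder of the chain is
 * returned (NULL if everything was consumed); the framework retries it
 * once mac_tx_update() is called.
 */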
1361 static mblk_t *
1362 amd8111s_m_tx(void *arg, mblk_t *mp)
1363 {
1364 	struct LayerPointers *pLayerPointers = arg;
1365 	mblk_t *next;
1366 
1367 	rw_enter(&pLayerPointers->pOdl->chip_lock, RW_READER);
1368 	if (!pLayerPointers->run) {
1369 		pLayerPointers->pOdl->statistics.tx_afterunplumb ++;
1370 		freemsgchain(mp);
1371 		mp = NULL;
1372 	}
1373 
1374 	while (mp != NULL) {
1375 		next = mp->b_next;
1376 		mp->b_next = NULL;
1377 		if (!amd8111s_send(pLayerPointers, mp)) {
1378 			/* Send fail */
1379 			mp->b_next = next;
1380 			break;
1381 		}
1382 		mp = next;
1383 	}
1384 
1385 	rw_exit(&pLayerPointers->pOdl->chip_lock);
1386 	return (mp);
1387 }
1388 
1389 /*
1390  * (GLD Entry Point) Interrupt Service Routine
1391  */
1392 static uint_t
1393 amd8111s_intr(caddr_t arg)
1394 {
1395 	unsigned int intrCauses;
1396 	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;
1397 
1398 	/* Read the interrupt status from mdl */
1399 	intrCauses = mdlReadInterrupt(pLayerPointers);
1400 
1401 	if (intrCauses == 0) {
1402 		pLayerPointers->pOdl->statistics.intr_OTHER ++;
1403 		return (DDI_INTR_UNCLAIMED);
1404 	}
1405 
1406 	if (intrCauses & LCINT) {
1407 		if (mdlReadLink(pLayerPointers) == LINK_UP) {
1408 			mdlGetActiveMediaInfo(pLayerPointers);
1409 			/* Link status changed */
1410 			if (pLayerPointers->pOdl->LinkStatus !=
1411 			    LINK_STATE_UP) {
1412 				pLayerPointers->pOdl->LinkStatus =
1413 				    LINK_STATE_UP;
1414 				mac_link_update(pLayerPointers->pOdl->mh,
1415 				    LINK_STATE_UP);
1416 			}
1417 		} else {
1418 			if (pLayerPointers->pOdl->LinkStatus !=
1419 			    LINK_STATE_DOWN) {
1420 				pLayerPointers->pOdl->LinkStatus =
1421 				    LINK_STATE_DOWN;
1422 				mac_link_update(pLayerPointers->pOdl->mh,
1423 				    LINK_STATE_DOWN);
1424 			}
1425 		}
1426 	}
1427 	/*
1428 	 * RINT0: Receive Interrupt is set by the controller after the last
1429 	 * descriptor of a receive frame for this ring has been updated by
1430 	 * writing a 0 to the OWNership bit.
1431 	 */
1432 	if (intrCauses & RINT0) {
1433 		pLayerPointers->pOdl->statistics.intr_RINT0 ++;
1434 		amd8111s_receive(pLayerPointers);
1435 	}
1436 
1437 	/*
1438 	 * TINT0: Transmit Interrupt is set by the controller after the OWN bit
1439 	 * in the last descriptor of a transmit frame in this particular ring
1440 	 * has been cleared to indicate the frame has been copied to the
1441 	 * transmit FIFO.
1442 	 */
1443 	if (intrCauses & TINT0) {
1444 		pLayerPointers->pOdl->statistics.intr_TINT0 ++;
1445 		/*
1446 		 * If the descriptor ring is empty but the Tx buffer still
1447 		 * holds packets, drain the Tx buffer.
1448 		 */
1449 		amd8111s_send_serial(pLayerPointers);
1450 	}
1451 
1452 	if (intrCauses & STINT) {
1453 		pLayerPointers->pOdl->statistics.intr_STINT ++;
1454 	}
1455 
1456 
1457 	return (DDI_INTR_CLAIMED);
1458 }
1459 
1460 /*
1461  * Re-initialize the driver's data structures.
1462  */
1463 static void
1464 amd8111s_sw_reset(struct LayerPointers *pLayerPointers)
1465 {
1466 	/* Reset all Tx/Rx queues and descriptors */
1467 	milResetTxQ(pLayerPointers);
1468 	milInitRxQ(pLayerPointers);
1469 }
1470 
1471 /*
1472  * Wait for all pending Tx packets to be transmitted.
1473  */
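/*
 * This polls the Tx descriptors' OWN bits in 1 ms steps, bounding the
 * wait at roughly 30 ms; tx_draintime records how many iterations were
 * needed.
 */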
1474 static void
1475 amd8111s_tx_drain(struct LayerPointers *adapter)
1476 {
1477 	struct tx_desc *pTx_desc = adapter->pMil->pNonphysical->TxDescQStart;
1478 	int i, desc_count = 0;
1479 	for (i = 0; i < 30; i++) {
1480 		while ((pTx_desc->Tx_OWN == 0) && (desc_count < TX_RING_SIZE)) {
1481 			/* This packet has been transmitted */
1482 			pTx_desc ++;
1483 			desc_count ++;
1484 		}
1485 		if (desc_count == TX_RING_SIZE) {
1486 			break;
1487 		}
1488 		/* Wait 1 ms */
1489 		drv_usecwait(1000);
1490 	}
1491 	adapter->pOdl->statistics.tx_draintime = i;
1492 }
1493 
1494 /*
1495  * (GLD Entry Point) Start the card. Called at
1496  * "ifconfig plumb" time.
1497  */
1498 static int
1499 amd8111s_m_start(void *arg)
1500 {
1501 	struct LayerPointers *pLayerPointers = arg;
1502 	struct odl *pOdl = pLayerPointers->pOdl;
1503 
1504 	amd8111s_sw_reset(pLayerPointers);
1505 	mdlHWReset(pLayerPointers);
1506 	rw_enter(&pOdl->chip_lock, RW_WRITER);
1507 	pLayerPointers->run = B_TRUE;
1508 	rw_exit(&pOdl->chip_lock);
1509 	return (0);
1510 }
1511 
1512 /*
1513  * (GLD Entry Point) Stop the card. Called at
1514  * "ifconfig unplumb" time.
1515  */
1516 static void
1517 amd8111s_m_stop(void *arg)
1518 {
1519 	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;
1520 	struct odl *pOdl = pLayerPointers->pOdl;
1521 
1522 	/* Ensure all pending Tx packets are sent */
1523 	amd8111s_tx_drain(pLayerPointers);
1524 	/*
1525 	 * Stop the controller and disable the controller interrupt
1526 	 */
1527 	rw_enter(&pOdl->chip_lock, RW_WRITER);
1528 	mdlStopChip(pLayerPointers);
1529 	pLayerPointers->run = B_FALSE;
1530 	rw_exit(&pOdl->chip_lock);
1531 }
1532 
1533 /*
1534  *	Clean up all allocated resources.
1535  */
1536 static void
1537 amd8111s_free_resource(struct LayerPointers *pLayerPointers)
1538 {
1539 	unsigned long mem_free_array[100];
1540 	unsigned long *pmem_free_array, size;
1541 
1542 	/* Free Rx/Tx descriptors */
1543 	amd8111s_free_descriptors(pLayerPointers);
1544 
1545 	/* Free memory on lower layers */
1546 	milFreeResources(pLayerPointers, mem_free_array);
1547 	pmem_free_array = mem_free_array;
1548 	while (*pmem_free_array) {
1549 		switch (*pmem_free_array) {
1550 		case VIRTUAL:
1551 			size = *(++pmem_free_array);
1552 			pmem_free_array++;
1553 			kmem_free((void *)*(pmem_free_array), size);
1554 			break;
1555 		}
1556 		pmem_free_array++;
1557 	}
1558 
1559 	amd8111s_free_buffers(pLayerPointers);
1560 }
1561 
1562 /*
1563  * (GLD Entry Point) Add or delete multicast addresses.
1564  *
1565  */
1566 static int
1567 amd8111s_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
1568 {
1569 	struct LayerPointers *pLayerPointers = arg;
1570 
1571 	if (add) {
1572 		/* Add a multicast entry */
1573 		mdlAddMulticastAddress(pLayerPointers, (UCHAR *)addr);
1574 	} else {
1575 		/* Delete a multicast entry */
1576 		mdlDeleteMulticastAddress(pLayerPointers, (UCHAR *)addr);
1577 	}
1578 
1579 	return (0);
1580 }
1581 
1582 #ifdef AMD8111S_DEBUG
1583 /*
1584  * The size of MIB registers is only 32 bits. Dump them before one
1585  * of them overflows.
1586  */
1587 static void
1588 amd8111s_dump_mib(struct LayerPointers *pLayerPointers)
1589 {
1590 	struct amd8111s_statistics *adapterStat;
1591 
1592 	adapterStat = &pLayerPointers->pOdl->statistics;
1593 
1594 	adapterStat->mib_dump_counter ++;
1595 
1596 	/*
1597 	 * Rx Counters
1598 	 */
1599 	adapterStat->rx_mib_unicst_packets +=
1600 	    mdlReadMib(pLayerPointers, RcvUniCastPkts);
1601 	adapterStat->rx_mib_multicst_packets +=
1602 	    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
1603 	adapterStat->rx_mib_broadcst_packets +=
1604 	    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
1605 	adapterStat->rx_mib_macctrl_packets +=
1606 	    mdlReadMib(pLayerPointers, RcvMACCtrl);
1607 	adapterStat->rx_mib_flowctrl_packets +=
1608 	    mdlReadMib(pLayerPointers, RcvFlowCtrl);
1609 
1610 	adapterStat->rx_mib_bytes +=
1611 	    mdlReadMib(pLayerPointers, RcvOctets);
1612 	adapterStat->rx_mib_good_bytes +=
1613 	    mdlReadMib(pLayerPointers, RcvGoodOctets);
1614 
1615 	adapterStat->rx_mib_undersize_packets +=
1616 	    mdlReadMib(pLayerPointers, RcvUndersizePkts);
1617 	adapterStat->rx_mib_oversize_packets +=
1618 	    mdlReadMib(pLayerPointers, RcvOversizePkts);
1619 
1620 	adapterStat->rx_mib_drop_packets +=
1621 	    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
1622 	adapterStat->rx_mib_align_err_packets +=
1623 	    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
1624 	adapterStat->rx_mib_fcs_err_packets +=
1625 	    mdlReadMib(pLayerPointers, RcvFCSErrors);
1626 	adapterStat->rx_mib_symbol_err_packets +=
1627 	    mdlReadMib(pLayerPointers, RcvSymbolErrors);
1628 	adapterStat->rx_mib_miss_packets +=
1629 	    mdlReadMib(pLayerPointers, RcvMissPkts);
1630 
1631 	/*
1632 	 * Tx Counters
1633 	 */
1634 	adapterStat->tx_mib_packets +=
1635 	    mdlReadMib(pLayerPointers, XmtPackets);
1636 	adapterStat->tx_mib_multicst_packets +=
1637 	    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
1638 	adapterStat->tx_mib_broadcst_packets +=
1639 	    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
1640 	adapterStat->tx_mib_flowctrl_packets +=
1641 	    mdlReadMib(pLayerPointers, XmtFlowCtrl);
1642 
1643 	adapterStat->tx_mib_bytes +=
1644 	    mdlReadMib(pLayerPointers, XmtOctets);
1645 
1646 	adapterStat->tx_mib_defer_trans_packets +=
1647 	    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
1648 	adapterStat->tx_mib_collision_packets +=
1649 	    mdlReadMib(pLayerPointers, XmtCollisions);
1650 	adapterStat->tx_mib_one_coll_packets +=
1651 	    mdlReadMib(pLayerPointers, XmtOneCollision);
1652 	adapterStat->tx_mib_multi_coll_packets +=
1653 	    mdlReadMib(pLayerPointers, XmtMultipleCollision);
1654 	adapterStat->tx_mib_late_coll_packets +=
1655 	    mdlReadMib(pLayerPointers, XmtLateCollision);
1656 	adapterStat->tx_mib_ex_coll_packets +=
1657 	    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
1658 
1659 
1660 	/* Clear all MIB registers */
1661 	WRITE_REG16(pLayerPointers, pLayerPointers->pMdl->Mem_Address
1662 	    + MIB_ADDR, MIB_CLEAR);
1663 }
1664 #endif
1665 
1666 /*
1667  * (GLD Entry Point) Set/unset promiscuous mode
1668  */
1669 static int
1670 amd8111s_m_promisc(void *arg, boolean_t on)
1671 {
1672 	struct LayerPointers *pLayerPointers = arg;
1673 
1674 	if (on) {
1675 		mdlSetPromiscuous(pLayerPointers);
1676 	} else {
1677 		mdlDisablePromiscuous(pLayerPointers);
1678 	}
1679 
1680 	return (0);
1681 }
1682 
1683 /*
1684  * (GLD Entry Point) Change the MAC address of the card.
1685  */
1686 static int
1687 amd8111s_m_unicst(void *arg, const uint8_t *macaddr)
1688 {
1689 	struct LayerPointers *pLayerPointers = arg;
1690 
1691 	mdlDisableInterrupt(pLayerPointers);
1692 	mdlSetMacAddress(pLayerPointers, (unsigned char *)macaddr);
1693 	mdlEnableInterrupt(pLayerPointers);
1694 
1695 	return (0);
1696 }
1697 
1698 /*
1699  * Reset the card
1700  */
1701 void
1702 amd8111s_reset(struct LayerPointers *pLayerPointers)
1703 {
1704 	amd8111s_sw_reset(pLayerPointers);
1705 	mdlHWReset(pLayerPointers);
1706 }
1707 
1708 /*
1709  * attach(9E) -- Attach a device to the system
1710  *
1711  * Called once for each board after it is successfully probed.
1712  * It will:
1713  * 	a. create a minor device node for the instance;
1714  *	b. allocate and initialize the four layers (call odlInit);
1715  *	c. get the MAC address;
1716  *	d. set pLayerPointers as the GLD private pointer;
1717  *	e. register with the GLD.
1718  * If any step fails, it cleans up and returns DDI_FAILURE;
1719  * otherwise it returns DDI_SUCCESS.
1720  */
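/*
 * attach_progress accumulates AMD8111S_ATTACH_* flags as each step
 * completes, so amd8111s_unattach() can unwind exactly the work that was
 * done if a later step fails.
 */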
1721 static int
1722 amd8111s_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
1723 {
1724 	mac_register_t *macp;
1725 	struct LayerPointers *pLayerPointers;
1726 	struct odl *pOdl;
1727 	ddi_acc_handle_t *pci_handle;
1728 	ddi_device_acc_attr_t dev_attr;
1729 	caddr_t addrp = NULL;
1730 
1731 	switch (cmd) {
1732 	case DDI_ATTACH:
1733 		break;
1734 	default:
1735 		return (DDI_FAILURE);
1736 	}
1737 
1738 	pLayerPointers = (struct LayerPointers *)
1739 	    kmem_zalloc(sizeof (struct LayerPointers), KM_SLEEP);
1740 	amd8111sadapter = pLayerPointers;
1741 
1742 	/* Get device instance number */
1743 	pLayerPointers->instance = ddi_get_instance(devinfo);
1744 	ddi_set_driver_private(devinfo, (caddr_t)pLayerPointers);
1745 
1746 	pOdl = (struct odl *)kmem_zalloc(sizeof (struct odl), KM_SLEEP);
1747 	pLayerPointers->pOdl = pOdl;
1748 
1749 	pOdl->devinfo = devinfo;
1750 
1751 	/*
1752 	 * Here, we only allocate memory for struct odl and initialize it.
1753 	 * All other memory allocation and initialization is done in odlInit
1754 	 * later in this routine.
1755 	 */
1756 	if (ddi_get_iblock_cookie(devinfo, 0, &pLayerPointers->pOdl->iblock)
1757 	    != DDI_SUCCESS) {
1758 		amd8111s_log(pLayerPointers, CE_NOTE,
1759 		    "attach: get iblock cookies failed");
1760 		goto attach_failure;
1761 	}
1762 
1763 	rw_init(&pOdl->chip_lock, NULL, RW_DRIVER, (void *)pOdl->iblock);
1764 	mutex_init(&pOdl->mdlSendLock, "amd8111s Send Protection Lock",
1765 	    MUTEX_DRIVER, (void *)pOdl->iblock);
1766 	mutex_init(&pOdl->mdlRcvLock, "amd8111s Rcv Protection Lock",
1767 	    MUTEX_DRIVER, (void *)pOdl->iblock);
1768 
1769 	/* Setup PCI space */
1770 	if (pci_config_setup(devinfo, &pOdl->pci_handle) != DDI_SUCCESS) {
1771 		return (DDI_FAILURE);
1772 	}
1773 	pLayerPointers->attach_progress = AMD8111S_ATTACH_PCI;
1774 	pci_handle = &pOdl->pci_handle;
1775 
1776 	pOdl->vendor_id = pci_config_get16(*pci_handle, PCI_CONF_VENID);
1777 	pOdl->device_id = pci_config_get16(*pci_handle, PCI_CONF_DEVID);
1778 
1779 	/*
1780 	 * Allocate and initialize all resource and map device registers.
1781 	 * If failed, it returns a non-zero value.
1782 	 */
1783 	if (amd8111s_odlInit(pLayerPointers) != 0) {
1784 		goto attach_failure;
1785 	}
1786 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESOURCE;
1787 
1788 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1789 	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1790 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1791 
1792 	if (ddi_regs_map_setup(devinfo, 1, &addrp, 0,  4096, &dev_attr,
1793 	    &(pLayerPointers->pOdl->MemBasehandle)) != 0) {
1794 		amd8111s_log(pLayerPointers, CE_NOTE,
1795 		    "attach: ddi_regs_map_setup failed");
1796 		goto attach_failure;
1797 	}
1798 	pLayerPointers->pMdl->Mem_Address = (unsigned long)addrp;
1799 
1800 	/* Initialize HW */
1801 	mdlOpen(pLayerPointers);
1802 	mdlGetActiveMediaInfo(pLayerPointers);
1803 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_REGS;
1804 
1805 	/*
1806 	 * Setup the interrupt
1807 	 */
1808 	if (ddi_add_intr(devinfo, 0, &pOdl->iblock, 0, amd8111s_intr,
1809 	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
1810 		goto attach_failure;
1811 	}
1812 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_INTRADDED;
1813 
1814 	/*
1815 	 * Setup soft intr
1816 	 */
1817 	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &pOdl->drain_id,
1818 	    NULL, NULL, amd8111s_send_drain,
1819 	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
1820 		goto attach_failure;
1821 	}
1822 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESCHED;
1823 
1824 	/*
1825 	 * Initialize the mac structure
1826 	 */
1827 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
1828 		goto attach_failure;
1829 
1830 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
1831 	macp->m_driver = pLayerPointers;
1832 	macp->m_dip = devinfo;
1833 	/* Get MAC address */
1834 	mdlGetMacAddress(pLayerPointers, (unsigned char *)pOdl->MacAddress);
1835 	macp->m_src_addr = pOdl->MacAddress;
1836 	macp->m_callbacks = &amd8111s_m_callbacks;
1837 	macp->m_min_sdu = 0;
1838 	/* ETHERMTU (1500) = 1518 - 14 (ether header) - 4 (CRC) */
1839 	macp->m_max_sdu = ETHERMTU;
1840 
1841 	/*
1842 	 * Finally, we're ready to register ourselves with the MAC layer
1843 	 * interface; if this succeeds, we're ready to start.
1844 	 */
1845 	if (mac_register(macp, &pOdl->mh) != DDI_SUCCESS) {
1846 		mac_free(macp);
1847 		goto attach_failure;
1848 	}
1849 	mac_free(macp);
1850 
1851 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_MACREGED;
1852 
1853 	return (DDI_SUCCESS);
1854 
1855 attach_failure:
1856 	(void) amd8111s_unattach(devinfo, pLayerPointers);
1857 	return (DDI_FAILURE);
1858 
1859 }
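
/*
 * A minimal sketch of the progress-bitmask idiom used by the attach path
 * above: each completed stage sets a bit in attach_progress, and a single
 * cleanup routine undoes only the stages whose bits are set, in reverse
 * order.  The EX_* names and ex_* helpers are hypothetical illustrations,
 * not part of this driver, and the block is excluded from compilation.
 */
#if 0
#define	EX_STAGE_A	0x01	/* first resource acquired */
#define	EX_STAGE_B	0x02	/* second resource acquired */

static void
ex_undo(uint_t progress)
{
	if (progress & EX_STAGE_B)
		ex_free_b();	/* tear down stage B first (reverse order) */
	if (progress & EX_STAGE_A)
		ex_free_a();
}

static int
ex_attach(void)
{
	uint_t progress = 0;

	if (ex_setup_a() != 0)
		goto failure;
	progress |= EX_STAGE_A;

	if (ex_setup_b() != 0)
		goto failure;
	progress |= EX_STAGE_B;

	return (DDI_SUCCESS);

failure:
	ex_undo(progress);
	return (DDI_FAILURE);
}
#endif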
1860 
1861 /*
1862  * detach(9E) -- Detach a device from the system
1863  *
1864  * It is called for each device instance when the system is preparing to
1865  * unload a dynamically unloadable driver.
1866  * It will:
1867  *	a. check whether any driver buffers are still held by the OS;
1868  *	b. clean up all allocated memory if it is not in use by the OS;
1869  *	c. unregister from the MAC layer;
1870  *	d. return DDI_SUCCESS on successful free & unregister, else
1871  *	   DDI_FAILURE.
1872  */
1873 static int
1874 amd8111s_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
1875 {
1876 	struct LayerPointers *pLayerPointers;
1877 
1878 	switch (cmd) {
1879 	case DDI_DETACH:
1880 		break;
1881 	default:
1882 		return (DDI_FAILURE);
1883 	}
1884 
1885 	/*
1886 	 * Get the driver private (struct LayerPointers *) structure
1887 	 */
1888 	if ((pLayerPointers = (struct LayerPointers *)ddi_get_driver_private
1889 	    (devinfo)) == NULL) {
1890 		return (DDI_FAILURE);
1891 	}
1892 
1893 	return (amd8111s_unattach(devinfo, pLayerPointers));
1894 }
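
/*
 * Detach relies on the per-instance state that amd8111s_attach() stored
 * with ddi_set_driver_private(): a NULL ddi_get_driver_private() result
 * means attach never completed for this instance, so there is nothing to
 * tear down, and all real cleanup is delegated to amd8111s_unattach()
 * below.
 */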
1895 
1896 static int
1897 amd8111s_unattach(dev_info_t *devinfo, struct LayerPointers *pLayerPointers)
1898 {
1899 	struct odl *pOdl = pLayerPointers->pOdl;
1900 
1901 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_MACREGED) {
1902 		/* Unregister the driver from the MAC layer */
1903 		if (mac_unregister(pOdl->mh) != DDI_SUCCESS) {
1904 			return (DDI_FAILURE);
1905 		}
1906 	}
1907 
1908 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_INTRADDED) {
1909 		ddi_remove_intr(devinfo, 0, pOdl->iblock);
1910 	}
1911 
1912 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESCHED) {
1913 		ddi_remove_softintr(pOdl->drain_id);
1914 	}
1915 
1916 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_REGS) {
1917 		/* Stop HW */
1918 		mdlStopChip(pLayerPointers);
1919 		ddi_regs_map_free(&(pOdl->MemBasehandle));
1920 	}
1921 
1922 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESOURCE) {
1923 		/* Free All memory allocated */
1924 		amd8111s_free_resource(pLayerPointers);
1925 	}
1926 
1927 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_PCI) {
1928 		pci_config_teardown(&pOdl->pci_handle);
1929 		mutex_destroy(&pOdl->mdlSendLock);
1930 		mutex_destroy(&pOdl->mdlRcvLock);
1931 		rw_destroy(&pOdl->chip_lock);
1932 	}
1933 
1934 	kmem_free(pOdl, sizeof (struct odl));
1935 	kmem_free(pLayerPointers, sizeof (struct LayerPointers));
1936 
1937 	return (DDI_SUCCESS);
1938 }
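
/*
 * pOdl and pLayerPointers are freed unconditionally above: both are
 * allocated before the first attach_progress bit is ever set, so they
 * exist on every path that can reach amd8111s_unattach().
 */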
1939 
1940 /*
1941  * (MAC Layer Entry Point) The MAC layer calls this entry point
1942  * periodically to collect driver statistics.
1943  */
1944 static int
1945 amd8111s_m_stat(void *arg, uint_t stat, uint64_t *val)
1946 {
1947 	struct LayerPointers *pLayerPointers = arg;
1948 	struct amd8111s_statistics *adapterStat;
1949 
1950 	adapterStat = &pLayerPointers->pOdl->statistics;
1951 
1952 	switch (stat) {
1953 
1954 	/*
1955 	 * Current Status
1956 	 */
1957 	case MAC_STAT_IFSPEED:
1958 		*val = pLayerPointers->pMdl->Speed * 1000000;
1959 		break;
1960 
1961 	case ETHER_STAT_LINK_DUPLEX:
1962 		if (pLayerPointers->pMdl->FullDuplex) {
1963 			*val = LINK_DUPLEX_FULL;
1964 		} else {
1965 			*val = LINK_DUPLEX_HALF;
1966 		}
1967 		break;
1968 
1969 	/*
1970 	 * Capabilities
1971 	 */
1972 	case ETHER_STAT_CAP_1000FDX:
1973 		*val = 0;
1974 		break;
1975 
1976 	case ETHER_STAT_CAP_1000HDX:
1977 		*val = 0;
1978 		break;
1979 
1980 	case ETHER_STAT_CAP_100FDX:
1981 		*val = 1;
1982 		break;
1983 
1984 	case ETHER_STAT_CAP_100HDX:
1985 		*val = 1;
1986 		break;
1987 
1988 	case ETHER_STAT_CAP_10FDX:
1989 		*val = 1;
1990 		break;
1991 
1992 	case ETHER_STAT_CAP_10HDX:
1993 		*val = 1;
1994 		break;
1995 
1996 	case ETHER_STAT_CAP_ASMPAUSE:
1997 		*val = 1;
1998 		break;
1999 
2000 	case ETHER_STAT_CAP_PAUSE:
2001 		*val = 1;
2002 		break;
2003 
2004 	case ETHER_STAT_CAP_AUTONEG:
2005 		*val = 1;
2006 		break;
2007 
2008 	case ETHER_STAT_ADV_CAP_1000FDX:
2009 		*val = 0;
2010 		break;
2011 
2012 	case ETHER_STAT_ADV_CAP_1000HDX:
2013 		*val = 0;
2014 		break;
2015 
2016 	case ETHER_STAT_ADV_CAP_100FDX:
2017 		*val = 1;
2018 		break;
2019 
2020 	case ETHER_STAT_ADV_CAP_100HDX:
2021 		*val = 1;
2022 		break;
2023 
2024 	case ETHER_STAT_ADV_CAP_10FDX:
2025 		*val = 1;
2026 		break;
2027 
2028 	case ETHER_STAT_ADV_CAP_10HDX:
2029 		*val = 1;
2030 		break;
2031 
2032 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
2033 		*val = 1;
2034 		break;
2035 
2036 	case ETHER_STAT_ADV_CAP_PAUSE:
2037 		*val = 1;
2038 		break;
2039 
2040 	case ETHER_STAT_ADV_CAP_AUTONEG:
2041 		*val = 1;
2042 		break;
2043 
2044 	/*
2045 	 * Rx Counters
2046 	 */
2047 	case MAC_STAT_IPACKETS:
2048 		*val = adapterStat->rx_mib_unicst_packets +
2049 		    adapterStat->rx_mib_multicst_packets +
2050 		    adapterStat->rx_mib_broadcst_packets +
2051 		    mdlReadMib(pLayerPointers, RcvUniCastPkts) +
2052 		    mdlReadMib(pLayerPointers, RcvMultiCastPkts) +
2053 		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
2054 		break;
2055 
2056 	case MAC_STAT_RBYTES:
2057 		*val = adapterStat->rx_mib_bytes +
2058 		    mdlReadMib(pLayerPointers, RcvOctets);
2059 		break;
2060 
2061 	case MAC_STAT_MULTIRCV:
2062 		*val = adapterStat->rx_mib_multicst_packets +
2063 		    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
2064 		break;
2065 
2066 	case MAC_STAT_BRDCSTRCV:
2067 		*val = adapterStat->rx_mib_broadcst_packets +
2068 		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
2069 		break;
2070 
2071 	case MAC_STAT_NORCVBUF:
2072 		*val = adapterStat->rx_allocfail +
2073 		    adapterStat->rx_mib_drop_packets +
2074 		    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
2075 		break;
2076 
2077 	case MAC_STAT_IERRORS:
2078 		*val = adapterStat->rx_mib_align_err_packets +
2079 		    adapterStat->rx_mib_fcs_err_packets +
2080 		    adapterStat->rx_mib_symbol_err_packets +
2081 		    mdlReadMib(pLayerPointers, RcvAlignmentErrors) +
2082 		    mdlReadMib(pLayerPointers, RcvFCSErrors) +
2083 		    mdlReadMib(pLayerPointers, RcvSymbolErrors);
2084 		break;
2085 
2086 	case ETHER_STAT_ALIGN_ERRORS:
2087 		*val = adapterStat->rx_mib_align_err_packets +
2088 		    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
2089 		break;
2090 
2091 	case ETHER_STAT_FCS_ERRORS:
2092 		*val = adapterStat->rx_mib_fcs_err_packets +
2093 		    mdlReadMib(pLayerPointers, RcvFCSErrors);
2094 		break;
2095 
2096 	/*
2097 	 * Tx Counters
2098 	 */
2099 	case MAC_STAT_OPACKETS:
2100 		*val = adapterStat->tx_mib_packets +
2101 		    mdlReadMib(pLayerPointers, XmtPackets);
2102 		break;
2103 
2104 	case MAC_STAT_OBYTES:
2105 		*val = adapterStat->tx_mib_bytes +
2106 		    mdlReadMib(pLayerPointers, XmtOctets);
2107 		break;
2108 
2109 	case MAC_STAT_MULTIXMT:
2110 		*val = adapterStat->tx_mib_multicst_packets +
2111 		    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
2112 		break;
2113 
2114 	case MAC_STAT_BRDCSTXMT:
2115 		*val = adapterStat->tx_mib_broadcst_packets +
2116 		    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
2117 		break;
2118 
2119 	case MAC_STAT_NOXMTBUF:
2120 		*val = adapterStat->tx_no_descriptor;
2121 		break;
2122 
2123 	case MAC_STAT_OERRORS:
2124 		*val = adapterStat->tx_mib_ex_coll_packets +
2125 		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
2126 		break;
2127 
2128 	case MAC_STAT_COLLISIONS:
2129 		*val = adapterStat->tx_mib_ex_coll_packets +
2130 		    mdlReadMib(pLayerPointers, XmtCollisions);
2131 		break;
2132 
2133 	case ETHER_STAT_FIRST_COLLISIONS:
2134 		*val = adapterStat->tx_mib_one_coll_packets +
2135 		    mdlReadMib(pLayerPointers, XmtOneCollision);
2136 		break;
2137 
2138 	case ETHER_STAT_MULTI_COLLISIONS:
2139 		*val = adapterStat->tx_mib_multi_coll_packets +
2140 		    mdlReadMib(pLayerPointers, XmtMultipleCollision);
2141 		break;
2142 
2143 	case ETHER_STAT_EX_COLLISIONS:
2144 		*val = adapterStat->tx_mib_ex_coll_packets +
2145 		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
2146 		break;
2147 
2148 	case ETHER_STAT_TX_LATE_COLLISIONS:
2149 		*val = adapterStat->tx_mib_late_coll_packets +
2150 		    mdlReadMib(pLayerPointers, XmtLateCollision);
2151 		break;
2152 
2153 	case ETHER_STAT_DEFER_XMTS:
2154 		*val = adapterStat->tx_mib_defer_trans_packets +
2155 		    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
2156 		break;
2157 
2158 	default:
2159 		return (ENOTSUP);
2160 	}
2161 	return (0);
2162 }
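
/*
 * Each counter above merges a software-accumulated value kept in
 * pOdl->statistics with the live hardware MIB counter fetched by
 * mdlReadMib(), so callers see a single 64-bit total.  Hypothetical
 * usage (illustration only, not driver code):
 *
 *	uint64_t packets;
 *
 *	if (amd8111s_m_stat(pLayerPointers, MAC_STAT_IPACKETS,
 *	    &packets) == 0)
 *		cmn_err(CE_CONT, "rx packets: %llu\n",
 *		    (unsigned long long)packets);
 */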
2163 
2164 /*
2165  *	Memory access functions used by MDL to read and write card registers.
2166  */
2167 unsigned char
2168 READ_REG8(struct LayerPointers *pLayerPointers, long x)
2169 {
2170 	return (ddi_get8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)x));
2171 }
2172 
2173 int
2174 READ_REG16(struct LayerPointers *pLayerPointers, long x)
2175 {
2176 	return (ddi_get16(pLayerPointers->pOdl->MemBasehandle,
2177 	    (uint16_t *)(x)));
2178 }
2179 
2180 long
2181 READ_REG32(struct LayerPointers *pLayerPointers, long x)
2182 {
2183 	return (ddi_get32(pLayerPointers->pOdl->MemBasehandle,
2184 	    (uint32_t *)(x)));
2185 }
2186 
2187 void
2188 WRITE_REG8(struct LayerPointers *pLayerPointers, long x, int y)
2189 {
2190 	ddi_put8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)(x), y);
2191 }
2192 
2193 void
2194 WRITE_REG16(struct LayerPointers *pLayerPointers, long x, int y)
2195 {
2196 	ddi_put16(pLayerPointers->pOdl->MemBasehandle, (uint16_t *)(x), y);
2197 }
2198 
2199 void
2200 WRITE_REG32(struct LayerPointers *pLayerPointers, long x, int y)
2201 {
2202 	ddi_put32(pLayerPointers->pOdl->MemBasehandle, (uint32_t *)(x), y);
2203 }
2204 
/*
 * Write an 8-byte buffer to consecutive device registers, one byte at
 * a time.
 */
2205 void
2206 WRITE_REG64(struct LayerPointers *pLayerPointers, long x, char *y)
2207 {
2208 	int i;
2209 	for (i = 0; i < 8; i++) {
2210 		WRITE_REG8(pLayerPointers, (x + i), y[i]);
2211 	}
2212 }
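
/*
 * These accessors take absolute mapped addresses: callers add a register
 * offset to pMdl->Mem_Address, the base cached by amd8111s_attach().  A
 * hypothetical read-modify-write of a 32-bit register at offset
 * EX_REG_OFFSET (name assumed, not a real chip register) would look like:
 *
 *	long reg = pLayerPointers->pMdl->Mem_Address + EX_REG_OFFSET;
 *	long val = READ_REG32(pLayerPointers, reg);
 *
 *	WRITE_REG32(pLayerPointers, reg, val | 0x1);
 */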
2213