xref: /titanic_44/usr/src/uts/intel/io/amd8111s/amd8111s_main.c (revision 23b4d00c19075d9d50f296d4437a3f48579b483d)
1 /*
2  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2001-2006 Advanced Micro Devices, Inc.  All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions are met:
11  *
12  * + Redistributions of source code must retain the above copyright notice,
13  * + this list of conditions and the following disclaimer.
14  *
15  * + Redistributions in binary form must reproduce the above copyright
16  * + notice, this list of conditions and the following disclaimer in the
17  * + documentation and/or other materials provided with the distribution.
18  *
19  * + Neither the name of Advanced Micro Devices, Inc. nor the names of its
20  * + contributors may be used to endorse or promote products derived from
21  * + this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
24  * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
25  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
26  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27  * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. OR
28  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
29  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
30  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
34  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
35  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  *
37  * Import/Export/Re-Export/Use/Release/Transfer Restrictions and
38  * Compliance with Applicable Laws.  Notice is hereby given that
39  * the software may be subject to restrictions on use, release,
40  * transfer, importation, exportation and/or re-exportation under
41  * the laws and regulations of the United States or other
42  * countries ("Applicable Laws"), which include but are not
43  * limited to U.S. export control laws such as the Export
44  * Administration Regulations and national security controls as
45  * defined thereunder, as well as State Department controls under
46  * the U.S. Munitions List.  Permission to use and/or
47  * redistribute the software is conditioned upon compliance with
48  * all Applicable Laws, including U.S. export control laws
49  * regarding specifically designated persons, countries and
50  * nationals of countries subject to national security controls.
51  */
52 
53 /* include files */
54 #include <sys/disp.h>
55 #include <sys/atomic.h>
56 #include <sys/vlan.h>
57 #include "amd8111s_main.h"
58 
59 /* Global macro Definitions */
60 #define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
61 #define	INTERFACE_NAME "amd8111s"
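/*
 * Tuning notes (assumptions based on how these constants are used
 * below): AMD8111S_SPLIT caps the number of DMA "trunks" a ring
 * buffer is carved into in amd8111s_alloc_dma_ringbuf(), and
 * AMD8111S_SEND_MAX bounds how many packets one pass of
 * amd8111s_send_serial() queues to the hardware.
 */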
62 #define	AMD8111S_SPLIT	128
63 #define	AMD8111S_SEND_MAX	64
64 
65 static char ident[] = "AMD8111 10/100M Ethernet";
66 
67 /*
68  * Driver Entry Points
69  */
70 static int amd8111s_attach(dev_info_t *, ddi_attach_cmd_t);
71 static int amd8111s_detach(dev_info_t *, ddi_detach_cmd_t);
72 
73 /*
74  * GLD entry point prototypes
75  */
76 static int amd8111s_m_unicst(void *, const uint8_t *);
77 static int amd8111s_m_promisc(void *, boolean_t);
78 static int amd8111s_m_stat(void *, uint_t, uint64_t *);
79 static void amd8111s_m_resources(void *arg);
80 static void amd8111s_m_ioctl(void *, queue_t *, mblk_t *);
81 static int amd8111s_m_multicst(void *, boolean_t, const uint8_t *addr);
82 static int amd8111s_m_start(void *);
83 static void amd8111s_m_stop(void *);
84 static mblk_t *amd8111s_m_tx(void *, mblk_t *mp);
85 static uint_t amd8111s_intr(caddr_t);
86 
87 static int amd8111s_unattach(dev_info_t *, struct LayerPointers *);
88 
89 static boolean_t amd8111s_allocate_buffers(struct LayerPointers *);
90 static int amd8111s_odlInit(struct LayerPointers *);
91 static boolean_t amd8111s_allocate_descriptors(struct LayerPointers *);
92 static void amd8111s_free_descriptors(struct LayerPointers *);
93 static boolean_t amd8111s_alloc_dma_ringbuf(struct LayerPointers *,
94 		struct amd8111s_dma_ringbuf *, uint32_t, uint32_t);
95 static void amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *);
96 
97 
98 static void amd8111s_log(struct LayerPointers *adapter, int level,
99     char *fmt, ...);
100 
101 static struct cb_ops amd8111s_cb_ops = {
102 	nulldev,
103 	nulldev,
104 	nodev,
105 	nodev,
106 	nodev,
107 	nodev,
108 	nodev,
109 	nodev,
110 	nodev,
111 	nodev,
112 	nodev,
113 	nochpoll,
114 	ddi_prop_op,
115 	NULL,
116 	D_NEW | D_MP,
117 	CB_REV,		/* cb_rev */
118 	nodev,		/* cb_aread */
119 	nodev		/* cb_awrite */
120 };
121 
122 static struct dev_ops amd8111s_dev_ops = {
123 	DEVO_REV,		/* devo_rev */
124 	0,			/* devo_refcnt */
125 	NULL,			/* devo_getinfo */
126 	nulldev,		/* devo_identify */
127 	nulldev,		/* devo_probe */
128 	amd8111s_attach,	/* devo_attach */
129 	amd8111s_detach,	/* devo_detach */
130 	nodev,			/* devo_reset */
131 	&amd8111s_cb_ops,	/* devo_cb_ops */
132 	NULL,			/* devo_bus_ops */
133 	nodev,			/* devo_power */
134 	ddi_quiesce_not_supported,	/* devo_quiesce */
135 };
136 
137 struct modldrv amd8111s_modldrv = {
138 	&mod_driverops,		/* Type of module. This one is a driver */
139 	ident,			/* short description */
140 	&amd8111s_dev_ops	/* driver specific ops */
141 };
142 
143 struct modlinkage amd8111s_modlinkage = {
144 	MODREV_1, (void *)&amd8111s_modldrv, NULL
145 };
146 
147 /*
148  * Global Variables
149  */
150 struct LayerPointers *amd8111sadapter;
151 
152 static ddi_dma_attr_t pcn_buff_dma_attr_t = {
153 	DMA_ATTR_V0,	/* dma_attr_version */
154 	(uint64_t)0,		/* dma_attr_addr_lo */
155 	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
156 	(uint64_t)0xFFFFFFFF,	/* dma_attr_count_max */
157 	(uint64_t)1,		/* dma_attr_align */
158 	(uint_t)0x7F,		/* dma_attr_burstsizes */
159 	(uint32_t)1,		/* dma_attr_minxfer */
160 	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
161 	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
162 	(int)1,			/* dma_attr_sgllen */
163 	(uint32_t)1,		/* granularity */
164 	(uint_t)0		/* dma_attr_flags */
165 };
166 
167 static ddi_dma_attr_t pcn_desc_dma_attr_t = {
168 	DMA_ATTR_V0,		/* dma_attr_version */
169 	(uint64_t)0,		/* dma_attr_addr_lo */
170 	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
171 	(uint64_t)0x7FFFFFFF,	/* dma_attr_count_max */
172 	(uint64_t)0x10,		/* dma_attr_align */
173 	(uint_t)0xFFFFFFFFU,	/* dma_attr_burstsizes */
174 	(uint32_t)1,		/* dma_attr_minxfer */
175 	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
176 	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
177 	(int)1,			/* dma_attr_sgllen */
178 	(uint32_t)1,		/* granularity */
179 	(uint_t)0		/* dma_attr_flags */
180 };
181 
182 /* PIO access attributes for registers */
183 static ddi_device_acc_attr_t pcn_acc_attr = {
184 	DDI_DEVICE_ATTR_V0,
185 	DDI_STRUCTURE_LE_ACC,
186 	DDI_STRICTORDER_ACC
187 };
188 
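/*
 * MC_RESOURCES and MC_IOCTL advertise the optional m_resources and
 * m_ioctl callbacks to the MAC framework; the mandatory callbacks in
 * amd8111s_m_callbacks below need no flag.
 */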
189 #define	AMD8111S_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL)
190 
191 
192 static mac_callbacks_t amd8111s_m_callbacks = {
193 	AMD8111S_M_CALLBACK_FLAGS,
194 	amd8111s_m_stat,
195 	amd8111s_m_start,
196 	amd8111s_m_stop,
197 	amd8111s_m_promisc,
198 	amd8111s_m_multicst,
199 	amd8111s_m_unicst,
200 	amd8111s_m_tx,
201 	amd8111s_m_resources,
202 	amd8111s_m_ioctl
203 };
204 
205 
206 /*
207  * Standard Driver Load Entry Point
208  * It is called when the driver is loaded.
209  */
210 int
211 _init()
212 {
213 	int status;
214 	mac_init_ops(&amd8111s_dev_ops, "amd8111s");
215 
216 	status = mod_install(&amd8111s_modlinkage);
217 	if (status != DDI_SUCCESS) {
218 		mac_fini_ops(&amd8111s_dev_ops);
219 	}
220 
221 	return (status);
222 }
223 
224 /*
225  * Standard Driver Entry Point for Query.
226  * It can be called at any time to get driver info.
227  */
228 int
229 _info(struct modinfo *modinfop)
230 {
231 	return (mod_info(&amd8111s_modlinkage, modinfop));
232 }
233 
234 /*
235  *	Standard Driver Entry Point for Unload.
236  *	It is called when the driver is unloaded.
237  */
238 int
239 _fini()
240 {
241 	int status;
242 
243 	status = mod_remove(&amd8111s_modlinkage);
244 	if (status == DDI_SUCCESS) {
245 		mac_fini_ops(&amd8111s_dev_ops);
246 	}
247 
248 	return (status);
249 }
250 
251 /* Rx blanking callback: would adjust the Interrupt Coalescing Register; currently a no-op */
252 static void
253 amd8111s_m_blank(void *arg, time_t ticks, uint32_t count)
254 {
255 	_NOTE(ARGUNUSED(arg, ticks, count));
256 }
257 
258 static void
259 amd8111s_m_resources(void *arg)
260 {
261 	struct LayerPointers *adapter = arg;
262 	mac_rx_fifo_t mrf;
263 
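	/*
	 * Register a single Rx FIFO resource with the MAC layer. The
	 * blank-time/packet-count values below are the defaults handed
	 * back to amd8111s_m_blank() for interrupt blanking (coalescing).
	 */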
264 	mrf.mrf_type = MAC_RX_FIFO;
265 	mrf.mrf_blank = amd8111s_m_blank;
266 	mrf.mrf_arg = (void *)adapter;
267 	mrf.mrf_normal_blank_time = 128;
268 	mrf.mrf_normal_pkt_count = 8;
269 
270 	adapter->pOdl->mrh = mac_resource_add(adapter->pOdl->mh,
271 	    (mac_resource_t *)&mrf);
272 }
273 
274 /*
275  * Loopback Support
276  */
277 static lb_property_t loopmodes[] = {
278 	{ normal,	"normal",	AMD8111S_LB_NONE		},
279 	{ external,	"100Mbps",	AMD8111S_LB_EXTERNAL_100	},
280 	{ external,	"10Mbps",	AMD8111S_LB_EXTERNAL_10		},
281 	{ internal,	"MAC",		AMD8111S_LB_INTERNAL_MAC	}
282 };
283 
284 static void
285 amd8111s_set_loop_mode(struct LayerPointers *adapter, uint32_t mode)
286 {
287 
288 	/*
289 	 * If the mode isn't being changed, there's nothing to do ...
290 	 */
291 	if (mode == adapter->pOdl->loopback_mode)
292 		return;
293 
294 	/*
295 	 * Validate the requested mode and prepare a suitable message
296 	 * to explain the link down/up cycle that the change will
297 	 * probably induce ...
298 	 */
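	/*
	 * Note (per the AMD command-register convention used throughout
	 * this driver): the VAL0/VAL1 bits in CMD2/CMD3 act as
	 * write-enables for their bit groups, so writing a command bit
	 * without its VAL bit clears that bit instead of setting it.
	 */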
299 	switch (mode) {
300 	default:
301 		return;
302 
303 	case AMD8111S_LB_NONE:
304 		mdlStopChip(adapter);
305 		if (adapter->pOdl->loopback_mode == AMD8111S_LB_INTERNAL_MAC) {
306 			cmn_err(CE_NOTE, "LB_NONE restored from Internal LB");
307 			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
308 			    INLOOP);
309 			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
310 			    FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);
311 		} else {
312 			cmn_err(CE_NOTE, "LB_NONE restored from External LB");
313 			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
314 			    EXLOOP);
315 		}
316 
317 		amd8111s_reset(adapter);
318 		adapter->pOdl->LinkStatus = LINK_STATE_DOWN;
319 		adapter->pOdl->rx_fcs_stripped = B_FALSE;
320 		mdlStartChip(adapter);
321 		break;
322 
323 	case AMD8111S_LB_EXTERNAL_100:
324 		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_100");
325 		mdlStopChip(adapter);
326 		amd8111s_reset(adapter);
327 		SetIntrCoalesc(adapter, B_FALSE);
328 		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_100);
329 		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
330 		    VAL0 | EXLOOP);
331 		adapter->pOdl->LinkStatus = LINK_STATE_UP;
332 		adapter->pMdl->Speed = 100;
333 		adapter->pMdl->FullDuplex = B_TRUE;
334 		/* Tell GLD the state of the physical link. */
335 		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);
336 
337 		adapter->pOdl->rx_fcs_stripped = B_TRUE;
338 
339 		mdlStartChip(adapter);
340 		break;
341 
342 	case AMD8111S_LB_EXTERNAL_10:
343 		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_10");
344 		mdlStopChip(adapter);
345 		amd8111s_reset(adapter);
346 		SetIntrCoalesc(adapter, B_FALSE);
347 		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_10);
348 		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
349 		    VAL0 | EXLOOP);
350 		adapter->pOdl->LinkStatus = LINK_STATE_UP;
351 		adapter->pMdl->Speed = 10;
352 		adapter->pMdl->FullDuplex = B_TRUE;
353 		/* Tell GLD the state of the physical link. */
354 		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);
355 
356 		adapter->pOdl->rx_fcs_stripped = B_TRUE;
357 
358 		mdlStartChip(adapter);
359 		break;
360 
361 	case AMD8111S_LB_INTERNAL_MAC:
362 		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_INTERNAL_MAC");
363 		mdlStopChip(adapter);
364 		amd8111s_reset(adapter);
365 		SetIntrCoalesc(adapter, B_FALSE);
366 		/* Disable Port Manager */
367 		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
368 		    EN_PMGR);
369 		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
370 		    VAL0 | INLOOP);
371 
372 		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
373 		    VAL1 | FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);
374 
375 		adapter->pOdl->LinkStatus = LINK_STATE_UP;
376 		adapter->pMdl->FullDuplex = B_TRUE;
377 		/* Tell GLD the state of the physical link. */
378 		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);
379 
380 		adapter->pOdl->rx_fcs_stripped = B_TRUE;
381 
382 		mdlStartChip(adapter);
383 		break;
384 	}
385 
386 	/*
387 	 * All OK; tell the caller to reprogram
388 	 * the PHY and/or MAC for the new mode ...
389 	 */
390 	adapter->pOdl->loopback_mode = mode;
391 }
392 
393 static enum ioc_reply
394 amd8111s_loopback_ioctl(struct LayerPointers *adapter, struct iocblk *iocp,
395     mblk_t *mp)
396 {
397 	lb_info_sz_t *lbsp;
398 	lb_property_t *lbpp;
399 	uint32_t *lbmp;
400 	int cmd;
401 
402 	/*
403 	 * Validate format of ioctl
404 	 */
405 	if (mp->b_cont == NULL)
406 		return (IOC_INVAL);
407 
408 	cmd = iocp->ioc_cmd;
409 	switch (cmd) {
410 	default:
411 		/* Unknown/unsupported ioctl command */
412 		amd8111s_log(adapter, CE_NOTE,
413 		    "amd8111s_loop_ioctl: invalid cmd 0x%x", cmd);
414 		return (IOC_INVAL);
415 
416 	case LB_GET_INFO_SIZE:
417 		if (iocp->ioc_count != sizeof (lb_info_sz_t)) {
418 			amd8111s_log(adapter, CE_NOTE,
419 			    "wrong LB_GET_INFO_SIZE size");
420 			return (IOC_INVAL);
421 		}
422 		lbsp = (void *)mp->b_cont->b_rptr;
423 		*lbsp = sizeof (loopmodes);
424 		break;
425 
426 	case LB_GET_INFO:
427 		if (iocp->ioc_count != sizeof (loopmodes)) {
428 			amd8111s_log(adapter, CE_NOTE,
429 			    "Wrong LB_GET_INFO size");
430 			return (IOC_INVAL);
431 		}
432 		lbpp = (void *)mp->b_cont->b_rptr;
433 		bcopy(loopmodes, lbpp, sizeof (loopmodes));
434 		break;
435 
436 	case LB_GET_MODE:
437 		if (iocp->ioc_count != sizeof (uint32_t)) {
438 			amd8111s_log(adapter, CE_NOTE,
439 			    "Wrong LB_GET_MODE size");
440 			return (IOC_INVAL);
441 		}
442 		lbmp = (void *)mp->b_cont->b_rptr;
443 		*lbmp = adapter->pOdl->loopback_mode;
444 		break;
445 
446 	case LB_SET_MODE:
447 		if (iocp->ioc_count != sizeof (uint32_t)) {
448 			amd8111s_log(adapter, CE_NOTE,
449 			    "Wrong LB_SET_MODE size");
450 			return (IOC_INVAL);
451 		}
452 		lbmp = (void *)mp->b_cont->b_rptr;
453 		amd8111s_set_loop_mode(adapter, *lbmp);
454 		break;
455 	}
456 	return (IOC_REPLY);
457 }
458 
459 static void
460 amd8111s_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
461 {
462 	struct iocblk *iocp;
463 	struct LayerPointers *adapter;
464 	enum ioc_reply status;
465 
466 	iocp = (void *)mp->b_rptr;
467 	iocp->ioc_error = 0;
468 	adapter = arg;
469 
470 	ASSERT(adapter);
471 	if (adapter == NULL) {
472 		miocnak(q, mp, 0, EINVAL);
473 		return;
474 	}
475 
476 	switch (iocp->ioc_cmd) {
477 
478 	case LB_GET_INFO_SIZE:
479 	case LB_GET_INFO:
480 	case LB_GET_MODE:
481 	case LB_SET_MODE:
482 		status = amd8111s_loopback_ioctl(adapter, iocp, mp);
483 		break;
484 
485 	default:
486 		status = IOC_INVAL;
487 		break;
488 	}
489 
490 	/*
491 	 * Decide how to reply
492 	 */
493 	switch (status) {
494 	default:
495 	case IOC_INVAL:
496 		/*
497 		 * Error, reply with a NAK and EINVAL or the specified error
498 		 */
499 		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
500 		    EINVAL : iocp->ioc_error);
501 		break;
502 
503 	case IOC_DONE:
504 		/*
505 		 * OK, reply already sent
506 		 */
507 		break;
508 
509 	case IOC_ACK:
510 		/*
511 		 * OK, reply with an ACK
512 		 */
513 		miocack(q, mp, 0, 0);
514 		break;
515 
516 	case IOC_REPLY:
517 		/*
518 		 * OK, send prepared reply as ACK or NAK
519 		 */
520 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
521 		    M_IOCACK : M_IOCNAK;
522 		qreply(q, mp);
523 		break;
524 	}
525 }
526 
527 /*
528  * Copy one packet from DMA memory into an mblk; advance the descriptor pointer.
529  */
530 static boolean_t
531 amd8111s_recv_copy(struct LayerPointers *pLayerPointers, mblk_t **last_mp)
532 {
533 	int length = 0;
534 	mblk_t *mp;
535 	struct rx_desc *descriptor;
536 	struct odl *pOdl = pLayerPointers->pOdl;
537 	struct amd8111s_statistics *statistics = &pOdl->statistics;
538 	struct nonphysical *pNonphysical = pLayerPointers->pMil
539 	    ->pNonphysical;
540 
541 	mutex_enter(&pOdl->mdlRcvLock);
542 	descriptor = pNonphysical->RxBufDescQRead->descriptor;
543 	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle,
544 	    pNonphysical->RxBufDescQRead->descriptor -
545 	    pNonphysical->RxBufDescQStart->descriptor,
546 	    sizeof (struct rx_desc), DDI_DMA_SYNC_FORCPU);
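	/*
	 * Rx_OWN == 0 means the controller has handed this descriptor
	 * back to the host, so its status and byte count are now valid.
	 */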
547 	if ((descriptor->Rx_OWN) == 0) {
548 		/*
549 		 * If the frame is received with errors, then set MCNT
550 		 * of that pkt in ReceiveArray to 0. This packet would
551 		 * be discarded later and not indicated to the OS.
552 		 */
553 		if (descriptor->Rx_ERR) {
554 			statistics->rx_desc_err ++;
555 			descriptor->Rx_ERR = 0;
556 			if (descriptor->Rx_FRAM == 1) {
557 				statistics->rx_desc_err_FRAM ++;
558 				descriptor->Rx_FRAM = 0;
559 			}
560 			if (descriptor->Rx_OFLO == 1) {
561 				statistics->rx_desc_err_OFLO ++;
562 				descriptor->Rx_OFLO = 0;
563 				pOdl->rx_overflow_counter ++;
564 				if ((pOdl->rx_overflow_counter > 5) &&
565 				    (pOdl->pause_interval == 0)) {
566 					statistics->rx_double_overflow ++;
567 					mdlSendPause(pLayerPointers);
568 					pOdl->rx_overflow_counter = 0;
569 					pOdl->pause_interval = 25;
570 				}
571 			}
572 			if (descriptor->Rx_CRC == 1) {
573 				statistics->rx_desc_err_CRC ++;
574 				descriptor->Rx_CRC = 0;
575 			}
576 			if (descriptor->Rx_BUFF == 1) {
577 				statistics->rx_desc_err_BUFF ++;
578 				descriptor->Rx_BUFF = 0;
579 			}
580 			goto Next_Descriptor;
581 		}
582 
583 		/* Length of incoming packet */
584 		if (pOdl->rx_fcs_stripped) {
585 			length = descriptor->Rx_MCNT - 4;
586 		} else {
587 			length = descriptor->Rx_MCNT;
588 		}
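		/*
		 * Sanity check: a minimum Ethernet frame is 64 bytes
		 * including the 4-byte FCS, so anything shorter than this
		 * threshold is counted as undersized.
		 */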
589 		if (length < 62) {
590 			statistics->rx_error_zerosize ++;
591 		}
592 
593 		if ((mp = allocb(length, BPRI_MED)) == NULL) {
594 			statistics->rx_allocfail ++;
595 			goto failed;
596 		}
597 		/* Copy from virtual address of incoming packet */
598 		bcopy((long *)*(pNonphysical->RxBufDescQRead->USpaceMap),
599 		    mp->b_rptr, length);
600 		mp->b_wptr = mp->b_rptr + length;
601 		statistics->rx_ok_packets ++;
602 		if (*last_mp == NULL) {
603 			*last_mp = mp;
604 		} else {
605 			(*last_mp)->b_next = mp;
606 			*last_mp = mp;
607 		}
608 
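		/*
		 * Clear the status bits, hand ownership of the descriptor
		 * back to the controller, and advance (with wrap) the
		 * software read pointer.
		 */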
609 Next_Descriptor:
610 		descriptor->Rx_MCNT = 0;
611 		descriptor->Rx_SOP = 0;
612 		descriptor->Rx_EOP = 0;
613 		descriptor->Rx_PAM = 0;
614 		descriptor->Rx_BAM = 0;
615 		descriptor->TT = 0;
616 		descriptor->Rx_OWN = 1;
617 		pNonphysical->RxBufDescQRead->descriptor++;
618 		pNonphysical->RxBufDescQRead->USpaceMap++;
619 		if (pNonphysical->RxBufDescQRead->descriptor >
620 		    pNonphysical->RxBufDescQEnd->descriptor) {
621 			pNonphysical->RxBufDescQRead->descriptor =
622 			    pNonphysical->RxBufDescQStart->descriptor;
623 			pNonphysical->RxBufDescQRead->USpaceMap =
624 			    pNonphysical->RxBufDescQStart->USpaceMap;
625 		}
626 		mutex_exit(&pOdl->mdlRcvLock);
627 
628 		return (B_TRUE);
629 	}
630 
631 failed:
632 	mutex_exit(&pOdl->mdlRcvLock);
633 	return (B_FALSE);
634 }
635 
636 /*
637  * Get the received packets from NIC card and send them to GLD.
638  */
639 static void
640 amd8111s_receive(struct LayerPointers *pLayerPointers)
641 {
642 	int numOfPkts = 0;
643 	struct odl *pOdl;
644 	mblk_t *ret_mp = NULL, *last_mp = NULL;
645 
646 	pOdl = pLayerPointers->pOdl;
647 
648 	rw_enter(&pOdl->chip_lock, RW_READER);
649 	if (!pLayerPointers->run) {
650 		rw_exit(&pOdl->chip_lock);
651 		return;
652 	}
653 
654 	if (pOdl->pause_interval > 0)
655 		pOdl->pause_interval --;
656 
657 	while (numOfPkts < RX_RING_SIZE) {
658 
659 		if (!amd8111s_recv_copy(pLayerPointers, &last_mp)) {
660 			break;
661 		}
662 		if (ret_mp == NULL)
663 			ret_mp = last_mp;
664 		numOfPkts++;
665 	}
666 
667 	if (ret_mp) {
668 		mac_rx(pOdl->mh, pOdl->mrh, ret_mp);
669 	}
670 
671 	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle, 0, 0,
672 	    DDI_DMA_SYNC_FORDEV);
673 
674 	mdlReceive(pLayerPointers);
675 
676 	rw_exit(&pOdl->chip_lock);
677 
678 }
679 
680 /*
681  * Print a message in the release-version driver.
682  */
683 static void
684 amd8111s_log(struct LayerPointers *adapter, int level, char *fmt, ...)
685 {
686 	char name[32];
687 	char buf[256];
688 	va_list ap;
689 
690 	if (adapter != NULL) {
691 		(void) snprintf(name, sizeof (name), "amd8111s%d",
692 		    ddi_get_instance(adapter->pOdl->devinfo));
693 	} else {
694 		(void) snprintf(name, sizeof (name), "amd8111s");
695 	}
696 	va_start(ap, fmt);
697 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
698 	va_end(ap);
699 	cmn_err(level, "%s: %s", name, buf);
700 }
701 
702 /*
703  * To allocate & initialize all resources.
704  * Called by amd8111s_attach().
705  */
706 static int
707 amd8111s_odlInit(struct LayerPointers *pLayerPointers)
708 {
709 	unsigned long mem_req_array[MEM_REQ_MAX];
710 	unsigned long mem_set_array[MEM_REQ_MAX];
711 	unsigned long *pmem_req_array;
712 	unsigned long *pmem_set_array;
713 	int i, size;
714 
715 	for (i = 0; i < MEM_REQ_MAX; i++) {
716 		mem_req_array[i] = 0;
717 		mem_set_array[i] = 0;
718 	}
719 
720 	milRequestResources(mem_req_array);
721 
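	/*
	 * mem_req_array is a zero-terminated list of (type, size) requests
	 * built by the MIL layer; for each VIRTUAL request we record a
	 * (type, size, address) triple in mem_set_array, with the address
	 * coming from kmem_zalloc().
	 */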
722 	pmem_req_array = mem_req_array;
723 	pmem_set_array = mem_set_array;
724 	while (*pmem_req_array) {
725 		switch (*pmem_req_array) {
726 		case VIRTUAL:
727 			*pmem_set_array = VIRTUAL;
728 			pmem_req_array++;
729 			pmem_set_array++;
730 			*(pmem_set_array) = *(pmem_req_array);
731 			pmem_set_array++;
732 			*(pmem_set_array) = (unsigned long) kmem_zalloc(
733 			    *(pmem_req_array), KM_NOSLEEP);
734 			if (*pmem_set_array == NULL)
735 				goto odl_init_failure;
736 			break;
737 		}
738 		pmem_req_array++;
739 		pmem_set_array++;
740 	}
741 
742 	/*
743 	 * Initialize memory on lower layers
744 	 */
745 	milSetResources(pLayerPointers, mem_set_array);
746 
747 	/* Allocate Rx/Tx descriptors */
748 	if (amd8111s_allocate_descriptors(pLayerPointers) != B_TRUE) {
749 		*pmem_set_array = NULL;
750 		goto odl_init_failure;
751 	}
752 
753 	/*
754 	 * Allocate Rx buffer for each Rx descriptor. Then call mil layer
755 	 * routine to fill physical address of Rx buffer into Rx descriptor.
756 	 */
757 	if (amd8111s_allocate_buffers(pLayerPointers) == B_FALSE) {
758 		amd8111s_free_descriptors(pLayerPointers);
759 		*pmem_set_array = NULL;
760 		goto odl_init_failure;
761 	}
762 	milInitGlbds(pLayerPointers);
763 
764 	return (0);
765 
766 odl_init_failure:
767 	/*
768 	 * Free All memory allocated so far
769 	 */
770 	pmem_req_array = mem_set_array;
771 	while ((*pmem_req_array) && (pmem_req_array != pmem_set_array)) {
772 		switch (*pmem_req_array) {
773 		case VIRTUAL:
774 			pmem_req_array++;	/* Size */
775 			size = *(pmem_req_array);
776 			pmem_req_array++;	/* Virtual Address */
777 			if (*pmem_req_array == NULL)
778 				return (1);
779 			kmem_free((int *)*pmem_req_array, size);
780 			break;
781 		}
782 		pmem_req_array++;
783 	}
784 	return (1);
785 }
786 
787 /*
788  * Allocate and initialize Tx/Rx descriptors
789  */
790 static boolean_t
791 amd8111s_allocate_descriptors(struct LayerPointers *pLayerPointers)
792 {
793 	struct odl *pOdl = pLayerPointers->pOdl;
794 	struct mil *pMil = pLayerPointers->pMil;
795 	dev_info_t *devinfo = pOdl->devinfo;
796 	uint_t length, count, i;
797 	size_t real_length;
798 
799 	/*
800 	 * Allocate Rx descriptors
801 	 */
802 	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
803 	    NULL, &pOdl->rx_desc_dma_handle) != DDI_SUCCESS) {
804 		amd8111s_log(pLayerPointers, CE_WARN,
805 		    "ddi_dma_alloc_handle for Rx desc failed");
806 		pOdl->rx_desc_dma_handle = NULL;
807 		return (B_FALSE);
808 	}
809 
810 	length = sizeof (struct rx_desc) * RX_RING_SIZE + ALIGNMENT;
811 	if (ddi_dma_mem_alloc(pOdl->rx_desc_dma_handle, length,
812 	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
813 	    NULL, (caddr_t *)&pMil->Rx_desc_original, &real_length,
814 	    &pOdl->rx_desc_acc_handle) != DDI_SUCCESS) {
815 
816 		amd8111s_log(pLayerPointers, CE_WARN,
817 		    "ddi_dma_mem_handle for Rx desc failed");
818 		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
819 		pOdl->rx_desc_dma_handle = NULL;
820 		return (B_FALSE);
821 	}
822 
823 	if (ddi_dma_addr_bind_handle(pOdl->rx_desc_dma_handle,
824 	    NULL, (caddr_t)pMil->Rx_desc_original, real_length,
825 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
826 	    NULL, &pOdl->rx_desc_dma_cookie,
827 	    &count) != DDI_SUCCESS) {
828 
829 		amd8111s_log(pLayerPointers, CE_WARN,
830 		    "ddi_dma_addr_bind_handle for Rx desc failed");
831 		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
832 		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
833 		pOdl->rx_desc_dma_handle = NULL;
834 		return (B_FALSE);
835 	}
836 	ASSERT(count == 1);
837 
838 	/* Initialize Rx descriptors related variables */
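	/*
	 * ALIGNMENT is used as a mask here: adding it and then ANDing with
	 * its complement rounds the address up to the controller's required
	 * descriptor alignment (the next (ALIGNMENT + 1)-byte boundary).
	 */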
839 	pMil->Rx_desc = (struct rx_desc *)
840 	    ((pMil->Rx_desc_original + ALIGNMENT) & ~ALIGNMENT);
841 	pMil->Rx_desc_pa = (unsigned int)
842 	    ((pOdl->rx_desc_dma_cookie.dmac_laddress + ALIGNMENT) & ~ALIGNMENT);
843 
844 	pLayerPointers->pMdl->init_blk->RDRA = pMil->Rx_desc_pa;
845 
846 
847 	/*
848 	 * Allocate Tx descriptors
849 	 */
850 	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
851 	    NULL, &pOdl->tx_desc_dma_handle) != DDI_SUCCESS) {
852 		amd8111s_log(pLayerPointers, CE_WARN,
853 		    "ddi_dma_alloc_handle for Tx desc failed");
854 		goto allocate_desc_fail;
855 	}
856 
857 	length = sizeof (struct tx_desc) * TX_RING_SIZE + ALIGNMENT;
858 	if (ddi_dma_mem_alloc(pOdl->tx_desc_dma_handle, length,
859 	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
860 	    NULL, (caddr_t *)&pMil->Tx_desc_original, &real_length,
861 	    &pOdl->tx_desc_acc_handle) != DDI_SUCCESS) {
862 
863 		amd8111s_log(pLayerPointers, CE_WARN,
864 		    "ddi_dma_mem_handle for Tx desc failed");
865 		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
866 		goto allocate_desc_fail;
867 	}
868 
869 	if (ddi_dma_addr_bind_handle(pOdl->tx_desc_dma_handle,
870 	    NULL, (caddr_t)pMil->Tx_desc_original, real_length,
871 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
872 	    NULL, &pOdl->tx_desc_dma_cookie,
873 	    &count) != DDI_SUCCESS) {
874 
875 		amd8111s_log(pLayerPointers, CE_WARN,
876 		    "ddi_dma_addr_bind_handle for Tx desc failed");
877 		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
878 		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
879 		goto allocate_desc_fail;
880 	}
881 	ASSERT(count == 1);
882 	/* Set the DMA area to all zeros */
883 	bzero((caddr_t)pMil->Tx_desc_original, length);
884 
885 	/* Initialize Tx descriptors related variables */
886 	pMil->Tx_desc = (struct tx_desc *)
887 	    ((pMil->Tx_desc_original + ALIGNMENT) & ~ALIGNMENT);
888 	pMil->pNonphysical->TxDescQRead = pMil->Tx_desc;
889 	pMil->pNonphysical->TxDescQWrite = pMil->Tx_desc;
890 	pMil->pNonphysical->TxDescQStart = pMil->Tx_desc;
891 	pMil->pNonphysical->TxDescQEnd = &(pMil->Tx_desc[TX_RING_SIZE - 1]);
892 
893 	/* Physical Addr of Tx_desc_original & Tx_desc */
894 	pLayerPointers->pMil->Tx_desc_pa =
895 	    ((pOdl->tx_desc_dma_cookie.dmac_laddress + ALIGNMENT) &
896 	    ~ALIGNMENT);
897 
898 	/* Setting the reserved bits in the tx descriptors */
899 	for (i = 0; i < TX_RING_SIZE; i++) {
900 		pMil->pNonphysical->TxDescQWrite->Tx_RES0 = 0x0f;
901 		pMil->pNonphysical->TxDescQWrite->Tx_OWN = 0;
902 		pMil->pNonphysical->TxDescQWrite++;
903 	}
904 	pMil->pNonphysical->TxDescQWrite = pMil->pNonphysical->TxDescQStart;
905 
906 	pLayerPointers->pMdl->init_blk->TDRA = pMil->Tx_desc_pa;
907 
908 	return (B_TRUE);
909 
910 allocate_desc_fail:
911 	pOdl->tx_desc_dma_handle = NULL;
912 	(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
913 	ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
914 	ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
915 	pOdl->rx_desc_dma_handle = NULL;
916 	return (B_FALSE);
917 }
918 
919 /*
920  * Free Tx/Rx descriptors
921  */
922 static void
923 amd8111s_free_descriptors(struct LayerPointers *pLayerPointers)
924 {
925 	struct odl *pOdl = pLayerPointers->pOdl;
926 
927 	/* Free Rx descriptors */
928 	if (pOdl->rx_desc_dma_handle) {
929 		(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
930 		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
931 		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
932 		pOdl->rx_desc_dma_handle = NULL;
933 	}
934 
935 	/* Free Tx descriptors */
936 	if (pOdl->tx_desc_dma_handle) {
937 		(void) ddi_dma_unbind_handle(pOdl->tx_desc_dma_handle);
938 		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
939 		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
940 		pOdl->tx_desc_dma_handle = NULL;
941 	}
942 }
943 
944 /*
945  * Allocate Tx/Rx Ring buffer
946  */
947 static boolean_t
948 amd8111s_alloc_dma_ringbuf(struct LayerPointers *pLayerPointers,
949 			struct amd8111s_dma_ringbuf *pRing,
950 			uint32_t ring_size, uint32_t msg_size)
951 {
952 	uint32_t idx, msg_idx = 0, msg_acc;
953 	dev_info_t *devinfo = pLayerPointers->pOdl->devinfo;
954 	size_t real_length;
955 	uint_t count = 0;
956 
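	/*
	 * The ring's backing store is carved into trunk_num DMA chunks
	 * ("trunks"). Each trunk is allocated and bound once, then sliced
	 * into msg_size buffers, so a single DMA handle serves many
	 * packet buffers.
	 */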
957 	ASSERT(pcn_buff_dma_attr_t.dma_attr_align == 1);
958 	pRing->dma_buf_sz = msg_size;
959 	pRing->ring_size = ring_size;
960 	pRing->trunk_num = AMD8111S_SPLIT;
961 	pRing->buf_sz = msg_size * ring_size;
962 	if (ring_size < pRing->trunk_num)
963 		pRing->trunk_num = ring_size;
964 	ASSERT((pRing->buf_sz % pRing->trunk_num) == 0);
965 
966 	pRing->trunk_sz = pRing->buf_sz / pRing->trunk_num;
967 	ASSERT((pRing->trunk_sz % pRing->dma_buf_sz) == 0);
968 
969 	pRing->msg_buf = kmem_zalloc(sizeof (struct amd8111s_msgbuf) *
970 	    ring_size, KM_NOSLEEP);
971 	pRing->dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
972 	    pRing->trunk_num, KM_NOSLEEP);
973 	pRing->acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
974 	    pRing->trunk_num, KM_NOSLEEP);
975 	pRing->dma_cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) *
976 	    pRing->trunk_num, KM_NOSLEEP);
977 	pRing->trunk_addr = kmem_zalloc(sizeof (caddr_t) *
978 	    pRing->trunk_num, KM_NOSLEEP);
979 	if (pRing->msg_buf == NULL || pRing->dma_hdl == NULL ||
980 	    pRing->acc_hdl == NULL || pRing->trunk_addr == NULL ||
981 	    pRing->dma_cookie == NULL) {
982 		amd8111s_log(pLayerPointers, CE_NOTE,
983 		    "kmem_zalloc failed");
984 		goto failed;
985 	}
986 
987 	for (idx = 0; idx < pRing->trunk_num; ++idx) {
988 		if (ddi_dma_alloc_handle(devinfo, &pcn_buff_dma_attr_t,
989 		    DDI_DMA_SLEEP, NULL, &(pRing->dma_hdl[idx]))
990 		    != DDI_SUCCESS) {
991 
992 			amd8111s_log(pLayerPointers, CE_WARN,
993 			    "ddi_dma_alloc_handle failed");
994 			goto failed;
995 		} else if (ddi_dma_mem_alloc(pRing->dma_hdl[idx],
996 		    pRing->trunk_sz, &pcn_acc_attr, DDI_DMA_STREAMING,
997 		    DDI_DMA_SLEEP, NULL,
998 		    (caddr_t *)&(pRing->trunk_addr[idx]),
999 		    (size_t *)(&real_length), &pRing->acc_hdl[idx])
1000 		    != DDI_SUCCESS) {
1001 
1002 			amd8111s_log(pLayerPointers, CE_WARN,
1003 			    "ddi_dma_mem_alloc failed");
1004 			goto failed;
1005 		} else if (real_length != pRing->trunk_sz) {
1006 			amd8111s_log(pLayerPointers, CE_WARN,
1007 			    "ddi_dma_mem_alloc failed");
1008 			goto failed;
1009 		} else if (ddi_dma_addr_bind_handle(pRing->dma_hdl[idx],
1010 		    NULL, (caddr_t)pRing->trunk_addr[idx], real_length,
1011 		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
1012 		    &pRing->dma_cookie[idx], &count) != DDI_DMA_MAPPED) {
1013 
1014 			amd8111s_log(pLayerPointers, CE_WARN,
1015 			    "ddi_dma_addr_bind_handle failed");
1016 			goto failed;
1017 		} else {
1018 			for (msg_acc = 0;
1019 			    msg_acc < pRing->trunk_sz / pRing->dma_buf_sz;
1020 			    ++ msg_acc) {
1021 				pRing->msg_buf[msg_idx].offset =
1022 				    msg_acc * pRing->dma_buf_sz;
1023 				pRing->msg_buf[msg_idx].vir_addr =
1024 				    pRing->trunk_addr[idx] +
1025 				    pRing->msg_buf[msg_idx].offset;
1026 				pRing->msg_buf[msg_idx].phy_addr =
1027 				    pRing->dma_cookie[idx].dmac_laddress +
1028 				    pRing->msg_buf[msg_idx].offset;
1029 				pRing->msg_buf[msg_idx].p_hdl =
1030 				    pRing->dma_hdl[idx];
1031 				msg_idx ++;
1032 			}
1033 		}
1034 	}
1035 
1036 	pRing->free = pRing->msg_buf;
1037 	pRing->next = pRing->msg_buf;
1038 	pRing->curr = pRing->msg_buf;
1039 
1040 	return (B_TRUE);
1041 failed:
1042 	amd8111s_free_dma_ringbuf(pRing);
1043 	return (B_FALSE);
1044 }
1045 
1046 /*
1047  * Free Tx/Rx ring buffer
1048  */
1049 static void
1050 amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *pRing)
1051 {
1052 	int idx;
1053 
1054 	if (pRing->dma_cookie != NULL) {
1055 		for (idx = 0; idx < pRing->trunk_num; idx ++) {
1056 			if (pRing->dma_cookie[idx].dmac_laddress == 0) {
1057 				break;
1058 			}
1059 			(void) ddi_dma_unbind_handle(pRing->dma_hdl[idx]);
1060 		}
1061 		kmem_free(pRing->dma_cookie,
1062 		    sizeof (ddi_dma_cookie_t) * pRing->trunk_num);
1063 	}
1064 
1065 	if (pRing->acc_hdl != NULL) {
1066 		for (idx = 0; idx < pRing->trunk_num; idx ++) {
1067 			if (pRing->acc_hdl[idx] == NULL)
1068 				break;
1069 			ddi_dma_mem_free(&pRing->acc_hdl[idx]);
1070 		}
1071 		kmem_free(pRing->acc_hdl,
1072 		    sizeof (ddi_acc_handle_t) * pRing->trunk_num);
1073 	}
1074 
1075 	if (pRing->dma_hdl != NULL) {
1076 		for (idx = 0; idx < pRing->trunk_num; idx ++) {
1077 			if (pRing->dma_hdl[idx] == 0) {
1078 				break;
1079 			}
1080 			ddi_dma_free_handle(&pRing->dma_hdl[idx]);
1081 		}
1082 		kmem_free(pRing->dma_hdl,
1083 		    sizeof (ddi_dma_handle_t) * pRing->trunk_num);
1084 	}
1085 
1086 	if (pRing->msg_buf != NULL) {
1087 		kmem_free(pRing->msg_buf,
1088 		    sizeof (struct amd8111s_msgbuf) * pRing->ring_size);
1089 	}
1090 
1091 	if (pRing->trunk_addr != NULL) {
1092 		kmem_free(pRing->trunk_addr,
1093 		    sizeof (caddr_t) * pRing->trunk_num);
1094 	}
1095 
1096 	bzero(pRing, sizeof (*pRing));
1097 }
1098 
1099 
1100 /*
1101  * Allocate all Tx buffers.
1102  * Allocate an Rx buffer for each Rx descriptor, then
1103  * call the MIL routine to fill the physical address of each Rx
1104  * buffer into its Rx descriptor.
1105  */
1106 static boolean_t
1107 amd8111s_allocate_buffers(struct LayerPointers *pLayerPointers)
1108 {
1109 	struct odl *pOdl = pLayerPointers->pOdl;
1110 
1111 	/*
1112 	 * Allocate Rx buffers
1113 	 */
1114 	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->rx_buf,
1115 	    RX_RING_SIZE, RX_BUF_SIZE) == B_FALSE) {
1116 		amd8111s_log(pLayerPointers, CE_WARN,
1117 		    "amd8111s_alloc_dma_ringbuf for tx failed");
1118 		goto allocate_buf_fail;
1119 	}
1120 
1121 	/*
1122 	 * Allocate Tx buffers
1123 	 */
1124 	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->tx_buf,
1125 	    TX_COALESC_SIZE, TX_BUF_SIZE) == B_FALSE) {
1126 		amd8111s_log(pLayerPointers, CE_WARN,
1127 		    "amd8111s_alloc_dma_ringbuf for tx failed");
1128 		goto allocate_buf_fail;
1129 	}
1130 
1131 	/*
1132 	 * Initialize the MIL queues
1133 	 */
1134 	milInitGlbds(pLayerPointers);
1135 
1136 	milInitRxQ(pLayerPointers);
1137 
1138 	return (B_TRUE);
1139 
1140 allocate_buf_fail:
1141 
1142 	amd8111s_log(pLayerPointers, CE_WARN,
1143 	    "amd8111s_allocate_buffers failed");
1144 	return (B_FALSE);
1145 }
1146 
1147 /*
1148  * Free all Rx/Tx buffer
1149  */
1150 
1151 static void
1152 amd8111s_free_buffers(struct LayerPointers *pLayerPointers)
1153 {
1154 	/* Free Tx buffers */
1155 	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->tx_buf);
1156 
1157 	/* Free Rx Buffers */
1158 	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->rx_buf);
1159 }
1160 
1161 /*
1162  * Try to recycle all the descriptors and Tx buffers
1163  * which are already freed by hardware.
1164  */
1165 static int
1166 amd8111s_recycle_tx(struct LayerPointers *pLayerPointers)
1167 {
1168 	struct nonphysical *pNonphysical;
1169 	uint32_t count = 0;
1170 
1171 	pNonphysical = pLayerPointers->pMil->pNonphysical;
1172 	while (pNonphysical->TxDescQRead->Tx_OWN == 0 &&
1173 	    pNonphysical->TxDescQRead != pNonphysical->TxDescQWrite) {
1174 		pLayerPointers->pOdl->tx_buf.free =
1175 		    NEXT(pLayerPointers->pOdl->tx_buf, free);
1176 		pNonphysical->TxDescQRead++;
1177 		if (pNonphysical->TxDescQRead > pNonphysical->TxDescQEnd) {
1178 			pNonphysical->TxDescQRead = pNonphysical->TxDescQStart;
1179 		}
1180 		count ++;
1181 	}
1182 
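	/*
	 * If a previous transmit failed for lack of buffers, kick the
	 * drain soft interrupt so mac_tx_update() can restart the stream.
	 */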
1183 	if (pLayerPointers->pMil->tx_reschedule)
1184 		ddi_trigger_softintr(pLayerPointers->pOdl->drain_id);
1185 
1186 	return (count);
1187 }
1188 
1189 /*
1190  * Get packets in the Tx buffer, then copy them to the send buffer.
1191  * Trigger hardware to send out packets.
1192  */
1193 static void
1194 amd8111s_send_serial(struct LayerPointers *pLayerPointers)
1195 {
1196 	struct nonphysical *pNonphysical;
1197 	uint32_t count;
1198 
1199 	pNonphysical = pLayerPointers->pMil->pNonphysical;
1200 
1201 	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);
1202 
1203 	for (count = 0; count < AMD8111S_SEND_MAX; count ++) {
1204 		if (pLayerPointers->pOdl->tx_buf.curr ==
1205 		    pLayerPointers->pOdl->tx_buf.next) {
1206 			break;
1207 		}
1208 		/* If the ring would be full, first recycle completed Tx descriptors */
1209 		if (((pNonphysical->TxDescQWrite + 1 >
1210 		    pNonphysical->TxDescQEnd) ? pNonphysical->TxDescQStart :
1211 		    (pNonphysical->TxDescQWrite + 1)) ==
1212 		    pNonphysical->TxDescQRead)
1213 			if (amd8111s_recycle_tx(pLayerPointers) == 0) {
1214 				pLayerPointers->pOdl
1215 				    ->statistics.tx_no_descriptor ++;
1216 				break;
1217 			}
1218 
1219 		/* Fill packet length */
1220 		pNonphysical->TxDescQWrite->Tx_BCNT = (uint16_t)pLayerPointers
1221 		    ->pOdl->tx_buf.curr->msg_size;
1222 
1223 		/* Fill physical buffer address */
1224 		pNonphysical->TxDescQWrite->Tx_Base_Addr = (unsigned int)
1225 		    pLayerPointers->pOdl->tx_buf.curr->phy_addr;
1226 
1227 		pNonphysical->TxDescQWrite->Tx_SOP = 1;
1228 		pNonphysical->TxDescQWrite->Tx_EOP = 1;
1229 		pNonphysical->TxDescQWrite->Tx_ADD_FCS = 1;
1230 		pNonphysical->TxDescQWrite->Tx_LTINT = 1;
1231 		pNonphysical->TxDescQWrite->Tx_USPACE = 0;
1232 		pNonphysical->TxDescQWrite->Tx_OWN = 1;
1233 
1234 		pNonphysical->TxDescQWrite++;
1235 		if (pNonphysical->TxDescQWrite > pNonphysical->TxDescQEnd) {
1236 			pNonphysical->TxDescQWrite = pNonphysical->TxDescQStart;
1237 		}
1238 
1239 		pLayerPointers->pOdl->tx_buf.curr =
1240 		    NEXT(pLayerPointers->pOdl->tx_buf, curr);
1241 
1242 	}
1243 
1244 	pLayerPointers->pOdl->statistics.tx_ok_packets += count;
1245 
1246 	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);
1247 
1248 	/* Call mdlTransmit to send the pkt out on the network */
1249 	mdlTransmit(pLayerPointers);
1250 
1251 }
1252 
1253 /*
1254  * Soft interrupt entry point: try to send out the packets in the Tx
1255  * buffer. If tx_reschedule is set, call mac_tx_update() to re-enable
1256  * transmission.
1257  */
1258 static uint_t
1259 amd8111s_send_drain(caddr_t arg)
1260 {
1261 	struct LayerPointers *pLayerPointers = (void *)arg;
1262 
1263 	amd8111s_send_serial(pLayerPointers);
1264 
1265 	if (pLayerPointers->pMil->tx_reschedule &&
1266 	    NEXT(pLayerPointers->pOdl->tx_buf, next) !=
1267 	    pLayerPointers->pOdl->tx_buf.free) {
1268 		mac_tx_update(pLayerPointers->pOdl->mh);
1269 		pLayerPointers->pMil->tx_reschedule = B_FALSE;
1270 	}
1271 
1272 	return (DDI_INTR_CLAIMED);
1273 }
1274 
1275 /*
1276  * Get a Tx buffer
1277  */
1278 static struct amd8111s_msgbuf *
1279 amd8111s_getTxbuf(struct LayerPointers *pLayerPointers)
1280 {
1281 	struct amd8111s_msgbuf *tmp, *next;
1282 
1283 	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);
1284 	next = NEXT(pLayerPointers->pOdl->tx_buf, next);
1285 	if (next == pLayerPointers->pOdl->tx_buf.free) {
1286 		tmp = NULL;
1287 	} else {
1288 		tmp = pLayerPointers->pOdl->tx_buf.next;
1289 		pLayerPointers->pOdl->tx_buf.next = next;
1290 	}
1291 	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);
1292 
1293 	return (tmp);
1294 }
1295 
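/*
 * Copy the whole mblk chain into one pre-allocated, pre-bound DMA
 * buffer (bounce-buffer transmit) and queue it for the hardware; the
 * mblk chain is freed here on success.
 */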
1296 static boolean_t
1297 amd8111s_send(struct LayerPointers *pLayerPointers, mblk_t *mp)
1298 {
1299 	struct odl *pOdl;
1300 	size_t frag_len;
1301 	mblk_t *tmp;
1302 	struct amd8111s_msgbuf *txBuf;
1303 	uint8_t *pMsg;
1304 
1305 	pOdl = pLayerPointers->pOdl;
1306 
1307 	/* alloc send buffer */
1308 	txBuf = amd8111s_getTxbuf(pLayerPointers);
1309 	if (txBuf == NULL) {
1310 		pOdl->statistics.tx_no_buffer ++;
1311 		pLayerPointers->pMil->tx_reschedule = B_TRUE;
1312 		amd8111s_send_serial(pLayerPointers);
1313 		return (B_FALSE);
1314 	}
1315 
1316 	/* copy packet to send buffer */
1317 	txBuf->msg_size = 0;
1318 	pMsg = (uint8_t *)txBuf->vir_addr;
1319 	for (tmp = mp; tmp; tmp = tmp->b_cont) {
1320 		frag_len = MBLKL(tmp);
1321 		bcopy(tmp->b_rptr, pMsg, frag_len);
1322 		txBuf->msg_size += frag_len;
1323 		pMsg += frag_len;
1324 	}
1325 	freemsg(mp);
1326 
1327 	amd8111s_send_serial(pLayerPointers);
1328 
1329 	return (B_TRUE);
1330 }
1331 
1332 /*
1333  * (GLD Entry Point) Send the message block to lower layer
1334  */
1335 static mblk_t *
1336 amd8111s_m_tx(void *arg, mblk_t *mp)
1337 {
1338 	struct LayerPointers *pLayerPointers = arg;
1339 	mblk_t *next;
1340 
1341 	rw_enter(&pLayerPointers->pOdl->chip_lock, RW_READER);
1342 	if (!pLayerPointers->run) {
1343 		pLayerPointers->pOdl->statistics.tx_afterunplumb ++;
1344 		freemsgchain(mp);
1345 		mp = NULL;
1346 	}
1347 
1348 	while (mp != NULL) {
1349 		next = mp->b_next;
1350 		mp->b_next = NULL;
1351 		if (!amd8111s_send(pLayerPointers, mp)) {
1352 			/* Send fail */
1353 			mp->b_next = next;
1354 			break;
1355 		}
1356 		mp = next;
1357 	}
1358 
1359 	rw_exit(&pLayerPointers->pOdl->chip_lock);
1360 	return (mp);
1361 }
1362 
1363 /*
1364  * (GLD Entry Point) Interrupt Service Routine
1365  */
1366 static uint_t
1367 amd8111s_intr(caddr_t arg)
1368 {
1369 	unsigned int intrCauses;
1370 	struct LayerPointers *pLayerPointers = (void *)arg;
1371 
1372 	/* Read the interrupt status from mdl */
1373 	intrCauses = mdlReadInterrupt(pLayerPointers);
1374 
1375 	if (intrCauses == 0) {
1376 		pLayerPointers->pOdl->statistics.intr_OTHER ++;
1377 		return (DDI_INTR_UNCLAIMED);
1378 	}
1379 
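	/*
	 * LCINT: link-change interrupt. Re-read the PHY state and report
	 * any transition to the MAC layer.
	 */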
1380 	if (intrCauses & LCINT) {
1381 		if (mdlReadLink(pLayerPointers) == LINK_UP) {
1382 			mdlGetActiveMediaInfo(pLayerPointers);
1383 			/* Link status changed */
1384 			if (pLayerPointers->pOdl->LinkStatus !=
1385 			    LINK_STATE_UP) {
1386 				pLayerPointers->pOdl->LinkStatus =
1387 				    LINK_STATE_UP;
1388 				mac_link_update(pLayerPointers->pOdl->mh,
1389 				    LINK_STATE_UP);
1390 			}
1391 		} else {
1392 			if (pLayerPointers->pOdl->LinkStatus !=
1393 			    LINK_STATE_DOWN) {
1394 				pLayerPointers->pOdl->LinkStatus =
1395 				    LINK_STATE_DOWN;
1396 				mac_link_update(pLayerPointers->pOdl->mh,
1397 				    LINK_STATE_DOWN);
1398 			}
1399 		}
1400 	}
1401 	/*
1402 	 * RINT0: Receive Interrupt is set by the controller after the last
1403 	 * descriptor of a receive frame for this ring has been updated by
1404 	 * writing a 0 to the OWNership bit.
1405 	 */
1406 	if (intrCauses & RINT0) {
1407 		pLayerPointers->pOdl->statistics.intr_RINT0 ++;
1408 		amd8111s_receive(pLayerPointers);
1409 	}
1410 
1411 	/*
1412 	 * TINT0: Transmit Interrupt is set by the controller after the OWN bit
1413 	 * in the last descriptor of a transmit frame in this particular ring
1414 	 * has been cleared to indicate the frame has been copied to the
1415 	 * transmit FIFO.
1416 	 */
1417 	if (intrCauses & TINT0) {
1418 		pLayerPointers->pOdl->statistics.intr_TINT0 ++;
1419 		/*
1420 		 * Now that the hardware has released descriptors, drain any
1421 		 * packets still waiting in the Tx buffer.
1422 		 */
1423 		amd8111s_send_serial(pLayerPointers);
1424 	}
1425 
1426 	if (intrCauses & STINT) {
1427 		pLayerPointers->pOdl->statistics.intr_STINT ++;
1428 	}
1429 
1430 
1431 	return (DDI_INTR_CLAIMED);
1432 }
1433 
1434 /*
1435  * To re-initialize data structures.
1436  */
1437 static void
1438 amd8111s_sw_reset(struct LayerPointers *pLayerPointers)
1439 {
1440 	/* Reset all Tx/Rx queues and descriptors */
1441 	milResetTxQ(pLayerPointers);
1442 	milInitRxQ(pLayerPointers);
1443 }
1444 
1445 /*
1446  * Wait until all pending Tx packets have been transmitted
1447  */
1448 static void
1449 amd8111s_tx_drain(struct LayerPointers *adapter)
1450 {
1451 	struct tx_desc *pTx_desc = adapter->pMil->pNonphysical->TxDescQStart;
1452 	int i, desc_count = 0;
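	/*
	 * Poll up to 30 times, 1 ms apart, for the controller to release
	 * (Tx_OWN == 0) every descriptor in the ring.
	 */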
1453 	for (i = 0; i < 30; i++) {
1454 		while ((pTx_desc->Tx_OWN == 0) && (desc_count < TX_RING_SIZE)) {
1455 			/* This packet has been transmitted */
1456 			pTx_desc ++;
1457 			desc_count ++;
1458 		}
1459 		if (desc_count == TX_RING_SIZE) {
1460 			break;
1461 		}
1462 		/* Wait 1 ms */
1463 		drv_usecwait(1000);
1464 	}
1465 	adapter->pOdl->statistics.tx_draintime = i;
1466 }
1467 
1468 /*
1469  * (GLD Entry Point) Start the card. Called at
1470  * "ifconfig plumb" time.
1471  */
1472 static int
1473 amd8111s_m_start(void *arg)
1474 {
1475 	struct LayerPointers *pLayerPointers = arg;
1476 	struct odl *pOdl = pLayerPointers->pOdl;
1477 
1478 	amd8111s_sw_reset(pLayerPointers);
1479 	mdlHWReset(pLayerPointers);
1480 	rw_enter(&pOdl->chip_lock, RW_WRITER);
1481 	pLayerPointers->run = B_TRUE;
1482 	rw_exit(&pOdl->chip_lock);
1483 	return (0);
1484 }
1485 
1486 /*
1487  * (GLD Entry Point) Stop the card. Called at
1488  * "ifconfig unplumb" time.
1489  */
1490 static void
1491 amd8111s_m_stop(void *arg)
1492 {
1493 	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;
1494 	struct odl *pOdl = pLayerPointers->pOdl;
1495 
1496 	/* Ensure all pending Tx packets have been sent */
1497 	amd8111s_tx_drain(pLayerPointers);
1498 	/*
1499 	 * Stop the controller and disable the controller interrupt
1500 	 */
1501 	rw_enter(&pOdl->chip_lock, RW_WRITER);
1502 	mdlStopChip(pLayerPointers);
1503 	pLayerPointers->run = B_FALSE;
1504 	rw_exit(&pOdl->chip_lock);
1505 }
1506 
1507 /*
1508  *	Clean up all allocated resources
1509  */
1510 static void
1511 amd8111s_free_resource(struct LayerPointers *pLayerPointers)
1512 {
1513 	unsigned long mem_free_array[100];
1514 	unsigned long *pmem_free_array, size;
1515 
1516 	/* Free Rx/Tx descriptors */
1517 	amd8111s_free_descriptors(pLayerPointers);
1518 
1519 	/* Free memory on lower layers */
1520 	milFreeResources(pLayerPointers, mem_free_array);
1521 	pmem_free_array = mem_free_array;
1522 	while (*pmem_free_array) {
1523 		switch (*pmem_free_array) {
1524 		case VIRTUAL:
1525 			size = *(++pmem_free_array);
1526 			pmem_free_array++;
1527 			kmem_free((void *)*(pmem_free_array), size);
1528 			break;
1529 		}
1530 		pmem_free_array++;
1531 	}
1532 
1533 	amd8111s_free_buffers(pLayerPointers);
1534 }
1535 
1536 /*
1537  * (GLD Entry Point) Add/delete multicast addresses
1538  *
1539  */
1540 static int
1541 amd8111s_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
1542 {
1543 	struct LayerPointers *pLayerPointers = arg;
1544 
1545 	if (add) {
1546 		/* Add a multicast entry */
1547 		mdlAddMulticastAddress(pLayerPointers, (UCHAR *)addr);
1548 	} else {
1549 		/* Delete a multicast entry */
1550 		mdlDeleteMulticastAddress(pLayerPointers, (UCHAR *)addr);
1551 	}
1552 
1553 	return (0);
1554 }
1555 
1556 #ifdef AMD8111S_DEBUG
1557 /*
1558  * The size of MIB registers is only 32 bits. Dump them before one
1559  * of them overflows.
1560  */
1561 static void
1562 amd8111s_dump_mib(struct LayerPointers *pLayerPointers)
1563 {
1564 	struct amd8111s_statistics *adapterStat;
1565 
1566 	adapterStat = &pLayerPointers->pOdl->statistics;
1567 
1568 	adapterStat->mib_dump_counter ++;
1569 
1570 	/*
1571 	 * Rx Counters
1572 	 */
1573 	adapterStat->rx_mib_unicst_packets +=
1574 	    mdlReadMib(pLayerPointers, RcvUniCastPkts);
1575 	adapterStat->rx_mib_multicst_packets +=
1576 	    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
1577 	adapterStat->rx_mib_broadcst_packets +=
1578 	    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
1579 	adapterStat->rx_mib_macctrl_packets +=
1580 	    mdlReadMib(pLayerPointers, RcvMACCtrl);
1581 	adapterStat->rx_mib_flowctrl_packets +=
1582 	    mdlReadMib(pLayerPointers, RcvFlowCtrl);
1583 
1584 	adapterStat->rx_mib_bytes +=
1585 	    mdlReadMib(pLayerPointers, RcvOctets);
1586 	adapterStat->rx_mib_good_bytes +=
1587 	    mdlReadMib(pLayerPointers, RcvGoodOctets);
1588 
1589 	adapterStat->rx_mib_undersize_packets +=
1590 	    mdlReadMib(pLayerPointers, RcvUndersizePkts);
1591 	adapterStat->rx_mib_oversize_packets +=
1592 	    mdlReadMib(pLayerPointers, RcvOversizePkts);
1593 
1594 	adapterStat->rx_mib_drop_packets +=
1595 	    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
1596 	adapterStat->rx_mib_align_err_packets +=
1597 	    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
1598 	adapterStat->rx_mib_fcs_err_packets +=
1599 	    mdlReadMib(pLayerPointers, RcvFCSErrors);
1600 	adapterStat->rx_mib_symbol_err_packets +=
1601 	    mdlReadMib(pLayerPointers, RcvSymbolErrors);
1602 	adapterStat->rx_mib_miss_packets +=
1603 	    mdlReadMib(pLayerPointers, RcvMissPkts);
1604 
1605 	/*
1606 	 * Tx Counters
1607 	 */
1608 	adapterStat->tx_mib_packets +=
1609 	    mdlReadMib(pLayerPointers, XmtPackets);
1610 	adapterStat->tx_mib_multicst_packets +=
1611 	    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
1612 	adapterStat->tx_mib_broadcst_packets +=
1613 	    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
1614 	adapterStat->tx_mib_flowctrl_packets +=
1615 	    mdlReadMib(pLayerPointers, XmtFlowCtrl);
1616 
1617 	adapterStat->tx_mib_bytes +=
1618 	    mdlReadMib(pLayerPointers, XmtOctets);
1619 
1620 	adapterStat->tx_mib_defer_trans_packets +=
1621 	    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
1622 	adapterStat->tx_mib_collision_packets +=
1623 	    mdlReadMib(pLayerPointers, XmtCollisions);
1624 	adapterStat->tx_mib_one_coll_packets +=
1625 	    mdlReadMib(pLayerPointers, XmtOneCollision);
1626 	adapterStat->tx_mib_multi_coll_packets +=
1627 	    mdlReadMib(pLayerPointers, XmtMultipleCollision);
1628 	adapterStat->tx_mib_late_coll_packets +=
1629 	    mdlReadMib(pLayerPointers, XmtLateCollision);
1630 	adapterStat->tx_mib_ex_coll_packets +=
1631 	    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
1632 
1633 
1634 	/* Clear all MIB registers */
1635 	WRITE_REG16(pLayerPointers, pLayerPointers->pMdl->Mem_Address
1636 	    + MIB_ADDR, MIB_CLEAR);
1637 }
1638 #endif
1639 
1640 /*
1641  * (GLD Entry Point) Set/unset promiscuous mode
1642  */
1643 static int
1644 amd8111s_m_promisc(void *arg, boolean_t on)
1645 {
1646 	struct LayerPointers *pLayerPointers = arg;
1647 
1648 	if (on) {
1649 		mdlSetPromiscuous(pLayerPointers);
1650 	} else {
1651 		mdlDisablePromiscuous(pLayerPointers);
1652 	}
1653 
1654 	return (0);
1655 }
1656 
1657 /*
1658  * (GLD Entry Point) Change the MAC address of the card
1659  */
1660 static int
1661 amd8111s_m_unicst(void *arg, const uint8_t *macaddr)
1662 {
1663 	struct LayerPointers *pLayerPointers = arg;
1664 
1665 	mdlDisableInterrupt(pLayerPointers);
1666 	mdlSetMacAddress(pLayerPointers, (unsigned char *)macaddr);
1667 	mdlEnableInterrupt(pLayerPointers);
1668 
1669 	return (0);
1670 }
1671 
1672 /*
1673  * Reset the card
1674  */
1675 void
1676 amd8111s_reset(struct LayerPointers *pLayerPointers)
1677 {
1678 	amd8111s_sw_reset(pLayerPointers);
1679 	mdlHWReset(pLayerPointers);
1680 }
1681 
1682 /*
1683  * attach(9E) -- Attach a device to the system
1684  *
1685  * Called once for each board after successfully probed.
1686  * will do
1687  * 	a. creating minor device node for the instance.
1688  *	b. allocate & Initilize four layers (call odlInit)
1689  *	c. get MAC address
1690  *	d. initilize pLayerPointers to gld private pointer
1691  *	e. register with GLD
1692  * if any action fails does clean up & returns DDI_FAILURE
1693  * else retursn DDI_SUCCESS
1694  */
1695 static int
1696 amd8111s_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
1697 {
1698 	mac_register_t *macp;
1699 	struct LayerPointers *pLayerPointers;
1700 	struct odl *pOdl;
1701 	ddi_acc_handle_t *pci_handle;
1702 	ddi_device_acc_attr_t dev_attr;
1703 	caddr_t addrp = NULL;
1704 
1705 	switch (cmd) {
1706 	case DDI_ATTACH:
1707 		break;
1708 	default:
1709 		return (DDI_FAILURE);
1710 	}
1711 
1712 	pLayerPointers = (struct LayerPointers *)
1713 	    kmem_zalloc(sizeof (struct LayerPointers), KM_SLEEP);
1714 	amd8111sadapter = pLayerPointers;
1715 
1716 	/* Get device instance number */
1717 	pLayerPointers->instance = ddi_get_instance(devinfo);
1718 	ddi_set_driver_private(devinfo, (caddr_t)pLayerPointers);
1719 
1720 	pOdl = (struct odl *)kmem_zalloc(sizeof (struct odl), KM_SLEEP);
1721 	pLayerPointers->pOdl = pOdl;
1722 
1723 	pOdl->devinfo = devinfo;
1724 
1725 	/*
1726 	 * Here, we only allocate memory for struct odl and initialize it.
1727 	 * All other memory allocation & initialization will be done by
1728 	 * odlInit later in this routine.
1729 	 */
1730 	if (ddi_get_iblock_cookie(devinfo, 0, &pLayerPointers->pOdl->iblock)
1731 	    != DDI_SUCCESS) {
1732 		amd8111s_log(pLayerPointers, CE_NOTE,
1733 		    "attach: get iblock cookies failed");
1734 		goto attach_failure;
1735 	}
1736 
1737 	rw_init(&pOdl->chip_lock, NULL, RW_DRIVER, (void *)pOdl->iblock);
1738 	mutex_init(&pOdl->mdlSendLock, "amd8111s Send Protection Lock",
1739 	    MUTEX_DRIVER, (void *)pOdl->iblock);
1740 	mutex_init(&pOdl->mdlRcvLock, "amd8111s Rcv Protection Lock",
1741 	    MUTEX_DRIVER, (void *)pOdl->iblock);
1742 
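	/*
	 * attach_progress records each step that completes successfully so
	 * that amd8111s_unattach() can tear down exactly what was set up.
	 */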
1743 	/* Setup PCI space */
1744 	if (pci_config_setup(devinfo, &pOdl->pci_handle) != DDI_SUCCESS) {
1745 		goto attach_failure;
1746 	}
1747 	pLayerPointers->attach_progress = AMD8111S_ATTACH_PCI;
1748 	pci_handle = &pOdl->pci_handle;
1749 
1750 	pOdl->vendor_id = pci_config_get16(*pci_handle, PCI_CONF_VENID);
1751 	pOdl->device_id = pci_config_get16(*pci_handle, PCI_CONF_DEVID);
1752 
1753 	/*
1754 	 * Allocate and initialize all resource and map device registers.
1755 	 * If failed, it returns a non-zero value.
1756 	 */
1757 	if (amd8111s_odlInit(pLayerPointers) != 0) {
1758 		goto attach_failure;
1759 	}
1760 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESOURCE;
1761 
1762 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1763 	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1764 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1765 
1766 	if (ddi_regs_map_setup(devinfo, 1, &addrp, 0, 4096, &dev_attr,
1767 	    &(pLayerPointers->pOdl->MemBasehandle)) != 0) {
1768 		amd8111s_log(pLayerPointers, CE_NOTE,
1769 		    "attach: ddi_regs_map_setup failed");
1770 		goto attach_failure;
1771 	}
1772 	pLayerPointers->pMdl->Mem_Address = (unsigned long)addrp;
1773 
1774 	/* Initialize HW */
1775 	mdlOpen(pLayerPointers);
1776 	mdlGetActiveMediaInfo(pLayerPointers);
1777 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_REGS;
1778 
1779 	/*
1780 	 * Setup the interrupt
1781 	 */
1782 	if (ddi_add_intr(devinfo, 0, &pOdl->iblock, 0, amd8111s_intr,
1783 	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
1784 		goto attach_failure;
1785 	}
1786 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_INTRADDED;
1787 
1788 	/*
1789 	 * Setup soft intr
1790 	 */
1791 	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &pOdl->drain_id,
1792 	    NULL, NULL, amd8111s_send_drain,
1793 	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
1794 		goto attach_failure;
1795 	}
1796 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESCHED;
1797 
1798 	/*
1799 	 * Initialize the mac structure
1800 	 */
1801 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
1802 		goto attach_failure;
1803 
1804 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
1805 	macp->m_driver = pLayerPointers;
1806 	macp->m_dip = devinfo;
1807 	/* Get MAC address */
1808 	mdlGetMacAddress(pLayerPointers, (unsigned char *)pOdl->MacAddress);
1809 	macp->m_src_addr = pOdl->MacAddress;
1810 	macp->m_callbacks = &amd8111s_m_callbacks;
1811 	macp->m_min_sdu = 0;
1812 	/* 1518 - 14 (ether header) - 4 (CRC) */
1813 	macp->m_max_sdu = ETHERMTU;
1814 	macp->m_margin = VLAN_TAGSZ;
1815 
1816 	/*
1817 	 * Finally, we're ready to register ourselves with the MAC layer
1818 	 * interface; if this succeeds, we're ready to start.
1819 	 */
1820 	if (mac_register(macp, &pOdl->mh) != DDI_SUCCESS) {
1821 		mac_free(macp);
1822 		goto attach_failure;
1823 	}
1824 	mac_free(macp);
1825 
1826 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_MACREGED;
1827 
1828 	return (DDI_SUCCESS);
1829 
1830 attach_failure:
1831 	(void) amd8111s_unattach(devinfo, pLayerPointers);
1832 	return (DDI_FAILURE);
1833 
1834 }
1835 
1836 /*
1837  * detach(9E) -- Detach a device from the system
1838  *
1839  * It is called for each device instance when the system is preparing to
1840  * unload a dynamically unloadable driver.
1841  * It will:
1842  * 	a. check whether any driver buffers are still held by the OS;
1843  *	b. clean up all allocated memory if it is not in use by the OS;
1844  *	c. unregister from the GLD framework;
1845  *	d. return DDI_SUCCESS on a successful free and unregister,
1846  *	   else DDI_FAILURE.
1847  */
1848 static int
1849 amd8111s_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
1850 {
1851 	struct LayerPointers *pLayerPointers;
1852 
1853 	switch (cmd) {
1854 	case DDI_DETACH:
1855 		break;
1856 	default:
1857 		return (DDI_FAILURE);
1858 	}
1859 
1860 	/*
1861 	 * Get the driver private (struct LayerPointers *) structure
1862 	 */
1863 	if ((pLayerPointers = (struct LayerPointers *)
1864 	    ddi_get_driver_private(devinfo)) == NULL) {
1865 		return (DDI_FAILURE);
1866 	}
1867 
1868 	return (amd8111s_unattach(devinfo, pLayerPointers));
1869 }
1870 
1871 static int
1872 amd8111s_unattach(dev_info_t *devinfo, struct LayerPointers *pLayerPointers)
1873 {
1874 	struct odl *pOdl = pLayerPointers->pOdl;
1875 
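	/* Undo only the attach steps recorded in attach_progress */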
1876 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_MACREGED) {
1877 		/* Unregister driver from the GLD interface */
1878 		if (mac_unregister(pOdl->mh) != DDI_SUCCESS) {
1879 			return (DDI_FAILURE);
1880 		}
1881 	}
1882 
1883 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_INTRADDED) {
1884 		ddi_remove_intr(devinfo, 0, pOdl->iblock);
1885 	}
1886 
1887 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESCHED) {
1888 		ddi_remove_softintr(pOdl->drain_id);
1889 	}
1890 
1891 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_REGS) {
1892 		/* Stop HW */
1893 		mdlStopChip(pLayerPointers);
1894 		ddi_regs_map_free(&(pOdl->MemBasehandle));
1895 	}
1896 
1897 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESOURCE) {
1898 		/* Free All memory allocated */
1899 		amd8111s_free_resource(pLayerPointers);
1900 	}
1901 
1902 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_PCI) {
1903 		pci_config_teardown(&pOdl->pci_handle);
1904 		mutex_destroy(&pOdl->mdlSendLock);
1905 		mutex_destroy(&pOdl->mdlRcvLock);
1906 		rw_destroy(&pOdl->chip_lock);
1907 	}
1908 
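	/* Free the private structures allocated during attach */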
1909 	kmem_free(pOdl, sizeof (struct odl));
1910 	kmem_free(pLayerPointers, sizeof (struct LayerPointers));
1911 
1912 	return (DDI_SUCCESS);
1913 }
1914 
1915 /*
1916  * (GLD Entry Point) GLD calls this entry point periodically to get
1917  * driver statistics; counters combine soft and on-chip MIB values.
1918  */
1919 static int
1920 amd8111s_m_stat(void *arg, uint_t stat, uint64_t *val)
1921 {
1922 	struct LayerPointers *pLayerPointers = arg;
1923 	struct amd8111s_statistics *adapterStat;
1924 
1925 	adapterStat = &pLayerPointers->pOdl->statistics;
1926 
1927 	switch (stat) {
1928 
1929 	/*
1930 	 * Current Status
1931 	 */
1932 	case MAC_STAT_IFSPEED:
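		/* pMdl->Speed is in Mbps; MAC_STAT_IFSPEED is bits/sec */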
1933 		*val = pLayerPointers->pMdl->Speed * 1000000;
1934 		break;
1935 
1936 	case ETHER_STAT_LINK_DUPLEX:
1937 		if (pLayerPointers->pMdl->FullDuplex) {
1938 			*val = LINK_DUPLEX_FULL;
1939 		} else {
1940 			*val = LINK_DUPLEX_HALF;
1941 		}
1942 		break;
1943 
1944 	/*
1945 	 * Capabilities
1946 	 */
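	/* This is a 10/100 device, so the gigabit capabilities are 0 */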
1947 	case ETHER_STAT_CAP_1000FDX:
1948 		*val = 0;
1949 		break;
1950 
1951 	case ETHER_STAT_CAP_1000HDX:
1952 		*val = 0;
1953 		break;
1954 
1955 	case ETHER_STAT_CAP_100FDX:
1956 		*val = 1;
1957 		break;
1958 
1959 	case ETHER_STAT_CAP_100HDX:
1960 		*val = 1;
1961 		break;
1962 
1963 	case ETHER_STAT_CAP_10FDX:
1964 		*val = 1;
1965 		break;
1966 
1967 	case ETHER_STAT_CAP_10HDX:
1968 		*val = 1;
1969 		break;
1970 
1971 	case ETHER_STAT_CAP_ASMPAUSE:
1972 		*val = 1;
1973 		break;
1974 
1975 	case ETHER_STAT_CAP_PAUSE:
1976 		*val = 1;
1977 		break;
1978 
1979 	case ETHER_STAT_CAP_AUTONEG:
1980 		*val = 1;
1981 		break;
1982 
1983 	case ETHER_STAT_ADV_CAP_1000FDX:
1984 		*val = 0;
1985 		break;
1986 
1987 	case ETHER_STAT_ADV_CAP_1000HDX:
1988 		*val = 0;
1989 		break;
1990 
1991 	case ETHER_STAT_ADV_CAP_100FDX:
1992 		*val = 1;
1993 		break;
1994 
1995 	case ETHER_STAT_ADV_CAP_100HDX:
1996 		*val = 1;
1997 		break;
1998 
1999 	case ETHER_STAT_ADV_CAP_10FDX:
2000 		*val = 1;
2001 		break;
2002 
2003 	case ETHER_STAT_ADV_CAP_10HDX:
2004 		*val = 1;
2005 		break;
2006 
2007 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
2008 		*val = 1;
2009 		break;
2010 
2011 	case ETHER_STAT_ADV_CAP_PAUSE:
2012 		*val = 1;
2013 		break;
2014 
2015 	case ETHER_STAT_ADV_CAP_AUTONEG:
2016 		*val = 1;
2017 		break;
2018 
2019 	/*
2020 	 * Rx Counters
2021 	 */
2022 	case MAC_STAT_IPACKETS:
2023 		*val = adapterStat->rx_mib_unicst_packets +
2024 		    adapterStat->rx_mib_multicst_packets +
2025 		    adapterStat->rx_mib_broadcst_packets +
2026 		    mdlReadMib(pLayerPointers, RcvUniCastPkts) +
2027 		    mdlReadMib(pLayerPointers, RcvMultiCastPkts) +
2028 		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
2029 		break;
2030 
2031 	case MAC_STAT_RBYTES:
2032 		*val = adapterStat->rx_mib_bytes +
2033 		    mdlReadMib(pLayerPointers, RcvOctets);
2034 		break;
2035 
2036 	case MAC_STAT_MULTIRCV:
2037 		*val = adapterStat->rx_mib_multicst_packets +
2038 		    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
2039 		break;
2040 
2041 	case MAC_STAT_BRDCSTRCV:
2042 		*val = adapterStat->rx_mib_broadcst_packets +
2043 		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
2044 		break;
2045 
2046 	case MAC_STAT_NORCVBUF:
2047 		*val = adapterStat->rx_allocfail +
2048 		    adapterStat->rx_mib_drop_packets +
2049 		    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
2050 		break;
2051 
2052 	case MAC_STAT_IERRORS:
2053 		*val = adapterStat->rx_mib_align_err_packets +
2054 		    adapterStat->rx_mib_fcs_err_packets +
2055 		    adapterStat->rx_mib_symbol_err_packets +
2056 		    mdlReadMib(pLayerPointers, RcvAlignmentErrors) +
2057 		    mdlReadMib(pLayerPointers, RcvFCSErrors) +
2058 		    mdlReadMib(pLayerPointers, RcvSymbolErrors);
2059 		break;
2060 
2061 	case ETHER_STAT_ALIGN_ERRORS:
2062 		*val = adapterStat->rx_mib_align_err_packets +
2063 		    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
2064 		break;
2065 
2066 	case ETHER_STAT_FCS_ERRORS:
2067 		*val = adapterStat->rx_mib_fcs_err_packets +
2068 		    mdlReadMib(pLayerPointers, RcvFCSErrors);
2069 		break;
2070 
2071 	/*
2072 	 * Tx Counters
2073 	 */
2074 	case MAC_STAT_OPACKETS:
2075 		*val = adapterStat->tx_mib_packets +
2076 		    mdlReadMib(pLayerPointers, XmtPackets);
2077 		break;
2078 
2079 	case MAC_STAT_OBYTES:
2080 		*val = adapterStat->tx_mib_bytes +
2081 		    mdlReadMib(pLayerPointers, XmtOctets);
2082 		break;
2083 
2084 	case MAC_STAT_MULTIXMT:
2085 		*val = adapterStat->tx_mib_multicst_packets +
2086 		    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
2087 		break;
2088 
2089 	case MAC_STAT_BRDCSTXMT:
2090 		*val = adapterStat->tx_mib_broadcst_packets +
2091 		    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
2092 		break;
2093 
2094 	case MAC_STAT_NOXMTBUF:
2095 		*val = adapterStat->tx_no_descriptor;
2096 		break;
2097 
2098 	case MAC_STAT_OERRORS:
2099 		*val = adapterStat->tx_mib_ex_coll_packets +
2100 		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
2101 		break;
2102 
2103 	case MAC_STAT_COLLISIONS:
2104 		*val = adapterStat->tx_mib_ex_coll_packets +
2105 		    mdlReadMib(pLayerPointers, XmtCollisions);
2106 		break;
2107 
2108 	case ETHER_STAT_FIRST_COLLISIONS:
2109 		*val = adapterStat->tx_mib_one_coll_packets +
2110 		    mdlReadMib(pLayerPointers, XmtOneCollision);
2111 		break;
2112 
2113 	case ETHER_STAT_MULTI_COLLISIONS:
2114 		*val = adapterStat->tx_mib_multi_coll_packets +
2115 		    mdlReadMib(pLayerPointers, XmtMultipleCollision);
2116 		break;
2117 
2118 	case ETHER_STAT_EX_COLLISIONS:
2119 		*val = adapterStat->tx_mib_ex_coll_packets +
2120 		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
2121 		break;
2122 
2123 	case ETHER_STAT_TX_LATE_COLLISIONS:
2124 		*val = adapterStat->tx_mib_late_coll_packets +
2125 		    mdlReadMib(pLayerPointers, XmtLateCollision);
2126 		break;
2127 
2128 	case ETHER_STAT_DEFER_XMTS:
2129 		*val = adapterStat->tx_mib_defer_trans_packets +
2130 		    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
2131 		break;
2132 
2133 	default:
2134 		return (ENOTSUP);
2135 	}
2136 	return (0);
2137 }
2138 
2139 /*
2140  *	Memory Read Functions Used by MDL to read card registers.
2141  */
2142 unsigned char
2143 READ_REG8(struct LayerPointers *pLayerPointers, long x)
2144 {
2145 	return (ddi_get8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)x));
2146 }
2147 
2148 int
2149 READ_REG16(struct LayerPointers *pLayerPointers, long x)
2150 {
2151 	return (ddi_get16(pLayerPointers->pOdl->MemBasehandle,
2152 	    (uint16_t *)(x)));
2153 }
2154 
2155 long
2156 READ_REG32(struct LayerPointers *pLayerPointers, long x)
2157 {
2158 	return (ddi_get32(pLayerPointers->pOdl->MemBasehandle,
2159 	    (uint32_t *)(x)));
2160 }
2161 
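/*
 *	Memory Write Functions Used by MDL to set card registers.
 */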
2162 void
2163 WRITE_REG8(struct LayerPointers *pLayerPointers, long x, int y)
2164 {
2165 	ddi_put8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)(x), y);
2166 }
2167 
2168 void
2169 WRITE_REG16(struct LayerPointers *pLayerPointers, long x, int y)
2170 {
2171 	ddi_put16(pLayerPointers->pOdl->MemBasehandle, (uint16_t *)(x), y);
2172 }
2173 
2174 void
2175 WRITE_REG32(struct LayerPointers *pLayerPointers, long x, int y)
2176 {
2177 	ddi_put32(pLayerPointers->pOdl->MemBasehandle, (uint32_t *)(x), y);
2178 }
2179 
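/*
 * Write an 8-byte value to consecutive registers, one byte at a time.
 */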
2180 void
2181 WRITE_REG64(struct LayerPointers *pLayerPointers, long x, char *y)
2182 {
2183 	int i;
2184 	for (i = 0; i < 8; i++) {
2185 		WRITE_REG8(pLayerPointers, (x + i), y[i]);
2186 	}
2187 }
2188