/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Copyright (c) 2001-2006 Advanced Micro Devices, Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * + Redistributions of source code must retain the above copyright notice,
 * + this list of conditions and the following disclaimer.
 *
 * + Redistributions in binary form must reproduce the above copyright
 * + notice, this list of conditions and the following disclaimer in the
 * + documentation and/or other materials provided with the distribution.
 *
 * + Neither the name of Advanced Micro Devices, Inc. nor the names of its
 * + contributors may be used to endorse or promote products derived from
 * + this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Import/Export/Re-Export/Use/Release/Transfer Restrictions and
 * Compliance with Applicable Laws.  Notice is hereby given that
 * the software may be subject to restrictions on use, release,
 * transfer, importation, exportation and/or re-exportation under
 * the laws and regulations of the United States or other
 * countries ("Applicable Laws"), which include but are not
 * limited to U.S. export control laws such as the Export
 * Administration Regulations and national security controls as
 * defined thereunder, as well as State Department controls under
 * the U.S. Munitions List.  Permission to use and/or
 * redistribute the software is conditioned upon compliance with
 * all Applicable Laws, including U.S. export control laws
 * regarding specifically designated persons, countries and
 * nationals of countries subject to national security controls.
 */


#pragma ident "@(#)$RCSfile: solaris_odl.c,v $ $Revision: 1.3 $ " \
" $Date: 2004/04/22 15:22:54 $ AMD"


/* include files */
#include <sys/disp.h>
#include <sys/atomic.h>
#include <sys/vlan.h>
#include "amd8111s_main.h"

/* Global macro Definitions */
#define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define	INTERFACE_NAME "amd8111s"
#define	AMD8111S_SPLIT	128
#define	AMD8111S_SEND_MAX	64
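
/*
 * ROUNDUP() assumes the alignment 'a' is a power of two: adding (a - 1)
 * and masking with ~(a - 1) rounds x up to the next multiple of a.
 * A worked example (illustrative only, not part of the driver logic):
 *
 *	ROUNDUP(100, 16) == (100 + 15) & ~15 == 112
 *	ROUNDUP(112, 16) == 112			(already aligned)
 */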

static char ident[] = "AMD8111 10/100M Ethernet 1.0";

/*
 * Driver Entry Points
 */
static int amd8111s_attach(dev_info_t *, ddi_attach_cmd_t);
static int amd8111s_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * GLD Entry Point prototypes
 */
static int amd8111s_m_unicst(void *, const uint8_t *);
static int amd8111s_m_promisc(void *, boolean_t);
static int amd8111s_m_stat(void *, uint_t, uint64_t *);
static void amd8111s_m_resources(void *arg);
static void amd8111s_m_ioctl(void *, queue_t *, mblk_t *);
static int amd8111s_m_multicst(void *, boolean_t, const uint8_t *addr);
static int amd8111s_m_start(void *);
static void amd8111s_m_stop(void *);
static mblk_t *amd8111s_m_tx(void *, mblk_t *mp);
static uint_t amd8111s_intr(caddr_t);

static int amd8111s_unattach(dev_info_t *, struct LayerPointers *);

static boolean_t amd8111s_allocate_buffers(struct LayerPointers *);
static int amd8111s_odlInit(struct LayerPointers *);
static boolean_t amd8111s_allocate_descriptors(struct LayerPointers *);
static void amd8111s_free_descriptors(struct LayerPointers *);
static boolean_t amd8111s_alloc_dma_ringbuf(struct LayerPointers *,
		struct amd8111s_dma_ringbuf *, uint32_t, uint32_t);
static void amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *);


static void amd8111s_log(struct LayerPointers *adapter, int level,
    char *fmt, ...);

static struct cb_ops amd8111s_cb_ops = {
	nulldev,	/* cb_open */
	nulldev,	/* cb_close */
	nodev,		/* cb_strategy */
	nodev,		/* cb_print */
	nodev,		/* cb_dump */
	nodev,		/* cb_read */
	nodev,		/* cb_write */
	nodev,		/* cb_ioctl */
	nodev,		/* cb_devmap */
	nodev,		/* cb_mmap */
	nodev,		/* cb_segmap */
	nochpoll,	/* cb_chpoll */
	ddi_prop_op,	/* cb_prop_op */
	NULL,		/* cb_str */
	D_NEW | D_MP,	/* cb_flag */
	CB_REV,		/* cb_rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};

static struct dev_ops amd8111s_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	amd8111s_attach,	/* devo_attach */
	amd8111s_detach,	/* devo_detach */
	nodev,			/* devo_reset */
	&amd8111s_cb_ops,	/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	nodev			/* devo_power */
};

struct modldrv amd8111s_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	ident,			/* short description */
	&amd8111s_dev_ops	/* driver specific ops */
};

struct modlinkage amd8111s_modlinkage = {
	MODREV_1, (void *)&amd8111s_modldrv, NULL
};

/*
 * Global Variables
 */
struct LayerPointers *amd8111sadapter;

static ddi_dma_attr_t pcn_buff_dma_attr_t = {
	DMA_ATTR_V0,	/* dma_attr_version */
	(uint64_t)0,		/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint64_t)1,		/* dma_attr_align */
	(uint_t)0x7F,		/* dma_attr_burstsizes */
	(uint32_t)1,		/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* dma_attr_granular */
	(uint_t)0		/* dma_attr_flags */
};

static ddi_dma_attr_t pcn_desc_dma_attr_t = {
	DMA_ATTR_V0,		/* dma_attr_version */
	(uint64_t)0,		/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint64_t)0x7FFFFFFF,	/* dma_attr_count_max */
	(uint64_t)0x10,		/* dma_attr_align */
	(uint_t)0xFFFFFFFFU,	/* dma_attr_burstsizes */
	(uint32_t)1,		/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* dma_attr_granular */
	(uint_t)0		/* dma_attr_flags */
};
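
/*
 * Note: both attribute structures set dma_attr_sgllen to 1, so each
 * ddi_dma_addr_bind_handle() call in this file is expected to return
 * a single cookie; the ASSERT(count == 1) checks in
 * amd8111s_allocate_descriptors() rely on that.
 */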

/* PIO access attributes for registers */
static ddi_device_acc_attr_t pcn_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* Optional MAC callbacks provided: m_resources and m_ioctl */
#define	AMD8111S_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL)


static mac_callbacks_t amd8111s_m_callbacks = {
	AMD8111S_M_CALLBACK_FLAGS,
	amd8111s_m_stat,
	amd8111s_m_start,
	amd8111s_m_stop,
	amd8111s_m_promisc,
	amd8111s_m_multicst,
	amd8111s_m_unicst,
	amd8111s_m_tx,
	amd8111s_m_resources,
	amd8111s_m_ioctl
};


/*
 * Standard driver load entry point.
 * It is called when the driver is loaded into the system.
 */
int
_init()
{
	int status;
	mac_init_ops(&amd8111s_dev_ops, "amd8111s");

	status = mod_install(&amd8111s_modlinkage);
	if (status != DDI_SUCCESS) {
		mac_fini_ops(&amd8111s_dev_ops);
	}

	return (status);
}

/*
 * Standard driver entry point for queries.
 * It can be called at any time to get driver info.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&amd8111s_modlinkage, modinfop));
}

/*
 * Standard driver entry point for unload.
 * It is called when the driver is unloaded from the system.
 */
int
_fini()
{
	int status;

	status = mod_remove(&amd8111s_modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&amd8111s_dev_ops);
	}

	return (status);
}

/*
 * Placeholder for adjusting the Interrupt Coalescing Register; the
 * blanking parameters are currently ignored.
 */
static void
amd8111s_m_blank(void *arg, time_t ticks, uint32_t count)
{
	_NOTE(ARGUNUSED(arg, ticks, count));
}

static void
amd8111s_m_resources(void *arg)
{
	struct LayerPointers *adapter = arg;
	mac_rx_fifo_t mrf;

	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = amd8111s_m_blank;
	mrf.mrf_arg = (void *)adapter;
	mrf.mrf_normal_blank_time = 128;
	mrf.mrf_normal_pkt_count = 8;

	adapter->pOdl->mrh = mac_resource_add(adapter->pOdl->mh,
	    (mac_resource_t *)&mrf);
}

/*
 * Loopback Support
 */
static lb_property_t loopmodes[] = {
	{ normal,	"normal",	AMD8111S_LB_NONE		},
	{ external,	"100Mbps",	AMD8111S_LB_EXTERNAL_100	},
	{ external,	"10Mbps",	AMD8111S_LB_EXTERNAL_10		},
	{ internal,	"MAC",		AMD8111S_LB_INTERNAL_MAC	}
};

static void
amd8111s_set_loop_mode(struct LayerPointers *adapter, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == adapter->pOdl->loopback_mode)
		return;

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return;

	case AMD8111S_LB_NONE:
		mdlStopChip(adapter);
		if (adapter->pOdl->loopback_mode == AMD8111S_LB_INTERNAL_MAC) {
			cmn_err(CE_NOTE, "LB_NONE restored from Internal LB");
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
			    INLOOP);
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
			    FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);
		} else {
			cmn_err(CE_NOTE, "LB_NONE restored from External LB");
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
			    EXLOOP);
		}

		amd8111s_reset(adapter);
		adapter->pOdl->LinkStatus = LINK_STATE_DOWN;
		adapter->pOdl->rx_fcs_stripped = B_FALSE;
		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_EXTERNAL_100:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_100");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_100);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | EXLOOP);
		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->Speed = 100;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_EXTERNAL_10:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_10");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_10);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | EXLOOP);
		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->Speed = 10;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_INTERNAL_MAC:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_INTERNAL_MAC");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		/* Disable Port Manager */
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
		    EN_PMGR);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | INLOOP);

		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
		    VAL1 | FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);

		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	adapter->pOdl->loopback_mode = mode;
}

static enum ioc_reply
amd8111s_loopback_ioctl(struct LayerPointers *adapter, struct iocblk *iocp,
    mblk_t *mp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		amd8111s_log(adapter, CE_NOTE,
		    "amd8111s_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_GET_INFO_SIZE size");
			return (IOC_INVAL);
		}
		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		break;

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_GET_INFO size");
			return (IOC_INVAL);
		}
		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		break;

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_GET_MODE size");
			return (IOC_INVAL);
		}
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		*lbmp = adapter->pOdl->loopback_mode;
		break;

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_SET_MODE size");
			return (IOC_INVAL);
		}
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		amd8111s_set_loop_mode(adapter, *lbmp);
		break;
	}
	return (IOC_REPLY);
}
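
/*
 * For reference, a user-level consumer reaches the cases above through
 * a STREAMS I_STR ioctl on the driver node. A hedged sketch (the file
 * descriptor, headers and error handling are assumed):
 *
 *	struct strioctl sio;
 *	uint32_t mode = AMD8111S_LB_INTERNAL_MAC;
 *
 *	sio.ic_cmd = LB_SET_MODE;
 *	sio.ic_timout = 0;
 *	sio.ic_len = sizeof (mode);
 *	sio.ic_dp = (char *)&mode;
 *	if (ioctl(fd, I_STR, &sio) < 0)
 *		perror("LB_SET_MODE");
 */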

static void
amd8111s_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;
	struct LayerPointers *adapter;
	enum ioc_reply status;

	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	adapter = (struct LayerPointers *)arg;

	ASSERT(adapter);
	if (adapter == NULL) {
		miocnak(q, mp, 0, EINVAL);
		return;
	}

	switch (iocp->ioc_cmd) {

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = amd8111s_loopback_ioctl(adapter, iocp, mp);
		break;

	default:
		status = IOC_INVAL;
		break;
	}

	/*
	 * Decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(q, mp, 0, 0);
		break;

	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(q, mp);
		break;
	}
}

/*
 * Copy one packet from DMA memory to an mblk, then advance the DMA
 * descriptor pointer.
 */
static boolean_t
amd8111s_recv_copy(struct LayerPointers *pLayerPointers, mblk_t **last_mp)
{
	int length = 0;
	mblk_t *mp;
	struct rx_desc *descriptor;
	struct odl *pOdl = pLayerPointers->pOdl;
	struct amd8111s_statistics *statistics = &pOdl->statistics;
	struct nonphysical *pNonphysical = pLayerPointers->pMil
	    ->pNonphysical;

	mutex_enter(&pOdl->mdlRcvLock);
	descriptor = pNonphysical->RxBufDescQRead->descriptor;
	/* Sync this descriptor; the offset argument is in bytes */
	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle,
	    (pNonphysical->RxBufDescQRead->descriptor -
	    pNonphysical->RxBufDescQStart->descriptor) *
	    sizeof (struct rx_desc),
	    sizeof (struct rx_desc), DDI_DMA_SYNC_FORCPU);
	if ((descriptor->Rx_OWN) == 0) {
		/*
		 * If the frame is received with errors, then set MCNT
		 * of that pkt in ReceiveArray to 0. This packet would
		 * be discarded later and not indicated to OS.
		 */
		if (descriptor->Rx_ERR) {
			statistics->rx_desc_err++;
			descriptor->Rx_ERR = 0;
			if (descriptor->Rx_FRAM == 1) {
				statistics->rx_desc_err_FRAM++;
				descriptor->Rx_FRAM = 0;
			}
			if (descriptor->Rx_OFLO == 1) {
				statistics->rx_desc_err_OFLO++;
				descriptor->Rx_OFLO = 0;
				pOdl->rx_overflow_counter++;
				if ((pOdl->rx_overflow_counter > 5) &&
				    (pOdl->pause_interval == 0)) {
					statistics->rx_double_overflow++;
					mdlSendPause(pLayerPointers);
					pOdl->rx_overflow_counter = 0;
					pOdl->pause_interval = 25;
				}
			}
			if (descriptor->Rx_CRC == 1) {
				statistics->rx_desc_err_CRC++;
				descriptor->Rx_CRC = 0;
			}
			if (descriptor->Rx_BUFF == 1) {
				statistics->rx_desc_err_BUFF++;
				descriptor->Rx_BUFF = 0;
			}
			goto Next_Descriptor;
		}

		/* Length of incoming packet */
		if (pOdl->rx_fcs_stripped) {
			length = descriptor->Rx_MCNT - 4;
		} else {
			length = descriptor->Rx_MCNT;
		}
		if (length < 62) {
			/* Count undersized (runt) frames */
			statistics->rx_error_zerosize++;
		}

		if ((mp = allocb(length, BPRI_MED)) == NULL) {
			statistics->rx_allocfail++;
			goto failed;
		}
		/* Copy from virtual address of incoming packet */
		bcopy((long *)*(pNonphysical->RxBufDescQRead->USpaceMap),
		    mp->b_rptr, length);
		mp->b_wptr = mp->b_rptr + length;
		statistics->rx_ok_packets++;
		if (*last_mp == NULL) {
			*last_mp = mp;
		} else {
			(*last_mp)->b_next = mp;
			*last_mp = mp;
		}

Next_Descriptor:
		descriptor->Rx_MCNT = 0;
		descriptor->Rx_SOP = 0;
		descriptor->Rx_EOP = 0;
		descriptor->Rx_PAM = 0;
		descriptor->Rx_BAM = 0;
		descriptor->TT = 0;
		descriptor->Rx_OWN = 1;
		pNonphysical->RxBufDescQRead->descriptor++;
		pNonphysical->RxBufDescQRead->USpaceMap++;
		if (pNonphysical->RxBufDescQRead->descriptor >
		    pNonphysical->RxBufDescQEnd->descriptor) {
			pNonphysical->RxBufDescQRead->descriptor =
			    pNonphysical->RxBufDescQStart->descriptor;
			pNonphysical->RxBufDescQRead->USpaceMap =
			    pNonphysical->RxBufDescQStart->USpaceMap;
		}
		mutex_exit(&pOdl->mdlRcvLock);

		return (B_TRUE);
	}

failed:
	mutex_exit(&pOdl->mdlRcvLock);
	return (B_FALSE);
}

/*
 * Get the received packets from the NIC and send them to GLD.
 */
static void
amd8111s_receive(struct LayerPointers *pLayerPointers)
{
	int numOfPkts = 0;
	struct odl *pOdl;
	mblk_t *ret_mp = NULL, *last_mp = NULL;

	pOdl = pLayerPointers->pOdl;

	rw_enter(&pOdl->chip_lock, RW_READER);
	if (!pLayerPointers->run) {
		rw_exit(&pOdl->chip_lock);
		return;
	}

	if (pOdl->pause_interval > 0)
		pOdl->pause_interval--;

	while (numOfPkts < RX_RING_SIZE) {
		if (!amd8111s_recv_copy(pLayerPointers, &last_mp)) {
			break;
		}
		if (ret_mp == NULL)
			ret_mp = last_mp;
		numOfPkts++;
	}

	if (ret_mp) {
		mac_rx(pOdl->mh, pOdl->mrh, ret_mp);
	}

	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	mdlReceive(pLayerPointers);

	rw_exit(&pOdl->chip_lock);
}

/*
 * Print a message in the release-version driver.
 */
static void
amd8111s_log(struct LayerPointers *adapter, int level, char *fmt, ...)
{
	char name[32];
	char buf[256];
	va_list ap;

	if (adapter != NULL) {
		(void) sprintf(name, "amd8111s%d",
		    ddi_get_instance(adapter->pOdl->devinfo));
	} else {
		(void) sprintf(name, "amd8111s");
	}
	va_start(ap, fmt);
	(void) vsprintf(buf, fmt, ap);
	va_end(ap);
	cmn_err(level, "%s: %s", name, buf);
}

/*
 * Allocate & initialize all resources.
 * Called by amd8111s_attach().
 */
static int
amd8111s_odlInit(struct LayerPointers *pLayerPointers)
{
	unsigned long mem_req_array[MEM_REQ_MAX];
	unsigned long mem_set_array[MEM_REQ_MAX];
	unsigned long *pmem_req_array;
	unsigned long *pmem_set_array;
	int i, size;

	for (i = 0; i < MEM_REQ_MAX; i++) {
		mem_req_array[i] = 0;
		mem_set_array[i] = 0;
	}

	milRequestResources(mem_req_array);

	pmem_req_array = mem_req_array;
	pmem_set_array = mem_set_array;
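	/*
	 * The request array is consumed as (tag, size, address) triplets,
	 * terminated by a zero tag. With hypothetical sizes, for example,
	 * milRequestResources() might leave mem_req_array as
	 *
	 *	{ VIRTUAL, 0x1000, 0, VIRTUAL, 0x200, 0, 0 }
	 *
	 * and the loop below copies each triplet into mem_set_array,
	 * filling in the third slot with the kmem_zalloc()ed address.
	 */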
	while (*pmem_req_array) {
		switch (*pmem_req_array) {
		case VIRTUAL:
			*pmem_set_array = VIRTUAL;
			pmem_req_array++;
			pmem_set_array++;
			*(pmem_set_array) = *(pmem_req_array);
			pmem_set_array++;
			*(pmem_set_array) = (unsigned long) kmem_zalloc(
			    *(pmem_req_array), KM_NOSLEEP);
			if (*pmem_set_array == NULL)
				goto odl_init_failure;
			break;
		}
		pmem_req_array++;
		pmem_set_array++;
	}

	/*
	 * Initialize memory on lower layers
	 */
	milSetResources(pLayerPointers, mem_set_array);

	/* Allocate Rx/Tx descriptors */
	if (amd8111s_allocate_descriptors(pLayerPointers) != B_TRUE) {
		*pmem_set_array = NULL;
		goto odl_init_failure;
	}

	/*
	 * Allocate Rx buffer for each Rx descriptor. Then call mil layer
	 * routine to fill physical address of Rx buffer into Rx descriptor.
	 */
	if (amd8111s_allocate_buffers(pLayerPointers) == B_FALSE) {
		amd8111s_free_descriptors(pLayerPointers);
		*pmem_set_array = NULL;
		goto odl_init_failure;
	}
	milInitGlbds(pLayerPointers);

	return (0);

odl_init_failure:
	/*
	 * Free all memory allocated so far
	 */
	pmem_req_array = mem_set_array;
	while ((*pmem_req_array) && (pmem_req_array != pmem_set_array)) {
		switch (*pmem_req_array) {
		case VIRTUAL:
			pmem_req_array++;	/* Size */
			size = *(pmem_req_array);
			pmem_req_array++;	/* Virtual Address */
			if (*pmem_req_array == NULL)
				return (1);
			kmem_free((void *)*pmem_req_array, size);
			break;
		}
		pmem_req_array++;
	}
	return (1);
}

/*
 * Allocate and initialize Tx/Rx descriptors
 */
static boolean_t
amd8111s_allocate_descriptors(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;
	struct mil *pMil = pLayerPointers->pMil;
	dev_info_t *devinfo = pOdl->devinfo;
	uint_t length, count, i;
	size_t real_length;

	/*
	 * Allocate Rx descriptors
	 */
	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
	    NULL, &pOdl->rx_desc_dma_handle) != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_alloc_handle for Rx desc failed");
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}

	length = sizeof (struct rx_desc) * RX_RING_SIZE + ALIGNMENT;
	if (ddi_dma_mem_alloc(pOdl->rx_desc_dma_handle, length,
	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&pMil->Rx_desc_original, &real_length,
	    &pOdl->rx_desc_acc_handle) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_alloc for Rx desc failed");
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}

	if (ddi_dma_addr_bind_handle(pOdl->rx_desc_dma_handle,
	    NULL, (caddr_t)pMil->Rx_desc_original, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &pOdl->rx_desc_dma_cookie,
	    &count) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_addr_bind_handle for Rx desc failed");
		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}
	ASSERT(count == 1);

	/* Initialize Rx descriptors related variables */
	pMil->Rx_desc = (struct rx_desc *)
	    ((pMil->Rx_desc_original + ALIGNMENT) & ~ALIGNMENT);
	pMil->Rx_desc_pa = (unsigned int)
	    ((pOdl->rx_desc_dma_cookie.dmac_laddress + ALIGNMENT) & ~ALIGNMENT);

	pLayerPointers->pMdl->init_blk->RDRA = pMil->Rx_desc_pa;
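
	/*
	 * Alignment arithmetic above: assuming ALIGNMENT is a mask of the
	 * form 2^n - 1 (e.g. 0x0f), (addr + ALIGNMENT) & ~ALIGNMENT rounds
	 * addr up to the next 2^n boundary. For example, with ALIGNMENT
	 * 0x0f:
	 *
	 *	(0x12345679 + 0x0f) & ~0x0f == 0x12345680
	 */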


	/*
	 * Allocate Tx descriptors
	 */
	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
	    NULL, &pOdl->tx_desc_dma_handle) != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_alloc_handle for Tx desc failed");
		goto allocate_desc_fail;
	}

	length = sizeof (struct tx_desc) * TX_RING_SIZE + ALIGNMENT;
	if (ddi_dma_mem_alloc(pOdl->tx_desc_dma_handle, length,
	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&pMil->Tx_desc_original, &real_length,
	    &pOdl->tx_desc_acc_handle) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_alloc for Tx desc failed");
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		goto allocate_desc_fail;
	}

	if (ddi_dma_addr_bind_handle(pOdl->tx_desc_dma_handle,
	    NULL, (caddr_t)pMil->Tx_desc_original, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &pOdl->tx_desc_dma_cookie,
	    &count) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_addr_bind_handle for Tx desc failed");
		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		goto allocate_desc_fail;
	}
	ASSERT(count == 1);
	/* Set the DMA area to all zeros */
	bzero((caddr_t)pMil->Tx_desc_original, length);

	/* Initialize Tx descriptors related variables */
	pMil->Tx_desc = (struct tx_desc *)
	    ((pMil->Tx_desc_original + ALIGNMENT) & ~ALIGNMENT);
	pMil->pNonphysical->TxDescQRead = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQWrite = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQStart = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQEnd = &(pMil->Tx_desc[TX_RING_SIZE - 1]);

	/* Physical Addr of Tx_desc_original & Tx_desc */
	pLayerPointers->pMil->Tx_desc_pa =
	    ((pOdl->tx_desc_dma_cookie.dmac_laddress + ALIGNMENT) &
	    ~ALIGNMENT);

	/* Setting the reserved bits in the tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		pMil->pNonphysical->TxDescQWrite->Tx_RES0 = 0x0f;
		pMil->pNonphysical->TxDescQWrite->Tx_OWN = 0;
		pMil->pNonphysical->TxDescQWrite++;
	}
	pMil->pNonphysical->TxDescQWrite = pMil->pNonphysical->TxDescQStart;

	pLayerPointers->pMdl->init_blk->TDRA = pMil->Tx_desc_pa;

	return (B_TRUE);

allocate_desc_fail:
	pOdl->tx_desc_dma_handle = NULL;
	(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
	ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
	ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
	pOdl->rx_desc_dma_handle = NULL;
	return (B_FALSE);
}

/*
 * Free Tx/Rx descriptors
 */
static void
amd8111s_free_descriptors(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	/* Free Rx descriptors */
	if (pOdl->rx_desc_dma_handle) {
		(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
	}

	/* Free Tx descriptors */
	if (pOdl->tx_desc_dma_handle) {
		(void) ddi_dma_unbind_handle(pOdl->tx_desc_dma_handle);
		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		pOdl->tx_desc_dma_handle = NULL;
	}
}

/*
 * Allocate Tx/Rx Ring buffer
 */
static boolean_t
amd8111s_alloc_dma_ringbuf(struct LayerPointers *pLayerPointers,
			struct amd8111s_dma_ringbuf *pRing,
			uint32_t ring_size, uint32_t msg_size)
{
	uint32_t idx, msg_idx = 0, msg_acc;
	dev_info_t *devinfo = pLayerPointers->pOdl->devinfo;
	size_t real_length;
	uint_t count = 0;

	ASSERT(pcn_buff_dma_attr_t.dma_attr_align == 1);
	pRing->dma_buf_sz = msg_size;
	pRing->ring_size = ring_size;
	pRing->trunk_num = AMD8111S_SPLIT;
	pRing->buf_sz = msg_size * ring_size;
	if (ring_size < pRing->trunk_num)
		pRing->trunk_num = ring_size;
	ASSERT((pRing->buf_sz % pRing->trunk_num) == 0);

	pRing->trunk_sz = pRing->buf_sz / pRing->trunk_num;
	ASSERT((pRing->trunk_sz % pRing->dma_buf_sz) == 0);
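
	/*
	 * Worked example of the trunk sizing above with hypothetical
	 * values ring_size = 128 and msg_size = 1536:
	 *
	 *	buf_sz    = 1536 * 128 = 196608
	 *	trunk_num = AMD8111S_SPLIT = 128	(ring_size >= 128)
	 *	trunk_sz  = 196608 / 128 = 1536
	 *
	 * i.e. each DMA allocation below covers trunk_sz bytes and holds
	 * trunk_sz / dma_buf_sz messages.
	 */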

	pRing->msg_buf = kmem_zalloc(sizeof (struct amd8111s_msgbuf) *
	    ring_size, KM_NOSLEEP);
	pRing->dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->dma_cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->trunk_addr = kmem_zalloc(sizeof (caddr_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	if (pRing->msg_buf == NULL || pRing->dma_hdl == NULL ||
	    pRing->acc_hdl == NULL || pRing->trunk_addr == NULL ||
	    pRing->dma_cookie == NULL) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "kmem_zalloc failed");
		goto failed;
	}

	for (idx = 0; idx < pRing->trunk_num; ++idx) {
		if (ddi_dma_alloc_handle(devinfo, &pcn_buff_dma_attr_t,
		    DDI_DMA_SLEEP, NULL, &(pRing->dma_hdl[idx]))
		    != DDI_SUCCESS) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_alloc_handle failed");
			goto failed;
		} else if (ddi_dma_mem_alloc(pRing->dma_hdl[idx],
		    pRing->trunk_sz, &pcn_acc_attr, DDI_DMA_STREAMING,
		    DDI_DMA_SLEEP, NULL,
		    (caddr_t *)&(pRing->trunk_addr[idx]),
		    (size_t *)(&real_length), &pRing->acc_hdl[idx])
		    != DDI_SUCCESS) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_mem_alloc failed");
			goto failed;
		} else if (real_length != pRing->trunk_sz) {
			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_mem_alloc returned wrong length");
			goto failed;
		} else if (ddi_dma_addr_bind_handle(pRing->dma_hdl[idx],
		    NULL, (caddr_t)pRing->trunk_addr[idx], real_length,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
		    &pRing->dma_cookie[idx], &count) != DDI_DMA_MAPPED) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_addr_bind_handle failed");
			goto failed;
		} else {
			for (msg_acc = 0;
			    msg_acc < pRing->trunk_sz / pRing->dma_buf_sz;
			    ++msg_acc) {
				pRing->msg_buf[msg_idx].offset =
				    msg_acc * pRing->dma_buf_sz;
				pRing->msg_buf[msg_idx].vir_addr =
				    pRing->trunk_addr[idx] +
				    pRing->msg_buf[msg_idx].offset;
				pRing->msg_buf[msg_idx].phy_addr =
				    pRing->dma_cookie[idx].dmac_laddress +
				    pRing->msg_buf[msg_idx].offset;
				pRing->msg_buf[msg_idx].p_hdl =
				    pRing->dma_hdl[idx];
				msg_idx++;
			}
		}
	}

	pRing->free = pRing->msg_buf;
	pRing->next = pRing->msg_buf;
	pRing->curr = pRing->msg_buf;

	return (B_TRUE);
failed:
	amd8111s_free_dma_ringbuf(pRing);
	return (B_FALSE);
}

/*
 * Free Tx/Rx ring buffer
 */
static void
amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *pRing)
{
	int idx;

	if (pRing->dma_cookie != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
			if (pRing->dma_cookie[idx].dmac_laddress == 0) {
				break;
			}
			(void) ddi_dma_unbind_handle(pRing->dma_hdl[idx]);
		}
		kmem_free(pRing->dma_cookie,
		    sizeof (ddi_dma_cookie_t) * pRing->trunk_num);
	}

	if (pRing->acc_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
			if (pRing->acc_hdl[idx] == NULL)
				break;
			ddi_dma_mem_free(&pRing->acc_hdl[idx]);
		}
		kmem_free(pRing->acc_hdl,
		    sizeof (ddi_acc_handle_t) * pRing->trunk_num);
	}

	if (pRing->dma_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
			if (pRing->dma_hdl[idx] == 0) {
				break;
			}
			ddi_dma_free_handle(&pRing->dma_hdl[idx]);
		}
		kmem_free(pRing->dma_hdl,
		    sizeof (ddi_dma_handle_t) * pRing->trunk_num);
	}

	if (pRing->msg_buf != NULL) {
		kmem_free(pRing->msg_buf,
		    sizeof (struct amd8111s_msgbuf) * pRing->ring_size);
	}

	if (pRing->trunk_addr != NULL) {
		kmem_free(pRing->trunk_addr,
		    sizeof (caddr_t) * pRing->trunk_num);
	}

	bzero(pRing, sizeof (*pRing));
}


/*
 * Allocate all Tx buffers.
 * Allocate a Rx buffer for each Rx descriptor. Then
 * call mil routine to fill physical address of Rx
 * buffer into Rx descriptors
 */
static boolean_t
amd8111s_allocate_buffers(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	/*
	 * Allocate Rx buffers
	 */
	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->rx_buf,
	    RX_RING_SIZE, RX_BUF_SIZE) == B_FALSE) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "amd8111s_alloc_dma_ringbuf for rx failed");
		goto allocate_buf_fail;
	}

	/*
	 * Allocate Tx buffers
	 */
	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->tx_buf,
	    TX_COALESC_SIZE, TX_BUF_SIZE) == B_FALSE) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "amd8111s_alloc_dma_ringbuf for tx failed");
		goto allocate_buf_fail;
	}

	/*
	 * Initialize the mil queues
	 */
	milInitGlbds(pLayerPointers);

	milInitRxQ(pLayerPointers);

	return (B_TRUE);

allocate_buf_fail:

	amd8111s_log(pLayerPointers, CE_WARN,
	    "amd8111s_allocate_buffers failed");
	return (B_FALSE);
}

/*
 * Free all Rx/Tx buffers
 */

static void
amd8111s_free_buffers(struct LayerPointers *pLayerPointers)
{
	/* Free Tx buffers */
	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->tx_buf);

	/* Free Rx buffers */
	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->rx_buf);
}
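
/*
 * Note on the Tx ring-buffer cursors (a summary of how this file uses
 * them): 'next' is advanced by amd8111s_getTxbuf() when a buffer is
 * claimed for an outgoing packet, 'curr' is advanced by
 * amd8111s_send_serial() as claimed buffers are attached to Tx
 * descriptors, and 'free' is advanced by amd8111s_recycle_tx() once
 * the hardware releases the matching descriptor. A claim fails when
 * advancing 'next' would run into 'free'.
 */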

/*
 * Try to recycle all the descriptors and Tx buffers
 * which are already freed by hardware.
 */
static int
amd8111s_recycle_tx(struct LayerPointers *pLayerPointers)
{
	struct nonphysical *pNonphysical;
	uint32_t count = 0;

	pNonphysical = pLayerPointers->pMil->pNonphysical;
	while (pNonphysical->TxDescQRead->Tx_OWN == 0 &&
	    pNonphysical->TxDescQRead != pNonphysical->TxDescQWrite) {
		pLayerPointers->pOdl->tx_buf.free =
		    NEXT(pLayerPointers->pOdl->tx_buf, free);
		pNonphysical->TxDescQRead++;
		if (pNonphysical->TxDescQRead > pNonphysical->TxDescQEnd) {
			pNonphysical->TxDescQRead = pNonphysical->TxDescQStart;
		}
		count++;
	}

	if (pLayerPointers->pMil->tx_reschedule)
		ddi_trigger_softintr(pLayerPointers->pOdl->drain_id);

	return (count);
}
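
/*
 * NEXT() is defined in amd8111s_main.h. A minimal sketch of the ring
 * wraparound it is assumed to perform on a cursor (illustrative, not
 * the actual macro):
 *
 *	new = cur + 1;
 *	if (new > &ring.msg_buf[ring.ring_size - 1])
 *		new = ring.msg_buf;
 */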

/*
 * Get packets in the Tx buffer, then copy them to the send buffer.
 * Trigger hardware to send out packets.
 */
static void
amd8111s_send_serial(struct LayerPointers *pLayerPointers)
{
	struct nonphysical *pNonphysical;
	uint32_t count;

	pNonphysical = pLayerPointers->pMil->pNonphysical;

	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);

	for (count = 0; count < AMD8111S_SEND_MAX; count++) {
		if (pLayerPointers->pOdl->tx_buf.curr ==
		    pLayerPointers->pOdl->tx_buf.next) {
			break;
		}
		/* If the Tx descriptor ring is full, try to recycle it */
		if (((pNonphysical->TxDescQWrite + 1 >
		    pNonphysical->TxDescQEnd) ? pNonphysical->TxDescQStart :
		    (pNonphysical->TxDescQWrite + 1)) ==
		    pNonphysical->TxDescQRead)
			if (amd8111s_recycle_tx(pLayerPointers) == 0) {
				pLayerPointers->pOdl
				    ->statistics.tx_no_descriptor++;
				break;
			}

		/* Fill packet length */
		pNonphysical->TxDescQWrite->Tx_BCNT = (uint16_t)pLayerPointers
		    ->pOdl->tx_buf.curr->msg_size;

		/* Fill physical buffer address */
		pNonphysical->TxDescQWrite->Tx_Base_Addr = (unsigned int)
		    pLayerPointers->pOdl->tx_buf.curr->phy_addr;

		pNonphysical->TxDescQWrite->Tx_SOP = 1;
		pNonphysical->TxDescQWrite->Tx_EOP = 1;
		pNonphysical->TxDescQWrite->Tx_ADD_FCS = 1;
		pNonphysical->TxDescQWrite->Tx_LTINT = 1;
		pNonphysical->TxDescQWrite->Tx_USPACE = 0;
		pNonphysical->TxDescQWrite->Tx_OWN = 1;

		pNonphysical->TxDescQWrite++;
		if (pNonphysical->TxDescQWrite > pNonphysical->TxDescQEnd) {
			pNonphysical->TxDescQWrite = pNonphysical->TxDescQStart;
		}

		pLayerPointers->pOdl->tx_buf.curr =
		    NEXT(pLayerPointers->pOdl->tx_buf, curr);
	}

	pLayerPointers->pOdl->statistics.tx_ok_packets += count;

	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);

	/* Call mdlTransmit to send the pkt out on the network */
	mdlTransmit(pLayerPointers);
}

/*
 * Softintr entrance. Try to send out packets in the Tx buffer.
 * If tx_reschedule is set, call mac_tx_update() to re-enable
 * transmission.
 */
static uint_t
amd8111s_send_drain(caddr_t arg)
{
	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;

	amd8111s_send_serial(pLayerPointers);

	if (pLayerPointers->pMil->tx_reschedule &&
	    NEXT(pLayerPointers->pOdl->tx_buf, next) !=
	    pLayerPointers->pOdl->tx_buf.free) {
		mac_tx_update(pLayerPointers->pOdl->mh);
		pLayerPointers->pMil->tx_reschedule = B_FALSE;
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Get a Tx buffer
 */
static struct amd8111s_msgbuf *
amd8111s_getTxbuf(struct LayerPointers *pLayerPointers)
{
	struct amd8111s_msgbuf *tmp, *next;

	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);
	next = NEXT(pLayerPointers->pOdl->tx_buf, next);
	if (next == pLayerPointers->pOdl->tx_buf.free) {
		tmp = NULL;
	} else {
		tmp = pLayerPointers->pOdl->tx_buf.next;
		pLayerPointers->pOdl->tx_buf.next = next;
	}
	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);

	return (tmp);
}

static boolean_t
amd8111s_send(struct LayerPointers *pLayerPointers, mblk_t *mp)
{
	struct odl *pOdl;
	size_t frag_len;
	mblk_t *tmp;
	struct amd8111s_msgbuf *txBuf;
	uint8_t *pMsg;

	pOdl = pLayerPointers->pOdl;

	/* Allocate a send buffer */
	txBuf = amd8111s_getTxbuf(pLayerPointers);
	if (txBuf == NULL) {
		pOdl->statistics.tx_no_buffer++;
		pLayerPointers->pMil->tx_reschedule = B_TRUE;
		amd8111s_send_serial(pLayerPointers);
		return (B_FALSE);
	}

	/* Copy the packet into the send buffer */
	txBuf->msg_size = 0;
	pMsg = (uint8_t *)txBuf->vir_addr;
	for (tmp = mp; tmp; tmp = tmp->b_cont) {
		frag_len = MBLKL(tmp);
		bcopy(tmp->b_rptr, pMsg, frag_len);
		txBuf->msg_size += frag_len;
		pMsg += frag_len;
	}
	freemsg(mp);

	amd8111s_send_serial(pLayerPointers);

	return (B_TRUE);
}

/*
 * (GLD Entry Point) Send the message block to lower layer
 */
static mblk_t *
amd8111s_m_tx(void *arg, mblk_t *mp)
{
	struct LayerPointers *pLayerPointers = arg;
	mblk_t *next;

	rw_enter(&pLayerPointers->pOdl->chip_lock, RW_READER);
	if (!pLayerPointers->run) {
		pLayerPointers->pOdl->statistics.tx_afterunplumb++;
		freemsgchain(mp);
		mp = NULL;
	}

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;
		if (!amd8111s_send(pLayerPointers, mp)) {
			/* Send failed */
			mp->b_next = next;
			break;
		}
		mp = next;
	}

	rw_exit(&pLayerPointers->pOdl->chip_lock);
	return (mp);
}

/*
 * (GLD Entry Point) Interrupt Service Routine
 */
static uint_t
amd8111s_intr(caddr_t arg)
{
	unsigned int intrCauses;
	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;

	/* Read the interrupt status from mdl */
	intrCauses = mdlReadInterrupt(pLayerPointers);

	if (intrCauses == 0) {
		pLayerPointers->pOdl->statistics.intr_OTHER++;
		return (DDI_INTR_UNCLAIMED);
	}

	if (intrCauses & LCINT) {
		if (mdlReadLink(pLayerPointers) == LINK_UP) {
			mdlGetActiveMediaInfo(pLayerPointers);
			/* Link status changed */
			if (pLayerPointers->pOdl->LinkStatus !=
			    LINK_STATE_UP) {
				pLayerPointers->pOdl->LinkStatus =
				    LINK_STATE_UP;
				mac_link_update(pLayerPointers->pOdl->mh,
				    LINK_STATE_UP);
			}
		} else {
			if (pLayerPointers->pOdl->LinkStatus !=
			    LINK_STATE_DOWN) {
				pLayerPointers->pOdl->LinkStatus =
				    LINK_STATE_DOWN;
				mac_link_update(pLayerPointers->pOdl->mh,
				    LINK_STATE_DOWN);
			}
		}
	}
	/*
	 * RINT0: Receive Interrupt is set by the controller after the last
	 * descriptor of a receive frame for this ring has been updated by
	 * writing a 0 to the OWNership bit.
	 */
	if (intrCauses & RINT0) {
		pLayerPointers->pOdl->statistics.intr_RINT0++;
		amd8111s_receive(pLayerPointers);
	}

	/*
	 * TINT0: Transmit Interrupt is set by the controller after the OWN bit
	 * in the last descriptor of a transmit frame in this particular ring
	 * has been cleared to indicate the frame has been copied to the
	 * transmit FIFO.
	 */
	if (intrCauses & TINT0) {
		pLayerPointers->pOdl->statistics.intr_TINT0++;
		/*
		 * Drain any packets still waiting in the Tx buffer now
		 * that Tx descriptors have been freed.
		 */
		amd8111s_send_serial(pLayerPointers);
	}

	if (intrCauses & STINT) {
		pLayerPointers->pOdl->statistics.intr_STINT++;
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Re-initialize data structures.
 */
static void
amd8111s_sw_reset(struct LayerPointers *pLayerPointers)
{
	/* Reset all Tx/Rx queues and descriptors */
	milResetTxQ(pLayerPointers);
	milInitRxQ(pLayerPointers);
}

/*
 * Wait (up to 30 ms) for all pending Tx packets to be transmitted.
 */
static void
amd8111s_tx_drain(struct LayerPointers *adapter)
{
	struct tx_desc *pTx_desc = adapter->pMil->pNonphysical->TxDescQStart;
	int i, desc_count = 0;

	for (i = 0; i < 30; i++) {
		while ((pTx_desc->Tx_OWN == 0) && (desc_count < TX_RING_SIZE)) {
			/* This packet has been transmitted */
			pTx_desc++;
			desc_count++;
		}
		if (desc_count == TX_RING_SIZE) {
			break;
		}
		/* Wait 1 ms */
		drv_usecwait(1000);
	}
	adapter->pOdl->statistics.tx_draintime = i;
}

/*
 * (GLD Entry Point) Start the card. Called at "ifconfig plumb" time.
 */
static int
amd8111s_m_start(void *arg)
{
	struct LayerPointers *pLayerPointers = arg;
	struct odl *pOdl = pLayerPointers->pOdl;

	amd8111s_sw_reset(pLayerPointers);
	mdlHWReset(pLayerPointers);
	rw_enter(&pOdl->chip_lock, RW_WRITER);
	pLayerPointers->run = B_TRUE;
	rw_exit(&pOdl->chip_lock);
	return (0);
}

/*
 * (GLD Entry Point) Stop the card. Called at "ifconfig unplumb" time.
 */
static void
amd8111s_m_stop(void *arg)
{
	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;
	struct odl *pOdl = pLayerPointers->pOdl;

	/* Send out all pending Tx packets */
	amd8111s_tx_drain(pLayerPointers);
	/*
	 * Stop the controller and disable the controller interrupt
	 */
	rw_enter(&pOdl->chip_lock, RW_WRITER);
	mdlStopChip(pLayerPointers);
	pLayerPointers->run = B_FALSE;
	rw_exit(&pOdl->chip_lock);
}

/*
 * Clean up all allocated resources.
 */
static void
amd8111s_free_resource(struct LayerPointers *pLayerPointers)
{
	unsigned long mem_free_array[100];
	unsigned long *pmem_free_array, size;

	/* Free Rx/Tx descriptors */
	amd8111s_free_descriptors(pLayerPointers);

	/* Free memory on lower layers */
	milFreeResources(pLayerPointers, mem_free_array);
	pmem_free_array = mem_free_array;
	while (*pmem_free_array) {
		switch (*pmem_free_array) {
		case VIRTUAL:
			size = *(++pmem_free_array);
			pmem_free_array++;
			kmem_free((void *)*(pmem_free_array), size);
			break;
		}
		pmem_free_array++;
	}

	amd8111s_free_buffers(pLayerPointers);
}

/*
 * (GLD Entry Point) Add/delete multicast addresses
 */
static int
amd8111s_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
{
	struct LayerPointers *pLayerPointers = arg;

	if (add) {
		/* Add a multicast entry */
		mdlAddMulticastAddress(pLayerPointers, (UCHAR *)addr);
	} else {
		/* Delete a multicast entry */
		mdlDeleteMulticastAddress(pLayerPointers, (UCHAR *)addr);
	}

	return (0);
}

#ifdef AMD8111S_DEBUG
/*
 * The MIB registers are only 32 bits wide, so dump (and clear) them
 * before one of them overflows. At 100 Mb/s, for example, an octet
 * counter can wrap in under six minutes (2^32 bytes at 12.5 MB/s is
 * roughly 344 seconds).
 */
static void
amd8111s_dump_mib(struct LayerPointers *pLayerPointers)
{
	struct amd8111s_statistics *adapterStat;

	adapterStat = &pLayerPointers->pOdl->statistics;

	adapterStat->mib_dump_counter++;

	/*
	 * Rx Counters
	 */
	adapterStat->rx_mib_unicst_packets +=
	    mdlReadMib(pLayerPointers, RcvUniCastPkts);
	adapterStat->rx_mib_multicst_packets +=
	    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
	adapterStat->rx_mib_broadcst_packets +=
	    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
	adapterStat->rx_mib_macctrl_packets +=
	    mdlReadMib(pLayerPointers, RcvMACCtrl);
	adapterStat->rx_mib_flowctrl_packets +=
	    mdlReadMib(pLayerPointers, RcvFlowCtrl);

	adapterStat->rx_mib_bytes +=
	    mdlReadMib(pLayerPointers, RcvOctets);
	adapterStat->rx_mib_good_bytes +=
	    mdlReadMib(pLayerPointers, RcvGoodOctets);

	adapterStat->rx_mib_undersize_packets +=
	    mdlReadMib(pLayerPointers, RcvUndersizePkts);
	adapterStat->rx_mib_oversize_packets +=
	    mdlReadMib(pLayerPointers, RcvOversizePkts);

	adapterStat->rx_mib_drop_packets +=
	    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
	adapterStat->rx_mib_align_err_packets +=
	    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
	adapterStat->rx_mib_fcs_err_packets +=
	    mdlReadMib(pLayerPointers, RcvFCSErrors);
	adapterStat->rx_mib_symbol_err_packets +=
	    mdlReadMib(pLayerPointers, RcvSymbolErrors);
	adapterStat->rx_mib_miss_packets +=
	    mdlReadMib(pLayerPointers, RcvMissPkts);

	/*
	 * Tx Counters
	 */
	adapterStat->tx_mib_packets +=
	    mdlReadMib(pLayerPointers, XmtPackets);
	adapterStat->tx_mib_multicst_packets +=
	    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
	adapterStat->tx_mib_broadcst_packets +=
	    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
	adapterStat->tx_mib_flowctrl_packets +=
	    mdlReadMib(pLayerPointers, XmtFlowCtrl);

	adapterStat->tx_mib_bytes +=
	    mdlReadMib(pLayerPointers, XmtOctets);

	adapterStat->tx_mib_defer_trans_packets +=
	    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
	adapterStat->tx_mib_collision_packets +=
	    mdlReadMib(pLayerPointers, XmtCollisions);
	adapterStat->tx_mib_one_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtOneCollision);
	adapterStat->tx_mib_multi_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtMultipleCollision);
	adapterStat->tx_mib_late_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtLateCollision);
	adapterStat->tx_mib_ex_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtExcessiveCollision);


	/* Clear all MIB registers */
	WRITE_REG16(pLayerPointers, pLayerPointers->pMdl->Mem_Address
	    + MIB_ADDR, MIB_CLEAR);
}
#endif

/*
 * (GLD Entry Point) Set/unset promiscuous mode
 */
static int
amd8111s_m_promisc(void *arg, boolean_t on)
{
	struct LayerPointers *pLayerPointers = arg;

	if (on) {
		mdlSetPromiscuous(pLayerPointers);
	} else {
		mdlDisablePromiscuous(pLayerPointers);
	}

	return (0);
}

/*
 * (GLD Entry Point) Change the MAC address of the card
 */
static int
amd8111s_m_unicst(void *arg, const uint8_t *macaddr)
{
	struct LayerPointers *pLayerPointers = arg;

	mdlDisableInterrupt(pLayerPointers);
	mdlSetMacAddress(pLayerPointers, (unsigned char *)macaddr);
	mdlEnableInterrupt(pLayerPointers);

	return (0);
}

/*
 * Reset the card
 */
void
amd8111s_reset(struct LayerPointers *pLayerPointers)
{
	amd8111s_sw_reset(pLayerPointers);
	mdlHWReset(pLayerPointers);
}

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board after it has been successfully probed.
 * It will:
 *	a. create a minor device node for the instance.
 *	b. allocate & initialize the four layers (call odlInit).
 *	c. get the MAC address.
 *	d. set pLayerPointers as the GLD private data.
 *	e. register with GLD.
 * If any action fails it cleans up & returns DDI_FAILURE;
 * else it returns DDI_SUCCESS.
 */
static int
amd8111s_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	mac_register_t *macp;
	struct LayerPointers *pLayerPointers;
	struct odl *pOdl;
	ddi_acc_handle_t *pci_handle;
	ddi_device_acc_attr_t dev_attr;
	caddr_t addrp = NULL;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	pLayerPointers = (struct LayerPointers *)
	    kmem_zalloc(sizeof (struct LayerPointers), KM_SLEEP);
	amd8111sadapter = pLayerPointers;

	/* Get device instance number */
	pLayerPointers->instance = ddi_get_instance(devinfo);
	ddi_set_driver_private(devinfo, (caddr_t)pLayerPointers);

	pOdl = (struct odl *)kmem_zalloc(sizeof (struct odl), KM_SLEEP);
	pLayerPointers->pOdl = pOdl;

	pOdl->devinfo = devinfo;

	/*
	 * Here, we only allocate memory for struct odl and initialize it.
	 * All other memory allocation & initialization will be done in
	 * odlInit later in this routine.
	 */
	if (ddi_get_iblock_cookie(devinfo, 0, &pLayerPointers->pOdl->iblock)
	    != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "attach: get iblock cookies failed");
		goto attach_failure;
	}

	rw_init(&pOdl->chip_lock, NULL, RW_DRIVER, (void *)pOdl->iblock);
	mutex_init(&pOdl->mdlSendLock, "amd8111s Send Protection Lock",
	    MUTEX_DRIVER, (void *)pOdl->iblock);
	mutex_init(&pOdl->mdlRcvLock, "amd8111s Rcv Protection Lock",
	    MUTEX_DRIVER, (void *)pOdl->iblock);

	/* Setup PCI space */
	if (pci_config_setup(devinfo, &pOdl->pci_handle) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	pLayerPointers->attach_progress = AMD8111S_ATTACH_PCI;
	pci_handle = &pOdl->pci_handle;

	pOdl->vendor_id = pci_config_get16(*pci_handle, PCI_CONF_VENID);
	pOdl->device_id = pci_config_get16(*pci_handle, PCI_CONF_DEVID);

	/*
	 * Allocate and initialize all resource and map device registers.
	 * If failed, it returns a non-zero value.
	 */
	if (amd8111s_odlInit(pLayerPointers) != 0) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESOURCE;

	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	if (ddi_regs_map_setup(devinfo, 1, &addrp, 0, 4096, &dev_attr,
	    &(pLayerPointers->pOdl->MemBasehandle)) != 0) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "attach: ddi_regs_map_setup failed");
		goto attach_failure;
	}
	pLayerPointers->pMdl->Mem_Address = (unsigned long)addrp;

	/* Initialize HW */
	mdlOpen(pLayerPointers);
	mdlGetActiveMediaInfo(pLayerPointers);
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_REGS;

	/*
	 * Setup the interrupt
	 */
	if (ddi_add_intr(devinfo, 0, &pOdl->iblock, 0, amd8111s_intr,
	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_INTRADDED;

	/*
	 * Setup the soft interrupt
	 */
	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &pOdl->drain_id,
	    NULL, NULL, amd8111s_send_drain,
	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESCHED;

	/*
	 * Initialize the mac structure
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_failure;

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = pLayerPointers;
	macp->m_dip = devinfo;
	/* Get MAC address */
	mdlGetMacAddress(pLayerPointers, (unsigned char *)pOdl->MacAddress);
	macp->m_src_addr = pOdl->MacAddress;
	macp->m_callbacks = &amd8111s_m_callbacks;
	macp->m_min_sdu = 0;
	/* 1518 - 14 (ether header) - 4 (CRC) */
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're ready to start.
	 */
	if (mac_register(macp, &pOdl->mh) != DDI_SUCCESS) {
		mac_free(macp);
		goto attach_failure;
	}
	mac_free(macp);

	pLayerPointers->attach_progress |= AMD8111S_ATTACH_MACREGED;

	return (DDI_SUCCESS);

attach_failure:
	(void) amd8111s_unattach(devinfo, pLayerPointers);
	return (DDI_FAILURE);
}
1841 
1842 /*
1843  * detach(9E) -- Detach a device from the system
1844  *
1845  * Called for each device instance when the system is preparing to
1846  * unload a dynamically unloadable driver.  It will:
1847  *	a. check whether any driver buffers are still held by the OS;
1848  *	b. free all allocated memory that is not in use by the OS;
1849  *	c. unregister with the GLD (MAC) layer;
1850  *	d. return DDI_SUCCESS on a successful free and unregister, and
1851  *	   DDI_FAILURE otherwise.
1852  */
1854 static int
1855 amd8111s_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
1856 {
1857 	struct LayerPointers *pLayerPointers;
1858 
1859 	switch (cmd) {
1860 	case DDI_DETACH:
1861 		break;
1862 	default:
1863 		return (DDI_FAILURE);
1864 	}
1865 
1866 	/*
1867 	 * Get the driver private (struct LayerPointers *) structure
1868 	 */
1869 	pLayerPointers = ddi_get_driver_private(devinfo);
1870 	if (pLayerPointers == NULL) {
1871 		return (DDI_FAILURE);
1872 	}
1873 
1874 	return (amd8111s_unattach(devinfo, pLayerPointers));
1875 }
1876 
1877 static int
1878 amd8111s_unattach(dev_info_t *devinfo, struct LayerPointers *pLayerPointers)
1879 {
1880 	struct odl *pOdl = pLayerPointers->pOdl;
1881 
1882 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_MACREGED) {
1883 		/* Unregister driver from the GLD interface */
1884 		if (mac_unregister(pOdl->mh) != DDI_SUCCESS) {
1885 			return (DDI_FAILURE);
1886 		}
1887 	}
1888 
1889 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_INTRADDED) {
1890 		ddi_remove_intr(devinfo, 0, pOdl->iblock);
1891 	}
1892 
1893 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESCHED) {
1894 		ddi_remove_softintr(pOdl->drain_id);
1895 	}
1896 
1897 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_REGS) {
1898 		/* Stop HW */
1899 		mdlStopChip(pLayerPointers);
1900 		ddi_regs_map_free(&(pOdl->MemBasehandle));
1901 	}
1902 
1903 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESOURCE) {
1904 		/* Free All memory allocated */
1905 		amd8111s_free_resource(pLayerPointers);
1906 	}
1907 
1908 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_PCI) {
1909 		pci_config_teardown(&pOdl->pci_handle);
1910 		mutex_destroy(&pOdl->mdlSendLock);
1911 		mutex_destroy(&pOdl->mdlRcvLock);
1912 		rw_destroy(&pOdl->chip_lock);
1913 	}
1914 
1915 	kmem_free(pOdl, sizeof (struct odl));
1916 	kmem_free(pLayerPointers, sizeof (struct LayerPointers));
1917 
1918 	return (DDI_SUCCESS);
1919 }
1920 
1921 /*
1922  * (GLD Entry Point) GLD calls this entry point periodically to
1923  * collect driver statistics.
1924  */
1925 static int
1926 amd8111s_m_stat(void *arg, uint_t stat, uint64_t *val)
1927 {
1928 	struct LayerPointers *pLayerPointers = arg;
1929 	struct amd8111s_statistics *adapterStat;
1930 
1931 	adapterStat = &pLayerPointers->pOdl->statistics;
1932 
1933 	switch (stat) {
1934 
1935 	/*
1936 	 * Current Status
1937 	 */
1938 	case MAC_STAT_IFSPEED:
1939 		*val = pLayerPointers->pMdl->Speed * 1000000;
1940 		break;
1941 
1942 	case ETHER_STAT_LINK_DUPLEX:
1943 		if (pLayerPointers->pMdl->FullDuplex) {
1944 			*val = LINK_DUPLEX_FULL;
1945 		} else {
1946 			*val = LINK_DUPLEX_HALF;
1947 		}
1948 		break;
1949 
1950 	/*
1951 	 * Capabilities
1952 	 */
1953 	case ETHER_STAT_CAP_1000FDX:
1954 		*val = 0;
1955 		break;
1956 
1957 	case ETHER_STAT_CAP_1000HDX:
1958 		*val = 0;
1959 		break;
1960 
1961 	case ETHER_STAT_CAP_100FDX:
1962 		*val = 1;
1963 		break;
1964 
1965 	case ETHER_STAT_CAP_100HDX:
1966 		*val = 1;
1967 		break;
1968 
1969 	case ETHER_STAT_CAP_10FDX:
1970 		*val = 1;
1971 		break;
1972 
1973 	case ETHER_STAT_CAP_10HDX:
1974 		*val = 1;
1975 		break;
1976 
1977 	case ETHER_STAT_CAP_ASMPAUSE:
1978 		*val = 1;
1979 		break;
1980 
1981 	case ETHER_STAT_CAP_PAUSE:
1982 		*val = 1;
1983 		break;
1984 
1985 	case ETHER_STAT_CAP_AUTONEG:
1986 		*val = 1;
1987 		break;
1988 
1989 	case ETHER_STAT_ADV_CAP_1000FDX:
1990 		*val = 0;
1991 		break;
1992 
1993 	case ETHER_STAT_ADV_CAP_1000HDX:
1994 		*val = 0;
1995 		break;
1996 
1997 	case ETHER_STAT_ADV_CAP_100FDX:
1998 		*val = 1;
1999 		break;
2000 
2001 	case ETHER_STAT_ADV_CAP_100HDX:
2002 		*val = 1;
2003 		break;
2004 
2005 	case ETHER_STAT_ADV_CAP_10FDX:
2006 		*val = 1;
2007 		break;
2008 
2009 	case ETHER_STAT_ADV_CAP_10HDX:
2010 		*val = 1;
2011 		break;
2012 
2013 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
2014 		*val = 1;
2015 		break;
2016 
2017 	case ETHER_STAT_ADV_CAP_PAUSE:
2018 		*val = 1;
2019 		break;
2020 
2021 	case ETHER_STAT_ADV_CAP_AUTONEG:
2022 		*val = 1;
2023 		break;
2024 
2025 	/*
2026 	 * Rx Counters
2027 	 */
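	/*
	 * Each counter below is the driver's accumulated software count
	 * plus the current hardware MIB count read via mdlReadMib().
	 */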
2028 	case MAC_STAT_IPACKETS:
2029 		*val = adapterStat->rx_mib_unicst_packets +
2030 		    adapterStat->rx_mib_multicst_packets +
2031 		    adapterStat->rx_mib_broadcst_packets +
2032 		    mdlReadMib(pLayerPointers, RcvUniCastPkts) +
2033 		    mdlReadMib(pLayerPointers, RcvMultiCastPkts) +
2034 		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
2035 		break;
2036 
2037 	case MAC_STAT_RBYTES:
2038 		*val = adapterStat->rx_mib_bytes +
2039 		    mdlReadMib(pLayerPointers, RcvOctets);
2040 		break;
2041 
2042 	case MAC_STAT_MULTIRCV:
2043 		*val = adapterStat->rx_mib_multicst_packets +
2044 		    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
2045 		break;
2046 
2047 	case MAC_STAT_BRDCSTRCV:
2048 		*val = adapterStat->rx_mib_broadcst_packets +
2049 		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
2050 		break;
2051 
2052 	case MAC_STAT_NORCVBUF:
2053 		*val = adapterStat->rx_allocfail +
2054 		    adapterStat->rx_mib_drop_packets +
2055 		    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
2056 		break;
2057 
2058 	case MAC_STAT_IERRORS:
2059 		*val = adapterStat->rx_mib_align_err_packets +
2060 		    adapterStat->rx_mib_fcs_err_packets +
2061 		    adapterStat->rx_mib_symbol_err_packets +
2062 		    mdlReadMib(pLayerPointers, RcvAlignmentErrors) +
2063 		    mdlReadMib(pLayerPointers, RcvFCSErrors) +
2064 		    mdlReadMib(pLayerPointers, RcvSymbolErrors);
2065 		break;
2066 
2067 	case ETHER_STAT_ALIGN_ERRORS:
2068 		*val = adapterStat->rx_mib_align_err_packets +
2069 		    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
2070 		break;
2071 
2072 	case ETHER_STAT_FCS_ERRORS:
2073 		*val = adapterStat->rx_mib_fcs_err_packets +
2074 		    mdlReadMib(pLayerPointers, RcvFCSErrors);
2075 		break;
2076 
2077 	/*
2078 	 * Tx Counters
2079 	 */
2080 	case MAC_STAT_OPACKETS:
2081 		*val = adapterStat->tx_mib_packets +
2082 		    mdlReadMib(pLayerPointers, XmtPackets);
2083 		break;
2084 
2085 	case MAC_STAT_OBYTES:
2086 		*val = adapterStat->tx_mib_bytes +
2087 		    mdlReadMib(pLayerPointers, XmtOctets);
2088 		break;
2089 
2090 	case MAC_STAT_MULTIXMT:
2091 		*val = adapterStat->tx_mib_multicst_packets +
2092 		    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
2093 		break;
2094 
2095 	case MAC_STAT_BRDCSTXMT:
2096 		*val = adapterStat->tx_mib_broadcst_packets +
2097 		    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
2098 		break;
2099 
2100 	case MAC_STAT_NOXMTBUF:
2101 		*val = adapterStat->tx_no_descriptor;
2102 		break;
2103 
2104 	case MAC_STAT_OERRORS:
2105 		*val = adapterStat->tx_mib_ex_coll_packets +
2106 		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
2107 		break;
2108 
2109 	case MAC_STAT_COLLISIONS:
2110 		*val = adapterStat->tx_mib_ex_coll_packets +
2111 		    mdlReadMib(pLayerPointers, XmtCollisions);
2112 		break;
2113 
2114 	case ETHER_STAT_FIRST_COLLISIONS:
2115 		*val = adapterStat->tx_mib_one_coll_packets +
2116 		    mdlReadMib(pLayerPointers, XmtOneCollision);
2117 		break;
2118 
2119 	case ETHER_STAT_MULTI_COLLISIONS:
2120 		*val = adapterStat->tx_mib_multi_coll_packets +
2121 		    mdlReadMib(pLayerPointers, XmtMultipleCollision);
2122 		break;
2123 
2124 	case ETHER_STAT_EX_COLLISIONS:
2125 		*val = adapterStat->tx_mib_ex_coll_packets +
2126 		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
2127 		break;
2128 
2129 	case ETHER_STAT_TX_LATE_COLLISIONS:
2130 		*val = adapterStat->tx_mib_late_coll_packets +
2131 		    mdlReadMib(pLayerPointers, XmtLateCollision);
2132 		break;
2133 
2134 	case ETHER_STAT_DEFER_XMTS:
2135 		*val = adapterStat->tx_mib_defer_trans_packets +
2136 		    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
2137 		break;
2138 
2139 	default:
2140 		return (ENOTSUP);
2141 	}
2142 	return (0);
2143 }
2144 
2145 /*
2146  *	Memory access functions used by MDL to read and write card registers.
2147  */
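/*
 * The address arguments are full virtual addresses within the register
 * mapping (derived from pMdl->Mem_Address), not offsets.
 */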
2148 unsigned char
2149 READ_REG8(struct LayerPointers *pLayerPointers, long x)
2150 {
2151 	return (ddi_get8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)x));
2152 }
2153 
2154 int
2155 READ_REG16(struct LayerPointers *pLayerPointers, long x)
2156 {
2157 	return (ddi_get16(pLayerPointers->pOdl->MemBasehandle,
2158 	    (uint16_t *)(x)));
2159 }
2160 
2161 long
2162 READ_REG32(struct LayerPointers *pLayerPointers, long x)
2163 {
2164 	return (ddi_get32(pLayerPointers->pOdl->MemBasehandle,
2165 	    (uint32_t *)(x)));
2166 }
2167 
2168 void
2169 WRITE_REG8(struct LayerPointers *pLayerPointers, long x, int y)
2170 {
2171 	ddi_put8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)(x), y);
2172 }
2173 
2174 void
2175 WRITE_REG16(struct LayerPointers *pLayerPointers, long x, int y)
2176 {
2177 	ddi_put16(pLayerPointers->pOdl->MemBasehandle, (uint16_t *)(x), y);
2178 }
2179 
2180 void
2181 WRITE_REG32(struct LayerPointers *pLayerPointers, long x, int y)
2182 {
2183 	ddi_put32(pLayerPointers->pOdl->MemBasehandle, (uint32_t *)(x), y);
2184 }
2185 
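/*
 * Write a 64-bit quantity as eight consecutive byte-wide stores.
 */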
2186 void
2187 WRITE_REG64(struct LayerPointers *pLayerPointers, long x, char *y)
2188 {
2189 	int i;
2190 	for (i = 0; i < 8; i++) {
2191 		WRITE_REG8(pLayerPointers, (x + i), y[i]);
2192 	}
2193 }
2194