/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2001-2006 Advanced Micro Devices, Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * + Redistributions of source code must retain the above copyright notice,
 * + this list of conditions and the following disclaimer.
 *
 * + Redistributions in binary form must reproduce the above copyright
 * + notice, this list of conditions and the following disclaimer in the
 * + documentation and/or other materials provided with the distribution.
 *
 * + Neither the name of Advanced Micro Devices, Inc. nor the names of its
 * + contributors may be used to endorse or promote products derived from
 * + this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Import/Export/Re-Export/Use/Release/Transfer Restrictions and
 * Compliance with Applicable Laws.  Notice is hereby given that
 * the software may be subject to restrictions on use, release,
 * transfer, importation, exportation and/or re-exportation under
 * the laws and regulations of the United States or other
 * countries ("Applicable Laws"), which include but are not
 * limited to U.S. export control laws such as the Export
 * Administration Regulations and national security controls as
 * defined thereunder, as well as State Department controls under
 * the U.S. Munitions List.  Permission to use and/or
 * redistribute the software is conditioned upon compliance with
 * all Applicable Laws, including U.S. export control laws
 * regarding specifically designated persons, countries and
 * nationals of countries subject to national security controls.
 */

/* include files */
#include <sys/disp.h>
#include <sys/atomic.h>
#include <sys/vlan.h>
#include "amd8111s_main.h"

/* Global macro Definitions */
#define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define	INTERFACE_NAME "amd8111s"
#define	AMD8111S_SPLIT	128
#define	AMD8111S_SEND_MAX	64
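
/*
 * ROUNDUP() rounds x up to the next multiple of a power-of-two alignment a,
 * e.g. ROUNDUP(100, 16) == 112 while ROUNDUP(112, 16) == 112.
 * AMD8111S_SPLIT is the maximum number of DMA trunks a Tx/Rx ring buffer is
 * carved into (see amd8111s_alloc_dma_ringbuf() below), and AMD8111S_SEND_MAX
 * bounds how many packets amd8111s_send_serial() queues per call.
 */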

static char ident[] = "AMD8111 10/100M Ethernet";

/*
 * Driver Entry Points
 */
static int amd8111s_attach(dev_info_t *, ddi_attach_cmd_t);
static int amd8111s_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * GLD Entry points prototype
 */
static int amd8111s_m_unicst(void *, const uint8_t *);
static int amd8111s_m_promisc(void *, boolean_t);
static int amd8111s_m_stat(void *, uint_t, uint64_t *);
static void amd8111s_m_ioctl(void *, queue_t *, mblk_t *);
static int amd8111s_m_multicst(void *, boolean_t, const uint8_t *addr);
static int amd8111s_m_start(void *);
static void amd8111s_m_stop(void *);
static mblk_t *amd8111s_m_tx(void *, mblk_t *mp);
static uint_t amd8111s_intr(caddr_t);

static int amd8111s_unattach(dev_info_t *, struct LayerPointers *);

static boolean_t amd8111s_allocate_buffers(struct LayerPointers *);
static int amd8111s_odlInit(struct LayerPointers *);
static boolean_t amd8111s_allocate_descriptors(struct LayerPointers *);
static void amd8111s_free_descriptors(struct LayerPointers *);
static boolean_t amd8111s_alloc_dma_ringbuf(struct LayerPointers *,
		struct amd8111s_dma_ringbuf *, uint32_t, uint32_t);
static void amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *);


static void amd8111s_log(struct LayerPointers *adapter, int level,
    char *fmt, ...);

static struct cb_ops amd8111s_cb_ops = {
	nulldev,
	nulldev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nochpoll,
	ddi_prop_op,
	NULL,
	D_NEW | D_MP,
	CB_REV,		/* cb_rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};

static struct dev_ops amd8111s_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	amd8111s_attach,	/* devo_attach */
	amd8111s_detach,	/* devo_detach */
	nodev,			/* devo_reset */
	&amd8111s_cb_ops,	/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	nodev,			/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

struct modldrv amd8111s_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	ident,			/* short description */
	&amd8111s_dev_ops	/* driver specific ops */
};

struct modlinkage amd8111s_modlinkage = {
	MODREV_1, (void *)&amd8111s_modldrv, NULL
};

/*
 * Global Variables
 */
struct LayerPointers *amd8111sadapter;

static ddi_dma_attr_t pcn_buff_dma_attr_t = {
	DMA_ATTR_V0,	/* dma_attr_version */
	(uint64_t)0,		/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint64_t)1,		/* dma_attr_align */
	(uint_t)0x7F,		/* dma_attr_burstsizes */
	(uint32_t)1,		/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* granularity */
	(uint_t)0		/* dma_attr_flags */
};

static ddi_dma_attr_t pcn_desc_dma_attr_t = {
	DMA_ATTR_V0,		/* dma_attr_version */
	(uint64_t)0,		/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint64_t)0x7FFFFFFF,	/* dma_attr_count_max */
	(uint64_t)0x10,		/* dma_attr_align */
	(uint_t)0xFFFFFFFFU,	/* dma_attr_burstsizes */
	(uint32_t)1,		/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* granularity */
	(uint_t)0		/* dma_attr_flags */
};

/* PIO access attributes for registers */
static ddi_device_acc_attr_t pcn_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

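/*
 * Note how the two DMA attribute templates above differ: pcn_buff_dma_attr_t
 * covers the packet buffers and only asks for byte alignment, while
 * pcn_desc_dma_attr_t covers the descriptor rings and requires 16-byte
 * alignment (dma_attr_align = 0x10).  Both are restricted to 32-bit
 * addresses and a single DMA cookie per bind (dma_attr_sgllen = 1).
 * pcn_acc_attr makes those mappings, as well as register PIO, little-endian
 * and strictly ordered.
 */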

static mac_callbacks_t amd8111s_m_callbacks = {
	MC_IOCTL,
	amd8111s_m_stat,
	amd8111s_m_start,
	amd8111s_m_stop,
	amd8111s_m_promisc,
	amd8111s_m_multicst,
	amd8111s_m_unicst,
	amd8111s_m_tx,
	NULL,
	amd8111s_m_ioctl
};


/*
 * Standard Driver Load Entry Point
 * It will be called at driver load time.
 */
int
_init()
{
	int status;
	mac_init_ops(&amd8111s_dev_ops, "amd8111s");

	status = mod_install(&amd8111s_modlinkage);
	if (status != DDI_SUCCESS) {
		mac_fini_ops(&amd8111s_dev_ops);
	}

	return (status);
}

/*
 * Standard Driver Entry Point for Query.
 * It can be called at any time to get driver info.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&amd8111s_modlinkage, modinfop));
}

/*
 *	Standard Driver Entry Point for Unload.
 *	It will be called at driver unload time.
 */
int
_fini()
{
	int status;

	status = mod_remove(&amd8111s_modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&amd8111s_dev_ops);
	}

	return (status);
}
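
/*
 * Note that mac_init_ops() in _init() is always paired with mac_fini_ops():
 * it is undone immediately if mod_install() fails, and again in _fini()
 * once mod_remove() has succeeded.  _info() only reports the module
 * linkage and needs no such cleanup.
 */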

/*
 * Loopback Support
 */
static lb_property_t loopmodes[] = {
	{ normal,	"normal",	AMD8111S_LB_NONE		},
	{ external,	"100Mbps",	AMD8111S_LB_EXTERNAL_100	},
	{ external,	"10Mbps",	AMD8111S_LB_EXTERNAL_10		},
	{ internal,	"MAC",		AMD8111S_LB_INTERNAL_MAC	}
};
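
/*
 * This table is what the LB_GET_INFO ioctl copies out to the caller, and
 * the value field of the entry chosen via LB_SET_MODE is what gets passed
 * to amd8111s_set_loop_mode() below.
 */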

static void
amd8111s_set_loop_mode(struct LayerPointers *adapter, uint32_t mode)
{

	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == adapter->pOdl->loopback_mode)
		return;

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return;

	case AMD8111S_LB_NONE:
		mdlStopChip(adapter);
		if (adapter->pOdl->loopback_mode == AMD8111S_LB_INTERNAL_MAC) {
			cmn_err(CE_NOTE, "LB_NONE restored from Internal LB");
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
			    INLOOP);
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
			    FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);
		} else {
			cmn_err(CE_NOTE, "LB_NONE restored from External LB");
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
			    EXLOOP);
		}

		amd8111s_reset(adapter);
		adapter->pOdl->LinkStatus = LINK_STATE_DOWN;
		adapter->pOdl->rx_fcs_stripped = B_FALSE;
		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_EXTERNAL_100:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_100");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_100);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | EXLOOP);
		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->Speed = 100;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_EXTERNAL_10:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_10");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_10);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | EXLOOP);
		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->Speed = 10;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_INTERNAL_MAC:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_INTERNAL_MAC");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		/* Disable Port Manager */
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
		    EN_PMGR);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | INLOOP);

		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
		    VAL1 | FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);

		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	adapter->pOdl->loopback_mode = mode;
}

static enum ioc_reply
amd8111s_loopback_ioctl(struct LayerPointers *adapter, struct iocblk *iocp,
    mblk_t *mp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		amd8111s_log(adapter, CE_NOTE,
		    "amd8111s_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "wrong LB_GET_INFO_SIZE size");
			return (IOC_INVAL);
		}
		lbsp = (void *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		break;

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_GET_INFO size");
			return (IOC_INVAL);
		}
		lbpp = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		break;

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_GET_MODE size");
			return (IOC_INVAL);
		}
		lbmp = (void *)mp->b_cont->b_rptr;
		*lbmp = adapter->pOdl->loopback_mode;
		break;

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_SET_MODE size");
			return (IOC_INVAL);
		}
		lbmp = (void *)mp->b_cont->b_rptr;
		amd8111s_set_loop_mode(adapter, *lbmp);
		break;
	}
	return (IOC_REPLY);
}

static void
amd8111s_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;
	struct LayerPointers *adapter;
	enum ioc_reply status;

	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;
	adapter = arg;

	ASSERT(adapter);
	if (adapter == NULL) {
		miocnak(q, mp, 0, EINVAL);
		return;
	}

	switch (iocp->ioc_cmd) {

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = amd8111s_loopback_ioctl(adapter, iocp, mp);
		break;

	default:
		status = IOC_INVAL;
		break;
	}

	/*
	 * Decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(q, mp, 0, 0);
		break;

	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(q, mp);
		break;
	}
}

/*
 * Copy one packet from DMA memory to an mblk, then advance the DMA
 * descriptor pointer.
 */
static boolean_t
amd8111s_recv_copy(struct LayerPointers *pLayerPointers, mblk_t **last_mp)
{
	int length = 0;
	mblk_t *mp;
	struct rx_desc *descriptor;
	struct odl *pOdl = pLayerPointers->pOdl;
	struct amd8111s_statistics *statistics = &pOdl->statistics;
	struct nonphysical *pNonphysical = pLayerPointers->pMil
	    ->pNonphysical;

	mutex_enter(&pOdl->mdlRcvLock);
	descriptor = pNonphysical->RxBufDescQRead->descriptor;
	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle,
	    pNonphysical->RxBufDescQRead->descriptor -
	    pNonphysical->RxBufDescQStart->descriptor,
	    sizeof (struct rx_desc), DDI_DMA_SYNC_FORCPU);
	if ((descriptor->Rx_OWN) == 0) {
		/*
		 * If the frame is received with errors, then set MCNT
		 * of that pkt in ReceiveArray to 0. This packet would
		 * be discarded later and not indicated to OS.
		 */
		if (descriptor->Rx_ERR) {
			statistics->rx_desc_err ++;
			descriptor->Rx_ERR = 0;
			if (descriptor->Rx_FRAM == 1) {
				statistics->rx_desc_err_FRAM ++;
				descriptor->Rx_FRAM = 0;
			}
			if (descriptor->Rx_OFLO == 1) {
				statistics->rx_desc_err_OFLO ++;
				descriptor->Rx_OFLO = 0;
				pOdl->rx_overflow_counter ++;
				if ((pOdl->rx_overflow_counter > 5) &&
				    (pOdl->pause_interval == 0)) {
					statistics->rx_double_overflow ++;
					mdlSendPause(pLayerPointers);
					pOdl->rx_overflow_counter = 0;
					pOdl->pause_interval = 25;
				}
			}
			if (descriptor->Rx_CRC == 1) {
				statistics->rx_desc_err_CRC ++;
				descriptor->Rx_CRC = 0;
			}
			if (descriptor->Rx_BUFF == 1) {
				statistics->rx_desc_err_BUFF ++;
				descriptor->Rx_BUFF = 0;
			}
			goto Next_Descriptor;
		}

		/* Length of incoming packet */
		if (pOdl->rx_fcs_stripped) {
			length = descriptor->Rx_MCNT - 4;
		} else {
			length = descriptor->Rx_MCNT;
		}
		if (length < 62) {
			statistics->rx_error_zerosize ++;
		}

		if ((mp = allocb(length, BPRI_MED)) == NULL) {
			statistics->rx_allocfail ++;
			goto failed;
		}
		/* Copy from virtual address of incoming packet */
		bcopy((long *)*(pNonphysical->RxBufDescQRead->USpaceMap),
		    mp->b_rptr, length);
		mp->b_wptr = mp->b_rptr + length;
		statistics->rx_ok_packets ++;
		if (*last_mp == NULL) {
			*last_mp = mp;
		} else {
			(*last_mp)->b_next = mp;
			*last_mp = mp;
		}

Next_Descriptor:
		descriptor->Rx_MCNT = 0;
		descriptor->Rx_SOP = 0;
		descriptor->Rx_EOP = 0;
		descriptor->Rx_PAM = 0;
		descriptor->Rx_BAM = 0;
		descriptor->TT = 0;
		descriptor->Rx_OWN = 1;
		pNonphysical->RxBufDescQRead->descriptor++;
		pNonphysical->RxBufDescQRead->USpaceMap++;
		if (pNonphysical->RxBufDescQRead->descriptor >
		    pNonphysical->RxBufDescQEnd->descriptor) {
			pNonphysical->RxBufDescQRead->descriptor =
			    pNonphysical->RxBufDescQStart->descriptor;
			pNonphysical->RxBufDescQRead->USpaceMap =
			    pNonphysical->RxBufDescQStart->USpaceMap;
		}
		mutex_exit(&pOdl->mdlRcvLock);

		return (B_TRUE);
	}

failed:
	mutex_exit(&pOdl->mdlRcvLock);
	return (B_FALSE);
}

/*
 * Get the received packets from NIC card and send them to GLD.
 */
static void
amd8111s_receive(struct LayerPointers *pLayerPointers)
{
	int numOfPkts = 0;
	struct odl *pOdl;
	mblk_t *ret_mp = NULL, *last_mp = NULL;

	pOdl = pLayerPointers->pOdl;

	rw_enter(&pOdl->chip_lock, RW_READER);
	if (!pLayerPointers->run) {
		rw_exit(&pOdl->chip_lock);
		return;
	}

	if (pOdl->pause_interval > 0)
		pOdl->pause_interval --;

	while (numOfPkts < RX_RING_SIZE) {

		if (!amd8111s_recv_copy(pLayerPointers, &last_mp)) {
			break;
		}
		if (ret_mp == NULL)
			ret_mp = last_mp;
		numOfPkts++;
	}

	if (ret_mp) {
		mac_rx(pOdl->mh, NULL, ret_mp);
	}

	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	mdlReceive(pLayerPointers);

	rw_exit(&pOdl->chip_lock);

}

/*
 * Print message in release-version driver.
 */
static void
amd8111s_log(struct LayerPointers *adapter, int level, char *fmt, ...)
{
	auto char name[32];
	auto char buf[256];
	va_list ap;

	if (adapter != NULL) {
		(void) sprintf(name, "amd8111s%d",
		    ddi_get_instance(adapter->pOdl->devinfo));
	} else {
		(void) sprintf(name, "amd8111s");
	}
	va_start(ap, fmt);
	(void) vsprintf(buf, fmt, ap);
	va_end(ap);
	cmn_err(level, "%s: %s", name, buf);
}

/*
 * To allocate & initialize all resources.
 * Called by amd8111s_attach().
 */
static int
amd8111s_odlInit(struct LayerPointers *pLayerPointers)
{
	unsigned long mem_req_array[MEM_REQ_MAX];
	unsigned long mem_set_array[MEM_REQ_MAX];
	unsigned long *pmem_req_array;
	unsigned long *pmem_set_array;
	int i, size;

	for (i = 0; i < MEM_REQ_MAX; i++) {
		mem_req_array[i] = 0;
		mem_set_array[i] = 0;
	}

	milRequestResources(mem_req_array);

	pmem_req_array = mem_req_array;
	pmem_set_array = mem_set_array;
	while (*pmem_req_array) {
		switch (*pmem_req_array) {
		case VIRTUAL:
			*pmem_set_array = VIRTUAL;
			pmem_req_array++;
			pmem_set_array++;
			*(pmem_set_array) = *(pmem_req_array);
			pmem_set_array++;
			*(pmem_set_array) = (unsigned long) kmem_zalloc(
			    *(pmem_req_array), KM_NOSLEEP);
			if (*pmem_set_array == NULL)
				goto odl_init_failure;
			break;
		}
		pmem_req_array++;
		pmem_set_array++;
	}

	/*
	 * Initialize memory on lower layers
	 */
	milSetResources(pLayerPointers, mem_set_array);

	/* Allocate Rx/Tx descriptors */
	if (amd8111s_allocate_descriptors(pLayerPointers) != B_TRUE) {
		*pmem_set_array = NULL;
		goto odl_init_failure;
	}

	/*
	 * Allocate Rx buffer for each Rx descriptor. Then call mil layer
	 * routine to fill physical address of Rx buffer into Rx descriptor.
	 */
	if (amd8111s_allocate_buffers(pLayerPointers) == B_FALSE) {
		amd8111s_free_descriptors(pLayerPointers);
		*pmem_set_array = NULL;
		goto odl_init_failure;
	}
	milInitGlbds(pLayerPointers);

	return (0);

odl_init_failure:
	/*
	 * Free All memory allocated so far
	 */
	pmem_req_array = mem_set_array;
	while ((*pmem_req_array) && (pmem_req_array != pmem_set_array)) {
		switch (*pmem_req_array) {
		case VIRTUAL:
			pmem_req_array++;	/* Size */
			size = *(pmem_req_array);
			pmem_req_array++;	/* Virtual Address */
			if (pmem_req_array == NULL)
				return (1);
			kmem_free((int *)*pmem_req_array, size);
			break;
		}
		pmem_req_array++;
	}
	return (1);
}

/*
 * Allocate and initialize Tx/Rx descriptors
 */
static boolean_t
amd8111s_allocate_descriptors(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;
	struct mil *pMil = pLayerPointers->pMil;
	dev_info_t *devinfo = pOdl->devinfo;
	uint_t length, count, i;
	size_t real_length;

	/*
	 * Allocate Rx descriptors
	 */
	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
	    NULL, &pOdl->rx_desc_dma_handle) != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_alloc_handle for Rx desc failed");
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}

	length = sizeof (struct rx_desc) * RX_RING_SIZE + ALIGNMENT;
	if (ddi_dma_mem_alloc(pOdl->rx_desc_dma_handle, length,
	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&pMil->Rx_desc_original, &real_length,
	    &pOdl->rx_desc_acc_handle) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_alloc for Rx desc failed");
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}

	if (ddi_dma_addr_bind_handle(pOdl->rx_desc_dma_handle,
	    NULL, (caddr_t)pMil->Rx_desc_original, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &pOdl->rx_desc_dma_cookie,
	    &count) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_addr_bind_handle for Rx desc failed");
		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}
	ASSERT(count == 1);

	/* Initialize Rx descriptors related variables */
	pMil->Rx_desc = (struct rx_desc *)
	    ((pMil->Rx_desc_original + ALIGNMENT) & ~ALIGNMENT);
	pMil->Rx_desc_pa = (unsigned int)
	    ((pOdl->rx_desc_dma_cookie.dmac_laddress + ALIGNMENT) & ~ALIGNMENT);

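	/*
	 * Note on the alignment arithmetic above: assuming ALIGNMENT is a
	 * power-of-two minus one (it is defined in amd8111s_main.h, e.g.
	 * 0x0F for 16-byte alignment), (addr + ALIGNMENT) & ~ALIGNMENT
	 * rounds the descriptor area up to the next aligned boundary, and
	 * the extra ALIGNMENT bytes added to "length" above guarantee the
	 * rounded-up area still fits inside the allocation.
	 */
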
	pLayerPointers->pMdl->init_blk->RDRA = pMil->Rx_desc_pa;


	/*
	 * Allocate Tx descriptors
	 */
	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
	    NULL, &pOdl->tx_desc_dma_handle) != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_alloc_handle for Tx desc failed");
		goto allocate_desc_fail;
	}

	length = sizeof (struct tx_desc) * TX_RING_SIZE + ALIGNMENT;
	if (ddi_dma_mem_alloc(pOdl->tx_desc_dma_handle, length,
	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&pMil->Tx_desc_original, &real_length,
	    &pOdl->tx_desc_acc_handle) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_alloc for Tx desc failed");
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		goto allocate_desc_fail;
	}

	if (ddi_dma_addr_bind_handle(pOdl->tx_desc_dma_handle,
	    NULL, (caddr_t)pMil->Tx_desc_original, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &pOdl->tx_desc_dma_cookie,
	    &count) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_addr_bind_handle for Tx desc failed");
		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		goto allocate_desc_fail;
	}
	ASSERT(count == 1);
	/* Set the DMA area to all zeros */
	bzero((caddr_t)pMil->Tx_desc_original, length);

	/* Initialize Tx descriptors related variables */
	pMil->Tx_desc = (struct tx_desc *)
	    ((pMil->Tx_desc_original + ALIGNMENT) & ~ALIGNMENT);
	pMil->pNonphysical->TxDescQRead = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQWrite = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQStart = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQEnd = &(pMil->Tx_desc[TX_RING_SIZE - 1]);

	/* Physical Addr of Tx_desc_original & Tx_desc */
	pLayerPointers->pMil->Tx_desc_pa =
	    ((pOdl->tx_desc_dma_cookie.dmac_laddress + ALIGNMENT) &
	    ~ALIGNMENT);

	/* Setting the reserved bits in the tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		pMil->pNonphysical->TxDescQWrite->Tx_RES0 = 0x0f;
		pMil->pNonphysical->TxDescQWrite->Tx_OWN = 0;
		pMil->pNonphysical->TxDescQWrite++;
	}
	pMil->pNonphysical->TxDescQWrite = pMil->pNonphysical->TxDescQStart;

	pLayerPointers->pMdl->init_blk->TDRA = pMil->Tx_desc_pa;

	return (B_TRUE);

allocate_desc_fail:
	pOdl->tx_desc_dma_handle = NULL;
	(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
	ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
	ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
	pOdl->rx_desc_dma_handle = NULL;
	return (B_FALSE);
}

/*
 * Free Tx/Rx descriptors
 */
static void
amd8111s_free_descriptors(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	/* Free Rx descriptors */
	if (pOdl->rx_desc_dma_handle) {
		(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
	}

	/* Free Tx descriptors */
	if (pOdl->tx_desc_dma_handle) {
		(void) ddi_dma_unbind_handle(pOdl->tx_desc_dma_handle);
		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		pOdl->tx_desc_dma_handle = NULL;
	}
}

/*
 * Allocate Tx/Rx Ring buffer
 */
static boolean_t
amd8111s_alloc_dma_ringbuf(struct LayerPointers *pLayerPointers,
			struct amd8111s_dma_ringbuf *pRing,
			uint32_t ring_size, uint32_t msg_size)
{
	uint32_t idx, msg_idx = 0, msg_acc;
	dev_info_t *devinfo = pLayerPointers->pOdl->devinfo;
	size_t real_length;
	uint_t count = 0;

	ASSERT(pcn_buff_dma_attr_t.dma_attr_align == 1);
	pRing->dma_buf_sz = msg_size;
	pRing->ring_size = ring_size;
	pRing->trunk_num = AMD8111S_SPLIT;
	pRing->buf_sz = msg_size * ring_size;
	if (ring_size < pRing->trunk_num)
		pRing->trunk_num = ring_size;
	ASSERT((pRing->buf_sz % pRing->trunk_num) == 0);

	pRing->trunk_sz = pRing->buf_sz / pRing->trunk_num;
	ASSERT((pRing->trunk_sz % pRing->dma_buf_sz) == 0);
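
	/*
	 * A sketch of the layout built below (assuming the ring constants
	 * from amd8111s_main.h): the whole buffer of ring_size messages of
	 * msg_size bytes each is carved into trunk_num DMA trunks (at most
	 * AMD8111S_SPLIT, i.e. 128, and never more than ring_size).  Each
	 * trunk is one DMA allocation of trunk_sz bytes holding
	 * trunk_sz / dma_buf_sz messages, and msg_buf[] records the virtual
	 * address, physical address and owning DMA handle of every message.
	 */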

	pRing->msg_buf = kmem_zalloc(sizeof (struct amd8111s_msgbuf) *
	    ring_size, KM_NOSLEEP);
	pRing->dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->dma_cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->trunk_addr = kmem_zalloc(sizeof (caddr_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	if (pRing->msg_buf == NULL || pRing->dma_hdl == NULL ||
	    pRing->acc_hdl == NULL || pRing->trunk_addr == NULL ||
	    pRing->dma_cookie == NULL) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "kmem_zalloc failed");
		goto failed;
	}

	for (idx = 0; idx < pRing->trunk_num; ++idx) {
		if (ddi_dma_alloc_handle(devinfo, &pcn_buff_dma_attr_t,
		    DDI_DMA_SLEEP, NULL, &(pRing->dma_hdl[idx]))
		    != DDI_SUCCESS) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_alloc_handle failed");
			goto failed;
		} else if (ddi_dma_mem_alloc(pRing->dma_hdl[idx],
		    pRing->trunk_sz, &pcn_acc_attr, DDI_DMA_STREAMING,
		    DDI_DMA_SLEEP, NULL,
		    (caddr_t *)&(pRing->trunk_addr[idx]),
		    (size_t *)(&real_length), &pRing->acc_hdl[idx])
		    != DDI_SUCCESS) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_mem_alloc failed");
			goto failed;
		} else if (real_length != pRing->trunk_sz) {
			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_mem_alloc returned a short length");
			goto failed;
		} else if (ddi_dma_addr_bind_handle(pRing->dma_hdl[idx],
		    NULL, (caddr_t)pRing->trunk_addr[idx], real_length,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
		    &pRing->dma_cookie[idx], &count) != DDI_DMA_MAPPED) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_addr_bind_handle failed");
			goto failed;
		} else {
			for (msg_acc = 0;
			    msg_acc < pRing->trunk_sz / pRing->dma_buf_sz;
			    ++ msg_acc) {
				pRing->msg_buf[msg_idx].offset =
				    msg_acc * pRing->dma_buf_sz;
				pRing->msg_buf[msg_idx].vir_addr =
				    pRing->trunk_addr[idx] +
				    pRing->msg_buf[msg_idx].offset;
				pRing->msg_buf[msg_idx].phy_addr =
				    pRing->dma_cookie[idx].dmac_laddress +
				    pRing->msg_buf[msg_idx].offset;
				pRing->msg_buf[msg_idx].p_hdl =
				    pRing->dma_hdl[idx];
				msg_idx ++;
			}
		}
	}

	pRing->free = pRing->msg_buf;
	pRing->next = pRing->msg_buf;
	pRing->curr = pRing->msg_buf;

	return (B_TRUE);
failed:
	amd8111s_free_dma_ringbuf(pRing);
	return (B_FALSE);
}

/*
 * Free Tx/Rx ring buffer
 */
static void
amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *pRing)
{
	int idx;

	if (pRing->dma_cookie != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx ++) {
			if (pRing->dma_cookie[idx].dmac_laddress == 0) {
				break;
			}
			(void) ddi_dma_unbind_handle(pRing->dma_hdl[idx]);
		}
		kmem_free(pRing->dma_cookie,
		    sizeof (ddi_dma_cookie_t) * pRing->trunk_num);
	}

	if (pRing->acc_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx ++) {
			if (pRing->acc_hdl[idx] == NULL)
				break;
			ddi_dma_mem_free(&pRing->acc_hdl[idx]);
		}
		kmem_free(pRing->acc_hdl,
		    sizeof (ddi_acc_handle_t) * pRing->trunk_num);
	}

	if (pRing->dma_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx ++) {
			if (pRing->dma_hdl[idx] == 0) {
				break;
			}
			ddi_dma_free_handle(&pRing->dma_hdl[idx]);
		}
		kmem_free(pRing->dma_hdl,
		    sizeof (ddi_dma_handle_t) * pRing->trunk_num);
	}

	if (pRing->msg_buf != NULL) {
		kmem_free(pRing->msg_buf,
		    sizeof (struct amd8111s_msgbuf) * pRing->ring_size);
	}

	if (pRing->trunk_addr != NULL) {
		kmem_free(pRing->trunk_addr,
		    sizeof (caddr_t) * pRing->trunk_num);
	}

	bzero(pRing, sizeof (*pRing));
}


/*
 * Allocate all Tx buffers.
 * Allocate a Rx buffer for each Rx descriptor. Then
 * call mil routine to fill physical address of Rx
 * buffer into Rx descriptors
 */
static boolean_t
amd8111s_allocate_buffers(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	/*
	 * Allocate Rx buffers
	 */
	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->rx_buf,
	    RX_RING_SIZE, RX_BUF_SIZE) == B_FALSE) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "amd8111s_alloc_dma_ringbuf for rx failed");
		goto allocate_buf_fail;
	}

	/*
	 * Allocate Tx buffers
	 */
	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->tx_buf,
	    TX_COALESC_SIZE, TX_BUF_SIZE) == B_FALSE) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "amd8111s_alloc_dma_ringbuf for tx failed");
		goto allocate_buf_fail;
	}

	/*
	 * Initialize the mil Queues
	 */
	milInitGlbds(pLayerPointers);

	milInitRxQ(pLayerPointers);

	return (B_TRUE);

allocate_buf_fail:

	amd8111s_log(pLayerPointers, CE_WARN,
	    "amd8111s_allocate_buffers failed");
	return (B_FALSE);
}
112075ab5f91Slh155975 
112175ab5f91Slh155975 /*
112275ab5f91Slh155975  * Free all Rx/Tx buffer
112375ab5f91Slh155975  */
112475ab5f91Slh155975 
112575ab5f91Slh155975 static void
amd8111s_free_buffers(struct LayerPointers * pLayerPointers)112675ab5f91Slh155975 amd8111s_free_buffers(struct LayerPointers *pLayerPointers)
112775ab5f91Slh155975 {
112875ab5f91Slh155975 	/* Free Tx buffers */
112975ab5f91Slh155975 	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->tx_buf);
113075ab5f91Slh155975 
113175ab5f91Slh155975 	/* Free Rx Buffers */
113275ab5f91Slh155975 	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->rx_buf);
113375ab5f91Slh155975 }
113475ab5f91Slh155975 
113575ab5f91Slh155975 /*
113675ab5f91Slh155975  * Try to recycle all the descriptors and Tx buffers
113775ab5f91Slh155975  * which are already freed by hardware.
113875ab5f91Slh155975  */
113975ab5f91Slh155975 static int
amd8111s_recycle_tx(struct LayerPointers * pLayerPointers)114075ab5f91Slh155975 amd8111s_recycle_tx(struct LayerPointers *pLayerPointers)
114175ab5f91Slh155975 {
114275ab5f91Slh155975 	struct nonphysical *pNonphysical;
114375ab5f91Slh155975 	uint32_t count = 0;
114475ab5f91Slh155975 
114575ab5f91Slh155975 	pNonphysical = pLayerPointers->pMil->pNonphysical;
114675ab5f91Slh155975 	while (pNonphysical->TxDescQRead->Tx_OWN == 0 &&
114775ab5f91Slh155975 	    pNonphysical->TxDescQRead != pNonphysical->TxDescQWrite) {
114875ab5f91Slh155975 		pLayerPointers->pOdl->tx_buf.free =
114975ab5f91Slh155975 		    NEXT(pLayerPointers->pOdl->tx_buf, free);
115075ab5f91Slh155975 		pNonphysical->TxDescQRead++;
115175ab5f91Slh155975 		if (pNonphysical->TxDescQRead > pNonphysical->TxDescQEnd) {
115275ab5f91Slh155975 			pNonphysical->TxDescQRead = pNonphysical->TxDescQStart;
115375ab5f91Slh155975 		}
115475ab5f91Slh155975 		count ++;
115575ab5f91Slh155975 	}
115675ab5f91Slh155975 
115775ab5f91Slh155975 	if (pLayerPointers->pMil->tx_reschedule)
115875ab5f91Slh155975 		ddi_trigger_softintr(pLayerPointers->pOdl->drain_id);
115975ab5f91Slh155975 
116075ab5f91Slh155975 	return (count);
116175ab5f91Slh155975 }
116275ab5f91Slh155975 
116375ab5f91Slh155975 /*
116475ab5f91Slh155975  * Get packets in the Tx buffer, then copy them to the send buffer.
116575ab5f91Slh155975  * Trigger hardware to send out packets.
116675ab5f91Slh155975  */
116775ab5f91Slh155975 static void
116875ab5f91Slh155975 amd8111s_send_serial(struct LayerPointers *pLayerPointers)
116975ab5f91Slh155975 {
117075ab5f91Slh155975 	struct nonphysical *pNonphysical;
117175ab5f91Slh155975 	uint32_t count;
117275ab5f91Slh155975 
117375ab5f91Slh155975 	pNonphysical = pLayerPointers->pMil->pNonphysical;
117475ab5f91Slh155975 
117575ab5f91Slh155975 	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);
117675ab5f91Slh155975 
117775ab5f91Slh155975 	for (count = 0; count < AMD8111S_SEND_MAX; count ++) {
117875ab5f91Slh155975 		if (pLayerPointers->pOdl->tx_buf.curr ==
117975ab5f91Slh155975 		    pLayerPointers->pOdl->tx_buf.next) {
118075ab5f91Slh155975 			break;
118175ab5f91Slh155975 		}
118275ab5f91Slh155975 		/* If the Tx descriptor ring is full, try to recycle used descriptors */
118375ab5f91Slh155975 		if (((pNonphysical->TxDescQWrite + 1 >
118475ab5f91Slh155975 		    pNonphysical->TxDescQEnd) ? pNonphysical->TxDescQStart :
118575ab5f91Slh155975 		    (pNonphysical->TxDescQWrite + 1)) ==
118675ab5f91Slh155975 		    pNonphysical->TxDescQRead)
118775ab5f91Slh155975 			if (amd8111s_recycle_tx(pLayerPointers) == 0) {
118875ab5f91Slh155975 				pLayerPointers->pOdl
118975ab5f91Slh155975 				    ->statistics.tx_no_descriptor ++;
119075ab5f91Slh155975 				break;
119175ab5f91Slh155975 			}
119275ab5f91Slh155975 
119375ab5f91Slh155975 		/* Fill packet length */
119475ab5f91Slh155975 		pNonphysical->TxDescQWrite->Tx_BCNT = (uint16_t)pLayerPointers
119575ab5f91Slh155975 		    ->pOdl->tx_buf.curr->msg_size;
119675ab5f91Slh155975 
119775ab5f91Slh155975 		/* Fill physical buffer address */
119875ab5f91Slh155975 		pNonphysical->TxDescQWrite->Tx_Base_Addr = (unsigned int)
119975ab5f91Slh155975 		    pLayerPointers->pOdl->tx_buf.curr->phy_addr;
120075ab5f91Slh155975 
120175ab5f91Slh155975 		pNonphysical->TxDescQWrite->Tx_SOP = 1;
120275ab5f91Slh155975 		pNonphysical->TxDescQWrite->Tx_EOP = 1;
120375ab5f91Slh155975 		pNonphysical->TxDescQWrite->Tx_ADD_FCS = 1;
120475ab5f91Slh155975 		pNonphysical->TxDescQWrite->Tx_LTINT = 1;
120575ab5f91Slh155975 		pNonphysical->TxDescQWrite->Tx_USPACE = 0;
120675ab5f91Slh155975 		pNonphysical->TxDescQWrite->Tx_OWN = 1;
120775ab5f91Slh155975 
120875ab5f91Slh155975 		pNonphysical->TxDescQWrite++;
120975ab5f91Slh155975 		if (pNonphysical->TxDescQWrite > pNonphysical->TxDescQEnd) {
121075ab5f91Slh155975 			pNonphysical->TxDescQWrite = pNonphysical->TxDescQStart;
121175ab5f91Slh155975 		}
121275ab5f91Slh155975 
121375ab5f91Slh155975 		pLayerPointers->pOdl->tx_buf.curr =
121475ab5f91Slh155975 		    NEXT(pLayerPointers->pOdl->tx_buf, curr);
121575ab5f91Slh155975 
121675ab5f91Slh155975 	}
121775ab5f91Slh155975 
121875ab5f91Slh155975 	pLayerPointers->pOdl->statistics.tx_ok_packets += count;
121975ab5f91Slh155975 
122075ab5f91Slh155975 	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);
122175ab5f91Slh155975 
122275ab5f91Slh155975 	/* Call mdlTransmit to send the pkt out on the network */
122375ab5f91Slh155975 	mdlTransmit(pLayerPointers);
122475ab5f91Slh155975 
122575ab5f91Slh155975 }
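/*
 * Note on the descriptor setup order in the loop above: the byte count,
 * physical buffer address and flag bits (SOP/EOP/ADD_FCS/LTINT) are
 * written first, and Tx_OWN is set last so the controller never fetches a
 * half-filled descriptor.  A minimal sketch of one iteration, assuming a
 * single-fragment packet:
 *
 *	desc->Tx_BCNT = (uint16_t)buf->msg_size;
 *	desc->Tx_Base_Addr = (unsigned int)buf->phy_addr;
 *	desc->Tx_SOP = desc->Tx_EOP = 1;
 *	desc->Tx_OWN = 1;	// hand the descriptor to the hardware
 *
 * "desc" and "buf" stand in for TxDescQWrite and tx_buf.curr above.
 */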
122675ab5f91Slh155975 
122775ab5f91Slh155975 /*
122875ab5f91Slh155975  * Softintr entry point: try to send out the packets queued in the Tx buffer.
122975ab5f91Slh155975  * If tx_reschedule is set, call mac_tx_update to re-enable the
123075ab5f91Slh155975  * transmit path.
123175ab5f91Slh155975  */
123275ab5f91Slh155975 static uint_t
123375ab5f91Slh155975 amd8111s_send_drain(caddr_t arg)
123475ab5f91Slh155975 {
123522eb7cb5Sgd78059 	struct LayerPointers *pLayerPointers = (void *)arg;
123675ab5f91Slh155975 
123775ab5f91Slh155975 	amd8111s_send_serial(pLayerPointers);
123875ab5f91Slh155975 
123975ab5f91Slh155975 	if (pLayerPointers->pMil->tx_reschedule &&
124075ab5f91Slh155975 	    NEXT(pLayerPointers->pOdl->tx_buf, next) !=
124175ab5f91Slh155975 	    pLayerPointers->pOdl->tx_buf.free) {
124275ab5f91Slh155975 		mac_tx_update(pLayerPointers->pOdl->mh);
124375ab5f91Slh155975 		pLayerPointers->pMil->tx_reschedule = B_FALSE;
124475ab5f91Slh155975 	}
124575ab5f91Slh155975 
124675ab5f91Slh155975 	return (DDI_INTR_CLAIMED);
124775ab5f91Slh155975 }
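/*
 * amd8111s_send_drain() runs as the low-priority soft interrupt that
 * amd8111s_recycle_tx() triggers after reclaiming descriptors.  It first
 * pushes out anything still queued, and only calls mac_tx_update() when
 * tx_reschedule is set and the ring has free space again, telling the MAC
 * layer it may resume passing packets to amd8111s_m_tx().
 */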
124875ab5f91Slh155975 
124975ab5f91Slh155975 /*
125075ab5f91Slh155975  * Get a Tx buffer
125175ab5f91Slh155975  */
125275ab5f91Slh155975 static struct amd8111s_msgbuf *
125375ab5f91Slh155975 amd8111s_getTxbuf(struct LayerPointers *pLayerPointers)
125475ab5f91Slh155975 {
125575ab5f91Slh155975 	struct amd8111s_msgbuf *tmp, *next;
125675ab5f91Slh155975 
125775ab5f91Slh155975 	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);
125875ab5f91Slh155975 	next = NEXT(pLayerPointers->pOdl->tx_buf, next);
125975ab5f91Slh155975 	if (next == pLayerPointers->pOdl->tx_buf.free) {
126075ab5f91Slh155975 		tmp = NULL;
126175ab5f91Slh155975 	} else {
126275ab5f91Slh155975 		tmp = pLayerPointers->pOdl->tx_buf.next;
126375ab5f91Slh155975 		pLayerPointers->pOdl->tx_buf.next = next;
126475ab5f91Slh155975 	}
126575ab5f91Slh155975 	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);
126675ab5f91Slh155975 
126775ab5f91Slh155975 	return (tmp);
126875ab5f91Slh155975 }
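/*
 * The Tx ring buffer is managed with three cursors ("next", "curr" and
 * "free") chasing each other around the same circular list:
 *
 *	next - where amd8111s_send() copies the next outgoing packet;
 *	curr - where amd8111s_send_serial() picks up packets to map onto
 *	       descriptors;
 *	free - where amd8111s_recycle_tx() returns buffers the hardware has
 *	       finished with.
 *
 * amd8111s_getTxbuf() refuses to advance "next" onto "free", which is how
 * a full ring is detected.
 */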
126975ab5f91Slh155975 
127075ab5f91Slh155975 static boolean_t
127175ab5f91Slh155975 amd8111s_send(struct LayerPointers *pLayerPointers, mblk_t *mp)
127275ab5f91Slh155975 {
127375ab5f91Slh155975 	struct odl *pOdl;
127475ab5f91Slh155975 	size_t frag_len;
127575ab5f91Slh155975 	mblk_t *tmp;
127675ab5f91Slh155975 	struct amd8111s_msgbuf *txBuf;
127775ab5f91Slh155975 	uint8_t *pMsg;
127875ab5f91Slh155975 
127975ab5f91Slh155975 	pOdl = pLayerPointers->pOdl;
128075ab5f91Slh155975 
128175ab5f91Slh155975 	/* alloc send buffer */
128275ab5f91Slh155975 	txBuf = amd8111s_getTxbuf(pLayerPointers);
128375ab5f91Slh155975 	if (txBuf == NULL) {
128475ab5f91Slh155975 		pOdl->statistics.tx_no_buffer ++;
128575ab5f91Slh155975 		pLayerPointers->pMil->tx_reschedule = B_TRUE;
128675ab5f91Slh155975 		amd8111s_send_serial(pLayerPointers);
128775ab5f91Slh155975 		return (B_FALSE);
128875ab5f91Slh155975 	}
128975ab5f91Slh155975 
129075ab5f91Slh155975 	/* copy packet to send buffer */
129175ab5f91Slh155975 	txBuf->msg_size = 0;
129275ab5f91Slh155975 	pMsg = (uint8_t *)txBuf->vir_addr;
129375ab5f91Slh155975 	for (tmp = mp; tmp; tmp = tmp->b_cont) {
129475ab5f91Slh155975 		frag_len = MBLKL(tmp);
129575ab5f91Slh155975 		bcopy(tmp->b_rptr, pMsg, frag_len);
129675ab5f91Slh155975 		txBuf->msg_size += frag_len;
129775ab5f91Slh155975 		pMsg += frag_len;
129875ab5f91Slh155975 	}
129975ab5f91Slh155975 	freemsg(mp);
130075ab5f91Slh155975 
130175ab5f91Slh155975 	amd8111s_send_serial(pLayerPointers);
130275ab5f91Slh155975 
130375ab5f91Slh155975 	return (B_TRUE);
130475ab5f91Slh155975 }
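/*
 * Because amd8111s_send_serial() sets both SOP and EOP on a single
 * descriptor, every packet must fit in one pre-allocated DMA buffer, so
 * the mblk fragments chained through b_cont are flattened into it with
 * bcopy().  On success the chain is freed here; on failure (no Tx buffer)
 * it is left untouched so amd8111s_m_tx() can hand it back to the MAC
 * layer.
 */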
130575ab5f91Slh155975 
130675ab5f91Slh155975 /*
130775ab5f91Slh155975  * (GLD Entry Point) Send the message block to lower layer
130875ab5f91Slh155975  */
130975ab5f91Slh155975 static mblk_t *
131075ab5f91Slh155975 amd8111s_m_tx(void *arg, mblk_t *mp)
131175ab5f91Slh155975 {
131275ab5f91Slh155975 	struct LayerPointers *pLayerPointers = arg;
131375ab5f91Slh155975 	mblk_t *next;
131475ab5f91Slh155975 
131575ab5f91Slh155975 	rw_enter(&pLayerPointers->pOdl->chip_lock, RW_READER);
131675ab5f91Slh155975 	if (!pLayerPointers->run) {
131775ab5f91Slh155975 		pLayerPointers->pOdl->statistics.tx_afterunplumb ++;
131875ab5f91Slh155975 		freemsgchain(mp);
131975ab5f91Slh155975 		mp = NULL;
132075ab5f91Slh155975 	}
132175ab5f91Slh155975 
132275ab5f91Slh155975 	while (mp != NULL) {
132375ab5f91Slh155975 		next = mp->b_next;
132475ab5f91Slh155975 		mp->b_next = NULL;
132575ab5f91Slh155975 		if (!amd8111s_send(pLayerPointers, mp)) {
132675ab5f91Slh155975 			/* Send fail */
132775ab5f91Slh155975 			mp->b_next = next;
132875ab5f91Slh155975 			break;
132975ab5f91Slh155975 		}
133075ab5f91Slh155975 		mp = next;
133175ab5f91Slh155975 	}
133275ab5f91Slh155975 
133375ab5f91Slh155975 	rw_exit(&pLayerPointers->pOdl->chip_lock);
133475ab5f91Slh155975 	return (mp);
133575ab5f91Slh155975 }
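/*
 * GLD hands the driver a chain of packets linked through b_next.  Any
 * packet that cannot be queued is re-linked and the remainder of the
 * chain is returned, which the MAC layer interprets as "Tx resources
 * exhausted, wait for mac_tx_update()"; that update is issued later from
 * amd8111s_send_drain() above.
 */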
133675ab5f91Slh155975 
133775ab5f91Slh155975 /*
133875ab5f91Slh155975  * (GLD Entry Point) Interrupt Service Routine
133975ab5f91Slh155975  */
134075ab5f91Slh155975 static uint_t
134175ab5f91Slh155975 amd8111s_intr(caddr_t arg)
134275ab5f91Slh155975 {
134375ab5f91Slh155975 	unsigned int intrCauses;
134422eb7cb5Sgd78059 	struct LayerPointers *pLayerPointers = (void *)arg;
134575ab5f91Slh155975 
134675ab5f91Slh155975 	/* Read the interrupt status from mdl */
134775ab5f91Slh155975 	intrCauses = mdlReadInterrupt(pLayerPointers);
134875ab5f91Slh155975 
134975ab5f91Slh155975 	if (intrCauses == 0) {
135075ab5f91Slh155975 		pLayerPointers->pOdl->statistics.intr_OTHER ++;
135175ab5f91Slh155975 		return (DDI_INTR_UNCLAIMED);
135275ab5f91Slh155975 	}
135375ab5f91Slh155975 
135475ab5f91Slh155975 	if (intrCauses & LCINT) {
135575ab5f91Slh155975 		if (mdlReadLink(pLayerPointers) == LINK_UP) {
135675ab5f91Slh155975 			mdlGetActiveMediaInfo(pLayerPointers);
135775ab5f91Slh155975 			/* Link status changed */
135875ab5f91Slh155975 			if (pLayerPointers->pOdl->LinkStatus !=
135975ab5f91Slh155975 			    LINK_STATE_UP) {
136075ab5f91Slh155975 				pLayerPointers->pOdl->LinkStatus =
136175ab5f91Slh155975 				    LINK_STATE_UP;
136275ab5f91Slh155975 				mac_link_update(pLayerPointers->pOdl->mh,
136375ab5f91Slh155975 				    LINK_STATE_UP);
136475ab5f91Slh155975 			}
136575ab5f91Slh155975 		} else {
136675ab5f91Slh155975 			if (pLayerPointers->pOdl->LinkStatus !=
136775ab5f91Slh155975 			    LINK_STATE_DOWN) {
136875ab5f91Slh155975 				pLayerPointers->pOdl->LinkStatus =
136975ab5f91Slh155975 				    LINK_STATE_DOWN;
137075ab5f91Slh155975 				mac_link_update(pLayerPointers->pOdl->mh,
137175ab5f91Slh155975 				    LINK_STATE_DOWN);
137275ab5f91Slh155975 			}
137375ab5f91Slh155975 		}
137475ab5f91Slh155975 	}
137575ab5f91Slh155975 	/*
137675ab5f91Slh155975 	 * RINT0: Receive Interrupt is set by the controller after the last
137775ab5f91Slh155975 	 * descriptor of a receive frame for this ring has been updated by
137875ab5f91Slh155975 	 * writing a 0 to the OWNership bit.
137975ab5f91Slh155975 	 */
138075ab5f91Slh155975 	if (intrCauses & RINT0) {
138175ab5f91Slh155975 		pLayerPointers->pOdl->statistics.intr_RINT0 ++;
138275ab5f91Slh155975 		amd8111s_receive(pLayerPointers);
138375ab5f91Slh155975 	}
138475ab5f91Slh155975 
138575ab5f91Slh155975 	/*
138675ab5f91Slh155975 	 * TINT0: Transmit Interrupt is set by the controller after the OWN bit
138775ab5f91Slh155975 	 * in the last descriptor of a transmit frame in this particular ring
138875ab5f91Slh155975 	 * has been cleared to indicate the frame has been copied to the
138975ab5f91Slh155975 	 * transmit FIFO.
139075ab5f91Slh155975 	 */
139175ab5f91Slh155975 	if (intrCauses & TINT0) {
139275ab5f91Slh155975 		pLayerPointers->pOdl->statistics.intr_TINT0 ++;
139375ab5f91Slh155975 		/*
139475ab5f91Slh155975 		 * If the descriptor ring has been consumed but packets are
139575ab5f91Slh155975 		 * still queued in the Tx buffer, drain the Tx buffer.
139675ab5f91Slh155975 		 */
139775ab5f91Slh155975 		amd8111s_send_serial(pLayerPointers);
139875ab5f91Slh155975 	}
139975ab5f91Slh155975 
140075ab5f91Slh155975 	if (intrCauses & STINT) {
140175ab5f91Slh155975 		pLayerPointers->pOdl->statistics.intr_STINT ++;
140275ab5f91Slh155975 	}
140375ab5f91Slh155975 
140475ab5f91Slh155975 
140575ab5f91Slh155975 	return (DDI_INTR_CLAIMED);
140675ab5f91Slh155975 }
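/*
 * Interrupt dispatch summary: LCINT (link change) updates the cached link
 * state and notifies the MAC layer, RINT0 hands received frames up via
 * amd8111s_receive(), TINT0 drains any packets still queued in the Tx
 * buffer, and STINT (software timer) is only counted.  Returning
 * DDI_INTR_UNCLAIMED when no cause bit is set lets handlers for other
 * devices sharing the interrupt line run.
 */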
140775ab5f91Slh155975 
140875ab5f91Slh155975 /*
140975ab5f91Slh155975  * To re-initialize data structures.
141075ab5f91Slh155975  */
141175ab5f91Slh155975 static void
141275ab5f91Slh155975 amd8111s_sw_reset(struct LayerPointers *pLayerPointers)
141375ab5f91Slh155975 {
141475ab5f91Slh155975 	/* Reset all Tx/Rx queues and descriptors */
141575ab5f91Slh155975 	milResetTxQ(pLayerPointers);
141675ab5f91Slh155975 	milInitRxQ(pLayerPointers);
141775ab5f91Slh155975 }
141875ab5f91Slh155975 
141975ab5f91Slh155975 /*
142075ab5f91Slh155975  * Send all pending tx packets
142175ab5f91Slh155975  */
142275ab5f91Slh155975 static void
142375ab5f91Slh155975 amd8111s_tx_drain(struct LayerPointers *adapter)
142475ab5f91Slh155975 {
142575ab5f91Slh155975 	struct tx_desc *pTx_desc = adapter->pMil->pNonphysical->TxDescQStart;
142675ab5f91Slh155975 	int i, desc_count = 0;
142775ab5f91Slh155975 	for (i = 0; i < 30; i++) {
142875ab5f91Slh155975 		while ((pTx_desc->Tx_OWN == 0) && (desc_count < TX_RING_SIZE)) {
142975ab5f91Slh155975 			/* This packet has been transmitted */
143075ab5f91Slh155975 			pTx_desc ++;
143175ab5f91Slh155975 			desc_count ++;
143275ab5f91Slh155975 		}
143375ab5f91Slh155975 		if (desc_count == TX_RING_SIZE) {
143475ab5f91Slh155975 			break;
143575ab5f91Slh155975 		}
143675ab5f91Slh155975 		/* Wait 1 ms */
143775ab5f91Slh155975 		drv_usecwait(1000);
143875ab5f91Slh155975 	}
143975ab5f91Slh155975 	adapter->pOdl->statistics.tx_draintime = i;
144075ab5f91Slh155975 }
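/*
 * The drain loop above polls the descriptor ring rather than waiting on
 * an interrupt: it gives the hardware up to 30 x 1 ms to clear the OWN
 * bit on every descriptor (TX_RING_SIZE of them) and records the number
 * of 1 ms waits actually needed in statistics.tx_draintime.
 */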
144175ab5f91Slh155975 
144275ab5f91Slh155975 /*
144375ab5f91Slh155975  * (GLD Entry Point) To start the card; called at
144475ab5f91Slh155975  * ifconfig plumb time
144575ab5f91Slh155975  */
144675ab5f91Slh155975 static int
144775ab5f91Slh155975 amd8111s_m_start(void *arg)
144875ab5f91Slh155975 {
144975ab5f91Slh155975 	struct LayerPointers *pLayerPointers = arg;
145075ab5f91Slh155975 	struct odl *pOdl = pLayerPointers->pOdl;
145175ab5f91Slh155975 
145275ab5f91Slh155975 	amd8111s_sw_reset(pLayerPointers);
145375ab5f91Slh155975 	mdlHWReset(pLayerPointers);
145475ab5f91Slh155975 	rw_enter(&pOdl->chip_lock, RW_WRITER);
145575ab5f91Slh155975 	pLayerPointers->run = B_TRUE;
145675ab5f91Slh155975 	rw_exit(&pOdl->chip_lock);
145775ab5f91Slh155975 	return (0);
145875ab5f91Slh155975 }
145975ab5f91Slh155975 
146075ab5f91Slh155975 /*
146175ab5f91Slh155975  * (GLD Entry Point) To stop the card; called at
146275ab5f91Slh155975  * ifconfig unplumb time
146375ab5f91Slh155975  */
146475ab5f91Slh155975 static void
146575ab5f91Slh155975 amd8111s_m_stop(void *arg)
146675ab5f91Slh155975 {
146775ab5f91Slh155975 	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;
146875ab5f91Slh155975 	struct odl *pOdl = pLayerPointers->pOdl;
146975ab5f91Slh155975 
147075ab5f91Slh155975 	/* Ensure all pending Tx packets are sent */
147175ab5f91Slh155975 	amd8111s_tx_drain(pLayerPointers);
147275ab5f91Slh155975 	/*
147375ab5f91Slh155975 	 * Stop the controller and disable the controller interrupt
147475ab5f91Slh155975 	 */
147575ab5f91Slh155975 	rw_enter(&pOdl->chip_lock, RW_WRITER);
147675ab5f91Slh155975 	mdlStopChip(pLayerPointers);
147775ab5f91Slh155975 	pLayerPointers->run = B_FALSE;
147875ab5f91Slh155975 	rw_exit(&pOdl->chip_lock);
147975ab5f91Slh155975 }
148075ab5f91Slh155975 
148175ab5f91Slh155975 /*
148275ab5f91Slh155975  *	To clean up all allocated resources
148375ab5f91Slh155975  */
148475ab5f91Slh155975 static void
148575ab5f91Slh155975 amd8111s_free_resource(struct LayerPointers *pLayerPointers)
148675ab5f91Slh155975 {
148775ab5f91Slh155975 	unsigned long mem_free_array[100];
148875ab5f91Slh155975 	unsigned long *pmem_free_array, size;
148975ab5f91Slh155975 
149075ab5f91Slh155975 	/* Free Rx/Tx descriptors */
149175ab5f91Slh155975 	amd8111s_free_descriptors(pLayerPointers);
149275ab5f91Slh155975 
149375ab5f91Slh155975 	/* Free memory on lower layers */
149475ab5f91Slh155975 	milFreeResources(pLayerPointers, mem_free_array);
149575ab5f91Slh155975 	pmem_free_array = mem_free_array;
149675ab5f91Slh155975 	while (*pmem_free_array) {
149775ab5f91Slh155975 		switch (*pmem_free_array) {
149875ab5f91Slh155975 		case VIRTUAL:
149975ab5f91Slh155975 			size = *(++pmem_free_array);
150075ab5f91Slh155975 			pmem_free_array++;
150175ab5f91Slh155975 			kmem_free((void *)*(pmem_free_array), size);
150275ab5f91Slh155975 			break;
150375ab5f91Slh155975 		}
150475ab5f91Slh155975 		pmem_free_array++;
150575ab5f91Slh155975 	}
150675ab5f91Slh155975 
150775ab5f91Slh155975 	amd8111s_free_buffers(pLayerPointers);
150875ab5f91Slh155975 }
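/*
 * milFreeResources() fills mem_free_array with a zero-terminated list of
 * (type, size, address) entries.  The loop above only handles the VIRTUAL
 * type; conceptually:
 *
 *	p = mem_free_array;
 *	while (*p) {
 *		if (*p == VIRTUAL) {
 *			size = *(++p);
 *			kmem_free((void *)*(++p), size);
 *		}
 *		p++;
 *	}
 */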
150975ab5f91Slh155975 
151075ab5f91Slh155975 /*
151175ab5f91Slh155975  * (GLD Entry Point) To add/delete multicast addresses
151275ab5f91Slh155975  *
151375ab5f91Slh155975  */
151475ab5f91Slh155975 static int
151575ab5f91Slh155975 amd8111s_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
151675ab5f91Slh155975 {
151775ab5f91Slh155975 	struct LayerPointers *pLayerPointers = arg;
151875ab5f91Slh155975 
151975ab5f91Slh155975 	if (add) {
152075ab5f91Slh155975 		/* Add a multicast entry */
152175ab5f91Slh155975 		mdlAddMulticastAddress(pLayerPointers, (UCHAR *)addr);
152275ab5f91Slh155975 	} else {
152375ab5f91Slh155975 		/* Delete a multicast entry */
152475ab5f91Slh155975 		mdlDeleteMulticastAddress(pLayerPointers, (UCHAR *)addr);
152575ab5f91Slh155975 	}
152675ab5f91Slh155975 
152775ab5f91Slh155975 	return (0);
152875ab5f91Slh155975 }
152975ab5f91Slh155975 
153075ab5f91Slh155975 #ifdef AMD8111S_DEBUG
153175ab5f91Slh155975 /*
153275ab5f91Slh155975  * The size of MIB registers is only 32 bits. Dump them before one
153375ab5f91Slh155975  * of them overflows.
153475ab5f91Slh155975  */
153575ab5f91Slh155975 static void
153675ab5f91Slh155975 amd8111s_dump_mib(struct LayerPointers *pLayerPointers)
153775ab5f91Slh155975 {
153875ab5f91Slh155975 	struct amd8111s_statistics *adapterStat;
153975ab5f91Slh155975 
154075ab5f91Slh155975 	adapterStat = &pLayerPointers->pOdl->statistics;
154175ab5f91Slh155975 
154275ab5f91Slh155975 	adapterStat->mib_dump_counter ++;
154375ab5f91Slh155975 
154475ab5f91Slh155975 	/*
154575ab5f91Slh155975 	 * Rx Counters
154675ab5f91Slh155975 	 */
154775ab5f91Slh155975 	adapterStat->rx_mib_unicst_packets +=
154875ab5f91Slh155975 	    mdlReadMib(pLayerPointers, RcvUniCastPkts);
154975ab5f91Slh155975 	adapterStat->rx_mib_multicst_packets +=
155075ab5f91Slh155975 	    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
155175ab5f91Slh155975 	adapterStat->rx_mib_broadcst_packets +=
155275ab5f91Slh155975 	    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
155375ab5f91Slh155975 	adapterStat->rx_mib_macctrl_packets +=
155475ab5f91Slh155975 	    mdlReadMib(pLayerPointers, RcvMACCtrl);
155575ab5f91Slh155975 	adapterStat->rx_mib_flowctrl_packets +=
155675ab5f91Slh155975 	    mdlReadMib(pLayerPointers, RcvFlowCtrl);
155775ab5f91Slh155975 
155875ab5f91Slh155975 	adapterStat->rx_mib_bytes +=
155975ab5f91Slh155975 	    mdlReadMib(pLayerPointers, RcvOctets);
156075ab5f91Slh155975 	adapterStat->rx_mib_good_bytes +=
156175ab5f91Slh155975 	    mdlReadMib(pLayerPointers, RcvGoodOctets);
156275ab5f91Slh155975 
156375ab5f91Slh155975 	adapterStat->rx_mib_undersize_packets +=
156475ab5f91Slh155975 	    mdlReadMib(pLayerPointers, RcvUndersizePkts);
156575ab5f91Slh155975 	adapterStat->rx_mib_oversize_packets +=
156675ab5f91Slh155975 	    mdlReadMib(pLayerPointers, RcvOversizePkts);
156775ab5f91Slh155975 
156875ab5f91Slh155975 	adapterStat->rx_mib_drop_packets +=
156975ab5f91Slh155975 	    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
157075ab5f91Slh155975 	adapterStat->rx_mib_align_err_packets +=
157175ab5f91Slh155975 	    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
157275ab5f91Slh155975 	adapterStat->rx_mib_fcs_err_packets +=
157375ab5f91Slh155975 	    mdlReadMib(pLayerPointers, RcvFCSErrors);
157475ab5f91Slh155975 	adapterStat->rx_mib_symbol_err_packets +=
157575ab5f91Slh155975 	    mdlReadMib(pLayerPointers, RcvSymbolErrors);
157675ab5f91Slh155975 	adapterStat->rx_mib_miss_packets +=
157775ab5f91Slh155975 	    mdlReadMib(pLayerPointers, RcvMissPkts);
157875ab5f91Slh155975 
157975ab5f91Slh155975 	/*
158075ab5f91Slh155975 	 * Tx Counters
158175ab5f91Slh155975 	 */
158275ab5f91Slh155975 	adapterStat->tx_mib_packets +=
158375ab5f91Slh155975 	    mdlReadMib(pLayerPointers, XmtPackets);
158475ab5f91Slh155975 	adapterStat->tx_mib_multicst_packets +=
158575ab5f91Slh155975 	    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
158675ab5f91Slh155975 	adapterStat->tx_mib_broadcst_packets +=
158775ab5f91Slh155975 	    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
158875ab5f91Slh155975 	adapterStat->tx_mib_flowctrl_packets +=
158975ab5f91Slh155975 	    mdlReadMib(pLayerPointers, XmtFlowCtrl);
159075ab5f91Slh155975 
159175ab5f91Slh155975 	adapterStat->tx_mib_bytes +=
159275ab5f91Slh155975 	    mdlReadMib(pLayerPointers, XmtOctets);
159375ab5f91Slh155975 
159475ab5f91Slh155975 	adapterStat->tx_mib_defer_trans_packets +=
159575ab5f91Slh155975 	    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
159675ab5f91Slh155975 	adapterStat->tx_mib_collision_packets +=
159775ab5f91Slh155975 	    mdlReadMib(pLayerPointers, XmtCollisions);
159875ab5f91Slh155975 	adapterStat->tx_mib_one_coll_packets +=
159975ab5f91Slh155975 	    mdlReadMib(pLayerPointers, XmtOneCollision);
160075ab5f91Slh155975 	adapterStat->tx_mib_multi_coll_packets +=
160175ab5f91Slh155975 	    mdlReadMib(pLayerPointers, XmtMultipleCollision);
160275ab5f91Slh155975 	adapterStat->tx_mib_late_coll_packets +=
160375ab5f91Slh155975 	    mdlReadMib(pLayerPointers, XmtLateCollision);
160475ab5f91Slh155975 	adapterStat->tx_mib_ex_coll_packets +=
160575ab5f91Slh155975 	    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
160675ab5f91Slh155975 
160775ab5f91Slh155975 
160875ab5f91Slh155975 	/* Clear all MIB registers */
160975ab5f91Slh155975 	WRITE_REG16(pLayerPointers, pLayerPointers->pMdl->Mem_Address
161075ab5f91Slh155975 	    + MIB_ADDR, MIB_CLEAR);
161175ab5f91Slh155975 }
161275ab5f91Slh155975 #endif
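/*
 * amd8111s_dump_mib() accumulates each 32-bit hardware MIB register into
 * a wider software counter in pOdl->statistics and then clears the
 * hardware registers with the MIB_CLEAR write, so the running totals
 * survive counter wrap-around between dumps.
 */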
161375ab5f91Slh155975 
161475ab5f91Slh155975 /*
161575ab5f91Slh155975  * (GLD Entry Point) set/unset promiscuous mode
161675ab5f91Slh155975  */
161775ab5f91Slh155975 static int
161875ab5f91Slh155975 amd8111s_m_promisc(void *arg, boolean_t on)
161975ab5f91Slh155975 {
162075ab5f91Slh155975 	struct LayerPointers *pLayerPointers = arg;
162175ab5f91Slh155975 
162275ab5f91Slh155975 	if (on) {
162375ab5f91Slh155975 		mdlSetPromiscuous(pLayerPointers);
162475ab5f91Slh155975 	} else {
162575ab5f91Slh155975 		mdlDisablePromiscuous(pLayerPointers);
162675ab5f91Slh155975 	}
162775ab5f91Slh155975 
162875ab5f91Slh155975 	return (0);
162975ab5f91Slh155975 }
163075ab5f91Slh155975 
163175ab5f91Slh155975 /*
163275ab5f91Slh155975  * (GLD Entry Point) Changes the MAC address of the card
163375ab5f91Slh155975  */
163475ab5f91Slh155975 static int
163575ab5f91Slh155975 amd8111s_m_unicst(void *arg, const uint8_t *macaddr)
163675ab5f91Slh155975 {
163775ab5f91Slh155975 	struct LayerPointers *pLayerPointers = arg;
163875ab5f91Slh155975 
163975ab5f91Slh155975 	mdlDisableInterrupt(pLayerPointers);
164075ab5f91Slh155975 	mdlSetMacAddress(pLayerPointers, (unsigned char *)macaddr);
164175ab5f91Slh155975 	mdlEnableInterrupt(pLayerPointers);
164275ab5f91Slh155975 
164375ab5f91Slh155975 	return (0);
164475ab5f91Slh155975 }
164575ab5f91Slh155975 
164675ab5f91Slh155975 /*
164775ab5f91Slh155975  * Reset the card
164875ab5f91Slh155975  */
164975ab5f91Slh155975 void
165075ab5f91Slh155975 amd8111s_reset(struct LayerPointers *pLayerPointers)
165175ab5f91Slh155975 {
165275ab5f91Slh155975 	amd8111s_sw_reset(pLayerPointers);
165375ab5f91Slh155975 	mdlHWReset(pLayerPointers);
165475ab5f91Slh155975 }
165575ab5f91Slh155975 
165675ab5f91Slh155975 /*
165775ab5f91Slh155975  * attach(9E) -- Attach a device to the system
165875ab5f91Slh155975  *
165975ab5f91Slh155975  * Called once for each board after it has been successfully probed.
166075ab5f91Slh155975  * It will:
166175ab5f91Slh155975  * 	a. create a minor device node for the instance.
166275ab5f91Slh155975  *	b. allocate & initialize the four layers (call odlInit)
166375ab5f91Slh155975  *	c. get the MAC address
166475ab5f91Slh155975  *	d. set pLayerPointers as the GLD private pointer
166575ab5f91Slh155975  *	e. register with GLD
166675ab5f91Slh155975  * If any action fails, it cleans up and returns DDI_FAILURE;
166775ab5f91Slh155975  * otherwise it returns DDI_SUCCESS.
166875ab5f91Slh155975  */
166975ab5f91Slh155975 static int
167075ab5f91Slh155975 amd8111s_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
167175ab5f91Slh155975 {
167275ab5f91Slh155975 	mac_register_t *macp;
167375ab5f91Slh155975 	struct LayerPointers *pLayerPointers;
167475ab5f91Slh155975 	struct odl *pOdl;
167575ab5f91Slh155975 	ddi_acc_handle_t *pci_handle;
167675ab5f91Slh155975 	ddi_device_acc_attr_t dev_attr;
167775ab5f91Slh155975 	caddr_t addrp = NULL;
167875ab5f91Slh155975 
167975ab5f91Slh155975 	switch (cmd) {
168075ab5f91Slh155975 	case DDI_ATTACH:
168175ab5f91Slh155975 		break;
168275ab5f91Slh155975 	default:
168375ab5f91Slh155975 		return (DDI_FAILURE);
168475ab5f91Slh155975 	}
168575ab5f91Slh155975 
168675ab5f91Slh155975 	pLayerPointers = (struct LayerPointers *)
168775ab5f91Slh155975 	    kmem_zalloc(sizeof (struct LayerPointers), KM_SLEEP);
168875ab5f91Slh155975 	amd8111sadapter = pLayerPointers;
168975ab5f91Slh155975 
169075ab5f91Slh155975 	/* Get device instance number */
169175ab5f91Slh155975 	pLayerPointers->instance = ddi_get_instance(devinfo);
169275ab5f91Slh155975 	ddi_set_driver_private(devinfo, (caddr_t)pLayerPointers);
169375ab5f91Slh155975 
169475ab5f91Slh155975 	pOdl = (struct odl *)kmem_zalloc(sizeof (struct odl), KM_SLEEP);
169575ab5f91Slh155975 	pLayerPointers->pOdl = pOdl;
169675ab5f91Slh155975 
169775ab5f91Slh155975 	pOdl->devinfo = devinfo;
169875ab5f91Slh155975 
169975ab5f91Slh155975 	/*
170075ab5f91Slh155975 	 * Here, we only allocate memory for struct odl and initialize it.
170175ab5f91Slh155975 	 * All other memory allocation & initialization is done in odlInit,
170275ab5f91Slh155975 	 * called later in this routine.
170375ab5f91Slh155975 	 */
170475ab5f91Slh155975 	if (ddi_get_iblock_cookie(devinfo, 0, &pLayerPointers->pOdl->iblock)
170575ab5f91Slh155975 	    != DDI_SUCCESS) {
170675ab5f91Slh155975 		amd8111s_log(pLayerPointers, CE_NOTE,
170775ab5f91Slh155975 		    "attach: get iblock cookies failed");
170875ab5f91Slh155975 		goto attach_failure;
170975ab5f91Slh155975 	}
171075ab5f91Slh155975 
171175ab5f91Slh155975 	rw_init(&pOdl->chip_lock, NULL, RW_DRIVER, (void *)pOdl->iblock);
171275ab5f91Slh155975 	mutex_init(&pOdl->mdlSendLock, "amd8111s Send Protection Lock",
171375ab5f91Slh155975 	    MUTEX_DRIVER, (void *)pOdl->iblock);
171475ab5f91Slh155975 	mutex_init(&pOdl->mdlRcvLock, "amd8111s Rcv Protection Lock",
171575ab5f91Slh155975 	    MUTEX_DRIVER, (void *)pOdl->iblock);
171675ab5f91Slh155975 
171775ab5f91Slh155975 	/* Setup PCI space */
171875ab5f91Slh155975 	if (pci_config_setup(devinfo, &pOdl->pci_handle) != DDI_SUCCESS) {
171975ab5f91Slh155975 		return (DDI_FAILURE);
172075ab5f91Slh155975 	}
172175ab5f91Slh155975 	pLayerPointers->attach_progress = AMD8111S_ATTACH_PCI;
172275ab5f91Slh155975 	pci_handle = &pOdl->pci_handle;
172375ab5f91Slh155975 
172475ab5f91Slh155975 	pOdl->vendor_id = pci_config_get16(*pci_handle, PCI_CONF_VENID);
172575ab5f91Slh155975 	pOdl->device_id = pci_config_get16(*pci_handle, PCI_CONF_DEVID);
172675ab5f91Slh155975 
172775ab5f91Slh155975 	/*
172875ab5f91Slh155975 	 * Allocate and initialize all resource and map device registers.
172975ab5f91Slh155975 	 * If failed, it returns a non-zero value.
173075ab5f91Slh155975 	 */
173175ab5f91Slh155975 	if (amd8111s_odlInit(pLayerPointers) != 0) {
173275ab5f91Slh155975 		goto attach_failure;
173375ab5f91Slh155975 	}
173475ab5f91Slh155975 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESOURCE;
173575ab5f91Slh155975 
173675ab5f91Slh155975 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
173775ab5f91Slh155975 	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
173875ab5f91Slh155975 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
173975ab5f91Slh155975 
174075ab5f91Slh155975 	if (ddi_regs_map_setup(devinfo, 1, &addrp, 0,  4096, &dev_attr,
174175ab5f91Slh155975 	    &(pLayerPointers->pOdl->MemBasehandle)) != 0) {
174275ab5f91Slh155975 		amd8111s_log(pLayerPointers, CE_NOTE,
174375ab5f91Slh155975 		    "attach: ddi_regs_map_setup failed");
174475ab5f91Slh155975 		goto attach_failure;
174575ab5f91Slh155975 	}
174675ab5f91Slh155975 	pLayerPointers->pMdl->Mem_Address = (unsigned long)addrp;
174775ab5f91Slh155975 
174875ab5f91Slh155975 	/* Initialize HW */
174975ab5f91Slh155975 	mdlOpen(pLayerPointers);
175075ab5f91Slh155975 	mdlGetActiveMediaInfo(pLayerPointers);
175175ab5f91Slh155975 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_REGS;
175275ab5f91Slh155975 
175375ab5f91Slh155975 	/*
175475ab5f91Slh155975 	 * Setup the interrupt
175575ab5f91Slh155975 	 */
175675ab5f91Slh155975 	if (ddi_add_intr(devinfo, 0, &pOdl->iblock, 0, amd8111s_intr,
175775ab5f91Slh155975 	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
175875ab5f91Slh155975 		goto attach_failure;
175975ab5f91Slh155975 	}
176075ab5f91Slh155975 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_INTRADDED;
176175ab5f91Slh155975 
176275ab5f91Slh155975 	/*
176375ab5f91Slh155975 	 * Setup soft intr
176475ab5f91Slh155975 	 */
176575ab5f91Slh155975 	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &pOdl->drain_id,
176675ab5f91Slh155975 	    NULL, NULL, amd8111s_send_drain,
176775ab5f91Slh155975 	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
176875ab5f91Slh155975 		goto attach_failure;
176975ab5f91Slh155975 	}
177075ab5f91Slh155975 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESCHED;
177175ab5f91Slh155975 
177275ab5f91Slh155975 	/*
177375ab5f91Slh155975 	 * Initialize the mac structure
177475ab5f91Slh155975 	 */
177575ab5f91Slh155975 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
177675ab5f91Slh155975 		goto attach_failure;
177775ab5f91Slh155975 
177875ab5f91Slh155975 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
177975ab5f91Slh155975 	macp->m_driver = pLayerPointers;
178075ab5f91Slh155975 	macp->m_dip = devinfo;
178175ab5f91Slh155975 	/* Get MAC address */
178275ab5f91Slh155975 	mdlGetMacAddress(pLayerPointers, (unsigned char *)pOdl->MacAddress);
178375ab5f91Slh155975 	macp->m_src_addr = pOdl->MacAddress;
178475ab5f91Slh155975 	macp->m_callbacks = &amd8111s_m_callbacks;
178575ab5f91Slh155975 	macp->m_min_sdu = 0;
178675ab5f91Slh155975 	/* 1518 - 14 (ether header) - 4 (CRC) */
178775ab5f91Slh155975 	macp->m_max_sdu = ETHERMTU;
1788d62bc4baSyz147064 	macp->m_margin = VLAN_TAGSZ;
178975ab5f91Slh155975 
179075ab5f91Slh155975 	/*
179175ab5f91Slh155975 	 * Finally, we're ready to register ourselves with the MAC layer
179275ab5f91Slh155975 	 * interface; if this succeeds, we're ready to start.
179375ab5f91Slh155975 	 */
179475ab5f91Slh155975 	if (mac_register(macp, &pOdl->mh) != DDI_SUCCESS) {
179575ab5f91Slh155975 		mac_free(macp);
179675ab5f91Slh155975 		goto attach_failure;
179775ab5f91Slh155975 	}
179875ab5f91Slh155975 	mac_free(macp);
179975ab5f91Slh155975 
180075ab5f91Slh155975 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_MACREGED;
180175ab5f91Slh155975 
180275ab5f91Slh155975 	return (DDI_SUCCESS);
180375ab5f91Slh155975 
180475ab5f91Slh155975 attach_failure:
180575ab5f91Slh155975 	(void) amd8111s_unattach(devinfo, pLayerPointers);
180675ab5f91Slh155975 	return (DDI_FAILURE);
180775ab5f91Slh155975 
180875ab5f91Slh155975 }
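/*
 * attach_progress is a bitmask of completed attach stages
 * (AMD8111S_ATTACH_PCI, _RESOURCE, _REGS, _INTRADDED, _RESCHED,
 * _MACREGED).  On any failure amd8111s_unattach() walks the same flags
 * and undoes only the stages that actually completed, so a partially
 * attached instance is torn down safely.
 */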
180975ab5f91Slh155975 
181075ab5f91Slh155975 /*
181175ab5f91Slh155975  * detach(9E) -- Detach a device from the system
181275ab5f91Slh155975  *
181375ab5f91Slh155975  * It is called for each device instance when the system is preparing to
181475ab5f91Slh155975  * unload a dynamically unloadable driver.
181575ab5f91Slh155975  * It will:
181675ab5f91Slh155975  * 	a. check whether any driver buffers are still held by the OS.
181775ab5f91Slh155975  *	b. clean up all allocated memory if it is not in use by the OS.
181875ab5f91Slh155975  *	c. unregister with GLD
181975ab5f91Slh155975  *	d. return DDI_SUCCESS on successful free & unregister,
182075ab5f91Slh155975  *	   else DDI_FAILURE
182175ab5f91Slh155975  */
182275ab5f91Slh155975 static int
182375ab5f91Slh155975 amd8111s_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
182475ab5f91Slh155975 {
182575ab5f91Slh155975 	struct LayerPointers *pLayerPointers;
182675ab5f91Slh155975 
182775ab5f91Slh155975 	switch (cmd) {
182875ab5f91Slh155975 	case DDI_DETACH:
182975ab5f91Slh155975 		break;
183075ab5f91Slh155975 	default:
183175ab5f91Slh155975 		return (DDI_FAILURE);
183275ab5f91Slh155975 	}
183375ab5f91Slh155975 
183475ab5f91Slh155975 	/*
183575ab5f91Slh155975 	 * Get the driver private (struct LayerPointers *) structure
183675ab5f91Slh155975 	 */
183775ab5f91Slh155975 	if ((pLayerPointers = (struct LayerPointers *)ddi_get_driver_private
183875ab5f91Slh155975 	    (devinfo)) == NULL) {
183975ab5f91Slh155975 		return (DDI_FAILURE);
184075ab5f91Slh155975 	}
184175ab5f91Slh155975 
184275ab5f91Slh155975 	return (amd8111s_unattach(devinfo, pLayerPointers));
184375ab5f91Slh155975 }
184475ab5f91Slh155975 
184575ab5f91Slh155975 static int
184675ab5f91Slh155975 amd8111s_unattach(dev_info_t *devinfo, struct LayerPointers *pLayerPointers)
184775ab5f91Slh155975 {
184875ab5f91Slh155975 	struct odl *pOdl = pLayerPointers->pOdl;
184975ab5f91Slh155975 
185075ab5f91Slh155975 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_MACREGED) {
185175ab5f91Slh155975 		/* Unregister driver from the GLD interface */
185275ab5f91Slh155975 		if (mac_unregister(pOdl->mh) != DDI_SUCCESS) {
185375ab5f91Slh155975 			return (DDI_FAILURE);
185475ab5f91Slh155975 		}
185575ab5f91Slh155975 	}
185675ab5f91Slh155975 
185775ab5f91Slh155975 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_INTRADDED) {
185875ab5f91Slh155975 		ddi_remove_intr(devinfo, 0, pOdl->iblock);
185975ab5f91Slh155975 	}
186075ab5f91Slh155975 
186175ab5f91Slh155975 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESCHED) {
186275ab5f91Slh155975 		ddi_remove_softintr(pOdl->drain_id);
186375ab5f91Slh155975 	}
186475ab5f91Slh155975 
186575ab5f91Slh155975 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_REGS) {
186675ab5f91Slh155975 		/* Stop HW */
186775ab5f91Slh155975 		mdlStopChip(pLayerPointers);
186875ab5f91Slh155975 		ddi_regs_map_free(&(pOdl->MemBasehandle));
186975ab5f91Slh155975 	}
187075ab5f91Slh155975 
187175ab5f91Slh155975 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESOURCE) {
187275ab5f91Slh155975 		/* Free All memory allocated */
187375ab5f91Slh155975 		amd8111s_free_resource(pLayerPointers);
187475ab5f91Slh155975 	}
187575ab5f91Slh155975 
187675ab5f91Slh155975 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_PCI) {
187775ab5f91Slh155975 		pci_config_teardown(&pOdl->pci_handle);
187875ab5f91Slh155975 		mutex_destroy(&pOdl->mdlSendLock);
187975ab5f91Slh155975 		mutex_destroy(&pOdl->mdlRcvLock);
188075ab5f91Slh155975 		rw_destroy(&pOdl->chip_lock);
188175ab5f91Slh155975 	}
188275ab5f91Slh155975 
188375ab5f91Slh155975 	kmem_free(pOdl, sizeof (struct odl));
188475ab5f91Slh155975 	kmem_free(pLayerPointers, sizeof (struct LayerPointers));
188575ab5f91Slh155975 
188675ab5f91Slh155975 	return (DDI_SUCCESS);
188775ab5f91Slh155975 }
188875ab5f91Slh155975 
188975ab5f91Slh155975 /*
189075ab5f91Slh155975  * (GLD Entry Point) GLD will call this entry point periodically to
189175ab5f91Slh155975  * get driver statistics.
189275ab5f91Slh155975  */
189375ab5f91Slh155975 static int
189475ab5f91Slh155975 amd8111s_m_stat(void *arg, uint_t stat, uint64_t *val)
189575ab5f91Slh155975 {
189675ab5f91Slh155975 	struct LayerPointers *pLayerPointers = arg;
189775ab5f91Slh155975 	struct amd8111s_statistics *adapterStat;
189875ab5f91Slh155975 
189975ab5f91Slh155975 	adapterStat = &pLayerPointers->pOdl->statistics;
190075ab5f91Slh155975 
190175ab5f91Slh155975 	switch (stat) {
190275ab5f91Slh155975 
190375ab5f91Slh155975 	/*
190475ab5f91Slh155975 	 * Current Status
190575ab5f91Slh155975 	 */
190675ab5f91Slh155975 	case MAC_STAT_IFSPEED:
190775ab5f91Slh155975 		*val = 	pLayerPointers->pMdl->Speed * 1000000;
190875ab5f91Slh155975 		break;
190975ab5f91Slh155975 
191075ab5f91Slh155975 	case ETHER_STAT_LINK_DUPLEX:
191175ab5f91Slh155975 		if (pLayerPointers->pMdl->FullDuplex) {
191275ab5f91Slh155975 			*val = LINK_DUPLEX_FULL;
191375ab5f91Slh155975 		} else {
191475ab5f91Slh155975 			*val = LINK_DUPLEX_HALF;
191575ab5f91Slh155975 		}
191675ab5f91Slh155975 		break;
191775ab5f91Slh155975 
191875ab5f91Slh155975 	/*
191975ab5f91Slh155975 	 * Capabilities
192075ab5f91Slh155975 	 */
192175ab5f91Slh155975 	case ETHER_STAT_CAP_1000FDX:
192275ab5f91Slh155975 		*val = 0;
192375ab5f91Slh155975 		break;
192475ab5f91Slh155975 
192575ab5f91Slh155975 	case ETHER_STAT_CAP_1000HDX:
192675ab5f91Slh155975 		*val = 0;
192775ab5f91Slh155975 		break;
192875ab5f91Slh155975 
192975ab5f91Slh155975 	case ETHER_STAT_CAP_100FDX:
193075ab5f91Slh155975 		*val = 1;
193175ab5f91Slh155975 		break;
193275ab5f91Slh155975 
193375ab5f91Slh155975 	case ETHER_STAT_CAP_100HDX:
193475ab5f91Slh155975 		*val = 1;
193575ab5f91Slh155975 		break;
193675ab5f91Slh155975 
193775ab5f91Slh155975 	case ETHER_STAT_CAP_10FDX:
193875ab5f91Slh155975 		*val = 1;
193975ab5f91Slh155975 		break;
194075ab5f91Slh155975 
194175ab5f91Slh155975 	case ETHER_STAT_CAP_10HDX:
194275ab5f91Slh155975 		*val = 1;
194375ab5f91Slh155975 		break;
194475ab5f91Slh155975 
194575ab5f91Slh155975 	case ETHER_STAT_CAP_ASMPAUSE:
194675ab5f91Slh155975 		*val = 1;
194775ab5f91Slh155975 		break;
194875ab5f91Slh155975 
194975ab5f91Slh155975 	case ETHER_STAT_CAP_PAUSE:
195075ab5f91Slh155975 		*val = 1;
195175ab5f91Slh155975 		break;
195275ab5f91Slh155975 
195375ab5f91Slh155975 	case ETHER_STAT_CAP_AUTONEG:
195475ab5f91Slh155975 		*val = 1;
195575ab5f91Slh155975 		break;
195675ab5f91Slh155975 
195775ab5f91Slh155975 	case ETHER_STAT_ADV_CAP_1000FDX:
195875ab5f91Slh155975 		*val = 0;
195975ab5f91Slh155975 		break;
196075ab5f91Slh155975 
196175ab5f91Slh155975 	case ETHER_STAT_ADV_CAP_1000HDX:
196275ab5f91Slh155975 		*val = 0;
196375ab5f91Slh155975 		break;
196475ab5f91Slh155975 
196575ab5f91Slh155975 	case ETHER_STAT_ADV_CAP_100FDX:
196675ab5f91Slh155975 		*val = 1;
196775ab5f91Slh155975 		break;
196875ab5f91Slh155975 
196975ab5f91Slh155975 	case ETHER_STAT_ADV_CAP_100HDX:
197075ab5f91Slh155975 		*val = 1;
197175ab5f91Slh155975 		break;
197275ab5f91Slh155975 
197375ab5f91Slh155975 	case ETHER_STAT_ADV_CAP_10FDX:
197475ab5f91Slh155975 		*val = 1;
197575ab5f91Slh155975 		break;
197675ab5f91Slh155975 
197775ab5f91Slh155975 	case ETHER_STAT_ADV_CAP_10HDX:
197875ab5f91Slh155975 		*val = 1;
197975ab5f91Slh155975 		break;
198075ab5f91Slh155975 
198175ab5f91Slh155975 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
198275ab5f91Slh155975 		*val = 1;
198375ab5f91Slh155975 		break;
198475ab5f91Slh155975 
198575ab5f91Slh155975 	case ETHER_STAT_ADV_CAP_PAUSE:
198675ab5f91Slh155975 		*val = 1;
198775ab5f91Slh155975 		break;
198875ab5f91Slh155975 
198975ab5f91Slh155975 	case ETHER_STAT_ADV_CAP_AUTONEG:
199075ab5f91Slh155975 		*val = 1;
199175ab5f91Slh155975 		break;
199275ab5f91Slh155975 
199375ab5f91Slh155975 	/*
199475ab5f91Slh155975 	 * Rx Counters
199575ab5f91Slh155975 	 */
199675ab5f91Slh155975 	case MAC_STAT_IPACKETS:
199775ab5f91Slh155975 		*val = adapterStat->rx_mib_unicst_packets +
199875ab5f91Slh155975 		    adapterStat->rx_mib_multicst_packets +
199975ab5f91Slh155975 		    adapterStat->rx_mib_broadcst_packets +
200075ab5f91Slh155975 		    mdlReadMib(pLayerPointers, RcvUniCastPkts) +
200175ab5f91Slh155975 		    mdlReadMib(pLayerPointers, RcvMultiCastPkts) +
200275ab5f91Slh155975 		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
200375ab5f91Slh155975 		break;
200475ab5f91Slh155975 
200575ab5f91Slh155975 	case MAC_STAT_RBYTES:
200675ab5f91Slh155975 		*val = adapterStat->rx_mib_bytes +
200775ab5f91Slh155975 		    mdlReadMib(pLayerPointers, RcvOctets);
200875ab5f91Slh155975 		break;
200975ab5f91Slh155975 
201075ab5f91Slh155975 	case MAC_STAT_MULTIRCV:
201175ab5f91Slh155975 		*val = adapterStat->rx_mib_multicst_packets +
201275ab5f91Slh155975 		    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
201375ab5f91Slh155975 		break;
201475ab5f91Slh155975 
201575ab5f91Slh155975 	case MAC_STAT_BRDCSTRCV:
201675ab5f91Slh155975 		*val = adapterStat->rx_mib_broadcst_packets +
201775ab5f91Slh155975 		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
201875ab5f91Slh155975 		break;
201975ab5f91Slh155975 
202075ab5f91Slh155975 	case MAC_STAT_NORCVBUF:
202175ab5f91Slh155975 		*val = adapterStat->rx_allocfail +
202275ab5f91Slh155975 		    adapterStat->rx_mib_drop_packets +
202375ab5f91Slh155975 		    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
202475ab5f91Slh155975 		break;
202575ab5f91Slh155975 
202675ab5f91Slh155975 	case MAC_STAT_IERRORS:
202775ab5f91Slh155975 		*val = adapterStat->rx_mib_align_err_packets +
202875ab5f91Slh155975 		    adapterStat->rx_mib_fcs_err_packets +
202975ab5f91Slh155975 		    adapterStat->rx_mib_symbol_err_packets +
203075ab5f91Slh155975 		    mdlReadMib(pLayerPointers, RcvAlignmentErrors) +
203175ab5f91Slh155975 		    mdlReadMib(pLayerPointers, RcvFCSErrors) +
203275ab5f91Slh155975 		    mdlReadMib(pLayerPointers, RcvSymbolErrors);
203375ab5f91Slh155975 		break;
203475ab5f91Slh155975 
203575ab5f91Slh155975 	case ETHER_STAT_ALIGN_ERRORS:
203675ab5f91Slh155975 		*val = adapterStat->rx_mib_align_err_packets +
203775ab5f91Slh155975 		    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
203875ab5f91Slh155975 		break;
203975ab5f91Slh155975 
204075ab5f91Slh155975 	case ETHER_STAT_FCS_ERRORS:
204175ab5f91Slh155975 		*val = adapterStat->rx_mib_fcs_err_packets +
204275ab5f91Slh155975 		    mdlReadMib(pLayerPointers, RcvFCSErrors);
204375ab5f91Slh155975 		break;
204475ab5f91Slh155975 
204575ab5f91Slh155975 	/*
204675ab5f91Slh155975 	 * Tx Counters
204775ab5f91Slh155975 	 */
204875ab5f91Slh155975 	case MAC_STAT_OPACKETS:
204975ab5f91Slh155975 		*val = adapterStat->tx_mib_packets +
205075ab5f91Slh155975 		    mdlReadMib(pLayerPointers, XmtPackets);
205175ab5f91Slh155975 		break;
205275ab5f91Slh155975 
205375ab5f91Slh155975 	case MAC_STAT_OBYTES:
205475ab5f91Slh155975 		*val = adapterStat->tx_mib_bytes +
205575ab5f91Slh155975 		    mdlReadMib(pLayerPointers, XmtOctets);
205675ab5f91Slh155975 		break;
205775ab5f91Slh155975 
205875ab5f91Slh155975 	case MAC_STAT_MULTIXMT:
205975ab5f91Slh155975 		*val = adapterStat->tx_mib_multicst_packets +
206075ab5f91Slh155975 		    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
206175ab5f91Slh155975 		break;
206275ab5f91Slh155975 
206375ab5f91Slh155975 	case MAC_STAT_BRDCSTXMT:
206475ab5f91Slh155975 		*val = adapterStat->tx_mib_broadcst_packets +
206575ab5f91Slh155975 		    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
206675ab5f91Slh155975 		break;
206775ab5f91Slh155975 
206875ab5f91Slh155975 	case MAC_STAT_NOXMTBUF:
206975ab5f91Slh155975 		*val = adapterStat->tx_no_descriptor;
207075ab5f91Slh155975 		break;
207175ab5f91Slh155975 
207275ab5f91Slh155975 	case MAC_STAT_OERRORS:
207375ab5f91Slh155975 		*val = adapterStat->tx_mib_ex_coll_packets +
207475ab5f91Slh155975 		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
207575ab5f91Slh155975 		break;
207675ab5f91Slh155975 
207775ab5f91Slh155975 	case MAC_STAT_COLLISIONS:
207875ab5f91Slh155975 		*val = adapterStat->tx_mib_ex_coll_packets +
207975ab5f91Slh155975 		    mdlReadMib(pLayerPointers, XmtCollisions);
208075ab5f91Slh155975 		break;
208175ab5f91Slh155975 
208275ab5f91Slh155975 	case ETHER_STAT_FIRST_COLLISIONS:
208375ab5f91Slh155975 		*val = adapterStat->tx_mib_one_coll_packets +
208475ab5f91Slh155975 		    mdlReadMib(pLayerPointers, XmtOneCollision);
208575ab5f91Slh155975 		break;
208675ab5f91Slh155975 
208775ab5f91Slh155975 	case ETHER_STAT_MULTI_COLLISIONS:
208875ab5f91Slh155975 		*val = adapterStat->tx_mib_multi_coll_packets +
208975ab5f91Slh155975 		    mdlReadMib(pLayerPointers, XmtMultipleCollision);
209075ab5f91Slh155975 		break;
209175ab5f91Slh155975 
209275ab5f91Slh155975 	case ETHER_STAT_EX_COLLISIONS:
209375ab5f91Slh155975 		*val = adapterStat->tx_mib_ex_coll_packets +
209475ab5f91Slh155975 		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
209575ab5f91Slh155975 		break;
209675ab5f91Slh155975 
209775ab5f91Slh155975 	case ETHER_STAT_TX_LATE_COLLISIONS:
209875ab5f91Slh155975 		*val = adapterStat->tx_mib_late_coll_packets +
209975ab5f91Slh155975 		    mdlReadMib(pLayerPointers, XmtLateCollision);
210075ab5f91Slh155975 		break;
210175ab5f91Slh155975 
210275ab5f91Slh155975 	case ETHER_STAT_DEFER_XMTS:
210375ab5f91Slh155975 		*val = adapterStat->tx_mib_defer_trans_packets +
210475ab5f91Slh155975 		    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
210575ab5f91Slh155975 		break;
210675ab5f91Slh155975 
210775ab5f91Slh155975 	default:
210875ab5f91Slh155975 		return (ENOTSUP);
210975ab5f91Slh155975 	}
211075ab5f91Slh155975 	return (0);
211175ab5f91Slh155975 }
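/*
 * Most counters reported here are the sum of the software accumulator in
 * pOdl->statistics and the live value read from the corresponding
 * hardware MIB register via mdlReadMib(), so the 32-bit hardware counters
 * can be cleared (see amd8111s_dump_mib(), compiled under
 * AMD8111S_DEBUG) without losing history.
 */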
211275ab5f91Slh155975 
211375ab5f91Slh155975 /*
211475ab5f91Slh155975  *	Memory access functions used by the MDL to read and write card registers.
211575ab5f91Slh155975  */
211675ab5f91Slh155975 unsigned char
211775ab5f91Slh155975 READ_REG8(struct LayerPointers *pLayerPointers, long x)
211875ab5f91Slh155975 {
211975ab5f91Slh155975 	return (ddi_get8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)x));
212075ab5f91Slh155975 }
212175ab5f91Slh155975 
212275ab5f91Slh155975 int
212375ab5f91Slh155975 READ_REG16(struct LayerPointers *pLayerPointers, long x)
212475ab5f91Slh155975 {
212575ab5f91Slh155975 	return (ddi_get16(pLayerPointers->pOdl->MemBasehandle,
212675ab5f91Slh155975 	    (uint16_t *)(x)));
212775ab5f91Slh155975 }
212875ab5f91Slh155975 
212975ab5f91Slh155975 long
213075ab5f91Slh155975 READ_REG32(struct LayerPointers *pLayerPointers, long x)
213175ab5f91Slh155975 {
213275ab5f91Slh155975 	return (ddi_get32(pLayerPointers->pOdl->MemBasehandle,
213375ab5f91Slh155975 	    (uint32_t *)(x)));
213475ab5f91Slh155975 }
213575ab5f91Slh155975 
213675ab5f91Slh155975 void
213775ab5f91Slh155975 WRITE_REG8(struct LayerPointers *pLayerPointers, long x, int y)
213875ab5f91Slh155975 {
213975ab5f91Slh155975 	ddi_put8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)(x), y);
214075ab5f91Slh155975 }
214175ab5f91Slh155975 
214275ab5f91Slh155975 void
214375ab5f91Slh155975 WRITE_REG16(struct LayerPointers *pLayerPointers, long x, int y)
214475ab5f91Slh155975 {
214575ab5f91Slh155975 	ddi_put16(pLayerPointers->pOdl->MemBasehandle, (uint16_t *)(x), y);
214675ab5f91Slh155975 }
214775ab5f91Slh155975 
214875ab5f91Slh155975 void
214975ab5f91Slh155975 WRITE_REG32(struct LayerPointers *pLayerPointers, long x, int y)
215075ab5f91Slh155975 {
215175ab5f91Slh155975 	ddi_put32(pLayerPointers->pOdl->MemBasehandle, (uint32_t *)(x), y);
215275ab5f91Slh155975 }
215375ab5f91Slh155975 
215475ab5f91Slh155975 void
215575ab5f91Slh155975 WRITE_REG64(struct LayerPointers *pLayerPointers, long x, char *y)
215675ab5f91Slh155975 {
215775ab5f91Slh155975 	int i;
215875ab5f91Slh155975 	for (i = 0; i < 8; i++) {
215975ab5f91Slh155975 		WRITE_REG8(pLayerPointers, (x + i), y[i]);
216075ab5f91Slh155975 	}
216175ab5f91Slh155975 }
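/*
 * These wrappers funnel all register access through the DDI access handle
 * (MemBasehandle) obtained from ddi_regs_map_setup() in attach, so byte
 * order and ordering follow the dev_attr settings there (little-endian,
 * strict ordering).  A minimal usage sketch, where "offset" and "bit" are
 * placeholders for a real register offset and flag:
 *
 *	cmd = READ_REG32(pLayerPointers, pMdl->Mem_Address + offset);
 *	WRITE_REG32(pLayerPointers, pMdl->Mem_Address + offset, cmd | bit);
 */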
2162