/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 QLogic Corporation. All rights reserved.
 */

#include <qlge.h>
#include <sys/atomic.h>
#include <sys/strsubr.h>
#include <sys/pattr.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <inet/ip.h>

/*
 * Local variables
 */
static struct ether_addr ql_ether_broadcast_addr =
	{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
static char version[] = "QLogic GLDv3 Driver " VERSIONSTR;

/*
 * Local function prototypes
 */
static void ql_free_resources(dev_info_t *, qlge_t *);
static void ql_fini_kstats(qlge_t *);
static uint32_t ql_get_link_state(qlge_t *);
static void ql_read_conf(qlge_t *);
static int ql_alloc_phys(dev_info_t *, ddi_dma_handle_t *,
    ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
    size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
static void ql_free_phys(ddi_dma_handle_t *, ddi_acc_handle_t *);
static int ql_set_routing_reg(qlge_t *, uint32_t, uint32_t, int);
static int ql_route_initialize(qlge_t *);
static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
static int ql_bringdown_adapter(qlge_t *);
static int ql_bringup_adapter(qlge_t *);
static int ql_asic_reset(qlge_t *);
static void ql_wake_mpi_reset_soft_intr(qlge_t *);
static void ql_stop_timer(qlge_t *qlge);

/*
 * TX DMA mapping handles allow multiple scatter-gather lists
 */
ddi_dma_attr_t  tx_mapping_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_MAX_TX_DMA_HANDLES,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

/*
 * Receive buffers and Request/Response queues do not allow
 * scatter-gather lists
 */
ddi_dma_attr_t  dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	1,				/* s/g list length, i.e. no sg list */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

/*
 * DMA access attribute structure.
 */
/* device register access from host */
ddi_device_acc_attr_t ql_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* host ring descriptors */
ddi_device_acc_attr_t ql_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

/* host ring buffer */
ddi_device_acc_attr_t ql_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Hash key table for Receive Side Scaling (RSS) support
 */
const uint8_t key_data[] = {
	0x23, 0x64, 0xa1, 0xaa, 0x37, 0xc0, 0xed, 0x05, 0x2b, 0x36,
	0x50, 0x5c, 0x45, 0x1e, 0x7e, 0xc8, 0x5d, 0x2a, 0x54, 0x2f,
	0xe4, 0x3d, 0x0f, 0xbb, 0x91, 0xd9, 0x25, 0x60, 0xd4, 0xf8,
	0x12, 0xa0, 0x59, 0x4b, 0x9e, 0x8a, 0x51, 0xda, 0xcd, 0x49};

/*
 * Shadow Registers:
 * Outbound queues have a consumer index that is maintained by the chip.
 * Inbound queues have a producer index that is maintained by the chip.
 * For lower overhead, these registers are "shadowed" to host memory
 * which allows the device driver to track the queue progress without
 * PCI reads. When an entry is placed on an inbound queue, the chip will
 * update the relevant index register and then copy the value to the
 * shadow register in host memory.
 */

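/*
 * Read a shadow register. The chip maintains the index copy in host
 * memory, so a plain volatile load suffices; no PCI read is needed.
 */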
static inline unsigned int
ql_read_sh_reg(const volatile void *addr)
{
	return (*(volatile uint32_t *)addr);
}

/*
 * Read 32 bit atomically
 */
uint32_t
ql_atomic_read_32(volatile uint32_t *target)
{
	/*
	 * atomic_add_32_nv returns the new value after the add,
	 * we are adding 0 so we should get the original value
	 */
	return (atomic_add_32_nv(target, 0));
}

/*
 * Set 32 bit atomically
 */
void
ql_atomic_set_32(volatile uint32_t *target, uint32_t newval)
{
	(void) atomic_swap_32(target, newval);
}

/*
 * Setup device PCI configuration registers.
 * Kernel context.
 */
static void
ql_pci_config(qlge_t *qlge)
{
	uint16_t w;

	qlge->vendor_id = (uint16_t)pci_config_get16(qlge->pci_handle,
	    PCI_CONF_VENID);
	qlge->device_id = (uint16_t)pci_config_get16(qlge->pci_handle,
	    PCI_CONF_DEVID);

	/*
	 * we want to respect framework's setting of PCI
	 * configuration space command register and also
	 * want to make sure that all bits of interest to us
	 * are properly set in PCI Command register(0x04).
	 * PCI_COMM_IO		0x1	 I/O access enable
	 * PCI_COMM_MAE		0x2	 Memory access enable
	 * PCI_COMM_ME		0x4	 bus master enable
	 * PCI_COMM_MEMWR_INVAL	0x10	 memory write and invalidate enable.
	 */
	w = (uint16_t)pci_config_get16(qlge->pci_handle, PCI_CONF_COMM);
	w = (uint16_t)(w & (~PCI_COMM_IO));
	w = (uint16_t)(w | PCI_COMM_MAE | PCI_COMM_ME |
	    /* PCI_COMM_MEMWR_INVAL | */
	    PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);

	pci_config_put16(qlge->pci_handle, PCI_CONF_COMM, w);

	ql_dump_pci_config(qlge);
}

/*
 * This routine performs the necessary steps to set GLD MAC information
 * such as the function number, xgmac mask and shift bits
 */
static int
ql_set_mac_info(qlge_t *qlge)
{
	uint32_t value;
	int rval = DDI_SUCCESS;
	uint32_t fn0_net, fn1_net;

	/* set default value */
	qlge->fn0_net = FN0_NET;
	qlge->fn1_net = FN1_NET;

	if (ql_read_processor_data(qlge, MPI_REG, &value) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s(%d) read MPI register failed",
		    __func__, qlge->instance);
	} else {
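		/*
		 * Judging from the shifts below, bits 3:1 of the MPI
		 * register hold the NIC 0 function number and bits 7:5
		 * hold the NIC 1 function number.
		 */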
		fn0_net = (value >> 1) & 0x07;
		fn1_net = (value >> 5) & 0x07;
		if ((fn0_net > 4) || (fn1_net > 4) || (fn0_net == fn1_net)) {
			cmn_err(CE_WARN, "%s(%d) bad mpi register value %x, \n"
			    "nic0 function number %d,"
			    "nic1 function number %d "
			    "use default\n",
			    __func__, qlge->instance, value, fn0_net, fn1_net);
		} else {
			qlge->fn0_net = fn0_net;
			qlge->fn1_net = fn1_net;
		}
	}

	/* Get the function number that the driver is associated with */
	value = ql_read_reg(qlge, REG_STATUS);
	qlge->func_number = (uint8_t)((value >> 6) & 0x03);
	QL_PRINT(DBG_INIT, ("status register is:%x, func_number: %d\n",
	    value, qlge->func_number));

	/* Is the driver loaded on a non-NIC function? */
	if ((qlge->func_number != qlge->fn0_net) &&
	    (qlge->func_number != qlge->fn1_net)) {
		cmn_err(CE_WARN,
		    "Invalid function number = 0x%x\n", qlge->func_number);
		return (DDI_FAILURE);
	}
	/* network port 0? */
	if (qlge->func_number == qlge->fn0_net) {
		qlge->xgmac_sem_mask = QL_PORT0_XGMAC_SEM_MASK;
		qlge->xgmac_sem_bits = QL_PORT0_XGMAC_SEM_BITS;
	} else {
		qlge->xgmac_sem_mask = QL_PORT1_XGMAC_SEM_MASK;
		qlge->xgmac_sem_bits = QL_PORT1_XGMAC_SEM_BITS;
	}

	return (rval);
}

/*
 * write to doorbell register
 */
void
ql_write_doorbell_reg(qlge_t *qlge, uint32_t *addr, uint32_t data)
{
	ddi_put32(qlge->dev_doorbell_reg_handle, addr, data);
}

/*
 * read from doorbell register
 */
uint32_t
ql_read_doorbell_reg(qlge_t *qlge, uint32_t *addr)
{
	uint32_t ret;

	ret = ddi_get32(qlge->dev_doorbell_reg_handle, addr);

	return (ret);
}
/*
 * This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialization
 * process, but is also used in kernel thread APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
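 * The wait is bounded: at most UDELAY_COUNT polls with a
 * qlge_delay(UDELAY_DELAY) pause between reads.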
 */
static int
ql_wait_reg_rdy(qlge_t *qlge, uint32_t reg, uint32_t bit, uint32_t err_bit)
{
	uint32_t temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read_reg(qlge, reg);

		/* check for errors */
		if ((temp & err_bit) != 0) {
			break;
		} else if ((temp & bit) != 0)
			return (DDI_SUCCESS);
		qlge_delay(UDELAY_DELAY);
		count--;
	}
	cmn_err(CE_WARN,
	    "Waiting for reg %x to come ready failed.", reg);
	return (DDI_FAILURE);
}

/*
 * The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
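 * If the error bit (CFG_LE) is set, the wait is aborted and treated
 * as a failure.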
 */
static int
ql_wait_cfg(qlge_t *qlge, uint32_t bit)
{
	int count = UDELAY_COUNT;
	uint32_t temp;

	while (count) {
		temp = ql_read_reg(qlge, REG_CONFIGURATION);
		if ((temp & CFG_LE) != 0) {
			break;
		}
		if ((temp & bit) == 0)
			return (DDI_SUCCESS);
		qlge_delay(UDELAY_DELAY);
		count--;
	}
	cmn_err(CE_WARN,
	    "Waiting for cfg register bit %x failed.", bit);
	return (DDI_FAILURE);
}

/*
 * Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
static int
ql_write_cfg(qlge_t *qlge, uint32_t bit, uint64_t phy_addr, uint16_t q_id)
{
	int status = DDI_SUCCESS;
	uint32_t mask;
	uint32_t value;

	status = ql_sem_spinlock(qlge, SEM_ICB_MASK);
	if (status != DDI_SUCCESS) {
		goto exit;
	}
	status = ql_wait_cfg(qlge, bit);
	if (status != DDI_SUCCESS) {
		goto exit;
	}

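	/* Point the chip at the control block's physical address. */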
	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_LOWER, LS_64BITS(phy_addr));
	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_UPPER, MS_64BITS(phy_addr));

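	/* Kick off the download for the requested queue. */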
	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write_reg(qlge, REG_CONFIGURATION, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qlge, bit);
	ql_sem_unlock(qlge, SEM_ICB_MASK); /* does flush too */

exit:
	return (status);
}
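
/*
 * Usage sketch (illustrative only; the load bit, address and queue id
 * below are hypothetical caller-supplied values):
 *
 *	if (ql_write_cfg(qlge, load_bit, icb_dma_addr, q_id) !=
 *	    DDI_SUCCESS)
 *		cmn_err(CE_WARN, "control block download failed");
 */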

/*
 * Initialize adapter instance
 */
static int
ql_init_instance(qlge_t *qlge)
{
	int i;

	/* Default value */
	qlge->mac_flags = QL_MAC_INIT;
	qlge->mtu = ETHERMTU;		/* set normal size as default */
	qlge->page_size = VM_PAGE_SIZE;	/* default page size */
	/* Set up the default ring sizes. */
	qlge->tx_ring_size = NUM_TX_RING_ENTRIES;
	qlge->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT;
	qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT;
	qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT;
	qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT;
	qlge->payload_copy_thresh = DFLT_PAYLOAD_COPY_THRESH;
	qlge->ql_dbgprnt = 0;
#if QL_DEBUG
	qlge->ql_dbgprnt = QL_DEBUG;
#endif /* QL_DEBUG */

	/*
	 * TODO: Should be obtained from configuration or based off
	 * number of active cpus SJP 4th Mar. 09
	 */
	qlge->tx_ring_count = 1;
	qlge->rss_ring_count = 4;
	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;

	for (i = 0; i < MAX_RX_RINGS; i++) {
		qlge->rx_polls[i] = 0;
		qlge->rx_interrupts[i] = 0;
	}

	/*
	 * Set up the operating parameters.
	 */
	qlge->multicast_list_count = 0;

	/*
	 * Set up the maximum number of unicast list entries
	 */
	qlge->unicst_total = MAX_UNICAST_LIST_SIZE;
	qlge->unicst_avail = MAX_UNICAST_LIST_SIZE;

	/*
	 * read user defined properties in .conf file
	 */
	ql_read_conf(qlge); /* mtu, pause, LSO etc */

	QL_PRINT(DBG_INIT, ("mtu is %d \n", qlge->mtu));

	/* choose Memory Space mapping and get Vendor Id, Device ID etc */
	ql_pci_config(qlge);
	qlge->ip_hdr_offset = 0;

	if (qlge->device_id == 0x8000) {
		/* Schultz card */
		qlge->cfg_flags |= CFG_CHIP_8100;
		/* enable just ipv4 chksum offload for Schultz */
		qlge->cfg_flags |= CFG_CKSUM_FULL_IPv4;
		/*
		 * Schultz firmware does not do pseudo IP header checksum
		 * calculation; it must be done by the driver
		 */
		qlge->cfg_flags |= CFG_HW_UNABLE_PSEUDO_HDR_CKSUM;
		if (qlge->lso_enable)
			qlge->cfg_flags |= CFG_LSO;
		qlge->cfg_flags |= CFG_SUPPORT_SCATTER_GATHER;
		/* Schultz must split packet header */
		qlge->cfg_flags |= CFG_ENABLE_SPLIT_HEADER;
		qlge->max_read_mbx = 5;
		qlge->ip_hdr_offset = 2;
	}

	/* Set Function Number and some of the iocb mac information */
	if (ql_set_mac_info(qlge) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Read network settings from NVRAM */
	/* After nvram is read successfully, update dev_addr */
	if (ql_get_flash_params(qlge) == DDI_SUCCESS) {
		QL_PRINT(DBG_INIT, ("mac%d address is \n", qlge->func_number));
		for (i = 0; i < ETHERADDRL; i++) {
			qlge->dev_addr.ether_addr_octet[i] =
			    qlge->nic_config.factory_MAC[i];
		}
	} else {
		cmn_err(CE_WARN, "%s(%d): Failed to read flash memory",
		    __func__, qlge->instance);
		return (DDI_FAILURE);
	}

	bcopy(qlge->dev_addr.ether_addr_octet,
	    qlge->unicst_addr[0].addr.ether_addr_octet,
	    ETHERADDRL);
	QL_DUMP(DBG_INIT, "\t flash mac address dump:\n",
	    &qlge->dev_addr.ether_addr_octet[0], 8, ETHERADDRL);

	qlge->port_link_state = LS_DOWN;

	return (DDI_SUCCESS);
}

/*
 * This hardware semaphore provides the mechanism for exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
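 * ql_sem_trylock() returns zero when the semaphore was acquired and
 * non-zero when it is not available.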
 */
static int
ql_sem_trylock(qlge_t *qlge, uint32_t sem_mask)
{
	uint32_t sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		cmn_err(CE_WARN, "Bad semaphore mask!");
		return (DDI_FAILURE);
	}

	ql_write_reg(qlge, REG_SEMAPHORE, sem_bits | sem_mask);
	return (!(ql_read_reg(qlge, REG_SEMAPHORE) & sem_bits));
}

/*
 * Lock a specific bit of the Semaphore register to gain
 * access to a particular shared register
 */
int
ql_sem_spinlock(qlge_t *qlge, uint32_t sem_mask)
{
	unsigned int wait_count = 30;

	while (wait_count) {
		if (!ql_sem_trylock(qlge, sem_mask))
			return (DDI_SUCCESS);
		qlge_delay(100);
		wait_count--;
	}
	cmn_err(CE_WARN, "%s(%d) sem_mask 0x%x lock timeout ",
	    __func__, qlge->instance, sem_mask);
	return (DDI_FAILURE);
}

/*
 * Unlock a specific bit of the Semaphore register to release
 * access to a particular shared register
 */
void
ql_sem_unlock(qlge_t *qlge, uint32_t sem_mask)
{
	ql_write_reg(qlge, REG_SEMAPHORE, sem_mask);
	ql_read_reg(qlge, REG_SEMAPHORE);	/* flush */
}

/*
 * Get property value from configuration file.
 *
 * string = property string pointer.
 *
 * Returns:
 * 0xFFFFFFFF = no property else property value.
 */
static uint32_t
ql_get_prop(qlge_t *qlge, char *string)
{
	char buf[256];
	uint32_t data;

	/* Get adapter instance parameter. */
	(void) sprintf(buf, "hba%d-%s", qlge->instance, string);
	data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0, buf,
	    (int)0xffffffff);

	/* Adapter instance parameter found? */
	if (data == 0xffffffff) {
		/* No, get default parameter. */
		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0,
		    string, (int)0xffffffff);
	}

	return (data);
}

/*
 * Read user setting from configuration file.
 */
static void
ql_read_conf(qlge_t *qlge)
{
	uint32_t data;

	/* clear configuration flags */
	qlge->cfg_flags = 0;

	/* Get default rx_copy enable/disable. */
	if ((data = ql_get_prop(qlge, "force-rx-copy")) == 0xffffffff ||
	    data == 0) {
		qlge->cfg_flags &= ~CFG_RX_COPY_MODE;
		qlge->rx_copy = B_FALSE;
		QL_PRINT(DBG_INIT, ("rx copy mode disabled\n"));
	} else if (data == 1) {
		qlge->cfg_flags |= CFG_RX_COPY_MODE;
		qlge->rx_copy = B_TRUE;
		QL_PRINT(DBG_INIT, ("rx copy mode enabled\n"));
	}

	/* Get mtu packet size. */
	data = ql_get_prop(qlge, "mtu");
	if ((data == ETHERMTU) || (data == JUMBO_MTU)) {
		if (qlge->mtu != data) {
			qlge->mtu = data;
			cmn_err(CE_NOTE, "new mtu is %d\n", qlge->mtu);
		}
	}

	/* Get pause mode, default is Per Priority mode. */
	qlge->pause = PAUSE_MODE_PER_PRIORITY;
	data = ql_get_prop(qlge, "pause");
	if (data <= PAUSE_MODE_PER_PRIORITY) {
		if (qlge->pause != data) {
			qlge->pause = data;
			cmn_err(CE_NOTE, "new pause mode %d\n", qlge->pause);
		}
	}

	/* Get tx_max_coalesced_frames. */
	qlge->tx_max_coalesced_frames = 5;
	data = ql_get_prop(qlge, "tx_max_coalesced_frames");
	/* if data is valid */
	if ((data != 0xffffffff) && data) {
		if (qlge->tx_max_coalesced_frames != data) {
			qlge->tx_max_coalesced_frames = (uint16_t)data;
		}
	}

	/* Get split header payload_copy_thresh. */
	qlge->payload_copy_thresh = 6;
	data = ql_get_prop(qlge, "payload_copy_thresh");
	/* if data is valid */
	if ((data != 0xffffffff) && (data != 0)) {
		if (qlge->payload_copy_thresh != data) {
			qlge->payload_copy_thresh = data;
		}
	}

	/* large send offload (LSO) capability. */
	qlge->lso_enable = 1;
	data = ql_get_prop(qlge, "lso_enable");
	/* if data is valid */
	if (data != 0xffffffff) {
		if (qlge->lso_enable != data) {
			qlge->lso_enable = (uint16_t)data;
		}
	}
}

/*
 * Enable global interrupt
 */
static void
ql_enable_global_interrupt(qlge_t *qlge)
{
	ql_write_reg(qlge, REG_INTERRUPT_ENABLE,
	    (INTR_EN_EI << 16) | INTR_EN_EI);
	qlge->flags |= INTERRUPTS_ENABLED;
}

/*
 * Disable global interrupt
 */
static void
ql_disable_global_interrupt(qlge_t *qlge)
{
	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, (INTR_EN_EI << 16));
	qlge->flags &= ~INTERRUPTS_ENABLED;
}

/*
 * Enable one ring interrupt
 */
void
ql_enable_completion_interrupt(qlge_t *qlge, uint32_t intr)
{
	struct intr_ctx *ctx = qlge->intr_ctx + intr;

	QL_PRINT(DBG_INTR, ("%s(%d): To enable intr %d, irq_cnt %d \n",
	    __func__, qlge->instance, intr, ctx->irq_cnt));

	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
		/*
		 * Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
		return;
	}

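	/*
	 * Legacy, MSI, and the default MSIX vector track nested disables
	 * in irq_cnt; re-enable only when the count returns to zero.
	 */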
	if (!atomic_dec_32_nv(&ctx->irq_cnt)) {
		mutex_enter(&qlge->hw_mutex);
		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
		mutex_exit(&qlge->hw_mutex);
		QL_PRINT(DBG_INTR,
		    ("%s(%d): write %x to intr enable register \n",
		    __func__, qlge->instance, ctx->intr_en_mask));
	}
}

/*
 * ql_forced_disable_completion_interrupt
 * Called from the OS, possibly without a pending interrupt,
 * so force the disable
 */
uint32_t
ql_forced_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
{
	uint32_t var = 0;
	struct intr_ctx *ctx = qlge->intr_ctx + intr;

	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
	    __func__, qlge->instance, intr, ctx->irq_cnt));

	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
		var = ql_read_reg(qlge, REG_STATUS);
		return (var);
	}

	mutex_enter(&qlge->hw_mutex);
	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
	var = ql_read_reg(qlge, REG_STATUS);
	mutex_exit(&qlge->hw_mutex);

	return (var);
}

/*
 * Disable a completion interrupt
 */
void
ql_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
{
	struct intr_ctx *ctx;

	ctx = qlge->intr_ctx + intr;
	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
	    __func__, qlge->instance, intr, ctx->irq_cnt));
	/*
	 * HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && (intr != 0))
		return;

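	/* Only the first of nested disables writes the disable mask. */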
	if (ql_atomic_read_32(&ctx->irq_cnt) == 0) {
		mutex_enter(&qlge->hw_mutex);
		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
		mutex_exit(&qlge->hw_mutex);
	}
	atomic_inc_32(&ctx->irq_cnt);
}

/*
 * Enable all completion interrupts
 */
static void
ql_enable_all_completion_interrupts(qlge_t *qlge)
{
	int i;
	uint32_t value = 1;

	for (i = 0; i < qlge->intr_cnt; i++) {
		/*
		 * Set the count to 1 for Legacy / MSI interrupts or for the
		 * default interrupt (0)
		 */
		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0) {
			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
		}
		ql_enable_completion_interrupt(qlge, i);
	}
}

/*
 * Disable all completion interrupts
 */
static void
ql_disable_all_completion_interrupts(qlge_t *qlge)
{
	int i;
	uint32_t value = 0;

	for (i = 0; i < qlge->intr_cnt; i++) {
		/*
		 * Set the count to 0 for Legacy / MSI interrupts or for the
		 * default interrupt (0)
		 */
		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0)
			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);

		ql_disable_completion_interrupt(qlge, i);
	}
}

/*
 * Update small buffer queue producer index
 */
static void
ql_update_sbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
{
	/* Update the buffer producer index */
	QL_PRINT(DBG_RX, ("sbq: updating prod idx = %d.\n",
	    rx_ring->sbq_prod_idx));
	ql_write_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg,
	    rx_ring->sbq_prod_idx);
}

/*
 * Update large buffer queue producer index
 */
static void
ql_update_lbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
{
	/* Update the buffer producer index */
	QL_PRINT(DBG_RX, ("lbq: updating prod idx = %d.\n",
	    rx_ring->lbq_prod_idx));
	ql_write_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg,
	    rx_ring->lbq_prod_idx);
}

/*
 * Adds a small buffer descriptor to the end of its in use list;
 * assumes sbq_lock is already taken.
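 * The in use list is a circular array indexed by sbq_use_head and
 * sbq_use_tail.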
 */
static void
ql_add_sbuf_to_in_use_list(struct rx_ring *rx_ring,
    struct bq_desc *sbq_desc)
{
	uint32_t inuse_idx = rx_ring->sbq_use_tail;

	rx_ring->sbuf_in_use[inuse_idx] = sbq_desc;
	inuse_idx++;
	if (inuse_idx >= rx_ring->sbq_len)
		inuse_idx = 0;
	rx_ring->sbq_use_tail = inuse_idx;
	atomic_inc_32(&rx_ring->sbuf_in_use_count);
	ASSERT(rx_ring->sbuf_in_use_count <= rx_ring->sbq_len);
}

/*
 * Get a small buffer descriptor from its in use list
 */
static struct bq_desc *
ql_get_sbuf_from_in_use_list(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = NULL;
	uint32_t inuse_idx;

	/* Pick from head of in use list */
	inuse_idx = rx_ring->sbq_use_head;
	sbq_desc = rx_ring->sbuf_in_use[inuse_idx];
	rx_ring->sbuf_in_use[inuse_idx] = NULL;

	if (sbq_desc != NULL) {
		inuse_idx++;
		if (inuse_idx >= rx_ring->sbq_len)
			inuse_idx = 0;
		rx_ring->sbq_use_head = inuse_idx;
		atomic_dec_32(&rx_ring->sbuf_in_use_count);
		atomic_inc_32(&rx_ring->rx_indicate);
		sbq_desc->upl_inuse = 1;
		/* if mp is NULL */
		if (sbq_desc->mp == NULL) {
			/* try to remap mp again */
			sbq_desc->mp =
			    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
			    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
		}
	}

	return (sbq_desc);
}

/*
 * Add a small buffer descriptor to its free list
 */
static void
ql_add_sbuf_to_free_list(struct rx_ring *rx_ring,
    struct bq_desc *sbq_desc)
{
	uint32_t free_idx;

	/* Add to the end of free list */
	free_idx = rx_ring->sbq_free_tail;
	rx_ring->sbuf_free[free_idx] = sbq_desc;
	ASSERT(rx_ring->sbuf_free_count <= rx_ring->sbq_len);
	free_idx++;
	if (free_idx >= rx_ring->sbq_len)
		free_idx = 0;
	rx_ring->sbq_free_tail = free_idx;
	atomic_inc_32(&rx_ring->sbuf_free_count);
}

/*
 * Get a small buffer descriptor from its free list
 */
static struct bq_desc *
ql_get_sbuf_from_free_list(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc;
	uint32_t free_idx;

	free_idx = rx_ring->sbq_free_head;
	/* Pick from top of free list */
	sbq_desc = rx_ring->sbuf_free[free_idx];
	rx_ring->sbuf_free[free_idx] = NULL;
	if (sbq_desc != NULL) {
		free_idx++;
		if (free_idx >= rx_ring->sbq_len)
			free_idx = 0;
		rx_ring->sbq_free_head = free_idx;
		atomic_dec_32(&rx_ring->sbuf_free_count);
		ASSERT(rx_ring->sbuf_free_count != 0);
	}
	return (sbq_desc);
}

/*
 * Add a large buffer descriptor to its in use list
 */
static void
ql_add_lbuf_to_in_use_list(struct rx_ring *rx_ring,
    struct bq_desc *lbq_desc)
{
	uint32_t inuse_idx;

	inuse_idx = rx_ring->lbq_use_tail;

	rx_ring->lbuf_in_use[inuse_idx] = lbq_desc;
	inuse_idx++;
	if (inuse_idx >= rx_ring->lbq_len)
		inuse_idx = 0;
	rx_ring->lbq_use_tail = inuse_idx;
	atomic_inc_32(&rx_ring->lbuf_in_use_count);
}

/*
 * Get a large buffer descriptor from the in use list
963*bafec742SSukumar Swaminathan  */
964*bafec742SSukumar Swaminathan static struct bq_desc *
965*bafec742SSukumar Swaminathan ql_get_lbuf_from_in_use_list(struct rx_ring *rx_ring)
966*bafec742SSukumar Swaminathan {
967*bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
968*bafec742SSukumar Swaminathan 	uint32_t inuse_idx;
969*bafec742SSukumar Swaminathan 
970*bafec742SSukumar Swaminathan 	/* Pick from head of in use list */
971*bafec742SSukumar Swaminathan 	inuse_idx = rx_ring->lbq_use_head;
972*bafec742SSukumar Swaminathan 	lbq_desc = rx_ring->lbuf_in_use[inuse_idx];
973*bafec742SSukumar Swaminathan 	rx_ring->lbuf_in_use[inuse_idx] = NULL;
974*bafec742SSukumar Swaminathan 
975*bafec742SSukumar Swaminathan 	if (lbq_desc != NULL) {
976*bafec742SSukumar Swaminathan 		inuse_idx++;
977*bafec742SSukumar Swaminathan 		if (inuse_idx >= rx_ring->lbq_len)
978*bafec742SSukumar Swaminathan 			inuse_idx = 0;
979*bafec742SSukumar Swaminathan 		rx_ring->lbq_use_head = inuse_idx;
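		/*
		 * The buffer is being handed up the stack: upl_inuse
		 * marks it as loaned out and rx_indicate counts the
		 * buffers currently held upstream; the recycle path
		 * reverses both when the stack frees the mblk.
		 */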
980*bafec742SSukumar Swaminathan 		atomic_dec_32(&rx_ring->lbuf_in_use_count);
981*bafec742SSukumar Swaminathan 		atomic_inc_32(&rx_ring->rx_indicate);
982*bafec742SSukumar Swaminathan 		lbq_desc->upl_inuse = 1;
983*bafec742SSukumar Swaminathan 
		/* if the stack consumed the mblk, wrap the buffer again */
		if (lbq_desc->mp == NULL) {
987*bafec742SSukumar Swaminathan 			lbq_desc->mp =
988*bafec742SSukumar Swaminathan 			    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
989*bafec742SSukumar Swaminathan 			    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
990*bafec742SSukumar Swaminathan 		}
991*bafec742SSukumar Swaminathan 	}
992*bafec742SSukumar Swaminathan 	return (lbq_desc);
993*bafec742SSukumar Swaminathan }
994*bafec742SSukumar Swaminathan 
995*bafec742SSukumar Swaminathan /*
996*bafec742SSukumar Swaminathan  * Add a large buffer descriptor to free list
997*bafec742SSukumar Swaminathan  */
998*bafec742SSukumar Swaminathan static void
999*bafec742SSukumar Swaminathan ql_add_lbuf_to_free_list(struct rx_ring *rx_ring,
1000*bafec742SSukumar Swaminathan     struct bq_desc *lbq_desc)
1001*bafec742SSukumar Swaminathan {
1002*bafec742SSukumar Swaminathan 	uint32_t free_idx;
1003*bafec742SSukumar Swaminathan 
1004*bafec742SSukumar Swaminathan 	/* Add to the end of free list */
1005*bafec742SSukumar Swaminathan 	free_idx = rx_ring->lbq_free_tail;
1006*bafec742SSukumar Swaminathan 	rx_ring->lbuf_free[free_idx] = lbq_desc;
1007*bafec742SSukumar Swaminathan 	free_idx++;
1008*bafec742SSukumar Swaminathan 	if (free_idx >= rx_ring->lbq_len)
1009*bafec742SSukumar Swaminathan 		free_idx = 0;
1010*bafec742SSukumar Swaminathan 	rx_ring->lbq_free_tail = free_idx;
1011*bafec742SSukumar Swaminathan 	atomic_inc_32(&rx_ring->lbuf_free_count);
1012*bafec742SSukumar Swaminathan 	ASSERT(rx_ring->lbuf_free_count <= rx_ring->lbq_len);
1013*bafec742SSukumar Swaminathan }
1014*bafec742SSukumar Swaminathan 
1015*bafec742SSukumar Swaminathan /*
1016*bafec742SSukumar Swaminathan  * Get a large buffer descriptor from its free list
1017*bafec742SSukumar Swaminathan  */
1018*bafec742SSukumar Swaminathan static struct bq_desc *
1019*bafec742SSukumar Swaminathan ql_get_lbuf_from_free_list(struct rx_ring *rx_ring)
1020*bafec742SSukumar Swaminathan {
1021*bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1022*bafec742SSukumar Swaminathan 	uint32_t free_idx;
1023*bafec742SSukumar Swaminathan 
1024*bafec742SSukumar Swaminathan 	free_idx = rx_ring->lbq_free_head;
1025*bafec742SSukumar Swaminathan 	/* Pick from head of free list */
1026*bafec742SSukumar Swaminathan 	lbq_desc = rx_ring->lbuf_free[free_idx];
1027*bafec742SSukumar Swaminathan 	rx_ring->lbuf_free[free_idx] = NULL;
1028*bafec742SSukumar Swaminathan 
1029*bafec742SSukumar Swaminathan 	if (lbq_desc != NULL) {
1030*bafec742SSukumar Swaminathan 		free_idx++;
1031*bafec742SSukumar Swaminathan 		if (free_idx >= rx_ring->lbq_len)
1032*bafec742SSukumar Swaminathan 			free_idx = 0;
1033*bafec742SSukumar Swaminathan 		rx_ring->lbq_free_head = free_idx;
		ASSERT(rx_ring->lbuf_free_count != 0);
		atomic_dec_32(&rx_ring->lbuf_free_count);
1036*bafec742SSukumar Swaminathan 	}
1037*bafec742SSukumar Swaminathan 	return (lbq_desc);
1038*bafec742SSukumar Swaminathan }
1039*bafec742SSukumar Swaminathan 
/*
 * Recycle a small buffer descriptor: return it to the free list and,
 * when enough buffers have accumulated, re-arm the small buffer queue
 */
1043*bafec742SSukumar Swaminathan static void
1044*bafec742SSukumar Swaminathan ql_refill_sbuf_free_list(struct bq_desc *sbq_desc, boolean_t alloc_memory)
1045*bafec742SSukumar Swaminathan {
1046*bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring = sbq_desc->rx_ring;
1047*bafec742SSukumar Swaminathan 	uint64_t *sbq_entry;
	qlge_t *qlge = rx_ring->qlge;

	/* Sync access */
	mutex_enter(&rx_ring->sbq_lock);
1053*bafec742SSukumar Swaminathan 
1054*bafec742SSukumar Swaminathan 	sbq_desc->upl_inuse = 0;
1055*bafec742SSukumar Swaminathan 
1056*bafec742SSukumar Swaminathan 	/*
1057*bafec742SSukumar Swaminathan 	 * If we are freeing the buffers as a result of adapter unload, get out
1058*bafec742SSukumar Swaminathan 	 */
	if ((sbq_desc->free_buf != 0) ||
	    (qlge->mac_flags == QL_MAC_DETACH)) {
		if (sbq_desc->free_buf == 0)
1062*bafec742SSukumar Swaminathan 			atomic_dec_32(&rx_ring->rx_indicate);
1063*bafec742SSukumar Swaminathan 		mutex_exit(&rx_ring->sbq_lock);
1064*bafec742SSukumar Swaminathan 		return;
1065*bafec742SSukumar Swaminathan 	}
1066*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1067*bafec742SSukumar Swaminathan 	if (rx_ring->rx_indicate == 0)
1068*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "sbq: indicate wrong");
1069*bafec742SSukumar Swaminathan #endif
1070*bafec742SSukumar Swaminathan #ifdef QLGE_TRACK_BUFFER_USAGE
1071*bafec742SSukumar Swaminathan 	uint32_t sb_consumer_idx;
1072*bafec742SSukumar Swaminathan 	uint32_t sb_producer_idx;
1073*bafec742SSukumar Swaminathan 	uint32_t num_free_buffers;
1074*bafec742SSukumar Swaminathan 	uint32_t temp;
1075*bafec742SSukumar Swaminathan 
1076*bafec742SSukumar Swaminathan 	temp = ql_read_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg);
1077*bafec742SSukumar Swaminathan 	sb_producer_idx = temp & 0x0000ffff;
1078*bafec742SSukumar Swaminathan 	sb_consumer_idx = (temp >> 16);
1079*bafec742SSukumar Swaminathan 
1080*bafec742SSukumar Swaminathan 	if (sb_consumer_idx > sb_producer_idx)
1081*bafec742SSukumar Swaminathan 		num_free_buffers = NUM_SMALL_BUFFERS -
1082*bafec742SSukumar Swaminathan 		    (sb_consumer_idx - sb_producer_idx);
1083*bafec742SSukumar Swaminathan 	else
1084*bafec742SSukumar Swaminathan 		num_free_buffers = sb_producer_idx - sb_consumer_idx;
1085*bafec742SSukumar Swaminathan 
1086*bafec742SSukumar Swaminathan 	if (num_free_buffers < qlge->rx_sb_low_count[rx_ring->cq_id])
1087*bafec742SSukumar Swaminathan 		qlge->rx_sb_low_count[rx_ring->cq_id] = num_free_buffers;
1088*bafec742SSukumar Swaminathan 
1089*bafec742SSukumar Swaminathan #endif
1090*bafec742SSukumar Swaminathan 
1091*bafec742SSukumar Swaminathan 	ASSERT(sbq_desc->mp == NULL);
1092*bafec742SSukumar Swaminathan 
1093*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1094*bafec742SSukumar Swaminathan 	if (rx_ring->rx_indicate > 0xFF000000)
		cmn_err(CE_WARN, "sbq: ring %d rx_indicate wrong: %d,"
		    " mac_flags %d, sbq_desc index %d.",
		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
		    sbq_desc->index);
1099*bafec742SSukumar Swaminathan #endif
1100*bafec742SSukumar Swaminathan 	if (alloc_memory) {
1101*bafec742SSukumar Swaminathan 		sbq_desc->mp =
1102*bafec742SSukumar Swaminathan 		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1103*bafec742SSukumar Swaminathan 		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1104*bafec742SSukumar Swaminathan 		if (sbq_desc->mp == NULL) {
1105*bafec742SSukumar Swaminathan 			rx_ring->rx_failed_sbq_allocs++;
1106*bafec742SSukumar Swaminathan 		}
1107*bafec742SSukumar Swaminathan 	}
1108*bafec742SSukumar Swaminathan 
	/* Got the packet back from the stack; decrement rx_indicate count */
1110*bafec742SSukumar Swaminathan 	atomic_dec_32(&rx_ring->rx_indicate);
1111*bafec742SSukumar Swaminathan 
1112*bafec742SSukumar Swaminathan 	ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1113*bafec742SSukumar Swaminathan 
1114*bafec742SSukumar Swaminathan 	/* Rearm if possible */
1115*bafec742SSukumar Swaminathan 	if ((rx_ring->sbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1116*bafec742SSukumar Swaminathan 	    (qlge->mac_flags == QL_MAC_STARTED)) {
1117*bafec742SSukumar Swaminathan 		sbq_entry = rx_ring->sbq_dma.vaddr;
1118*bafec742SSukumar Swaminathan 		sbq_entry += rx_ring->sbq_prod_idx;
1119*bafec742SSukumar Swaminathan 
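		/*
		 * The small buffer queue itself is an array of
		 * little-endian 64-bit DMA addresses consumed by the
		 * chip; sbq_prod_idx is the next slot owned by
		 * software.  Buffers are queued in a batch here and
		 * the producer index is published with a single
		 * doorbell write below.
		 */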
1120*bafec742SSukumar Swaminathan 		while (rx_ring->sbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1121*bafec742SSukumar Swaminathan 			/* Get first one from free list */
1122*bafec742SSukumar Swaminathan 			sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
1123*bafec742SSukumar Swaminathan 
1124*bafec742SSukumar Swaminathan 			*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
1125*bafec742SSukumar Swaminathan 			sbq_entry++;
1126*bafec742SSukumar Swaminathan 			rx_ring->sbq_prod_idx++;
1127*bafec742SSukumar Swaminathan 			if (rx_ring->sbq_prod_idx >= rx_ring->sbq_len) {
1128*bafec742SSukumar Swaminathan 				rx_ring->sbq_prod_idx = 0;
1129*bafec742SSukumar Swaminathan 				sbq_entry = rx_ring->sbq_dma.vaddr;
1130*bafec742SSukumar Swaminathan 			}
1131*bafec742SSukumar Swaminathan 			/* Add to end of in use list */
1132*bafec742SSukumar Swaminathan 			ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
1133*bafec742SSukumar Swaminathan 		}
1134*bafec742SSukumar Swaminathan 
1135*bafec742SSukumar Swaminathan 		/* Update small buffer queue producer index */
1136*bafec742SSukumar Swaminathan 		ql_update_sbq_prod_idx(qlge, rx_ring);
1137*bafec742SSukumar Swaminathan 	}
1138*bafec742SSukumar Swaminathan 
1139*bafec742SSukumar Swaminathan 	mutex_exit(&rx_ring->sbq_lock);
1140*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_RX_RING, ("%s(%d) exited, sbuf_free_count %d\n",
1141*bafec742SSukumar Swaminathan 	    __func__, qlge->instance, rx_ring->sbuf_free_count));
1142*bafec742SSukumar Swaminathan }
1143*bafec742SSukumar Swaminathan 
1144*bafec742SSukumar Swaminathan /*
 * rx recycle callback function
1146*bafec742SSukumar Swaminathan  */
1147*bafec742SSukumar Swaminathan static void
1148*bafec742SSukumar Swaminathan ql_release_to_sbuf_free_list(caddr_t p)
1149*bafec742SSukumar Swaminathan {
1150*bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc = (struct bq_desc *)(void *)p;
1151*bafec742SSukumar Swaminathan 
1152*bafec742SSukumar Swaminathan 	if (sbq_desc == NULL)
1153*bafec742SSukumar Swaminathan 		return;
1154*bafec742SSukumar Swaminathan 	ql_refill_sbuf_free_list(sbq_desc, B_TRUE);
1155*bafec742SSukumar Swaminathan }
1156*bafec742SSukumar Swaminathan 
/*
 * Recycle a large buffer descriptor: return it to the free list and,
 * when enough buffers have accumulated, re-arm the large buffer queue
 */
1160*bafec742SSukumar Swaminathan static void
1161*bafec742SSukumar Swaminathan ql_refill_lbuf_free_list(struct bq_desc *lbq_desc, boolean_t alloc_memory)
1162*bafec742SSukumar Swaminathan {
1163*bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring = lbq_desc->rx_ring;
1164*bafec742SSukumar Swaminathan 	uint64_t *lbq_entry;
1165*bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
1166*bafec742SSukumar Swaminathan 
1167*bafec742SSukumar Swaminathan 	/* Sync access */
1168*bafec742SSukumar Swaminathan 	mutex_enter(&rx_ring->lbq_lock);
1169*bafec742SSukumar Swaminathan 
1170*bafec742SSukumar Swaminathan 	lbq_desc->upl_inuse = 0;
1171*bafec742SSukumar Swaminathan 	/*
1172*bafec742SSukumar Swaminathan 	 * If we are freeing the buffers as a result of adapter unload, get out
1173*bafec742SSukumar Swaminathan 	 */
	if ((lbq_desc->free_buf != 0) ||
	    (qlge->mac_flags == QL_MAC_DETACH)) {
		if (lbq_desc->free_buf == 0)
1177*bafec742SSukumar Swaminathan 			atomic_dec_32(&rx_ring->rx_indicate);
1178*bafec742SSukumar Swaminathan 		mutex_exit(&rx_ring->lbq_lock);
1179*bafec742SSukumar Swaminathan 		return;
1180*bafec742SSukumar Swaminathan 	}
1181*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1182*bafec742SSukumar Swaminathan 	if (rx_ring->rx_indicate == 0)
1183*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "lbq: indicate wrong");
1184*bafec742SSukumar Swaminathan #endif
1185*bafec742SSukumar Swaminathan #ifdef QLGE_TRACK_BUFFER_USAGE
1186*bafec742SSukumar Swaminathan 	uint32_t lb_consumer_idx;
1187*bafec742SSukumar Swaminathan 	uint32_t lb_producer_idx;
1188*bafec742SSukumar Swaminathan 	uint32_t num_free_buffers;
1189*bafec742SSukumar Swaminathan 	uint32_t temp;
1190*bafec742SSukumar Swaminathan 
1191*bafec742SSukumar Swaminathan 	temp = ql_read_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg);
1192*bafec742SSukumar Swaminathan 
1193*bafec742SSukumar Swaminathan 	lb_producer_idx = temp & 0x0000ffff;
1194*bafec742SSukumar Swaminathan 	lb_consumer_idx = (temp >> 16);
1195*bafec742SSukumar Swaminathan 
1196*bafec742SSukumar Swaminathan 	if (lb_consumer_idx > lb_producer_idx)
1197*bafec742SSukumar Swaminathan 		num_free_buffers = NUM_LARGE_BUFFERS -
1198*bafec742SSukumar Swaminathan 		    (lb_consumer_idx - lb_producer_idx);
1199*bafec742SSukumar Swaminathan 	else
1200*bafec742SSukumar Swaminathan 		num_free_buffers = lb_producer_idx - lb_consumer_idx;
1201*bafec742SSukumar Swaminathan 
1202*bafec742SSukumar Swaminathan 	if (num_free_buffers < qlge->rx_lb_low_count[rx_ring->cq_id]) {
1203*bafec742SSukumar Swaminathan 		qlge->rx_lb_low_count[rx_ring->cq_id] = num_free_buffers;
1204*bafec742SSukumar Swaminathan 	}
1205*bafec742SSukumar Swaminathan #endif
1206*bafec742SSukumar Swaminathan 
1207*bafec742SSukumar Swaminathan 	ASSERT(lbq_desc->mp == NULL);
1208*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1209*bafec742SSukumar Swaminathan 	if (rx_ring->rx_indicate > 0xFF000000)
		cmn_err(CE_WARN, "lbq: ring %d rx_indicate wrong: %d,"
		    " mac_flags %d, lbq_desc index %d.",
		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
		    lbq_desc->index);
1214*bafec742SSukumar Swaminathan #endif
1215*bafec742SSukumar Swaminathan 	if (alloc_memory) {
1216*bafec742SSukumar Swaminathan 		lbq_desc->mp =
1217*bafec742SSukumar Swaminathan 		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1218*bafec742SSukumar Swaminathan 		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1219*bafec742SSukumar Swaminathan 		if (lbq_desc->mp == NULL) {
1220*bafec742SSukumar Swaminathan 			rx_ring->rx_failed_lbq_allocs++;
1221*bafec742SSukumar Swaminathan 		}
1222*bafec742SSukumar Swaminathan 	}
1223*bafec742SSukumar Swaminathan 
	/* Got the packet back from the stack; decrement rx_indicate count */
1225*bafec742SSukumar Swaminathan 	atomic_dec_32(&rx_ring->rx_indicate);
1226*bafec742SSukumar Swaminathan 
1227*bafec742SSukumar Swaminathan 	ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1228*bafec742SSukumar Swaminathan 
1229*bafec742SSukumar Swaminathan 	/* Rearm if possible */
1230*bafec742SSukumar Swaminathan 	if ((rx_ring->lbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1231*bafec742SSukumar Swaminathan 	    (qlge->mac_flags == QL_MAC_STARTED)) {
1232*bafec742SSukumar Swaminathan 		lbq_entry = rx_ring->lbq_dma.vaddr;
1233*bafec742SSukumar Swaminathan 		lbq_entry += rx_ring->lbq_prod_idx;
1234*bafec742SSukumar Swaminathan 		while (rx_ring->lbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1235*bafec742SSukumar Swaminathan 			/* Get first one from free list */
1236*bafec742SSukumar Swaminathan 			lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
1237*bafec742SSukumar Swaminathan 
1238*bafec742SSukumar Swaminathan 			*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
1239*bafec742SSukumar Swaminathan 			lbq_entry++;
1240*bafec742SSukumar Swaminathan 			rx_ring->lbq_prod_idx++;
1241*bafec742SSukumar Swaminathan 			if (rx_ring->lbq_prod_idx >= rx_ring->lbq_len) {
1242*bafec742SSukumar Swaminathan 				rx_ring->lbq_prod_idx = 0;
1243*bafec742SSukumar Swaminathan 				lbq_entry = rx_ring->lbq_dma.vaddr;
1244*bafec742SSukumar Swaminathan 			}
1245*bafec742SSukumar Swaminathan 
1246*bafec742SSukumar Swaminathan 			/* Add to end of in use list */
1247*bafec742SSukumar Swaminathan 			ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
1248*bafec742SSukumar Swaminathan 		}
1249*bafec742SSukumar Swaminathan 
1250*bafec742SSukumar Swaminathan 		/* Update large buffer queue producer index */
1251*bafec742SSukumar Swaminathan 		ql_update_lbq_prod_idx(rx_ring->qlge, rx_ring);
1252*bafec742SSukumar Swaminathan 	}
1253*bafec742SSukumar Swaminathan 
1254*bafec742SSukumar Swaminathan 	mutex_exit(&rx_ring->lbq_lock);
	QL_PRINT(DBG_RX_RING, ("%s exited, lbuf_free_count %d\n",
1256*bafec742SSukumar Swaminathan 	    __func__, rx_ring->lbuf_free_count));
1257*bafec742SSukumar Swaminathan }

/*
 * rx recycle callback function
1260*bafec742SSukumar Swaminathan  */
1261*bafec742SSukumar Swaminathan static void
1262*bafec742SSukumar Swaminathan ql_release_to_lbuf_free_list(caddr_t p)
1263*bafec742SSukumar Swaminathan {
1264*bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc = (struct bq_desc *)(void *)p;
1265*bafec742SSukumar Swaminathan 
1266*bafec742SSukumar Swaminathan 	if (lbq_desc == NULL)
1267*bafec742SSukumar Swaminathan 		return;
1268*bafec742SSukumar Swaminathan 	ql_refill_lbuf_free_list(lbq_desc, B_TRUE);
1269*bafec742SSukumar Swaminathan }
1270*bafec742SSukumar Swaminathan 
1271*bafec742SSukumar Swaminathan /*
1272*bafec742SSukumar Swaminathan  * free small buffer queue buffers
1273*bafec742SSukumar Swaminathan  */
1274*bafec742SSukumar Swaminathan static void
1275*bafec742SSukumar Swaminathan ql_free_sbq_buffers(struct rx_ring *rx_ring)
1276*bafec742SSukumar Swaminathan {
1277*bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
1278*bafec742SSukumar Swaminathan 	uint32_t i;
1279*bafec742SSukumar Swaminathan 	uint32_t j = rx_ring->sbq_free_head;
1280*bafec742SSukumar Swaminathan 	int  force_cnt = 0;
1281*bafec742SSukumar Swaminathan 
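	/*
	 * Mark every descriptor so that the recycle callback bails
	 * out instead of re-arming the queue; buffers still loaned
	 * upstream (upl_inuse set) are simply dropped when the stack
	 * eventually frees them.
	 */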
1282*bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->sbuf_free_count; i++) {
1283*bafec742SSukumar Swaminathan 		sbq_desc = rx_ring->sbuf_free[j];
1284*bafec742SSukumar Swaminathan 		sbq_desc->free_buf = 1;
1285*bafec742SSukumar Swaminathan 		j++;
1286*bafec742SSukumar Swaminathan 		if (j >= rx_ring->sbq_len) {
1287*bafec742SSukumar Swaminathan 			j = 0;
1288*bafec742SSukumar Swaminathan 		}
1289*bafec742SSukumar Swaminathan 		if (sbq_desc->mp != NULL) {
1290*bafec742SSukumar Swaminathan 			freemsg(sbq_desc->mp);
1291*bafec742SSukumar Swaminathan 			sbq_desc->mp = NULL;
1292*bafec742SSukumar Swaminathan 		}
1293*bafec742SSukumar Swaminathan 	}
1294*bafec742SSukumar Swaminathan 	rx_ring->sbuf_free_count = 0;
1295*bafec742SSukumar Swaminathan 
1296*bafec742SSukumar Swaminathan 	j = rx_ring->sbq_use_head;
1297*bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
1298*bafec742SSukumar Swaminathan 		sbq_desc = rx_ring->sbuf_in_use[j];
1299*bafec742SSukumar Swaminathan 		sbq_desc->free_buf = 1;
1300*bafec742SSukumar Swaminathan 		j++;
1301*bafec742SSukumar Swaminathan 		if (j >= rx_ring->sbq_len) {
1302*bafec742SSukumar Swaminathan 			j = 0;
1303*bafec742SSukumar Swaminathan 		}
1304*bafec742SSukumar Swaminathan 		if (sbq_desc->mp != NULL) {
1305*bafec742SSukumar Swaminathan 			freemsg(sbq_desc->mp);
1306*bafec742SSukumar Swaminathan 			sbq_desc->mp = NULL;
1307*bafec742SSukumar Swaminathan 		}
1308*bafec742SSukumar Swaminathan 	}
1309*bafec742SSukumar Swaminathan 	rx_ring->sbuf_in_use_count = 0;
1310*bafec742SSukumar Swaminathan 
1311*bafec742SSukumar Swaminathan 	sbq_desc = &rx_ring->sbq_desc[0];
1312*bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1313*bafec742SSukumar Swaminathan 		/*
1314*bafec742SSukumar Swaminathan 		 * Set flag so that the callback does not allocate a new buffer
1315*bafec742SSukumar Swaminathan 		 */
1316*bafec742SSukumar Swaminathan 		sbq_desc->free_buf = 1;
1317*bafec742SSukumar Swaminathan 		if (sbq_desc->upl_inuse != 0) {
1318*bafec742SSukumar Swaminathan 			force_cnt++;
1319*bafec742SSukumar Swaminathan 		}
1320*bafec742SSukumar Swaminathan 		if (sbq_desc->bd_dma.dma_handle != NULL) {
1321*bafec742SSukumar Swaminathan 			ql_free_phys(&sbq_desc->bd_dma.dma_handle,
1322*bafec742SSukumar Swaminathan 			    &sbq_desc->bd_dma.acc_handle);
1323*bafec742SSukumar Swaminathan 			sbq_desc->bd_dma.dma_handle = NULL;
1324*bafec742SSukumar Swaminathan 			sbq_desc->bd_dma.acc_handle = NULL;
1325*bafec742SSukumar Swaminathan 		}
1326*bafec742SSukumar Swaminathan 	}
1327*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
	cmn_err(CE_NOTE, "sbq: free %d inuse %d force %d",
1329*bafec742SSukumar Swaminathan 	    rx_ring->sbuf_free_count, rx_ring->sbuf_in_use_count, force_cnt);
1330*bafec742SSukumar Swaminathan #endif
1331*bafec742SSukumar Swaminathan 	if (rx_ring->sbuf_in_use != NULL) {
1332*bafec742SSukumar Swaminathan 		kmem_free(rx_ring->sbuf_in_use, (rx_ring->sbq_len *
1333*bafec742SSukumar Swaminathan 		    sizeof (struct bq_desc *)));
1334*bafec742SSukumar Swaminathan 		rx_ring->sbuf_in_use = NULL;
1335*bafec742SSukumar Swaminathan 	}
1336*bafec742SSukumar Swaminathan 
1337*bafec742SSukumar Swaminathan 	if (rx_ring->sbuf_free != NULL) {
1338*bafec742SSukumar Swaminathan 		kmem_free(rx_ring->sbuf_free, (rx_ring->sbq_len *
1339*bafec742SSukumar Swaminathan 		    sizeof (struct bq_desc *)));
1340*bafec742SSukumar Swaminathan 		rx_ring->sbuf_free = NULL;
1341*bafec742SSukumar Swaminathan 	}
1342*bafec742SSukumar Swaminathan }
1343*bafec742SSukumar Swaminathan 
1344*bafec742SSukumar Swaminathan /* Allocate small buffers */
1345*bafec742SSukumar Swaminathan static int
1346*bafec742SSukumar Swaminathan ql_alloc_sbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1347*bafec742SSukumar Swaminathan {
1348*bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
1349*bafec742SSukumar Swaminathan 	int i;
1350*bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
1351*bafec742SSukumar Swaminathan 
1352*bafec742SSukumar Swaminathan 	rx_ring->sbuf_free = kmem_zalloc(rx_ring->sbq_len *
1353*bafec742SSukumar Swaminathan 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1354*bafec742SSukumar Swaminathan 	if (rx_ring->sbuf_free == NULL) {
1355*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
1356*bafec742SSukumar Swaminathan 		    "!%s: sbuf_free_list alloc: failed",
1357*bafec742SSukumar Swaminathan 		    __func__);
1358*bafec742SSukumar Swaminathan 		rx_ring->sbuf_free_count = 0;
1359*bafec742SSukumar Swaminathan 		goto alloc_sbuf_err;
1360*bafec742SSukumar Swaminathan 	}
1361*bafec742SSukumar Swaminathan 
1362*bafec742SSukumar Swaminathan 	rx_ring->sbuf_in_use = kmem_zalloc(rx_ring->sbq_len *
1363*bafec742SSukumar Swaminathan 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1364*bafec742SSukumar Swaminathan 	if (rx_ring->sbuf_in_use == NULL) {
1365*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
1366*bafec742SSukumar Swaminathan 		    "!%s: sbuf_inuse_list alloc: failed",
1367*bafec742SSukumar Swaminathan 		    __func__);
1368*bafec742SSukumar Swaminathan 		rx_ring->sbuf_in_use_count = 0;
1369*bafec742SSukumar Swaminathan 		goto alloc_sbuf_err;
1370*bafec742SSukumar Swaminathan 	}
1371*bafec742SSukumar Swaminathan 	rx_ring->sbq_use_head = 0;
1372*bafec742SSukumar Swaminathan 	rx_ring->sbq_use_tail = 0;
1373*bafec742SSukumar Swaminathan 	rx_ring->sbq_free_head = 0;
1374*bafec742SSukumar Swaminathan 	rx_ring->sbq_free_tail = 0;
1375*bafec742SSukumar Swaminathan 	sbq_desc = &rx_ring->sbq_desc[0];
1376*bafec742SSukumar Swaminathan 
1377*bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1378*bafec742SSukumar Swaminathan 		/* Allocate buffer */
1379*bafec742SSukumar Swaminathan 		if (ql_alloc_phys(qlge->dip, &sbq_desc->bd_dma.dma_handle,
1380*bafec742SSukumar Swaminathan 		    &ql_buf_acc_attr,
1381*bafec742SSukumar Swaminathan 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1382*bafec742SSukumar Swaminathan 		    &sbq_desc->bd_dma.acc_handle,
1383*bafec742SSukumar Swaminathan 		    (size_t)rx_ring->sbq_buf_size,	/* mem size */
1384*bafec742SSukumar Swaminathan 		    (size_t)0,				/* default alignment */
1385*bafec742SSukumar Swaminathan 		    (caddr_t *)&sbq_desc->bd_dma.vaddr,
1386*bafec742SSukumar Swaminathan 		    &dma_cookie) != 0) {
			cmn_err(CE_WARN,
			    "!%s: ql_alloc_phys: failed",
			    __func__);
1390*bafec742SSukumar Swaminathan 			goto alloc_sbuf_err;
1391*bafec742SSukumar Swaminathan 		}
1392*bafec742SSukumar Swaminathan 
1393*bafec742SSukumar Swaminathan 		/* Set context for Return buffer callback */
1394*bafec742SSukumar Swaminathan 		sbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1395*bafec742SSukumar Swaminathan 		sbq_desc->rx_recycle.free_func = ql_release_to_sbuf_free_list;
1396*bafec742SSukumar Swaminathan 		sbq_desc->rx_recycle.free_arg  = (caddr_t)sbq_desc;
1397*bafec742SSukumar Swaminathan 		sbq_desc->rx_ring = rx_ring;
1398*bafec742SSukumar Swaminathan 		sbq_desc->upl_inuse = 0;
1399*bafec742SSukumar Swaminathan 		sbq_desc->free_buf = 0;
1400*bafec742SSukumar Swaminathan 
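		/*
		 * Wrap the DMA buffer in an mblk for zero-copy receive;
		 * rx_recycle (set above) routes freemsg() back to
		 * ql_release_to_sbuf_free_list() so the buffer is
		 * recycled rather than freed.
		 */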
1401*bafec742SSukumar Swaminathan 		sbq_desc->mp =
1402*bafec742SSukumar Swaminathan 		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1403*bafec742SSukumar Swaminathan 		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1404*bafec742SSukumar Swaminathan 		if (sbq_desc->mp == NULL) {
1405*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1406*bafec742SSukumar Swaminathan 			goto alloc_sbuf_err;
1407*bafec742SSukumar Swaminathan 		}
1408*bafec742SSukumar Swaminathan 		ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1409*bafec742SSukumar Swaminathan 	}
1410*bafec742SSukumar Swaminathan 
1411*bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
1412*bafec742SSukumar Swaminathan 
1413*bafec742SSukumar Swaminathan alloc_sbuf_err:
1414*bafec742SSukumar Swaminathan 	ql_free_sbq_buffers(rx_ring);
1415*bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
1416*bafec742SSukumar Swaminathan }
1417*bafec742SSukumar Swaminathan 
1418*bafec742SSukumar Swaminathan static void
1419*bafec742SSukumar Swaminathan ql_free_lbq_buffers(struct rx_ring *rx_ring)
1420*bafec742SSukumar Swaminathan {
1421*bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1422*bafec742SSukumar Swaminathan 	uint32_t i, j;
1423*bafec742SSukumar Swaminathan 	int force_cnt = 0;
1424*bafec742SSukumar Swaminathan 
1425*bafec742SSukumar Swaminathan 	j = rx_ring->lbq_free_head;
1426*bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->lbuf_free_count; i++) {
1427*bafec742SSukumar Swaminathan 		lbq_desc = rx_ring->lbuf_free[j];
1428*bafec742SSukumar Swaminathan 		lbq_desc->free_buf = 1;
1429*bafec742SSukumar Swaminathan 		j++;
1430*bafec742SSukumar Swaminathan 		if (j >= rx_ring->lbq_len)
1431*bafec742SSukumar Swaminathan 			j = 0;
1432*bafec742SSukumar Swaminathan 		if (lbq_desc->mp != NULL) {
1433*bafec742SSukumar Swaminathan 			freemsg(lbq_desc->mp);
1434*bafec742SSukumar Swaminathan 			lbq_desc->mp = NULL;
1435*bafec742SSukumar Swaminathan 		}
1436*bafec742SSukumar Swaminathan 	}
1437*bafec742SSukumar Swaminathan 	rx_ring->lbuf_free_count = 0;
1438*bafec742SSukumar Swaminathan 
1439*bafec742SSukumar Swaminathan 	j = rx_ring->lbq_use_head;
1440*bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
1441*bafec742SSukumar Swaminathan 		lbq_desc = rx_ring->lbuf_in_use[j];
1442*bafec742SSukumar Swaminathan 		lbq_desc->free_buf = 1;
1443*bafec742SSukumar Swaminathan 		j++;
1444*bafec742SSukumar Swaminathan 		if (j >= rx_ring->lbq_len) {
1445*bafec742SSukumar Swaminathan 			j = 0;
1446*bafec742SSukumar Swaminathan 		}
1447*bafec742SSukumar Swaminathan 		if (lbq_desc->mp != NULL) {
1448*bafec742SSukumar Swaminathan 			freemsg(lbq_desc->mp);
1449*bafec742SSukumar Swaminathan 			lbq_desc->mp = NULL;
1450*bafec742SSukumar Swaminathan 		}
1451*bafec742SSukumar Swaminathan 	}
1452*bafec742SSukumar Swaminathan 	rx_ring->lbuf_in_use_count = 0;
1453*bafec742SSukumar Swaminathan 
1454*bafec742SSukumar Swaminathan 	lbq_desc = &rx_ring->lbq_desc[0];
1455*bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1456*bafec742SSukumar Swaminathan 		/* Set flag so that callback will not allocate a new buffer */
1457*bafec742SSukumar Swaminathan 		lbq_desc->free_buf = 1;
1458*bafec742SSukumar Swaminathan 		if (lbq_desc->upl_inuse != 0) {
1459*bafec742SSukumar Swaminathan 			force_cnt++;
1460*bafec742SSukumar Swaminathan 		}
1461*bafec742SSukumar Swaminathan 		if (lbq_desc->bd_dma.dma_handle != NULL) {
1462*bafec742SSukumar Swaminathan 			ql_free_phys(&lbq_desc->bd_dma.dma_handle,
1463*bafec742SSukumar Swaminathan 			    &lbq_desc->bd_dma.acc_handle);
1464*bafec742SSukumar Swaminathan 			lbq_desc->bd_dma.dma_handle = NULL;
1465*bafec742SSukumar Swaminathan 			lbq_desc->bd_dma.acc_handle = NULL;
1466*bafec742SSukumar Swaminathan 		}
1467*bafec742SSukumar Swaminathan 	}
1468*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1469*bafec742SSukumar Swaminathan 	if (force_cnt) {
1470*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "lbq: free %d inuse %d force %d",
1471*bafec742SSukumar Swaminathan 		    rx_ring->lbuf_free_count, rx_ring->lbuf_in_use_count,
1472*bafec742SSukumar Swaminathan 		    force_cnt);
1473*bafec742SSukumar Swaminathan 	}
1474*bafec742SSukumar Swaminathan #endif
1475*bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_in_use != NULL) {
1476*bafec742SSukumar Swaminathan 		kmem_free(rx_ring->lbuf_in_use, (rx_ring->lbq_len *
1477*bafec742SSukumar Swaminathan 		    sizeof (struct bq_desc *)));
1478*bafec742SSukumar Swaminathan 		rx_ring->lbuf_in_use = NULL;
1479*bafec742SSukumar Swaminathan 	}
1480*bafec742SSukumar Swaminathan 
1481*bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_free != NULL) {
1482*bafec742SSukumar Swaminathan 		kmem_free(rx_ring->lbuf_free, (rx_ring->lbq_len *
1483*bafec742SSukumar Swaminathan 		    sizeof (struct bq_desc *)));
1484*bafec742SSukumar Swaminathan 		rx_ring->lbuf_free = NULL;
1485*bafec742SSukumar Swaminathan 	}
1486*bafec742SSukumar Swaminathan }
1487*bafec742SSukumar Swaminathan 
1488*bafec742SSukumar Swaminathan /* Allocate large buffers */
1489*bafec742SSukumar Swaminathan static int
1490*bafec742SSukumar Swaminathan ql_alloc_lbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1491*bafec742SSukumar Swaminathan {
1492*bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1493*bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
1494*bafec742SSukumar Swaminathan 	int i;
1495*bafec742SSukumar Swaminathan 	uint32_t lbq_buf_size;
1496*bafec742SSukumar Swaminathan 
1497*bafec742SSukumar Swaminathan 	rx_ring->lbuf_free = kmem_zalloc(rx_ring->lbq_len *
1498*bafec742SSukumar Swaminathan 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1499*bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_free == NULL) {
1500*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
1501*bafec742SSukumar Swaminathan 		    "!%s: lbuf_free_list alloc: failed",
1502*bafec742SSukumar Swaminathan 		    __func__);
1503*bafec742SSukumar Swaminathan 		rx_ring->lbuf_free_count = 0;
1504*bafec742SSukumar Swaminathan 		goto alloc_lbuf_err;
1505*bafec742SSukumar Swaminathan 	}
1506*bafec742SSukumar Swaminathan 
1507*bafec742SSukumar Swaminathan 	rx_ring->lbuf_in_use = kmem_zalloc(rx_ring->lbq_len *
1508*bafec742SSukumar Swaminathan 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1509*bafec742SSukumar Swaminathan 
1510*bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_in_use == NULL) {
1511*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
1512*bafec742SSukumar Swaminathan 		    "!%s: lbuf_inuse_list alloc: failed",
1513*bafec742SSukumar Swaminathan 		    __func__);
1514*bafec742SSukumar Swaminathan 		rx_ring->lbuf_in_use_count = 0;
1515*bafec742SSukumar Swaminathan 		goto alloc_lbuf_err;
1516*bafec742SSukumar Swaminathan 	}
1517*bafec742SSukumar Swaminathan 	rx_ring->lbq_use_head = 0;
1518*bafec742SSukumar Swaminathan 	rx_ring->lbq_use_tail = 0;
1519*bafec742SSukumar Swaminathan 	rx_ring->lbq_free_head = 0;
1520*bafec742SSukumar Swaminathan 	rx_ring->lbq_free_tail = 0;
1521*bafec742SSukumar Swaminathan 
1522*bafec742SSukumar Swaminathan 	lbq_buf_size = (qlge->mtu == ETHERMTU) ?
1523*bafec742SSukumar Swaminathan 	    NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE;
1524*bafec742SSukumar Swaminathan 
1525*bafec742SSukumar Swaminathan 	lbq_desc = &rx_ring->lbq_desc[0];
1526*bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1527*bafec742SSukumar Swaminathan 		rx_ring->lbq_buf_size = lbq_buf_size;
1528*bafec742SSukumar Swaminathan 		/* Allocate buffer */
1529*bafec742SSukumar Swaminathan 		if (ql_alloc_phys(qlge->dip, &lbq_desc->bd_dma.dma_handle,
1530*bafec742SSukumar Swaminathan 		    &ql_buf_acc_attr,
1531*bafec742SSukumar Swaminathan 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1532*bafec742SSukumar Swaminathan 		    &lbq_desc->bd_dma.acc_handle,
1533*bafec742SSukumar Swaminathan 		    (size_t)rx_ring->lbq_buf_size,  /* mem size */
1534*bafec742SSukumar Swaminathan 		    (size_t)0, /* default alignment */
1535*bafec742SSukumar Swaminathan 		    (caddr_t *)&lbq_desc->bd_dma.vaddr,
1536*bafec742SSukumar Swaminathan 		    &dma_cookie) != 0) {
			cmn_err(CE_WARN,
			    "!%s: ql_alloc_phys: failed",
			    __func__);
1540*bafec742SSukumar Swaminathan 			goto alloc_lbuf_err;
1541*bafec742SSukumar Swaminathan 		}
1542*bafec742SSukumar Swaminathan 
1543*bafec742SSukumar Swaminathan 		/* Set context for Return buffer callback */
1544*bafec742SSukumar Swaminathan 		lbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1545*bafec742SSukumar Swaminathan 		lbq_desc->rx_recycle.free_func = ql_release_to_lbuf_free_list;
1546*bafec742SSukumar Swaminathan 		lbq_desc->rx_recycle.free_arg  = (caddr_t)lbq_desc;
1547*bafec742SSukumar Swaminathan 		lbq_desc->rx_ring = rx_ring;
1548*bafec742SSukumar Swaminathan 		lbq_desc->upl_inuse = 0;
1549*bafec742SSukumar Swaminathan 		lbq_desc->free_buf = 0;
1550*bafec742SSukumar Swaminathan 
1551*bafec742SSukumar Swaminathan 		lbq_desc->mp =
1552*bafec742SSukumar Swaminathan 		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1553*bafec742SSukumar Swaminathan 		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1554*bafec742SSukumar Swaminathan 		if (lbq_desc->mp == NULL) {
1555*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1556*bafec742SSukumar Swaminathan 			goto alloc_lbuf_err;
1557*bafec742SSukumar Swaminathan 		}
1558*bafec742SSukumar Swaminathan 		ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1559*bafec742SSukumar Swaminathan 	} /* For all large buffers */
1560*bafec742SSukumar Swaminathan 
1561*bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
1562*bafec742SSukumar Swaminathan 
1563*bafec742SSukumar Swaminathan alloc_lbuf_err:
1564*bafec742SSukumar Swaminathan 	ql_free_lbq_buffers(rx_ring);
1565*bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
1566*bafec742SSukumar Swaminathan }
1567*bafec742SSukumar Swaminathan 
1568*bafec742SSukumar Swaminathan /*
1569*bafec742SSukumar Swaminathan  * Free rx buffers
1570*bafec742SSukumar Swaminathan  */
1571*bafec742SSukumar Swaminathan static void
1572*bafec742SSukumar Swaminathan ql_free_rx_buffers(qlge_t *qlge)
1573*bafec742SSukumar Swaminathan {
1574*bafec742SSukumar Swaminathan 	int i;
1575*bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
1576*bafec742SSukumar Swaminathan 
1577*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
1578*bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
1579*bafec742SSukumar Swaminathan 		if (rx_ring->type != TX_Q) {
1580*bafec742SSukumar Swaminathan 			ql_free_lbq_buffers(rx_ring);
1581*bafec742SSukumar Swaminathan 			ql_free_sbq_buffers(rx_ring);
1582*bafec742SSukumar Swaminathan 		}
1583*bafec742SSukumar Swaminathan 	}
1584*bafec742SSukumar Swaminathan }
1585*bafec742SSukumar Swaminathan 
1586*bafec742SSukumar Swaminathan /*
1587*bafec742SSukumar Swaminathan  * Allocate rx buffers
1588*bafec742SSukumar Swaminathan  */
1589*bafec742SSukumar Swaminathan static int
1590*bafec742SSukumar Swaminathan ql_alloc_rx_buffers(qlge_t *qlge)
1591*bafec742SSukumar Swaminathan {
1592*bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
1593*bafec742SSukumar Swaminathan 	int i;
1594*bafec742SSukumar Swaminathan 
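	/*
	 * Completion rings that only service transmits (TX_Q) carry
	 * no receive buffers; only rx-capable rings get small and
	 * large buffer pools.
	 */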
1595*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
1596*bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
1597*bafec742SSukumar Swaminathan 		if (rx_ring->type != TX_Q) {
1598*bafec742SSukumar Swaminathan 			if (ql_alloc_sbufs(qlge, rx_ring) != DDI_SUCCESS)
1599*bafec742SSukumar Swaminathan 				goto alloc_err;
1600*bafec742SSukumar Swaminathan 			if (ql_alloc_lbufs(qlge, rx_ring) != DDI_SUCCESS)
1601*bafec742SSukumar Swaminathan 				goto alloc_err;
1602*bafec742SSukumar Swaminathan 		}
1603*bafec742SSukumar Swaminathan 	}
1604*bafec742SSukumar Swaminathan #ifdef QLGE_TRACK_BUFFER_USAGE
1605*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
1606*bafec742SSukumar Swaminathan 		if (qlge->rx_ring[i].type == RX_Q) {
1607*bafec742SSukumar Swaminathan 			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
1608*bafec742SSukumar Swaminathan 			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
1609*bafec742SSukumar Swaminathan 		}
1610*bafec742SSukumar Swaminathan 		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
1611*bafec742SSukumar Swaminathan 	}
1612*bafec742SSukumar Swaminathan #endif
1613*bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
1614*bafec742SSukumar Swaminathan 
1615*bafec742SSukumar Swaminathan alloc_err:
1616*bafec742SSukumar Swaminathan 
1617*bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
1618*bafec742SSukumar Swaminathan }
1619*bafec742SSukumar Swaminathan 
1620*bafec742SSukumar Swaminathan /*
1621*bafec742SSukumar Swaminathan  * Initialize large buffer queue ring
1622*bafec742SSukumar Swaminathan  */
1623*bafec742SSukumar Swaminathan static void
1624*bafec742SSukumar Swaminathan ql_init_lbq_ring(struct rx_ring *rx_ring)
1625*bafec742SSukumar Swaminathan {
1626*bafec742SSukumar Swaminathan 	uint16_t i;
1627*bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1628*bafec742SSukumar Swaminathan 
1629*bafec742SSukumar Swaminathan 	bzero(rx_ring->lbq_desc, rx_ring->lbq_len * sizeof (struct bq_desc));
1630*bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->lbq_len; i++) {
1631*bafec742SSukumar Swaminathan 		lbq_desc = &rx_ring->lbq_desc[i];
1632*bafec742SSukumar Swaminathan 		lbq_desc->index = i;
1633*bafec742SSukumar Swaminathan 	}
1634*bafec742SSukumar Swaminathan }
1635*bafec742SSukumar Swaminathan 
1636*bafec742SSukumar Swaminathan /*
1637*bafec742SSukumar Swaminathan  * Initialize small buffer queue ring
1638*bafec742SSukumar Swaminathan  */
1639*bafec742SSukumar Swaminathan static void
1640*bafec742SSukumar Swaminathan ql_init_sbq_ring(struct rx_ring *rx_ring)
1641*bafec742SSukumar Swaminathan {
1642*bafec742SSukumar Swaminathan 	uint16_t i;
1643*bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
1644*bafec742SSukumar Swaminathan 
1645*bafec742SSukumar Swaminathan 	bzero(rx_ring->sbq_desc, rx_ring->sbq_len * sizeof (struct bq_desc));
1646*bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->sbq_len; i++) {
1647*bafec742SSukumar Swaminathan 		sbq_desc = &rx_ring->sbq_desc[i];
1648*bafec742SSukumar Swaminathan 		sbq_desc->index = i;
1649*bafec742SSukumar Swaminathan 	}
1650*bafec742SSukumar Swaminathan }
1651*bafec742SSukumar Swaminathan 
/*
 * Calculate the pseudo-header checksum when the hardware cannot do it
 */
1655*bafec742SSukumar Swaminathan static void
1656*bafec742SSukumar Swaminathan ql_pseudo_cksum(uint8_t *buf)
1657*bafec742SSukumar Swaminathan {
1658*bafec742SSukumar Swaminathan 	uint32_t cksum;
1659*bafec742SSukumar Swaminathan 	uint16_t iphl;
1660*bafec742SSukumar Swaminathan 	uint16_t proto;
1661*bafec742SSukumar Swaminathan 
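	/*
	 * buf points at the IPv4 header:
	 *   buf[0]      version/IHL, so header length = 4 * IHL
	 *   buf[2..3]   total length, so TCP/UDP length = total - iphl
	 *   buf[9]      protocol
	 *   buf[12..15] source address
	 *   buf[16..19] destination address
	 * These are exactly the pseudo-header fields summed below.
	 */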
1662*bafec742SSukumar Swaminathan 	iphl = (uint16_t)(4 * (buf[0] & 0xF));
1663*bafec742SSukumar Swaminathan 	cksum = (((uint16_t)buf[2])<<8) + buf[3] - iphl;
	proto = buf[9];
	cksum += proto;
1665*bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[12])<<8) + buf[13];
1666*bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[14])<<8) + buf[15];
1667*bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[16])<<8) + buf[17];
1668*bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[18])<<8) + buf[19];
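	/*
	 * Fold the 32-bit sum into 16 bits; the second pass absorbs
	 * any carry produced by the first fold.
	 */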
1669*bafec742SSukumar Swaminathan 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1670*bafec742SSukumar Swaminathan 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1671*bafec742SSukumar Swaminathan 
1672*bafec742SSukumar Swaminathan 	/*
1673*bafec742SSukumar Swaminathan 	 * Point it to the TCP/UDP header, and
1674*bafec742SSukumar Swaminathan 	 * update the checksum field.
1675*bafec742SSukumar Swaminathan 	 */
1676*bafec742SSukumar Swaminathan 	buf += iphl + ((proto == IPPROTO_TCP) ?
1677*bafec742SSukumar Swaminathan 	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
1678*bafec742SSukumar Swaminathan 
1679*bafec742SSukumar Swaminathan 	*(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
1680*bafec742SSukumar Swaminathan 
1681*bafec742SSukumar Swaminathan }
1682*bafec742SSukumar Swaminathan 
1683*bafec742SSukumar Swaminathan /*
 * Transmit packets handed down from the MAC layer on this tx ring.
1685*bafec742SSukumar Swaminathan  */
1686*bafec742SSukumar Swaminathan mblk_t *
1687*bafec742SSukumar Swaminathan ql_ring_tx(void *arg, mblk_t *mp)
1688*bafec742SSukumar Swaminathan {
1689*bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring = (struct tx_ring *)arg;
1690*bafec742SSukumar Swaminathan 	qlge_t *qlge = tx_ring->qlge;
1691*bafec742SSukumar Swaminathan 	mblk_t *next;
1692*bafec742SSukumar Swaminathan 	int rval;
1693*bafec742SSukumar Swaminathan 	uint32_t tx_count = 0;
1694*bafec742SSukumar Swaminathan 
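	/*
	 * GLDv3 tx entry point for this ring: any mblks that cannot
	 * be sent are returned to the caller, which holds them until
	 * mac_tx_ring_update() reports free slots.
	 */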
1695*bafec742SSukumar Swaminathan 	if (qlge->port_link_state == LS_DOWN) {
		/* cannot send while the link is down */
1697*bafec742SSukumar Swaminathan 		mblk_t *tp;
1698*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "tx failed due to link down");
1699*bafec742SSukumar Swaminathan 
1700*bafec742SSukumar Swaminathan 		while (mp != NULL) {
1701*bafec742SSukumar Swaminathan 			tp = mp->b_next;
1702*bafec742SSukumar Swaminathan 			mp->b_next = NULL;
1703*bafec742SSukumar Swaminathan 			freemsg(mp);
1704*bafec742SSukumar Swaminathan 			mp = tp;
1705*bafec742SSukumar Swaminathan 		}
1706*bafec742SSukumar Swaminathan 		goto exit;
1707*bafec742SSukumar Swaminathan 	}
1708*bafec742SSukumar Swaminathan 
1709*bafec742SSukumar Swaminathan 	mutex_enter(&tx_ring->tx_lock);
	/* if the mac is not started, the driver is not ready; cannot send */
1711*bafec742SSukumar Swaminathan 	if (tx_ring->mac_flags != QL_MAC_STARTED) {
		cmn_err(CE_WARN, "%s(%d) ring not started, mode %d,"
		    " returning packets",
		    __func__, qlge->instance, tx_ring->mac_flags);
1715*bafec742SSukumar Swaminathan 		mutex_exit(&tx_ring->tx_lock);
1716*bafec742SSukumar Swaminathan 		goto exit;
1717*bafec742SSukumar Swaminathan 	}
1718*bafec742SSukumar Swaminathan 
1719*bafec742SSukumar Swaminathan 	/* we must try to send all */
1720*bafec742SSukumar Swaminathan 	while (mp != NULL) {
1721*bafec742SSukumar Swaminathan 		/*
		 * if the number of available slots falls to the stop
		 * threshold, stop taking packets
1724*bafec742SSukumar Swaminathan 		 */
1725*bafec742SSukumar Swaminathan 		if (tx_ring->tx_free_count <= TX_STOP_THRESHOLD) {
1726*bafec742SSukumar Swaminathan 			tx_ring->queue_stopped = 1;
1727*bafec742SSukumar Swaminathan 			rval = DDI_FAILURE;
1728*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1729*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d) no resources",
1730*bafec742SSukumar Swaminathan 			    __func__, qlge->instance);
1731*bafec742SSukumar Swaminathan #endif
1732*bafec742SSukumar Swaminathan 			tx_ring->defer++;
1733*bafec742SSukumar Swaminathan 			/*
1734*bafec742SSukumar Swaminathan 			 * If we return the buffer back we are expected to call
1735*bafec742SSukumar Swaminathan 			 * mac_tx_ring_update() when resources are available
1736*bafec742SSukumar Swaminathan 			 */
1737*bafec742SSukumar Swaminathan 			break;
1738*bafec742SSukumar Swaminathan 		}
1739*bafec742SSukumar Swaminathan 
1740*bafec742SSukumar Swaminathan 		next = mp->b_next;
1741*bafec742SSukumar Swaminathan 		mp->b_next = NULL;
1742*bafec742SSukumar Swaminathan 
1743*bafec742SSukumar Swaminathan 		rval = ql_send_common(tx_ring, mp);
1744*bafec742SSukumar Swaminathan 
1745*bafec742SSukumar Swaminathan 		if (rval != DDI_SUCCESS) {
1746*bafec742SSukumar Swaminathan 			mp->b_next = next;
1747*bafec742SSukumar Swaminathan 			break;
1748*bafec742SSukumar Swaminathan 		}
1749*bafec742SSukumar Swaminathan 		tx_count++;
1750*bafec742SSukumar Swaminathan 		mp = next;
1751*bafec742SSukumar Swaminathan 	}
1752*bafec742SSukumar Swaminathan 
1753*bafec742SSukumar Swaminathan 	/*
1754*bafec742SSukumar Swaminathan 	 * After all msg blocks are mapped or copied to tx buffer,
1755*bafec742SSukumar Swaminathan 	 * trigger the hardware to send!
1756*bafec742SSukumar Swaminathan 	 */
1757*bafec742SSukumar Swaminathan 	if (tx_count > 0) {
1758*bafec742SSukumar Swaminathan 		ql_write_doorbell_reg(tx_ring->qlge, tx_ring->prod_idx_db_reg,
1759*bafec742SSukumar Swaminathan 		    tx_ring->prod_idx);
1760*bafec742SSukumar Swaminathan 	}
1761*bafec742SSukumar Swaminathan 
1762*bafec742SSukumar Swaminathan 	mutex_exit(&tx_ring->tx_lock);
1763*bafec742SSukumar Swaminathan exit:
1764*bafec742SSukumar Swaminathan 	return (mp);
1765*bafec742SSukumar Swaminathan }
1766*bafec742SSukumar Swaminathan 
1767*bafec742SSukumar Swaminathan 
1768*bafec742SSukumar Swaminathan /*
1769*bafec742SSukumar Swaminathan  * This function builds an mblk list for the given inbound
1770*bafec742SSukumar Swaminathan  * completion.
1771*bafec742SSukumar Swaminathan  */
1772*bafec742SSukumar Swaminathan 
1773*bafec742SSukumar Swaminathan static mblk_t *
1774*bafec742SSukumar Swaminathan ql_build_rx_mp(qlge_t *qlge, struct rx_ring *rx_ring,
1775*bafec742SSukumar Swaminathan     struct ib_mac_iocb_rsp *ib_mac_rsp)
1776*bafec742SSukumar Swaminathan {
1777*bafec742SSukumar Swaminathan 	mblk_t *mp = NULL;
1778*bafec742SSukumar Swaminathan 	mblk_t *mp1 = NULL;	/* packet header */
1779*bafec742SSukumar Swaminathan 	mblk_t *mp2 = NULL;	/* packet content */
1780*bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1781*bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
1782*bafec742SSukumar Swaminathan 	uint32_t err_flag = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK);
1783*bafec742SSukumar Swaminathan 	uint32_t payload_len = le32_to_cpu(ib_mac_rsp->data_len);
1784*bafec742SSukumar Swaminathan 	uint32_t header_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1785*bafec742SSukumar Swaminathan 	uint32_t pkt_len = payload_len + header_len;
1786*bafec742SSukumar Swaminathan 	uint32_t done;
1787*bafec742SSukumar Swaminathan 	uint64_t *curr_ial_ptr;
1788*bafec742SSukumar Swaminathan 	uint32_t ial_data_addr_low;
1789*bafec742SSukumar Swaminathan 	uint32_t actual_data_addr_low;
1790*bafec742SSukumar Swaminathan 	mblk_t *mp_ial = NULL;	/* ial chained packets */
1791*bafec742SSukumar Swaminathan 	uint32_t size;
1792*bafec742SSukumar Swaminathan 
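	/*
	 * A completion can deliver a frame several ways: header split
	 * into a small buffer (HV/HS), payload in a single small
	 * buffer (DS), payload in a single large buffer (DL), or
	 * payload scattered across large buffers via an indirect
	 * address list; each case is handled below.
	 */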
1793*bafec742SSukumar Swaminathan 	/*
1794*bafec742SSukumar Swaminathan 	 * Check if error flags are set
1795*bafec742SSukumar Swaminathan 	 */
1796*bafec742SSukumar Swaminathan 	if (err_flag != 0) {
1797*bafec742SSukumar Swaminathan 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_OVERSIZE) != 0)
1798*bafec742SSukumar Swaminathan 			rx_ring->frame_too_long++;
1799*bafec742SSukumar Swaminathan 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_UNDERSIZE) != 0)
1800*bafec742SSukumar Swaminathan 			rx_ring->frame_too_short++;
1801*bafec742SSukumar Swaminathan 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_CRC) != 0)
1802*bafec742SSukumar Swaminathan 			rx_ring->fcs_err++;
1803*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1804*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "bad packet, type 0x%x", err_flag);
1805*bafec742SSukumar Swaminathan #endif
1806*bafec742SSukumar Swaminathan 		QL_DUMP(DBG_RX, "qlge_ring_rx: bad response iocb dump\n",
1807*bafec742SSukumar Swaminathan 		    (uint8_t *)ib_mac_rsp, 8,
1808*bafec742SSukumar Swaminathan 		    (size_t)sizeof (struct ib_mac_iocb_rsp));
1809*bafec742SSukumar Swaminathan 	}
1810*bafec742SSukumar Swaminathan 
1811*bafec742SSukumar Swaminathan 	/* header should not be in large buffer */
1812*bafec742SSukumar Swaminathan 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL) {
1813*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "header in large buffer or invalid!");
1814*bafec742SSukumar Swaminathan 		err_flag |= 1;
1815*bafec742SSukumar Swaminathan 	}
	/*
	 * Handle the header buffer if present.
	 * The packet header must be valid and saved in one small buffer;
	 * broadcast/multicast packet headers are not split.
	 */
1821*bafec742SSukumar Swaminathan 	if ((ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) &&
1822*bafec742SSukumar Swaminathan 	    (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1823*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX, ("Header of %d bytes in small buffer.\n",
1824*bafec742SSukumar Swaminathan 		    header_len));
1825*bafec742SSukumar Swaminathan 		/* Sync access */
1826*bafec742SSukumar Swaminathan 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
1827*bafec742SSukumar Swaminathan 
1828*bafec742SSukumar Swaminathan 		ASSERT(sbq_desc != NULL);
1829*bafec742SSukumar Swaminathan 
1830*bafec742SSukumar Swaminathan 		/*
1831*bafec742SSukumar Swaminathan 		 * Validate addresses from the ASIC with the
1832*bafec742SSukumar Swaminathan 		 * expected sbuf address
1833*bafec742SSukumar Swaminathan 		 */
1834*bafec742SSukumar Swaminathan 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
1835*bafec742SSukumar Swaminathan 		    != ib_mac_rsp->hdr_addr) {
1836*bafec742SSukumar Swaminathan 			/* Small buffer address mismatch */
1837*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
1838*bafec742SSukumar Swaminathan 			    " in wrong small buffer",
1839*bafec742SSukumar Swaminathan 			    __func__, qlge->instance, rx_ring->cq_id);
1840*bafec742SSukumar Swaminathan 			goto fetal_error;
1841*bafec742SSukumar Swaminathan 		}
1842*bafec742SSukumar Swaminathan 		/* get this packet */
1843*bafec742SSukumar Swaminathan 		mp1 = sbq_desc->mp;
		if ((err_flag != 0) || (mp1 == NULL)) {
1845*bafec742SSukumar Swaminathan 			/* failed on this packet, put it back for re-arming */
1846*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1847*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "get header from small buffer fail");
1848*bafec742SSukumar Swaminathan #endif
1849*bafec742SSukumar Swaminathan 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1850*bafec742SSukumar Swaminathan 			mp1 = NULL;
1851*bafec742SSukumar Swaminathan 		} else {
1852*bafec742SSukumar Swaminathan 			/* Flush DMA'd data */
1853*bafec742SSukumar Swaminathan 			(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
1854*bafec742SSukumar Swaminathan 			    0, header_len, DDI_DMA_SYNC_FORKERNEL);
1855*bafec742SSukumar Swaminathan 
			if ((qlge->ip_hdr_offset != 0) &&
1857*bafec742SSukumar Swaminathan 			    (header_len < SMALL_BUFFER_SIZE)) {
1858*bafec742SSukumar Swaminathan 				/*
				 * copy the entire header to a 2-byte aligned
				 * address for 8100 adapters so that the IP
				 * header falls on a 4-byte boundary
1862*bafec742SSukumar Swaminathan 				 */
1863*bafec742SSukumar Swaminathan 				bcopy(mp1->b_rptr,
1864*bafec742SSukumar Swaminathan 				    (mp1->b_rptr + SMALL_BUFFER_SIZE +
1865*bafec742SSukumar Swaminathan 				    qlge->ip_hdr_offset),
1866*bafec742SSukumar Swaminathan 				    header_len);
1867*bafec742SSukumar Swaminathan 				mp1->b_rptr += SMALL_BUFFER_SIZE +
1868*bafec742SSukumar Swaminathan 				    qlge->ip_hdr_offset;
1869*bafec742SSukumar Swaminathan 			}
1870*bafec742SSukumar Swaminathan 
1871*bafec742SSukumar Swaminathan 			/*
			 * Adjust the mblk to cover exactly the
			 * header_len bytes of the packet header
1874*bafec742SSukumar Swaminathan 			 */
1875*bafec742SSukumar Swaminathan 			mp1->b_wptr = mp1->b_rptr + header_len;
1876*bafec742SSukumar Swaminathan 			mp1->b_next = mp1->b_cont = NULL;
1877*bafec742SSukumar Swaminathan 			QL_DUMP(DBG_RX, "\t RX packet header dump:\n",
1878*bafec742SSukumar Swaminathan 			    (uint8_t *)mp1->b_rptr, 8, header_len);
1879*bafec742SSukumar Swaminathan 		}
1880*bafec742SSukumar Swaminathan 	}
1881*bafec742SSukumar Swaminathan 
1882*bafec742SSukumar Swaminathan 	/*
	 * packet data, or the whole packet, can be in one small
	 * buffer or in one or more large buffers
1885*bafec742SSukumar Swaminathan 	 */
1886*bafec742SSukumar Swaminathan 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1887*bafec742SSukumar Swaminathan 		/*
1888*bafec742SSukumar Swaminathan 		 * The data is in a single small buffer.
1889*bafec742SSukumar Swaminathan 		 */
1890*bafec742SSukumar Swaminathan 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
1891*bafec742SSukumar Swaminathan 
1892*bafec742SSukumar Swaminathan 		ASSERT(sbq_desc != NULL);
1893*bafec742SSukumar Swaminathan 
1894*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX,
1895*bafec742SSukumar Swaminathan 		    ("%d bytes in a single small buffer, sbq_desc = %p, "
1896*bafec742SSukumar Swaminathan 		    "sbq_desc->bd_dma.dma_addr = %x,"
1897*bafec742SSukumar Swaminathan 		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
1898*bafec742SSukumar Swaminathan 		    payload_len, sbq_desc, sbq_desc->bd_dma.dma_addr,
1899*bafec742SSukumar Swaminathan 		    ib_mac_rsp->data_addr, sbq_desc->mp));
1900*bafec742SSukumar Swaminathan 
1901*bafec742SSukumar Swaminathan 		/*
		 * Validate addresses from the ASIC with the
1903*bafec742SSukumar Swaminathan 		 * expected sbuf address
1904*bafec742SSukumar Swaminathan 		 */
1905*bafec742SSukumar Swaminathan 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
1906*bafec742SSukumar Swaminathan 		    != ib_mac_rsp->data_addr) {
1907*bafec742SSukumar Swaminathan 			/* Small buffer address mismatch */
1908*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
1909*bafec742SSukumar Swaminathan 			    " in wrong small buffer",
1910*bafec742SSukumar Swaminathan 			    __func__, qlge->instance, rx_ring->cq_id);
1911*bafec742SSukumar Swaminathan 			goto fetal_error;
1912*bafec742SSukumar Swaminathan 		}
1913*bafec742SSukumar Swaminathan 		/* get this packet */
1914*bafec742SSukumar Swaminathan 		mp2 = sbq_desc->mp;
1915*bafec742SSukumar Swaminathan 		if ((err_flag != 0) || (mp2 == NULL)) {
1916*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1917*bafec742SSukumar Swaminathan 			/* failed on this packet, put it back for re-arming */
1918*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "ignore bad data from small buffer");
1919*bafec742SSukumar Swaminathan #endif
1920*bafec742SSukumar Swaminathan 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1921*bafec742SSukumar Swaminathan 			mp2 = NULL;
1922*bafec742SSukumar Swaminathan 		} else {
1923*bafec742SSukumar Swaminathan 			/* Adjust the buffer length to match the payload_len */
1924*bafec742SSukumar Swaminathan 			mp2->b_wptr = mp2->b_rptr + payload_len;
1925*bafec742SSukumar Swaminathan 			mp2->b_next = mp2->b_cont = NULL;
1926*bafec742SSukumar Swaminathan 			/* Flush DMA'd data */
1927*bafec742SSukumar Swaminathan 			(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
1928*bafec742SSukumar Swaminathan 			    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
1929*bafec742SSukumar Swaminathan 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
1930*bafec742SSukumar Swaminathan 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
1931*bafec742SSukumar Swaminathan 			/*
1932*bafec742SSukumar Swaminathan 			 * If the payload is small enough, copy it to
1933*bafec742SSukumar Swaminathan 			 * the end of the packet header
1934*bafec742SSukumar Swaminathan 			 */
1935*bafec742SSukumar Swaminathan 			if ((mp1 != NULL) &&
1936*bafec742SSukumar Swaminathan 			    (payload_len <= qlge->payload_copy_thresh) &&
1937*bafec742SSukumar Swaminathan 			    (pkt_len <
1938*bafec742SSukumar Swaminathan 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
1939*bafec742SSukumar Swaminathan 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
1940*bafec742SSukumar Swaminathan 				mp1->b_wptr += payload_len;
1941*bafec742SSukumar Swaminathan 				freemsg(mp2);
1942*bafec742SSukumar Swaminathan 				mp2 = NULL;
1943*bafec742SSukumar Swaminathan 			}
1944*bafec742SSukumar Swaminathan 		}
1945*bafec742SSukumar Swaminathan 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1946*bafec742SSukumar Swaminathan 		/*
1947*bafec742SSukumar Swaminathan 		 * The data is in a single large buffer.
1948*bafec742SSukumar Swaminathan 		 */
1949*bafec742SSukumar Swaminathan 		lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
1950*bafec742SSukumar Swaminathan 
1951*bafec742SSukumar Swaminathan 		ASSERT(lbq_desc != NULL);
1952*bafec742SSukumar Swaminathan 
1953*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX,
1954*bafec742SSukumar Swaminathan 		    ("%d bytes in a single large buffer, lbq_desc = %p, "
1955*bafec742SSukumar Swaminathan 		    "lbq_desc->bd_dma.dma_addr = %x,"
1956*bafec742SSukumar Swaminathan 		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
1957*bafec742SSukumar Swaminathan 		    payload_len, lbq_desc, lbq_desc->bd_dma.dma_addr,
1958*bafec742SSukumar Swaminathan 		    ib_mac_rsp->data_addr, lbq_desc->mp));
1959*bafec742SSukumar Swaminathan 
1960*bafec742SSukumar Swaminathan 		/*
1961*bafec742SSukumar Swaminathan 		 * Validate addresses from the ASIC with
1962*bafec742SSukumar Swaminathan 		 * the expected lbuf address
1963*bafec742SSukumar Swaminathan 		 */
1964*bafec742SSukumar Swaminathan 		if (cpu_to_le64(lbq_desc->bd_dma.dma_addr)
1965*bafec742SSukumar Swaminathan 		    != ib_mac_rsp->data_addr) {
1966*bafec742SSukumar Swaminathan 			/* Large buffer address mismatch */
1967*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
1968*bafec742SSukumar Swaminathan 			    " in wrong large buffer",
1969*bafec742SSukumar Swaminathan 			    __func__, qlge->instance, rx_ring->cq_id);
1970*bafec742SSukumar Swaminathan 			goto fatal_error;
1971*bafec742SSukumar Swaminathan 		}
1972*bafec742SSukumar Swaminathan 		mp2 = lbq_desc->mp;
1973*bafec742SSukumar Swaminathan 		if ((err_flag != 0) || (mp2 == NULL)) {
1974*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1975*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "ignore bad data from large buffer");
1976*bafec742SSukumar Swaminathan #endif
1977*bafec742SSukumar Swaminathan 			/* failed on this packet, put it back for re-arming */
1978*bafec742SSukumar Swaminathan 			ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
1979*bafec742SSukumar Swaminathan 			mp2 = NULL;
1980*bafec742SSukumar Swaminathan 		} else {
1981*bafec742SSukumar Swaminathan 			/*
1982*bafec742SSukumar Swaminathan 			 * Adjust the buffer length to match
1983*bafec742SSukumar Swaminathan 			 * the packet payload_len
1984*bafec742SSukumar Swaminathan 			 */
1985*bafec742SSukumar Swaminathan 			mp2->b_wptr = mp2->b_rptr + payload_len;
1986*bafec742SSukumar Swaminathan 			mp2->b_next = mp2->b_cont = NULL;
1987*bafec742SSukumar Swaminathan 			/* Flush DMA'd data */
1988*bafec742SSukumar Swaminathan 			(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
1989*bafec742SSukumar Swaminathan 			    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
1990*bafec742SSukumar Swaminathan 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
1991*bafec742SSukumar Swaminathan 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
1992*bafec742SSukumar Swaminathan 			/*
1993*bafec742SSukumar Swaminathan 			 * If the payload is small enough, copy it to
1994*bafec742SSukumar Swaminathan 			 * the end of the packet header
1995*bafec742SSukumar Swaminathan 			 */
1996*bafec742SSukumar Swaminathan 			if ((mp1 != NULL) &&
1997*bafec742SSukumar Swaminathan 			    (payload_len <= qlge->payload_copy_thresh) &&
1998*bafec742SSukumar Swaminathan 			    (pkt_len <
1999*bafec742SSukumar Swaminathan 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2000*bafec742SSukumar Swaminathan 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2001*bafec742SSukumar Swaminathan 				mp1->b_wptr += payload_len;
2002*bafec742SSukumar Swaminathan 				freemsg(mp2);
2003*bafec742SSukumar Swaminathan 				mp2 = NULL;
2004*bafec742SSukumar Swaminathan 			}
2005*bafec742SSukumar Swaminathan 		}
2006*bafec742SSukumar Swaminathan 	} else if (payload_len) {
2007*bafec742SSukumar Swaminathan 		/*
2008*bafec742SSukumar Swaminathan 		 * The payload is available but is in neither a small nor
2009*bafec742SSukumar Swaminathan 		 * a large buffer; it is saved in an IAL
2010*bafec742SSukumar Swaminathan 		 */
2011*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
2012*bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "packet chained in IAL\n");
2013*bafec742SSukumar Swaminathan #endif
2014*bafec742SSukumar Swaminathan 		/* lrg buf addresses are saved in one small buffer */
2015*bafec742SSukumar Swaminathan 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2016*bafec742SSukumar Swaminathan 		curr_ial_ptr = (uint64_t *)sbq_desc->bd_dma.vaddr;
2017*bafec742SSukumar Swaminathan 		done = 0;
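2017*bafec742SSukumar Swaminathan 		/*
2017*bafec742SSukumar Swaminathan 		 * Walk the IAL: each 64-bit entry holds the DMA
2017*bafec742SSukumar Swaminathan 		 * address of one large buffer carrying a fragment
2017*bafec742SSukumar Swaminathan 		 * of the packet.
2017*bafec742SSukumar Swaminathan 		 */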
2018*bafec742SSukumar Swaminathan 		while (!done) {
2019*bafec742SSukumar Swaminathan 			ial_data_addr_low =
2020*bafec742SSukumar Swaminathan 			    (uint32_t)(le64_to_cpu(*curr_ial_ptr) &
2021*bafec742SSukumar Swaminathan 			    0xFFFFFFFE);
2022*bafec742SSukumar Swaminathan 			/* check if this is the last packet fragment */
2023*bafec742SSukumar Swaminathan 			done = (uint32_t)(le64_to_cpu(*curr_ial_ptr) & 1);
2024*bafec742SSukumar Swaminathan 			curr_ial_ptr++;
2025*bafec742SSukumar Swaminathan 			/*
2026*bafec742SSukumar Swaminathan 			 * The data is in one or several large buffer(s).
2027*bafec742SSukumar Swaminathan 			 */
2028*bafec742SSukumar Swaminathan 			lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2029*bafec742SSukumar Swaminathan 			actual_data_addr_low =
2030*bafec742SSukumar Swaminathan 			    (uint32_t)(lbq_desc->bd_dma.dma_addr &
2031*bafec742SSukumar Swaminathan 			    0xFFFFFFFE);
2032*bafec742SSukumar Swaminathan 			if (ial_data_addr_low != actual_data_addr_low) {
2033*bafec742SSukumar Swaminathan 				cmn_err(CE_WARN,
2034*bafec742SSukumar Swaminathan 				    "packet saved in wrong ial lrg buffer"
2035*bafec742SSukumar Swaminathan 				    " expected %x, actual %lx",
2036*bafec742SSukumar Swaminathan 				    ial_data_addr_low,
2037*bafec742SSukumar Swaminathan 				    (uintptr_t)lbq_desc->bd_dma.dma_addr);
2038*bafec742SSukumar Swaminathan 				goto fatal_error;
2039*bafec742SSukumar Swaminathan 			}
2040*bafec742SSukumar Swaminathan 
2041*bafec742SSukumar Swaminathan 			if (mp_ial == NULL) {
2042*bafec742SSukumar Swaminathan 				mp_ial = mp2 = lbq_desc->mp;
2043*bafec742SSukumar Swaminathan 			} else {
2044*bafec742SSukumar Swaminathan 				mp2->b_cont = lbq_desc->mp;
2045*bafec742SSukumar Swaminathan 				mp2 = lbq_desc->mp;
2046*bafec742SSukumar Swaminathan 			}
2047*bafec742SSukumar Swaminathan 			mp2->b_next = NULL;
2048*bafec742SSukumar Swaminathan 			mp2->b_cont = NULL;
2049*bafec742SSukumar Swaminathan 			size = (payload_len < rx_ring->lbq_buf_size) ?
2050*bafec742SSukumar Swaminathan 			    payload_len : rx_ring->lbq_buf_size;
2051*bafec742SSukumar Swaminathan 			mp2->b_wptr = mp2->b_rptr + size;
2052*bafec742SSukumar Swaminathan 			/* Flush DMA'd data */
2053*bafec742SSukumar Swaminathan 			(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2054*bafec742SSukumar Swaminathan 			    0, size, DDI_DMA_SYNC_FORKERNEL);
2055*bafec742SSukumar Swaminathan 			payload_len -= size;
2056*bafec742SSukumar Swaminathan 			QL_DUMP(DBG_RX, "\t Mac data dump:\n",
2057*bafec742SSukumar Swaminathan 			    (uint8_t *)mp2->b_rptr, 8, size);
2058*bafec742SSukumar Swaminathan 		}
2059*bafec742SSukumar Swaminathan 		mp2 = mp_ial;
2060*bafec742SSukumar Swaminathan 		freemsg(sbq_desc->mp);
2061*bafec742SSukumar Swaminathan 	}
2062*bafec742SSukumar Swaminathan 	/*
2063*bafec742SSukumar Swaminathan 	 * If the packet header was not split out, send mp2 upstream by
2064*bafec742SSukumar Swaminathan 	 * itself; otherwise concatenate message block mp2 to the tail of
2064*bafec742SSukumar Swaminathan 	 * the message header, mp1
2065*bafec742SSukumar Swaminathan 	 */
2066*bafec742SSukumar Swaminathan 	if (!err_flag) {
2067*bafec742SSukumar Swaminathan 		if (mp1) {
2068*bafec742SSukumar Swaminathan 			if (mp2) {
2069*bafec742SSukumar Swaminathan 				QL_PRINT(DBG_RX, ("packet in mp1 and mp2\n"));
2070*bafec742SSukumar Swaminathan 				linkb(mp1, mp2); /* mp1->b_cont = mp2; */
2071*bafec742SSukumar Swaminathan 				mp = mp1;
2072*bafec742SSukumar Swaminathan 			} else {
2073*bafec742SSukumar Swaminathan 				QL_PRINT(DBG_RX, ("packet in mp1 only\n"));
2074*bafec742SSukumar Swaminathan 				mp = mp1;
2075*bafec742SSukumar Swaminathan 			}
2076*bafec742SSukumar Swaminathan 		} else if (mp2) {
2077*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_RX, ("packet in mp2 only\n"));
2078*bafec742SSukumar Swaminathan 			mp = mp2;
2079*bafec742SSukumar Swaminathan 		}
2080*bafec742SSukumar Swaminathan 	}
2081*bafec742SSukumar Swaminathan 	return (mp);
2082*bafec742SSukumar Swaminathan 
2083*bafec742SSukumar Swaminathan fatal_error:
2084*bafec742SSukumar Swaminathan 	/* Fatal error! */
2085*bafec742SSukumar Swaminathan 	*mp->b_wptr = 0;
2086*bafec742SSukumar Swaminathan 	return (mp);
2087*bafec742SSukumar Swaminathan 
2088*bafec742SSukumar Swaminathan }
2089*bafec742SSukumar Swaminathan 
2090*bafec742SSukumar Swaminathan /*
2091*bafec742SSukumar Swaminathan  * Bump completion queue consumer index.
2092*bafec742SSukumar Swaminathan  */
2093*bafec742SSukumar Swaminathan static void
2094*bafec742SSukumar Swaminathan ql_update_cq(struct rx_ring *rx_ring)
2095*bafec742SSukumar Swaminathan {
2096*bafec742SSukumar Swaminathan 	rx_ring->cnsmr_idx++;
2097*bafec742SSukumar Swaminathan 	rx_ring->curr_entry++;
2098*bafec742SSukumar Swaminathan 	if (rx_ring->cnsmr_idx >= rx_ring->cq_len) {
2099*bafec742SSukumar Swaminathan 		rx_ring->cnsmr_idx = 0;
2100*bafec742SSukumar Swaminathan 		rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
2101*bafec742SSukumar Swaminathan 	}
2102*bafec742SSukumar Swaminathan }
2103*bafec742SSukumar Swaminathan 
2104*bafec742SSukumar Swaminathan /*
2105*bafec742SSukumar Swaminathan  * Write the completion queue consumer index to its doorbell register.
2106*bafec742SSukumar Swaminathan  */
2107*bafec742SSukumar Swaminathan static void
2108*bafec742SSukumar Swaminathan ql_write_cq_idx(struct rx_ring *rx_ring)
2109*bafec742SSukumar Swaminathan {
2110*bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2111*bafec742SSukumar Swaminathan 
2112*bafec742SSukumar Swaminathan 	ql_write_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg,
2113*bafec742SSukumar Swaminathan 	    rx_ring->cnsmr_idx);
2114*bafec742SSukumar Swaminathan }
2115*bafec742SSukumar Swaminathan 
2116*bafec742SSukumar Swaminathan /*
2117*bafec742SSukumar Swaminathan  * Processes a SYS-Chip Event Notification Completion Event.
2118*bafec742SSukumar Swaminathan  * The incoming notification describes a link up/down event or
2119*bafec742SSukumar Swaminathan  * some sort of error condition.
2120*bafec742SSukumar Swaminathan  */
2121*bafec742SSukumar Swaminathan static void
2122*bafec742SSukumar Swaminathan ql_process_chip_ae_intr(qlge_t *qlge,
2123*bafec742SSukumar Swaminathan     struct ib_sys_event_iocb_rsp *ib_sys_event_rsp_ptr)
2124*bafec742SSukumar Swaminathan {
2125*bafec742SSukumar Swaminathan 	uint8_t eventType = ib_sys_event_rsp_ptr->event_type;
2126*bafec742SSukumar Swaminathan 	uint32_t soft_req = 0;
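2126*bafec742SSukumar Swaminathan 	/* accumulate any reset request and dispatch it after the switch */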
2127*bafec742SSukumar Swaminathan 
2128*bafec742SSukumar Swaminathan 	switch (eventType) {
2129*bafec742SSukumar Swaminathan 		case SYS_EVENT_PORT_LINK_UP:	/* 0x0h */
2130*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_MBX, ("Port Link Up\n"));
2131*bafec742SSukumar Swaminathan 			break;
2132*bafec742SSukumar Swaminathan 
2133*bafec742SSukumar Swaminathan 		case SYS_EVENT_PORT_LINK_DOWN:	/* 0x1h */
2134*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_MBX, ("Port Link Down\n"));
2135*bafec742SSukumar Swaminathan 			break;
2136*bafec742SSukumar Swaminathan 
2137*bafec742SSukumar Swaminathan 		case SYS_EVENT_MULTIPLE_CAM_HITS:	/* 0x6h */
2138*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "A multiple CAM hits lookup error "
2139*bafec742SSukumar Swaminathan 			    "occurred");
2140*bafec742SSukumar Swaminathan 			soft_req |= NEED_HW_RESET;
2141*bafec742SSukumar Swaminathan 			break;
2142*bafec742SSukumar Swaminathan 
2143*bafec742SSukumar Swaminathan 		case SYS_EVENT_SOFT_ECC_ERR:	/* 0x7h */
2144*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Soft ECC error detected");
2145*bafec742SSukumar Swaminathan 			soft_req |= NEED_HW_RESET;
2146*bafec742SSukumar Swaminathan 			break;
2147*bafec742SSukumar Swaminathan 
2148*bafec742SSukumar Swaminathan 		case SYS_EVENT_MGMT_FATAL_ERR:	/* 0x8h */
2149*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Management (MPI) Processor fatal"
2150*bafec742SSukumar Swaminathan 			    " error occurred");
2151*bafec742SSukumar Swaminathan 			soft_req |= NEED_MPI_RESET;
2152*bafec742SSukumar Swaminathan 			break;
2153*bafec742SSukumar Swaminathan 
2154*bafec742SSukumar Swaminathan 		case SYS_EVENT_MAC_INTERRUPT:	/* 0x9h */
2155*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_MBX, ("MAC Interrupt"));
2156*bafec742SSukumar Swaminathan 			break;
2157*bafec742SSukumar Swaminathan 
2158*bafec742SSukumar Swaminathan 		case SYS_EVENT_PCI_ERR_READING_SML_LRG_BUF:	/* 0x40h */
2159*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "PCI Error reading small/large "
2160*bafec742SSukumar Swaminathan 			    "buffers occurred");
2161*bafec742SSukumar Swaminathan 			soft_req |= NEED_HW_RESET;
2162*bafec742SSukumar Swaminathan 			break;
2163*bafec742SSukumar Swaminathan 
2164*bafec742SSukumar Swaminathan 		default:
2165*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_RX, ("%s(%d) unknown Sys Event: "
2166*bafec742SSukumar Swaminathan 			    "type 0x%x occurred",
2167*bafec742SSukumar Swaminathan 			    __func__, qlge->instance, eventType));
2168*bafec742SSukumar Swaminathan 			break;
2169*bafec742SSukumar Swaminathan 	}
2170*bafec742SSukumar Swaminathan 
2171*bafec742SSukumar Swaminathan 	if ((soft_req & NEED_MPI_RESET) != 0) {
2172*bafec742SSukumar Swaminathan 		ql_wake_mpi_reset_soft_intr(qlge);
2173*bafec742SSukumar Swaminathan 	} else if ((soft_req & NEED_HW_RESET) != 0) {
2174*bafec742SSukumar Swaminathan 		ql_wake_asic_reset_soft_intr(qlge);
2175*bafec742SSukumar Swaminathan 	}
2176*bafec742SSukumar Swaminathan }
2177*bafec742SSukumar Swaminathan 
2178*bafec742SSukumar Swaminathan /*
2179*bafec742SSukumar Swaminathan  * set received packet checksum flag
2180*bafec742SSukumar Swaminathan  */
2181*bafec742SSukumar Swaminathan void
2182*bafec742SSukumar Swaminathan ql_set_rx_cksum(mblk_t *mp, struct ib_mac_iocb_rsp *net_rsp)
2183*bafec742SSukumar Swaminathan {
2184*bafec742SSukumar Swaminathan 	uint32_t flags;
2185*bafec742SSukumar Swaminathan 
2186*bafec742SSukumar Swaminathan 	/* Not TCP or UDP packet? nothing more to do */
2187*bafec742SSukumar Swaminathan 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) == 0) &&
2188*bafec742SSukumar Swaminathan 	    ((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) == 0))
2189*bafec742SSukumar Swaminathan 		return;
2190*bafec742SSukumar Swaminathan 
2191*bafec742SSukumar Swaminathan 	/* No CKO support for IPv6 */
2192*bafec742SSukumar Swaminathan 	if ((net_rsp->flags3 & IB_MAC_IOCB_RSP_V6) != 0)
2193*bafec742SSukumar Swaminathan 		return;
2194*bafec742SSukumar Swaminathan 
2195*bafec742SSukumar Swaminathan 	/*
2196*bafec742SSukumar Swaminathan 	 * If checksum error, don't set flags; stack will calculate
2197*bafec742SSukumar Swaminathan 	 * checksum, detect the error and update statistics
2198*bafec742SSukumar Swaminathan 	 */
2199*bafec742SSukumar Swaminathan 	if (((net_rsp->flags1 & IB_MAC_IOCB_RSP_TE) != 0) ||
2200*bafec742SSukumar Swaminathan 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_IE) != 0))
2201*bafec742SSukumar Swaminathan 		return;
2202*bafec742SSukumar Swaminathan 
2203*bafec742SSukumar Swaminathan 	/* TCP or UDP packet and checksum valid */
2204*bafec742SSukumar Swaminathan 	if ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0) {
2205*bafec742SSukumar Swaminathan 		flags = HCK_FULLCKSUM | HCK_FULLCKSUM_OK;
2206*bafec742SSukumar Swaminathan 		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, flags, 0);
2207*bafec742SSukumar Swaminathan 	}
2214*bafec742SSukumar Swaminathan }
2215*bafec742SSukumar Swaminathan 
2216*bafec742SSukumar Swaminathan /*
2217*bafec742SSukumar Swaminathan  * This function goes through the h/w descriptors in one specified rx
2218*bafec742SSukumar Swaminathan  * ring, receiving the data if the descriptor status shows it is ready.
2219*bafec742SSukumar Swaminathan  * It returns a chain of mblks containing the received data, to be
2220*bafec742SSukumar Swaminathan  * passed up to mac_rx_ring().
2221*bafec742SSukumar Swaminathan  */
2222*bafec742SSukumar Swaminathan mblk_t *
2223*bafec742SSukumar Swaminathan ql_ring_rx(struct rx_ring *rx_ring, int poll_bytes)
2224*bafec742SSukumar Swaminathan {
2225*bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2226*bafec742SSukumar Swaminathan 	uint32_t prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2227*bafec742SSukumar Swaminathan 	struct ib_mac_iocb_rsp *net_rsp;
2228*bafec742SSukumar Swaminathan 	mblk_t *mp;
2229*bafec742SSukumar Swaminathan 	mblk_t *mblk_head;
2230*bafec742SSukumar Swaminathan 	mblk_t **mblk_tail;
2231*bafec742SSukumar Swaminathan 	uint32_t received_bytes = 0;
2232*bafec742SSukumar Swaminathan 	boolean_t done = B_FALSE;
2233*bafec742SSukumar Swaminathan 	uint32_t length;
2234*bafec742SSukumar Swaminathan 
2235*bafec742SSukumar Swaminathan #ifdef QLGE_TRACK_BUFFER_USAGE
2236*bafec742SSukumar Swaminathan 	uint32_t consumer_idx;
2237*bafec742SSukumar Swaminathan 	uint32_t producer_idx;
2238*bafec742SSukumar Swaminathan 	uint32_t num_free_entries;
2239*bafec742SSukumar Swaminathan 	uint32_t temp;
2240*bafec742SSukumar Swaminathan 
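2240*bafec742SSukumar Swaminathan 	/*
2240*bafec742SSukumar Swaminathan 	 * The doorbell register holds the sw consumer index in its
2240*bafec742SSukumar Swaminathan 	 * low 16 bits and the hw producer index in its high 16 bits.
2240*bafec742SSukumar Swaminathan 	 */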
2241*bafec742SSukumar Swaminathan 	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2242*bafec742SSukumar Swaminathan 	consumer_idx = temp & 0x0000ffff;
2243*bafec742SSukumar Swaminathan 	producer_idx = (temp >> 16);
2244*bafec742SSukumar Swaminathan 
2245*bafec742SSukumar Swaminathan 	if (consumer_idx > producer_idx)
2246*bafec742SSukumar Swaminathan 		num_free_entries = (consumer_idx - producer_idx);
2247*bafec742SSukumar Swaminathan 	else
2248*bafec742SSukumar Swaminathan 		num_free_entries = NUM_RX_RING_ENTRIES - (
2249*bafec742SSukumar Swaminathan 		    producer_idx - consumer_idx);
2250*bafec742SSukumar Swaminathan 
2251*bafec742SSukumar Swaminathan 	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2252*bafec742SSukumar Swaminathan 		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2253*bafec742SSukumar Swaminathan 
2254*bafec742SSukumar Swaminathan #endif
2255*bafec742SSukumar Swaminathan 	mblk_head = NULL;
2256*bafec742SSukumar Swaminathan 	mblk_tail = &mblk_head;
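2256*bafec742SSukumar Swaminathan 	/* mblk_tail points at the slot where the next mblk gets linked */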
2257*bafec742SSukumar Swaminathan 
2258*bafec742SSukumar Swaminathan 	while (!done && (prod != rx_ring->cnsmr_idx)) {
2259*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX,
2260*bafec742SSukumar Swaminathan 		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n",
2261*bafec742SSukumar Swaminathan 		    __func__, rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2262*bafec742SSukumar Swaminathan 
2263*bafec742SSukumar Swaminathan 		net_rsp = (struct ib_mac_iocb_rsp *)rx_ring->curr_entry;
2264*bafec742SSukumar Swaminathan 		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2265*bafec742SSukumar Swaminathan 		    (off_t)((uintptr_t)net_rsp -
2266*bafec742SSukumar Swaminathan 		    (uintptr_t)rx_ring->cq_dma.vaddr),
2267*bafec742SSukumar Swaminathan 		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2268*bafec742SSukumar Swaminathan 		QL_DUMP(DBG_RX, "qlge_ring_rx: rx completion iocb\n",
2269*bafec742SSukumar Swaminathan 		    rx_ring->curr_entry, 8, (size_t)sizeof (*net_rsp));
2270*bafec742SSukumar Swaminathan 
2271*bafec742SSukumar Swaminathan 		switch (net_rsp->opcode) {
2272*bafec742SSukumar Swaminathan 
2273*bafec742SSukumar Swaminathan 		case OPCODE_IB_MAC_IOCB:
2274*bafec742SSukumar Swaminathan 			/* Adding length of pkt header and payload */
2275*bafec742SSukumar Swaminathan 			length = le32_to_cpu(net_rsp->data_len) +
2276*bafec742SSukumar Swaminathan 			    le32_to_cpu(net_rsp->hdr_len);
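2276*bafec742SSukumar Swaminathan 			/*
2276*bafec742SSukumar Swaminathan 			 * In polling mode, stop before a packet that
2276*bafec742SSukumar Swaminathan 			 * would exceed the byte budget; it stays in the
2276*bafec742SSukumar Swaminathan 			 * queue until the next poll.
2276*bafec742SSukumar Swaminathan 			 */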
2277*bafec742SSukumar Swaminathan 			if ((poll_bytes != QLGE_POLL_ALL) &&
2278*bafec742SSukumar Swaminathan 			    ((received_bytes + length) > poll_bytes)) {
2279*bafec742SSukumar Swaminathan 				done = B_TRUE;
2280*bafec742SSukumar Swaminathan 				continue;
2281*bafec742SSukumar Swaminathan 			}
2282*bafec742SSukumar Swaminathan 			received_bytes += length;
2283*bafec742SSukumar Swaminathan 
2284*bafec742SSukumar Swaminathan 			mp = ql_build_rx_mp(qlge, rx_ring, net_rsp);
2285*bafec742SSukumar Swaminathan 			if (mp != NULL) {
2286*bafec742SSukumar Swaminathan 				if (rx_ring->mac_flags != QL_MAC_STARTED) {
2287*bafec742SSukumar Swaminathan 					/*
2288*bafec742SSukumar Swaminathan 					 * Increment number of packets we have
2289*bafec742SSukumar Swaminathan 					 * indicated to the stack, should be
2290*bafec742SSukumar Swaminathan 					 * decremented when we get it back
2291*bafec742SSukumar Swaminathan 					 * or when freemsg is called
2292*bafec742SSukumar Swaminathan 					 */
2293*bafec742SSukumar Swaminathan 					ASSERT(rx_ring->rx_indicate
2294*bafec742SSukumar Swaminathan 					    <= rx_ring->cq_len);
2295*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
2296*bafec742SSukumar Swaminathan 					cmn_err(CE_WARN, "%s not sending to OS,"
2297*bafec742SSukumar Swaminathan 					    " mac_flags %d, indicate %d",
2298*bafec742SSukumar Swaminathan 					    __func__, rx_ring->mac_flags,
2299*bafec742SSukumar Swaminathan 					    rx_ring->rx_indicate);
2300*bafec742SSukumar Swaminathan #endif
2301*bafec742SSukumar Swaminathan 					QL_PRINT(DBG_RX,
2302*bafec742SSukumar Swaminathan 					    ("cq_id = %d, packet "
2303*bafec742SSukumar Swaminathan 					    "dropped, mac not "
2304*bafec742SSukumar Swaminathan 					    "enabled.\n",
2305*bafec742SSukumar Swaminathan 					    rx_ring->cq_id));
2306*bafec742SSukumar Swaminathan 					rx_ring->rx_pkt_dropped_mac_unenabled++;
2307*bafec742SSukumar Swaminathan 
2308*bafec742SSukumar Swaminathan 					/* rx_lock is expected to be held */
2309*bafec742SSukumar Swaminathan 					mutex_exit(&rx_ring->rx_lock);
2310*bafec742SSukumar Swaminathan 					freemsg(mp);
2311*bafec742SSukumar Swaminathan 					mutex_enter(&rx_ring->rx_lock);
2312*bafec742SSukumar Swaminathan 					mp = NULL;
2313*bafec742SSukumar Swaminathan 				}
2314*bafec742SSukumar Swaminathan 
2315*bafec742SSukumar Swaminathan 				if (mp != NULL) {
2316*bafec742SSukumar Swaminathan 					/*
2317*bafec742SSukumar Swaminathan 					 * IP full packet has been
2318*bafec742SSukumar Swaminathan 					 * successfully verified by
2319*bafec742SSukumar Swaminathan 					 * H/W and is correct
2320*bafec742SSukumar Swaminathan 					 */
2321*bafec742SSukumar Swaminathan 					ql_set_rx_cksum(mp, net_rsp);
2322*bafec742SSukumar Swaminathan 
2323*bafec742SSukumar Swaminathan 					rx_ring->rx_packets++;
2324*bafec742SSukumar Swaminathan 					rx_ring->rx_bytes += length;
2325*bafec742SSukumar Swaminathan 					*mblk_tail = mp;
2326*bafec742SSukumar Swaminathan 					mblk_tail = &mp->b_next;
2327*bafec742SSukumar Swaminathan 				}
2328*bafec742SSukumar Swaminathan 			} else {
2329*bafec742SSukumar Swaminathan 				QL_PRINT(DBG_RX,
2330*bafec742SSukumar Swaminathan 				    ("cq_id = %d, packet dropped\n",
2331*bafec742SSukumar Swaminathan 				    rx_ring->cq_id));
2332*bafec742SSukumar Swaminathan 				rx_ring->rx_packets_dropped_no_buffer++;
2333*bafec742SSukumar Swaminathan 			}
2334*bafec742SSukumar Swaminathan 			break;
2335*bafec742SSukumar Swaminathan 
2336*bafec742SSukumar Swaminathan 		case OPCODE_IB_SYS_EVENT_IOCB:
2337*bafec742SSukumar Swaminathan 			ql_process_chip_ae_intr(qlge,
2338*bafec742SSukumar Swaminathan 			    (struct ib_sys_event_iocb_rsp *)
2339*bafec742SSukumar Swaminathan 			    net_rsp);
2340*bafec742SSukumar Swaminathan 			break;
2341*bafec742SSukumar Swaminathan 
2342*bafec742SSukumar Swaminathan 		default:
2343*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
2344*bafec742SSukumar Swaminathan 			    "%s Ring(%d) hit default case, not handled!"
2345*bafec742SSukumar Swaminathan 			    " dropping the packet, "
2346*bafec742SSukumar Swaminathan 			    "opcode = %x.", __func__, rx_ring->cq_id,
2347*bafec742SSukumar Swaminathan 			    net_rsp->opcode);
2348*bafec742SSukumar Swaminathan 			break;
2349*bafec742SSukumar Swaminathan 		}
2350*bafec742SSukumar Swaminathan 		/* increment cnsmr_idx and curr_entry */
2351*bafec742SSukumar Swaminathan 		ql_update_cq(rx_ring);
2352*bafec742SSukumar Swaminathan 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2353*bafec742SSukumar Swaminathan 
2354*bafec742SSukumar Swaminathan 	}
2355*bafec742SSukumar Swaminathan 	/* update cnsmr_idx */
2356*bafec742SSukumar Swaminathan 	ql_write_cq_idx(rx_ring);
2357*bafec742SSukumar Swaminathan 	/* do not enable interrupt for polling mode */
2358*bafec742SSukumar Swaminathan 	if (poll_bytes == QLGE_POLL_ALL)
2359*bafec742SSukumar Swaminathan 		ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
2360*bafec742SSukumar Swaminathan 	return (mblk_head);
2361*bafec742SSukumar Swaminathan }
2362*bafec742SSukumar Swaminathan 
2363*bafec742SSukumar Swaminathan /* Process an outbound completion from an rx ring. */
2364*bafec742SSukumar Swaminathan static void
2365*bafec742SSukumar Swaminathan ql_process_mac_tx_intr(qlge_t *qlge, struct ob_mac_iocb_rsp *mac_rsp)
2366*bafec742SSukumar Swaminathan {
2367*bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
2368*bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_ring_desc;
2369*bafec742SSukumar Swaminathan 	int j;
2370*bafec742SSukumar Swaminathan 
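2370*bafec742SSukumar Swaminathan 	/* locate the tx ring and descriptor this completion refers to */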
2371*bafec742SSukumar Swaminathan 	tx_ring = &qlge->tx_ring[mac_rsp->txq_idx];
2372*bafec742SSukumar Swaminathan 	tx_ring_desc = tx_ring->wq_desc;
2373*bafec742SSukumar Swaminathan 	tx_ring_desc += mac_rsp->tid;
2374*bafec742SSukumar Swaminathan 
2375*bafec742SSukumar Swaminathan 	if (tx_ring_desc->tx_type == USE_DMA) {
2376*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("%s(%d): tx type USE_DMA\n",
2377*bafec742SSukumar Swaminathan 		    __func__, qlge->instance));
2378*bafec742SSukumar Swaminathan 
2379*bafec742SSukumar Swaminathan 		/*
2380*bafec742SSukumar Swaminathan 		 * Release the DMA resource that is used for
2381*bafec742SSukumar Swaminathan 		 * DMA binding.
2382*bafec742SSukumar Swaminathan 		 */
2383*bafec742SSukumar Swaminathan 		for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
2384*bafec742SSukumar Swaminathan 			(void) ddi_dma_unbind_handle(
2385*bafec742SSukumar Swaminathan 			    tx_ring_desc->tx_dma_handle[j]);
2386*bafec742SSukumar Swaminathan 		}
2387*bafec742SSukumar Swaminathan 
2388*bafec742SSukumar Swaminathan 		tx_ring_desc->tx_dma_handle_used = 0;
2389*bafec742SSukumar Swaminathan 		/*
2390*bafec742SSukumar Swaminathan 		 * Free the mblk after sending completed
2391*bafec742SSukumar Swaminathan 		 */
2392*bafec742SSukumar Swaminathan 		if (tx_ring_desc->mp != NULL) {
2393*bafec742SSukumar Swaminathan 			freemsg(tx_ring_desc->mp);
2394*bafec742SSukumar Swaminathan 			tx_ring_desc->mp = NULL;
2395*bafec742SSukumar Swaminathan 		}
2396*bafec742SSukumar Swaminathan 	}
2397*bafec742SSukumar Swaminathan 
2398*bafec742SSukumar Swaminathan 	tx_ring->obytes += tx_ring_desc->tx_bytes;
2399*bafec742SSukumar Swaminathan 	tx_ring->opackets++;
2400*bafec742SSukumar Swaminathan 
2401*bafec742SSukumar Swaminathan 	if (mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | OB_MAC_IOCB_RSP_S |
2402*bafec742SSukumar Swaminathan 	    OB_MAC_IOCB_RSP_L | OB_MAC_IOCB_RSP_B)) {
2403*bafec742SSukumar Swaminathan 		tx_ring->errxmt++;
2404*bafec742SSukumar Swaminathan 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2405*bafec742SSukumar Swaminathan 			/* EMPTY */
2406*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX,
2407*bafec742SSukumar Swaminathan 			    ("Total descriptor length did not match "
2408*bafec742SSukumar Swaminathan 			    "transfer length.\n"));
2409*bafec742SSukumar Swaminathan 		}
2410*bafec742SSukumar Swaminathan 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2411*bafec742SSukumar Swaminathan 			/* EMPTY */
2412*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX,
2413*bafec742SSukumar Swaminathan 			    ("Frame too short to be legal, not sent.\n"));
2414*bafec742SSukumar Swaminathan 		}
2415*bafec742SSukumar Swaminathan 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2416*bafec742SSukumar Swaminathan 			/* EMPTY */
2417*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX,
2418*bafec742SSukumar Swaminathan 			    ("Frame too long, but sent anyway.\n"));
2419*bafec742SSukumar Swaminathan 		}
2420*bafec742SSukumar Swaminathan 		if (mac_rsp->flags3 & OB_MAC_IOCB_RSP_B) {
2421*bafec742SSukumar Swaminathan 			/* EMPTY */
2422*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX,
2423*bafec742SSukumar Swaminathan 			    ("PCI backplane error. Frame not sent.\n"));
2424*bafec742SSukumar Swaminathan 		}
2425*bafec742SSukumar Swaminathan 	}
2426*bafec742SSukumar Swaminathan 	atomic_inc_32(&tx_ring->tx_free_count);
2427*bafec742SSukumar Swaminathan }
2428*bafec742SSukumar Swaminathan 
2429*bafec742SSukumar Swaminathan /*
2430*bafec742SSukumar Swaminathan  * clean up tx completion iocbs
2431*bafec742SSukumar Swaminathan  */
2432*bafec742SSukumar Swaminathan static int
2433*bafec742SSukumar Swaminathan ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2434*bafec742SSukumar Swaminathan {
2435*bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2436*bafec742SSukumar Swaminathan 	uint32_t prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2437*bafec742SSukumar Swaminathan 	struct ob_mac_iocb_rsp *net_rsp = NULL;
2438*bafec742SSukumar Swaminathan 	int count = 0;
2439*bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
2440*bafec742SSukumar Swaminathan 	boolean_t resume_tx = B_FALSE;
2441*bafec742SSukumar Swaminathan 
2442*bafec742SSukumar Swaminathan 	mutex_enter(&rx_ring->rx_lock);
2443*bafec742SSukumar Swaminathan #ifdef QLGE_TRACK_BUFFER_USAGE
2444*bafec742SSukumar Swaminathan 	{
2445*bafec742SSukumar Swaminathan 	uint32_t consumer_idx;
2446*bafec742SSukumar Swaminathan 	uint32_t producer_idx;
2447*bafec742SSukumar Swaminathan 	uint32_t num_free_entries;
2448*bafec742SSukumar Swaminathan 	uint32_t temp;
2449*bafec742SSukumar Swaminathan 
2450*bafec742SSukumar Swaminathan 	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2451*bafec742SSukumar Swaminathan 	consumer_idx = temp & 0x0000ffff;
2452*bafec742SSukumar Swaminathan 	producer_idx = (temp >> 16);
2453*bafec742SSukumar Swaminathan 
2454*bafec742SSukumar Swaminathan 	if (consumer_idx > producer_idx)
2455*bafec742SSukumar Swaminathan 		num_free_entries = (consumer_idx - producer_idx);
2456*bafec742SSukumar Swaminathan 	else
2457*bafec742SSukumar Swaminathan 		num_free_entries = NUM_RX_RING_ENTRIES -
2458*bafec742SSukumar Swaminathan 		    (producer_idx - consumer_idx);
2459*bafec742SSukumar Swaminathan 
2460*bafec742SSukumar Swaminathan 	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2461*bafec742SSukumar Swaminathan 		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2462*bafec742SSukumar Swaminathan 
2463*bafec742SSukumar Swaminathan 	}
2464*bafec742SSukumar Swaminathan #endif
2465*bafec742SSukumar Swaminathan 	/* While there are entries in the completion queue. */
2466*bafec742SSukumar Swaminathan 	while (prod != rx_ring->cnsmr_idx) {
2467*bafec742SSukumar Swaminathan 
2468*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX,
2469*bafec742SSukumar Swaminathan 		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n", __func__,
2470*bafec742SSukumar Swaminathan 		    rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2471*bafec742SSukumar Swaminathan 
2472*bafec742SSukumar Swaminathan 		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2473*bafec742SSukumar Swaminathan 		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2474*bafec742SSukumar Swaminathan 		    (off_t)((uintptr_t)net_rsp -
2475*bafec742SSukumar Swaminathan 		    (uintptr_t)rx_ring->cq_dma.vaddr),
2476*bafec742SSukumar Swaminathan 		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2477*bafec742SSukumar Swaminathan 
2478*bafec742SSukumar Swaminathan 		QL_DUMP(DBG_RX, "ql_clean_outbound_rx_ring: "
2479*bafec742SSukumar Swaminathan 		    "response packet data\n",
2480*bafec742SSukumar Swaminathan 		    rx_ring->curr_entry, 8,
2481*bafec742SSukumar Swaminathan 		    (size_t)sizeof (*net_rsp));
2482*bafec742SSukumar Swaminathan 
2483*bafec742SSukumar Swaminathan 		switch (net_rsp->opcode) {
2484*bafec742SSukumar Swaminathan 
2485*bafec742SSukumar Swaminathan 		case OPCODE_OB_MAC_OFFLOAD_IOCB:
2486*bafec742SSukumar Swaminathan 		case OPCODE_OB_MAC_IOCB:
2487*bafec742SSukumar Swaminathan 			ql_process_mac_tx_intr(qlge, net_rsp);
2488*bafec742SSukumar Swaminathan 			break;
2489*bafec742SSukumar Swaminathan 
2490*bafec742SSukumar Swaminathan 		default:
2491*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
2492*bafec742SSukumar Swaminathan 			    "%s Hit default case, not handled! "
2493*bafec742SSukumar Swaminathan 			    "dropping the packet,"
2494*bafec742SSukumar Swaminathan 			    " opcode = %x.",
2495*bafec742SSukumar Swaminathan 			    __func__, net_rsp->opcode);
2496*bafec742SSukumar Swaminathan 			break;
2497*bafec742SSukumar Swaminathan 		}
2498*bafec742SSukumar Swaminathan 		count++;
2499*bafec742SSukumar Swaminathan 		ql_update_cq(rx_ring);
2500*bafec742SSukumar Swaminathan 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2501*bafec742SSukumar Swaminathan 	}
2502*bafec742SSukumar Swaminathan 	ql_write_cq_idx(rx_ring);
2503*bafec742SSukumar Swaminathan 
2504*bafec742SSukumar Swaminathan 	mutex_exit(&rx_ring->rx_lock);
2505*bafec742SSukumar Swaminathan 
2506*bafec742SSukumar Swaminathan 	/* if no completion was processed, there is no tx ring to check */
2506*bafec742SSukumar Swaminathan 	if (net_rsp == NULL)
2506*bafec742SSukumar Swaminathan 		return (count);
2506*bafec742SSukumar Swaminathan 	tx_ring = &qlge->tx_ring[net_rsp->txq_idx];
2507*bafec742SSukumar Swaminathan 
2508*bafec742SSukumar Swaminathan 	mutex_enter(&tx_ring->tx_lock);
2509*bafec742SSukumar Swaminathan 
2510*bafec742SSukumar Swaminathan 	if (tx_ring->queue_stopped &&
2511*bafec742SSukumar Swaminathan 	    (tx_ring->tx_free_count > TX_RESUME_THRESHOLD)) {
2512*bafec742SSukumar Swaminathan 		/*
2513*bafec742SSukumar Swaminathan 		 * The queue got stopped because the tx_ring was full.
2514*bafec742SSukumar Swaminathan 		 * Wake it up, because it's now at least 25% empty.
2515*bafec742SSukumar Swaminathan 		 */
2516*bafec742SSukumar Swaminathan 		tx_ring->queue_stopped = 0;
2517*bafec742SSukumar Swaminathan 		resume_tx = B_TRUE;
2518*bafec742SSukumar Swaminathan 	}
2519*bafec742SSukumar Swaminathan 
2520*bafec742SSukumar Swaminathan 	mutex_exit(&tx_ring->tx_lock);
2521*bafec742SSukumar Swaminathan 	/* Don't hold the lock during OS callback */
2522*bafec742SSukumar Swaminathan 	if (resume_tx)
2523*bafec742SSukumar Swaminathan 		RESUME_TX(tx_ring);
2524*bafec742SSukumar Swaminathan 	return (count);
2525*bafec742SSukumar Swaminathan }
2526*bafec742SSukumar Swaminathan 
2527*bafec742SSukumar Swaminathan /*
2528*bafec742SSukumar Swaminathan  * reset asic when error happens
2529*bafec742SSukumar Swaminathan  */
2530*bafec742SSukumar Swaminathan /* ARGSUSED */
2531*bafec742SSukumar Swaminathan static uint_t
2532*bafec742SSukumar Swaminathan ql_asic_reset_work(caddr_t arg1, caddr_t arg2)
2533*bafec742SSukumar Swaminathan {
2534*bafec742SSukumar Swaminathan 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2535*bafec742SSukumar Swaminathan 	int status;
2536*bafec742SSukumar Swaminathan 
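2536*bafec742SSukumar Swaminathan 	/* bounce the adapter: bring it all the way down, then back up */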
2537*bafec742SSukumar Swaminathan 	mutex_enter(&qlge->gen_mutex);
2538*bafec742SSukumar Swaminathan 	status = ql_bringdown_adapter(qlge);
2539*bafec742SSukumar Swaminathan 	if (status != DDI_SUCCESS)
2540*bafec742SSukumar Swaminathan 		goto error;
2541*bafec742SSukumar Swaminathan 
2542*bafec742SSukumar Swaminathan 	status = ql_bringup_adapter(qlge);
2543*bafec742SSukumar Swaminathan 	if (status != DDI_SUCCESS)
2544*bafec742SSukumar Swaminathan 		goto error;
2545*bafec742SSukumar Swaminathan 	mutex_exit(&qlge->gen_mutex);
2546*bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2547*bafec742SSukumar Swaminathan 
2548*bafec742SSukumar Swaminathan error:
2549*bafec742SSukumar Swaminathan 	mutex_exit(&qlge->gen_mutex);
2550*bafec742SSukumar Swaminathan 	cmn_err(CE_WARN,
2551*bafec742SSukumar Swaminathan 	    "qlge up/down cycle failed, closing device");
2552*bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2553*bafec742SSukumar Swaminathan }
2554*bafec742SSukumar Swaminathan 
2555*bafec742SSukumar Swaminathan /*
2556*bafec742SSukumar Swaminathan  * Reset MPI
2557*bafec742SSukumar Swaminathan  */
2558*bafec742SSukumar Swaminathan /* ARGSUSED */
2559*bafec742SSukumar Swaminathan static uint_t
2560*bafec742SSukumar Swaminathan ql_mpi_reset_work(caddr_t arg1, caddr_t arg2)
2561*bafec742SSukumar Swaminathan {
2562*bafec742SSukumar Swaminathan 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2563*bafec742SSukumar Swaminathan 
2564*bafec742SSukumar Swaminathan 	ql_reset_mpi_risc(qlge);
2565*bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2566*bafec742SSukumar Swaminathan }
2567*bafec742SSukumar Swaminathan 
2568*bafec742SSukumar Swaminathan /*
2569*bafec742SSukumar Swaminathan  * Process MPI mailbox messages
2570*bafec742SSukumar Swaminathan  */
2571*bafec742SSukumar Swaminathan /* ARGSUSED */
2572*bafec742SSukumar Swaminathan static uint_t
2573*bafec742SSukumar Swaminathan ql_mpi_event_work(caddr_t arg1, caddr_t arg2)
2574*bafec742SSukumar Swaminathan {
2575*bafec742SSukumar Swaminathan 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2576*bafec742SSukumar Swaminathan 
2577*bafec742SSukumar Swaminathan 	ql_do_mpi_intr(qlge);
2578*bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2579*bafec742SSukumar Swaminathan }
2580*bafec742SSukumar Swaminathan 
2581*bafec742SSukumar Swaminathan /* Fire up a handler to reset the ASIC. */
2582*bafec742SSukumar Swaminathan void
2583*bafec742SSukumar Swaminathan ql_wake_asic_reset_soft_intr(qlge_t *qlge)
2584*bafec742SSukumar Swaminathan {
2585*bafec742SSukumar Swaminathan 	(void) ddi_intr_trigger_softint(qlge->asic_reset_intr_hdl, NULL);
2586*bafec742SSukumar Swaminathan }
2587*bafec742SSukumar Swaminathan 
2588*bafec742SSukumar Swaminathan static void
2589*bafec742SSukumar Swaminathan ql_wake_mpi_reset_soft_intr(qlge_t *qlge)
2590*bafec742SSukumar Swaminathan {
2591*bafec742SSukumar Swaminathan 	(void) ddi_intr_trigger_softint(qlge->mpi_reset_intr_hdl, NULL);
2592*bafec742SSukumar Swaminathan }
2593*bafec742SSukumar Swaminathan 
2594*bafec742SSukumar Swaminathan static void
2595*bafec742SSukumar Swaminathan ql_wake_mpi_event_soft_intr(qlge_t *qlge)
2596*bafec742SSukumar Swaminathan {
2597*bafec742SSukumar Swaminathan 	(void) ddi_intr_trigger_softint(qlge->mpi_event_intr_hdl, NULL);
2598*bafec742SSukumar Swaminathan }
2599*bafec742SSukumar Swaminathan 
2600*bafec742SSukumar Swaminathan /*
2601*bafec742SSukumar Swaminathan  * This handles a fatal error, MPI activity, and the default
2602*bafec742SSukumar Swaminathan  * rx_ring in an MSI-X multiple interrupt vector environment.
2603*bafec742SSukumar Swaminathan  * In MSI/Legacy environment it also process the rest of
2604*bafec742SSukumar Swaminathan  * In an MSI/Legacy environment it also processes the rest of
2605*bafec742SSukumar Swaminathan  */
2606*bafec742SSukumar Swaminathan /* ARGSUSED */
2607*bafec742SSukumar Swaminathan static uint_t
2608*bafec742SSukumar Swaminathan ql_isr(caddr_t arg1, caddr_t arg2)
2609*bafec742SSukumar Swaminathan {
2610*bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
2611*bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2612*bafec742SSukumar Swaminathan 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
2613*bafec742SSukumar Swaminathan 	uint32_t var, prod;
2614*bafec742SSukumar Swaminathan 	int i;
2615*bafec742SSukumar Swaminathan 	int work_done = 0;
2616*bafec742SSukumar Swaminathan 
2617*bafec742SSukumar Swaminathan 	mblk_t *mp;
2618*bafec742SSukumar Swaminathan 
2619*bafec742SSukumar Swaminathan 	_NOTE(ARGUNUSED(arg2));
2620*bafec742SSukumar Swaminathan 
2621*bafec742SSukumar Swaminathan 	++qlge->rx_interrupts[rx_ring->cq_id];
2622*bafec742SSukumar Swaminathan 
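2622*bafec742SSukumar Swaminathan 	/*
2622*bafec742SSukumar Swaminathan 	 * If the completion interrupt is currently masked (irq_cnt is
2622*bafec742SSukumar Swaminathan 	 * non-zero), just read the latched status registers and claim
2622*bafec742SSukumar Swaminathan 	 * the interrupt.
2622*bafec742SSukumar Swaminathan 	 */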
2623*bafec742SSukumar Swaminathan 	if (ql_atomic_read_32(&qlge->intr_ctx[0].irq_cnt)) {
2624*bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_RSVD7, 0xfeed0002);
2625*bafec742SSukumar Swaminathan 		var = ql_read_reg(qlge, REG_ERROR_STATUS);
2626*bafec742SSukumar Swaminathan 		var = ql_read_reg(qlge, REG_STATUS);
2627*bafec742SSukumar Swaminathan 		var = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
2628*bafec742SSukumar Swaminathan 		return (DDI_INTR_CLAIMED);
2629*bafec742SSukumar Swaminathan 	}
2630*bafec742SSukumar Swaminathan 
2631*bafec742SSukumar Swaminathan 	ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2632*bafec742SSukumar Swaminathan 
2633*bafec742SSukumar Swaminathan 	/*
2634*bafec742SSukumar Swaminathan 	 * Check the default queue and wake handler if active.
2635*bafec742SSukumar Swaminathan 	 */
2636*bafec742SSukumar Swaminathan 	rx_ring = &qlge->rx_ring[0];
2637*bafec742SSukumar Swaminathan 	prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2638*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INTR, ("rx-ring[0] prod index 0x%x, consumer 0x%x ",
2639*bafec742SSukumar Swaminathan 	    prod, rx_ring->cnsmr_idx));
2640*bafec742SSukumar Swaminathan 	/* check if interrupt is due to incoming packet */
2641*bafec742SSukumar Swaminathan 	if (prod != rx_ring->cnsmr_idx) {
2642*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INTR, ("Waking handler for rx_ring[0].\n"));
2643*bafec742SSukumar Swaminathan 		ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2644*bafec742SSukumar Swaminathan 		mutex_enter(&rx_ring->rx_lock);
2645*bafec742SSukumar Swaminathan 		mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2646*bafec742SSukumar Swaminathan 		mutex_exit(&rx_ring->rx_lock);
2647*bafec742SSukumar Swaminathan 
2648*bafec742SSukumar Swaminathan 		if (mp != NULL)
2649*bafec742SSukumar Swaminathan 			RX_UPSTREAM(rx_ring, mp);
2650*bafec742SSukumar Swaminathan 		work_done++;
2651*bafec742SSukumar Swaminathan 	} else {
2652*bafec742SSukumar Swaminathan 		/*
2653*bafec742SSukumar Swaminathan 		 * If interrupt is not due to incoming packet, read status
2654*bafec742SSukumar Swaminathan 		 * register to see if error happens or mailbox interrupt.
2655*bafec742SSukumar Swaminathan 		 */
2656*bafec742SSukumar Swaminathan 		var = ql_read_reg(qlge, REG_STATUS);
2657*bafec742SSukumar Swaminathan 		if ((var & STATUS_FE) != 0) {
2658*bafec742SSukumar Swaminathan 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
2659*bafec742SSukumar Swaminathan 
2660*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Got fatal error, STS = %x.", var);
2661*bafec742SSukumar Swaminathan 			var = ql_read_reg(qlge, REG_ERROR_STATUS);
2662*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
2663*bafec742SSukumar Swaminathan 			    "Resetting chip. Error Status Register = 0x%x",
2664*bafec742SSukumar Swaminathan 			    var);
2665*bafec742SSukumar Swaminathan 			ql_wake_asic_reset_soft_intr(qlge);
2666*bafec742SSukumar Swaminathan 			return (DDI_INTR_CLAIMED);
2667*bafec742SSukumar Swaminathan 		}
2668*bafec742SSukumar Swaminathan 
2669*bafec742SSukumar Swaminathan 		/*
2670*bafec742SSukumar Swaminathan 		 * Check MPI processor activity.
2671*bafec742SSukumar Swaminathan 		 */
2672*bafec742SSukumar Swaminathan 		if ((var & STATUS_PI) != 0) {
2673*bafec742SSukumar Swaminathan 			/*
2674*bafec742SSukumar Swaminathan 			 * We've got an async event or mailbox completion.
2675*bafec742SSukumar Swaminathan 			 * Handle it and clear the source of the interrupt.
2676*bafec742SSukumar Swaminathan 			 */
2677*bafec742SSukumar Swaminathan 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
2678*bafec742SSukumar Swaminathan 
2679*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INTR, ("Got MPI processor interrupt.\n"));
2680*bafec742SSukumar Swaminathan 			ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2681*bafec742SSukumar Swaminathan 			ql_wake_mpi_event_soft_intr(qlge);
2682*bafec742SSukumar Swaminathan 			work_done++;
2683*bafec742SSukumar Swaminathan 		}
2684*bafec742SSukumar Swaminathan 	}
2685*bafec742SSukumar Swaminathan 
2686*bafec742SSukumar Swaminathan 	if (qlge->intr_type != DDI_INTR_TYPE_MSIX) {
2687*bafec742SSukumar Swaminathan 		/*
2688*bafec742SSukumar Swaminathan 		 * Start the DPC for each active queue.
2689*bafec742SSukumar Swaminathan 		 */
2690*bafec742SSukumar Swaminathan 		for (i = 1; i < qlge->rx_ring_count; i++) {
2691*bafec742SSukumar Swaminathan 			rx_ring = &qlge->rx_ring[i];
2692*bafec742SSukumar Swaminathan 
2693*bafec742SSukumar Swaminathan 			if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2694*bafec742SSukumar Swaminathan 			    rx_ring->cnsmr_idx) {
2695*bafec742SSukumar Swaminathan 				QL_PRINT(DBG_INTR,
2696*bafec742SSukumar Swaminathan 				    ("Waking handler for rx_ring[%d].\n", i));
2697*bafec742SSukumar Swaminathan 
2698*bafec742SSukumar Swaminathan 				ql_disable_completion_interrupt(qlge,
2699*bafec742SSukumar Swaminathan 				    rx_ring->irq);
2700*bafec742SSukumar Swaminathan 				if (rx_ring->type == TX_Q) {
2701*bafec742SSukumar Swaminathan 					(void) ql_clean_outbound_rx_ring(
2701*bafec742SSukumar Swaminathan 					    rx_ring);
2702*bafec742SSukumar Swaminathan 					ql_enable_completion_interrupt(
2703*bafec742SSukumar Swaminathan 					    rx_ring->qlge, rx_ring->irq);
2704*bafec742SSukumar Swaminathan 				} else {
2705*bafec742SSukumar Swaminathan 					mutex_enter(&rx_ring->rx_lock);
2706*bafec742SSukumar Swaminathan 					mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2707*bafec742SSukumar Swaminathan 					mutex_exit(&rx_ring->rx_lock);
2708*bafec742SSukumar Swaminathan 					if (mp != NULL)
2709*bafec742SSukumar Swaminathan 						RX_UPSTREAM(rx_ring, mp);
2710*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
2711*bafec742SSukumar Swaminathan 					if (rx_ring->mac_flags ==
2712*bafec742SSukumar Swaminathan 					    QL_MAC_STOPPED)
2713*bafec742SSukumar Swaminathan 						cmn_err(CE_NOTE,
2714*bafec742SSukumar Swaminathan 						    "%s rx_indicate(%d) %d\n",
2715*bafec742SSukumar Swaminathan 						    __func__, i,
2716*bafec742SSukumar Swaminathan 						    rx_ring->rx_indicate);
2717*bafec742SSukumar Swaminathan #endif
2718*bafec742SSukumar Swaminathan 				}
2719*bafec742SSukumar Swaminathan 				work_done++;
2720*bafec742SSukumar Swaminathan 			}
2721*bafec742SSukumar Swaminathan 		}
2722*bafec742SSukumar Swaminathan 	}
2723*bafec742SSukumar Swaminathan 
2724*bafec742SSukumar Swaminathan 	ql_enable_completion_interrupt(qlge, intr_ctx->intr);
2725*bafec742SSukumar Swaminathan 
2726*bafec742SSukumar Swaminathan 	return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
2727*bafec742SSukumar Swaminathan }
2728*bafec742SSukumar Swaminathan 
2729*bafec742SSukumar Swaminathan /*
2730*bafec742SSukumar Swaminathan  * MSI-X Multiple Vector Interrupt Handler for outbound (TX) completions.
2731*bafec742SSukumar Swaminathan  */
2732*bafec742SSukumar Swaminathan /* ARGSUSED */
2733*bafec742SSukumar Swaminathan static uint_t
2734*bafec742SSukumar Swaminathan ql_msix_tx_isr(caddr_t arg1, caddr_t arg2)
2735*bafec742SSukumar Swaminathan {
2736*bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
2737*bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2738*bafec742SSukumar Swaminathan 	_NOTE(ARGUNUSED(arg2));
2739*bafec742SSukumar Swaminathan 
2740*bafec742SSukumar Swaminathan 	++qlge->rx_interrupts[rx_ring->cq_id];
2741*bafec742SSukumar Swaminathan 	(void) ql_clean_outbound_rx_ring(rx_ring);
2742*bafec742SSukumar Swaminathan 	ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
2743*bafec742SSukumar Swaminathan 
2744*bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2745*bafec742SSukumar Swaminathan }
2746*bafec742SSukumar Swaminathan 
2747*bafec742SSukumar Swaminathan /*
2748*bafec742SSukumar Swaminathan  * Poll n_bytes of chained incoming packets
2749*bafec742SSukumar Swaminathan  */
2750*bafec742SSukumar Swaminathan mblk_t *
2751*bafec742SSukumar Swaminathan ql_ring_rx_poll(void *arg, int n_bytes)
2752*bafec742SSukumar Swaminathan {
2753*bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring = (struct rx_ring *)arg;
2754*bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2755*bafec742SSukumar Swaminathan 	mblk_t *mp = NULL;
2756*bafec742SSukumar Swaminathan 	uint32_t var;
2757*bafec742SSukumar Swaminathan 
2758*bafec742SSukumar Swaminathan 	ASSERT(n_bytes >= 0);
2759*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_GLD, ("%s for ring(%d) to read max %d bytes\n",
2760*bafec742SSukumar Swaminathan 	    __func__, rx_ring->cq_id, n_bytes));
2761*bafec742SSukumar Swaminathan 
2762*bafec742SSukumar Swaminathan 	++qlge->rx_polls[rx_ring->cq_id];
2763*bafec742SSukumar Swaminathan 
2764*bafec742SSukumar Swaminathan 	if (n_bytes == 0)
2765*bafec742SSukumar Swaminathan 		return (mp);
2766*bafec742SSukumar Swaminathan 	mutex_enter(&rx_ring->rx_lock);
2767*bafec742SSukumar Swaminathan 	mp = ql_ring_rx(rx_ring, n_bytes);
2768*bafec742SSukumar Swaminathan 	mutex_exit(&rx_ring->rx_lock);
2769*bafec742SSukumar Swaminathan 
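2769*bafec742SSukumar Swaminathan 	/*
2769*bafec742SSukumar Swaminathan 	 * The default completion queue (cq_id 0) also carries fatal
2769*bafec742SSukumar Swaminathan 	 * error and MPI events; check for them when a poll returns
2769*bafec742SSukumar Swaminathan 	 * no packets.
2769*bafec742SSukumar Swaminathan 	 */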
2770*bafec742SSukumar Swaminathan 	if ((rx_ring->cq_id == 0) && (mp == NULL)) {
2771*bafec742SSukumar Swaminathan 		var = ql_read_reg(qlge, REG_STATUS);
2772*bafec742SSukumar Swaminathan 		/*
2773*bafec742SSukumar Swaminathan 		 * Check for fatal error.
2774*bafec742SSukumar Swaminathan 		 */
2775*bafec742SSukumar Swaminathan 		if ((var & STATUS_FE) != 0) {
2776*bafec742SSukumar Swaminathan 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
2777*bafec742SSukumar Swaminathan 			var = ql_read_reg(qlge, REG_ERROR_STATUS);
2778*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Got fatal error %x.", var);
2779*bafec742SSukumar Swaminathan 			ql_wake_asic_reset_soft_intr(qlge);
2780*bafec742SSukumar Swaminathan 		}
2781*bafec742SSukumar Swaminathan 		/*
2782*bafec742SSukumar Swaminathan 		 * Check MPI processor activity.
2783*bafec742SSukumar Swaminathan 		 */
2784*bafec742SSukumar Swaminathan 		if ((var & STATUS_PI) != 0) {
2785*bafec742SSukumar Swaminathan 			/*
2786*bafec742SSukumar Swaminathan 			 * We've got an async event or mailbox completion.
2787*bafec742SSukumar Swaminathan 			 * Handle it and clear the source of the interrupt.
2788*bafec742SSukumar Swaminathan 			 */
2789*bafec742SSukumar Swaminathan 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
2790*bafec742SSukumar Swaminathan 			ql_do_mpi_intr(qlge);
2791*bafec742SSukumar Swaminathan 		}
2792*bafec742SSukumar Swaminathan 	}
2793*bafec742SSukumar Swaminathan 
2794*bafec742SSukumar Swaminathan 	return (mp);
2795*bafec742SSukumar Swaminathan }
2796*bafec742SSukumar Swaminathan 
2797*bafec742SSukumar Swaminathan /*
2798*bafec742SSukumar Swaminathan  * MSI-X Multiple Vector Interrupt Handler for inbound (RX) completions.
2799*bafec742SSukumar Swaminathan  */
2800*bafec742SSukumar Swaminathan /* ARGSUSED */
2801*bafec742SSukumar Swaminathan static uint_t
2802*bafec742SSukumar Swaminathan ql_msix_rx_isr(caddr_t arg1, caddr_t arg2)
2803*bafec742SSukumar Swaminathan {
2804*bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
2805*bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2806*bafec742SSukumar Swaminathan 	mblk_t *mp;
2807*bafec742SSukumar Swaminathan 	_NOTE(ARGUNUSED(arg2));
2808*bafec742SSukumar Swaminathan 
2809*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
2810*bafec742SSukumar Swaminathan 
2811*bafec742SSukumar Swaminathan 	++qlge->rx_interrupts[rx_ring->cq_id];
2812*bafec742SSukumar Swaminathan 
2813*bafec742SSukumar Swaminathan 	mutex_enter(&rx_ring->rx_lock);
2814*bafec742SSukumar Swaminathan 	mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2815*bafec742SSukumar Swaminathan 	mutex_exit(&rx_ring->rx_lock);
2816*bafec742SSukumar Swaminathan 
2817*bafec742SSukumar Swaminathan 	if (mp != NULL)
2818*bafec742SSukumar Swaminathan 		RX_UPSTREAM(rx_ring, mp);
2819*bafec742SSukumar Swaminathan 
2820*bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2821*bafec742SSukumar Swaminathan }
2822*bafec742SSukumar Swaminathan 
2823*bafec742SSukumar Swaminathan 
2824*bafec742SSukumar Swaminathan /*
2825*bafec742SSukumar Swaminathan  * Allocate DMA buffer for ioctl service
2826*bafec742SSukumar Swaminathan  */
2829*bafec742SSukumar Swaminathan static int
2830*bafec742SSukumar Swaminathan ql_alloc_ioctl_dma_buf(qlge_t *qlge)
2831*bafec742SSukumar Swaminathan {
2832*bafec742SSukumar Swaminathan 	uint64_t phy_addr;
2833*bafec742SSukumar Swaminathan 	uint64_t alloc_size;
2834*bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
2835*bafec742SSukumar Swaminathan 
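2835*bafec742SSukumar Swaminathan 	/*
2835*bafec742SSukumar Swaminathan 	 * Size the buffer to the larger of the MPI code RAM and the
2835*bafec742SSukumar Swaminathan 	 * MEMC RAM regions so a single buffer can service either one.
2835*bafec742SSukumar Swaminathan 	 */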
2836*bafec742SSukumar Swaminathan 	alloc_size = qlge->ioctl_buf_dma_attr.mem_len =
2837*bafec742SSukumar Swaminathan 	    max(WCS_MPI_CODE_RAM_LENGTH, MEMC_MPI_RAM_LENGTH);
2838*bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &qlge->ioctl_buf_dma_attr.dma_handle,
2839*bafec742SSukumar Swaminathan 	    &ql_buf_acc_attr,
2840*bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2841*bafec742SSukumar Swaminathan 	    &qlge->ioctl_buf_dma_attr.acc_handle,
2842*bafec742SSukumar Swaminathan 	    (size_t)alloc_size,  /* mem size */
2843*bafec742SSukumar Swaminathan 	    (size_t)0,  /* alignment */
2844*bafec742SSukumar Swaminathan 	    (caddr_t *)&qlge->ioctl_buf_dma_attr.vaddr,
2845*bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
2846*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): ioctl DMA allocation failed.",
2847*bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
2848*bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
2849*bafec742SSukumar Swaminathan 	}
2850*bafec742SSukumar Swaminathan 
2851*bafec742SSukumar Swaminathan 	phy_addr = dma_cookie.dmac_laddress;
2852*bafec742SSukumar Swaminathan 
2853*bafec742SSukumar Swaminathan 	if (qlge->ioctl_buf_dma_attr.vaddr == NULL) {
2854*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): failed.", __func__, qlge->instance);
2855*bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
2856*bafec742SSukumar Swaminathan 	}
2857*bafec742SSukumar Swaminathan 
2858*bafec742SSukumar Swaminathan 	qlge->ioctl_buf_dma_attr.dma_addr = phy_addr;
2859*bafec742SSukumar Swaminathan 
2860*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_MBX, ("%s: ioctl_dma_buf_virt_addr = 0x%lx, "
2861*bafec742SSukumar Swaminathan 	    "phy_addr = 0x%lx\n",
2862*bafec742SSukumar Swaminathan 	    __func__, qlge->ioctl_buf_dma_attr.vaddr, phy_addr));
2863*bafec742SSukumar Swaminathan 
2864*bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
2865*bafec742SSukumar Swaminathan }
2866*bafec742SSukumar Swaminathan 
2867*bafec742SSukumar Swaminathan 
2868*bafec742SSukumar Swaminathan /*
2869*bafec742SSukumar Swaminathan  * Function to free physical memory.
2870*bafec742SSukumar Swaminathan  */
2871*bafec742SSukumar Swaminathan static void
2872*bafec742SSukumar Swaminathan ql_free_phys(ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *acc_handle)
2873*bafec742SSukumar Swaminathan {
2874*bafec742SSukumar Swaminathan 	if (dma_handle != NULL) {
2875*bafec742SSukumar Swaminathan 		(void) ddi_dma_unbind_handle(*dma_handle);
2876*bafec742SSukumar Swaminathan 		if (acc_handle != NULL)
2877*bafec742SSukumar Swaminathan 			ddi_dma_mem_free(acc_handle);
2878*bafec742SSukumar Swaminathan 		ddi_dma_free_handle(dma_handle);
2879*bafec742SSukumar Swaminathan 	}
2880*bafec742SSukumar Swaminathan }
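2880*bafec742SSukumar Swaminathan 
2880*bafec742SSukumar Swaminathan /*
2880*bafec742SSukumar Swaminathan  * Typical usage of the ql_alloc_phys()/ql_free_phys() pair, as seen
2880*bafec742SSukumar Swaminathan  * throughout this file (an illustrative sketch only; "example_dma" is
2880*bafec742SSukumar Swaminathan  * a hypothetical dma_area-style variable, not a driver field):
2880*bafec742SSukumar Swaminathan  *
2880*bafec742SSukumar Swaminathan  *	ddi_dma_cookie_t cookie;
2880*bafec742SSukumar Swaminathan  *
2880*bafec742SSukumar Swaminathan  *	if (ql_alloc_phys(qlge->dip, &example_dma.dma_handle,
2880*bafec742SSukumar Swaminathan  *	    &ql_buf_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2880*bafec742SSukumar Swaminathan  *	    &example_dma.acc_handle, (size_t)1024, (size_t)0,
2880*bafec742SSukumar Swaminathan  *	    (caddr_t *)&example_dma.vaddr, &cookie) != 0)
2880*bafec742SSukumar Swaminathan  *		return (DDI_FAILURE);
2880*bafec742SSukumar Swaminathan  *	example_dma.dma_addr = cookie.dmac_laddress;
2880*bafec742SSukumar Swaminathan  *	...
2880*bafec742SSukumar Swaminathan  *	ql_free_phys(&example_dma.dma_handle, &example_dma.acc_handle);
2880*bafec742SSukumar Swaminathan  */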
2881*bafec742SSukumar Swaminathan 
2882*bafec742SSukumar Swaminathan /*
2883*bafec742SSukumar Swaminathan  * Function to free ioctl dma buffer.
2884*bafec742SSukumar Swaminathan  */
2885*bafec742SSukumar Swaminathan static void
2886*bafec742SSukumar Swaminathan ql_free_ioctl_dma_buf(qlge_t *qlge)
2887*bafec742SSukumar Swaminathan {
2888*bafec742SSukumar Swaminathan 	if (qlge->ioctl_buf_dma_attr.dma_handle != NULL) {
2889*bafec742SSukumar Swaminathan 		ql_free_phys(&qlge->ioctl_buf_dma_attr.dma_handle,
2890*bafec742SSukumar Swaminathan 		    &qlge->ioctl_buf_dma_attr.acc_handle);
2891*bafec742SSukumar Swaminathan 
2892*bafec742SSukumar Swaminathan 		qlge->ioctl_buf_dma_attr.vaddr = NULL;
2893*bafec742SSukumar Swaminathan 		qlge->ioctl_buf_dma_attr.dma_handle = NULL;
2894*bafec742SSukumar Swaminathan 	}
2895*bafec742SSukumar Swaminathan }
2896*bafec742SSukumar Swaminathan 
2897*bafec742SSukumar Swaminathan /*
2898*bafec742SSukumar Swaminathan  * Free shadow register space used for request and completion queues
2899*bafec742SSukumar Swaminathan  */
2900*bafec742SSukumar Swaminathan static void
2901*bafec742SSukumar Swaminathan ql_free_shadow_space(qlge_t *qlge)
2902*bafec742SSukumar Swaminathan {
2903*bafec742SSukumar Swaminathan 	if (qlge->host_copy_shadow_dma_attr.dma_handle != NULL) {
2904*bafec742SSukumar Swaminathan 		ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
2905*bafec742SSukumar Swaminathan 		    &qlge->host_copy_shadow_dma_attr.acc_handle);
2906*bafec742SSukumar Swaminathan 		bzero(&qlge->host_copy_shadow_dma_attr,
2907*bafec742SSukumar Swaminathan 		    sizeof (qlge->host_copy_shadow_dma_attr));
2908*bafec742SSukumar Swaminathan 	}
2909*bafec742SSukumar Swaminathan 
2910*bafec742SSukumar Swaminathan 	if (qlge->buf_q_ptr_base_addr_dma_attr.dma_handle != NULL) {
2911*bafec742SSukumar Swaminathan 		ql_free_phys(&qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
2912*bafec742SSukumar Swaminathan 		    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle);
2913*bafec742SSukumar Swaminathan 		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
2914*bafec742SSukumar Swaminathan 		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
2915*bafec742SSukumar Swaminathan 	}
2916*bafec742SSukumar Swaminathan }
2917*bafec742SSukumar Swaminathan 
2918*bafec742SSukumar Swaminathan /*
2919*bafec742SSukumar Swaminathan  * Allocate shadow register space for request and completion queues
2920*bafec742SSukumar Swaminathan  */
2921*bafec742SSukumar Swaminathan static int
2922*bafec742SSukumar Swaminathan ql_alloc_shadow_space(qlge_t *qlge)
2923*bafec742SSukumar Swaminathan {
2924*bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
2925*bafec742SSukumar Swaminathan 
2926*bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip,
2927*bafec742SSukumar Swaminathan 	    &qlge->host_copy_shadow_dma_attr.dma_handle,
2928*bafec742SSukumar Swaminathan 	    &ql_dev_acc_attr,
2929*bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2930*bafec742SSukumar Swaminathan 	    &qlge->host_copy_shadow_dma_attr.acc_handle,
2931*bafec742SSukumar Swaminathan 	    (size_t)VM_PAGE_SIZE,  /* mem size */
2932*bafec742SSukumar Swaminathan 	    (size_t)4, /* 4 bytes alignment */
2933*bafec742SSukumar Swaminathan 	    (caddr_t *)&qlge->host_copy_shadow_dma_attr.vaddr,
2934*bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
2935*bafec742SSukumar Swaminathan 		bzero(&qlge->host_copy_shadow_dma_attr,
2936*bafec742SSukumar Swaminathan 		    sizeof (qlge->host_copy_shadow_dma_attr));
2937*bafec742SSukumar Swaminathan 
2938*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory for "
2939*bafec742SSukumar Swaminathan 		    "response shadow registers", __func__, qlge->instance);
2940*bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
2941*bafec742SSukumar Swaminathan 	}
2942*bafec742SSukumar Swaminathan 
2943*bafec742SSukumar Swaminathan 	qlge->host_copy_shadow_dma_attr.dma_addr = dma_cookie.dmac_laddress;
2944*bafec742SSukumar Swaminathan 
2945*bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip,
2946*bafec742SSukumar Swaminathan 	    &qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
2947*bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
2948*bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2949*bafec742SSukumar Swaminathan 	    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle,
2950*bafec742SSukumar Swaminathan 	    (size_t)VM_PAGE_SIZE,  /* mem size */
2951*bafec742SSukumar Swaminathan 	    (size_t)4, /* 4 bytes alignment */
2952*bafec742SSukumar Swaminathan 	    (caddr_t *)&qlge->buf_q_ptr_base_addr_dma_attr.vaddr,
2953*bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
2954*bafec742SSukumar Swaminathan 		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
2955*bafec742SSukumar Swaminathan 		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
2956*bafec742SSukumar Swaminathan 
2957*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory "
2958*bafec742SSukumar Swaminathan 		    "for request shadow registers",
2959*bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
2960*bafec742SSukumar Swaminathan 		goto err_wqp_sh_area;
2961*bafec742SSukumar Swaminathan 	}
2962*bafec742SSukumar Swaminathan 	qlge->buf_q_ptr_base_addr_dma_attr.dma_addr = dma_cookie.dmac_laddress;
2963*bafec742SSukumar Swaminathan 
2964*bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
2965*bafec742SSukumar Swaminathan 
2966*bafec742SSukumar Swaminathan err_wqp_sh_area:
2967*bafec742SSukumar Swaminathan 	ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
2968*bafec742SSukumar Swaminathan 	    &qlge->host_copy_shadow_dma_attr.acc_handle);
2969*bafec742SSukumar Swaminathan 	bzero(&qlge->host_copy_shadow_dma_attr,
2970*bafec742SSukumar Swaminathan 	    sizeof (qlge->host_copy_shadow_dma_attr));
2971*bafec742SSukumar Swaminathan 
2972*bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
2973*bafec742SSukumar Swaminathan }
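2973*bafec742SSukumar Swaminathan 
2973*bafec742SSukumar Swaminathan /*
2973*bafec742SSukumar Swaminathan  * The shadow areas exist so the chip can DMA its producer indices into
2973*bafec742SSukumar Swaminathan  * host memory, letting the driver poll a memory location instead of
2973*bafec742SSukumar Swaminathan  * doing a PIO register read in the interrupt path. A rough sketch of
2973*bafec742SSukumar Swaminathan  * how a completion handler might consult the shadow copy (the field
2973*bafec742SSukumar Swaminathan  * name, index width and helper below are assumptions for illustration,
2973*bafec742SSukumar Swaminathan  * not lifted from this driver):
2973*bafec742SSukumar Swaminathan  *
2973*bafec742SSukumar Swaminathan  *	volatile uint32_t *prod_sh =
2973*bafec742SSukumar Swaminathan  *	    (volatile uint32_t *)rx_ring->prod_idx_sh_reg;
2973*bafec742SSukumar Swaminathan  *
2973*bafec742SSukumar Swaminathan  *	while (rx_ring->cnsmr_idx != (uint16_t)(*prod_sh))
2973*bafec742SSukumar Swaminathan  *		(void) example_process_one_completion(rx_ring);
2973*bafec742SSukumar Swaminathan  */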
2974*bafec742SSukumar Swaminathan 
2975*bafec742SSukumar Swaminathan /*
2976*bafec742SSukumar Swaminathan  * Initialize a tx ring
2977*bafec742SSukumar Swaminathan  */
2978*bafec742SSukumar Swaminathan static void
2979*bafec742SSukumar Swaminathan ql_init_tx_ring(struct tx_ring *tx_ring)
2980*bafec742SSukumar Swaminathan {
2981*bafec742SSukumar Swaminathan 	int i;
2982*bafec742SSukumar Swaminathan 	struct ob_mac_iocb_req *mac_iocb_ptr = tx_ring->wq_dma.vaddr;
2983*bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_ring_desc = tx_ring->wq_desc;
2984*bafec742SSukumar Swaminathan 
2985*bafec742SSukumar Swaminathan 	for (i = 0; i < tx_ring->wq_len; i++) {
2986*bafec742SSukumar Swaminathan 		tx_ring_desc->index = i;
2987*bafec742SSukumar Swaminathan 		tx_ring_desc->queue_entry = mac_iocb_ptr;
2988*bafec742SSukumar Swaminathan 		mac_iocb_ptr++;
2989*bafec742SSukumar Swaminathan 		tx_ring_desc++;
2990*bafec742SSukumar Swaminathan 	}
2991*bafec742SSukumar Swaminathan 	tx_ring->tx_free_count = tx_ring->wq_len;
2992*bafec742SSukumar Swaminathan 	tx_ring->queue_stopped = 0;
2993*bafec742SSukumar Swaminathan }
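2993*bafec742SSukumar Swaminathan 
2993*bafec742SSukumar Swaminathan /*
2993*bafec742SSukumar Swaminathan  * After ql_init_tx_ring() the software descriptors and the DMA'd IOCB
2993*bafec742SSukumar Swaminathan  * array walk in lockstep, so for every slot i the invariants below
2993*bafec742SSukumar Swaminathan  * hold (a sketch for illustration only):
2993*bafec742SSukumar Swaminathan  *
2993*bafec742SSukumar Swaminathan  *	struct ob_mac_iocb_req *iocbs =
2993*bafec742SSukumar Swaminathan  *	    (struct ob_mac_iocb_req *)tx_ring->wq_dma.vaddr;
2993*bafec742SSukumar Swaminathan  *
2993*bafec742SSukumar Swaminathan  *	ASSERT(tx_ring->wq_desc[i].index == i);
2993*bafec742SSukumar Swaminathan  *	ASSERT(tx_ring->wq_desc[i].queue_entry == &iocbs[i]);
2993*bafec742SSukumar Swaminathan  */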
2994*bafec742SSukumar Swaminathan 
2995*bafec742SSukumar Swaminathan /*
2996*bafec742SSukumar Swaminathan  * Free one tx ring resources
2997*bafec742SSukumar Swaminathan  */
2998*bafec742SSukumar Swaminathan static void
2999*bafec742SSukumar Swaminathan ql_free_tx_resources(struct tx_ring *tx_ring)
3000*bafec742SSukumar Swaminathan {
3001*bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_ring_desc;
3002*bafec742SSukumar Swaminathan 	int i, j;
3003*bafec742SSukumar Swaminathan 
3004*bafec742SSukumar Swaminathan 	ql_free_phys(&tx_ring->wq_dma.dma_handle, &tx_ring->wq_dma.acc_handle);
3005*bafec742SSukumar Swaminathan 	bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
3006*bafec742SSukumar Swaminathan 
3007*bafec742SSukumar Swaminathan 	if (tx_ring->wq_desc != NULL) {
3008*bafec742SSukumar Swaminathan 		tx_ring_desc = tx_ring->wq_desc;
3009*bafec742SSukumar Swaminathan 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3010*bafec742SSukumar Swaminathan 			for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3011*bafec742SSukumar Swaminathan 				if (tx_ring_desc->tx_dma_handle[j]) {
3012*bafec742SSukumar Swaminathan 					/*
3013*bafec742SSukumar Swaminathan 					 * The unbinding will happen in tx
3014*bafec742SSukumar Swaminathan 					 * completion, here we just free the
3015*bafec742SSukumar Swaminathan 					 * handles
3016*bafec742SSukumar Swaminathan 					 */
3017*bafec742SSukumar Swaminathan 					ddi_dma_free_handle(
3018*bafec742SSukumar Swaminathan 					    &(tx_ring_desc->tx_dma_handle[j]));
3019*bafec742SSukumar Swaminathan 					tx_ring_desc->tx_dma_handle[j] = NULL;
3020*bafec742SSukumar Swaminathan 				}
3021*bafec742SSukumar Swaminathan 			}
3022*bafec742SSukumar Swaminathan 			if (tx_ring_desc->oal != NULL) {
3023*bafec742SSukumar Swaminathan 				tx_ring_desc->oal_dma_addr = 0;
3024*bafec742SSukumar Swaminathan 				tx_ring_desc->oal = NULL;
3025*bafec742SSukumar Swaminathan 				tx_ring_desc->copy_buffer = NULL;
3026*bafec742SSukumar Swaminathan 				tx_ring_desc->copy_buffer_dma_addr = 0;
3027*bafec742SSukumar Swaminathan 
3028*bafec742SSukumar Swaminathan 				ql_free_phys(&tx_ring_desc->oal_dma.dma_handle,
3029*bafec742SSukumar Swaminathan 				    &tx_ring_desc->oal_dma.acc_handle);
3030*bafec742SSukumar Swaminathan 			}
3031*bafec742SSukumar Swaminathan 		}
3032*bafec742SSukumar Swaminathan 		kmem_free(tx_ring->wq_desc,
3033*bafec742SSukumar Swaminathan 		    tx_ring->wq_len * sizeof (struct tx_ring_desc));
3034*bafec742SSukumar Swaminathan 		tx_ring->wq_desc = NULL;
3035*bafec742SSukumar Swaminathan 	}
3036*bafec742SSukumar Swaminathan 	/* free the wqicb struct */
3037*bafec742SSukumar Swaminathan 	if (tx_ring->wqicb_dma.dma_handle) {
3038*bafec742SSukumar Swaminathan 		ql_free_phys(&tx_ring->wqicb_dma.dma_handle,
3039*bafec742SSukumar Swaminathan 		    &tx_ring->wqicb_dma.acc_handle);
3040*bafec742SSukumar Swaminathan 		bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3041*bafec742SSukumar Swaminathan 	}
3042*bafec742SSukumar Swaminathan }
3043*bafec742SSukumar Swaminathan 
3044*bafec742SSukumar Swaminathan /*
3045*bafec742SSukumar Swaminathan  * Allocate work (request) queue memory and transmit
3046*bafec742SSukumar Swaminathan  * descriptors for this transmit ring
3047*bafec742SSukumar Swaminathan  */
3048*bafec742SSukumar Swaminathan static int
3049*bafec742SSukumar Swaminathan ql_alloc_tx_resources(qlge_t *qlge, struct tx_ring *tx_ring)
3050*bafec742SSukumar Swaminathan {
3051*bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
3052*bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_ring_desc;
3053*bafec742SSukumar Swaminathan 	int i, j;
3054*bafec742SSukumar Swaminathan 	uint32_t length;
3055*bafec742SSukumar Swaminathan 
3056*bafec742SSukumar Swaminathan 	/* allocate dma buffers for obiocbs */
3057*bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &tx_ring->wq_dma.dma_handle,
3058*bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
3059*bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3060*bafec742SSukumar Swaminathan 	    &tx_ring->wq_dma.acc_handle,
3061*bafec742SSukumar Swaminathan 	    (size_t)tx_ring->wq_size,	/* mem size */
3062*bafec742SSukumar Swaminathan 	    (size_t)128, /* alignment:128 bytes boundary */
3063*bafec742SSukumar Swaminathan 	    (caddr_t *)&tx_ring->wq_dma.vaddr,
3064*bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
3065*bafec742SSukumar Swaminathan 		bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
3066*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): reqQ allocation failed.",
3067*bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3068*bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3069*bafec742SSukumar Swaminathan 	}
3070*bafec742SSukumar Swaminathan 	tx_ring->wq_dma.dma_addr = dma_cookie.dmac_laddress;
3071*bafec742SSukumar Swaminathan 
3072*bafec742SSukumar Swaminathan 	tx_ring->wq_desc =
3073*bafec742SSukumar Swaminathan 	    kmem_zalloc(tx_ring->wq_len * sizeof (struct tx_ring_desc),
3074*bafec742SSukumar Swaminathan 	    KM_NOSLEEP);
3075*bafec742SSukumar Swaminathan 	if (tx_ring->wq_desc == NULL) {
3076*bafec742SSukumar Swaminathan 		goto err;
3077*bafec742SSukumar Swaminathan 	} else {
3078*bafec742SSukumar Swaminathan 		tx_ring_desc = tx_ring->wq_desc;
3079*bafec742SSukumar Swaminathan 		/*
3080*bafec742SSukumar Swaminathan 		 * Allocate a large enough structure to hold the following
3081*bafec742SSukumar Swaminathan 		 * 1. oal buffer MAX_SG_ELEMENTS * sizeof (oal_entry) bytes
3082*bafec742SSukumar Swaminathan 		 * 2. copy buffer of QL_MAX_COPY_LENGTH bytes
3083*bafec742SSukumar Swaminathan 		 */
3084*bafec742SSukumar Swaminathan 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3085*bafec742SSukumar Swaminathan 			length = (sizeof (struct oal_entry) * MAX_SG_ELEMENTS)
3086*bafec742SSukumar Swaminathan 			    + QL_MAX_COPY_LENGTH;
3087*bafec742SSukumar Swaminathan 
3088*bafec742SSukumar Swaminathan 			if (ql_alloc_phys(qlge->dip,
3089*bafec742SSukumar Swaminathan 			    &tx_ring_desc->oal_dma.dma_handle,
3090*bafec742SSukumar Swaminathan 			    &ql_desc_acc_attr,
3091*bafec742SSukumar Swaminathan 			    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3092*bafec742SSukumar Swaminathan 			    &tx_ring_desc->oal_dma.acc_handle,
3093*bafec742SSukumar Swaminathan 			    (size_t)length,	/* mem size */
3094*bafec742SSukumar Swaminathan 			    (size_t)0, /* default alignment:8 bytes boundary */
3095*bafec742SSukumar Swaminathan 			    (caddr_t *)&tx_ring_desc->oal_dma.vaddr,
3096*bafec742SSukumar Swaminathan 			    &dma_cookie) != 0) {
3097*bafec742SSukumar Swaminathan 				bzero(&tx_ring_desc->oal_dma,
3098*bafec742SSukumar Swaminathan 				    sizeof (tx_ring_desc->oal_dma));
3099*bafec742SSukumar Swaminathan 				cmn_err(CE_WARN, "%s(%d): reqQ tx buf & "
3100*bafec742SSukumar Swaminathan 				    "oal alloc failed.",
3101*bafec742SSukumar Swaminathan 				    __func__, qlge->instance);
3102*bafec742SSukumar Swaminathan 				goto err;
3103*bafec742SSukumar Swaminathan 			}
3104*bafec742SSukumar Swaminathan 
3105*bafec742SSukumar Swaminathan 			tx_ring_desc->oal = tx_ring_desc->oal_dma.vaddr;
3106*bafec742SSukumar Swaminathan 			tx_ring_desc->oal_dma_addr = dma_cookie.dmac_laddress;
3107*bafec742SSukumar Swaminathan 			tx_ring_desc->copy_buffer =
3108*bafec742SSukumar Swaminathan 			    (caddr_t)((uint8_t *)tx_ring_desc->oal
3109*bafec742SSukumar Swaminathan 			    + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3110*bafec742SSukumar Swaminathan 			tx_ring_desc->copy_buffer_dma_addr =
3111*bafec742SSukumar Swaminathan 			    (tx_ring_desc->oal_dma_addr
3112*bafec742SSukumar Swaminathan 			    + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3113*bafec742SSukumar Swaminathan 
3114*bafec742SSukumar Swaminathan 			/* Allocate dma handles for transmit buffers */
3115*bafec742SSukumar Swaminathan 			for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3116*bafec742SSukumar Swaminathan 				if (ddi_dma_alloc_handle(qlge->dip,
3117*bafec742SSukumar Swaminathan 				    &tx_mapping_dma_attr,
3118*bafec742SSukumar Swaminathan 				    DDI_DMA_DONTWAIT,
3119*bafec742SSukumar Swaminathan 				    0, &tx_ring_desc->tx_dma_handle[j])
3120*bafec742SSukumar Swaminathan 				    != DDI_SUCCESS) {
3121*bafec742SSukumar Swaminathan 					cmn_err(CE_WARN,
3122*bafec742SSukumar Swaminathan 					    "!%s: ddi_dma_alloc_handle: "
3123*bafec742SSukumar Swaminathan 					    "tx_dma_handle "
3124*bafec742SSukumar Swaminathan 					    "alloc failed", __func__);
3125*bafec742SSukumar Swaminathan 					goto err;
3126*bafec742SSukumar Swaminathan 				}
3127*bafec742SSukumar Swaminathan 			}
3128*bafec742SSukumar Swaminathan 		}
3129*bafec742SSukumar Swaminathan 	}
3130*bafec742SSukumar Swaminathan 	/* alloc a wqicb control block to load this tx ring to hw */
3131*bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &tx_ring->wqicb_dma.dma_handle,
3132*bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
3133*bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3134*bafec742SSukumar Swaminathan 	    &tx_ring->wqicb_dma.acc_handle,
3135*bafec742SSukumar Swaminathan 	    (size_t)sizeof (struct wqicb_t),	/* mem size */
3136*bafec742SSukumar Swaminathan 	    (size_t)0, /* default alignment */
3137*bafec742SSukumar Swaminathan 	    (caddr_t *)&tx_ring->wqicb_dma.vaddr,
3138*bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
3139*bafec742SSukumar Swaminathan 		bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3140*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): wqicb allocation failed.",
3141*bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3142*bafec742SSukumar Swaminathan 		goto err;
3143*bafec742SSukumar Swaminathan 	}
3144*bafec742SSukumar Swaminathan 	tx_ring->wqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3145*bafec742SSukumar Swaminathan 
3146*bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
3147*bafec742SSukumar Swaminathan 
3148*bafec742SSukumar Swaminathan err:
3149*bafec742SSukumar Swaminathan 	ql_free_tx_resources(tx_ring);
3150*bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
3151*bafec742SSukumar Swaminathan }
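3151*bafec742SSukumar Swaminathan 
3151*bafec742SSukumar Swaminathan /*
3151*bafec742SSukumar Swaminathan  * Each tx descriptor's oal_dma allocation is a single area carved in
3151*bafec742SSukumar Swaminathan  * two, which is why the copy-buffer addresses above are derived by
3151*bafec742SSukumar Swaminathan  * offset arithmetic rather than by a second allocation:
3151*bafec742SSukumar Swaminathan  *
3151*bafec742SSukumar Swaminathan  *	oal_dma.vaddr / oal_dma_addr
3151*bafec742SSukumar Swaminathan  *	+------------------------------------------------+
3151*bafec742SSukumar Swaminathan  *	| oal[0 .. MAX_SG_ELEMENTS - 1]                   |
3151*bafec742SSukumar Swaminathan  *	+------------------------------------------------+
3151*bafec742SSukumar Swaminathan  *	| copy_buffer (QL_MAX_COPY_LENGTH bytes)          |
3151*bafec742SSukumar Swaminathan  *	+------------------------------------------------+
3151*bafec742SSukumar Swaminathan  */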
3152*bafec742SSukumar Swaminathan 
3153*bafec742SSukumar Swaminathan /*
3154*bafec742SSukumar Swaminathan  * Free one rx ring resources
3155*bafec742SSukumar Swaminathan  */
3156*bafec742SSukumar Swaminathan static void
3157*bafec742SSukumar Swaminathan ql_free_rx_resources(struct rx_ring *rx_ring)
3158*bafec742SSukumar Swaminathan {
3159*bafec742SSukumar Swaminathan 	/* Free the small buffer queue. */
3160*bafec742SSukumar Swaminathan 	if (rx_ring->sbq_dma.dma_handle) {
3161*bafec742SSukumar Swaminathan 		ql_free_phys(&rx_ring->sbq_dma.dma_handle,
3162*bafec742SSukumar Swaminathan 		    &rx_ring->sbq_dma.acc_handle);
3163*bafec742SSukumar Swaminathan 		bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3164*bafec742SSukumar Swaminathan 	}
3165*bafec742SSukumar Swaminathan 
3166*bafec742SSukumar Swaminathan 	/* Free the small buffer queue control blocks, if any. */
3167*bafec742SSukumar Swaminathan 	if (rx_ring->sbq_desc != NULL) {
3167*bafec742SSukumar Swaminathan 		kmem_free(rx_ring->sbq_desc, rx_ring->sbq_len *
3168*bafec742SSukumar Swaminathan 		    sizeof (struct bq_desc));
3169*bafec742SSukumar Swaminathan 		rx_ring->sbq_desc = NULL;
3169*bafec742SSukumar Swaminathan 	}
3170*bafec742SSukumar Swaminathan 
3171*bafec742SSukumar Swaminathan 	/* Free the large buffer queue. */
3172*bafec742SSukumar Swaminathan 	if (rx_ring->lbq_dma.dma_handle) {
3173*bafec742SSukumar Swaminathan 		ql_free_phys(&rx_ring->lbq_dma.dma_handle,
3174*bafec742SSukumar Swaminathan 		    &rx_ring->lbq_dma.acc_handle);
3175*bafec742SSukumar Swaminathan 		bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3176*bafec742SSukumar Swaminathan 	}
3177*bafec742SSukumar Swaminathan 
3178*bafec742SSukumar Swaminathan 	/* Free the large buffer queue control blocks. */
3179*bafec742SSukumar Swaminathan 	/* Free the large buffer queue control blocks, if any. */
3180*bafec742SSukumar Swaminathan 	if (rx_ring->lbq_desc != NULL) {
3180*bafec742SSukumar Swaminathan 		kmem_free(rx_ring->lbq_desc, rx_ring->lbq_len *
3181*bafec742SSukumar Swaminathan 		    sizeof (struct bq_desc));
3182*bafec742SSukumar Swaminathan 		rx_ring->lbq_desc = NULL;
3182*bafec742SSukumar Swaminathan 	}
3183*bafec742SSukumar Swaminathan 	/* Free cqicb struct */
3184*bafec742SSukumar Swaminathan 	if (rx_ring->cqicb_dma.dma_handle) {
3185*bafec742SSukumar Swaminathan 		ql_free_phys(&rx_ring->cqicb_dma.dma_handle,
3186*bafec742SSukumar Swaminathan 		    &rx_ring->cqicb_dma.acc_handle);
3187*bafec742SSukumar Swaminathan 		bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3188*bafec742SSukumar Swaminathan 	}
3189*bafec742SSukumar Swaminathan 	/* Free the rx queue. */
3190*bafec742SSukumar Swaminathan 	if (rx_ring->cq_dma.dma_handle) {
3191*bafec742SSukumar Swaminathan 		ql_free_phys(&rx_ring->cq_dma.dma_handle,
3192*bafec742SSukumar Swaminathan 		    &rx_ring->cq_dma.acc_handle);
3193*bafec742SSukumar Swaminathan 		bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3194*bafec742SSukumar Swaminathan 	}
3195*bafec742SSukumar Swaminathan }
3196*bafec742SSukumar Swaminathan 
3197*bafec742SSukumar Swaminathan /*
3198*bafec742SSukumar Swaminathan  * Allocate queues and buffers for this completion queue based
3199*bafec742SSukumar Swaminathan  * on the values in the parameter structure.
3200*bafec742SSukumar Swaminathan  */
3201*bafec742SSukumar Swaminathan static int
3202*bafec742SSukumar Swaminathan ql_alloc_rx_resources(qlge_t *qlge, struct rx_ring *rx_ring)
3203*bafec742SSukumar Swaminathan {
3204*bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
3205*bafec742SSukumar Swaminathan 
3206*bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &rx_ring->cq_dma.dma_handle,
3207*bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
3208*bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3209*bafec742SSukumar Swaminathan 	    &rx_ring->cq_dma.acc_handle,
3210*bafec742SSukumar Swaminathan 	    (size_t)rx_ring->cq_size,  /* mem size */
3211*bafec742SSukumar Swaminathan 	    (size_t)128, /* alignment:128 bytes boundary */
3212*bafec742SSukumar Swaminathan 	    (caddr_t *)&rx_ring->cq_dma.vaddr,
3213*bafec742SSukumar Swaminathan 	    &dma_cookie) != 0)	{
3214*bafec742SSukumar Swaminathan 		bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3215*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): rspQ allocation failed.",
3216*bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3217*bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3218*bafec742SSukumar Swaminathan 	}
3219*bafec742SSukumar Swaminathan 	rx_ring->cq_dma.dma_addr = dma_cookie.dmac_laddress;
3220*bafec742SSukumar Swaminathan 
3221*bafec742SSukumar Swaminathan 	if (rx_ring->sbq_len != 0) {
3222*bafec742SSukumar Swaminathan 		/*
3223*bafec742SSukumar Swaminathan 		 * Allocate small buffer queue.
3224*bafec742SSukumar Swaminathan 		 */
3225*bafec742SSukumar Swaminathan 		if (ql_alloc_phys(qlge->dip, &rx_ring->sbq_dma.dma_handle,
3226*bafec742SSukumar Swaminathan 		    &ql_desc_acc_attr,
3227*bafec742SSukumar Swaminathan 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3228*bafec742SSukumar Swaminathan 		    &rx_ring->sbq_dma.acc_handle,
3229*bafec742SSukumar Swaminathan 		    (size_t)rx_ring->sbq_size,  /* mem size */
3230*bafec742SSukumar Swaminathan 		    (size_t)128, /* alignment:128 bytes boundary */
3231*bafec742SSukumar Swaminathan 		    (caddr_t *)&rx_ring->sbq_dma.vaddr,
3232*bafec742SSukumar Swaminathan 		    &dma_cookie) != 0) {
3233*bafec742SSukumar Swaminathan 			bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3234*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
3235*bafec742SSukumar Swaminathan 			    "%s(%d): small buffer queue allocation failed.",
3236*bafec742SSukumar Swaminathan 			    __func__, qlge->instance);
3237*bafec742SSukumar Swaminathan 			goto err_mem;
3238*bafec742SSukumar Swaminathan 		}
3239*bafec742SSukumar Swaminathan 		rx_ring->sbq_dma.dma_addr = dma_cookie.dmac_laddress;
3240*bafec742SSukumar Swaminathan 
3241*bafec742SSukumar Swaminathan 		/*
3242*bafec742SSukumar Swaminathan 		 * Allocate small buffer queue control blocks.
3243*bafec742SSukumar Swaminathan 		 */
3244*bafec742SSukumar Swaminathan 		rx_ring->sbq_desc =
3245*bafec742SSukumar Swaminathan 		    kmem_zalloc(rx_ring->sbq_len * sizeof (struct bq_desc),
3246*bafec742SSukumar Swaminathan 		    KM_NOSLEEP);
3247*bafec742SSukumar Swaminathan 		if (rx_ring->sbq_desc == NULL) {
3248*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
3249*bafec742SSukumar Swaminathan 			    "sbq control block allocation failed.");
3250*bafec742SSukumar Swaminathan 			goto err_mem;
3251*bafec742SSukumar Swaminathan 		}
3252*bafec742SSukumar Swaminathan 
3253*bafec742SSukumar Swaminathan 		ql_init_sbq_ring(rx_ring);
3254*bafec742SSukumar Swaminathan 	}
3255*bafec742SSukumar Swaminathan 
3256*bafec742SSukumar Swaminathan 	if (rx_ring->lbq_len != 0) {
3257*bafec742SSukumar Swaminathan 		/*
3258*bafec742SSukumar Swaminathan 		 * Allocate large buffer queue.
3259*bafec742SSukumar Swaminathan 		 */
3260*bafec742SSukumar Swaminathan 		if (ql_alloc_phys(qlge->dip, &rx_ring->lbq_dma.dma_handle,
3261*bafec742SSukumar Swaminathan 		    &ql_desc_acc_attr,
3262*bafec742SSukumar Swaminathan 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3263*bafec742SSukumar Swaminathan 		    &rx_ring->lbq_dma.acc_handle,
3264*bafec742SSukumar Swaminathan 		    (size_t)rx_ring->lbq_size,  /* mem size */
3265*bafec742SSukumar Swaminathan 		    (size_t)128, /* alignment:128 bytes boundary */
3266*bafec742SSukumar Swaminathan 		    (caddr_t *)&rx_ring->lbq_dma.vaddr,
3267*bafec742SSukumar Swaminathan 		    &dma_cookie) != 0) {
3268*bafec742SSukumar Swaminathan 			bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3269*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): lbq allocation failed.",
3270*bafec742SSukumar Swaminathan 			    __func__, qlge->instance);
3271*bafec742SSukumar Swaminathan 			goto err_mem;
3272*bafec742SSukumar Swaminathan 		}
3273*bafec742SSukumar Swaminathan 		rx_ring->lbq_dma.dma_addr = dma_cookie.dmac_laddress;
3274*bafec742SSukumar Swaminathan 
3275*bafec742SSukumar Swaminathan 		/*
3276*bafec742SSukumar Swaminathan 		 * Allocate large buffer queue control blocks.
3277*bafec742SSukumar Swaminathan 		 */
3278*bafec742SSukumar Swaminathan 		rx_ring->lbq_desc =
3279*bafec742SSukumar Swaminathan 		    kmem_zalloc(rx_ring->lbq_len * sizeof (struct bq_desc),
3280*bafec742SSukumar Swaminathan 		    KM_NOSLEEP);
3281*bafec742SSukumar Swaminathan 		if (rx_ring->lbq_desc == NULL) {
3282*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
3283*bafec742SSukumar Swaminathan 			    "Large buffer queue control block allocation "
3284*bafec742SSukumar Swaminathan 			    "failed.");
3285*bafec742SSukumar Swaminathan 			goto err_mem;
3286*bafec742SSukumar Swaminathan 		}
3287*bafec742SSukumar Swaminathan 		ql_init_lbq_ring(rx_ring);
3288*bafec742SSukumar Swaminathan 	}
3289*bafec742SSukumar Swaminathan 
3290*bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &rx_ring->cqicb_dma.dma_handle,
3291*bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
3292*bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3293*bafec742SSukumar Swaminathan 	    &rx_ring->cqicb_dma.acc_handle,
3294*bafec742SSukumar Swaminathan 	    (size_t)sizeof (struct cqicb_t),  /* mem size */
3295*bafec742SSukumar Swaminathan 	    (size_t)0, /* default alignment */
3296*bafec742SSukumar Swaminathan 	    (caddr_t *)&rx_ring->cqicb_dma.vaddr,
3297*bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
3298*bafec742SSukumar Swaminathan 		bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3299*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): cqicb allocation failed.",
3300*bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3301*bafec742SSukumar Swaminathan 		goto err_mem;
3302*bafec742SSukumar Swaminathan 	}
3303*bafec742SSukumar Swaminathan 	rx_ring->cqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3304*bafec742SSukumar Swaminathan 
3305*bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
3306*bafec742SSukumar Swaminathan 
3307*bafec742SSukumar Swaminathan err_mem:
3308*bafec742SSukumar Swaminathan 	ql_free_rx_resources(rx_ring);
3309*bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
3310*bafec742SSukumar Swaminathan }
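3310*bafec742SSukumar Swaminathan 
3310*bafec742SSukumar Swaminathan /*
3310*bafec742SSukumar Swaminathan  * A fully populated rx ring therefore owns up to four DMA areas: the
3310*bafec742SSukumar Swaminathan  * completion queue (cq_dma), the optional small and large buffer
3310*bafec742SSukumar Swaminathan  * queues (sbq_dma/lbq_dma) and the cqicb control block (cqicb_dma)
3310*bafec742SSukumar Swaminathan  * that loads the ring into the chip. ql_free_rx_resources() releases
3310*bafec742SSukumar Swaminathan  * only what was actually allocated, so the err_mem path above is safe
3310*bafec742SSukumar Swaminathan  * after a partial failure.
3310*bafec742SSukumar Swaminathan  */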
3311*bafec742SSukumar Swaminathan 
3312*bafec742SSukumar Swaminathan /*
3313*bafec742SSukumar Swaminathan  * Frees tx/rx queues memory resources
3314*bafec742SSukumar Swaminathan  */
3315*bafec742SSukumar Swaminathan static void
3316*bafec742SSukumar Swaminathan ql_free_mem_resources(qlge_t *qlge)
3317*bafec742SSukumar Swaminathan {
3318*bafec742SSukumar Swaminathan 	int i;
3319*bafec742SSukumar Swaminathan 
3320*bafec742SSukumar Swaminathan 	if (qlge->ricb_dma.dma_handle) {
3321*bafec742SSukumar Swaminathan 		/* free the ricb struct */
3322*bafec742SSukumar Swaminathan 		ql_free_phys(&qlge->ricb_dma.dma_handle,
3323*bafec742SSukumar Swaminathan 		    &qlge->ricb_dma.acc_handle);
3324*bafec742SSukumar Swaminathan 		bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3325*bafec742SSukumar Swaminathan 	}
3326*bafec742SSukumar Swaminathan 
3327*bafec742SSukumar Swaminathan 	ql_free_rx_buffers(qlge);
3328*bafec742SSukumar Swaminathan 
3329*bafec742SSukumar Swaminathan 	ql_free_ioctl_dma_buf(qlge);
3330*bafec742SSukumar Swaminathan 
3331*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++)
3332*bafec742SSukumar Swaminathan 		ql_free_tx_resources(&qlge->tx_ring[i]);
3333*bafec742SSukumar Swaminathan 
3334*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++)
3335*bafec742SSukumar Swaminathan 		ql_free_rx_resources(&qlge->rx_ring[i]);
3336*bafec742SSukumar Swaminathan 
3337*bafec742SSukumar Swaminathan 	ql_free_shadow_space(qlge);
3338*bafec742SSukumar Swaminathan }
3339*bafec742SSukumar Swaminathan 
3340*bafec742SSukumar Swaminathan /*
3341*bafec742SSukumar Swaminathan  * Allocate buffer queues, large buffers and small buffers etc
3342*bafec742SSukumar Swaminathan  *
3343*bafec742SSukumar Swaminathan  * This function is called from the gld_attach entry point and runs
3344*bafec742SSukumar Swaminathan  * only once; subsequent resets and reboots must not re-allocate all
3345*bafec742SSukumar Swaminathan  * rings and buffers.
3346*bafec742SSukumar Swaminathan  */
3347*bafec742SSukumar Swaminathan static int
3348*bafec742SSukumar Swaminathan ql_alloc_mem_resources(qlge_t *qlge)
3349*bafec742SSukumar Swaminathan {
3350*bafec742SSukumar Swaminathan 	int i;
3351*bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
3352*bafec742SSukumar Swaminathan 
3353*bafec742SSukumar Swaminathan 	/* Allocate space for our shadow registers */
3354*bafec742SSukumar Swaminathan 	if (ql_alloc_shadow_space(qlge))
3355*bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3356*bafec742SSukumar Swaminathan 
3357*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
3358*bafec742SSukumar Swaminathan 		if (ql_alloc_rx_resources(qlge, &qlge->rx_ring[i]) != 0) {
3359*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "RX resource allocation failed.");
3360*bafec742SSukumar Swaminathan 			goto err_mem;
3361*bafec742SSukumar Swaminathan 		}
3362*bafec742SSukumar Swaminathan 	}
3363*bafec742SSukumar Swaminathan 	/* Allocate tx queue resources */
3364*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
3365*bafec742SSukumar Swaminathan 		if (ql_alloc_tx_resources(qlge, &qlge->tx_ring[i]) != 0) {
3366*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Tx resource allocation failed.");
3367*bafec742SSukumar Swaminathan 			goto err_mem;
3368*bafec742SSukumar Swaminathan 		}
3369*bafec742SSukumar Swaminathan 	}
3370*bafec742SSukumar Swaminathan 
3371*bafec742SSukumar Swaminathan 	if (ql_alloc_ioctl_dma_buf(qlge) != DDI_SUCCESS) {
3372*bafec742SSukumar Swaminathan 		goto err_mem;
3373*bafec742SSukumar Swaminathan 	}
3374*bafec742SSukumar Swaminathan 
3375*bafec742SSukumar Swaminathan 	if (ql_alloc_rx_buffers(qlge) != DDI_SUCCESS) {
3376*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "?%s(%d): ql_alloc_rx_buffers failed",
3377*bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3378*bafec742SSukumar Swaminathan 		goto err_mem;
3379*bafec742SSukumar Swaminathan 	}
3380*bafec742SSukumar Swaminathan 
3381*bafec742SSukumar Swaminathan 	qlge->sequence |= INIT_ALLOC_RX_BUF;
3382*bafec742SSukumar Swaminathan 
3383*bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &qlge->ricb_dma.dma_handle,
3384*bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
3385*bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3386*bafec742SSukumar Swaminathan 	    &qlge->ricb_dma.acc_handle,
3387*bafec742SSukumar Swaminathan 	    (size_t)sizeof (struct ricb),  /* mem size */
3388*bafec742SSukumar Swaminathan 	    (size_t)0, /* default alignment */
3389*bafec742SSukumar Swaminathan 	    (caddr_t *)&qlge->ricb_dma.vaddr,
3390*bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
3391*bafec742SSukumar Swaminathan 		bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3392*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): ricb allocation failed.",
3393*bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3394*bafec742SSukumar Swaminathan 		goto err_mem;
3395*bafec742SSukumar Swaminathan 	}
3396*bafec742SSukumar Swaminathan 	qlge->ricb_dma.dma_addr = dma_cookie.dmac_laddress;
3397*bafec742SSukumar Swaminathan 
3398*bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
3399*bafec742SSukumar Swaminathan 
3400*bafec742SSukumar Swaminathan err_mem:
3401*bafec742SSukumar Swaminathan 	ql_free_mem_resources(qlge);
3402*bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
3403*bafec742SSukumar Swaminathan }
3404*bafec742SSukumar Swaminathan 
3405*bafec742SSukumar Swaminathan 
3406*bafec742SSukumar Swaminathan /*
3407*bafec742SSukumar Swaminathan  * Function used to allocate physical memory and zero it.
3408*bafec742SSukumar Swaminathan  */
3409*bafec742SSukumar Swaminathan 
3410*bafec742SSukumar Swaminathan static int
3411*bafec742SSukumar Swaminathan ql_alloc_phys(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
3412*bafec742SSukumar Swaminathan     ddi_device_acc_attr_t *device_acc_attr,
3413*bafec742SSukumar Swaminathan     uint_t dma_flags,
3414*bafec742SSukumar Swaminathan     ddi_acc_handle_t *acc_handle,
3415*bafec742SSukumar Swaminathan     size_t size,
3416*bafec742SSukumar Swaminathan     size_t alignment,
3417*bafec742SSukumar Swaminathan     caddr_t *vaddr,
3418*bafec742SSukumar Swaminathan     ddi_dma_cookie_t *dma_cookie)
3419*bafec742SSukumar Swaminathan {
3420*bafec742SSukumar Swaminathan 	size_t rlen;
3421*bafec742SSukumar Swaminathan 	uint_t cnt;
3422*bafec742SSukumar Swaminathan 
3423*bafec742SSukumar Swaminathan 	/*
3424*bafec742SSukumar Swaminathan 	 * Workaround: SUN XMITS buffers must start and end on an 8-byte
3425*bafec742SSukumar Swaminathan 	 * boundary, else the hardware will overrun the buffer. The simple
3426*bafec742SSukumar Swaminathan 	 * fix is to round the size up so there is room for the overrun.
3427*bafec742SSukumar Swaminathan 	 */
3428*bafec742SSukumar Swaminathan 	if (size & 7) {
3429*bafec742SSukumar Swaminathan 		size += 8 - (size & 7);
3430*bafec742SSukumar Swaminathan 	}
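3430*bafec742SSukumar Swaminathan 	/*
3430*bafec742SSukumar Swaminathan 	 * e.g. a request of 125 bytes is rounded up to 128; this is
3430*bafec742SSukumar Swaminathan 	 * equivalent to size = (size + 7) & ~(size_t)7.
3430*bafec742SSukumar Swaminathan 	 */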
3431*bafec742SSukumar Swaminathan 
3432*bafec742SSukumar Swaminathan 	/* Adjust the alignment if requested */
3433*bafec742SSukumar Swaminathan 	if (alignment) {
3434*bafec742SSukumar Swaminathan 		dma_attr.dma_attr_align = alignment;
3435*bafec742SSukumar Swaminathan 	}
3436*bafec742SSukumar Swaminathan 
3437*bafec742SSukumar Swaminathan 	/*
3438*bafec742SSukumar Swaminathan 	 * Allocate DMA handle
3439*bafec742SSukumar Swaminathan 	 */
3440*bafec742SSukumar Swaminathan 	if (ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_SLEEP, NULL,
3441*bafec742SSukumar Swaminathan 	    dma_handle) != DDI_SUCCESS) {
3442*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, QL_BANG "%s:  ddi_dma_alloc_handle FAILED",
3443*bafec742SSukumar Swaminathan 		    __func__);
3444*bafec742SSukumar Swaminathan 		return (QL_ERROR);
3445*bafec742SSukumar Swaminathan 	}
3446*bafec742SSukumar Swaminathan 	/*
3447*bafec742SSukumar Swaminathan 	 * Allocate DMA memory
3448*bafec742SSukumar Swaminathan 	 */
3449*bafec742SSukumar Swaminathan 	if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
3450*bafec742SSukumar Swaminathan 	    dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING), DDI_DMA_SLEEP,
3451*bafec742SSukumar Swaminathan 	    NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
3452*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s: ddi_dma_mem_alloc FAILED", __func__);
3453*bafec742SSukumar Swaminathan 		ddi_dma_free_handle(dma_handle);
3454*bafec742SSukumar Swaminathan 		return (QL_ERROR);
3455*bafec742SSukumar Swaminathan 	}
3459*bafec742SSukumar Swaminathan 
3460*bafec742SSukumar Swaminathan 	if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
3461*bafec742SSukumar Swaminathan 	    dma_flags, DDI_DMA_SLEEP, NULL,
3462*bafec742SSukumar Swaminathan 	    dma_cookie, &cnt) != DDI_DMA_MAPPED) {
3463*bafec742SSukumar Swaminathan 		ddi_dma_mem_free(acc_handle);
3464*bafec742SSukumar Swaminathan 
3465*bafec742SSukumar Swaminathan 		ddi_dma_free_handle(dma_handle);
3466*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
3467*bafec742SSukumar Swaminathan 		    __func__);
3468*bafec742SSukumar Swaminathan 		return (QL_ERROR);
3469*bafec742SSukumar Swaminathan 	}
3470*bafec742SSukumar Swaminathan 
3471*bafec742SSukumar Swaminathan 	if (cnt != 1) {
3472*bafec742SSukumar Swaminathan 		ql_free_phys(dma_handle, acc_handle);
3473*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
3474*bafec742SSukumar Swaminathan 		    __func__);
3475*bafec742SSukumar Swaminathan 		return (QL_ERROR);
3476*bafec742SSukumar Swaminathan 	}
3479*bafec742SSukumar Swaminathan 
3480*bafec742SSukumar Swaminathan 	bzero((caddr_t)*vaddr, rlen);
3481*bafec742SSukumar Swaminathan 
3482*bafec742SSukumar Swaminathan 	return (0);
3483*bafec742SSukumar Swaminathan }
3484*bafec742SSukumar Swaminathan 
3485*bafec742SSukumar Swaminathan /*
3486*bafec742SSukumar Swaminathan  * Add interrupt handlers based on the interrupt type.
3487*bafec742SSukumar Swaminathan  * Before adding the interrupt handlers, the interrupt vectors should
3488*bafec742SSukumar Swaminathan  * have been allocated, and the rx/tx rings have also been allocated.
3489*bafec742SSukumar Swaminathan  */
3490*bafec742SSukumar Swaminathan static int
3491*bafec742SSukumar Swaminathan ql_add_intr_handlers(qlge_t *qlge)
3492*bafec742SSukumar Swaminathan {
3493*bafec742SSukumar Swaminathan 	int vector = 0;
3494*bafec742SSukumar Swaminathan 	int rc, i;
3495*bafec742SSukumar Swaminathan 	uint32_t value = 0;
3496*bafec742SSukumar Swaminathan 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
3497*bafec742SSukumar Swaminathan 
3498*bafec742SSukumar Swaminathan 	switch (qlge->intr_type) {
3499*bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_MSIX:
3500*bafec742SSukumar Swaminathan 		/*
3501*bafec742SSukumar Swaminathan 		 * Add interrupt handler for rx and tx rings: vector[0 -
3502*bafec742SSukumar Swaminathan 		 * Add an interrupt handler for each rx and tx ring:
3503*bafec742SSukumar Swaminathan 		 * vectors 0 through (qlge->intr_cnt - 1).
3504*bafec742SSukumar Swaminathan 		value = 0;
3505*bafec742SSukumar Swaminathan 		for (vector = 0; vector < qlge->intr_cnt; vector++) {
3506*bafec742SSukumar Swaminathan 			ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3507*bafec742SSukumar Swaminathan 
3508*bafec742SSukumar Swaminathan 			/*
3509*bafec742SSukumar Swaminathan 			 * associate interrupt vector with interrupt handler
3510*bafec742SSukumar Swaminathan 			 */
3511*bafec742SSukumar Swaminathan 			rc = ddi_intr_add_handler(qlge->htable[vector],
3512*bafec742SSukumar Swaminathan 			    (ddi_intr_handler_t *)intr_ctx->handler,
3513*bafec742SSukumar Swaminathan 			    (void *)&qlge->rx_ring[vector], NULL);
3514*bafec742SSukumar Swaminathan 
3515*bafec742SSukumar Swaminathan 			if (rc != DDI_SUCCESS) {
3516*bafec742SSukumar Swaminathan 				QL_PRINT(DBG_INIT,
3517*bafec742SSukumar Swaminathan 				    ("Add rx interrupt handler failed. "
3518*bafec742SSukumar Swaminathan 				    "return: %d, vector: %d", rc, vector));
3519*bafec742SSukumar Swaminathan 				for (vector--; vector >= 0; vector--) {
3520*bafec742SSukumar Swaminathan 					(void) ddi_intr_remove_handler(
3521*bafec742SSukumar Swaminathan 					    qlge->htable[vector]);
3522*bafec742SSukumar Swaminathan 				}
3523*bafec742SSukumar Swaminathan 				return (DDI_FAILURE);
3524*bafec742SSukumar Swaminathan 			}
3525*bafec742SSukumar Swaminathan 			intr_ctx++;
3526*bafec742SSukumar Swaminathan 		}
3527*bafec742SSukumar Swaminathan 		break;
3528*bafec742SSukumar Swaminathan 
3529*bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_MSI:
3530*bafec742SSukumar Swaminathan 		/*
3531*bafec742SSukumar Swaminathan 		 * Add interrupt handlers for the only vector
3532*bafec742SSukumar Swaminathan 		 */
3533*bafec742SSukumar Swaminathan 		ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3534*bafec742SSukumar Swaminathan 
3535*bafec742SSukumar Swaminathan 		rc = ddi_intr_add_handler(qlge->htable[vector],
3536*bafec742SSukumar Swaminathan 		    ql_isr,
3537*bafec742SSukumar Swaminathan 		    (caddr_t)&qlge->rx_ring[0], NULL);
3538*bafec742SSukumar Swaminathan 
3539*bafec742SSukumar Swaminathan 		if (rc != DDI_SUCCESS) {
3540*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT,
3541*bafec742SSukumar Swaminathan 			    ("Add MSI interrupt handler failed: %d\n", rc));
3542*bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
3543*bafec742SSukumar Swaminathan 		}
3544*bafec742SSukumar Swaminathan 		break;
3545*bafec742SSukumar Swaminathan 
3546*bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_FIXED:
3547*bafec742SSukumar Swaminathan 		/*
3548*bafec742SSukumar Swaminathan 		 * Add interrupt handlers for the only vector
3549*bafec742SSukumar Swaminathan 		 */
3550*bafec742SSukumar Swaminathan 		ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3551*bafec742SSukumar Swaminathan 
3552*bafec742SSukumar Swaminathan 		rc = ddi_intr_add_handler(qlge->htable[vector],
3553*bafec742SSukumar Swaminathan 		    ql_isr,
3554*bafec742SSukumar Swaminathan 		    (caddr_t)&qlge->rx_ring[0], NULL);
3555*bafec742SSukumar Swaminathan 
3556*bafec742SSukumar Swaminathan 		if (rc != DDI_SUCCESS) {
3557*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT,
3558*bafec742SSukumar Swaminathan 			    ("Add legacy interrupt handler failed: %d\n", rc));
3559*bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
3560*bafec742SSukumar Swaminathan 		}
3561*bafec742SSukumar Swaminathan 		break;
3562*bafec742SSukumar Swaminathan 
3563*bafec742SSukumar Swaminathan 	default:
3564*bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3565*bafec742SSukumar Swaminathan 	}
3566*bafec742SSukumar Swaminathan 
3567*bafec742SSukumar Swaminathan 	/* Enable interrupts */
3568*bafec742SSukumar Swaminathan 	/* Block enable */
3569*bafec742SSukumar Swaminathan 	if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
3570*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Block enabling %d interrupt(s)\n",
3571*bafec742SSukumar Swaminathan 		    qlge->intr_cnt));
3572*bafec742SSukumar Swaminathan 		(void) ddi_intr_block_enable(qlge->htable, qlge->intr_cnt);
3573*bafec742SSukumar Swaminathan 	} else { /* Non block enable */
3574*bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->intr_cnt; i++) {
3575*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT, ("Non-block enabling interrupt %d, "
3576*bafec742SSukumar Swaminathan 			    "handle 0x%x\n", i, qlge->htable[i]));
3577*bafec742SSukumar Swaminathan 			(void) ddi_intr_enable(qlge->htable[i]);
3578*bafec742SSukumar Swaminathan 		}
3579*bafec742SSukumar Swaminathan 	}
3580*bafec742SSukumar Swaminathan 	qlge->sequence |= INIT_INTR_ENABLED;
3581*bafec742SSukumar Swaminathan 
3582*bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
3583*bafec742SSukumar Swaminathan }
3584*bafec742SSukumar Swaminathan 
3585*bafec742SSukumar Swaminathan /*
3586*bafec742SSukumar Swaminathan  * Here we build the intr_ctx structures based on
3587*bafec742SSukumar Swaminathan  * our rx_ring count and intr vector count.
3588*bafec742SSukumar Swaminathan  * The intr_ctx structure is used to hook each vector
3589*bafec742SSukumar Swaminathan  * to possibly different handlers.
3590*bafec742SSukumar Swaminathan  */
3591*bafec742SSukumar Swaminathan static void
3592*bafec742SSukumar Swaminathan ql_resolve_queues_to_irqs(qlge_t *qlge)
3593*bafec742SSukumar Swaminathan {
3594*bafec742SSukumar Swaminathan 	int i = 0;
3595*bafec742SSukumar Swaminathan 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
3596*bafec742SSukumar Swaminathan 
3597*bafec742SSukumar Swaminathan 	if (qlge->intr_type == DDI_INTR_TYPE_MSIX) {
3598*bafec742SSukumar Swaminathan 		/*
3599*bafec742SSukumar Swaminathan 		 * Each rx_ring has its own intr_ctx since we
3600*bafec742SSukumar Swaminathan 		 * have separate vectors for each queue.
3601*bafec742SSukumar Swaminathan 		 * This is only true when MSI-X is enabled.
3602*bafec742SSukumar Swaminathan 		 */
3603*bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->intr_cnt; i++, intr_ctx++) {
3604*bafec742SSukumar Swaminathan 			qlge->rx_ring[i].irq = i;
3605*bafec742SSukumar Swaminathan 			intr_ctx->intr = i;
3606*bafec742SSukumar Swaminathan 			intr_ctx->qlge = qlge;
3607*bafec742SSukumar Swaminathan 
3608*bafec742SSukumar Swaminathan 			/*
3609*bafec742SSukumar Swaminathan 			 * We set up each vector's enable/disable/read bits so
3610*bafec742SSukumar Swaminathan 			 * there are no bit/mask calculations in the critical
3610*bafec742SSukumar Swaminathan 			 * path.
3611*bafec742SSukumar Swaminathan 			 */
3612*bafec742SSukumar Swaminathan 			intr_ctx->intr_en_mask =
3613*bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3614*bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
3615*bafec742SSukumar Swaminathan 			    INTR_EN_IHD | i;
3616*bafec742SSukumar Swaminathan 			intr_ctx->intr_dis_mask =
3617*bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3618*bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3619*bafec742SSukumar Swaminathan 			    INTR_EN_IHD | i;
3620*bafec742SSukumar Swaminathan 			intr_ctx->intr_read_mask =
3621*bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3622*bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
3623*bafec742SSukumar Swaminathan 			    | i;
3624*bafec742SSukumar Swaminathan 
3625*bafec742SSukumar Swaminathan 			if (i == 0) {
3626*bafec742SSukumar Swaminathan 				/*
3627*bafec742SSukumar Swaminathan 				 * Default queue handles bcast/mcast plus
3628*bafec742SSukumar Swaminathan 				 * async events.
3629*bafec742SSukumar Swaminathan 				 */
3630*bafec742SSukumar Swaminathan 				intr_ctx->handler = ql_isr;
3631*bafec742SSukumar Swaminathan 			} else if (qlge->rx_ring[i].type == TX_Q) {
3632*bafec742SSukumar Swaminathan 				/*
3633*bafec742SSukumar Swaminathan 				 * Outbound queue is for outbound completions
3634*bafec742SSukumar Swaminathan 				 * only.
3635*bafec742SSukumar Swaminathan 				 */
3636*bafec742SSukumar Swaminathan 				intr_ctx->handler = ql_msix_tx_isr;
3637*bafec742SSukumar Swaminathan 			} else {
3638*bafec742SSukumar Swaminathan 				/*
3639*bafec742SSukumar Swaminathan 				 * Inbound queues handle unicast frames only.
3640*bafec742SSukumar Swaminathan 				 */
3641*bafec742SSukumar Swaminathan 				intr_ctx->handler = ql_msix_rx_isr;
3642*bafec742SSukumar Swaminathan 			}
3643*bafec742SSukumar Swaminathan 		}
3644*bafec742SSukumar Swaminathan 	} else {
3645*bafec742SSukumar Swaminathan 		/*
3646*bafec742SSukumar Swaminathan 		 * All rx_rings use the same intr_ctx since
3647*bafec742SSukumar Swaminathan 		 * there is only one vector.
3648*bafec742SSukumar Swaminathan 		 */
3649*bafec742SSukumar Swaminathan 		intr_ctx->intr = 0;
3650*bafec742SSukumar Swaminathan 		intr_ctx->qlge = qlge;
3651*bafec742SSukumar Swaminathan 		/*
3652*bafec742SSukumar Swaminathan 		 * We set up each vector's enable/disable/read bits so
3653*bafec742SSukumar Swaminathan 		 * there are no bit/mask calculations in the critical path.
3654*bafec742SSukumar Swaminathan 		 */
3655*bafec742SSukumar Swaminathan 		intr_ctx->intr_en_mask =
3656*bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3657*bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_ENABLE;
3658*bafec742SSukumar Swaminathan 		intr_ctx->intr_dis_mask =
3659*bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3660*bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_DISABLE;
3661*bafec742SSukumar Swaminathan 		intr_ctx->intr_read_mask =
3662*bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3663*bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_READ;
3664*bafec742SSukumar Swaminathan 		/*
3665*bafec742SSukumar Swaminathan 		 * Single interrupt means one handler for all rings.
3666*bafec742SSukumar Swaminathan 		 */
3667*bafec742SSukumar Swaminathan 		intr_ctx->handler = ql_isr;
3668*bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->rx_ring_count; i++)
3669*bafec742SSukumar Swaminathan 			qlge->rx_ring[i].irq = 0;
3670*bafec742SSukumar Swaminathan 	}
3671*bafec742SSukumar Swaminathan }
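3671*bafec742SSukumar Swaminathan 
3671*bafec742SSukumar Swaminathan /*
3671*bafec742SSukumar Swaminathan  * The precomputed masks above are intended to be written to the chip's
3671*bafec742SSukumar Swaminathan  * interrupt enable register verbatim. A hypothetical fast-path use
3671*bafec742SSukumar Swaminathan  * (the register-write helper and register name here are illustrative,
3671*bafec742SSukumar Swaminathan  * not this driver's actual API):
3671*bafec742SSukumar Swaminathan  *
3671*bafec742SSukumar Swaminathan  *	static void
3671*bafec742SSukumar Swaminathan  *	example_enable_completion_irq(qlge_t *qlge, struct intr_ctx *ctx)
3671*bafec742SSukumar Swaminathan  *	{
3671*bafec742SSukumar Swaminathan  *		EXAMPLE_WRITE_REG32(qlge, EXAMPLE_INTR_EN_REG,
3671*bafec742SSukumar Swaminathan  *		    ctx->intr_en_mask);
3671*bafec742SSukumar Swaminathan  *	}
3671*bafec742SSukumar Swaminathan  */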
3672*bafec742SSukumar Swaminathan 
3673*bafec742SSukumar Swaminathan 
3674*bafec742SSukumar Swaminathan /*
3675*bafec742SSukumar Swaminathan  * Free allocated interrupts.
3676*bafec742SSukumar Swaminathan  */
3677*bafec742SSukumar Swaminathan static void
3678*bafec742SSukumar Swaminathan ql_free_irq_vectors(qlge_t *qlge)
3679*bafec742SSukumar Swaminathan {
3680*bafec742SSukumar Swaminathan 	int i;
3681*bafec742SSukumar Swaminathan 	int rc;
3682*bafec742SSukumar Swaminathan 
3683*bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_INTR_ENABLED) {
3684*bafec742SSukumar Swaminathan 		/* Disable all interrupts */
3685*bafec742SSukumar Swaminathan 		if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
3686*bafec742SSukumar Swaminathan 			/* Call ddi_intr_block_disable() */
3687*bafec742SSukumar Swaminathan 			(void) ddi_intr_block_disable(qlge->htable,
3688*bafec742SSukumar Swaminathan 			    qlge->intr_cnt);
3689*bafec742SSukumar Swaminathan 		} else {
3690*bafec742SSukumar Swaminathan 			for (i = 0; i < qlge->intr_cnt; i++) {
3691*bafec742SSukumar Swaminathan 				(void) ddi_intr_disable(qlge->htable[i]);
3692*bafec742SSukumar Swaminathan 			}
3693*bafec742SSukumar Swaminathan 		}
3694*bafec742SSukumar Swaminathan 
3695*bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_INTR_ENABLED;
3696*bafec742SSukumar Swaminathan 	}
3697*bafec742SSukumar Swaminathan 
3698*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->intr_cnt; i++) {
3699*bafec742SSukumar Swaminathan 
3700*bafec742SSukumar Swaminathan 		if (qlge->sequence & INIT_ADD_INTERRUPT)
3701*bafec742SSukumar Swaminathan 			(void) ddi_intr_remove_handler(qlge->htable[i]);
3702*bafec742SSukumar Swaminathan 
3703*bafec742SSukumar Swaminathan 		if (qlge->sequence & INIT_INTR_ALLOC) {
3704*bafec742SSukumar Swaminathan 			rc = ddi_intr_free(qlge->htable[i]);
3705*bafec742SSukumar Swaminathan 			if (rc != DDI_SUCCESS) {
3706*bafec742SSukumar Swaminathan 				/* EMPTY */
3707*bafec742SSukumar Swaminathan 				QL_PRINT(DBG_INIT, ("Free intr failed: %d",
3708*bafec742SSukumar Swaminathan 				    rc));
3709*bafec742SSukumar Swaminathan 			}
3710*bafec742SSukumar Swaminathan 		}
3711*bafec742SSukumar Swaminathan 	}
3712*bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_INTR_ALLOC)
3713*bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_INTR_ALLOC;
3714*bafec742SSukumar Swaminathan 
3715*bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_ADD_INTERRUPT)
3716*bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_ADD_INTERRUPT;
3717*bafec742SSukumar Swaminathan 
3718*bafec742SSukumar Swaminathan 	if (qlge->htable) {
3719*bafec742SSukumar Swaminathan 		kmem_free(qlge->htable, qlge->intr_size);
3720*bafec742SSukumar Swaminathan 		qlge->htable = NULL;
3721*bafec742SSukumar Swaminathan 	}
3722*bafec742SSukumar Swaminathan }
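3722*bafec742SSukumar Swaminathan 
3722*bafec742SSukumar Swaminathan /*
3722*bafec742SSukumar Swaminathan  * The qlge->sequence word acts as a setup ledger: each init step ORs
3722*bafec742SSukumar Swaminathan  * in a flag and teardown undoes only the steps whose flag is present,
3722*bafec742SSukumar Swaminathan  * which keeps cleanup safe after a partial attach. For example, after
3722*bafec742SSukumar Swaminathan  * ql_free_irq_vectors() returns, all three interrupt-related flags
3722*bafec742SSukumar Swaminathan  * have been cleared:
3722*bafec742SSukumar Swaminathan  *
3722*bafec742SSukumar Swaminathan  *	ql_free_irq_vectors(qlge);
3722*bafec742SSukumar Swaminathan  *	ASSERT((qlge->sequence & (INIT_INTR_ALLOC | INIT_ADD_INTERRUPT |
3722*bafec742SSukumar Swaminathan  *	    INIT_INTR_ENABLED)) == 0);
3722*bafec742SSukumar Swaminathan  */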
3723*bafec742SSukumar Swaminathan 
3724*bafec742SSukumar Swaminathan /*
3725*bafec742SSukumar Swaminathan  * Allocate interrupt vectors
3726*bafec742SSukumar Swaminathan  * For legacy and MSI, only 1 handle is needed.
3727*bafec742SSukumar Swaminathan  * For MSI-X, if fewer than 2 vectors are available, return failure.
3728*bafec742SSukumar Swaminathan  * Upon success, this maps the vectors to rx and tx rings for
3729*bafec742SSukumar Swaminathan  * interrupts.
3730*bafec742SSukumar Swaminathan  */
3731*bafec742SSukumar Swaminathan static int
3732*bafec742SSukumar Swaminathan ql_request_irq_vectors(qlge_t *qlge, int intr_type)
3733*bafec742SSukumar Swaminathan {
3734*bafec742SSukumar Swaminathan 	dev_info_t *devinfo;
3735*bafec742SSukumar Swaminathan 	uint32_t request, orig;
3736*bafec742SSukumar Swaminathan 	int count, avail, actual;
3737*bafec742SSukumar Swaminathan 	int minimum;
3738*bafec742SSukumar Swaminathan 	int rc;
3739*bafec742SSukumar Swaminathan 
3740*bafec742SSukumar Swaminathan 	devinfo = qlge->dip;
3741*bafec742SSukumar Swaminathan 
3742*bafec742SSukumar Swaminathan 	switch (intr_type) {
3743*bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_FIXED:
3744*bafec742SSukumar Swaminathan 		request = 1;	/* Request 1 legacy interrupt handle */
3745*bafec742SSukumar Swaminathan 		minimum = 1;
3746*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("interrupt type: legacy\n"));
3747*bafec742SSukumar Swaminathan 		break;
3748*bafec742SSukumar Swaminathan 
3749*bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_MSI:
3750*bafec742SSukumar Swaminathan 		request = 1;	/* Request 1 MSI interrupt handle */
3751*bafec742SSukumar Swaminathan 		minimum = 1;
3752*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("interrupt type: MSI\n"));
3753*bafec742SSukumar Swaminathan 		break;
3754*bafec742SSukumar Swaminathan 
3755*bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_MSIX:
3756*bafec742SSukumar Swaminathan 		/*
3757*bafec742SSukumar Swaminathan 		 * The ideal number of vectors for the adapter is the number
3758*bafec742SSukumar Swaminathan 		 * of rss rings plus the tx completion rings (the default
3759*bafec742SSukumar Swaminathan 		 * completion queue included), i.e. rx_ring_count.
3760*bafec742SSukumar Swaminathan 		 */
3761*bafec742SSukumar Swaminathan 		request = qlge->rx_ring_count;
3762*bafec742SSukumar Swaminathan 
3763*bafec742SSukumar Swaminathan 		orig = request;
3764*bafec742SSukumar Swaminathan 		if (request > (MAX_RX_RINGS))
3765*bafec742SSukumar Swaminathan 			request = MAX_RX_RINGS;
3766*bafec742SSukumar Swaminathan 		minimum = 2;
3767*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("interrupt type: MSI-X\n"));
3768*bafec742SSukumar Swaminathan 		break;
3769*bafec742SSukumar Swaminathan 
3770*bafec742SSukumar Swaminathan 	default:
3771*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Invalid parameter\n"));
3772*bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3773*bafec742SSukumar Swaminathan 	}
3774*bafec742SSukumar Swaminathan 
3775*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("interrupt handles requested: %d  minimum: %d\n",
3776*bafec742SSukumar Swaminathan 	    request, minimum));
3777*bafec742SSukumar Swaminathan 
3778*bafec742SSukumar Swaminathan 	/*
3779*bafec742SSukumar Swaminathan 	 * Get number of supported interrupts
3780*bafec742SSukumar Swaminathan 	 */
3781*bafec742SSukumar Swaminathan 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
3782*bafec742SSukumar Swaminathan 	if ((rc != DDI_SUCCESS) || (count < minimum)) {
3783*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Get interrupt number failed. Return: %d, "
3784*bafec742SSukumar Swaminathan 		    "count: %d\n", rc, count));
3785*bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3786*bafec742SSukumar Swaminathan 	}
3787*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("interrupts supported: %d\n", count));
3788*bafec742SSukumar Swaminathan 
3789*bafec742SSukumar Swaminathan 	/*
3790*bafec742SSukumar Swaminathan 	 * Get number of available interrupts
3791*bafec742SSukumar Swaminathan 	 */
3792*bafec742SSukumar Swaminathan 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
3793*bafec742SSukumar Swaminathan 	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
3794*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT,
3795*bafec742SSukumar Swaminathan 		    ("Get interrupt available number failed. Return:"
3796*bafec742SSukumar Swaminathan 		    " %d, available: %d\n", rc, avail));
3797*bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3798*bafec742SSukumar Swaminathan 	}
3799*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("interrupts available: %d\n", avail));
3800*bafec742SSukumar Swaminathan 
3801*bafec742SSukumar Swaminathan 	if (avail < request) {
3802*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Request %d handles, %d available\n",
3803*bafec742SSukumar Swaminathan 		    request, avail));
3804*bafec742SSukumar Swaminathan 		request = avail;
3805*bafec742SSukumar Swaminathan 	}
3806*bafec742SSukumar Swaminathan 
3807*bafec742SSukumar Swaminathan 	actual = 0;
3808*bafec742SSukumar Swaminathan 	qlge->intr_cnt = 0;
3809*bafec742SSukumar Swaminathan 
3810*bafec742SSukumar Swaminathan 	/*
3811*bafec742SSukumar Swaminathan 	 * Allocate an array of interrupt handles
3812*bafec742SSukumar Swaminathan 	 */
3813*bafec742SSukumar Swaminathan 	qlge->intr_size = (size_t)(request * sizeof (ddi_intr_handle_t));
3814*bafec742SSukumar Swaminathan 	qlge->htable = kmem_alloc(qlge->intr_size, KM_SLEEP);
3815*bafec742SSukumar Swaminathan 
3816*bafec742SSukumar Swaminathan 	rc = ddi_intr_alloc(devinfo, qlge->htable, intr_type, 0,
3817*bafec742SSukumar Swaminathan 	    (int)request, &actual, DDI_INTR_ALLOC_NORMAL);
3818*bafec742SSukumar Swaminathan 	if (rc != DDI_SUCCESS) {
3819*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d) Allocate interrupts failed. return:"
3820*bafec742SSukumar Swaminathan 		    " %d, request: %d, actual: %d",
3821*bafec742SSukumar Swaminathan 		    __func__, qlge->instance, rc, request, actual);
3822*bafec742SSukumar Swaminathan 		goto ql_intr_alloc_fail;
3823*bafec742SSukumar Swaminathan 	}
3824*bafec742SSukumar Swaminathan 	qlge->intr_cnt = actual;
3825*bafec742SSukumar Swaminathan 
3826*bafec742SSukumar Swaminathan 	qlge->sequence |= INIT_INTR_ALLOC;
3827*bafec742SSukumar Swaminathan 
3828*bafec742SSukumar Swaminathan 	/*
3829*bafec742SSukumar Swaminathan 	 * If the actual number of vectors is less than the minimum
3830*bafec742SSukumar Swaminathan 	 * then fail.
3831*bafec742SSukumar Swaminathan 	 */
3832*bafec742SSukumar Swaminathan 	if (actual < minimum) {
3833*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
3834*bafec742SSukumar Swaminathan 		    "Insufficient interrupt handles available: %d", actual);
3835*bafec742SSukumar Swaminathan 		goto ql_intr_alloc_fail;
3836*bafec742SSukumar Swaminathan 	}
3837*bafec742SSukumar Swaminathan 
3838*bafec742SSukumar Swaminathan 	/*
3839*bafec742SSukumar Swaminathan 	 * For MSI-X, a reduced actual count may force fewer tx & rx rings
3840*bafec742SSukumar Swaminathan 	 */
3841*bafec742SSukumar Swaminathan 	if ((intr_type == DDI_INTR_TYPE_MSIX) && (orig > actual)) {
3842*bafec742SSukumar Swaminathan 		if (actual < MAX_RX_RINGS) {
3843*bafec742SSukumar Swaminathan 			qlge->tx_ring_count = 1;
3844*bafec742SSukumar Swaminathan 			qlge->rss_ring_count = actual - 1;
3845*bafec742SSukumar Swaminathan 			qlge->rx_ring_count = qlge->tx_ring_count +
3846*bafec742SSukumar Swaminathan 			    qlge->rss_ring_count;
3847*bafec742SSukumar Swaminathan 		}
3848*bafec742SSukumar Swaminathan 	}
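	/*
	 * A worked example (hypothetical counts): if eight vectors were
	 * requested but only four were granted, the driver keeps one tx
	 * completion ring and uses the remaining three vectors as rss
	 * rings, so rx_ring_count matches the four vectors actually
	 * allocated.
	 */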
3849*bafec742SSukumar Swaminathan 	/*
3850*bafec742SSukumar Swaminathan 	 * Get priority for first vector, assume remaining are all the same
3851*bafec742SSukumar Swaminathan 	 */
3852*bafec742SSukumar Swaminathan 	rc = ddi_intr_get_pri(qlge->htable[0], &qlge->intr_pri);
3853*bafec742SSukumar Swaminathan 	if (rc != DDI_SUCCESS) {
3854*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Get interrupt priority failed: %d\n", rc));
3855*bafec742SSukumar Swaminathan 		goto ql_intr_alloc_fail;
3856*bafec742SSukumar Swaminathan 	}
3857*bafec742SSukumar Swaminathan 
3858*bafec742SSukumar Swaminathan 	rc = ddi_intr_get_cap(qlge->htable[0], &qlge->intr_cap);
3859*bafec742SSukumar Swaminathan 	if (rc != DDI_SUCCESS) {
3860*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Get interrupt cap failed: %d\n", rc));
3861*bafec742SSukumar Swaminathan 		goto ql_intr_alloc_fail;
3862*bafec742SSukumar Swaminathan 	}
3863*bafec742SSukumar Swaminathan 
3864*bafec742SSukumar Swaminathan 	qlge->intr_type = intr_type;
3865*bafec742SSukumar Swaminathan 
3866*bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
3867*bafec742SSukumar Swaminathan 
3868*bafec742SSukumar Swaminathan ql_intr_alloc_fail:
3869*bafec742SSukumar Swaminathan 	ql_free_irq_vectors(qlge);
3870*bafec742SSukumar Swaminathan 
3871*bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
3872*bafec742SSukumar Swaminathan }
3873*bafec742SSukumar Swaminathan 
3874*bafec742SSukumar Swaminathan /*
3875*bafec742SSukumar Swaminathan  * Allocate interrupt vector(s) for one of the following interrupt types:
3876*bafec742SSukumar Swaminathan  * MSI-X, MSI or legacy (fixed). In MSI and legacy modes we support only
3877*bafec742SSukumar Swaminathan  * a single receive and transmit queue.
3878*bafec742SSukumar Swaminathan  */
3879*bafec742SSukumar Swaminathan int
3880*bafec742SSukumar Swaminathan ql_alloc_irqs(qlge_t *qlge)
3881*bafec742SSukumar Swaminathan {
3882*bafec742SSukumar Swaminathan 	int intr_types;
3883*bafec742SSukumar Swaminathan 	int rval;
3884*bafec742SSukumar Swaminathan 
3885*bafec742SSukumar Swaminathan 	/*
3886*bafec742SSukumar Swaminathan 	 * Get supported interrupt types
3887*bafec742SSukumar Swaminathan 	 */
3888*bafec742SSukumar Swaminathan 	if (ddi_intr_get_supported_types(qlge->dip, &intr_types)
3889*bafec742SSukumar Swaminathan 	    != DDI_SUCCESS) {
3890*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d):ddi_intr_get_supported_types failed",
3891*bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3892*bafec742SSukumar Swaminathan 
3893*bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3894*bafec742SSukumar Swaminathan 	}
3895*bafec742SSukumar Swaminathan 
3896*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("%s(%d) Interrupt types supported %d\n",
3897*bafec742SSukumar Swaminathan 	    __func__, qlge->instance, intr_types));
3898*bafec742SSukumar Swaminathan 
3899*bafec742SSukumar Swaminathan 	/* Install MSI-X interrupts */
3900*bafec742SSukumar Swaminathan 	if ((intr_types & DDI_INTR_TYPE_MSIX) != 0) {
3901*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt supported %d\n",
3902*bafec742SSukumar Swaminathan 		    __func__, qlge->instance, intr_types));
3903*bafec742SSukumar Swaminathan 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSIX);
3904*bafec742SSukumar Swaminathan 		if (rval == DDI_SUCCESS) {
3905*bafec742SSukumar Swaminathan 			return (rval);
3906*bafec742SSukumar Swaminathan 		}
3907*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt allocation failed,"
3908*bafec742SSukumar Swaminathan 		    " trying MSI interrupts ...\n", __func__, qlge->instance));
3909*bafec742SSukumar Swaminathan 	}
3910*bafec742SSukumar Swaminathan 
3911*bafec742SSukumar Swaminathan 	/*
3912*bafec742SSukumar Swaminathan 	 * We will have 2 completion queues in MSI / Legacy mode,
3913*bafec742SSukumar Swaminathan 	 * Queue 0 for default completions
3914*bafec742SSukumar Swaminathan 	 * Queue 1 for transmit completions
3915*bafec742SSukumar Swaminathan 	 */
3916*bafec742SSukumar Swaminathan 	qlge->rss_ring_count = 1; /* Default completion queue (0) for all */
3917*bafec742SSukumar Swaminathan 	qlge->tx_ring_count = 1; /* Single tx completion queue */
3918*bafec742SSukumar Swaminathan 	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
3919*bafec742SSukumar Swaminathan 
3920*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("%s(%d) Falling back to a single rx/tx queue pair\n",
3921*bafec742SSukumar Swaminathan 	    __func__, qlge->instance));
3925*bafec742SSukumar Swaminathan 	rval = DDI_FAILURE;
3926*bafec742SSukumar Swaminathan 
3927*bafec742SSukumar Swaminathan 	/*
3928*bafec742SSukumar Swaminathan 	 * If the OS supports MSI-X but its allocation failed above, try
3929*bafec742SSukumar Swaminathan 	 * MSI interrupts. If MSI interrupt allocation also fails, fall
3930*bafec742SSukumar Swaminathan 	 * back to a fixed (legacy) interrupt.
3931*bafec742SSukumar Swaminathan 	 */
3932*bafec742SSukumar Swaminathan 	if (intr_types & DDI_INTR_TYPE_MSI) {
3933*bafec742SSukumar Swaminathan 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSI);
3934*bafec742SSukumar Swaminathan 		if (rval == DDI_SUCCESS) {
3935*bafec742SSukumar Swaminathan 			qlge->intr_type = DDI_INTR_TYPE_MSI;
3936*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT, ("%s(%d) use MSI Interrupt \n",
3937*bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
3938*bafec742SSukumar Swaminathan 		}
3939*bafec742SSukumar Swaminathan 	}
3940*bafec742SSukumar Swaminathan 
3941*bafec742SSukumar Swaminathan 	/* Try Fixed interrupt Legacy mode */
3942*bafec742SSukumar Swaminathan 	if (rval != DDI_SUCCESS) {
3943*bafec742SSukumar Swaminathan 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_FIXED);
3944*bafec742SSukumar Swaminathan 		if (rval != DDI_SUCCESS) {
3945*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d):Legacy mode interrupt "
3946*bafec742SSukumar Swaminathan 			    "allocation failed",
3947*bafec742SSukumar Swaminathan 			    __func__, qlge->instance);
3948*bafec742SSukumar Swaminathan 		} else {
3949*bafec742SSukumar Swaminathan 			qlge->intr_type = DDI_INTR_TYPE_FIXED;
3950*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT, ("%s(%d) use Fixed Interrupt \n",
3951*bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
3952*bafec742SSukumar Swaminathan 		}
3953*bafec742SSukumar Swaminathan 	}
3954*bafec742SSukumar Swaminathan 
3955*bafec742SSukumar Swaminathan 	return (rval);
3956*bafec742SSukumar Swaminathan }
3957*bafec742SSukumar Swaminathan 
3958*bafec742SSukumar Swaminathan static void
3959*bafec742SSukumar Swaminathan ql_free_rx_tx_locks(qlge_t *qlge)
3960*bafec742SSukumar Swaminathan {
3961*bafec742SSukumar Swaminathan 	int i;
3962*bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
3963*bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
3964*bafec742SSukumar Swaminathan 
3965*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
3966*bafec742SSukumar Swaminathan 		tx_ring = &qlge->tx_ring[i];
3967*bafec742SSukumar Swaminathan 		mutex_destroy(&tx_ring->tx_lock);
3968*bafec742SSukumar Swaminathan 	}
3969*bafec742SSukumar Swaminathan 
3970*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
3971*bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
3972*bafec742SSukumar Swaminathan 		mutex_destroy(&rx_ring->rx_lock);
3973*bafec742SSukumar Swaminathan 		mutex_destroy(&rx_ring->sbq_lock);
3974*bafec742SSukumar Swaminathan 		mutex_destroy(&rx_ring->lbq_lock);
3975*bafec742SSukumar Swaminathan 	}
3976*bafec742SSukumar Swaminathan }
3977*bafec742SSukumar Swaminathan 
3978*bafec742SSukumar Swaminathan /*
3979*bafec742SSukumar Swaminathan  * Frees all resources allocated during attach.
3980*bafec742SSukumar Swaminathan  *
3981*bafec742SSukumar Swaminathan  * Input:
3982*bafec742SSukumar Swaminathan  * dip = pointer to device information structure.
3983*bafec742SSukumar Swaminathan  * qlge = adapter state; its sequence bits select the resources to free.
3984*bafec742SSukumar Swaminathan  *
3985*bafec742SSukumar Swaminathan  * Context:
3986*bafec742SSukumar Swaminathan  * Kernel context.
3987*bafec742SSukumar Swaminathan  */
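/*
 * Teardown sketch: each INIT_* bit that was set as attach progressed
 * is tested below and cleared once the matching resource is released,
 * so a partially completed attach frees only what it actually
 * allocated.
 */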
3988*bafec742SSukumar Swaminathan static void
3989*bafec742SSukumar Swaminathan ql_free_resources(dev_info_t *dip, qlge_t *qlge)
3990*bafec742SSukumar Swaminathan {
3991*bafec742SSukumar Swaminathan 
3992*bafec742SSukumar Swaminathan 	/* Disable driver timer */
3993*bafec742SSukumar Swaminathan 	ql_stop_timer(qlge);
3994*bafec742SSukumar Swaminathan 
3995*bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_MAC_REGISTERED) {
3996*bafec742SSukumar Swaminathan 		mac_unregister(qlge->mh);
3997*bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_MAC_REGISTERED;
3998*bafec742SSukumar Swaminathan 	}
3999*bafec742SSukumar Swaminathan 
4000*bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_MAC_ALLOC) {
4001*bafec742SSukumar Swaminathan 		/* Nothing to do, macp is already freed */
4002*bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_MAC_ALLOC;
4003*bafec742SSukumar Swaminathan 	}
4004*bafec742SSukumar Swaminathan 
4005*bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_PCI_CONFIG_SETUP) {
4006*bafec742SSukumar Swaminathan 		pci_config_teardown(&qlge->pci_handle);
4007*bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_PCI_CONFIG_SETUP;
4008*bafec742SSukumar Swaminathan 	}
4009*bafec742SSukumar Swaminathan 
4010*bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_ADD_INTERRUPT) {
4011*bafec742SSukumar Swaminathan 		ql_free_irq_vectors(qlge);
4012*bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_ADD_INTERRUPT;
4013*bafec742SSukumar Swaminathan 	}
4014*bafec742SSukumar Swaminathan 
4015*bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_ADD_SOFT_INTERRUPT) {
4016*bafec742SSukumar Swaminathan 		(void) ddi_intr_remove_softint(qlge->mpi_event_intr_hdl);
4017*bafec742SSukumar Swaminathan 		(void) ddi_intr_remove_softint(qlge->mpi_reset_intr_hdl);
4018*bafec742SSukumar Swaminathan 		(void) ddi_intr_remove_softint(qlge->asic_reset_intr_hdl);
4019*bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_ADD_SOFT_INTERRUPT;
4020*bafec742SSukumar Swaminathan 	}
4021*bafec742SSukumar Swaminathan 
4022*bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_KSTATS) {
4023*bafec742SSukumar Swaminathan 		ql_fini_kstats(qlge);
4024*bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_KSTATS;
4025*bafec742SSukumar Swaminathan 	}
4026*bafec742SSukumar Swaminathan 
4027*bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_MUTEX) {
4028*bafec742SSukumar Swaminathan 		mutex_destroy(&qlge->gen_mutex);
4029*bafec742SSukumar Swaminathan 		mutex_destroy(&qlge->hw_mutex);
4030*bafec742SSukumar Swaminathan 		mutex_destroy(&qlge->mbx_mutex);
4031*bafec742SSukumar Swaminathan 		cv_destroy(&qlge->cv_mbx_intr);
4032*bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_MUTEX;
4033*bafec742SSukumar Swaminathan 	}
4034*bafec742SSukumar Swaminathan 
4035*bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_LOCKS_CREATED) {
4036*bafec742SSukumar Swaminathan 		ql_free_rx_tx_locks(qlge);
4037*bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_LOCKS_CREATED;
4038*bafec742SSukumar Swaminathan 	}
4039*bafec742SSukumar Swaminathan 
4040*bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_MEMORY_ALLOC) {
4041*bafec742SSukumar Swaminathan 		ql_free_mem_resources(qlge);
4042*bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_MEMORY_ALLOC;
4043*bafec742SSukumar Swaminathan 	}
4044*bafec742SSukumar Swaminathan 
4045*bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_REGS_SETUP) {
4046*bafec742SSukumar Swaminathan 		ddi_regs_map_free(&qlge->dev_handle);
4047*bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_REGS_SETUP;
4048*bafec742SSukumar Swaminathan 	}
4049*bafec742SSukumar Swaminathan 
4050*bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_DOORBELL_REGS_SETUP) {
4051*bafec742SSukumar Swaminathan 		ddi_regs_map_free(&qlge->dev_doorbell_reg_handle);
4052*bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_DOORBELL_REGS_SETUP;
4053*bafec742SSukumar Swaminathan 	}
4054*bafec742SSukumar Swaminathan 
4055*bafec742SSukumar Swaminathan 	/*
4056*bafec742SSukumar Swaminathan 	 * Free the flash FLT table that was allocated during attach.
4057*bafec742SSukumar Swaminathan 	 */
4058*bafec742SSukumar Swaminathan 	if ((qlge->flt.ql_flt_entry_ptr != NULL) &&
4059*bafec742SSukumar Swaminathan 	    (qlge->flt.header.length != 0)) {
4060*bafec742SSukumar Swaminathan 		kmem_free(qlge->flt.ql_flt_entry_ptr, qlge->flt.header.length);
4061*bafec742SSukumar Swaminathan 		qlge->flt.ql_flt_entry_ptr = NULL;
4062*bafec742SSukumar Swaminathan 	}
4063*bafec742SSukumar Swaminathan 
4064*bafec742SSukumar Swaminathan 	/* finally, free qlge structure */
4065*bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_SOFTSTATE_ALLOC) {
4066*bafec742SSukumar Swaminathan 		kmem_free(qlge, sizeof (qlge_t));
4067*bafec742SSukumar Swaminathan 	}
4068*bafec742SSukumar Swaminathan 
4069*bafec742SSukumar Swaminathan 	ddi_prop_remove_all(dip);
4070*bafec742SSukumar Swaminathan 	ddi_set_driver_private(dip, NULL);
4071*bafec742SSukumar Swaminathan 
4072*bafec742SSukumar Swaminathan }
4073*bafec742SSukumar Swaminathan 
4074*bafec742SSukumar Swaminathan /*
4075*bafec742SSukumar Swaminathan  * Set promiscuous mode of the driver
4076*bafec742SSukumar Swaminathan  * The caller must hold the HW_LOCK.
4077*bafec742SSukumar Swaminathan  */
4078*bafec742SSukumar Swaminathan void
4079*bafec742SSukumar Swaminathan ql_set_promiscuous(qlge_t *qlge, int mode)
4080*bafec742SSukumar Swaminathan {
4081*bafec742SSukumar Swaminathan 	if (mode) {
4082*bafec742SSukumar Swaminathan 		ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4083*bafec742SSukumar Swaminathan 		    RT_IDX_VALID, 1);
4084*bafec742SSukumar Swaminathan 	} else {
4085*bafec742SSukumar Swaminathan 		ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4086*bafec742SSukumar Swaminathan 		    RT_IDX_VALID, 0);
4087*bafec742SSukumar Swaminathan 	}
4088*bafec742SSukumar Swaminathan }
4089*bafec742SSukumar Swaminathan /*
4090*bafec742SSukumar Swaminathan  * Write 'data1' to Mac Protocol Address Index Register and
4091*bafec742SSukumar Swaminathan  * 'data2' to Mac Protocol Address Data Register
4092*bafec742SSukumar Swaminathan  * The caller must have acquired the MAC protocol semaphore lock.
4093*bafec742SSukumar Swaminathan  */
4094*bafec742SSukumar Swaminathan static int
4095*bafec742SSukumar Swaminathan ql_write_mac_proto_regs(qlge_t *qlge, uint32_t data1, uint32_t data2)
4096*bafec742SSukumar Swaminathan {
4097*bafec742SSukumar Swaminathan 	int return_value = DDI_SUCCESS;
4098*bafec742SSukumar Swaminathan 
4099*bafec742SSukumar Swaminathan 	if (ql_wait_reg_bit(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
4100*bafec742SSukumar Swaminathan 	    MAC_PROTOCOL_ADDRESS_INDEX_MW, BIT_SET, 5) != DDI_SUCCESS) {
4101*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Wait for MAC_PROTOCOL Address Register "
4102*bafec742SSukumar Swaminathan 		    "timeout.");
4103*bafec742SSukumar Swaminathan 		return_value = DDI_FAILURE;
4104*bafec742SSukumar Swaminathan 		goto out;
4105*bafec742SSukumar Swaminathan 	}
4106*bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX /* A8 */, data1);
4107*bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA /* 0xAC */, data2);
4108*bafec742SSukumar Swaminathan out:
4109*bafec742SSukumar Swaminathan 	return (return_value);
4110*bafec742SSukumar Swaminathan }
4111*bafec742SSukumar Swaminathan /*
4112*bafec742SSukumar Swaminathan  * Enable the 'index'ed multicast address in the host memory's multicast_list
4113*bafec742SSukumar Swaminathan  */
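/*
 * A worked example (hypothetical address): enabling 01:00:5e:01:02:03
 * at index 2 first writes value1 = ENABLE | MULTICAST | (2 << 4) | 0
 * with value2 = 0x5e010203 (octets 2-5), then writes value1 with
 * offset 1 and value2 = 0x0100 (octets 0-1).
 */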
4114*bafec742SSukumar Swaminathan int
4115*bafec742SSukumar Swaminathan ql_add_multicast_address(qlge_t *qlge, int index)
4116*bafec742SSukumar Swaminathan {
4117*bafec742SSukumar Swaminathan 	int rtn_val = DDI_FAILURE;
4118*bafec742SSukumar Swaminathan 	uint32_t offset;
4119*bafec742SSukumar Swaminathan 	uint32_t value1, value2;
4120*bafec742SSukumar Swaminathan 
4121*bafec742SSukumar Swaminathan 	/* Acquire the required semaphore */
4122*bafec742SSukumar Swaminathan 	if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4123*bafec742SSukumar Swaminathan 		return (rtn_val);
4124*bafec742SSukumar Swaminathan 	}
4125*bafec742SSukumar Swaminathan 
4126*bafec742SSukumar Swaminathan 	/* Program Offset0 - lower 32 bits of the MAC address */
4127*bafec742SSukumar Swaminathan 	offset = 0;
4128*bafec742SSukumar Swaminathan 	value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4129*bafec742SSukumar Swaminathan 	    (index << 4) | offset;
4130*bafec742SSukumar Swaminathan 	value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4131*bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4132*bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4133*bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4134*bafec742SSukumar Swaminathan 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS)
4135*bafec742SSukumar Swaminathan 		goto out;
4136*bafec742SSukumar Swaminathan 
4137*bafec742SSukumar Swaminathan 	/* Program offset1: upper 16 bits of the MAC address */
4138*bafec742SSukumar Swaminathan 	offset = 1;
4139*bafec742SSukumar Swaminathan 	value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4140*bafec742SSukumar Swaminathan 	    (index<<4) | offset;
4141*bafec742SSukumar Swaminathan 	value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[0] << 8)
4142*bafec742SSukumar Swaminathan 	    |qlge->multicast_list[index].addr.ether_addr_octet[1]);
4143*bafec742SSukumar Swaminathan 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4144*bafec742SSukumar Swaminathan 		goto out;
4145*bafec742SSukumar Swaminathan 	}
4146*bafec742SSukumar Swaminathan 	rtn_val = DDI_SUCCESS;
4147*bafec742SSukumar Swaminathan out:
4148*bafec742SSukumar Swaminathan 	ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4149*bafec742SSukumar Swaminathan 	return (rtn_val);
4150*bafec742SSukumar Swaminathan }
4151*bafec742SSukumar Swaminathan 
4152*bafec742SSukumar Swaminathan /*
4153*bafec742SSukumar Swaminathan  * Disable the 'index'ed multicast address in the host memory's multicast_list
4154*bafec742SSukumar Swaminathan  */
4155*bafec742SSukumar Swaminathan int
4156*bafec742SSukumar Swaminathan ql_remove_multicast_address(qlge_t *qlge, int index)
4157*bafec742SSukumar Swaminathan {
4158*bafec742SSukumar Swaminathan 	int rtn_val = DDI_FAILURE;
4159*bafec742SSukumar Swaminathan 	uint32_t offset;
4160*bafec742SSukumar Swaminathan 	uint32_t value1, value2;
4161*bafec742SSukumar Swaminathan 
4162*bafec742SSukumar Swaminathan 	/* Acquire the required semaphore */
4163*bafec742SSukumar Swaminathan 	if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4164*bafec742SSukumar Swaminathan 		return (rtn_val);
4165*bafec742SSukumar Swaminathan 	}
4166*bafec742SSukumar Swaminathan 	/* Program Offset0 - lower 32 bits of the MAC address */
4167*bafec742SSukumar Swaminathan 	offset = 0;
4168*bafec742SSukumar Swaminathan 	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4169*bafec742SSukumar Swaminathan 	value2 =
4170*bafec742SSukumar Swaminathan 	    ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4171*bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4172*bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4173*bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4174*bafec742SSukumar Swaminathan 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4175*bafec742SSukumar Swaminathan 		goto out;
4176*bafec742SSukumar Swaminathan 	}
4177*bafec742SSukumar Swaminathan 	/* Program offset1: upper 16 bits of the MAC address */
4178*bafec742SSukumar Swaminathan 	offset = 1;
4179*bafec742SSukumar Swaminathan 	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4180*bafec742SSukumar Swaminathan 	value2 = 0;
4181*bafec742SSukumar Swaminathan 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4182*bafec742SSukumar Swaminathan 		goto out;
4183*bafec742SSukumar Swaminathan 	}
4184*bafec742SSukumar Swaminathan 	rtn_val = DDI_SUCCESS;
4185*bafec742SSukumar Swaminathan out:
4186*bafec742SSukumar Swaminathan 	ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4187*bafec742SSukumar Swaminathan 	return (rtn_val);
4188*bafec742SSukumar Swaminathan }
4189*bafec742SSukumar Swaminathan 
4190*bafec742SSukumar Swaminathan /*
4191*bafec742SSukumar Swaminathan  * Add a new multicast address to the list of supported multicast
4192*bafec742SSukumar Swaminathan  * addresses. This API is called after the OS calls gld_set_multicast
4193*bafec742SSukumar Swaminathan  * (GLDv2) or m_multicst (GLDv3).
4194*bafec742SSukumar Swaminathan  *
4195*bafec742SSukumar Swaminathan  * Restriction:
4196*bafec742SSukumar Swaminathan  * The maximum number of multicast addresses is limited by the hardware.
4197*bafec742SSukumar Swaminathan  */
4198*bafec742SSukumar Swaminathan int
4199*bafec742SSukumar Swaminathan ql_add_to_multicast_list(qlge_t *qlge, uint8_t *ep)
4200*bafec742SSukumar Swaminathan {
4201*bafec742SSukumar Swaminathan 	uint32_t index = qlge->multicast_list_count;
4202*bafec742SSukumar Swaminathan 	int rval = DDI_SUCCESS;
4203*bafec742SSukumar Swaminathan 	int status;
4204*bafec742SSukumar Swaminathan 
4205*bafec742SSukumar Swaminathan 	if ((ep[0] & 01) == 0) {
4206*bafec742SSukumar Swaminathan 		rval = EINVAL;
4207*bafec742SSukumar Swaminathan 		goto exit;
4208*bafec742SSukumar Swaminathan 	}
4209*bafec742SSukumar Swaminathan 
4210*bafec742SSukumar Swaminathan 	/* if there is available space in the multicast_list, then add it */
4211*bafec742SSukumar Swaminathan 	if (index < MAX_MULTICAST_LIST_SIZE) {
4212*bafec742SSukumar Swaminathan 		bcopy(ep, qlge->multicast_list[index].addr.ether_addr_octet,
4213*bafec742SSukumar Swaminathan 		    ETHERADDRL);
4214*bafec742SSukumar Swaminathan 		/* increment the total number of addresses in multicast list */
4215*bafec742SSukumar Swaminathan 		ql_add_multicast_address(qlge, index);
4216*bafec742SSukumar Swaminathan 		qlge->multicast_list_count++;
4217*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD,
4218*bafec742SSukumar Swaminathan 		    ("%s(%d): added to index of multicast list= 0x%x, "
4219*bafec742SSukumar Swaminathan 		    "total %d\n", __func__, qlge->instance, index,
4220*bafec742SSukumar Swaminathan 		    qlge->multicast_list_count));
4221*bafec742SSukumar Swaminathan 
4222*bafec742SSukumar Swaminathan 		if (index > MAX_MULTICAST_HW_SIZE) {
4223*bafec742SSukumar Swaminathan 			if (!qlge->multicast_promisc) {
4224*bafec742SSukumar Swaminathan 				status = ql_set_routing_reg(qlge,
4225*bafec742SSukumar Swaminathan 				    RT_IDX_ALLMULTI_SLOT,
4226*bafec742SSukumar Swaminathan 				    RT_IDX_MCAST, 1);
4227*bafec742SSukumar Swaminathan 				if (status) {
4228*bafec742SSukumar Swaminathan 					cmn_err(CE_WARN,
4229*bafec742SSukumar Swaminathan 					    "Failed to init routing reg "
4230*bafec742SSukumar Swaminathan 					    "for mcast promisc mode.");
4231*bafec742SSukumar Swaminathan 					rval = ENOENT;
4232*bafec742SSukumar Swaminathan 					goto exit;
4233*bafec742SSukumar Swaminathan 				}
4234*bafec742SSukumar Swaminathan 				qlge->multicast_promisc = B_TRUE;
4235*bafec742SSukumar Swaminathan 			}
4236*bafec742SSukumar Swaminathan 		}
4237*bafec742SSukumar Swaminathan 	} else {
4238*bafec742SSukumar Swaminathan 		rval = ENOENT;
4239*bafec742SSukumar Swaminathan 	}
4240*bafec742SSukumar Swaminathan exit:
4241*bafec742SSukumar Swaminathan 	return (rval);
4242*bafec742SSukumar Swaminathan }
4243*bafec742SSukumar Swaminathan 
4244*bafec742SSukumar Swaminathan /*
4245*bafec742SSukumar Swaminathan  * Remove an old multicast address from the list of supported multicast
4246*bafec742SSukumar Swaminathan  * addresses. This API is called after the OS calls gld_set_multicast
4247*bafec742SSukumar Swaminathan  * (GLDv2) or m_multicst (GLDv3).
4248*bafec742SSukumar Swaminathan  * The maximum number of multicast addresses is limited by the hardware.
4249*bafec742SSukumar Swaminathan  */
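/*
 * Removal sketch: deleting entry i from a list of n entries shifts
 * entries i+1 .. n-1 down one slot, reprograms those hardware slots,
 * and then disables the now-stale slot n-1.
 */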
4250*bafec742SSukumar Swaminathan int
4251*bafec742SSukumar Swaminathan ql_remove_from_multicast_list(qlge_t *qlge, uint8_t *ep)
4252*bafec742SSukumar Swaminathan {
4253*bafec742SSukumar Swaminathan 	uint32_t total = qlge->multicast_list_count;
4254*bafec742SSukumar Swaminathan 	int i = 0;
4255*bafec742SSukumar Swaminathan 	int rmv_index = 0;
4256*bafec742SSukumar Swaminathan 	size_t length = sizeof (ql_multicast_addr);
4257*bafec742SSukumar Swaminathan 	int status;
4258*bafec742SSukumar Swaminathan 
4259*bafec742SSukumar Swaminathan 	for (i = 0; i < total; i++) {
4260*bafec742SSukumar Swaminathan 		if (bcmp(ep, &qlge->multicast_list[i].addr, ETHERADDRL) != 0) {
4261*bafec742SSukumar Swaminathan 			continue;
4262*bafec742SSukumar Swaminathan 		}
4263*bafec742SSukumar Swaminathan 
4264*bafec742SSukumar Swaminathan 		rmv_index = i;
4265*bafec742SSukumar Swaminathan 		/* block-move the rest of the multicast addresses forward */
4266*bafec742SSukumar Swaminathan 		length = ((total -1) -i) * sizeof (ql_multicast_addr);
4267*bafec742SSukumar Swaminathan 		length = ((total - 1) - i) * sizeof (ql_multicast_addr);
4268*bafec742SSukumar Swaminathan 			bcopy(&qlge->multicast_list[i+1],
4269*bafec742SSukumar Swaminathan 			    &qlge->multicast_list[i], length);
4270*bafec742SSukumar Swaminathan 		}
4271*bafec742SSukumar Swaminathan 		qlge->multicast_list_count--;
4272*bafec742SSukumar Swaminathan 		if (qlge->multicast_list_count <= MAX_MULTICAST_HW_SIZE) {
4273*bafec742SSukumar Swaminathan 			/*
4274*bafec742SSukumar Swaminathan 			 * there is a deletion in multicast list table,
4275*bafec742SSukumar Swaminathan 			 * re-enable them
4276*bafec742SSukumar Swaminathan 			 */
4277*bafec742SSukumar Swaminathan 			for (i = rmv_index; i < qlge->multicast_list_count;
4278*bafec742SSukumar Swaminathan 			    i++) {
4279*bafec742SSukumar Swaminathan 				ql_add_multicast_address(qlge, i);
4280*bafec742SSukumar Swaminathan 			}
4281*bafec742SSukumar Swaminathan 			/* and disable the last one */
4282*bafec742SSukumar Swaminathan 			ql_remove_multicast_address(qlge, i);
4283*bafec742SSukumar Swaminathan 
4284*bafec742SSukumar Swaminathan 			/* disable multicast promiscuous mode */
4285*bafec742SSukumar Swaminathan 			if (qlge->multicast_promisc) {
4286*bafec742SSukumar Swaminathan 				status = ql_set_routing_reg(qlge,
4287*bafec742SSukumar Swaminathan 				    RT_IDX_ALLMULTI_SLOT,
4288*bafec742SSukumar Swaminathan 				    RT_IDX_MCAST, 0);
4289*bafec742SSukumar Swaminathan 				if (status) {
4290*bafec742SSukumar Swaminathan 					cmn_err(CE_WARN,
4291*bafec742SSukumar Swaminathan 					    "Failed to init routing reg for "
4292*bafec742SSukumar Swaminathan 					    "mcast promisc mode.");
4293*bafec742SSukumar Swaminathan 					goto exit;
4294*bafec742SSukumar Swaminathan 				}
4295*bafec742SSukumar Swaminathan 				/* write to config register */
4296*bafec742SSukumar Swaminathan 				qlge->multicast_promisc = B_FALSE;
4297*bafec742SSukumar Swaminathan 			}
4298*bafec742SSukumar Swaminathan 		}
4299*bafec742SSukumar Swaminathan 		break;
4300*bafec742SSukumar Swaminathan 	}
4301*bafec742SSukumar Swaminathan exit:
4302*bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
4303*bafec742SSukumar Swaminathan }
4304*bafec742SSukumar Swaminathan 
4305*bafec742SSukumar Swaminathan /*
4306*bafec742SSukumar Swaminathan  * Read an XGMAC register
4307*bafec742SSukumar Swaminathan  */
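/*
 * Usage sketch (the register offset below is a hypothetical value):
 *
 *	uint32_t val;
 *
 *	if (ql_read_xgmac_reg(qlge, 0x100, &val) == DDI_SUCCESS)
 *		QL_PRINT(DBG_INIT, ("xgmac reg 0x100 = 0x%x\n", val));
 */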
4308*bafec742SSukumar Swaminathan int
4309*bafec742SSukumar Swaminathan ql_read_xgmac_reg(qlge_t *qlge, uint32_t addr, uint32_t *val)
4310*bafec742SSukumar Swaminathan {
4311*bafec742SSukumar Swaminathan 	int rtn_val = DDI_FAILURE;
4312*bafec742SSukumar Swaminathan 
4313*bafec742SSukumar Swaminathan 	/* wait for XGMAC Address register RDY bit set */
4314*bafec742SSukumar Swaminathan 	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4315*bafec742SSukumar Swaminathan 	    BIT_SET, 10) != DDI_SUCCESS) {
4316*bafec742SSukumar Swaminathan 		goto out;
4317*bafec742SSukumar Swaminathan 	}
4318*bafec742SSukumar Swaminathan 	/* start rx transaction */
4319*bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_XGMAC_ADDRESS, addr|XGMAC_ADDRESS_READ_TRANSACT);
4320*bafec742SSukumar Swaminathan 
4321*bafec742SSukumar Swaminathan 	/*
4322*bafec742SSukumar Swaminathan 	 * wait for XGMAC Address register RDY bit set,
4323*bafec742SSukumar Swaminathan 	 * which indicates data is ready
4324*bafec742SSukumar Swaminathan 	 */
4325*bafec742SSukumar Swaminathan 	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4326*bafec742SSukumar Swaminathan 	    BIT_SET, 10) != DDI_SUCCESS) {
4327*bafec742SSukumar Swaminathan 		goto out;
4328*bafec742SSukumar Swaminathan 	}
4329*bafec742SSukumar Swaminathan 	/* read data from the XGMAC_DATA register */
4330*bafec742SSukumar Swaminathan 	*val = ql_read_reg(qlge, REG_XGMAC_DATA);
4331*bafec742SSukumar Swaminathan 	rtn_val = DDI_SUCCESS;
4332*bafec742SSukumar Swaminathan out:
4333*bafec742SSukumar Swaminathan 	return (rtn_val);
4334*bafec742SSukumar Swaminathan }
4335*bafec742SSukumar Swaminathan 
4336*bafec742SSukumar Swaminathan /*
4337*bafec742SSukumar Swaminathan  * Implement checksum offload for IPv4 IP packets
4338*bafec742SSukumar Swaminathan  */
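/*
 * Header-offset encoding note: the low six bits of hdr_off hold the
 * IP header offset and the upper bits hold the transport header
 * offset.  For an untagged frame (mac_hdr_len = 14, ip_hdr_len = 20)
 * this works out to hdr_off = (34 << 6) | 14.
 */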
4339*bafec742SSukumar Swaminathan static void
4340*bafec742SSukumar Swaminathan ql_hw_csum_setup(qlge_t *qlge, uint32_t pflags, caddr_t bp,
4341*bafec742SSukumar Swaminathan     struct ob_mac_iocb_req *mac_iocb_ptr)
4342*bafec742SSukumar Swaminathan {
4343*bafec742SSukumar Swaminathan 	struct ip *iphdr = NULL;
4344*bafec742SSukumar Swaminathan 	struct ether_header *ethhdr;
4345*bafec742SSukumar Swaminathan 	struct ether_vlan_header *ethvhdr;
4346*bafec742SSukumar Swaminathan 	struct tcphdr *tcp_hdr;
4347*bafec742SSukumar Swaminathan 	uint32_t etherType;
4348*bafec742SSukumar Swaminathan 	int mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
4349*bafec742SSukumar Swaminathan 	int ip_hdr_off, tcp_udp_hdr_off, hdr_off;
4350*bafec742SSukumar Swaminathan 
4351*bafec742SSukumar Swaminathan 	ethhdr  = (struct ether_header *)((void *)bp);
4352*bafec742SSukumar Swaminathan 	ethvhdr = (struct ether_vlan_header *)((void *)bp);
4353*bafec742SSukumar Swaminathan 	/* Is this a VLAN packet? */
4354*bafec742SSukumar Swaminathan 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
4355*bafec742SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_vlan_header);
4356*bafec742SSukumar Swaminathan 		etherType = ntohs(ethvhdr->ether_type);
4357*bafec742SSukumar Swaminathan 	} else {
4358*bafec742SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_header);
4359*bafec742SSukumar Swaminathan 		etherType = ntohs(ethhdr->ether_type);
4360*bafec742SSukumar Swaminathan 	}
4361*bafec742SSukumar Swaminathan 	/* Is this IPv4 or IPv6 packet? */
4362*bafec742SSukumar Swaminathan 	if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len)) ==
4363*bafec742SSukumar Swaminathan 	    IPV4_VERSION) {
4364*bafec742SSukumar Swaminathan 		if (etherType == ETHERTYPE_IP /* 0800 */) {
4365*bafec742SSukumar Swaminathan 			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
4366*bafec742SSukumar Swaminathan 		} else {
4367*bafec742SSukumar Swaminathan 			/* EMPTY */
4368*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX,
4369*bafec742SSukumar Swaminathan 			    ("%s(%d) : IPv4 version but non-IP ethertype 0x%x\n",
4370*bafec742SSukumar Swaminathan 			    __func__, qlge->instance, etherType));
4371*bafec742SSukumar Swaminathan 		}
4372*bafec742SSukumar Swaminathan 	}
4373*bafec742SSukumar Swaminathan 	/* ipV4 packets */
4374*bafec742SSukumar Swaminathan 	if (iphdr != NULL) {
4375*bafec742SSukumar Swaminathan 
4376*bafec742SSukumar Swaminathan 		ip_hdr_len = IPH_HDR_LENGTH(iphdr);
4377*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX,
4378*bafec742SSukumar Swaminathan 		    ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH:"
4379*bafec742SSukumar Swaminathan 		    " %d bytes \n", __func__, qlge->instance, ip_hdr_len));
4380*bafec742SSukumar Swaminathan 
4381*bafec742SSukumar Swaminathan 		ip_hdr_off = mac_hdr_len;
4382*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
4383*bafec742SSukumar Swaminathan 		    __func__, qlge->instance, ip_hdr_len));
4384*bafec742SSukumar Swaminathan 
4385*bafec742SSukumar Swaminathan 		mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
4386*bafec742SSukumar Swaminathan 		    OB_MAC_IOCB_REQ_IPv4);
4387*bafec742SSukumar Swaminathan 
4388*bafec742SSukumar Swaminathan 		if (pflags & HCK_IPV4_HDRCKSUM) {
4389*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX, ("%s(%d) : Do IPv4 header checksum\n",
4390*bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
4391*bafec742SSukumar Swaminathan 			mac_iocb_ptr->opcode = OPCODE_OB_MAC_OFFLOAD_IOCB;
4392*bafec742SSukumar Swaminathan 			mac_iocb_ptr->flag2 = (uint8_t)(mac_iocb_ptr->flag2 |
4393*bafec742SSukumar Swaminathan 			    OB_MAC_IOCB_REQ_IC);
4394*bafec742SSukumar Swaminathan 			iphdr->ip_sum = 0;
4395*bafec742SSukumar Swaminathan 			mac_iocb_ptr->hdr_off = (uint16_t)
4396*bafec742SSukumar Swaminathan 			    cpu_to_le16(ip_hdr_off);
4397*bafec742SSukumar Swaminathan 		}
4398*bafec742SSukumar Swaminathan 		if (pflags & HCK_FULLCKSUM) {
4399*bafec742SSukumar Swaminathan 			if (iphdr->ip_p == IPPROTO_TCP) {
4400*bafec742SSukumar Swaminathan 				tcp_hdr =
4401*bafec742SSukumar Swaminathan 				    (struct tcphdr *)(void *)
4402*bafec742SSukumar Swaminathan 				    ((uint8_t *)(void *)iphdr + ip_hdr_len);
4403*bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d) : Do TCP checksum\n",
4404*bafec742SSukumar Swaminathan 				    __func__, qlge->instance));
4405*bafec742SSukumar Swaminathan 				mac_iocb_ptr->opcode =
4406*bafec742SSukumar Swaminathan 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4407*bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag1 =
4408*bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag1 |
4409*bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_TC);
4410*bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag2 =
4411*bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag2 |
4412*bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_IC);
4413*bafec742SSukumar Swaminathan 				iphdr->ip_sum = 0;
4414*bafec742SSukumar Swaminathan 				tcp_udp_hdr_off = mac_hdr_len+ip_hdr_len;
4415*bafec742SSukumar Swaminathan 				tcp_udp_hdr_len = tcp_hdr->th_off*4;
4416*bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
4417*bafec742SSukumar Swaminathan 				    __func__, qlge->instance, tcp_udp_hdr_len));
4418*bafec742SSukumar Swaminathan 				hdr_off = ip_hdr_off;
4419*bafec742SSukumar Swaminathan 				tcp_udp_hdr_off <<= 6;
4420*bafec742SSukumar Swaminathan 				hdr_off |= tcp_udp_hdr_off;
4421*bafec742SSukumar Swaminathan 				mac_iocb_ptr->hdr_off =
4422*bafec742SSukumar Swaminathan 				    (uint16_t)cpu_to_le16(hdr_off);
4423*bafec742SSukumar Swaminathan 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4424*bafec742SSukumar Swaminathan 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
4425*bafec742SSukumar Swaminathan 				    tcp_udp_hdr_len);
4426*bafec742SSukumar Swaminathan 
4427*bafec742SSukumar Swaminathan 				/*
4428*bafec742SSukumar Swaminathan 				 * if the chip is unable to do the pseudo
4429*bafec742SSukumar Swaminathan 				 * header cksum calculation, do it here, then
4430*bafec742SSukumar Swaminathan 				 * put the result in the data passed to the chip
4431*bafec742SSukumar Swaminathan 				 */
4432*bafec742SSukumar Swaminathan 				if (qlge->cfg_flags &
4433*bafec742SSukumar Swaminathan 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4434*bafec742SSukumar Swaminathan 					ql_pseudo_cksum((uint8_t *)iphdr);
4435*bafec742SSukumar Swaminathan 				}
4436*bafec742SSukumar Swaminathan 			} else if (iphdr->ip_p == IPPROTO_UDP) {
4437*bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d) : Do UDP checksum\n",
4438*bafec742SSukumar Swaminathan 				    __func__, qlge->instance));
4439*bafec742SSukumar Swaminathan 				mac_iocb_ptr->opcode =
4440*bafec742SSukumar Swaminathan 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4441*bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag1 =
4442*bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag1 |
4443*bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_UC);
4444*bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag2 =
4445*bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag2 |
4446*bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_IC);
4447*bafec742SSukumar Swaminathan 				iphdr->ip_sum = 0;
4448*bafec742SSukumar Swaminathan 				tcp_udp_hdr_off = mac_hdr_len + ip_hdr_len;
4449*bafec742SSukumar Swaminathan 				tcp_udp_hdr_len = sizeof (struct udphdr);
4450*bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
4451*bafec742SSukumar Swaminathan 				    __func__, qlge->instance, tcp_udp_hdr_len));
4452*bafec742SSukumar Swaminathan 				hdr_off = ip_hdr_off;
4453*bafec742SSukumar Swaminathan 				tcp_udp_hdr_off <<= 6;
4454*bafec742SSukumar Swaminathan 				hdr_off |= tcp_udp_hdr_off;
4455*bafec742SSukumar Swaminathan 				mac_iocb_ptr->hdr_off =
4456*bafec742SSukumar Swaminathan 				    (uint16_t)cpu_to_le16(hdr_off);
4457*bafec742SSukumar Swaminathan 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4458*bafec742SSukumar Swaminathan 				    cpu_to_le16(mac_hdr_len + ip_hdr_len
4459*bafec742SSukumar Swaminathan 				    + tcp_udp_hdr_len);
4460*bafec742SSukumar Swaminathan 
4461*bafec742SSukumar Swaminathan 				/*
4462*bafec742SSukumar Swaminathan 				 * if the chip is unable to calculate the pseudo
4463*bafec742SSukumar Swaminathan 				 * hdr cksum, do it here, then put the result in
4464*bafec742SSukumar Swaminathan 				 * the data passed to the chip
4465*bafec742SSukumar Swaminathan 				 */
4466*bafec742SSukumar Swaminathan 				if (qlge->cfg_flags &
4467*bafec742SSukumar Swaminathan 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4468*bafec742SSukumar Swaminathan 					ql_pseudo_cksum((uint8_t *)iphdr);
4469*bafec742SSukumar Swaminathan 				}
4470*bafec742SSukumar Swaminathan 			}
4471*bafec742SSukumar Swaminathan 		}
4472*bafec742SSukumar Swaminathan 	}
4473*bafec742SSukumar Swaminathan }
4474*bafec742SSukumar Swaminathan 
4475*bafec742SSukumar Swaminathan /*
4476*bafec742SSukumar Swaminathan  * For TSO/LSO:
4477*bafec742SSukumar Swaminathan  * MAC frame transmission with TCP large segment offload is performed in the
4478*bafec742SSukumar Swaminathan  * same way as the MAC frame transmission with checksum offload with the
4479*bafec742SSukumar Swaminathan  * exception that the maximum TCP segment size (MSS) must be specified to
4480*bafec742SSukumar Swaminathan  * allow the chip to segment the data into legal sized frames.
4481*bafec742SSukumar Swaminathan  * The host also needs to calculate a pseudo-header checksum over the
4482*bafec742SSukumar Swaminathan  * following fields:
4483*bafec742SSukumar Swaminathan  * Source IP Address, Destination IP Address, and the Protocol.
4484*bafec742SSukumar Swaminathan  * The TCP length is not included in the pseudo-header calculation.
4485*bafec742SSukumar Swaminathan  * The pseudo-header checksum is placed in the TCP checksum field of the
4486*bafec742SSukumar Swaminathan  * prototype header.
4487*bafec742SSukumar Swaminathan  */
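/*
 * Worked example (hypothetical addresses): for proto = 6 (TCP),
 * src = 192.168.0.1 and dst = 192.168.0.2 the sum is
 * 0x0006 + 0xc0a8 + 0x0001 + 0xc0a8 + 0x0002 = 0x18159, which folds
 * to 0x815a before being stored in the TCP checksum field.
 */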
4488*bafec742SSukumar Swaminathan static void
4489*bafec742SSukumar Swaminathan ql_lso_pseudo_cksum(uint8_t *buf)
4490*bafec742SSukumar Swaminathan {
4491*bafec742SSukumar Swaminathan 	uint32_t cksum;
4492*bafec742SSukumar Swaminathan 	uint16_t iphl;
4493*bafec742SSukumar Swaminathan 	uint16_t proto;
4494*bafec742SSukumar Swaminathan 
4495*bafec742SSukumar Swaminathan 	/*
4496*bafec742SSukumar Swaminathan 	 * Calculate the LSO pseudo-header checksum.
4497*bafec742SSukumar Swaminathan 	 */
4498*bafec742SSukumar Swaminathan 	iphl = (uint16_t)(4 * (buf[0] & 0xF));
4499*bafec742SSukumar Swaminathan 	cksum = proto = buf[9];
4500*bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[12])<<8) + buf[13];
4501*bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[14])<<8) + buf[15];
4502*bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[16])<<8) + buf[17];
4503*bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[18])<<8) + buf[19];
4504*bafec742SSukumar Swaminathan 	cksum = (cksum>>16) + (cksum & 0xFFFF);
4505*bafec742SSukumar Swaminathan 	cksum = (cksum>>16) + (cksum & 0xFFFF);
4506*bafec742SSukumar Swaminathan 
4507*bafec742SSukumar Swaminathan 	/*
4508*bafec742SSukumar Swaminathan 	 * Point it to the TCP/UDP header, and
4509*bafec742SSukumar Swaminathan 	 * update the checksum field.
4510*bafec742SSukumar Swaminathan 	 */
4511*bafec742SSukumar Swaminathan 	buf += iphl + ((proto == IPPROTO_TCP) ?
4512*bafec742SSukumar Swaminathan 	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
4513*bafec742SSukumar Swaminathan 
4514*bafec742SSukumar Swaminathan 	*(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
4515*bafec742SSukumar Swaminathan }
4516*bafec742SSukumar Swaminathan 
4517*bafec742SSukumar Swaminathan /*
4518*bafec742SSukumar Swaminathan  * Tell the hardware to do Large Send Offload (LSO)
4519*bafec742SSukumar Swaminathan  *
4520*bafec742SSukumar Swaminathan  * Some fields in ob_mac_iocb need to be set so the hardware knows what
4521*bafec742SSukumar Swaminathan  * the outgoing packet is (TCP or UDP), whether a VLAN tag needs to be
4522*bafec742SSukumar Swaminathan  * inserted in the right place of the packet, etc., so that the hardware
4523*bafec742SSukumar Swaminathan  * can process the packet correctly.
4524*bafec742SSukumar Swaminathan  */
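/*
 * The mss value comes from lso_info_get() in the send path; for a
 * standard 1500-byte MTU IPv4 TCP frame it would typically be 1460
 * (MTU minus 20 bytes of IP header and 20 bytes of TCP header).
 */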
4525*bafec742SSukumar Swaminathan static void
4526*bafec742SSukumar Swaminathan ql_hw_lso_setup(qlge_t *qlge, uint32_t mss, caddr_t bp,
4527*bafec742SSukumar Swaminathan     struct ob_mac_iocb_req *mac_iocb_ptr)
4528*bafec742SSukumar Swaminathan {
4529*bafec742SSukumar Swaminathan 	struct ip *iphdr = NULL;
4530*bafec742SSukumar Swaminathan 	struct ether_header *ethhdr;
4531*bafec742SSukumar Swaminathan 	struct ether_vlan_header *ethvhdr;
4532*bafec742SSukumar Swaminathan 	struct tcphdr *tcp_hdr;
4533*bafec742SSukumar Swaminathan 	struct udphdr *udp_hdr;
4534*bafec742SSukumar Swaminathan 	uint32_t etherType;
4535*bafec742SSukumar Swaminathan 	uint16_t mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
4536*bafec742SSukumar Swaminathan 	uint16_t ip_hdr_off, tcp_udp_hdr_off, hdr_off;
4537*bafec742SSukumar Swaminathan 
4538*bafec742SSukumar Swaminathan 	ethhdr = (struct ether_header *)(void *)bp;
4539*bafec742SSukumar Swaminathan 	ethvhdr = (struct ether_vlan_header *)(void *)bp;
4540*bafec742SSukumar Swaminathan 
4541*bafec742SSukumar Swaminathan 	/* Is this a VLAN packet? */
4542*bafec742SSukumar Swaminathan 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
4543*bafec742SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_vlan_header);
4544*bafec742SSukumar Swaminathan 		etherType = ntohs(ethvhdr->ether_type);
4545*bafec742SSukumar Swaminathan 	} else {
4546*bafec742SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_header);
4547*bafec742SSukumar Swaminathan 		etherType = ntohs(ethhdr->ether_type);
4548*bafec742SSukumar Swaminathan 	}
4549*bafec742SSukumar Swaminathan 	/* Is this IPv4 or IPv6 packet? */
4550*bafec742SSukumar Swaminathan 	if (IPH_HDR_VERSION((ipha_t *)(void *)(bp + mac_hdr_len)) ==
4551*bafec742SSukumar Swaminathan 	    IPV4_VERSION) {
4552*bafec742SSukumar Swaminathan 		if (etherType == ETHERTYPE_IP /* 0800 */) {
4553*bafec742SSukumar Swaminathan 			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
4554*bafec742SSukumar Swaminathan 		} else {
4555*bafec742SSukumar Swaminathan 			/* EMPTY */
4556*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX, ("%s(%d) : IPv4 version but"
4557*bafec742SSukumar Swaminathan 			    " non-IP ethertype 0x%x\n",
4558*bafec742SSukumar Swaminathan 			    __func__, qlge->instance, etherType));
4559*bafec742SSukumar Swaminathan 		}
4560*bafec742SSukumar Swaminathan 	}
4561*bafec742SSukumar Swaminathan 
4562*bafec742SSukumar Swaminathan 	if (iphdr != NULL) { /* ipV4 packets */
4563*bafec742SSukumar Swaminathan 		ip_hdr_len = (uint16_t)IPH_HDR_LENGTH(iphdr);
4564*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX,
4565*bafec742SSukumar Swaminathan 		    ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH: %d"
4566*bafec742SSukumar Swaminathan 		    " bytes \n", __func__, qlge->instance, ip_hdr_len));
4567*bafec742SSukumar Swaminathan 
4568*bafec742SSukumar Swaminathan 		ip_hdr_off = mac_hdr_len;
4569*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
4570*bafec742SSukumar Swaminathan 		    __func__, qlge->instance, ip_hdr_len));
4571*bafec742SSukumar Swaminathan 
4572*bafec742SSukumar Swaminathan 		mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
4573*bafec742SSukumar Swaminathan 		    OB_MAC_IOCB_REQ_IPv4);
4574*bafec742SSukumar Swaminathan 		if (qlge->cfg_flags & CFG_CKSUM_FULL_IPv4) {
4575*bafec742SSukumar Swaminathan 			if (iphdr->ip_p == IPPROTO_TCP) {
4576*bafec742SSukumar Swaminathan 				tcp_hdr = (struct tcphdr *)(void *)
4577*bafec742SSukumar Swaminathan 				    ((uint8_t *)(void *)iphdr +
4578*bafec742SSukumar Swaminathan 				    ip_hdr_len);
4579*bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on TCP "
4580*bafec742SSukumar Swaminathan 				    "packet\n",
4581*bafec742SSukumar Swaminathan 				    __func__, qlge->instance));
4582*bafec742SSukumar Swaminathan 				mac_iocb_ptr->opcode =
4583*bafec742SSukumar Swaminathan 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4584*bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag1 =
4585*bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag1 |
4586*bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_LSO);
4587*bafec742SSukumar Swaminathan 				iphdr->ip_sum = 0;
4588*bafec742SSukumar Swaminathan 				tcp_udp_hdr_off =
4589*bafec742SSukumar Swaminathan 				    (uint16_t)(mac_hdr_len+ip_hdr_len);
4590*bafec742SSukumar Swaminathan 				tcp_udp_hdr_len =
4591*bafec742SSukumar Swaminathan 				    (uint16_t)(tcp_hdr->th_off*4);
4592*bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
4593*bafec742SSukumar Swaminathan 				    __func__, qlge->instance, tcp_udp_hdr_len));
4594*bafec742SSukumar Swaminathan 				hdr_off = ip_hdr_off;
4595*bafec742SSukumar Swaminathan 				tcp_udp_hdr_off <<= 6;
4596*bafec742SSukumar Swaminathan 				hdr_off |= tcp_udp_hdr_off;
4597*bafec742SSukumar Swaminathan 				mac_iocb_ptr->hdr_off =
4598*bafec742SSukumar Swaminathan 				    (uint16_t)cpu_to_le16(hdr_off);
4599*bafec742SSukumar Swaminathan 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4600*bafec742SSukumar Swaminathan 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
4601*bafec742SSukumar Swaminathan 				    tcp_udp_hdr_len);
4602*bafec742SSukumar Swaminathan 				mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
4603*bafec742SSukumar Swaminathan 
4604*bafec742SSukumar Swaminathan 				/*
4605*bafec742SSukumar Swaminathan 				 * if the chip is unable to calculate the pseudo
4606*bafec742SSukumar Swaminathan 				 * header checksum, do it here, then put the
4607*bafec742SSukumar Swaminathan 				 * result in the data passed to the chip
4608*bafec742SSukumar Swaminathan 				 */
4609*bafec742SSukumar Swaminathan 				if (qlge->cfg_flags &
4610*bafec742SSukumar Swaminathan 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
4611*bafec742SSukumar Swaminathan 					ql_lso_pseudo_cksum((uint8_t *)iphdr);
4612*bafec742SSukumar Swaminathan 			} else if (iphdr->ip_p == IPPROTO_UDP) {
4613*bafec742SSukumar Swaminathan 				udp_hdr = (struct udphdr *)(void *)
4614*bafec742SSukumar Swaminathan 				    ((uint8_t *)(void *)iphdr
4615*bafec742SSukumar Swaminathan 				    + ip_hdr_len);
4616*bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on UDP "
4617*bafec742SSukumar Swaminathan 				    "packet\n",
4618*bafec742SSukumar Swaminathan 				    __func__, qlge->instance));
4619*bafec742SSukumar Swaminathan 				mac_iocb_ptr->opcode =
4620*bafec742SSukumar Swaminathan 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4621*bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag1 =
4622*bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag1 |
4623*bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_LSO);
4624*bafec742SSukumar Swaminathan 				iphdr->ip_sum = 0;
4625*bafec742SSukumar Swaminathan 				tcp_udp_hdr_off =
4626*bafec742SSukumar Swaminathan 				    (uint16_t)(mac_hdr_len+ip_hdr_len);
4627*bafec742SSukumar Swaminathan 				tcp_udp_hdr_len =
4628*bafec742SSukumar Swaminathan 				    (uint16_t)sizeof (struct udphdr);
4629*bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
4630*bafec742SSukumar Swaminathan 				    __func__, qlge->instance, tcp_udp_hdr_len));
4631*bafec742SSukumar Swaminathan 				hdr_off = ip_hdr_off;
4632*bafec742SSukumar Swaminathan 				tcp_udp_hdr_off <<= 6;
4633*bafec742SSukumar Swaminathan 				hdr_off |= tcp_udp_hdr_off;
4634*bafec742SSukumar Swaminathan 				mac_iocb_ptr->hdr_off =
4635*bafec742SSukumar Swaminathan 				    (uint16_t)cpu_to_le16(hdr_off);
4636*bafec742SSukumar Swaminathan 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4637*bafec742SSukumar Swaminathan 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
4638*bafec742SSukumar Swaminathan 				    tcp_udp_hdr_len);
4639*bafec742SSukumar Swaminathan 				mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
4640*bafec742SSukumar Swaminathan 
4641*bafec742SSukumar Swaminathan 				/*
4642*bafec742SSukumar Swaminathan 				 * if the chip is unable to do pseudo header
4643*bafec742SSukumar Swaminathan 				 * checksum calculation, do it here, then put
4644*bafec742SSukumar Swaminathan 				 * the result in the data passed to the chip
4645*bafec742SSukumar Swaminathan 				 */
4646*bafec742SSukumar Swaminathan 				if (qlge->cfg_flags &
4647*bafec742SSukumar Swaminathan 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
4648*bafec742SSukumar Swaminathan 					ql_lso_pseudo_cksum((uint8_t *)iphdr);
4649*bafec742SSukumar Swaminathan 			}
4650*bafec742SSukumar Swaminathan 		}
4651*bafec742SSukumar Swaminathan 	}
4652*bafec742SSukumar Swaminathan }
4653*bafec742SSukumar Swaminathan 
4654*bafec742SSukumar Swaminathan /*
4655*bafec742SSukumar Swaminathan  * Generic packet-sending function, used to transmit a single packet.
4656*bafec742SSukumar Swaminathan  */
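/*
 * Transmit-path sketch: when both the total message length and the
 * largest fragment fit within QL_MAX_COPY_LENGTH the payload is
 * copied into a preallocated tx buffer; otherwise the fragments are
 * DMA-bound, and a message with more than QL_MAX_TX_DMA_HANDLES
 * fragments is first coalesced with msgpullup().
 */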
4657*bafec742SSukumar Swaminathan int
4658*bafec742SSukumar Swaminathan ql_send_common(struct tx_ring *tx_ring, mblk_t *mp)
4659*bafec742SSukumar Swaminathan {
4660*bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_cb;
4661*bafec742SSukumar Swaminathan 	struct ob_mac_iocb_req *mac_iocb_ptr;
4662*bafec742SSukumar Swaminathan 	mblk_t *tp;
4663*bafec742SSukumar Swaminathan 	size_t msg_len = 0;
4664*bafec742SSukumar Swaminathan 	size_t off;
4665*bafec742SSukumar Swaminathan 	caddr_t bp;
4666*bafec742SSukumar Swaminathan 	size_t nbyte, total_len;
4667*bafec742SSukumar Swaminathan 	uint_t i = 0;
4668*bafec742SSukumar Swaminathan 	int j = 0, frags = 0;
4669*bafec742SSukumar Swaminathan 	uint32_t phy_addr_low, phy_addr_high;
4670*bafec742SSukumar Swaminathan 	uint64_t phys_addr;
4671*bafec742SSukumar Swaminathan 	clock_t now;
4672*bafec742SSukumar Swaminathan 	uint32_t pflags = 0;
4673*bafec742SSukumar Swaminathan 	uint32_t mss = 0;
4674*bafec742SSukumar Swaminathan 	enum tx_mode_t tx_mode;
4675*bafec742SSukumar Swaminathan 	struct oal_entry *oal_entry;
4676*bafec742SSukumar Swaminathan 	int status;
4677*bafec742SSukumar Swaminathan 	uint_t ncookies, oal_entries, max_oal_entries;
4678*bafec742SSukumar Swaminathan 	size_t max_seg_len = 0;
4679*bafec742SSukumar Swaminathan 	boolean_t use_lso = B_FALSE;
4680*bafec742SSukumar Swaminathan 	struct oal_entry *tx_entry = NULL;
4681*bafec742SSukumar Swaminathan 	struct oal_entry *last_oal_entry;
4682*bafec742SSukumar Swaminathan 	qlge_t *qlge = tx_ring->qlge;
4683*bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
4684*bafec742SSukumar Swaminathan 	size_t tx_buf_len = QL_MAX_COPY_LENGTH;
4685*bafec742SSukumar Swaminathan 	int force_pullup = 0;
4686*bafec742SSukumar Swaminathan 
4687*bafec742SSukumar Swaminathan 	tp = mp;
4688*bafec742SSukumar Swaminathan 	total_len = msg_len = 0;
4689*bafec742SSukumar Swaminathan 	max_oal_entries = TX_DESC_PER_IOCB + MAX_SG_ELEMENTS - 1;
4690*bafec742SSukumar Swaminathan 
4691*bafec742SSukumar Swaminathan 	/* Calculate number of data and segments in the incoming message */
4692*bafec742SSukumar Swaminathan 	for (tp = mp; tp != NULL; tp = tp->b_cont) {
4693*bafec742SSukumar Swaminathan 		nbyte = MBLKL(tp);
4694*bafec742SSukumar Swaminathan 		total_len += nbyte;
4695*bafec742SSukumar Swaminathan 		max_seg_len = max(nbyte, max_seg_len);
4696*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("seg %d: %d bytes, total so far: %d\n",
4697*bafec742SSukumar Swaminathan 		    frags, (int)nbyte, (int)total_len));
4698*bafec742SSukumar Swaminathan 		frags++;
4699*bafec742SSukumar Swaminathan 	}
4700*bafec742SSukumar Swaminathan 
4701*bafec742SSukumar Swaminathan 	if (total_len >= QL_LSO_MAX) {
4702*bafec742SSukumar Swaminathan 		freemsg(mp);
4703*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
4704*bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "%s: quit, packet oversize %d\n",
4705*bafec742SSukumar Swaminathan 		    __func__, (int)total_len);
4706*bafec742SSukumar Swaminathan #endif
4707*bafec742SSukumar Swaminathan 		return (0);	/* mp was freed; nothing more to send */
4708*bafec742SSukumar Swaminathan 	}
4709*bafec742SSukumar Swaminathan 
4710*bafec742SSukumar Swaminathan 	bp = (caddr_t)mp->b_rptr;
4711*bafec742SSukumar Swaminathan 	if (bp[0] & 1) {
4712*bafec742SSukumar Swaminathan 		if (bcmp(bp, ql_ether_broadcast_addr.ether_addr_octet,
4713*bafec742SSukumar Swaminathan 		    ETHERADDRL) == 0) {
4714*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX, ("Broadcast packet\n"));
4715*bafec742SSukumar Swaminathan 			tx_ring->brdcstxmt++;
4716*bafec742SSukumar Swaminathan 		} else {
4717*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX, ("multicast packet\n"));
4718*bafec742SSukumar Swaminathan 			tx_ring->multixmt++;
4719*bafec742SSukumar Swaminathan 		}
4720*bafec742SSukumar Swaminathan 	}
4721*bafec742SSukumar Swaminathan 
4722*bafec742SSukumar Swaminathan 	tx_ring->obytes += total_len;
4723*bafec742SSukumar Swaminathan 	tx_ring->opackets++;
4724*bafec742SSukumar Swaminathan 
4725*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_TX, ("total requested data length: %d, in %d segs,"
4726*bafec742SSukumar Swaminathan 	    " max seg len: %d\n", total_len, frags, max_seg_len));
4727*bafec742SSukumar Swaminathan 
4728*bafec742SSukumar Swaminathan 	/* claim a free slot in tx ring */
4729*bafec742SSukumar Swaminathan 	tx_cb = &tx_ring->wq_desc[tx_ring->prod_idx];
4730*bafec742SSukumar Swaminathan 
4731*bafec742SSukumar Swaminathan 	/* get the tx descriptor */
4732*bafec742SSukumar Swaminathan 	mac_iocb_ptr = tx_cb->queue_entry;
4733*bafec742SSukumar Swaminathan 
4734*bafec742SSukumar Swaminathan 	bzero((void *)mac_iocb_ptr, sizeof (*mac_iocb_ptr));
4735*bafec742SSukumar Swaminathan 
4736*bafec742SSukumar Swaminathan 	ASSERT(tx_cb->mp == NULL);
4737*bafec742SSukumar Swaminathan 
4738*bafec742SSukumar Swaminathan 	/*
4739*bafec742SSukumar Swaminathan 	 * Decide to use DMA map or copy mode.
4740*bafec742SSukumar Swaminathan 	 * DMA map mode must be used when the total msg length is more than the
4741*bafec742SSukumar Swaminathan 	 * tx buffer length.
4742*bafec742SSukumar Swaminathan 	 */
4743*bafec742SSukumar Swaminathan 
4744*bafec742SSukumar Swaminathan 	if (total_len > tx_buf_len)
4745*bafec742SSukumar Swaminathan 		tx_mode = USE_DMA;
4746*bafec742SSukumar Swaminathan 	else if (max_seg_len > QL_MAX_COPY_LENGTH)
4747*bafec742SSukumar Swaminathan 		tx_mode = USE_DMA;
4748*bafec742SSukumar Swaminathan 	else
4749*bafec742SSukumar Swaminathan 		tx_mode = USE_COPY;
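
	/*
	 * Note: tx_buf_len was initialized to QL_MAX_COPY_LENGTH, and
	 * max_seg_len can never exceed total_len, so as written the
	 * second test above can never fire; only the total-length test
	 * picks the mode. Messages that fit in the preallocated copy
	 * buffer are bcopy'd, anything larger is DMA-bound per fragment.
	 */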
4750*bafec742SSukumar Swaminathan 
4751*bafec742SSukumar Swaminathan 	if (qlge->chksum_cap) {
4752*bafec742SSukumar Swaminathan 		hcksum_retrieve(mp, NULL, NULL, NULL,
4753*bafec742SSukumar Swaminathan 		    NULL, NULL, NULL, &pflags);
4754*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("checksum flag is :0x%x, card capability "
4755*bafec742SSukumar Swaminathan 		    "is 0x%x \n", pflags, qlge->chksum_cap));
4756*bafec742SSukumar Swaminathan 		if (qlge->lso_enable) {
4757*bafec742SSukumar Swaminathan 			uint32_t lso_flags = 0;
4758*bafec742SSukumar Swaminathan 			lso_info_get(mp, &mss, &lso_flags);
4759*bafec742SSukumar Swaminathan 			use_lso = (lso_flags == HW_LSO);
4760*bafec742SSukumar Swaminathan 		}
4761*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("mss :%d, use_lso %x \n",
4762*bafec742SSukumar Swaminathan 		    mss, use_lso));
4763*bafec742SSukumar Swaminathan 	}
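
	/*
	 * Hedged sketch, not driver code: the offload requests read back
	 * above are the ones a sender is assumed to have attached before
	 * handing the chain down, roughly
	 *
	 *	lso_info_set(mp, mss, HW_LSO);
	 *	(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0,
	 *	    HCK_FULLCKSUM, KM_NOSLEEP);
	 *
	 * so use_lso is true only when the stack explicitly requested
	 * hardware LSO and lso_enable is configured on.
	 */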
4764*bafec742SSukumar Swaminathan 
4765*bafec742SSukumar Swaminathan do_pullup:
4766*bafec742SSukumar Swaminathan 
4767*bafec742SSukumar Swaminathan 	/* concatenate all frags into one large packet if too fragmented */
4768*bafec742SSukumar Swaminathan 	if (((tx_mode == USE_DMA) && (frags > QL_MAX_TX_DMA_HANDLES)) ||
4769*bafec742SSukumar Swaminathan 	    force_pullup) {
4770*bafec742SSukumar Swaminathan 		mblk_t *mp1;
4771*bafec742SSukumar Swaminathan 		if ((mp1 = msgpullup(mp, -1)) != NULL) {
4772*bafec742SSukumar Swaminathan 			freemsg(mp);
4773*bafec742SSukumar Swaminathan 			mp = mp1;
4774*bafec742SSukumar Swaminathan 			frags = 1;
4775*bafec742SSukumar Swaminathan 		} else {
4776*bafec742SSukumar Swaminathan 			tx_ring->tx_fail_dma_bind++;
4777*bafec742SSukumar Swaminathan 			goto bad;
4778*bafec742SSukumar Swaminathan 		}
4779*bafec742SSukumar Swaminathan 	}
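
	/*
	 * msgpullup(9F) with a length of -1 copies the entire chain into
	 * one newly allocated contiguous mblk; the original chain is
	 * freed here only after the copy succeeds, so on allocation
	 * failure mp is still intact and is released on the bad path.
	 */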
4780*bafec742SSukumar Swaminathan 
4781*bafec742SSukumar Swaminathan 	tx_cb->tx_bytes = (uint32_t)total_len;
4782*bafec742SSukumar Swaminathan 	tx_cb->mp = mp;
4783*bafec742SSukumar Swaminathan 	tx_cb->tx_dma_handle_used = 0;
4784*bafec742SSukumar Swaminathan 
4785*bafec742SSukumar Swaminathan 	if (tx_mode == USE_DMA) {
4786*bafec742SSukumar Swaminathan 		msg_len = total_len;
4787*bafec742SSukumar Swaminathan 
4788*bafec742SSukumar Swaminathan 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
4789*bafec742SSukumar Swaminathan 		mac_iocb_ptr->tid = tx_ring->prod_idx;
4790*bafec742SSukumar Swaminathan 		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
4791*bafec742SSukumar Swaminathan 		mac_iocb_ptr->txq_idx = tx_ring->wq_id;
4792*bafec742SSukumar Swaminathan 
4793*bafec742SSukumar Swaminathan 		tx_entry = &mac_iocb_ptr->oal_entry[0];
4794*bafec742SSukumar Swaminathan 		oal_entry = NULL;
4795*bafec742SSukumar Swaminathan 
4796*bafec742SSukumar Swaminathan 		for (tp = mp, oal_entries = j = 0; tp != NULL;
4797*bafec742SSukumar Swaminathan 		    tp = tp->b_cont) {
4798*bafec742SSukumar Swaminathan 			/* if too many tx dma handles needed */
4799*bafec742SSukumar Swaminathan 			if (j >= QL_MAX_TX_DMA_HANDLES) {
4800*bafec742SSukumar Swaminathan 				tx_ring->tx_no_dma_handle++;
4801*bafec742SSukumar Swaminathan 				if (!force_pullup) {
4802*bafec742SSukumar Swaminathan 					force_pullup = 1;
4803*bafec742SSukumar Swaminathan 					goto do_pullup;
4804*bafec742SSukumar Swaminathan 				} else {
4805*bafec742SSukumar Swaminathan 					goto bad;
4806*bafec742SSukumar Swaminathan 				}
4807*bafec742SSukumar Swaminathan 			}
4808*bafec742SSukumar Swaminathan 			nbyte = (uint16_t)MBLKL(tp);
4809*bafec742SSukumar Swaminathan 			if (nbyte == 0)
4810*bafec742SSukumar Swaminathan 				continue;
4811*bafec742SSukumar Swaminathan 
4812*bafec742SSukumar Swaminathan 			status = ddi_dma_addr_bind_handle(
4813*bafec742SSukumar Swaminathan 			    tx_cb->tx_dma_handle[j], NULL,
4814*bafec742SSukumar Swaminathan 			    (caddr_t)tp->b_rptr, nbyte,
4815*bafec742SSukumar Swaminathan 			    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
4816*bafec742SSukumar Swaminathan 			    0, &dma_cookie, &ncookies);
4817*bafec742SSukumar Swaminathan 
4818*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX, ("map sending data segment: %d, "
4819*bafec742SSukumar Swaminathan 			    "length: %d, spans in %d cookies\n",
4820*bafec742SSukumar Swaminathan 			    j, nbyte, ncookies));
4821*bafec742SSukumar Swaminathan 
4822*bafec742SSukumar Swaminathan 			if (status != DDI_DMA_MAPPED) {
4823*bafec742SSukumar Swaminathan 				goto bad;
4824*bafec742SSukumar Swaminathan 			}
4825*bafec742SSukumar Swaminathan 			/*
4826*bafec742SSukumar Swaminathan 			 * Each fragment can span several cookies. One cookie
4827*bafec742SSukumar Swaminathan 			 * will use one tx descriptor to transmit.
4828*bafec742SSukumar Swaminathan 			 */
4829*bafec742SSukumar Swaminathan 			for (i = ncookies; i > 0; i--, tx_entry++,
4830*bafec742SSukumar Swaminathan 			    oal_entries++) {
4831*bafec742SSukumar Swaminathan 				/*
4832*bafec742SSukumar Swaminathan 				 * The number of TX descriptors that can be
4833*bafec742SSukumar Swaminathan 				 * held in the tx iocb and oal list is limited.
4834*bafec742SSukumar Swaminathan 				 */
4835*bafec742SSukumar Swaminathan 				if (oal_entries > max_oal_entries) {
4836*bafec742SSukumar Swaminathan 					tx_ring->tx_no_dma_cookie++;
4837*bafec742SSukumar Swaminathan 					if (!force_pullup) {
4838*bafec742SSukumar Swaminathan 						force_pullup = 1;
4839*bafec742SSukumar Swaminathan 						goto do_pullup;
4840*bafec742SSukumar Swaminathan 					} else {
4841*bafec742SSukumar Swaminathan 						goto bad;
4842*bafec742SSukumar Swaminathan 					}
4843*bafec742SSukumar Swaminathan 				}
4844*bafec742SSukumar Swaminathan 
4845*bafec742SSukumar Swaminathan 				if ((oal_entries == TX_DESC_PER_IOCB) &&
4846*bafec742SSukumar Swaminathan 				    !oal_entry) {
4847*bafec742SSukumar Swaminathan 					/*
4848*bafec742SSukumar Swaminathan 					 * Time to switch to an oal list:
4849*bafec742SSukumar Swaminathan 					 * the last entry is copied to the
4850*bafec742SSukumar Swaminathan 					 * first entry in the oal list.
4851*bafec742SSukumar Swaminathan 					 */
4852*bafec742SSukumar Swaminathan 					oal_entry = tx_cb->oal;
4853*bafec742SSukumar Swaminathan 					tx_entry =
4854*bafec742SSukumar Swaminathan 					    &mac_iocb_ptr->oal_entry[
4855*bafec742SSukumar Swaminathan 					    TX_DESC_PER_IOCB-1];
4856*bafec742SSukumar Swaminathan 					bcopy(tx_entry, oal_entry,
4857*bafec742SSukumar Swaminathan 					    sizeof (*oal_entry));
4858*bafec742SSukumar Swaminathan 
4859*bafec742SSukumar Swaminathan 					/*
4860*bafec742SSukumar Swaminathan 					 * last entry should be updated to
4861*bafec742SSukumar Swaminathan 					 * point to the extended oal list itself
4862*bafec742SSukumar Swaminathan 					 */
4863*bafec742SSukumar Swaminathan 					tx_entry->buf_addr_low =
4864*bafec742SSukumar Swaminathan 					    cpu_to_le32(
4865*bafec742SSukumar Swaminathan 					    LS_64BITS(tx_cb->oal_dma_addr));
4866*bafec742SSukumar Swaminathan 					tx_entry->buf_addr_high =
4867*bafec742SSukumar Swaminathan 					    cpu_to_le32(
4868*bafec742SSukumar Swaminathan 					    MS_64BITS(tx_cb->oal_dma_addr));
4869*bafec742SSukumar Swaminathan 					/*
4870*bafec742SSukumar Swaminathan 					 * Point tx_entry to the oal list
4871*bafec742SSukumar Swaminathan 					 * second entry
4872*bafec742SSukumar Swaminathan 					 */
4873*bafec742SSukumar Swaminathan 					tx_entry = &oal_entry[1];
4874*bafec742SSukumar Swaminathan 				}
4875*bafec742SSukumar Swaminathan 
4876*bafec742SSukumar Swaminathan 				tx_entry->buf_len =
4877*bafec742SSukumar Swaminathan 				    (uint32_t)cpu_to_le32(dma_cookie.dmac_size);
4878*bafec742SSukumar Swaminathan 				phys_addr = dma_cookie.dmac_laddress;
4879*bafec742SSukumar Swaminathan 				tx_entry->buf_addr_low =
4880*bafec742SSukumar Swaminathan 				    cpu_to_le32(LS_64BITS(phys_addr));
4881*bafec742SSukumar Swaminathan 				tx_entry->buf_addr_high =
4882*bafec742SSukumar Swaminathan 				    cpu_to_le32(MS_64BITS(phys_addr));
4883*bafec742SSukumar Swaminathan 
4884*bafec742SSukumar Swaminathan 				last_oal_entry = tx_entry;
4885*bafec742SSukumar Swaminathan 
4886*bafec742SSukumar Swaminathan 				if (i > 1)
4887*bafec742SSukumar Swaminathan 					ddi_dma_nextcookie(
4888*bafec742SSukumar Swaminathan 					    tx_cb->tx_dma_handle[j],
4889*bafec742SSukumar Swaminathan 					    &dma_cookie);
4890*bafec742SSukumar Swaminathan 			}
4891*bafec742SSukumar Swaminathan 			j++;
4892*bafec742SSukumar Swaminathan 		}
4893*bafec742SSukumar Swaminathan 		/*
4894*bafec742SSukumar Swaminathan 		 * if OAL is used, the last oal entry in tx iocb indicates
4895*bafec742SSukumar Swaminathan 		 * number of additional address/len pairs in OAL
4896*bafec742SSukumar Swaminathan 		 */
4897*bafec742SSukumar Swaminathan 		if (oal_entries > TX_DESC_PER_IOCB) {
4898*bafec742SSukumar Swaminathan 			tx_entry = &mac_iocb_ptr->oal_entry[TX_DESC_PER_IOCB-1];
4899*bafec742SSukumar Swaminathan 			tx_entry->buf_len = (uint32_t)cpu_to_le32(
4900*bafec742SSukumar Swaminathan 			    (sizeof (struct oal_entry) * (oal_entries -
4901*bafec742SSukumar Swaminathan 			    TX_DESC_PER_IOCB + 1)) | OAL_CONT_ENTRY);
4902*bafec742SSukumar Swaminathan 		}
4903*bafec742SSukumar Swaminathan 		last_oal_entry->buf_len = cpu_to_le32(
4904*bafec742SSukumar Swaminathan 		    le32_to_cpu(last_oal_entry->buf_len)|OAL_LAST_ENTRY);
4905*bafec742SSukumar Swaminathan 
4906*bafec742SSukumar Swaminathan 		tx_cb->tx_dma_handle_used = j;
4907*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("total tx_dma_handle_used %d cookies %d \n",
4908*bafec742SSukumar Swaminathan 		    j, oal_entries));
4909*bafec742SSukumar Swaminathan 
4910*bafec742SSukumar Swaminathan 		bp = (caddr_t)mp->b_rptr;
4911*bafec742SSukumar Swaminathan 	}
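
	/*
	 * Resulting layout when more than TX_DESC_PER_IOCB cookies were
	 * needed: the iocb keeps TX_DESC_PER_IOCB - 1 data entries, its
	 * last entry points at tx_cb->oal with OAL_CONT_ENTRY plus the
	 * continuation list's byte size in buf_len, and the oal array
	 * holds the rest (starting with the entry copied out of the
	 * iocb), the final one flagged OAL_LAST_ENTRY.
	 */
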
4912*bafec742SSukumar Swaminathan 	if (tx_mode == USE_COPY) {
4913*bafec742SSukumar Swaminathan 		bp = tx_cb->copy_buffer;
4914*bafec742SSukumar Swaminathan 		off = 0;
4915*bafec742SSukumar Swaminathan 		nbyte = 0;
4916*bafec742SSukumar Swaminathan 		frags = 0;
4917*bafec742SSukumar Swaminathan 		/*
4918*bafec742SSukumar Swaminathan 		 * Copy up to tx_buf_len of the transmit data
4919*bafec742SSukumar Swaminathan 		 * from mp to tx buffer
4920*bafec742SSukumar Swaminathan 		 */
4921*bafec742SSukumar Swaminathan 		for (tp = mp; tp != NULL; tp = tp->b_cont) {
4922*bafec742SSukumar Swaminathan 			nbyte = MBLKL(tp);
4923*bafec742SSukumar Swaminathan 			if ((off + nbyte) <= tx_buf_len) {
4924*bafec742SSukumar Swaminathan 				bcopy(tp->b_rptr, &bp[off], nbyte);
4925*bafec742SSukumar Swaminathan 				off += nbyte;
4926*bafec742SSukumar Swaminathan 				frags++;
4927*bafec742SSukumar Swaminathan 			}
4928*bafec742SSukumar Swaminathan 		}
4929*bafec742SSukumar Swaminathan 
4930*bafec742SSukumar Swaminathan 		msg_len = off;
4931*bafec742SSukumar Swaminathan 
4932*bafec742SSukumar Swaminathan 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
4933*bafec742SSukumar Swaminathan 		mac_iocb_ptr->tid = tx_ring->prod_idx;
4934*bafec742SSukumar Swaminathan 		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
4935*bafec742SSukumar Swaminathan 		mac_iocb_ptr->txq_idx = tx_ring->wq_id;
4936*bafec742SSukumar Swaminathan 
4937*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("Copy Mode: actual sent data length is: %d,"
4938*bafec742SSukumar Swaminathan 		    " from %d segments\n", msg_len, frags));
4939*bafec742SSukumar Swaminathan 
4940*bafec742SSukumar Swaminathan 		phys_addr = tx_cb->copy_buffer_dma_addr;
4941*bafec742SSukumar Swaminathan 		phy_addr_low = cpu_to_le32(LS_64BITS(phys_addr));
4942*bafec742SSukumar Swaminathan 		phy_addr_high = cpu_to_le32(MS_64BITS(phys_addr));
4943*bafec742SSukumar Swaminathan 
4944*bafec742SSukumar Swaminathan 		QL_DUMP(DBG_TX, "\t requested sending data:\n",
4945*bafec742SSukumar Swaminathan 		    (uint8_t *)tx_cb->copy_buffer, 8, total_len);
4946*bafec742SSukumar Swaminathan 
4947*bafec742SSukumar Swaminathan 		mac_iocb_ptr->oal_entry[0].buf_len = (uint32_t)
4948*bafec742SSukumar Swaminathan 		    cpu_to_le32(msg_len | OAL_LAST_ENTRY);
4949*bafec742SSukumar Swaminathan 		mac_iocb_ptr->oal_entry[0].buf_addr_low  = phy_addr_low;
4950*bafec742SSukumar Swaminathan 		mac_iocb_ptr->oal_entry[0].buf_addr_high = phy_addr_high;
4951*bafec742SSukumar Swaminathan 
4952*bafec742SSukumar Swaminathan 		freemsg(mp); /* no need, we have copied */
4953*bafec742SSukumar Swaminathan 		tx_cb->mp = NULL;
4954*bafec742SSukumar Swaminathan 	} /* End of Copy Mode */
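
	/*
	 * The copy path trades a single bcopy() for the per-fragment
	 * DMA binds of the DMA path; because the data now lives in the
	 * preallocated copy buffer, mp is freed right away and nothing
	 * is left to reclaim at tx-completion time.
	 */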
4955*bafec742SSukumar Swaminathan 
4956*bafec742SSukumar Swaminathan 	/* Do TSO/LSO on TCP packet? */
4957*bafec742SSukumar Swaminathan 	if (use_lso && mss) {
4958*bafec742SSukumar Swaminathan 		ql_hw_lso_setup(qlge, mss, bp, mac_iocb_ptr);
4959*bafec742SSukumar Swaminathan 	} else if (pflags & qlge->chksum_cap) {
4960*bafec742SSukumar Swaminathan 		/* Do checksum offloading */
4961*bafec742SSukumar Swaminathan 		ql_hw_csum_setup(qlge, pflags, bp, mac_iocb_ptr);
4962*bafec742SSukumar Swaminathan 	}
4963*bafec742SSukumar Swaminathan 
4964*bafec742SSukumar Swaminathan 	/* let device know the latest outbound IOCB */
4965*bafec742SSukumar Swaminathan 	(void) ddi_dma_sync(tx_ring->wq_dma.dma_handle,
4966*bafec742SSukumar Swaminathan 	    (off_t)((uintptr_t)mac_iocb_ptr - (uintptr_t)tx_ring->wq_dma.vaddr),
4967*bafec742SSukumar Swaminathan 	    (size_t)sizeof (*mac_iocb_ptr), DDI_DMA_SYNC_FORDEV);
4968*bafec742SSukumar Swaminathan 
4969*bafec742SSukumar Swaminathan 	if (tx_mode == USE_DMA) {
4970*bafec742SSukumar Swaminathan 		/* let device know the latest outbound OAL if necessary */
4971*bafec742SSukumar Swaminathan 		if (oal_entries > TX_DESC_PER_IOCB) {
4972*bafec742SSukumar Swaminathan 			(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
4973*bafec742SSukumar Swaminathan 			    (off_t)0,
4974*bafec742SSukumar Swaminathan 			    (sizeof (struct oal_entry) *
4975*bafec742SSukumar Swaminathan 			    (oal_entries - TX_DESC_PER_IOCB + 1)),
4976*bafec742SSukumar Swaminathan 			    DDI_DMA_SYNC_FORDEV);
4977*bafec742SSukumar Swaminathan 		}
4978*bafec742SSukumar Swaminathan 	} else { /* for USE_COPY mode, tx buffer has changed */
4979*bafec742SSukumar Swaminathan 		/* let device know the latest change */
4980*bafec742SSukumar Swaminathan 		(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
4981*bafec742SSukumar Swaminathan 		    /* the copy buffer follows the oal entry array */
4982*bafec742SSukumar Swaminathan 		    (off_t)(sizeof (struct oal_entry) * MAX_SG_ELEMENTS),
4983*bafec742SSukumar Swaminathan 		    msg_len, DDI_DMA_SYNC_FORDEV);
4984*bafec742SSukumar Swaminathan 	}
4985*bafec742SSukumar Swaminathan 
4986*bafec742SSukumar Swaminathan 	/* save how the packet was sent */
4987*bafec742SSukumar Swaminathan 	tx_cb->tx_type = tx_mode;
4988*bafec742SSukumar Swaminathan 
4989*bafec742SSukumar Swaminathan 	QL_DUMP_REQ_PKT(qlge, mac_iocb_ptr, tx_cb->oal, oal_entries);
4990*bafec742SSukumar Swaminathan 	/* reduce the number of available tx slot */
4991*bafec742SSukumar Swaminathan 	atomic_dec_32(&tx_ring->tx_free_count);
4992*bafec742SSukumar Swaminathan 
4993*bafec742SSukumar Swaminathan 	tx_ring->prod_idx++;
4994*bafec742SSukumar Swaminathan 	if (tx_ring->prod_idx >= tx_ring->wq_len)
4995*bafec742SSukumar Swaminathan 		tx_ring->prod_idx = 0;
4996*bafec742SSukumar Swaminathan 
4997*bafec742SSukumar Swaminathan 	now = ddi_get_lbolt();
4998*bafec742SSukumar Swaminathan 	qlge->last_tx_time = now;
4999*bafec742SSukumar Swaminathan 
5000*bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
5001*bafec742SSukumar Swaminathan 
5002*bafec742SSukumar Swaminathan bad:
5003*bafec742SSukumar Swaminathan 	/*
5004*bafec742SSukumar Swaminathan 	 * If for any reason the driver cannot send, free the message
5005*bafec742SSukumar Swaminathan 	 * (mp) and unbind any DMA handles that were already bound.
5006*bafec742SSukumar Swaminathan 	 */
5007*bafec742SSukumar Swaminathan 	now = ddi_get_lbolt();
5008*bafec742SSukumar Swaminathan 	freemsg(mp);
5009*bafec742SSukumar Swaminathan 	mp = NULL;
5010*bafec742SSukumar Swaminathan 	for (i = 0; i < j; i++)
5011*bafec742SSukumar Swaminathan 		(void) ddi_dma_unbind_handle(tx_cb->tx_dma_handle[i]);
5012*bafec742SSukumar Swaminathan 
5013*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_TX, ("%s(%d) failed at 0x%x\n",
5014*bafec742SSukumar Swaminathan 	    __func__, qlge->instance, (int)now));
5015*bafec742SSukumar Swaminathan 
5016*bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
5017*bafec742SSukumar Swaminathan }
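
/*
 * Hedged caller sketch (simplified, not from this file): a GLDv3
 * transmit entry point is assumed to check for a free slot before
 * handing each chain to ql_send_common(), roughly
 *
 *	if (tx_ring->tx_free_count <= 1)
 *		return (mp);			// no slot; stack retries
 *	(void) ql_send_common(tx_ring, mp);	// consumes mp either way
 *
 * Note that ql_send_common() returns DDI_SUCCESS even on its bad path,
 * because the message has already been freed and must not be retried.
 */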
5018*bafec742SSukumar Swaminathan 
5019*bafec742SSukumar Swaminathan 
5020*bafec742SSukumar Swaminathan /*
5021*bafec742SSukumar Swaminathan  * Initializes hardware and driver software flags before the driver
5022*bafec742SSukumar Swaminathan  * is finally ready to work.
5023*bafec742SSukumar Swaminathan  */
5024*bafec742SSukumar Swaminathan int
5025*bafec742SSukumar Swaminathan ql_do_start(qlge_t *qlge)
5026*bafec742SSukumar Swaminathan {
5027*bafec742SSukumar Swaminathan 	int i;
5028*bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
5029*bafec742SSukumar Swaminathan 	uint16_t lbq_buf_size;
5030*bafec742SSukumar Swaminathan 	int rings_done;
5031*bafec742SSukumar Swaminathan 
5032*bafec742SSukumar Swaminathan 	ASSERT(qlge != NULL);
5033*bafec742SSukumar Swaminathan 
5034*bafec742SSukumar Swaminathan 	mutex_enter(&qlge->hw_mutex);
5035*bafec742SSukumar Swaminathan 
5036*bafec742SSukumar Swaminathan 	/* Reset adapter */
5037*bafec742SSukumar Swaminathan 	ql_asic_reset(qlge);
5038*bafec742SSukumar Swaminathan 
5039*bafec742SSukumar Swaminathan 	lbq_buf_size = (uint16_t)
5040*bafec742SSukumar Swaminathan 	    ((qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE);
5041*bafec742SSukumar Swaminathan 	if (qlge->rx_ring[0].lbq_buf_size != lbq_buf_size) {
5042*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
5043*bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "realloc buffers old: %d new: %d\n",
5044*bafec742SSukumar Swaminathan 		    qlge->rx_ring[0].lbq_buf_size, lbq_buf_size);
5045*bafec742SSukumar Swaminathan #endif
5046*bafec742SSukumar Swaminathan 		/*
5047*bafec742SSukumar Swaminathan 		 * Check if any ring still has buffers held by upper layers.
5048*bafec742SSukumar Swaminathan 		 * If buffers are pending with upper layers, we keep the
5049*bafec742SSukumar Swaminathan 		 * existing buffers and don't reallocate new ones.
5050*bafec742SSukumar Swaminathan 		 * Unfortunately there is no way to evict buffers from
5051*bafec742SSukumar Swaminathan 		 * upper layers. Using buffers with the current size may
5052*bafec742SSukumar Swaminathan 		 * cause slightly sub-optimal performance, but that seems
5053*bafec742SSukumar Swaminathan 		 * to be the easiest way to handle this situation.
5054*bafec742SSukumar Swaminathan 		 */
5055*bafec742SSukumar Swaminathan 		rings_done = 0;
5056*bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->rx_ring_count; i++) {
5057*bafec742SSukumar Swaminathan 			rx_ring = &qlge->rx_ring[i];
5058*bafec742SSukumar Swaminathan 			if (rx_ring->rx_indicate == 0)
5059*bafec742SSukumar Swaminathan 				rings_done++;
5060*bafec742SSukumar Swaminathan 			else
5061*bafec742SSukumar Swaminathan 				break;
5062*bafec742SSukumar Swaminathan 		}
5063*bafec742SSukumar Swaminathan 		/*
5064*bafec742SSukumar Swaminathan 		 * No buffers pending with upper layers;
5065*bafec742SSukumar Swaminathan 		 * reallocate them for the new MTU size
5066*bafec742SSukumar Swaminathan 		 */
5067*bafec742SSukumar Swaminathan 		if (rings_done >= qlge->rx_ring_count) {
5068*bafec742SSukumar Swaminathan 			/* free large buffer pool */
5069*bafec742SSukumar Swaminathan 			for (i = 0; i < qlge->rx_ring_count; i++) {
5070*bafec742SSukumar Swaminathan 				rx_ring = &qlge->rx_ring[i];
5071*bafec742SSukumar Swaminathan 				if (rx_ring->type != TX_Q) {
5072*bafec742SSukumar Swaminathan 					ql_free_sbq_buffers(rx_ring);
5073*bafec742SSukumar Swaminathan 					ql_free_lbq_buffers(rx_ring);
5074*bafec742SSukumar Swaminathan 				}
5075*bafec742SSukumar Swaminathan 			}
5076*bafec742SSukumar Swaminathan 			/* reallocate large buffer pool */
5077*bafec742SSukumar Swaminathan 			for (i = 0; i < qlge->rx_ring_count; i++) {
5078*bafec742SSukumar Swaminathan 				rx_ring = &qlge->rx_ring[i];
5079*bafec742SSukumar Swaminathan 				if (rx_ring->type != TX_Q) {
5080*bafec742SSukumar Swaminathan 					ql_alloc_sbufs(qlge, rx_ring);
5081*bafec742SSukumar Swaminathan 					ql_alloc_lbufs(qlge, rx_ring);
5082*bafec742SSukumar Swaminathan 				}
5083*bafec742SSukumar Swaminathan 			}
5084*bafec742SSukumar Swaminathan 		}
5085*bafec742SSukumar Swaminathan 	}
5086*bafec742SSukumar Swaminathan 
5087*bafec742SSukumar Swaminathan 	if (ql_bringup_adapter(qlge) != DDI_SUCCESS) {
5088*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "qlge bringup adapter failed");
5089*bafec742SSukumar Swaminathan 		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
5090*bafec742SSukumar Swaminathan 		mutex_exit(&qlge->hw_mutex);
5091*bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
5092*bafec742SSukumar Swaminathan 	}
5093*bafec742SSukumar Swaminathan 
5094*bafec742SSukumar Swaminathan 	mutex_exit(&qlge->hw_mutex);
5095*bafec742SSukumar Swaminathan 
5096*bafec742SSukumar Swaminathan 	/* Get current link state */
5097*bafec742SSukumar Swaminathan 	qlge->port_link_state = ql_get_link_state(qlge);
5098*bafec742SSukumar Swaminathan 
5099*bafec742SSukumar Swaminathan 	if (qlge->port_link_state == LS_UP) {
5100*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5101*bafec742SSukumar Swaminathan 		    __func__, qlge->instance));
5102*bafec742SSukumar Swaminathan 		/* If driver detects a carrier on */
5103*bafec742SSukumar Swaminathan 		CARRIER_ON(qlge);
5104*bafec742SSukumar Swaminathan 	} else {
5105*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5106*bafec742SSukumar Swaminathan 		    __func__, qlge->instance));
5107*bafec742SSukumar Swaminathan 		/* If driver detects a lack of carrier */
5108*bafec742SSukumar Swaminathan 		CARRIER_OFF(qlge);
5109*bafec742SSukumar Swaminathan 	}
5110*bafec742SSukumar Swaminathan 	qlge->mac_flags = QL_MAC_STARTED;
5111*bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
5112*bafec742SSukumar Swaminathan }
5113*bafec742SSukumar Swaminathan 
5114*bafec742SSukumar Swaminathan /*
5115*bafec742SSukumar Swaminathan  * Stop the currently running driver.
5116*bafec742SSukumar Swaminathan  * The driver must stop accepting new packets and wait until all
5117*bafec742SSukumar Swaminathan  * pending tx/rx buffers have been freed.
5118*bafec742SSukumar Swaminathan  */
5119*bafec742SSukumar Swaminathan int
5120*bafec742SSukumar Swaminathan ql_do_stop(qlge_t *qlge)
5121*bafec742SSukumar Swaminathan {
5122*bafec742SSukumar Swaminathan 	int rc = DDI_FAILURE;
5123*bafec742SSukumar Swaminathan 	uint32_t i, j, k;
5124*bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc, *lbq_desc;
5125*bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
5126*bafec742SSukumar Swaminathan 
5127*bafec742SSukumar Swaminathan 	ASSERT(qlge != NULL);
5128*bafec742SSukumar Swaminathan 
5129*bafec742SSukumar Swaminathan 	CARRIER_OFF(qlge);
5130*bafec742SSukumar Swaminathan 
5131*bafec742SSukumar Swaminathan 	rc = ql_bringdown_adapter(qlge);
5132*bafec742SSukumar Swaminathan 	if (rc != DDI_SUCCESS) {
5133*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "qlge bringdown adapter failed.");
5134*bafec742SSukumar Swaminathan 	}
5136*bafec742SSukumar Swaminathan 
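	/*
	 * Reclaim rx buffers loaned upstream: walk each ring's circular
	 * in-use lists and freemsg() the attached mblks; their esballoc
	 * free routines (defined elsewhere in this file) are expected
	 * to return the descriptors to the free lists.
	 */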
5137*bafec742SSukumar Swaminathan 	for (k = 0; k < qlge->rx_ring_count; k++) {
5138*bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[k];
5139*bafec742SSukumar Swaminathan 		if (rx_ring->type != TX_Q) {
5140*bafec742SSukumar Swaminathan 			j = rx_ring->lbq_use_head;
5141*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
5142*bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "ring %d: move %d lbufs from in-use"
5143*bafec742SSukumar Swaminathan 			    " list to free list of %d, total %d\n",
5144*bafec742SSukumar Swaminathan 			    k, rx_ring->lbuf_in_use_count,
5145*bafec742SSukumar Swaminathan 			    rx_ring->lbuf_free_count,
5146*bafec742SSukumar Swaminathan 			    rx_ring->lbuf_in_use_count +
5147*bafec742SSukumar Swaminathan 			    rx_ring->lbuf_free_count);
5148*bafec742SSukumar Swaminathan #endif
5149*bafec742SSukumar Swaminathan 			for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
5150*bafec742SSukumar Swaminathan 				lbq_desc = rx_ring->lbuf_in_use[j];
5151*bafec742SSukumar Swaminathan 				j++;
5152*bafec742SSukumar Swaminathan 				if (j >= rx_ring->lbq_len) {
5153*bafec742SSukumar Swaminathan 					j = 0;
5154*bafec742SSukumar Swaminathan 				}
5155*bafec742SSukumar Swaminathan 				if (lbq_desc->mp) {
5156*bafec742SSukumar Swaminathan 					atomic_inc_32(&rx_ring->rx_indicate);
5157*bafec742SSukumar Swaminathan 					freemsg(lbq_desc->mp);
5158*bafec742SSukumar Swaminathan 				}
5159*bafec742SSukumar Swaminathan 			}
5160*bafec742SSukumar Swaminathan 			rx_ring->lbq_use_head = j;
5161*bafec742SSukumar Swaminathan 			rx_ring->lbq_use_tail = j;
5162*bafec742SSukumar Swaminathan 			rx_ring->lbuf_in_use_count = 0;
5163*bafec742SSukumar Swaminathan 			j = rx_ring->sbq_use_head;
5164*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
5165*bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "ring %d: move %d sbufs from in-use"
5166*bafec742SSukumar Swaminathan 			    " list to free list of %d, total %d\n",
5167*bafec742SSukumar Swaminathan 			    k, rx_ring->sbuf_in_use_count,
5168*bafec742SSukumar Swaminathan 			    rx_ring->sbuf_free_count,
5169*bafec742SSukumar Swaminathan 			    rx_ring->sbuf_in_use_count +
5170*bafec742SSukumar Swaminathan 			    rx_ring->sbuf_free_count);
5171*bafec742SSukumar Swaminathan #endif
5172*bafec742SSukumar Swaminathan 			for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
5173*bafec742SSukumar Swaminathan 				sbq_desc = rx_ring->sbuf_in_use[j];
5174*bafec742SSukumar Swaminathan 				j++;
5175*bafec742SSukumar Swaminathan 				if (j >= rx_ring->sbq_len) {
5176*bafec742SSukumar Swaminathan 					j = 0;
5177*bafec742SSukumar Swaminathan 				}
5178*bafec742SSukumar Swaminathan 				if (sbq_desc->mp) {
5179*bafec742SSukumar Swaminathan 					atomic_inc_32(&rx_ring->rx_indicate);
5180*bafec742SSukumar Swaminathan 					freemsg(sbq_desc->mp);
5181*bafec742SSukumar Swaminathan 				}
5182*bafec742SSukumar Swaminathan 			}
5183*bafec742SSukumar Swaminathan 			rx_ring->sbq_use_head = j;
5184*bafec742SSukumar Swaminathan 			rx_ring->sbq_use_tail = j;
5185*bafec742SSukumar Swaminathan 			rx_ring->sbuf_in_use_count = 0;
5186*bafec742SSukumar Swaminathan 		}
5187*bafec742SSukumar Swaminathan 	}
5188*bafec742SSukumar Swaminathan 
5189*bafec742SSukumar Swaminathan 	qlge->mac_flags = QL_MAC_STOPPED;
5190*bafec742SSukumar Swaminathan 
5191*bafec742SSukumar Swaminathan 	return (rc);
5192*bafec742SSukumar Swaminathan }
5193*bafec742SSukumar Swaminathan 
5194*bafec742SSukumar Swaminathan /*
5195*bafec742SSukumar Swaminathan  * Support routines
5196*bafec742SSukumar Swaminathan  */
5197*bafec742SSukumar Swaminathan 
5198*bafec742SSukumar Swaminathan void
5199*bafec742SSukumar Swaminathan ql_disable_isr(qlge_t *qlge)
5200*bafec742SSukumar Swaminathan {
5201*bafec742SSukumar Swaminathan 	/*
5202*bafec742SSukumar Swaminathan 	 * disable the hardware interrupt
5203*bafec742SSukumar Swaminathan 	 */
5204*bafec742SSukumar Swaminathan 	ISP_DISABLE_GLOBAL_INTRS(qlge);
5205*bafec742SSukumar Swaminathan 
5206*bafec742SSukumar Swaminathan 	qlge->flags &= ~INTERRUPTS_ENABLED;
5207*bafec742SSukumar Swaminathan }
5208*bafec742SSukumar Swaminathan 
5209*bafec742SSukumar Swaminathan 
5210*bafec742SSukumar Swaminathan 
5211*bafec742SSukumar Swaminathan /*
5212*bafec742SSukumar Swaminathan  * Busy-wait for 'usecs' microseconds.
5213*bafec742SSukumar Swaminathan  */
5214*bafec742SSukumar Swaminathan void
5215*bafec742SSukumar Swaminathan qlge_delay(clock_t usecs)
5216*bafec742SSukumar Swaminathan {
5217*bafec742SSukumar Swaminathan 	drv_usecwait(usecs);
5218*bafec742SSukumar Swaminathan }
5219*bafec742SSukumar Swaminathan 
5220*bafec742SSukumar Swaminathan /*
5221*bafec742SSukumar Swaminathan  * Retrieve this adapter's saved PCI configuration.
5222*bafec742SSukumar Swaminathan  */
5223*bafec742SSukumar Swaminathan 
5224*bafec742SSukumar Swaminathan pci_cfg_t *
5225*bafec742SSukumar Swaminathan ql_get_pci_config(qlge_t *qlge)
5226*bafec742SSukumar Swaminathan {
5227*bafec742SSukumar Swaminathan 	return (&(qlge->pci_cfg));
5228*bafec742SSukumar Swaminathan }
5229*bafec742SSukumar Swaminathan 
5230*bafec742SSukumar Swaminathan /*
5231*bafec742SSukumar Swaminathan  * Get current Link status
5232*bafec742SSukumar Swaminathan  */
5233*bafec742SSukumar Swaminathan static uint32_t
5234*bafec742SSukumar Swaminathan ql_get_link_state(qlge_t *qlge)
5235*bafec742SSukumar Swaminathan {
5236*bafec742SSukumar Swaminathan 	uint32_t bitToCheck = 0;
5237*bafec742SSukumar Swaminathan 	uint32_t temp, linkState;
5238*bafec742SSukumar Swaminathan 
5239*bafec742SSukumar Swaminathan 	if (qlge->func_number == qlge->fn0_net) {
5240*bafec742SSukumar Swaminathan 		bitToCheck = STS_PL0;
5241*bafec742SSukumar Swaminathan 	} else {
5242*bafec742SSukumar Swaminathan 		bitToCheck = STS_PL1;
5243*bafec742SSukumar Swaminathan 	}
5244*bafec742SSukumar Swaminathan 	temp = ql_read_reg(qlge, REG_STATUS);
5245*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_GLD, ("%s(%d) chip status reg: 0x%x\n",
5246*bafec742SSukumar Swaminathan 	    __func__, qlge->instance, temp));
5247*bafec742SSukumar Swaminathan 
5248*bafec742SSukumar Swaminathan 	if (temp & bitToCheck) {
5249*bafec742SSukumar Swaminathan 		linkState = LS_UP;
5250*bafec742SSukumar Swaminathan 	} else {
5251*bafec742SSukumar Swaminathan 		linkState = LS_DOWN;
5252*bafec742SSukumar Swaminathan 	}
5253*bafec742SSukumar Swaminathan 	if (CFG_IST(qlge, CFG_CHIP_8100)) {
5254*bafec742SSukumar Swaminathan 		/* for Schultz, link Speed is fixed to 10G, full duplex */
5255*bafec742SSukumar Swaminathan 		qlge->speed  = SPEED_10G;
5256*bafec742SSukumar Swaminathan 		qlge->duplex = 1;
5257*bafec742SSukumar Swaminathan 	}
5258*bafec742SSukumar Swaminathan 	return (linkState);
5259*bafec742SSukumar Swaminathan }
5260*bafec742SSukumar Swaminathan /*
5261*bafec742SSukumar Swaminathan  * Get current link status and report to OS
5262*bafec742SSukumar Swaminathan  */
5263*bafec742SSukumar Swaminathan static void
5264*bafec742SSukumar Swaminathan ql_get_and_report_link_state(qlge_t *qlge)
5265*bafec742SSukumar Swaminathan {
5266*bafec742SSukumar Swaminathan 	uint32_t cur_link_state;
5267*bafec742SSukumar Swaminathan 
5268*bafec742SSukumar Swaminathan 	/* Get current link state */
5269*bafec742SSukumar Swaminathan 	cur_link_state = ql_get_link_state(qlge);
5270*bafec742SSukumar Swaminathan 	/* if link state has changed */
5271*bafec742SSukumar Swaminathan 	if (cur_link_state != qlge->port_link_state) {
5272*bafec742SSukumar Swaminathan 
5273*bafec742SSukumar Swaminathan 		qlge->port_link_state = cur_link_state;
5274*bafec742SSukumar Swaminathan 
5275*bafec742SSukumar Swaminathan 		if (qlge->port_link_state == LS_UP) {
5276*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5277*bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
5278*bafec742SSukumar Swaminathan 			/* If driver detects a carrier on */
5279*bafec742SSukumar Swaminathan 			CARRIER_ON(qlge);
5280*bafec742SSukumar Swaminathan 		} else {
5281*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5282*bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
5283*bafec742SSukumar Swaminathan 			/* If driver detects a lack of carrier */
5284*bafec742SSukumar Swaminathan 			CARRIER_OFF(qlge);
5285*bafec742SSukumar Swaminathan 		}
5286*bafec742SSukumar Swaminathan 	}
5287*bafec742SSukumar Swaminathan }
5288*bafec742SSukumar Swaminathan 
5289*bafec742SSukumar Swaminathan /*
5290*bafec742SSukumar Swaminathan  * timer callback function executed after timer expires
5291*bafec742SSukumar Swaminathan  */
5292*bafec742SSukumar Swaminathan static void
5293*bafec742SSukumar Swaminathan ql_timer(void* arg)
5294*bafec742SSukumar Swaminathan {
5295*bafec742SSukumar Swaminathan 	ql_get_and_report_link_state((qlge_t *)arg);
5296*bafec742SSukumar Swaminathan }
5297*bafec742SSukumar Swaminathan 
5298*bafec742SSukumar Swaminathan /*
5299*bafec742SSukumar Swaminathan  * stop the running timer if activated
5300*bafec742SSukumar Swaminathan  */
5301*bafec742SSukumar Swaminathan static void
5302*bafec742SSukumar Swaminathan ql_stop_timer(qlge_t *qlge)
5303*bafec742SSukumar Swaminathan {
5304*bafec742SSukumar Swaminathan 	timeout_id_t timer_id;
5305*bafec742SSukumar Swaminathan 	/* Disable driver timer */
5306*bafec742SSukumar Swaminathan 	if (qlge->ql_timer_timeout_id != NULL) {
5307*bafec742SSukumar Swaminathan 		timer_id = qlge->ql_timer_timeout_id;
5308*bafec742SSukumar Swaminathan 		qlge->ql_timer_timeout_id = NULL;
5309*bafec742SSukumar Swaminathan 		(void) untimeout(timer_id);
5310*bafec742SSukumar Swaminathan 	}
5311*bafec742SSukumar Swaminathan }
5312*bafec742SSukumar Swaminathan 
5313*bafec742SSukumar Swaminathan /*
5314*bafec742SSukumar Swaminathan  * stop then restart timer
5315*bafec742SSukumar Swaminathan  */
5316*bafec742SSukumar Swaminathan void
5317*bafec742SSukumar Swaminathan ql_restart_timer(qlge_t *qlge)
5318*bafec742SSukumar Swaminathan {
5319*bafec742SSukumar Swaminathan 	ql_stop_timer(qlge);
5320*bafec742SSukumar Swaminathan 	qlge->ql_timer_ticks = TICKS_PER_SEC / 4;
5321*bafec742SSukumar Swaminathan 	qlge->ql_timer_timeout_id = timeout(ql_timer,
5322*bafec742SSukumar Swaminathan 	    (void *)qlge, qlge->ql_timer_ticks);
5323*bafec742SSukumar Swaminathan }
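
/*
 * Usage sketch (assumed, not from this file): callers bracket work that
 * must not race with the link poll, e.g.
 *
 *	ql_stop_timer(qlge);		// cancel the pending timeout
 *	... reconfigure hardware ...
 *	ql_restart_timer(qlge);		// re-arm TICKS_PER_SEC / 4 out
 *
 * Note the timeout(9F) callback is one-shot: ql_timer() does not re-arm
 * itself here, so periodic polling relies on ql_restart_timer().
 */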
5324*bafec742SSukumar Swaminathan 
5325*bafec742SSukumar Swaminathan /* ************************************************************************* */
5326*bafec742SSukumar Swaminathan /*
5327*bafec742SSukumar Swaminathan  *		Hardware K-Stats Data Structures and Subroutines
5328*bafec742SSukumar Swaminathan  */
5329*bafec742SSukumar Swaminathan /* ************************************************************************* */
5330*bafec742SSukumar Swaminathan static const ql_ksindex_t ql_kstats_hw[] = {
5331*bafec742SSukumar Swaminathan 	/* PCI related hardware information */
5332*bafec742SSukumar Swaminathan 	{ 0, "Vendor Id"			},
5333*bafec742SSukumar Swaminathan 	{ 1, "Device Id"			},
5334*bafec742SSukumar Swaminathan 	{ 2, "Command"				},
5335*bafec742SSukumar Swaminathan 	{ 3, "Status"				},
5336*bafec742SSukumar Swaminathan 	{ 4, "Revision Id"			},
5337*bafec742SSukumar Swaminathan 	{ 5, "Cache Line Size"			},
5338*bafec742SSukumar Swaminathan 	{ 6, "Latency Timer"			},
5339*bafec742SSukumar Swaminathan 	{ 7, "Header Type"			},
5340*bafec742SSukumar Swaminathan 	{ 9, "I/O base addr"			},
5341*bafec742SSukumar Swaminathan 	{ 10, "Control Reg Base addr low"	},
5342*bafec742SSukumar Swaminathan 	{ 11, "Control Reg Base addr high"	},
5343*bafec742SSukumar Swaminathan 	{ 12, "Doorbell Reg Base addr low"	},
5344*bafec742SSukumar Swaminathan 	{ 13, "Doorbell Reg Base addr high"	},
5345*bafec742SSukumar Swaminathan 	{ 14, "Subsystem Vendor Id"		},
5346*bafec742SSukumar Swaminathan 	{ 15, "Subsystem Device ID"		},
5347*bafec742SSukumar Swaminathan 	{ 16, "PCIe Device Control"		},
5348*bafec742SSukumar Swaminathan 	{ 17, "PCIe Link Status"		},
5349*bafec742SSukumar Swaminathan 
5350*bafec742SSukumar Swaminathan 	{ -1,	NULL				},
5351*bafec742SSukumar Swaminathan };
5352*bafec742SSukumar Swaminathan 
5353*bafec742SSukumar Swaminathan /*
5354*bafec742SSukumar Swaminathan  * kstat update function for PCI registers
5355*bafec742SSukumar Swaminathan  */
5356*bafec742SSukumar Swaminathan static int
5357*bafec742SSukumar Swaminathan ql_kstats_get_pci_regs(kstat_t *ksp, int flag)
5358*bafec742SSukumar Swaminathan {
5359*bafec742SSukumar Swaminathan 	qlge_t *qlge;
5360*bafec742SSukumar Swaminathan 	kstat_named_t *knp;
5361*bafec742SSukumar Swaminathan 
5362*bafec742SSukumar Swaminathan 	if (flag != KSTAT_READ)
5363*bafec742SSukumar Swaminathan 		return (EACCES);
5364*bafec742SSukumar Swaminathan 
5365*bafec742SSukumar Swaminathan 	qlge = ksp->ks_private;
5366*bafec742SSukumar Swaminathan 	knp = ksp->ks_data;
5367*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.vendor_id;
5368*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.device_id;
5369*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.command;
5370*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.status;
5371*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.revision;
5372*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.cache_line_size;
5373*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.latency_timer;
5374*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.header_type;
5375*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.io_base_address;
5376*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 =
5377*bafec742SSukumar Swaminathan 	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_lower;
5378*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 =
5379*bafec742SSukumar Swaminathan 	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_upper;
5380*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 =
5381*bafec742SSukumar Swaminathan 	    qlge->pci_cfg.pci_doorbell_mem_base_address_lower;
5382*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 =
5383*bafec742SSukumar Swaminathan 	    qlge->pci_cfg.pci_doorbell_mem_base_address_upper;
5384*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.sub_vendor_id;
5385*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.sub_device_id;
5386*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.pcie_device_control;
5387*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.link_status;
5388*bafec742SSukumar Swaminathan 
5389*bafec742SSukumar Swaminathan 	return (0);
5390*bafec742SSukumar Swaminathan }
5391*bafec742SSukumar Swaminathan 
5392*bafec742SSukumar Swaminathan static const ql_ksindex_t ql_kstats_mii[] = {
5393*bafec742SSukumar Swaminathan 	/* MAC/MII related hardware information */
5394*bafec742SSukumar Swaminathan 	{ 0, "mtu"},
5395*bafec742SSukumar Swaminathan 
5396*bafec742SSukumar Swaminathan 	{ -1, NULL},
5397*bafec742SSukumar Swaminathan };
5398*bafec742SSukumar Swaminathan 
5399*bafec742SSukumar Swaminathan 
5400*bafec742SSukumar Swaminathan /*
5401*bafec742SSukumar Swaminathan  * kstat update function for MII related information.
5402*bafec742SSukumar Swaminathan  */
5403*bafec742SSukumar Swaminathan static int
5404*bafec742SSukumar Swaminathan ql_kstats_mii_update(kstat_t *ksp, int flag)
5405*bafec742SSukumar Swaminathan {
5406*bafec742SSukumar Swaminathan 	qlge_t *qlge;
5407*bafec742SSukumar Swaminathan 	kstat_named_t *knp;
5408*bafec742SSukumar Swaminathan 
5409*bafec742SSukumar Swaminathan 	if (flag != KSTAT_READ)
5410*bafec742SSukumar Swaminathan 		return (EACCES);
5411*bafec742SSukumar Swaminathan 
5412*bafec742SSukumar Swaminathan 	qlge = ksp->ks_private;
5413*bafec742SSukumar Swaminathan 	knp = ksp->ks_data;
5414*bafec742SSukumar Swaminathan 
5415*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->mtu;
5416*bafec742SSukumar Swaminathan 
5417*bafec742SSukumar Swaminathan 	return (0);
5418*bafec742SSukumar Swaminathan }
5419*bafec742SSukumar Swaminathan 
5420*bafec742SSukumar Swaminathan static const ql_ksindex_t ql_kstats_reg[] = {
5421*bafec742SSukumar Swaminathan 	/* Register information */
5422*bafec742SSukumar Swaminathan 	{ 0, "System (0x08)"			},
5423*bafec742SSukumar Swaminathan 	{ 1, "Reset/Fail Over (0x0C)"		},
5424*bafec742SSukumar Swaminathan 	{ 2, "Function Specific Control(0x10)"	},
5425*bafec742SSukumar Swaminathan 	{ 3, "Status (0x30)"			},
5426*bafec742SSukumar Swaminathan 	{ 4, "Intr Enable (0x34)"		},
5427*bafec742SSukumar Swaminathan 	{ 5, "Intr Status1 (0x3C)"		},
5428*bafec742SSukumar Swaminathan 	{ 6, "Error Status (0x54)"		},
5429*bafec742SSukumar Swaminathan 	{ 7, "XGMAC Flow Control(0x11C)"	},
5430*bafec742SSukumar Swaminathan 	{ 8, "XGMAC Tx Pause Frames(0x230)"	},
5431*bafec742SSukumar Swaminathan 	{ 9, "XGMAC Rx Pause Frames(0x388)"	},
5432*bafec742SSukumar Swaminathan 	{ 10, "XGMAC Rx FIFO Drop Count(0x5B8)"	},
5433*bafec742SSukumar Swaminathan 	{ 11, "interrupts actually allocated"	},
5434*bafec742SSukumar Swaminathan 	{ 12, "interrupts on rx ring 0"		},
5435*bafec742SSukumar Swaminathan 	{ 13, "interrupts on rx ring 1"		},
5436*bafec742SSukumar Swaminathan 	{ 14, "interrupts on rx ring 2"		},
5437*bafec742SSukumar Swaminathan 	{ 15, "interrupts on rx ring 3"		},
5438*bafec742SSukumar Swaminathan 	{ 16, "interrupts on rx ring 4"		},
5439*bafec742SSukumar Swaminathan 	{ 17, "interrupts on rx ring 5"		},
5440*bafec742SSukumar Swaminathan 	{ 18, "interrupts on rx ring 6"		},
5441*bafec742SSukumar Swaminathan 	{ 19, "interrupts on rx ring 7"		},
5442*bafec742SSukumar Swaminathan 	{ 20, "polls on rx ring 0"		},
5443*bafec742SSukumar Swaminathan 	{ 21, "polls on rx ring 1"		},
5444*bafec742SSukumar Swaminathan 	{ 22, "polls on rx ring 2"		},
5445*bafec742SSukumar Swaminathan 	{ 23, "polls on rx ring 3"		},
5446*bafec742SSukumar Swaminathan 	{ 24, "polls on rx ring 4"		},
5447*bafec742SSukumar Swaminathan 	{ 25, "polls on rx ring 5"		},
5448*bafec742SSukumar Swaminathan 	{ 26, "polls on rx ring 6"		},
5449*bafec742SSukumar Swaminathan 	{ 27, "polls on rx ring 7"		},
5450*bafec742SSukumar Swaminathan 	{ 28, "tx no resource on ring 0"	},
5451*bafec742SSukumar Swaminathan 	{ 29, "tx dma bind fail on ring 0"	},
5452*bafec742SSukumar Swaminathan 	{ 30, "tx dma no handle on ring 0"	},
5453*bafec742SSukumar Swaminathan 	{ 31, "tx dma no cookie on ring 0"	},
5454*bafec742SSukumar Swaminathan 	{ 32, "MPI firmware major version"},
5455*bafec742SSukumar Swaminathan 	{ 33, "MPI firmware minor version"},
5456*bafec742SSukumar Swaminathan 	{ 34, "MPI firmware sub version"},
5457*bafec742SSukumar Swaminathan 
5458*bafec742SSukumar Swaminathan 	{ -1, NULL},
5459*bafec742SSukumar Swaminathan };
5460*bafec742SSukumar Swaminathan 
5461*bafec742SSukumar Swaminathan 
5462*bafec742SSukumar Swaminathan /*
5463*bafec742SSukumar Swaminathan  * kstat update function for device register set
5464*bafec742SSukumar Swaminathan  */
5465*bafec742SSukumar Swaminathan static int
5466*bafec742SSukumar Swaminathan ql_kstats_get_reg_and_dev_stats(kstat_t *ksp, int flag)
5467*bafec742SSukumar Swaminathan {
5468*bafec742SSukumar Swaminathan 	qlge_t *qlge;
5469*bafec742SSukumar Swaminathan 	kstat_named_t *knp;
5470*bafec742SSukumar Swaminathan 	uint32_t val32;
5471*bafec742SSukumar Swaminathan 	int i = 0;
5472*bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
5473*bafec742SSukumar Swaminathan 
5474*bafec742SSukumar Swaminathan 	if (flag != KSTAT_READ)
5475*bafec742SSukumar Swaminathan 		return (EACCES);
5476*bafec742SSukumar Swaminathan 
5477*bafec742SSukumar Swaminathan 	qlge = ksp->ks_private;
5478*bafec742SSukumar Swaminathan 	knp = ksp->ks_data;
5479*bafec742SSukumar Swaminathan 
5480*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_SYSTEM);
5481*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_RESET_FAILOVER);
5482*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL);
5483*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_STATUS);
5484*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_ENABLE);
5485*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
5486*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_ERROR_STATUS);
5487*bafec742SSukumar Swaminathan 
5488*bafec742SSukumar Swaminathan 	if (ql_sem_spinlock(qlge, qlge->xgmac_sem_mask)) {
5489*bafec742SSukumar Swaminathan 		return (0);
5490*bafec742SSukumar Swaminathan 	}
5491*bafec742SSukumar Swaminathan 	ql_read_xgmac_reg(qlge, REG_XGMAC_FLOW_CONTROL, &val32);
5492*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
5493*bafec742SSukumar Swaminathan 
5494*bafec742SSukumar Swaminathan 	ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_TX_PAUSE_PKTS, &val32);
5495*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
5496*bafec742SSukumar Swaminathan 
5497*bafec742SSukumar Swaminathan 	ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_PAUSE_PKTS, &val32);
5498*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
5499*bafec742SSukumar Swaminathan 
5500*bafec742SSukumar Swaminathan 	ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_FIFO_DROPS, &val32);
5501*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
5502*bafec742SSukumar Swaminathan 
5503*bafec742SSukumar Swaminathan 	ql_sem_unlock(qlge, qlge->xgmac_sem_mask);
5504*bafec742SSukumar Swaminathan 
5505*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->intr_cnt;
5506*bafec742SSukumar Swaminathan 
5507*bafec742SSukumar Swaminathan 	for (i = 0; i < 8; i++) {
5508*bafec742SSukumar Swaminathan 		(knp++)->value.ui32 = qlge->rx_interrupts[i];
5509*bafec742SSukumar Swaminathan 	}
5510*bafec742SSukumar Swaminathan 
5511*bafec742SSukumar Swaminathan 	for (i = 0; i < 8; i++) {
5512*bafec742SSukumar Swaminathan 		(knp++)->value.ui32 = qlge->rx_polls[i];
5513*bafec742SSukumar Swaminathan 	}
5514*bafec742SSukumar Swaminathan 
5515*bafec742SSukumar Swaminathan 	tx_ring = &qlge->tx_ring[0];
5516*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = tx_ring->defer;
5517*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = tx_ring->tx_fail_dma_bind;
5518*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = tx_ring->tx_no_dma_handle;
5519*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = tx_ring->tx_no_dma_cookie;
5520*bafec742SSukumar Swaminathan 
5521*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->fw_version_info.major_version;
5522*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->fw_version_info.minor_version;
5523*bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->fw_version_info.sub_minor_version;
5524*bafec742SSukumar Swaminathan 
5525*bafec742SSukumar Swaminathan 	return (0);
5526*bafec742SSukumar Swaminathan }
5527*bafec742SSukumar Swaminathan 
5528*bafec742SSukumar Swaminathan 
5529*bafec742SSukumar Swaminathan static kstat_t *
5530*bafec742SSukumar Swaminathan ql_setup_named_kstat(qlge_t *qlge, int instance, char *name,
5531*bafec742SSukumar Swaminathan     const ql_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
5532*bafec742SSukumar Swaminathan {
5533*bafec742SSukumar Swaminathan 	kstat_t *ksp;
5534*bafec742SSukumar Swaminathan 	kstat_named_t *knp;
5535*bafec742SSukumar Swaminathan 	char *np;
5536*bafec742SSukumar Swaminathan 	int type;
5537*bafec742SSukumar Swaminathan 
5538*bafec742SSukumar Swaminathan 	size /= sizeof (ql_ksindex_t);
5539*bafec742SSukumar Swaminathan 	ksp = kstat_create(ADAPTER_NAME, instance, name, "net",
5540*bafec742SSukumar Swaminathan 	    KSTAT_TYPE_NAMED, ((uint32_t)size) - 1, KSTAT_FLAG_PERSISTENT);
5541*bafec742SSukumar Swaminathan 	if (ksp == NULL)
5542*bafec742SSukumar Swaminathan 		return (NULL);
5543*bafec742SSukumar Swaminathan 
5544*bafec742SSukumar Swaminathan 	ksp->ks_private = qlge;
5545*bafec742SSukumar Swaminathan 	ksp->ks_update = update;
5546*bafec742SSukumar Swaminathan 	for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
5547*bafec742SSukumar Swaminathan 		switch (*np) {
5548*bafec742SSukumar Swaminathan 		default:
5549*bafec742SSukumar Swaminathan 			type = KSTAT_DATA_UINT32;
5550*bafec742SSukumar Swaminathan 			break;
5551*bafec742SSukumar Swaminathan 		case '&':
5552*bafec742SSukumar Swaminathan 			np += 1;
5553*bafec742SSukumar Swaminathan 			type = KSTAT_DATA_CHAR;
5554*bafec742SSukumar Swaminathan 			break;
5555*bafec742SSukumar Swaminathan 		}
5556*bafec742SSukumar Swaminathan 		kstat_named_init(knp, np, (uint8_t)type);
5557*bafec742SSukumar Swaminathan 	}
5558*bafec742SSukumar Swaminathan 	kstat_install(ksp);
5559*bafec742SSukumar Swaminathan 
5560*bafec742SSukumar Swaminathan 	return (ksp);
5561*bafec742SSukumar Swaminathan }
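
/*
 * Naming convention for the ql_ksindex_t tables above: a name starting
 * with '&' marks a KSTAT_DATA_CHAR statistic (the '&' itself is
 * stripped); every other entry defaults to KSTAT_DATA_UINT32. A
 * hypothetical char-typed entry would look like:
 *
 *	{ 18, "&Serial Number" },
 */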
5562*bafec742SSukumar Swaminathan 
5563*bafec742SSukumar Swaminathan /*
5564*bafec742SSukumar Swaminathan  * Set up the various kstats
5565*bafec742SSukumar Swaminathan  */
5566*bafec742SSukumar Swaminathan int
5567*bafec742SSukumar Swaminathan ql_init_kstats(qlge_t *qlge)
5568*bafec742SSukumar Swaminathan {
5569*bafec742SSukumar Swaminathan 	/* Hardware KStats */
5570*bafec742SSukumar Swaminathan 	qlge->ql_kstats[QL_KSTAT_CHIP] = ql_setup_named_kstat(qlge,
5571*bafec742SSukumar Swaminathan 	    qlge->instance, "chip", ql_kstats_hw,
5572*bafec742SSukumar Swaminathan 	    sizeof (ql_kstats_hw), ql_kstats_get_pci_regs);
5573*bafec742SSukumar Swaminathan 	if (qlge->ql_kstats[QL_KSTAT_CHIP] == NULL) {
5574*bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
5575*bafec742SSukumar Swaminathan 	}
5576*bafec742SSukumar Swaminathan 
5577*bafec742SSukumar Swaminathan 	/* MII KStats */
5578*bafec742SSukumar Swaminathan 	qlge->ql_kstats[QL_KSTAT_LINK] = ql_setup_named_kstat(qlge,
5579*bafec742SSukumar Swaminathan 	    qlge->instance, "mii", ql_kstats_mii,
5580*bafec742SSukumar Swaminathan 	    sizeof (ql_kstats_mii), ql_kstats_mii_update);
5581*bafec742SSukumar Swaminathan 	if (qlge->ql_kstats[QL_KSTAT_LINK] == NULL) {
5582*bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
5583*bafec742SSukumar Swaminathan 	}
5584*bafec742SSukumar Swaminathan 
5585*bafec742SSukumar Swaminathan 	/* REG KStats */
5586*bafec742SSukumar Swaminathan 	qlge->ql_kstats[QL_KSTAT_REG] = ql_setup_named_kstat(qlge,
5587*bafec742SSukumar Swaminathan 	    qlge->instance, "reg", ql_kstats_reg,
5588*bafec742SSukumar Swaminathan 	    sizeof (ql_kstats_reg), ql_kstats_get_reg_and_dev_stats);
5589*bafec742SSukumar Swaminathan 	if (qlge->ql_kstats[QL_KSTAT_REG] == NULL) {
5590*bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
5591*bafec742SSukumar Swaminathan 	}
5592*bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
5593*bafec742SSukumar Swaminathan }
5594*bafec742SSukumar Swaminathan 
5595*bafec742SSukumar Swaminathan /*
5596*bafec742SSukumar Swaminathan  * Delete all kstats
5597*bafec742SSukumar Swaminathan  */
5598*bafec742SSukumar Swaminathan void
5599*bafec742SSukumar Swaminathan ql_fini_kstats(qlge_t *qlge)
5600*bafec742SSukumar Swaminathan {
5601*bafec742SSukumar Swaminathan 	int i;
5602*bafec742SSukumar Swaminathan 
5603*bafec742SSukumar Swaminathan 	for (i = 0; i < QL_KSTAT_COUNT; i++) {
5604*bafec742SSukumar Swaminathan 		if (qlge->ql_kstats[i] != NULL)
5605*bafec742SSukumar Swaminathan 			kstat_delete(qlge->ql_kstats[i]);
5606*bafec742SSukumar Swaminathan 	}
5607*bafec742SSukumar Swaminathan }
5608*bafec742SSukumar Swaminathan 
5609*bafec742SSukumar Swaminathan /* ************************************************************************* */
5610*bafec742SSukumar Swaminathan /*
5611*bafec742SSukumar Swaminathan  *                                 kstat end
5612*bafec742SSukumar Swaminathan  */
5613*bafec742SSukumar Swaminathan /* ************************************************************************* */
5614*bafec742SSukumar Swaminathan 
5615*bafec742SSukumar Swaminathan /*
5616*bafec742SSukumar Swaminathan  * Setup the parameters for receive and transmit rings including buffer sizes
5617*bafec742SSukumar Swaminathan  * and completion queue sizes
5618*bafec742SSukumar Swaminathan  */
5619*bafec742SSukumar Swaminathan static int
5620*bafec742SSukumar Swaminathan ql_setup_rings(qlge_t *qlge)
5621*bafec742SSukumar Swaminathan {
5622*bafec742SSukumar Swaminathan 	uint8_t i;
5623*bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
5624*bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
5625*bafec742SSukumar Swaminathan 	uint16_t lbq_buf_size;
5626*bafec742SSukumar Swaminathan 
5627*bafec742SSukumar Swaminathan 	lbq_buf_size = (uint16_t)
5628*bafec742SSukumar Swaminathan 	    ((qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE);
5629*bafec742SSukumar Swaminathan 
5630*bafec742SSukumar Swaminathan 	/*
5631*bafec742SSukumar Swaminathan 	 * rx_ring[0] is always the default queue.
5632*bafec742SSukumar Swaminathan 	 */
5633*bafec742SSukumar Swaminathan 	/*
5634*bafec742SSukumar Swaminathan 	 * qlge->rx_ring_count:
5635*bafec742SSukumar Swaminathan 	 * Total number of rx_rings. This includes a number
5636*bafec742SSukumar Swaminathan 	 * of outbound completion handler rx_rings, and a
5637*bafec742SSukumar Swaminathan 	 * number of inbound completion handler rx_rings.
5638*bafec742SSukumar Swaminathan 	 * rss is only enabled if we have more than 1 rx completion
5639*bafec742SSukumar Swaminathan 	 * queue. If we have a single rx completion queue,
5640*bafec742SSukumar Swaminathan 	 * all rx completions go to that one queue.
5642*bafec742SSukumar Swaminathan 	 */
5643*bafec742SSukumar Swaminathan 
5644*bafec742SSukumar Swaminathan 	qlge->tx_ring_first_cq_id = qlge->rss_ring_count;
5645*bafec742SSukumar Swaminathan 
5646*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
5647*bafec742SSukumar Swaminathan 		tx_ring = &qlge->tx_ring[i];
5648*bafec742SSukumar Swaminathan 		bzero((void *)tx_ring, sizeof (*tx_ring));
5649*bafec742SSukumar Swaminathan 		tx_ring->qlge = qlge;
5650*bafec742SSukumar Swaminathan 		tx_ring->wq_id = i;
5651*bafec742SSukumar Swaminathan 		tx_ring->wq_len = qlge->tx_ring_size;
5652*bafec742SSukumar Swaminathan 		tx_ring->wq_size = (uint32_t)(
5653*bafec742SSukumar Swaminathan 		    tx_ring->wq_len * sizeof (struct ob_mac_iocb_req));
5654*bafec742SSukumar Swaminathan 
5655*bafec742SSukumar Swaminathan 		/*
5656*bafec742SSukumar Swaminathan 		 * The completion queue ID for the tx rings start
5657*bafec742SSukumar Swaminathan 		 * immediately after the last rss completion queue.
5658*bafec742SSukumar Swaminathan 		 */
5659*bafec742SSukumar Swaminathan 		tx_ring->cq_id = (uint16_t)(i + qlge->tx_ring_first_cq_id);
5660*bafec742SSukumar Swaminathan 	}
5661*bafec742SSukumar Swaminathan 
5662*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
5663*bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
5664*bafec742SSukumar Swaminathan 		bzero((void *)rx_ring, sizeof (*rx_ring));
5665*bafec742SSukumar Swaminathan 		rx_ring->qlge = qlge;
5666*bafec742SSukumar Swaminathan 		rx_ring->cq_id = i;
5667*bafec742SSukumar Swaminathan 		if (i != 0)
5668*bafec742SSukumar Swaminathan 			rx_ring->cpu = i % qlge->rx_ring_count;
5669*bafec742SSukumar Swaminathan 		else
5670*bafec742SSukumar Swaminathan 			rx_ring->cpu = 0;
5671*bafec742SSukumar Swaminathan 
5672*bafec742SSukumar Swaminathan 		if (i < qlge->rss_ring_count) {
5673*bafec742SSukumar Swaminathan 			/*
			 * Inbound completion (RSS) queues.
			 * The default queue is queue 0, which handles
			 * unicast plus broadcast/multicast frames and
			 * async events.  Other inbound queues handle
			 * unicast frames only.
5678*bafec742SSukumar Swaminathan 			 */
5679*bafec742SSukumar Swaminathan 			rx_ring->cq_len = qlge->rx_ring_size;
5680*bafec742SSukumar Swaminathan 			rx_ring->cq_size = (uint32_t)
5681*bafec742SSukumar Swaminathan 			    (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
5682*bafec742SSukumar Swaminathan 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
5683*bafec742SSukumar Swaminathan 			rx_ring->lbq_size = (uint32_t)
5684*bafec742SSukumar Swaminathan 			    (rx_ring->lbq_len * sizeof (uint64_t));
5685*bafec742SSukumar Swaminathan 			rx_ring->lbq_buf_size = lbq_buf_size;
5686*bafec742SSukumar Swaminathan 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
5687*bafec742SSukumar Swaminathan 			rx_ring->sbq_size = (uint32_t)
5688*bafec742SSukumar Swaminathan 			    (rx_ring->sbq_len * sizeof (uint64_t));
5689*bafec742SSukumar Swaminathan 			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
5690*bafec742SSukumar Swaminathan 			rx_ring->type = RX_Q;
5691*bafec742SSukumar Swaminathan 
5692*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_GLD,
5693*bafec742SSukumar Swaminathan 			    ("%s(%d)Allocating rss completion queue %d "
5694*bafec742SSukumar Swaminathan 			    "on cpu %d\n", __func__, qlge->instance,
5695*bafec742SSukumar Swaminathan 			    rx_ring->cq_id, rx_ring->cpu));
5696*bafec742SSukumar Swaminathan 		} else {
			/*
			 * Outbound queues handle outbound completions
			 * only; each outbound cq is the same size as
			 * the tx_ring it services.
			 */
5701*bafec742SSukumar Swaminathan 			rx_ring->cq_len = qlge->tx_ring_size;
5702*bafec742SSukumar Swaminathan 			rx_ring->cq_size = (uint32_t)
5703*bafec742SSukumar Swaminathan 			    (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
5704*bafec742SSukumar Swaminathan 			rx_ring->lbq_len = 0;
5705*bafec742SSukumar Swaminathan 			rx_ring->lbq_size = 0;
5706*bafec742SSukumar Swaminathan 			rx_ring->lbq_buf_size = 0;
5707*bafec742SSukumar Swaminathan 			rx_ring->sbq_len = 0;
5708*bafec742SSukumar Swaminathan 			rx_ring->sbq_size = 0;
5709*bafec742SSukumar Swaminathan 			rx_ring->sbq_buf_size = 0;
5710*bafec742SSukumar Swaminathan 			rx_ring->type = TX_Q;
5711*bafec742SSukumar Swaminathan 
5712*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_GLD,
5713*bafec742SSukumar Swaminathan 			    ("%s(%d)Allocating TX completion queue %d on"
5714*bafec742SSukumar Swaminathan 			    " cpu %d\n", __func__, qlge->instance,
5715*bafec742SSukumar Swaminathan 			    rx_ring->cq_id, rx_ring->cpu));
5716*bafec742SSukumar Swaminathan 		}
5717*bafec742SSukumar Swaminathan 	}
5718*bafec742SSukumar Swaminathan 
5719*bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
5720*bafec742SSukumar Swaminathan }
5721*bafec742SSukumar Swaminathan 
5722*bafec742SSukumar Swaminathan static int
5723*bafec742SSukumar Swaminathan ql_start_rx_ring(qlge_t *qlge, struct rx_ring *rx_ring)
5724*bafec742SSukumar Swaminathan {
5725*bafec742SSukumar Swaminathan 	struct cqicb_t *cqicb = (struct cqicb_t *)rx_ring->cqicb_dma.vaddr;
5726*bafec742SSukumar Swaminathan 	void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
5727*bafec742SSukumar Swaminathan 	    (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
5728*bafec742SSukumar Swaminathan 	/* first shadow area is used by wqicb's host copy of consumer index */
5729*bafec742SSukumar Swaminathan 	    + sizeof (uint64_t);
5730*bafec742SSukumar Swaminathan 	uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
5731*bafec742SSukumar Swaminathan 	    (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
5732*bafec742SSukumar Swaminathan 	    + sizeof (uint64_t);
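	/*
	 * Offset sketch, assuming each queue owns RX_TX_RING_SHADOW_SPACE
	 * 8-byte slots: if that constant were 2, then for cq_id = 2 the
	 * producer-index shadow would sit at byte 2 * 8 * 2 + 8 = 40 from
	 * the shadow base, the final + 8 skipping the wqicb consumer-index
	 * copy in the first slot.
	 */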
5733*bafec742SSukumar Swaminathan 	/* lrg/sml bufq pointers */
5734*bafec742SSukumar Swaminathan 	uint8_t *buf_q_base_reg =
5735*bafec742SSukumar Swaminathan 	    (uint8_t *)qlge->buf_q_ptr_base_addr_dma_attr.vaddr +
5736*bafec742SSukumar Swaminathan 	    (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
5737*bafec742SSukumar Swaminathan 	uint64_t buf_q_base_reg_dma =
5738*bafec742SSukumar Swaminathan 	    qlge->buf_q_ptr_base_addr_dma_attr.dma_addr +
5739*bafec742SSukumar Swaminathan 	    (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
5740*bafec742SSukumar Swaminathan 	caddr_t doorbell_area =
5741*bafec742SSukumar Swaminathan 	    qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * (128 + rx_ring->cq_id));
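	/*
	 * Doorbell layout as implied here and in ql_start_tx_ring: each
	 * queue owns one VM_PAGE_SIZE doorbell page, with work (tx)
	 * queues apparently occupying the first 128 pages and completion
	 * queue cq_id using page 128 + cq_id.
	 */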
5742*bafec742SSukumar Swaminathan 	int err = 0;
5743*bafec742SSukumar Swaminathan 	uint16_t bq_len;
5744*bafec742SSukumar Swaminathan 	uint64_t tmp;
5745*bafec742SSukumar Swaminathan 	uint64_t *base_indirect_ptr;
5746*bafec742SSukumar Swaminathan 	int page_entries;
5747*bafec742SSukumar Swaminathan 
5748*bafec742SSukumar Swaminathan 	/* Set up the shadow registers for this ring. */
5749*bafec742SSukumar Swaminathan 	rx_ring->prod_idx_sh_reg = shadow_reg;
5750*bafec742SSukumar Swaminathan 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
5751*bafec742SSukumar Swaminathan 
5752*bafec742SSukumar Swaminathan 	rx_ring->lbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
5753*bafec742SSukumar Swaminathan 	rx_ring->lbq_base_indirect_dma = buf_q_base_reg_dma;
5754*bafec742SSukumar Swaminathan 
5755*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("%s rx ring(%d): prod_idx virtual addr = 0x%lx,"
5756*bafec742SSukumar Swaminathan 	    " phys_addr 0x%lx\n", __func__, rx_ring->cq_id,
5757*bafec742SSukumar Swaminathan 	    rx_ring->prod_idx_sh_reg, rx_ring->prod_idx_sh_reg_dma));
5758*bafec742SSukumar Swaminathan 
5759*bafec742SSukumar Swaminathan 	buf_q_base_reg += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
5760*bafec742SSukumar Swaminathan 	buf_q_base_reg_dma += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
5761*bafec742SSukumar Swaminathan 	rx_ring->sbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
5762*bafec742SSukumar Swaminathan 	rx_ring->sbq_base_indirect_dma = buf_q_base_reg_dma;
5763*bafec742SSukumar Swaminathan 
5764*bafec742SSukumar Swaminathan 	/* PCI doorbell mem area + 0x00 for consumer index register */
5765*bafec742SSukumar Swaminathan 	rx_ring->cnsmr_idx_db_reg = (uint32_t *)(void *)doorbell_area;
5766*bafec742SSukumar Swaminathan 	rx_ring->cnsmr_idx = 0;
5767*bafec742SSukumar Swaminathan 	*rx_ring->prod_idx_sh_reg = 0;
5768*bafec742SSukumar Swaminathan 	rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
5769*bafec742SSukumar Swaminathan 
5770*bafec742SSukumar Swaminathan 	/* PCI doorbell mem area + 0x04 for valid register */
5771*bafec742SSukumar Swaminathan 	rx_ring->valid_db_reg = (uint32_t *)(void *)
5772*bafec742SSukumar Swaminathan 	    ((uint8_t *)(void *)doorbell_area + 0x04);
5773*bafec742SSukumar Swaminathan 
5774*bafec742SSukumar Swaminathan 	/* PCI doorbell mem area + 0x18 for large buffer consumer */
5775*bafec742SSukumar Swaminathan 	rx_ring->lbq_prod_idx_db_reg = (uint32_t *)(void *)
5776*bafec742SSukumar Swaminathan 	    ((uint8_t *)(void *)doorbell_area + 0x18);
5777*bafec742SSukumar Swaminathan 
5778*bafec742SSukumar Swaminathan 	/* PCI doorbell mem area + 0x1c */
5779*bafec742SSukumar Swaminathan 	rx_ring->sbq_prod_idx_db_reg = (uint32_t *)(void *)
5780*bafec742SSukumar Swaminathan 	    ((uint8_t *)(void *)doorbell_area + 0x1c);
5781*bafec742SSukumar Swaminathan 
5782*bafec742SSukumar Swaminathan 	bzero((void *)cqicb, sizeof (*cqicb));
5783*bafec742SSukumar Swaminathan 
5784*bafec742SSukumar Swaminathan 	cqicb->msix_vect = (uint8_t)rx_ring->irq;
5785*bafec742SSukumar Swaminathan 
5786*bafec742SSukumar Swaminathan 	bq_len = (uint16_t)((rx_ring->cq_len == 65536) ?
5787*bafec742SSukumar Swaminathan 	    (uint16_t)0 : (uint16_t)rx_ring->cq_len);
5788*bafec742SSukumar Swaminathan 	cqicb->len = (uint16_t)cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
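	/*
	 * The cq/bq length fields are 16 bits wide, so a configured
	 * length of 65536 is encoded as 0 (the natural 16-bit wrap);
	 * the same encoding is applied to the lbq/sbq lengths and
	 * buffer sizes below.
	 */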
5789*bafec742SSukumar Swaminathan 
5790*bafec742SSukumar Swaminathan 	cqicb->cq_base_addr_lo =
5791*bafec742SSukumar Swaminathan 	    cpu_to_le32(LS_64BITS(rx_ring->cq_dma.dma_addr));
5792*bafec742SSukumar Swaminathan 	cqicb->cq_base_addr_hi =
5793*bafec742SSukumar Swaminathan 	    cpu_to_le32(MS_64BITS(rx_ring->cq_dma.dma_addr));
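	/*
	 * LS_64BITS/MS_64BITS split the 64-bit DMA address into 32-bit
	 * halves; e.g. 0x0000000123456789 loads cq_base_addr_lo =
	 * 0x23456789 and cq_base_addr_hi = 0x00000001.
	 */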
5794*bafec742SSukumar Swaminathan 
5795*bafec742SSukumar Swaminathan 	cqicb->prod_idx_addr_lo =
5796*bafec742SSukumar Swaminathan 	    cpu_to_le32(LS_64BITS(rx_ring->prod_idx_sh_reg_dma));
5797*bafec742SSukumar Swaminathan 	cqicb->prod_idx_addr_hi =
5798*bafec742SSukumar Swaminathan 	    cpu_to_le32(MS_64BITS(rx_ring->prod_idx_sh_reg_dma));
5799*bafec742SSukumar Swaminathan 
5800*bafec742SSukumar Swaminathan 	/*
5801*bafec742SSukumar Swaminathan 	 * Set up the control block load flags.
5802*bafec742SSukumar Swaminathan 	 */
5803*bafec742SSukumar Swaminathan 	cqicb->flags = FLAGS_LC | /* Load queue base address */
5804*bafec742SSukumar Swaminathan 	    FLAGS_LV | /* Load MSI-X vector */
5805*bafec742SSukumar Swaminathan 	    FLAGS_LI;  /* Load irq delay values */
5806*bafec742SSukumar Swaminathan 	if (rx_ring->lbq_len) {
5807*bafec742SSukumar Swaminathan 		/* Load lbq values */
5808*bafec742SSukumar Swaminathan 		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LL);
5809*bafec742SSukumar Swaminathan 		tmp = (uint64_t)rx_ring->lbq_dma.dma_addr;
5810*bafec742SSukumar Swaminathan 		base_indirect_ptr = (uint64_t *)rx_ring->lbq_base_indirect;
5811*bafec742SSukumar Swaminathan 		page_entries = 0;
5812*bafec742SSukumar Swaminathan 		do {
5813*bafec742SSukumar Swaminathan 			*base_indirect_ptr = cpu_to_le64(tmp);
5814*bafec742SSukumar Swaminathan 			tmp += VM_PAGE_SIZE;
5815*bafec742SSukumar Swaminathan 			base_indirect_ptr++;
5816*bafec742SSukumar Swaminathan 			page_entries++;
5817*bafec742SSukumar Swaminathan 		} while (page_entries < (int)(
5818*bafec742SSukumar Swaminathan 		    ((rx_ring->lbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
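		/*
		 * Example, assuming VM_PAGE_SIZE = 4096: an lbq_len of
		 * 4096 entries of 8 bytes each spans 4096 * 8 / 4096 = 8
		 * pages, so the loop above writes 8 page addresses into
		 * the indirect list.
		 */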
5819*bafec742SSukumar Swaminathan 
5820*bafec742SSukumar Swaminathan 		cqicb->lbq_addr_lo =
5821*bafec742SSukumar Swaminathan 		    cpu_to_le32(LS_64BITS(rx_ring->lbq_base_indirect_dma));
5822*bafec742SSukumar Swaminathan 		cqicb->lbq_addr_hi =
5823*bafec742SSukumar Swaminathan 		    cpu_to_le32(MS_64BITS(rx_ring->lbq_base_indirect_dma));
5824*bafec742SSukumar Swaminathan 		bq_len = (uint16_t)((rx_ring->lbq_buf_size == 65536) ?
5825*bafec742SSukumar Swaminathan 		    (uint16_t)0 : (uint16_t)rx_ring->lbq_buf_size);
5826*bafec742SSukumar Swaminathan 		cqicb->lbq_buf_size = (uint16_t)cpu_to_le16(bq_len);
5827*bafec742SSukumar Swaminathan 		bq_len = (uint16_t)((rx_ring->lbq_len == 65536) ? (uint16_t)0 :
5828*bafec742SSukumar Swaminathan 		    (uint16_t)rx_ring->lbq_len);
5829*bafec742SSukumar Swaminathan 		cqicb->lbq_len = (uint16_t)cpu_to_le16(bq_len);
5830*bafec742SSukumar Swaminathan 		rx_ring->lbq_prod_idx = 0;
5831*bafec742SSukumar Swaminathan 		rx_ring->lbq_curr_idx = 0;
5832*bafec742SSukumar Swaminathan 	}
5833*bafec742SSukumar Swaminathan 	if (rx_ring->sbq_len) {
5834*bafec742SSukumar Swaminathan 		/* Load sbq values */
5835*bafec742SSukumar Swaminathan 		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LS);
5836*bafec742SSukumar Swaminathan 		tmp = (uint64_t)rx_ring->sbq_dma.dma_addr;
5837*bafec742SSukumar Swaminathan 		base_indirect_ptr = (uint64_t *)rx_ring->sbq_base_indirect;
5838*bafec742SSukumar Swaminathan 		page_entries = 0;
5839*bafec742SSukumar Swaminathan 
5840*bafec742SSukumar Swaminathan 		do {
5841*bafec742SSukumar Swaminathan 			*base_indirect_ptr = cpu_to_le64(tmp);
5842*bafec742SSukumar Swaminathan 			tmp += VM_PAGE_SIZE;
5843*bafec742SSukumar Swaminathan 			base_indirect_ptr++;
5844*bafec742SSukumar Swaminathan 			page_entries++;
5845*bafec742SSukumar Swaminathan 		} while (page_entries < (uint32_t)
5846*bafec742SSukumar Swaminathan 		    (((rx_ring->sbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
5847*bafec742SSukumar Swaminathan 
5848*bafec742SSukumar Swaminathan 		cqicb->sbq_addr_lo =
5849*bafec742SSukumar Swaminathan 		    cpu_to_le32(LS_64BITS(rx_ring->sbq_base_indirect_dma));
5850*bafec742SSukumar Swaminathan 		cqicb->sbq_addr_hi =
5851*bafec742SSukumar Swaminathan 		    cpu_to_le32(MS_64BITS(rx_ring->sbq_base_indirect_dma));
5852*bafec742SSukumar Swaminathan 		cqicb->sbq_buf_size = (uint16_t)
5853*bafec742SSukumar Swaminathan 		    cpu_to_le16((uint16_t)(rx_ring->sbq_buf_size/2));
5854*bafec742SSukumar Swaminathan 		bq_len = (uint16_t)((rx_ring->sbq_len == 65536) ?
5855*bafec742SSukumar Swaminathan 		    (uint16_t)0 : (uint16_t)rx_ring->sbq_len);
5856*bafec742SSukumar Swaminathan 		cqicb->sbq_len = (uint16_t)cpu_to_le16(bq_len);
5857*bafec742SSukumar Swaminathan 		rx_ring->sbq_prod_idx = 0;
5858*bafec742SSukumar Swaminathan 		rx_ring->sbq_curr_idx = 0;
5859*bafec742SSukumar Swaminathan 	}
5860*bafec742SSukumar Swaminathan 	switch (rx_ring->type) {
5861*bafec742SSukumar Swaminathan 	case TX_Q:
5862*bafec742SSukumar Swaminathan 		cqicb->irq_delay = (uint16_t)
5863*bafec742SSukumar Swaminathan 		    cpu_to_le16(qlge->tx_coalesce_usecs);
5864*bafec742SSukumar Swaminathan 		cqicb->pkt_delay = (uint16_t)
5865*bafec742SSukumar Swaminathan 		    cpu_to_le16(qlge->tx_max_coalesced_frames);
5866*bafec742SSukumar Swaminathan 		break;
5867*bafec742SSukumar Swaminathan 
5868*bafec742SSukumar Swaminathan 	case DEFAULT_Q:
5869*bafec742SSukumar Swaminathan 		cqicb->irq_delay = 0;
5870*bafec742SSukumar Swaminathan 		cqicb->pkt_delay = 0;
5871*bafec742SSukumar Swaminathan 		break;
5872*bafec742SSukumar Swaminathan 
5873*bafec742SSukumar Swaminathan 	case RX_Q:
5874*bafec742SSukumar Swaminathan 		/*
		 * Inbound completion handling rx_rings each run in
		 * their own interrupt context.
5877*bafec742SSukumar Swaminathan 		 */
5878*bafec742SSukumar Swaminathan 		cqicb->irq_delay = (uint16_t)
5879*bafec742SSukumar Swaminathan 		    cpu_to_le16(qlge->rx_coalesce_usecs);
5880*bafec742SSukumar Swaminathan 		cqicb->pkt_delay = (uint16_t)
5881*bafec742SSukumar Swaminathan 		    cpu_to_le16(qlge->rx_max_coalesced_frames);
5882*bafec742SSukumar Swaminathan 		break;
5883*bafec742SSukumar Swaminathan 	default:
5884*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Invalid rx_ring->type = %d.",
5885*bafec742SSukumar Swaminathan 		    rx_ring->type);
5886*bafec742SSukumar Swaminathan 	}
5887*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("Initializing rx completion queue %d.\n",
5888*bafec742SSukumar Swaminathan 	    rx_ring->cq_id));
5889*bafec742SSukumar Swaminathan 	/* QL_DUMP_CQICB(qlge, cqicb); */
5890*bafec742SSukumar Swaminathan 	err = ql_write_cfg(qlge, CFG_LCQ, rx_ring->cqicb_dma.dma_addr,
5891*bafec742SSukumar Swaminathan 	    rx_ring->cq_id);
5892*bafec742SSukumar Swaminathan 	if (err) {
5893*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Failed to load CQICB.");
5894*bafec742SSukumar Swaminathan 		return (err);
5895*bafec742SSukumar Swaminathan 	}
5896*bafec742SSukumar Swaminathan 
5897*bafec742SSukumar Swaminathan 	rx_ring->rx_packets_dropped_no_buffer = 0;
5898*bafec742SSukumar Swaminathan 	rx_ring->rx_pkt_dropped_mac_unenabled = 0;
5899*bafec742SSukumar Swaminathan 	rx_ring->rx_failed_sbq_allocs = 0;
5900*bafec742SSukumar Swaminathan 	rx_ring->rx_failed_lbq_allocs = 0;
5901*bafec742SSukumar Swaminathan 	rx_ring->rx_packets = 0;
5902*bafec742SSukumar Swaminathan 	rx_ring->rx_bytes = 0;
5903*bafec742SSukumar Swaminathan 	rx_ring->frame_too_long = 0;
5904*bafec742SSukumar Swaminathan 	rx_ring->frame_too_short = 0;
5905*bafec742SSukumar Swaminathan 	rx_ring->fcs_err = 0;
5906*bafec742SSukumar Swaminathan 
5907*bafec742SSukumar Swaminathan 	return (err);
5908*bafec742SSukumar Swaminathan }
5909*bafec742SSukumar Swaminathan 
5910*bafec742SSukumar Swaminathan /*
 * Start RSS: load the RICB (hash keys and indirection table) into the chip.
5912*bafec742SSukumar Swaminathan  */
5913*bafec742SSukumar Swaminathan static int
5914*bafec742SSukumar Swaminathan ql_start_rss(qlge_t *qlge)
5915*bafec742SSukumar Swaminathan {
5916*bafec742SSukumar Swaminathan 	struct ricb *ricb = (struct ricb *)qlge->ricb_dma.vaddr;
5917*bafec742SSukumar Swaminathan 	int status = 0;
5918*bafec742SSukumar Swaminathan 	int i;
5919*bafec742SSukumar Swaminathan 	uint8_t *hash_id = (uint8_t *)ricb->hash_cq_id;
5920*bafec742SSukumar Swaminathan 
5921*bafec742SSukumar Swaminathan 	bzero((void *)ricb, sizeof (*ricb));
5922*bafec742SSukumar Swaminathan 
5923*bafec742SSukumar Swaminathan 	ricb->base_cq = RSS_L4K;
5924*bafec742SSukumar Swaminathan 	ricb->flags =
5925*bafec742SSukumar Swaminathan 	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
5926*bafec742SSukumar Swaminathan 	    RSS_RT6);
5927*bafec742SSukumar Swaminathan 	ricb->mask = (uint16_t)cpu_to_le16(RSS_HASH_CQ_ID_MAX - 1);
5928*bafec742SSukumar Swaminathan 
5929*bafec742SSukumar Swaminathan 	/*
5930*bafec742SSukumar Swaminathan 	 * Fill out the Indirection Table.
5931*bafec742SSukumar Swaminathan 	 */
5932*bafec742SSukumar Swaminathan 	for (i = 0; i < RSS_HASH_CQ_ID_MAX; i++)
5933*bafec742SSukumar Swaminathan 		hash_id[i] = (uint8_t)(i & (qlge->rss_ring_count - 1));
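	/*
	 * Example: with rss_ring_count = 4 the table reads 0, 1, 2, 3,
	 * 0, 1, 2, 3, ... spreading hash hits round-robin over the
	 * inbound queues; the (count - 1) mask assumes rss_ring_count
	 * is a power of two.
	 */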
5934*bafec742SSukumar Swaminathan 
5935*bafec742SSukumar Swaminathan 	(void) memcpy(&ricb->ipv6_hash_key[0], key_data, 40);
5936*bafec742SSukumar Swaminathan 	(void) memcpy(&ricb->ipv4_hash_key[0], key_data, 16);
5937*bafec742SSukumar Swaminathan 
5938*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("Initializing RSS.\n"));
5939*bafec742SSukumar Swaminathan 
5940*bafec742SSukumar Swaminathan 	status = ql_write_cfg(qlge, CFG_LR, qlge->ricb_dma.dma_addr, 0);
5941*bafec742SSukumar Swaminathan 	if (status) {
5942*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Failed to load RICB.");
5943*bafec742SSukumar Swaminathan 		return (status);
5944*bafec742SSukumar Swaminathan 	}
5945*bafec742SSukumar Swaminathan 
5946*bafec742SSukumar Swaminathan 	return (status);
5947*bafec742SSukumar Swaminathan }
5948*bafec742SSukumar Swaminathan 
5949*bafec742SSukumar Swaminathan /*
 * Load a tx ring control block (wqicb) into the hardware and start the ring.
5951*bafec742SSukumar Swaminathan  */
5952*bafec742SSukumar Swaminathan static int
5953*bafec742SSukumar Swaminathan ql_start_tx_ring(qlge_t *qlge, struct tx_ring *tx_ring)
5954*bafec742SSukumar Swaminathan {
5955*bafec742SSukumar Swaminathan 	struct wqicb_t *wqicb = (struct wqicb_t *)tx_ring->wqicb_dma.vaddr;
5956*bafec742SSukumar Swaminathan 	caddr_t doorbell_area =
5957*bafec742SSukumar Swaminathan 	    qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * tx_ring->wq_id);
5958*bafec742SSukumar Swaminathan 	void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
5959*bafec742SSukumar Swaminathan 	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
5960*bafec742SSukumar Swaminathan 	uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
5961*bafec742SSukumar Swaminathan 	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
5962*bafec742SSukumar Swaminathan 	int err = 0;
5963*bafec742SSukumar Swaminathan 
5964*bafec742SSukumar Swaminathan 	/*
5965*bafec742SSukumar Swaminathan 	 * Assign doorbell registers for this tx_ring.
5966*bafec742SSukumar Swaminathan 	 */
5967*bafec742SSukumar Swaminathan 
5968*bafec742SSukumar Swaminathan 	/* TX PCI doorbell mem area for tx producer index */
5969*bafec742SSukumar Swaminathan 	tx_ring->prod_idx_db_reg = (uint32_t *)(void *)doorbell_area;
5970*bafec742SSukumar Swaminathan 	tx_ring->prod_idx = 0;
5971*bafec742SSukumar Swaminathan 	/* TX PCI doorbell mem area + 0x04 */
5972*bafec742SSukumar Swaminathan 	tx_ring->valid_db_reg = (uint32_t *)(void *)
5973*bafec742SSukumar Swaminathan 	    ((uint8_t *)(void *)doorbell_area + 0x04);
5974*bafec742SSukumar Swaminathan 
5975*bafec742SSukumar Swaminathan 	/*
5976*bafec742SSukumar Swaminathan 	 * Assign shadow registers for this tx_ring.
5977*bafec742SSukumar Swaminathan 	 */
5978*bafec742SSukumar Swaminathan 	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
5979*bafec742SSukumar Swaminathan 	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
5980*bafec742SSukumar Swaminathan 	*tx_ring->cnsmr_idx_sh_reg = 0;
5981*bafec742SSukumar Swaminathan 
5982*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("%s tx ring(%d): cnsmr_idx virtual addr = 0x%lx,"
5983*bafec742SSukumar Swaminathan 	    " phys_addr 0x%lx\n",
5984*bafec742SSukumar Swaminathan 	    __func__, tx_ring->wq_id, tx_ring->cnsmr_idx_sh_reg,
5985*bafec742SSukumar Swaminathan 	    tx_ring->cnsmr_idx_sh_reg_dma));
5986*bafec742SSukumar Swaminathan 
5987*bafec742SSukumar Swaminathan 	wqicb->len =
5988*bafec742SSukumar Swaminathan 	    (uint16_t)cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
5989*bafec742SSukumar Swaminathan 	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
5990*bafec742SSukumar Swaminathan 	    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
5991*bafec742SSukumar Swaminathan 	wqicb->cq_id_rss = (uint16_t)cpu_to_le16(tx_ring->cq_id);
5992*bafec742SSukumar Swaminathan 	wqicb->rid = 0;
5993*bafec742SSukumar Swaminathan 	wqicb->wq_addr_lo = cpu_to_le32(LS_64BITS(tx_ring->wq_dma.dma_addr));
5994*bafec742SSukumar Swaminathan 	wqicb->wq_addr_hi = cpu_to_le32(MS_64BITS(tx_ring->wq_dma.dma_addr));
5995*bafec742SSukumar Swaminathan 	wqicb->cnsmr_idx_addr_lo =
5996*bafec742SSukumar Swaminathan 	    cpu_to_le32(LS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
5997*bafec742SSukumar Swaminathan 	wqicb->cnsmr_idx_addr_hi =
5998*bafec742SSukumar Swaminathan 	    cpu_to_le32(MS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
5999*bafec742SSukumar Swaminathan 
6000*bafec742SSukumar Swaminathan 	ql_init_tx_ring(tx_ring);
6001*bafec742SSukumar Swaminathan 	/* QL_DUMP_WQICB(qlge, wqicb); */
6002*bafec742SSukumar Swaminathan 	err = ql_write_cfg(qlge, CFG_LRQ, tx_ring->wqicb_dma.dma_addr,
6003*bafec742SSukumar Swaminathan 	    tx_ring->wq_id);
6004*bafec742SSukumar Swaminathan 
6005*bafec742SSukumar Swaminathan 	if (err) {
6006*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Failed to load WQICB.");
6007*bafec742SSukumar Swaminathan 		return (err);
6008*bafec742SSukumar Swaminathan 	}
6009*bafec742SSukumar Swaminathan 	return (err);
6010*bafec742SSukumar Swaminathan }
6011*bafec742SSukumar Swaminathan 
6012*bafec742SSukumar Swaminathan /*
 * Set up a MAC, multicast, or VLAN address for
 * inbound frame matching.
6015*bafec742SSukumar Swaminathan  */
6016*bafec742SSukumar Swaminathan int
6017*bafec742SSukumar Swaminathan ql_set_mac_addr_reg(qlge_t *qlge, uint8_t *addr, uint32_t type,
6018*bafec742SSukumar Swaminathan     uint16_t index)
6019*bafec742SSukumar Swaminathan {
6020*bafec742SSukumar Swaminathan 	uint32_t offset = 0;
6021*bafec742SSukumar Swaminathan 	int status = DDI_SUCCESS;
6022*bafec742SSukumar Swaminathan 
6023*bafec742SSukumar Swaminathan 	switch (type) {
6024*bafec742SSukumar Swaminathan 	case MAC_ADDR_TYPE_MULTI_MAC:
6025*bafec742SSukumar Swaminathan 	case MAC_ADDR_TYPE_CAM_MAC: {
6026*bafec742SSukumar Swaminathan 		uint32_t cam_output;
6027*bafec742SSukumar Swaminathan 		uint32_t upper = (addr[0] << 8) | addr[1];
6028*bafec742SSukumar Swaminathan 		uint32_t lower =
6029*bafec742SSukumar Swaminathan 		    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
6030*bafec742SSukumar Swaminathan 		    (addr[5]);
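		/*
		 * Example: for MAC address 00:c0:dd:11:22:33 this
		 * yields upper = 0x00c0 and lower = 0xdd112233, the
		 * two CAM data words written below.
		 */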
6031*bafec742SSukumar Swaminathan 
6032*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Adding %s ", (type ==
6033*bafec742SSukumar Swaminathan 		    MAC_ADDR_TYPE_MULTI_MAC) ?
6034*bafec742SSukumar Swaminathan 		    "MULTICAST" : "UNICAST"));
6035*bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT,
6036*bafec742SSukumar Swaminathan 		    ("addr %02x %02x %02x %02x %02x %02x at index %d in "
6037*bafec742SSukumar Swaminathan 		    "the CAM.\n",
6038*bafec742SSukumar Swaminathan 		    addr[0], addr[1], addr[2], addr[3], addr[4],
6039*bafec742SSukumar Swaminathan 		    addr[5], index));
6040*bafec742SSukumar Swaminathan 
6041*bafec742SSukumar Swaminathan 		status = ql_wait_reg_rdy(qlge,
6042*bafec742SSukumar Swaminathan 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6043*bafec742SSukumar Swaminathan 		if (status)
6044*bafec742SSukumar Swaminathan 			goto exit;
6045*bafec742SSukumar Swaminathan 		/* offset 0 - lower 32 bits of the MAC address */
6046*bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6047*bafec742SSukumar Swaminathan 		    (offset++) |
6048*bafec742SSukumar Swaminathan 		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
6049*bafec742SSukumar Swaminathan 		    type);	/* type */
6050*bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, lower);
6051*bafec742SSukumar Swaminathan 		status = ql_wait_reg_rdy(qlge,
6052*bafec742SSukumar Swaminathan 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6053*bafec742SSukumar Swaminathan 		if (status)
6054*bafec742SSukumar Swaminathan 			goto exit;
6055*bafec742SSukumar Swaminathan 		/* offset 1 - upper 16 bits of the MAC address */
6056*bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6057*bafec742SSukumar Swaminathan 		    (offset++) |
6058*bafec742SSukumar Swaminathan 		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
6059*bafec742SSukumar Swaminathan 		    type);	/* type */
6060*bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, upper);
6061*bafec742SSukumar Swaminathan 		status = ql_wait_reg_rdy(qlge,
6062*bafec742SSukumar Swaminathan 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6063*bafec742SSukumar Swaminathan 		if (status)
6064*bafec742SSukumar Swaminathan 			goto exit;
6065*bafec742SSukumar Swaminathan 		/* offset 2 - CQ ID associated with this MAC address */
6066*bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6067*bafec742SSukumar Swaminathan 		    (offset) | (index << MAC_ADDR_IDX_SHIFT) |	/* index */
6068*bafec742SSukumar Swaminathan 		    type);	/* type */
6069*bafec742SSukumar Swaminathan 		/*
6070*bafec742SSukumar Swaminathan 		 * This field should also include the queue id
6071*bafec742SSukumar Swaminathan 		 * and possibly the function id.  Right now we hardcode
6072*bafec742SSukumar Swaminathan 		 * the route field to NIC core.
6073*bafec742SSukumar Swaminathan 		 */
6074*bafec742SSukumar Swaminathan 		if (type == MAC_ADDR_TYPE_CAM_MAC) {
6075*bafec742SSukumar Swaminathan 			cam_output = (CAM_OUT_ROUTE_NIC |
6076*bafec742SSukumar Swaminathan 			    (qlge->func_number << CAM_OUT_FUNC_SHIFT) |
			    (0 << CAM_OUT_CQ_ID_SHIFT));
6079*bafec742SSukumar Swaminathan 
6080*bafec742SSukumar Swaminathan 			/* route to NIC core */
6081*bafec742SSukumar Swaminathan 			ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA,
6082*bafec742SSukumar Swaminathan 			    cam_output);
		}
		break;
	}
6086*bafec742SSukumar Swaminathan 	default:
6087*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
6088*bafec742SSukumar Swaminathan 		    "Address type %d not yet supported.", type);
6089*bafec742SSukumar Swaminathan 		status = DDI_FAILURE;
6090*bafec742SSukumar Swaminathan 	}
6091*bafec742SSukumar Swaminathan exit:
6092*bafec742SSukumar Swaminathan 	return (status);
6093*bafec742SSukumar Swaminathan }
6094*bafec742SSukumar Swaminathan 
6095*bafec742SSukumar Swaminathan /*
6096*bafec742SSukumar Swaminathan  * The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send
 * broadcast/multicast/error frames to the default queue for slow handling
 * and CAM-hit/RSS frames to the fast-handling queues.
6100*bafec742SSukumar Swaminathan  */
6101*bafec742SSukumar Swaminathan static int
6102*bafec742SSukumar Swaminathan ql_set_routing_reg(qlge_t *qlge, uint32_t index, uint32_t mask, int enable)
6103*bafec742SSukumar Swaminathan {
6104*bafec742SSukumar Swaminathan 	int status;
6105*bafec742SSukumar Swaminathan 	uint32_t value = 0;
6106*bafec742SSukumar Swaminathan 
6107*bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT,
6108*bafec742SSukumar Swaminathan 	    ("%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
6109*bafec742SSukumar Swaminathan 	    (enable ? "Adding" : "Removing"),
6110*bafec742SSukumar Swaminathan 	    ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
6111*bafec742SSukumar Swaminathan 	    ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
6112*bafec742SSukumar Swaminathan 	    ((index ==
6113*bafec742SSukumar Swaminathan 	    RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
6114*bafec742SSukumar Swaminathan 	    ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
6115*bafec742SSukumar Swaminathan 	    ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
6116*bafec742SSukumar Swaminathan 	    ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
6117*bafec742SSukumar Swaminathan 	    ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
6118*bafec742SSukumar Swaminathan 	    ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
6119*bafec742SSukumar Swaminathan 	    ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
6120*bafec742SSukumar Swaminathan 	    ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
6121*bafec742SSukumar Swaminathan 	    ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
6122*bafec742SSukumar Swaminathan 	    ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
6123*bafec742SSukumar Swaminathan 	    ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
6124*bafec742SSukumar Swaminathan 	    ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
6125*bafec742SSukumar Swaminathan 	    ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
6126*bafec742SSukumar Swaminathan 	    ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
6127*bafec742SSukumar Swaminathan 	    (enable ? "to" : "from")));
6128*bafec742SSukumar Swaminathan 
6129*bafec742SSukumar Swaminathan 	switch (mask) {
6130*bafec742SSukumar Swaminathan 	case RT_IDX_CAM_HIT:
6131*bafec742SSukumar Swaminathan 		value = RT_IDX_DST_CAM_Q | /* dest */
6132*bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ | /* type */
6133*bafec742SSukumar Swaminathan 		    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */
6134*bafec742SSukumar Swaminathan 		break;
6135*bafec742SSukumar Swaminathan 
6136*bafec742SSukumar Swaminathan 	case RT_IDX_VALID: /* Promiscuous Mode frames. */
6137*bafec742SSukumar Swaminathan 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6138*bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6139*bafec742SSukumar Swaminathan 		    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */
6140*bafec742SSukumar Swaminathan 		break;
6141*bafec742SSukumar Swaminathan 
6142*bafec742SSukumar Swaminathan 	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
6143*bafec742SSukumar Swaminathan 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6144*bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6145*bafec742SSukumar Swaminathan 		    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
6146*bafec742SSukumar Swaminathan 		break;
6147*bafec742SSukumar Swaminathan 
6148*bafec742SSukumar Swaminathan 	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
6149*bafec742SSukumar Swaminathan 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6150*bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6151*bafec742SSukumar Swaminathan 		    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */
6152*bafec742SSukumar Swaminathan 		break;
6153*bafec742SSukumar Swaminathan 
6154*bafec742SSukumar Swaminathan 	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
6155*bafec742SSukumar Swaminathan 		value = RT_IDX_DST_CAM_Q |	/* dest */
6156*bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6157*bafec742SSukumar Swaminathan 		    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */
6158*bafec742SSukumar Swaminathan 		break;
6159*bafec742SSukumar Swaminathan 
6160*bafec742SSukumar Swaminathan 	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
6161*bafec742SSukumar Swaminathan 		value = RT_IDX_DST_CAM_Q |	/* dest */
6162*bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6163*bafec742SSukumar Swaminathan 		    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6164*bafec742SSukumar Swaminathan 		break;
6165*bafec742SSukumar Swaminathan 
6166*bafec742SSukumar Swaminathan 	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
6167*bafec742SSukumar Swaminathan 		value = RT_IDX_DST_RSS |	/* dest */
6168*bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6169*bafec742SSukumar Swaminathan 		    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6170*bafec742SSukumar Swaminathan 		break;
6171*bafec742SSukumar Swaminathan 
6172*bafec742SSukumar Swaminathan 	case 0:	/* Clear the E-bit on an entry. */
6173*bafec742SSukumar Swaminathan 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6174*bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6175*bafec742SSukumar Swaminathan 		    (index << RT_IDX_IDX_SHIFT); /* index */
6176*bafec742SSukumar Swaminathan 		break;
6177*bafec742SSukumar Swaminathan 
6178*bafec742SSukumar Swaminathan 	default:
6179*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Mask type %d not yet supported.",
6180*bafec742SSukumar Swaminathan 		    mask);
6181*bafec742SSukumar Swaminathan 		status = -EPERM;
6182*bafec742SSukumar Swaminathan 		goto exit;
6183*bafec742SSukumar Swaminathan 	}
6184*bafec742SSukumar Swaminathan 
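	/*
	 * Each routing entry packs a destination queue, a queue type and
	 * a slot index.  The enable bit (RT_IDX_E) is OR'd into the index
	 * word below, and the mask itself is written to the data register
	 * so the slot matches the intended frame types.
	 */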
6185*bafec742SSukumar Swaminathan 	if (value != 0) {
6186*bafec742SSukumar Swaminathan 		status = ql_wait_reg_rdy(qlge, REG_ROUTING_INDEX, RT_IDX_MW, 0);
6187*bafec742SSukumar Swaminathan 		if (status)
6188*bafec742SSukumar Swaminathan 			goto exit;
6189*bafec742SSukumar Swaminathan 		value |= (enable ? RT_IDX_E : 0);
6190*bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_ROUTING_INDEX, value);
6191*bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_ROUTING_DATA, enable ? mask : 0);
6192*bafec742SSukumar Swaminathan 	}
6193*bafec742SSukumar Swaminathan 
6194*bafec742SSukumar Swaminathan exit:
6195*bafec742SSukumar Swaminathan 	return (status);
6196*bafec742SSukumar Swaminathan }
6197*bafec742SSukumar Swaminathan 
6198*bafec742SSukumar Swaminathan /*
6199*bafec742SSukumar Swaminathan  * Clear all the entries in the routing table.
 * Caller must hold the routing index semaphore (SEM_RT_IDX_MASK).
 */
6203*bafec742SSukumar Swaminathan static int
6204*bafec742SSukumar Swaminathan ql_stop_routing(qlge_t *qlge)
6205*bafec742SSukumar Swaminathan {
6206*bafec742SSukumar Swaminathan 	int status = 0;
6207*bafec742SSukumar Swaminathan 	int i;
6208*bafec742SSukumar Swaminathan 	/* Clear all the entries in the routing table. */
6209*bafec742SSukumar Swaminathan 	for (i = 0; i < 16; i++) {
6210*bafec742SSukumar Swaminathan 		status = ql_set_routing_reg(qlge, i, 0, 0);
6211*bafec742SSukumar Swaminathan 		if (status) {
			cmn_err(CE_WARN, "Stop routing failed at index %d.", i);
6213*bafec742SSukumar Swaminathan 		}
6214*bafec742SSukumar Swaminathan 	}
6215*bafec742SSukumar Swaminathan 	return (status);
6216*bafec742SSukumar Swaminathan }
6217*bafec742SSukumar Swaminathan 
6218*bafec742SSukumar Swaminathan /* Initialize the frame-to-queue routing. */
6219*bafec742SSukumar Swaminathan static int
6220*bafec742SSukumar Swaminathan ql_route_initialize(qlge_t *qlge)
6221*bafec742SSukumar Swaminathan {
6222*bafec742SSukumar Swaminathan 	int status = 0;
6223*bafec742SSukumar Swaminathan 
6224*bafec742SSukumar Swaminathan 	status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
6225*bafec742SSukumar Swaminathan 	if (status != DDI_SUCCESS)
6226*bafec742SSukumar Swaminathan 		return (status);
6227*bafec742SSukumar Swaminathan 
6228*bafec742SSukumar Swaminathan 	/* Clear all the entries in the routing table. */
6229*bafec742SSukumar Swaminathan 	status = ql_stop_routing(qlge);
6230*bafec742SSukumar Swaminathan 	if (status) {
6231*bafec742SSukumar Swaminathan 		goto exit;
6232*bafec742SSukumar Swaminathan 	}
6233*bafec742SSukumar Swaminathan 	status = ql_set_routing_reg(qlge, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
6234*bafec742SSukumar Swaminathan 	if (status) {
6235*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
6236*bafec742SSukumar Swaminathan 		    "Failed to init routing register for broadcast packets.");
6237*bafec742SSukumar Swaminathan 		goto exit;
6238*bafec742SSukumar Swaminathan 	}
6239*bafec742SSukumar Swaminathan 	/*
6240*bafec742SSukumar Swaminathan 	 * If we have more than one inbound queue, then turn on RSS in the
6241*bafec742SSukumar Swaminathan 	 * routing block.
6242*bafec742SSukumar Swaminathan 	 */
6243*bafec742SSukumar Swaminathan 	if (qlge->rss_ring_count > 1) {
6244*bafec742SSukumar Swaminathan 		status = ql_set_routing_reg(qlge, RT_IDX_RSS_MATCH_SLOT,
6245*bafec742SSukumar Swaminathan 		    RT_IDX_RSS_MATCH, 1);
6246*bafec742SSukumar Swaminathan 		if (status) {
6247*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
6248*bafec742SSukumar Swaminathan 			    "Failed to init routing register for MATCH RSS "
6249*bafec742SSukumar Swaminathan 			    "packets.");
6250*bafec742SSukumar Swaminathan 			goto exit;
6251*bafec742SSukumar Swaminathan 		}
6252*bafec742SSukumar Swaminathan 	}
6253*bafec742SSukumar Swaminathan 
6254*bafec742SSukumar Swaminathan 	status = ql_set_routing_reg(qlge, RT_IDX_CAM_HIT_SLOT,
6255*bafec742SSukumar Swaminathan 	    RT_IDX_CAM_HIT, 1);
6256*bafec742SSukumar Swaminathan 	if (status) {
6257*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
6258*bafec742SSukumar Swaminathan 		    "Failed to init routing register for CAM packets.");
6259*bafec742SSukumar Swaminathan 		goto exit;
6260*bafec742SSukumar Swaminathan 	}
6261*bafec742SSukumar Swaminathan 
6262*bafec742SSukumar Swaminathan 	status = ql_set_routing_reg(qlge, RT_IDX_MCAST_MATCH_SLOT,
6263*bafec742SSukumar Swaminathan 	    RT_IDX_MCAST_MATCH, 1);
6264*bafec742SSukumar Swaminathan 	if (status) {
6265*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
6266*bafec742SSukumar Swaminathan 		    "Failed to init routing register for Multicast "
6267*bafec742SSukumar Swaminathan 		    "packets.");
6268*bafec742SSukumar Swaminathan 	}
6269*bafec742SSukumar Swaminathan 
6270*bafec742SSukumar Swaminathan exit:
6271*bafec742SSukumar Swaminathan 	ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
6272*bafec742SSukumar Swaminathan 	return (status);
6273*bafec742SSukumar Swaminathan }
6274*bafec742SSukumar Swaminathan 
6275*bafec742SSukumar Swaminathan /*
6276*bafec742SSukumar Swaminathan  * Initialize hardware
6277*bafec742SSukumar Swaminathan  */
6278*bafec742SSukumar Swaminathan static int
6279*bafec742SSukumar Swaminathan ql_device_initialize(qlge_t *qlge)
6280*bafec742SSukumar Swaminathan {
6281*bafec742SSukumar Swaminathan 	uint32_t value, mask, required_max_frame_size;
6282*bafec742SSukumar Swaminathan 	int i;
6283*bafec742SSukumar Swaminathan 	int status = 0;
6284*bafec742SSukumar Swaminathan 	uint16_t pause = PAUSE_MODE_DISABLED;
6285*bafec742SSukumar Swaminathan 	boolean_t update_port_config = B_FALSE;
6286*bafec742SSukumar Swaminathan 	/*
6287*bafec742SSukumar Swaminathan 	 * Set up the System register to halt on errors.
6288*bafec742SSukumar Swaminathan 	 */
6289*bafec742SSukumar Swaminathan 	value = SYS_EFE | SYS_FAE;
6290*bafec742SSukumar Swaminathan 	mask = value << 16;
6291*bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_SYSTEM, mask | value);
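	/*
	 * Write convention used for these registers: the upper 16 bits
	 * of the written word appear to act as a per-bit write enable
	 * for the lower 16, so (value << 16) | value sets exactly the
	 * SYS_EFE and SYS_FAE bits without touching their neighbors.
	 */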
6292*bafec742SSukumar Swaminathan 
6293*bafec742SSukumar Swaminathan 	/* Set the default queue. */
6294*bafec742SSukumar Swaminathan 	value = NIC_RCV_CFG_DFQ;
6295*bafec742SSukumar Swaminathan 	mask = NIC_RCV_CFG_DFQ_MASK;
6296*bafec742SSukumar Swaminathan 
6297*bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_NIC_RECEIVE_CONFIGURATION, mask | value);
6298*bafec742SSukumar Swaminathan 
6299*bafec742SSukumar Swaminathan 	/* Enable the MPI interrupt. */
6300*bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_INTERRUPT_MASK, (INTR_MASK_PI << 16)
6301*bafec742SSukumar Swaminathan 	    | INTR_MASK_PI);
6302*bafec742SSukumar Swaminathan 	/* Enable the function, set pagesize, enable error checking. */
6303*bafec742SSukumar Swaminathan 	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
6304*bafec742SSukumar Swaminathan 	    FSC_EC | FSC_VM_PAGE_4K | FSC_DBRST_1024;
6305*bafec742SSukumar Swaminathan 	/* Set/clear header splitting. */
6306*bafec742SSukumar Swaminathan 	if (CFG_IST(qlge, CFG_ENABLE_SPLIT_HEADER)) {
6307*bafec742SSukumar Swaminathan 		value |= FSC_SH;
6308*bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_SPLIT_HEADER, SMALL_BUFFER_SIZE);
6309*bafec742SSukumar Swaminathan 	}
6310*bafec742SSukumar Swaminathan 	mask = FSC_VM_PAGESIZE_MASK |
6311*bafec742SSukumar Swaminathan 	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
6312*bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL, mask | value);
6313*bafec742SSukumar Swaminathan 	/*
 * Check the current port max frame size; if it differs from the OS
 * setting, the port configuration must be updated.
6316*bafec742SSukumar Swaminathan 	 */
6317*bafec742SSukumar Swaminathan 	required_max_frame_size =
6318*bafec742SSukumar Swaminathan 	    (qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE;
6319*bafec742SSukumar Swaminathan 
6320*bafec742SSukumar Swaminathan 	if (ql_get_port_cfg(qlge) == DDI_SUCCESS) {
		/* if the current frame size differs from the required size */
6322*bafec742SSukumar Swaminathan 		if (qlge->port_cfg_info.max_frame_size !=
6323*bafec742SSukumar Swaminathan 		    required_max_frame_size) {
6324*bafec742SSukumar Swaminathan 			QL_PRINT(DBG_MBX,
6325*bafec742SSukumar Swaminathan 			    ("update frame size, current %d, new %d\n",
6326*bafec742SSukumar Swaminathan 			    qlge->port_cfg_info.max_frame_size,
6327*bafec742SSukumar Swaminathan 			    required_max_frame_size));
6328*bafec742SSukumar Swaminathan 			qlge->port_cfg_info.max_frame_size =
6329*bafec742SSukumar Swaminathan 			    required_max_frame_size;
6330*bafec742SSukumar Swaminathan 			update_port_config = B_TRUE;
6331*bafec742SSukumar Swaminathan 		}
6332*bafec742SSukumar Swaminathan 		if (qlge->port_cfg_info.link_cfg & STD_PAUSE)
6333*bafec742SSukumar Swaminathan 			pause = PAUSE_MODE_STANDARD;
6334*bafec742SSukumar Swaminathan 		else if (qlge->port_cfg_info.link_cfg & PP_PAUSE)
6335*bafec742SSukumar Swaminathan 			pause = PAUSE_MODE_PER_PRIORITY;
6336*bafec742SSukumar Swaminathan 		if (pause != qlge->pause) {
6337*bafec742SSukumar Swaminathan 			update_port_config = B_TRUE;
6338*bafec742SSukumar Swaminathan 		}
6339*bafec742SSukumar Swaminathan 		/*
6340*bafec742SSukumar Swaminathan 		 * Always update port config for now to work around
6341*bafec742SSukumar Swaminathan 		 * a hardware bug
6342*bafec742SSukumar Swaminathan 		 */
6343*bafec742SSukumar Swaminathan 		update_port_config = B_TRUE;
6344*bafec742SSukumar Swaminathan 
		/* update the port configuration if needed */
6346*bafec742SSukumar Swaminathan 		if (update_port_config)
6347*bafec742SSukumar Swaminathan 			ql_set_port_cfg(qlge);
6348*bafec742SSukumar Swaminathan 	} else
6349*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "ql_get_port_cfg failed");
6350*bafec742SSukumar Swaminathan 
6351*bafec742SSukumar Swaminathan 	/* Start up the rx queues. */
6352*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
6353*bafec742SSukumar Swaminathan 		status = ql_start_rx_ring(qlge, &qlge->rx_ring[i]);
6354*bafec742SSukumar Swaminathan 		if (status) {
6355*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
6356*bafec742SSukumar Swaminathan 			    "Failed to start rx ring[%d]", i);
6357*bafec742SSukumar Swaminathan 			return (status);
6358*bafec742SSukumar Swaminathan 		}
6359*bafec742SSukumar Swaminathan 	}
6360*bafec742SSukumar Swaminathan 
6361*bafec742SSukumar Swaminathan 	/*
6362*bafec742SSukumar Swaminathan 	 * If there is more than one inbound completion queue
6363*bafec742SSukumar Swaminathan 	 * then download a RICB to configure RSS.
6364*bafec742SSukumar Swaminathan 	 */
6365*bafec742SSukumar Swaminathan 	if (qlge->rss_ring_count > 1) {
6366*bafec742SSukumar Swaminathan 		status = ql_start_rss(qlge);
6367*bafec742SSukumar Swaminathan 		if (status) {
6368*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Failed to start RSS.");
6369*bafec742SSukumar Swaminathan 			return (status);
6370*bafec742SSukumar Swaminathan 		}
6371*bafec742SSukumar Swaminathan 	}
6372*bafec742SSukumar Swaminathan 
6373*bafec742SSukumar Swaminathan 	/* Start up the tx queues. */
6374*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
6375*bafec742SSukumar Swaminathan 		status = ql_start_tx_ring(qlge, &qlge->tx_ring[i]);
6376*bafec742SSukumar Swaminathan 		if (status) {
6377*bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
6378*bafec742SSukumar Swaminathan 			    "Failed to start tx ring[%d]", i);
6379*bafec742SSukumar Swaminathan 			return (status);
6380*bafec742SSukumar Swaminathan 		}
6381*bafec742SSukumar Swaminathan 	}
6382*bafec742SSukumar Swaminathan 	qlge->selected_tx_ring = 0;
6383*bafec742SSukumar Swaminathan 	/* Set the frame routing filter. */
6384*bafec742SSukumar Swaminathan 	status = ql_route_initialize(qlge);
6385*bafec742SSukumar Swaminathan 	if (status) {
6386*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
6387*bafec742SSukumar Swaminathan 		    "Failed to init CAM/Routing tables.");
6388*bafec742SSukumar Swaminathan 		return (status);
6389*bafec742SSukumar Swaminathan 	}
6390*bafec742SSukumar Swaminathan 
6391*bafec742SSukumar Swaminathan 	return (status);
6392*bafec742SSukumar Swaminathan }
6393*bafec742SSukumar Swaminathan 
6394*bafec742SSukumar Swaminathan /*
6395*bafec742SSukumar Swaminathan  * Issue soft reset to chip.
6396*bafec742SSukumar Swaminathan  */
6397*bafec742SSukumar Swaminathan static int
6398*bafec742SSukumar Swaminathan ql_asic_reset(qlge_t *qlge)
6399*bafec742SSukumar Swaminathan {
6400*bafec742SSukumar Swaminathan 	uint32_t value;
6401*bafec742SSukumar Swaminathan 	int max_wait_time = 3;
6402*bafec742SSukumar Swaminathan 	int status = DDI_SUCCESS;
6403*bafec742SSukumar Swaminathan 
6404*bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_RESET_FAILOVER, FUNCTION_RESET_MASK
6405*bafec742SSukumar Swaminathan 	    |FUNCTION_RESET);
6406*bafec742SSukumar Swaminathan 
6407*bafec742SSukumar Swaminathan 	max_wait_time = 3;
6408*bafec742SSukumar Swaminathan 	do {
6409*bafec742SSukumar Swaminathan 		value =  ql_read_reg(qlge, REG_RESET_FAILOVER);
6410*bafec742SSukumar Swaminathan 		if ((value & FUNCTION_RESET) == 0)
6411*bafec742SSukumar Swaminathan 			break;
6412*bafec742SSukumar Swaminathan 		qlge_delay(QL_ONE_SEC_DELAY);
6413*bafec742SSukumar Swaminathan 	} while ((--max_wait_time));
6414*bafec742SSukumar Swaminathan 
6415*bafec742SSukumar Swaminathan 	if (max_wait_time == 0) {
6416*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
		    "Timed out waiting for the chip reset to complete!");
6418*bafec742SSukumar Swaminathan 		status = DDI_FAILURE;
6419*bafec742SSukumar Swaminathan 	}
6420*bafec742SSukumar Swaminathan 
6421*bafec742SSukumar Swaminathan 	return (status);
6422*bafec742SSukumar Swaminathan }
6423*bafec742SSukumar Swaminathan 
6424*bafec742SSukumar Swaminathan /*
 * Move small buffer descriptors from the free list to the in-use list
 * so that hardware can fill them: arm up to (sbq_len -
 * MIN_BUFFERS_ARM_COUNT) descriptors, rounding smaller batches down to
 * a multiple of 16.
6428*bafec742SSukumar Swaminathan  */
6429*bafec742SSukumar Swaminathan static void
6430*bafec742SSukumar Swaminathan ql_arm_sbuf(qlge_t *qlge, struct rx_ring *rx_ring)
6431*bafec742SSukumar Swaminathan {
6432*bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
6433*bafec742SSukumar Swaminathan 	int i;
6434*bafec742SSukumar Swaminathan 	uint64_t *sbq_entry = rx_ring->sbq_dma.vaddr;
6435*bafec742SSukumar Swaminathan 	uint32_t arm_count;
6436*bafec742SSukumar Swaminathan 
6437*bafec742SSukumar Swaminathan 	if (rx_ring->sbuf_free_count > rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT)
6438*bafec742SSukumar Swaminathan 		arm_count = (rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT);
6439*bafec742SSukumar Swaminathan 	else {
6440*bafec742SSukumar Swaminathan 		/* Adjust to a multiple of 16 */
6441*bafec742SSukumar Swaminathan 		arm_count = (rx_ring->sbuf_free_count / 16) * 16;
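		/*
		 * e.g. a free count of 100 arms (100 / 16) * 16 = 96
		 * descriptors, handing buffers to the chip in groups
		 * of 16.
		 */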
6442*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
6443*bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "adjust sbuf arm_count %d\n", arm_count);
6444*bafec742SSukumar Swaminathan #endif
6445*bafec742SSukumar Swaminathan 	}
6446*bafec742SSukumar Swaminathan 	for (i = 0; i < arm_count; i++) {
6447*bafec742SSukumar Swaminathan 		sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
6448*bafec742SSukumar Swaminathan 		if (sbq_desc == NULL)
6449*bafec742SSukumar Swaminathan 			break;
6450*bafec742SSukumar Swaminathan 		/* Arm asic */
6451*bafec742SSukumar Swaminathan 		*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
6452*bafec742SSukumar Swaminathan 		sbq_entry++;
6453*bafec742SSukumar Swaminathan 
6454*bafec742SSukumar Swaminathan 		/* link the descriptors to in_use_list */
6455*bafec742SSukumar Swaminathan 		ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
6456*bafec742SSukumar Swaminathan 		rx_ring->sbq_prod_idx++;
6457*bafec742SSukumar Swaminathan 	}
6458*bafec742SSukumar Swaminathan 	ql_update_sbq_prod_idx(qlge, rx_ring);
6459*bafec742SSukumar Swaminathan }
6460*bafec742SSukumar Swaminathan 
6461*bafec742SSukumar Swaminathan /*
 * Move large buffer descriptors from the free list to the in-use list
 * so that hardware can fill them: arm up to (lbq_len -
 * MIN_BUFFERS_ARM_COUNT) descriptors, rounding smaller batches down to
 * a multiple of 16.
6465*bafec742SSukumar Swaminathan  */
6466*bafec742SSukumar Swaminathan static void
6467*bafec742SSukumar Swaminathan ql_arm_lbuf(qlge_t *qlge, struct rx_ring *rx_ring)
6468*bafec742SSukumar Swaminathan {
6469*bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
6470*bafec742SSukumar Swaminathan 	int i;
6471*bafec742SSukumar Swaminathan 	uint64_t *lbq_entry = rx_ring->lbq_dma.vaddr;
6472*bafec742SSukumar Swaminathan 	uint32_t arm_count;
6473*bafec742SSukumar Swaminathan 
6474*bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_free_count > rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT)
6475*bafec742SSukumar Swaminathan 		arm_count = (rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT);
6476*bafec742SSukumar Swaminathan 	else {
6477*bafec742SSukumar Swaminathan 		/* Adjust to a multiple of 16 */
6478*bafec742SSukumar Swaminathan 		arm_count = (rx_ring->lbuf_free_count / 16) * 16;
6479*bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
6480*bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "adjust lbuf arm_count %d\n", arm_count);
6481*bafec742SSukumar Swaminathan #endif
6482*bafec742SSukumar Swaminathan 	}
6483*bafec742SSukumar Swaminathan 	for (i = 0; i < arm_count; i++) {
6484*bafec742SSukumar Swaminathan 		lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
6485*bafec742SSukumar Swaminathan 		if (lbq_desc == NULL)
6486*bafec742SSukumar Swaminathan 			break;
6487*bafec742SSukumar Swaminathan 		/* Arm asic */
6488*bafec742SSukumar Swaminathan 		*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
6489*bafec742SSukumar Swaminathan 		lbq_entry++;
6490*bafec742SSukumar Swaminathan 
6491*bafec742SSukumar Swaminathan 		/* link the descriptors to in_use_list */
6492*bafec742SSukumar Swaminathan 		ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
6493*bafec742SSukumar Swaminathan 		rx_ring->lbq_prod_idx++;
6494*bafec742SSukumar Swaminathan 	}
6495*bafec742SSukumar Swaminathan 	ql_update_lbq_prod_idx(qlge, rx_ring);
6496*bafec742SSukumar Swaminathan }
6497*bafec742SSukumar Swaminathan 
6498*bafec742SSukumar Swaminathan 
6499*bafec742SSukumar Swaminathan /*
 * Initialize the adapter: configure the request and response queues,
 * then arm the small and large receive buffer queues for hardware use.
6503*bafec742SSukumar Swaminathan  */
6504*bafec742SSukumar Swaminathan static int
6505*bafec742SSukumar Swaminathan ql_bringup_adapter(qlge_t *qlge)
6506*bafec742SSukumar Swaminathan {
6507*bafec742SSukumar Swaminathan 	int i;
6508*bafec742SSukumar Swaminathan 
6509*bafec742SSukumar Swaminathan 	if (ql_device_initialize(qlge) != DDI_SUCCESS) {
6510*bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "?%s(%d): ql_device_initialize failed",
6511*bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
6512*bafec742SSukumar Swaminathan 		goto err_bringup;
6513*bafec742SSukumar Swaminathan 	}
6514*bafec742SSukumar Swaminathan 	qlge->sequence |= INIT_ADAPTER_UP;
6515*bafec742SSukumar Swaminathan 
6516*bafec742SSukumar Swaminathan #ifdef QLGE_TRACK_BUFFER_USAGE
6517*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
6518*bafec742SSukumar Swaminathan 		if (qlge->rx_ring[i].type != TX_Q) {
6519*bafec742SSukumar Swaminathan 			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
6520*bafec742SSukumar Swaminathan 			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
6521*bafec742SSukumar Swaminathan 		}
6522*bafec742SSukumar Swaminathan 		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
6523*bafec742SSukumar Swaminathan 	}
6524*bafec742SSukumar Swaminathan #endif
6525*bafec742SSukumar Swaminathan 	/* Arm buffers */
6526*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
6527*bafec742SSukumar Swaminathan 		if (qlge->rx_ring[i].type != TX_Q) {
6528*bafec742SSukumar Swaminathan 			ql_arm_sbuf(qlge, &qlge->rx_ring[i]);
6529*bafec742SSukumar Swaminathan 			ql_arm_lbuf(qlge, &qlge->rx_ring[i]);
6530*bafec742SSukumar Swaminathan 		}
6531*bafec742SSukumar Swaminathan 	}
6532*bafec742SSukumar Swaminathan 
6533*bafec742SSukumar Swaminathan 	/* Enable work/request queues */
6534*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
6535*bafec742SSukumar Swaminathan 		if (qlge->tx_ring[i].valid_db_reg)
6536*bafec742SSukumar Swaminathan 			ql_write_doorbell_reg(qlge,
6537*bafec742SSukumar Swaminathan 			    qlge->tx_ring[i].valid_db_reg,
6538*bafec742SSukumar Swaminathan 			    REQ_Q_VALID);
6539*bafec742SSukumar Swaminathan 	}
6540*bafec742SSukumar Swaminathan 
6541*bafec742SSukumar Swaminathan 	/* Enable completion queues */
6542*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
6543*bafec742SSukumar Swaminathan 		if (qlge->rx_ring[i].valid_db_reg)
6544*bafec742SSukumar Swaminathan 			ql_write_doorbell_reg(qlge,
6545*bafec742SSukumar Swaminathan 			    qlge->rx_ring[i].valid_db_reg,
6546*bafec742SSukumar Swaminathan 			    RSP_Q_VALID);
6547*bafec742SSukumar Swaminathan 	}
6548*bafec742SSukumar Swaminathan 
6549*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
6550*bafec742SSukumar Swaminathan 		mutex_enter(&qlge->tx_ring[i].tx_lock);
6551*bafec742SSukumar Swaminathan 		qlge->tx_ring[i].mac_flags = QL_MAC_STARTED;
6552*bafec742SSukumar Swaminathan 		mutex_exit(&qlge->tx_ring[i].tx_lock);
6553*bafec742SSukumar Swaminathan 	}
6554*bafec742SSukumar Swaminathan 
6555*bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
6556*bafec742SSukumar Swaminathan 		mutex_enter(&qlge->rx_ring[i].rx_lock);
6557*bafec742SSukumar Swaminathan 		qlge->rx_ring[i].mac_flags = QL_MAC_STARTED;
6558*bafec742SSukumar Swaminathan 		mutex_exit(&qlge->rx_ring[i].rx_lock);
6559*bafec742SSukumar Swaminathan 	}
6560*bafec742SSukumar Swaminathan 
	/* Drop hw_mutex; the completion interrupt path re-acquires it */
6562*bafec742SSukumar Swaminathan 	mutex_exit(&qlge->hw_mutex);
6563*bafec742SSukumar Swaminathan 	/* Traffic can start flowing now */
6564*bafec742SSukumar Swaminathan 	ql_enable_all_completion_interrupts(qlge);
6565*bafec742SSukumar Swaminathan 	mutex_enter(&qlge->hw_mutex);
6566*bafec742SSukumar Swaminathan 
6567*bafec742SSukumar Swaminathan 	ql_enable_global_interrupt(qlge);
6568*bafec742SSukumar Swaminathan 
6569*bafec742SSukumar Swaminathan 	qlge->sequence |= ADAPTER_INIT;
6570*bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
6571*bafec742SSukumar Swaminathan 
6572*bafec742SSukumar Swaminathan err_bringup:
6573*bafec742SSukumar Swaminathan 	ql_asic_reset(qlge);
6574*bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
6575*bafec742SSukumar Swaminathan }
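
/*
 * The request/response "valid" doorbells above act as per-queue run/stop
 * switches: writing REQ_Q_VALID/RSP_Q_VALID starts a queue, while writing
 * 0 (as the bring-down and quiesce paths later in this file do) stops it.
 * A minimal sketch of that pattern, assuming a QLGE_EXAMPLE guard that is
 * never defined in a real build (the helper name is hypothetical, not
 * part of this driver):
 */
#ifdef QLGE_EXAMPLE
static void
ql_example_set_req_queue(qlge_t *qlge, struct tx_ring *tx_ring,
    boolean_t enable)
{
	/* Write the valid bit to start the queue, or 0 to stop it */
	if (tx_ring->valid_db_reg)
		ql_write_doorbell_reg(qlge, tx_ring->valid_db_reg,
		    enable ? REQ_Q_VALID : 0);
}
#endif /* QLGE_EXAMPLE */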

/*
 * Initialize the mutexes of each rx/tx ring
 */
static int
ql_init_rx_tx_locks(qlge_t *qlge)
{
	struct tx_ring *tx_ring;
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qlge->tx_ring_count; i++) {
		tx_ring = &qlge->tx_ring[i];
		mutex_init(&tx_ring->tx_lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(qlge->intr_pri));
	}

	for (i = 0; i < qlge->rx_ring_count; i++) {
		rx_ring = &qlge->rx_ring[i];
		mutex_init(&rx_ring->rx_lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(qlge->intr_pri));
		mutex_init(&rx_ring->sbq_lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(qlge->intr_pri));
		mutex_init(&rx_ring->lbq_lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(qlge->intr_pri));
	}

	return (DDI_SUCCESS);
}
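
/*
 * Since these mutexes are initialized with DDI_INTR_PRI(), they may be
 * acquired from interrupt handlers running at or below qlge->intr_pri.
 * Every mutex_init() above needs a matching mutex_destroy() on teardown;
 * a minimal sketch of that counterpart (hypothetical helper name; in this
 * driver the teardown presumably happens in ql_free_resources()):
 */
#ifdef QLGE_EXAMPLE
static void
ql_example_fini_rx_tx_locks(qlge_t *qlge)
{
	int i;

	for (i = 0; i < qlge->tx_ring_count; i++)
		mutex_destroy(&qlge->tx_ring[i].tx_lock);

	for (i = 0; i < qlge->rx_ring_count; i++) {
		mutex_destroy(&qlge->rx_ring[i].rx_lock);
		mutex_destroy(&qlge->rx_ring[i].sbq_lock);
		mutex_destroy(&qlge->rx_ring[i].lbq_lock);
	}
}
#endif /* QLGE_EXAMPLE */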

/*
 * ql_attach - Driver attach.
 */
static int
ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	qlge_t *qlge;
	int rval;
	uint16_t w;
	mac_register_t *macp = NULL;

	rval = DDI_FAILURE;

	/* first get the instance */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		/*
		 * Check that hardware is installed in a DMA-capable slot
		 */
		if (ddi_slaveonly(dip) == DDI_SUCCESS) {
			cmn_err(CE_WARN, "?%s(%d): Not installed in a "
			    "DMA-capable slot", ADAPTER_NAME, instance);
			break;
		}

		/*
		 * No support for high-level interrupts
		 */
		if (ddi_intr_hilevel(dip, 0) != 0) {
			cmn_err(CE_WARN, "?%s(%d): No support for high-level"
			    " intrs", ADAPTER_NAME, instance);
			break;
		}

		/*
		 * Allocate our per-device-instance structure
		 */
		qlge = (qlge_t *)kmem_zalloc(sizeof (*qlge), KM_SLEEP);
		ASSERT(qlge != NULL);

		qlge->sequence |= INIT_SOFTSTATE_ALLOC;

		qlge->dip = dip;
		qlge->instance = instance;

		/*
		 * Set up the ISP8x00 register address mappings to be
		 * accessed by this particular driver:
		 * 0x0   Configuration Space
		 * 0x1   I/O Space
		 * 0x2   1st Memory Space address - Control Register Set
		 * 0x3   2nd Memory Space address - Doorbell Memory Space
		 */
		w = 2;
		if (ddi_regs_map_setup(dip, w, (caddr_t *)&qlge->iobase, 0,
		    sizeof (dev_reg_t), &ql_dev_acc_attr,
		    &qlge->dev_handle) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s(%d): Unable to map device "
			    "registers", ADAPTER_NAME, instance);
			ql_free_resources(dip, qlge);
			break;
		}

		QL_PRINT(DBG_GLD, ("ql_attach: I/O base = 0x%x\n",
		    qlge->iobase));

		qlge->sequence |= INIT_REGS_SETUP;

		/* map Doorbell memory space */
		w = 3;
		if (ddi_regs_map_setup(dip, w,
		    (caddr_t *)&qlge->doorbell_reg_iobase, 0,
		    0x100000 /* sizeof (dev_doorbell_reg_t) */,
		    &ql_dev_acc_attr,
		    &qlge->dev_doorbell_reg_handle) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s(%d): Unable to map Doorbell "
			    "registers",
			    ADAPTER_NAME, instance);
			ql_free_resources(dip, qlge);
			break;
		}

		QL_PRINT(DBG_GLD, ("ql_attach: Doorbell I/O base = 0x%x\n",
		    qlge->doorbell_reg_iobase));

		qlge->sequence |= INIT_DOORBELL_REGS_SETUP;

		/*
		 * Allocate a macinfo structure for this instance
		 */
		if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
			cmn_err(CE_WARN, "%s(%d): mac_alloc failed",
			    __func__, instance);
			ql_free_resources(dip, qlge);
			break;
		}
		/* save adapter status to dip private data */
		ddi_set_driver_private(dip, qlge);
		QL_PRINT(DBG_INIT, ("%s(%d): Allocate macinfo structure done\n",
		    ADAPTER_NAME, instance));

		qlge->sequence |= INIT_MAC_ALLOC;

		/*
		 * Attach this instance of the device
		 */
		/* Setup PCI Local Bus Configuration resource. */
		if (pci_config_setup(dip, &qlge->pci_handle) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s(%d): Unable to get PCI resources",
			    ADAPTER_NAME, instance);
			ql_free_resources(dip, qlge);
			break;
		}

		qlge->sequence |= INIT_PCI_CONFIG_SETUP;

		if (ql_init_instance(qlge) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s(%d): Unable to initialize device "
			    "instance", ADAPTER_NAME, instance);
			ql_free_resources(dip, qlge);
			break;
		}

		/* Setup interrupt vectors */
		if (ql_alloc_irqs(qlge) != DDI_SUCCESS) {
			ql_free_resources(dip, qlge);
			break;
		}
		qlge->sequence |= INIT_INTR_ALLOC;

		/* Configure queues */
		if (ql_setup_rings(qlge) != DDI_SUCCESS) {
			ql_free_resources(dip, qlge);
			break;
		}

		qlge->sequence |= INIT_SETUP_RINGS;
		/*
		 * Map queues to interrupt vectors
		 */
		ql_resolve_queues_to_irqs(qlge);
		/*
		 * Add interrupt handlers
		 */
		if (ql_add_intr_handlers(qlge) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s(%d): Failed to add interrupt "
			    "handlers", ADAPTER_NAME, instance);
			ql_free_resources(dip, qlge);
			break;
		}

		qlge->sequence |= INIT_ADD_INTERRUPT;
		QL_PRINT(DBG_GLD, ("%s(%d): Add interrupt handler done\n",
		    ADAPTER_NAME, instance));

		/* Initialize mutexes; this requires the interrupt priority */
		(void) ql_init_rx_tx_locks(qlge);

		qlge->sequence |= INIT_LOCKS_CREATED;

		/*
		 * Use soft interrupts to handle work that we do not want
		 * to do in regular network functions or while mutexes are
		 * held
		 */
		if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_event_intr_hdl,
		    DDI_INTR_SOFTPRI_MIN, ql_mpi_event_work, (caddr_t)qlge)
		    != DDI_SUCCESS) {
			ql_free_resources(dip, qlge);
			break;
		}

		if (ddi_intr_add_softint(qlge->dip, &qlge->asic_reset_intr_hdl,
		    DDI_INTR_SOFTPRI_MIN, ql_asic_reset_work, (caddr_t)qlge)
		    != DDI_SUCCESS) {
			ql_free_resources(dip, qlge);
			break;
		}

		if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_reset_intr_hdl,
		    DDI_INTR_SOFTPRI_MIN, ql_mpi_reset_work, (caddr_t)qlge)
		    != DDI_SUCCESS) {
			ql_free_resources(dip, qlge);
			break;
		}

		qlge->sequence |= INIT_ADD_SOFT_INTERRUPT;

		/*
		 * Mutexes that protect the adapter state structure,
		 * initialized according to the interrupt priority
		 */
		mutex_init(&qlge->gen_mutex, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(qlge->intr_pri));
		mutex_init(&qlge->hw_mutex, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(qlge->intr_pri));
		mutex_init(&qlge->mbx_mutex, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(qlge->intr_pri));

		/* Mailbox wait and interrupt condition variable. */
		cv_init(&qlge->cv_mbx_intr, NULL, CV_DRIVER, NULL);

		qlge->sequence |= INIT_MUTEX;

		/*
		 * KStats
		 */
		if (ql_init_kstats(qlge) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s(%d): kstat initialization failed",
			    ADAPTER_NAME, instance);
			ql_free_resources(dip, qlge);
			break;
		}
		qlge->sequence |= INIT_KSTATS;

		/*
		 * Initialize gld macinfo structure
		 */
		ql_gld3_init(qlge, macp);

		if (mac_register(macp, &qlge->mh) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s(%d): mac_register failed",
			    __func__, instance);
			ql_free_resources(dip, qlge);
			break;
		}
		qlge->sequence |= INIT_MAC_REGISTERED;
		QL_PRINT(DBG_GLD, ("%s(%d): mac_register done\n",
		    ADAPTER_NAME, instance));

		mac_free(macp);
		macp = NULL;

		qlge->mac_flags = QL_MAC_ATTACHED;

		/*
		 * Allocate memory resources
		 */
		if (ql_alloc_mem_resources(qlge) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s(%d): memory allocation failed",
			    __func__, qlge->instance);
			ql_free_mem_resources(qlge);
			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
			ql_free_resources(dip, qlge);
			break;
		}
		qlge->sequence |= INIT_MEMORY_ALLOC;

		ddi_report_dev(dip);

		rval = DDI_SUCCESS;
		break;

	/*
	 * DDI_RESUME
	 * When called with cmd set to DDI_RESUME, attach() must restore
	 * the hardware state of a device (power may have been removed
	 * from the device), allow pending requests to continue, and
	 * service new requests. In this case, the driver must not make
	 * any assumptions about the state of the hardware, but must
	 * restore the state of the device except for the power level of
	 * components.
	 */
	case DDI_RESUME:

		if ((qlge = (qlge_t *)QL_GET_DEV(dip)) == NULL)
			return (DDI_FAILURE);

		QL_PRINT(DBG_GLD, ("%s(%d)-DDI_RESUME\n",
		    __func__, qlge->instance));

		mutex_enter(&qlge->gen_mutex);
		rval = ql_do_start(qlge);
		mutex_exit(&qlge->gen_mutex);
		break;

	default:
		break;
	}
	return (rval);
}
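
/*
 * ql_attach() records every completed setup step as a bit in
 * qlge->sequence (INIT_REGS_SETUP, INIT_INTR_ALLOC, INIT_KSTATS, ...) so
 * that the error paths above can hand one routine, ql_free_resources(),
 * the job of unwinding exactly the steps that completed. A minimal sketch
 * of that staged-teardown pattern (the helper name and the particular
 * stages shown are illustrative assumptions):
 */
#ifdef QLGE_EXAMPLE
static void
ql_example_unwind(qlge_t *qlge)
{
	/* Undo each stage only if its sequence bit was ever set */
	if (qlge->sequence & INIT_KSTATS) {
		ql_fini_kstats(qlge);
		qlge->sequence &= ~INIT_KSTATS;
	}
	if (qlge->sequence & INIT_PCI_CONFIG_SETUP) {
		pci_config_teardown(&qlge->pci_handle);
		qlge->sequence &= ~INIT_PCI_CONFIG_SETUP;
	}
	if (qlge->sequence & INIT_SOFTSTATE_ALLOC) {
		/* Must come last: this frees the state holding sequence */
		kmem_free(qlge, sizeof (*qlge));
	}
}
#endif /* QLGE_EXAMPLE */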

/*
 * Unbind all pending tx dma handles during driver bring down
 */
static void
ql_unbind_pending_tx_dma_handle(struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	if (tx_ring->wq_desc) {
		tx_ring_desc = tx_ring->wq_desc;
		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
			for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
				if (tx_ring_desc->tx_dma_handle[j]) {
					(void) ddi_dma_unbind_handle(
					    tx_ring_desc->tx_dma_handle[j]);
				}
			}
			tx_ring_desc->tx_dma_handle_used = 0;
		} /* end of for loop */
	}
}
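
/*
 * ddi_dma_unbind_handle() above only tears down the DMA mapping; the
 * handle itself remains allocated for reuse until ddi_dma_free_handle().
 * A minimal, self-contained sketch of the full handle life cycle for one
 * buffer (illustrative only; the attribute structure and buffer are
 * assumptions, not taken from this driver):
 */
#ifdef QLGE_EXAMPLE
static int
ql_example_dma_lifecycle(dev_info_t *dip, ddi_dma_attr_t *attrp,
    caddr_t buf, size_t len)
{
	ddi_dma_handle_t handle;
	ddi_dma_cookie_t cookie;
	uint_t ccount;

	/* 1. Allocate a reusable DMA handle */
	if (ddi_dma_alloc_handle(dip, attrp, DDI_DMA_DONTWAIT, NULL,
	    &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* 2. Bind the buffer, producing DMA cookie(s) for the hardware */
	if (ddi_dma_addr_bind_handle(handle, NULL, buf, len,
	    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
	    &cookie, &ccount) != DDI_DMA_MAPPED) {
		ddi_dma_free_handle(&handle);
		return (DDI_FAILURE);
	}

	/* ... device I/O using the cookie(s) happens here ... */

	/* 3. Unbind drops the mapping; 4. free releases the handle */
	(void) ddi_dma_unbind_handle(handle);
	ddi_dma_free_handle(&handle);
	return (DDI_SUCCESS);
}
#endif /* QLGE_EXAMPLE */
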
/*
 * Wait for all the packets sent to the chip to finish transmission
 * to prevent buffers from being unmapped before or during a transmit
 * operation
 */
static int
ql_wait_tx_quiesce(qlge_t *qlge)
{
	int count = MAX_TX_WAIT_COUNT, i;
	int rings_done;
	volatile struct tx_ring *tx_ring;
	uint32_t consumer_idx;
	uint32_t producer_idx;
	uint32_t temp;
	int done = 0;
	int rval = DDI_FAILURE;

	while (!done) {
		rings_done = 0;

		for (i = 0; i < qlge->tx_ring_count; i++) {
			tx_ring = &qlge->tx_ring[i];
			temp = ql_read_doorbell_reg(qlge,
			    tx_ring->prod_idx_db_reg);
			producer_idx = temp & 0x0000ffff;
			consumer_idx = (temp >> 16);

			/*
			 * Get the count of pending IOCBs, the ones which
			 * have not yet been pulled down by the chip
			 */
			if (producer_idx >= consumer_idx)
				temp = (producer_idx - consumer_idx);
			else
				temp = (tx_ring->wq_len - consumer_idx) +
				    producer_idx;

			if ((tx_ring->tx_free_count + temp) >= tx_ring->wq_len)
				rings_done++;
			else
				break;
		}

		/* If all the rings are done */
		if (rings_done >= qlge->tx_ring_count) {
#ifdef QLGE_LOAD_UNLOAD
			cmn_err(CE_NOTE, "%s(%d) done successfully\n",
			    __func__, qlge->instance);
#endif
			rval = DDI_SUCCESS;
			break;
		}

		qlge_delay(100);

		count--;
		if (!count) {
#ifdef QLGE_LOAD_UNLOAD
			volatile struct rx_ring *rx_ring;

			cmn_err(CE_NOTE, "%s(%d): Waiting for %d pending"
			    " Transmits on queue %d to complete.\n",
			    __func__, qlge->instance,
			    (qlge->tx_ring[i].wq_len -
			    qlge->tx_ring[i].tx_free_count),
			    i);

			rx_ring = &qlge->rx_ring[i+1];
			temp = ql_read_doorbell_reg(qlge,
			    rx_ring->cnsmr_idx_db_reg);
			consumer_idx = temp & 0x0000ffff;
			producer_idx = (temp >> 16);
			cmn_err(CE_NOTE, "%s(%d): Transmit completion queue %d,"
			    " Producer %d, Consumer %d\n",
			    __func__, qlge->instance,
			    i+1,
			    producer_idx, consumer_idx);

			temp = ql_read_doorbell_reg(qlge,
			    tx_ring->prod_idx_db_reg);
			producer_idx = temp & 0x0000ffff;
			consumer_idx = (temp >> 16);
			cmn_err(CE_NOTE, "%s(%d): Transmit request queue %d,"
			    " Producer %d, Consumer %d\n",
			    __func__, qlge->instance, i,
			    producer_idx, consumer_idx);
#endif

			/* For now move on */
			break;
		}
	}
	/* Stop the request queues */
	mutex_enter(&qlge->hw_mutex);
	for (i = 0; i < qlge->tx_ring_count; i++) {
		if (qlge->tx_ring[i].valid_db_reg) {
			ql_write_doorbell_reg(qlge,
			    qlge->tx_ring[i].valid_db_reg, 0);
		}
	}
	mutex_exit(&qlge->hw_mutex);
	return (rval);
}
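
/*
 * The doorbell reads above pack both ring indices into one 32-bit
 * register: the producer index in the low 16 bits and the consumer index
 * in the high 16 bits, with the pending-IOCB count computed modulo the
 * ring length. A worked sketch of that computation (hypothetical helper;
 * wq_len is the ring size):
 */
#ifdef QLGE_EXAMPLE
static uint32_t
ql_example_pending_iocbs(uint32_t db_value, uint32_t wq_len)
{
	uint32_t producer_idx = db_value & 0x0000ffff;
	uint32_t consumer_idx = db_value >> 16;

	/*
	 * For example, with wq_len = 256, producer = 10 and
	 * consumer = 250, the chip still has to pull
	 * (256 - 250) + 10 = 16 entries.
	 */
	if (producer_idx >= consumer_idx)
		return (producer_idx - consumer_idx);
	return ((wq_len - consumer_idx) + producer_idx);
}
#endif /* QLGE_EXAMPLE */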

/*
 * Wait for all the receives indicated to the stack to come back
 */
static int
ql_wait_rx_complete(qlge_t *qlge)
{
	int i;

	/* Disable all the completion queues */
	mutex_enter(&qlge->hw_mutex);
	for (i = 0; i < qlge->rx_ring_count; i++) {
		if (qlge->rx_ring[i].valid_db_reg) {
			ql_write_doorbell_reg(qlge,
			    qlge->rx_ring[i].valid_db_reg, 0);
		}
	}
	mutex_exit(&qlge->hw_mutex);

	/* Give the OS time to return all rx buffers */
	qlge_delay(QL_ONE_SEC_DELAY);
	return (DDI_SUCCESS);
}

/*
 * stop the driver
 */
static int
ql_bringdown_adapter(qlge_t *qlge)
{
	int i;
	int status = DDI_SUCCESS;

	qlge->mac_flags = QL_MAC_BRINGDOWN;
	if (qlge->sequence & ADAPTER_INIT) {
		/* stop forwarding external packets to driver */
		status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
		if (status)
			return (status);
		ql_stop_routing(qlge);
		ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
		/*
		 * Set the flag for receive and transmit
		 * operations to cease
		 */
		for (i = 0; i < qlge->tx_ring_count; i++) {
			mutex_enter(&qlge->tx_ring[i].tx_lock);
			qlge->tx_ring[i].mac_flags = QL_MAC_STOPPED;
			mutex_exit(&qlge->tx_ring[i].tx_lock);
		}

		for (i = 0; i < qlge->rx_ring_count; i++) {
			mutex_enter(&qlge->rx_ring[i].rx_lock);
			qlge->rx_ring[i].mac_flags = QL_MAC_STOPPED;
			mutex_exit(&qlge->rx_ring[i].rx_lock);
		}

		/*
		 * Need interrupts to be running while the transmit
		 * completions are cleared. Wait for the packets
		 * queued to the chip to be sent out
		 */
		(void) ql_wait_tx_quiesce(qlge);
		/* Interrupts not needed from now */
		ql_disable_all_completion_interrupts(qlge);

		mutex_enter(&qlge->hw_mutex);
		/* Disable Global interrupt */
		ql_disable_global_interrupt(qlge);
		mutex_exit(&qlge->hw_mutex);

		/* Wait for all the indicated packets to come back */
		status = ql_wait_rx_complete(qlge);

		mutex_enter(&qlge->hw_mutex);
		/* Reset adapter */
		ql_asic_reset(qlge);
		/*
		 * Unbind all tx dma handles to prevent pending tx
		 * descriptors' dma handles from being re-used.
		 */
		for (i = 0; i < qlge->tx_ring_count; i++) {
			ql_unbind_pending_tx_dma_handle(&qlge->tx_ring[i]);
		}

		qlge->sequence &= ~ADAPTER_INIT;

		mutex_exit(&qlge->hw_mutex);
	}
	return (status);
}

/*
 * ql_detach
 * Used to remove all the state associated with a given instance of
 * a device node prior to the removal of that instance from the
 * system.
 */
static int
ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	qlge_t *qlge;
	int rval;

	rval = DDI_SUCCESS;

	switch (cmd) {
	case DDI_DETACH:

		if ((qlge = QL_GET_DEV(dip)) == NULL)
			return (DDI_FAILURE);
		rval = ql_bringdown_adapter(qlge);
		if (rval != DDI_SUCCESS)
			break;

		qlge->mac_flags = QL_MAC_DETACH;

		/* free memory resources */
		if (qlge->sequence & INIT_MEMORY_ALLOC) {
			ql_free_mem_resources(qlge);
			qlge->sequence &= ~INIT_MEMORY_ALLOC;
		}
		ql_free_resources(dip, qlge);

		break;

	case DDI_SUSPEND:
		if ((qlge = QL_GET_DEV(dip)) == NULL)
			return (DDI_FAILURE);

		mutex_enter(&qlge->gen_mutex);
		if ((qlge->mac_flags == QL_MAC_ATTACHED) ||
		    (qlge->mac_flags == QL_MAC_STARTED)) {
			(void) ql_do_stop(qlge);
		}
		qlge->mac_flags = QL_MAC_SUSPENDED;
		mutex_exit(&qlge->gen_mutex);

		break;
	default:
		rval = DDI_FAILURE;
		break;
	}

	return (rval);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 */
int
ql_quiesce(dev_info_t *dip)
{
	qlge_t *qlge;
	int i;

	if ((qlge = QL_GET_DEV(dip)) == NULL)
		return (DDI_FAILURE);

	if (CFG_IST(qlge, CFG_CHIP_8100)) {
		/* stop forwarding external packets to driver */
		(void) ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
		ql_stop_routing(qlge);
		ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
		/* Stop all the request queues */
		for (i = 0; i < qlge->tx_ring_count; i++) {
			if (qlge->tx_ring[i].valid_db_reg) {
				ql_write_doorbell_reg(qlge,
				    qlge->tx_ring[i].valid_db_reg, 0);
			}
		}
		qlge_delay(QL_ONE_SEC_DELAY/4);
		/* Interrupts not needed from now */
		/* Disable MPI interrupt */
		ql_write_reg(qlge, REG_INTERRUPT_MASK,
		    (INTR_MASK_PI << 16));
		ql_disable_global_interrupt(qlge);

		/* Disable all the rx completion queues */
		for (i = 0; i < qlge->rx_ring_count; i++) {
			if (qlge->rx_ring[i].valid_db_reg) {
				ql_write_doorbell_reg(qlge,
				    qlge->rx_ring[i].valid_db_reg, 0);
			}
		}
		qlge_delay(QL_ONE_SEC_DELAY/4);
		qlge->mac_flags = QL_MAC_STOPPED;
		/* Reset adapter */
		ql_asic_reset(qlge);
		qlge_delay(100);
	}

	return (DDI_SUCCESS);
}
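
/*
 * quiesce(9E) runs single-threaded at high PIL, so the pauses above must
 * busy-wait rather than sleep; delay(9F) or cv_timedwait(9F) would be
 * illegal here. A minimal sketch of such a non-blocking pause, assuming
 * qlge_delay() is a comparable busy-wait (the helper below is an
 * illustration, not part of this driver):
 */
#ifdef QLGE_EXAMPLE
static void
ql_example_quiesce_pause(clock_t usecs)
{
	drv_usecwait(usecs);	/* spins in place; never sleeps */
}
#endif /* QLGE_EXAMPLE */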

QL_STREAM_OPS(ql_ops, ql_attach, ql_detach);
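
/*
 * QL_STREAM_OPS presumably wires ql_attach/ql_detach (and ql_quiesce)
 * into the driver's dev_ops. A sketch of a comparable definition using
 * the stock DDI_DEFINE_STREAM_OPS(9F) macro; the argument choices below
 * (nulldev/nodev/ddi_no_info/D_MP) are illustrative assumptions, not the
 * actual expansion of QL_STREAM_OPS:
 */
#ifdef QLGE_EXAMPLE
DDI_DEFINE_STREAM_OPS(ql_example_ops, nulldev, nulldev, ql_attach,
    ql_detach, nodev, ddi_no_info, D_MP, NULL, ql_quiesce);
#endif /* QLGE_EXAMPLE */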

/*
 * Loadable Driver Interface Structures.
 * Declare and initialize the module configuration section...
 */
static struct modldrv modldrv = {
	&mod_driverops,		/* type of module: driver */
	version,		/* description of module */
	&ql_ops			/* driver dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,	&modldrv,	NULL
};

/*
 * Loadable Module Routines
 */

/*
 * _init
 * Initializes a loadable module. It is called before any other
 * routine in a loadable module.
 */
int
_init(void)
{
	int rval;

	mac_init_ops(&ql_ops, ADAPTER_NAME);
	rval = mod_install(&modlinkage);
	if (rval != DDI_SUCCESS) {
		mac_fini_ops(&ql_ops);
		cmn_err(CE_WARN, "?Unable to install/attach driver '%s'",
		    ADAPTER_NAME);
	}

	return (rval);
}

/*
 * _fini
 * Prepares a module for unloading. It is called when the system
 * wants to unload a module. If the module determines that it can
 * be unloaded, then _fini() returns the value returned by
 * mod_remove(). Upon successful return from _fini(), no other
 * routine in the module will be called before _init() is called.
 */
int
_fini(void)
{
	int rval;

	rval = mod_remove(&modlinkage);
	if (rval == DDI_SUCCESS) {
		mac_fini_ops(&ql_ops);
	}

	return (rval);
}

/*
 * _info
 * Returns information about the loadable module.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}