xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge.c (revision 0662fbf4a5c0ae38617fd0fe521817c65a4dca3f)
1bafec742SSukumar Swaminathan /*
2bafec742SSukumar Swaminathan  * CDDL HEADER START
3bafec742SSukumar Swaminathan  *
4bafec742SSukumar Swaminathan  * The contents of this file are subject to the terms of the
5bafec742SSukumar Swaminathan  * Common Development and Distribution License (the "License").
6bafec742SSukumar Swaminathan  * You may not use this file except in compliance with the License.
7bafec742SSukumar Swaminathan  *
8bafec742SSukumar Swaminathan  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9bafec742SSukumar Swaminathan  * or http://www.opensolaris.org/os/licensing.
10bafec742SSukumar Swaminathan  * See the License for the specific language governing permissions
11bafec742SSukumar Swaminathan  * and limitations under the License.
12bafec742SSukumar Swaminathan  *
13bafec742SSukumar Swaminathan  * When distributing Covered Code, include this CDDL HEADER in each
14bafec742SSukumar Swaminathan  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15bafec742SSukumar Swaminathan  * If applicable, add the following below this CDDL HEADER, with the
16bafec742SSukumar Swaminathan  * fields enclosed by brackets "[]" replaced with your own identifying
17bafec742SSukumar Swaminathan  * information: Portions Copyright [yyyy] [name of copyright owner]
18bafec742SSukumar Swaminathan  *
19bafec742SSukumar Swaminathan  * CDDL HEADER END
20bafec742SSukumar Swaminathan  */
21bafec742SSukumar Swaminathan 
22bafec742SSukumar Swaminathan /*
23bafec742SSukumar Swaminathan  * Copyright 2009 QLogic Corporation. All rights reserved.
24bafec742SSukumar Swaminathan  */
25bafec742SSukumar Swaminathan 
26bafec742SSukumar Swaminathan #include <qlge.h>
27bafec742SSukumar Swaminathan #include <sys/atomic.h>
28bafec742SSukumar Swaminathan #include <sys/strsubr.h>
29bafec742SSukumar Swaminathan #include <sys/pattr.h>
30bafec742SSukumar Swaminathan #include <netinet/in.h>
31bafec742SSukumar Swaminathan #include <netinet/ip.h>
32bafec742SSukumar Swaminathan #include <netinet/ip6.h>
33bafec742SSukumar Swaminathan #include <netinet/tcp.h>
34bafec742SSukumar Swaminathan #include <netinet/udp.h>
35bafec742SSukumar Swaminathan #include <inet/ip.h>
36bafec742SSukumar Swaminathan 
37bafec742SSukumar Swaminathan 
38bafec742SSukumar Swaminathan 
39bafec742SSukumar Swaminathan /*
40bafec742SSukumar Swaminathan  * Local variables
41bafec742SSukumar Swaminathan  */
/* Ethernet broadcast address, used when programming routing/MAC registers. */
static struct ether_addr ql_ether_broadcast_addr =
	{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/* Driver identification string reported at attach time. */
static char version[] = "QLogic GLDv3 Driver " VERSIONSTR;

/*
 * Local function prototypes
 */
static void ql_free_resources(dev_info_t *, qlge_t *);
static void ql_fini_kstats(qlge_t *);
static uint32_t ql_get_link_state(qlge_t *);
static void ql_read_conf(qlge_t *);
static int ql_alloc_phys(dev_info_t *, ddi_dma_handle_t *,
    ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
    size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
static void ql_free_phys(ddi_dma_handle_t *, ddi_acc_handle_t *);
static int ql_set_routing_reg(qlge_t *, uint32_t, uint32_t, int);
static int ql_route_initialize(qlge_t *);
static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
static int ql_bringdown_adapter(qlge_t *);
static int ql_bringup_adapter(qlge_t *);
static int ql_asic_reset(qlge_t *);
static void ql_wake_mpi_reset_soft_intr(qlge_t *);
static void ql_stop_timer(qlge_t *qlge);
66bafec742SSukumar Swaminathan 
/*
 * TX DMA mapping handles allow multiple scatter-gather lists
 */
/* DMA attributes for mapping TX packet buffers (QL_* values from qlge.h). */
ddi_dma_attr_t  tx_mapping_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_MAX_TX_DMA_HANDLES,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};
84bafec742SSukumar Swaminathan 
85bafec742SSukumar Swaminathan /*
86bafec742SSukumar Swaminathan  * Receive buffers and Request/Response queues do not allow scatter-gather lists
87bafec742SSukumar Swaminathan  */
/* DMA attributes for RX buffers and request/response rings (no s/g). */
ddi_dma_attr_t  dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	1,				/* s/g list length, i.e no sg list */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};
102bafec742SSukumar Swaminathan 
103bafec742SSukumar Swaminathan /*
104bafec742SSukumar Swaminathan  * DMA access attribute structure.
105bafec742SSukumar Swaminathan  */
106bafec742SSukumar Swaminathan /* device register access from host */
/* Device registers are little-endian; enforce strict access ordering. */
ddi_device_acc_attr_t ql_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* host ring descriptors */
/* Descriptor memory is accessed in host byte order (no swapping). */
ddi_device_acc_attr_t ql_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

/* host ring buffer */
/* Packet buffer memory is also accessed in host byte order. */
ddi_device_acc_attr_t ql_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
126bafec742SSukumar Swaminathan 
127bafec742SSukumar Swaminathan /*
128bafec742SSukumar Swaminathan  * Hash key table for Receive Side Scaling (RSS) support
129bafec742SSukumar Swaminathan  */
/* 40-byte RSS hash key (4 rows x 10 bytes). */
const uint8_t key_data[] = {
	0x23, 0x64, 0xa1, 0xaa, 0x37, 0xc0, 0xed, 0x05, 0x2b, 0x36,
	0x50, 0x5c, 0x45, 0x1e, 0x7e, 0xc8, 0x5d, 0x2a, 0x54, 0x2f,
	0xe4, 0x3d, 0x0f, 0xbb, 0x91, 0xd9, 0x25, 0x60, 0xd4, 0xf8,
	0x12, 0xa0, 0x59, 0x4b, 0x9e, 0x8a, 0x51, 0xda, 0xcd, 0x49};
135bafec742SSukumar Swaminathan 
136bafec742SSukumar Swaminathan /*
137bafec742SSukumar Swaminathan  * Shadow Registers:
138bafec742SSukumar Swaminathan  * Outbound queues have a consumer index that is maintained by the chip.
139bafec742SSukumar Swaminathan  * Inbound queues have a producer index that is maintained by the chip.
140bafec742SSukumar Swaminathan  * For lower overhead, these registers are "shadowed" to host memory
141bafec742SSukumar Swaminathan  * which allows the device driver to track the queue progress without
142bafec742SSukumar Swaminathan  * PCI reads. When an entry is placed on an inbound queue, the chip will
143bafec742SSukumar Swaminathan  * update the relevant index register and then copy the value to the
144bafec742SSukumar Swaminathan  * shadow register in host memory.
145bafec742SSukumar Swaminathan  */
146bafec742SSukumar Swaminathan 
/*
 * Read a 32-bit shadow register value from host memory.
 * The volatile access forces a fresh load on every call, since the
 * chip updates this memory behind the driver's back.
 */
static inline unsigned int
ql_read_sh_reg(const volatile void *addr)
{
	const volatile uint32_t *shadow = (const volatile uint32_t *)addr;

	return (*shadow);
}
152bafec742SSukumar Swaminathan 
153bafec742SSukumar Swaminathan /*
154bafec742SSukumar Swaminathan  * Read 32 bit atomically
155bafec742SSukumar Swaminathan  */
156bafec742SSukumar Swaminathan uint32_t
157bafec742SSukumar Swaminathan ql_atomic_read_32(volatile uint32_t *target)
158bafec742SSukumar Swaminathan {
159bafec742SSukumar Swaminathan 	/*
160bafec742SSukumar Swaminathan 	 * atomic_add_32_nv returns the new value after the add,
161bafec742SSukumar Swaminathan 	 * we are adding 0 so we should get the original value
162bafec742SSukumar Swaminathan 	 */
163bafec742SSukumar Swaminathan 	return (atomic_add_32_nv(target, 0));
164bafec742SSukumar Swaminathan }
165bafec742SSukumar Swaminathan 
166bafec742SSukumar Swaminathan /*
167bafec742SSukumar Swaminathan  * Set 32 bit atomically
168bafec742SSukumar Swaminathan  */
void
ql_atomic_set_32(volatile uint32_t *target, uint32_t newval)
{
	/* atomic_swap_32() returns the previous value; we don't need it. */
	(void) atomic_swap_32(target, newval);
}
174bafec742SSukumar Swaminathan 
175bafec742SSukumar Swaminathan 
176bafec742SSukumar Swaminathan /*
177bafec742SSukumar Swaminathan  * Setup device PCI configuration registers.
178bafec742SSukumar Swaminathan  * Kernel context.
179bafec742SSukumar Swaminathan  */
180bafec742SSukumar Swaminathan static void
181bafec742SSukumar Swaminathan ql_pci_config(qlge_t *qlge)
182bafec742SSukumar Swaminathan {
183bafec742SSukumar Swaminathan 	uint16_t w;
184bafec742SSukumar Swaminathan 
185bafec742SSukumar Swaminathan 	qlge->vendor_id = (uint16_t)pci_config_get16(qlge->pci_handle,
186bafec742SSukumar Swaminathan 	    PCI_CONF_VENID);
187bafec742SSukumar Swaminathan 	qlge->device_id = (uint16_t)pci_config_get16(qlge->pci_handle,
188bafec742SSukumar Swaminathan 	    PCI_CONF_DEVID);
189bafec742SSukumar Swaminathan 
190bafec742SSukumar Swaminathan 	/*
191bafec742SSukumar Swaminathan 	 * we want to respect framework's setting of PCI
192bafec742SSukumar Swaminathan 	 * configuration space command register and also
193bafec742SSukumar Swaminathan 	 * want to make sure that all bits of interest to us
194bafec742SSukumar Swaminathan 	 * are properly set in PCI Command register(0x04).
195bafec742SSukumar Swaminathan 	 * PCI_COMM_IO		0x1	 I/O access enable
196bafec742SSukumar Swaminathan 	 * PCI_COMM_MAE		0x2	 Memory access enable
197bafec742SSukumar Swaminathan 	 * PCI_COMM_ME		0x4	 bus master enable
198bafec742SSukumar Swaminathan 	 * PCI_COMM_MEMWR_INVAL	0x10	 memory write and invalidate enable.
199bafec742SSukumar Swaminathan 	 */
200bafec742SSukumar Swaminathan 	w = (uint16_t)pci_config_get16(qlge->pci_handle, PCI_CONF_COMM);
201bafec742SSukumar Swaminathan 	w = (uint16_t)(w & (~PCI_COMM_IO));
202bafec742SSukumar Swaminathan 	w = (uint16_t)(w | PCI_COMM_MAE | PCI_COMM_ME |
203bafec742SSukumar Swaminathan 	    /* PCI_COMM_MEMWR_INVAL | */
204bafec742SSukumar Swaminathan 	    PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);
205bafec742SSukumar Swaminathan 
206bafec742SSukumar Swaminathan 	pci_config_put16(qlge->pci_handle, PCI_CONF_COMM, w);
207bafec742SSukumar Swaminathan 
208bafec742SSukumar Swaminathan 	ql_dump_pci_config(qlge);
209bafec742SSukumar Swaminathan }
210bafec742SSukumar Swaminathan 
/*
 * This routine performs the necessary steps to set GLD MAC information
 * such as the function number and the XGMAC mask and shift bits.
 */
215bafec742SSukumar Swaminathan static int
216bafec742SSukumar Swaminathan ql_set_mac_info(qlge_t *qlge)
217bafec742SSukumar Swaminathan {
218bafec742SSukumar Swaminathan 	uint32_t value;
219bafec742SSukumar Swaminathan 	int rval = DDI_SUCCESS;
220bafec742SSukumar Swaminathan 	uint32_t fn0_net, fn1_net;
221bafec742SSukumar Swaminathan 
222bafec742SSukumar Swaminathan 	/* set default value */
223bafec742SSukumar Swaminathan 	qlge->fn0_net = FN0_NET;
224bafec742SSukumar Swaminathan 	qlge->fn1_net = FN1_NET;
225bafec742SSukumar Swaminathan 
226bafec742SSukumar Swaminathan 	if (ql_read_processor_data(qlge, MPI_REG, &value) != DDI_SUCCESS) {
227bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d) read MPI register failed",
228bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
229bafec742SSukumar Swaminathan 	} else {
230bafec742SSukumar Swaminathan 		fn0_net = (value >> 1) & 0x07;
231bafec742SSukumar Swaminathan 		fn1_net = (value >> 5) & 0x07;
232bafec742SSukumar Swaminathan 		if ((fn0_net > 4) || (fn1_net > 4) || (fn0_net == fn1_net)) {
233bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d) bad mpi register value %x, \n"
234bafec742SSukumar Swaminathan 			    "nic0 function number %d,"
235bafec742SSukumar Swaminathan 			    "nic1 function number %d "
236bafec742SSukumar Swaminathan 			    "use default\n",
237bafec742SSukumar Swaminathan 			    __func__, qlge->instance, value, fn0_net, fn1_net);
238bafec742SSukumar Swaminathan 		} else {
239bafec742SSukumar Swaminathan 			qlge->fn0_net = fn0_net;
240bafec742SSukumar Swaminathan 			qlge->fn1_net = fn1_net;
241bafec742SSukumar Swaminathan 		}
242bafec742SSukumar Swaminathan 	}
243bafec742SSukumar Swaminathan 
244bafec742SSukumar Swaminathan 	/* Get the function number that the driver is associated with */
245bafec742SSukumar Swaminathan 	value = ql_read_reg(qlge, REG_STATUS);
246bafec742SSukumar Swaminathan 	qlge->func_number = (uint8_t)((value >> 6) & 0x03);
247bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("status register is:%x, func_number: %d\n",
248bafec742SSukumar Swaminathan 	    value, qlge->func_number));
249bafec742SSukumar Swaminathan 
250bafec742SSukumar Swaminathan 	/* The driver is loaded on a non-NIC function? */
251bafec742SSukumar Swaminathan 	if ((qlge->func_number != qlge->fn0_net) &&
252bafec742SSukumar Swaminathan 	    (qlge->func_number != qlge->fn1_net)) {
253bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
254bafec742SSukumar Swaminathan 		    "Invalid function number = 0x%x\n", qlge->func_number);
255bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
256bafec742SSukumar Swaminathan 	}
257bafec742SSukumar Swaminathan 	/* network port 0? */
258bafec742SSukumar Swaminathan 	if (qlge->func_number == qlge->fn0_net) {
259bafec742SSukumar Swaminathan 		qlge->xgmac_sem_mask = QL_PORT0_XGMAC_SEM_MASK;
260bafec742SSukumar Swaminathan 		qlge->xgmac_sem_bits = QL_PORT0_XGMAC_SEM_BITS;
261bafec742SSukumar Swaminathan 	} else {
262bafec742SSukumar Swaminathan 		qlge->xgmac_sem_mask = QL_PORT1_XGMAC_SEM_MASK;
263bafec742SSukumar Swaminathan 		qlge->xgmac_sem_bits = QL_PORT1_XGMAC_SEM_BITS;
264bafec742SSukumar Swaminathan 	}
265bafec742SSukumar Swaminathan 
266bafec742SSukumar Swaminathan 	return (rval);
267bafec742SSukumar Swaminathan 
268bafec742SSukumar Swaminathan }
269bafec742SSukumar Swaminathan 
270bafec742SSukumar Swaminathan /*
271bafec742SSukumar Swaminathan  * write to doorbell register
272bafec742SSukumar Swaminathan  */
void
ql_write_doorbell_reg(qlge_t *qlge, uint32_t *addr, uint32_t data)
{
	/* 32-bit write through the mapped doorbell register region. */
	ddi_put32(qlge->dev_doorbell_reg_handle, addr, data);
}
278bafec742SSukumar Swaminathan 
279bafec742SSukumar Swaminathan /*
280bafec742SSukumar Swaminathan  * read from doorbell register
281bafec742SSukumar Swaminathan  */
282bafec742SSukumar Swaminathan uint32_t
283bafec742SSukumar Swaminathan ql_read_doorbell_reg(qlge_t *qlge, uint32_t *addr)
284bafec742SSukumar Swaminathan {
285bafec742SSukumar Swaminathan 	uint32_t ret;
286bafec742SSukumar Swaminathan 
287bafec742SSukumar Swaminathan 	ret = ddi_get32(qlge->dev_doorbell_reg_handle, addr);
288bafec742SSukumar Swaminathan 
289bafec742SSukumar Swaminathan 	return	(ret);
290bafec742SSukumar Swaminathan }
291bafec742SSukumar Swaminathan 
292bafec742SSukumar Swaminathan /*
293bafec742SSukumar Swaminathan  * This function waits for a specific bit to come ready
294bafec742SSukumar Swaminathan  * in a given register.  It is used mostly by the initialize
295bafec742SSukumar Swaminathan  * process, but is also used in kernel thread API such as
296bafec742SSukumar Swaminathan  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
297bafec742SSukumar Swaminathan  */
298bafec742SSukumar Swaminathan static int
299bafec742SSukumar Swaminathan ql_wait_reg_rdy(qlge_t *qlge, uint32_t reg, uint32_t bit, uint32_t err_bit)
300bafec742SSukumar Swaminathan {
301bafec742SSukumar Swaminathan 	uint32_t temp;
302bafec742SSukumar Swaminathan 	int count = UDELAY_COUNT;
303bafec742SSukumar Swaminathan 
304bafec742SSukumar Swaminathan 	while (count) {
305bafec742SSukumar Swaminathan 		temp = ql_read_reg(qlge, reg);
306bafec742SSukumar Swaminathan 
307bafec742SSukumar Swaminathan 		/* check for errors */
308bafec742SSukumar Swaminathan 		if ((temp & err_bit) != 0) {
309bafec742SSukumar Swaminathan 			break;
310bafec742SSukumar Swaminathan 		} else if ((temp & bit) != 0)
311bafec742SSukumar Swaminathan 			return (DDI_SUCCESS);
312bafec742SSukumar Swaminathan 		qlge_delay(UDELAY_DELAY);
313bafec742SSukumar Swaminathan 		count--;
314bafec742SSukumar Swaminathan 	}
315bafec742SSukumar Swaminathan 	cmn_err(CE_WARN,
316bafec742SSukumar Swaminathan 	    "Waiting for reg %x to come ready failed.", reg);
317bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
318bafec742SSukumar Swaminathan }
319bafec742SSukumar Swaminathan 
320bafec742SSukumar Swaminathan /*
321bafec742SSukumar Swaminathan  * The CFG register is used to download TX and RX control blocks
322bafec742SSukumar Swaminathan  * to the chip. This function waits for an operation to complete.
323bafec742SSukumar Swaminathan  */
324bafec742SSukumar Swaminathan static int
325bafec742SSukumar Swaminathan ql_wait_cfg(qlge_t *qlge, uint32_t bit)
326bafec742SSukumar Swaminathan {
327bafec742SSukumar Swaminathan 	int count = UDELAY_COUNT;
328bafec742SSukumar Swaminathan 	uint32_t temp;
329bafec742SSukumar Swaminathan 
330bafec742SSukumar Swaminathan 	while (count) {
331bafec742SSukumar Swaminathan 		temp = ql_read_reg(qlge, REG_CONFIGURATION);
332bafec742SSukumar Swaminathan 		if ((temp & CFG_LE) != 0) {
333bafec742SSukumar Swaminathan 			break;
334bafec742SSukumar Swaminathan 		}
335bafec742SSukumar Swaminathan 		if ((temp & bit) == 0)
336bafec742SSukumar Swaminathan 			return (DDI_SUCCESS);
337bafec742SSukumar Swaminathan 		qlge_delay(UDELAY_DELAY);
338bafec742SSukumar Swaminathan 		count--;
339bafec742SSukumar Swaminathan 	}
340bafec742SSukumar Swaminathan 	cmn_err(CE_WARN,
341bafec742SSukumar Swaminathan 	    "Waiting for cfg register bit %x failed.", bit);
342bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
343bafec742SSukumar Swaminathan }
344bafec742SSukumar Swaminathan 
345bafec742SSukumar Swaminathan 
346bafec742SSukumar Swaminathan /*
347bafec742SSukumar Swaminathan  * Used to issue init control blocks to hw. Maps control block,
348bafec742SSukumar Swaminathan  * sets address, triggers download, waits for completion.
349bafec742SSukumar Swaminathan  */
350bafec742SSukumar Swaminathan static int
351bafec742SSukumar Swaminathan ql_write_cfg(qlge_t *qlge, uint32_t bit, uint64_t phy_addr, uint16_t q_id)
352bafec742SSukumar Swaminathan {
353bafec742SSukumar Swaminathan 	int status = DDI_SUCCESS;
354bafec742SSukumar Swaminathan 	uint32_t mask;
355bafec742SSukumar Swaminathan 	uint32_t value;
356bafec742SSukumar Swaminathan 
357bafec742SSukumar Swaminathan 	status = ql_sem_spinlock(qlge, SEM_ICB_MASK);
358bafec742SSukumar Swaminathan 	if (status != DDI_SUCCESS) {
359bafec742SSukumar Swaminathan 		goto exit;
360bafec742SSukumar Swaminathan 	}
361bafec742SSukumar Swaminathan 	status = ql_wait_cfg(qlge, bit);
362bafec742SSukumar Swaminathan 	if (status != DDI_SUCCESS) {
363bafec742SSukumar Swaminathan 		goto exit;
364bafec742SSukumar Swaminathan 	}
365bafec742SSukumar Swaminathan 
366bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_LOWER, LS_64BITS(phy_addr));
367bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_UPPER, MS_64BITS(phy_addr));
368bafec742SSukumar Swaminathan 
369bafec742SSukumar Swaminathan 	mask = CFG_Q_MASK | (bit << 16);
370bafec742SSukumar Swaminathan 	value = bit | (q_id << CFG_Q_SHIFT);
371bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_CONFIGURATION, (mask | value));
372bafec742SSukumar Swaminathan 
373bafec742SSukumar Swaminathan 	/*
374bafec742SSukumar Swaminathan 	 * Wait for the bit to clear after signaling hw.
375bafec742SSukumar Swaminathan 	 */
376bafec742SSukumar Swaminathan 	status = ql_wait_cfg(qlge, bit);
377bafec742SSukumar Swaminathan 	ql_sem_unlock(qlge, SEM_ICB_MASK); /* does flush too */
378bafec742SSukumar Swaminathan 
379bafec742SSukumar Swaminathan exit:
380bafec742SSukumar Swaminathan 	return (status);
381bafec742SSukumar Swaminathan }
382bafec742SSukumar Swaminathan 
383bafec742SSukumar Swaminathan /*
384bafec742SSukumar Swaminathan  * Initialize adapter instance
385bafec742SSukumar Swaminathan  */
/*
 * Initialize one qlge_t soft-state instance with defaults, then refine
 * them from the .conf file, PCI config space and flash/NVRAM contents.
 * Returns DDI_SUCCESS, or DDI_FAILURE if the MAC info or flash read fails.
 */
static int
ql_init_instance(qlge_t *qlge)
{
	int i;

	/* Default value */
	qlge->mac_flags = QL_MAC_INIT;
	qlge->mtu = ETHERMTU;		/* set normal size as default */
	qlge->page_size = VM_PAGE_SIZE;	/* default page size */
	/* Set up the default ring sizes. */
	qlge->tx_ring_size = NUM_TX_RING_ENTRIES;
	qlge->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT;
	qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT;
	qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT;
	qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT;
	qlge->payload_copy_thresh = DFLT_PAYLOAD_COPY_THRESH;
	/* Debug printing is off unless the build defines QL_DEBUG. */
	qlge->ql_dbgprnt = 0;
#if QL_DEBUG
	qlge->ql_dbgprnt = QL_DEBUG;
#endif /* QL_DEBUG */

	/*
	 * TODO: Should be obtained from configuration or based off
	 * number of active cpus SJP 4th Mar. 09
	 */
	qlge->tx_ring_count = 1;
	qlge->rss_ring_count = 4;
	/* Total completion rings: one per TX ring plus the RSS RX rings. */
	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;

	/* Zero the per-ring poll/interrupt statistics. */
	for (i = 0; i < MAX_RX_RINGS; i++) {
		qlge->rx_polls[i] = 0;
		qlge->rx_interrupts[i] = 0;
	}

	/*
	 * Set up the operating parameters.
	 */
	qlge->multicast_list_count = 0;

	/*
	 * Set up the max number of unicast list
	 */
	qlge->unicst_total = MAX_UNICAST_LIST_SIZE;
	qlge->unicst_avail = MAX_UNICAST_LIST_SIZE;

	/*
	 * read user defined properties in .conf file
	 */
	ql_read_conf(qlge); /* mtu, pause, LSO etc */

	QL_PRINT(DBG_INIT, ("mtu is %d \n", qlge->mtu));

	/* choose Memory Space mapping and get Vendor Id, Device ID etc */
	ql_pci_config(qlge);
	qlge->ip_hdr_offset = 0;

	if (qlge->device_id == 0x8000) {
		/* Schultz card */
		qlge->cfg_flags |= CFG_CHIP_8100;
		/* enable just ipv4 chksum offload for Schultz */
		qlge->cfg_flags |= CFG_CKSUM_FULL_IPv4;
		/*
		 * Schultz firmware does not do pseudo IP header checksum
		 * calculation, needed to be done by driver
		 */
		qlge->cfg_flags |= CFG_HW_UNABLE_PSEUDO_HDR_CKSUM;
		if (qlge->lso_enable)
			qlge->cfg_flags |= CFG_LSO;
		qlge->cfg_flags |= CFG_SUPPORT_SCATTER_GATHER;
		/* Schultz must split packet header */
		qlge->cfg_flags |= CFG_ENABLE_SPLIT_HEADER;
		qlge->max_read_mbx = 5;
		qlge->ip_hdr_offset = 2;
	}

	/* Set Function Number and some of the iocb mac information */
	if (ql_set_mac_info(qlge) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Read network settings from NVRAM */
	/* After nvram is read successfully, update dev_addr */
	if (ql_get_flash_params(qlge) == DDI_SUCCESS) {
		QL_PRINT(DBG_INIT, ("mac%d address is \n", qlge->func_number));
		/* Adopt the factory MAC address from flash as dev_addr. */
		for (i = 0; i < ETHERADDRL; i++) {
			qlge->dev_addr.ether_addr_octet[i] =
			    qlge->nic_config.factory_MAC[i];
		}
	} else {
		cmn_err(CE_WARN, "%s(%d): Failed to read flash memory",
		    __func__, qlge->instance);
		return (DDI_FAILURE);
	}

	/* Slot 0 of the unicast list mirrors the primary MAC address. */
	bcopy(qlge->dev_addr.ether_addr_octet,
	    qlge->unicst_addr[0].addr.ether_addr_octet,
	    ETHERADDRL);
	QL_DUMP(DBG_INIT, "\t flash mac address dump:\n",
	    &qlge->dev_addr.ether_addr_octet[0], 8, ETHERADDRL);

	qlge->port_link_state = LS_DOWN;

	return (DDI_SUCCESS);
}
492bafec742SSukumar Swaminathan 
493bafec742SSukumar Swaminathan 
494bafec742SSukumar Swaminathan /*
495bafec742SSukumar Swaminathan  * This hardware semaphore provides the mechanism for exclusive access to
496bafec742SSukumar Swaminathan  * resources shared between the NIC driver, MPI firmware,
497bafec742SSukumar Swaminathan  * FCOE firmware and the FC driver.
498bafec742SSukumar Swaminathan  */
499bafec742SSukumar Swaminathan static int
500bafec742SSukumar Swaminathan ql_sem_trylock(qlge_t *qlge, uint32_t sem_mask)
501bafec742SSukumar Swaminathan {
502bafec742SSukumar Swaminathan 	uint32_t sem_bits = 0;
503bafec742SSukumar Swaminathan 
504bafec742SSukumar Swaminathan 	switch (sem_mask) {
505bafec742SSukumar Swaminathan 	case SEM_XGMAC0_MASK:
506bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
507bafec742SSukumar Swaminathan 		break;
508bafec742SSukumar Swaminathan 	case SEM_XGMAC1_MASK:
509bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
510bafec742SSukumar Swaminathan 		break;
511bafec742SSukumar Swaminathan 	case SEM_ICB_MASK:
512bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_ICB_SHIFT;
513bafec742SSukumar Swaminathan 		break;
514bafec742SSukumar Swaminathan 	case SEM_MAC_ADDR_MASK:
515bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
516bafec742SSukumar Swaminathan 		break;
517bafec742SSukumar Swaminathan 	case SEM_FLASH_MASK:
518bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
519bafec742SSukumar Swaminathan 		break;
520bafec742SSukumar Swaminathan 	case SEM_PROBE_MASK:
521bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
522bafec742SSukumar Swaminathan 		break;
523bafec742SSukumar Swaminathan 	case SEM_RT_IDX_MASK:
524bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
525bafec742SSukumar Swaminathan 		break;
526bafec742SSukumar Swaminathan 	case SEM_PROC_REG_MASK:
527bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
528bafec742SSukumar Swaminathan 		break;
529bafec742SSukumar Swaminathan 	default:
530bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Bad Semaphore mask!.");
531bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
532bafec742SSukumar Swaminathan 	}
533bafec742SSukumar Swaminathan 
534bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_SEMAPHORE, sem_bits | sem_mask);
535bafec742SSukumar Swaminathan 	return (!(ql_read_reg(qlge, REG_SEMAPHORE) & sem_bits));
536bafec742SSukumar Swaminathan }
537bafec742SSukumar Swaminathan 
538bafec742SSukumar Swaminathan /*
539bafec742SSukumar Swaminathan  * Lock a specific bit of Semaphore register to gain
540bafec742SSukumar Swaminathan  * access to a particular shared register
541bafec742SSukumar Swaminathan  */
542bafec742SSukumar Swaminathan int
543bafec742SSukumar Swaminathan ql_sem_spinlock(qlge_t *qlge, uint32_t sem_mask)
544bafec742SSukumar Swaminathan {
545bafec742SSukumar Swaminathan 	unsigned int wait_count = 30;
546bafec742SSukumar Swaminathan 
547bafec742SSukumar Swaminathan 	while (wait_count) {
548bafec742SSukumar Swaminathan 		if (!ql_sem_trylock(qlge, sem_mask))
549bafec742SSukumar Swaminathan 			return (DDI_SUCCESS);
550bafec742SSukumar Swaminathan 		qlge_delay(100);
551bafec742SSukumar Swaminathan 		wait_count--;
552bafec742SSukumar Swaminathan 	}
553bafec742SSukumar Swaminathan 	cmn_err(CE_WARN, "%s(%d) sem_mask 0x%x lock timeout ",
554bafec742SSukumar Swaminathan 	    __func__, qlge->instance, sem_mask);
555bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
556bafec742SSukumar Swaminathan }
557bafec742SSukumar Swaminathan 
558bafec742SSukumar Swaminathan /*
 * Unlock a specific bit of Semaphore register to release
560bafec742SSukumar Swaminathan  * access to a particular shared register
561bafec742SSukumar Swaminathan  */
void
ql_sem_unlock(qlge_t *qlge, uint32_t sem_mask)
{
	/* Writing the mask releases the semaphore bit taken in trylock. */
	ql_write_reg(qlge, REG_SEMAPHORE, sem_mask);
	(void) ql_read_reg(qlge, REG_SEMAPHORE);	/* flush */
}
568bafec742SSukumar Swaminathan 
569bafec742SSukumar Swaminathan /*
570bafec742SSukumar Swaminathan  * Get property value from configuration file.
571bafec742SSukumar Swaminathan  *
572bafec742SSukumar Swaminathan  * string = property string pointer.
573bafec742SSukumar Swaminathan  *
574bafec742SSukumar Swaminathan  * Returns:
575bafec742SSukumar Swaminathan  * 0xFFFFFFFF = no property else property value.
576bafec742SSukumar Swaminathan  */
577bafec742SSukumar Swaminathan static uint32_t
578bafec742SSukumar Swaminathan ql_get_prop(qlge_t *qlge, char *string)
579bafec742SSukumar Swaminathan {
580bafec742SSukumar Swaminathan 	char buf[256];
581bafec742SSukumar Swaminathan 	uint32_t data;
582bafec742SSukumar Swaminathan 
583bafec742SSukumar Swaminathan 	/* Get adapter instance parameter. */
584bafec742SSukumar Swaminathan 	(void) sprintf(buf, "hba%d-%s", qlge->instance, string);
585bafec742SSukumar Swaminathan 	data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0, buf,
586bafec742SSukumar Swaminathan 	    (int)0xffffffff);
587bafec742SSukumar Swaminathan 
588bafec742SSukumar Swaminathan 	/* Adapter instance parameter found? */
589bafec742SSukumar Swaminathan 	if (data == 0xffffffff) {
590bafec742SSukumar Swaminathan 		/* No, get default parameter. */
591bafec742SSukumar Swaminathan 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0,
592bafec742SSukumar Swaminathan 		    string, (int)0xffffffff);
593bafec742SSukumar Swaminathan 	}
594bafec742SSukumar Swaminathan 
595bafec742SSukumar Swaminathan 	return (data);
596bafec742SSukumar Swaminathan }
597bafec742SSukumar Swaminathan 
598bafec742SSukumar Swaminathan /*
599bafec742SSukumar Swaminathan  * Read user setting from configuration file.
600bafec742SSukumar Swaminathan  */
601bafec742SSukumar Swaminathan static void
602bafec742SSukumar Swaminathan ql_read_conf(qlge_t *qlge)
603bafec742SSukumar Swaminathan {
604bafec742SSukumar Swaminathan 	uint32_t data;
605bafec742SSukumar Swaminathan 
606bafec742SSukumar Swaminathan 	/* clear configuration flags */
607bafec742SSukumar Swaminathan 	qlge->cfg_flags = 0;
608bafec742SSukumar Swaminathan 
609bafec742SSukumar Swaminathan 	/* Get default rx_copy enable/disable. */
610bafec742SSukumar Swaminathan 	if ((data = ql_get_prop(qlge, "force-rx-copy")) == 0xffffffff ||
611bafec742SSukumar Swaminathan 	    data == 0) {
612bafec742SSukumar Swaminathan 		qlge->cfg_flags &= ~CFG_RX_COPY_MODE;
613bafec742SSukumar Swaminathan 		qlge->rx_copy = B_FALSE;
614bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("rx copy mode disabled\n"));
615bafec742SSukumar Swaminathan 	} else if (data == 1) {
616bafec742SSukumar Swaminathan 		qlge->cfg_flags |= CFG_RX_COPY_MODE;
617bafec742SSukumar Swaminathan 		qlge->rx_copy = B_TRUE;
618bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("rx copy mode enabled\n"));
619bafec742SSukumar Swaminathan 	}
620bafec742SSukumar Swaminathan 
621bafec742SSukumar Swaminathan 	/* Get mtu packet size. */
622bafec742SSukumar Swaminathan 	data = ql_get_prop(qlge, "mtu");
623bafec742SSukumar Swaminathan 	if ((data == ETHERMTU) || (data == JUMBO_MTU)) {
624bafec742SSukumar Swaminathan 		if (qlge->mtu != data) {
625bafec742SSukumar Swaminathan 			qlge->mtu = data;
626bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "new mtu is %d\n", qlge->mtu);
627bafec742SSukumar Swaminathan 		}
628bafec742SSukumar Swaminathan 	}
629bafec742SSukumar Swaminathan 
630bafec742SSukumar Swaminathan 	/* Get pause mode, default is Per Priority mode. */
631bafec742SSukumar Swaminathan 	qlge->pause = PAUSE_MODE_PER_PRIORITY;
632bafec742SSukumar Swaminathan 	data = ql_get_prop(qlge, "pause");
633bafec742SSukumar Swaminathan 	if (data <= PAUSE_MODE_PER_PRIORITY) {
634bafec742SSukumar Swaminathan 		if (qlge->pause != data) {
635bafec742SSukumar Swaminathan 			qlge->pause = data;
636bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "new pause mode %d\n", qlge->pause);
637bafec742SSukumar Swaminathan 		}
638bafec742SSukumar Swaminathan 	}
639bafec742SSukumar Swaminathan 
640bafec742SSukumar Swaminathan 	/* Get tx_max_coalesced_frames. */
641bafec742SSukumar Swaminathan 	qlge->tx_max_coalesced_frames = 5;
642bafec742SSukumar Swaminathan 	data = ql_get_prop(qlge, "tx_max_coalesced_frames");
643bafec742SSukumar Swaminathan 	/* if data is valid */
644bafec742SSukumar Swaminathan 	if ((data != 0xffffffff) && data) {
645bafec742SSukumar Swaminathan 		if (qlge->tx_max_coalesced_frames != data) {
646bafec742SSukumar Swaminathan 			qlge->tx_max_coalesced_frames = (uint16_t)data;
647bafec742SSukumar Swaminathan 		}
648bafec742SSukumar Swaminathan 	}
649bafec742SSukumar Swaminathan 
650bafec742SSukumar Swaminathan 	/* Get split header payload_copy_thresh. */
651bafec742SSukumar Swaminathan 	qlge->payload_copy_thresh = 6;
652bafec742SSukumar Swaminathan 	data = ql_get_prop(qlge, "payload_copy_thresh");
653bafec742SSukumar Swaminathan 	/* if data is valid */
654bafec742SSukumar Swaminathan 	if ((data != 0xffffffff) && (data != 0)) {
655bafec742SSukumar Swaminathan 		if (qlge->payload_copy_thresh != data) {
656bafec742SSukumar Swaminathan 			qlge->payload_copy_thresh = data;
657bafec742SSukumar Swaminathan 		}
658bafec742SSukumar Swaminathan 	}
659bafec742SSukumar Swaminathan 
660bafec742SSukumar Swaminathan 	/* large send offload (LSO) capability. */
661bafec742SSukumar Swaminathan 	qlge->lso_enable = 1;
662bafec742SSukumar Swaminathan 	data = ql_get_prop(qlge, "lso_enable");
663bafec742SSukumar Swaminathan 	/* if data is valid */
664bafec742SSukumar Swaminathan 	if (data != 0xffffffff) {
665bafec742SSukumar Swaminathan 		if (qlge->lso_enable != data) {
666bafec742SSukumar Swaminathan 			qlge->lso_enable = (uint16_t)data;
667bafec742SSukumar Swaminathan 		}
668bafec742SSukumar Swaminathan 	}
669bafec742SSukumar Swaminathan }
670bafec742SSukumar Swaminathan 
671bafec742SSukumar Swaminathan /*
672bafec742SSukumar Swaminathan  * Enable global interrupt
673bafec742SSukumar Swaminathan  */
674bafec742SSukumar Swaminathan static void
675bafec742SSukumar Swaminathan ql_enable_global_interrupt(qlge_t *qlge)
676bafec742SSukumar Swaminathan {
677bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE,
678bafec742SSukumar Swaminathan 	    (INTR_EN_EI << 16) | INTR_EN_EI);
679bafec742SSukumar Swaminathan 	qlge->flags |= INTERRUPTS_ENABLED;
680bafec742SSukumar Swaminathan }
681bafec742SSukumar Swaminathan 
682bafec742SSukumar Swaminathan /*
683bafec742SSukumar Swaminathan  * Disable global interrupt
684bafec742SSukumar Swaminathan  */
685bafec742SSukumar Swaminathan static void
686bafec742SSukumar Swaminathan ql_disable_global_interrupt(qlge_t *qlge)
687bafec742SSukumar Swaminathan {
688bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, (INTR_EN_EI << 16));
689bafec742SSukumar Swaminathan 	qlge->flags &= ~INTERRUPTS_ENABLED;
690bafec742SSukumar Swaminathan }
691bafec742SSukumar Swaminathan 
692bafec742SSukumar Swaminathan /*
693bafec742SSukumar Swaminathan  * Enable one ring interrupt
694bafec742SSukumar Swaminathan  */
void
ql_enable_completion_interrupt(qlge_t *qlge, uint32_t intr)
{
	/* Per-vector interrupt context for ring 'intr'. */
	struct intr_ctx *ctx = qlge->intr_ctx + intr;

	QL_PRINT(DBG_INTR, ("%s(%d): To enable intr %d, irq_cnt %d \n",
	    __func__, qlge->instance, intr, ctx->irq_cnt));

	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
		/*
		 * Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
		return;
	}

	/*
	 * irq_cnt is a disable-nesting count maintained jointly with
	 * ql_disable_completion_interrupt(); the enable mask is written
	 * only when the count drops to zero, under hw_mutex because
	 * legacy/MSI share the one enable register.
	 */
	if (!atomic_dec_32_nv(&ctx->irq_cnt)) {
		mutex_enter(&qlge->hw_mutex);
		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
		mutex_exit(&qlge->hw_mutex);
		QL_PRINT(DBG_INTR,
		    ("%s(%d): write %x to intr enable register \n",
		    __func__, qlge->instance, ctx->intr_en_mask));
	}
}
721bafec742SSukumar Swaminathan 
722bafec742SSukumar Swaminathan /*
723bafec742SSukumar Swaminathan  * ql_forced_disable_completion_interrupt
724bafec742SSukumar Swaminathan  * Used by call from OS, may be called without
725bafec742SSukumar Swaminathan  * a pending interrupt so force the disable
726bafec742SSukumar Swaminathan  */
727bafec742SSukumar Swaminathan uint32_t
728bafec742SSukumar Swaminathan ql_forced_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
729bafec742SSukumar Swaminathan {
730bafec742SSukumar Swaminathan 	uint32_t var = 0;
731bafec742SSukumar Swaminathan 	struct intr_ctx *ctx = qlge->intr_ctx + intr;
732bafec742SSukumar Swaminathan 
733bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
734bafec742SSukumar Swaminathan 	    __func__, qlge->instance, intr, ctx->irq_cnt));
735bafec742SSukumar Swaminathan 
736bafec742SSukumar Swaminathan 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
737bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
738bafec742SSukumar Swaminathan 		var = ql_read_reg(qlge, REG_STATUS);
739bafec742SSukumar Swaminathan 		return (var);
740bafec742SSukumar Swaminathan 	}
741bafec742SSukumar Swaminathan 
742bafec742SSukumar Swaminathan 	mutex_enter(&qlge->hw_mutex);
743bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
744bafec742SSukumar Swaminathan 	var = ql_read_reg(qlge, REG_STATUS);
745bafec742SSukumar Swaminathan 	mutex_exit(&qlge->hw_mutex);
746bafec742SSukumar Swaminathan 
747bafec742SSukumar Swaminathan 	return (var);
748bafec742SSukumar Swaminathan }
749bafec742SSukumar Swaminathan 
750bafec742SSukumar Swaminathan /*
751bafec742SSukumar Swaminathan  * Disable a completion interrupt
752bafec742SSukumar Swaminathan  */
void
ql_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
{
	struct intr_ctx *ctx;

	ctx = qlge->intr_ctx + intr;
	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
	    __func__, qlge->instance, intr, ctx->irq_cnt));
	/*
	 * HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && (intr != 0))
		return;

	/*
	 * irq_cnt counts nested disables (paired with the decrement in
	 * ql_enable_completion_interrupt).  Only the first disable
	 * (count currently zero) actually writes the disable mask;
	 * hw_mutex serializes access to the shared enable register.
	 */
	if (ql_atomic_read_32(&ctx->irq_cnt) == 0) {
		mutex_enter(&qlge->hw_mutex);
		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
		mutex_exit(&qlge->hw_mutex);
	}
	atomic_inc_32(&ctx->irq_cnt);
}
775bafec742SSukumar Swaminathan 
776bafec742SSukumar Swaminathan /*
777bafec742SSukumar Swaminathan  * Enable all completion interrupts
778bafec742SSukumar Swaminathan  */
779bafec742SSukumar Swaminathan static void
780bafec742SSukumar Swaminathan ql_enable_all_completion_interrupts(qlge_t *qlge)
781bafec742SSukumar Swaminathan {
782bafec742SSukumar Swaminathan 	int i;
783bafec742SSukumar Swaminathan 	uint32_t value = 1;
784bafec742SSukumar Swaminathan 
785bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->intr_cnt; i++) {
786bafec742SSukumar Swaminathan 		/*
787bafec742SSukumar Swaminathan 		 * Set the count to 1 for Legacy / MSI interrupts or for the
788bafec742SSukumar Swaminathan 		 * default interrupt (0)
789bafec742SSukumar Swaminathan 		 */
790bafec742SSukumar Swaminathan 		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0) {
791bafec742SSukumar Swaminathan 			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
792bafec742SSukumar Swaminathan 		}
793bafec742SSukumar Swaminathan 		ql_enable_completion_interrupt(qlge, i);
794bafec742SSukumar Swaminathan 	}
795bafec742SSukumar Swaminathan }
796bafec742SSukumar Swaminathan 
797bafec742SSukumar Swaminathan /*
798bafec742SSukumar Swaminathan  * Disable all completion interrupts
799bafec742SSukumar Swaminathan  */
800bafec742SSukumar Swaminathan static void
801bafec742SSukumar Swaminathan ql_disable_all_completion_interrupts(qlge_t *qlge)
802bafec742SSukumar Swaminathan {
803bafec742SSukumar Swaminathan 	int i;
804bafec742SSukumar Swaminathan 	uint32_t value = 0;
805bafec742SSukumar Swaminathan 
806bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->intr_cnt; i++) {
807bafec742SSukumar Swaminathan 
808bafec742SSukumar Swaminathan 		/*
809bafec742SSukumar Swaminathan 		 * Set the count to 0 for Legacy / MSI interrupts or for the
810bafec742SSukumar Swaminathan 		 * default interrupt (0)
811bafec742SSukumar Swaminathan 		 */
812bafec742SSukumar Swaminathan 		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0)
813bafec742SSukumar Swaminathan 			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
814bafec742SSukumar Swaminathan 
815bafec742SSukumar Swaminathan 		ql_disable_completion_interrupt(qlge, i);
816bafec742SSukumar Swaminathan 	}
817bafec742SSukumar Swaminathan }
818bafec742SSukumar Swaminathan 
819bafec742SSukumar Swaminathan /*
820bafec742SSukumar Swaminathan  * Update small buffer queue producer index
821bafec742SSukumar Swaminathan  */
822bafec742SSukumar Swaminathan static void
823bafec742SSukumar Swaminathan ql_update_sbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
824bafec742SSukumar Swaminathan {
825bafec742SSukumar Swaminathan 	/* Update the buffer producer index */
826bafec742SSukumar Swaminathan 	QL_PRINT(DBG_RX, ("sbq: updating prod idx = %d.\n",
827bafec742SSukumar Swaminathan 	    rx_ring->sbq_prod_idx));
828bafec742SSukumar Swaminathan 	ql_write_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg,
829bafec742SSukumar Swaminathan 	    rx_ring->sbq_prod_idx);
830bafec742SSukumar Swaminathan }
831bafec742SSukumar Swaminathan 
832bafec742SSukumar Swaminathan /*
833bafec742SSukumar Swaminathan  * Update large buffer queue producer index
834bafec742SSukumar Swaminathan  */
835bafec742SSukumar Swaminathan static void
836bafec742SSukumar Swaminathan ql_update_lbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
837bafec742SSukumar Swaminathan {
838bafec742SSukumar Swaminathan 	/* Update the buffer producer index */
839bafec742SSukumar Swaminathan 	QL_PRINT(DBG_RX, ("lbq: updating prod idx = %d.\n",
840bafec742SSukumar Swaminathan 	    rx_ring->lbq_prod_idx));
841bafec742SSukumar Swaminathan 	ql_write_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg,
842bafec742SSukumar Swaminathan 	    rx_ring->lbq_prod_idx);
843bafec742SSukumar Swaminathan }
844bafec742SSukumar Swaminathan 
845bafec742SSukumar Swaminathan /*
846bafec742SSukumar Swaminathan  * Adds a small buffer descriptor to end of its in use list,
847bafec742SSukumar Swaminathan  * assumes sbq_lock is already taken
848bafec742SSukumar Swaminathan  */
849bafec742SSukumar Swaminathan static void
850bafec742SSukumar Swaminathan ql_add_sbuf_to_in_use_list(struct rx_ring *rx_ring,
851bafec742SSukumar Swaminathan     struct bq_desc *sbq_desc)
852bafec742SSukumar Swaminathan {
853bafec742SSukumar Swaminathan 	uint32_t inuse_idx = rx_ring->sbq_use_tail;
854bafec742SSukumar Swaminathan 
855bafec742SSukumar Swaminathan 	rx_ring->sbuf_in_use[inuse_idx] = sbq_desc;
856bafec742SSukumar Swaminathan 	inuse_idx++;
857bafec742SSukumar Swaminathan 	if (inuse_idx >= rx_ring->sbq_len)
858bafec742SSukumar Swaminathan 		inuse_idx = 0;
859bafec742SSukumar Swaminathan 	rx_ring->sbq_use_tail = inuse_idx;
860bafec742SSukumar Swaminathan 	atomic_inc_32(&rx_ring->sbuf_in_use_count);
861bafec742SSukumar Swaminathan 	ASSERT(rx_ring->sbuf_in_use_count <= rx_ring->sbq_len);
862bafec742SSukumar Swaminathan }
863bafec742SSukumar Swaminathan 
864bafec742SSukumar Swaminathan /*
865bafec742SSukumar Swaminathan  * Get a small buffer descriptor from its in use list
866bafec742SSukumar Swaminathan  */
867bafec742SSukumar Swaminathan static struct bq_desc *
868bafec742SSukumar Swaminathan ql_get_sbuf_from_in_use_list(struct rx_ring *rx_ring)
869bafec742SSukumar Swaminathan {
870bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc = NULL;
871bafec742SSukumar Swaminathan 	uint32_t inuse_idx;
872bafec742SSukumar Swaminathan 
873bafec742SSukumar Swaminathan 	/* Pick from head of in use list */
874bafec742SSukumar Swaminathan 	inuse_idx = rx_ring->sbq_use_head;
875bafec742SSukumar Swaminathan 	sbq_desc = rx_ring->sbuf_in_use[inuse_idx];
876bafec742SSukumar Swaminathan 	rx_ring->sbuf_in_use[inuse_idx] = NULL;
877bafec742SSukumar Swaminathan 
878bafec742SSukumar Swaminathan 	if (sbq_desc != NULL) {
879bafec742SSukumar Swaminathan 		inuse_idx++;
880bafec742SSukumar Swaminathan 		if (inuse_idx >= rx_ring->sbq_len)
881bafec742SSukumar Swaminathan 			inuse_idx = 0;
882bafec742SSukumar Swaminathan 		rx_ring->sbq_use_head = inuse_idx;
883bafec742SSukumar Swaminathan 		atomic_dec_32(&rx_ring->sbuf_in_use_count);
884bafec742SSukumar Swaminathan 		atomic_inc_32(&rx_ring->rx_indicate);
885bafec742SSukumar Swaminathan 		sbq_desc->upl_inuse = 1;
886bafec742SSukumar Swaminathan 		/* if mp is NULL */
887bafec742SSukumar Swaminathan 		if (sbq_desc->mp == NULL) {
888bafec742SSukumar Swaminathan 			/* try to remap mp again */
889bafec742SSukumar Swaminathan 			sbq_desc->mp =
890bafec742SSukumar Swaminathan 			    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
891bafec742SSukumar Swaminathan 			    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
892bafec742SSukumar Swaminathan 		}
893bafec742SSukumar Swaminathan 	}
894bafec742SSukumar Swaminathan 
895bafec742SSukumar Swaminathan 	return (sbq_desc);
896bafec742SSukumar Swaminathan }
897bafec742SSukumar Swaminathan 
898bafec742SSukumar Swaminathan /*
899bafec742SSukumar Swaminathan  * Add a small buffer descriptor to its free list
900bafec742SSukumar Swaminathan  */
901bafec742SSukumar Swaminathan static void
902bafec742SSukumar Swaminathan ql_add_sbuf_to_free_list(struct rx_ring *rx_ring,
903bafec742SSukumar Swaminathan     struct bq_desc *sbq_desc)
904bafec742SSukumar Swaminathan {
905bafec742SSukumar Swaminathan 	uint32_t free_idx;
906bafec742SSukumar Swaminathan 
907bafec742SSukumar Swaminathan 	/* Add to the end of free list */
908bafec742SSukumar Swaminathan 	free_idx = rx_ring->sbq_free_tail;
909bafec742SSukumar Swaminathan 	rx_ring->sbuf_free[free_idx] = sbq_desc;
910bafec742SSukumar Swaminathan 	ASSERT(rx_ring->sbuf_free_count <= rx_ring->sbq_len);
911bafec742SSukumar Swaminathan 	free_idx++;
912bafec742SSukumar Swaminathan 	if (free_idx >= rx_ring->sbq_len)
913bafec742SSukumar Swaminathan 		free_idx = 0;
914bafec742SSukumar Swaminathan 	rx_ring->sbq_free_tail = free_idx;
915bafec742SSukumar Swaminathan 	atomic_inc_32(&rx_ring->sbuf_free_count);
916bafec742SSukumar Swaminathan }
917bafec742SSukumar Swaminathan 
918bafec742SSukumar Swaminathan /*
919bafec742SSukumar Swaminathan  * Get a small buffer descriptor from its free list
920bafec742SSukumar Swaminathan  */
921bafec742SSukumar Swaminathan static struct bq_desc *
922bafec742SSukumar Swaminathan ql_get_sbuf_from_free_list(struct rx_ring *rx_ring)
923bafec742SSukumar Swaminathan {
924bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
925bafec742SSukumar Swaminathan 	uint32_t free_idx;
926bafec742SSukumar Swaminathan 
927bafec742SSukumar Swaminathan 	free_idx = rx_ring->sbq_free_head;
928bafec742SSukumar Swaminathan 	/* Pick from top of free list */
929bafec742SSukumar Swaminathan 	sbq_desc = rx_ring->sbuf_free[free_idx];
930bafec742SSukumar Swaminathan 	rx_ring->sbuf_free[free_idx] = NULL;
931bafec742SSukumar Swaminathan 	if (sbq_desc != NULL) {
932bafec742SSukumar Swaminathan 		free_idx++;
933bafec742SSukumar Swaminathan 		if (free_idx >= rx_ring->sbq_len)
934bafec742SSukumar Swaminathan 			free_idx = 0;
935bafec742SSukumar Swaminathan 		rx_ring->sbq_free_head = free_idx;
936bafec742SSukumar Swaminathan 		atomic_dec_32(&rx_ring->sbuf_free_count);
937bafec742SSukumar Swaminathan 		ASSERT(rx_ring->sbuf_free_count != 0);
938bafec742SSukumar Swaminathan 	}
939bafec742SSukumar Swaminathan 	return (sbq_desc);
940bafec742SSukumar Swaminathan }
941bafec742SSukumar Swaminathan 
942bafec742SSukumar Swaminathan /*
943bafec742SSukumar Swaminathan  * Add a large buffer descriptor to its in use list
944bafec742SSukumar Swaminathan  */
945bafec742SSukumar Swaminathan static void
946bafec742SSukumar Swaminathan ql_add_lbuf_to_in_use_list(struct rx_ring *rx_ring,
947bafec742SSukumar Swaminathan     struct bq_desc *lbq_desc)
948bafec742SSukumar Swaminathan {
949bafec742SSukumar Swaminathan 	uint32_t inuse_idx;
950bafec742SSukumar Swaminathan 
951bafec742SSukumar Swaminathan 	inuse_idx = rx_ring->lbq_use_tail;
952bafec742SSukumar Swaminathan 
953bafec742SSukumar Swaminathan 	rx_ring->lbuf_in_use[inuse_idx] = lbq_desc;
954bafec742SSukumar Swaminathan 	inuse_idx++;
955bafec742SSukumar Swaminathan 	if (inuse_idx >= rx_ring->lbq_len)
956bafec742SSukumar Swaminathan 		inuse_idx = 0;
957bafec742SSukumar Swaminathan 	rx_ring->lbq_use_tail = inuse_idx;
958bafec742SSukumar Swaminathan 	atomic_inc_32(&rx_ring->lbuf_in_use_count);
959bafec742SSukumar Swaminathan }
960bafec742SSukumar Swaminathan 
961bafec742SSukumar Swaminathan /*
962bafec742SSukumar Swaminathan  * Get a large buffer descriptor from in use list
963bafec742SSukumar Swaminathan  */
964bafec742SSukumar Swaminathan static struct bq_desc *
965bafec742SSukumar Swaminathan ql_get_lbuf_from_in_use_list(struct rx_ring *rx_ring)
966bafec742SSukumar Swaminathan {
967bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
968bafec742SSukumar Swaminathan 	uint32_t inuse_idx;
969bafec742SSukumar Swaminathan 
970bafec742SSukumar Swaminathan 	/* Pick from head of in use list */
971bafec742SSukumar Swaminathan 	inuse_idx = rx_ring->lbq_use_head;
972bafec742SSukumar Swaminathan 	lbq_desc = rx_ring->lbuf_in_use[inuse_idx];
973bafec742SSukumar Swaminathan 	rx_ring->lbuf_in_use[inuse_idx] = NULL;
974bafec742SSukumar Swaminathan 
975bafec742SSukumar Swaminathan 	if (lbq_desc != NULL) {
976bafec742SSukumar Swaminathan 		inuse_idx++;
977bafec742SSukumar Swaminathan 		if (inuse_idx >= rx_ring->lbq_len)
978bafec742SSukumar Swaminathan 			inuse_idx = 0;
979bafec742SSukumar Swaminathan 		rx_ring->lbq_use_head = inuse_idx;
980bafec742SSukumar Swaminathan 		atomic_dec_32(&rx_ring->lbuf_in_use_count);
981bafec742SSukumar Swaminathan 		atomic_inc_32(&rx_ring->rx_indicate);
982bafec742SSukumar Swaminathan 		lbq_desc->upl_inuse = 1;
983bafec742SSukumar Swaminathan 
984bafec742SSukumar Swaminathan 		/* if mp is NULL */
985bafec742SSukumar Swaminathan 		if (lbq_desc->mp == NULL) {
986bafec742SSukumar Swaminathan 			/* try to remap mp again */
987bafec742SSukumar Swaminathan 			lbq_desc->mp =
988bafec742SSukumar Swaminathan 			    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
989bafec742SSukumar Swaminathan 			    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
990bafec742SSukumar Swaminathan 		}
991bafec742SSukumar Swaminathan 	}
992bafec742SSukumar Swaminathan 	return (lbq_desc);
993bafec742SSukumar Swaminathan }
994bafec742SSukumar Swaminathan 
995bafec742SSukumar Swaminathan /*
996bafec742SSukumar Swaminathan  * Add a large buffer descriptor to free list
997bafec742SSukumar Swaminathan  */
998bafec742SSukumar Swaminathan static void
999bafec742SSukumar Swaminathan ql_add_lbuf_to_free_list(struct rx_ring *rx_ring,
1000bafec742SSukumar Swaminathan     struct bq_desc *lbq_desc)
1001bafec742SSukumar Swaminathan {
1002bafec742SSukumar Swaminathan 	uint32_t free_idx;
1003bafec742SSukumar Swaminathan 
1004bafec742SSukumar Swaminathan 	/* Add to the end of free list */
1005bafec742SSukumar Swaminathan 	free_idx = rx_ring->lbq_free_tail;
1006bafec742SSukumar Swaminathan 	rx_ring->lbuf_free[free_idx] = lbq_desc;
1007bafec742SSukumar Swaminathan 	free_idx++;
1008bafec742SSukumar Swaminathan 	if (free_idx >= rx_ring->lbq_len)
1009bafec742SSukumar Swaminathan 		free_idx = 0;
1010bafec742SSukumar Swaminathan 	rx_ring->lbq_free_tail = free_idx;
1011bafec742SSukumar Swaminathan 	atomic_inc_32(&rx_ring->lbuf_free_count);
1012bafec742SSukumar Swaminathan 	ASSERT(rx_ring->lbuf_free_count <= rx_ring->lbq_len);
1013bafec742SSukumar Swaminathan }
1014bafec742SSukumar Swaminathan 
1015bafec742SSukumar Swaminathan /*
1016bafec742SSukumar Swaminathan  * Get a large buffer descriptor from its free list
1017bafec742SSukumar Swaminathan  */
1018bafec742SSukumar Swaminathan static struct bq_desc *
1019bafec742SSukumar Swaminathan ql_get_lbuf_from_free_list(struct rx_ring *rx_ring)
1020bafec742SSukumar Swaminathan {
1021bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1022bafec742SSukumar Swaminathan 	uint32_t free_idx;
1023bafec742SSukumar Swaminathan 
1024bafec742SSukumar Swaminathan 	free_idx = rx_ring->lbq_free_head;
1025bafec742SSukumar Swaminathan 	/* Pick from head of free list */
1026bafec742SSukumar Swaminathan 	lbq_desc = rx_ring->lbuf_free[free_idx];
1027bafec742SSukumar Swaminathan 	rx_ring->lbuf_free[free_idx] = NULL;
1028bafec742SSukumar Swaminathan 
1029bafec742SSukumar Swaminathan 	if (lbq_desc != NULL) {
1030bafec742SSukumar Swaminathan 		free_idx++;
1031bafec742SSukumar Swaminathan 		if (free_idx >= rx_ring->lbq_len)
1032bafec742SSukumar Swaminathan 			free_idx = 0;
1033bafec742SSukumar Swaminathan 		rx_ring->lbq_free_head = free_idx;
1034bafec742SSukumar Swaminathan 		atomic_dec_32(&rx_ring->lbuf_free_count);
1035bafec742SSukumar Swaminathan 		ASSERT(rx_ring->lbuf_free_count != 0);
1036bafec742SSukumar Swaminathan 	}
1037bafec742SSukumar Swaminathan 	return (lbq_desc);
1038bafec742SSukumar Swaminathan }
1039bafec742SSukumar Swaminathan 
1040bafec742SSukumar Swaminathan /*
1041bafec742SSukumar Swaminathan  * Add a small buffer descriptor to free list
1042bafec742SSukumar Swaminathan  */
1043bafec742SSukumar Swaminathan static void
1044bafec742SSukumar Swaminathan ql_refill_sbuf_free_list(struct bq_desc *sbq_desc, boolean_t alloc_memory)
1045bafec742SSukumar Swaminathan {
1046bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring = sbq_desc->rx_ring;
1047bafec742SSukumar Swaminathan 	uint64_t *sbq_entry;
1048bafec742SSukumar Swaminathan 	qlge_t *qlge = (qlge_t *)rx_ring->qlge;
1049bafec742SSukumar Swaminathan 	/*
1050bafec742SSukumar Swaminathan 	 * Sync access
1051bafec742SSukumar Swaminathan 	 */
1052bafec742SSukumar Swaminathan 	mutex_enter(&rx_ring->sbq_lock);
1053bafec742SSukumar Swaminathan 
1054bafec742SSukumar Swaminathan 	sbq_desc->upl_inuse = 0;
1055bafec742SSukumar Swaminathan 
1056bafec742SSukumar Swaminathan 	/*
1057bafec742SSukumar Swaminathan 	 * If we are freeing the buffers as a result of adapter unload, get out
1058bafec742SSukumar Swaminathan 	 */
1059bafec742SSukumar Swaminathan 	if ((sbq_desc->free_buf != NULL) ||
1060bafec742SSukumar Swaminathan 	    (qlge->mac_flags == QL_MAC_DETACH)) {
1061bafec742SSukumar Swaminathan 		if (sbq_desc->free_buf == NULL)
1062bafec742SSukumar Swaminathan 			atomic_dec_32(&rx_ring->rx_indicate);
1063bafec742SSukumar Swaminathan 		mutex_exit(&rx_ring->sbq_lock);
1064bafec742SSukumar Swaminathan 		return;
1065bafec742SSukumar Swaminathan 	}
1066bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1067bafec742SSukumar Swaminathan 	if (rx_ring->rx_indicate == 0)
1068bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "sbq: indicate wrong");
1069bafec742SSukumar Swaminathan #endif
1070bafec742SSukumar Swaminathan #ifdef QLGE_TRACK_BUFFER_USAGE
1071bafec742SSukumar Swaminathan 	uint32_t sb_consumer_idx;
1072bafec742SSukumar Swaminathan 	uint32_t sb_producer_idx;
1073bafec742SSukumar Swaminathan 	uint32_t num_free_buffers;
1074bafec742SSukumar Swaminathan 	uint32_t temp;
1075bafec742SSukumar Swaminathan 
1076bafec742SSukumar Swaminathan 	temp = ql_read_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg);
1077bafec742SSukumar Swaminathan 	sb_producer_idx = temp & 0x0000ffff;
1078bafec742SSukumar Swaminathan 	sb_consumer_idx = (temp >> 16);
1079bafec742SSukumar Swaminathan 
1080bafec742SSukumar Swaminathan 	if (sb_consumer_idx > sb_producer_idx)
1081bafec742SSukumar Swaminathan 		num_free_buffers = NUM_SMALL_BUFFERS -
1082bafec742SSukumar Swaminathan 		    (sb_consumer_idx - sb_producer_idx);
1083bafec742SSukumar Swaminathan 	else
1084bafec742SSukumar Swaminathan 		num_free_buffers = sb_producer_idx - sb_consumer_idx;
1085bafec742SSukumar Swaminathan 
1086bafec742SSukumar Swaminathan 	if (num_free_buffers < qlge->rx_sb_low_count[rx_ring->cq_id])
1087bafec742SSukumar Swaminathan 		qlge->rx_sb_low_count[rx_ring->cq_id] = num_free_buffers;
1088bafec742SSukumar Swaminathan 
1089bafec742SSukumar Swaminathan #endif
1090bafec742SSukumar Swaminathan 
1091bafec742SSukumar Swaminathan 	ASSERT(sbq_desc->mp == NULL);
1092bafec742SSukumar Swaminathan 
1093bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1094bafec742SSukumar Swaminathan 	if (rx_ring->rx_indicate > 0xFF000000)
1095bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "sbq: indicate(%d) wrong: %d mac_flags %d,"
1096bafec742SSukumar Swaminathan 		    " sbq_desc index %d.",
1097bafec742SSukumar Swaminathan 		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
1098bafec742SSukumar Swaminathan 		    sbq_desc->index);
1099bafec742SSukumar Swaminathan #endif
1100bafec742SSukumar Swaminathan 	if (alloc_memory) {
1101bafec742SSukumar Swaminathan 		sbq_desc->mp =
1102bafec742SSukumar Swaminathan 		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1103bafec742SSukumar Swaminathan 		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1104bafec742SSukumar Swaminathan 		if (sbq_desc->mp == NULL) {
1105bafec742SSukumar Swaminathan 			rx_ring->rx_failed_sbq_allocs++;
1106bafec742SSukumar Swaminathan 		}
1107bafec742SSukumar Swaminathan 	}
1108bafec742SSukumar Swaminathan 
1109bafec742SSukumar Swaminathan 	/* Got the packet from the stack decrement rx_indicate count */
1110bafec742SSukumar Swaminathan 	atomic_dec_32(&rx_ring->rx_indicate);
1111bafec742SSukumar Swaminathan 
1112bafec742SSukumar Swaminathan 	ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1113bafec742SSukumar Swaminathan 
1114bafec742SSukumar Swaminathan 	/* Rearm if possible */
1115bafec742SSukumar Swaminathan 	if ((rx_ring->sbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1116bafec742SSukumar Swaminathan 	    (qlge->mac_flags == QL_MAC_STARTED)) {
1117bafec742SSukumar Swaminathan 		sbq_entry = rx_ring->sbq_dma.vaddr;
1118bafec742SSukumar Swaminathan 		sbq_entry += rx_ring->sbq_prod_idx;
1119bafec742SSukumar Swaminathan 
1120bafec742SSukumar Swaminathan 		while (rx_ring->sbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1121bafec742SSukumar Swaminathan 			/* Get first one from free list */
1122bafec742SSukumar Swaminathan 			sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
1123bafec742SSukumar Swaminathan 
1124bafec742SSukumar Swaminathan 			*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
1125bafec742SSukumar Swaminathan 			sbq_entry++;
1126bafec742SSukumar Swaminathan 			rx_ring->sbq_prod_idx++;
1127bafec742SSukumar Swaminathan 			if (rx_ring->sbq_prod_idx >= rx_ring->sbq_len) {
1128bafec742SSukumar Swaminathan 				rx_ring->sbq_prod_idx = 0;
1129bafec742SSukumar Swaminathan 				sbq_entry = rx_ring->sbq_dma.vaddr;
1130bafec742SSukumar Swaminathan 			}
1131bafec742SSukumar Swaminathan 			/* Add to end of in use list */
1132bafec742SSukumar Swaminathan 			ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
1133bafec742SSukumar Swaminathan 		}
1134bafec742SSukumar Swaminathan 
1135bafec742SSukumar Swaminathan 		/* Update small buffer queue producer index */
1136bafec742SSukumar Swaminathan 		ql_update_sbq_prod_idx(qlge, rx_ring);
1137bafec742SSukumar Swaminathan 	}
1138bafec742SSukumar Swaminathan 
1139bafec742SSukumar Swaminathan 	mutex_exit(&rx_ring->sbq_lock);
1140bafec742SSukumar Swaminathan 	QL_PRINT(DBG_RX_RING, ("%s(%d) exited, sbuf_free_count %d\n",
1141bafec742SSukumar Swaminathan 	    __func__, qlge->instance, rx_ring->sbuf_free_count));
1142bafec742SSukumar Swaminathan }
1143bafec742SSukumar Swaminathan 
1144bafec742SSukumar Swaminathan /*
1145bafec742SSukumar Swaminathan  * rx recycle call back function
1146bafec742SSukumar Swaminathan  */
1147bafec742SSukumar Swaminathan static void
1148bafec742SSukumar Swaminathan ql_release_to_sbuf_free_list(caddr_t p)
1149bafec742SSukumar Swaminathan {
1150bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc = (struct bq_desc *)(void *)p;
1151bafec742SSukumar Swaminathan 
1152bafec742SSukumar Swaminathan 	if (sbq_desc == NULL)
1153bafec742SSukumar Swaminathan 		return;
1154bafec742SSukumar Swaminathan 	ql_refill_sbuf_free_list(sbq_desc, B_TRUE);
1155bafec742SSukumar Swaminathan }
1156bafec742SSukumar Swaminathan 
1157bafec742SSukumar Swaminathan /*
1158bafec742SSukumar Swaminathan  * Add a large buffer descriptor to free list
1159bafec742SSukumar Swaminathan  */
1160bafec742SSukumar Swaminathan static void
1161bafec742SSukumar Swaminathan ql_refill_lbuf_free_list(struct bq_desc *lbq_desc, boolean_t alloc_memory)
1162bafec742SSukumar Swaminathan {
1163bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring = lbq_desc->rx_ring;
1164bafec742SSukumar Swaminathan 	uint64_t *lbq_entry;
1165bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
1166bafec742SSukumar Swaminathan 
1167bafec742SSukumar Swaminathan 	/* Sync access */
1168bafec742SSukumar Swaminathan 	mutex_enter(&rx_ring->lbq_lock);
1169bafec742SSukumar Swaminathan 
1170bafec742SSukumar Swaminathan 	lbq_desc->upl_inuse = 0;
1171bafec742SSukumar Swaminathan 	/*
1172bafec742SSukumar Swaminathan 	 * If we are freeing the buffers as a result of adapter unload, get out
1173bafec742SSukumar Swaminathan 	 */
1174bafec742SSukumar Swaminathan 	if ((lbq_desc->free_buf != NULL) ||
1175bafec742SSukumar Swaminathan 	    (qlge->mac_flags == QL_MAC_DETACH)) {
1176bafec742SSukumar Swaminathan 		if (lbq_desc->free_buf == NULL)
1177bafec742SSukumar Swaminathan 			atomic_dec_32(&rx_ring->rx_indicate);
1178bafec742SSukumar Swaminathan 		mutex_exit(&rx_ring->lbq_lock);
1179bafec742SSukumar Swaminathan 		return;
1180bafec742SSukumar Swaminathan 	}
1181bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1182bafec742SSukumar Swaminathan 	if (rx_ring->rx_indicate == 0)
1183bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "lbq: indicate wrong");
1184bafec742SSukumar Swaminathan #endif
1185bafec742SSukumar Swaminathan #ifdef QLGE_TRACK_BUFFER_USAGE
1186bafec742SSukumar Swaminathan 	uint32_t lb_consumer_idx;
1187bafec742SSukumar Swaminathan 	uint32_t lb_producer_idx;
1188bafec742SSukumar Swaminathan 	uint32_t num_free_buffers;
1189bafec742SSukumar Swaminathan 	uint32_t temp;
1190bafec742SSukumar Swaminathan 
1191bafec742SSukumar Swaminathan 	temp = ql_read_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg);
1192bafec742SSukumar Swaminathan 
1193bafec742SSukumar Swaminathan 	lb_producer_idx = temp & 0x0000ffff;
1194bafec742SSukumar Swaminathan 	lb_consumer_idx = (temp >> 16);
1195bafec742SSukumar Swaminathan 
1196bafec742SSukumar Swaminathan 	if (lb_consumer_idx > lb_producer_idx)
1197bafec742SSukumar Swaminathan 		num_free_buffers = NUM_LARGE_BUFFERS -
1198bafec742SSukumar Swaminathan 		    (lb_consumer_idx - lb_producer_idx);
1199bafec742SSukumar Swaminathan 	else
1200bafec742SSukumar Swaminathan 		num_free_buffers = lb_producer_idx - lb_consumer_idx;
1201bafec742SSukumar Swaminathan 
1202bafec742SSukumar Swaminathan 	if (num_free_buffers < qlge->rx_lb_low_count[rx_ring->cq_id]) {
1203bafec742SSukumar Swaminathan 		qlge->rx_lb_low_count[rx_ring->cq_id] = num_free_buffers;
1204bafec742SSukumar Swaminathan 	}
1205bafec742SSukumar Swaminathan #endif
1206bafec742SSukumar Swaminathan 
1207bafec742SSukumar Swaminathan 	ASSERT(lbq_desc->mp == NULL);
1208bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1209bafec742SSukumar Swaminathan 	if (rx_ring->rx_indicate > 0xFF000000)
1210bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "lbq: indicate(%d) wrong: %d mac_flags %d,"
1211bafec742SSukumar Swaminathan 		    "lbq_desc index %d",
1212bafec742SSukumar Swaminathan 		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
1213bafec742SSukumar Swaminathan 		    lbq_desc->index);
1214bafec742SSukumar Swaminathan #endif
1215bafec742SSukumar Swaminathan 	if (alloc_memory) {
1216bafec742SSukumar Swaminathan 		lbq_desc->mp =
1217bafec742SSukumar Swaminathan 		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1218bafec742SSukumar Swaminathan 		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1219bafec742SSukumar Swaminathan 		if (lbq_desc->mp == NULL) {
1220bafec742SSukumar Swaminathan 			rx_ring->rx_failed_lbq_allocs++;
1221bafec742SSukumar Swaminathan 		}
1222bafec742SSukumar Swaminathan 	}
1223bafec742SSukumar Swaminathan 
1224bafec742SSukumar Swaminathan 	/* Got the packet from the stack decrement rx_indicate count */
1225bafec742SSukumar Swaminathan 	atomic_dec_32(&rx_ring->rx_indicate);
1226bafec742SSukumar Swaminathan 
1227bafec742SSukumar Swaminathan 	ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1228bafec742SSukumar Swaminathan 
1229bafec742SSukumar Swaminathan 	/* Rearm if possible */
1230bafec742SSukumar Swaminathan 	if ((rx_ring->lbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1231bafec742SSukumar Swaminathan 	    (qlge->mac_flags == QL_MAC_STARTED)) {
1232bafec742SSukumar Swaminathan 		lbq_entry = rx_ring->lbq_dma.vaddr;
1233bafec742SSukumar Swaminathan 		lbq_entry += rx_ring->lbq_prod_idx;
1234bafec742SSukumar Swaminathan 		while (rx_ring->lbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1235bafec742SSukumar Swaminathan 			/* Get first one from free list */
1236bafec742SSukumar Swaminathan 			lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
1237bafec742SSukumar Swaminathan 
1238bafec742SSukumar Swaminathan 			*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
1239bafec742SSukumar Swaminathan 			lbq_entry++;
1240bafec742SSukumar Swaminathan 			rx_ring->lbq_prod_idx++;
1241bafec742SSukumar Swaminathan 			if (rx_ring->lbq_prod_idx >= rx_ring->lbq_len) {
1242bafec742SSukumar Swaminathan 				rx_ring->lbq_prod_idx = 0;
1243bafec742SSukumar Swaminathan 				lbq_entry = rx_ring->lbq_dma.vaddr;
1244bafec742SSukumar Swaminathan 			}
1245bafec742SSukumar Swaminathan 
1246bafec742SSukumar Swaminathan 			/* Add to end of in use list */
1247bafec742SSukumar Swaminathan 			ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
1248bafec742SSukumar Swaminathan 		}
1249bafec742SSukumar Swaminathan 
1250bafec742SSukumar Swaminathan 		/* Update large buffer queue producer index */
1251bafec742SSukumar Swaminathan 		ql_update_lbq_prod_idx(rx_ring->qlge, rx_ring);
1252bafec742SSukumar Swaminathan 	}
1253bafec742SSukumar Swaminathan 
1254bafec742SSukumar Swaminathan 	mutex_exit(&rx_ring->lbq_lock);
1255bafec742SSukumar Swaminathan 	QL_PRINT(DBG_RX_RING, ("%s exitd, lbuf_free_count %d\n",
1256bafec742SSukumar Swaminathan 	    __func__, rx_ring->lbuf_free_count));
1257bafec742SSukumar Swaminathan }
1258bafec742SSukumar Swaminathan /*
1259bafec742SSukumar Swaminathan  * rx recycle call back function
1260bafec742SSukumar Swaminathan  */
1261bafec742SSukumar Swaminathan static void
1262bafec742SSukumar Swaminathan ql_release_to_lbuf_free_list(caddr_t p)
1263bafec742SSukumar Swaminathan {
1264bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc = (struct bq_desc *)(void *)p;
1265bafec742SSukumar Swaminathan 
1266bafec742SSukumar Swaminathan 	if (lbq_desc == NULL)
1267bafec742SSukumar Swaminathan 		return;
1268bafec742SSukumar Swaminathan 	ql_refill_lbuf_free_list(lbq_desc, B_TRUE);
1269bafec742SSukumar Swaminathan }
1270bafec742SSukumar Swaminathan 
1271bafec742SSukumar Swaminathan /*
1272bafec742SSukumar Swaminathan  * free small buffer queue buffers
1273bafec742SSukumar Swaminathan  */
1274bafec742SSukumar Swaminathan static void
1275bafec742SSukumar Swaminathan ql_free_sbq_buffers(struct rx_ring *rx_ring)
1276bafec742SSukumar Swaminathan {
1277bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
1278bafec742SSukumar Swaminathan 	uint32_t i;
1279bafec742SSukumar Swaminathan 	uint32_t j = rx_ring->sbq_free_head;
1280bafec742SSukumar Swaminathan 	int  force_cnt = 0;
1281bafec742SSukumar Swaminathan 
1282bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->sbuf_free_count; i++) {
1283bafec742SSukumar Swaminathan 		sbq_desc = rx_ring->sbuf_free[j];
1284bafec742SSukumar Swaminathan 		sbq_desc->free_buf = 1;
1285bafec742SSukumar Swaminathan 		j++;
1286bafec742SSukumar Swaminathan 		if (j >= rx_ring->sbq_len) {
1287bafec742SSukumar Swaminathan 			j = 0;
1288bafec742SSukumar Swaminathan 		}
1289bafec742SSukumar Swaminathan 		if (sbq_desc->mp != NULL) {
1290bafec742SSukumar Swaminathan 			freemsg(sbq_desc->mp);
1291bafec742SSukumar Swaminathan 			sbq_desc->mp = NULL;
1292bafec742SSukumar Swaminathan 		}
1293bafec742SSukumar Swaminathan 	}
1294bafec742SSukumar Swaminathan 	rx_ring->sbuf_free_count = 0;
1295bafec742SSukumar Swaminathan 
1296bafec742SSukumar Swaminathan 	j = rx_ring->sbq_use_head;
1297bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
1298bafec742SSukumar Swaminathan 		sbq_desc = rx_ring->sbuf_in_use[j];
1299bafec742SSukumar Swaminathan 		sbq_desc->free_buf = 1;
1300bafec742SSukumar Swaminathan 		j++;
1301bafec742SSukumar Swaminathan 		if (j >= rx_ring->sbq_len) {
1302bafec742SSukumar Swaminathan 			j = 0;
1303bafec742SSukumar Swaminathan 		}
1304bafec742SSukumar Swaminathan 		if (sbq_desc->mp != NULL) {
1305bafec742SSukumar Swaminathan 			freemsg(sbq_desc->mp);
1306bafec742SSukumar Swaminathan 			sbq_desc->mp = NULL;
1307bafec742SSukumar Swaminathan 		}
1308bafec742SSukumar Swaminathan 	}
1309bafec742SSukumar Swaminathan 	rx_ring->sbuf_in_use_count = 0;
1310bafec742SSukumar Swaminathan 
1311bafec742SSukumar Swaminathan 	sbq_desc = &rx_ring->sbq_desc[0];
1312bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1313bafec742SSukumar Swaminathan 		/*
1314bafec742SSukumar Swaminathan 		 * Set flag so that the callback does not allocate a new buffer
1315bafec742SSukumar Swaminathan 		 */
1316bafec742SSukumar Swaminathan 		sbq_desc->free_buf = 1;
1317bafec742SSukumar Swaminathan 		if (sbq_desc->upl_inuse != 0) {
1318bafec742SSukumar Swaminathan 			force_cnt++;
1319bafec742SSukumar Swaminathan 		}
1320bafec742SSukumar Swaminathan 		if (sbq_desc->bd_dma.dma_handle != NULL) {
1321bafec742SSukumar Swaminathan 			ql_free_phys(&sbq_desc->bd_dma.dma_handle,
1322bafec742SSukumar Swaminathan 			    &sbq_desc->bd_dma.acc_handle);
1323bafec742SSukumar Swaminathan 			sbq_desc->bd_dma.dma_handle = NULL;
1324bafec742SSukumar Swaminathan 			sbq_desc->bd_dma.acc_handle = NULL;
1325bafec742SSukumar Swaminathan 		}
1326bafec742SSukumar Swaminathan 	}
1327bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1328bafec742SSukumar Swaminathan 	cmn_err(CE_NOTE, "sbq: free %d inuse %d force %d\n",
1329bafec742SSukumar Swaminathan 	    rx_ring->sbuf_free_count, rx_ring->sbuf_in_use_count, force_cnt);
1330bafec742SSukumar Swaminathan #endif
1331bafec742SSukumar Swaminathan 	if (rx_ring->sbuf_in_use != NULL) {
1332bafec742SSukumar Swaminathan 		kmem_free(rx_ring->sbuf_in_use, (rx_ring->sbq_len *
1333bafec742SSukumar Swaminathan 		    sizeof (struct bq_desc *)));
1334bafec742SSukumar Swaminathan 		rx_ring->sbuf_in_use = NULL;
1335bafec742SSukumar Swaminathan 	}
1336bafec742SSukumar Swaminathan 
1337bafec742SSukumar Swaminathan 	if (rx_ring->sbuf_free != NULL) {
1338bafec742SSukumar Swaminathan 		kmem_free(rx_ring->sbuf_free, (rx_ring->sbq_len *
1339bafec742SSukumar Swaminathan 		    sizeof (struct bq_desc *)));
1340bafec742SSukumar Swaminathan 		rx_ring->sbuf_free = NULL;
1341bafec742SSukumar Swaminathan 	}
1342bafec742SSukumar Swaminathan }
1343bafec742SSukumar Swaminathan 
1344bafec742SSukumar Swaminathan /* Allocate small buffers */
1345bafec742SSukumar Swaminathan static int
1346bafec742SSukumar Swaminathan ql_alloc_sbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1347bafec742SSukumar Swaminathan {
1348bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
1349bafec742SSukumar Swaminathan 	int i;
1350bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
1351bafec742SSukumar Swaminathan 
1352bafec742SSukumar Swaminathan 	rx_ring->sbuf_free = kmem_zalloc(rx_ring->sbq_len *
1353bafec742SSukumar Swaminathan 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1354bafec742SSukumar Swaminathan 	if (rx_ring->sbuf_free == NULL) {
1355bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
1356bafec742SSukumar Swaminathan 		    "!%s: sbuf_free_list alloc: failed",
1357bafec742SSukumar Swaminathan 		    __func__);
1358bafec742SSukumar Swaminathan 		rx_ring->sbuf_free_count = 0;
1359bafec742SSukumar Swaminathan 		goto alloc_sbuf_err;
1360bafec742SSukumar Swaminathan 	}
1361bafec742SSukumar Swaminathan 
1362bafec742SSukumar Swaminathan 	rx_ring->sbuf_in_use = kmem_zalloc(rx_ring->sbq_len *
1363bafec742SSukumar Swaminathan 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1364bafec742SSukumar Swaminathan 	if (rx_ring->sbuf_in_use == NULL) {
1365bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
1366bafec742SSukumar Swaminathan 		    "!%s: sbuf_inuse_list alloc: failed",
1367bafec742SSukumar Swaminathan 		    __func__);
1368bafec742SSukumar Swaminathan 		rx_ring->sbuf_in_use_count = 0;
1369bafec742SSukumar Swaminathan 		goto alloc_sbuf_err;
1370bafec742SSukumar Swaminathan 	}
1371bafec742SSukumar Swaminathan 	rx_ring->sbq_use_head = 0;
1372bafec742SSukumar Swaminathan 	rx_ring->sbq_use_tail = 0;
1373bafec742SSukumar Swaminathan 	rx_ring->sbq_free_head = 0;
1374bafec742SSukumar Swaminathan 	rx_ring->sbq_free_tail = 0;
1375bafec742SSukumar Swaminathan 	sbq_desc = &rx_ring->sbq_desc[0];
1376bafec742SSukumar Swaminathan 
1377bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1378bafec742SSukumar Swaminathan 		/* Allocate buffer */
1379bafec742SSukumar Swaminathan 		if (ql_alloc_phys(qlge->dip, &sbq_desc->bd_dma.dma_handle,
1380bafec742SSukumar Swaminathan 		    &ql_buf_acc_attr,
1381bafec742SSukumar Swaminathan 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1382bafec742SSukumar Swaminathan 		    &sbq_desc->bd_dma.acc_handle,
1383bafec742SSukumar Swaminathan 		    (size_t)rx_ring->sbq_buf_size,	/* mem size */
1384bafec742SSukumar Swaminathan 		    (size_t)0,				/* default alignment */
1385bafec742SSukumar Swaminathan 		    (caddr_t *)&sbq_desc->bd_dma.vaddr,
1386bafec742SSukumar Swaminathan 		    &dma_cookie) != 0) {
1387bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
1388bafec742SSukumar Swaminathan 			    "!%s: ddi_dma_alloc_handle: failed",
1389bafec742SSukumar Swaminathan 			    __func__);
1390bafec742SSukumar Swaminathan 			goto alloc_sbuf_err;
1391bafec742SSukumar Swaminathan 		}
1392bafec742SSukumar Swaminathan 
1393bafec742SSukumar Swaminathan 		/* Set context for Return buffer callback */
1394bafec742SSukumar Swaminathan 		sbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1395bafec742SSukumar Swaminathan 		sbq_desc->rx_recycle.free_func = ql_release_to_sbuf_free_list;
1396bafec742SSukumar Swaminathan 		sbq_desc->rx_recycle.free_arg  = (caddr_t)sbq_desc;
1397bafec742SSukumar Swaminathan 		sbq_desc->rx_ring = rx_ring;
1398bafec742SSukumar Swaminathan 		sbq_desc->upl_inuse = 0;
1399bafec742SSukumar Swaminathan 		sbq_desc->free_buf = 0;
1400bafec742SSukumar Swaminathan 
1401bafec742SSukumar Swaminathan 		sbq_desc->mp =
1402bafec742SSukumar Swaminathan 		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1403bafec742SSukumar Swaminathan 		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1404bafec742SSukumar Swaminathan 		if (sbq_desc->mp == NULL) {
1405bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1406bafec742SSukumar Swaminathan 			goto alloc_sbuf_err;
1407bafec742SSukumar Swaminathan 		}
1408bafec742SSukumar Swaminathan 		ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1409bafec742SSukumar Swaminathan 	}
1410bafec742SSukumar Swaminathan 
1411bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
1412bafec742SSukumar Swaminathan 
1413bafec742SSukumar Swaminathan alloc_sbuf_err:
1414bafec742SSukumar Swaminathan 	ql_free_sbq_buffers(rx_ring);
1415bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
1416bafec742SSukumar Swaminathan }
1417bafec742SSukumar Swaminathan 
1418bafec742SSukumar Swaminathan static void
1419bafec742SSukumar Swaminathan ql_free_lbq_buffers(struct rx_ring *rx_ring)
1420bafec742SSukumar Swaminathan {
1421bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1422bafec742SSukumar Swaminathan 	uint32_t i, j;
1423bafec742SSukumar Swaminathan 	int force_cnt = 0;
1424bafec742SSukumar Swaminathan 
1425bafec742SSukumar Swaminathan 	j = rx_ring->lbq_free_head;
1426bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->lbuf_free_count; i++) {
1427bafec742SSukumar Swaminathan 		lbq_desc = rx_ring->lbuf_free[j];
1428bafec742SSukumar Swaminathan 		lbq_desc->free_buf = 1;
1429bafec742SSukumar Swaminathan 		j++;
1430bafec742SSukumar Swaminathan 		if (j >= rx_ring->lbq_len)
1431bafec742SSukumar Swaminathan 			j = 0;
1432bafec742SSukumar Swaminathan 		if (lbq_desc->mp != NULL) {
1433bafec742SSukumar Swaminathan 			freemsg(lbq_desc->mp);
1434bafec742SSukumar Swaminathan 			lbq_desc->mp = NULL;
1435bafec742SSukumar Swaminathan 		}
1436bafec742SSukumar Swaminathan 	}
1437bafec742SSukumar Swaminathan 	rx_ring->lbuf_free_count = 0;
1438bafec742SSukumar Swaminathan 
1439bafec742SSukumar Swaminathan 	j = rx_ring->lbq_use_head;
1440bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
1441bafec742SSukumar Swaminathan 		lbq_desc = rx_ring->lbuf_in_use[j];
1442bafec742SSukumar Swaminathan 		lbq_desc->free_buf = 1;
1443bafec742SSukumar Swaminathan 		j++;
1444bafec742SSukumar Swaminathan 		if (j >= rx_ring->lbq_len) {
1445bafec742SSukumar Swaminathan 			j = 0;
1446bafec742SSukumar Swaminathan 		}
1447bafec742SSukumar Swaminathan 		if (lbq_desc->mp != NULL) {
1448bafec742SSukumar Swaminathan 			freemsg(lbq_desc->mp);
1449bafec742SSukumar Swaminathan 			lbq_desc->mp = NULL;
1450bafec742SSukumar Swaminathan 		}
1451bafec742SSukumar Swaminathan 	}
1452bafec742SSukumar Swaminathan 	rx_ring->lbuf_in_use_count = 0;
1453bafec742SSukumar Swaminathan 
1454bafec742SSukumar Swaminathan 	lbq_desc = &rx_ring->lbq_desc[0];
1455bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1456bafec742SSukumar Swaminathan 		/* Set flag so that callback will not allocate a new buffer */
1457bafec742SSukumar Swaminathan 		lbq_desc->free_buf = 1;
1458bafec742SSukumar Swaminathan 		if (lbq_desc->upl_inuse != 0) {
1459bafec742SSukumar Swaminathan 			force_cnt++;
1460bafec742SSukumar Swaminathan 		}
1461bafec742SSukumar Swaminathan 		if (lbq_desc->bd_dma.dma_handle != NULL) {
1462bafec742SSukumar Swaminathan 			ql_free_phys(&lbq_desc->bd_dma.dma_handle,
1463bafec742SSukumar Swaminathan 			    &lbq_desc->bd_dma.acc_handle);
1464bafec742SSukumar Swaminathan 			lbq_desc->bd_dma.dma_handle = NULL;
1465bafec742SSukumar Swaminathan 			lbq_desc->bd_dma.acc_handle = NULL;
1466bafec742SSukumar Swaminathan 		}
1467bafec742SSukumar Swaminathan 	}
1468bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1469bafec742SSukumar Swaminathan 	if (force_cnt) {
1470bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "lbq: free %d inuse %d force %d",
1471bafec742SSukumar Swaminathan 		    rx_ring->lbuf_free_count, rx_ring->lbuf_in_use_count,
1472bafec742SSukumar Swaminathan 		    force_cnt);
1473bafec742SSukumar Swaminathan 	}
1474bafec742SSukumar Swaminathan #endif
1475bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_in_use != NULL) {
1476bafec742SSukumar Swaminathan 		kmem_free(rx_ring->lbuf_in_use, (rx_ring->lbq_len *
1477bafec742SSukumar Swaminathan 		    sizeof (struct bq_desc *)));
1478bafec742SSukumar Swaminathan 		rx_ring->lbuf_in_use = NULL;
1479bafec742SSukumar Swaminathan 	}
1480bafec742SSukumar Swaminathan 
1481bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_free != NULL) {
1482bafec742SSukumar Swaminathan 		kmem_free(rx_ring->lbuf_free, (rx_ring->lbq_len *
1483bafec742SSukumar Swaminathan 		    sizeof (struct bq_desc *)));
1484bafec742SSukumar Swaminathan 		rx_ring->lbuf_free = NULL;
1485bafec742SSukumar Swaminathan 	}
1486bafec742SSukumar Swaminathan }
1487bafec742SSukumar Swaminathan 
1488bafec742SSukumar Swaminathan /* Allocate large buffers */
1489bafec742SSukumar Swaminathan static int
1490bafec742SSukumar Swaminathan ql_alloc_lbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1491bafec742SSukumar Swaminathan {
1492bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1493bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
1494bafec742SSukumar Swaminathan 	int i;
1495bafec742SSukumar Swaminathan 	uint32_t lbq_buf_size;
1496bafec742SSukumar Swaminathan 
1497bafec742SSukumar Swaminathan 	rx_ring->lbuf_free = kmem_zalloc(rx_ring->lbq_len *
1498bafec742SSukumar Swaminathan 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1499bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_free == NULL) {
1500bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
1501bafec742SSukumar Swaminathan 		    "!%s: lbuf_free_list alloc: failed",
1502bafec742SSukumar Swaminathan 		    __func__);
1503bafec742SSukumar Swaminathan 		rx_ring->lbuf_free_count = 0;
1504bafec742SSukumar Swaminathan 		goto alloc_lbuf_err;
1505bafec742SSukumar Swaminathan 	}
1506bafec742SSukumar Swaminathan 
1507bafec742SSukumar Swaminathan 	rx_ring->lbuf_in_use = kmem_zalloc(rx_ring->lbq_len *
1508bafec742SSukumar Swaminathan 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1509bafec742SSukumar Swaminathan 
1510bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_in_use == NULL) {
1511bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
1512bafec742SSukumar Swaminathan 		    "!%s: lbuf_inuse_list alloc: failed",
1513bafec742SSukumar Swaminathan 		    __func__);
1514bafec742SSukumar Swaminathan 		rx_ring->lbuf_in_use_count = 0;
1515bafec742SSukumar Swaminathan 		goto alloc_lbuf_err;
1516bafec742SSukumar Swaminathan 	}
1517bafec742SSukumar Swaminathan 	rx_ring->lbq_use_head = 0;
1518bafec742SSukumar Swaminathan 	rx_ring->lbq_use_tail = 0;
1519bafec742SSukumar Swaminathan 	rx_ring->lbq_free_head = 0;
1520bafec742SSukumar Swaminathan 	rx_ring->lbq_free_tail = 0;
1521bafec742SSukumar Swaminathan 
1522bafec742SSukumar Swaminathan 	lbq_buf_size = (qlge->mtu == ETHERMTU) ?
1523bafec742SSukumar Swaminathan 	    NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE;
1524bafec742SSukumar Swaminathan 
1525bafec742SSukumar Swaminathan 	lbq_desc = &rx_ring->lbq_desc[0];
1526bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1527bafec742SSukumar Swaminathan 		rx_ring->lbq_buf_size = lbq_buf_size;
1528bafec742SSukumar Swaminathan 		/* Allocate buffer */
1529bafec742SSukumar Swaminathan 		if (ql_alloc_phys(qlge->dip, &lbq_desc->bd_dma.dma_handle,
1530bafec742SSukumar Swaminathan 		    &ql_buf_acc_attr,
1531bafec742SSukumar Swaminathan 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1532bafec742SSukumar Swaminathan 		    &lbq_desc->bd_dma.acc_handle,
1533bafec742SSukumar Swaminathan 		    (size_t)rx_ring->lbq_buf_size,  /* mem size */
1534bafec742SSukumar Swaminathan 		    (size_t)0, /* default alignment */
1535bafec742SSukumar Swaminathan 		    (caddr_t *)&lbq_desc->bd_dma.vaddr,
1536bafec742SSukumar Swaminathan 		    &dma_cookie) != 0) {
1537bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
1538bafec742SSukumar Swaminathan 			    "!%s: ddi_dma_alloc_handle: failed",
1539bafec742SSukumar Swaminathan 			    __func__);
1540bafec742SSukumar Swaminathan 			goto alloc_lbuf_err;
1541bafec742SSukumar Swaminathan 		}
1542bafec742SSukumar Swaminathan 
1543bafec742SSukumar Swaminathan 		/* Set context for Return buffer callback */
1544bafec742SSukumar Swaminathan 		lbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1545bafec742SSukumar Swaminathan 		lbq_desc->rx_recycle.free_func = ql_release_to_lbuf_free_list;
1546bafec742SSukumar Swaminathan 		lbq_desc->rx_recycle.free_arg  = (caddr_t)lbq_desc;
1547bafec742SSukumar Swaminathan 		lbq_desc->rx_ring = rx_ring;
1548bafec742SSukumar Swaminathan 		lbq_desc->upl_inuse = 0;
1549bafec742SSukumar Swaminathan 		lbq_desc->free_buf = 0;
1550bafec742SSukumar Swaminathan 
1551bafec742SSukumar Swaminathan 		lbq_desc->mp =
1552bafec742SSukumar Swaminathan 		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1553bafec742SSukumar Swaminathan 		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1554bafec742SSukumar Swaminathan 		if (lbq_desc->mp == NULL) {
1555bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1556bafec742SSukumar Swaminathan 			goto alloc_lbuf_err;
1557bafec742SSukumar Swaminathan 		}
1558bafec742SSukumar Swaminathan 		ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1559bafec742SSukumar Swaminathan 	} /* For all large buffers */
1560bafec742SSukumar Swaminathan 
1561bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
1562bafec742SSukumar Swaminathan 
1563bafec742SSukumar Swaminathan alloc_lbuf_err:
1564bafec742SSukumar Swaminathan 	ql_free_lbq_buffers(rx_ring);
1565bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
1566bafec742SSukumar Swaminathan }
1567bafec742SSukumar Swaminathan 
1568bafec742SSukumar Swaminathan /*
1569bafec742SSukumar Swaminathan  * Free rx buffers
1570bafec742SSukumar Swaminathan  */
1571bafec742SSukumar Swaminathan static void
1572bafec742SSukumar Swaminathan ql_free_rx_buffers(qlge_t *qlge)
1573bafec742SSukumar Swaminathan {
1574bafec742SSukumar Swaminathan 	int i;
1575bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
1576bafec742SSukumar Swaminathan 
1577bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
1578bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
1579bafec742SSukumar Swaminathan 		if (rx_ring->type != TX_Q) {
1580bafec742SSukumar Swaminathan 			ql_free_lbq_buffers(rx_ring);
1581bafec742SSukumar Swaminathan 			ql_free_sbq_buffers(rx_ring);
1582bafec742SSukumar Swaminathan 		}
1583bafec742SSukumar Swaminathan 	}
1584bafec742SSukumar Swaminathan }
1585bafec742SSukumar Swaminathan 
1586bafec742SSukumar Swaminathan /*
1587bafec742SSukumar Swaminathan  * Allocate rx buffers
1588bafec742SSukumar Swaminathan  */
1589bafec742SSukumar Swaminathan static int
1590bafec742SSukumar Swaminathan ql_alloc_rx_buffers(qlge_t *qlge)
1591bafec742SSukumar Swaminathan {
1592bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
1593bafec742SSukumar Swaminathan 	int i;
1594bafec742SSukumar Swaminathan 
1595bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
1596bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
1597bafec742SSukumar Swaminathan 		if (rx_ring->type != TX_Q) {
1598bafec742SSukumar Swaminathan 			if (ql_alloc_sbufs(qlge, rx_ring) != DDI_SUCCESS)
1599bafec742SSukumar Swaminathan 				goto alloc_err;
1600bafec742SSukumar Swaminathan 			if (ql_alloc_lbufs(qlge, rx_ring) != DDI_SUCCESS)
1601bafec742SSukumar Swaminathan 				goto alloc_err;
1602bafec742SSukumar Swaminathan 		}
1603bafec742SSukumar Swaminathan 	}
1604bafec742SSukumar Swaminathan #ifdef QLGE_TRACK_BUFFER_USAGE
1605bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
1606bafec742SSukumar Swaminathan 		if (qlge->rx_ring[i].type == RX_Q) {
1607bafec742SSukumar Swaminathan 			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
1608bafec742SSukumar Swaminathan 			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
1609bafec742SSukumar Swaminathan 		}
1610bafec742SSukumar Swaminathan 		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
1611bafec742SSukumar Swaminathan 	}
1612bafec742SSukumar Swaminathan #endif
1613bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
1614bafec742SSukumar Swaminathan 
1615bafec742SSukumar Swaminathan alloc_err:
1616bafec742SSukumar Swaminathan 
1617bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
1618bafec742SSukumar Swaminathan }
1619bafec742SSukumar Swaminathan 
1620bafec742SSukumar Swaminathan /*
1621bafec742SSukumar Swaminathan  * Initialize large buffer queue ring
1622bafec742SSukumar Swaminathan  */
1623bafec742SSukumar Swaminathan static void
1624bafec742SSukumar Swaminathan ql_init_lbq_ring(struct rx_ring *rx_ring)
1625bafec742SSukumar Swaminathan {
1626bafec742SSukumar Swaminathan 	uint16_t i;
1627bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1628bafec742SSukumar Swaminathan 
1629bafec742SSukumar Swaminathan 	bzero(rx_ring->lbq_desc, rx_ring->lbq_len * sizeof (struct bq_desc));
1630bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->lbq_len; i++) {
1631bafec742SSukumar Swaminathan 		lbq_desc = &rx_ring->lbq_desc[i];
1632bafec742SSukumar Swaminathan 		lbq_desc->index = i;
1633bafec742SSukumar Swaminathan 	}
1634bafec742SSukumar Swaminathan }
1635bafec742SSukumar Swaminathan 
1636bafec742SSukumar Swaminathan /*
1637bafec742SSukumar Swaminathan  * Initialize small buffer queue ring
1638bafec742SSukumar Swaminathan  */
1639bafec742SSukumar Swaminathan static void
1640bafec742SSukumar Swaminathan ql_init_sbq_ring(struct rx_ring *rx_ring)
1641bafec742SSukumar Swaminathan {
1642bafec742SSukumar Swaminathan 	uint16_t i;
1643bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
1644bafec742SSukumar Swaminathan 
1645bafec742SSukumar Swaminathan 	bzero(rx_ring->sbq_desc, rx_ring->sbq_len * sizeof (struct bq_desc));
1646bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->sbq_len; i++) {
1647bafec742SSukumar Swaminathan 		sbq_desc = &rx_ring->sbq_desc[i];
1648bafec742SSukumar Swaminathan 		sbq_desc->index = i;
1649bafec742SSukumar Swaminathan 	}
1650bafec742SSukumar Swaminathan }
1651bafec742SSukumar Swaminathan 
1652bafec742SSukumar Swaminathan /*
1653bafec742SSukumar Swaminathan  * Calculate the pseudo-header checksum if hardware can not do
1654bafec742SSukumar Swaminathan  */
1655bafec742SSukumar Swaminathan static void
1656bafec742SSukumar Swaminathan ql_pseudo_cksum(uint8_t *buf)
1657bafec742SSukumar Swaminathan {
1658bafec742SSukumar Swaminathan 	uint32_t cksum;
1659bafec742SSukumar Swaminathan 	uint16_t iphl;
1660bafec742SSukumar Swaminathan 	uint16_t proto;
1661bafec742SSukumar Swaminathan 
1662bafec742SSukumar Swaminathan 	iphl = (uint16_t)(4 * (buf[0] & 0xF));
1663bafec742SSukumar Swaminathan 	cksum = (((uint16_t)buf[2])<<8) + buf[3] - iphl;
1664bafec742SSukumar Swaminathan 	cksum += proto = buf[9];
1665bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[12])<<8) + buf[13];
1666bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[14])<<8) + buf[15];
1667bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[16])<<8) + buf[17];
1668bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[18])<<8) + buf[19];
1669bafec742SSukumar Swaminathan 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1670bafec742SSukumar Swaminathan 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1671bafec742SSukumar Swaminathan 
1672bafec742SSukumar Swaminathan 	/*
1673bafec742SSukumar Swaminathan 	 * Point it to the TCP/UDP header, and
1674bafec742SSukumar Swaminathan 	 * update the checksum field.
1675bafec742SSukumar Swaminathan 	 */
1676bafec742SSukumar Swaminathan 	buf += iphl + ((proto == IPPROTO_TCP) ?
1677bafec742SSukumar Swaminathan 	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
1678bafec742SSukumar Swaminathan 
1679bafec742SSukumar Swaminathan 	*(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
1680bafec742SSukumar Swaminathan 
1681bafec742SSukumar Swaminathan }
1682bafec742SSukumar Swaminathan 
1683bafec742SSukumar Swaminathan /*
1684bafec742SSukumar Swaminathan  * Transmit an incoming packet.
1685bafec742SSukumar Swaminathan  */
1686bafec742SSukumar Swaminathan mblk_t *
1687bafec742SSukumar Swaminathan ql_ring_tx(void *arg, mblk_t *mp)
1688bafec742SSukumar Swaminathan {
1689bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring = (struct tx_ring *)arg;
1690bafec742SSukumar Swaminathan 	qlge_t *qlge = tx_ring->qlge;
1691bafec742SSukumar Swaminathan 	mblk_t *next;
1692bafec742SSukumar Swaminathan 	int rval;
1693bafec742SSukumar Swaminathan 	uint32_t tx_count = 0;
1694bafec742SSukumar Swaminathan 
1695bafec742SSukumar Swaminathan 	if (qlge->port_link_state == LS_DOWN) {
1696bafec742SSukumar Swaminathan 		/* can not send message while link is down */
1697bafec742SSukumar Swaminathan 		mblk_t *tp;
1698bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "tx failed due to link down");
1699bafec742SSukumar Swaminathan 
1700bafec742SSukumar Swaminathan 		while (mp != NULL) {
1701bafec742SSukumar Swaminathan 			tp = mp->b_next;
1702bafec742SSukumar Swaminathan 			mp->b_next = NULL;
1703bafec742SSukumar Swaminathan 			freemsg(mp);
1704bafec742SSukumar Swaminathan 			mp = tp;
1705bafec742SSukumar Swaminathan 		}
1706bafec742SSukumar Swaminathan 		goto exit;
1707bafec742SSukumar Swaminathan 	}
1708bafec742SSukumar Swaminathan 
1709bafec742SSukumar Swaminathan 	mutex_enter(&tx_ring->tx_lock);
1710bafec742SSukumar Swaminathan 	/* if mac is not started, driver is not ready, can not send */
1711bafec742SSukumar Swaminathan 	if (tx_ring->mac_flags != QL_MAC_STARTED) {
1712bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d)ring not started, mode %d "
1713bafec742SSukumar Swaminathan 		    " return packets",
1714bafec742SSukumar Swaminathan 		    __func__, qlge->instance, tx_ring->mac_flags);
1715bafec742SSukumar Swaminathan 		mutex_exit(&tx_ring->tx_lock);
1716bafec742SSukumar Swaminathan 		goto exit;
1717bafec742SSukumar Swaminathan 	}
1718bafec742SSukumar Swaminathan 
1719bafec742SSukumar Swaminathan 	/* we must try to send all */
1720bafec742SSukumar Swaminathan 	while (mp != NULL) {
1721bafec742SSukumar Swaminathan 		/*
1722bafec742SSukumar Swaminathan 		 * if number of available slots is less than a threshold,
1723bafec742SSukumar Swaminathan 		 * then quit
1724bafec742SSukumar Swaminathan 		 */
1725bafec742SSukumar Swaminathan 		if (tx_ring->tx_free_count <= TX_STOP_THRESHOLD) {
1726bafec742SSukumar Swaminathan 			tx_ring->queue_stopped = 1;
1727bafec742SSukumar Swaminathan 			rval = DDI_FAILURE;
1728bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1729bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d) no resources",
1730bafec742SSukumar Swaminathan 			    __func__, qlge->instance);
1731bafec742SSukumar Swaminathan #endif
1732bafec742SSukumar Swaminathan 			tx_ring->defer++;
1733bafec742SSukumar Swaminathan 			/*
1734bafec742SSukumar Swaminathan 			 * If we return the buffer back we are expected to call
1735bafec742SSukumar Swaminathan 			 * mac_tx_ring_update() when resources are available
1736bafec742SSukumar Swaminathan 			 */
1737bafec742SSukumar Swaminathan 			break;
1738bafec742SSukumar Swaminathan 		}
1739bafec742SSukumar Swaminathan 
1740bafec742SSukumar Swaminathan 		next = mp->b_next;
1741bafec742SSukumar Swaminathan 		mp->b_next = NULL;
1742bafec742SSukumar Swaminathan 
1743bafec742SSukumar Swaminathan 		rval = ql_send_common(tx_ring, mp);
1744bafec742SSukumar Swaminathan 
1745bafec742SSukumar Swaminathan 		if (rval != DDI_SUCCESS) {
1746bafec742SSukumar Swaminathan 			mp->b_next = next;
1747bafec742SSukumar Swaminathan 			break;
1748bafec742SSukumar Swaminathan 		}
1749bafec742SSukumar Swaminathan 		tx_count++;
1750bafec742SSukumar Swaminathan 		mp = next;
1751bafec742SSukumar Swaminathan 	}
1752bafec742SSukumar Swaminathan 
1753bafec742SSukumar Swaminathan 	/*
1754bafec742SSukumar Swaminathan 	 * After all msg blocks are mapped or copied to tx buffer,
1755bafec742SSukumar Swaminathan 	 * trigger the hardware to send!
1756bafec742SSukumar Swaminathan 	 */
1757bafec742SSukumar Swaminathan 	if (tx_count > 0) {
1758bafec742SSukumar Swaminathan 		ql_write_doorbell_reg(tx_ring->qlge, tx_ring->prod_idx_db_reg,
1759bafec742SSukumar Swaminathan 		    tx_ring->prod_idx);
1760bafec742SSukumar Swaminathan 	}
1761bafec742SSukumar Swaminathan 
1762bafec742SSukumar Swaminathan 	mutex_exit(&tx_ring->tx_lock);
1763bafec742SSukumar Swaminathan exit:
1764bafec742SSukumar Swaminathan 	return (mp);
1765bafec742SSukumar Swaminathan }
1766bafec742SSukumar Swaminathan 
1767bafec742SSukumar Swaminathan 
1768bafec742SSukumar Swaminathan /*
1769bafec742SSukumar Swaminathan  * This function builds an mblk list for the given inbound
1770bafec742SSukumar Swaminathan  * completion.
1771bafec742SSukumar Swaminathan  */
1772bafec742SSukumar Swaminathan 
1773bafec742SSukumar Swaminathan static mblk_t *
1774bafec742SSukumar Swaminathan ql_build_rx_mp(qlge_t *qlge, struct rx_ring *rx_ring,
1775bafec742SSukumar Swaminathan     struct ib_mac_iocb_rsp *ib_mac_rsp)
1776bafec742SSukumar Swaminathan {
1777bafec742SSukumar Swaminathan 	mblk_t *mp = NULL;
1778bafec742SSukumar Swaminathan 	mblk_t *mp1 = NULL;	/* packet header */
1779bafec742SSukumar Swaminathan 	mblk_t *mp2 = NULL;	/* packet content */
1780bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1781bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
1782bafec742SSukumar Swaminathan 	uint32_t err_flag = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK);
1783bafec742SSukumar Swaminathan 	uint32_t payload_len = le32_to_cpu(ib_mac_rsp->data_len);
1784bafec742SSukumar Swaminathan 	uint32_t header_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1785bafec742SSukumar Swaminathan 	uint32_t pkt_len = payload_len + header_len;
1786bafec742SSukumar Swaminathan 	uint32_t done;
1787bafec742SSukumar Swaminathan 	uint64_t *curr_ial_ptr;
1788bafec742SSukumar Swaminathan 	uint32_t ial_data_addr_low;
1789bafec742SSukumar Swaminathan 	uint32_t actual_data_addr_low;
1790bafec742SSukumar Swaminathan 	mblk_t *mp_ial = NULL;	/* ial chained packets */
1791bafec742SSukumar Swaminathan 	uint32_t size;
1792bafec742SSukumar Swaminathan 
1793bafec742SSukumar Swaminathan 	/*
1794bafec742SSukumar Swaminathan 	 * Check if error flags are set
1795bafec742SSukumar Swaminathan 	 */
1796bafec742SSukumar Swaminathan 	if (err_flag != 0) {
1797bafec742SSukumar Swaminathan 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_OVERSIZE) != 0)
1798bafec742SSukumar Swaminathan 			rx_ring->frame_too_long++;
1799bafec742SSukumar Swaminathan 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_UNDERSIZE) != 0)
1800bafec742SSukumar Swaminathan 			rx_ring->frame_too_short++;
1801bafec742SSukumar Swaminathan 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_CRC) != 0)
1802bafec742SSukumar Swaminathan 			rx_ring->fcs_err++;
1803bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1804bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "bad packet, type 0x%x", err_flag);
1805bafec742SSukumar Swaminathan #endif
1806bafec742SSukumar Swaminathan 		QL_DUMP(DBG_RX, "qlge_ring_rx: bad response iocb dump\n",
1807bafec742SSukumar Swaminathan 		    (uint8_t *)ib_mac_rsp, 8,
1808bafec742SSukumar Swaminathan 		    (size_t)sizeof (struct ib_mac_iocb_rsp));
1809bafec742SSukumar Swaminathan 	}
1810bafec742SSukumar Swaminathan 
1811bafec742SSukumar Swaminathan 	/* header should not be in large buffer */
1812bafec742SSukumar Swaminathan 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL) {
1813bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "header in large buffer or invalid!");
1814bafec742SSukumar Swaminathan 		err_flag |= 1;
1815bafec742SSukumar Swaminathan 	}
1816bafec742SSukumar Swaminathan 	/*
1817bafec742SSukumar Swaminathan 	 * Handle the header buffer if present.
1818bafec742SSukumar Swaminathan 	 * packet header must be valid and saved in one small buffer
1819bafec742SSukumar Swaminathan 	 * broadcast/multicast packets' headers not splitted
1820bafec742SSukumar Swaminathan 	 */
1821bafec742SSukumar Swaminathan 	if ((ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) &&
1822bafec742SSukumar Swaminathan 	    (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1823bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX, ("Header of %d bytes in small buffer.\n",
1824bafec742SSukumar Swaminathan 		    header_len));
1825bafec742SSukumar Swaminathan 		/* Sync access */
1826bafec742SSukumar Swaminathan 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
1827bafec742SSukumar Swaminathan 
1828bafec742SSukumar Swaminathan 		ASSERT(sbq_desc != NULL);
1829bafec742SSukumar Swaminathan 
1830bafec742SSukumar Swaminathan 		/*
1831bafec742SSukumar Swaminathan 		 * Validate addresses from the ASIC with the
1832bafec742SSukumar Swaminathan 		 * expected sbuf address
1833bafec742SSukumar Swaminathan 		 */
1834bafec742SSukumar Swaminathan 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
1835bafec742SSukumar Swaminathan 		    != ib_mac_rsp->hdr_addr) {
1836bafec742SSukumar Swaminathan 			/* Small buffer address mismatch */
1837bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
1838bafec742SSukumar Swaminathan 			    " in wrong small buffer",
1839bafec742SSukumar Swaminathan 			    __func__, qlge->instance, rx_ring->cq_id);
1840bafec742SSukumar Swaminathan 			goto fetal_error;
1841bafec742SSukumar Swaminathan 		}
1842bafec742SSukumar Swaminathan 		/* get this packet */
1843bafec742SSukumar Swaminathan 		mp1 = sbq_desc->mp;
1844bafec742SSukumar Swaminathan 		if ((err_flag != 0)|| (mp1 == NULL)) {
1845bafec742SSukumar Swaminathan 			/* failed on this packet, put it back for re-arming */
1846bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1847bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "get header from small buffer fail");
1848bafec742SSukumar Swaminathan #endif
1849bafec742SSukumar Swaminathan 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1850bafec742SSukumar Swaminathan 			mp1 = NULL;
1851bafec742SSukumar Swaminathan 		} else {
1852bafec742SSukumar Swaminathan 			/* Flush DMA'd data */
1853bafec742SSukumar Swaminathan 			(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
1854bafec742SSukumar Swaminathan 			    0, header_len, DDI_DMA_SYNC_FORKERNEL);
1855bafec742SSukumar Swaminathan 
1856bafec742SSukumar Swaminathan 			if ((qlge->ip_hdr_offset != 0)&&
1857bafec742SSukumar Swaminathan 			    (header_len < SMALL_BUFFER_SIZE)) {
1858bafec742SSukumar Swaminathan 				/*
1859bafec742SSukumar Swaminathan 				 * copy entire header to a 2 bytes boundary
1860bafec742SSukumar Swaminathan 				 * address for 8100 adapters so that the IP
1861bafec742SSukumar Swaminathan 				 * header can be on a 4 byte boundary address
1862bafec742SSukumar Swaminathan 				 */
1863bafec742SSukumar Swaminathan 				bcopy(mp1->b_rptr,
1864bafec742SSukumar Swaminathan 				    (mp1->b_rptr + SMALL_BUFFER_SIZE +
1865bafec742SSukumar Swaminathan 				    qlge->ip_hdr_offset),
1866bafec742SSukumar Swaminathan 				    header_len);
1867bafec742SSukumar Swaminathan 				mp1->b_rptr += SMALL_BUFFER_SIZE +
1868bafec742SSukumar Swaminathan 				    qlge->ip_hdr_offset;
1869bafec742SSukumar Swaminathan 			}
1870bafec742SSukumar Swaminathan 
1871bafec742SSukumar Swaminathan 			/*
1872bafec742SSukumar Swaminathan 			 * Adjust the mp payload_len to match
1873bafec742SSukumar Swaminathan 			 * the packet header payload_len
1874bafec742SSukumar Swaminathan 			 */
1875bafec742SSukumar Swaminathan 			mp1->b_wptr = mp1->b_rptr + header_len;
1876bafec742SSukumar Swaminathan 			mp1->b_next = mp1->b_cont = NULL;
1877bafec742SSukumar Swaminathan 			QL_DUMP(DBG_RX, "\t RX packet header dump:\n",
1878bafec742SSukumar Swaminathan 			    (uint8_t *)mp1->b_rptr, 8, header_len);
1879bafec742SSukumar Swaminathan 		}
1880bafec742SSukumar Swaminathan 	}
1881bafec742SSukumar Swaminathan 
1882bafec742SSukumar Swaminathan 	/*
1883bafec742SSukumar Swaminathan 	 * packet data or whole packet can be in small or one or
1884bafec742SSukumar Swaminathan 	 * several large buffer(s)
1885bafec742SSukumar Swaminathan 	 */
1886bafec742SSukumar Swaminathan 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1887bafec742SSukumar Swaminathan 		/*
1888bafec742SSukumar Swaminathan 		 * The data is in a single small buffer.
1889bafec742SSukumar Swaminathan 		 */
1890bafec742SSukumar Swaminathan 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
1891bafec742SSukumar Swaminathan 
1892bafec742SSukumar Swaminathan 		ASSERT(sbq_desc != NULL);
1893bafec742SSukumar Swaminathan 
1894bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX,
1895bafec742SSukumar Swaminathan 		    ("%d bytes in a single small buffer, sbq_desc = %p, "
1896bafec742SSukumar Swaminathan 		    "sbq_desc->bd_dma.dma_addr = %x,"
1897bafec742SSukumar Swaminathan 		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
1898bafec742SSukumar Swaminathan 		    payload_len, sbq_desc, sbq_desc->bd_dma.dma_addr,
1899bafec742SSukumar Swaminathan 		    ib_mac_rsp->data_addr, sbq_desc->mp));
1900bafec742SSukumar Swaminathan 
1901bafec742SSukumar Swaminathan 		/*
1902bafec742SSukumar Swaminathan 		 * Validate  addresses from the ASIC with the
1903bafec742SSukumar Swaminathan 		 * expected sbuf address
1904bafec742SSukumar Swaminathan 		 */
1905bafec742SSukumar Swaminathan 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
1906bafec742SSukumar Swaminathan 		    != ib_mac_rsp->data_addr) {
1907bafec742SSukumar Swaminathan 			/* Small buffer address mismatch */
1908bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
1909bafec742SSukumar Swaminathan 			    " in wrong small buffer",
1910bafec742SSukumar Swaminathan 			    __func__, qlge->instance, rx_ring->cq_id);
1911bafec742SSukumar Swaminathan 			goto fetal_error;
1912bafec742SSukumar Swaminathan 		}
1913bafec742SSukumar Swaminathan 		/* get this packet */
1914bafec742SSukumar Swaminathan 		mp2 = sbq_desc->mp;
1915bafec742SSukumar Swaminathan 		if ((err_flag != 0) || (mp2 == NULL)) {
1916bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1917bafec742SSukumar Swaminathan 			/* failed on this packet, put it back for re-arming */
1918bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "ignore bad data from small buffer");
1919bafec742SSukumar Swaminathan #endif
1920bafec742SSukumar Swaminathan 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1921bafec742SSukumar Swaminathan 			mp2 = NULL;
1922bafec742SSukumar Swaminathan 		} else {
1923bafec742SSukumar Swaminathan 			/* Adjust the buffer length to match the payload_len */
1924bafec742SSukumar Swaminathan 			mp2->b_wptr = mp2->b_rptr + payload_len;
1925bafec742SSukumar Swaminathan 			mp2->b_next = mp2->b_cont = NULL;
1926bafec742SSukumar Swaminathan 			/* Flush DMA'd data */
1927bafec742SSukumar Swaminathan 			(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
1928bafec742SSukumar Swaminathan 			    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
1929bafec742SSukumar Swaminathan 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
1930bafec742SSukumar Swaminathan 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
1931bafec742SSukumar Swaminathan 			/*
1932bafec742SSukumar Swaminathan 			 * if payload is too small , copy to
1933bafec742SSukumar Swaminathan 			 * the end of packet header
1934bafec742SSukumar Swaminathan 			 */
1935bafec742SSukumar Swaminathan 			if ((mp1 != NULL) &&
1936bafec742SSukumar Swaminathan 			    (payload_len <= qlge->payload_copy_thresh) &&
1937bafec742SSukumar Swaminathan 			    (pkt_len <
1938bafec742SSukumar Swaminathan 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
1939bafec742SSukumar Swaminathan 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
1940bafec742SSukumar Swaminathan 				mp1->b_wptr += payload_len;
1941bafec742SSukumar Swaminathan 				freemsg(mp2);
1942bafec742SSukumar Swaminathan 				mp2 = NULL;
1943bafec742SSukumar Swaminathan 			}
1944bafec742SSukumar Swaminathan 		}
1945bafec742SSukumar Swaminathan 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1946bafec742SSukumar Swaminathan 		/*
1947bafec742SSukumar Swaminathan 		 * The data is in a single large buffer.
1948bafec742SSukumar Swaminathan 		 */
1949bafec742SSukumar Swaminathan 		lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
1950bafec742SSukumar Swaminathan 
1951bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX,
1952bafec742SSukumar Swaminathan 		    ("%d bytes in a single large buffer, lbq_desc = %p, "
1953bafec742SSukumar Swaminathan 		    "lbq_desc->bd_dma.dma_addr = %x,"
1954bafec742SSukumar Swaminathan 		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
1955bafec742SSukumar Swaminathan 		    payload_len, lbq_desc, lbq_desc->bd_dma.dma_addr,
1956bafec742SSukumar Swaminathan 		    ib_mac_rsp->data_addr, lbq_desc->mp));
1957bafec742SSukumar Swaminathan 
1958bafec742SSukumar Swaminathan 		ASSERT(lbq_desc != NULL);
1959bafec742SSukumar Swaminathan 
1960bafec742SSukumar Swaminathan 		/*
1961bafec742SSukumar Swaminathan 		 * Validate  addresses from the ASIC with
1962bafec742SSukumar Swaminathan 		 * the expected lbuf address
1963bafec742SSukumar Swaminathan 		 */
1964bafec742SSukumar Swaminathan 		if (cpu_to_le64(lbq_desc->bd_dma.dma_addr)
1965bafec742SSukumar Swaminathan 		    != ib_mac_rsp->data_addr) {
1966bafec742SSukumar Swaminathan 			/* Large buffer address mismatch */
1967bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
1968bafec742SSukumar Swaminathan 			    " in wrong large buffer",
1969bafec742SSukumar Swaminathan 			    __func__, qlge->instance, rx_ring->cq_id);
1970bafec742SSukumar Swaminathan 			goto fetal_error;
1971bafec742SSukumar Swaminathan 		}
1972bafec742SSukumar Swaminathan 		mp2 = lbq_desc->mp;
1973bafec742SSukumar Swaminathan 		if ((err_flag != 0) || (mp2 == NULL)) {
1974bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1975bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "ignore bad data from large buffer");
1976bafec742SSukumar Swaminathan #endif
1977bafec742SSukumar Swaminathan 			/* failed on this packet, put it back for re-arming */
1978bafec742SSukumar Swaminathan 			ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
1979bafec742SSukumar Swaminathan 			mp2 = NULL;
1980bafec742SSukumar Swaminathan 		} else {
1981bafec742SSukumar Swaminathan 			/*
1982bafec742SSukumar Swaminathan 			 * Adjust the buffer length to match
1983bafec742SSukumar Swaminathan 			 * the packet payload_len
1984bafec742SSukumar Swaminathan 			 */
1985bafec742SSukumar Swaminathan 			mp2->b_wptr = mp2->b_rptr + payload_len;
1986bafec742SSukumar Swaminathan 			mp2->b_next = mp2->b_cont = NULL;
1987bafec742SSukumar Swaminathan 			/* Flush DMA'd data */
1988bafec742SSukumar Swaminathan 			(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
1989bafec742SSukumar Swaminathan 			    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
1990bafec742SSukumar Swaminathan 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
1991bafec742SSukumar Swaminathan 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
1992bafec742SSukumar Swaminathan 			/*
1993bafec742SSukumar Swaminathan 			 * if payload is too small , copy to
1994bafec742SSukumar Swaminathan 			 * the end of packet header
1995bafec742SSukumar Swaminathan 			 */
1996bafec742SSukumar Swaminathan 			if ((mp1 != NULL) &&
1997bafec742SSukumar Swaminathan 			    (payload_len <= qlge->payload_copy_thresh) &&
1998bafec742SSukumar Swaminathan 			    (pkt_len<
1999bafec742SSukumar Swaminathan 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2000bafec742SSukumar Swaminathan 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2001bafec742SSukumar Swaminathan 				mp1->b_wptr += payload_len;
2002bafec742SSukumar Swaminathan 				freemsg(mp2);
2003bafec742SSukumar Swaminathan 				mp2 = NULL;
2004bafec742SSukumar Swaminathan 			}
2005bafec742SSukumar Swaminathan 		}
2006bafec742SSukumar Swaminathan 	} else if (payload_len) {
2007bafec742SSukumar Swaminathan 		/*
2008bafec742SSukumar Swaminathan 		 * payload available but not in sml nor lrg buffer,
2009bafec742SSukumar Swaminathan 		 * so, it is saved in IAL
2010bafec742SSukumar Swaminathan 		 */
2011bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
2012bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "packet chained in IAL \n");
2013bafec742SSukumar Swaminathan #endif
2014bafec742SSukumar Swaminathan 		/* lrg buf addresses are saved in one small buffer */
2015bafec742SSukumar Swaminathan 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2016bafec742SSukumar Swaminathan 		curr_ial_ptr = (uint64_t *)sbq_desc->bd_dma.vaddr;
2017bafec742SSukumar Swaminathan 		done = 0;
2018bafec742SSukumar Swaminathan 		while (!done) {
2019bafec742SSukumar Swaminathan 			ial_data_addr_low =
2020bafec742SSukumar Swaminathan 			    (uint32_t)(le64_to_cpu(*curr_ial_ptr) &
2021bafec742SSukumar Swaminathan 			    0xFFFFFFFE);
2022bafec742SSukumar Swaminathan 			/* check if this is the last packet fragment */
2023bafec742SSukumar Swaminathan 			done = (uint32_t)(le64_to_cpu(*curr_ial_ptr) & 1);
2024bafec742SSukumar Swaminathan 			curr_ial_ptr++;
2025bafec742SSukumar Swaminathan 			/*
2026bafec742SSukumar Swaminathan 			 * The data is in one or several large buffer(s).
2027bafec742SSukumar Swaminathan 			 */
2028bafec742SSukumar Swaminathan 			lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2029bafec742SSukumar Swaminathan 			actual_data_addr_low =
2030bafec742SSukumar Swaminathan 			    (uint32_t)(lbq_desc->bd_dma.dma_addr &
2031bafec742SSukumar Swaminathan 			    0xFFFFFFFE);
2032bafec742SSukumar Swaminathan 			if (ial_data_addr_low != actual_data_addr_low) {
2033bafec742SSukumar Swaminathan 				cmn_err(CE_WARN,
2034bafec742SSukumar Swaminathan 				    "packet saved in wrong ial lrg buffer"
2035bafec742SSukumar Swaminathan 				    " expected %x, actual %lx",
2036bafec742SSukumar Swaminathan 				    ial_data_addr_low,
2037bafec742SSukumar Swaminathan 				    (uintptr_t)lbq_desc->bd_dma.dma_addr);
2038bafec742SSukumar Swaminathan 				goto fetal_error;
2039bafec742SSukumar Swaminathan 			}
2040bafec742SSukumar Swaminathan 
2041bafec742SSukumar Swaminathan 			if (mp_ial == NULL) {
2042bafec742SSukumar Swaminathan 				mp_ial = mp2 = lbq_desc->mp;
2043bafec742SSukumar Swaminathan 			} else {
2044bafec742SSukumar Swaminathan 				mp2->b_cont = lbq_desc->mp;
2045bafec742SSukumar Swaminathan 				mp2 = lbq_desc->mp;
2046bafec742SSukumar Swaminathan 			}
2047bafec742SSukumar Swaminathan 			mp2->b_next = NULL;
2048bafec742SSukumar Swaminathan 			mp2->b_cont = NULL;
2049bafec742SSukumar Swaminathan 			size = (payload_len < rx_ring->lbq_buf_size)?
2050bafec742SSukumar Swaminathan 			    payload_len : rx_ring->lbq_buf_size;
2051bafec742SSukumar Swaminathan 			mp2->b_wptr = mp2->b_rptr + size;
2052bafec742SSukumar Swaminathan 			/* Flush DMA'd data */
2053bafec742SSukumar Swaminathan 			(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2054bafec742SSukumar Swaminathan 			    0, size, DDI_DMA_SYNC_FORKERNEL);
2055bafec742SSukumar Swaminathan 			payload_len -= size;
2056bafec742SSukumar Swaminathan 			QL_DUMP(DBG_RX, "\t Mac data dump:\n",
2057bafec742SSukumar Swaminathan 			    (uint8_t *)mp2->b_rptr, 8, size);
2058bafec742SSukumar Swaminathan 		}
2059bafec742SSukumar Swaminathan 		mp2 = mp_ial;
2060bafec742SSukumar Swaminathan 		freemsg(sbq_desc->mp);
2061bafec742SSukumar Swaminathan 	}
2062bafec742SSukumar Swaminathan 	/*
2063bafec742SSukumar Swaminathan 	 * some packets' hdr not split, then send mp2 upstream, otherwise,
2064bafec742SSukumar Swaminathan 	 * concatenate message block mp2 to the tail of message header, mp1
2065bafec742SSukumar Swaminathan 	 */
2066bafec742SSukumar Swaminathan 	if (!err_flag) {
2067bafec742SSukumar Swaminathan 		if (mp1) {
2068bafec742SSukumar Swaminathan 			if (mp2) {
2069bafec742SSukumar Swaminathan 				QL_PRINT(DBG_RX, ("packet in mp1 and mp2\n"));
2070bafec742SSukumar Swaminathan 				linkb(mp1, mp2); /* mp1->b_cont = mp2; */
2071bafec742SSukumar Swaminathan 				mp = mp1;
2072bafec742SSukumar Swaminathan 			} else {
2073bafec742SSukumar Swaminathan 				QL_PRINT(DBG_RX, ("packet in mp1 only\n"));
2074bafec742SSukumar Swaminathan 				mp = mp1;
2075bafec742SSukumar Swaminathan 			}
2076bafec742SSukumar Swaminathan 		} else if (mp2) {
2077bafec742SSukumar Swaminathan 			QL_PRINT(DBG_RX, ("packet in mp2 only\n"));
2078bafec742SSukumar Swaminathan 			mp = mp2;
2079bafec742SSukumar Swaminathan 		}
2080bafec742SSukumar Swaminathan 	}
2081bafec742SSukumar Swaminathan 	return (mp);
2082bafec742SSukumar Swaminathan 
2083bafec742SSukumar Swaminathan fetal_error:
2084bafec742SSukumar Swaminathan 	/* Fetal Error! */
2085bafec742SSukumar Swaminathan 	*mp->b_wptr = 0;
2086bafec742SSukumar Swaminathan 	return (mp);
2087bafec742SSukumar Swaminathan 
2088bafec742SSukumar Swaminathan }
2089bafec742SSukumar Swaminathan 
2090bafec742SSukumar Swaminathan /*
2091bafec742SSukumar Swaminathan  * Bump completion queue consumer index.
2092bafec742SSukumar Swaminathan  */
2093bafec742SSukumar Swaminathan static void
2094bafec742SSukumar Swaminathan ql_update_cq(struct rx_ring *rx_ring)
2095bafec742SSukumar Swaminathan {
2096bafec742SSukumar Swaminathan 	rx_ring->cnsmr_idx++;
2097bafec742SSukumar Swaminathan 	rx_ring->curr_entry++;
2098bafec742SSukumar Swaminathan 	if (rx_ring->cnsmr_idx >= rx_ring->cq_len) {
2099bafec742SSukumar Swaminathan 		rx_ring->cnsmr_idx = 0;
2100bafec742SSukumar Swaminathan 		rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
2101bafec742SSukumar Swaminathan 	}
2102bafec742SSukumar Swaminathan }
2103bafec742SSukumar Swaminathan 
2104bafec742SSukumar Swaminathan /*
2105bafec742SSukumar Swaminathan  * Update completion queue consumer index.
2106bafec742SSukumar Swaminathan  */
2107bafec742SSukumar Swaminathan static void
2108bafec742SSukumar Swaminathan ql_write_cq_idx(struct rx_ring *rx_ring)
2109bafec742SSukumar Swaminathan {
2110bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2111bafec742SSukumar Swaminathan 
2112bafec742SSukumar Swaminathan 	ql_write_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg,
2113bafec742SSukumar Swaminathan 	    rx_ring->cnsmr_idx);
2114bafec742SSukumar Swaminathan }
2115bafec742SSukumar Swaminathan 
2116bafec742SSukumar Swaminathan /*
2117bafec742SSukumar Swaminathan  * Processes a SYS-Chip Event Notification Completion Event.
2118bafec742SSukumar Swaminathan  * The incoming notification event that describes a link up/down
2119bafec742SSukumar Swaminathan  * or some sorts of error happens.
2120bafec742SSukumar Swaminathan  */
2121bafec742SSukumar Swaminathan static void
2122bafec742SSukumar Swaminathan ql_process_chip_ae_intr(qlge_t *qlge,
2123bafec742SSukumar Swaminathan     struct ib_sys_event_iocb_rsp *ib_sys_event_rsp_ptr)
2124bafec742SSukumar Swaminathan {
2125bafec742SSukumar Swaminathan 	uint8_t eventType = ib_sys_event_rsp_ptr->event_type;
2126bafec742SSukumar Swaminathan 	uint32_t soft_req = 0;
2127bafec742SSukumar Swaminathan 
2128bafec742SSukumar Swaminathan 	switch (eventType) {
2129bafec742SSukumar Swaminathan 		case SYS_EVENT_PORT_LINK_UP:	/* 0x0h */
2130bafec742SSukumar Swaminathan 			QL_PRINT(DBG_MBX, ("Port Link Up\n"));
2131bafec742SSukumar Swaminathan 			break;
2132bafec742SSukumar Swaminathan 
2133bafec742SSukumar Swaminathan 		case SYS_EVENT_PORT_LINK_DOWN:	/* 0x1h */
2134bafec742SSukumar Swaminathan 			QL_PRINT(DBG_MBX, ("Port Link Down\n"));
2135bafec742SSukumar Swaminathan 			break;
2136bafec742SSukumar Swaminathan 
2137bafec742SSukumar Swaminathan 		case SYS_EVENT_MULTIPLE_CAM_HITS : /* 0x6h */
2138bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "A multiple CAM hits look up error "
2139bafec742SSukumar Swaminathan 			    "occurred");
2140bafec742SSukumar Swaminathan 			soft_req |= NEED_HW_RESET;
2141bafec742SSukumar Swaminathan 			break;
2142bafec742SSukumar Swaminathan 
2143bafec742SSukumar Swaminathan 		case SYS_EVENT_SOFT_ECC_ERR:	/* 0x7h */
2144bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Soft ECC error detected");
2145bafec742SSukumar Swaminathan 			soft_req |= NEED_HW_RESET;
2146bafec742SSukumar Swaminathan 			break;
2147bafec742SSukumar Swaminathan 
2148bafec742SSukumar Swaminathan 		case SYS_EVENT_MGMT_FATAL_ERR:	/* 0x8h */
2149bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Management (MPI) Processor fatal"
2150bafec742SSukumar Swaminathan 			    " error occured");
2151bafec742SSukumar Swaminathan 			soft_req |= NEED_MPI_RESET;
2152bafec742SSukumar Swaminathan 			break;
2153bafec742SSukumar Swaminathan 
2154bafec742SSukumar Swaminathan 		case SYS_EVENT_MAC_INTERRUPT:	/* 0x9h */
2155bafec742SSukumar Swaminathan 			QL_PRINT(DBG_MBX, ("MAC Interrupt"));
2156bafec742SSukumar Swaminathan 			break;
2157bafec742SSukumar Swaminathan 
2158bafec742SSukumar Swaminathan 		case SYS_EVENT_PCI_ERR_READING_SML_LRG_BUF:	/* 0x40h */
2159bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "PCI Error reading small/large "
2160bafec742SSukumar Swaminathan 			    "buffers occured");
2161bafec742SSukumar Swaminathan 			soft_req |= NEED_HW_RESET;
2162bafec742SSukumar Swaminathan 			break;
2163bafec742SSukumar Swaminathan 
2164bafec742SSukumar Swaminathan 		default:
2165bafec742SSukumar Swaminathan 			QL_PRINT(DBG_RX, ("%s(%d) unknown Sys Event: "
2166bafec742SSukumar Swaminathan 			    "type 0x%x occured",
2167bafec742SSukumar Swaminathan 			    __func__, qlge->instance, eventType));
2168bafec742SSukumar Swaminathan 			break;
2169bafec742SSukumar Swaminathan 	}
2170bafec742SSukumar Swaminathan 
2171bafec742SSukumar Swaminathan 	if ((soft_req & NEED_MPI_RESET) != 0) {
2172bafec742SSukumar Swaminathan 		ql_wake_mpi_reset_soft_intr(qlge);
2173bafec742SSukumar Swaminathan 	} else if ((soft_req & NEED_HW_RESET) != 0) {
2174bafec742SSukumar Swaminathan 		ql_wake_asic_reset_soft_intr(qlge);
2175bafec742SSukumar Swaminathan 	}
2176bafec742SSukumar Swaminathan }
2177bafec742SSukumar Swaminathan 
2178bafec742SSukumar Swaminathan /*
2179bafec742SSukumar Swaminathan  * set received packet checksum flag
2180bafec742SSukumar Swaminathan  */
2181bafec742SSukumar Swaminathan void
2182bafec742SSukumar Swaminathan ql_set_rx_cksum(mblk_t *mp, struct ib_mac_iocb_rsp *net_rsp)
2183bafec742SSukumar Swaminathan {
2184bafec742SSukumar Swaminathan 	uint32_t flags;
2185bafec742SSukumar Swaminathan 
2186bafec742SSukumar Swaminathan 	/* Not TCP or UDP packet? nothing more to do */
2187bafec742SSukumar Swaminathan 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) == 0) &&
2188bafec742SSukumar Swaminathan 	    ((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) == 0))
2189bafec742SSukumar Swaminathan 	return;
2190bafec742SSukumar Swaminathan 
2191bafec742SSukumar Swaminathan 	/* No CKO support for IPv6 */
2192bafec742SSukumar Swaminathan 	if ((net_rsp->flags3 & IB_MAC_IOCB_RSP_V6) != 0)
2193bafec742SSukumar Swaminathan 		return;
2194bafec742SSukumar Swaminathan 
2195bafec742SSukumar Swaminathan 	/*
2196bafec742SSukumar Swaminathan 	 * If checksum error, don't set flags; stack will calculate
2197bafec742SSukumar Swaminathan 	 * checksum, detect the error and update statistics
2198bafec742SSukumar Swaminathan 	 */
2199bafec742SSukumar Swaminathan 	if (((net_rsp->flags1 & IB_MAC_IOCB_RSP_TE) != 0) ||
2200bafec742SSukumar Swaminathan 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_IE) != 0))
2201bafec742SSukumar Swaminathan 		return;
2202bafec742SSukumar Swaminathan 
2203bafec742SSukumar Swaminathan 	/* TCP or UDP packet and checksum valid */
2204bafec742SSukumar Swaminathan 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) != 0) &&
2205bafec742SSukumar Swaminathan 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
2206bafec742SSukumar Swaminathan 		flags = HCK_FULLCKSUM | HCK_FULLCKSUM_OK;
2207bafec742SSukumar Swaminathan 		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, flags, 0);
2208bafec742SSukumar Swaminathan 	}
2209bafec742SSukumar Swaminathan 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) != 0) &&
2210bafec742SSukumar Swaminathan 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
2211bafec742SSukumar Swaminathan 		flags = HCK_FULLCKSUM | HCK_FULLCKSUM_OK;
2212bafec742SSukumar Swaminathan 		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, flags, 0);
2213bafec742SSukumar Swaminathan 	}
2214bafec742SSukumar Swaminathan }
2215bafec742SSukumar Swaminathan 
/*
 * This function goes through h/w descriptor in one specified rx ring,
 * receives the data if the descriptor status shows the data is ready.
 * It returns a chain of mblks containing the received data, to be
 * passed up to mac_rx_ring().
 *
 * Caller must hold rx_ring->rx_lock (it is dropped and re-acquired
 * around freemsg() below).  poll_bytes bounds how many payload bytes
 * may be consumed in one call; QLGE_POLL_ALL means "drain everything"
 * (interrupt mode), in which case the completion interrupt is
 * re-enabled before returning.
 */
mblk_t *
ql_ring_rx(struct rx_ring *rx_ring, int poll_bytes)
{
	qlge_t *qlge = rx_ring->qlge;
	/* producer index is written by h/w into a shared-memory register */
	uint32_t prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ib_mac_iocb_rsp *net_rsp;
	mblk_t *mp;
	mblk_t *mblk_head;	/* head of the chain returned to the caller */
	mblk_t **mblk_tail;	/* points at b_next of the current tail */
	uint32_t received_bytes = 0;
	boolean_t done = B_FALSE;
	uint32_t length;

#ifdef QLGE_TRACK_BUFFER_USAGE
	/* Debug accounting: track the low-water mark of free CQ entries */
	uint32_t consumer_idx;
	uint32_t producer_idx;
	uint32_t num_free_entries;
	uint32_t temp;

	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
	consumer_idx = temp & 0x0000ffff;
	producer_idx = (temp >> 16);

	if (consumer_idx > producer_idx)
		num_free_entries = (consumer_idx - producer_idx);
	else
		num_free_entries = NUM_RX_RING_ENTRIES - (
		    producer_idx - consumer_idx);

	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;

#endif
	mblk_head = NULL;
	mblk_tail = &mblk_head;

	/* Walk the completion queue until drained or the byte quota hits */
	while (!done && (prod != rx_ring->cnsmr_idx)) {
		QL_PRINT(DBG_RX,
		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n",
		    __func__, rx_ring->cq_id, prod, rx_ring->cnsmr_idx));

		net_rsp = (struct ib_mac_iocb_rsp *)rx_ring->curr_entry;
		/* Make the h/w-written completion IOCB visible to the CPU */
		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
		    (off_t)((uintptr_t)net_rsp -
		    (uintptr_t)rx_ring->cq_dma.vaddr),
		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
		QL_DUMP(DBG_RX, "qlge_ring_rx: rx completion iocb\n",
		    rx_ring->curr_entry, 8, (size_t)sizeof (*net_rsp));

		switch (net_rsp->opcode) {

		case OPCODE_IB_MAC_IOCB:
			/* Adding length of pkt header and payload */
			length = le32_to_cpu(net_rsp->data_len) +
			    le32_to_cpu(net_rsp->hdr_len);
			/*
			 * In polling mode, stop before consuming this
			 * entry once accepting the frame would exceed
			 * the caller's byte quota; the entry stays in
			 * the queue for the next poll.
			 */
			if ((poll_bytes != QLGE_POLL_ALL) &&
			    ((received_bytes + length) > poll_bytes)) {
				done = B_TRUE;
				continue;
			}
			received_bytes += length;

			mp = ql_build_rx_mp(qlge, rx_ring, net_rsp);
			if (mp != NULL) {
				if (rx_ring->mac_flags != QL_MAC_STARTED) {
					/*
					 * Increment number of packets we have
					 * indicated to the stack, should be
					 * decremented when we get it back
					 * or when freemsg is called
					 */
					ASSERT(rx_ring->rx_indicate
					    <= rx_ring->cq_len);
#ifdef QLGE_LOAD_UNLOAD
					cmn_err(CE_WARN, "%s do not send to OS,"
					    " mac_flags %d, indicate %d",
					    __func__, rx_ring->mac_flags,
					    rx_ring->rx_indicate);
#endif
					QL_PRINT(DBG_RX,
					    ("cq_id = %d, packet "
					    "dropped, mac not "
					    "enabled.\n",
					    rx_ring->cq_id));
					rx_ring->rx_pkt_dropped_mac_unenabled++;

					/* rx_lock is expected to be held */
					mutex_exit(&rx_ring->rx_lock);
					freemsg(mp);
					mutex_enter(&rx_ring->rx_lock);
					mp = NULL;
				}

				if (mp != NULL) {
					/*
					 * IP full packet has been
					 * successfully verified by
					 * H/W and is correct
					 */
					ql_set_rx_cksum(mp, net_rsp);

					rx_ring->rx_packets++;
					rx_ring->rx_bytes += length;
					/* append mp to the returned chain */
					*mblk_tail = mp;
					mblk_tail = &mp->b_next;
				}
			} else {
				QL_PRINT(DBG_RX,
				    ("cq_id = %d, packet dropped\n",
				    rx_ring->cq_id));
				rx_ring->rx_packets_dropped_no_buffer++;
			}
			break;

		case OPCODE_IB_SYS_EVENT_IOCB:
			/* asynchronous/system event reported by the chip */
			ql_process_chip_ae_intr(qlge,
			    (struct ib_sys_event_iocb_rsp *)
			    net_rsp);
			break;

		default:
			cmn_err(CE_WARN,
			    "%s Ring(%d)Hit default case, not handled!"
			    " dropping the packet, "
			    "opcode = %x.", __func__, rx_ring->cq_id,
			    net_rsp->opcode);
			break;
		}
		/* increment cnsmr_idx and curr_entry */
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);

	}
	/* update cnsmr_idx */
	ql_write_cq_idx(rx_ring);
	/* do not enable interrupt for polling mode */
	if (poll_bytes == QLGE_POLL_ALL)
		ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
	return (mblk_head);
}
2362bafec742SSukumar Swaminathan 
/*
 * Process an outbound completion from an rx ring.
 *
 * Releases the DMA bindings and the mblk associated with the completed
 * transmit, updates tx statistics, logs any error flags reported by the
 * h/w, and marks the descriptor slot free for reuse.  Called from
 * ql_clean_outbound_rx_ring() with that ring's rx_lock held.
 */
static void
ql_process_mac_tx_intr(qlge_t *qlge, struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int j;

	/* Locate the tx descriptor this completion reports on */
	tx_ring = &qlge->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = tx_ring->wq_desc;
	tx_ring_desc += mac_rsp->tid;

	if (tx_ring_desc->tx_type == USE_DMA) {
		QL_PRINT(DBG_TX, ("%s(%d): tx type USE_DMA\n",
		    __func__, qlge->instance));

		/*
		 * Release the DMA resource that is used for
		 * DMA binding.
		 */
		for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
			(void) ddi_dma_unbind_handle(
			    tx_ring_desc->tx_dma_handle[j]);
		}

		tx_ring_desc->tx_dma_handle_used = 0;
		/*
		 * Free the mblk after sending completed
		 */
		if (tx_ring_desc->mp != NULL) {
			freemsg(tx_ring_desc->mp);
			tx_ring_desc->mp = NULL;
		}
	}

	/* Account the completed frame */
	tx_ring->obytes += tx_ring_desc->tx_bytes;
	tx_ring->opackets++;

	if (mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | OB_MAC_IOCB_RSP_S |
	    OB_MAC_IOCB_RSP_L | OB_MAC_IOCB_RSP_B)) {
		tx_ring->errxmt++;
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			/* EMPTY */
			QL_PRINT(DBG_TX,
			    ("Total descriptor length did not match "
			    "transfer length.\n"));
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			/* EMPTY */
			QL_PRINT(DBG_TX,
			    ("Frame too short to be legal, not sent.\n"));
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			/* EMPTY */
			QL_PRINT(DBG_TX,
			    ("Frame too long, but sent anyway.\n"));
		}
		/*
		 * NOTE(review): the enclosing guard tests OB_MAC_IOCB_RSP_B
		 * against flags1, but the check below reads flags3 --
		 * verify against the ob_mac_iocb_rsp layout in qlge.h which
		 * field actually carries the backplane-error bit.
		 */
		if (mac_rsp->flags3 & OB_MAC_IOCB_RSP_B) {
			/* EMPTY */
			QL_PRINT(DBG_TX,
			    ("PCI backplane error. Frame not sent.\n"));
		}
	}
	/* Descriptor slot is now free for the transmit path to reuse */
	atomic_inc_32(&tx_ring->tx_free_count);
}
2428bafec742SSukumar Swaminathan 
2429bafec742SSukumar Swaminathan /*
2430bafec742SSukumar Swaminathan  * clean up tx completion iocbs
2431bafec742SSukumar Swaminathan  */
2432bafec742SSukumar Swaminathan static int
2433bafec742SSukumar Swaminathan ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2434bafec742SSukumar Swaminathan {
2435bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2436bafec742SSukumar Swaminathan 	uint32_t prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2437bafec742SSukumar Swaminathan 	struct ob_mac_iocb_rsp *net_rsp = NULL;
2438bafec742SSukumar Swaminathan 	int count = 0;
2439bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
2440bafec742SSukumar Swaminathan 	boolean_t resume_tx = B_FALSE;
2441bafec742SSukumar Swaminathan 
2442bafec742SSukumar Swaminathan 	mutex_enter(&rx_ring->rx_lock);
2443bafec742SSukumar Swaminathan #ifdef QLGE_TRACK_BUFFER_USAGE
2444bafec742SSukumar Swaminathan 	{
2445bafec742SSukumar Swaminathan 	uint32_t consumer_idx;
2446bafec742SSukumar Swaminathan 	uint32_t producer_idx;
2447bafec742SSukumar Swaminathan 	uint32_t num_free_entries;
2448bafec742SSukumar Swaminathan 	uint32_t temp;
2449bafec742SSukumar Swaminathan 
2450bafec742SSukumar Swaminathan 	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2451bafec742SSukumar Swaminathan 	consumer_idx = temp & 0x0000ffff;
2452bafec742SSukumar Swaminathan 	producer_idx = (temp >> 16);
2453bafec742SSukumar Swaminathan 
2454bafec742SSukumar Swaminathan 	if (consumer_idx > producer_idx)
2455bafec742SSukumar Swaminathan 		num_free_entries = (consumer_idx - producer_idx);
2456bafec742SSukumar Swaminathan 	else
2457bafec742SSukumar Swaminathan 		num_free_entries = NUM_RX_RING_ENTRIES -
2458bafec742SSukumar Swaminathan 		    (producer_idx - consumer_idx);
2459bafec742SSukumar Swaminathan 
2460bafec742SSukumar Swaminathan 	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2461bafec742SSukumar Swaminathan 		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2462bafec742SSukumar Swaminathan 
2463bafec742SSukumar Swaminathan 	}
2464bafec742SSukumar Swaminathan #endif
2465bafec742SSukumar Swaminathan 	/* While there are entries in the completion queue. */
2466bafec742SSukumar Swaminathan 	while (prod != rx_ring->cnsmr_idx) {
2467bafec742SSukumar Swaminathan 
2468bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX,
2469bafec742SSukumar Swaminathan 		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n", __func__,
2470bafec742SSukumar Swaminathan 		    rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2471bafec742SSukumar Swaminathan 
2472bafec742SSukumar Swaminathan 		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2473bafec742SSukumar Swaminathan 		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2474bafec742SSukumar Swaminathan 		    (off_t)((uintptr_t)net_rsp -
2475bafec742SSukumar Swaminathan 		    (uintptr_t)rx_ring->cq_dma.vaddr),
2476bafec742SSukumar Swaminathan 		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2477bafec742SSukumar Swaminathan 
2478bafec742SSukumar Swaminathan 		QL_DUMP(DBG_RX, "ql_clean_outbound_rx_ring: "
2479bafec742SSukumar Swaminathan 		    "response packet data\n",
2480bafec742SSukumar Swaminathan 		    rx_ring->curr_entry, 8,
2481bafec742SSukumar Swaminathan 		    (size_t)sizeof (*net_rsp));
2482bafec742SSukumar Swaminathan 
2483bafec742SSukumar Swaminathan 		switch (net_rsp->opcode) {
2484bafec742SSukumar Swaminathan 
2485bafec742SSukumar Swaminathan 		case OPCODE_OB_MAC_OFFLOAD_IOCB:
2486bafec742SSukumar Swaminathan 		case OPCODE_OB_MAC_IOCB:
2487bafec742SSukumar Swaminathan 			ql_process_mac_tx_intr(qlge, net_rsp);
2488bafec742SSukumar Swaminathan 			break;
2489bafec742SSukumar Swaminathan 
2490bafec742SSukumar Swaminathan 		default:
2491bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
2492bafec742SSukumar Swaminathan 			    "%s Hit default case, not handled! "
2493bafec742SSukumar Swaminathan 			    "dropping the packet,"
2494bafec742SSukumar Swaminathan 			    " opcode = %x.",
2495bafec742SSukumar Swaminathan 			    __func__, net_rsp->opcode);
2496bafec742SSukumar Swaminathan 			break;
2497bafec742SSukumar Swaminathan 		}
2498bafec742SSukumar Swaminathan 		count++;
2499bafec742SSukumar Swaminathan 		ql_update_cq(rx_ring);
2500bafec742SSukumar Swaminathan 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2501bafec742SSukumar Swaminathan 	}
2502bafec742SSukumar Swaminathan 	ql_write_cq_idx(rx_ring);
2503bafec742SSukumar Swaminathan 
2504bafec742SSukumar Swaminathan 	mutex_exit(&rx_ring->rx_lock);
2505bafec742SSukumar Swaminathan 
2506bafec742SSukumar Swaminathan 	tx_ring = &qlge->tx_ring[net_rsp->txq_idx];
2507bafec742SSukumar Swaminathan 
2508bafec742SSukumar Swaminathan 	mutex_enter(&tx_ring->tx_lock);
2509bafec742SSukumar Swaminathan 
2510bafec742SSukumar Swaminathan 	if (tx_ring->queue_stopped &&
2511bafec742SSukumar Swaminathan 	    (tx_ring->tx_free_count > TX_RESUME_THRESHOLD)) {
2512bafec742SSukumar Swaminathan 		/*
2513bafec742SSukumar Swaminathan 		 * The queue got stopped because the tx_ring was full.
2514bafec742SSukumar Swaminathan 		 * Wake it up, because it's now at least 25% empty.
2515bafec742SSukumar Swaminathan 		 */
2516bafec742SSukumar Swaminathan 		tx_ring->queue_stopped = 0;
2517bafec742SSukumar Swaminathan 		resume_tx = B_TRUE;
2518bafec742SSukumar Swaminathan 	}
2519bafec742SSukumar Swaminathan 
2520bafec742SSukumar Swaminathan 	mutex_exit(&tx_ring->tx_lock);
2521bafec742SSukumar Swaminathan 	/* Don't hold the lock during OS callback */
2522bafec742SSukumar Swaminathan 	if (resume_tx)
2523bafec742SSukumar Swaminathan 		RESUME_TX(tx_ring);
2524bafec742SSukumar Swaminathan 	return (count);
2525bafec742SSukumar Swaminathan }
2526bafec742SSukumar Swaminathan 
2527bafec742SSukumar Swaminathan /*
2528bafec742SSukumar Swaminathan  * reset asic when error happens
2529bafec742SSukumar Swaminathan  */
2530bafec742SSukumar Swaminathan /* ARGSUSED */
2531bafec742SSukumar Swaminathan static uint_t
2532bafec742SSukumar Swaminathan ql_asic_reset_work(caddr_t arg1, caddr_t arg2)
2533bafec742SSukumar Swaminathan {
2534bafec742SSukumar Swaminathan 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2535bafec742SSukumar Swaminathan 	int status;
2536bafec742SSukumar Swaminathan 
2537bafec742SSukumar Swaminathan 	mutex_enter(&qlge->gen_mutex);
2538bafec742SSukumar Swaminathan 	status = ql_bringdown_adapter(qlge);
2539bafec742SSukumar Swaminathan 	if (status != DDI_SUCCESS)
2540bafec742SSukumar Swaminathan 		goto error;
2541bafec742SSukumar Swaminathan 
2542bafec742SSukumar Swaminathan 	status = ql_bringup_adapter(qlge);
2543bafec742SSukumar Swaminathan 	if (status != DDI_SUCCESS)
2544bafec742SSukumar Swaminathan 		goto error;
2545bafec742SSukumar Swaminathan 	mutex_exit(&qlge->gen_mutex);
2546bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2547bafec742SSukumar Swaminathan 
2548bafec742SSukumar Swaminathan error:
2549bafec742SSukumar Swaminathan 	mutex_exit(&qlge->gen_mutex);
2550bafec742SSukumar Swaminathan 	cmn_err(CE_WARN,
2551bafec742SSukumar Swaminathan 	    "qlge up/down cycle failed, closing device");
2552bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2553bafec742SSukumar Swaminathan }
2554bafec742SSukumar Swaminathan 
2555bafec742SSukumar Swaminathan /*
2556bafec742SSukumar Swaminathan  * Reset MPI
2557bafec742SSukumar Swaminathan  */
2558bafec742SSukumar Swaminathan /* ARGSUSED */
2559bafec742SSukumar Swaminathan static uint_t
2560bafec742SSukumar Swaminathan ql_mpi_reset_work(caddr_t arg1, caddr_t arg2)
2561bafec742SSukumar Swaminathan {
2562bafec742SSukumar Swaminathan 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2563bafec742SSukumar Swaminathan 
2564*0662fbf4SSukumar Swaminathan 	(void) ql_reset_mpi_risc(qlge);
2565bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2566bafec742SSukumar Swaminathan }
2567bafec742SSukumar Swaminathan 
2568bafec742SSukumar Swaminathan /*
2569bafec742SSukumar Swaminathan  * Process MPI mailbox messages
2570bafec742SSukumar Swaminathan  */
2571bafec742SSukumar Swaminathan /* ARGSUSED */
2572bafec742SSukumar Swaminathan static uint_t
2573bafec742SSukumar Swaminathan ql_mpi_event_work(caddr_t arg1, caddr_t arg2)
2574bafec742SSukumar Swaminathan {
2575bafec742SSukumar Swaminathan 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2576bafec742SSukumar Swaminathan 
2577bafec742SSukumar Swaminathan 	ql_do_mpi_intr(qlge);
2578bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2579bafec742SSukumar Swaminathan }
2580bafec742SSukumar Swaminathan 
/*
 * Fire up a handler to reset the adapter: triggers the asic_reset
 * soft interrupt.  (The original comment said "MPI processor", but
 * this triggers asic_reset_intr_hdl, not the MPI reset handle.)
 */
void
ql_wake_asic_reset_soft_intr(qlge_t *qlge)
{
	(void) ddi_intr_trigger_softint(qlge->asic_reset_intr_hdl, NULL);
}
2587bafec742SSukumar Swaminathan 
/*
 * Fire up a handler to reset the MPI processor: triggers the mpi_reset
 * soft interrupt.
 */
static void
ql_wake_mpi_reset_soft_intr(qlge_t *qlge)
{
	(void) ddi_intr_trigger_softint(qlge->mpi_reset_intr_hdl, NULL);
}
2593bafec742SSukumar Swaminathan 
/*
 * Fire up a handler to process MPI mailbox/async events: triggers the
 * mpi_event soft interrupt.
 */
static void
ql_wake_mpi_event_soft_intr(qlge_t *qlge)
{
	(void) ddi_intr_trigger_softint(qlge->mpi_event_intr_hdl, NULL);
}
2599bafec742SSukumar Swaminathan 
/*
 * This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple interrupt vector environment.
 * In MSI/Legacy environment it also process the rest of
 * the rx_rings.
 */
/* ARGSUSED */
static uint_t
ql_isr(caddr_t arg1, caddr_t arg2)
{
	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
	qlge_t *qlge = rx_ring->qlge;
	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
	uint32_t var, prod;
	int i;
	int work_done = 0;

	mblk_t *mp;

	_NOTE(ARGUNUSED(arg2));

	++qlge->rx_interrupts[rx_ring->cq_id];

	/*
	 * If the interrupt is already masked (irq_cnt non-zero), read the
	 * status registers and claim the interrupt without further work.
	 * NOTE(review): the values read into 'var' are discarded --
	 * presumably the register reads themselves acknowledge the
	 * condition; confirm against the chip documentation.
	 */
	if (ql_atomic_read_32(&qlge->intr_ctx[0].irq_cnt)) {
		ql_write_reg(qlge, REG_RSVD7, 0xfeed0002);
		var = ql_read_reg(qlge, REG_ERROR_STATUS);
		var = ql_read_reg(qlge, REG_STATUS);
		var = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
		return (DDI_INTR_CLAIMED);
	}

	ql_disable_completion_interrupt(qlge, intr_ctx->intr);

	/*
	 * Check the default queue and wake handler if active.
	 */
	rx_ring = &qlge->rx_ring[0];
	prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	QL_PRINT(DBG_INTR, ("rx-ring[0] prod index 0x%x, consumer 0x%x ",
	    prod, rx_ring->cnsmr_idx));
	/* check if interrupt is due to incoming packet */
	if (prod != rx_ring->cnsmr_idx) {
		QL_PRINT(DBG_INTR, ("Waking handler for rx_ring[0].\n"));
		ql_disable_completion_interrupt(qlge, intr_ctx->intr);
		/* drain ring 0 and pass the chain up to the stack */
		mutex_enter(&rx_ring->rx_lock);
		mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
		mutex_exit(&rx_ring->rx_lock);

		if (mp != NULL)
			RX_UPSTREAM(rx_ring, mp);
		work_done++;
	} else {
		/*
		 * If interrupt is not due to incoming packet, read status
		 * register to see if error happens or mailbox interrupt.
		 */
		var = ql_read_reg(qlge, REG_STATUS);
		if ((var & STATUS_FE) != 0) {
			/* fatal error: schedule a full adapter reset */
			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);

			cmn_err(CE_WARN, "Got fatal error, STS = %x.", var);
			var = ql_read_reg(qlge, REG_ERROR_STATUS);
			cmn_err(CE_WARN,
			    "Resetting chip. Error Status Register = 0x%x",
			    var);
			ql_wake_asic_reset_soft_intr(qlge);
			return (DDI_INTR_CLAIMED);
		}

		/*
		 * Check MPI processor activity.
		 */
		if ((var & STATUS_PI) != 0) {
			/*
			 * We've got an async event or mailbox completion.
			 * Handle it and clear the source of the interrupt.
			 */
			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);

			QL_PRINT(DBG_INTR, ("Got MPI processor interrupt.\n"));
			ql_disable_completion_interrupt(qlge, intr_ctx->intr);
			ql_wake_mpi_event_soft_intr(qlge);
			work_done++;
		}
	}

	/* In MSI/Legacy mode this one vector covers every ring */
	if (qlge->intr_type != DDI_INTR_TYPE_MSIX) {
		/*
		 * Start the DPC for each active queue.
		 */
		for (i = 1; i < qlge->rx_ring_count; i++) {
			rx_ring = &qlge->rx_ring[i];

			if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
			    rx_ring->cnsmr_idx) {
				QL_PRINT(DBG_INTR,
				    ("Waking handler for rx_ring[%d].\n", i));

				ql_disable_completion_interrupt(qlge,
				    rx_ring->irq);
				if (rx_ring->type == TX_Q) {
					/* tx-completion ring */
					(void) ql_clean_outbound_rx_ring(
					    rx_ring);
					ql_enable_completion_interrupt(
					    rx_ring->qlge, rx_ring->irq);
				} else {
					/* receive ring */
					mutex_enter(&rx_ring->rx_lock);
					mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
					mutex_exit(&rx_ring->rx_lock);
					if (mp != NULL)
						RX_UPSTREAM(rx_ring, mp);
#ifdef QLGE_LOAD_UNLOAD
					if (rx_ring->mac_flags ==
					    QL_MAC_STOPPED)
						cmn_err(CE_NOTE,
						    "%s rx_indicate(%d) %d\n",
						    __func__, i,
						    rx_ring->rx_indicate);
#endif
				}
				work_done++;
			}
		}
	}

	ql_enable_completion_interrupt(qlge, intr_ctx->intr);

	return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
}
2729bafec742SSukumar Swaminathan 
2730bafec742SSukumar Swaminathan /*
2731bafec742SSukumar Swaminathan  * MSI-X Multiple Vector Interrupt Handler for outbound (TX) completions.
2732bafec742SSukumar Swaminathan  */
2733bafec742SSukumar Swaminathan /* ARGSUSED */
2734bafec742SSukumar Swaminathan static uint_t
2735bafec742SSukumar Swaminathan ql_msix_tx_isr(caddr_t arg1, caddr_t arg2)
2736bafec742SSukumar Swaminathan {
2737bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
2738bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2739bafec742SSukumar Swaminathan 	_NOTE(ARGUNUSED(arg2));
2740bafec742SSukumar Swaminathan 
2741bafec742SSukumar Swaminathan 	++qlge->rx_interrupts[rx_ring->cq_id];
2742*0662fbf4SSukumar Swaminathan 	(void) ql_clean_outbound_rx_ring(rx_ring);
2743bafec742SSukumar Swaminathan 	ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
2744bafec742SSukumar Swaminathan 
2745bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2746bafec742SSukumar Swaminathan }
2747bafec742SSukumar Swaminathan 
2748bafec742SSukumar Swaminathan /*
2749bafec742SSukumar Swaminathan  * Poll n_bytes of chained incoming packets
2750bafec742SSukumar Swaminathan  */
2751bafec742SSukumar Swaminathan mblk_t *
2752bafec742SSukumar Swaminathan ql_ring_rx_poll(void *arg, int n_bytes)
2753bafec742SSukumar Swaminathan {
2754bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring = (struct rx_ring *)arg;
2755bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2756bafec742SSukumar Swaminathan 	mblk_t *mp = NULL;
2757bafec742SSukumar Swaminathan 	uint32_t var;
2758bafec742SSukumar Swaminathan 
2759bafec742SSukumar Swaminathan 	ASSERT(n_bytes >= 0);
2760bafec742SSukumar Swaminathan 	QL_PRINT(DBG_GLD, ("%s for ring(%d) to read max %d bytes\n",
2761bafec742SSukumar Swaminathan 	    __func__, rx_ring->cq_id, n_bytes));
2762bafec742SSukumar Swaminathan 
2763bafec742SSukumar Swaminathan 	++qlge->rx_polls[rx_ring->cq_id];
2764bafec742SSukumar Swaminathan 
2765bafec742SSukumar Swaminathan 	if (n_bytes == 0)
2766bafec742SSukumar Swaminathan 		return (mp);
2767bafec742SSukumar Swaminathan 	mutex_enter(&rx_ring->rx_lock);
2768bafec742SSukumar Swaminathan 	mp = ql_ring_rx(rx_ring, n_bytes);
2769bafec742SSukumar Swaminathan 	mutex_exit(&rx_ring->rx_lock);
2770bafec742SSukumar Swaminathan 
2771bafec742SSukumar Swaminathan 	if ((rx_ring->cq_id == 0) && (mp == NULL)) {
2772bafec742SSukumar Swaminathan 		var = ql_read_reg(qlge, REG_STATUS);
2773bafec742SSukumar Swaminathan 		/*
2774bafec742SSukumar Swaminathan 		 * Check for fatal error.
2775bafec742SSukumar Swaminathan 		 */
2776bafec742SSukumar Swaminathan 		if ((var & STATUS_FE) != 0) {
2777bafec742SSukumar Swaminathan 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
2778bafec742SSukumar Swaminathan 			var = ql_read_reg(qlge, REG_ERROR_STATUS);
2779bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Got fatal error %x.", var);
2780bafec742SSukumar Swaminathan 			ql_wake_asic_reset_soft_intr(qlge);
2781bafec742SSukumar Swaminathan 		}
2782bafec742SSukumar Swaminathan 		/*
2783bafec742SSukumar Swaminathan 		 * Check MPI processor activity.
2784bafec742SSukumar Swaminathan 		 */
2785bafec742SSukumar Swaminathan 		if ((var & STATUS_PI) != 0) {
2786bafec742SSukumar Swaminathan 			/*
2787bafec742SSukumar Swaminathan 			 * We've got an async event or mailbox completion.
2788bafec742SSukumar Swaminathan 			 * Handle it and clear the source of the interrupt.
2789bafec742SSukumar Swaminathan 			 */
2790bafec742SSukumar Swaminathan 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
2791bafec742SSukumar Swaminathan 			ql_do_mpi_intr(qlge);
2792bafec742SSukumar Swaminathan 		}
2793bafec742SSukumar Swaminathan 	}
2794bafec742SSukumar Swaminathan 
2795bafec742SSukumar Swaminathan 	return (mp);
2796bafec742SSukumar Swaminathan }
2797bafec742SSukumar Swaminathan 
2798bafec742SSukumar Swaminathan /*
2799bafec742SSukumar Swaminathan  * MSI-X Multiple Vector Interrupt Handler for inbound (RX) completions.
2800bafec742SSukumar Swaminathan  */
2801bafec742SSukumar Swaminathan /* ARGSUSED */
2802bafec742SSukumar Swaminathan static uint_t
2803bafec742SSukumar Swaminathan ql_msix_rx_isr(caddr_t arg1, caddr_t arg2)
2804bafec742SSukumar Swaminathan {
2805bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
2806bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2807bafec742SSukumar Swaminathan 	mblk_t *mp;
2808bafec742SSukumar Swaminathan 	_NOTE(ARGUNUSED(arg2));
2809bafec742SSukumar Swaminathan 
2810bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
2811bafec742SSukumar Swaminathan 
2812bafec742SSukumar Swaminathan 	++qlge->rx_interrupts[rx_ring->cq_id];
2813bafec742SSukumar Swaminathan 
2814bafec742SSukumar Swaminathan 	mutex_enter(&rx_ring->rx_lock);
2815bafec742SSukumar Swaminathan 	mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2816bafec742SSukumar Swaminathan 	mutex_exit(&rx_ring->rx_lock);
2817bafec742SSukumar Swaminathan 
2818bafec742SSukumar Swaminathan 	if (mp != NULL)
2819bafec742SSukumar Swaminathan 		RX_UPSTREAM(rx_ring, mp);
2820bafec742SSukumar Swaminathan 
2821bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2822bafec742SSukumar Swaminathan }
2823bafec742SSukumar Swaminathan 
2824bafec742SSukumar Swaminathan 
2825bafec742SSukumar Swaminathan /*
2826bafec742SSukumar Swaminathan  *
2827bafec742SSukumar Swaminathan  * Allocate DMA Buffer for ioctl service
2828bafec742SSukumar Swaminathan  *
2829bafec742SSukumar Swaminathan  */
2830bafec742SSukumar Swaminathan static int
2831bafec742SSukumar Swaminathan ql_alloc_ioctl_dma_buf(qlge_t *qlge)
2832bafec742SSukumar Swaminathan {
2833bafec742SSukumar Swaminathan 	uint64_t phy_addr;
2834bafec742SSukumar Swaminathan 	uint64_t alloc_size;
2835bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
2836bafec742SSukumar Swaminathan 
2837bafec742SSukumar Swaminathan 	alloc_size = qlge->ioctl_buf_dma_attr.mem_len =
2838bafec742SSukumar Swaminathan 	    max(WCS_MPI_CODE_RAM_LENGTH, MEMC_MPI_RAM_LENGTH);
2839bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &qlge->ioctl_buf_dma_attr.dma_handle,
2840bafec742SSukumar Swaminathan 	    &ql_buf_acc_attr,
2841bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2842bafec742SSukumar Swaminathan 	    &qlge->ioctl_buf_dma_attr.acc_handle,
2843bafec742SSukumar Swaminathan 	    (size_t)alloc_size,  /* mem size */
2844bafec742SSukumar Swaminathan 	    (size_t)0,  /* alignment */
2845bafec742SSukumar Swaminathan 	    (caddr_t *)&qlge->ioctl_buf_dma_attr.vaddr,
2846bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
2847bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): ioctl DMA allocation failed.",
2848bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
2849bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
2850bafec742SSukumar Swaminathan 	}
2851bafec742SSukumar Swaminathan 
2852bafec742SSukumar Swaminathan 	phy_addr = dma_cookie.dmac_laddress;
2853bafec742SSukumar Swaminathan 
2854bafec742SSukumar Swaminathan 	if (qlge->ioctl_buf_dma_attr.vaddr == NULL) {
2855bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): failed.", __func__, qlge->instance);
2856bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
2857bafec742SSukumar Swaminathan 	}
2858bafec742SSukumar Swaminathan 
2859bafec742SSukumar Swaminathan 	qlge->ioctl_buf_dma_attr.dma_addr = phy_addr;
2860bafec742SSukumar Swaminathan 
2861bafec742SSukumar Swaminathan 	QL_PRINT(DBG_MBX, ("%s: ioctl_dma_buf_virt_addr = 0x%lx, "
2862bafec742SSukumar Swaminathan 	    "phy_addr = 0x%lx\n",
2863bafec742SSukumar Swaminathan 	    __func__, qlge->ioctl_buf_dma_attr.vaddr, phy_addr));
2864bafec742SSukumar Swaminathan 
2865bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
2866bafec742SSukumar Swaminathan }
2867bafec742SSukumar Swaminathan 
2868bafec742SSukumar Swaminathan 
2869bafec742SSukumar Swaminathan /*
2870bafec742SSukumar Swaminathan  * Function to free physical memory.
2871bafec742SSukumar Swaminathan  */
2872bafec742SSukumar Swaminathan static void
2873bafec742SSukumar Swaminathan ql_free_phys(ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *acc_handle)
2874bafec742SSukumar Swaminathan {
2875bafec742SSukumar Swaminathan 	if (dma_handle != NULL) {
2876bafec742SSukumar Swaminathan 		(void) ddi_dma_unbind_handle(*dma_handle);
2877bafec742SSukumar Swaminathan 		if (acc_handle != NULL)
2878bafec742SSukumar Swaminathan 			ddi_dma_mem_free(acc_handle);
2879bafec742SSukumar Swaminathan 		ddi_dma_free_handle(dma_handle);
2880bafec742SSukumar Swaminathan 	}
2881bafec742SSukumar Swaminathan }
2882bafec742SSukumar Swaminathan 
2883bafec742SSukumar Swaminathan /*
2884bafec742SSukumar Swaminathan  * Function to free ioctl dma buffer.
2885bafec742SSukumar Swaminathan  */
2886bafec742SSukumar Swaminathan static void
2887bafec742SSukumar Swaminathan ql_free_ioctl_dma_buf(qlge_t *qlge)
2888bafec742SSukumar Swaminathan {
2889bafec742SSukumar Swaminathan 	if (qlge->ioctl_buf_dma_attr.dma_handle != NULL) {
2890bafec742SSukumar Swaminathan 		ql_free_phys(&qlge->ioctl_buf_dma_attr.dma_handle,
2891bafec742SSukumar Swaminathan 		    &qlge->ioctl_buf_dma_attr.acc_handle);
2892bafec742SSukumar Swaminathan 
2893bafec742SSukumar Swaminathan 		qlge->ioctl_buf_dma_attr.vaddr = NULL;
2894bafec742SSukumar Swaminathan 		qlge->ioctl_buf_dma_attr.dma_handle = NULL;
2895bafec742SSukumar Swaminathan 	}
2896bafec742SSukumar Swaminathan }
2897bafec742SSukumar Swaminathan 
2898bafec742SSukumar Swaminathan /*
2899bafec742SSukumar Swaminathan  * Free shadow register space used for request and completion queues
2900bafec742SSukumar Swaminathan  */
2901bafec742SSukumar Swaminathan static void
2902bafec742SSukumar Swaminathan ql_free_shadow_space(qlge_t *qlge)
2903bafec742SSukumar Swaminathan {
2904bafec742SSukumar Swaminathan 	if (qlge->host_copy_shadow_dma_attr.dma_handle != NULL) {
2905bafec742SSukumar Swaminathan 		ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
2906bafec742SSukumar Swaminathan 		    &qlge->host_copy_shadow_dma_attr.acc_handle);
2907bafec742SSukumar Swaminathan 		bzero(&qlge->host_copy_shadow_dma_attr,
2908bafec742SSukumar Swaminathan 		    sizeof (qlge->host_copy_shadow_dma_attr));
2909bafec742SSukumar Swaminathan 	}
2910bafec742SSukumar Swaminathan 
2911bafec742SSukumar Swaminathan 	if (qlge->buf_q_ptr_base_addr_dma_attr.dma_handle != NULL) {
2912bafec742SSukumar Swaminathan 		ql_free_phys(&qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
2913bafec742SSukumar Swaminathan 		    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle);
2914bafec742SSukumar Swaminathan 		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
2915bafec742SSukumar Swaminathan 		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
2916bafec742SSukumar Swaminathan 	}
2917bafec742SSukumar Swaminathan }
2918bafec742SSukumar Swaminathan 
2919bafec742SSukumar Swaminathan /*
2920bafec742SSukumar Swaminathan  * Allocate shadow register space for request and completion queues
2921bafec742SSukumar Swaminathan  */
2922bafec742SSukumar Swaminathan static int
2923bafec742SSukumar Swaminathan ql_alloc_shadow_space(qlge_t *qlge)
2924bafec742SSukumar Swaminathan {
2925bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
2926bafec742SSukumar Swaminathan 
2927bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip,
2928bafec742SSukumar Swaminathan 	    &qlge->host_copy_shadow_dma_attr.dma_handle,
2929bafec742SSukumar Swaminathan 	    &ql_dev_acc_attr,
2930bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2931bafec742SSukumar Swaminathan 	    &qlge->host_copy_shadow_dma_attr.acc_handle,
2932bafec742SSukumar Swaminathan 	    (size_t)VM_PAGE_SIZE,  /* mem size */
2933bafec742SSukumar Swaminathan 	    (size_t)4, /* 4 bytes alignment */
2934bafec742SSukumar Swaminathan 	    (caddr_t *)&qlge->host_copy_shadow_dma_attr.vaddr,
2935bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
2936bafec742SSukumar Swaminathan 		bzero(&qlge->host_copy_shadow_dma_attr,
2937bafec742SSukumar Swaminathan 		    sizeof (qlge->host_copy_shadow_dma_attr));
2938bafec742SSukumar Swaminathan 
2939bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory for "
2940bafec742SSukumar Swaminathan 		    "response shadow registers", __func__, qlge->instance);
2941bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
2942bafec742SSukumar Swaminathan 	}
2943bafec742SSukumar Swaminathan 
2944bafec742SSukumar Swaminathan 	qlge->host_copy_shadow_dma_attr.dma_addr = dma_cookie.dmac_laddress;
2945bafec742SSukumar Swaminathan 
2946bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip,
2947bafec742SSukumar Swaminathan 	    &qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
2948bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
2949bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2950bafec742SSukumar Swaminathan 	    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle,
2951bafec742SSukumar Swaminathan 	    (size_t)VM_PAGE_SIZE,  /* mem size */
2952bafec742SSukumar Swaminathan 	    (size_t)4, /* 4 bytes alignment */
2953bafec742SSukumar Swaminathan 	    (caddr_t *)&qlge->buf_q_ptr_base_addr_dma_attr.vaddr,
2954bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
2955bafec742SSukumar Swaminathan 		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
2956bafec742SSukumar Swaminathan 		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
2957bafec742SSukumar Swaminathan 
2958bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory "
2959bafec742SSukumar Swaminathan 		    "for request shadow registers",
2960bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
2961bafec742SSukumar Swaminathan 		goto err_wqp_sh_area;
2962bafec742SSukumar Swaminathan 	}
2963bafec742SSukumar Swaminathan 	qlge->buf_q_ptr_base_addr_dma_attr.dma_addr = dma_cookie.dmac_laddress;
2964bafec742SSukumar Swaminathan 
2965bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
2966bafec742SSukumar Swaminathan 
2967bafec742SSukumar Swaminathan err_wqp_sh_area:
2968bafec742SSukumar Swaminathan 	ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
2969bafec742SSukumar Swaminathan 	    &qlge->host_copy_shadow_dma_attr.acc_handle);
2970bafec742SSukumar Swaminathan 	bzero(&qlge->host_copy_shadow_dma_attr,
2971bafec742SSukumar Swaminathan 	    sizeof (qlge->host_copy_shadow_dma_attr));
2972bafec742SSukumar Swaminathan 
2973bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
2974bafec742SSukumar Swaminathan }
2975bafec742SSukumar Swaminathan 
2976bafec742SSukumar Swaminathan /*
2977bafec742SSukumar Swaminathan  * Initialize a tx ring
2978bafec742SSukumar Swaminathan  */
2979bafec742SSukumar Swaminathan static void
2980bafec742SSukumar Swaminathan ql_init_tx_ring(struct tx_ring *tx_ring)
2981bafec742SSukumar Swaminathan {
2982bafec742SSukumar Swaminathan 	int i;
2983bafec742SSukumar Swaminathan 	struct ob_mac_iocb_req *mac_iocb_ptr = tx_ring->wq_dma.vaddr;
2984bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_ring_desc = tx_ring->wq_desc;
2985bafec742SSukumar Swaminathan 
2986bafec742SSukumar Swaminathan 	for (i = 0; i < tx_ring->wq_len; i++) {
2987bafec742SSukumar Swaminathan 		tx_ring_desc->index = i;
2988bafec742SSukumar Swaminathan 		tx_ring_desc->queue_entry = mac_iocb_ptr;
2989bafec742SSukumar Swaminathan 		mac_iocb_ptr++;
2990bafec742SSukumar Swaminathan 		tx_ring_desc++;
2991bafec742SSukumar Swaminathan 	}
2992bafec742SSukumar Swaminathan 	tx_ring->tx_free_count = tx_ring->wq_len;
2993bafec742SSukumar Swaminathan 	tx_ring->queue_stopped = 0;
2994bafec742SSukumar Swaminathan }
2995bafec742SSukumar Swaminathan 
2996bafec742SSukumar Swaminathan /*
2997bafec742SSukumar Swaminathan  * Free one tx ring resources
2998bafec742SSukumar Swaminathan  */
2999bafec742SSukumar Swaminathan static void
3000bafec742SSukumar Swaminathan ql_free_tx_resources(struct tx_ring *tx_ring)
3001bafec742SSukumar Swaminathan {
3002bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_ring_desc;
3003bafec742SSukumar Swaminathan 	int i, j;
3004bafec742SSukumar Swaminathan 
3005bafec742SSukumar Swaminathan 	ql_free_phys(&tx_ring->wq_dma.dma_handle, &tx_ring->wq_dma.acc_handle);
3006bafec742SSukumar Swaminathan 	bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
3007bafec742SSukumar Swaminathan 
3008bafec742SSukumar Swaminathan 	if (tx_ring->wq_desc != NULL) {
3009bafec742SSukumar Swaminathan 		tx_ring_desc = tx_ring->wq_desc;
3010bafec742SSukumar Swaminathan 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3011bafec742SSukumar Swaminathan 			for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3012bafec742SSukumar Swaminathan 				if (tx_ring_desc->tx_dma_handle[j]) {
3013bafec742SSukumar Swaminathan 					/*
3014bafec742SSukumar Swaminathan 					 * The unbinding will happen in tx
3015bafec742SSukumar Swaminathan 					 * completion, here we just free the
3016bafec742SSukumar Swaminathan 					 * handles
3017bafec742SSukumar Swaminathan 					 */
3018bafec742SSukumar Swaminathan 					ddi_dma_free_handle(
3019bafec742SSukumar Swaminathan 					    &(tx_ring_desc->tx_dma_handle[j]));
3020bafec742SSukumar Swaminathan 					tx_ring_desc->tx_dma_handle[j] = NULL;
3021bafec742SSukumar Swaminathan 				}
3022bafec742SSukumar Swaminathan 			}
3023bafec742SSukumar Swaminathan 			if (tx_ring_desc->oal != NULL) {
3024bafec742SSukumar Swaminathan 				tx_ring_desc->oal_dma_addr = 0;
3025bafec742SSukumar Swaminathan 				tx_ring_desc->oal = NULL;
3026bafec742SSukumar Swaminathan 				tx_ring_desc->copy_buffer = NULL;
3027bafec742SSukumar Swaminathan 				tx_ring_desc->copy_buffer_dma_addr = 0;
3028bafec742SSukumar Swaminathan 
3029bafec742SSukumar Swaminathan 				ql_free_phys(&tx_ring_desc->oal_dma.dma_handle,
3030bafec742SSukumar Swaminathan 				    &tx_ring_desc->oal_dma.acc_handle);
3031bafec742SSukumar Swaminathan 			}
3032bafec742SSukumar Swaminathan 		}
3033bafec742SSukumar Swaminathan 		kmem_free(tx_ring->wq_desc,
3034bafec742SSukumar Swaminathan 		    tx_ring->wq_len * sizeof (struct tx_ring_desc));
3035bafec742SSukumar Swaminathan 		tx_ring->wq_desc = NULL;
3036bafec742SSukumar Swaminathan 	}
3037bafec742SSukumar Swaminathan 	/* free the wqicb struct */
3038bafec742SSukumar Swaminathan 	if (tx_ring->wqicb_dma.dma_handle) {
3039bafec742SSukumar Swaminathan 		ql_free_phys(&tx_ring->wqicb_dma.dma_handle,
3040bafec742SSukumar Swaminathan 		    &tx_ring->wqicb_dma.acc_handle);
3041bafec742SSukumar Swaminathan 		bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3042bafec742SSukumar Swaminathan 	}
3043bafec742SSukumar Swaminathan }
3044bafec742SSukumar Swaminathan 
3045bafec742SSukumar Swaminathan /*
3046bafec742SSukumar Swaminathan  * Allocate work (request) queue memory and transmit
3047bafec742SSukumar Swaminathan  * descriptors for this transmit ring
3048bafec742SSukumar Swaminathan  */
3049bafec742SSukumar Swaminathan static int
3050bafec742SSukumar Swaminathan ql_alloc_tx_resources(qlge_t *qlge, struct tx_ring *tx_ring)
3051bafec742SSukumar Swaminathan {
3052bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
3053bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_ring_desc;
3054bafec742SSukumar Swaminathan 	int i, j;
3055bafec742SSukumar Swaminathan 	uint32_t length;
3056bafec742SSukumar Swaminathan 
3057bafec742SSukumar Swaminathan 	/* allocate dma buffers for obiocbs */
3058bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &tx_ring->wq_dma.dma_handle,
3059bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
3060bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3061bafec742SSukumar Swaminathan 	    &tx_ring->wq_dma.acc_handle,
3062bafec742SSukumar Swaminathan 	    (size_t)tx_ring->wq_size,	/* mem size */
3063bafec742SSukumar Swaminathan 	    (size_t)128, /* alignment:128 bytes boundary */
3064bafec742SSukumar Swaminathan 	    (caddr_t *)&tx_ring->wq_dma.vaddr,
3065bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
3066bafec742SSukumar Swaminathan 		bzero(&tx_ring->wq_dma, sizeof (&tx_ring->wq_dma));
3067bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): reqQ allocation failed.",
3068bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3069bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3070bafec742SSukumar Swaminathan 	}
3071bafec742SSukumar Swaminathan 	tx_ring->wq_dma.dma_addr = dma_cookie.dmac_laddress;
3072bafec742SSukumar Swaminathan 
3073bafec742SSukumar Swaminathan 	tx_ring->wq_desc =
3074bafec742SSukumar Swaminathan 	    kmem_zalloc(tx_ring->wq_len * sizeof (struct tx_ring_desc),
3075bafec742SSukumar Swaminathan 	    KM_NOSLEEP);
3076bafec742SSukumar Swaminathan 	if (tx_ring->wq_desc == NULL) {
3077bafec742SSukumar Swaminathan 		goto err;
3078bafec742SSukumar Swaminathan 	} else {
3079bafec742SSukumar Swaminathan 		tx_ring_desc = tx_ring->wq_desc;
3080bafec742SSukumar Swaminathan 		/*
3081bafec742SSukumar Swaminathan 		 * Allocate a large enough structure to hold the following
3082bafec742SSukumar Swaminathan 		 * 1. oal buffer MAX_SGELEMENTS * sizeof (oal_entry) bytes
3083bafec742SSukumar Swaminathan 		 * 2. copy buffer of QL_MAX_COPY_LENGTH bytes
3084bafec742SSukumar Swaminathan 		 */
3085bafec742SSukumar Swaminathan 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3086bafec742SSukumar Swaminathan 			length = (sizeof (struct oal_entry) * MAX_SG_ELEMENTS)
3087bafec742SSukumar Swaminathan 			    + QL_MAX_COPY_LENGTH;
3088bafec742SSukumar Swaminathan 
3089bafec742SSukumar Swaminathan 			if (ql_alloc_phys(qlge->dip,
3090bafec742SSukumar Swaminathan 			    &tx_ring_desc->oal_dma.dma_handle,
3091bafec742SSukumar Swaminathan 			    &ql_desc_acc_attr,
3092bafec742SSukumar Swaminathan 			    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3093bafec742SSukumar Swaminathan 			    &tx_ring_desc->oal_dma.acc_handle,
3094bafec742SSukumar Swaminathan 			    (size_t)length,	/* mem size */
3095bafec742SSukumar Swaminathan 			    (size_t)0, /* default alignment:8 bytes boundary */
3096bafec742SSukumar Swaminathan 			    (caddr_t *)&tx_ring_desc->oal_dma.vaddr,
3097bafec742SSukumar Swaminathan 			    &dma_cookie) != 0) {
3098bafec742SSukumar Swaminathan 				bzero(&tx_ring_desc->oal_dma,
3099bafec742SSukumar Swaminathan 				    sizeof (tx_ring_desc->oal_dma));
3100bafec742SSukumar Swaminathan 				cmn_err(CE_WARN, "%s(%d): reqQ tx buf &"
3101bafec742SSukumar Swaminathan 				    "oal alloc failed.",
3102bafec742SSukumar Swaminathan 				    __func__, qlge->instance);
3103bafec742SSukumar Swaminathan 				return (DDI_FAILURE);
3104bafec742SSukumar Swaminathan 			}
3105bafec742SSukumar Swaminathan 
3106bafec742SSukumar Swaminathan 			tx_ring_desc->oal = tx_ring_desc->oal_dma.vaddr;
3107bafec742SSukumar Swaminathan 			tx_ring_desc->oal_dma_addr = dma_cookie.dmac_laddress;
3108bafec742SSukumar Swaminathan 			tx_ring_desc->copy_buffer =
3109bafec742SSukumar Swaminathan 			    (caddr_t)((uint8_t *)tx_ring_desc->oal
3110bafec742SSukumar Swaminathan 			    + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3111bafec742SSukumar Swaminathan 			tx_ring_desc->copy_buffer_dma_addr =
3112bafec742SSukumar Swaminathan 			    (tx_ring_desc->oal_dma_addr
3113bafec742SSukumar Swaminathan 			    + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3114bafec742SSukumar Swaminathan 
3115bafec742SSukumar Swaminathan 			/* Allocate dma handles for transmit buffers */
3116bafec742SSukumar Swaminathan 			for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3117bafec742SSukumar Swaminathan 				if (ddi_dma_alloc_handle(qlge->dip,
3118bafec742SSukumar Swaminathan 				    &tx_mapping_dma_attr,
3119bafec742SSukumar Swaminathan 				    DDI_DMA_DONTWAIT,
3120bafec742SSukumar Swaminathan 				    0, &tx_ring_desc->tx_dma_handle[j])
3121bafec742SSukumar Swaminathan 				    != DDI_SUCCESS) {
3122bafec742SSukumar Swaminathan 					cmn_err(CE_WARN,
3123bafec742SSukumar Swaminathan 					    "!%s: ddi_dma_alloc_handle: "
3124bafec742SSukumar Swaminathan 					    "tx_dma_handle "
3125bafec742SSukumar Swaminathan 					    "alloc failed", __func__);
3126bafec742SSukumar Swaminathan 					goto err;
3127bafec742SSukumar Swaminathan 				}
3128bafec742SSukumar Swaminathan 			}
3129bafec742SSukumar Swaminathan 		}
3130bafec742SSukumar Swaminathan 	}
3131bafec742SSukumar Swaminathan 	/* alloc a wqicb control block to load this tx ring to hw */
3132bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &tx_ring->wqicb_dma.dma_handle,
3133bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
3134bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3135bafec742SSukumar Swaminathan 	    &tx_ring->wqicb_dma.acc_handle,
3136bafec742SSukumar Swaminathan 	    (size_t)sizeof (struct wqicb_t),	/* mem size */
3137bafec742SSukumar Swaminathan 	    (size_t)0, /* alignment:128 bytes boundary */
3138bafec742SSukumar Swaminathan 	    (caddr_t *)&tx_ring->wqicb_dma.vaddr,
3139bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
3140bafec742SSukumar Swaminathan 		bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3141bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): wqicb allocation failed.",
3142bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3143bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3144bafec742SSukumar Swaminathan 	}
3145bafec742SSukumar Swaminathan 	tx_ring->wqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3146bafec742SSukumar Swaminathan 
3147bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
3148bafec742SSukumar Swaminathan 
3149bafec742SSukumar Swaminathan err:
3150bafec742SSukumar Swaminathan 	ql_free_tx_resources(tx_ring);
3151bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
3152bafec742SSukumar Swaminathan }
3153bafec742SSukumar Swaminathan 
3154bafec742SSukumar Swaminathan /*
3155bafec742SSukumar Swaminathan  * Free one rx ring resources
3156bafec742SSukumar Swaminathan  */
3157bafec742SSukumar Swaminathan static void
3158bafec742SSukumar Swaminathan ql_free_rx_resources(struct rx_ring *rx_ring)
3159bafec742SSukumar Swaminathan {
3160bafec742SSukumar Swaminathan 	/* Free the small buffer queue. */
3161bafec742SSukumar Swaminathan 	if (rx_ring->sbq_dma.dma_handle) {
3162bafec742SSukumar Swaminathan 		ql_free_phys(&rx_ring->sbq_dma.dma_handle,
3163bafec742SSukumar Swaminathan 		    &rx_ring->sbq_dma.acc_handle);
3164bafec742SSukumar Swaminathan 		bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3165bafec742SSukumar Swaminathan 	}
3166bafec742SSukumar Swaminathan 
3167bafec742SSukumar Swaminathan 	/* Free the small buffer queue control blocks. */
3168bafec742SSukumar Swaminathan 	kmem_free(rx_ring->sbq_desc, rx_ring->sbq_len *
3169bafec742SSukumar Swaminathan 	    sizeof (struct bq_desc));
3170bafec742SSukumar Swaminathan 	rx_ring->sbq_desc = NULL;
3171bafec742SSukumar Swaminathan 
3172bafec742SSukumar Swaminathan 	/* Free the large buffer queue. */
3173bafec742SSukumar Swaminathan 	if (rx_ring->lbq_dma.dma_handle) {
3174bafec742SSukumar Swaminathan 		ql_free_phys(&rx_ring->lbq_dma.dma_handle,
3175bafec742SSukumar Swaminathan 		    &rx_ring->lbq_dma.acc_handle);
3176bafec742SSukumar Swaminathan 		bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3177bafec742SSukumar Swaminathan 	}
3178bafec742SSukumar Swaminathan 
3179bafec742SSukumar Swaminathan 	/* Free the large buffer queue control blocks. */
3180bafec742SSukumar Swaminathan 	kmem_free(rx_ring->lbq_desc, rx_ring->lbq_len *
3181bafec742SSukumar Swaminathan 	    sizeof (struct bq_desc));
3182bafec742SSukumar Swaminathan 	rx_ring->lbq_desc = NULL;
3183bafec742SSukumar Swaminathan 
3184bafec742SSukumar Swaminathan 	/* Free cqicb struct */
3185bafec742SSukumar Swaminathan 	if (rx_ring->cqicb_dma.dma_handle) {
3186bafec742SSukumar Swaminathan 		ql_free_phys(&rx_ring->cqicb_dma.dma_handle,
3187bafec742SSukumar Swaminathan 		    &rx_ring->cqicb_dma.acc_handle);
3188bafec742SSukumar Swaminathan 		bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3189bafec742SSukumar Swaminathan 	}
3190bafec742SSukumar Swaminathan 	/* Free the rx queue. */
3191bafec742SSukumar Swaminathan 	if (rx_ring->cq_dma.dma_handle) {
3192bafec742SSukumar Swaminathan 		ql_free_phys(&rx_ring->cq_dma.dma_handle,
3193bafec742SSukumar Swaminathan 		    &rx_ring->cq_dma.acc_handle);
3194bafec742SSukumar Swaminathan 		bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3195bafec742SSukumar Swaminathan 	}
3196bafec742SSukumar Swaminathan }
3197bafec742SSukumar Swaminathan 
3198bafec742SSukumar Swaminathan /*
3199bafec742SSukumar Swaminathan  * Allocate queues and buffers for this completions queue based
3200bafec742SSukumar Swaminathan  * on the values in the parameter structure.
3201bafec742SSukumar Swaminathan  */
3202bafec742SSukumar Swaminathan static int
3203bafec742SSukumar Swaminathan ql_alloc_rx_resources(qlge_t *qlge, struct rx_ring *rx_ring)
3204bafec742SSukumar Swaminathan {
3205bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
3206bafec742SSukumar Swaminathan 
3207bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &rx_ring->cq_dma.dma_handle,
3208bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
3209bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3210bafec742SSukumar Swaminathan 	    &rx_ring->cq_dma.acc_handle,
3211bafec742SSukumar Swaminathan 	    (size_t)rx_ring->cq_size,  /* mem size */
3212bafec742SSukumar Swaminathan 	    (size_t)128, /* alignment:128 bytes boundary */
3213bafec742SSukumar Swaminathan 	    (caddr_t *)&rx_ring->cq_dma.vaddr,
3214bafec742SSukumar Swaminathan 	    &dma_cookie) != 0)	{
3215bafec742SSukumar Swaminathan 		bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3216bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): rspQ allocation failed.",
3217bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3218bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3219bafec742SSukumar Swaminathan 	}
3220bafec742SSukumar Swaminathan 	rx_ring->cq_dma.dma_addr = dma_cookie.dmac_laddress;
3221bafec742SSukumar Swaminathan 
3222bafec742SSukumar Swaminathan 	if (rx_ring->sbq_len != 0) {
3223bafec742SSukumar Swaminathan 		/*
3224bafec742SSukumar Swaminathan 		 * Allocate small buffer queue.
3225bafec742SSukumar Swaminathan 		 */
3226bafec742SSukumar Swaminathan 		if (ql_alloc_phys(qlge->dip, &rx_ring->sbq_dma.dma_handle,
3227bafec742SSukumar Swaminathan 		    &ql_desc_acc_attr,
3228bafec742SSukumar Swaminathan 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3229bafec742SSukumar Swaminathan 		    &rx_ring->sbq_dma.acc_handle,
3230bafec742SSukumar Swaminathan 		    (size_t)rx_ring->sbq_size,  /* mem size */
3231bafec742SSukumar Swaminathan 		    (size_t)128, /* alignment:128 bytes boundary */
3232bafec742SSukumar Swaminathan 		    (caddr_t *)&rx_ring->sbq_dma.vaddr,
3233bafec742SSukumar Swaminathan 		    &dma_cookie) != 0) {
3234bafec742SSukumar Swaminathan 			bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3235bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
3236bafec742SSukumar Swaminathan 			    "%s(%d): small buffer queue allocation failed.",
3237bafec742SSukumar Swaminathan 			    __func__, qlge->instance);
3238bafec742SSukumar Swaminathan 			goto err_mem;
3239bafec742SSukumar Swaminathan 		}
3240bafec742SSukumar Swaminathan 		rx_ring->sbq_dma.dma_addr = dma_cookie.dmac_laddress;
3241bafec742SSukumar Swaminathan 
3242bafec742SSukumar Swaminathan 		/*
3243bafec742SSukumar Swaminathan 		 * Allocate small buffer queue control blocks.
3244bafec742SSukumar Swaminathan 		 */
3245bafec742SSukumar Swaminathan 		rx_ring->sbq_desc =
3246bafec742SSukumar Swaminathan 		    kmem_zalloc(rx_ring->sbq_len * sizeof (struct bq_desc),
3247bafec742SSukumar Swaminathan 		    KM_NOSLEEP);
3248bafec742SSukumar Swaminathan 		if (rx_ring->sbq_desc == NULL) {
3249bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
3250bafec742SSukumar Swaminathan 			    "sbq control block allocation failed.");
3251bafec742SSukumar Swaminathan 			goto err_mem;
3252bafec742SSukumar Swaminathan 		}
3253bafec742SSukumar Swaminathan 
3254bafec742SSukumar Swaminathan 		ql_init_sbq_ring(rx_ring);
3255bafec742SSukumar Swaminathan 	}
3256bafec742SSukumar Swaminathan 
3257bafec742SSukumar Swaminathan 	if (rx_ring->lbq_len != 0) {
3258bafec742SSukumar Swaminathan 		/*
3259bafec742SSukumar Swaminathan 		 * Allocate large buffer queue.
3260bafec742SSukumar Swaminathan 		 */
3261bafec742SSukumar Swaminathan 		if (ql_alloc_phys(qlge->dip, &rx_ring->lbq_dma.dma_handle,
3262bafec742SSukumar Swaminathan 		    &ql_desc_acc_attr,
3263bafec742SSukumar Swaminathan 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3264bafec742SSukumar Swaminathan 		    &rx_ring->lbq_dma.acc_handle,
3265bafec742SSukumar Swaminathan 		    (size_t)rx_ring->lbq_size,  /* mem size */
3266bafec742SSukumar Swaminathan 		    (size_t)128, /* alignment:128 bytes boundary */
3267bafec742SSukumar Swaminathan 		    (caddr_t *)&rx_ring->lbq_dma.vaddr,
3268bafec742SSukumar Swaminathan 		    &dma_cookie) != 0) {
3269bafec742SSukumar Swaminathan 			bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3270bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): lbq allocation failed.",
3271bafec742SSukumar Swaminathan 			    __func__, qlge->instance);
3272bafec742SSukumar Swaminathan 			goto err_mem;
3273bafec742SSukumar Swaminathan 		}
3274bafec742SSukumar Swaminathan 		rx_ring->lbq_dma.dma_addr = dma_cookie.dmac_laddress;
3275bafec742SSukumar Swaminathan 
3276bafec742SSukumar Swaminathan 		/*
3277bafec742SSukumar Swaminathan 		 * Allocate large buffer queue control blocks.
3278bafec742SSukumar Swaminathan 		 */
3279bafec742SSukumar Swaminathan 		rx_ring->lbq_desc =
3280bafec742SSukumar Swaminathan 		    kmem_zalloc(rx_ring->lbq_len * sizeof (struct bq_desc),
3281bafec742SSukumar Swaminathan 		    KM_NOSLEEP);
3282bafec742SSukumar Swaminathan 		if (rx_ring->lbq_desc == NULL) {
3283bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
3284bafec742SSukumar Swaminathan 			    "Large buffer queue control block allocation "
3285bafec742SSukumar Swaminathan 			    "failed.");
3286bafec742SSukumar Swaminathan 			goto err_mem;
3287bafec742SSukumar Swaminathan 		}
3288bafec742SSukumar Swaminathan 		ql_init_lbq_ring(rx_ring);
3289bafec742SSukumar Swaminathan 	}
3290bafec742SSukumar Swaminathan 
3291bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &rx_ring->cqicb_dma.dma_handle,
3292bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
3293bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3294bafec742SSukumar Swaminathan 	    &rx_ring->cqicb_dma.acc_handle,
3295bafec742SSukumar Swaminathan 	    (size_t)sizeof (struct cqicb_t),  /* mem size */
3296bafec742SSukumar Swaminathan 	    (size_t)0, /* alignment:128 bytes boundary */
3297bafec742SSukumar Swaminathan 	    (caddr_t *)&rx_ring->cqicb_dma.vaddr,
3298bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
3299bafec742SSukumar Swaminathan 		bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3300bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): cqicb allocation failed.",
3301bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3302bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3303bafec742SSukumar Swaminathan 	}
3304bafec742SSukumar Swaminathan 	rx_ring->cqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3305bafec742SSukumar Swaminathan 
3306bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
3307bafec742SSukumar Swaminathan 
3308bafec742SSukumar Swaminathan err_mem:
3309bafec742SSukumar Swaminathan 	ql_free_rx_resources(rx_ring);
3310bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
3311bafec742SSukumar Swaminathan }
3312bafec742SSukumar Swaminathan 
3313bafec742SSukumar Swaminathan /*
3314bafec742SSukumar Swaminathan  * Frees tx/rx queues memory resources
3315bafec742SSukumar Swaminathan  */
3316bafec742SSukumar Swaminathan static void
3317bafec742SSukumar Swaminathan ql_free_mem_resources(qlge_t *qlge)
3318bafec742SSukumar Swaminathan {
3319bafec742SSukumar Swaminathan 	int i;
3320bafec742SSukumar Swaminathan 
3321bafec742SSukumar Swaminathan 	if (qlge->ricb_dma.dma_handle) {
3322bafec742SSukumar Swaminathan 		/* free the ricb struct */
3323bafec742SSukumar Swaminathan 		ql_free_phys(&qlge->ricb_dma.dma_handle,
3324bafec742SSukumar Swaminathan 		    &qlge->ricb_dma.acc_handle);
3325bafec742SSukumar Swaminathan 		bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3326bafec742SSukumar Swaminathan 	}
3327bafec742SSukumar Swaminathan 
3328bafec742SSukumar Swaminathan 	ql_free_rx_buffers(qlge);
3329bafec742SSukumar Swaminathan 
3330bafec742SSukumar Swaminathan 	ql_free_ioctl_dma_buf(qlge);
3331bafec742SSukumar Swaminathan 
3332bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++)
3333bafec742SSukumar Swaminathan 		ql_free_tx_resources(&qlge->tx_ring[i]);
3334bafec742SSukumar Swaminathan 
3335bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++)
3336bafec742SSukumar Swaminathan 		ql_free_rx_resources(&qlge->rx_ring[i]);
3337bafec742SSukumar Swaminathan 
3338bafec742SSukumar Swaminathan 	ql_free_shadow_space(qlge);
3339bafec742SSukumar Swaminathan }
3340bafec742SSukumar Swaminathan 
3341bafec742SSukumar Swaminathan /*
3342bafec742SSukumar Swaminathan  * Allocate buffer queues, large buffers and small buffers etc
3343bafec742SSukumar Swaminathan  *
3344bafec742SSukumar Swaminathan  * This API is called in the gld_attach member function. It is called
3345bafec742SSukumar Swaminathan  * only once.  Later reset,reboot should not re-allocate all rings and
3346bafec742SSukumar Swaminathan  * buffers.
3347bafec742SSukumar Swaminathan  */
3348bafec742SSukumar Swaminathan static int
3349bafec742SSukumar Swaminathan ql_alloc_mem_resources(qlge_t *qlge)
3350bafec742SSukumar Swaminathan {
3351bafec742SSukumar Swaminathan 	int i;
3352bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
3353bafec742SSukumar Swaminathan 
3354bafec742SSukumar Swaminathan 	/* Allocate space for our shadow registers */
3355bafec742SSukumar Swaminathan 	if (ql_alloc_shadow_space(qlge))
3356bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3357bafec742SSukumar Swaminathan 
3358bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
3359bafec742SSukumar Swaminathan 		if (ql_alloc_rx_resources(qlge, &qlge->rx_ring[i]) != 0) {
3360bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "RX resource allocation failed.");
3361bafec742SSukumar Swaminathan 			goto err_mem;
3362bafec742SSukumar Swaminathan 		}
3363bafec742SSukumar Swaminathan 	}
3364bafec742SSukumar Swaminathan 	/* Allocate tx queue resources */
3365bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
3366bafec742SSukumar Swaminathan 		if (ql_alloc_tx_resources(qlge, &qlge->tx_ring[i]) != 0) {
3367bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Tx resource allocation failed.");
3368bafec742SSukumar Swaminathan 			goto err_mem;
3369bafec742SSukumar Swaminathan 		}
3370bafec742SSukumar Swaminathan 	}
3371bafec742SSukumar Swaminathan 
3372bafec742SSukumar Swaminathan 	if (ql_alloc_ioctl_dma_buf(qlge) != DDI_SUCCESS) {
3373bafec742SSukumar Swaminathan 		goto err_mem;
3374bafec742SSukumar Swaminathan 	}
3375bafec742SSukumar Swaminathan 
3376bafec742SSukumar Swaminathan 	if (ql_alloc_rx_buffers(qlge) != DDI_SUCCESS) {
3377bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "?%s(%d): ql_alloc_rx_buffers failed",
3378bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3379bafec742SSukumar Swaminathan 		goto err_mem;
3380bafec742SSukumar Swaminathan 	}
3381bafec742SSukumar Swaminathan 
3382bafec742SSukumar Swaminathan 	qlge->sequence |= INIT_ALLOC_RX_BUF;
3383bafec742SSukumar Swaminathan 
3384bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &qlge->ricb_dma.dma_handle,
3385bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
3386bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3387bafec742SSukumar Swaminathan 	    &qlge->ricb_dma.acc_handle,
3388bafec742SSukumar Swaminathan 	    (size_t)sizeof (struct ricb),  /* mem size */
3389bafec742SSukumar Swaminathan 	    (size_t)0, /* alignment:128 bytes boundary */
3390bafec742SSukumar Swaminathan 	    (caddr_t *)&qlge->ricb_dma.vaddr,
3391bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
3392bafec742SSukumar Swaminathan 		bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3393bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): ricb allocation failed.",
3394bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3395bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3396bafec742SSukumar Swaminathan 	}
3397bafec742SSukumar Swaminathan 	qlge->ricb_dma.dma_addr = dma_cookie.dmac_laddress;
3398bafec742SSukumar Swaminathan 
3399bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
3400bafec742SSukumar Swaminathan 
3401bafec742SSukumar Swaminathan err_mem:
3402bafec742SSukumar Swaminathan 	ql_free_mem_resources(qlge);
3403bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
3404bafec742SSukumar Swaminathan }
3405bafec742SSukumar Swaminathan 
3406bafec742SSukumar Swaminathan 
3407bafec742SSukumar Swaminathan /*
3408bafec742SSukumar Swaminathan  * Function used to allocate physical memory and zero it.
3409bafec742SSukumar Swaminathan  */
3410bafec742SSukumar Swaminathan 
3411bafec742SSukumar Swaminathan static int
3412bafec742SSukumar Swaminathan ql_alloc_phys(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
3413bafec742SSukumar Swaminathan     ddi_device_acc_attr_t *device_acc_attr,
3414bafec742SSukumar Swaminathan     uint_t dma_flags,
3415bafec742SSukumar Swaminathan     ddi_acc_handle_t *acc_handle,
3416bafec742SSukumar Swaminathan     size_t size,
3417bafec742SSukumar Swaminathan     size_t alignment,
3418bafec742SSukumar Swaminathan     caddr_t *vaddr,
3419bafec742SSukumar Swaminathan     ddi_dma_cookie_t *dma_cookie)
3420bafec742SSukumar Swaminathan {
3421bafec742SSukumar Swaminathan 	size_t rlen;
3422bafec742SSukumar Swaminathan 	uint_t cnt;
3423bafec742SSukumar Swaminathan 
3424bafec742SSukumar Swaminathan 	/*
3425bafec742SSukumar Swaminathan 	 * Workaround for SUN XMITS buffer must end and start on 8 byte
3426bafec742SSukumar Swaminathan 	 * boundary. Else, hardware will overrun the buffer. Simple fix is
3427bafec742SSukumar Swaminathan 	 * to make sure buffer has enough room for overrun.
3428bafec742SSukumar Swaminathan 	 */
3429bafec742SSukumar Swaminathan 	if (size & 7) {
3430bafec742SSukumar Swaminathan 		size += 8 - (size & 7);
3431bafec742SSukumar Swaminathan 	}
3432bafec742SSukumar Swaminathan 
3433bafec742SSukumar Swaminathan 	/* Adjust the alignment if requested */
3434bafec742SSukumar Swaminathan 	if (alignment) {
3435bafec742SSukumar Swaminathan 		dma_attr.dma_attr_align = alignment;
3436bafec742SSukumar Swaminathan 	}
3437bafec742SSukumar Swaminathan 
3438bafec742SSukumar Swaminathan 	/*
3439bafec742SSukumar Swaminathan 	 * Allocate DMA handle
3440bafec742SSukumar Swaminathan 	 */
3441bafec742SSukumar Swaminathan 	if (ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_SLEEP, NULL,
3442bafec742SSukumar Swaminathan 	    dma_handle) != DDI_SUCCESS) {
3443bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, QL_BANG "%s:  ddi_dma_alloc_handle FAILED",
3444bafec742SSukumar Swaminathan 		    __func__);
3445bafec742SSukumar Swaminathan 		return (QL_ERROR);
3446bafec742SSukumar Swaminathan 	}
3447bafec742SSukumar Swaminathan 	/*
3448bafec742SSukumar Swaminathan 	 * Allocate DMA memory
3449bafec742SSukumar Swaminathan 	 */
3450bafec742SSukumar Swaminathan 	if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
3451bafec742SSukumar Swaminathan 	    dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING), DDI_DMA_SLEEP,
3452bafec742SSukumar Swaminathan 	    NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
3453bafec742SSukumar Swaminathan 		ddi_dma_free_handle(dma_handle);
3454bafec742SSukumar Swaminathan 	}
3455bafec742SSukumar Swaminathan 	if (vaddr == NULL) {
3456bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "alloc_phys: Memory alloc Failed");
3457bafec742SSukumar Swaminathan 		ddi_dma_free_handle(dma_handle);
3458bafec742SSukumar Swaminathan 		return (QL_ERROR);
3459bafec742SSukumar Swaminathan 	}
3460bafec742SSukumar Swaminathan 
3461bafec742SSukumar Swaminathan 	if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
3462bafec742SSukumar Swaminathan 	    dma_flags, DDI_DMA_SLEEP, NULL,
3463bafec742SSukumar Swaminathan 	    dma_cookie, &cnt) != DDI_DMA_MAPPED) {
3464bafec742SSukumar Swaminathan 		ddi_dma_mem_free(acc_handle);
3465bafec742SSukumar Swaminathan 
3466bafec742SSukumar Swaminathan 		ddi_dma_free_handle(dma_handle);
3467bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
3468bafec742SSukumar Swaminathan 		    __func__);
3469bafec742SSukumar Swaminathan 		return (QL_ERROR);
3470bafec742SSukumar Swaminathan 	}
3471bafec742SSukumar Swaminathan 
3472bafec742SSukumar Swaminathan 	if (cnt != 1) {
3473bafec742SSukumar Swaminathan 
3474bafec742SSukumar Swaminathan 		ql_free_phys(dma_handle, acc_handle);
3475bafec742SSukumar Swaminathan 
3476bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
3477bafec742SSukumar Swaminathan 		    __func__);
3478bafec742SSukumar Swaminathan 		return (QL_ERROR);
3479bafec742SSukumar Swaminathan 	}
3480bafec742SSukumar Swaminathan 
3481bafec742SSukumar Swaminathan 	bzero((caddr_t)*vaddr, rlen);
3482bafec742SSukumar Swaminathan 
3483bafec742SSukumar Swaminathan 	return (0);
3484bafec742SSukumar Swaminathan }
3485bafec742SSukumar Swaminathan 
3486bafec742SSukumar Swaminathan /*
3487bafec742SSukumar Swaminathan  * Add interrupt handlers based on the interrupt type.
3488bafec742SSukumar Swaminathan  * Before adding the interrupt handlers, the interrupt vectors should
3489bafec742SSukumar Swaminathan  * have been allocated, and the rx/tx rings have also been allocated.
3490bafec742SSukumar Swaminathan  */
3491bafec742SSukumar Swaminathan static int
3492bafec742SSukumar Swaminathan ql_add_intr_handlers(qlge_t *qlge)
3493bafec742SSukumar Swaminathan {
3494bafec742SSukumar Swaminathan 	int vector = 0;
3495bafec742SSukumar Swaminathan 	int rc, i;
3496bafec742SSukumar Swaminathan 	uint32_t value;
3497bafec742SSukumar Swaminathan 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
3498bafec742SSukumar Swaminathan 
3499bafec742SSukumar Swaminathan 	switch (qlge->intr_type) {
3500bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_MSIX:
3501bafec742SSukumar Swaminathan 		/*
3502bafec742SSukumar Swaminathan 		 * Add interrupt handler for rx and tx rings: vector[0 -
3503bafec742SSukumar Swaminathan 		 * (qlge->intr_cnt -1)].
3504bafec742SSukumar Swaminathan 		 */
3505bafec742SSukumar Swaminathan 		value = 0;
3506bafec742SSukumar Swaminathan 		for (vector = 0; vector < qlge->intr_cnt; vector++) {
3507bafec742SSukumar Swaminathan 			ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3508bafec742SSukumar Swaminathan 
3509bafec742SSukumar Swaminathan 			/*
3510bafec742SSukumar Swaminathan 			 * associate interrupt vector with interrupt handler
3511bafec742SSukumar Swaminathan 			 */
3512bafec742SSukumar Swaminathan 			rc = ddi_intr_add_handler(qlge->htable[vector],
3513bafec742SSukumar Swaminathan 			    (ddi_intr_handler_t *)intr_ctx->handler,
3514bafec742SSukumar Swaminathan 			    (void *)&qlge->rx_ring[vector], NULL);
3515bafec742SSukumar Swaminathan 
3516bafec742SSukumar Swaminathan 			if (rc != DDI_SUCCESS) {
3517bafec742SSukumar Swaminathan 				QL_PRINT(DBG_INIT,
3518bafec742SSukumar Swaminathan 				    ("Add rx interrupt handler failed. "
3519bafec742SSukumar Swaminathan 				    "return: %d, vector: %d", rc, vector));
3520bafec742SSukumar Swaminathan 				for (vector--; vector >= 0; vector--) {
3521bafec742SSukumar Swaminathan 					(void) ddi_intr_remove_handler(
3522bafec742SSukumar Swaminathan 					    qlge->htable[vector]);
3523bafec742SSukumar Swaminathan 				}
3524bafec742SSukumar Swaminathan 				return (DDI_FAILURE);
3525bafec742SSukumar Swaminathan 			}
3526bafec742SSukumar Swaminathan 			intr_ctx++;
3527bafec742SSukumar Swaminathan 		}
3528bafec742SSukumar Swaminathan 		break;
3529bafec742SSukumar Swaminathan 
3530bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_MSI:
3531bafec742SSukumar Swaminathan 		/*
3532bafec742SSukumar Swaminathan 		 * Add interrupt handlers for the only vector
3533bafec742SSukumar Swaminathan 		 */
3534bafec742SSukumar Swaminathan 		ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3535bafec742SSukumar Swaminathan 
3536bafec742SSukumar Swaminathan 		rc = ddi_intr_add_handler(qlge->htable[vector],
3537bafec742SSukumar Swaminathan 		    ql_isr,
3538bafec742SSukumar Swaminathan 		    (caddr_t)&qlge->rx_ring[0], NULL);
3539bafec742SSukumar Swaminathan 
3540bafec742SSukumar Swaminathan 		if (rc != DDI_SUCCESS) {
3541bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT,
3542bafec742SSukumar Swaminathan 			    ("Add MSI interrupt handler failed: %d\n", rc));
3543bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
3544bafec742SSukumar Swaminathan 		}
3545bafec742SSukumar Swaminathan 		break;
3546bafec742SSukumar Swaminathan 
3547bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_FIXED:
3548bafec742SSukumar Swaminathan 		/*
3549bafec742SSukumar Swaminathan 		 * Add interrupt handlers for the only vector
3550bafec742SSukumar Swaminathan 		 */
3551bafec742SSukumar Swaminathan 		ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3552bafec742SSukumar Swaminathan 
3553bafec742SSukumar Swaminathan 		rc = ddi_intr_add_handler(qlge->htable[vector],
3554bafec742SSukumar Swaminathan 		    ql_isr,
3555bafec742SSukumar Swaminathan 		    (caddr_t)&qlge->rx_ring[0], NULL);
3556bafec742SSukumar Swaminathan 
3557bafec742SSukumar Swaminathan 		if (rc != DDI_SUCCESS) {
3558bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT,
3559bafec742SSukumar Swaminathan 			    ("Add legacy interrupt handler failed: %d\n", rc));
3560bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
3561bafec742SSukumar Swaminathan 		}
3562bafec742SSukumar Swaminathan 		break;
3563bafec742SSukumar Swaminathan 
3564bafec742SSukumar Swaminathan 	default:
3565bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3566bafec742SSukumar Swaminathan 	}
3567bafec742SSukumar Swaminathan 
3568bafec742SSukumar Swaminathan 	/* Enable interrupts */
3569bafec742SSukumar Swaminathan 	/* Block enable */
3570bafec742SSukumar Swaminathan 	if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
3571bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Block enabling %d interrupt(s)\n",
3572bafec742SSukumar Swaminathan 		    qlge->intr_cnt));
3573bafec742SSukumar Swaminathan 		(void) ddi_intr_block_enable(qlge->htable, qlge->intr_cnt);
3574bafec742SSukumar Swaminathan 	} else { /* Non block enable */
3575bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->intr_cnt; i++) {
3576bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT, ("Non Block Enabling interrupt %d\n,"
3577bafec742SSukumar Swaminathan 			    "handle 0x%x\n", i, qlge->htable[i]));
3578bafec742SSukumar Swaminathan 			(void) ddi_intr_enable(qlge->htable[i]);
3579bafec742SSukumar Swaminathan 		}
3580bafec742SSukumar Swaminathan 	}
3581bafec742SSukumar Swaminathan 	qlge->sequence |= INIT_INTR_ENABLED;
3582bafec742SSukumar Swaminathan 
3583bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
3584bafec742SSukumar Swaminathan }
3585bafec742SSukumar Swaminathan 
3586bafec742SSukumar Swaminathan /*
3587bafec742SSukumar Swaminathan  * Here we build the intr_ctx structures based on
3588bafec742SSukumar Swaminathan  * our rx_ring count and intr vector count.
3589bafec742SSukumar Swaminathan  * The intr_ctx structure is used to hook each vector
3590bafec742SSukumar Swaminathan  * to possibly different handlers.
3591bafec742SSukumar Swaminathan  */
3592bafec742SSukumar Swaminathan static void
3593bafec742SSukumar Swaminathan ql_resolve_queues_to_irqs(qlge_t *qlge)
3594bafec742SSukumar Swaminathan {
3595bafec742SSukumar Swaminathan 	int i = 0;
3596bafec742SSukumar Swaminathan 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
3597bafec742SSukumar Swaminathan 
3598bafec742SSukumar Swaminathan 	if (qlge->intr_type == DDI_INTR_TYPE_MSIX) {
3599bafec742SSukumar Swaminathan 		/*
3600bafec742SSukumar Swaminathan 		 * Each rx_ring has its own intr_ctx since we
3601bafec742SSukumar Swaminathan 		 * have separate vectors for each queue.
3602bafec742SSukumar Swaminathan 		 * This only true when MSI-X is enabled.
3603bafec742SSukumar Swaminathan 		 */
3604bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->intr_cnt; i++, intr_ctx++) {
3605bafec742SSukumar Swaminathan 			qlge->rx_ring[i].irq = i;
3606bafec742SSukumar Swaminathan 			intr_ctx->intr = i;
3607bafec742SSukumar Swaminathan 			intr_ctx->qlge = qlge;
3608bafec742SSukumar Swaminathan 
3609bafec742SSukumar Swaminathan 			/*
3610bafec742SSukumar Swaminathan 			 * We set up each vectors enable/disable/read bits so
3611bafec742SSukumar Swaminathan 			 * there's no bit/mask calculations in critical path.
3612bafec742SSukumar Swaminathan 			 */
3613bafec742SSukumar Swaminathan 			intr_ctx->intr_en_mask =
3614bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3615bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
3616bafec742SSukumar Swaminathan 			    INTR_EN_IHD | i;
3617bafec742SSukumar Swaminathan 			intr_ctx->intr_dis_mask =
3618bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3619bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3620bafec742SSukumar Swaminathan 			    INTR_EN_IHD | i;
3621bafec742SSukumar Swaminathan 			intr_ctx->intr_read_mask =
3622bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3623bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
3624bafec742SSukumar Swaminathan 			    | i;
3625bafec742SSukumar Swaminathan 
3626bafec742SSukumar Swaminathan 			if (i == 0) {
3627bafec742SSukumar Swaminathan 				/*
3628bafec742SSukumar Swaminathan 				 * Default queue handles bcast/mcast plus
3629bafec742SSukumar Swaminathan 				 * async events.
3630bafec742SSukumar Swaminathan 				 */
3631bafec742SSukumar Swaminathan 				intr_ctx->handler = ql_isr;
3632bafec742SSukumar Swaminathan 			} else if (qlge->rx_ring[i].type == TX_Q) {
3633bafec742SSukumar Swaminathan 				/*
3634bafec742SSukumar Swaminathan 				 * Outbound queue is for outbound completions
3635bafec742SSukumar Swaminathan 				 * only.
3636bafec742SSukumar Swaminathan 				 */
3637bafec742SSukumar Swaminathan 				intr_ctx->handler = ql_msix_tx_isr;
3638bafec742SSukumar Swaminathan 			} else {
3639bafec742SSukumar Swaminathan 				/*
3640bafec742SSukumar Swaminathan 				 * Inbound queues handle unicast frames only.
3641bafec742SSukumar Swaminathan 				 */
3642bafec742SSukumar Swaminathan 				intr_ctx->handler = ql_msix_rx_isr;
3643bafec742SSukumar Swaminathan 			}
3644bafec742SSukumar Swaminathan 		}
3645bafec742SSukumar Swaminathan 	} else {
3646bafec742SSukumar Swaminathan 		/*
3647bafec742SSukumar Swaminathan 		 * All rx_rings use the same intr_ctx since
3648bafec742SSukumar Swaminathan 		 * there is only one vector.
3649bafec742SSukumar Swaminathan 		 */
3650bafec742SSukumar Swaminathan 		intr_ctx->intr = 0;
3651bafec742SSukumar Swaminathan 		intr_ctx->qlge = qlge;
3652bafec742SSukumar Swaminathan 		/*
3653bafec742SSukumar Swaminathan 		 * We set up each vectors enable/disable/read bits so
3654bafec742SSukumar Swaminathan 		 * there's no bit/mask calculations in the critical path.
3655bafec742SSukumar Swaminathan 		 */
3656bafec742SSukumar Swaminathan 		intr_ctx->intr_en_mask =
3657bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3658bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_ENABLE;
3659bafec742SSukumar Swaminathan 		intr_ctx->intr_dis_mask =
3660bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3661bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_DISABLE;
3662bafec742SSukumar Swaminathan 		intr_ctx->intr_read_mask =
3663bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3664bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_READ;
3665bafec742SSukumar Swaminathan 		/*
3666bafec742SSukumar Swaminathan 		 * Single interrupt means one handler for all rings.
3667bafec742SSukumar Swaminathan 		 */
3668bafec742SSukumar Swaminathan 		intr_ctx->handler = ql_isr;
3669bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->rx_ring_count; i++)
3670bafec742SSukumar Swaminathan 			qlge->rx_ring[i].irq = 0;
3671bafec742SSukumar Swaminathan 	}
3672bafec742SSukumar Swaminathan }
3673bafec742SSukumar Swaminathan 
3674bafec742SSukumar Swaminathan 
3675bafec742SSukumar Swaminathan /*
3676bafec742SSukumar Swaminathan  * Free allocated interrupts.
3677bafec742SSukumar Swaminathan  */
3678bafec742SSukumar Swaminathan static void
3679bafec742SSukumar Swaminathan ql_free_irq_vectors(qlge_t *qlge)
3680bafec742SSukumar Swaminathan {
3681bafec742SSukumar Swaminathan 	int i;
3682bafec742SSukumar Swaminathan 	int rc;
3683bafec742SSukumar Swaminathan 
3684bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_INTR_ENABLED) {
3685bafec742SSukumar Swaminathan 		/* Disable all interrupts */
3686bafec742SSukumar Swaminathan 		if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
3687bafec742SSukumar Swaminathan 			/* Call ddi_intr_block_disable() */
3688bafec742SSukumar Swaminathan 			(void) ddi_intr_block_disable(qlge->htable,
3689bafec742SSukumar Swaminathan 			    qlge->intr_cnt);
3690bafec742SSukumar Swaminathan 		} else {
3691bafec742SSukumar Swaminathan 			for (i = 0; i < qlge->intr_cnt; i++) {
3692bafec742SSukumar Swaminathan 				(void) ddi_intr_disable(qlge->htable[i]);
3693bafec742SSukumar Swaminathan 			}
3694bafec742SSukumar Swaminathan 		}
3695bafec742SSukumar Swaminathan 
3696bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_INTR_ENABLED;
3697bafec742SSukumar Swaminathan 	}
3698bafec742SSukumar Swaminathan 
3699bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->intr_cnt; i++) {
3700bafec742SSukumar Swaminathan 
3701bafec742SSukumar Swaminathan 		if (qlge->sequence & INIT_ADD_INTERRUPT)
3702bafec742SSukumar Swaminathan 			(void) ddi_intr_remove_handler(qlge->htable[i]);
3703bafec742SSukumar Swaminathan 
3704bafec742SSukumar Swaminathan 		if (qlge->sequence & INIT_INTR_ALLOC) {
3705bafec742SSukumar Swaminathan 			rc = ddi_intr_free(qlge->htable[i]);
3706bafec742SSukumar Swaminathan 			if (rc != DDI_SUCCESS) {
3707bafec742SSukumar Swaminathan 				/* EMPTY */
3708bafec742SSukumar Swaminathan 				QL_PRINT(DBG_INIT, ("Free intr failed: %d",
3709bafec742SSukumar Swaminathan 				    rc));
3710bafec742SSukumar Swaminathan 			}
3711bafec742SSukumar Swaminathan 		}
3712bafec742SSukumar Swaminathan 	}
3713bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_INTR_ALLOC)
3714bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_INTR_ALLOC;
3715bafec742SSukumar Swaminathan 
3716bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_ADD_INTERRUPT)
3717bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_ADD_INTERRUPT;
3718bafec742SSukumar Swaminathan 
3719bafec742SSukumar Swaminathan 	if (qlge->htable) {
3720bafec742SSukumar Swaminathan 		kmem_free(qlge->htable, qlge->intr_size);
3721bafec742SSukumar Swaminathan 		qlge->htable = NULL;
3722bafec742SSukumar Swaminathan 	}
3723bafec742SSukumar Swaminathan }
3724bafec742SSukumar Swaminathan 
3725bafec742SSukumar Swaminathan /*
3726bafec742SSukumar Swaminathan  * Allocate interrupt vectors
3727bafec742SSukumar Swaminathan  * For legacy and MSI, only 1 handle is needed.
3728bafec742SSukumar Swaminathan  * For MSI-X, if fewer than 2 vectors are available, return failure.
3729bafec742SSukumar Swaminathan  * Upon success, this maps the vectors to rx and tx rings for
3730bafec742SSukumar Swaminathan  * interrupts.
3731bafec742SSukumar Swaminathan  */
3732bafec742SSukumar Swaminathan static int
3733bafec742SSukumar Swaminathan ql_request_irq_vectors(qlge_t *qlge, int intr_type)
3734bafec742SSukumar Swaminathan {
3735bafec742SSukumar Swaminathan 	dev_info_t *devinfo;
3736bafec742SSukumar Swaminathan 	uint32_t request, orig;
3737bafec742SSukumar Swaminathan 	int count, avail, actual;
3738bafec742SSukumar Swaminathan 	int minimum;
3739bafec742SSukumar Swaminathan 	int rc;
3740bafec742SSukumar Swaminathan 
3741bafec742SSukumar Swaminathan 	devinfo = qlge->dip;
3742bafec742SSukumar Swaminathan 
3743bafec742SSukumar Swaminathan 	switch (intr_type) {
3744bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_FIXED:
3745bafec742SSukumar Swaminathan 		request = 1;	/* Request 1 legacy interrupt handle */
3746bafec742SSukumar Swaminathan 		minimum = 1;
3747bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("interrupt type: legacy\n"));
3748bafec742SSukumar Swaminathan 		break;
3749bafec742SSukumar Swaminathan 
3750bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_MSI:
3751bafec742SSukumar Swaminathan 		request = 1;	/* Request 1 MSI interrupt handle */
3752bafec742SSukumar Swaminathan 		minimum = 1;
3753bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("interrupt type: MSI\n"));
3754bafec742SSukumar Swaminathan 		break;
3755bafec742SSukumar Swaminathan 
3756bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_MSIX:
3757bafec742SSukumar Swaminathan 		/*
3758bafec742SSukumar Swaminathan 		 * Ideal number of vectors for the adapter is
3759bafec742SSukumar Swaminathan 		 * # rss rings + tx completion rings for default completion
3760bafec742SSukumar Swaminathan 		 * queue.
3761bafec742SSukumar Swaminathan 		 */
3762bafec742SSukumar Swaminathan 		request = qlge->rx_ring_count;
3763bafec742SSukumar Swaminathan 
3764bafec742SSukumar Swaminathan 		orig = request;
3765bafec742SSukumar Swaminathan 		if (request > (MAX_RX_RINGS))
3766bafec742SSukumar Swaminathan 			request = MAX_RX_RINGS;
3767bafec742SSukumar Swaminathan 		minimum = 2;
3768bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("interrupt type: MSI-X\n"));
3769bafec742SSukumar Swaminathan 		break;
3770bafec742SSukumar Swaminathan 
3771bafec742SSukumar Swaminathan 	default:
3772bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Invalid parameter\n"));
3773bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3774bafec742SSukumar Swaminathan 	}
3775bafec742SSukumar Swaminathan 
3776bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("interrupt handles requested: %d  minimum: %d\n",
3777bafec742SSukumar Swaminathan 	    request, minimum));
3778bafec742SSukumar Swaminathan 
3779bafec742SSukumar Swaminathan 	/*
3780bafec742SSukumar Swaminathan 	 * Get number of supported interrupts
3781bafec742SSukumar Swaminathan 	 */
3782bafec742SSukumar Swaminathan 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
3783bafec742SSukumar Swaminathan 	if ((rc != DDI_SUCCESS) || (count < minimum)) {
3784bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Get interrupt number failed. Return: %d, "
3785bafec742SSukumar Swaminathan 		    "count: %d\n", rc, count));
3786bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3787bafec742SSukumar Swaminathan 	}
3788bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("interrupts supported: %d\n", count));
3789bafec742SSukumar Swaminathan 
3790bafec742SSukumar Swaminathan 	/*
3791bafec742SSukumar Swaminathan 	 * Get number of available interrupts
3792bafec742SSukumar Swaminathan 	 */
3793bafec742SSukumar Swaminathan 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
3794bafec742SSukumar Swaminathan 	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
3795bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT,
3796bafec742SSukumar Swaminathan 		    ("Get interrupt available number failed. Return:"
3797bafec742SSukumar Swaminathan 		    " %d, available: %d\n", rc, avail));
3798bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3799bafec742SSukumar Swaminathan 	}
3800bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("interrupts available: %d\n", avail));
3801bafec742SSukumar Swaminathan 
3802bafec742SSukumar Swaminathan 	if (avail < request) {
3803bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Request %d handles, %d available\n",
3804bafec742SSukumar Swaminathan 		    request, avail));
3805bafec742SSukumar Swaminathan 		request = avail;
3806bafec742SSukumar Swaminathan 	}
3807bafec742SSukumar Swaminathan 
3808bafec742SSukumar Swaminathan 	actual = 0;
3809bafec742SSukumar Swaminathan 	qlge->intr_cnt = 0;
3810bafec742SSukumar Swaminathan 
3811bafec742SSukumar Swaminathan 	/*
3812bafec742SSukumar Swaminathan 	 * Allocate an array of interrupt handles
3813bafec742SSukumar Swaminathan 	 */
3814bafec742SSukumar Swaminathan 	qlge->intr_size = (size_t)(request * sizeof (ddi_intr_handle_t));
3815bafec742SSukumar Swaminathan 	qlge->htable = kmem_alloc(qlge->intr_size, KM_SLEEP);
3816bafec742SSukumar Swaminathan 
3817bafec742SSukumar Swaminathan 	rc = ddi_intr_alloc(devinfo, qlge->htable, intr_type, 0,
3818bafec742SSukumar Swaminathan 	    (int)request, &actual, DDI_INTR_ALLOC_NORMAL);
3819bafec742SSukumar Swaminathan 	if (rc != DDI_SUCCESS) {
3820bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d) Allocate interrupts failed. return:"
3821bafec742SSukumar Swaminathan 		    " %d, request: %d, actual: %d",
3822bafec742SSukumar Swaminathan 		    __func__, qlge->instance, rc, request, actual);
3823bafec742SSukumar Swaminathan 		goto ql_intr_alloc_fail;
3824bafec742SSukumar Swaminathan 	}
3825bafec742SSukumar Swaminathan 	qlge->intr_cnt = actual;
3826bafec742SSukumar Swaminathan 
3827bafec742SSukumar Swaminathan 	qlge->sequence |= INIT_INTR_ALLOC;
3828bafec742SSukumar Swaminathan 
3829bafec742SSukumar Swaminathan 	/*
3830bafec742SSukumar Swaminathan 	 * If the actual number of vectors is less than the minumum
3831bafec742SSukumar Swaminathan 	 * then fail.
3832bafec742SSukumar Swaminathan 	 */
3833bafec742SSukumar Swaminathan 	if (actual < minimum) {
3834bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
3835bafec742SSukumar Swaminathan 		    "Insufficient interrupt handles available: %d", actual);
3836bafec742SSukumar Swaminathan 		goto ql_intr_alloc_fail;
3837bafec742SSukumar Swaminathan 	}
3838bafec742SSukumar Swaminathan 
3839bafec742SSukumar Swaminathan 	/*
3840bafec742SSukumar Swaminathan 	 * For MSI-X, actual might force us to reduce number of tx & rx rings
3841bafec742SSukumar Swaminathan 	 */
3842bafec742SSukumar Swaminathan 	if ((intr_type == DDI_INTR_TYPE_MSIX) && (orig > actual)) {
3843bafec742SSukumar Swaminathan 		if (actual < MAX_RX_RINGS) {
3844bafec742SSukumar Swaminathan 			qlge->tx_ring_count = 1;
3845bafec742SSukumar Swaminathan 			qlge->rss_ring_count = actual - 1;
3846bafec742SSukumar Swaminathan 			qlge->rx_ring_count = qlge->tx_ring_count +
3847bafec742SSukumar Swaminathan 			    qlge->rss_ring_count;
3848bafec742SSukumar Swaminathan 		}
3849bafec742SSukumar Swaminathan 	}
3850bafec742SSukumar Swaminathan 	/*
3851bafec742SSukumar Swaminathan 	 * Get priority for first vector, assume remaining are all the same
3852bafec742SSukumar Swaminathan 	 */
3853bafec742SSukumar Swaminathan 	rc = ddi_intr_get_pri(qlge->htable[0], &qlge->intr_pri);
3854bafec742SSukumar Swaminathan 	if (rc != DDI_SUCCESS) {
3855bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Get interrupt priority failed: %d\n", rc));
3856bafec742SSukumar Swaminathan 		goto ql_intr_alloc_fail;
3857bafec742SSukumar Swaminathan 	}
3858bafec742SSukumar Swaminathan 
3859bafec742SSukumar Swaminathan 	rc = ddi_intr_get_cap(qlge->htable[0], &qlge->intr_cap);
3860bafec742SSukumar Swaminathan 	if (rc != DDI_SUCCESS) {
3861bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Get interrupt cap failed: %d\n", rc));
3862bafec742SSukumar Swaminathan 		goto ql_intr_alloc_fail;
3863bafec742SSukumar Swaminathan 	}
3864bafec742SSukumar Swaminathan 
3865bafec742SSukumar Swaminathan 	qlge->intr_type = intr_type;
3866bafec742SSukumar Swaminathan 
3867bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
3868bafec742SSukumar Swaminathan 
3869bafec742SSukumar Swaminathan ql_intr_alloc_fail:
3870bafec742SSukumar Swaminathan 	ql_free_irq_vectors(qlge);
3871bafec742SSukumar Swaminathan 
3872bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
3873bafec742SSukumar Swaminathan }
3874bafec742SSukumar Swaminathan 
3875bafec742SSukumar Swaminathan /*
3876bafec742SSukumar Swaminathan  * Allocate interrupt vector(s) for one of the following interrupt types, MSI-X,
3877bafec742SSukumar Swaminathan  * MSI or Legacy. In MSI and Legacy modes we only support a single receive and
3878bafec742SSukumar Swaminathan  * transmit queue.
3879bafec742SSukumar Swaminathan  */
3880bafec742SSukumar Swaminathan int
3881bafec742SSukumar Swaminathan ql_alloc_irqs(qlge_t *qlge)
3882bafec742SSukumar Swaminathan {
3883bafec742SSukumar Swaminathan 	int intr_types;
3884bafec742SSukumar Swaminathan 	int rval;
3885bafec742SSukumar Swaminathan 
3886bafec742SSukumar Swaminathan 	/*
3887bafec742SSukumar Swaminathan 	 * Get supported interrupt types
3888bafec742SSukumar Swaminathan 	 */
3889bafec742SSukumar Swaminathan 	if (ddi_intr_get_supported_types(qlge->dip, &intr_types)
3890bafec742SSukumar Swaminathan 	    != DDI_SUCCESS) {
3891bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d):ddi_intr_get_supported_types failed",
3892bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3893bafec742SSukumar Swaminathan 
3894bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3895bafec742SSukumar Swaminathan 	}
3896bafec742SSukumar Swaminathan 
3897bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("%s(%d) Interrupt types supported %d\n",
3898bafec742SSukumar Swaminathan 	    __func__, qlge->instance, intr_types));
3899bafec742SSukumar Swaminathan 
3900bafec742SSukumar Swaminathan 	/* Install MSI-X interrupts */
3901bafec742SSukumar Swaminathan 	if ((intr_types & DDI_INTR_TYPE_MSIX) != 0) {
3902bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt supported %d\n",
3903bafec742SSukumar Swaminathan 		    __func__, qlge->instance, intr_types));
3904bafec742SSukumar Swaminathan 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSIX);
3905bafec742SSukumar Swaminathan 		if (rval == DDI_SUCCESS) {
3906bafec742SSukumar Swaminathan 			return (rval);
3907bafec742SSukumar Swaminathan 		}
3908bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt allocation failed,"
3909bafec742SSukumar Swaminathan 		    " trying MSI interrupts ...\n", __func__, qlge->instance));
3910bafec742SSukumar Swaminathan 	}
3911bafec742SSukumar Swaminathan 
3912bafec742SSukumar Swaminathan 	/*
3913bafec742SSukumar Swaminathan 	 * We will have 2 completion queues in MSI / Legacy mode,
3914bafec742SSukumar Swaminathan 	 * Queue 0 for default completions
3915bafec742SSukumar Swaminathan 	 * Queue 1 for transmit completions
3916bafec742SSukumar Swaminathan 	 */
3917bafec742SSukumar Swaminathan 	qlge->rss_ring_count = 1; /* Default completion queue (0) for all */
3918bafec742SSukumar Swaminathan 	qlge->tx_ring_count = 1; /* Single tx completion queue */
3919bafec742SSukumar Swaminathan 	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
3920bafec742SSukumar Swaminathan 
3921bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("%s(%d) Falling back to single completion queue \n",
3922bafec742SSukumar Swaminathan 	    __func__, qlge->instance));
3923bafec742SSukumar Swaminathan 	/*
3924bafec742SSukumar Swaminathan 	 * Add the h/w interrupt handler and initialise mutexes
3925bafec742SSukumar Swaminathan 	 */
3926bafec742SSukumar Swaminathan 	rval = DDI_FAILURE;
3927bafec742SSukumar Swaminathan 
3928bafec742SSukumar Swaminathan 	/*
3929bafec742SSukumar Swaminathan 	 * If OS supports MSIX interrupt but fails to allocate, then try
3930bafec742SSukumar Swaminathan 	 * MSI interrupt. If MSI interrupt allocation fails also, then roll
3931bafec742SSukumar Swaminathan 	 * back to fixed interrupt.
3932bafec742SSukumar Swaminathan 	 */
3933bafec742SSukumar Swaminathan 	if (intr_types & DDI_INTR_TYPE_MSI) {
3934bafec742SSukumar Swaminathan 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSI);
3935bafec742SSukumar Swaminathan 		if (rval == DDI_SUCCESS) {
3936bafec742SSukumar Swaminathan 			qlge->intr_type = DDI_INTR_TYPE_MSI;
3937bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT, ("%s(%d) use MSI Interrupt \n",
3938bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
3939bafec742SSukumar Swaminathan 		}
3940bafec742SSukumar Swaminathan 	}
3941bafec742SSukumar Swaminathan 
3942bafec742SSukumar Swaminathan 	/* Try Fixed interrupt Legacy mode */
3943bafec742SSukumar Swaminathan 	if (rval != DDI_SUCCESS) {
3944bafec742SSukumar Swaminathan 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_FIXED);
3945bafec742SSukumar Swaminathan 		if (rval != DDI_SUCCESS) {
3946bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d):Legacy mode interrupt "
3947bafec742SSukumar Swaminathan 			    "allocation failed",
3948bafec742SSukumar Swaminathan 			    __func__, qlge->instance);
3949bafec742SSukumar Swaminathan 		} else {
3950bafec742SSukumar Swaminathan 			qlge->intr_type = DDI_INTR_TYPE_FIXED;
3951bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT, ("%s(%d) use Fixed Interrupt \n",
3952bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
3953bafec742SSukumar Swaminathan 		}
3954bafec742SSukumar Swaminathan 	}
3955bafec742SSukumar Swaminathan 
3956bafec742SSukumar Swaminathan 	return (rval);
3957bafec742SSukumar Swaminathan }
3958bafec742SSukumar Swaminathan 
3959bafec742SSukumar Swaminathan static void
3960bafec742SSukumar Swaminathan ql_free_rx_tx_locks(qlge_t *qlge)
3961bafec742SSukumar Swaminathan {
3962bafec742SSukumar Swaminathan 	int i;
3963bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
3964bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
3965bafec742SSukumar Swaminathan 
3966bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
3967bafec742SSukumar Swaminathan 		tx_ring = &qlge->tx_ring[i];
3968bafec742SSukumar Swaminathan 		mutex_destroy(&tx_ring->tx_lock);
3969bafec742SSukumar Swaminathan 	}
3970bafec742SSukumar Swaminathan 
3971bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
3972bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
3973bafec742SSukumar Swaminathan 		mutex_destroy(&rx_ring->rx_lock);
3974bafec742SSukumar Swaminathan 		mutex_destroy(&rx_ring->sbq_lock);
3975bafec742SSukumar Swaminathan 		mutex_destroy(&rx_ring->lbq_lock);
3976bafec742SSukumar Swaminathan 	}
3977bafec742SSukumar Swaminathan }
3978bafec742SSukumar Swaminathan 
/*
 * Frees all resources allocated during attach.
 *
 * The qlge->sequence bit mask records which INIT_* attach steps completed;
 * each resource is released only if its bit is set, and the bit is cleared
 * once the resource is freed, so this routine is safe to call from a
 * partially-completed attach.
 *
 * Input:
 * dip = pointer to device information structure.
 * qlge = adapter state structure; qlge->sequence holds the INIT_* bits.
 *
 * Context:
 * Kernel context.
 */
static void
ql_free_resources(dev_info_t *dip, qlge_t *qlge)
{

	/* Disable driver timer */
	ql_stop_timer(qlge);

	/* Detach from the GLDv3 MAC framework first */
	if (qlge->sequence & INIT_MAC_REGISTERED) {
		(void) mac_unregister(qlge->mh);
		qlge->sequence &= ~INIT_MAC_REGISTERED;
	}

	if (qlge->sequence & INIT_MAC_ALLOC) {
		/* Nothing to do, macp is already freed */
		qlge->sequence &= ~INIT_MAC_ALLOC;
	}

	/* Release the PCI config space access handle */
	if (qlge->sequence & INIT_PCI_CONFIG_SETUP) {
		pci_config_teardown(&qlge->pci_handle);
		qlge->sequence &= ~INIT_PCI_CONFIG_SETUP;
	}

	/* Free the hardware interrupt vectors */
	if (qlge->sequence & INIT_ADD_INTERRUPT) {
		ql_free_irq_vectors(qlge);
		qlge->sequence &= ~INIT_ADD_INTERRUPT;
	}

	/* Remove the three soft interrupts (MPI event/reset, ASIC reset) */
	if (qlge->sequence & INIT_ADD_SOFT_INTERRUPT) {
		(void) ddi_intr_remove_softint(qlge->mpi_event_intr_hdl);
		(void) ddi_intr_remove_softint(qlge->mpi_reset_intr_hdl);
		(void) ddi_intr_remove_softint(qlge->asic_reset_intr_hdl);
		qlge->sequence &= ~INIT_ADD_SOFT_INTERRUPT;
	}

	if (qlge->sequence & INIT_KSTATS) {
		ql_fini_kstats(qlge);
		qlge->sequence &= ~INIT_KSTATS;
	}

	/* Destroy adapter-level mutexes and the mailbox condition variable */
	if (qlge->sequence & INIT_MUTEX) {
		mutex_destroy(&qlge->gen_mutex);
		mutex_destroy(&qlge->hw_mutex);
		mutex_destroy(&qlge->mbx_mutex);
		cv_destroy(&qlge->cv_mbx_intr);
		qlge->sequence &= ~INIT_MUTEX;
	}

	/* Destroy the per-ring rx/tx locks */
	if (qlge->sequence & INIT_LOCKS_CREATED) {
		ql_free_rx_tx_locks(qlge);
		qlge->sequence &= ~INIT_LOCKS_CREATED;
	}

	if (qlge->sequence & INIT_MEMORY_ALLOC) {
		ql_free_mem_resources(qlge);
		qlge->sequence &= ~INIT_MEMORY_ALLOC;
	}

	/* Unmap the main device register window */
	if (qlge->sequence & INIT_REGS_SETUP) {
		ddi_regs_map_free(&qlge->dev_handle);
		qlge->sequence &= ~INIT_REGS_SETUP;
	}

	/* Unmap the doorbell register window */
	if (qlge->sequence & INIT_DOORBELL_REGS_SETUP) {
		ddi_regs_map_free(&qlge->dev_doorbell_reg_handle);
		qlge->sequence &= ~INIT_DOORBELL_REGS_SETUP;
	}

	/*
	 * free flash flt table that allocated in attach stage
	 */
	if ((qlge->flt.ql_flt_entry_ptr != NULL)&&
	    (qlge->flt.header.length != 0)) {
		kmem_free(qlge->flt.ql_flt_entry_ptr, qlge->flt.header.length);
		qlge->flt.ql_flt_entry_ptr = NULL;
	}

	/*
	 * finally, free qlge structure
	 * NOTE(review): qlge must not be referenced after this kmem_free;
	 * only dip is used below.
	 */
	if (qlge->sequence & INIT_SOFTSTATE_ALLOC) {
		kmem_free(qlge, sizeof (qlge_t));
	}

	ddi_prop_remove_all(dip);
	ddi_set_driver_private(dip, NULL);

}
4074bafec742SSukumar Swaminathan 
4075bafec742SSukumar Swaminathan /*
4076bafec742SSukumar Swaminathan  * Set promiscuous mode of the driver
4077bafec742SSukumar Swaminathan  * Caller must catch HW_LOCK
4078bafec742SSukumar Swaminathan  */
4079bafec742SSukumar Swaminathan void
4080bafec742SSukumar Swaminathan ql_set_promiscuous(qlge_t *qlge, int mode)
4081bafec742SSukumar Swaminathan {
4082bafec742SSukumar Swaminathan 	if (mode) {
4083*0662fbf4SSukumar Swaminathan 		(void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4084bafec742SSukumar Swaminathan 		    RT_IDX_VALID, 1);
4085bafec742SSukumar Swaminathan 	} else {
4086*0662fbf4SSukumar Swaminathan 		(void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4087bafec742SSukumar Swaminathan 		    RT_IDX_VALID, 0);
4088bafec742SSukumar Swaminathan 	}
4089bafec742SSukumar Swaminathan }
4090bafec742SSukumar Swaminathan /*
4091bafec742SSukumar Swaminathan  * Write 'data1' to Mac Protocol Address Index Register and
4092bafec742SSukumar Swaminathan  * 'data2' to Mac Protocol Address Data Register
4093bafec742SSukumar Swaminathan  *  Assuming that the Mac Protocol semaphore lock has been acquired.
4094bafec742SSukumar Swaminathan  */
4095bafec742SSukumar Swaminathan static int
4096bafec742SSukumar Swaminathan ql_write_mac_proto_regs(qlge_t *qlge, uint32_t data1, uint32_t data2)
4097bafec742SSukumar Swaminathan {
4098bafec742SSukumar Swaminathan 	int return_value = DDI_SUCCESS;
4099bafec742SSukumar Swaminathan 
4100bafec742SSukumar Swaminathan 	if (ql_wait_reg_bit(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
4101bafec742SSukumar Swaminathan 	    MAC_PROTOCOL_ADDRESS_INDEX_MW, BIT_SET, 5) != DDI_SUCCESS) {
4102bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Wait for MAC_PROTOCOL Address Register "
4103bafec742SSukumar Swaminathan 		    "timeout.");
4104bafec742SSukumar Swaminathan 		return_value = DDI_FAILURE;
4105bafec742SSukumar Swaminathan 		goto out;
4106bafec742SSukumar Swaminathan 	}
4107bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX /* A8 */, data1);
4108bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA /* 0xAC */, data2);
4109bafec742SSukumar Swaminathan out:
4110bafec742SSukumar Swaminathan 	return (return_value);
4111bafec742SSukumar Swaminathan }
4112bafec742SSukumar Swaminathan /*
4113bafec742SSukumar Swaminathan  * Enable the 'index'ed multicast address in the host memory's multicast_list
4114bafec742SSukumar Swaminathan  */
4115bafec742SSukumar Swaminathan int
4116bafec742SSukumar Swaminathan ql_add_multicast_address(qlge_t *qlge, int index)
4117bafec742SSukumar Swaminathan {
4118bafec742SSukumar Swaminathan 	int rtn_val = DDI_FAILURE;
4119bafec742SSukumar Swaminathan 	uint32_t offset;
4120bafec742SSukumar Swaminathan 	uint32_t value1, value2;
4121bafec742SSukumar Swaminathan 
4122bafec742SSukumar Swaminathan 	/* Acquire the required semaphore */
4123bafec742SSukumar Swaminathan 	if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4124bafec742SSukumar Swaminathan 		return (rtn_val);
4125bafec742SSukumar Swaminathan 	}
4126bafec742SSukumar Swaminathan 
4127bafec742SSukumar Swaminathan 	/* Program Offset0 - lower 32 bits of the MAC address */
4128bafec742SSukumar Swaminathan 	offset = 0;
4129bafec742SSukumar Swaminathan 	value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4130bafec742SSukumar Swaminathan 	    (index << 4) | offset;
4131bafec742SSukumar Swaminathan 	value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4132bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4133bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4134bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4135bafec742SSukumar Swaminathan 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS)
4136bafec742SSukumar Swaminathan 		goto out;
4137bafec742SSukumar Swaminathan 
4138bafec742SSukumar Swaminathan 	/* Program offset1: upper 16 bits of the MAC address */
4139bafec742SSukumar Swaminathan 	offset = 1;
4140bafec742SSukumar Swaminathan 	value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4141bafec742SSukumar Swaminathan 	    (index<<4) | offset;
4142bafec742SSukumar Swaminathan 	value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[0] << 8)
4143bafec742SSukumar Swaminathan 	    |qlge->multicast_list[index].addr.ether_addr_octet[1]);
4144bafec742SSukumar Swaminathan 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4145bafec742SSukumar Swaminathan 		goto out;
4146bafec742SSukumar Swaminathan 	}
4147bafec742SSukumar Swaminathan 	rtn_val = DDI_SUCCESS;
4148bafec742SSukumar Swaminathan out:
4149bafec742SSukumar Swaminathan 	ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4150bafec742SSukumar Swaminathan 	return (rtn_val);
4151bafec742SSukumar Swaminathan }
4152bafec742SSukumar Swaminathan 
4153bafec742SSukumar Swaminathan /*
4154bafec742SSukumar Swaminathan  * Disable the 'index'ed multicast address in the host memory's multicast_list
4155bafec742SSukumar Swaminathan  */
4156bafec742SSukumar Swaminathan int
4157bafec742SSukumar Swaminathan ql_remove_multicast_address(qlge_t *qlge, int index)
4158bafec742SSukumar Swaminathan {
4159bafec742SSukumar Swaminathan 	int rtn_val = DDI_FAILURE;
4160bafec742SSukumar Swaminathan 	uint32_t offset;
4161bafec742SSukumar Swaminathan 	uint32_t value1, value2;
4162bafec742SSukumar Swaminathan 
4163bafec742SSukumar Swaminathan 	/* Acquire the required semaphore */
4164bafec742SSukumar Swaminathan 	if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4165bafec742SSukumar Swaminathan 		return (rtn_val);
4166bafec742SSukumar Swaminathan 	}
4167bafec742SSukumar Swaminathan 	/* Program Offset0 - lower 32 bits of the MAC address */
4168bafec742SSukumar Swaminathan 	offset = 0;
4169bafec742SSukumar Swaminathan 	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4170bafec742SSukumar Swaminathan 	value2 =
4171bafec742SSukumar Swaminathan 	    ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4172bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4173bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4174bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4175bafec742SSukumar Swaminathan 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4176bafec742SSukumar Swaminathan 		goto out;
4177bafec742SSukumar Swaminathan 	}
4178bafec742SSukumar Swaminathan 	/* Program offset1: upper 16 bits of the MAC address */
4179bafec742SSukumar Swaminathan 	offset = 1;
4180bafec742SSukumar Swaminathan 	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4181bafec742SSukumar Swaminathan 	value2 = 0;
4182bafec742SSukumar Swaminathan 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4183bafec742SSukumar Swaminathan 		goto out;
4184bafec742SSukumar Swaminathan 	}
4185bafec742SSukumar Swaminathan 	rtn_val = DDI_SUCCESS;
4186bafec742SSukumar Swaminathan out:
4187bafec742SSukumar Swaminathan 	ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4188bafec742SSukumar Swaminathan 	return (rtn_val);
4189bafec742SSukumar Swaminathan }
4190bafec742SSukumar Swaminathan 
/*
 * Add a new multicast address to the list of supported list
 * This API is called after OS called gld_set_multicast (GLDv2)
 * or m_multicst (GLDv3)
 *
 * Restriction:
 * The number of maximum multicast address is limited by hardware.
 *
 * Returns DDI_SUCCESS (0), or an errno value (EINVAL for a non-multicast
 * address, ENOENT when the software list is full or the routing register
 * update fails).
 */
int
ql_add_to_multicast_list(qlge_t *qlge, uint8_t *ep)
{
	/* next free slot == current number of entries */
	uint32_t index = qlge->multicast_list_count;
	int rval = DDI_SUCCESS;
	int status;

	/* reject unicast addresses: the group bit (LSB of octet 0) is clear */
	if ((ep[0] & 01) == 0) {
		rval = EINVAL;
		goto exit;
	}

	/* if there is an available space in multicast_list, then add it */
	if (index < MAX_MULTICAST_LIST_SIZE) {
		bcopy(ep, qlge->multicast_list[index].addr.ether_addr_octet,
		    ETHERADDRL);
		/* program the new entry into the hardware filter */
		(void) ql_add_multicast_address(qlge, index);
		/* increment the total number of addresses in multicast list */
		qlge->multicast_list_count++;
		QL_PRINT(DBG_GLD,
		    ("%s(%d): added to index of multicast list= 0x%x, "
		    "total %d\n", __func__, qlge->instance, index,
		    qlge->multicast_list_count));

		/*
		 * Once the list outgrows what the hardware can filter,
		 * fall back to multicast-promiscuous routing.
		 * NOTE(review): this compares the pre-increment index with
		 * '>'; if MAX_MULTICAST_HW_SIZE is the number of hardware
		 * slots this looks like an off-by-one ('>=' expected) —
		 * confirm against the routing register definition.
		 */
		if (index > MAX_MULTICAST_HW_SIZE) {
			if (!qlge->multicast_promisc) {
				status = ql_set_routing_reg(qlge,
				    RT_IDX_ALLMULTI_SLOT,
				    RT_IDX_MCAST, 1);
				if (status) {
					cmn_err(CE_WARN,
					    "Failed to init routing reg "
					    "for mcast promisc mode.");
					rval = ENOENT;
					goto exit;
				}
				qlge->multicast_promisc = B_TRUE;
			}
		}
	} else {
		/* software multicast list is full */
		rval = ENOENT;
	}
exit:
	return (rval);
}
4244bafec742SSukumar Swaminathan 
/*
 * Remove an old multicast address from the list of supported multicast
 * addresses. This API is called after OS called gld_set_multicast (GLDv2)
 * or m_multicst (GLDv3)
 * The number of maximum multicast address is limited by hardware.
 *
 * NOTE(review): this function always returns DDI_SUCCESS, even when the
 * routing-register update below fails (the failure is only logged) —
 * confirm callers do not rely on an error return.
 */
int
ql_remove_from_multicast_list(qlge_t *qlge, uint8_t *ep)
{
	uint32_t total = qlge->multicast_list_count;
	int i = 0;
	int rmv_index = 0;
	size_t length = sizeof (ql_multicast_addr);
	int status;

	/* linear search for the entry matching 'ep' */
	for (i = 0; i < total; i++) {
		if (bcmp(ep, &qlge->multicast_list[i].addr, ETHERADDRL) != 0) {
			continue;
		}

		rmv_index = i;
		/* block move the rest of the multicast addresses forward */
		length = ((total -1) -i) * sizeof (ql_multicast_addr);
		if (length > 0) {
			bcopy(&qlge->multicast_list[i+1],
			    &qlge->multicast_list[i], length);
		}
		qlge->multicast_list_count--;
		if (qlge->multicast_list_count <= MAX_MULTICAST_HW_SIZE) {
			/*
			 * there is a deletion in multicast list table,
			 * re-enable them: reprogram every shifted entry
			 * from the removal point onward
			 */
			for (i = rmv_index; i < qlge->multicast_list_count;
			    i++) {
				(void) ql_add_multicast_address(qlge, i);
			}
			/* and disable the last one (now a stale duplicate) */
			(void) ql_remove_multicast_address(qlge, i);

			/* disable multicast promiscuous mode */
			if (qlge->multicast_promisc) {
				status = ql_set_routing_reg(qlge,
				    RT_IDX_ALLMULTI_SLOT,
				    RT_IDX_MCAST, 0);
				if (status) {
					cmn_err(CE_WARN,
					    "Failed to init routing reg for "
					    "mcast promisc mode.");
					goto exit;
				}
				/* write to config register */
				qlge->multicast_promisc = B_FALSE;
			}
		}
		break;
	}
exit:
	return (DDI_SUCCESS);
}
4305bafec742SSukumar Swaminathan 
4306bafec742SSukumar Swaminathan /*
4307bafec742SSukumar Swaminathan  * Read a XGMAC register
4308bafec742SSukumar Swaminathan  */
4309bafec742SSukumar Swaminathan int
4310bafec742SSukumar Swaminathan ql_read_xgmac_reg(qlge_t *qlge, uint32_t addr, uint32_t *val)
4311bafec742SSukumar Swaminathan {
4312bafec742SSukumar Swaminathan 	int rtn_val = DDI_FAILURE;
4313bafec742SSukumar Swaminathan 
4314bafec742SSukumar Swaminathan 	/* wait for XGMAC Address register RDY bit set */
4315bafec742SSukumar Swaminathan 	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4316bafec742SSukumar Swaminathan 	    BIT_SET, 10) != DDI_SUCCESS) {
4317bafec742SSukumar Swaminathan 		goto out;
4318bafec742SSukumar Swaminathan 	}
4319bafec742SSukumar Swaminathan 	/* start rx transaction */
4320bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_XGMAC_ADDRESS, addr|XGMAC_ADDRESS_READ_TRANSACT);
4321bafec742SSukumar Swaminathan 
4322bafec742SSukumar Swaminathan 	/*
4323bafec742SSukumar Swaminathan 	 * wait for XGMAC Address register RDY bit set,
4324bafec742SSukumar Swaminathan 	 * which indicates data is ready
4325bafec742SSukumar Swaminathan 	 */
4326bafec742SSukumar Swaminathan 	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4327bafec742SSukumar Swaminathan 	    BIT_SET, 10) != DDI_SUCCESS) {
4328bafec742SSukumar Swaminathan 		goto out;
4329bafec742SSukumar Swaminathan 	}
4330bafec742SSukumar Swaminathan 	/* read data from XGAMC_DATA register */
4331bafec742SSukumar Swaminathan 	*val = ql_read_reg(qlge, REG_XGMAC_DATA);
4332bafec742SSukumar Swaminathan 	rtn_val = DDI_SUCCESS;
4333bafec742SSukumar Swaminathan out:
4334bafec742SSukumar Swaminathan 	return (rtn_val);
4335bafec742SSukumar Swaminathan }
4336bafec742SSukumar Swaminathan 
4337bafec742SSukumar Swaminathan /*
4338bafec742SSukumar Swaminathan  * Implement checksum offload for IPv4 IP packets
4339bafec742SSukumar Swaminathan  */
4340bafec742SSukumar Swaminathan static void
4341bafec742SSukumar Swaminathan ql_hw_csum_setup(qlge_t *qlge, uint32_t pflags, caddr_t bp,
4342bafec742SSukumar Swaminathan     struct ob_mac_iocb_req *mac_iocb_ptr)
4343bafec742SSukumar Swaminathan {
4344bafec742SSukumar Swaminathan 	struct ip *iphdr = NULL;
4345bafec742SSukumar Swaminathan 	struct ether_header *ethhdr;
4346bafec742SSukumar Swaminathan 	struct ether_vlan_header *ethvhdr;
4347bafec742SSukumar Swaminathan 	struct tcphdr *tcp_hdr;
4348bafec742SSukumar Swaminathan 	uint32_t etherType;
4349bafec742SSukumar Swaminathan 	int mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
4350bafec742SSukumar Swaminathan 	int ip_hdr_off, tcp_udp_hdr_off, hdr_off;
4351bafec742SSukumar Swaminathan 
4352bafec742SSukumar Swaminathan 	ethhdr  = (struct ether_header *)((void *)bp);
4353bafec742SSukumar Swaminathan 	ethvhdr = (struct ether_vlan_header *)((void *)bp);
4354bafec742SSukumar Swaminathan 	/* Is this vlan packet? */
4355bafec742SSukumar Swaminathan 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
4356bafec742SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_vlan_header);
4357bafec742SSukumar Swaminathan 		etherType = ntohs(ethvhdr->ether_type);
4358bafec742SSukumar Swaminathan 	} else {
4359bafec742SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_header);
4360bafec742SSukumar Swaminathan 		etherType = ntohs(ethhdr->ether_type);
4361bafec742SSukumar Swaminathan 	}
4362bafec742SSukumar Swaminathan 	/* Is this IPv4 or IPv6 packet? */
4363bafec742SSukumar Swaminathan 	if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len)) ==
4364bafec742SSukumar Swaminathan 	    IPV4_VERSION) {
4365bafec742SSukumar Swaminathan 		if (etherType == ETHERTYPE_IP /* 0800 */) {
4366bafec742SSukumar Swaminathan 			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
4367bafec742SSukumar Swaminathan 		} else {
4368bafec742SSukumar Swaminathan 			/* EMPTY */
4369bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX,
4370bafec742SSukumar Swaminathan 			    ("%s(%d) : IPv4 None IP packet type 0x%x\n",
4371bafec742SSukumar Swaminathan 			    __func__, qlge->instance, etherType));
4372bafec742SSukumar Swaminathan 		}
4373bafec742SSukumar Swaminathan 	}
4374bafec742SSukumar Swaminathan 	/* ipV4 packets */
4375bafec742SSukumar Swaminathan 	if (iphdr != NULL) {
4376bafec742SSukumar Swaminathan 
4377bafec742SSukumar Swaminathan 		ip_hdr_len = IPH_HDR_LENGTH(iphdr);
4378bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX,
4379bafec742SSukumar Swaminathan 		    ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH:"
4380bafec742SSukumar Swaminathan 		    " %d bytes \n", __func__, qlge->instance, ip_hdr_len));
4381bafec742SSukumar Swaminathan 
4382bafec742SSukumar Swaminathan 		ip_hdr_off = mac_hdr_len;
4383bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
4384bafec742SSukumar Swaminathan 		    __func__, qlge->instance, ip_hdr_len));
4385bafec742SSukumar Swaminathan 
4386bafec742SSukumar Swaminathan 		mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
4387bafec742SSukumar Swaminathan 		    OB_MAC_IOCB_REQ_IPv4);
4388bafec742SSukumar Swaminathan 
4389bafec742SSukumar Swaminathan 		if (pflags & HCK_IPV4_HDRCKSUM) {
4390bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX, ("%s(%d) : Do IPv4 header checksum\n",
4391bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
4392bafec742SSukumar Swaminathan 			mac_iocb_ptr->opcode = OPCODE_OB_MAC_OFFLOAD_IOCB;
4393bafec742SSukumar Swaminathan 			mac_iocb_ptr->flag2 = (uint8_t)(mac_iocb_ptr->flag2 |
4394bafec742SSukumar Swaminathan 			    OB_MAC_IOCB_REQ_IC);
4395bafec742SSukumar Swaminathan 			iphdr->ip_sum = 0;
4396bafec742SSukumar Swaminathan 			mac_iocb_ptr->hdr_off = (uint16_t)
4397bafec742SSukumar Swaminathan 			    cpu_to_le16(ip_hdr_off);
4398bafec742SSukumar Swaminathan 		}
4399bafec742SSukumar Swaminathan 		if (pflags & HCK_FULLCKSUM) {
4400bafec742SSukumar Swaminathan 			if (iphdr->ip_p == IPPROTO_TCP) {
4401bafec742SSukumar Swaminathan 				tcp_hdr =
4402bafec742SSukumar Swaminathan 				    (struct tcphdr *)(void *)
4403bafec742SSukumar Swaminathan 				    ((uint8_t *)(void *)iphdr + ip_hdr_len);
4404bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d) : Do TCP checksum\n",
4405bafec742SSukumar Swaminathan 				    __func__, qlge->instance));
4406bafec742SSukumar Swaminathan 				mac_iocb_ptr->opcode =
4407bafec742SSukumar Swaminathan 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4408bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag1 =
4409bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag1 |
4410bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_TC);
4411bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag2 =
4412bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag2 |
4413bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_IC);
4414bafec742SSukumar Swaminathan 				iphdr->ip_sum = 0;
4415bafec742SSukumar Swaminathan 				tcp_udp_hdr_off = mac_hdr_len+ip_hdr_len;
4416bafec742SSukumar Swaminathan 				tcp_udp_hdr_len = tcp_hdr->th_off*4;
4417bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
4418bafec742SSukumar Swaminathan 				    __func__, qlge->instance, tcp_udp_hdr_len));
4419bafec742SSukumar Swaminathan 				hdr_off = ip_hdr_off;
4420bafec742SSukumar Swaminathan 				tcp_udp_hdr_off <<= 6;
4421bafec742SSukumar Swaminathan 				hdr_off |= tcp_udp_hdr_off;
4422bafec742SSukumar Swaminathan 				mac_iocb_ptr->hdr_off =
4423bafec742SSukumar Swaminathan 				    (uint16_t)cpu_to_le16(hdr_off);
4424bafec742SSukumar Swaminathan 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4425bafec742SSukumar Swaminathan 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
4426bafec742SSukumar Swaminathan 				    tcp_udp_hdr_len);
4427bafec742SSukumar Swaminathan 
4428bafec742SSukumar Swaminathan 				/*
4429bafec742SSukumar Swaminathan 				 * if the chip is unable to do pseudo header
4430bafec742SSukumar Swaminathan 				 * cksum calculation, do it in then put the
4431bafec742SSukumar Swaminathan 				 * result to the data passed to the chip
4432bafec742SSukumar Swaminathan 				 */
4433bafec742SSukumar Swaminathan 				if (qlge->cfg_flags &
4434bafec742SSukumar Swaminathan 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4435bafec742SSukumar Swaminathan 					ql_pseudo_cksum((uint8_t *)iphdr);
4436bafec742SSukumar Swaminathan 				}
4437bafec742SSukumar Swaminathan 			} else if (iphdr->ip_p == IPPROTO_UDP) {
4438bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d) : Do UDP checksum\n",
4439bafec742SSukumar Swaminathan 				    __func__, qlge->instance));
4440bafec742SSukumar Swaminathan 				mac_iocb_ptr->opcode =
4441bafec742SSukumar Swaminathan 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4442bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag1 =
4443bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag1 |
4444bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_UC);
4445bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag2 =
4446bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag2 |
4447bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_IC);
4448bafec742SSukumar Swaminathan 				iphdr->ip_sum = 0;
4449bafec742SSukumar Swaminathan 				tcp_udp_hdr_off = mac_hdr_len + ip_hdr_len;
4450bafec742SSukumar Swaminathan 				tcp_udp_hdr_len = sizeof (struct udphdr);
4451bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
4452bafec742SSukumar Swaminathan 				    __func__, qlge->instance, tcp_udp_hdr_len));
4453bafec742SSukumar Swaminathan 				hdr_off = ip_hdr_off;
4454bafec742SSukumar Swaminathan 				tcp_udp_hdr_off <<= 6;
4455bafec742SSukumar Swaminathan 				hdr_off |= tcp_udp_hdr_off;
4456bafec742SSukumar Swaminathan 				mac_iocb_ptr->hdr_off =
4457bafec742SSukumar Swaminathan 				    (uint16_t)cpu_to_le16(hdr_off);
4458bafec742SSukumar Swaminathan 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4459bafec742SSukumar Swaminathan 				    cpu_to_le16(mac_hdr_len + ip_hdr_len
4460bafec742SSukumar Swaminathan 				    + tcp_udp_hdr_len);
4461bafec742SSukumar Swaminathan 
4462bafec742SSukumar Swaminathan 				/*
4463bafec742SSukumar Swaminathan 				 * if the chip is unable to calculate pseudo
4464bafec742SSukumar Swaminathan 				 * hdr cksum,do it in then put the result to
4465bafec742SSukumar Swaminathan 				 * the data passed to the chip
4466bafec742SSukumar Swaminathan 				 */
4467bafec742SSukumar Swaminathan 				if (qlge->cfg_flags &
4468bafec742SSukumar Swaminathan 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4469bafec742SSukumar Swaminathan 					ql_pseudo_cksum((uint8_t *)iphdr);
4470bafec742SSukumar Swaminathan 				}
4471bafec742SSukumar Swaminathan 			}
4472bafec742SSukumar Swaminathan 		}
4473bafec742SSukumar Swaminathan 	}
4474bafec742SSukumar Swaminathan }
4475bafec742SSukumar Swaminathan 
4476bafec742SSukumar Swaminathan /*
4477bafec742SSukumar Swaminathan  * For TSO/LSO:
4478bafec742SSukumar Swaminathan  * MAC frame transmission with TCP large segment offload is performed in the
4479bafec742SSukumar Swaminathan  * same way as the MAC frame transmission with checksum offload with the
4480bafec742SSukumar Swaminathan  * exception that the maximum TCP segment size (MSS) must be specified to
4481bafec742SSukumar Swaminathan  * allow the chip to segment the data into legal sized frames.
4482bafec742SSukumar Swaminathan  * The host also needs to calculate a pseudo-header checksum over the
4483bafec742SSukumar Swaminathan  * following fields:
4484bafec742SSukumar Swaminathan  * Source IP Address, Destination IP Address, and the Protocol.
4485bafec742SSukumar Swaminathan  * The TCP length is not included in the pseudo-header calculation.
4486bafec742SSukumar Swaminathan  * The pseudo-header checksum is place in the TCP checksum field of the
4487bafec742SSukumar Swaminathan  * prototype header.
4488bafec742SSukumar Swaminathan  */
4489bafec742SSukumar Swaminathan static void
4490bafec742SSukumar Swaminathan ql_lso_pseudo_cksum(uint8_t *buf)
4491bafec742SSukumar Swaminathan {
4492bafec742SSukumar Swaminathan 	uint32_t cksum;
4493bafec742SSukumar Swaminathan 	uint16_t iphl;
4494bafec742SSukumar Swaminathan 	uint16_t proto;
4495bafec742SSukumar Swaminathan 
4496bafec742SSukumar Swaminathan 	/*
4497bafec742SSukumar Swaminathan 	 * Calculate the LSO pseudo-header checksum.
4498bafec742SSukumar Swaminathan 	 */
4499bafec742SSukumar Swaminathan 	iphl = (uint16_t)(4 * (buf[0] & 0xF));
4500bafec742SSukumar Swaminathan 	cksum = proto = buf[9];
4501bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[12])<<8) + buf[13];
4502bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[14])<<8) + buf[15];
4503bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[16])<<8) + buf[17];
4504bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[18])<<8) + buf[19];
4505bafec742SSukumar Swaminathan 	cksum = (cksum>>16) + (cksum & 0xFFFF);
4506bafec742SSukumar Swaminathan 	cksum = (cksum>>16) + (cksum & 0xFFFF);
4507bafec742SSukumar Swaminathan 
4508bafec742SSukumar Swaminathan 	/*
4509bafec742SSukumar Swaminathan 	 * Point it to the TCP/UDP header, and
4510bafec742SSukumar Swaminathan 	 * update the checksum field.
4511bafec742SSukumar Swaminathan 	 */
4512bafec742SSukumar Swaminathan 	buf += iphl + ((proto == IPPROTO_TCP) ?
4513bafec742SSukumar Swaminathan 	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
4514bafec742SSukumar Swaminathan 
4515bafec742SSukumar Swaminathan 	*(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
4516bafec742SSukumar Swaminathan }
4517bafec742SSukumar Swaminathan 
4518bafec742SSukumar Swaminathan /*
4519bafec742SSukumar Swaminathan  * Tell the hardware to do Large Send Offload (LSO)
4520bafec742SSukumar Swaminathan  *
4521bafec742SSukumar Swaminathan  * Some fields in ob_mac_iocb need to be set so hardware can know what is
4522bafec742SSukumar Swaminathan  * the incoming packet, TCP or UDP, whether a VLAN tag needs to be inserted
4523bafec742SSukumar Swaminathan  * in the right place of the packet etc, thus, hardware can process the
4524bafec742SSukumar Swaminathan  * packet correctly.
4525bafec742SSukumar Swaminathan  */
4526bafec742SSukumar Swaminathan static void
4527bafec742SSukumar Swaminathan ql_hw_lso_setup(qlge_t *qlge, uint32_t mss, caddr_t bp,
4528bafec742SSukumar Swaminathan     struct ob_mac_iocb_req *mac_iocb_ptr)
4529bafec742SSukumar Swaminathan {
4530bafec742SSukumar Swaminathan 	struct ip *iphdr = NULL;
4531bafec742SSukumar Swaminathan 	struct ether_header *ethhdr;
4532bafec742SSukumar Swaminathan 	struct ether_vlan_header *ethvhdr;
4533bafec742SSukumar Swaminathan 	struct tcphdr *tcp_hdr;
4534bafec742SSukumar Swaminathan 	struct udphdr *udp_hdr;
4535bafec742SSukumar Swaminathan 	uint32_t etherType;
4536bafec742SSukumar Swaminathan 	uint16_t mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
4537bafec742SSukumar Swaminathan 	uint16_t ip_hdr_off, tcp_udp_hdr_off, hdr_off;
4538bafec742SSukumar Swaminathan 
4539bafec742SSukumar Swaminathan 	ethhdr = (struct ether_header *)(void *)bp;
4540bafec742SSukumar Swaminathan 	ethvhdr = (struct ether_vlan_header *)(void *)bp;
4541bafec742SSukumar Swaminathan 
4542bafec742SSukumar Swaminathan 	/* Is this vlan packet? */
4543bafec742SSukumar Swaminathan 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
4544bafec742SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_vlan_header);
4545bafec742SSukumar Swaminathan 		etherType = ntohs(ethvhdr->ether_type);
4546bafec742SSukumar Swaminathan 	} else {
4547bafec742SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_header);
4548bafec742SSukumar Swaminathan 		etherType = ntohs(ethhdr->ether_type);
4549bafec742SSukumar Swaminathan 	}
4550bafec742SSukumar Swaminathan 	/* Is this IPv4 or IPv6 packet? */
4551bafec742SSukumar Swaminathan 	if (IPH_HDR_VERSION((ipha_t *)(void *)(bp + mac_hdr_len)) ==
4552bafec742SSukumar Swaminathan 	    IPV4_VERSION) {
4553bafec742SSukumar Swaminathan 		if (etherType == ETHERTYPE_IP /* 0800 */) {
4554bafec742SSukumar Swaminathan 			iphdr 	= (struct ip *)(void *)(bp+mac_hdr_len);
4555bafec742SSukumar Swaminathan 		} else {
4556bafec742SSukumar Swaminathan 			/* EMPTY */
4557bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX, ("%s(%d) : IPv4 None IP packet"
4558bafec742SSukumar Swaminathan 			    " type 0x%x\n",
4559bafec742SSukumar Swaminathan 			    __func__, qlge->instance, etherType));
4560bafec742SSukumar Swaminathan 		}
4561bafec742SSukumar Swaminathan 	}
4562bafec742SSukumar Swaminathan 
4563bafec742SSukumar Swaminathan 	if (iphdr != NULL) { /* ipV4 packets */
4564bafec742SSukumar Swaminathan 		ip_hdr_len = (uint16_t)IPH_HDR_LENGTH(iphdr);
4565bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX,
4566bafec742SSukumar Swaminathan 		    ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH: %d"
4567bafec742SSukumar Swaminathan 		    " bytes \n", __func__, qlge->instance, ip_hdr_len));
4568bafec742SSukumar Swaminathan 
4569bafec742SSukumar Swaminathan 		ip_hdr_off = mac_hdr_len;
4570bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
4571bafec742SSukumar Swaminathan 		    __func__, qlge->instance, ip_hdr_len));
4572bafec742SSukumar Swaminathan 
4573bafec742SSukumar Swaminathan 		mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
4574bafec742SSukumar Swaminathan 		    OB_MAC_IOCB_REQ_IPv4);
4575bafec742SSukumar Swaminathan 		if (qlge->cfg_flags & CFG_CKSUM_FULL_IPv4) {
4576bafec742SSukumar Swaminathan 			if (iphdr->ip_p == IPPROTO_TCP) {
4577bafec742SSukumar Swaminathan 				tcp_hdr = (struct tcphdr *)(void *)
4578bafec742SSukumar Swaminathan 				    ((uint8_t *)(void *)iphdr +
4579bafec742SSukumar Swaminathan 				    ip_hdr_len);
4580bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on TCP "
4581bafec742SSukumar Swaminathan 				    "packet\n",
4582bafec742SSukumar Swaminathan 				    __func__, qlge->instance));
4583bafec742SSukumar Swaminathan 				mac_iocb_ptr->opcode =
4584bafec742SSukumar Swaminathan 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4585bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag1 =
4586bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag1 |
4587bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_LSO);
4588bafec742SSukumar Swaminathan 				iphdr->ip_sum = 0;
4589bafec742SSukumar Swaminathan 				tcp_udp_hdr_off =
4590bafec742SSukumar Swaminathan 				    (uint16_t)(mac_hdr_len+ip_hdr_len);
4591bafec742SSukumar Swaminathan 				tcp_udp_hdr_len =
4592bafec742SSukumar Swaminathan 				    (uint16_t)(tcp_hdr->th_off*4);
4593bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
4594bafec742SSukumar Swaminathan 				    __func__, qlge->instance, tcp_udp_hdr_len));
4595bafec742SSukumar Swaminathan 				hdr_off = ip_hdr_off;
4596bafec742SSukumar Swaminathan 				tcp_udp_hdr_off <<= 6;
4597bafec742SSukumar Swaminathan 				hdr_off |= tcp_udp_hdr_off;
4598bafec742SSukumar Swaminathan 				mac_iocb_ptr->hdr_off =
4599bafec742SSukumar Swaminathan 				    (uint16_t)cpu_to_le16(hdr_off);
4600bafec742SSukumar Swaminathan 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4601bafec742SSukumar Swaminathan 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
4602bafec742SSukumar Swaminathan 				    tcp_udp_hdr_len);
4603bafec742SSukumar Swaminathan 				mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
4604bafec742SSukumar Swaminathan 
4605bafec742SSukumar Swaminathan 				/*
4606bafec742SSukumar Swaminathan 				 * if the chip is unable to calculate pseudo
4607bafec742SSukumar Swaminathan 				 * header checksum, do it in then put the result
4608bafec742SSukumar Swaminathan 				 * to the data passed to the chip
4609bafec742SSukumar Swaminathan 				 */
4610bafec742SSukumar Swaminathan 				if (qlge->cfg_flags &
4611bafec742SSukumar Swaminathan 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
4612bafec742SSukumar Swaminathan 					ql_lso_pseudo_cksum((uint8_t *)iphdr);
4613bafec742SSukumar Swaminathan 			} else if (iphdr->ip_p == IPPROTO_UDP) {
4614bafec742SSukumar Swaminathan 				udp_hdr = (struct udphdr *)(void *)
4615bafec742SSukumar Swaminathan 				    ((uint8_t *)(void *)iphdr
4616bafec742SSukumar Swaminathan 				    + ip_hdr_len);
4617bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on UDP "
4618bafec742SSukumar Swaminathan 				    "packet\n",
4619bafec742SSukumar Swaminathan 				    __func__, qlge->instance));
4620bafec742SSukumar Swaminathan 				mac_iocb_ptr->opcode =
4621bafec742SSukumar Swaminathan 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4622bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag1 =
4623bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag1 |
4624bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_LSO);
4625bafec742SSukumar Swaminathan 				iphdr->ip_sum = 0;
4626bafec742SSukumar Swaminathan 				tcp_udp_hdr_off =
4627bafec742SSukumar Swaminathan 				    (uint16_t)(mac_hdr_len+ip_hdr_len);
4628bafec742SSukumar Swaminathan 				tcp_udp_hdr_len =
4629bafec742SSukumar Swaminathan 				    (uint16_t)(udp_hdr->uh_ulen*4);
4630bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
4631bafec742SSukumar Swaminathan 				    __func__, qlge->instance, tcp_udp_hdr_len));
4632bafec742SSukumar Swaminathan 				hdr_off = ip_hdr_off;
4633bafec742SSukumar Swaminathan 				tcp_udp_hdr_off <<= 6;
4634bafec742SSukumar Swaminathan 				hdr_off |= tcp_udp_hdr_off;
4635bafec742SSukumar Swaminathan 				mac_iocb_ptr->hdr_off =
4636bafec742SSukumar Swaminathan 				    (uint16_t)cpu_to_le16(hdr_off);
4637bafec742SSukumar Swaminathan 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4638bafec742SSukumar Swaminathan 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
4639bafec742SSukumar Swaminathan 				    tcp_udp_hdr_len);
4640bafec742SSukumar Swaminathan 				mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
4641bafec742SSukumar Swaminathan 
4642bafec742SSukumar Swaminathan 				/*
4643bafec742SSukumar Swaminathan 				 * if the chip is unable to do pseudo header
4644bafec742SSukumar Swaminathan 				 * checksum calculation, do it here then put the
4645bafec742SSukumar Swaminathan 				 * result to the data passed to the chip
4646bafec742SSukumar Swaminathan 				 */
4647bafec742SSukumar Swaminathan 				if (qlge->cfg_flags &
4648bafec742SSukumar Swaminathan 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
4649bafec742SSukumar Swaminathan 					ql_lso_pseudo_cksum((uint8_t *)iphdr);
4650bafec742SSukumar Swaminathan 			}
4651bafec742SSukumar Swaminathan 		}
4652bafec742SSukumar Swaminathan 	}
4653bafec742SSukumar Swaminathan }
4654bafec742SSukumar Swaminathan 
4655bafec742SSukumar Swaminathan /*
4656bafec742SSukumar Swaminathan  * Generic packet sending function which is used to send one packet.
4657bafec742SSukumar Swaminathan  */
4658bafec742SSukumar Swaminathan int
4659bafec742SSukumar Swaminathan ql_send_common(struct tx_ring *tx_ring, mblk_t *mp)
4660bafec742SSukumar Swaminathan {
4661bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_cb;
4662bafec742SSukumar Swaminathan 	struct ob_mac_iocb_req *mac_iocb_ptr;
4663bafec742SSukumar Swaminathan 	mblk_t *tp;
4664bafec742SSukumar Swaminathan 	size_t msg_len = 0;
4665bafec742SSukumar Swaminathan 	size_t off;
4666bafec742SSukumar Swaminathan 	caddr_t bp;
4667bafec742SSukumar Swaminathan 	size_t nbyte, total_len;
4668bafec742SSukumar Swaminathan 	uint_t i = 0;
4669bafec742SSukumar Swaminathan 	int j = 0, frags = 0;
4670bafec742SSukumar Swaminathan 	uint32_t phy_addr_low, phy_addr_high;
4671bafec742SSukumar Swaminathan 	uint64_t phys_addr;
4672bafec742SSukumar Swaminathan 	clock_t now;
4673bafec742SSukumar Swaminathan 	uint32_t pflags = 0;
4674bafec742SSukumar Swaminathan 	uint32_t mss = 0;
4675bafec742SSukumar Swaminathan 	enum tx_mode_t tx_mode;
4676bafec742SSukumar Swaminathan 	struct oal_entry *oal_entry;
4677bafec742SSukumar Swaminathan 	int status;
4678bafec742SSukumar Swaminathan 	uint_t ncookies, oal_entries, max_oal_entries;
4679bafec742SSukumar Swaminathan 	size_t max_seg_len = 0;
4680bafec742SSukumar Swaminathan 	boolean_t use_lso = B_FALSE;
4681bafec742SSukumar Swaminathan 	struct oal_entry *tx_entry = NULL;
4682bafec742SSukumar Swaminathan 	struct oal_entry *last_oal_entry;
4683bafec742SSukumar Swaminathan 	qlge_t *qlge = tx_ring->qlge;
4684bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
4685bafec742SSukumar Swaminathan 	size_t tx_buf_len = QL_MAX_COPY_LENGTH;
4686bafec742SSukumar Swaminathan 	int force_pullup = 0;
4687bafec742SSukumar Swaminathan 
4688bafec742SSukumar Swaminathan 	tp = mp;
4689bafec742SSukumar Swaminathan 	total_len = msg_len = 0;
4690bafec742SSukumar Swaminathan 	max_oal_entries = TX_DESC_PER_IOCB + MAX_SG_ELEMENTS-1;
4691bafec742SSukumar Swaminathan 
4692bafec742SSukumar Swaminathan 	/* Calculate number of data and segments in the incoming message */
4693bafec742SSukumar Swaminathan 	for (tp = mp; tp != NULL; tp = tp->b_cont) {
4694bafec742SSukumar Swaminathan 		nbyte = MBLKL(tp);
4695bafec742SSukumar Swaminathan 		total_len += nbyte;
4696bafec742SSukumar Swaminathan 		max_seg_len = max(nbyte, max_seg_len);
4697bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("Requested sending data in %d segments, "
4698bafec742SSukumar Swaminathan 		    "total length: %d\n", frags, nbyte));
4699bafec742SSukumar Swaminathan 		frags++;
4700bafec742SSukumar Swaminathan 	}
4701bafec742SSukumar Swaminathan 
4702bafec742SSukumar Swaminathan 	if (total_len >= QL_LSO_MAX) {
4703bafec742SSukumar Swaminathan 		freemsg(mp);
4704bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
4705bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "%s: quit, packet oversize %d\n",
4706bafec742SSukumar Swaminathan 		    __func__, (int)total_len);
4707bafec742SSukumar Swaminathan #endif
4708bafec742SSukumar Swaminathan 		return (NULL);
4709bafec742SSukumar Swaminathan 	}
4710bafec742SSukumar Swaminathan 
4711bafec742SSukumar Swaminathan 	bp = (caddr_t)mp->b_rptr;
4712bafec742SSukumar Swaminathan 	if (bp[0] & 1) {
4713bafec742SSukumar Swaminathan 		if (bcmp(bp, ql_ether_broadcast_addr.ether_addr_octet,
4714bafec742SSukumar Swaminathan 		    ETHERADDRL) == 0) {
4715bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX, ("Broadcast packet\n"));
4716bafec742SSukumar Swaminathan 			tx_ring->brdcstxmt++;
4717bafec742SSukumar Swaminathan 		} else {
4718bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX, ("multicast packet\n"));
4719bafec742SSukumar Swaminathan 			tx_ring->multixmt++;
4720bafec742SSukumar Swaminathan 		}
4721bafec742SSukumar Swaminathan 	}
4722bafec742SSukumar Swaminathan 
4723bafec742SSukumar Swaminathan 	tx_ring->obytes += total_len;
4724bafec742SSukumar Swaminathan 	tx_ring->opackets ++;
4725bafec742SSukumar Swaminathan 
4726bafec742SSukumar Swaminathan 	QL_PRINT(DBG_TX, ("total requested sending data length: %d, in %d segs,"
4727bafec742SSukumar Swaminathan 	    " max seg len: %d\n", total_len, frags, max_seg_len));
4728bafec742SSukumar Swaminathan 
4729bafec742SSukumar Swaminathan 	/* claim a free slot in tx ring */
4730bafec742SSukumar Swaminathan 	tx_cb = &tx_ring->wq_desc[tx_ring->prod_idx];
4731bafec742SSukumar Swaminathan 
4732bafec742SSukumar Swaminathan 	/* get the tx descriptor */
4733bafec742SSukumar Swaminathan 	mac_iocb_ptr = tx_cb->queue_entry;
4734bafec742SSukumar Swaminathan 
4735bafec742SSukumar Swaminathan 	bzero((void *)mac_iocb_ptr, sizeof (*mac_iocb_ptr));
4736bafec742SSukumar Swaminathan 
4737bafec742SSukumar Swaminathan 	ASSERT(tx_cb->mp == NULL);
4738bafec742SSukumar Swaminathan 
4739bafec742SSukumar Swaminathan 	/*
4740bafec742SSukumar Swaminathan 	 * Decide to use DMA map or copy mode.
4741bafec742SSukumar Swaminathan 	 * DMA map mode must be used when the total msg length is more than the
4742bafec742SSukumar Swaminathan 	 * tx buffer length.
4743bafec742SSukumar Swaminathan 	 */
4744bafec742SSukumar Swaminathan 
4745bafec742SSukumar Swaminathan 	if (total_len > tx_buf_len)
4746bafec742SSukumar Swaminathan 		tx_mode = USE_DMA;
4747bafec742SSukumar Swaminathan 	else if	(max_seg_len > QL_MAX_COPY_LENGTH)
4748bafec742SSukumar Swaminathan 		tx_mode = USE_DMA;
4749bafec742SSukumar Swaminathan 	else
4750bafec742SSukumar Swaminathan 		tx_mode = USE_COPY;
4751bafec742SSukumar Swaminathan 
4752bafec742SSukumar Swaminathan 	if (qlge->chksum_cap) {
4753bafec742SSukumar Swaminathan 		hcksum_retrieve(mp, NULL, NULL, NULL,
4754bafec742SSukumar Swaminathan 		    NULL, NULL, NULL, &pflags);
4755bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("checksum flag is :0x%x, card capability "
4756bafec742SSukumar Swaminathan 		    "is 0x%x \n", pflags, qlge->chksum_cap));
4757bafec742SSukumar Swaminathan 		if (qlge->lso_enable) {
4758bafec742SSukumar Swaminathan 			uint32_t lso_flags = 0;
4759bafec742SSukumar Swaminathan 			lso_info_get(mp, &mss, &lso_flags);
4760bafec742SSukumar Swaminathan 			use_lso = (lso_flags == HW_LSO);
4761bafec742SSukumar Swaminathan 		}
4762bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("mss :%d, use_lso %x \n",
4763bafec742SSukumar Swaminathan 		    mss, use_lso));
4764bafec742SSukumar Swaminathan 	}
4765bafec742SSukumar Swaminathan 
4766bafec742SSukumar Swaminathan do_pullup:
4767bafec742SSukumar Swaminathan 
4768bafec742SSukumar Swaminathan 	/* concatenate all frags into one large packet if too fragmented */
4769bafec742SSukumar Swaminathan 	if (((tx_mode == USE_DMA)&&(frags > QL_MAX_TX_DMA_HANDLES)) ||
4770bafec742SSukumar Swaminathan 	    force_pullup) {
4771bafec742SSukumar Swaminathan 		mblk_t *mp1;
4772bafec742SSukumar Swaminathan 		if ((mp1 = msgpullup(mp, -1)) != NULL) {
4773bafec742SSukumar Swaminathan 			freemsg(mp);
4774bafec742SSukumar Swaminathan 			mp = mp1;
4775bafec742SSukumar Swaminathan 			frags = 1;
4776bafec742SSukumar Swaminathan 		} else {
4777bafec742SSukumar Swaminathan 			tx_ring->tx_fail_dma_bind++;
4778bafec742SSukumar Swaminathan 			goto bad;
4779bafec742SSukumar Swaminathan 		}
4780bafec742SSukumar Swaminathan 	}
4781bafec742SSukumar Swaminathan 
4782bafec742SSukumar Swaminathan 	tx_cb->tx_bytes = (uint32_t)total_len;
4783bafec742SSukumar Swaminathan 	tx_cb->mp = mp;
4784bafec742SSukumar Swaminathan 	tx_cb->tx_dma_handle_used = 0;
4785bafec742SSukumar Swaminathan 
4786bafec742SSukumar Swaminathan 	if (tx_mode == USE_DMA) {
4787bafec742SSukumar Swaminathan 		msg_len = total_len;
4788bafec742SSukumar Swaminathan 
4789bafec742SSukumar Swaminathan 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
4790bafec742SSukumar Swaminathan 		mac_iocb_ptr->tid = tx_ring->prod_idx;
4791bafec742SSukumar Swaminathan 		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
4792bafec742SSukumar Swaminathan 		mac_iocb_ptr->txq_idx = tx_ring->wq_id;
4793bafec742SSukumar Swaminathan 
4794bafec742SSukumar Swaminathan 		tx_entry = &mac_iocb_ptr->oal_entry[0];
4795bafec742SSukumar Swaminathan 		oal_entry = NULL;
4796bafec742SSukumar Swaminathan 
4797bafec742SSukumar Swaminathan 		for (tp = mp, oal_entries = j = 0; tp != NULL;
4798bafec742SSukumar Swaminathan 		    tp = tp->b_cont) {
4799bafec742SSukumar Swaminathan 			/* if too many tx dma handles needed */
4800bafec742SSukumar Swaminathan 			if (j >= QL_MAX_TX_DMA_HANDLES) {
4801bafec742SSukumar Swaminathan 				tx_ring->tx_no_dma_handle++;
4802bafec742SSukumar Swaminathan 				if (!force_pullup) {
4803bafec742SSukumar Swaminathan 					force_pullup = 1;
4804bafec742SSukumar Swaminathan 					goto do_pullup;
4805bafec742SSukumar Swaminathan 				} else {
4806bafec742SSukumar Swaminathan 					goto bad;
4807bafec742SSukumar Swaminathan 				}
4808bafec742SSukumar Swaminathan 			}
4809bafec742SSukumar Swaminathan 			nbyte = (uint16_t)MBLKL(tp);
4810bafec742SSukumar Swaminathan 			if (nbyte == 0)
4811bafec742SSukumar Swaminathan 				continue;
4812bafec742SSukumar Swaminathan 
4813bafec742SSukumar Swaminathan 			status = ddi_dma_addr_bind_handle(
4814bafec742SSukumar Swaminathan 			    tx_cb->tx_dma_handle[j], NULL,
4815bafec742SSukumar Swaminathan 			    (caddr_t)tp->b_rptr, nbyte,
4816bafec742SSukumar Swaminathan 			    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
4817bafec742SSukumar Swaminathan 			    0, &dma_cookie, &ncookies);
4818bafec742SSukumar Swaminathan 
4819bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX, ("map sending data segment: %d, "
4820bafec742SSukumar Swaminathan 			    "length: %d, spans in %d cookies\n",
4821bafec742SSukumar Swaminathan 			    j, nbyte, ncookies));
4822bafec742SSukumar Swaminathan 
4823bafec742SSukumar Swaminathan 			if (status != DDI_DMA_MAPPED) {
4824bafec742SSukumar Swaminathan 				goto bad;
4825bafec742SSukumar Swaminathan 			}
4826bafec742SSukumar Swaminathan 			/*
4827bafec742SSukumar Swaminathan 			 * Each fragment can span several cookies. One cookie
4828bafec742SSukumar Swaminathan 			 * will use one tx descriptor to transmit.
4829bafec742SSukumar Swaminathan 			 */
4830bafec742SSukumar Swaminathan 			for (i = ncookies; i > 0; i--, tx_entry++,
4831bafec742SSukumar Swaminathan 			    oal_entries++) {
4832bafec742SSukumar Swaminathan 				/*
4833bafec742SSukumar Swaminathan 				 * The number of TX descriptors that can be
4834bafec742SSukumar Swaminathan 				 *  saved in tx iocb and oal list is limited
4835bafec742SSukumar Swaminathan 				 */
4836bafec742SSukumar Swaminathan 				if (oal_entries > max_oal_entries) {
4837bafec742SSukumar Swaminathan 					tx_ring->tx_no_dma_cookie++;
4838bafec742SSukumar Swaminathan 					if (!force_pullup) {
4839bafec742SSukumar Swaminathan 						force_pullup = 1;
4840bafec742SSukumar Swaminathan 						goto do_pullup;
4841bafec742SSukumar Swaminathan 					} else {
4842bafec742SSukumar Swaminathan 						goto bad;
4843bafec742SSukumar Swaminathan 					}
4844bafec742SSukumar Swaminathan 				}
4845bafec742SSukumar Swaminathan 
4846bafec742SSukumar Swaminathan 				if ((oal_entries == TX_DESC_PER_IOCB) &&
4847bafec742SSukumar Swaminathan 				    !oal_entry) {
4848bafec742SSukumar Swaminathan 					/*
4849bafec742SSukumar Swaminathan 					 * Time to switch to an oal list
4850bafec742SSukumar Swaminathan 					 * The last entry should be copied
4851bafec742SSukumar Swaminathan 					 * to first entry in the oal list
4852bafec742SSukumar Swaminathan 					 */
4853bafec742SSukumar Swaminathan 					oal_entry = tx_cb->oal;
4854bafec742SSukumar Swaminathan 					tx_entry =
4855bafec742SSukumar Swaminathan 					    &mac_iocb_ptr->oal_entry[
4856bafec742SSukumar Swaminathan 					    TX_DESC_PER_IOCB-1];
4857bafec742SSukumar Swaminathan 					bcopy(tx_entry, oal_entry,
4858bafec742SSukumar Swaminathan 					    sizeof (*oal_entry));
4859bafec742SSukumar Swaminathan 
4860bafec742SSukumar Swaminathan 					/*
4861bafec742SSukumar Swaminathan 					 * last entry should be updated to
4862bafec742SSukumar Swaminathan 					 * point to the extended oal list itself
4863bafec742SSukumar Swaminathan 					 */
4864bafec742SSukumar Swaminathan 					tx_entry->buf_addr_low =
4865bafec742SSukumar Swaminathan 					    cpu_to_le32(
4866bafec742SSukumar Swaminathan 					    LS_64BITS(tx_cb->oal_dma_addr));
4867bafec742SSukumar Swaminathan 					tx_entry->buf_addr_high =
4868bafec742SSukumar Swaminathan 					    cpu_to_le32(
4869bafec742SSukumar Swaminathan 					    MS_64BITS(tx_cb->oal_dma_addr));
4870bafec742SSukumar Swaminathan 					/*
4871bafec742SSukumar Swaminathan 					 * Point tx_entry to the oal list
4872bafec742SSukumar Swaminathan 					 * second entry
4873bafec742SSukumar Swaminathan 					 */
4874bafec742SSukumar Swaminathan 					tx_entry = &oal_entry[1];
4875bafec742SSukumar Swaminathan 				}
4876bafec742SSukumar Swaminathan 
4877bafec742SSukumar Swaminathan 				tx_entry->buf_len =
4878bafec742SSukumar Swaminathan 				    (uint32_t)cpu_to_le32(dma_cookie.dmac_size);
4879bafec742SSukumar Swaminathan 				phys_addr = dma_cookie.dmac_laddress;
4880bafec742SSukumar Swaminathan 				tx_entry->buf_addr_low =
4881bafec742SSukumar Swaminathan 				    cpu_to_le32(LS_64BITS(phys_addr));
4882bafec742SSukumar Swaminathan 				tx_entry->buf_addr_high =
4883bafec742SSukumar Swaminathan 				    cpu_to_le32(MS_64BITS(phys_addr));
4884bafec742SSukumar Swaminathan 
4885bafec742SSukumar Swaminathan 				last_oal_entry = tx_entry;
4886bafec742SSukumar Swaminathan 
4887bafec742SSukumar Swaminathan 				if (i > 1)
4888bafec742SSukumar Swaminathan 					ddi_dma_nextcookie(
4889bafec742SSukumar Swaminathan 					    tx_cb->tx_dma_handle[j],
4890bafec742SSukumar Swaminathan 					    &dma_cookie);
4891bafec742SSukumar Swaminathan 			}
4892bafec742SSukumar Swaminathan 			j++;
4893bafec742SSukumar Swaminathan 		}
4894bafec742SSukumar Swaminathan 		/*
4895bafec742SSukumar Swaminathan 		 * if OAL is used, the last oal entry in tx iocb indicates
4896bafec742SSukumar Swaminathan 		 * number of additional address/len pairs in OAL
4897bafec742SSukumar Swaminathan 		 */
4898bafec742SSukumar Swaminathan 		if (oal_entries > TX_DESC_PER_IOCB) {
4899bafec742SSukumar Swaminathan 			tx_entry = &mac_iocb_ptr->oal_entry[TX_DESC_PER_IOCB-1];
4900bafec742SSukumar Swaminathan 			tx_entry->buf_len = (uint32_t)
4901bafec742SSukumar Swaminathan 			    (cpu_to_le32((sizeof (struct oal_entry) *
4902bafec742SSukumar Swaminathan 			    (oal_entries -TX_DESC_PER_IOCB+1))|OAL_CONT_ENTRY));
4903bafec742SSukumar Swaminathan 		}
4904bafec742SSukumar Swaminathan 		last_oal_entry->buf_len = cpu_to_le32(
4905bafec742SSukumar Swaminathan 		    le32_to_cpu(last_oal_entry->buf_len)|OAL_LAST_ENTRY);
4906bafec742SSukumar Swaminathan 
4907bafec742SSukumar Swaminathan 		tx_cb->tx_dma_handle_used = j;
4908bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("total tx_dma_handle_used %d cookies %d \n",
4909bafec742SSukumar Swaminathan 		    j, oal_entries));
4910bafec742SSukumar Swaminathan 
4911bafec742SSukumar Swaminathan 		bp = (caddr_t)mp->b_rptr;
4912bafec742SSukumar Swaminathan 	}
4913bafec742SSukumar Swaminathan 	if (tx_mode == USE_COPY) {
4914bafec742SSukumar Swaminathan 		bp = tx_cb->copy_buffer;
4915bafec742SSukumar Swaminathan 		off = 0;
4916bafec742SSukumar Swaminathan 		nbyte = 0;
4917bafec742SSukumar Swaminathan 		frags = 0;
4918bafec742SSukumar Swaminathan 		/*
4919bafec742SSukumar Swaminathan 		 * Copy up to tx_buf_len of the transmit data
4920bafec742SSukumar Swaminathan 		 * from mp to tx buffer
4921bafec742SSukumar Swaminathan 		 */
4922bafec742SSukumar Swaminathan 		for (tp = mp; tp != NULL; tp = tp->b_cont) {
4923bafec742SSukumar Swaminathan 			nbyte = MBLKL(tp);
4924bafec742SSukumar Swaminathan 			if ((off + nbyte) <= tx_buf_len) {
4925bafec742SSukumar Swaminathan 				bcopy(tp->b_rptr, &bp[off], nbyte);
4926bafec742SSukumar Swaminathan 				off += nbyte;
4927bafec742SSukumar Swaminathan 				frags ++;
4928bafec742SSukumar Swaminathan 			}
4929bafec742SSukumar Swaminathan 		}
4930bafec742SSukumar Swaminathan 
4931bafec742SSukumar Swaminathan 		msg_len = off;
4932bafec742SSukumar Swaminathan 
4933bafec742SSukumar Swaminathan 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
4934bafec742SSukumar Swaminathan 		mac_iocb_ptr->tid = tx_ring->prod_idx;
4935bafec742SSukumar Swaminathan 		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
4936bafec742SSukumar Swaminathan 		mac_iocb_ptr->txq_idx = tx_ring->wq_id;
4937bafec742SSukumar Swaminathan 
4938bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("Copy Mode:actual sent data length is: %d, "
4939bafec742SSukumar Swaminathan 		    "from %d segaments\n", msg_len, frags));
4940bafec742SSukumar Swaminathan 
4941bafec742SSukumar Swaminathan 		phys_addr = tx_cb->copy_buffer_dma_addr;
4942bafec742SSukumar Swaminathan 		phy_addr_low = cpu_to_le32(LS_64BITS(phys_addr));
4943bafec742SSukumar Swaminathan 		phy_addr_high = cpu_to_le32(MS_64BITS(phys_addr));
4944bafec742SSukumar Swaminathan 
4945bafec742SSukumar Swaminathan 		QL_DUMP(DBG_TX, "\t requested sending data:\n",
4946bafec742SSukumar Swaminathan 		    (uint8_t *)tx_cb->copy_buffer, 8, total_len);
4947bafec742SSukumar Swaminathan 
4948bafec742SSukumar Swaminathan 		mac_iocb_ptr->oal_entry[0].buf_len = (uint32_t)
4949bafec742SSukumar Swaminathan 		    cpu_to_le32(msg_len | OAL_LAST_ENTRY);
4950bafec742SSukumar Swaminathan 		mac_iocb_ptr->oal_entry[0].buf_addr_low  = phy_addr_low;
4951bafec742SSukumar Swaminathan 		mac_iocb_ptr->oal_entry[0].buf_addr_high = phy_addr_high;
4952bafec742SSukumar Swaminathan 
4953bafec742SSukumar Swaminathan 		freemsg(mp); /* no need, we have copied */
4954bafec742SSukumar Swaminathan 		tx_cb->mp = NULL;
4955bafec742SSukumar Swaminathan 	} /* End of Copy Mode */
4956bafec742SSukumar Swaminathan 
4957bafec742SSukumar Swaminathan 	/* Do TSO/LSO on TCP packet? */
4958bafec742SSukumar Swaminathan 	if (use_lso && mss) {
4959bafec742SSukumar Swaminathan 		ql_hw_lso_setup(qlge, mss, bp, mac_iocb_ptr);
4960bafec742SSukumar Swaminathan 	} else if (pflags & qlge->chksum_cap) {
4961bafec742SSukumar Swaminathan 		/* Do checksum offloading */
4962bafec742SSukumar Swaminathan 		ql_hw_csum_setup(qlge, pflags, bp, mac_iocb_ptr);
4963bafec742SSukumar Swaminathan 	}
4964bafec742SSukumar Swaminathan 
4965bafec742SSukumar Swaminathan 	/* let device know the latest outbound IOCB */
4966bafec742SSukumar Swaminathan 	(void) ddi_dma_sync(tx_ring->wq_dma.dma_handle,
4967bafec742SSukumar Swaminathan 	    (off_t)((uintptr_t)mac_iocb_ptr - (uintptr_t)tx_ring->wq_dma.vaddr),
4968bafec742SSukumar Swaminathan 	    (size_t)sizeof (*mac_iocb_ptr), DDI_DMA_SYNC_FORDEV);
4969bafec742SSukumar Swaminathan 
4970bafec742SSukumar Swaminathan 	if (tx_mode == USE_DMA) {
4971bafec742SSukumar Swaminathan 		/* let device know the latest outbound OAL if necessary */
4972bafec742SSukumar Swaminathan 		if (oal_entries > TX_DESC_PER_IOCB) {
4973bafec742SSukumar Swaminathan 			(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
4974bafec742SSukumar Swaminathan 			    (off_t)0,
4975bafec742SSukumar Swaminathan 			    (sizeof (struct oal_entry) *
4976bafec742SSukumar Swaminathan 			    (oal_entries -TX_DESC_PER_IOCB+1)),
4977bafec742SSukumar Swaminathan 			    DDI_DMA_SYNC_FORDEV);
4978bafec742SSukumar Swaminathan 		}
4979bafec742SSukumar Swaminathan 	} else { /* for USE_COPY mode, tx buffer has changed */
4980bafec742SSukumar Swaminathan 		/* let device know the latest change */
4981bafec742SSukumar Swaminathan 		(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
4982bafec742SSukumar Swaminathan 		/* copy buf offset */
4983bafec742SSukumar Swaminathan 		    (off_t)(sizeof (oal_entry) * MAX_SG_ELEMENTS),
4984bafec742SSukumar Swaminathan 		    msg_len, DDI_DMA_SYNC_FORDEV);
4985bafec742SSukumar Swaminathan 	}
4986bafec742SSukumar Swaminathan 
4987bafec742SSukumar Swaminathan 	/* save how the packet was sent */
4988bafec742SSukumar Swaminathan 	tx_cb->tx_type = tx_mode;
4989bafec742SSukumar Swaminathan 
4990bafec742SSukumar Swaminathan 	QL_DUMP_REQ_PKT(qlge, mac_iocb_ptr, tx_cb->oal, oal_entries);
4991bafec742SSukumar Swaminathan 	/* reduce the number of available tx slot */
4992bafec742SSukumar Swaminathan 	atomic_dec_32(&tx_ring->tx_free_count);
4993bafec742SSukumar Swaminathan 
4994bafec742SSukumar Swaminathan 	tx_ring->prod_idx++;
4995bafec742SSukumar Swaminathan 	if (tx_ring->prod_idx >= tx_ring->wq_len)
4996bafec742SSukumar Swaminathan 		tx_ring->prod_idx = 0;
4997bafec742SSukumar Swaminathan 
4998bafec742SSukumar Swaminathan 	now = ddi_get_lbolt();
4999bafec742SSukumar Swaminathan 	qlge->last_tx_time = now;
5000bafec742SSukumar Swaminathan 
5001bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
5002bafec742SSukumar Swaminathan 
5003bafec742SSukumar Swaminathan bad:
5004bafec742SSukumar Swaminathan 	/*
5005bafec742SSukumar Swaminathan 	 * if for any reason driver can not send, delete
5006bafec742SSukumar Swaminathan 	 * the message pointer, mp
5007bafec742SSukumar Swaminathan 	 */
5008bafec742SSukumar Swaminathan 	now = ddi_get_lbolt();
5009bafec742SSukumar Swaminathan 	freemsg(mp);
5010bafec742SSukumar Swaminathan 	mp = NULL;
5011bafec742SSukumar Swaminathan 	for (i = 0; i < j; i++)
5012bafec742SSukumar Swaminathan 		(void) ddi_dma_unbind_handle(tx_cb->tx_dma_handle[i]);
5013bafec742SSukumar Swaminathan 
5014bafec742SSukumar Swaminathan 	QL_PRINT(DBG_TX, ("%s(%d) failed at 0x%x",
5015bafec742SSukumar Swaminathan 	    __func__, qlge->instance, (int)now));
5016bafec742SSukumar Swaminathan 
5017bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
5018bafec742SSukumar Swaminathan }
5019bafec742SSukumar Swaminathan 
5020bafec742SSukumar Swaminathan 
5021bafec742SSukumar Swaminathan /*
5022bafec742SSukumar Swaminathan  * Initializes hardware and driver software flags before the driver
5023bafec742SSukumar Swaminathan  * is finally ready to work.
5024bafec742SSukumar Swaminathan  */
5025bafec742SSukumar Swaminathan int
5026bafec742SSukumar Swaminathan ql_do_start(qlge_t *qlge)
5027bafec742SSukumar Swaminathan {
5028bafec742SSukumar Swaminathan 	int i;
5029bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
5030bafec742SSukumar Swaminathan 	uint16_t lbq_buf_size;
5031bafec742SSukumar Swaminathan 	int rings_done;
5032bafec742SSukumar Swaminathan 
5033bafec742SSukumar Swaminathan 	ASSERT(qlge != NULL);
5034bafec742SSukumar Swaminathan 
5035bafec742SSukumar Swaminathan 	mutex_enter(&qlge->hw_mutex);
5036bafec742SSukumar Swaminathan 
5037bafec742SSukumar Swaminathan 	/* Reset adapter */
5038*0662fbf4SSukumar Swaminathan 	(void) ql_asic_reset(qlge);
5039bafec742SSukumar Swaminathan 
5040bafec742SSukumar Swaminathan 	lbq_buf_size = (uint16_t)
5041bafec742SSukumar Swaminathan 	    ((qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE);
5042bafec742SSukumar Swaminathan 	if (qlge->rx_ring[0].lbq_buf_size != lbq_buf_size) {
5043bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
5044bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "realloc buffers old: %d new: %d\n",
5045bafec742SSukumar Swaminathan 		    qlge->rx_ring[0].lbq_buf_size, lbq_buf_size);
5046bafec742SSukumar Swaminathan #endif
5047bafec742SSukumar Swaminathan 		/*
5048bafec742SSukumar Swaminathan 		 * Check if any ring has buffers still with upper layers
5049bafec742SSukumar Swaminathan 		 * If buffers are pending with upper layers, we use the
5050bafec742SSukumar Swaminathan 		 * existing buffers and don't reallocate new ones
5051bafec742SSukumar Swaminathan 		 * Unfortunately there is no way to evict buffers from
5052bafec742SSukumar Swaminathan 		 * upper layers. Using buffers with the current size may
5053bafec742SSukumar Swaminathan 		 * cause slightly sub-optimal performance, but that seems
5054bafec742SSukumar Swaminathan 		 * to be the easiest way to handle this situation.
5055bafec742SSukumar Swaminathan 		 */
5056bafec742SSukumar Swaminathan 		rings_done = 0;
5057bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->rx_ring_count; i++) {
5058bafec742SSukumar Swaminathan 			rx_ring = &qlge->rx_ring[i];
5059bafec742SSukumar Swaminathan 			if (rx_ring->rx_indicate == 0)
5060bafec742SSukumar Swaminathan 				rings_done++;
5061bafec742SSukumar Swaminathan 			else
5062bafec742SSukumar Swaminathan 				break;
5063bafec742SSukumar Swaminathan 		}
5064bafec742SSukumar Swaminathan 		/*
5065bafec742SSukumar Swaminathan 		 * No buffers pending with upper layers;
5066bafec742SSukumar Swaminathan 		 * reallocte them for new MTU size
5067bafec742SSukumar Swaminathan 		 */
5068bafec742SSukumar Swaminathan 		if (rings_done >= qlge->rx_ring_count) {
5069bafec742SSukumar Swaminathan 			/* free large buffer pool */
5070bafec742SSukumar Swaminathan 			for (i = 0; i < qlge->rx_ring_count; i++) {
5071bafec742SSukumar Swaminathan 				rx_ring = &qlge->rx_ring[i];
5072bafec742SSukumar Swaminathan 				if (rx_ring->type != TX_Q) {
5073bafec742SSukumar Swaminathan 					ql_free_sbq_buffers(rx_ring);
5074bafec742SSukumar Swaminathan 					ql_free_lbq_buffers(rx_ring);
5075bafec742SSukumar Swaminathan 				}
5076bafec742SSukumar Swaminathan 			}
5077bafec742SSukumar Swaminathan 			/* reallocate large buffer pool */
5078bafec742SSukumar Swaminathan 			for (i = 0; i < qlge->rx_ring_count; i++) {
5079bafec742SSukumar Swaminathan 				rx_ring = &qlge->rx_ring[i];
5080bafec742SSukumar Swaminathan 				if (rx_ring->type != TX_Q) {
5081*0662fbf4SSukumar Swaminathan 					(void) ql_alloc_sbufs(qlge, rx_ring);
5082*0662fbf4SSukumar Swaminathan 					(void) ql_alloc_lbufs(qlge, rx_ring);
5083bafec742SSukumar Swaminathan 				}
5084bafec742SSukumar Swaminathan 			}
5085bafec742SSukumar Swaminathan 		}
5086bafec742SSukumar Swaminathan 	}
5087bafec742SSukumar Swaminathan 
5088bafec742SSukumar Swaminathan 	if (ql_bringup_adapter(qlge) != DDI_SUCCESS) {
5089bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "qlge bringup adapter failed");
5090bafec742SSukumar Swaminathan 		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
5091bafec742SSukumar Swaminathan 		mutex_exit(&qlge->hw_mutex);
5092bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
5093bafec742SSukumar Swaminathan 	}
5094bafec742SSukumar Swaminathan 
5095bafec742SSukumar Swaminathan 	mutex_exit(&qlge->hw_mutex);
5096bafec742SSukumar Swaminathan 
5097bafec742SSukumar Swaminathan 	/* Get current link state */
5098bafec742SSukumar Swaminathan 	qlge->port_link_state = ql_get_link_state(qlge);
5099bafec742SSukumar Swaminathan 
5100bafec742SSukumar Swaminathan 	if (qlge->port_link_state == LS_UP) {
5101bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5102bafec742SSukumar Swaminathan 		    __func__, qlge->instance));
5103bafec742SSukumar Swaminathan 		/* If driver detects a carrier on */
5104bafec742SSukumar Swaminathan 		CARRIER_ON(qlge);
5105bafec742SSukumar Swaminathan 	} else {
5106bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5107bafec742SSukumar Swaminathan 		    __func__, qlge->instance));
5108bafec742SSukumar Swaminathan 		/* If driver detects a lack of carrier */
5109bafec742SSukumar Swaminathan 		CARRIER_OFF(qlge);
5110bafec742SSukumar Swaminathan 	}
5111bafec742SSukumar Swaminathan 	qlge->mac_flags = QL_MAC_STARTED;
5112bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
5113bafec742SSukumar Swaminathan }
5114bafec742SSukumar Swaminathan 
5115bafec742SSukumar Swaminathan /*
5116bafec742SSukumar Swaminathan  * Stop currently running driver
5117bafec742SSukumar Swaminathan  * Driver needs to stop routing new packets to driver and wait until
5118bafec742SSukumar Swaminathan  * all pending tx/rx buffers to be free-ed.
5119bafec742SSukumar Swaminathan  */
5120bafec742SSukumar Swaminathan int
5121bafec742SSukumar Swaminathan ql_do_stop(qlge_t *qlge)
5122bafec742SSukumar Swaminathan {
5123bafec742SSukumar Swaminathan 	int rc = DDI_FAILURE;
5124bafec742SSukumar Swaminathan 	uint32_t i, j, k;
5125bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc, *lbq_desc;
5126bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
5127bafec742SSukumar Swaminathan 
5128bafec742SSukumar Swaminathan 	ASSERT(qlge != NULL);
5129bafec742SSukumar Swaminathan 
5130bafec742SSukumar Swaminathan 	CARRIER_OFF(qlge);
5131bafec742SSukumar Swaminathan 
5132bafec742SSukumar Swaminathan 	rc = ql_bringdown_adapter(qlge);
5133bafec742SSukumar Swaminathan 	if (rc != DDI_SUCCESS) {
5134bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "qlge bringdown adapter failed.");
5135bafec742SSukumar Swaminathan 	} else
5136bafec742SSukumar Swaminathan 		rc = DDI_SUCCESS;
5137bafec742SSukumar Swaminathan 
5138bafec742SSukumar Swaminathan 	for (k = 0; k < qlge->rx_ring_count; k++) {
5139bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[k];
5140bafec742SSukumar Swaminathan 		if (rx_ring->type != TX_Q) {
5141bafec742SSukumar Swaminathan 			j = rx_ring->lbq_use_head;
5142bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
5143bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "ring %d: move %d lbufs in use list"
5144bafec742SSukumar Swaminathan 			    " to free list %d\n total %d\n",
5145bafec742SSukumar Swaminathan 			    k, rx_ring->lbuf_in_use_count,
5146bafec742SSukumar Swaminathan 			    rx_ring->lbuf_free_count,
5147bafec742SSukumar Swaminathan 			    rx_ring->lbuf_in_use_count +
5148bafec742SSukumar Swaminathan 			    rx_ring->lbuf_free_count);
5149bafec742SSukumar Swaminathan #endif
5150bafec742SSukumar Swaminathan 			for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
5151bafec742SSukumar Swaminathan 				lbq_desc = rx_ring->lbuf_in_use[j];
5152bafec742SSukumar Swaminathan 				j++;
5153bafec742SSukumar Swaminathan 				if (j >= rx_ring->lbq_len) {
5154bafec742SSukumar Swaminathan 					j = 0;
5155bafec742SSukumar Swaminathan 				}
5156bafec742SSukumar Swaminathan 				if (lbq_desc->mp) {
5157bafec742SSukumar Swaminathan 					atomic_inc_32(&rx_ring->rx_indicate);
5158bafec742SSukumar Swaminathan 					freemsg(lbq_desc->mp);
5159bafec742SSukumar Swaminathan 				}
5160bafec742SSukumar Swaminathan 			}
5161bafec742SSukumar Swaminathan 			rx_ring->lbq_use_head = j;
5162bafec742SSukumar Swaminathan 			rx_ring->lbq_use_tail = j;
5163bafec742SSukumar Swaminathan 			rx_ring->lbuf_in_use_count = 0;
5164bafec742SSukumar Swaminathan 			j = rx_ring->sbq_use_head;
5165bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
5166bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "ring %d: move %d sbufs in use list,"
5167bafec742SSukumar Swaminathan 			    " to free list %d\n total %d \n",
5168bafec742SSukumar Swaminathan 			    k, rx_ring->sbuf_in_use_count,
5169bafec742SSukumar Swaminathan 			    rx_ring->sbuf_free_count,
5170bafec742SSukumar Swaminathan 			    rx_ring->sbuf_in_use_count +
5171bafec742SSukumar Swaminathan 			    rx_ring->sbuf_free_count);
5172bafec742SSukumar Swaminathan #endif
5173bafec742SSukumar Swaminathan 			for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
5174bafec742SSukumar Swaminathan 				sbq_desc = rx_ring->sbuf_in_use[j];
5175bafec742SSukumar Swaminathan 				j++;
5176bafec742SSukumar Swaminathan 				if (j >= rx_ring->sbq_len) {
5177bafec742SSukumar Swaminathan 					j = 0;
5178bafec742SSukumar Swaminathan 				}
5179bafec742SSukumar Swaminathan 				if (sbq_desc->mp) {
5180bafec742SSukumar Swaminathan 					atomic_inc_32(&rx_ring->rx_indicate);
5181bafec742SSukumar Swaminathan 					freemsg(sbq_desc->mp);
5182bafec742SSukumar Swaminathan 				}
5183bafec742SSukumar Swaminathan 			}
5184bafec742SSukumar Swaminathan 			rx_ring->sbq_use_head = j;
5185bafec742SSukumar Swaminathan 			rx_ring->sbq_use_tail = j;
5186bafec742SSukumar Swaminathan 			rx_ring->sbuf_in_use_count = 0;
5187bafec742SSukumar Swaminathan 		}
5188bafec742SSukumar Swaminathan 	}
5189bafec742SSukumar Swaminathan 
5190bafec742SSukumar Swaminathan 	qlge->mac_flags = QL_MAC_STOPPED;
5191bafec742SSukumar Swaminathan 
5192bafec742SSukumar Swaminathan 	return (rc);
5193bafec742SSukumar Swaminathan }
5194bafec742SSukumar Swaminathan 
5195bafec742SSukumar Swaminathan /*
5196bafec742SSukumar Swaminathan  * Support
5197bafec742SSukumar Swaminathan  */
5198bafec742SSukumar Swaminathan 
5199bafec742SSukumar Swaminathan void
5200bafec742SSukumar Swaminathan ql_disable_isr(qlge_t *qlge)
5201bafec742SSukumar Swaminathan {
5202bafec742SSukumar Swaminathan 	/*
5203bafec742SSukumar Swaminathan 	 * disable the hardware interrupt
5204bafec742SSukumar Swaminathan 	 */
5205bafec742SSukumar Swaminathan 	ISP_DISABLE_GLOBAL_INTRS(qlge);
5206bafec742SSukumar Swaminathan 
5207bafec742SSukumar Swaminathan 	qlge->flags &= ~INTERRUPTS_ENABLED;
5208bafec742SSukumar Swaminathan }
5209bafec742SSukumar Swaminathan 
5210bafec742SSukumar Swaminathan 
5211bafec742SSukumar Swaminathan 
5212bafec742SSukumar Swaminathan /*
5213bafec742SSukumar Swaminathan  * busy wait for 'usecs' microseconds.
5214bafec742SSukumar Swaminathan  */
5215bafec742SSukumar Swaminathan void
5216bafec742SSukumar Swaminathan qlge_delay(clock_t usecs)
5217bafec742SSukumar Swaminathan {
5218bafec742SSukumar Swaminathan 	drv_usecwait(usecs);
5219bafec742SSukumar Swaminathan }
5220bafec742SSukumar Swaminathan 
5221bafec742SSukumar Swaminathan /*
5222bafec742SSukumar Swaminathan  * retrieve firmware details.
5223bafec742SSukumar Swaminathan  */
5224bafec742SSukumar Swaminathan 
5225bafec742SSukumar Swaminathan pci_cfg_t *
5226bafec742SSukumar Swaminathan ql_get_pci_config(qlge_t *qlge)
5227bafec742SSukumar Swaminathan {
5228bafec742SSukumar Swaminathan 	return (&(qlge->pci_cfg));
5229bafec742SSukumar Swaminathan }
5230bafec742SSukumar Swaminathan 
5231bafec742SSukumar Swaminathan /*
5232bafec742SSukumar Swaminathan  * Get current Link status
5233bafec742SSukumar Swaminathan  */
5234bafec742SSukumar Swaminathan static uint32_t
5235bafec742SSukumar Swaminathan ql_get_link_state(qlge_t *qlge)
5236bafec742SSukumar Swaminathan {
5237bafec742SSukumar Swaminathan 	uint32_t bitToCheck = 0;
5238bafec742SSukumar Swaminathan 	uint32_t temp, linkState;
5239bafec742SSukumar Swaminathan 
5240bafec742SSukumar Swaminathan 	if (qlge->func_number == qlge->fn0_net) {
5241bafec742SSukumar Swaminathan 		bitToCheck = STS_PL0;
5242bafec742SSukumar Swaminathan 	} else {
5243bafec742SSukumar Swaminathan 		bitToCheck = STS_PL1;
5244bafec742SSukumar Swaminathan 	}
5245bafec742SSukumar Swaminathan 	temp = ql_read_reg(qlge, REG_STATUS);
5246bafec742SSukumar Swaminathan 	QL_PRINT(DBG_GLD, ("%s(%d) chip status reg: 0x%x\n",
5247bafec742SSukumar Swaminathan 	    __func__, qlge->instance, temp));
5248bafec742SSukumar Swaminathan 
5249bafec742SSukumar Swaminathan 	if (temp & bitToCheck) {
5250bafec742SSukumar Swaminathan 		linkState = LS_UP;
5251bafec742SSukumar Swaminathan 	} else {
5252bafec742SSukumar Swaminathan 		linkState = LS_DOWN;
5253bafec742SSukumar Swaminathan 	}
5254bafec742SSukumar Swaminathan 	if (CFG_IST(qlge, CFG_CHIP_8100)) {
5255bafec742SSukumar Swaminathan 		/* for Schultz, link Speed is fixed to 10G, full duplex */
5256bafec742SSukumar Swaminathan 		qlge->speed  = SPEED_10G;
5257bafec742SSukumar Swaminathan 		qlge->duplex = 1;
5258bafec742SSukumar Swaminathan 	}
5259bafec742SSukumar Swaminathan 	return (linkState);
5260bafec742SSukumar Swaminathan }
5261bafec742SSukumar Swaminathan /*
5262bafec742SSukumar Swaminathan  * Get current link status and report to OS
5263bafec742SSukumar Swaminathan  */
5264bafec742SSukumar Swaminathan static void
5265bafec742SSukumar Swaminathan ql_get_and_report_link_state(qlge_t *qlge)
5266bafec742SSukumar Swaminathan {
5267bafec742SSukumar Swaminathan 	uint32_t cur_link_state;
5268bafec742SSukumar Swaminathan 
5269bafec742SSukumar Swaminathan 	/* Get current link state */
5270bafec742SSukumar Swaminathan 	cur_link_state = ql_get_link_state(qlge);
5271bafec742SSukumar Swaminathan 	/* if link state has changed */
5272bafec742SSukumar Swaminathan 	if (cur_link_state != qlge->port_link_state) {
5273bafec742SSukumar Swaminathan 
5274bafec742SSukumar Swaminathan 		qlge->port_link_state = cur_link_state;
5275bafec742SSukumar Swaminathan 
5276bafec742SSukumar Swaminathan 		if (qlge->port_link_state == LS_UP) {
5277bafec742SSukumar Swaminathan 			QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5278bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
5279bafec742SSukumar Swaminathan 			/* If driver detects a carrier on */
5280bafec742SSukumar Swaminathan 			CARRIER_ON(qlge);
5281bafec742SSukumar Swaminathan 		} else {
5282bafec742SSukumar Swaminathan 			QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5283bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
5284bafec742SSukumar Swaminathan 			/* If driver detects a lack of carrier */
5285bafec742SSukumar Swaminathan 			CARRIER_OFF(qlge);
5286bafec742SSukumar Swaminathan 		}
5287bafec742SSukumar Swaminathan 	}
5288bafec742SSukumar Swaminathan }
5289bafec742SSukumar Swaminathan 
5290bafec742SSukumar Swaminathan /*
5291bafec742SSukumar Swaminathan  * timer callback function executed after timer expires
5292bafec742SSukumar Swaminathan  */
5293bafec742SSukumar Swaminathan static void
5294bafec742SSukumar Swaminathan ql_timer(void* arg)
5295bafec742SSukumar Swaminathan {
5296bafec742SSukumar Swaminathan 	ql_get_and_report_link_state((qlge_t *)arg);
5297bafec742SSukumar Swaminathan }
5298bafec742SSukumar Swaminathan 
5299bafec742SSukumar Swaminathan /*
5300bafec742SSukumar Swaminathan  * stop the running timer if activated
5301bafec742SSukumar Swaminathan  */
5302bafec742SSukumar Swaminathan static void
5303bafec742SSukumar Swaminathan ql_stop_timer(qlge_t *qlge)
5304bafec742SSukumar Swaminathan {
5305bafec742SSukumar Swaminathan 	timeout_id_t timer_id;
5306bafec742SSukumar Swaminathan 	/* Disable driver timer */
5307bafec742SSukumar Swaminathan 	if (qlge->ql_timer_timeout_id != NULL) {
5308bafec742SSukumar Swaminathan 		timer_id = qlge->ql_timer_timeout_id;
5309bafec742SSukumar Swaminathan 		qlge->ql_timer_timeout_id = NULL;
5310bafec742SSukumar Swaminathan 		(void) untimeout(timer_id);
5311bafec742SSukumar Swaminathan 	}
5312bafec742SSukumar Swaminathan }
5313bafec742SSukumar Swaminathan 
5314bafec742SSukumar Swaminathan /*
5315bafec742SSukumar Swaminathan  * stop then restart timer
5316bafec742SSukumar Swaminathan  */
5317bafec742SSukumar Swaminathan void
5318bafec742SSukumar Swaminathan ql_restart_timer(qlge_t *qlge)
5319bafec742SSukumar Swaminathan {
5320bafec742SSukumar Swaminathan 	ql_stop_timer(qlge);
5321bafec742SSukumar Swaminathan 	qlge->ql_timer_ticks = TICKS_PER_SEC / 4;
5322bafec742SSukumar Swaminathan 	qlge->ql_timer_timeout_id = timeout(ql_timer,
5323bafec742SSukumar Swaminathan 	    (void *)qlge, qlge->ql_timer_ticks);
5324bafec742SSukumar Swaminathan }
5325bafec742SSukumar Swaminathan 
5326bafec742SSukumar Swaminathan /* ************************************************************************* */
5327bafec742SSukumar Swaminathan /*
5328bafec742SSukumar Swaminathan  *		Hardware K-Stats Data Structures and Subroutines
5329bafec742SSukumar Swaminathan  */
5330bafec742SSukumar Swaminathan /* ************************************************************************* */
5331bafec742SSukumar Swaminathan static const ql_ksindex_t ql_kstats_hw[] = {
5332bafec742SSukumar Swaminathan 	/* PCI related hardware information */
5333bafec742SSukumar Swaminathan 	{ 0, "Vendor Id"			},
5334bafec742SSukumar Swaminathan 	{ 1, "Device Id"			},
5335bafec742SSukumar Swaminathan 	{ 2, "Command"				},
5336bafec742SSukumar Swaminathan 	{ 3, "Status"				},
5337bafec742SSukumar Swaminathan 	{ 4, "Revision Id"			},
5338bafec742SSukumar Swaminathan 	{ 5, "Cache Line Size"			},
5339bafec742SSukumar Swaminathan 	{ 6, "Latency Timer"			},
5340bafec742SSukumar Swaminathan 	{ 7, "Header Type"			},
5341bafec742SSukumar Swaminathan 	{ 9, "I/O base addr"			},
5342bafec742SSukumar Swaminathan 	{ 10, "Control Reg Base addr low"	},
5343bafec742SSukumar Swaminathan 	{ 11, "Control Reg Base addr high"	},
5344bafec742SSukumar Swaminathan 	{ 12, "Doorbell Reg Base addr low"	},
5345bafec742SSukumar Swaminathan 	{ 13, "Doorbell Reg Base addr high"	},
5346bafec742SSukumar Swaminathan 	{ 14, "Subsystem Vendor Id"		},
5347bafec742SSukumar Swaminathan 	{ 15, "Subsystem Device ID"		},
5348bafec742SSukumar Swaminathan 	{ 16, "PCIe Device Control"		},
5349bafec742SSukumar Swaminathan 	{ 17, "PCIe Link Status"		},
5350bafec742SSukumar Swaminathan 
5351bafec742SSukumar Swaminathan 	{ -1,	NULL				},
5352bafec742SSukumar Swaminathan };
5353bafec742SSukumar Swaminathan 
5354bafec742SSukumar Swaminathan /*
5355bafec742SSukumar Swaminathan  * kstat update function for PCI registers
5356bafec742SSukumar Swaminathan  */
5357bafec742SSukumar Swaminathan static int
5358bafec742SSukumar Swaminathan ql_kstats_get_pci_regs(kstat_t *ksp, int flag)
5359bafec742SSukumar Swaminathan {
5360bafec742SSukumar Swaminathan 	qlge_t *qlge;
5361bafec742SSukumar Swaminathan 	kstat_named_t *knp;
5362bafec742SSukumar Swaminathan 
5363bafec742SSukumar Swaminathan 	if (flag != KSTAT_READ)
5364bafec742SSukumar Swaminathan 		return (EACCES);
5365bafec742SSukumar Swaminathan 
5366bafec742SSukumar Swaminathan 	qlge = ksp->ks_private;
5367bafec742SSukumar Swaminathan 	knp = ksp->ks_data;
5368bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.vendor_id;
5369bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.device_id;
5370bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.command;
5371bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.status;
5372bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.revision;
5373bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.cache_line_size;
5374bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.latency_timer;
5375bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.header_type;
5376bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.io_base_address;
5377bafec742SSukumar Swaminathan 	(knp++)->value.ui32 =
5378bafec742SSukumar Swaminathan 	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_lower;
5379bafec742SSukumar Swaminathan 	(knp++)->value.ui32 =
5380bafec742SSukumar Swaminathan 	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_upper;
5381bafec742SSukumar Swaminathan 	(knp++)->value.ui32 =
5382bafec742SSukumar Swaminathan 	    qlge->pci_cfg.pci_doorbell_mem_base_address_lower;
5383bafec742SSukumar Swaminathan 	(knp++)->value.ui32 =
5384bafec742SSukumar Swaminathan 	    qlge->pci_cfg.pci_doorbell_mem_base_address_upper;
5385bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.sub_vendor_id;
5386bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.sub_device_id;
5387bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.pcie_device_control;
5388bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.link_status;
5389bafec742SSukumar Swaminathan 
5390bafec742SSukumar Swaminathan 	return (0);
5391bafec742SSukumar Swaminathan }
5392bafec742SSukumar Swaminathan 
5393bafec742SSukumar Swaminathan static const ql_ksindex_t ql_kstats_mii[] = {
5394bafec742SSukumar Swaminathan 	/* MAC/MII related hardware information */
5395bafec742SSukumar Swaminathan 	{ 0, "mtu"},
5396bafec742SSukumar Swaminathan 
5397bafec742SSukumar Swaminathan 	{ -1, NULL},
5398bafec742SSukumar Swaminathan };
5399bafec742SSukumar Swaminathan 
5400bafec742SSukumar Swaminathan 
5401bafec742SSukumar Swaminathan /*
5402bafec742SSukumar Swaminathan  * kstat update function for MII related information.
5403bafec742SSukumar Swaminathan  */
5404bafec742SSukumar Swaminathan static int
5405bafec742SSukumar Swaminathan ql_kstats_mii_update(kstat_t *ksp, int flag)
5406bafec742SSukumar Swaminathan {
5407bafec742SSukumar Swaminathan 	qlge_t *qlge;
5408bafec742SSukumar Swaminathan 	kstat_named_t *knp;
5409bafec742SSukumar Swaminathan 
5410bafec742SSukumar Swaminathan 	if (flag != KSTAT_READ)
5411bafec742SSukumar Swaminathan 		return (EACCES);
5412bafec742SSukumar Swaminathan 
5413bafec742SSukumar Swaminathan 	qlge = ksp->ks_private;
5414bafec742SSukumar Swaminathan 	knp = ksp->ks_data;
5415bafec742SSukumar Swaminathan 
5416bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->mtu;
5417bafec742SSukumar Swaminathan 
5418bafec742SSukumar Swaminathan 	return (0);
5419bafec742SSukumar Swaminathan }
5420bafec742SSukumar Swaminathan 
5421bafec742SSukumar Swaminathan static const ql_ksindex_t ql_kstats_reg[] = {
5422bafec742SSukumar Swaminathan 	/* Register information */
5423bafec742SSukumar Swaminathan 	{ 0, "System (0x08)"			},
5424bafec742SSukumar Swaminathan 	{ 1, "Reset/Fail Over(0x0Ch"		},
5425bafec742SSukumar Swaminathan 	{ 2, "Function Specific Control(0x10)"	},
5426bafec742SSukumar Swaminathan 	{ 3, "Status (0x30)"			},
5427bafec742SSukumar Swaminathan 	{ 4, "Intr Enable (0x34)"		},
5428bafec742SSukumar Swaminathan 	{ 5, "Intr Status1 (0x3C)"		},
5429bafec742SSukumar Swaminathan 	{ 6, "Error Status (0x54)"		},
5430bafec742SSukumar Swaminathan 	{ 7, "XGMAC Flow Control(0x11C)"	},
5431bafec742SSukumar Swaminathan 	{ 8, "XGMAC Tx Pause Frames(0x230)"	},
5432bafec742SSukumar Swaminathan 	{ 9, "XGMAC Rx Pause Frames(0x388)"	},
5433bafec742SSukumar Swaminathan 	{ 10, "XGMAC Rx FIFO Drop Count(0x5B8)"	},
5434bafec742SSukumar Swaminathan 	{ 11, "interrupts actually allocated"	},
5435bafec742SSukumar Swaminathan 	{ 12, "interrupts on rx ring 0"		},
5436bafec742SSukumar Swaminathan 	{ 13, "interrupts on rx ring 1"		},
5437bafec742SSukumar Swaminathan 	{ 14, "interrupts on rx ring 2"		},
5438bafec742SSukumar Swaminathan 	{ 15, "interrupts on rx ring 3"		},
5439bafec742SSukumar Swaminathan 	{ 16, "interrupts on rx ring 4"		},
5440bafec742SSukumar Swaminathan 	{ 17, "interrupts on rx ring 5"		},
5441bafec742SSukumar Swaminathan 	{ 18, "interrupts on rx ring 6"		},
5442bafec742SSukumar Swaminathan 	{ 19, "interrupts on rx ring 7"		},
5443bafec742SSukumar Swaminathan 	{ 20, "polls on rx ring 0"		},
5444bafec742SSukumar Swaminathan 	{ 21, "polls on rx ring 1"		},
5445bafec742SSukumar Swaminathan 	{ 22, "polls on rx ring 2"		},
5446bafec742SSukumar Swaminathan 	{ 23, "polls on rx ring 3"		},
5447bafec742SSukumar Swaminathan 	{ 24, "polls on rx ring 4"		},
5448bafec742SSukumar Swaminathan 	{ 25, "polls on rx ring 5"		},
5449bafec742SSukumar Swaminathan 	{ 26, "polls on rx ring 6"		},
5450bafec742SSukumar Swaminathan 	{ 27, "polls on rx ring 7"		},
5451bafec742SSukumar Swaminathan 	{ 28, "tx no resource on ring 0"	},
5452bafec742SSukumar Swaminathan 	{ 29, "tx dma bind fail on ring 0"	},
5453bafec742SSukumar Swaminathan 	{ 30, "tx dma no handle on ring 0"	},
5454bafec742SSukumar Swaminathan 	{ 31, "tx dma no cookie on ring 0"	},
5455bafec742SSukumar Swaminathan 	{ 32, "MPI firmware major version"},
5456bafec742SSukumar Swaminathan 	{ 33, "MPI firmware minor version"},
5457bafec742SSukumar Swaminathan 	{ 34, "MPI firmware sub version"},
5458bafec742SSukumar Swaminathan 
5459bafec742SSukumar Swaminathan 	{ -1, NULL},
5460bafec742SSukumar Swaminathan };
5461bafec742SSukumar Swaminathan 
5462bafec742SSukumar Swaminathan 
5463bafec742SSukumar Swaminathan /*
5464bafec742SSukumar Swaminathan  * kstat update function for device register set
5465bafec742SSukumar Swaminathan  */
5466bafec742SSukumar Swaminathan static int
5467bafec742SSukumar Swaminathan ql_kstats_get_reg_and_dev_stats(kstat_t *ksp, int flag)
5468bafec742SSukumar Swaminathan {
5469bafec742SSukumar Swaminathan 	qlge_t *qlge;
5470bafec742SSukumar Swaminathan 	kstat_named_t *knp;
5471bafec742SSukumar Swaminathan 	uint32_t val32;
5472bafec742SSukumar Swaminathan 	int i = 0;
5473bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
5474bafec742SSukumar Swaminathan 
5475bafec742SSukumar Swaminathan 	if (flag != KSTAT_READ)
5476bafec742SSukumar Swaminathan 		return (EACCES);
5477bafec742SSukumar Swaminathan 
5478bafec742SSukumar Swaminathan 	qlge = ksp->ks_private;
5479bafec742SSukumar Swaminathan 	knp = ksp->ks_data;
5480bafec742SSukumar Swaminathan 
5481bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_SYSTEM);
5482bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_RESET_FAILOVER);
5483bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL);
5484bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_STATUS);
5485bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_ENABLE);
5486bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
5487bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_ERROR_STATUS);
5488bafec742SSukumar Swaminathan 
5489bafec742SSukumar Swaminathan 	if (ql_sem_spinlock(qlge, qlge->xgmac_sem_mask)) {
5490bafec742SSukumar Swaminathan 		return (0);
5491bafec742SSukumar Swaminathan 	}
5492*0662fbf4SSukumar Swaminathan 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_FLOW_CONTROL, &val32);
5493bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
5494bafec742SSukumar Swaminathan 
5495*0662fbf4SSukumar Swaminathan 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_TX_PAUSE_PKTS, &val32);
5496bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
5497bafec742SSukumar Swaminathan 
5498*0662fbf4SSukumar Swaminathan 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_PAUSE_PKTS, &val32);
5499bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
5500bafec742SSukumar Swaminathan 
5501*0662fbf4SSukumar Swaminathan 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_FIFO_DROPS, &val32);
5502bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
5503bafec742SSukumar Swaminathan 
5504bafec742SSukumar Swaminathan 	ql_sem_unlock(qlge, qlge->xgmac_sem_mask);
5505bafec742SSukumar Swaminathan 
5506bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->intr_cnt;
5507bafec742SSukumar Swaminathan 
5508bafec742SSukumar Swaminathan 	for (i = 0; i < 8; i++) {
5509bafec742SSukumar Swaminathan 		(knp++)->value.ui32 = qlge->rx_interrupts[i];
5510bafec742SSukumar Swaminathan 	}
5511bafec742SSukumar Swaminathan 
5512bafec742SSukumar Swaminathan 	for (i = 0; i < 8; i++) {
5513bafec742SSukumar Swaminathan 		(knp++)->value.ui32 = qlge->rx_polls[i];
5514bafec742SSukumar Swaminathan 	}
5515bafec742SSukumar Swaminathan 
5516bafec742SSukumar Swaminathan 	tx_ring = &qlge->tx_ring[0];
5517bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = tx_ring->defer;
5518bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = tx_ring->tx_fail_dma_bind;
5519bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = tx_ring->tx_no_dma_handle;
5520bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = tx_ring->tx_no_dma_cookie;
5521bafec742SSukumar Swaminathan 
5522bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->fw_version_info.major_version;
5523bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->fw_version_info.minor_version;
5524bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->fw_version_info.sub_minor_version;
5525bafec742SSukumar Swaminathan 
5526bafec742SSukumar Swaminathan 	return (0);
5527bafec742SSukumar Swaminathan }
5528bafec742SSukumar Swaminathan 
5529bafec742SSukumar Swaminathan 
5530bafec742SSukumar Swaminathan static kstat_t *
5531bafec742SSukumar Swaminathan ql_setup_named_kstat(qlge_t *qlge, int instance, char *name,
5532bafec742SSukumar Swaminathan     const ql_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
5533bafec742SSukumar Swaminathan {
5534bafec742SSukumar Swaminathan 	kstat_t *ksp;
5535bafec742SSukumar Swaminathan 	kstat_named_t *knp;
5536bafec742SSukumar Swaminathan 	char *np;
5537bafec742SSukumar Swaminathan 	int type;
5538bafec742SSukumar Swaminathan 
5539bafec742SSukumar Swaminathan 	size /= sizeof (ql_ksindex_t);
5540bafec742SSukumar Swaminathan 	ksp = kstat_create(ADAPTER_NAME, instance, name, "net",
5541bafec742SSukumar Swaminathan 	    KSTAT_TYPE_NAMED, ((uint32_t)size) - 1, KSTAT_FLAG_PERSISTENT);
5542bafec742SSukumar Swaminathan 	if (ksp == NULL)
5543bafec742SSukumar Swaminathan 		return (NULL);
5544bafec742SSukumar Swaminathan 
5545bafec742SSukumar Swaminathan 	ksp->ks_private = qlge;
5546bafec742SSukumar Swaminathan 	ksp->ks_update = update;
5547bafec742SSukumar Swaminathan 	for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
5548bafec742SSukumar Swaminathan 		switch (*np) {
5549bafec742SSukumar Swaminathan 		default:
5550bafec742SSukumar Swaminathan 			type = KSTAT_DATA_UINT32;
5551bafec742SSukumar Swaminathan 			break;
5552bafec742SSukumar Swaminathan 		case '&':
5553bafec742SSukumar Swaminathan 			np += 1;
5554bafec742SSukumar Swaminathan 			type = KSTAT_DATA_CHAR;
5555bafec742SSukumar Swaminathan 			break;
5556bafec742SSukumar Swaminathan 		}
5557bafec742SSukumar Swaminathan 		kstat_named_init(knp, np, (uint8_t)type);
5558bafec742SSukumar Swaminathan 	}
5559bafec742SSukumar Swaminathan 	kstat_install(ksp);
5560bafec742SSukumar Swaminathan 
5561bafec742SSukumar Swaminathan 	return (ksp);
5562bafec742SSukumar Swaminathan }
5563bafec742SSukumar Swaminathan 
5564bafec742SSukumar Swaminathan /*
5565bafec742SSukumar Swaminathan  * Setup various kstat
5566bafec742SSukumar Swaminathan  */
5567bafec742SSukumar Swaminathan int
5568bafec742SSukumar Swaminathan ql_init_kstats(qlge_t *qlge)
5569bafec742SSukumar Swaminathan {
5570bafec742SSukumar Swaminathan 	/* Hardware KStats */
5571bafec742SSukumar Swaminathan 	qlge->ql_kstats[QL_KSTAT_CHIP] = ql_setup_named_kstat(qlge,
5572bafec742SSukumar Swaminathan 	    qlge->instance, "chip", ql_kstats_hw,
5573bafec742SSukumar Swaminathan 	    sizeof (ql_kstats_hw), ql_kstats_get_pci_regs);
5574bafec742SSukumar Swaminathan 	if (qlge->ql_kstats[QL_KSTAT_CHIP] == NULL) {
5575bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
5576bafec742SSukumar Swaminathan 	}
5577bafec742SSukumar Swaminathan 
5578bafec742SSukumar Swaminathan 	/* MII KStats */
5579bafec742SSukumar Swaminathan 	qlge->ql_kstats[QL_KSTAT_LINK] = ql_setup_named_kstat(qlge,
5580bafec742SSukumar Swaminathan 	    qlge->instance, "mii", ql_kstats_mii,
5581bafec742SSukumar Swaminathan 	    sizeof (ql_kstats_mii), ql_kstats_mii_update);
5582bafec742SSukumar Swaminathan 	if (qlge->ql_kstats[QL_KSTAT_LINK] == NULL) {
5583bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
5584bafec742SSukumar Swaminathan 	}
5585bafec742SSukumar Swaminathan 
5586bafec742SSukumar Swaminathan 	/* REG KStats */
5587bafec742SSukumar Swaminathan 	qlge->ql_kstats[QL_KSTAT_REG] = ql_setup_named_kstat(qlge,
5588bafec742SSukumar Swaminathan 	    qlge->instance, "reg", ql_kstats_reg,
5589bafec742SSukumar Swaminathan 	    sizeof (ql_kstats_reg), ql_kstats_get_reg_and_dev_stats);
5590bafec742SSukumar Swaminathan 	if (qlge->ql_kstats[QL_KSTAT_REG] == NULL) {
5591bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
5592bafec742SSukumar Swaminathan 	}
5593bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
5594bafec742SSukumar Swaminathan }
5595bafec742SSukumar Swaminathan 
5596bafec742SSukumar Swaminathan /*
5597bafec742SSukumar Swaminathan  * delete all kstat
5598bafec742SSukumar Swaminathan  */
5599bafec742SSukumar Swaminathan void
5600bafec742SSukumar Swaminathan ql_fini_kstats(qlge_t *qlge)
5601bafec742SSukumar Swaminathan {
5602bafec742SSukumar Swaminathan 	int i;
5603bafec742SSukumar Swaminathan 
5604bafec742SSukumar Swaminathan 	for (i = 0; i < QL_KSTAT_COUNT; i++) {
5605bafec742SSukumar Swaminathan 		if (qlge->ql_kstats[i] != NULL)
5606bafec742SSukumar Swaminathan 			kstat_delete(qlge->ql_kstats[i]);
5607bafec742SSukumar Swaminathan 	}
5608bafec742SSukumar Swaminathan }
5609bafec742SSukumar Swaminathan 
5610bafec742SSukumar Swaminathan /* ************************************************************************* */
5611bafec742SSukumar Swaminathan /*
5612bafec742SSukumar Swaminathan  *                                 kstat end
5613bafec742SSukumar Swaminathan  */
5614bafec742SSukumar Swaminathan /* ************************************************************************* */
5615bafec742SSukumar Swaminathan 
5616bafec742SSukumar Swaminathan /*
5617bafec742SSukumar Swaminathan  * Setup the parameters for receive and transmit rings including buffer sizes
5618bafec742SSukumar Swaminathan  * and completion queue sizes
5619bafec742SSukumar Swaminathan  */
5620bafec742SSukumar Swaminathan static int
5621bafec742SSukumar Swaminathan ql_setup_rings(qlge_t *qlge)
5622bafec742SSukumar Swaminathan {
5623bafec742SSukumar Swaminathan 	uint8_t i;
5624bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
5625bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
5626bafec742SSukumar Swaminathan 	uint16_t lbq_buf_size;
5627bafec742SSukumar Swaminathan 
5628bafec742SSukumar Swaminathan 	lbq_buf_size = (uint16_t)
5629bafec742SSukumar Swaminathan 	    ((qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE);
5630bafec742SSukumar Swaminathan 
5631bafec742SSukumar Swaminathan 	/*
5632bafec742SSukumar Swaminathan 	 * rx_ring[0] is always the default queue.
5633bafec742SSukumar Swaminathan 	 */
5634bafec742SSukumar Swaminathan 	/*
5635bafec742SSukumar Swaminathan 	 * qlge->rx_ring_count:
5636bafec742SSukumar Swaminathan 	 * Total number of rx_rings. This includes a number
5637bafec742SSukumar Swaminathan 	 * of outbound completion handler rx_rings, and a
5638bafec742SSukumar Swaminathan 	 * number of inbound completion handler rx_rings.
5639bafec742SSukumar Swaminathan 	 * rss is only enabled if we have more than 1 rx completion
5640bafec742SSukumar Swaminathan 	 * queue. If we have a single rx completion queue
5641bafec742SSukumar Swaminathan 	 * then all rx completions go to this queue and
5642bafec742SSukumar Swaminathan 	 * the last completion queue
5643bafec742SSukumar Swaminathan 	 */
5644bafec742SSukumar Swaminathan 
5645bafec742SSukumar Swaminathan 	qlge->tx_ring_first_cq_id = qlge->rss_ring_count;
5646bafec742SSukumar Swaminathan 
5647bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
5648bafec742SSukumar Swaminathan 		tx_ring = &qlge->tx_ring[i];
5649bafec742SSukumar Swaminathan 		bzero((void *)tx_ring, sizeof (*tx_ring));
5650bafec742SSukumar Swaminathan 		tx_ring->qlge = qlge;
5651bafec742SSukumar Swaminathan 		tx_ring->wq_id = i;
5652bafec742SSukumar Swaminathan 		tx_ring->wq_len = qlge->tx_ring_size;
5653bafec742SSukumar Swaminathan 		tx_ring->wq_size = (uint32_t)(
5654bafec742SSukumar Swaminathan 		    tx_ring->wq_len * sizeof (struct ob_mac_iocb_req));
5655bafec742SSukumar Swaminathan 
5656bafec742SSukumar Swaminathan 		/*
5657bafec742SSukumar Swaminathan 		 * The completion queue ID for the tx rings start
5658bafec742SSukumar Swaminathan 		 * immediately after the last rss completion queue.
5659bafec742SSukumar Swaminathan 		 */
5660bafec742SSukumar Swaminathan 		tx_ring->cq_id = (uint16_t)(i + qlge->tx_ring_first_cq_id);
5661bafec742SSukumar Swaminathan 	}
5662bafec742SSukumar Swaminathan 
5663bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
5664bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
5665bafec742SSukumar Swaminathan 		bzero((void *)rx_ring, sizeof (*rx_ring));
5666bafec742SSukumar Swaminathan 		rx_ring->qlge = qlge;
5667bafec742SSukumar Swaminathan 		rx_ring->cq_id = i;
5668bafec742SSukumar Swaminathan 		if (i != 0)
5669bafec742SSukumar Swaminathan 			rx_ring->cpu = (i) % qlge->rx_ring_count;
5670bafec742SSukumar Swaminathan 		else
5671bafec742SSukumar Swaminathan 			rx_ring->cpu = 0;
5672bafec742SSukumar Swaminathan 
5673bafec742SSukumar Swaminathan 		if (i < qlge->rss_ring_count) {
5674bafec742SSukumar Swaminathan 			/*
5675bafec742SSukumar Swaminathan 			 * Inbound completions (RSS) queues
5676bafec742SSukumar Swaminathan 			 * Default queue is queue 0 which handles
5677bafec742SSukumar Swaminathan 			 * unicast plus bcast/mcast and async events.
5678bafec742SSukumar Swaminathan 			 * Other inbound queues handle unicast frames only.
5679bafec742SSukumar Swaminathan 			 */
5680bafec742SSukumar Swaminathan 			rx_ring->cq_len = qlge->rx_ring_size;
5681bafec742SSukumar Swaminathan 			rx_ring->cq_size = (uint32_t)
5682bafec742SSukumar Swaminathan 			    (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
5683bafec742SSukumar Swaminathan 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
5684bafec742SSukumar Swaminathan 			rx_ring->lbq_size = (uint32_t)
5685bafec742SSukumar Swaminathan 			    (rx_ring->lbq_len * sizeof (uint64_t));
5686bafec742SSukumar Swaminathan 			rx_ring->lbq_buf_size = lbq_buf_size;
5687bafec742SSukumar Swaminathan 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
5688bafec742SSukumar Swaminathan 			rx_ring->sbq_size = (uint32_t)
5689bafec742SSukumar Swaminathan 			    (rx_ring->sbq_len * sizeof (uint64_t));
5690bafec742SSukumar Swaminathan 			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
5691bafec742SSukumar Swaminathan 			rx_ring->type = RX_Q;
5692bafec742SSukumar Swaminathan 
5693bafec742SSukumar Swaminathan 			QL_PRINT(DBG_GLD,
5694bafec742SSukumar Swaminathan 			    ("%s(%d)Allocating rss completion queue %d "
5695bafec742SSukumar Swaminathan 			    "on cpu %d\n", __func__, qlge->instance,
5696bafec742SSukumar Swaminathan 			    rx_ring->cq_id, rx_ring->cpu));
5697bafec742SSukumar Swaminathan 		} else {
5698bafec742SSukumar Swaminathan 			/*
5699bafec742SSukumar Swaminathan 			 * Outbound queue handles outbound completions only
5700bafec742SSukumar Swaminathan 			 */
5701bafec742SSukumar Swaminathan 			/* outbound cq is same size as tx_ring it services. */
5702bafec742SSukumar Swaminathan 			rx_ring->cq_len = qlge->tx_ring_size;
5703bafec742SSukumar Swaminathan 			rx_ring->cq_size = (uint32_t)
5704bafec742SSukumar Swaminathan 			    (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
5705bafec742SSukumar Swaminathan 			rx_ring->lbq_len = 0;
5706bafec742SSukumar Swaminathan 			rx_ring->lbq_size = 0;
5707bafec742SSukumar Swaminathan 			rx_ring->lbq_buf_size = 0;
5708bafec742SSukumar Swaminathan 			rx_ring->sbq_len = 0;
5709bafec742SSukumar Swaminathan 			rx_ring->sbq_size = 0;
5710bafec742SSukumar Swaminathan 			rx_ring->sbq_buf_size = 0;
5711bafec742SSukumar Swaminathan 			rx_ring->type = TX_Q;
5712bafec742SSukumar Swaminathan 
5713bafec742SSukumar Swaminathan 			QL_PRINT(DBG_GLD,
5714bafec742SSukumar Swaminathan 			    ("%s(%d)Allocating TX completion queue %d on"
5715bafec742SSukumar Swaminathan 			    " cpu %d\n", __func__, qlge->instance,
5716bafec742SSukumar Swaminathan 			    rx_ring->cq_id, rx_ring->cpu));
5717bafec742SSukumar Swaminathan 		}
5718bafec742SSukumar Swaminathan 	}
5719bafec742SSukumar Swaminathan 
5720bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
5721bafec742SSukumar Swaminathan }
5722bafec742SSukumar Swaminathan 
5723bafec742SSukumar Swaminathan static int
5724bafec742SSukumar Swaminathan ql_start_rx_ring(qlge_t *qlge, struct rx_ring *rx_ring)
5725bafec742SSukumar Swaminathan {
5726bafec742SSukumar Swaminathan 	struct cqicb_t *cqicb = (struct cqicb_t *)rx_ring->cqicb_dma.vaddr;
5727bafec742SSukumar Swaminathan 	void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
5728bafec742SSukumar Swaminathan 	    (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
5729bafec742SSukumar Swaminathan 	/* first shadow area is used by wqicb's host copy of consumer index */
5730bafec742SSukumar Swaminathan 	    + sizeof (uint64_t);
5731bafec742SSukumar Swaminathan 	uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
5732bafec742SSukumar Swaminathan 	    (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
5733bafec742SSukumar Swaminathan 	    + sizeof (uint64_t);
5734bafec742SSukumar Swaminathan 	/* lrg/sml bufq pointers */
5735bafec742SSukumar Swaminathan 	uint8_t *buf_q_base_reg =
5736bafec742SSukumar Swaminathan 	    (uint8_t *)qlge->buf_q_ptr_base_addr_dma_attr.vaddr +
5737bafec742SSukumar Swaminathan 	    (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
5738bafec742SSukumar Swaminathan 	uint64_t buf_q_base_reg_dma =
5739bafec742SSukumar Swaminathan 	    qlge->buf_q_ptr_base_addr_dma_attr.dma_addr +
5740bafec742SSukumar Swaminathan 	    (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
5741bafec742SSukumar Swaminathan 	caddr_t doorbell_area =
5742bafec742SSukumar Swaminathan 	    qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * (128 + rx_ring->cq_id));
5743bafec742SSukumar Swaminathan 	int err = 0;
5744bafec742SSukumar Swaminathan 	uint16_t bq_len;
5745bafec742SSukumar Swaminathan 	uint64_t tmp;
5746bafec742SSukumar Swaminathan 	uint64_t *base_indirect_ptr;
5747bafec742SSukumar Swaminathan 	int page_entries;
5748bafec742SSukumar Swaminathan 
5749bafec742SSukumar Swaminathan 	/* Set up the shadow registers for this ring. */
5750bafec742SSukumar Swaminathan 	rx_ring->prod_idx_sh_reg = shadow_reg;
5751bafec742SSukumar Swaminathan 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
5752bafec742SSukumar Swaminathan 
5753bafec742SSukumar Swaminathan 	rx_ring->lbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
5754bafec742SSukumar Swaminathan 	rx_ring->lbq_base_indirect_dma = buf_q_base_reg_dma;
5755bafec742SSukumar Swaminathan 
5756bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("%s rx ring(%d): prod_idx virtual addr = 0x%lx,"
5757bafec742SSukumar Swaminathan 	    " phys_addr 0x%lx\n", __func__, rx_ring->cq_id,
5758bafec742SSukumar Swaminathan 	    rx_ring->prod_idx_sh_reg, rx_ring->prod_idx_sh_reg_dma));
5759bafec742SSukumar Swaminathan 
5760bafec742SSukumar Swaminathan 	buf_q_base_reg += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
5761bafec742SSukumar Swaminathan 	buf_q_base_reg_dma += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
5762bafec742SSukumar Swaminathan 	rx_ring->sbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
5763bafec742SSukumar Swaminathan 	rx_ring->sbq_base_indirect_dma = buf_q_base_reg_dma;
5764bafec742SSukumar Swaminathan 
5765bafec742SSukumar Swaminathan 	/* PCI doorbell mem area + 0x00 for consumer index register */
5766bafec742SSukumar Swaminathan 	rx_ring->cnsmr_idx_db_reg = (uint32_t *)(void *)doorbell_area;
5767bafec742SSukumar Swaminathan 	rx_ring->cnsmr_idx = 0;
5768bafec742SSukumar Swaminathan 	*rx_ring->prod_idx_sh_reg = 0;
5769bafec742SSukumar Swaminathan 	rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
5770bafec742SSukumar Swaminathan 
5771bafec742SSukumar Swaminathan 	/* PCI doorbell mem area + 0x04 for valid register */
5772bafec742SSukumar Swaminathan 	rx_ring->valid_db_reg = (uint32_t *)(void *)
5773bafec742SSukumar Swaminathan 	    ((uint8_t *)(void *)doorbell_area + 0x04);
5774bafec742SSukumar Swaminathan 
5775bafec742SSukumar Swaminathan 	/* PCI doorbell mem area + 0x18 for large buffer consumer */
5776bafec742SSukumar Swaminathan 	rx_ring->lbq_prod_idx_db_reg = (uint32_t *)(void *)
5777bafec742SSukumar Swaminathan 	    ((uint8_t *)(void *)doorbell_area + 0x18);
5778bafec742SSukumar Swaminathan 
5779bafec742SSukumar Swaminathan 	/* PCI doorbell mem area + 0x1c */
5780bafec742SSukumar Swaminathan 	rx_ring->sbq_prod_idx_db_reg = (uint32_t *)(void *)
5781bafec742SSukumar Swaminathan 	    ((uint8_t *)(void *)doorbell_area + 0x1c);
5782bafec742SSukumar Swaminathan 
5783bafec742SSukumar Swaminathan 	bzero((void *)cqicb, sizeof (*cqicb));
5784bafec742SSukumar Swaminathan 
5785bafec742SSukumar Swaminathan 	cqicb->msix_vect = (uint8_t)rx_ring->irq;
5786bafec742SSukumar Swaminathan 
5787bafec742SSukumar Swaminathan 	bq_len = (uint16_t)((rx_ring->cq_len == 65536) ?
5788bafec742SSukumar Swaminathan 	    (uint16_t)0 : (uint16_t)rx_ring->cq_len);
5789bafec742SSukumar Swaminathan 	cqicb->len = (uint16_t)cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
5790bafec742SSukumar Swaminathan 
5791bafec742SSukumar Swaminathan 	cqicb->cq_base_addr_lo =
5792bafec742SSukumar Swaminathan 	    cpu_to_le32(LS_64BITS(rx_ring->cq_dma.dma_addr));
5793bafec742SSukumar Swaminathan 	cqicb->cq_base_addr_hi =
5794bafec742SSukumar Swaminathan 	    cpu_to_le32(MS_64BITS(rx_ring->cq_dma.dma_addr));
5795bafec742SSukumar Swaminathan 
5796bafec742SSukumar Swaminathan 	cqicb->prod_idx_addr_lo =
5797bafec742SSukumar Swaminathan 	    cpu_to_le32(LS_64BITS(rx_ring->prod_idx_sh_reg_dma));
5798bafec742SSukumar Swaminathan 	cqicb->prod_idx_addr_hi =
5799bafec742SSukumar Swaminathan 	    cpu_to_le32(MS_64BITS(rx_ring->prod_idx_sh_reg_dma));
5800bafec742SSukumar Swaminathan 
5801bafec742SSukumar Swaminathan 	/*
5802bafec742SSukumar Swaminathan 	 * Set up the control block load flags.
5803bafec742SSukumar Swaminathan 	 */
5804bafec742SSukumar Swaminathan 	cqicb->flags = FLAGS_LC | /* Load queue base address */
5805bafec742SSukumar Swaminathan 	    FLAGS_LV | /* Load MSI-X vector */
5806bafec742SSukumar Swaminathan 	    FLAGS_LI;  /* Load irq delay values */
5807bafec742SSukumar Swaminathan 	if (rx_ring->lbq_len) {
5808bafec742SSukumar Swaminathan 		/* Load lbq values */
5809bafec742SSukumar Swaminathan 		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LL);
5810bafec742SSukumar Swaminathan 		tmp = (uint64_t)rx_ring->lbq_dma.dma_addr;
5811bafec742SSukumar Swaminathan 		base_indirect_ptr = (uint64_t *)rx_ring->lbq_base_indirect;
5812bafec742SSukumar Swaminathan 		page_entries = 0;
5813bafec742SSukumar Swaminathan 		do {
5814bafec742SSukumar Swaminathan 			*base_indirect_ptr = cpu_to_le64(tmp);
5815bafec742SSukumar Swaminathan 			tmp += VM_PAGE_SIZE;
5816bafec742SSukumar Swaminathan 			base_indirect_ptr++;
5817bafec742SSukumar Swaminathan 			page_entries++;
5818bafec742SSukumar Swaminathan 		} while (page_entries < (int)(
5819bafec742SSukumar Swaminathan 		    ((rx_ring->lbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
5820bafec742SSukumar Swaminathan 
5821bafec742SSukumar Swaminathan 		cqicb->lbq_addr_lo =
5822bafec742SSukumar Swaminathan 		    cpu_to_le32(LS_64BITS(rx_ring->lbq_base_indirect_dma));
5823bafec742SSukumar Swaminathan 		cqicb->lbq_addr_hi =
5824bafec742SSukumar Swaminathan 		    cpu_to_le32(MS_64BITS(rx_ring->lbq_base_indirect_dma));
5825bafec742SSukumar Swaminathan 		bq_len = (uint16_t)((rx_ring->lbq_buf_size == 65536) ?
5826bafec742SSukumar Swaminathan 		    (uint16_t)0 : (uint16_t)rx_ring->lbq_buf_size);
5827bafec742SSukumar Swaminathan 		cqicb->lbq_buf_size = (uint16_t)cpu_to_le16(bq_len);
5828bafec742SSukumar Swaminathan 		bq_len = (uint16_t)((rx_ring->lbq_len == 65536) ? (uint16_t)0 :
5829bafec742SSukumar Swaminathan 		    (uint16_t)rx_ring->lbq_len);
5830bafec742SSukumar Swaminathan 		cqicb->lbq_len = (uint16_t)cpu_to_le16(bq_len);
5831bafec742SSukumar Swaminathan 		rx_ring->lbq_prod_idx = 0;
5832bafec742SSukumar Swaminathan 		rx_ring->lbq_curr_idx = 0;
5833bafec742SSukumar Swaminathan 	}
5834bafec742SSukumar Swaminathan 	if (rx_ring->sbq_len) {
5835bafec742SSukumar Swaminathan 		/* Load sbq values */
5836bafec742SSukumar Swaminathan 		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LS);
5837bafec742SSukumar Swaminathan 		tmp = (uint64_t)rx_ring->sbq_dma.dma_addr;
5838bafec742SSukumar Swaminathan 		base_indirect_ptr = (uint64_t *)rx_ring->sbq_base_indirect;
5839bafec742SSukumar Swaminathan 		page_entries = 0;
5840bafec742SSukumar Swaminathan 
5841bafec742SSukumar Swaminathan 		do {
5842bafec742SSukumar Swaminathan 			*base_indirect_ptr = cpu_to_le64(tmp);
5843bafec742SSukumar Swaminathan 			tmp += VM_PAGE_SIZE;
5844bafec742SSukumar Swaminathan 			base_indirect_ptr++;
5845bafec742SSukumar Swaminathan 			page_entries++;
5846bafec742SSukumar Swaminathan 		} while (page_entries < (uint32_t)
5847bafec742SSukumar Swaminathan 		    (((rx_ring->sbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
5848bafec742SSukumar Swaminathan 
5849bafec742SSukumar Swaminathan 		cqicb->sbq_addr_lo =
5850bafec742SSukumar Swaminathan 		    cpu_to_le32(LS_64BITS(rx_ring->sbq_base_indirect_dma));
5851bafec742SSukumar Swaminathan 		cqicb->sbq_addr_hi =
5852bafec742SSukumar Swaminathan 		    cpu_to_le32(MS_64BITS(rx_ring->sbq_base_indirect_dma));
5853bafec742SSukumar Swaminathan 		cqicb->sbq_buf_size = (uint16_t)
5854bafec742SSukumar Swaminathan 		    cpu_to_le16((uint16_t)(rx_ring->sbq_buf_size/2));
5855bafec742SSukumar Swaminathan 		bq_len = (uint16_t)((rx_ring->sbq_len == 65536) ?
5856bafec742SSukumar Swaminathan 		    (uint16_t)0 : (uint16_t)rx_ring->sbq_len);
5857bafec742SSukumar Swaminathan 		cqicb->sbq_len = (uint16_t)cpu_to_le16(bq_len);
5858bafec742SSukumar Swaminathan 		rx_ring->sbq_prod_idx = 0;
5859bafec742SSukumar Swaminathan 		rx_ring->sbq_curr_idx = 0;
5860bafec742SSukumar Swaminathan 	}
5861bafec742SSukumar Swaminathan 	switch (rx_ring->type) {
5862bafec742SSukumar Swaminathan 	case TX_Q:
5863bafec742SSukumar Swaminathan 		cqicb->irq_delay = (uint16_t)
5864bafec742SSukumar Swaminathan 		    cpu_to_le16(qlge->tx_coalesce_usecs);
5865bafec742SSukumar Swaminathan 		cqicb->pkt_delay = (uint16_t)
5866bafec742SSukumar Swaminathan 		    cpu_to_le16(qlge->tx_max_coalesced_frames);
5867bafec742SSukumar Swaminathan 		break;
5868bafec742SSukumar Swaminathan 
5869bafec742SSukumar Swaminathan 	case DEFAULT_Q:
5870bafec742SSukumar Swaminathan 		cqicb->irq_delay = 0;
5871bafec742SSukumar Swaminathan 		cqicb->pkt_delay = 0;
5872bafec742SSukumar Swaminathan 		break;
5873bafec742SSukumar Swaminathan 
5874bafec742SSukumar Swaminathan 	case RX_Q:
5875bafec742SSukumar Swaminathan 		/*
5876bafec742SSukumar Swaminathan 		 * Inbound completion handling rx_rings run in
5877bafec742SSukumar Swaminathan 		 * separate NAPI contexts.
5878bafec742SSukumar Swaminathan 		 */
5879bafec742SSukumar Swaminathan 		cqicb->irq_delay = (uint16_t)
5880bafec742SSukumar Swaminathan 		    cpu_to_le16(qlge->rx_coalesce_usecs);
5881bafec742SSukumar Swaminathan 		cqicb->pkt_delay = (uint16_t)
5882bafec742SSukumar Swaminathan 		    cpu_to_le16(qlge->rx_max_coalesced_frames);
5883bafec742SSukumar Swaminathan 		break;
5884bafec742SSukumar Swaminathan 	default:
5885bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Invalid rx_ring->type = %d.",
5886bafec742SSukumar Swaminathan 		    rx_ring->type);
5887bafec742SSukumar Swaminathan 	}
5888bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("Initializing rx completion queue %d.\n",
5889bafec742SSukumar Swaminathan 	    rx_ring->cq_id));
5890bafec742SSukumar Swaminathan 	/* QL_DUMP_CQICB(qlge, cqicb); */
5891bafec742SSukumar Swaminathan 	err = ql_write_cfg(qlge, CFG_LCQ, rx_ring->cqicb_dma.dma_addr,
5892bafec742SSukumar Swaminathan 	    rx_ring->cq_id);
5893bafec742SSukumar Swaminathan 	if (err) {
5894bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Failed to load CQICB.");
5895bafec742SSukumar Swaminathan 		return (err);
5896bafec742SSukumar Swaminathan 	}
5897bafec742SSukumar Swaminathan 
5898bafec742SSukumar Swaminathan 	rx_ring->rx_packets_dropped_no_buffer = 0;
5899bafec742SSukumar Swaminathan 	rx_ring->rx_pkt_dropped_mac_unenabled = 0;
5900bafec742SSukumar Swaminathan 	rx_ring->rx_failed_sbq_allocs = 0;
5901bafec742SSukumar Swaminathan 	rx_ring->rx_failed_lbq_allocs = 0;
5902bafec742SSukumar Swaminathan 	rx_ring->rx_packets = 0;
5903bafec742SSukumar Swaminathan 	rx_ring->rx_bytes = 0;
5904bafec742SSukumar Swaminathan 	rx_ring->frame_too_long = 0;
5905bafec742SSukumar Swaminathan 	rx_ring->frame_too_short = 0;
5906bafec742SSukumar Swaminathan 	rx_ring->fcs_err = 0;
5907bafec742SSukumar Swaminathan 
5908bafec742SSukumar Swaminathan 	return (err);
5909bafec742SSukumar Swaminathan }
5910bafec742SSukumar Swaminathan 
5911bafec742SSukumar Swaminathan /*
5912bafec742SSukumar Swaminathan  * start RSS
5913bafec742SSukumar Swaminathan  */
5914bafec742SSukumar Swaminathan static int
5915bafec742SSukumar Swaminathan ql_start_rss(qlge_t *qlge)
5916bafec742SSukumar Swaminathan {
5917bafec742SSukumar Swaminathan 	struct ricb *ricb = (struct ricb *)qlge->ricb_dma.vaddr;
5918bafec742SSukumar Swaminathan 	int status = 0;
5919bafec742SSukumar Swaminathan 	int i;
5920bafec742SSukumar Swaminathan 	uint8_t *hash_id = (uint8_t *)ricb->hash_cq_id;
5921bafec742SSukumar Swaminathan 
5922bafec742SSukumar Swaminathan 	bzero((void *)ricb, sizeof (*ricb));
5923bafec742SSukumar Swaminathan 
5924bafec742SSukumar Swaminathan 	ricb->base_cq = RSS_L4K;
5925bafec742SSukumar Swaminathan 	ricb->flags =
5926bafec742SSukumar Swaminathan 	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
5927bafec742SSukumar Swaminathan 	    RSS_RT6);
5928bafec742SSukumar Swaminathan 	ricb->mask = (uint16_t)cpu_to_le16(RSS_HASH_CQ_ID_MAX - 1);
5929bafec742SSukumar Swaminathan 
5930bafec742SSukumar Swaminathan 	/*
5931bafec742SSukumar Swaminathan 	 * Fill out the Indirection Table.
5932bafec742SSukumar Swaminathan 	 */
5933bafec742SSukumar Swaminathan 	for (i = 0; i < RSS_HASH_CQ_ID_MAX; i++)
5934bafec742SSukumar Swaminathan 		hash_id[i] = (uint8_t)(i & (qlge->rss_ring_count - 1));
5935bafec742SSukumar Swaminathan 
5936bafec742SSukumar Swaminathan 	(void) memcpy(&ricb->ipv6_hash_key[0], key_data, 40);
5937bafec742SSukumar Swaminathan 	(void) memcpy(&ricb->ipv4_hash_key[0], key_data, 16);
5938bafec742SSukumar Swaminathan 
5939bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("Initializing RSS.\n"));
5940bafec742SSukumar Swaminathan 
5941bafec742SSukumar Swaminathan 	status = ql_write_cfg(qlge, CFG_LR, qlge->ricb_dma.dma_addr, 0);
5942bafec742SSukumar Swaminathan 	if (status) {
5943bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Failed to load RICB.");
5944bafec742SSukumar Swaminathan 		return (status);
5945bafec742SSukumar Swaminathan 	}
5946bafec742SSukumar Swaminathan 
5947bafec742SSukumar Swaminathan 	return (status);
5948bafec742SSukumar Swaminathan }
5949bafec742SSukumar Swaminathan 
5950bafec742SSukumar Swaminathan /*
5951bafec742SSukumar Swaminathan  * load a tx ring control block to hw and start this ring
5952bafec742SSukumar Swaminathan  */
5953bafec742SSukumar Swaminathan static int
5954bafec742SSukumar Swaminathan ql_start_tx_ring(qlge_t *qlge, struct tx_ring *tx_ring)
5955bafec742SSukumar Swaminathan {
5956bafec742SSukumar Swaminathan 	struct wqicb_t *wqicb = (struct wqicb_t *)tx_ring->wqicb_dma.vaddr;
5957bafec742SSukumar Swaminathan 	caddr_t doorbell_area =
5958bafec742SSukumar Swaminathan 	    qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * tx_ring->wq_id);
5959bafec742SSukumar Swaminathan 	void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
5960bafec742SSukumar Swaminathan 	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
5961bafec742SSukumar Swaminathan 	uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
5962bafec742SSukumar Swaminathan 	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
5963bafec742SSukumar Swaminathan 	int err = 0;
5964bafec742SSukumar Swaminathan 
5965bafec742SSukumar Swaminathan 	/*
5966bafec742SSukumar Swaminathan 	 * Assign doorbell registers for this tx_ring.
5967bafec742SSukumar Swaminathan 	 */
5968bafec742SSukumar Swaminathan 
5969bafec742SSukumar Swaminathan 	/* TX PCI doorbell mem area for tx producer index */
5970bafec742SSukumar Swaminathan 	tx_ring->prod_idx_db_reg = (uint32_t *)(void *)doorbell_area;
5971bafec742SSukumar Swaminathan 	tx_ring->prod_idx = 0;
5972bafec742SSukumar Swaminathan 	/* TX PCI doorbell mem area + 0x04 */
5973bafec742SSukumar Swaminathan 	tx_ring->valid_db_reg = (uint32_t *)(void *)
5974bafec742SSukumar Swaminathan 	    ((uint8_t *)(void *)doorbell_area + 0x04);
5975bafec742SSukumar Swaminathan 
5976bafec742SSukumar Swaminathan 	/*
5977bafec742SSukumar Swaminathan 	 * Assign shadow registers for this tx_ring.
5978bafec742SSukumar Swaminathan 	 */
5979bafec742SSukumar Swaminathan 	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
5980bafec742SSukumar Swaminathan 	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
5981bafec742SSukumar Swaminathan 	*tx_ring->cnsmr_idx_sh_reg = 0;
5982bafec742SSukumar Swaminathan 
5983bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("%s tx ring(%d): cnsmr_idx virtual addr = 0x%lx,"
5984bafec742SSukumar Swaminathan 	    " phys_addr 0x%lx\n",
5985bafec742SSukumar Swaminathan 	    __func__, tx_ring->wq_id, tx_ring->cnsmr_idx_sh_reg,
5986bafec742SSukumar Swaminathan 	    tx_ring->cnsmr_idx_sh_reg_dma));
5987bafec742SSukumar Swaminathan 
5988bafec742SSukumar Swaminathan 	wqicb->len =
5989bafec742SSukumar Swaminathan 	    (uint16_t)cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
5990bafec742SSukumar Swaminathan 	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
5991bafec742SSukumar Swaminathan 	    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
5992bafec742SSukumar Swaminathan 	wqicb->cq_id_rss = (uint16_t)cpu_to_le16(tx_ring->cq_id);
5993bafec742SSukumar Swaminathan 	wqicb->rid = 0;
5994bafec742SSukumar Swaminathan 	wqicb->wq_addr_lo = cpu_to_le32(LS_64BITS(tx_ring->wq_dma.dma_addr));
5995bafec742SSukumar Swaminathan 	wqicb->wq_addr_hi = cpu_to_le32(MS_64BITS(tx_ring->wq_dma.dma_addr));
5996bafec742SSukumar Swaminathan 	wqicb->cnsmr_idx_addr_lo =
5997bafec742SSukumar Swaminathan 	    cpu_to_le32(LS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
5998bafec742SSukumar Swaminathan 	wqicb->cnsmr_idx_addr_hi =
5999bafec742SSukumar Swaminathan 	    cpu_to_le32(MS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
6000bafec742SSukumar Swaminathan 
6001bafec742SSukumar Swaminathan 	ql_init_tx_ring(tx_ring);
6002bafec742SSukumar Swaminathan 	/* QL_DUMP_WQICB(qlge, wqicb); */
6003bafec742SSukumar Swaminathan 	err = ql_write_cfg(qlge, CFG_LRQ, tx_ring->wqicb_dma.dma_addr,
6004bafec742SSukumar Swaminathan 	    tx_ring->wq_id);
6005bafec742SSukumar Swaminathan 
6006bafec742SSukumar Swaminathan 	if (err) {
6007bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Failed to load WQICB.");
6008bafec742SSukumar Swaminathan 		return (err);
6009bafec742SSukumar Swaminathan 	}
6010bafec742SSukumar Swaminathan 	return (err);
6011bafec742SSukumar Swaminathan }
6012bafec742SSukumar Swaminathan 
/*
 * Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 *
 * addr  - six-byte MAC address to program.
 * type  - MAC_ADDR_TYPE_* selector; only MULTI_MAC and CAM_MAC are
 *         handled here, any other type fails with DDI_FAILURE.
 * index - CAM entry to program.
 *
 * Returns DDI_SUCCESS, DDI_FAILURE for an unsupported type, or the
 * status of a failed ql_wait_reg_rdy().
 */
int
ql_set_mac_addr_reg(qlge_t *qlge, uint8_t *addr, uint32_t type,
    uint16_t index)
{
	uint32_t offset = 0;	/* word offset within the CAM entry */
	int status = DDI_SUCCESS;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC: {
		uint32_t cam_output;
		/* Split the address: bytes 0-1 high, bytes 2-5 low. */
		uint32_t upper = (addr[0] << 8) | addr[1];
		uint32_t lower =
		    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
		    (addr[5]);

		QL_PRINT(DBG_INIT, ("Adding %s ", (type ==
		    MAC_ADDR_TYPE_MULTI_MAC) ?
		    "MULTICAST" : "UNICAST"));
		QL_PRINT(DBG_INIT,
		    ("addr %02x %02x %02x %02x %02x %02x at index %d in "
		    "the CAM.\n",
		    addr[0], addr[1], addr[2], addr[3], addr[4],
		    addr[5], index));

		/*
		 * Each CAM word is an index-register/data-register write
		 * pair; ql_wait_reg_rdy() on MAC_ADDR_MW must succeed
		 * before each index write.
		 */
		status = ql_wait_reg_rdy(qlge,
		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		/* offset 0 - lower 32 bits of the MAC address */
		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
		    (offset++) |
		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
		    type);	/* type */
		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, lower);
		status = ql_wait_reg_rdy(qlge,
		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		/* offset 1 - upper 16 bits of the MAC address */
		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
		    (offset++) |
		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
		    type);	/* type */
		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, upper);
		status = ql_wait_reg_rdy(qlge,
		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		/* offset 2 - CQ ID associated with this MAC address */
		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
		    (offset) | (index << MAC_ADDR_IDX_SHIFT) |	/* index */
		    type);	/* type */
		/*
		 * This field should also include the queue id
		 * and possibly the function id.  Right now we hardcode
		 * the route field to NIC core.
		 *
		 * NOTE(review): for MULTI_MAC the offset-2 index write
		 * above is issued with no data-register write following;
		 * presumably intentional, but confirm against the chip
		 * register specification.
		 */
		if (type == MAC_ADDR_TYPE_CAM_MAC) {
			cam_output = (CAM_OUT_ROUTE_NIC |
			    (qlge->func_number << CAM_OUT_FUNC_SHIFT) |
			    (0 <<
			    CAM_OUT_CQ_ID_SHIFT));

			/* route to NIC core */
			ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA,
			    cam_output);
			}
		break;
		}
	default:
		cmn_err(CE_WARN,
		    "Address type %d not yet supported.", type);
		status = DDI_FAILURE;
	}
exit:
	return (status);
}
6095bafec742SSukumar Swaminathan 
6096bafec742SSukumar Swaminathan /*
6097bafec742SSukumar Swaminathan  * The NIC function for this chip has 16 routing indexes.  Each one can be used
6098bafec742SSukumar Swaminathan  * to route different frame types to various inbound queues.  We send broadcast
6099bafec742SSukumar Swaminathan  * multicast/error frames to the default queue for slow handling,
6100bafec742SSukumar Swaminathan  * and CAM hit/RSS frames to the fast handling queues.
6101bafec742SSukumar Swaminathan  */
6102bafec742SSukumar Swaminathan static int
6103bafec742SSukumar Swaminathan ql_set_routing_reg(qlge_t *qlge, uint32_t index, uint32_t mask, int enable)
6104bafec742SSukumar Swaminathan {
6105bafec742SSukumar Swaminathan 	int status;
6106bafec742SSukumar Swaminathan 	uint32_t value = 0;
6107bafec742SSukumar Swaminathan 
6108bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT,
6109bafec742SSukumar Swaminathan 	    ("%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
6110bafec742SSukumar Swaminathan 	    (enable ? "Adding" : "Removing"),
6111bafec742SSukumar Swaminathan 	    ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
6112bafec742SSukumar Swaminathan 	    ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
6113bafec742SSukumar Swaminathan 	    ((index ==
6114bafec742SSukumar Swaminathan 	    RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
6115bafec742SSukumar Swaminathan 	    ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
6116bafec742SSukumar Swaminathan 	    ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
6117bafec742SSukumar Swaminathan 	    ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
6118bafec742SSukumar Swaminathan 	    ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
6119bafec742SSukumar Swaminathan 	    ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
6120bafec742SSukumar Swaminathan 	    ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
6121bafec742SSukumar Swaminathan 	    ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
6122bafec742SSukumar Swaminathan 	    ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
6123bafec742SSukumar Swaminathan 	    ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
6124bafec742SSukumar Swaminathan 	    ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
6125bafec742SSukumar Swaminathan 	    ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
6126bafec742SSukumar Swaminathan 	    ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
6127bafec742SSukumar Swaminathan 	    ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
6128bafec742SSukumar Swaminathan 	    (enable ? "to" : "from")));
6129bafec742SSukumar Swaminathan 
6130bafec742SSukumar Swaminathan 	switch (mask) {
6131bafec742SSukumar Swaminathan 	case RT_IDX_CAM_HIT:
6132bafec742SSukumar Swaminathan 		value = RT_IDX_DST_CAM_Q | /* dest */
6133bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ | /* type */
6134bafec742SSukumar Swaminathan 		    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */
6135bafec742SSukumar Swaminathan 		break;
6136bafec742SSukumar Swaminathan 
6137bafec742SSukumar Swaminathan 	case RT_IDX_VALID: /* Promiscuous Mode frames. */
6138bafec742SSukumar Swaminathan 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6139bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6140bafec742SSukumar Swaminathan 		    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */
6141bafec742SSukumar Swaminathan 		break;
6142bafec742SSukumar Swaminathan 
6143bafec742SSukumar Swaminathan 	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
6144bafec742SSukumar Swaminathan 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6145bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6146bafec742SSukumar Swaminathan 		    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
6147bafec742SSukumar Swaminathan 		break;
6148bafec742SSukumar Swaminathan 
6149bafec742SSukumar Swaminathan 	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
6150bafec742SSukumar Swaminathan 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6151bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6152bafec742SSukumar Swaminathan 		    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */
6153bafec742SSukumar Swaminathan 		break;
6154bafec742SSukumar Swaminathan 
6155bafec742SSukumar Swaminathan 	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
6156bafec742SSukumar Swaminathan 		value = RT_IDX_DST_CAM_Q |	/* dest */
6157bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6158bafec742SSukumar Swaminathan 		    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */
6159bafec742SSukumar Swaminathan 		break;
6160bafec742SSukumar Swaminathan 
6161bafec742SSukumar Swaminathan 	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
6162bafec742SSukumar Swaminathan 		value = RT_IDX_DST_CAM_Q |	/* dest */
6163bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6164bafec742SSukumar Swaminathan 		    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6165bafec742SSukumar Swaminathan 		break;
6166bafec742SSukumar Swaminathan 
6167bafec742SSukumar Swaminathan 	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
6168bafec742SSukumar Swaminathan 		value = RT_IDX_DST_RSS |	/* dest */
6169bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6170bafec742SSukumar Swaminathan 		    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6171bafec742SSukumar Swaminathan 		break;
6172bafec742SSukumar Swaminathan 
6173bafec742SSukumar Swaminathan 	case 0:	/* Clear the E-bit on an entry. */
6174bafec742SSukumar Swaminathan 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6175bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6176bafec742SSukumar Swaminathan 		    (index << RT_IDX_IDX_SHIFT); /* index */
6177bafec742SSukumar Swaminathan 		break;
6178bafec742SSukumar Swaminathan 
6179bafec742SSukumar Swaminathan 	default:
6180bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Mask type %d not yet supported.",
6181bafec742SSukumar Swaminathan 		    mask);
6182bafec742SSukumar Swaminathan 		status = -EPERM;
6183bafec742SSukumar Swaminathan 		goto exit;
6184bafec742SSukumar Swaminathan 	}
6185bafec742SSukumar Swaminathan 
6186bafec742SSukumar Swaminathan 	if (value != 0) {
6187bafec742SSukumar Swaminathan 		status = ql_wait_reg_rdy(qlge, REG_ROUTING_INDEX, RT_IDX_MW, 0);
6188bafec742SSukumar Swaminathan 		if (status)
6189bafec742SSukumar Swaminathan 			goto exit;
6190bafec742SSukumar Swaminathan 		value |= (enable ? RT_IDX_E : 0);
6191bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_ROUTING_INDEX, value);
6192bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_ROUTING_DATA, enable ? mask : 0);
6193bafec742SSukumar Swaminathan 	}
6194bafec742SSukumar Swaminathan 
6195bafec742SSukumar Swaminathan exit:
6196bafec742SSukumar Swaminathan 	return (status);
6197bafec742SSukumar Swaminathan }
6198bafec742SSukumar Swaminathan 
6199bafec742SSukumar Swaminathan /*
6200bafec742SSukumar Swaminathan  * Clear all the entries in the routing table.
6201bafec742SSukumar Swaminathan  * Caller must get semaphore in advance.
6202bafec742SSukumar Swaminathan  */
6203bafec742SSukumar Swaminathan 
6204bafec742SSukumar Swaminathan static int
6205bafec742SSukumar Swaminathan ql_stop_routing(qlge_t *qlge)
6206bafec742SSukumar Swaminathan {
6207bafec742SSukumar Swaminathan 	int status = 0;
6208bafec742SSukumar Swaminathan 	int i;
6209bafec742SSukumar Swaminathan 	/* Clear all the entries in the routing table. */
6210bafec742SSukumar Swaminathan 	for (i = 0; i < 16; i++) {
6211bafec742SSukumar Swaminathan 		status = ql_set_routing_reg(qlge, i, 0, 0);
6212bafec742SSukumar Swaminathan 		if (status) {
6213bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Stop routing failed. ");
6214bafec742SSukumar Swaminathan 		}
6215bafec742SSukumar Swaminathan 	}
6216bafec742SSukumar Swaminathan 	return (status);
6217bafec742SSukumar Swaminathan }
6218bafec742SSukumar Swaminathan 
6219bafec742SSukumar Swaminathan /* Initialize the frame-to-queue routing. */
6220bafec742SSukumar Swaminathan static int
6221bafec742SSukumar Swaminathan ql_route_initialize(qlge_t *qlge)
6222bafec742SSukumar Swaminathan {
6223bafec742SSukumar Swaminathan 	int status = 0;
6224bafec742SSukumar Swaminathan 
6225bafec742SSukumar Swaminathan 	status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
6226bafec742SSukumar Swaminathan 	if (status != DDI_SUCCESS)
6227bafec742SSukumar Swaminathan 		return (status);
6228bafec742SSukumar Swaminathan 
6229bafec742SSukumar Swaminathan 	/* Clear all the entries in the routing table. */
6230bafec742SSukumar Swaminathan 	status = ql_stop_routing(qlge);
6231bafec742SSukumar Swaminathan 	if (status) {
6232bafec742SSukumar Swaminathan 		goto exit;
6233bafec742SSukumar Swaminathan 	}
6234bafec742SSukumar Swaminathan 	status = ql_set_routing_reg(qlge, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
6235bafec742SSukumar Swaminathan 	if (status) {
6236bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
6237bafec742SSukumar Swaminathan 		    "Failed to init routing register for broadcast packets.");
6238bafec742SSukumar Swaminathan 		goto exit;
6239bafec742SSukumar Swaminathan 	}
6240bafec742SSukumar Swaminathan 	/*
6241bafec742SSukumar Swaminathan 	 * If we have more than one inbound queue, then turn on RSS in the
6242bafec742SSukumar Swaminathan 	 * routing block.
6243bafec742SSukumar Swaminathan 	 */
6244bafec742SSukumar Swaminathan 	if (qlge->rss_ring_count > 1) {
6245bafec742SSukumar Swaminathan 		status = ql_set_routing_reg(qlge, RT_IDX_RSS_MATCH_SLOT,
6246bafec742SSukumar Swaminathan 		    RT_IDX_RSS_MATCH, 1);
6247bafec742SSukumar Swaminathan 		if (status) {
6248bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
6249bafec742SSukumar Swaminathan 			    "Failed to init routing register for MATCH RSS "
6250bafec742SSukumar Swaminathan 			    "packets.");
6251bafec742SSukumar Swaminathan 			goto exit;
6252bafec742SSukumar Swaminathan 		}
6253bafec742SSukumar Swaminathan 	}
6254bafec742SSukumar Swaminathan 
6255bafec742SSukumar Swaminathan 	status = ql_set_routing_reg(qlge, RT_IDX_CAM_HIT_SLOT,
6256bafec742SSukumar Swaminathan 	    RT_IDX_CAM_HIT, 1);
6257bafec742SSukumar Swaminathan 	if (status) {
6258bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
6259bafec742SSukumar Swaminathan 		    "Failed to init routing register for CAM packets.");
6260bafec742SSukumar Swaminathan 		goto exit;
6261bafec742SSukumar Swaminathan 	}
6262bafec742SSukumar Swaminathan 
6263bafec742SSukumar Swaminathan 	status = ql_set_routing_reg(qlge, RT_IDX_MCAST_MATCH_SLOT,
6264bafec742SSukumar Swaminathan 	    RT_IDX_MCAST_MATCH, 1);
6265bafec742SSukumar Swaminathan 	if (status) {
6266bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
6267bafec742SSukumar Swaminathan 		    "Failed to init routing register for Multicast "
6268bafec742SSukumar Swaminathan 		    "packets.");
6269bafec742SSukumar Swaminathan 	}
6270bafec742SSukumar Swaminathan 
6271bafec742SSukumar Swaminathan exit:
6272bafec742SSukumar Swaminathan 	ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
6273bafec742SSukumar Swaminathan 	return (status);
6274bafec742SSukumar Swaminathan }
6275bafec742SSukumar Swaminathan 
/*
 * Initialize hardware: program the global chip registers, sync the
 * port configuration with the OS MTU/pause settings, then bring up
 * all rx rings, RSS, tx rings and the frame routing table.
 *
 * Returns 0 on success or the status of the first failed step.
 */
static int
ql_device_initialize(qlge_t *qlge)
{
	uint32_t value, mask, required_max_frame_size;
	int i;
	int status = 0;
	uint16_t pause = PAUSE_MODE_DISABLED;
	boolean_t update_port_config = B_FALSE;
	/*
	 * Set up the System register to halt on errors.
	 * NOTE(review): the "value << 16" pattern used below suggests the
	 * high half of these registers is a per-bit write mask for the
	 * low half -- confirm against the register specification.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write_reg(qlge, REG_SYSTEM, mask | value);

	/* Set the default queue. */
	value = NIC_RCV_CFG_DFQ;
	mask = NIC_RCV_CFG_DFQ_MASK;

	ql_write_reg(qlge, REG_NIC_RECEIVE_CONFIGURATION, mask | value);

	/* Enable the MPI interrupt. */
	ql_write_reg(qlge, REG_INTERRUPT_MASK, (INTR_MASK_PI << 16)
	    | INTR_MASK_PI);
	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K | FSC_DBRST_1024;
	/* Set/clear header splitting. */
	if (CFG_IST(qlge, CFG_ENABLE_SPLIT_HEADER)) {
		value |= FSC_SH;
		ql_write_reg(qlge, REG_SPLIT_HEADER, SMALL_BUFFER_SIZE);
	}
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL, mask | value);
	/*
	 * check current port max frame size, if different from OS setting,
	 * then we need to change
	 */
	required_max_frame_size =
	    (qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE;

	if (ql_get_port_cfg(qlge) == DDI_SUCCESS) {
		/* if correct frame size but different from required size */
		if (qlge->port_cfg_info.max_frame_size !=
		    required_max_frame_size) {
			QL_PRINT(DBG_MBX,
			    ("update frame size, current %d, new %d\n",
			    qlge->port_cfg_info.max_frame_size,
			    required_max_frame_size));
			qlge->port_cfg_info.max_frame_size =
			    required_max_frame_size;
			update_port_config = B_TRUE;
		}
		/*
		 * Derive the pause mode from the port's link config and
		 * compare it with the driver's configured mode.
		 */
		if (qlge->port_cfg_info.link_cfg & STD_PAUSE)
			pause = PAUSE_MODE_STANDARD;
		else if (qlge->port_cfg_info.link_cfg & PP_PAUSE)
			pause = PAUSE_MODE_PER_PRIORITY;
		if (pause != qlge->pause) {
			update_port_config = B_TRUE;
		}
		/*
		 * Always update port config for now to work around
		 * a hardware bug
		 */
		update_port_config = B_TRUE;

		/* if need to update port configuration */
		if (update_port_config)
			(void) ql_set_port_cfg(qlge);
	} else
		cmn_err(CE_WARN, "ql_get_port_cfg failed");

	/* Start up the rx queues. */
	for (i = 0; i < qlge->rx_ring_count; i++) {
		status = ql_start_rx_ring(qlge, &qlge->rx_ring[i]);
		if (status) {
			cmn_err(CE_WARN,
			    "Failed to start rx ring[%d]", i);
			return (status);
		}
	}

	/*
	 * If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qlge->rss_ring_count > 1) {
		status = ql_start_rss(qlge);
		if (status) {
			cmn_err(CE_WARN, "Failed to start RSS.");
			return (status);
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qlge->tx_ring_count; i++) {
		status = ql_start_tx_ring(qlge, &qlge->tx_ring[i]);
		if (status) {
			cmn_err(CE_WARN,
			    "Failed to start tx ring[%d]", i);
			return (status);
		}
	}
	qlge->selected_tx_ring = 0;
	/* Set the frame routing filter. */
	status = ql_route_initialize(qlge);
	if (status) {
		cmn_err(CE_WARN,
		    "Failed to init CAM/Routing tables.");
		return (status);
	}

	return (status);
}
6394bafec742SSukumar Swaminathan 
6395bafec742SSukumar Swaminathan /*
6396bafec742SSukumar Swaminathan  * Issue soft reset to chip.
6397bafec742SSukumar Swaminathan  */
6398bafec742SSukumar Swaminathan static int
6399bafec742SSukumar Swaminathan ql_asic_reset(qlge_t *qlge)
6400bafec742SSukumar Swaminathan {
6401bafec742SSukumar Swaminathan 	uint32_t value;
6402bafec742SSukumar Swaminathan 	int max_wait_time = 3;
6403bafec742SSukumar Swaminathan 	int status = DDI_SUCCESS;
6404bafec742SSukumar Swaminathan 
6405bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_RESET_FAILOVER, FUNCTION_RESET_MASK
6406bafec742SSukumar Swaminathan 	    |FUNCTION_RESET);
6407bafec742SSukumar Swaminathan 
6408bafec742SSukumar Swaminathan 	max_wait_time = 3;
6409bafec742SSukumar Swaminathan 	do {
6410bafec742SSukumar Swaminathan 		value =  ql_read_reg(qlge, REG_RESET_FAILOVER);
6411bafec742SSukumar Swaminathan 		if ((value & FUNCTION_RESET) == 0)
6412bafec742SSukumar Swaminathan 			break;
6413bafec742SSukumar Swaminathan 		qlge_delay(QL_ONE_SEC_DELAY);
6414bafec742SSukumar Swaminathan 	} while ((--max_wait_time));
6415bafec742SSukumar Swaminathan 
6416bafec742SSukumar Swaminathan 	if (max_wait_time == 0) {
6417bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
6418bafec742SSukumar Swaminathan 		    "TIMEOUT!!! errored out of resetting the chip!");
6419bafec742SSukumar Swaminathan 		status = DDI_FAILURE;
6420bafec742SSukumar Swaminathan 	}
6421bafec742SSukumar Swaminathan 
6422bafec742SSukumar Swaminathan 	return (status);
6423bafec742SSukumar Swaminathan }
6424bafec742SSukumar Swaminathan 
6425bafec742SSukumar Swaminathan /*
6426bafec742SSukumar Swaminathan  * If there are more than MIN_BUFFERS_ARM_COUNT small buffer descriptors in
6427bafec742SSukumar Swaminathan  * its free list, move xMIN_BUFFERS_ARM_COUNT descriptors to its in use list
6428bafec742SSukumar Swaminathan  * to be used by hardware.
6429bafec742SSukumar Swaminathan  */
6430bafec742SSukumar Swaminathan static void
6431bafec742SSukumar Swaminathan ql_arm_sbuf(qlge_t *qlge, struct rx_ring *rx_ring)
6432bafec742SSukumar Swaminathan {
6433bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
6434bafec742SSukumar Swaminathan 	int i;
6435bafec742SSukumar Swaminathan 	uint64_t *sbq_entry = rx_ring->sbq_dma.vaddr;
6436bafec742SSukumar Swaminathan 	uint32_t arm_count;
6437bafec742SSukumar Swaminathan 
6438bafec742SSukumar Swaminathan 	if (rx_ring->sbuf_free_count > rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT)
6439bafec742SSukumar Swaminathan 		arm_count = (rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT);
6440bafec742SSukumar Swaminathan 	else {
6441bafec742SSukumar Swaminathan 		/* Adjust to a multiple of 16 */
6442bafec742SSukumar Swaminathan 		arm_count = (rx_ring->sbuf_free_count / 16) * 16;
6443bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
6444bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "adjust sbuf arm_count %d\n", arm_count);
6445bafec742SSukumar Swaminathan #endif
6446bafec742SSukumar Swaminathan 	}
6447bafec742SSukumar Swaminathan 	for (i = 0; i < arm_count; i++) {
6448bafec742SSukumar Swaminathan 		sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
6449bafec742SSukumar Swaminathan 		if (sbq_desc == NULL)
6450bafec742SSukumar Swaminathan 			break;
6451bafec742SSukumar Swaminathan 		/* Arm asic */
6452bafec742SSukumar Swaminathan 		*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
6453bafec742SSukumar Swaminathan 		sbq_entry++;
6454bafec742SSukumar Swaminathan 
6455bafec742SSukumar Swaminathan 		/* link the descriptors to in_use_list */
6456bafec742SSukumar Swaminathan 		ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
6457bafec742SSukumar Swaminathan 		rx_ring->sbq_prod_idx++;
6458bafec742SSukumar Swaminathan 	}
6459bafec742SSukumar Swaminathan 	ql_update_sbq_prod_idx(qlge, rx_ring);
6460bafec742SSukumar Swaminathan }
6461bafec742SSukumar Swaminathan 
6462bafec742SSukumar Swaminathan /*
6463bafec742SSukumar Swaminathan  * If there are more than MIN_BUFFERS_ARM_COUNT large buffer descriptors in
6464bafec742SSukumar Swaminathan  * its free list, move xMIN_BUFFERS_ARM_COUNT descriptors to its in use list
6465bafec742SSukumar Swaminathan  * to be used by hardware.
6466bafec742SSukumar Swaminathan  */
6467bafec742SSukumar Swaminathan static void
6468bafec742SSukumar Swaminathan ql_arm_lbuf(qlge_t *qlge, struct rx_ring *rx_ring)
6469bafec742SSukumar Swaminathan {
6470bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
6471bafec742SSukumar Swaminathan 	int i;
6472bafec742SSukumar Swaminathan 	uint64_t *lbq_entry = rx_ring->lbq_dma.vaddr;
6473bafec742SSukumar Swaminathan 	uint32_t arm_count;
6474bafec742SSukumar Swaminathan 
6475bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_free_count > rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT)
6476bafec742SSukumar Swaminathan 		arm_count = (rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT);
6477bafec742SSukumar Swaminathan 	else {
6478bafec742SSukumar Swaminathan 		/* Adjust to a multiple of 16 */
6479bafec742SSukumar Swaminathan 		arm_count = (rx_ring->lbuf_free_count / 16) * 16;
6480bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
6481bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "adjust lbuf arm_count %d\n", arm_count);
6482bafec742SSukumar Swaminathan #endif
6483bafec742SSukumar Swaminathan 	}
6484bafec742SSukumar Swaminathan 	for (i = 0; i < arm_count; i++) {
6485bafec742SSukumar Swaminathan 		lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
6486bafec742SSukumar Swaminathan 		if (lbq_desc == NULL)
6487bafec742SSukumar Swaminathan 			break;
6488bafec742SSukumar Swaminathan 		/* Arm asic */
6489bafec742SSukumar Swaminathan 		*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
6490bafec742SSukumar Swaminathan 		lbq_entry++;
6491bafec742SSukumar Swaminathan 
6492bafec742SSukumar Swaminathan 		/* link the descriptors to in_use_list */
6493bafec742SSukumar Swaminathan 		ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
6494bafec742SSukumar Swaminathan 		rx_ring->lbq_prod_idx++;
6495bafec742SSukumar Swaminathan 	}
6496bafec742SSukumar Swaminathan 	ql_update_lbq_prod_idx(qlge, rx_ring);
6497bafec742SSukumar Swaminathan }
6498bafec742SSukumar Swaminathan 
6499bafec742SSukumar Swaminathan 
6500bafec742SSukumar Swaminathan /*
6501bafec742SSukumar Swaminathan  * Initializes the adapter by configuring request and response queues,
6502bafec742SSukumar Swaminathan  * allocates and ARMs small and large receive buffers to the
6503bafec742SSukumar Swaminathan  * hardware
6504bafec742SSukumar Swaminathan  */
6505bafec742SSukumar Swaminathan static int
6506bafec742SSukumar Swaminathan ql_bringup_adapter(qlge_t *qlge)
6507bafec742SSukumar Swaminathan {
6508bafec742SSukumar Swaminathan 	int i;
6509bafec742SSukumar Swaminathan 
6510bafec742SSukumar Swaminathan 	if (ql_device_initialize(qlge) != DDI_SUCCESS) {
6511bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "?%s(%d): ql_device_initialize failed",
6512bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
6513bafec742SSukumar Swaminathan 		goto err_bringup;
6514bafec742SSukumar Swaminathan 	}
6515bafec742SSukumar Swaminathan 	qlge->sequence |= INIT_ADAPTER_UP;
6516bafec742SSukumar Swaminathan 
6517bafec742SSukumar Swaminathan #ifdef QLGE_TRACK_BUFFER_USAGE
6518bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
6519bafec742SSukumar Swaminathan 		if (qlge->rx_ring[i].type != TX_Q) {
6520bafec742SSukumar Swaminathan 			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
6521bafec742SSukumar Swaminathan 			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
6522bafec742SSukumar Swaminathan 		}
6523bafec742SSukumar Swaminathan 		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
6524bafec742SSukumar Swaminathan 	}
6525bafec742SSukumar Swaminathan #endif
6526bafec742SSukumar Swaminathan 	/* Arm buffers */
6527bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
6528bafec742SSukumar Swaminathan 		if (qlge->rx_ring[i].type != TX_Q) {
6529bafec742SSukumar Swaminathan 			ql_arm_sbuf(qlge, &qlge->rx_ring[i]);
6530bafec742SSukumar Swaminathan 			ql_arm_lbuf(qlge, &qlge->rx_ring[i]);
6531bafec742SSukumar Swaminathan 		}
6532bafec742SSukumar Swaminathan 	}
6533bafec742SSukumar Swaminathan 
6534bafec742SSukumar Swaminathan 	/* Enable work/request queues */
6535bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
6536bafec742SSukumar Swaminathan 		if (qlge->tx_ring[i].valid_db_reg)
6537bafec742SSukumar Swaminathan 			ql_write_doorbell_reg(qlge,
6538bafec742SSukumar Swaminathan 			    qlge->tx_ring[i].valid_db_reg,
6539bafec742SSukumar Swaminathan 			    REQ_Q_VALID);
6540bafec742SSukumar Swaminathan 	}
6541bafec742SSukumar Swaminathan 
6542bafec742SSukumar Swaminathan 	/* Enable completion queues */
6543bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
6544bafec742SSukumar Swaminathan 		if (qlge->rx_ring[i].valid_db_reg)
6545bafec742SSukumar Swaminathan 			ql_write_doorbell_reg(qlge,
6546bafec742SSukumar Swaminathan 			    qlge->rx_ring[i].valid_db_reg,
6547bafec742SSukumar Swaminathan 			    RSP_Q_VALID);
6548bafec742SSukumar Swaminathan 	}
6549bafec742SSukumar Swaminathan 
6550bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
6551bafec742SSukumar Swaminathan 		mutex_enter(&qlge->tx_ring[i].tx_lock);
6552bafec742SSukumar Swaminathan 		qlge->tx_ring[i].mac_flags = QL_MAC_STARTED;
6553bafec742SSukumar Swaminathan 		mutex_exit(&qlge->tx_ring[i].tx_lock);
6554bafec742SSukumar Swaminathan 	}
6555bafec742SSukumar Swaminathan 
6556bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
6557bafec742SSukumar Swaminathan 		mutex_enter(&qlge->rx_ring[i].rx_lock);
6558bafec742SSukumar Swaminathan 		qlge->rx_ring[i].mac_flags = QL_MAC_STARTED;
6559bafec742SSukumar Swaminathan 		mutex_exit(&qlge->rx_ring[i].rx_lock);
6560bafec742SSukumar Swaminathan 	}
6561bafec742SSukumar Swaminathan 
6562bafec742SSukumar Swaminathan 	/* This mutex will get re-acquired in enable_completion interrupt */
6563bafec742SSukumar Swaminathan 	mutex_exit(&qlge->hw_mutex);
6564bafec742SSukumar Swaminathan 	/* Traffic can start flowing now */
6565bafec742SSukumar Swaminathan 	ql_enable_all_completion_interrupts(qlge);
6566bafec742SSukumar Swaminathan 	mutex_enter(&qlge->hw_mutex);
6567bafec742SSukumar Swaminathan 
6568bafec742SSukumar Swaminathan 	ql_enable_global_interrupt(qlge);
6569bafec742SSukumar Swaminathan 
6570bafec742SSukumar Swaminathan 	qlge->sequence |= ADAPTER_INIT;
6571bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
6572bafec742SSukumar Swaminathan 
6573bafec742SSukumar Swaminathan err_bringup:
6574*0662fbf4SSukumar Swaminathan 	(void) ql_asic_reset(qlge);
6575bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
6576bafec742SSukumar Swaminathan }
6577bafec742SSukumar Swaminathan 
6578bafec742SSukumar Swaminathan /*
6579bafec742SSukumar Swaminathan  * Initialize mutexes of each rx/tx rings
6580bafec742SSukumar Swaminathan  */
6581bafec742SSukumar Swaminathan static int
6582bafec742SSukumar Swaminathan ql_init_rx_tx_locks(qlge_t *qlge)
6583bafec742SSukumar Swaminathan {
6584bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
6585bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
6586bafec742SSukumar Swaminathan 	int i;
6587bafec742SSukumar Swaminathan 
6588bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
6589bafec742SSukumar Swaminathan 		tx_ring = &qlge->tx_ring[i];
6590bafec742SSukumar Swaminathan 		mutex_init(&tx_ring->tx_lock, NULL, MUTEX_DRIVER,
6591bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
6592bafec742SSukumar Swaminathan 	}
6593bafec742SSukumar Swaminathan 
6594bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
6595bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
6596bafec742SSukumar Swaminathan 		mutex_init(&rx_ring->rx_lock, NULL, MUTEX_DRIVER,
6597bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
6598bafec742SSukumar Swaminathan 		mutex_init(&rx_ring->sbq_lock, NULL, MUTEX_DRIVER,
6599bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
6600bafec742SSukumar Swaminathan 		mutex_init(&rx_ring->lbq_lock, NULL, MUTEX_DRIVER,
6601bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
6602bafec742SSukumar Swaminathan 	}
6603bafec742SSukumar Swaminathan 
6604bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
6605bafec742SSukumar Swaminathan }
6606bafec742SSukumar Swaminathan 
6607bafec742SSukumar Swaminathan /*
6608bafec742SSukumar Swaminathan  * ql_attach - Driver attach.
6609bafec742SSukumar Swaminathan  */
6610bafec742SSukumar Swaminathan static int
6611bafec742SSukumar Swaminathan ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
6612bafec742SSukumar Swaminathan {
6613bafec742SSukumar Swaminathan 	int instance;
6614bafec742SSukumar Swaminathan 	qlge_t *qlge;
6615bafec742SSukumar Swaminathan 	int rval;
6616bafec742SSukumar Swaminathan 	uint16_t w;
6617bafec742SSukumar Swaminathan 	mac_register_t *macp = NULL;
6618bafec742SSukumar Swaminathan 	rval = DDI_FAILURE;
6619bafec742SSukumar Swaminathan 
6620bafec742SSukumar Swaminathan 	/* first get the instance */
6621bafec742SSukumar Swaminathan 	instance = ddi_get_instance(dip);
6622bafec742SSukumar Swaminathan 
6623bafec742SSukumar Swaminathan 	switch (cmd) {
6624bafec742SSukumar Swaminathan 	case DDI_ATTACH:
6625bafec742SSukumar Swaminathan 		/*
6626bafec742SSukumar Swaminathan 		 * Check that hardware is installed in a DMA-capable slot
6627bafec742SSukumar Swaminathan 		 */
6628bafec742SSukumar Swaminathan 		if (ddi_slaveonly(dip) == DDI_SUCCESS) {
6629bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "?%s(%d): Not installed in a "
6630bafec742SSukumar Swaminathan 			    "DMA-capable slot", ADAPTER_NAME, instance);
6631bafec742SSukumar Swaminathan 			break;
6632bafec742SSukumar Swaminathan 		}
6633bafec742SSukumar Swaminathan 
6634bafec742SSukumar Swaminathan 		/*
6635bafec742SSukumar Swaminathan 		 * No support for high-level interrupts
6636bafec742SSukumar Swaminathan 		 */
6637bafec742SSukumar Swaminathan 		if (ddi_intr_hilevel(dip, 0) != 0) {
6638bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "?%s(%d): No support for high-level"
6639bafec742SSukumar Swaminathan 			    " intrs", ADAPTER_NAME, instance);
6640bafec742SSukumar Swaminathan 			break;
6641bafec742SSukumar Swaminathan 		}
6642bafec742SSukumar Swaminathan 
6643bafec742SSukumar Swaminathan 		/*
6644bafec742SSukumar Swaminathan 		 * Allocate our per-device-instance structure
6645bafec742SSukumar Swaminathan 		 */
6646bafec742SSukumar Swaminathan 
6647bafec742SSukumar Swaminathan 		qlge = (qlge_t *)kmem_zalloc(sizeof (*qlge), KM_SLEEP);
6648bafec742SSukumar Swaminathan 		ASSERT(qlge != NULL);
6649bafec742SSukumar Swaminathan 
6650bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_SOFTSTATE_ALLOC;
6651bafec742SSukumar Swaminathan 
6652bafec742SSukumar Swaminathan 		qlge->dip = dip;
6653bafec742SSukumar Swaminathan 		qlge->instance = instance;
6654bafec742SSukumar Swaminathan 
6655bafec742SSukumar Swaminathan 		/*
6656bafec742SSukumar Swaminathan 		 * Setup the ISP8x00 registers address mapping to be
6657bafec742SSukumar Swaminathan 		 * accessed by this particular driver.
6658bafec742SSukumar Swaminathan 		 * 0x0   Configuration Space
6659bafec742SSukumar Swaminathan 		 * 0x1   I/O Space
6660bafec742SSukumar Swaminathan 		 * 0x2   1st Memory Space address - Control Register Set
6661bafec742SSukumar Swaminathan 		 * 0x3   2nd Memory Space address - Doorbell Memory Space
6662bafec742SSukumar Swaminathan 		 */
6663bafec742SSukumar Swaminathan 
6664bafec742SSukumar Swaminathan 		w = 2;
6665bafec742SSukumar Swaminathan 		if (ddi_regs_map_setup(dip, w, (caddr_t *)&qlge->iobase, 0,
6666bafec742SSukumar Swaminathan 		    sizeof (dev_reg_t), &ql_dev_acc_attr,
6667bafec742SSukumar Swaminathan 		    &qlge->dev_handle) != DDI_SUCCESS) {
6668bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): Unable to map device "
6669bafec742SSukumar Swaminathan 			    "registers", ADAPTER_NAME, instance);
6670bafec742SSukumar Swaminathan 			ql_free_resources(dip, qlge);
6671bafec742SSukumar Swaminathan 			break;
6672bafec742SSukumar Swaminathan 		}
6673bafec742SSukumar Swaminathan 
6674bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("ql_attach: I/O base = 0x%x\n",
6675bafec742SSukumar Swaminathan 		    qlge->iobase));
6676bafec742SSukumar Swaminathan 
6677bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_REGS_SETUP;
6678bafec742SSukumar Swaminathan 
6679bafec742SSukumar Swaminathan 		/* map Doorbell memory space */
6680bafec742SSukumar Swaminathan 		w = 3;
6681bafec742SSukumar Swaminathan 		if (ddi_regs_map_setup(dip, w,
6682bafec742SSukumar Swaminathan 		    (caddr_t *)&qlge->doorbell_reg_iobase, 0,
6683bafec742SSukumar Swaminathan 		    0x100000 /* sizeof (dev_doorbell_reg_t) */,
6684bafec742SSukumar Swaminathan 		    &ql_dev_acc_attr,
6685bafec742SSukumar Swaminathan 		    &qlge->dev_doorbell_reg_handle) != DDI_SUCCESS) {
6686bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): Unable to map Doorbell "
6687bafec742SSukumar Swaminathan 			    "registers",
6688bafec742SSukumar Swaminathan 			    ADAPTER_NAME, instance);
6689bafec742SSukumar Swaminathan 			ql_free_resources(dip, qlge);
6690bafec742SSukumar Swaminathan 			break;
6691bafec742SSukumar Swaminathan 		}
6692bafec742SSukumar Swaminathan 
6693bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("ql_attach: Doorbell I/O base = 0x%x\n",
6694bafec742SSukumar Swaminathan 		    qlge->doorbell_reg_iobase));
6695bafec742SSukumar Swaminathan 
6696bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_DOORBELL_REGS_SETUP;
6697bafec742SSukumar Swaminathan 
6698bafec742SSukumar Swaminathan 		/*
6699bafec742SSukumar Swaminathan 		 * Allocate a macinfo structure for this instance
6700bafec742SSukumar Swaminathan 		 */
6701bafec742SSukumar Swaminathan 		if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
6702bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): mac_alloc failed",
6703bafec742SSukumar Swaminathan 			    __func__, instance);
6704bafec742SSukumar Swaminathan 			ql_free_resources(dip, qlge);
6705bafec742SSukumar Swaminathan 			return (NULL);
6706bafec742SSukumar Swaminathan 		}
6707bafec742SSukumar Swaminathan 		/* save adapter status to dip private data */
6708bafec742SSukumar Swaminathan 		ddi_set_driver_private(dip, qlge);
6709bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d): Allocate macinfo structure done\n",
6710bafec742SSukumar Swaminathan 		    ADAPTER_NAME, instance));
6711bafec742SSukumar Swaminathan 
6712bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_MAC_ALLOC;
6713bafec742SSukumar Swaminathan 
6714bafec742SSukumar Swaminathan 		/*
6715bafec742SSukumar Swaminathan 		 * Attach this instance of the device
6716bafec742SSukumar Swaminathan 		 */
6717bafec742SSukumar Swaminathan 		/* Setup PCI Local Bus Configuration resource. */
6718bafec742SSukumar Swaminathan 		if (pci_config_setup(dip, &qlge->pci_handle) != DDI_SUCCESS) {
6719bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d):Unable to get PCI resources",
6720bafec742SSukumar Swaminathan 			    ADAPTER_NAME, instance);
6721bafec742SSukumar Swaminathan 			ql_free_resources(dip, qlge);
6722bafec742SSukumar Swaminathan 			break;
6723bafec742SSukumar Swaminathan 		}
6724bafec742SSukumar Swaminathan 
6725bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_PCI_CONFIG_SETUP;
6726bafec742SSukumar Swaminathan 
6727bafec742SSukumar Swaminathan 		if (ql_init_instance(qlge) != DDI_SUCCESS) {
6728bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): Unable to initialize device "
6729bafec742SSukumar Swaminathan 			    "instance", ADAPTER_NAME, instance);
6730bafec742SSukumar Swaminathan 			ql_free_resources(dip, qlge);
6731bafec742SSukumar Swaminathan 			break;
6732bafec742SSukumar Swaminathan 		}
6733bafec742SSukumar Swaminathan 
6734bafec742SSukumar Swaminathan 		/* Setup interrupt vectors */
6735bafec742SSukumar Swaminathan 		if (ql_alloc_irqs(qlge) != DDI_SUCCESS) {
6736bafec742SSukumar Swaminathan 			ql_free_resources(dip, qlge);
6737bafec742SSukumar Swaminathan 			break;
6738bafec742SSukumar Swaminathan 		}
6739bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_INTR_ALLOC;
6740bafec742SSukumar Swaminathan 
6741bafec742SSukumar Swaminathan 		/* Configure queues */
6742bafec742SSukumar Swaminathan 		if (ql_setup_rings(qlge) != DDI_SUCCESS) {
6743bafec742SSukumar Swaminathan 			ql_free_resources(dip, qlge);
6744bafec742SSukumar Swaminathan 			break;
6745bafec742SSukumar Swaminathan 		}
6746bafec742SSukumar Swaminathan 
6747bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_SETUP_RINGS;
6748bafec742SSukumar Swaminathan 		/*
6749bafec742SSukumar Swaminathan 		 * Map queues to interrupt vectors
6750bafec742SSukumar Swaminathan 		 */
6751bafec742SSukumar Swaminathan 		ql_resolve_queues_to_irqs(qlge);
6752bafec742SSukumar Swaminathan 		/*
6753bafec742SSukumar Swaminathan 		 * Add interrupt handlers
6754bafec742SSukumar Swaminathan 		 */
6755bafec742SSukumar Swaminathan 		if (ql_add_intr_handlers(qlge) != DDI_SUCCESS) {
6756bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Failed to add interrupt "
6757bafec742SSukumar Swaminathan 			    "handlers");
6758bafec742SSukumar Swaminathan 			ql_free_resources(dip, qlge);
6759bafec742SSukumar Swaminathan 			break;
6760bafec742SSukumar Swaminathan 		}
6761bafec742SSukumar Swaminathan 
6762bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_ADD_INTERRUPT;
6763bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("%s(%d): Add interrupt handler done\n",
6764bafec742SSukumar Swaminathan 		    ADAPTER_NAME, instance));
6765bafec742SSukumar Swaminathan 
6766bafec742SSukumar Swaminathan 		/* Initialize mutex, need the interrupt priority */
6767*0662fbf4SSukumar Swaminathan 		(void) ql_init_rx_tx_locks(qlge);
6768bafec742SSukumar Swaminathan 
6769bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_LOCKS_CREATED;
6770bafec742SSukumar Swaminathan 
6771bafec742SSukumar Swaminathan 		/*
6772bafec742SSukumar Swaminathan 		 * Use a soft interrupt to do something that we do not want
6773bafec742SSukumar Swaminathan 		 * to do in regular network functions or with mutexs being held
6774bafec742SSukumar Swaminathan 		 */
6775bafec742SSukumar Swaminathan 		if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_event_intr_hdl,
6776bafec742SSukumar Swaminathan 		    DDI_INTR_SOFTPRI_MIN, ql_mpi_event_work, (caddr_t)qlge)
6777bafec742SSukumar Swaminathan 		    != DDI_SUCCESS) {
6778bafec742SSukumar Swaminathan 			ql_free_resources(dip, qlge);
6779bafec742SSukumar Swaminathan 			break;
6780bafec742SSukumar Swaminathan 		}
6781bafec742SSukumar Swaminathan 
6782bafec742SSukumar Swaminathan 		if (ddi_intr_add_softint(qlge->dip, &qlge->asic_reset_intr_hdl,
6783bafec742SSukumar Swaminathan 		    DDI_INTR_SOFTPRI_MIN, ql_asic_reset_work, (caddr_t)qlge)
6784bafec742SSukumar Swaminathan 		    != DDI_SUCCESS) {
6785bafec742SSukumar Swaminathan 			ql_free_resources(dip, qlge);
6786bafec742SSukumar Swaminathan 			break;
6787bafec742SSukumar Swaminathan 		}
6788bafec742SSukumar Swaminathan 
6789bafec742SSukumar Swaminathan 		if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_reset_intr_hdl,
6790bafec742SSukumar Swaminathan 		    DDI_INTR_SOFTPRI_MIN, ql_mpi_reset_work, (caddr_t)qlge)
6791bafec742SSukumar Swaminathan 		    != DDI_SUCCESS) {
6792bafec742SSukumar Swaminathan 			ql_free_resources(dip, qlge);
6793bafec742SSukumar Swaminathan 			break;
6794bafec742SSukumar Swaminathan 		}
6795bafec742SSukumar Swaminathan 
6796bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_ADD_SOFT_INTERRUPT;
6797bafec742SSukumar Swaminathan 
6798bafec742SSukumar Swaminathan 		/*
6799bafec742SSukumar Swaminathan 		 * mutex to protect the adapter state structure.
6800bafec742SSukumar Swaminathan 		 * initialize mutexes according to the interrupt priority
6801bafec742SSukumar Swaminathan 		 */
6802bafec742SSukumar Swaminathan 		mutex_init(&qlge->gen_mutex, NULL, MUTEX_DRIVER,
6803bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
6804bafec742SSukumar Swaminathan 		mutex_init(&qlge->hw_mutex, NULL, MUTEX_DRIVER,
6805bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
6806bafec742SSukumar Swaminathan 		mutex_init(&qlge->mbx_mutex, NULL, MUTEX_DRIVER,
6807bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
6808bafec742SSukumar Swaminathan 
6809bafec742SSukumar Swaminathan 		/* Mailbox wait and interrupt conditional variable. */
6810bafec742SSukumar Swaminathan 		cv_init(&qlge->cv_mbx_intr, NULL, CV_DRIVER, NULL);
6811bafec742SSukumar Swaminathan 
6812bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_MUTEX;
6813bafec742SSukumar Swaminathan 
6814bafec742SSukumar Swaminathan 		/*
6815bafec742SSukumar Swaminathan 		 * KStats
6816bafec742SSukumar Swaminathan 		 */
6817bafec742SSukumar Swaminathan 		if (ql_init_kstats(qlge) != DDI_SUCCESS) {
6818bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): KState initialization failed",
6819bafec742SSukumar Swaminathan 			    ADAPTER_NAME, instance);
6820bafec742SSukumar Swaminathan 			ql_free_resources(dip, qlge);
6821bafec742SSukumar Swaminathan 			break;
6822bafec742SSukumar Swaminathan 		}
6823bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_KSTATS;
6824bafec742SSukumar Swaminathan 
6825bafec742SSukumar Swaminathan 		/*
6826bafec742SSukumar Swaminathan 		 * Initialize gld macinfo structure
6827bafec742SSukumar Swaminathan 		 */
6828bafec742SSukumar Swaminathan 		ql_gld3_init(qlge, macp);
6829bafec742SSukumar Swaminathan 
6830bafec742SSukumar Swaminathan 		if (mac_register(macp, &qlge->mh) != DDI_SUCCESS) {
6831bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): mac_register failed",
6832bafec742SSukumar Swaminathan 			    __func__, instance);
6833bafec742SSukumar Swaminathan 			ql_free_resources(dip, qlge);
6834bafec742SSukumar Swaminathan 			break;
6835bafec742SSukumar Swaminathan 		}
6836bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_MAC_REGISTERED;
6837bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("%s(%d): mac_register done\n",
6838bafec742SSukumar Swaminathan 		    ADAPTER_NAME, instance));
6839bafec742SSukumar Swaminathan 
6840bafec742SSukumar Swaminathan 		mac_free(macp);
6841bafec742SSukumar Swaminathan 		macp = NULL;
6842bafec742SSukumar Swaminathan 
6843bafec742SSukumar Swaminathan 		qlge->mac_flags = QL_MAC_ATTACHED;
6844bafec742SSukumar Swaminathan 
6845bafec742SSukumar Swaminathan 		/*
6846bafec742SSukumar Swaminathan 		 * Allocate memory resources
6847bafec742SSukumar Swaminathan 		 */
6848bafec742SSukumar Swaminathan 		if (ql_alloc_mem_resources(qlge) != DDI_SUCCESS) {
6849bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): memory allocation failed",
6850bafec742SSukumar Swaminathan 			    __func__, qlge->instance);
6851bafec742SSukumar Swaminathan 			ql_free_mem_resources(qlge);
6852bafec742SSukumar Swaminathan 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
6853bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
6854bafec742SSukumar Swaminathan 		}
6855bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_MEMORY_ALLOC;
6856bafec742SSukumar Swaminathan 
6857bafec742SSukumar Swaminathan 		ddi_report_dev(dip);
6858bafec742SSukumar Swaminathan 
6859bafec742SSukumar Swaminathan 		rval = DDI_SUCCESS;
6860bafec742SSukumar Swaminathan 	break;
6861bafec742SSukumar Swaminathan /*
6862bafec742SSukumar Swaminathan  * DDI_RESUME
6863bafec742SSukumar Swaminathan  * When called  with  cmd  set  to  DDI_RESUME,  attach()  must
6864bafec742SSukumar Swaminathan  * restore  the hardware state of a device (power may have been
6865bafec742SSukumar Swaminathan  * removed from the device), allow  pending  requests  to  con-
6866bafec742SSukumar Swaminathan  * tinue,  and  service  new requests. In this case, the driver
6867bafec742SSukumar Swaminathan  * must not  make  any  assumptions  about  the  state  of  the
6868bafec742SSukumar Swaminathan  * hardware,  but  must  restore the state of the device except
6869bafec742SSukumar Swaminathan  * for the power level of components.
6870bafec742SSukumar Swaminathan  *
6871bafec742SSukumar Swaminathan  */
6872bafec742SSukumar Swaminathan 	case DDI_RESUME:
6873bafec742SSukumar Swaminathan 
6874bafec742SSukumar Swaminathan 		if ((qlge = (qlge_t *)QL_GET_DEV(dip)) == NULL)
6875bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
6876bafec742SSukumar Swaminathan 
6877bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("%s(%d)-DDI_RESUME\n",
6878bafec742SSukumar Swaminathan 		    __func__, qlge->instance));
6879bafec742SSukumar Swaminathan 
6880bafec742SSukumar Swaminathan 		mutex_enter(&qlge->gen_mutex);
6881bafec742SSukumar Swaminathan 		rval = ql_do_start(qlge);
6882bafec742SSukumar Swaminathan 		mutex_exit(&qlge->gen_mutex);
6883bafec742SSukumar Swaminathan 		break;
6884bafec742SSukumar Swaminathan 
6885bafec742SSukumar Swaminathan 	default:
6886bafec742SSukumar Swaminathan 		break;
6887bafec742SSukumar Swaminathan 	}
6888bafec742SSukumar Swaminathan 	return (rval);
6889bafec742SSukumar Swaminathan }
6890bafec742SSukumar Swaminathan 
6891bafec742SSukumar Swaminathan /*
6892bafec742SSukumar Swaminathan  * Unbind all pending tx dma handles during driver bring down
6893bafec742SSukumar Swaminathan  */
6894bafec742SSukumar Swaminathan static void
6895bafec742SSukumar Swaminathan ql_unbind_pending_tx_dma_handle(struct tx_ring *tx_ring)
6896bafec742SSukumar Swaminathan {
6897bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_ring_desc;
6898bafec742SSukumar Swaminathan 	int i, j;
6899bafec742SSukumar Swaminathan 
6900bafec742SSukumar Swaminathan 	if (tx_ring->wq_desc) {
6901bafec742SSukumar Swaminathan 		tx_ring_desc = tx_ring->wq_desc;
6902bafec742SSukumar Swaminathan 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
6903bafec742SSukumar Swaminathan 			for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
6904bafec742SSukumar Swaminathan 				if (tx_ring_desc->tx_dma_handle[j]) {
6905bafec742SSukumar Swaminathan 					(void) ddi_dma_unbind_handle(
6906bafec742SSukumar Swaminathan 					    tx_ring_desc->tx_dma_handle[j]);
6907bafec742SSukumar Swaminathan 				}
6908bafec742SSukumar Swaminathan 			}
6909bafec742SSukumar Swaminathan 			tx_ring_desc->tx_dma_handle_used = 0;
6910bafec742SSukumar Swaminathan 		} /* end of for loop */
6911bafec742SSukumar Swaminathan 	}
6912bafec742SSukumar Swaminathan }
6913bafec742SSukumar Swaminathan /*
6914bafec742SSukumar Swaminathan  * Wait for all the packets sent to the chip to finish transmission
6915bafec742SSukumar Swaminathan  * to prevent buffers to be unmapped before or during a transmit operation
6916bafec742SSukumar Swaminathan  */
6917bafec742SSukumar Swaminathan static int
6918bafec742SSukumar Swaminathan ql_wait_tx_quiesce(qlge_t *qlge)
6919bafec742SSukumar Swaminathan {
6920bafec742SSukumar Swaminathan 	int count = MAX_TX_WAIT_COUNT, i;
6921bafec742SSukumar Swaminathan 	int rings_done;
6922bafec742SSukumar Swaminathan 	volatile struct tx_ring *tx_ring;
6923bafec742SSukumar Swaminathan 	uint32_t consumer_idx;
6924bafec742SSukumar Swaminathan 	uint32_t producer_idx;
6925bafec742SSukumar Swaminathan 	uint32_t temp;
6926bafec742SSukumar Swaminathan 	int done = 0;
6927bafec742SSukumar Swaminathan 	int rval = DDI_FAILURE;
6928bafec742SSukumar Swaminathan 
6929bafec742SSukumar Swaminathan 	while (!done) {
6930bafec742SSukumar Swaminathan 		rings_done = 0;
6931bafec742SSukumar Swaminathan 
6932bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->tx_ring_count; i++) {
6933bafec742SSukumar Swaminathan 			tx_ring = &qlge->tx_ring[i];
6934bafec742SSukumar Swaminathan 			temp = ql_read_doorbell_reg(qlge,
6935bafec742SSukumar Swaminathan 			    tx_ring->prod_idx_db_reg);
6936bafec742SSukumar Swaminathan 			producer_idx = temp & 0x0000ffff;
6937bafec742SSukumar Swaminathan 			consumer_idx = (temp >> 16);
6938bafec742SSukumar Swaminathan 
6939bafec742SSukumar Swaminathan 			/*
6940bafec742SSukumar Swaminathan 			 * Get the pending iocb count, ones which have not been
6941bafec742SSukumar Swaminathan 			 * pulled down by the chip
6942bafec742SSukumar Swaminathan 			 */
6943bafec742SSukumar Swaminathan 			if (producer_idx >= consumer_idx)
6944bafec742SSukumar Swaminathan 				temp = (producer_idx - consumer_idx);
6945bafec742SSukumar Swaminathan 			else
6946bafec742SSukumar Swaminathan 				temp = (tx_ring->wq_len - consumer_idx) +
6947bafec742SSukumar Swaminathan 				    producer_idx;
6948bafec742SSukumar Swaminathan 
6949bafec742SSukumar Swaminathan 			if ((tx_ring->tx_free_count + temp) >= tx_ring->wq_len)
6950bafec742SSukumar Swaminathan 				rings_done++;
6951bafec742SSukumar Swaminathan 			else {
6952bafec742SSukumar Swaminathan 				done = 1;
6953bafec742SSukumar Swaminathan 				break;
6954bafec742SSukumar Swaminathan 			}
6955bafec742SSukumar Swaminathan 		}
6956bafec742SSukumar Swaminathan 
6957bafec742SSukumar Swaminathan 		/* If all the rings are done */
6958bafec742SSukumar Swaminathan 		if (rings_done >= qlge->tx_ring_count) {
6959bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
6960bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "%s(%d) done successfully \n",
6961bafec742SSukumar Swaminathan 			    __func__, qlge->instance);
6962bafec742SSukumar Swaminathan #endif
6963bafec742SSukumar Swaminathan 			rval = DDI_SUCCESS;
6964bafec742SSukumar Swaminathan 			break;
6965bafec742SSukumar Swaminathan 		}
6966bafec742SSukumar Swaminathan 
6967bafec742SSukumar Swaminathan 		qlge_delay(100);
6968bafec742SSukumar Swaminathan 
6969bafec742SSukumar Swaminathan 		count--;
6970bafec742SSukumar Swaminathan 		if (!count) {
6971bafec742SSukumar Swaminathan 
6972bafec742SSukumar Swaminathan 			count = MAX_TX_WAIT_COUNT;
6973bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
6974bafec742SSukumar Swaminathan 			volatile struct rx_ring *rx_ring;
6975bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "%s(%d): Waiting for %d pending"
6976bafec742SSukumar Swaminathan 			    " Transmits on queue %d to complete .\n",
6977bafec742SSukumar Swaminathan 			    __func__, qlge->instance,
6978bafec742SSukumar Swaminathan 			    (qlge->tx_ring[i].wq_len -
6979bafec742SSukumar Swaminathan 			    qlge->tx_ring[i].tx_free_count),
6980bafec742SSukumar Swaminathan 			    i);
6981bafec742SSukumar Swaminathan 
6982bafec742SSukumar Swaminathan 			rx_ring = &qlge->rx_ring[i+1];
6983bafec742SSukumar Swaminathan 			temp = ql_read_doorbell_reg(qlge,
6984bafec742SSukumar Swaminathan 			    rx_ring->cnsmr_idx_db_reg);
6985bafec742SSukumar Swaminathan 			consumer_idx = temp & 0x0000ffff;
6986bafec742SSukumar Swaminathan 			producer_idx = (temp >> 16);
6987bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "%s(%d): Transmit completion queue %d,"
6988bafec742SSukumar Swaminathan 			    " Producer %d, Consumer %d\n",
6989bafec742SSukumar Swaminathan 			    __func__, qlge->instance,
6990bafec742SSukumar Swaminathan 			    i+1,
6991bafec742SSukumar Swaminathan 			    producer_idx, consumer_idx);
6992bafec742SSukumar Swaminathan 
6993bafec742SSukumar Swaminathan 			temp = ql_read_doorbell_reg(qlge,
6994bafec742SSukumar Swaminathan 			    tx_ring->prod_idx_db_reg);
6995bafec742SSukumar Swaminathan 			producer_idx = temp & 0x0000ffff;
6996bafec742SSukumar Swaminathan 			consumer_idx = (temp >> 16);
6997bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "%s(%d): Transmit request queue %d,"
6998bafec742SSukumar Swaminathan 			    " Producer %d, Consumer %d\n",
6999bafec742SSukumar Swaminathan 			    __func__, qlge->instance, i,
7000bafec742SSukumar Swaminathan 			    producer_idx, consumer_idx);
7001bafec742SSukumar Swaminathan #endif
7002bafec742SSukumar Swaminathan 
7003bafec742SSukumar Swaminathan 			/* For now move on */
7004bafec742SSukumar Swaminathan 			break;
7005bafec742SSukumar Swaminathan 		}
7006bafec742SSukumar Swaminathan 	}
7007bafec742SSukumar Swaminathan 	/* Stop the request queue */
7008bafec742SSukumar Swaminathan 	mutex_enter(&qlge->hw_mutex);
7009bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
7010bafec742SSukumar Swaminathan 		if (qlge->tx_ring[i].valid_db_reg) {
7011bafec742SSukumar Swaminathan 			ql_write_doorbell_reg(qlge,
7012bafec742SSukumar Swaminathan 			    qlge->tx_ring[i].valid_db_reg, 0);
7013bafec742SSukumar Swaminathan 		}
7014bafec742SSukumar Swaminathan 	}
7015bafec742SSukumar Swaminathan 	mutex_exit(&qlge->hw_mutex);
7016bafec742SSukumar Swaminathan 	return (rval);
7017bafec742SSukumar Swaminathan }
7018bafec742SSukumar Swaminathan 
7019bafec742SSukumar Swaminathan /*
7020bafec742SSukumar Swaminathan  * Wait for all the receives indicated to the stack to come back
7021bafec742SSukumar Swaminathan  */
7022bafec742SSukumar Swaminathan static int
7023bafec742SSukumar Swaminathan ql_wait_rx_complete(qlge_t *qlge)
7024bafec742SSukumar Swaminathan {
7025bafec742SSukumar Swaminathan 	int i;
7026bafec742SSukumar Swaminathan 	/* Disable all the completion queues */
7027bafec742SSukumar Swaminathan 	mutex_enter(&qlge->hw_mutex);
7028bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
7029bafec742SSukumar Swaminathan 		if (qlge->rx_ring[i].valid_db_reg) {
7030bafec742SSukumar Swaminathan 			ql_write_doorbell_reg(qlge,
7031bafec742SSukumar Swaminathan 			    qlge->rx_ring[i].valid_db_reg, 0);
7032bafec742SSukumar Swaminathan 		}
7033bafec742SSukumar Swaminathan 	}
7034bafec742SSukumar Swaminathan 	mutex_exit(&qlge->hw_mutex);
7035bafec742SSukumar Swaminathan 
7036bafec742SSukumar Swaminathan 	/* Wait for OS to return all rx buffers */
7037bafec742SSukumar Swaminathan 	qlge_delay(QL_ONE_SEC_DELAY);
7038bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
7039bafec742SSukumar Swaminathan }
7040bafec742SSukumar Swaminathan 
/*
 * ql_bringdown_adapter
 * Quiesce and stop the adapter: stop routing of inbound packets to the
 * driver, drain pending transmits, disable interrupts, wait for the
 * stack to return rx buffers, then reset the ASIC.  Only acts when the
 * adapter was previously initialized (ADAPTER_INIT set in
 * qlge->sequence); the ordering of the steps below is deliberate.
 *
 * Returns DDI_SUCCESS, or the error from ql_sem_spinlock() /
 * ql_wait_rx_complete().
 */
static int
ql_bringdown_adapter(qlge_t *qlge)
{
	int i;
	int status = DDI_SUCCESS;

	qlge->mac_flags = QL_MAC_BRINGDOWN;
	if (qlge->sequence & ADAPTER_INIT) {
		/*
		 * Stop forwarding external packets to driver: take the
		 * routing-index semaphore, clear routing, release it.
		 */
		status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
		if (status)
			return (status);
		(void) ql_stop_routing(qlge);
		ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
		/*
		 * Set the flag for receive and transmit
		 * operations to cease; each ring's flag is updated
		 * under its own lock so in-progress tx/rx paths see
		 * a consistent value.
		 */
		for (i = 0; i < qlge->tx_ring_count; i++) {
			mutex_enter(&qlge->tx_ring[i].tx_lock);
			qlge->tx_ring[i].mac_flags = QL_MAC_STOPPED;
			mutex_exit(&qlge->tx_ring[i].tx_lock);
		}

		for (i = 0; i < qlge->rx_ring_count; i++) {
			mutex_enter(&qlge->rx_ring[i].rx_lock);
			qlge->rx_ring[i].mac_flags = QL_MAC_STOPPED;
			mutex_exit(&qlge->rx_ring[i].rx_lock);
		}

		/*
		 * Need interrupts to be running while the transmit
		 * completions are cleared. Wait for the packets
		 * queued to the chip to be sent out, THEN disable
		 * interrupts (order matters).
		 */
		(void) ql_wait_tx_quiesce(qlge);
		/* Interrupts not needed from now */
		ql_disable_all_completion_interrupts(qlge);

		mutex_enter(&qlge->hw_mutex);
		/* Disable Global interrupt */
		ql_disable_global_interrupt(qlge);
		mutex_exit(&qlge->hw_mutex);

		/* Wait for all the indicated packets to come back */
		status = ql_wait_rx_complete(qlge);

		mutex_enter(&qlge->hw_mutex);
		/* Reset adapter */
		(void) ql_asic_reset(qlge);
		/*
		 * Unbind all tx dma handles to prevent pending tx descriptors'
		 * dma handles from being re-used.
		 */
		for (i = 0; i < qlge->tx_ring_count; i++) {
			ql_unbind_pending_tx_dma_handle(&qlge->tx_ring[i]);
		}

		/* Mark the adapter as no longer initialized */
		qlge->sequence &= ~ADAPTER_INIT;

		mutex_exit(&qlge->hw_mutex);
	}
	return (status);
}
7108bafec742SSukumar Swaminathan 
7109bafec742SSukumar Swaminathan /*
7110bafec742SSukumar Swaminathan  * ql_detach
7111bafec742SSukumar Swaminathan  * Used to remove all the states associated with a given
7112bafec742SSukumar Swaminathan  * instances of a device node prior to the removal of that
7113bafec742SSukumar Swaminathan  * instance from the system.
7114bafec742SSukumar Swaminathan  */
7115bafec742SSukumar Swaminathan static int
7116bafec742SSukumar Swaminathan ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
7117bafec742SSukumar Swaminathan {
7118bafec742SSukumar Swaminathan 	qlge_t *qlge;
7119bafec742SSukumar Swaminathan 	int rval;
7120bafec742SSukumar Swaminathan 
7121bafec742SSukumar Swaminathan 	rval = DDI_SUCCESS;
7122bafec742SSukumar Swaminathan 
7123bafec742SSukumar Swaminathan 	switch (cmd) {
7124bafec742SSukumar Swaminathan 	case DDI_DETACH:
7125bafec742SSukumar Swaminathan 
7126bafec742SSukumar Swaminathan 		if ((qlge = QL_GET_DEV(dip)) == NULL)
7127bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
7128bafec742SSukumar Swaminathan 		rval = ql_bringdown_adapter(qlge);
7129bafec742SSukumar Swaminathan 		if (rval != DDI_SUCCESS)
7130bafec742SSukumar Swaminathan 			break;
7131bafec742SSukumar Swaminathan 
7132bafec742SSukumar Swaminathan 		qlge->mac_flags = QL_MAC_DETACH;
7133bafec742SSukumar Swaminathan 
7134bafec742SSukumar Swaminathan 		/* free memory resources */
7135bafec742SSukumar Swaminathan 		if (qlge->sequence & INIT_MEMORY_ALLOC) {
7136bafec742SSukumar Swaminathan 			ql_free_mem_resources(qlge);
7137bafec742SSukumar Swaminathan 			qlge->sequence &= ~INIT_MEMORY_ALLOC;
7138bafec742SSukumar Swaminathan 		}
7139bafec742SSukumar Swaminathan 		ql_free_resources(dip, qlge);
7140bafec742SSukumar Swaminathan 
7141bafec742SSukumar Swaminathan 		break;
7142bafec742SSukumar Swaminathan 
7143bafec742SSukumar Swaminathan 	case DDI_SUSPEND:
7144bafec742SSukumar Swaminathan 		if ((qlge = QL_GET_DEV(dip)) == NULL)
7145bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
7146bafec742SSukumar Swaminathan 
7147bafec742SSukumar Swaminathan 		mutex_enter(&qlge->gen_mutex);
7148bafec742SSukumar Swaminathan 		if ((qlge->mac_flags == QL_MAC_ATTACHED) ||
7149bafec742SSukumar Swaminathan 		    (qlge->mac_flags == QL_MAC_STARTED)) {
7150*0662fbf4SSukumar Swaminathan 			(void) ql_do_stop(qlge);
7151bafec742SSukumar Swaminathan 		}
7152bafec742SSukumar Swaminathan 		qlge->mac_flags = QL_MAC_SUSPENDED;
7153bafec742SSukumar Swaminathan 		mutex_exit(&qlge->gen_mutex);
7154bafec742SSukumar Swaminathan 
7155bafec742SSukumar Swaminathan 		break;
7156bafec742SSukumar Swaminathan 	default:
7157bafec742SSukumar Swaminathan 		rval = DDI_FAILURE;
7158bafec742SSukumar Swaminathan 		break;
7159bafec742SSukumar Swaminathan 	}
7160bafec742SSukumar Swaminathan 
7161bafec742SSukumar Swaminathan 	return (rval);
7162bafec742SSukumar Swaminathan }
7163bafec742SSukumar Swaminathan 
7164bafec742SSukumar Swaminathan /*
7165bafec742SSukumar Swaminathan  * quiesce(9E) entry point.
7166bafec742SSukumar Swaminathan  *
7167bafec742SSukumar Swaminathan  * This function is called when the system is single-threaded at high
7168bafec742SSukumar Swaminathan  * PIL with preemption disabled. Therefore, this function must not be
7169bafec742SSukumar Swaminathan  * blocked.
7170bafec742SSukumar Swaminathan  *
7171bafec742SSukumar Swaminathan  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
7172bafec742SSukumar Swaminathan  */
int
ql_quiesce(dev_info_t *dip)
{
	qlge_t *qlge;
	int i;

	if ((qlge = QL_GET_DEV(dip)) == NULL)
		return (DDI_FAILURE);

	/*
	 * quiesce(9E) runs single-threaded at high PIL with preemption
	 * disabled, so no mutexes are taken here and only polled delays
	 * are used.  Only the 8100 chip is handled; other chips are a
	 * no-op and report success.
	 */
	if (CFG_IST(qlge, CFG_CHIP_8100)) {
		/* stop forwarding external packets to driver */
		(void) ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
		(void) ql_stop_routing(qlge);
		ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
		/* Stop all the request queues */
		for (i = 0; i < qlge->tx_ring_count; i++) {
			if (qlge->tx_ring[i].valid_db_reg) {
				ql_write_doorbell_reg(qlge,
				    qlge->tx_ring[i].valid_db_reg, 0);
			}
		}
		/* let in-flight transmit activity settle */
		qlge_delay(QL_ONE_SEC_DELAY/4);
		/* Interrupts not needed from now */
		/* Disable MPI interrupt */
		ql_write_reg(qlge, REG_INTERRUPT_MASK,
		    (INTR_MASK_PI << 16));
		ql_disable_global_interrupt(qlge);

		/* Disable all the rx completion queues */
		for (i = 0; i < qlge->rx_ring_count; i++) {
			if (qlge->rx_ring[i].valid_db_reg) {
				ql_write_doorbell_reg(qlge,
				    qlge->rx_ring[i].valid_db_reg, 0);
			}
		}
		qlge_delay(QL_ONE_SEC_DELAY/4);
		qlge->mac_flags = QL_MAC_STOPPED;
		/* Reset adapter */
		(void) ql_asic_reset(qlge);
		qlge_delay(100);
	}

	return (DDI_SUCCESS);
}
7217bafec742SSukumar Swaminathan 
7218bafec742SSukumar Swaminathan QL_STREAM_OPS(ql_ops, ql_attach, ql_detach);
7219bafec742SSukumar Swaminathan 
7220bafec742SSukumar Swaminathan /*
7221bafec742SSukumar Swaminathan  * Loadable Driver Interface Structures.
7222bafec742SSukumar Swaminathan  * Declare and initialize the module configuration section...
7223bafec742SSukumar Swaminathan  */
7224bafec742SSukumar Swaminathan static struct modldrv modldrv = {
7225bafec742SSukumar Swaminathan 	&mod_driverops,		/* type of module: driver */
7226bafec742SSukumar Swaminathan 	version,		/* name of module */
7227bafec742SSukumar Swaminathan 	&ql_ops			/* driver dev_ops */
7228bafec742SSukumar Swaminathan };
7229bafec742SSukumar Swaminathan 
7230bafec742SSukumar Swaminathan static struct modlinkage modlinkage = {
7231bafec742SSukumar Swaminathan 	MODREV_1, 	&modldrv,	NULL
7232bafec742SSukumar Swaminathan };
7233bafec742SSukumar Swaminathan 
7234bafec742SSukumar Swaminathan /*
7235bafec742SSukumar Swaminathan  * Loadable Module Routines
7236bafec742SSukumar Swaminathan  */
7237bafec742SSukumar Swaminathan 
7238bafec742SSukumar Swaminathan /*
7239bafec742SSukumar Swaminathan  * _init
7240bafec742SSukumar Swaminathan  * Initializes a loadable module. It is called before any other
7241bafec742SSukumar Swaminathan  * routine in a loadable module.
7242bafec742SSukumar Swaminathan  */
7243bafec742SSukumar Swaminathan int
7244bafec742SSukumar Swaminathan _init(void)
7245bafec742SSukumar Swaminathan {
7246bafec742SSukumar Swaminathan 	int rval;
7247bafec742SSukumar Swaminathan 
7248bafec742SSukumar Swaminathan 	mac_init_ops(&ql_ops, ADAPTER_NAME);
7249bafec742SSukumar Swaminathan 	rval = mod_install(&modlinkage);
7250bafec742SSukumar Swaminathan 	if (rval != DDI_SUCCESS) {
7251bafec742SSukumar Swaminathan 		mac_fini_ops(&ql_ops);
7252bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "?Unable to install/attach driver '%s'",
7253bafec742SSukumar Swaminathan 		    ADAPTER_NAME);
7254bafec742SSukumar Swaminathan 	}
7255bafec742SSukumar Swaminathan 
7256bafec742SSukumar Swaminathan 	return (rval);
7257bafec742SSukumar Swaminathan }
7258bafec742SSukumar Swaminathan 
7259bafec742SSukumar Swaminathan /*
7260bafec742SSukumar Swaminathan  * _fini
7261bafec742SSukumar Swaminathan  * Prepares a module for unloading. It is called when the system
7262bafec742SSukumar Swaminathan  * wants to unload a module. If the module determines that it can
7263bafec742SSukumar Swaminathan  * be unloaded, then _fini() returns the value returned by
7264bafec742SSukumar Swaminathan  * mod_remove(). Upon successful return from _fini() no other
7265bafec742SSukumar Swaminathan  * routine in the module will be called before _init() is called.
7266bafec742SSukumar Swaminathan  */
7267bafec742SSukumar Swaminathan int
7268bafec742SSukumar Swaminathan _fini(void)
7269bafec742SSukumar Swaminathan {
7270bafec742SSukumar Swaminathan 	int rval;
7271bafec742SSukumar Swaminathan 
7272bafec742SSukumar Swaminathan 	rval = mod_remove(&modlinkage);
7273bafec742SSukumar Swaminathan 	if (rval == DDI_SUCCESS) {
7274bafec742SSukumar Swaminathan 		mac_fini_ops(&ql_ops);
7275bafec742SSukumar Swaminathan 	}
7276bafec742SSukumar Swaminathan 
7277bafec742SSukumar Swaminathan 	return (rval);
7278bafec742SSukumar Swaminathan }
7279bafec742SSukumar Swaminathan 
7280bafec742SSukumar Swaminathan /*
7281bafec742SSukumar Swaminathan  * _info
7282bafec742SSukumar Swaminathan  * Returns information about loadable module.
7283bafec742SSukumar Swaminathan  */
int
_info(struct modinfo *modinfop)
{
	/* Let the module framework fill in modinfop from our linkage */
	return (mod_info(&modlinkage, modinfop));
}
7289