xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge.c (revision a6766df40e85f57b203ec7f2143dd1a99e6fe03d)
1bafec742SSukumar Swaminathan /*
2bafec742SSukumar Swaminathan  * CDDL HEADER START
3bafec742SSukumar Swaminathan  *
4bafec742SSukumar Swaminathan  * The contents of this file are subject to the terms of the
5bafec742SSukumar Swaminathan  * Common Development and Distribution License (the "License").
6bafec742SSukumar Swaminathan  * You may not use this file except in compliance with the License.
7bafec742SSukumar Swaminathan  *
8bafec742SSukumar Swaminathan  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9bafec742SSukumar Swaminathan  * or http://www.opensolaris.org/os/licensing.
10bafec742SSukumar Swaminathan  * See the License for the specific language governing permissions
11bafec742SSukumar Swaminathan  * and limitations under the License.
12bafec742SSukumar Swaminathan  *
13bafec742SSukumar Swaminathan  * When distributing Covered Code, include this CDDL HEADER in each
14bafec742SSukumar Swaminathan  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15bafec742SSukumar Swaminathan  * If applicable, add the following below this CDDL HEADER, with the
16bafec742SSukumar Swaminathan  * fields enclosed by brackets "[]" replaced with your own identifying
17bafec742SSukumar Swaminathan  * information: Portions Copyright [yyyy] [name of copyright owner]
18bafec742SSukumar Swaminathan  *
19bafec742SSukumar Swaminathan  * CDDL HEADER END
20bafec742SSukumar Swaminathan  */
21bafec742SSukumar Swaminathan 
22bafec742SSukumar Swaminathan /*
23accf27a5SSukumar Swaminathan  * Copyright 2010 QLogic Corporation. All rights reserved.
24bafec742SSukumar Swaminathan  */
25bafec742SSukumar Swaminathan 
26bafec742SSukumar Swaminathan #include <qlge.h>
27bafec742SSukumar Swaminathan #include <sys/atomic.h>
28bafec742SSukumar Swaminathan #include <sys/strsubr.h>
29bafec742SSukumar Swaminathan #include <sys/pattr.h>
30bafec742SSukumar Swaminathan #include <netinet/in.h>
31bafec742SSukumar Swaminathan #include <netinet/ip.h>
32bafec742SSukumar Swaminathan #include <netinet/ip6.h>
33bafec742SSukumar Swaminathan #include <netinet/tcp.h>
34bafec742SSukumar Swaminathan #include <netinet/udp.h>
35bafec742SSukumar Swaminathan #include <inet/ip.h>
36bafec742SSukumar Swaminathan 
37bafec742SSukumar Swaminathan 
38bafec742SSukumar Swaminathan 
39bafec742SSukumar Swaminathan /*
40bafec742SSukumar Swaminathan  * Local variables
41bafec742SSukumar Swaminathan  */
/* all-ones Ethernet broadcast address, used when programming routing regs */
static struct ether_addr ql_ether_broadcast_addr =
	{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/* driver identification string (reported via GLDv3) */
static char version[] = "GLDv3 QLogic 81XX " VERSIONSTR;
45bafec742SSukumar Swaminathan 
46bafec742SSukumar Swaminathan /*
47bafec742SSukumar Swaminathan  * Local function prototypes
48bafec742SSukumar Swaminathan  */
static void ql_free_resources(qlge_t *);
static void ql_fini_kstats(qlge_t *);
static uint32_t ql_get_link_state(qlge_t *);
static void ql_read_conf(qlge_t *);
/* DMA-able (physical) memory allocation/free helpers */
static int ql_alloc_phys(dev_info_t *, ddi_dma_handle_t *,
    ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
    size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
static int ql_alloc_phys_rbuf(dev_info_t *, ddi_dma_handle_t *,
    ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
    size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
static void ql_free_phys(ddi_dma_handle_t *, ddi_acc_handle_t *);
static int ql_set_routing_reg(qlge_t *, uint32_t, uint32_t, int);
static int ql_route_initialize(qlge_t *);
/* DDI attach(9E)/detach(9E) entry points */
static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
static int ql_bringdown_adapter(qlge_t *);
static int ql_bringup_adapter(qlge_t *);
static int ql_asic_reset(qlge_t *);
static void ql_wake_mpi_reset_soft_intr(qlge_t *);
static void ql_stop_timer(qlge_t *qlge);
static void ql_fm_fini(qlge_t *qlge);
int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring);
71bafec742SSukumar Swaminathan 
72bafec742SSukumar Swaminathan /*
73bafec742SSukumar Swaminathan  * TX DMA mapping handles allow multiple scatter-gather lists
74bafec742SSukumar Swaminathan  */
ddi_dma_attr_t  tx_mapping_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_MAX_TX_DMA_HANDLES,		/* s/g list length: multiple cookies */
	QL_DMA_GRANULARITY,		/* granularity of device */
	DDI_DMA_RELAXED_ORDERING	/* DMA transfer flags */
};
89bafec742SSukumar Swaminathan 
90bafec742SSukumar Swaminathan /*
91bafec742SSukumar Swaminathan  * Receive buffers and Request/Response queues do not allow scatter-gather lists
92bafec742SSukumar Swaminathan  */
ddi_dma_attr_t  dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	1,				/* s/g list length, i.e no sg list */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};
107accf27a5SSukumar Swaminathan /*
108accf27a5SSukumar Swaminathan  * Receive buffers do not allow scatter-gather lists
109accf27a5SSukumar Swaminathan  */
ddi_dma_attr_t  dma_attr_rbuf = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	0x1,				/* DMA address alignment: byte (1) */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	1,				/* s/g list length, i.e no sg list */
	QL_DMA_GRANULARITY,		/* granularity of device */
	DDI_DMA_RELAXED_ORDERING	/* DMA transfer flags */
};
124bafec742SSukumar Swaminathan /*
125bafec742SSukumar Swaminathan  * DMA access attribute structure.
126bafec742SSukumar Swaminathan  */
127bafec742SSukumar Swaminathan /* device register access from host */
ddi_device_acc_attr_t ql_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,	/* device registers are little-endian */
	DDI_STRICTORDER_ACC
};

/* host ring descriptors */
ddi_device_acc_attr_t ql_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,	/* no byte swapping on descriptor memory */
	DDI_STRICTORDER_ACC
};

/* host ring buffer */
ddi_device_acc_attr_t ql_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,	/* raw packet data, never swapped */
	DDI_STRICTORDER_ACC
};
147bafec742SSukumar Swaminathan 
148bafec742SSukumar Swaminathan /*
149bafec742SSukumar Swaminathan  * Hash key table for Receive Side Scaling (RSS) support
150bafec742SSukumar Swaminathan  */
/* 40-byte RSS hash key downloaded to the chip */
const uint8_t key_data[] = {
	0x23, 0x64, 0xa1, 0xaa, 0x37, 0xc0, 0xed, 0x05, 0x2b, 0x36,
	0x50, 0x5c, 0x45, 0x1e, 0x7e, 0xc8, 0x5d, 0x2a, 0x54, 0x2f,
	0xe4, 0x3d, 0x0f, 0xbb, 0x91, 0xd9, 0x25, 0x60, 0xd4, 0xf8,
	0x12, 0xa0, 0x59, 0x4b, 0x9e, 0x8a, 0x51, 0xda, 0xcd, 0x49};
156bafec742SSukumar Swaminathan 
157bafec742SSukumar Swaminathan /*
158bafec742SSukumar Swaminathan  * Shadow Registers:
159bafec742SSukumar Swaminathan  * Outbound queues have a consumer index that is maintained by the chip.
160bafec742SSukumar Swaminathan  * Inbound queues have a producer index that is maintained by the chip.
161bafec742SSukumar Swaminathan  * For lower overhead, these registers are "shadowed" to host memory
162bafec742SSukumar Swaminathan  * which allows the device driver to track the queue progress without
163bafec742SSukumar Swaminathan  * PCI reads. When an entry is placed on an inbound queue, the chip will
164bafec742SSukumar Swaminathan  * update the relevant index register and then copy the value to the
165bafec742SSukumar Swaminathan  * shadow register in host memory.
166accf27a5SSukumar Swaminathan  * Currently, ql_read_sh_reg only reads the inbound queues' producer index.
167bafec742SSukumar Swaminathan  */
168bafec742SSukumar Swaminathan 
169bafec742SSukumar Swaminathan static inline unsigned int
170accf27a5SSukumar Swaminathan ql_read_sh_reg(qlge_t *qlge, struct rx_ring *rx_ring)
171bafec742SSukumar Swaminathan {
172accf27a5SSukumar Swaminathan 	uint32_t rtn;
173accf27a5SSukumar Swaminathan 
174accf27a5SSukumar Swaminathan 	/* re-synchronize shadow prod index dma buffer before reading */
175accf27a5SSukumar Swaminathan 	(void) ddi_dma_sync(qlge->host_copy_shadow_dma_attr.dma_handle,
176accf27a5SSukumar Swaminathan 	    rx_ring->prod_idx_sh_reg_offset,
177accf27a5SSukumar Swaminathan 	    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
178accf27a5SSukumar Swaminathan 
179accf27a5SSukumar Swaminathan 	rtn = ddi_get32(qlge->host_copy_shadow_dma_attr.acc_handle,
180accf27a5SSukumar Swaminathan 	    (uint32_t *)rx_ring->prod_idx_sh_reg);
181accf27a5SSukumar Swaminathan 
182accf27a5SSukumar Swaminathan 	return (rtn);
183bafec742SSukumar Swaminathan }
184bafec742SSukumar Swaminathan 
185bafec742SSukumar Swaminathan /*
186bafec742SSukumar Swaminathan  * Read 32 bit atomically
187bafec742SSukumar Swaminathan  */
188bafec742SSukumar Swaminathan uint32_t
189bafec742SSukumar Swaminathan ql_atomic_read_32(volatile uint32_t *target)
190bafec742SSukumar Swaminathan {
191bafec742SSukumar Swaminathan 	/*
192bafec742SSukumar Swaminathan 	 * atomic_add_32_nv returns the new value after the add,
193bafec742SSukumar Swaminathan 	 * we are adding 0 so we should get the original value
194bafec742SSukumar Swaminathan 	 */
195bafec742SSukumar Swaminathan 	return (atomic_add_32_nv(target, 0));
196bafec742SSukumar Swaminathan }
197bafec742SSukumar Swaminathan 
198bafec742SSukumar Swaminathan /*
199bafec742SSukumar Swaminathan  * Set 32 bit atomically
200bafec742SSukumar Swaminathan  */
void
ql_atomic_set_32(volatile uint32_t *target, uint32_t newval)
{
	/* atomic_swap_32 returns the old value, which is not needed here */
	(void) atomic_swap_32(target, newval);
}
206bafec742SSukumar Swaminathan 
207bafec742SSukumar Swaminathan 
208bafec742SSukumar Swaminathan /*
209bafec742SSukumar Swaminathan  * Setup device PCI configuration registers.
210bafec742SSukumar Swaminathan  * Kernel context.
211bafec742SSukumar Swaminathan  */
static void
ql_pci_config(qlge_t *qlge)
{
	uint16_t w;

	/* cache vendor/device IDs for later chip-type decisions */
	qlge->vendor_id = (uint16_t)pci_config_get16(qlge->pci_handle,
	    PCI_CONF_VENID);
	qlge->device_id = (uint16_t)pci_config_get16(qlge->pci_handle,
	    PCI_CONF_DEVID);

	/*
	 * we want to respect framework's setting of PCI
	 * configuration space command register and also
	 * want to make sure that all bits of interest to us
	 * are properly set in PCI Command register(0x04).
	 * PCI_COMM_IO		0x1	 I/O access enable
	 * PCI_COMM_MAE		0x2	 Memory access enable
	 * PCI_COMM_ME		0x4	 bus master enable
	 * PCI_COMM_MEMWR_INVAL	0x10	 memory write and invalidate enable.
	 */
	w = (uint16_t)pci_config_get16(qlge->pci_handle, PCI_CONF_COMM);
	/* disable I/O space (MMIO only), enable MAE/ME/parity/SERR */
	w = (uint16_t)(w & (~PCI_COMM_IO));
	w = (uint16_t)(w | PCI_COMM_MAE | PCI_COMM_ME |
	    /* PCI_COMM_MEMWR_INVAL | */
	    PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);

	pci_config_put16(qlge->pci_handle, PCI_CONF_COMM, w);

	/*
	 * NOTE(review): offset 0x54 is presumably the PCIe Device Control
	 * register of this device's PCIe capability; this rewrites bits
	 * 14:12 (mask 0x7000) to 101b (0x5000), which would select the
	 * 4KB max read request size -- confirm against the config layout.
	 */
	w = pci_config_get16(qlge->pci_handle, 0x54);
	w = (uint16_t)(w & (~0x7000));
	w = (uint16_t)(w | 0x5000);
	pci_config_put16(qlge->pci_handle, 0x54, w);

	ql_dump_pci_config(qlge);
}
247bafec742SSukumar Swaminathan 
248bafec742SSukumar Swaminathan /*
249bafec742SSukumar Swaminathan  * This routine performs the necessary steps to set GLD mac information
250bafec742SSukumar Swaminathan  * such as Function number, xgmac mask and shift bits
251bafec742SSukumar Swaminathan  */
252bafec742SSukumar Swaminathan static int
253bafec742SSukumar Swaminathan ql_set_mac_info(qlge_t *qlge)
254bafec742SSukumar Swaminathan {
255bafec742SSukumar Swaminathan 	uint32_t value;
256accf27a5SSukumar Swaminathan 	int rval = DDI_FAILURE;
257bafec742SSukumar Swaminathan 	uint32_t fn0_net, fn1_net;
258bafec742SSukumar Swaminathan 
259bafec742SSukumar Swaminathan 	/* set default value */
260bafec742SSukumar Swaminathan 	qlge->fn0_net = FN0_NET;
261bafec742SSukumar Swaminathan 	qlge->fn1_net = FN1_NET;
262bafec742SSukumar Swaminathan 
263bafec742SSukumar Swaminathan 	if (ql_read_processor_data(qlge, MPI_REG, &value) != DDI_SUCCESS) {
264bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d) read MPI register failed",
265bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
266accf27a5SSukumar Swaminathan 		goto exit;
267bafec742SSukumar Swaminathan 	} else {
268bafec742SSukumar Swaminathan 		fn0_net = (value >> 1) & 0x07;
269bafec742SSukumar Swaminathan 		fn1_net = (value >> 5) & 0x07;
270bafec742SSukumar Swaminathan 		if ((fn0_net > 4) || (fn1_net > 4) || (fn0_net == fn1_net)) {
271bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d) bad mpi register value %x, \n"
272bafec742SSukumar Swaminathan 			    "nic0 function number %d,"
273bafec742SSukumar Swaminathan 			    "nic1 function number %d "
274bafec742SSukumar Swaminathan 			    "use default\n",
275bafec742SSukumar Swaminathan 			    __func__, qlge->instance, value, fn0_net, fn1_net);
276accf27a5SSukumar Swaminathan 			goto exit;
277bafec742SSukumar Swaminathan 		} else {
278bafec742SSukumar Swaminathan 			qlge->fn0_net = fn0_net;
279bafec742SSukumar Swaminathan 			qlge->fn1_net = fn1_net;
280bafec742SSukumar Swaminathan 		}
281bafec742SSukumar Swaminathan 	}
282bafec742SSukumar Swaminathan 
283bafec742SSukumar Swaminathan 	/* Get the function number that the driver is associated with */
284bafec742SSukumar Swaminathan 	value = ql_read_reg(qlge, REG_STATUS);
285bafec742SSukumar Swaminathan 	qlge->func_number = (uint8_t)((value >> 6) & 0x03);
286bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("status register is:%x, func_number: %d\n",
287bafec742SSukumar Swaminathan 	    value, qlge->func_number));
288bafec742SSukumar Swaminathan 
289bafec742SSukumar Swaminathan 	/* The driver is loaded on a non-NIC function? */
290bafec742SSukumar Swaminathan 	if ((qlge->func_number != qlge->fn0_net) &&
291bafec742SSukumar Swaminathan 	    (qlge->func_number != qlge->fn1_net)) {
292bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
293bafec742SSukumar Swaminathan 		    "Invalid function number = 0x%x\n", qlge->func_number);
294accf27a5SSukumar Swaminathan 		goto exit;
295bafec742SSukumar Swaminathan 	}
296bafec742SSukumar Swaminathan 	/* network port 0? */
297bafec742SSukumar Swaminathan 	if (qlge->func_number == qlge->fn0_net) {
298bafec742SSukumar Swaminathan 		qlge->xgmac_sem_mask = QL_PORT0_XGMAC_SEM_MASK;
299bafec742SSukumar Swaminathan 		qlge->xgmac_sem_bits = QL_PORT0_XGMAC_SEM_BITS;
300bafec742SSukumar Swaminathan 	} else {
301bafec742SSukumar Swaminathan 		qlge->xgmac_sem_mask = QL_PORT1_XGMAC_SEM_MASK;
302bafec742SSukumar Swaminathan 		qlge->xgmac_sem_bits = QL_PORT1_XGMAC_SEM_BITS;
303bafec742SSukumar Swaminathan 	}
304accf27a5SSukumar Swaminathan 	rval = DDI_SUCCESS;
305accf27a5SSukumar Swaminathan exit:
306bafec742SSukumar Swaminathan 	return (rval);
307bafec742SSukumar Swaminathan 
308bafec742SSukumar Swaminathan }
309bafec742SSukumar Swaminathan 
310bafec742SSukumar Swaminathan /*
311bafec742SSukumar Swaminathan  * write to doorbell register
312bafec742SSukumar Swaminathan  */
void
ql_write_doorbell_reg(qlge_t *qlge, uint32_t *addr, uint32_t data)
{
	/* single 32-bit store through the doorbell register mapping */
	ddi_put32(qlge->dev_doorbell_reg_handle, addr, data);
}
318bafec742SSukumar Swaminathan 
319bafec742SSukumar Swaminathan /*
320bafec742SSukumar Swaminathan  * read from doorbell register
321bafec742SSukumar Swaminathan  */
322bafec742SSukumar Swaminathan uint32_t
323bafec742SSukumar Swaminathan ql_read_doorbell_reg(qlge_t *qlge, uint32_t *addr)
324bafec742SSukumar Swaminathan {
325bafec742SSukumar Swaminathan 	uint32_t ret;
326bafec742SSukumar Swaminathan 
327bafec742SSukumar Swaminathan 	ret = ddi_get32(qlge->dev_doorbell_reg_handle, addr);
328bafec742SSukumar Swaminathan 
329bafec742SSukumar Swaminathan 	return	(ret);
330bafec742SSukumar Swaminathan }
331bafec742SSukumar Swaminathan 
332bafec742SSukumar Swaminathan /*
333bafec742SSukumar Swaminathan  * This function waits for a specific bit to come ready
334bafec742SSukumar Swaminathan  * in a given register.  It is used mostly by the initialize
335bafec742SSukumar Swaminathan  * process, but is also used in kernel thread API such as
336bafec742SSukumar Swaminathan  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
337bafec742SSukumar Swaminathan  */
338bafec742SSukumar Swaminathan static int
339bafec742SSukumar Swaminathan ql_wait_reg_rdy(qlge_t *qlge, uint32_t reg, uint32_t bit, uint32_t err_bit)
340bafec742SSukumar Swaminathan {
341bafec742SSukumar Swaminathan 	uint32_t temp;
342bafec742SSukumar Swaminathan 	int count = UDELAY_COUNT;
343bafec742SSukumar Swaminathan 
344bafec742SSukumar Swaminathan 	while (count) {
345bafec742SSukumar Swaminathan 		temp = ql_read_reg(qlge, reg);
346bafec742SSukumar Swaminathan 
347bafec742SSukumar Swaminathan 		/* check for errors */
348bafec742SSukumar Swaminathan 		if ((temp & err_bit) != 0) {
349bafec742SSukumar Swaminathan 			break;
350bafec742SSukumar Swaminathan 		} else if ((temp & bit) != 0)
351bafec742SSukumar Swaminathan 			return (DDI_SUCCESS);
352bafec742SSukumar Swaminathan 		qlge_delay(UDELAY_DELAY);
353bafec742SSukumar Swaminathan 		count--;
354bafec742SSukumar Swaminathan 	}
355bafec742SSukumar Swaminathan 	cmn_err(CE_WARN,
356bafec742SSukumar Swaminathan 	    "Waiting for reg %x to come ready failed.", reg);
357accf27a5SSukumar Swaminathan 	if (qlge->fm_enable) {
358accf27a5SSukumar Swaminathan 		ql_fm_ereport(qlge, DDI_FM_DEVICE_NO_RESPONSE);
359accf27a5SSukumar Swaminathan 		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
360accf27a5SSukumar Swaminathan 	}
361bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
362bafec742SSukumar Swaminathan }
363bafec742SSukumar Swaminathan 
364bafec742SSukumar Swaminathan /*
365bafec742SSukumar Swaminathan  * The CFG register is used to download TX and RX control blocks
366bafec742SSukumar Swaminathan  * to the chip. This function waits for an operation to complete.
367bafec742SSukumar Swaminathan  */
static int
ql_wait_cfg(qlge_t *qlge, uint32_t bit)
{
	/*
	 * Poll until the given CFG bit clears (BIT_RESET); the trailing
	 * 0 is presumably an empty error-bit mask -- none is checked.
	 */
	return (ql_wait_reg_bit(qlge, REG_CONFIGURATION, bit, BIT_RESET, 0));
}
373bafec742SSukumar Swaminathan 
374bafec742SSukumar Swaminathan 
375bafec742SSukumar Swaminathan /*
376bafec742SSukumar Swaminathan  * Used to issue init control blocks to hw. Maps control block,
377bafec742SSukumar Swaminathan  * sets address, triggers download, waits for completion.
378bafec742SSukumar Swaminathan  */
379bafec742SSukumar Swaminathan static int
380bafec742SSukumar Swaminathan ql_write_cfg(qlge_t *qlge, uint32_t bit, uint64_t phy_addr, uint16_t q_id)
381bafec742SSukumar Swaminathan {
382bafec742SSukumar Swaminathan 	int status = DDI_SUCCESS;
383bafec742SSukumar Swaminathan 	uint32_t mask;
384bafec742SSukumar Swaminathan 	uint32_t value;
385bafec742SSukumar Swaminathan 
386bafec742SSukumar Swaminathan 	status = ql_sem_spinlock(qlge, SEM_ICB_MASK);
387bafec742SSukumar Swaminathan 	if (status != DDI_SUCCESS) {
388bafec742SSukumar Swaminathan 		goto exit;
389bafec742SSukumar Swaminathan 	}
390bafec742SSukumar Swaminathan 	status = ql_wait_cfg(qlge, bit);
391bafec742SSukumar Swaminathan 	if (status != DDI_SUCCESS) {
392bafec742SSukumar Swaminathan 		goto exit;
393bafec742SSukumar Swaminathan 	}
394bafec742SSukumar Swaminathan 
395bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_LOWER, LS_64BITS(phy_addr));
396bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_UPPER, MS_64BITS(phy_addr));
397bafec742SSukumar Swaminathan 
398bafec742SSukumar Swaminathan 	mask = CFG_Q_MASK | (bit << 16);
399bafec742SSukumar Swaminathan 	value = bit | (q_id << CFG_Q_SHIFT);
400bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_CONFIGURATION, (mask | value));
401bafec742SSukumar Swaminathan 
402bafec742SSukumar Swaminathan 	/*
403bafec742SSukumar Swaminathan 	 * Wait for the bit to clear after signaling hw.
404bafec742SSukumar Swaminathan 	 */
405bafec742SSukumar Swaminathan 	status = ql_wait_cfg(qlge, bit);
406bafec742SSukumar Swaminathan 	ql_sem_unlock(qlge, SEM_ICB_MASK); /* does flush too */
407bafec742SSukumar Swaminathan 
408bafec742SSukumar Swaminathan exit:
409bafec742SSukumar Swaminathan 	return (status);
410bafec742SSukumar Swaminathan }
411bafec742SSukumar Swaminathan 
412bafec742SSukumar Swaminathan /*
413bafec742SSukumar Swaminathan  * Initialize adapter instance
414bafec742SSukumar Swaminathan  */
static int
ql_init_instance(qlge_t *qlge)
{
	int i;

	/* Default value */
	qlge->mac_flags = QL_MAC_INIT;
	qlge->mtu = ETHERMTU;		/* set normal size as default */
	qlge->page_size = VM_PAGE_SIZE;	/* default page size */

	/* clear per-ring poll and interrupt counters */
	for (i = 0; i < MAX_RX_RINGS; i++) {
		qlge->rx_polls[i] = 0;
		qlge->rx_interrupts[i] = 0;
	}

	/*
	 * Set up the operating parameters.
	 */
	qlge->multicast_list_count = 0;

	/*
	 * Set up the max number of unicast list
	 */
	qlge->unicst_total = MAX_UNICAST_LIST_SIZE;
	qlge->unicst_avail = MAX_UNICAST_LIST_SIZE;

	/*
	 * read user defined properties in .conf file
	 */
	ql_read_conf(qlge); /* mtu, pause, LSO etc */
	/* total rx rings = tx completion rings plus RSS rings */
	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;

	QL_PRINT(DBG_INIT, ("mtu is %d \n", qlge->mtu));

	/* choose Memory Space mapping and get Vendor Id, Device ID etc */
	ql_pci_config(qlge);
	qlge->ip_hdr_offset = 0;

	if (qlge->device_id == 0x8000) {
		/* Schultz card */
		qlge->cfg_flags |= CFG_CHIP_8100;
		/* enable just ipv4 chksum offload for Schultz */
		qlge->cfg_flags |= CFG_CKSUM_FULL_IPv4;
		/*
		 * Schultz firmware does not do pseudo IP header checksum
		 * calculation, needed to be done by driver
		 */
		qlge->cfg_flags |= CFG_HW_UNABLE_PSEUDO_HDR_CKSUM;
		if (qlge->lso_enable)
			qlge->cfg_flags |= CFG_LSO;
		qlge->cfg_flags |= CFG_SUPPORT_SCATTER_GATHER;
		/* Schultz must split packet header */
		qlge->cfg_flags |= CFG_ENABLE_SPLIT_HEADER;
		qlge->max_read_mbx = 5;
		qlge->ip_hdr_offset = 2;
	}

	/* Set Function Number and some of the iocb mac information */
	if (ql_set_mac_info(qlge) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Read network settings from NVRAM */
	/* After nvram is read successfully, update dev_addr */
	if (ql_get_flash_params(qlge) == DDI_SUCCESS) {
		QL_PRINT(DBG_INIT, ("mac%d address is \n", qlge->func_number));
		for (i = 0; i < ETHERADDRL; i++) {
			qlge->dev_addr.ether_addr_octet[i] =
			    qlge->nic_config.factory_MAC[i];
		}
	} else {
		cmn_err(CE_WARN, "%s(%d): Failed to read flash memory",
		    __func__, qlge->instance);
		return (DDI_FAILURE);
	}

	/* seed the first unicast address slot with the factory MAC */
	bcopy(qlge->dev_addr.ether_addr_octet,
	    qlge->unicst_addr[0].addr.ether_addr_octet,
	    ETHERADDRL);
	QL_DUMP(DBG_INIT, "\t flash mac address dump:\n",
	    &qlge->dev_addr.ether_addr_octet[0], 8, ETHERADDRL);

	/* assume link is down until reported otherwise */
	qlge->port_link_state = LS_DOWN;

	return (DDI_SUCCESS);
}
500bafec742SSukumar Swaminathan 
501bafec742SSukumar Swaminathan 
502bafec742SSukumar Swaminathan /*
503bafec742SSukumar Swaminathan  * This hardware semaphore provides the mechanism for exclusive access to
504bafec742SSukumar Swaminathan  * resources shared between the NIC driver, MPI firmware,
505bafec742SSukumar Swaminathan  * FCOE firmware and the FC driver.
506bafec742SSukumar Swaminathan  */
507bafec742SSukumar Swaminathan static int
508bafec742SSukumar Swaminathan ql_sem_trylock(qlge_t *qlge, uint32_t sem_mask)
509bafec742SSukumar Swaminathan {
510bafec742SSukumar Swaminathan 	uint32_t sem_bits = 0;
511bafec742SSukumar Swaminathan 
512bafec742SSukumar Swaminathan 	switch (sem_mask) {
513bafec742SSukumar Swaminathan 	case SEM_XGMAC0_MASK:
514bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
515bafec742SSukumar Swaminathan 		break;
516bafec742SSukumar Swaminathan 	case SEM_XGMAC1_MASK:
517bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
518bafec742SSukumar Swaminathan 		break;
519bafec742SSukumar Swaminathan 	case SEM_ICB_MASK:
520bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_ICB_SHIFT;
521bafec742SSukumar Swaminathan 		break;
522bafec742SSukumar Swaminathan 	case SEM_MAC_ADDR_MASK:
523bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
524bafec742SSukumar Swaminathan 		break;
525bafec742SSukumar Swaminathan 	case SEM_FLASH_MASK:
526bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
527bafec742SSukumar Swaminathan 		break;
528bafec742SSukumar Swaminathan 	case SEM_PROBE_MASK:
529bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
530bafec742SSukumar Swaminathan 		break;
531bafec742SSukumar Swaminathan 	case SEM_RT_IDX_MASK:
532bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
533bafec742SSukumar Swaminathan 		break;
534bafec742SSukumar Swaminathan 	case SEM_PROC_REG_MASK:
535bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
536bafec742SSukumar Swaminathan 		break;
537bafec742SSukumar Swaminathan 	default:
538bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Bad Semaphore mask!.");
539bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
540bafec742SSukumar Swaminathan 	}
541bafec742SSukumar Swaminathan 
542bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_SEMAPHORE, sem_bits | sem_mask);
543bafec742SSukumar Swaminathan 	return (!(ql_read_reg(qlge, REG_SEMAPHORE) & sem_bits));
544bafec742SSukumar Swaminathan }
545bafec742SSukumar Swaminathan 
546bafec742SSukumar Swaminathan /*
547bafec742SSukumar Swaminathan  * Lock a specific bit of Semaphore register to gain
548bafec742SSukumar Swaminathan  * access to a particular shared register
549bafec742SSukumar Swaminathan  */
550bafec742SSukumar Swaminathan int
551bafec742SSukumar Swaminathan ql_sem_spinlock(qlge_t *qlge, uint32_t sem_mask)
552bafec742SSukumar Swaminathan {
553bafec742SSukumar Swaminathan 	unsigned int wait_count = 30;
554bafec742SSukumar Swaminathan 
555bafec742SSukumar Swaminathan 	while (wait_count) {
556bafec742SSukumar Swaminathan 		if (!ql_sem_trylock(qlge, sem_mask))
557bafec742SSukumar Swaminathan 			return (DDI_SUCCESS);
558bafec742SSukumar Swaminathan 		qlge_delay(100);
559bafec742SSukumar Swaminathan 		wait_count--;
560bafec742SSukumar Swaminathan 	}
561bafec742SSukumar Swaminathan 	cmn_err(CE_WARN, "%s(%d) sem_mask 0x%x lock timeout ",
562bafec742SSukumar Swaminathan 	    __func__, qlge->instance, sem_mask);
563bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
564bafec742SSukumar Swaminathan }
565bafec742SSukumar Swaminathan 
/*
 * Unlock a specific bit of the Semaphore register to release
 * access to a particular shared register
 */
void
ql_sem_unlock(qlge_t *qlge, uint32_t sem_mask)
{
	/* Writing the bare mask (no SEM_SET bits) releases the semaphore. */
	ql_write_reg(qlge, REG_SEMAPHORE, sem_mask);
	(void) ql_read_reg(qlge, REG_SEMAPHORE);	/* flush */
}
576bafec742SSukumar Swaminathan 
577bafec742SSukumar Swaminathan /*
578bafec742SSukumar Swaminathan  * Get property value from configuration file.
579bafec742SSukumar Swaminathan  *
580bafec742SSukumar Swaminathan  * string = property string pointer.
581bafec742SSukumar Swaminathan  *
582bafec742SSukumar Swaminathan  * Returns:
583bafec742SSukumar Swaminathan  * 0xFFFFFFFF = no property else property value.
584bafec742SSukumar Swaminathan  */
585bafec742SSukumar Swaminathan static uint32_t
586bafec742SSukumar Swaminathan ql_get_prop(qlge_t *qlge, char *string)
587bafec742SSukumar Swaminathan {
588bafec742SSukumar Swaminathan 	char buf[256];
589bafec742SSukumar Swaminathan 	uint32_t data;
590bafec742SSukumar Swaminathan 
591bafec742SSukumar Swaminathan 	/* Get adapter instance parameter. */
592bafec742SSukumar Swaminathan 	(void) sprintf(buf, "hba%d-%s", qlge->instance, string);
593bafec742SSukumar Swaminathan 	data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0, buf,
594bafec742SSukumar Swaminathan 	    (int)0xffffffff);
595bafec742SSukumar Swaminathan 
596bafec742SSukumar Swaminathan 	/* Adapter instance parameter found? */
597bafec742SSukumar Swaminathan 	if (data == 0xffffffff) {
598bafec742SSukumar Swaminathan 		/* No, get default parameter. */
599bafec742SSukumar Swaminathan 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0,
600bafec742SSukumar Swaminathan 		    string, (int)0xffffffff);
601bafec742SSukumar Swaminathan 	}
602bafec742SSukumar Swaminathan 
603bafec742SSukumar Swaminathan 	return (data);
604bafec742SSukumar Swaminathan }
605bafec742SSukumar Swaminathan 
606bafec742SSukumar Swaminathan /*
607bafec742SSukumar Swaminathan  * Read user setting from configuration file.
608bafec742SSukumar Swaminathan  */
609bafec742SSukumar Swaminathan static void
610bafec742SSukumar Swaminathan ql_read_conf(qlge_t *qlge)
611bafec742SSukumar Swaminathan {
612bafec742SSukumar Swaminathan 	uint32_t data;
613bafec742SSukumar Swaminathan 
614bafec742SSukumar Swaminathan 	/* clear configuration flags */
615bafec742SSukumar Swaminathan 	qlge->cfg_flags = 0;
616bafec742SSukumar Swaminathan 
617accf27a5SSukumar Swaminathan 	/* Set up the default ring sizes. */
618accf27a5SSukumar Swaminathan 	qlge->tx_ring_size = NUM_TX_RING_ENTRIES;
619accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "tx_ring_size");
620accf27a5SSukumar Swaminathan 	/* if data is valid */
621accf27a5SSukumar Swaminathan 	if ((data != 0xffffffff) && data) {
622accf27a5SSukumar Swaminathan 		if (qlge->tx_ring_size != data) {
623accf27a5SSukumar Swaminathan 			qlge->tx_ring_size = (uint16_t)data;
624accf27a5SSukumar Swaminathan 		}
625accf27a5SSukumar Swaminathan 	}
626accf27a5SSukumar Swaminathan 
627accf27a5SSukumar Swaminathan 	qlge->rx_ring_size = NUM_RX_RING_ENTRIES;
628accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "rx_ring_size");
629accf27a5SSukumar Swaminathan 	/* if data is valid */
630accf27a5SSukumar Swaminathan 	if ((data != 0xffffffff) && data) {
631accf27a5SSukumar Swaminathan 		if (qlge->rx_ring_size != data) {
632accf27a5SSukumar Swaminathan 			qlge->rx_ring_size = (uint16_t)data;
633accf27a5SSukumar Swaminathan 		}
634accf27a5SSukumar Swaminathan 	}
635accf27a5SSukumar Swaminathan 
636accf27a5SSukumar Swaminathan 	qlge->tx_ring_count = 8;
637accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "tx_ring_count");
638accf27a5SSukumar Swaminathan 	/* if data is valid */
639accf27a5SSukumar Swaminathan 	if ((data != 0xffffffff) && data) {
640accf27a5SSukumar Swaminathan 		if (qlge->tx_ring_count != data) {
641accf27a5SSukumar Swaminathan 			qlge->tx_ring_count = (uint16_t)data;
642accf27a5SSukumar Swaminathan 		}
643accf27a5SSukumar Swaminathan 	}
644accf27a5SSukumar Swaminathan 
645accf27a5SSukumar Swaminathan 	qlge->rss_ring_count = 8;
646accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "rss_ring_count");
647accf27a5SSukumar Swaminathan 	/* if data is valid */
648accf27a5SSukumar Swaminathan 	if ((data != 0xffffffff) && data) {
649accf27a5SSukumar Swaminathan 		if (qlge->rss_ring_count != data) {
650accf27a5SSukumar Swaminathan 			qlge->rss_ring_count = (uint16_t)data;
651accf27a5SSukumar Swaminathan 		}
652accf27a5SSukumar Swaminathan 	}
653accf27a5SSukumar Swaminathan 
654bafec742SSukumar Swaminathan 	/* Get default rx_copy enable/disable. */
655bafec742SSukumar Swaminathan 	if ((data = ql_get_prop(qlge, "force-rx-copy")) == 0xffffffff ||
656bafec742SSukumar Swaminathan 	    data == 0) {
657bafec742SSukumar Swaminathan 		qlge->rx_copy = B_FALSE;
658bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("rx copy mode disabled\n"));
659bafec742SSukumar Swaminathan 	} else if (data == 1) {
660bafec742SSukumar Swaminathan 		qlge->rx_copy = B_TRUE;
661bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("rx copy mode enabled\n"));
662bafec742SSukumar Swaminathan 	}
663bafec742SSukumar Swaminathan 
664*a6766df4SSukumar Swaminathan 	qlge->rx_copy_threshold = qlge->rx_ring_size / 4;
665*a6766df4SSukumar Swaminathan 	data = ql_get_prop(qlge, "rx_copy_threshold");
666*a6766df4SSukumar Swaminathan 	if ((data != 0xffffffff) && (data != 0)) {
667*a6766df4SSukumar Swaminathan 		qlge->rx_copy_threshold = data;
668*a6766df4SSukumar Swaminathan 		cmn_err(CE_NOTE, "!new rx_copy_threshold %d \n",
669*a6766df4SSukumar Swaminathan 		    qlge->rx_copy_threshold);
670*a6766df4SSukumar Swaminathan 	}
671*a6766df4SSukumar Swaminathan 
672bafec742SSukumar Swaminathan 	/* Get mtu packet size. */
673bafec742SSukumar Swaminathan 	data = ql_get_prop(qlge, "mtu");
674bafec742SSukumar Swaminathan 	if ((data == ETHERMTU) || (data == JUMBO_MTU)) {
675bafec742SSukumar Swaminathan 		if (qlge->mtu != data) {
676bafec742SSukumar Swaminathan 			qlge->mtu = data;
677bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "new mtu is %d\n", qlge->mtu);
678bafec742SSukumar Swaminathan 		}
679bafec742SSukumar Swaminathan 	}
680bafec742SSukumar Swaminathan 
681accf27a5SSukumar Swaminathan 	if (qlge->mtu == JUMBO_MTU) {
682accf27a5SSukumar Swaminathan 		qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT_JUMBO;
683accf27a5SSukumar Swaminathan 		qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT_JUMBO;
684accf27a5SSukumar Swaminathan 		qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT_JUMBO;
685accf27a5SSukumar Swaminathan 		qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT_JUMBO;
686accf27a5SSukumar Swaminathan 	}
687accf27a5SSukumar Swaminathan 
688accf27a5SSukumar Swaminathan 
689bafec742SSukumar Swaminathan 	/* Get pause mode, default is Per Priority mode. */
690bafec742SSukumar Swaminathan 	qlge->pause = PAUSE_MODE_PER_PRIORITY;
691bafec742SSukumar Swaminathan 	data = ql_get_prop(qlge, "pause");
692bafec742SSukumar Swaminathan 	if (data <= PAUSE_MODE_PER_PRIORITY) {
693bafec742SSukumar Swaminathan 		if (qlge->pause != data) {
694bafec742SSukumar Swaminathan 			qlge->pause = data;
695bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "new pause mode %d\n", qlge->pause);
696bafec742SSukumar Swaminathan 		}
697bafec742SSukumar Swaminathan 	}
698accf27a5SSukumar Swaminathan 	/* Receive interrupt delay */
699accf27a5SSukumar Swaminathan 	qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT;
700accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "rx_intr_delay");
701accf27a5SSukumar Swaminathan 	/* if data is valid */
702accf27a5SSukumar Swaminathan 	if ((data != 0xffffffff) && data) {
703accf27a5SSukumar Swaminathan 		if (qlge->rx_coalesce_usecs != data) {
704accf27a5SSukumar Swaminathan 			qlge->rx_coalesce_usecs = (uint16_t)data;
705accf27a5SSukumar Swaminathan 		}
706accf27a5SSukumar Swaminathan 	}
707accf27a5SSukumar Swaminathan 	/* Rx inter-packet delay. */
708accf27a5SSukumar Swaminathan 	qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT;
709accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "rx_ipkt_delay");
710accf27a5SSukumar Swaminathan 	/* if data is valid */
711accf27a5SSukumar Swaminathan 	if ((data != 0xffffffff) && data) {
712accf27a5SSukumar Swaminathan 		if (qlge->rx_max_coalesced_frames != data) {
713accf27a5SSukumar Swaminathan 			qlge->rx_max_coalesced_frames = (uint16_t)data;
714accf27a5SSukumar Swaminathan 		}
715accf27a5SSukumar Swaminathan 	}
716accf27a5SSukumar Swaminathan 	/* Transmit interrupt delay */
717accf27a5SSukumar Swaminathan 	qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT;
718accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "tx_intr_delay");
719accf27a5SSukumar Swaminathan 	/* if data is valid */
720accf27a5SSukumar Swaminathan 	if ((data != 0xffffffff) && data) {
721accf27a5SSukumar Swaminathan 		if (qlge->tx_coalesce_usecs != data) {
722accf27a5SSukumar Swaminathan 			qlge->tx_coalesce_usecs = (uint16_t)data;
723accf27a5SSukumar Swaminathan 		}
724accf27a5SSukumar Swaminathan 	}
725accf27a5SSukumar Swaminathan 	/* Tx inter-packet delay. */
726accf27a5SSukumar Swaminathan 	qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT;
727accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "tx_ipkt_delay");
728bafec742SSukumar Swaminathan 	/* if data is valid */
729bafec742SSukumar Swaminathan 	if ((data != 0xffffffff) && data) {
730bafec742SSukumar Swaminathan 		if (qlge->tx_max_coalesced_frames != data) {
731bafec742SSukumar Swaminathan 			qlge->tx_max_coalesced_frames = (uint16_t)data;
732bafec742SSukumar Swaminathan 		}
733bafec742SSukumar Swaminathan 	}
734bafec742SSukumar Swaminathan 
735bafec742SSukumar Swaminathan 	/* Get split header payload_copy_thresh. */
736accf27a5SSukumar Swaminathan 	qlge->payload_copy_thresh = DFLT_PAYLOAD_COPY_THRESH;
737bafec742SSukumar Swaminathan 	data = ql_get_prop(qlge, "payload_copy_thresh");
738bafec742SSukumar Swaminathan 	/* if data is valid */
739bafec742SSukumar Swaminathan 	if ((data != 0xffffffff) && (data != 0)) {
740bafec742SSukumar Swaminathan 		if (qlge->payload_copy_thresh != data) {
741bafec742SSukumar Swaminathan 			qlge->payload_copy_thresh = data;
742bafec742SSukumar Swaminathan 		}
743bafec742SSukumar Swaminathan 	}
744bafec742SSukumar Swaminathan 
745bafec742SSukumar Swaminathan 	/* large send offload (LSO) capability. */
746bafec742SSukumar Swaminathan 	qlge->lso_enable = 1;
747bafec742SSukumar Swaminathan 	data = ql_get_prop(qlge, "lso_enable");
748bafec742SSukumar Swaminathan 	/* if data is valid */
749accf27a5SSukumar Swaminathan 	if ((data == 0) || (data == 1)) {
750bafec742SSukumar Swaminathan 		if (qlge->lso_enable != data) {
751bafec742SSukumar Swaminathan 			qlge->lso_enable = (uint16_t)data;
752bafec742SSukumar Swaminathan 		}
753bafec742SSukumar Swaminathan 	}
754accf27a5SSukumar Swaminathan 
755accf27a5SSukumar Swaminathan 	/* dcbx capability. */
756accf27a5SSukumar Swaminathan 	qlge->dcbx_enable = 1;
757accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "dcbx_enable");
758accf27a5SSukumar Swaminathan 	/* if data is valid */
759accf27a5SSukumar Swaminathan 	if ((data == 0) || (data == 1)) {
760accf27a5SSukumar Swaminathan 		if (qlge->dcbx_enable != data) {
761accf27a5SSukumar Swaminathan 			qlge->dcbx_enable = (uint16_t)data;
762accf27a5SSukumar Swaminathan 		}
763accf27a5SSukumar Swaminathan 	}
764accf27a5SSukumar Swaminathan 	/* fault management enable */
765accf27a5SSukumar Swaminathan 	qlge->fm_enable = B_TRUE;
766accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "fm-enable");
767accf27a5SSukumar Swaminathan 	if ((data == 0x1) || (data == 0)) {
768accf27a5SSukumar Swaminathan 		qlge->fm_enable = (boolean_t)data;
769accf27a5SSukumar Swaminathan 	}
770accf27a5SSukumar Swaminathan 
771bafec742SSukumar Swaminathan }
772bafec742SSukumar Swaminathan 
773bafec742SSukumar Swaminathan /*
774bafec742SSukumar Swaminathan  * Enable global interrupt
775bafec742SSukumar Swaminathan  */
static void
ql_enable_global_interrupt(qlge_t *qlge)
{
	/*
	 * NOTE(review): the high 16 bits appear to be a write-enable
	 * mask for the matching low bits (the disable path writes only
	 * INTR_EN_EI << 16) — confirm against the chip register spec.
	 */
	ql_write_reg(qlge, REG_INTERRUPT_ENABLE,
	    (INTR_EN_EI << 16) | INTR_EN_EI);
	/* Remember globally-enabled state in the soft-state flags. */
	qlge->flags |= INTERRUPTS_ENABLED;
}
783bafec742SSukumar Swaminathan 
784bafec742SSukumar Swaminathan /*
785bafec742SSukumar Swaminathan  * Disable global interrupt
786bafec742SSukumar Swaminathan  */
static void
ql_disable_global_interrupt(qlge_t *qlge)
{
	/* Mask bit set, value bit clear: clears the global enable. */
	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, (INTR_EN_EI << 16));
	qlge->flags &= ~INTERRUPTS_ENABLED;
}
793bafec742SSukumar Swaminathan 
794bafec742SSukumar Swaminathan /*
795bafec742SSukumar Swaminathan  * Enable one ring interrupt
796bafec742SSukumar Swaminathan  */
/*
 * Enable the completion interrupt for vector `intr`.  For non-default
 * MSIX vectors the enable is written unconditionally; otherwise a
 * per-vector disable count (irq_cnt) is decremented and the enable
 * mask is written only when the count reaches zero.
 */
void
ql_enable_completion_interrupt(qlge_t *qlge, uint32_t intr)
{
	struct intr_ctx *ctx = qlge->intr_ctx + intr;

	QL_PRINT(DBG_INTR, ("%s(%d): To enable intr %d, irq_cnt %d \n",
	    __func__, qlge->instance, intr, ctx->irq_cnt));

	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
		/*
		 * Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
		return;
	}

	/* Write the enable mask only when the disable count hits zero. */
	if (!atomic_dec_32_nv(&ctx->irq_cnt)) {
		mutex_enter(&qlge->hw_mutex);
		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
		mutex_exit(&qlge->hw_mutex);
		QL_PRINT(DBG_INTR,
		    ("%s(%d): write %x to intr enable register \n",
		    __func__, qlge->instance, ctx->intr_en_mask));
	}
}
823bafec742SSukumar Swaminathan 
824bafec742SSukumar Swaminathan /*
825bafec742SSukumar Swaminathan  * ql_forced_disable_completion_interrupt
826bafec742SSukumar Swaminathan  * Used by call from OS, may be called without
827bafec742SSukumar Swaminathan  * a pending interrupt so force the disable
828bafec742SSukumar Swaminathan  */
/*
 * ql_forced_disable_completion_interrupt
 * Used by call from OS, may be called without
 * a pending interrupt so force the disable
 * (bypasses the irq_cnt bookkeeping used by the normal disable path).
 * Returns the value of REG_STATUS read back after the disable.
 */
uint32_t
ql_forced_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
{
	uint32_t var = 0;
	struct intr_ctx *ctx = qlge->intr_ctx + intr;

	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
	    __func__, qlge->instance, intr, ctx->irq_cnt));

	/* Non-default MSIX vectors are written without taking hw_mutex. */
	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
		var = ql_read_reg(qlge, REG_STATUS);
		return (var);
	}

	mutex_enter(&qlge->hw_mutex);
	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
	var = ql_read_reg(qlge, REG_STATUS);
	mutex_exit(&qlge->hw_mutex);

	return (var);
}
851bafec742SSukumar Swaminathan 
852bafec742SSukumar Swaminathan /*
853bafec742SSukumar Swaminathan  * Disable a completion interrupt
854bafec742SSukumar Swaminathan  */
/*
 * Disable a completion interrupt.  The disable mask is written only
 * on the 0 -> 1 transition of the per-vector disable count (irq_cnt);
 * nested disables just bump the count.
 */
void
ql_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
{
	struct intr_ctx *ctx;

	ctx = qlge->intr_ctx + intr;
	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
	    __func__, qlge->instance, intr, ctx->irq_cnt));
	/*
	 * HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && (intr != 0))
		return;

	if (ql_atomic_read_32(&ctx->irq_cnt) == 0) {
		mutex_enter(&qlge->hw_mutex);
		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
		mutex_exit(&qlge->hw_mutex);
	}
	atomic_inc_32(&ctx->irq_cnt);
}
877bafec742SSukumar Swaminathan 
878bafec742SSukumar Swaminathan /*
879bafec742SSukumar Swaminathan  * Enable all completion interrupts
880bafec742SSukumar Swaminathan  */
881bafec742SSukumar Swaminathan static void
882bafec742SSukumar Swaminathan ql_enable_all_completion_interrupts(qlge_t *qlge)
883bafec742SSukumar Swaminathan {
884bafec742SSukumar Swaminathan 	int i;
885bafec742SSukumar Swaminathan 	uint32_t value = 1;
886bafec742SSukumar Swaminathan 
887bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->intr_cnt; i++) {
888bafec742SSukumar Swaminathan 		/*
889bafec742SSukumar Swaminathan 		 * Set the count to 1 for Legacy / MSI interrupts or for the
890bafec742SSukumar Swaminathan 		 * default interrupt (0)
891bafec742SSukumar Swaminathan 		 */
892bafec742SSukumar Swaminathan 		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0) {
893bafec742SSukumar Swaminathan 			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
894bafec742SSukumar Swaminathan 		}
895bafec742SSukumar Swaminathan 		ql_enable_completion_interrupt(qlge, i);
896bafec742SSukumar Swaminathan 	}
897bafec742SSukumar Swaminathan }
898bafec742SSukumar Swaminathan 
899bafec742SSukumar Swaminathan /*
900bafec742SSukumar Swaminathan  * Disable all completion interrupts
901bafec742SSukumar Swaminathan  */
902bafec742SSukumar Swaminathan static void
903bafec742SSukumar Swaminathan ql_disable_all_completion_interrupts(qlge_t *qlge)
904bafec742SSukumar Swaminathan {
905bafec742SSukumar Swaminathan 	int i;
906bafec742SSukumar Swaminathan 	uint32_t value = 0;
907bafec742SSukumar Swaminathan 
908bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->intr_cnt; i++) {
909bafec742SSukumar Swaminathan 
910bafec742SSukumar Swaminathan 		/*
911bafec742SSukumar Swaminathan 		 * Set the count to 0 for Legacy / MSI interrupts or for the
912bafec742SSukumar Swaminathan 		 * default interrupt (0)
913bafec742SSukumar Swaminathan 		 */
914bafec742SSukumar Swaminathan 		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0)
915bafec742SSukumar Swaminathan 			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
916bafec742SSukumar Swaminathan 
917bafec742SSukumar Swaminathan 		ql_disable_completion_interrupt(qlge, i);
918bafec742SSukumar Swaminathan 	}
919bafec742SSukumar Swaminathan }
920bafec742SSukumar Swaminathan 
921bafec742SSukumar Swaminathan /*
922bafec742SSukumar Swaminathan  * Update small buffer queue producer index
923bafec742SSukumar Swaminathan  */
static void
ql_update_sbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
{
	/* Update the buffer producer index (doorbell write to hardware). */
	QL_PRINT(DBG_RX, ("sbq: updating prod idx = %d.\n",
	    rx_ring->sbq_prod_idx));
	ql_write_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg,
	    rx_ring->sbq_prod_idx);
}
933bafec742SSukumar Swaminathan 
934bafec742SSukumar Swaminathan /*
935bafec742SSukumar Swaminathan  * Update large buffer queue producer index
936bafec742SSukumar Swaminathan  */
static void
ql_update_lbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
{
	/* Update the buffer producer index (doorbell write to hardware). */
	QL_PRINT(DBG_RX, ("lbq: updating prod idx = %d.\n",
	    rx_ring->lbq_prod_idx));
	ql_write_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg,
	    rx_ring->lbq_prod_idx);
}
946bafec742SSukumar Swaminathan 
947bafec742SSukumar Swaminathan /*
948bafec742SSukumar Swaminathan  * Adds a small buffer descriptor to end of its in use list,
949bafec742SSukumar Swaminathan  * assumes sbq_lock is already taken
950bafec742SSukumar Swaminathan  */
951bafec742SSukumar Swaminathan static void
952bafec742SSukumar Swaminathan ql_add_sbuf_to_in_use_list(struct rx_ring *rx_ring,
953bafec742SSukumar Swaminathan     struct bq_desc *sbq_desc)
954bafec742SSukumar Swaminathan {
955bafec742SSukumar Swaminathan 	uint32_t inuse_idx = rx_ring->sbq_use_tail;
956bafec742SSukumar Swaminathan 
957bafec742SSukumar Swaminathan 	rx_ring->sbuf_in_use[inuse_idx] = sbq_desc;
958bafec742SSukumar Swaminathan 	inuse_idx++;
959bafec742SSukumar Swaminathan 	if (inuse_idx >= rx_ring->sbq_len)
960bafec742SSukumar Swaminathan 		inuse_idx = 0;
961bafec742SSukumar Swaminathan 	rx_ring->sbq_use_tail = inuse_idx;
962bafec742SSukumar Swaminathan 	atomic_inc_32(&rx_ring->sbuf_in_use_count);
963bafec742SSukumar Swaminathan 	ASSERT(rx_ring->sbuf_in_use_count <= rx_ring->sbq_len);
964bafec742SSukumar Swaminathan }
965bafec742SSukumar Swaminathan 
966bafec742SSukumar Swaminathan /*
967bafec742SSukumar Swaminathan  * Get a small buffer descriptor from its in use list
968bafec742SSukumar Swaminathan  */
969bafec742SSukumar Swaminathan static struct bq_desc *
970bafec742SSukumar Swaminathan ql_get_sbuf_from_in_use_list(struct rx_ring *rx_ring)
971bafec742SSukumar Swaminathan {
972bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc = NULL;
973bafec742SSukumar Swaminathan 	uint32_t inuse_idx;
974bafec742SSukumar Swaminathan 
975bafec742SSukumar Swaminathan 	/* Pick from head of in use list */
976bafec742SSukumar Swaminathan 	inuse_idx = rx_ring->sbq_use_head;
977bafec742SSukumar Swaminathan 	sbq_desc = rx_ring->sbuf_in_use[inuse_idx];
978bafec742SSukumar Swaminathan 	rx_ring->sbuf_in_use[inuse_idx] = NULL;
979bafec742SSukumar Swaminathan 
980bafec742SSukumar Swaminathan 	if (sbq_desc != NULL) {
981bafec742SSukumar Swaminathan 		inuse_idx++;
982bafec742SSukumar Swaminathan 		if (inuse_idx >= rx_ring->sbq_len)
983bafec742SSukumar Swaminathan 			inuse_idx = 0;
984bafec742SSukumar Swaminathan 		rx_ring->sbq_use_head = inuse_idx;
985bafec742SSukumar Swaminathan 		atomic_dec_32(&rx_ring->sbuf_in_use_count);
986bafec742SSukumar Swaminathan 		atomic_inc_32(&rx_ring->rx_indicate);
987bafec742SSukumar Swaminathan 		sbq_desc->upl_inuse = 1;
988bafec742SSukumar Swaminathan 		/* if mp is NULL */
989bafec742SSukumar Swaminathan 		if (sbq_desc->mp == NULL) {
990bafec742SSukumar Swaminathan 			/* try to remap mp again */
991bafec742SSukumar Swaminathan 			sbq_desc->mp =
992bafec742SSukumar Swaminathan 			    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
993bafec742SSukumar Swaminathan 			    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
994bafec742SSukumar Swaminathan 		}
995bafec742SSukumar Swaminathan 	}
996bafec742SSukumar Swaminathan 
997bafec742SSukumar Swaminathan 	return (sbq_desc);
998bafec742SSukumar Swaminathan }
999bafec742SSukumar Swaminathan 
1000bafec742SSukumar Swaminathan /*
1001bafec742SSukumar Swaminathan  * Add a small buffer descriptor to its free list
1002bafec742SSukumar Swaminathan  */
1003bafec742SSukumar Swaminathan static void
1004bafec742SSukumar Swaminathan ql_add_sbuf_to_free_list(struct rx_ring *rx_ring,
1005bafec742SSukumar Swaminathan     struct bq_desc *sbq_desc)
1006bafec742SSukumar Swaminathan {
1007bafec742SSukumar Swaminathan 	uint32_t free_idx;
1008bafec742SSukumar Swaminathan 
1009bafec742SSukumar Swaminathan 	/* Add to the end of free list */
1010bafec742SSukumar Swaminathan 	free_idx = rx_ring->sbq_free_tail;
1011bafec742SSukumar Swaminathan 	rx_ring->sbuf_free[free_idx] = sbq_desc;
1012bafec742SSukumar Swaminathan 	ASSERT(rx_ring->sbuf_free_count <= rx_ring->sbq_len);
1013bafec742SSukumar Swaminathan 	free_idx++;
1014bafec742SSukumar Swaminathan 	if (free_idx >= rx_ring->sbq_len)
1015bafec742SSukumar Swaminathan 		free_idx = 0;
1016bafec742SSukumar Swaminathan 	rx_ring->sbq_free_tail = free_idx;
1017bafec742SSukumar Swaminathan 	atomic_inc_32(&rx_ring->sbuf_free_count);
1018bafec742SSukumar Swaminathan }
1019bafec742SSukumar Swaminathan 
1020bafec742SSukumar Swaminathan /*
1021bafec742SSukumar Swaminathan  * Get a small buffer descriptor from its free list
1022bafec742SSukumar Swaminathan  */
1023bafec742SSukumar Swaminathan static struct bq_desc *
1024bafec742SSukumar Swaminathan ql_get_sbuf_from_free_list(struct rx_ring *rx_ring)
1025bafec742SSukumar Swaminathan {
1026bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
1027bafec742SSukumar Swaminathan 	uint32_t free_idx;
1028bafec742SSukumar Swaminathan 
1029bafec742SSukumar Swaminathan 	free_idx = rx_ring->sbq_free_head;
1030bafec742SSukumar Swaminathan 	/* Pick from top of free list */
1031bafec742SSukumar Swaminathan 	sbq_desc = rx_ring->sbuf_free[free_idx];
1032bafec742SSukumar Swaminathan 	rx_ring->sbuf_free[free_idx] = NULL;
1033bafec742SSukumar Swaminathan 	if (sbq_desc != NULL) {
1034bafec742SSukumar Swaminathan 		free_idx++;
1035bafec742SSukumar Swaminathan 		if (free_idx >= rx_ring->sbq_len)
1036bafec742SSukumar Swaminathan 			free_idx = 0;
1037bafec742SSukumar Swaminathan 		rx_ring->sbq_free_head = free_idx;
1038bafec742SSukumar Swaminathan 		atomic_dec_32(&rx_ring->sbuf_free_count);
1039bafec742SSukumar Swaminathan 	}
1040bafec742SSukumar Swaminathan 	return (sbq_desc);
1041bafec742SSukumar Swaminathan }
1042bafec742SSukumar Swaminathan 
1043bafec742SSukumar Swaminathan /*
1044bafec742SSukumar Swaminathan  * Add a large buffer descriptor to its in use list
1045bafec742SSukumar Swaminathan  */
1046bafec742SSukumar Swaminathan static void
1047bafec742SSukumar Swaminathan ql_add_lbuf_to_in_use_list(struct rx_ring *rx_ring,
1048bafec742SSukumar Swaminathan     struct bq_desc *lbq_desc)
1049bafec742SSukumar Swaminathan {
1050bafec742SSukumar Swaminathan 	uint32_t inuse_idx;
1051bafec742SSukumar Swaminathan 
1052bafec742SSukumar Swaminathan 	inuse_idx = rx_ring->lbq_use_tail;
1053bafec742SSukumar Swaminathan 
1054bafec742SSukumar Swaminathan 	rx_ring->lbuf_in_use[inuse_idx] = lbq_desc;
1055bafec742SSukumar Swaminathan 	inuse_idx++;
1056bafec742SSukumar Swaminathan 	if (inuse_idx >= rx_ring->lbq_len)
1057bafec742SSukumar Swaminathan 		inuse_idx = 0;
1058bafec742SSukumar Swaminathan 	rx_ring->lbq_use_tail = inuse_idx;
1059bafec742SSukumar Swaminathan 	atomic_inc_32(&rx_ring->lbuf_in_use_count);
1060bafec742SSukumar Swaminathan }
1061bafec742SSukumar Swaminathan 
1062bafec742SSukumar Swaminathan /*
1063bafec742SSukumar Swaminathan  * Get a large buffer descriptor from in use list
1064bafec742SSukumar Swaminathan  */
1065bafec742SSukumar Swaminathan static struct bq_desc *
1066bafec742SSukumar Swaminathan ql_get_lbuf_from_in_use_list(struct rx_ring *rx_ring)
1067bafec742SSukumar Swaminathan {
1068bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1069bafec742SSukumar Swaminathan 	uint32_t inuse_idx;
1070bafec742SSukumar Swaminathan 
1071bafec742SSukumar Swaminathan 	/* Pick from head of in use list */
1072bafec742SSukumar Swaminathan 	inuse_idx = rx_ring->lbq_use_head;
1073bafec742SSukumar Swaminathan 	lbq_desc = rx_ring->lbuf_in_use[inuse_idx];
1074bafec742SSukumar Swaminathan 	rx_ring->lbuf_in_use[inuse_idx] = NULL;
1075bafec742SSukumar Swaminathan 
1076bafec742SSukumar Swaminathan 	if (lbq_desc != NULL) {
1077bafec742SSukumar Swaminathan 		inuse_idx++;
1078bafec742SSukumar Swaminathan 		if (inuse_idx >= rx_ring->lbq_len)
1079bafec742SSukumar Swaminathan 			inuse_idx = 0;
1080bafec742SSukumar Swaminathan 		rx_ring->lbq_use_head = inuse_idx;
1081bafec742SSukumar Swaminathan 		atomic_dec_32(&rx_ring->lbuf_in_use_count);
1082bafec742SSukumar Swaminathan 		atomic_inc_32(&rx_ring->rx_indicate);
1083bafec742SSukumar Swaminathan 		lbq_desc->upl_inuse = 1;
1084bafec742SSukumar Swaminathan 
1085bafec742SSukumar Swaminathan 		/* if mp is NULL */
1086bafec742SSukumar Swaminathan 		if (lbq_desc->mp == NULL) {
1087bafec742SSukumar Swaminathan 			/* try to remap mp again */
1088bafec742SSukumar Swaminathan 			lbq_desc->mp =
1089bafec742SSukumar Swaminathan 			    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1090bafec742SSukumar Swaminathan 			    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1091bafec742SSukumar Swaminathan 		}
1092bafec742SSukumar Swaminathan 	}
1093bafec742SSukumar Swaminathan 	return (lbq_desc);
1094bafec742SSukumar Swaminathan }
1095bafec742SSukumar Swaminathan 
1096bafec742SSukumar Swaminathan /*
1097bafec742SSukumar Swaminathan  * Add a large buffer descriptor to free list
1098bafec742SSukumar Swaminathan  */
1099bafec742SSukumar Swaminathan static void
1100bafec742SSukumar Swaminathan ql_add_lbuf_to_free_list(struct rx_ring *rx_ring,
1101bafec742SSukumar Swaminathan     struct bq_desc *lbq_desc)
1102bafec742SSukumar Swaminathan {
1103bafec742SSukumar Swaminathan 	uint32_t free_idx;
1104bafec742SSukumar Swaminathan 
1105bafec742SSukumar Swaminathan 	/* Add to the end of free list */
1106bafec742SSukumar Swaminathan 	free_idx = rx_ring->lbq_free_tail;
1107bafec742SSukumar Swaminathan 	rx_ring->lbuf_free[free_idx] = lbq_desc;
1108bafec742SSukumar Swaminathan 	free_idx++;
1109bafec742SSukumar Swaminathan 	if (free_idx >= rx_ring->lbq_len)
1110bafec742SSukumar Swaminathan 		free_idx = 0;
1111bafec742SSukumar Swaminathan 	rx_ring->lbq_free_tail = free_idx;
1112bafec742SSukumar Swaminathan 	atomic_inc_32(&rx_ring->lbuf_free_count);
1113bafec742SSukumar Swaminathan 	ASSERT(rx_ring->lbuf_free_count <= rx_ring->lbq_len);
1114bafec742SSukumar Swaminathan }
1115bafec742SSukumar Swaminathan 
1116bafec742SSukumar Swaminathan /*
1117bafec742SSukumar Swaminathan  * Get a large buffer descriptor from its free list
1118bafec742SSukumar Swaminathan  */
1119bafec742SSukumar Swaminathan static struct bq_desc *
1120bafec742SSukumar Swaminathan ql_get_lbuf_from_free_list(struct rx_ring *rx_ring)
1121bafec742SSukumar Swaminathan {
1122bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1123bafec742SSukumar Swaminathan 	uint32_t free_idx;
1124bafec742SSukumar Swaminathan 
1125bafec742SSukumar Swaminathan 	free_idx = rx_ring->lbq_free_head;
1126bafec742SSukumar Swaminathan 	/* Pick from head of free list */
1127bafec742SSukumar Swaminathan 	lbq_desc = rx_ring->lbuf_free[free_idx];
1128bafec742SSukumar Swaminathan 	rx_ring->lbuf_free[free_idx] = NULL;
1129bafec742SSukumar Swaminathan 
1130bafec742SSukumar Swaminathan 	if (lbq_desc != NULL) {
1131bafec742SSukumar Swaminathan 		free_idx++;
1132bafec742SSukumar Swaminathan 		if (free_idx >= rx_ring->lbq_len)
1133bafec742SSukumar Swaminathan 			free_idx = 0;
1134bafec742SSukumar Swaminathan 		rx_ring->lbq_free_head = free_idx;
1135bafec742SSukumar Swaminathan 		atomic_dec_32(&rx_ring->lbuf_free_count);
1136bafec742SSukumar Swaminathan 	}
1137bafec742SSukumar Swaminathan 	return (lbq_desc);
1138bafec742SSukumar Swaminathan }
1139bafec742SSukumar Swaminathan 
/*
 * Recycle a small buffer: return its descriptor to the free list and,
 * when enough buffers have accumulated, rearm the small buffer queue.
 */
static void
ql_refill_sbuf_free_list(struct bq_desc *sbq_desc, boolean_t alloc_memory)
{
	struct rx_ring *rx_ring = sbq_desc->rx_ring;
	uint64_t *sbq_entry;
	qlge_t *qlge = (qlge_t *)rx_ring->qlge;
	/*
	 * Sync access: sbq_lock protects the free/in-use lists and
	 * the small buffer queue producer index.
	 */
	mutex_enter(&rx_ring->sbq_lock);

	/* Buffer is no longer loaned out to the upper layer. */
	sbq_desc->upl_inuse = 0;

	/*
	 * If we are freeing the buffers as a result of adapter unload, get out
	 */
	if ((sbq_desc->free_buf != NULL) ||
	    (qlge->mac_flags == QL_MAC_DETACH)) {
		if (sbq_desc->free_buf == NULL)
			atomic_dec_32(&rx_ring->rx_indicate);
		mutex_exit(&rx_ring->sbq_lock);
		return;
	}
#ifdef QLGE_LOAD_UNLOAD
	if (rx_ring->rx_indicate == 0)
		cmn_err(CE_WARN, "sbq: indicate wrong");
#endif
#ifdef QLGE_TRACK_BUFFER_USAGE
	/*
	 * Debug-only: derive the hardware's view of free buffers from
	 * the doorbell register and track the per-ring low-water mark.
	 */
	uint32_t sb_consumer_idx;
	uint32_t sb_producer_idx;
	uint32_t num_free_buffers;
	uint32_t temp;

	temp = ql_read_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg);
	/* Low 16 bits: producer index; high 16 bits: consumer index. */
	sb_producer_idx = temp & 0x0000ffff;
	sb_consumer_idx = (temp >> 16);

	if (sb_consumer_idx > sb_producer_idx)
		num_free_buffers = NUM_SMALL_BUFFERS -
		    (sb_consumer_idx - sb_producer_idx);
	else
		num_free_buffers = sb_producer_idx - sb_consumer_idx;

	if (num_free_buffers < qlge->rx_sb_low_count[rx_ring->cq_id])
		qlge->rx_sb_low_count[rx_ring->cq_id] = num_free_buffers;

#endif

#ifdef QLGE_LOAD_UNLOAD
	if (rx_ring->rx_indicate > 0xFF000000)
		cmn_err(CE_WARN, "sbq: indicate(%d) wrong: %d mac_flags %d,"
		    " sbq_desc index %d.",
		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
		    sbq_desc->index);
#endif
	if (alloc_memory) {
		/*
		 * The previous mblk was consumed by the stack; wrap the
		 * DMA buffer in a fresh one.  On failure the descriptor
		 * still goes back on the free list with mp == NULL and
		 * the failure is counted.
		 */
		sbq_desc->mp =
		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
		if (sbq_desc->mp == NULL) {
			rx_ring->rx_failed_sbq_allocs++;
		}
	}

	/* Got the packet from the stack decrement rx_indicate count */
	atomic_dec_32(&rx_ring->rx_indicate);

	ql_add_sbuf_to_free_list(rx_ring, sbq_desc);

	/*
	 * Rearm if possible: only while the interface is up, and only
	 * once enough free buffers exist to make a doorbell worthwhile.
	 */
	if ((rx_ring->sbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
	    (qlge->mac_flags == QL_MAC_STARTED)) {
		sbq_entry = rx_ring->sbq_dma.vaddr;
		sbq_entry += rx_ring->sbq_prod_idx;

		while (rx_ring->sbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
			/* Get first one from free list */
			sbq_desc = ql_get_sbuf_from_free_list(rx_ring);

			*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
			sbq_entry++;
			rx_ring->sbq_prod_idx++;
			/* Producer index wraps at queue length. */
			if (rx_ring->sbq_prod_idx >= rx_ring->sbq_len) {
				rx_ring->sbq_prod_idx = 0;
				sbq_entry = rx_ring->sbq_dma.vaddr;
			}
			/* Add to end of in use list */
			ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
		}

		/* Update small buffer queue producer index */
		ql_update_sbq_prod_idx(qlge, rx_ring);
	}

	mutex_exit(&rx_ring->sbq_lock);
	QL_PRINT(DBG_RX_RING, ("%s(%d) exited, sbuf_free_count %d\n",
	    __func__, qlge->instance, rx_ring->sbuf_free_count));
}
1241bafec742SSukumar Swaminathan 
1242bafec742SSukumar Swaminathan /*
1243bafec742SSukumar Swaminathan  * rx recycle call back function
1244bafec742SSukumar Swaminathan  */
1245bafec742SSukumar Swaminathan static void
1246bafec742SSukumar Swaminathan ql_release_to_sbuf_free_list(caddr_t p)
1247bafec742SSukumar Swaminathan {
1248bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc = (struct bq_desc *)(void *)p;
1249bafec742SSukumar Swaminathan 
1250bafec742SSukumar Swaminathan 	if (sbq_desc == NULL)
1251bafec742SSukumar Swaminathan 		return;
1252bafec742SSukumar Swaminathan 	ql_refill_sbuf_free_list(sbq_desc, B_TRUE);
1253bafec742SSukumar Swaminathan }
1254bafec742SSukumar Swaminathan 
/*
 * Recycle a large buffer: return its descriptor to the free list and,
 * when enough buffers have accumulated, rearm the large buffer queue.
 */
static void
ql_refill_lbuf_free_list(struct bq_desc *lbq_desc, boolean_t alloc_memory)
{
	struct rx_ring *rx_ring = lbq_desc->rx_ring;
	uint64_t *lbq_entry;
	qlge_t *qlge = rx_ring->qlge;

	/*
	 * Sync access: lbq_lock protects the free/in-use lists and
	 * the large buffer queue producer index.
	 */
	mutex_enter(&rx_ring->lbq_lock);

	/* Buffer is no longer loaned out to the upper layer. */
	lbq_desc->upl_inuse = 0;
	/*
	 * If we are freeing the buffers as a result of adapter unload, get out
	 */
	if ((lbq_desc->free_buf != NULL) ||
	    (qlge->mac_flags == QL_MAC_DETACH)) {
		if (lbq_desc->free_buf == NULL)
			atomic_dec_32(&rx_ring->rx_indicate);
		mutex_exit(&rx_ring->lbq_lock);
		return;
	}
#ifdef QLGE_LOAD_UNLOAD
	if (rx_ring->rx_indicate == 0)
		cmn_err(CE_WARN, "lbq: indicate wrong");
#endif
#ifdef QLGE_TRACK_BUFFER_USAGE
	/*
	 * Debug-only: derive the hardware's view of free buffers from
	 * the doorbell register and track the per-ring low-water mark.
	 */
	uint32_t lb_consumer_idx;
	uint32_t lb_producer_idx;
	uint32_t num_free_buffers;
	uint32_t temp;

	temp = ql_read_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg);

	/* Low 16 bits: producer index; high 16 bits: consumer index. */
	lb_producer_idx = temp & 0x0000ffff;
	lb_consumer_idx = (temp >> 16);

	if (lb_consumer_idx > lb_producer_idx)
		num_free_buffers = NUM_LARGE_BUFFERS -
		    (lb_consumer_idx - lb_producer_idx);
	else
		num_free_buffers = lb_producer_idx - lb_consumer_idx;

	if (num_free_buffers < qlge->rx_lb_low_count[rx_ring->cq_id]) {
		qlge->rx_lb_low_count[rx_ring->cq_id] = num_free_buffers;
	}
#endif

#ifdef QLGE_LOAD_UNLOAD
	if (rx_ring->rx_indicate > 0xFF000000)
		cmn_err(CE_WARN, "lbq: indicate(%d) wrong: %d mac_flags %d,"
		    "lbq_desc index %d",
		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
		    lbq_desc->index);
#endif
	if (alloc_memory) {
		/*
		 * The previous mblk was consumed by the stack; wrap the
		 * DMA buffer in a fresh one.  On failure the descriptor
		 * still goes back on the free list with mp == NULL and a
		 * remap is retried in ql_get_lbuf_from_in_use_list().
		 */
		lbq_desc->mp =
		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
		if (lbq_desc->mp == NULL) {
			rx_ring->rx_failed_lbq_allocs++;
		}
	}

	/* Got the packet from the stack decrement rx_indicate count */
	atomic_dec_32(&rx_ring->rx_indicate);

	ql_add_lbuf_to_free_list(rx_ring, lbq_desc);

	/*
	 * Rearm if possible: only while the interface is up, and only
	 * once enough free buffers exist to make a doorbell worthwhile.
	 */
	if ((rx_ring->lbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
	    (qlge->mac_flags == QL_MAC_STARTED)) {
		lbq_entry = rx_ring->lbq_dma.vaddr;
		lbq_entry += rx_ring->lbq_prod_idx;
		while (rx_ring->lbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
			/* Get first one from free list */
			lbq_desc = ql_get_lbuf_from_free_list(rx_ring);

			*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
			lbq_entry++;
			rx_ring->lbq_prod_idx++;
			/* Producer index wraps at queue length. */
			if (rx_ring->lbq_prod_idx >= rx_ring->lbq_len) {
				rx_ring->lbq_prod_idx = 0;
				lbq_entry = rx_ring->lbq_dma.vaddr;
			}

			/* Add to end of in use list */
			ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
		}

		/* Update large buffer queue producer index */
		ql_update_lbq_prod_idx(rx_ring->qlge, rx_ring);
	}

	mutex_exit(&rx_ring->lbq_lock);
	QL_PRINT(DBG_RX_RING, ("%s exitd, lbuf_free_count %d\n",
	    __func__, rx_ring->lbuf_free_count));
}
1355bafec742SSukumar Swaminathan /*
1356bafec742SSukumar Swaminathan  * rx recycle call back function
1357bafec742SSukumar Swaminathan  */
1358bafec742SSukumar Swaminathan static void
1359bafec742SSukumar Swaminathan ql_release_to_lbuf_free_list(caddr_t p)
1360bafec742SSukumar Swaminathan {
1361bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc = (struct bq_desc *)(void *)p;
1362bafec742SSukumar Swaminathan 
1363bafec742SSukumar Swaminathan 	if (lbq_desc == NULL)
1364bafec742SSukumar Swaminathan 		return;
1365bafec742SSukumar Swaminathan 	ql_refill_lbuf_free_list(lbq_desc, B_TRUE);
1366bafec742SSukumar Swaminathan }
1367bafec742SSukumar Swaminathan 
1368bafec742SSukumar Swaminathan /*
1369bafec742SSukumar Swaminathan  * free small buffer queue buffers
1370bafec742SSukumar Swaminathan  */
static void
ql_free_sbq_buffers(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc;
	uint32_t i;
	uint32_t j = rx_ring->sbq_free_head;	/* ring-list walk index */
	int  force_cnt = 0;

	/* Drain the free list, releasing any attached mblks. */
	for (i = 0; i < rx_ring->sbuf_free_count; i++) {
		sbq_desc = rx_ring->sbuf_free[j];
		/* Tell the recycle callback not to reallocate. */
		sbq_desc->free_buf = 1;
		j++;
		if (j >= rx_ring->sbq_len) {
			j = 0;
		}
		if (sbq_desc->mp != NULL) {
			freemsg(sbq_desc->mp);
			sbq_desc->mp = NULL;
		}
	}
	rx_ring->sbuf_free_count = 0;

	/* Drain the in-use list the same way. */
	j = rx_ring->sbq_use_head;
	for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
		sbq_desc = rx_ring->sbuf_in_use[j];
		sbq_desc->free_buf = 1;
		j++;
		if (j >= rx_ring->sbq_len) {
			j = 0;
		}
		if (sbq_desc->mp != NULL) {
			freemsg(sbq_desc->mp);
			sbq_desc->mp = NULL;
		}
	}
	rx_ring->sbuf_in_use_count = 0;

	/*
	 * Sweep every descriptor: count those still loaned out to the
	 * stack (upl_inuse) and free the DMA resources.
	 */
	sbq_desc = &rx_ring->sbq_desc[0];
	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
		/*
		 * Set flag so that the callback does not allocate a new buffer
		 */
		sbq_desc->free_buf = 1;
		if (sbq_desc->upl_inuse != 0) {
			force_cnt++;
		}
		if (sbq_desc->bd_dma.dma_handle != NULL) {
			ql_free_phys(&sbq_desc->bd_dma.dma_handle,
			    &sbq_desc->bd_dma.acc_handle);
			sbq_desc->bd_dma.dma_handle = NULL;
			sbq_desc->bd_dma.acc_handle = NULL;
		}
	}
#ifdef QLGE_LOAD_UNLOAD
	/*
	 * NOTE(review): both counts were zeroed above, so this always
	 * prints "free 0 inuse 0" — presumably the pre-drain counts were
	 * intended; confirm before changing debug output.
	 */
	cmn_err(CE_NOTE, "sbq: free %d inuse %d force %d\n",
	    rx_ring->sbuf_free_count, rx_ring->sbuf_in_use_count, force_cnt);
#endif
	if (rx_ring->sbuf_in_use != NULL) {
		kmem_free(rx_ring->sbuf_in_use, (rx_ring->sbq_len *
		    sizeof (struct bq_desc *)));
		rx_ring->sbuf_in_use = NULL;
	}

	if (rx_ring->sbuf_free != NULL) {
		kmem_free(rx_ring->sbuf_free, (rx_ring->sbq_len *
		    sizeof (struct bq_desc *)));
		rx_ring->sbuf_free = NULL;
	}
}
1440bafec742SSukumar Swaminathan 
1441bafec742SSukumar Swaminathan /* Allocate small buffers */
static int
ql_alloc_sbufs(qlge_t *qlge, struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc;
	int i;
	ddi_dma_cookie_t dma_cookie;

	/* Pointer arrays backing the free and in-use ring lists. */
	rx_ring->sbuf_free = kmem_zalloc(rx_ring->sbq_len *
	    sizeof (struct bq_desc *), KM_NOSLEEP);
	if (rx_ring->sbuf_free == NULL) {
		cmn_err(CE_WARN,
		    "!%s: sbuf_free_list alloc: failed",
		    __func__);
		rx_ring->sbuf_free_count = 0;
		goto alloc_sbuf_err;
	}

	rx_ring->sbuf_in_use = kmem_zalloc(rx_ring->sbq_len *
	    sizeof (struct bq_desc *), KM_NOSLEEP);
	if (rx_ring->sbuf_in_use == NULL) {
		cmn_err(CE_WARN,
		    "!%s: sbuf_inuse_list alloc: failed",
		    __func__);
		rx_ring->sbuf_in_use_count = 0;
		goto alloc_sbuf_err;
	}
	/* Both ring lists start empty. */
	rx_ring->sbq_use_head = 0;
	rx_ring->sbq_use_tail = 0;
	rx_ring->sbq_free_head = 0;
	rx_ring->sbq_free_tail = 0;
	sbq_desc = &rx_ring->sbq_desc[0];

	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
		/* Allocate buffer */
		if (ql_alloc_phys_rbuf(qlge->dip, &sbq_desc->bd_dma.dma_handle,
		    &ql_buf_acc_attr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &sbq_desc->bd_dma.acc_handle,
		    (size_t)rx_ring->sbq_buf_size,	/* mem size */
		    (size_t)0,				/* default alignment */
		    (caddr_t *)&sbq_desc->bd_dma.vaddr,
		    &dma_cookie) != 0) {
			cmn_err(CE_WARN,
			    "!%s: ddi_dma_alloc_handle: failed",
			    __func__);
			goto alloc_sbuf_err;
		}

		/* Set context for Return buffer callback */
		sbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
		sbq_desc->rx_recycle.free_func = ql_release_to_sbuf_free_list;
		sbq_desc->rx_recycle.free_arg  = (caddr_t)sbq_desc;
		sbq_desc->rx_ring = rx_ring;
		sbq_desc->upl_inuse = 0;
		sbq_desc->free_buf = 0;

		/*
		 * Wrap the DMA buffer in an mblk so it can be loaned to
		 * the stack; rx_recycle fires when the stack frees it.
		 */
		sbq_desc->mp =
		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
		if (sbq_desc->mp == NULL) {
			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
			goto alloc_sbuf_err;
		}
		ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
	}

	return (DDI_SUCCESS);

alloc_sbuf_err:
	/* Unwind everything allocated so far. */
	ql_free_sbq_buffers(rx_ring);
	return (DDI_FAILURE);
}
1514bafec742SSukumar Swaminathan 
1515bafec742SSukumar Swaminathan static void
1516bafec742SSukumar Swaminathan ql_free_lbq_buffers(struct rx_ring *rx_ring)
1517bafec742SSukumar Swaminathan {
1518bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1519bafec742SSukumar Swaminathan 	uint32_t i, j;
1520bafec742SSukumar Swaminathan 	int force_cnt = 0;
1521bafec742SSukumar Swaminathan 
1522bafec742SSukumar Swaminathan 	j = rx_ring->lbq_free_head;
1523bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->lbuf_free_count; i++) {
1524bafec742SSukumar Swaminathan 		lbq_desc = rx_ring->lbuf_free[j];
1525bafec742SSukumar Swaminathan 		lbq_desc->free_buf = 1;
1526bafec742SSukumar Swaminathan 		j++;
1527bafec742SSukumar Swaminathan 		if (j >= rx_ring->lbq_len)
1528bafec742SSukumar Swaminathan 			j = 0;
1529bafec742SSukumar Swaminathan 		if (lbq_desc->mp != NULL) {
1530bafec742SSukumar Swaminathan 			freemsg(lbq_desc->mp);
1531bafec742SSukumar Swaminathan 			lbq_desc->mp = NULL;
1532bafec742SSukumar Swaminathan 		}
1533bafec742SSukumar Swaminathan 	}
1534bafec742SSukumar Swaminathan 	rx_ring->lbuf_free_count = 0;
1535bafec742SSukumar Swaminathan 
1536bafec742SSukumar Swaminathan 	j = rx_ring->lbq_use_head;
1537bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
1538bafec742SSukumar Swaminathan 		lbq_desc = rx_ring->lbuf_in_use[j];
1539bafec742SSukumar Swaminathan 		lbq_desc->free_buf = 1;
1540bafec742SSukumar Swaminathan 		j++;
1541bafec742SSukumar Swaminathan 		if (j >= rx_ring->lbq_len) {
1542bafec742SSukumar Swaminathan 			j = 0;
1543bafec742SSukumar Swaminathan 		}
1544bafec742SSukumar Swaminathan 		if (lbq_desc->mp != NULL) {
1545bafec742SSukumar Swaminathan 			freemsg(lbq_desc->mp);
1546bafec742SSukumar Swaminathan 			lbq_desc->mp = NULL;
1547bafec742SSukumar Swaminathan 		}
1548bafec742SSukumar Swaminathan 	}
1549bafec742SSukumar Swaminathan 	rx_ring->lbuf_in_use_count = 0;
1550bafec742SSukumar Swaminathan 
1551bafec742SSukumar Swaminathan 	lbq_desc = &rx_ring->lbq_desc[0];
1552bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1553bafec742SSukumar Swaminathan 		/* Set flag so that callback will not allocate a new buffer */
1554bafec742SSukumar Swaminathan 		lbq_desc->free_buf = 1;
1555bafec742SSukumar Swaminathan 		if (lbq_desc->upl_inuse != 0) {
1556bafec742SSukumar Swaminathan 			force_cnt++;
1557bafec742SSukumar Swaminathan 		}
1558bafec742SSukumar Swaminathan 		if (lbq_desc->bd_dma.dma_handle != NULL) {
1559bafec742SSukumar Swaminathan 			ql_free_phys(&lbq_desc->bd_dma.dma_handle,
1560bafec742SSukumar Swaminathan 			    &lbq_desc->bd_dma.acc_handle);
1561bafec742SSukumar Swaminathan 			lbq_desc->bd_dma.dma_handle = NULL;
1562bafec742SSukumar Swaminathan 			lbq_desc->bd_dma.acc_handle = NULL;
1563bafec742SSukumar Swaminathan 		}
1564bafec742SSukumar Swaminathan 	}
1565bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1566bafec742SSukumar Swaminathan 	if (force_cnt) {
1567bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "lbq: free %d inuse %d force %d",
1568bafec742SSukumar Swaminathan 		    rx_ring->lbuf_free_count, rx_ring->lbuf_in_use_count,
1569bafec742SSukumar Swaminathan 		    force_cnt);
1570bafec742SSukumar Swaminathan 	}
1571bafec742SSukumar Swaminathan #endif
1572bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_in_use != NULL) {
1573bafec742SSukumar Swaminathan 		kmem_free(rx_ring->lbuf_in_use, (rx_ring->lbq_len *
1574bafec742SSukumar Swaminathan 		    sizeof (struct bq_desc *)));
1575bafec742SSukumar Swaminathan 		rx_ring->lbuf_in_use = NULL;
1576bafec742SSukumar Swaminathan 	}
1577bafec742SSukumar Swaminathan 
1578bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_free != NULL) {
1579bafec742SSukumar Swaminathan 		kmem_free(rx_ring->lbuf_free, (rx_ring->lbq_len *
1580bafec742SSukumar Swaminathan 		    sizeof (struct bq_desc *)));
1581bafec742SSukumar Swaminathan 		rx_ring->lbuf_free = NULL;
1582bafec742SSukumar Swaminathan 	}
1583bafec742SSukumar Swaminathan }
1584bafec742SSukumar Swaminathan 
1585bafec742SSukumar Swaminathan /* Allocate large buffers */
1586bafec742SSukumar Swaminathan static int
1587bafec742SSukumar Swaminathan ql_alloc_lbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1588bafec742SSukumar Swaminathan {
1589bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1590bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
1591bafec742SSukumar Swaminathan 	int i;
1592bafec742SSukumar Swaminathan 	uint32_t lbq_buf_size;
1593bafec742SSukumar Swaminathan 
1594bafec742SSukumar Swaminathan 	rx_ring->lbuf_free = kmem_zalloc(rx_ring->lbq_len *
1595bafec742SSukumar Swaminathan 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1596bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_free == NULL) {
1597bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
1598bafec742SSukumar Swaminathan 		    "!%s: lbuf_free_list alloc: failed",
1599bafec742SSukumar Swaminathan 		    __func__);
1600bafec742SSukumar Swaminathan 		rx_ring->lbuf_free_count = 0;
1601bafec742SSukumar Swaminathan 		goto alloc_lbuf_err;
1602bafec742SSukumar Swaminathan 	}
1603bafec742SSukumar Swaminathan 
1604bafec742SSukumar Swaminathan 	rx_ring->lbuf_in_use = kmem_zalloc(rx_ring->lbq_len *
1605bafec742SSukumar Swaminathan 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1606bafec742SSukumar Swaminathan 
1607bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_in_use == NULL) {
1608bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
1609bafec742SSukumar Swaminathan 		    "!%s: lbuf_inuse_list alloc: failed",
1610bafec742SSukumar Swaminathan 		    __func__);
1611bafec742SSukumar Swaminathan 		rx_ring->lbuf_in_use_count = 0;
1612bafec742SSukumar Swaminathan 		goto alloc_lbuf_err;
1613bafec742SSukumar Swaminathan 	}
1614bafec742SSukumar Swaminathan 	rx_ring->lbq_use_head = 0;
1615bafec742SSukumar Swaminathan 	rx_ring->lbq_use_tail = 0;
1616bafec742SSukumar Swaminathan 	rx_ring->lbq_free_head = 0;
1617bafec742SSukumar Swaminathan 	rx_ring->lbq_free_tail = 0;
1618bafec742SSukumar Swaminathan 
1619bafec742SSukumar Swaminathan 	lbq_buf_size = (qlge->mtu == ETHERMTU) ?
1620accf27a5SSukumar Swaminathan 	    LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE;
1621bafec742SSukumar Swaminathan 
1622bafec742SSukumar Swaminathan 	lbq_desc = &rx_ring->lbq_desc[0];
1623bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1624bafec742SSukumar Swaminathan 		rx_ring->lbq_buf_size = lbq_buf_size;
1625bafec742SSukumar Swaminathan 		/* Allocate buffer */
1626accf27a5SSukumar Swaminathan 		if (ql_alloc_phys_rbuf(qlge->dip, &lbq_desc->bd_dma.dma_handle,
1627bafec742SSukumar Swaminathan 		    &ql_buf_acc_attr,
1628bafec742SSukumar Swaminathan 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1629bafec742SSukumar Swaminathan 		    &lbq_desc->bd_dma.acc_handle,
1630bafec742SSukumar Swaminathan 		    (size_t)rx_ring->lbq_buf_size,  /* mem size */
1631bafec742SSukumar Swaminathan 		    (size_t)0, /* default alignment */
1632bafec742SSukumar Swaminathan 		    (caddr_t *)&lbq_desc->bd_dma.vaddr,
1633bafec742SSukumar Swaminathan 		    &dma_cookie) != 0) {
1634bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
1635bafec742SSukumar Swaminathan 			    "!%s: ddi_dma_alloc_handle: failed",
1636bafec742SSukumar Swaminathan 			    __func__);
1637bafec742SSukumar Swaminathan 			goto alloc_lbuf_err;
1638bafec742SSukumar Swaminathan 		}
1639bafec742SSukumar Swaminathan 
1640bafec742SSukumar Swaminathan 		/* Set context for Return buffer callback */
1641bafec742SSukumar Swaminathan 		lbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1642bafec742SSukumar Swaminathan 		lbq_desc->rx_recycle.free_func = ql_release_to_lbuf_free_list;
1643bafec742SSukumar Swaminathan 		lbq_desc->rx_recycle.free_arg  = (caddr_t)lbq_desc;
1644bafec742SSukumar Swaminathan 		lbq_desc->rx_ring = rx_ring;
1645bafec742SSukumar Swaminathan 		lbq_desc->upl_inuse = 0;
1646bafec742SSukumar Swaminathan 		lbq_desc->free_buf = 0;
1647bafec742SSukumar Swaminathan 
1648bafec742SSukumar Swaminathan 		lbq_desc->mp =
1649bafec742SSukumar Swaminathan 		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1650bafec742SSukumar Swaminathan 		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1651bafec742SSukumar Swaminathan 		if (lbq_desc->mp == NULL) {
1652bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1653bafec742SSukumar Swaminathan 			goto alloc_lbuf_err;
1654bafec742SSukumar Swaminathan 		}
1655bafec742SSukumar Swaminathan 		ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1656bafec742SSukumar Swaminathan 	} /* For all large buffers */
1657bafec742SSukumar Swaminathan 
1658bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
1659bafec742SSukumar Swaminathan 
1660bafec742SSukumar Swaminathan alloc_lbuf_err:
1661bafec742SSukumar Swaminathan 	ql_free_lbq_buffers(rx_ring);
1662bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
1663bafec742SSukumar Swaminathan }
1664bafec742SSukumar Swaminathan 
1665bafec742SSukumar Swaminathan /*
1666bafec742SSukumar Swaminathan  * Free rx buffers
1667bafec742SSukumar Swaminathan  */
1668bafec742SSukumar Swaminathan static void
1669bafec742SSukumar Swaminathan ql_free_rx_buffers(qlge_t *qlge)
1670bafec742SSukumar Swaminathan {
1671bafec742SSukumar Swaminathan 	int i;
1672bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
1673bafec742SSukumar Swaminathan 
1674bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
1675bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
1676bafec742SSukumar Swaminathan 		if (rx_ring->type != TX_Q) {
1677bafec742SSukumar Swaminathan 			ql_free_lbq_buffers(rx_ring);
1678bafec742SSukumar Swaminathan 			ql_free_sbq_buffers(rx_ring);
1679bafec742SSukumar Swaminathan 		}
1680bafec742SSukumar Swaminathan 	}
1681bafec742SSukumar Swaminathan }
1682bafec742SSukumar Swaminathan 
1683bafec742SSukumar Swaminathan /*
1684bafec742SSukumar Swaminathan  * Allocate rx buffers
1685bafec742SSukumar Swaminathan  */
1686bafec742SSukumar Swaminathan static int
1687bafec742SSukumar Swaminathan ql_alloc_rx_buffers(qlge_t *qlge)
1688bafec742SSukumar Swaminathan {
1689bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
1690bafec742SSukumar Swaminathan 	int i;
1691bafec742SSukumar Swaminathan 
1692bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
1693bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
1694bafec742SSukumar Swaminathan 		if (rx_ring->type != TX_Q) {
1695bafec742SSukumar Swaminathan 			if (ql_alloc_sbufs(qlge, rx_ring) != DDI_SUCCESS)
1696bafec742SSukumar Swaminathan 				goto alloc_err;
1697bafec742SSukumar Swaminathan 			if (ql_alloc_lbufs(qlge, rx_ring) != DDI_SUCCESS)
1698bafec742SSukumar Swaminathan 				goto alloc_err;
1699bafec742SSukumar Swaminathan 		}
1700bafec742SSukumar Swaminathan 	}
1701bafec742SSukumar Swaminathan #ifdef QLGE_TRACK_BUFFER_USAGE
1702bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
1703bafec742SSukumar Swaminathan 		if (qlge->rx_ring[i].type == RX_Q) {
1704bafec742SSukumar Swaminathan 			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
1705bafec742SSukumar Swaminathan 			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
1706bafec742SSukumar Swaminathan 		}
1707bafec742SSukumar Swaminathan 		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
1708bafec742SSukumar Swaminathan 	}
1709bafec742SSukumar Swaminathan #endif
1710bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
1711bafec742SSukumar Swaminathan 
1712bafec742SSukumar Swaminathan alloc_err:
1713bafec742SSukumar Swaminathan 
1714bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
1715bafec742SSukumar Swaminathan }
1716bafec742SSukumar Swaminathan 
1717bafec742SSukumar Swaminathan /*
1718bafec742SSukumar Swaminathan  * Initialize large buffer queue ring
1719bafec742SSukumar Swaminathan  */
1720bafec742SSukumar Swaminathan static void
1721bafec742SSukumar Swaminathan ql_init_lbq_ring(struct rx_ring *rx_ring)
1722bafec742SSukumar Swaminathan {
1723bafec742SSukumar Swaminathan 	uint16_t i;
1724bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1725bafec742SSukumar Swaminathan 
1726bafec742SSukumar Swaminathan 	bzero(rx_ring->lbq_desc, rx_ring->lbq_len * sizeof (struct bq_desc));
1727bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->lbq_len; i++) {
1728bafec742SSukumar Swaminathan 		lbq_desc = &rx_ring->lbq_desc[i];
1729bafec742SSukumar Swaminathan 		lbq_desc->index = i;
1730bafec742SSukumar Swaminathan 	}
1731bafec742SSukumar Swaminathan }
1732bafec742SSukumar Swaminathan 
1733bafec742SSukumar Swaminathan /*
1734bafec742SSukumar Swaminathan  * Initialize small buffer queue ring
1735bafec742SSukumar Swaminathan  */
1736bafec742SSukumar Swaminathan static void
1737bafec742SSukumar Swaminathan ql_init_sbq_ring(struct rx_ring *rx_ring)
1738bafec742SSukumar Swaminathan {
1739bafec742SSukumar Swaminathan 	uint16_t i;
1740bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
1741bafec742SSukumar Swaminathan 
1742bafec742SSukumar Swaminathan 	bzero(rx_ring->sbq_desc, rx_ring->sbq_len * sizeof (struct bq_desc));
1743bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->sbq_len; i++) {
1744bafec742SSukumar Swaminathan 		sbq_desc = &rx_ring->sbq_desc[i];
1745bafec742SSukumar Swaminathan 		sbq_desc->index = i;
1746bafec742SSukumar Swaminathan 	}
1747bafec742SSukumar Swaminathan }
1748bafec742SSukumar Swaminathan 
1749bafec742SSukumar Swaminathan /*
1750bafec742SSukumar Swaminathan  * Calculate the pseudo-header checksum if hardware can not do
1751bafec742SSukumar Swaminathan  */
1752bafec742SSukumar Swaminathan static void
1753bafec742SSukumar Swaminathan ql_pseudo_cksum(uint8_t *buf)
1754bafec742SSukumar Swaminathan {
1755bafec742SSukumar Swaminathan 	uint32_t cksum;
1756bafec742SSukumar Swaminathan 	uint16_t iphl;
1757bafec742SSukumar Swaminathan 	uint16_t proto;
1758bafec742SSukumar Swaminathan 
1759bafec742SSukumar Swaminathan 	iphl = (uint16_t)(4 * (buf[0] & 0xF));
1760bafec742SSukumar Swaminathan 	cksum = (((uint16_t)buf[2])<<8) + buf[3] - iphl;
1761bafec742SSukumar Swaminathan 	cksum += proto = buf[9];
1762bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[12])<<8) + buf[13];
1763bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[14])<<8) + buf[15];
1764bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[16])<<8) + buf[17];
1765bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[18])<<8) + buf[19];
1766bafec742SSukumar Swaminathan 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1767bafec742SSukumar Swaminathan 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1768bafec742SSukumar Swaminathan 
1769bafec742SSukumar Swaminathan 	/*
1770bafec742SSukumar Swaminathan 	 * Point it to the TCP/UDP header, and
1771bafec742SSukumar Swaminathan 	 * update the checksum field.
1772bafec742SSukumar Swaminathan 	 */
1773bafec742SSukumar Swaminathan 	buf += iphl + ((proto == IPPROTO_TCP) ?
1774bafec742SSukumar Swaminathan 	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
1775bafec742SSukumar Swaminathan 
1776bafec742SSukumar Swaminathan 	*(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
1777bafec742SSukumar Swaminathan 
1778bafec742SSukumar Swaminathan }
1779bafec742SSukumar Swaminathan 
/*
 * Transmit an incoming packet chain on one tx ring.
 * Queues as many mblks as ring resources allow and returns the unsent
 * remainder; returning non-NULL tells the caller the driver will signal
 * mac_tx_ring_update() when resources free up.  NULL means the whole
 * chain was consumed (sent or, when the link is down, freed).
 */
mblk_t *
ql_ring_tx(void *arg, mblk_t *mp)
{
	struct tx_ring *tx_ring = (struct tx_ring *)arg;
	qlge_t *qlge = tx_ring->qlge;
	mblk_t *next;
	int rval;
	uint32_t tx_count = 0;	/* frames queued since last doorbell write */

	if (qlge->port_link_state == LS_DOWN) {
		/* can not send message while link is down */
		mblk_t *tp;

		/* free the entire chain; returning NULL reports it consumed */
		while (mp != NULL) {
			tp = mp->b_next;
			mp->b_next = NULL;
			freemsg(mp);
			mp = tp;
		}
		goto exit;
	}

	mutex_enter(&tx_ring->tx_lock);
	/* if mac is not started, driver is not ready, can not send */
	if (tx_ring->mac_flags != QL_MAC_STARTED) {
		cmn_err(CE_WARN, "%s(%d)ring not started, mode %d "
		    " return packets",
		    __func__, qlge->instance, tx_ring->mac_flags);
		mutex_exit(&tx_ring->tx_lock);
		/* return the untouched chain to the caller */
		goto exit;
	}

	/* we must try to send all */
	while (mp != NULL) {
		/*
		 * if number of available slots is less than a threshold,
		 * then quit
		 */
		if (tx_ring->tx_free_count <= TX_STOP_THRESHOLD) {
			tx_ring->queue_stopped = 1;
			rval = DDI_FAILURE;
#ifdef QLGE_LOAD_UNLOAD
			cmn_err(CE_WARN, "%s(%d) no resources",
			    __func__, qlge->instance);
#endif
			tx_ring->defer++;
			/*
			 * If we return the buffer back we are expected to call
			 * mac_tx_ring_update() when resources are available
			 */
			break;
		}

		/* detach the head mblk before handing it to the send path */
		next = mp->b_next;
		mp->b_next = NULL;

		rval = ql_send_common(tx_ring, mp);

		if (rval != DDI_SUCCESS) {
			/* re-link the chain and return the unsent remainder */
			mp->b_next = next;
			break;
		}
		tx_count++;
		mp = next;
	}

	/*
	 * After all msg blocks are mapped or copied to tx buffer,
	 * trigger the hardware to send!
	 */
	if (tx_count > 0) {
		ql_write_doorbell_reg(tx_ring->qlge, tx_ring->prod_idx_db_reg,
		    tx_ring->prod_idx);
	}

	mutex_exit(&tx_ring->tx_lock);
exit:
	return (mp);
}
1862bafec742SSukumar Swaminathan 
1863bafec742SSukumar Swaminathan 
1864bafec742SSukumar Swaminathan /*
1865bafec742SSukumar Swaminathan  * This function builds an mblk list for the given inbound
1866bafec742SSukumar Swaminathan  * completion.
1867bafec742SSukumar Swaminathan  */
1868bafec742SSukumar Swaminathan 
1869bafec742SSukumar Swaminathan static mblk_t *
1870bafec742SSukumar Swaminathan ql_build_rx_mp(qlge_t *qlge, struct rx_ring *rx_ring,
1871bafec742SSukumar Swaminathan     struct ib_mac_iocb_rsp *ib_mac_rsp)
1872bafec742SSukumar Swaminathan {
1873bafec742SSukumar Swaminathan 	mblk_t *mp = NULL;
1874bafec742SSukumar Swaminathan 	mblk_t *mp1 = NULL;	/* packet header */
1875bafec742SSukumar Swaminathan 	mblk_t *mp2 = NULL;	/* packet content */
1876bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1877bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
1878bafec742SSukumar Swaminathan 	uint32_t err_flag = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK);
1879bafec742SSukumar Swaminathan 	uint32_t payload_len = le32_to_cpu(ib_mac_rsp->data_len);
1880bafec742SSukumar Swaminathan 	uint32_t header_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1881bafec742SSukumar Swaminathan 	uint32_t pkt_len = payload_len + header_len;
1882bafec742SSukumar Swaminathan 	uint32_t done;
1883bafec742SSukumar Swaminathan 	uint64_t *curr_ial_ptr;
1884bafec742SSukumar Swaminathan 	uint32_t ial_data_addr_low;
1885bafec742SSukumar Swaminathan 	uint32_t actual_data_addr_low;
1886bafec742SSukumar Swaminathan 	mblk_t *mp_ial = NULL;	/* ial chained packets */
1887bafec742SSukumar Swaminathan 	uint32_t size;
1888*a6766df4SSukumar Swaminathan 	uint32_t cp_offset;
1889*a6766df4SSukumar Swaminathan 	boolean_t rx_copy = B_FALSE;
1890*a6766df4SSukumar Swaminathan 	mblk_t *tp = NULL;
1891bafec742SSukumar Swaminathan 
1892bafec742SSukumar Swaminathan 	/*
1893bafec742SSukumar Swaminathan 	 * Check if error flags are set
1894bafec742SSukumar Swaminathan 	 */
1895bafec742SSukumar Swaminathan 	if (err_flag != 0) {
1896bafec742SSukumar Swaminathan 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_OVERSIZE) != 0)
1897bafec742SSukumar Swaminathan 			rx_ring->frame_too_long++;
1898bafec742SSukumar Swaminathan 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_UNDERSIZE) != 0)
1899bafec742SSukumar Swaminathan 			rx_ring->frame_too_short++;
1900bafec742SSukumar Swaminathan 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_CRC) != 0)
1901bafec742SSukumar Swaminathan 			rx_ring->fcs_err++;
1902bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1903bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "bad packet, type 0x%x", err_flag);
1904bafec742SSukumar Swaminathan #endif
1905bafec742SSukumar Swaminathan 		QL_DUMP(DBG_RX, "qlge_ring_rx: bad response iocb dump\n",
1906bafec742SSukumar Swaminathan 		    (uint8_t *)ib_mac_rsp, 8,
1907bafec742SSukumar Swaminathan 		    (size_t)sizeof (struct ib_mac_iocb_rsp));
1908bafec742SSukumar Swaminathan 	}
1909bafec742SSukumar Swaminathan 
1910bafec742SSukumar Swaminathan 	/* header should not be in large buffer */
1911bafec742SSukumar Swaminathan 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL) {
1912bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "header in large buffer or invalid!");
1913bafec742SSukumar Swaminathan 		err_flag |= 1;
1914bafec742SSukumar Swaminathan 	}
1915accf27a5SSukumar Swaminathan 	/* if whole packet is too big than rx buffer size */
1916accf27a5SSukumar Swaminathan 	if (pkt_len > qlge->max_frame_size) {
1917accf27a5SSukumar Swaminathan 		cmn_err(CE_WARN, "ql_build_rx_mpframe too long(%d)!", pkt_len);
1918accf27a5SSukumar Swaminathan 		err_flag |= 1;
1919accf27a5SSukumar Swaminathan 	}
1920*a6766df4SSukumar Swaminathan 	if (qlge->rx_copy ||
1921*a6766df4SSukumar Swaminathan 	    (rx_ring->sbuf_in_use_count <= qlge->rx_copy_threshold) ||
1922*a6766df4SSukumar Swaminathan 	    (rx_ring->lbuf_in_use_count <= qlge->rx_copy_threshold)) {
1923*a6766df4SSukumar Swaminathan 		rx_copy = B_TRUE;
1924*a6766df4SSukumar Swaminathan 	}
1925accf27a5SSukumar Swaminathan 
1926*a6766df4SSukumar Swaminathan 	/* if using rx copy mode, we need to allocate a big enough buffer */
1927*a6766df4SSukumar Swaminathan 	if (rx_copy) {
1928*a6766df4SSukumar Swaminathan 		qlge->stats.norcvbuf++;
1929*a6766df4SSukumar Swaminathan 		tp = allocb(payload_len + header_len + qlge->ip_hdr_offset,
1930*a6766df4SSukumar Swaminathan 		    BPRI_MED);
1931*a6766df4SSukumar Swaminathan 		if (tp == NULL) {
1932*a6766df4SSukumar Swaminathan 			cmn_err(CE_WARN, "rx copy failed to allocate memory");
1933*a6766df4SSukumar Swaminathan 		} else {
1934*a6766df4SSukumar Swaminathan 			tp->b_rptr += qlge->ip_hdr_offset;
1935*a6766df4SSukumar Swaminathan 		}
1936*a6766df4SSukumar Swaminathan 	}
1937bafec742SSukumar Swaminathan 	/*
1938bafec742SSukumar Swaminathan 	 * Handle the header buffer if present.
1939bafec742SSukumar Swaminathan 	 * packet header must be valid and saved in one small buffer
1940bafec742SSukumar Swaminathan 	 * broadcast/multicast packets' headers not splitted
1941bafec742SSukumar Swaminathan 	 */
1942bafec742SSukumar Swaminathan 	if ((ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) &&
1943bafec742SSukumar Swaminathan 	    (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1944bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX, ("Header of %d bytes in small buffer.\n",
1945bafec742SSukumar Swaminathan 		    header_len));
1946bafec742SSukumar Swaminathan 		/* Sync access */
1947bafec742SSukumar Swaminathan 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
1948bafec742SSukumar Swaminathan 
1949bafec742SSukumar Swaminathan 		ASSERT(sbq_desc != NULL);
1950bafec742SSukumar Swaminathan 
1951bafec742SSukumar Swaminathan 		/*
1952bafec742SSukumar Swaminathan 		 * Validate addresses from the ASIC with the
1953bafec742SSukumar Swaminathan 		 * expected sbuf address
1954bafec742SSukumar Swaminathan 		 */
1955bafec742SSukumar Swaminathan 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
1956bafec742SSukumar Swaminathan 		    != ib_mac_rsp->hdr_addr) {
1957bafec742SSukumar Swaminathan 			/* Small buffer address mismatch */
1958bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
1959bafec742SSukumar Swaminathan 			    " in wrong small buffer",
1960bafec742SSukumar Swaminathan 			    __func__, qlge->instance, rx_ring->cq_id);
1961accf27a5SSukumar Swaminathan 			goto fatal_error;
1962bafec742SSukumar Swaminathan 		}
1963bafec742SSukumar Swaminathan 		/* get this packet */
1964bafec742SSukumar Swaminathan 		mp1 = sbq_desc->mp;
1965*a6766df4SSukumar Swaminathan 		/* Flush DMA'd data */
1966*a6766df4SSukumar Swaminathan 		(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
1967*a6766df4SSukumar Swaminathan 		    0, header_len, DDI_DMA_SYNC_FORKERNEL);
1968*a6766df4SSukumar Swaminathan 
1969bafec742SSukumar Swaminathan 		if ((err_flag != 0)|| (mp1 == NULL)) {
1970bafec742SSukumar Swaminathan 			/* failed on this packet, put it back for re-arming */
1971bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1972bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "get header from small buffer fail");
1973bafec742SSukumar Swaminathan #endif
1974bafec742SSukumar Swaminathan 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1975bafec742SSukumar Swaminathan 			mp1 = NULL;
1976*a6766df4SSukumar Swaminathan 		} else if (rx_copy) {
1977*a6766df4SSukumar Swaminathan 			if (tp != NULL) {
1978*a6766df4SSukumar Swaminathan 				bcopy(sbq_desc->bd_dma.vaddr, tp->b_rptr,
1979*a6766df4SSukumar Swaminathan 				    header_len);
1980*a6766df4SSukumar Swaminathan 			}
1981*a6766df4SSukumar Swaminathan 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1982*a6766df4SSukumar Swaminathan 			mp1 = NULL;
1983bafec742SSukumar Swaminathan 		} else {
1984bafec742SSukumar Swaminathan 			if ((qlge->ip_hdr_offset != 0)&&
1985bafec742SSukumar Swaminathan 			    (header_len < SMALL_BUFFER_SIZE)) {
1986bafec742SSukumar Swaminathan 				/*
1987bafec742SSukumar Swaminathan 				 * copy entire header to a 2 bytes boundary
1988bafec742SSukumar Swaminathan 				 * address for 8100 adapters so that the IP
1989bafec742SSukumar Swaminathan 				 * header can be on a 4 byte boundary address
1990bafec742SSukumar Swaminathan 				 */
1991bafec742SSukumar Swaminathan 				bcopy(mp1->b_rptr,
1992bafec742SSukumar Swaminathan 				    (mp1->b_rptr + SMALL_BUFFER_SIZE +
1993bafec742SSukumar Swaminathan 				    qlge->ip_hdr_offset),
1994bafec742SSukumar Swaminathan 				    header_len);
1995bafec742SSukumar Swaminathan 				mp1->b_rptr += SMALL_BUFFER_SIZE +
1996bafec742SSukumar Swaminathan 				    qlge->ip_hdr_offset;
1997bafec742SSukumar Swaminathan 			}
1998bafec742SSukumar Swaminathan 
1999bafec742SSukumar Swaminathan 			/*
2000bafec742SSukumar Swaminathan 			 * Adjust the mp payload_len to match
2001bafec742SSukumar Swaminathan 			 * the packet header payload_len
2002bafec742SSukumar Swaminathan 			 */
2003bafec742SSukumar Swaminathan 			mp1->b_wptr = mp1->b_rptr + header_len;
2004bafec742SSukumar Swaminathan 			mp1->b_next = mp1->b_cont = NULL;
2005bafec742SSukumar Swaminathan 			QL_DUMP(DBG_RX, "\t RX packet header dump:\n",
2006bafec742SSukumar Swaminathan 			    (uint8_t *)mp1->b_rptr, 8, header_len);
2007bafec742SSukumar Swaminathan 		}
2008bafec742SSukumar Swaminathan 	}
2009bafec742SSukumar Swaminathan 
2010bafec742SSukumar Swaminathan 	/*
2011bafec742SSukumar Swaminathan 	 * packet data or whole packet can be in small or one or
2012bafec742SSukumar Swaminathan 	 * several large buffer(s)
2013bafec742SSukumar Swaminathan 	 */
2014bafec742SSukumar Swaminathan 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2015bafec742SSukumar Swaminathan 		/*
2016bafec742SSukumar Swaminathan 		 * The data is in a single small buffer.
2017bafec742SSukumar Swaminathan 		 */
2018bafec742SSukumar Swaminathan 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2019bafec742SSukumar Swaminathan 
2020bafec742SSukumar Swaminathan 		ASSERT(sbq_desc != NULL);
2021bafec742SSukumar Swaminathan 
2022bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX,
2023bafec742SSukumar Swaminathan 		    ("%d bytes in a single small buffer, sbq_desc = %p, "
2024bafec742SSukumar Swaminathan 		    "sbq_desc->bd_dma.dma_addr = %x,"
2025bafec742SSukumar Swaminathan 		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
2026bafec742SSukumar Swaminathan 		    payload_len, sbq_desc, sbq_desc->bd_dma.dma_addr,
2027bafec742SSukumar Swaminathan 		    ib_mac_rsp->data_addr, sbq_desc->mp));
2028bafec742SSukumar Swaminathan 
2029bafec742SSukumar Swaminathan 		/*
2030bafec742SSukumar Swaminathan 		 * Validate  addresses from the ASIC with the
2031bafec742SSukumar Swaminathan 		 * expected sbuf address
2032bafec742SSukumar Swaminathan 		 */
2033bafec742SSukumar Swaminathan 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
2034bafec742SSukumar Swaminathan 		    != ib_mac_rsp->data_addr) {
2035bafec742SSukumar Swaminathan 			/* Small buffer address mismatch */
2036bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
2037bafec742SSukumar Swaminathan 			    " in wrong small buffer",
2038bafec742SSukumar Swaminathan 			    __func__, qlge->instance, rx_ring->cq_id);
2039accf27a5SSukumar Swaminathan 			goto fatal_error;
2040bafec742SSukumar Swaminathan 		}
2041bafec742SSukumar Swaminathan 		/* get this packet */
2042bafec742SSukumar Swaminathan 		mp2 = sbq_desc->mp;
2043*a6766df4SSukumar Swaminathan 		(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
2044*a6766df4SSukumar Swaminathan 		    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
2045bafec742SSukumar Swaminathan 		if ((err_flag != 0) || (mp2 == NULL)) {
2046bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
2047bafec742SSukumar Swaminathan 			/* failed on this packet, put it back for re-arming */
2048bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "ignore bad data from small buffer");
2049bafec742SSukumar Swaminathan #endif
2050bafec742SSukumar Swaminathan 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2051bafec742SSukumar Swaminathan 			mp2 = NULL;
2052*a6766df4SSukumar Swaminathan 		} else if (rx_copy) {
2053*a6766df4SSukumar Swaminathan 			if (tp != NULL) {
2054*a6766df4SSukumar Swaminathan 				bcopy(sbq_desc->bd_dma.vaddr,
2055*a6766df4SSukumar Swaminathan 				    tp->b_rptr + header_len, payload_len);
2056*a6766df4SSukumar Swaminathan 				tp->b_wptr =
2057*a6766df4SSukumar Swaminathan 				    tp->b_rptr + header_len + payload_len;
2058*a6766df4SSukumar Swaminathan 			}
2059*a6766df4SSukumar Swaminathan 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2060*a6766df4SSukumar Swaminathan 			mp2 = NULL;
2061bafec742SSukumar Swaminathan 		} else {
2062bafec742SSukumar Swaminathan 			/* Adjust the buffer length to match the payload_len */
2063bafec742SSukumar Swaminathan 			mp2->b_wptr = mp2->b_rptr + payload_len;
2064bafec742SSukumar Swaminathan 			mp2->b_next = mp2->b_cont = NULL;
2065bafec742SSukumar Swaminathan 			/* Flush DMA'd data */
2066bafec742SSukumar Swaminathan 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
2067bafec742SSukumar Swaminathan 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
2068bafec742SSukumar Swaminathan 			/*
2069bafec742SSukumar Swaminathan 			 * if payload is too small , copy to
2070bafec742SSukumar Swaminathan 			 * the end of packet header
2071bafec742SSukumar Swaminathan 			 */
2072bafec742SSukumar Swaminathan 			if ((mp1 != NULL) &&
2073bafec742SSukumar Swaminathan 			    (payload_len <= qlge->payload_copy_thresh) &&
2074bafec742SSukumar Swaminathan 			    (pkt_len <
2075bafec742SSukumar Swaminathan 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2076bafec742SSukumar Swaminathan 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2077bafec742SSukumar Swaminathan 				mp1->b_wptr += payload_len;
2078bafec742SSukumar Swaminathan 				freemsg(mp2);
2079bafec742SSukumar Swaminathan 				mp2 = NULL;
2080bafec742SSukumar Swaminathan 			}
2081bafec742SSukumar Swaminathan 		}
2082bafec742SSukumar Swaminathan 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2083bafec742SSukumar Swaminathan 		/*
2084bafec742SSukumar Swaminathan 		 * The data is in a single large buffer.
2085bafec742SSukumar Swaminathan 		 */
2086bafec742SSukumar Swaminathan 		lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2087bafec742SSukumar Swaminathan 
2088bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX,
2089bafec742SSukumar Swaminathan 		    ("%d bytes in a single large buffer, lbq_desc = %p, "
2090bafec742SSukumar Swaminathan 		    "lbq_desc->bd_dma.dma_addr = %x,"
2091bafec742SSukumar Swaminathan 		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
2092bafec742SSukumar Swaminathan 		    payload_len, lbq_desc, lbq_desc->bd_dma.dma_addr,
2093bafec742SSukumar Swaminathan 		    ib_mac_rsp->data_addr, lbq_desc->mp));
2094bafec742SSukumar Swaminathan 
2095bafec742SSukumar Swaminathan 		ASSERT(lbq_desc != NULL);
2096bafec742SSukumar Swaminathan 
2097bafec742SSukumar Swaminathan 		/*
2098bafec742SSukumar Swaminathan 		 * Validate  addresses from the ASIC with
2099bafec742SSukumar Swaminathan 		 * the expected lbuf address
2100bafec742SSukumar Swaminathan 		 */
2101bafec742SSukumar Swaminathan 		if (cpu_to_le64(lbq_desc->bd_dma.dma_addr)
2102bafec742SSukumar Swaminathan 		    != ib_mac_rsp->data_addr) {
2103bafec742SSukumar Swaminathan 			/* Large buffer address mismatch */
2104bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
2105bafec742SSukumar Swaminathan 			    " in wrong large buffer",
2106bafec742SSukumar Swaminathan 			    __func__, qlge->instance, rx_ring->cq_id);
2107accf27a5SSukumar Swaminathan 			goto fatal_error;
2108bafec742SSukumar Swaminathan 		}
2109bafec742SSukumar Swaminathan 		mp2 = lbq_desc->mp;
2110*a6766df4SSukumar Swaminathan 		/* Flush DMA'd data */
2111*a6766df4SSukumar Swaminathan 		(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2112*a6766df4SSukumar Swaminathan 		    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
2113bafec742SSukumar Swaminathan 		if ((err_flag != 0) || (mp2 == NULL)) {
2114bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
2115bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "ignore bad data from large buffer");
2116bafec742SSukumar Swaminathan #endif
2117bafec742SSukumar Swaminathan 			/* failed on this packet, put it back for re-arming */
2118bafec742SSukumar Swaminathan 			ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2119bafec742SSukumar Swaminathan 			mp2 = NULL;
2120*a6766df4SSukumar Swaminathan 		} else if (rx_copy) {
2121*a6766df4SSukumar Swaminathan 			if (tp != NULL) {
2122*a6766df4SSukumar Swaminathan 				bcopy(lbq_desc->bd_dma.vaddr,
2123*a6766df4SSukumar Swaminathan 				    tp->b_rptr + header_len, payload_len);
2124*a6766df4SSukumar Swaminathan 				tp->b_wptr =
2125*a6766df4SSukumar Swaminathan 				    tp->b_rptr + header_len + payload_len;
2126*a6766df4SSukumar Swaminathan 			}
2127*a6766df4SSukumar Swaminathan 			ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2128*a6766df4SSukumar Swaminathan 			mp2 = NULL;
2129bafec742SSukumar Swaminathan 		} else {
2130bafec742SSukumar Swaminathan 			/*
2131bafec742SSukumar Swaminathan 			 * Adjust the buffer length to match
2132bafec742SSukumar Swaminathan 			 * the packet payload_len
2133bafec742SSukumar Swaminathan 			 */
2134bafec742SSukumar Swaminathan 			mp2->b_wptr = mp2->b_rptr + payload_len;
2135bafec742SSukumar Swaminathan 			mp2->b_next = mp2->b_cont = NULL;
2136bafec742SSukumar Swaminathan 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
2137bafec742SSukumar Swaminathan 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
2138bafec742SSukumar Swaminathan 			/*
2139bafec742SSukumar Swaminathan 			 * if payload is too small , copy to
2140bafec742SSukumar Swaminathan 			 * the end of packet header
2141bafec742SSukumar Swaminathan 			 */
2142bafec742SSukumar Swaminathan 			if ((mp1 != NULL) &&
2143bafec742SSukumar Swaminathan 			    (payload_len <= qlge->payload_copy_thresh) &&
2144bafec742SSukumar Swaminathan 			    (pkt_len<
2145bafec742SSukumar Swaminathan 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2146bafec742SSukumar Swaminathan 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2147bafec742SSukumar Swaminathan 				mp1->b_wptr += payload_len;
2148bafec742SSukumar Swaminathan 				freemsg(mp2);
2149bafec742SSukumar Swaminathan 				mp2 = NULL;
2150bafec742SSukumar Swaminathan 			}
2151bafec742SSukumar Swaminathan 		}
2152*a6766df4SSukumar Swaminathan 	} else if (payload_len) { /* ial case */
2153bafec742SSukumar Swaminathan 		/*
2154bafec742SSukumar Swaminathan 		 * payload available but not in sml nor lrg buffer,
2155bafec742SSukumar Swaminathan 		 * so, it is saved in IAL
2156bafec742SSukumar Swaminathan 		 */
2157bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
2158bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "packet chained in IAL \n");
2159bafec742SSukumar Swaminathan #endif
2160bafec742SSukumar Swaminathan 		/* lrg buf addresses are saved in one small buffer */
2161bafec742SSukumar Swaminathan 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2162bafec742SSukumar Swaminathan 		curr_ial_ptr = (uint64_t *)sbq_desc->bd_dma.vaddr;
2163bafec742SSukumar Swaminathan 		done = 0;
2164*a6766df4SSukumar Swaminathan 		cp_offset = 0;
2165*a6766df4SSukumar Swaminathan 
2166bafec742SSukumar Swaminathan 		while (!done) {
2167bafec742SSukumar Swaminathan 			ial_data_addr_low =
2168bafec742SSukumar Swaminathan 			    (uint32_t)(le64_to_cpu(*curr_ial_ptr) &
2169bafec742SSukumar Swaminathan 			    0xFFFFFFFE);
2170bafec742SSukumar Swaminathan 			/* check if this is the last packet fragment */
2171bafec742SSukumar Swaminathan 			done = (uint32_t)(le64_to_cpu(*curr_ial_ptr) & 1);
2172bafec742SSukumar Swaminathan 			curr_ial_ptr++;
2173bafec742SSukumar Swaminathan 			/*
2174bafec742SSukumar Swaminathan 			 * The data is in one or several large buffer(s).
2175bafec742SSukumar Swaminathan 			 */
2176bafec742SSukumar Swaminathan 			lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2177bafec742SSukumar Swaminathan 			actual_data_addr_low =
2178bafec742SSukumar Swaminathan 			    (uint32_t)(lbq_desc->bd_dma.dma_addr &
2179bafec742SSukumar Swaminathan 			    0xFFFFFFFE);
2180bafec742SSukumar Swaminathan 			if (ial_data_addr_low != actual_data_addr_low) {
2181bafec742SSukumar Swaminathan 				cmn_err(CE_WARN,
2182bafec742SSukumar Swaminathan 				    "packet saved in wrong ial lrg buffer"
2183bafec742SSukumar Swaminathan 				    " expected %x, actual %lx",
2184bafec742SSukumar Swaminathan 				    ial_data_addr_low,
2185bafec742SSukumar Swaminathan 				    (uintptr_t)lbq_desc->bd_dma.dma_addr);
2186accf27a5SSukumar Swaminathan 				goto fatal_error;
2187bafec742SSukumar Swaminathan 			}
2188bafec742SSukumar Swaminathan 
2189bafec742SSukumar Swaminathan 			size = (payload_len < rx_ring->lbq_buf_size)?
2190bafec742SSukumar Swaminathan 			    payload_len : rx_ring->lbq_buf_size;
2191accf27a5SSukumar Swaminathan 			payload_len -= size;
2192accf27a5SSukumar Swaminathan 			mp2 = lbq_desc->mp;
2193accf27a5SSukumar Swaminathan 			if ((err_flag != 0) || (mp2 == NULL)) {
2194accf27a5SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
2195accf27a5SSukumar Swaminathan 				cmn_err(CE_WARN,
2196accf27a5SSukumar Swaminathan 				    "ignore bad data from large buffer");
2197accf27a5SSukumar Swaminathan #endif
2198accf27a5SSukumar Swaminathan 				ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2199accf27a5SSukumar Swaminathan 				mp2 = NULL;
2200*a6766df4SSukumar Swaminathan 			} else if (rx_copy) {
2201*a6766df4SSukumar Swaminathan 				if (tp != NULL) {
2202*a6766df4SSukumar Swaminathan 					(void) ddi_dma_sync(
2203*a6766df4SSukumar Swaminathan 					    lbq_desc->bd_dma.dma_handle,
2204*a6766df4SSukumar Swaminathan 					    0, size, DDI_DMA_SYNC_FORKERNEL);
2205*a6766df4SSukumar Swaminathan 					bcopy(lbq_desc->bd_dma.vaddr,
2206*a6766df4SSukumar Swaminathan 					    tp->b_rptr + header_len + cp_offset,
2207*a6766df4SSukumar Swaminathan 					    size);
2208*a6766df4SSukumar Swaminathan 					tp->b_wptr =
2209*a6766df4SSukumar Swaminathan 					    tp->b_rptr + size + cp_offset +
2210*a6766df4SSukumar Swaminathan 					    header_len;
2211*a6766df4SSukumar Swaminathan 					cp_offset += size;
2212*a6766df4SSukumar Swaminathan 				}
2213*a6766df4SSukumar Swaminathan 				ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2214*a6766df4SSukumar Swaminathan 				mp2 = NULL;
2215accf27a5SSukumar Swaminathan 			} else {
2216accf27a5SSukumar Swaminathan 				if (mp_ial == NULL) {
2217accf27a5SSukumar Swaminathan 					mp_ial = mp2;
2218accf27a5SSukumar Swaminathan 				} else {
2219accf27a5SSukumar Swaminathan 					linkb(mp_ial, mp2);
2220accf27a5SSukumar Swaminathan 				}
2221accf27a5SSukumar Swaminathan 
2222accf27a5SSukumar Swaminathan 				mp2->b_next = NULL;
2223accf27a5SSukumar Swaminathan 				mp2->b_cont = NULL;
2224bafec742SSukumar Swaminathan 				mp2->b_wptr = mp2->b_rptr + size;
2225bafec742SSukumar Swaminathan 				/* Flush DMA'd data */
2226bafec742SSukumar Swaminathan 				(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2227bafec742SSukumar Swaminathan 				    0, size, DDI_DMA_SYNC_FORKERNEL);
2228accf27a5SSukumar Swaminathan 				QL_PRINT(DBG_RX, ("ial %d payload received \n",
2229accf27a5SSukumar Swaminathan 				    size));
2230bafec742SSukumar Swaminathan 				QL_DUMP(DBG_RX, "\t Mac data dump:\n",
2231bafec742SSukumar Swaminathan 				    (uint8_t *)mp2->b_rptr, 8, size);
2232bafec742SSukumar Swaminathan 			}
2233accf27a5SSukumar Swaminathan 		}
2234accf27a5SSukumar Swaminathan 		if (err_flag != 0) {
2235accf27a5SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
2236accf27a5SSukumar Swaminathan 			/* failed on this packet, put it back for re-arming */
2237accf27a5SSukumar Swaminathan 			cmn_err(CE_WARN, "ignore bad data from small buffer");
2238accf27a5SSukumar Swaminathan #endif
2239accf27a5SSukumar Swaminathan 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2240accf27a5SSukumar Swaminathan 		} else {
2241bafec742SSukumar Swaminathan 			mp2 = mp_ial;
2242bafec742SSukumar Swaminathan 			freemsg(sbq_desc->mp);
2243bafec742SSukumar Swaminathan 		}
2244accf27a5SSukumar Swaminathan 	}
2245bafec742SSukumar Swaminathan 	/*
2246bafec742SSukumar Swaminathan 	 * some packets' hdr not split, then send mp2 upstream, otherwise,
2247bafec742SSukumar Swaminathan 	 * concatenate message block mp2 to the tail of message header, mp1
2248bafec742SSukumar Swaminathan 	 */
2249bafec742SSukumar Swaminathan 	if (!err_flag) {
2250*a6766df4SSukumar Swaminathan 		if (rx_copy) {
2251*a6766df4SSukumar Swaminathan 			if (tp != NULL) {
2252*a6766df4SSukumar Swaminathan 				tp->b_next = NULL;
2253*a6766df4SSukumar Swaminathan 				tp->b_cont = NULL;
2254*a6766df4SSukumar Swaminathan 				tp->b_wptr = tp->b_rptr +
2255*a6766df4SSukumar Swaminathan 				    header_len + payload_len;
2256*a6766df4SSukumar Swaminathan 			}
2257*a6766df4SSukumar Swaminathan 			mp = tp;
2258*a6766df4SSukumar Swaminathan 		} else {
2259bafec742SSukumar Swaminathan 			if (mp1) {
2260bafec742SSukumar Swaminathan 				if (mp2) {
2261*a6766df4SSukumar Swaminathan 					QL_PRINT(DBG_RX,
2262*a6766df4SSukumar Swaminathan 					    ("packet in mp1 and mp2\n"));
2263*a6766df4SSukumar Swaminathan 					/* mp1->b_cont = mp2; */
2264*a6766df4SSukumar Swaminathan 					linkb(mp1, mp2);
2265bafec742SSukumar Swaminathan 					mp = mp1;
2266bafec742SSukumar Swaminathan 				} else {
2267*a6766df4SSukumar Swaminathan 					QL_PRINT(DBG_RX,
2268*a6766df4SSukumar Swaminathan 					    ("packet in mp1 only\n"));
2269bafec742SSukumar Swaminathan 					mp = mp1;
2270bafec742SSukumar Swaminathan 				}
2271bafec742SSukumar Swaminathan 			} else if (mp2) {
2272bafec742SSukumar Swaminathan 				QL_PRINT(DBG_RX, ("packet in mp2 only\n"));
2273bafec742SSukumar Swaminathan 				mp = mp2;
2274bafec742SSukumar Swaminathan 			}
2275bafec742SSukumar Swaminathan 		}
2276*a6766df4SSukumar Swaminathan 	}
2277bafec742SSukumar Swaminathan 	return (mp);
2278bafec742SSukumar Swaminathan 
2279accf27a5SSukumar Swaminathan fatal_error:
2280accf27a5SSukumar Swaminathan 	/* fatal Error! */
2281accf27a5SSukumar Swaminathan 	if (qlge->fm_enable) {
2282accf27a5SSukumar Swaminathan 		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2283accf27a5SSukumar Swaminathan 		ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2284accf27a5SSukumar Swaminathan 		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2285accf27a5SSukumar Swaminathan 	}
2286*a6766df4SSukumar Swaminathan 	if (tp) {
2287*a6766df4SSukumar Swaminathan 		freemsg(tp);
2288*a6766df4SSukumar Swaminathan 	}
2289*a6766df4SSukumar Swaminathan 
2290accf27a5SSukumar Swaminathan 	/* *mp->b_wptr = 0; */
2291accf27a5SSukumar Swaminathan 	ql_wake_asic_reset_soft_intr(qlge);
2292accf27a5SSukumar Swaminathan 	return (NULL);
2293bafec742SSukumar Swaminathan 
2294bafec742SSukumar Swaminathan }
2295bafec742SSukumar Swaminathan 
2296bafec742SSukumar Swaminathan /*
2297bafec742SSukumar Swaminathan  * Bump completion queue consumer index.
2298bafec742SSukumar Swaminathan  */
2299bafec742SSukumar Swaminathan static void
2300bafec742SSukumar Swaminathan ql_update_cq(struct rx_ring *rx_ring)
2301bafec742SSukumar Swaminathan {
2302bafec742SSukumar Swaminathan 	rx_ring->cnsmr_idx++;
2303bafec742SSukumar Swaminathan 	rx_ring->curr_entry++;
2304bafec742SSukumar Swaminathan 	if (rx_ring->cnsmr_idx >= rx_ring->cq_len) {
2305bafec742SSukumar Swaminathan 		rx_ring->cnsmr_idx = 0;
2306bafec742SSukumar Swaminathan 		rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
2307bafec742SSukumar Swaminathan 	}
2308bafec742SSukumar Swaminathan }
2309bafec742SSukumar Swaminathan 
2310bafec742SSukumar Swaminathan /*
2311bafec742SSukumar Swaminathan  * Update completion queue consumer index.
2312bafec742SSukumar Swaminathan  */
2313bafec742SSukumar Swaminathan static void
2314bafec742SSukumar Swaminathan ql_write_cq_idx(struct rx_ring *rx_ring)
2315bafec742SSukumar Swaminathan {
2316bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2317bafec742SSukumar Swaminathan 
2318bafec742SSukumar Swaminathan 	ql_write_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg,
2319bafec742SSukumar Swaminathan 	    rx_ring->cnsmr_idx);
2320bafec742SSukumar Swaminathan }
2321bafec742SSukumar Swaminathan 
2322bafec742SSukumar Swaminathan /*
2323bafec742SSukumar Swaminathan  * Processes a SYS-Chip Event Notification Completion Event.
2324bafec742SSukumar Swaminathan  * The incoming event describes a link up/down transition or the
2325bafec742SSukumar Swaminathan  * occurrence of some error condition.
2326bafec742SSukumar Swaminathan  */
2327bafec742SSukumar Swaminathan static void
2328bafec742SSukumar Swaminathan ql_process_chip_ae_intr(qlge_t *qlge,
2329bafec742SSukumar Swaminathan     struct ib_sys_event_iocb_rsp *ib_sys_event_rsp_ptr)
2330bafec742SSukumar Swaminathan {
2331bafec742SSukumar Swaminathan 	uint8_t eventType = ib_sys_event_rsp_ptr->event_type;
2332bafec742SSukumar Swaminathan 	uint32_t soft_req = 0;
2333bafec742SSukumar Swaminathan 
2334bafec742SSukumar Swaminathan 	switch (eventType) {
2335bafec742SSukumar Swaminathan 		case SYS_EVENT_PORT_LINK_UP:	/* 0x0h */
2336bafec742SSukumar Swaminathan 			QL_PRINT(DBG_MBX, ("Port Link Up\n"));
2337bafec742SSukumar Swaminathan 			break;
2338bafec742SSukumar Swaminathan 
2339bafec742SSukumar Swaminathan 		case SYS_EVENT_PORT_LINK_DOWN:	/* 0x1h */
2340bafec742SSukumar Swaminathan 			QL_PRINT(DBG_MBX, ("Port Link Down\n"));
2341bafec742SSukumar Swaminathan 			break;
2342bafec742SSukumar Swaminathan 
2343bafec742SSukumar Swaminathan 		case SYS_EVENT_MULTIPLE_CAM_HITS : /* 0x6h */
2344bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "A multiple CAM hits look up error "
2345bafec742SSukumar Swaminathan 			    "occurred");
2346bafec742SSukumar Swaminathan 			soft_req |= NEED_HW_RESET;
2347bafec742SSukumar Swaminathan 			break;
2348bafec742SSukumar Swaminathan 
2349bafec742SSukumar Swaminathan 		case SYS_EVENT_SOFT_ECC_ERR:	/* 0x7h */
2350bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Soft ECC error detected");
2351bafec742SSukumar Swaminathan 			soft_req |= NEED_HW_RESET;
2352bafec742SSukumar Swaminathan 			break;
2353bafec742SSukumar Swaminathan 
2354bafec742SSukumar Swaminathan 		case SYS_EVENT_MGMT_FATAL_ERR:	/* 0x8h */
2355bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Management (MPI) Processor fatal"
2356bafec742SSukumar Swaminathan 			    " error occured");
2357bafec742SSukumar Swaminathan 			soft_req |= NEED_MPI_RESET;
2358bafec742SSukumar Swaminathan 			break;
2359bafec742SSukumar Swaminathan 
2360bafec742SSukumar Swaminathan 		case SYS_EVENT_MAC_INTERRUPT:	/* 0x9h */
2361bafec742SSukumar Swaminathan 			QL_PRINT(DBG_MBX, ("MAC Interrupt"));
2362bafec742SSukumar Swaminathan 			break;
2363bafec742SSukumar Swaminathan 
2364bafec742SSukumar Swaminathan 		case SYS_EVENT_PCI_ERR_READING_SML_LRG_BUF:	/* 0x40h */
2365bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "PCI Error reading small/large "
2366bafec742SSukumar Swaminathan 			    "buffers occured");
2367bafec742SSukumar Swaminathan 			soft_req |= NEED_HW_RESET;
2368bafec742SSukumar Swaminathan 			break;
2369bafec742SSukumar Swaminathan 
2370bafec742SSukumar Swaminathan 		default:
2371bafec742SSukumar Swaminathan 			QL_PRINT(DBG_RX, ("%s(%d) unknown Sys Event: "
2372bafec742SSukumar Swaminathan 			    "type 0x%x occured",
2373bafec742SSukumar Swaminathan 			    __func__, qlge->instance, eventType));
2374bafec742SSukumar Swaminathan 			break;
2375bafec742SSukumar Swaminathan 	}
2376bafec742SSukumar Swaminathan 
2377bafec742SSukumar Swaminathan 	if ((soft_req & NEED_MPI_RESET) != 0) {
2378bafec742SSukumar Swaminathan 		ql_wake_mpi_reset_soft_intr(qlge);
2379accf27a5SSukumar Swaminathan 		if (qlge->fm_enable) {
2380accf27a5SSukumar Swaminathan 			ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2381accf27a5SSukumar Swaminathan 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2382accf27a5SSukumar Swaminathan 		}
2383bafec742SSukumar Swaminathan 	} else if ((soft_req & NEED_HW_RESET) != 0) {
2384bafec742SSukumar Swaminathan 		ql_wake_asic_reset_soft_intr(qlge);
2385accf27a5SSukumar Swaminathan 		if (qlge->fm_enable) {
2386accf27a5SSukumar Swaminathan 			ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2387accf27a5SSukumar Swaminathan 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2388accf27a5SSukumar Swaminathan 		}
2389bafec742SSukumar Swaminathan 	}
2390bafec742SSukumar Swaminathan }
2391bafec742SSukumar Swaminathan 
2392bafec742SSukumar Swaminathan /*
2393bafec742SSukumar Swaminathan  * set received packet checksum flag
2394bafec742SSukumar Swaminathan  */
2395bafec742SSukumar Swaminathan void
2396bafec742SSukumar Swaminathan ql_set_rx_cksum(mblk_t *mp, struct ib_mac_iocb_rsp *net_rsp)
2397bafec742SSukumar Swaminathan {
2398bafec742SSukumar Swaminathan 	uint32_t flags;
2399bafec742SSukumar Swaminathan 
2400bafec742SSukumar Swaminathan 	/* Not TCP or UDP packet? nothing more to do */
2401bafec742SSukumar Swaminathan 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) == 0) &&
2402bafec742SSukumar Swaminathan 	    ((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) == 0))
2403bafec742SSukumar Swaminathan 	return;
2404bafec742SSukumar Swaminathan 
2405bafec742SSukumar Swaminathan 	/* No CKO support for IPv6 */
2406bafec742SSukumar Swaminathan 	if ((net_rsp->flags3 & IB_MAC_IOCB_RSP_V6) != 0)
2407bafec742SSukumar Swaminathan 		return;
2408bafec742SSukumar Swaminathan 
2409bafec742SSukumar Swaminathan 	/*
2410bafec742SSukumar Swaminathan 	 * If checksum error, don't set flags; stack will calculate
2411bafec742SSukumar Swaminathan 	 * checksum, detect the error and update statistics
2412bafec742SSukumar Swaminathan 	 */
2413bafec742SSukumar Swaminathan 	if (((net_rsp->flags1 & IB_MAC_IOCB_RSP_TE) != 0) ||
2414bafec742SSukumar Swaminathan 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_IE) != 0))
2415bafec742SSukumar Swaminathan 		return;
2416bafec742SSukumar Swaminathan 
2417bafec742SSukumar Swaminathan 	/* TCP or UDP packet and checksum valid */
2418bafec742SSukumar Swaminathan 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) != 0) &&
2419bafec742SSukumar Swaminathan 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
24200dc2366fSVenugopal Iyer 		flags = HCK_FULLCKSUM_OK;
24210dc2366fSVenugopal Iyer 		mac_hcksum_set(mp, 0, 0, 0, 0, flags);
2422bafec742SSukumar Swaminathan 	}
2423bafec742SSukumar Swaminathan 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) != 0) &&
2424bafec742SSukumar Swaminathan 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
24250dc2366fSVenugopal Iyer 		flags = HCK_FULLCKSUM_OK;
24260dc2366fSVenugopal Iyer 		mac_hcksum_set(mp, 0, 0, 0, 0, flags);
2427bafec742SSukumar Swaminathan 	}
2428bafec742SSukumar Swaminathan }
2429bafec742SSukumar Swaminathan 
2430bafec742SSukumar Swaminathan /*
2431bafec742SSukumar Swaminathan  * This function goes through h/w descriptor in one specified rx ring,
2432bafec742SSukumar Swaminathan  * receives the data if the descriptor status shows the data is ready.
2433bafec742SSukumar Swaminathan  * It returns a chain of mblks containing the received data, to be
2434bafec742SSukumar Swaminathan  * passed up to mac_rx_ring().
2435bafec742SSukumar Swaminathan  */
2436bafec742SSukumar Swaminathan mblk_t *
2437bafec742SSukumar Swaminathan ql_ring_rx(struct rx_ring *rx_ring, int poll_bytes)
2438bafec742SSukumar Swaminathan {
2439bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2440accf27a5SSukumar Swaminathan 	uint32_t prod = ql_read_sh_reg(qlge, rx_ring);
2441bafec742SSukumar Swaminathan 	struct ib_mac_iocb_rsp *net_rsp;
2442bafec742SSukumar Swaminathan 	mblk_t *mp;
2443bafec742SSukumar Swaminathan 	mblk_t *mblk_head;
2444bafec742SSukumar Swaminathan 	mblk_t **mblk_tail;
2445bafec742SSukumar Swaminathan 	uint32_t received_bytes = 0;
2446bafec742SSukumar Swaminathan 	uint32_t length;
2447accf27a5SSukumar Swaminathan #ifdef QLGE_PERFORMANCE
2448accf27a5SSukumar Swaminathan 	uint32_t pkt_ct = 0;
2449accf27a5SSukumar Swaminathan #endif
2450bafec742SSukumar Swaminathan 
2451bafec742SSukumar Swaminathan #ifdef QLGE_TRACK_BUFFER_USAGE
2452bafec742SSukumar Swaminathan 	uint32_t consumer_idx;
2453bafec742SSukumar Swaminathan 	uint32_t producer_idx;
2454bafec742SSukumar Swaminathan 	uint32_t num_free_entries;
2455bafec742SSukumar Swaminathan 	uint32_t temp;
2456bafec742SSukumar Swaminathan 
2457bafec742SSukumar Swaminathan 	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2458bafec742SSukumar Swaminathan 	consumer_idx = temp & 0x0000ffff;
2459bafec742SSukumar Swaminathan 	producer_idx = (temp >> 16);
2460bafec742SSukumar Swaminathan 
2461bafec742SSukumar Swaminathan 	if (consumer_idx > producer_idx)
2462bafec742SSukumar Swaminathan 		num_free_entries = (consumer_idx - producer_idx);
2463bafec742SSukumar Swaminathan 	else
2464bafec742SSukumar Swaminathan 		num_free_entries = NUM_RX_RING_ENTRIES - (
2465bafec742SSukumar Swaminathan 		    producer_idx - consumer_idx);
2466bafec742SSukumar Swaminathan 
2467bafec742SSukumar Swaminathan 	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2468bafec742SSukumar Swaminathan 		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2469bafec742SSukumar Swaminathan 
2470bafec742SSukumar Swaminathan #endif
2471bafec742SSukumar Swaminathan 	mblk_head = NULL;
2472bafec742SSukumar Swaminathan 	mblk_tail = &mblk_head;
2473bafec742SSukumar Swaminathan 
2474accf27a5SSukumar Swaminathan 	while ((prod != rx_ring->cnsmr_idx)) {
2475bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX,
2476bafec742SSukumar Swaminathan 		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n",
2477bafec742SSukumar Swaminathan 		    __func__, rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2478bafec742SSukumar Swaminathan 
2479bafec742SSukumar Swaminathan 		net_rsp = (struct ib_mac_iocb_rsp *)rx_ring->curr_entry;
2480bafec742SSukumar Swaminathan 		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2481bafec742SSukumar Swaminathan 		    (off_t)((uintptr_t)net_rsp -
2482bafec742SSukumar Swaminathan 		    (uintptr_t)rx_ring->cq_dma.vaddr),
2483bafec742SSukumar Swaminathan 		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2484bafec742SSukumar Swaminathan 		QL_DUMP(DBG_RX, "qlge_ring_rx: rx completion iocb\n",
2485bafec742SSukumar Swaminathan 		    rx_ring->curr_entry, 8, (size_t)sizeof (*net_rsp));
2486bafec742SSukumar Swaminathan 
2487bafec742SSukumar Swaminathan 		switch (net_rsp->opcode) {
2488bafec742SSukumar Swaminathan 
2489bafec742SSukumar Swaminathan 		case OPCODE_IB_MAC_IOCB:
2490bafec742SSukumar Swaminathan 			/* Adding length of pkt header and payload */
2491bafec742SSukumar Swaminathan 			length = le32_to_cpu(net_rsp->data_len) +
2492bafec742SSukumar Swaminathan 			    le32_to_cpu(net_rsp->hdr_len);
2493bafec742SSukumar Swaminathan 			if ((poll_bytes != QLGE_POLL_ALL) &&
2494bafec742SSukumar Swaminathan 			    ((received_bytes + length) > poll_bytes)) {
2495bafec742SSukumar Swaminathan 				continue;
2496bafec742SSukumar Swaminathan 			}
2497bafec742SSukumar Swaminathan 			received_bytes += length;
2498bafec742SSukumar Swaminathan 
2499accf27a5SSukumar Swaminathan #ifdef QLGE_PERFORMANCE
2500accf27a5SSukumar Swaminathan 			pkt_ct++;
2501accf27a5SSukumar Swaminathan #endif
2502bafec742SSukumar Swaminathan 			mp = ql_build_rx_mp(qlge, rx_ring, net_rsp);
2503bafec742SSukumar Swaminathan 			if (mp != NULL) {
2504bafec742SSukumar Swaminathan 				if (rx_ring->mac_flags != QL_MAC_STARTED) {
2505bafec742SSukumar Swaminathan 					/*
2506bafec742SSukumar Swaminathan 					 * Increment number of packets we have
2507bafec742SSukumar Swaminathan 					 * indicated to the stack, should be
2508bafec742SSukumar Swaminathan 					 * decremented when we get it back
2509bafec742SSukumar Swaminathan 					 * or when freemsg is called
2510bafec742SSukumar Swaminathan 					 */
2511bafec742SSukumar Swaminathan 					ASSERT(rx_ring->rx_indicate
2512bafec742SSukumar Swaminathan 					    <= rx_ring->cq_len);
2513bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
2514bafec742SSukumar Swaminathan 					cmn_err(CE_WARN, "%s do not send to OS,"
2515bafec742SSukumar Swaminathan 					    " mac_flags %d, indicate %d",
2516bafec742SSukumar Swaminathan 					    __func__, rx_ring->mac_flags,
2517bafec742SSukumar Swaminathan 					    rx_ring->rx_indicate);
2518bafec742SSukumar Swaminathan #endif
2519bafec742SSukumar Swaminathan 					QL_PRINT(DBG_RX,
2520bafec742SSukumar Swaminathan 					    ("cq_id = %d, packet "
2521bafec742SSukumar Swaminathan 					    "dropped, mac not "
2522bafec742SSukumar Swaminathan 					    "enabled.\n",
2523bafec742SSukumar Swaminathan 					    rx_ring->cq_id));
2524bafec742SSukumar Swaminathan 					rx_ring->rx_pkt_dropped_mac_unenabled++;
2525bafec742SSukumar Swaminathan 
2526bafec742SSukumar Swaminathan 					/* rx_lock is expected to be held */
2527bafec742SSukumar Swaminathan 					mutex_exit(&rx_ring->rx_lock);
2528bafec742SSukumar Swaminathan 					freemsg(mp);
2529bafec742SSukumar Swaminathan 					mutex_enter(&rx_ring->rx_lock);
2530bafec742SSukumar Swaminathan 					mp = NULL;
2531bafec742SSukumar Swaminathan 				}
2532bafec742SSukumar Swaminathan 
2533bafec742SSukumar Swaminathan 				if (mp != NULL) {
2534bafec742SSukumar Swaminathan 					/*
2535bafec742SSukumar Swaminathan 					 * IP full packet has been
2536bafec742SSukumar Swaminathan 					 * successfully verified by
2537bafec742SSukumar Swaminathan 					 * H/W and is correct
2538bafec742SSukumar Swaminathan 					 */
2539bafec742SSukumar Swaminathan 					ql_set_rx_cksum(mp, net_rsp);
2540bafec742SSukumar Swaminathan 
2541bafec742SSukumar Swaminathan 					rx_ring->rx_packets++;
2542bafec742SSukumar Swaminathan 					rx_ring->rx_bytes += length;
2543bafec742SSukumar Swaminathan 					*mblk_tail = mp;
2544bafec742SSukumar Swaminathan 					mblk_tail = &mp->b_next;
2545bafec742SSukumar Swaminathan 				}
2546bafec742SSukumar Swaminathan 			} else {
2547bafec742SSukumar Swaminathan 				QL_PRINT(DBG_RX,
2548bafec742SSukumar Swaminathan 				    ("cq_id = %d, packet dropped\n",
2549bafec742SSukumar Swaminathan 				    rx_ring->cq_id));
2550bafec742SSukumar Swaminathan 				rx_ring->rx_packets_dropped_no_buffer++;
2551bafec742SSukumar Swaminathan 			}
2552bafec742SSukumar Swaminathan 			break;
2553bafec742SSukumar Swaminathan 
2554bafec742SSukumar Swaminathan 		case OPCODE_IB_SYS_EVENT_IOCB:
2555bafec742SSukumar Swaminathan 			ql_process_chip_ae_intr(qlge,
2556bafec742SSukumar Swaminathan 			    (struct ib_sys_event_iocb_rsp *)
2557bafec742SSukumar Swaminathan 			    net_rsp);
2558bafec742SSukumar Swaminathan 			break;
2559bafec742SSukumar Swaminathan 
2560bafec742SSukumar Swaminathan 		default:
2561bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
2562bafec742SSukumar Swaminathan 			    "%s Ring(%d)Hit default case, not handled!"
2563bafec742SSukumar Swaminathan 			    " dropping the packet, "
2564bafec742SSukumar Swaminathan 			    "opcode = %x.", __func__, rx_ring->cq_id,
2565bafec742SSukumar Swaminathan 			    net_rsp->opcode);
2566bafec742SSukumar Swaminathan 			break;
2567bafec742SSukumar Swaminathan 		}
2568bafec742SSukumar Swaminathan 		/* increment cnsmr_idx and curr_entry */
2569bafec742SSukumar Swaminathan 		ql_update_cq(rx_ring);
2570accf27a5SSukumar Swaminathan 		prod = ql_read_sh_reg(qlge, rx_ring);
2571bafec742SSukumar Swaminathan 
2572bafec742SSukumar Swaminathan 	}
2573accf27a5SSukumar Swaminathan 
2574accf27a5SSukumar Swaminathan #ifdef QLGE_PERFORMANCE
2575accf27a5SSukumar Swaminathan 	if (pkt_ct >= 7)
2576accf27a5SSukumar Swaminathan 		rx_ring->hist[7]++;
2577accf27a5SSukumar Swaminathan 	else if (pkt_ct == 6)
2578accf27a5SSukumar Swaminathan 		rx_ring->hist[6]++;
2579accf27a5SSukumar Swaminathan 	else if (pkt_ct == 5)
2580accf27a5SSukumar Swaminathan 		rx_ring->hist[5]++;
2581accf27a5SSukumar Swaminathan 	else if (pkt_ct == 4)
2582accf27a5SSukumar Swaminathan 		rx_ring->hist[4]++;
2583accf27a5SSukumar Swaminathan 	else if (pkt_ct == 3)
2584accf27a5SSukumar Swaminathan 		rx_ring->hist[3]++;
2585accf27a5SSukumar Swaminathan 	else if (pkt_ct == 2)
2586accf27a5SSukumar Swaminathan 		rx_ring->hist[2]++;
2587accf27a5SSukumar Swaminathan 	else if (pkt_ct == 1)
2588accf27a5SSukumar Swaminathan 		rx_ring->hist[1]++;
2589accf27a5SSukumar Swaminathan 	else if (pkt_ct == 0)
2590accf27a5SSukumar Swaminathan 		rx_ring->hist[0]++;
2591accf27a5SSukumar Swaminathan #endif
2592accf27a5SSukumar Swaminathan 
2593bafec742SSukumar Swaminathan 	/* update cnsmr_idx */
2594bafec742SSukumar Swaminathan 	ql_write_cq_idx(rx_ring);
2595bafec742SSukumar Swaminathan 	/* do not enable interrupt for polling mode */
2596bafec742SSukumar Swaminathan 	if (poll_bytes == QLGE_POLL_ALL)
2597bafec742SSukumar Swaminathan 		ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
2598bafec742SSukumar Swaminathan 	return (mblk_head);
2599bafec742SSukumar Swaminathan }
2600bafec742SSukumar Swaminathan 
2601bafec742SSukumar Swaminathan /* Process an outbound completion from an rx ring. */
2602bafec742SSukumar Swaminathan static void
2603bafec742SSukumar Swaminathan ql_process_mac_tx_intr(qlge_t *qlge, struct ob_mac_iocb_rsp *mac_rsp)
2604bafec742SSukumar Swaminathan {
2605bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
2606bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_ring_desc;
2607bafec742SSukumar Swaminathan 	int j;
2608bafec742SSukumar Swaminathan 
2609bafec742SSukumar Swaminathan 	tx_ring = &qlge->tx_ring[mac_rsp->txq_idx];
2610bafec742SSukumar Swaminathan 	tx_ring_desc = tx_ring->wq_desc;
2611bafec742SSukumar Swaminathan 	tx_ring_desc += mac_rsp->tid;
2612bafec742SSukumar Swaminathan 
2613bafec742SSukumar Swaminathan 	if (tx_ring_desc->tx_type == USE_DMA) {
2614bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("%s(%d): tx type USE_DMA\n",
2615bafec742SSukumar Swaminathan 		    __func__, qlge->instance));
2616bafec742SSukumar Swaminathan 
2617bafec742SSukumar Swaminathan 		/*
2618bafec742SSukumar Swaminathan 		 * Release the DMA resource that is used for
2619bafec742SSukumar Swaminathan 		 * DMA binding.
2620bafec742SSukumar Swaminathan 		 */
2621bafec742SSukumar Swaminathan 		for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
2622bafec742SSukumar Swaminathan 			(void) ddi_dma_unbind_handle(
2623bafec742SSukumar Swaminathan 			    tx_ring_desc->tx_dma_handle[j]);
2624bafec742SSukumar Swaminathan 		}
2625bafec742SSukumar Swaminathan 
2626bafec742SSukumar Swaminathan 		tx_ring_desc->tx_dma_handle_used = 0;
2627bafec742SSukumar Swaminathan 		/*
2628bafec742SSukumar Swaminathan 		 * Free the mblk after sending completed
2629bafec742SSukumar Swaminathan 		 */
2630bafec742SSukumar Swaminathan 		if (tx_ring_desc->mp != NULL) {
2631bafec742SSukumar Swaminathan 			freemsg(tx_ring_desc->mp);
2632bafec742SSukumar Swaminathan 			tx_ring_desc->mp = NULL;
2633bafec742SSukumar Swaminathan 		}
2634bafec742SSukumar Swaminathan 	}
2635bafec742SSukumar Swaminathan 
2636bafec742SSukumar Swaminathan 	tx_ring->obytes += tx_ring_desc->tx_bytes;
2637bafec742SSukumar Swaminathan 	tx_ring->opackets++;
2638bafec742SSukumar Swaminathan 
2639bafec742SSukumar Swaminathan 	if (mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | OB_MAC_IOCB_RSP_S |
2640bafec742SSukumar Swaminathan 	    OB_MAC_IOCB_RSP_L | OB_MAC_IOCB_RSP_B)) {
2641bafec742SSukumar Swaminathan 		tx_ring->errxmt++;
2642bafec742SSukumar Swaminathan 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2643bafec742SSukumar Swaminathan 			/* EMPTY */
2644bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX,
2645bafec742SSukumar Swaminathan 			    ("Total descriptor length did not match "
2646bafec742SSukumar Swaminathan 			    "transfer length.\n"));
2647bafec742SSukumar Swaminathan 		}
2648bafec742SSukumar Swaminathan 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2649bafec742SSukumar Swaminathan 			/* EMPTY */
2650bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX,
2651bafec742SSukumar Swaminathan 			    ("Frame too short to be legal, not sent.\n"));
2652bafec742SSukumar Swaminathan 		}
2653bafec742SSukumar Swaminathan 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2654bafec742SSukumar Swaminathan 			/* EMPTY */
2655bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX,
2656bafec742SSukumar Swaminathan 			    ("Frame too long, but sent anyway.\n"));
2657bafec742SSukumar Swaminathan 		}
2658bafec742SSukumar Swaminathan 		if (mac_rsp->flags3 & OB_MAC_IOCB_RSP_B) {
2659bafec742SSukumar Swaminathan 			/* EMPTY */
2660bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX,
2661bafec742SSukumar Swaminathan 			    ("PCI backplane error. Frame not sent.\n"));
2662bafec742SSukumar Swaminathan 		}
2663bafec742SSukumar Swaminathan 	}
2664bafec742SSukumar Swaminathan 	atomic_inc_32(&tx_ring->tx_free_count);
2665bafec742SSukumar Swaminathan }
2666bafec742SSukumar Swaminathan 
2667bafec742SSukumar Swaminathan /*
2668bafec742SSukumar Swaminathan  * clean up tx completion iocbs
2669bafec742SSukumar Swaminathan  */
2670accf27a5SSukumar Swaminathan int
2671bafec742SSukumar Swaminathan ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2672bafec742SSukumar Swaminathan {
2673bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2674accf27a5SSukumar Swaminathan 	uint32_t prod = ql_read_sh_reg(qlge, rx_ring);
2675bafec742SSukumar Swaminathan 	struct ob_mac_iocb_rsp *net_rsp = NULL;
2676bafec742SSukumar Swaminathan 	int count = 0;
2677bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
2678bafec742SSukumar Swaminathan 	boolean_t resume_tx = B_FALSE;
2679bafec742SSukumar Swaminathan 
2680bafec742SSukumar Swaminathan 	mutex_enter(&rx_ring->rx_lock);
2681bafec742SSukumar Swaminathan #ifdef QLGE_TRACK_BUFFER_USAGE
2682bafec742SSukumar Swaminathan 	{
2683bafec742SSukumar Swaminathan 	uint32_t consumer_idx;
2684bafec742SSukumar Swaminathan 	uint32_t producer_idx;
2685bafec742SSukumar Swaminathan 	uint32_t num_free_entries;
2686bafec742SSukumar Swaminathan 	uint32_t temp;
2687bafec742SSukumar Swaminathan 
2688bafec742SSukumar Swaminathan 	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2689bafec742SSukumar Swaminathan 	consumer_idx = temp & 0x0000ffff;
2690bafec742SSukumar Swaminathan 	producer_idx = (temp >> 16);
2691bafec742SSukumar Swaminathan 
2692bafec742SSukumar Swaminathan 	if (consumer_idx > producer_idx)
2693bafec742SSukumar Swaminathan 		num_free_entries = (consumer_idx - producer_idx);
2694bafec742SSukumar Swaminathan 	else
2695bafec742SSukumar Swaminathan 		num_free_entries = NUM_RX_RING_ENTRIES -
2696bafec742SSukumar Swaminathan 		    (producer_idx - consumer_idx);
2697bafec742SSukumar Swaminathan 
2698bafec742SSukumar Swaminathan 	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2699bafec742SSukumar Swaminathan 		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2700bafec742SSukumar Swaminathan 
2701bafec742SSukumar Swaminathan 	}
2702bafec742SSukumar Swaminathan #endif
2703bafec742SSukumar Swaminathan 	/* While there are entries in the completion queue. */
2704bafec742SSukumar Swaminathan 	while (prod != rx_ring->cnsmr_idx) {
2705bafec742SSukumar Swaminathan 
2706bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX,
2707bafec742SSukumar Swaminathan 		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n", __func__,
2708bafec742SSukumar Swaminathan 		    rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2709bafec742SSukumar Swaminathan 
2710bafec742SSukumar Swaminathan 		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2711bafec742SSukumar Swaminathan 		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2712bafec742SSukumar Swaminathan 		    (off_t)((uintptr_t)net_rsp -
2713bafec742SSukumar Swaminathan 		    (uintptr_t)rx_ring->cq_dma.vaddr),
2714bafec742SSukumar Swaminathan 		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2715bafec742SSukumar Swaminathan 
2716bafec742SSukumar Swaminathan 		QL_DUMP(DBG_RX, "ql_clean_outbound_rx_ring: "
2717bafec742SSukumar Swaminathan 		    "response packet data\n",
2718bafec742SSukumar Swaminathan 		    rx_ring->curr_entry, 8,
2719bafec742SSukumar Swaminathan 		    (size_t)sizeof (*net_rsp));
2720bafec742SSukumar Swaminathan 
2721bafec742SSukumar Swaminathan 		switch (net_rsp->opcode) {
2722bafec742SSukumar Swaminathan 
2723bafec742SSukumar Swaminathan 		case OPCODE_OB_MAC_OFFLOAD_IOCB:
2724bafec742SSukumar Swaminathan 		case OPCODE_OB_MAC_IOCB:
2725bafec742SSukumar Swaminathan 			ql_process_mac_tx_intr(qlge, net_rsp);
2726bafec742SSukumar Swaminathan 			break;
2727bafec742SSukumar Swaminathan 
2728bafec742SSukumar Swaminathan 		default:
2729bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
2730bafec742SSukumar Swaminathan 			    "%s Hit default case, not handled! "
2731bafec742SSukumar Swaminathan 			    "dropping the packet,"
2732bafec742SSukumar Swaminathan 			    " opcode = %x.",
2733bafec742SSukumar Swaminathan 			    __func__, net_rsp->opcode);
2734bafec742SSukumar Swaminathan 			break;
2735bafec742SSukumar Swaminathan 		}
2736bafec742SSukumar Swaminathan 		count++;
2737bafec742SSukumar Swaminathan 		ql_update_cq(rx_ring);
2738accf27a5SSukumar Swaminathan 		prod = ql_read_sh_reg(qlge, rx_ring);
2739bafec742SSukumar Swaminathan 	}
2740bafec742SSukumar Swaminathan 	ql_write_cq_idx(rx_ring);
2741bafec742SSukumar Swaminathan 
2742bafec742SSukumar Swaminathan 	mutex_exit(&rx_ring->rx_lock);
2743bafec742SSukumar Swaminathan 
2744accf27a5SSukumar Swaminathan 	net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2745bafec742SSukumar Swaminathan 	tx_ring = &qlge->tx_ring[net_rsp->txq_idx];
2746bafec742SSukumar Swaminathan 
2747bafec742SSukumar Swaminathan 	mutex_enter(&tx_ring->tx_lock);
2748bafec742SSukumar Swaminathan 
2749bafec742SSukumar Swaminathan 	if (tx_ring->queue_stopped &&
2750bafec742SSukumar Swaminathan 	    (tx_ring->tx_free_count > TX_RESUME_THRESHOLD)) {
2751bafec742SSukumar Swaminathan 		/*
2752bafec742SSukumar Swaminathan 		 * The queue got stopped because the tx_ring was full.
2753bafec742SSukumar Swaminathan 		 * Wake it up, because it's now at least 25% empty.
2754bafec742SSukumar Swaminathan 		 */
2755bafec742SSukumar Swaminathan 		tx_ring->queue_stopped = 0;
2756bafec742SSukumar Swaminathan 		resume_tx = B_TRUE;
2757bafec742SSukumar Swaminathan 	}
2758bafec742SSukumar Swaminathan 
2759bafec742SSukumar Swaminathan 	mutex_exit(&tx_ring->tx_lock);
2760bafec742SSukumar Swaminathan 	/* Don't hold the lock during OS callback */
2761bafec742SSukumar Swaminathan 	if (resume_tx)
2762bafec742SSukumar Swaminathan 		RESUME_TX(tx_ring);
2763bafec742SSukumar Swaminathan 	return (count);
2764bafec742SSukumar Swaminathan }
2765bafec742SSukumar Swaminathan 
2766bafec742SSukumar Swaminathan /*
2767bafec742SSukumar Swaminathan  * reset asic when error happens
2768bafec742SSukumar Swaminathan  */
2769bafec742SSukumar Swaminathan /* ARGSUSED */
2770bafec742SSukumar Swaminathan static uint_t
2771bafec742SSukumar Swaminathan ql_asic_reset_work(caddr_t arg1, caddr_t arg2)
2772bafec742SSukumar Swaminathan {
2773bafec742SSukumar Swaminathan 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2774bafec742SSukumar Swaminathan 	int status;
2775bafec742SSukumar Swaminathan 
2776bafec742SSukumar Swaminathan 	mutex_enter(&qlge->gen_mutex);
2777accf27a5SSukumar Swaminathan 	(void) ql_do_stop(qlge);
2778accf27a5SSukumar Swaminathan 	/*
2779accf27a5SSukumar Swaminathan 	 * Write default ethernet address to chip register Mac
2780accf27a5SSukumar Swaminathan 	 * Address slot 0 and Enable Primary Mac Function.
2781accf27a5SSukumar Swaminathan 	 */
2782accf27a5SSukumar Swaminathan 	mutex_enter(&qlge->hw_mutex);
2783accf27a5SSukumar Swaminathan 	(void) ql_unicst_set(qlge,
2784accf27a5SSukumar Swaminathan 	    (uint8_t *)qlge->unicst_addr[0].addr.ether_addr_octet, 0);
2785accf27a5SSukumar Swaminathan 	mutex_exit(&qlge->hw_mutex);
2786accf27a5SSukumar Swaminathan 	qlge->mac_flags = QL_MAC_INIT;
2787accf27a5SSukumar Swaminathan 	status = ql_do_start(qlge);
2788bafec742SSukumar Swaminathan 	if (status != DDI_SUCCESS)
2789bafec742SSukumar Swaminathan 		goto error;
2790accf27a5SSukumar Swaminathan 	qlge->mac_flags = QL_MAC_STARTED;
2791bafec742SSukumar Swaminathan 	mutex_exit(&qlge->gen_mutex);
2792accf27a5SSukumar Swaminathan 	ddi_fm_service_impact(qlge->dip, DDI_SERVICE_RESTORED);
2793accf27a5SSukumar Swaminathan 
2794bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2795bafec742SSukumar Swaminathan 
2796bafec742SSukumar Swaminathan error:
2797bafec742SSukumar Swaminathan 	mutex_exit(&qlge->gen_mutex);
2798bafec742SSukumar Swaminathan 	cmn_err(CE_WARN,
2799bafec742SSukumar Swaminathan 	    "qlge up/down cycle failed, closing device");
2800accf27a5SSukumar Swaminathan 	if (qlge->fm_enable) {
2801accf27a5SSukumar Swaminathan 		ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2802accf27a5SSukumar Swaminathan 		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_LOST);
2803accf27a5SSukumar Swaminathan 		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2804accf27a5SSukumar Swaminathan 	}
2805bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2806bafec742SSukumar Swaminathan }
2807bafec742SSukumar Swaminathan 
2808bafec742SSukumar Swaminathan /*
2809bafec742SSukumar Swaminathan  * Reset MPI
2810bafec742SSukumar Swaminathan  */
2811bafec742SSukumar Swaminathan /* ARGSUSED */
2812bafec742SSukumar Swaminathan static uint_t
2813bafec742SSukumar Swaminathan ql_mpi_reset_work(caddr_t arg1, caddr_t arg2)
2814bafec742SSukumar Swaminathan {
2815bafec742SSukumar Swaminathan 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2816bafec742SSukumar Swaminathan 
28170662fbf4SSukumar Swaminathan 	(void) ql_reset_mpi_risc(qlge);
2818bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2819bafec742SSukumar Swaminathan }
2820bafec742SSukumar Swaminathan 
2821bafec742SSukumar Swaminathan /*
2822bafec742SSukumar Swaminathan  * Process MPI mailbox messages
2823bafec742SSukumar Swaminathan  */
2824bafec742SSukumar Swaminathan /* ARGSUSED */
2825bafec742SSukumar Swaminathan static uint_t
2826bafec742SSukumar Swaminathan ql_mpi_event_work(caddr_t arg1, caddr_t arg2)
2827bafec742SSukumar Swaminathan {
2828bafec742SSukumar Swaminathan 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2829bafec742SSukumar Swaminathan 
2830bafec742SSukumar Swaminathan 	ql_do_mpi_intr(qlge);
2831bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2832bafec742SSukumar Swaminathan }
2833bafec742SSukumar Swaminathan 
2834bafec742SSukumar Swaminathan /* Fire up a handler to reset the MPI processor. */
2835bafec742SSukumar Swaminathan void
2836bafec742SSukumar Swaminathan ql_wake_asic_reset_soft_intr(qlge_t *qlge)
2837bafec742SSukumar Swaminathan {
2838bafec742SSukumar Swaminathan 	(void) ddi_intr_trigger_softint(qlge->asic_reset_intr_hdl, NULL);
2839bafec742SSukumar Swaminathan }
2840bafec742SSukumar Swaminathan 
2841bafec742SSukumar Swaminathan static void
2842bafec742SSukumar Swaminathan ql_wake_mpi_reset_soft_intr(qlge_t *qlge)
2843bafec742SSukumar Swaminathan {
2844bafec742SSukumar Swaminathan 	(void) ddi_intr_trigger_softint(qlge->mpi_reset_intr_hdl, NULL);
2845bafec742SSukumar Swaminathan }
2846bafec742SSukumar Swaminathan 
2847bafec742SSukumar Swaminathan static void
2848bafec742SSukumar Swaminathan ql_wake_mpi_event_soft_intr(qlge_t *qlge)
2849bafec742SSukumar Swaminathan {
2850bafec742SSukumar Swaminathan 	(void) ddi_intr_trigger_softint(qlge->mpi_event_intr_hdl, NULL);
2851bafec742SSukumar Swaminathan }
2852bafec742SSukumar Swaminathan 
2853bafec742SSukumar Swaminathan /*
2854bafec742SSukumar Swaminathan  * This handles a fatal error, MPI activity, and the default
2855bafec742SSukumar Swaminathan  * rx_ring in an MSI-X multiple interrupt vector environment.
2856bafec742SSukumar Swaminathan  * In MSI/Legacy environment it also process the rest of
2857bafec742SSukumar Swaminathan  * the rx_rings.
2858bafec742SSukumar Swaminathan  */
2859bafec742SSukumar Swaminathan /* ARGSUSED */
2860bafec742SSukumar Swaminathan static uint_t
2861bafec742SSukumar Swaminathan ql_isr(caddr_t arg1, caddr_t arg2)
2862bafec742SSukumar Swaminathan {
2863bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
2864accf27a5SSukumar Swaminathan 	struct rx_ring *ob_ring;
2865bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2866bafec742SSukumar Swaminathan 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
2867bafec742SSukumar Swaminathan 	uint32_t var, prod;
2868bafec742SSukumar Swaminathan 	int i;
2869bafec742SSukumar Swaminathan 	int work_done = 0;
2870bafec742SSukumar Swaminathan 
2871bafec742SSukumar Swaminathan 	mblk_t *mp;
2872bafec742SSukumar Swaminathan 
2873bafec742SSukumar Swaminathan 	_NOTE(ARGUNUSED(arg2));
2874bafec742SSukumar Swaminathan 
2875bafec742SSukumar Swaminathan 	++qlge->rx_interrupts[rx_ring->cq_id];
2876bafec742SSukumar Swaminathan 
2877bafec742SSukumar Swaminathan 	if (ql_atomic_read_32(&qlge->intr_ctx[0].irq_cnt)) {
2878bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_RSVD7, 0xfeed0002);
2879bafec742SSukumar Swaminathan 		var = ql_read_reg(qlge, REG_ERROR_STATUS);
2880bafec742SSukumar Swaminathan 		var = ql_read_reg(qlge, REG_STATUS);
2881bafec742SSukumar Swaminathan 		var = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
2882bafec742SSukumar Swaminathan 		return (DDI_INTR_CLAIMED);
2883bafec742SSukumar Swaminathan 	}
2884bafec742SSukumar Swaminathan 
2885bafec742SSukumar Swaminathan 	ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2886bafec742SSukumar Swaminathan 
2887bafec742SSukumar Swaminathan 	/*
2888accf27a5SSukumar Swaminathan 	 * process send completes on first stride tx ring if available
2889accf27a5SSukumar Swaminathan 	 */
2890accf27a5SSukumar Swaminathan 	if (qlge->isr_stride) {
2891accf27a5SSukumar Swaminathan 		ob_ring = &qlge->rx_ring[qlge->isr_stride];
2892accf27a5SSukumar Swaminathan 		if (ql_read_sh_reg(qlge, ob_ring) !=
2893accf27a5SSukumar Swaminathan 		    ob_ring->cnsmr_idx) {
2894accf27a5SSukumar Swaminathan 			(void) ql_clean_outbound_rx_ring(ob_ring);
2895accf27a5SSukumar Swaminathan 		}
2896accf27a5SSukumar Swaminathan 	}
2897accf27a5SSukumar Swaminathan 	/*
2898bafec742SSukumar Swaminathan 	 * Check the default queue and wake handler if active.
2899bafec742SSukumar Swaminathan 	 */
2900bafec742SSukumar Swaminathan 	rx_ring = &qlge->rx_ring[0];
2901accf27a5SSukumar Swaminathan 	prod = ql_read_sh_reg(qlge, rx_ring);
2902bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INTR, ("rx-ring[0] prod index 0x%x, consumer 0x%x ",
2903bafec742SSukumar Swaminathan 	    prod, rx_ring->cnsmr_idx));
2904bafec742SSukumar Swaminathan 	/* check if interrupt is due to incoming packet */
2905bafec742SSukumar Swaminathan 	if (prod != rx_ring->cnsmr_idx) {
2906bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INTR, ("Waking handler for rx_ring[0].\n"));
2907bafec742SSukumar Swaminathan 		ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2908bafec742SSukumar Swaminathan 		mutex_enter(&rx_ring->rx_lock);
2909bafec742SSukumar Swaminathan 		mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2910bafec742SSukumar Swaminathan 		mutex_exit(&rx_ring->rx_lock);
2911bafec742SSukumar Swaminathan 
2912bafec742SSukumar Swaminathan 		if (mp != NULL)
2913bafec742SSukumar Swaminathan 			RX_UPSTREAM(rx_ring, mp);
2914bafec742SSukumar Swaminathan 		work_done++;
2915bafec742SSukumar Swaminathan 	} else {
2916bafec742SSukumar Swaminathan 		/*
2917bafec742SSukumar Swaminathan 		 * If interrupt is not due to incoming packet, read status
2918bafec742SSukumar Swaminathan 		 * register to see if error happens or mailbox interrupt.
2919bafec742SSukumar Swaminathan 		 */
2920bafec742SSukumar Swaminathan 		var = ql_read_reg(qlge, REG_STATUS);
2921bafec742SSukumar Swaminathan 		if ((var & STATUS_FE) != 0) {
2922bafec742SSukumar Swaminathan 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
2923accf27a5SSukumar Swaminathan 			if (qlge->fm_enable) {
2924accf27a5SSukumar Swaminathan 				atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2925accf27a5SSukumar Swaminathan 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2926accf27a5SSukumar Swaminathan 				ddi_fm_service_impact(qlge->dip,
2927accf27a5SSukumar Swaminathan 				    DDI_SERVICE_LOST);
2928accf27a5SSukumar Swaminathan 			}
2929bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Got fatal error, STS = %x.", var);
2930bafec742SSukumar Swaminathan 			var = ql_read_reg(qlge, REG_ERROR_STATUS);
2931bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
2932bafec742SSukumar Swaminathan 			    "Resetting chip. Error Status Register = 0x%x",
2933bafec742SSukumar Swaminathan 			    var);
2934bafec742SSukumar Swaminathan 			ql_wake_asic_reset_soft_intr(qlge);
2935bafec742SSukumar Swaminathan 			return (DDI_INTR_CLAIMED);
2936bafec742SSukumar Swaminathan 		}
2937bafec742SSukumar Swaminathan 
2938bafec742SSukumar Swaminathan 		/*
2939bafec742SSukumar Swaminathan 		 * Check MPI processor activity.
2940bafec742SSukumar Swaminathan 		 */
2941bafec742SSukumar Swaminathan 		if ((var & STATUS_PI) != 0) {
2942bafec742SSukumar Swaminathan 			/*
2943bafec742SSukumar Swaminathan 			 * We've got an async event or mailbox completion.
2944bafec742SSukumar Swaminathan 			 * Handle it and clear the source of the interrupt.
2945bafec742SSukumar Swaminathan 			 */
2946bafec742SSukumar Swaminathan 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
2947bafec742SSukumar Swaminathan 
2948bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INTR, ("Got MPI processor interrupt.\n"));
2949bafec742SSukumar Swaminathan 			ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2950bafec742SSukumar Swaminathan 			ql_wake_mpi_event_soft_intr(qlge);
2951bafec742SSukumar Swaminathan 			work_done++;
2952bafec742SSukumar Swaminathan 		}
2953bafec742SSukumar Swaminathan 	}
2954bafec742SSukumar Swaminathan 
2955accf27a5SSukumar Swaminathan 
2956bafec742SSukumar Swaminathan 	if (qlge->intr_type != DDI_INTR_TYPE_MSIX) {
2957bafec742SSukumar Swaminathan 		/*
2958bafec742SSukumar Swaminathan 		 * Start the DPC for each active queue.
2959bafec742SSukumar Swaminathan 		 */
2960bafec742SSukumar Swaminathan 		for (i = 1; i < qlge->rx_ring_count; i++) {
2961bafec742SSukumar Swaminathan 			rx_ring = &qlge->rx_ring[i];
2962bafec742SSukumar Swaminathan 
2963accf27a5SSukumar Swaminathan 			if (ql_read_sh_reg(qlge, rx_ring) !=
2964bafec742SSukumar Swaminathan 			    rx_ring->cnsmr_idx) {
2965bafec742SSukumar Swaminathan 				QL_PRINT(DBG_INTR,
2966bafec742SSukumar Swaminathan 				    ("Waking handler for rx_ring[%d].\n", i));
2967bafec742SSukumar Swaminathan 
2968bafec742SSukumar Swaminathan 				ql_disable_completion_interrupt(qlge,
2969bafec742SSukumar Swaminathan 				    rx_ring->irq);
2970bafec742SSukumar Swaminathan 				if (rx_ring->type == TX_Q) {
29710662fbf4SSukumar Swaminathan 					(void) ql_clean_outbound_rx_ring(
29720662fbf4SSukumar Swaminathan 					    rx_ring);
2973bafec742SSukumar Swaminathan 					ql_enable_completion_interrupt(
2974bafec742SSukumar Swaminathan 					    rx_ring->qlge, rx_ring->irq);
2975bafec742SSukumar Swaminathan 				} else {
2976bafec742SSukumar Swaminathan 					mutex_enter(&rx_ring->rx_lock);
2977bafec742SSukumar Swaminathan 					mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2978bafec742SSukumar Swaminathan 					mutex_exit(&rx_ring->rx_lock);
2979bafec742SSukumar Swaminathan 					if (mp != NULL)
2980bafec742SSukumar Swaminathan 						RX_UPSTREAM(rx_ring, mp);
2981bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
2982bafec742SSukumar Swaminathan 					if (rx_ring->mac_flags ==
2983bafec742SSukumar Swaminathan 					    QL_MAC_STOPPED)
2984bafec742SSukumar Swaminathan 						cmn_err(CE_NOTE,
2985bafec742SSukumar Swaminathan 						    "%s rx_indicate(%d) %d\n",
2986bafec742SSukumar Swaminathan 						    __func__, i,
2987bafec742SSukumar Swaminathan 						    rx_ring->rx_indicate);
2988bafec742SSukumar Swaminathan #endif
2989bafec742SSukumar Swaminathan 				}
2990bafec742SSukumar Swaminathan 				work_done++;
2991bafec742SSukumar Swaminathan 			}
2992bafec742SSukumar Swaminathan 		}
2993bafec742SSukumar Swaminathan 	}
2994bafec742SSukumar Swaminathan 
2995bafec742SSukumar Swaminathan 	ql_enable_completion_interrupt(qlge, intr_ctx->intr);
2996bafec742SSukumar Swaminathan 
2997bafec742SSukumar Swaminathan 	return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
2998bafec742SSukumar Swaminathan }
2999bafec742SSukumar Swaminathan 
3000bafec742SSukumar Swaminathan /*
3001bafec742SSukumar Swaminathan  * MSI-X Multiple Vector Interrupt Handler for outbound (TX) completions.
3002bafec742SSukumar Swaminathan  */
3003bafec742SSukumar Swaminathan /* ARGSUSED */
3004bafec742SSukumar Swaminathan static uint_t
3005bafec742SSukumar Swaminathan ql_msix_tx_isr(caddr_t arg1, caddr_t arg2)
3006bafec742SSukumar Swaminathan {
3007bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3008bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
3009bafec742SSukumar Swaminathan 	_NOTE(ARGUNUSED(arg2));
3010bafec742SSukumar Swaminathan 
3011bafec742SSukumar Swaminathan 	++qlge->rx_interrupts[rx_ring->cq_id];
30120662fbf4SSukumar Swaminathan 	(void) ql_clean_outbound_rx_ring(rx_ring);
3013bafec742SSukumar Swaminathan 	ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
3014bafec742SSukumar Swaminathan 
3015bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
3016bafec742SSukumar Swaminathan }
3017bafec742SSukumar Swaminathan 
3018bafec742SSukumar Swaminathan /*
3019accf27a5SSukumar Swaminathan  * MSI-X Multiple Vector Interrupt Handler
3020accf27a5SSukumar Swaminathan  */
3021accf27a5SSukumar Swaminathan /* ARGSUSED */
3022accf27a5SSukumar Swaminathan static uint_t
3023accf27a5SSukumar Swaminathan ql_msix_isr(caddr_t arg1, caddr_t arg2)
3024accf27a5SSukumar Swaminathan {
3025accf27a5SSukumar Swaminathan 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3026accf27a5SSukumar Swaminathan 	struct rx_ring *ob_ring;
3027accf27a5SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
3028accf27a5SSukumar Swaminathan 	mblk_t *mp;
3029accf27a5SSukumar Swaminathan 	_NOTE(ARGUNUSED(arg2));
3030accf27a5SSukumar Swaminathan 
3031accf27a5SSukumar Swaminathan 	QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
3032accf27a5SSukumar Swaminathan 
3033accf27a5SSukumar Swaminathan 	ql_disable_completion_interrupt(qlge, rx_ring->irq);
3034accf27a5SSukumar Swaminathan 
3035accf27a5SSukumar Swaminathan 	/*
3036accf27a5SSukumar Swaminathan 	 * process send completes on stride tx ring if available
3037accf27a5SSukumar Swaminathan 	 */
3038accf27a5SSukumar Swaminathan 	if (qlge->isr_stride) {
3039accf27a5SSukumar Swaminathan 		ob_ring = rx_ring + qlge->isr_stride;
3040accf27a5SSukumar Swaminathan 		if (ql_read_sh_reg(qlge, ob_ring) !=
3041accf27a5SSukumar Swaminathan 		    ob_ring->cnsmr_idx) {
3042accf27a5SSukumar Swaminathan 			++qlge->rx_interrupts[ob_ring->cq_id];
3043accf27a5SSukumar Swaminathan 			(void) ql_clean_outbound_rx_ring(ob_ring);
3044accf27a5SSukumar Swaminathan 		}
3045accf27a5SSukumar Swaminathan 	}
3046accf27a5SSukumar Swaminathan 
3047accf27a5SSukumar Swaminathan 	++qlge->rx_interrupts[rx_ring->cq_id];
3048accf27a5SSukumar Swaminathan 
3049accf27a5SSukumar Swaminathan 	mutex_enter(&rx_ring->rx_lock);
3050accf27a5SSukumar Swaminathan 	mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
3051accf27a5SSukumar Swaminathan 	mutex_exit(&rx_ring->rx_lock);
3052accf27a5SSukumar Swaminathan 
3053accf27a5SSukumar Swaminathan 	if (mp != NULL)
3054accf27a5SSukumar Swaminathan 		RX_UPSTREAM(rx_ring, mp);
3055accf27a5SSukumar Swaminathan 
3056accf27a5SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
3057accf27a5SSukumar Swaminathan }
3058accf27a5SSukumar Swaminathan 
3059accf27a5SSukumar Swaminathan /*
3060bafec742SSukumar Swaminathan  * Poll n_bytes of chained incoming packets
3061bafec742SSukumar Swaminathan  */
3062bafec742SSukumar Swaminathan mblk_t *
3063bafec742SSukumar Swaminathan ql_ring_rx_poll(void *arg, int n_bytes)
3064bafec742SSukumar Swaminathan {
3065bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring = (struct rx_ring *)arg;
3066bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
3067bafec742SSukumar Swaminathan 	mblk_t *mp = NULL;
3068bafec742SSukumar Swaminathan 	uint32_t var;
3069bafec742SSukumar Swaminathan 
3070bafec742SSukumar Swaminathan 	ASSERT(n_bytes >= 0);
3071bafec742SSukumar Swaminathan 	QL_PRINT(DBG_GLD, ("%s for ring(%d) to read max %d bytes\n",
3072bafec742SSukumar Swaminathan 	    __func__, rx_ring->cq_id, n_bytes));
3073bafec742SSukumar Swaminathan 
3074bafec742SSukumar Swaminathan 	++qlge->rx_polls[rx_ring->cq_id];
3075bafec742SSukumar Swaminathan 
3076bafec742SSukumar Swaminathan 	if (n_bytes == 0)
3077bafec742SSukumar Swaminathan 		return (mp);
3078bafec742SSukumar Swaminathan 	mutex_enter(&rx_ring->rx_lock);
3079bafec742SSukumar Swaminathan 	mp = ql_ring_rx(rx_ring, n_bytes);
3080bafec742SSukumar Swaminathan 	mutex_exit(&rx_ring->rx_lock);
3081bafec742SSukumar Swaminathan 
3082bafec742SSukumar Swaminathan 	if ((rx_ring->cq_id == 0) && (mp == NULL)) {
3083bafec742SSukumar Swaminathan 		var = ql_read_reg(qlge, REG_STATUS);
3084bafec742SSukumar Swaminathan 		/*
3085bafec742SSukumar Swaminathan 		 * Check for fatal error.
3086bafec742SSukumar Swaminathan 		 */
3087bafec742SSukumar Swaminathan 		if ((var & STATUS_FE) != 0) {
3088bafec742SSukumar Swaminathan 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
3089bafec742SSukumar Swaminathan 			var = ql_read_reg(qlge, REG_ERROR_STATUS);
3090bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Got fatal error %x.", var);
3091bafec742SSukumar Swaminathan 			ql_wake_asic_reset_soft_intr(qlge);
3092accf27a5SSukumar Swaminathan 			if (qlge->fm_enable) {
3093accf27a5SSukumar Swaminathan 				atomic_or_32(&qlge->flags, ADAPTER_ERROR);
3094accf27a5SSukumar Swaminathan 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
3095accf27a5SSukumar Swaminathan 				ddi_fm_service_impact(qlge->dip,
3096accf27a5SSukumar Swaminathan 				    DDI_SERVICE_LOST);
3097accf27a5SSukumar Swaminathan 			}
3098bafec742SSukumar Swaminathan 		}
3099bafec742SSukumar Swaminathan 		/*
3100bafec742SSukumar Swaminathan 		 * Check MPI processor activity.
3101bafec742SSukumar Swaminathan 		 */
3102bafec742SSukumar Swaminathan 		if ((var & STATUS_PI) != 0) {
3103bafec742SSukumar Swaminathan 			/*
3104bafec742SSukumar Swaminathan 			 * We've got an async event or mailbox completion.
3105bafec742SSukumar Swaminathan 			 * Handle it and clear the source of the interrupt.
3106bafec742SSukumar Swaminathan 			 */
3107bafec742SSukumar Swaminathan 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
3108bafec742SSukumar Swaminathan 			ql_do_mpi_intr(qlge);
3109bafec742SSukumar Swaminathan 		}
3110bafec742SSukumar Swaminathan 	}
3111bafec742SSukumar Swaminathan 
3112bafec742SSukumar Swaminathan 	return (mp);
3113bafec742SSukumar Swaminathan }
3114bafec742SSukumar Swaminathan 
3115bafec742SSukumar Swaminathan /*
3116bafec742SSukumar Swaminathan  * MSI-X Multiple Vector Interrupt Handler for inbound (RX) completions.
3117bafec742SSukumar Swaminathan  */
3118bafec742SSukumar Swaminathan /* ARGSUSED */
3119bafec742SSukumar Swaminathan static uint_t
3120bafec742SSukumar Swaminathan ql_msix_rx_isr(caddr_t arg1, caddr_t arg2)
3121bafec742SSukumar Swaminathan {
3122bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3123bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
3124bafec742SSukumar Swaminathan 	mblk_t *mp;
3125bafec742SSukumar Swaminathan 	_NOTE(ARGUNUSED(arg2));
3126bafec742SSukumar Swaminathan 
3127bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
3128bafec742SSukumar Swaminathan 
3129bafec742SSukumar Swaminathan 	++qlge->rx_interrupts[rx_ring->cq_id];
3130bafec742SSukumar Swaminathan 
3131bafec742SSukumar Swaminathan 	mutex_enter(&rx_ring->rx_lock);
3132bafec742SSukumar Swaminathan 	mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
3133bafec742SSukumar Swaminathan 	mutex_exit(&rx_ring->rx_lock);
3134bafec742SSukumar Swaminathan 
3135bafec742SSukumar Swaminathan 	if (mp != NULL)
3136bafec742SSukumar Swaminathan 		RX_UPSTREAM(rx_ring, mp);
3137bafec742SSukumar Swaminathan 
3138bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
3139bafec742SSukumar Swaminathan }
3140bafec742SSukumar Swaminathan 
3141bafec742SSukumar Swaminathan 
3142bafec742SSukumar Swaminathan /*
3143bafec742SSukumar Swaminathan  *
3144bafec742SSukumar Swaminathan  * Allocate DMA Buffer for ioctl service
3145bafec742SSukumar Swaminathan  *
3146bafec742SSukumar Swaminathan  */
3147bafec742SSukumar Swaminathan static int
3148bafec742SSukumar Swaminathan ql_alloc_ioctl_dma_buf(qlge_t *qlge)
3149bafec742SSukumar Swaminathan {
3150bafec742SSukumar Swaminathan 	uint64_t phy_addr;
3151bafec742SSukumar Swaminathan 	uint64_t alloc_size;
3152bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
3153bafec742SSukumar Swaminathan 
3154bafec742SSukumar Swaminathan 	alloc_size = qlge->ioctl_buf_dma_attr.mem_len =
3155bafec742SSukumar Swaminathan 	    max(WCS_MPI_CODE_RAM_LENGTH, MEMC_MPI_RAM_LENGTH);
3156bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &qlge->ioctl_buf_dma_attr.dma_handle,
3157bafec742SSukumar Swaminathan 	    &ql_buf_acc_attr,
3158bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3159bafec742SSukumar Swaminathan 	    &qlge->ioctl_buf_dma_attr.acc_handle,
3160bafec742SSukumar Swaminathan 	    (size_t)alloc_size,  /* mem size */
3161bafec742SSukumar Swaminathan 	    (size_t)0,  /* alignment */
3162bafec742SSukumar Swaminathan 	    (caddr_t *)&qlge->ioctl_buf_dma_attr.vaddr,
3163bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
3164bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): ioctl DMA allocation failed.",
3165bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3166bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3167bafec742SSukumar Swaminathan 	}
3168bafec742SSukumar Swaminathan 
3169bafec742SSukumar Swaminathan 	phy_addr = dma_cookie.dmac_laddress;
3170bafec742SSukumar Swaminathan 
3171bafec742SSukumar Swaminathan 	if (qlge->ioctl_buf_dma_attr.vaddr == NULL) {
3172bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): failed.", __func__, qlge->instance);
3173bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3174bafec742SSukumar Swaminathan 	}
3175bafec742SSukumar Swaminathan 
3176bafec742SSukumar Swaminathan 	qlge->ioctl_buf_dma_attr.dma_addr = phy_addr;
3177bafec742SSukumar Swaminathan 
3178bafec742SSukumar Swaminathan 	QL_PRINT(DBG_MBX, ("%s: ioctl_dma_buf_virt_addr = 0x%lx, "
3179bafec742SSukumar Swaminathan 	    "phy_addr = 0x%lx\n",
3180bafec742SSukumar Swaminathan 	    __func__, qlge->ioctl_buf_dma_attr.vaddr, phy_addr));
3181bafec742SSukumar Swaminathan 
3182bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
3183bafec742SSukumar Swaminathan }
3184bafec742SSukumar Swaminathan 
3185bafec742SSukumar Swaminathan 
3186bafec742SSukumar Swaminathan /*
3187bafec742SSukumar Swaminathan  * Function to free physical memory.
3188bafec742SSukumar Swaminathan  */
3189bafec742SSukumar Swaminathan static void
3190bafec742SSukumar Swaminathan ql_free_phys(ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *acc_handle)
3191bafec742SSukumar Swaminathan {
3192bafec742SSukumar Swaminathan 	if (dma_handle != NULL) {
3193bafec742SSukumar Swaminathan 		(void) ddi_dma_unbind_handle(*dma_handle);
3194bafec742SSukumar Swaminathan 		if (acc_handle != NULL)
3195bafec742SSukumar Swaminathan 			ddi_dma_mem_free(acc_handle);
3196bafec742SSukumar Swaminathan 		ddi_dma_free_handle(dma_handle);
3197bafec742SSukumar Swaminathan 	}
3198bafec742SSukumar Swaminathan }
3199bafec742SSukumar Swaminathan 
3200bafec742SSukumar Swaminathan /*
3201bafec742SSukumar Swaminathan  * Function to free ioctl dma buffer.
3202bafec742SSukumar Swaminathan  */
3203bafec742SSukumar Swaminathan static void
3204bafec742SSukumar Swaminathan ql_free_ioctl_dma_buf(qlge_t *qlge)
3205bafec742SSukumar Swaminathan {
3206bafec742SSukumar Swaminathan 	if (qlge->ioctl_buf_dma_attr.dma_handle != NULL) {
3207bafec742SSukumar Swaminathan 		ql_free_phys(&qlge->ioctl_buf_dma_attr.dma_handle,
3208bafec742SSukumar Swaminathan 		    &qlge->ioctl_buf_dma_attr.acc_handle);
3209bafec742SSukumar Swaminathan 
3210bafec742SSukumar Swaminathan 		qlge->ioctl_buf_dma_attr.vaddr = NULL;
3211bafec742SSukumar Swaminathan 		qlge->ioctl_buf_dma_attr.dma_handle = NULL;
3212bafec742SSukumar Swaminathan 	}
3213bafec742SSukumar Swaminathan }
3214bafec742SSukumar Swaminathan 
3215bafec742SSukumar Swaminathan /*
3216bafec742SSukumar Swaminathan  * Free shadow register space used for request and completion queues
3217bafec742SSukumar Swaminathan  */
3218bafec742SSukumar Swaminathan static void
3219bafec742SSukumar Swaminathan ql_free_shadow_space(qlge_t *qlge)
3220bafec742SSukumar Swaminathan {
3221bafec742SSukumar Swaminathan 	if (qlge->host_copy_shadow_dma_attr.dma_handle != NULL) {
3222bafec742SSukumar Swaminathan 		ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
3223bafec742SSukumar Swaminathan 		    &qlge->host_copy_shadow_dma_attr.acc_handle);
3224bafec742SSukumar Swaminathan 		bzero(&qlge->host_copy_shadow_dma_attr,
3225bafec742SSukumar Swaminathan 		    sizeof (qlge->host_copy_shadow_dma_attr));
3226bafec742SSukumar Swaminathan 	}
3227bafec742SSukumar Swaminathan 
3228bafec742SSukumar Swaminathan 	if (qlge->buf_q_ptr_base_addr_dma_attr.dma_handle != NULL) {
3229bafec742SSukumar Swaminathan 		ql_free_phys(&qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
3230bafec742SSukumar Swaminathan 		    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle);
3231bafec742SSukumar Swaminathan 		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
3232bafec742SSukumar Swaminathan 		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
3233bafec742SSukumar Swaminathan 	}
3234bafec742SSukumar Swaminathan }
3235bafec742SSukumar Swaminathan 
3236bafec742SSukumar Swaminathan /*
3237bafec742SSukumar Swaminathan  * Allocate shadow register space for request and completion queues
3238bafec742SSukumar Swaminathan  */
3239bafec742SSukumar Swaminathan static int
3240bafec742SSukumar Swaminathan ql_alloc_shadow_space(qlge_t *qlge)
3241bafec742SSukumar Swaminathan {
3242bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
3243bafec742SSukumar Swaminathan 
3244bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip,
3245bafec742SSukumar Swaminathan 	    &qlge->host_copy_shadow_dma_attr.dma_handle,
3246bafec742SSukumar Swaminathan 	    &ql_dev_acc_attr,
3247bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3248bafec742SSukumar Swaminathan 	    &qlge->host_copy_shadow_dma_attr.acc_handle,
3249bafec742SSukumar Swaminathan 	    (size_t)VM_PAGE_SIZE,  /* mem size */
3250bafec742SSukumar Swaminathan 	    (size_t)4, /* 4 bytes alignment */
3251bafec742SSukumar Swaminathan 	    (caddr_t *)&qlge->host_copy_shadow_dma_attr.vaddr,
3252bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
3253bafec742SSukumar Swaminathan 		bzero(&qlge->host_copy_shadow_dma_attr,
3254bafec742SSukumar Swaminathan 		    sizeof (qlge->host_copy_shadow_dma_attr));
3255bafec742SSukumar Swaminathan 
3256bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory for "
3257bafec742SSukumar Swaminathan 		    "response shadow registers", __func__, qlge->instance);
3258bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3259bafec742SSukumar Swaminathan 	}
3260bafec742SSukumar Swaminathan 
3261bafec742SSukumar Swaminathan 	qlge->host_copy_shadow_dma_attr.dma_addr = dma_cookie.dmac_laddress;
3262bafec742SSukumar Swaminathan 
3263bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip,
3264bafec742SSukumar Swaminathan 	    &qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
3265bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
3266bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3267bafec742SSukumar Swaminathan 	    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle,
3268bafec742SSukumar Swaminathan 	    (size_t)VM_PAGE_SIZE,  /* mem size */
3269bafec742SSukumar Swaminathan 	    (size_t)4, /* 4 bytes alignment */
3270bafec742SSukumar Swaminathan 	    (caddr_t *)&qlge->buf_q_ptr_base_addr_dma_attr.vaddr,
3271bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
3272bafec742SSukumar Swaminathan 		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
3273bafec742SSukumar Swaminathan 		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
3274bafec742SSukumar Swaminathan 
3275bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory "
3276bafec742SSukumar Swaminathan 		    "for request shadow registers",
3277bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3278bafec742SSukumar Swaminathan 		goto err_wqp_sh_area;
3279bafec742SSukumar Swaminathan 	}
3280bafec742SSukumar Swaminathan 	qlge->buf_q_ptr_base_addr_dma_attr.dma_addr = dma_cookie.dmac_laddress;
3281bafec742SSukumar Swaminathan 
3282bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
3283bafec742SSukumar Swaminathan 
3284bafec742SSukumar Swaminathan err_wqp_sh_area:
3285bafec742SSukumar Swaminathan 	ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
3286bafec742SSukumar Swaminathan 	    &qlge->host_copy_shadow_dma_attr.acc_handle);
3287bafec742SSukumar Swaminathan 	bzero(&qlge->host_copy_shadow_dma_attr,
3288bafec742SSukumar Swaminathan 	    sizeof (qlge->host_copy_shadow_dma_attr));
3289bafec742SSukumar Swaminathan 
3290bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
3291bafec742SSukumar Swaminathan }
3292bafec742SSukumar Swaminathan 
3293bafec742SSukumar Swaminathan /*
3294bafec742SSukumar Swaminathan  * Initialize a tx ring
3295bafec742SSukumar Swaminathan  */
3296bafec742SSukumar Swaminathan static void
3297bafec742SSukumar Swaminathan ql_init_tx_ring(struct tx_ring *tx_ring)
3298bafec742SSukumar Swaminathan {
3299bafec742SSukumar Swaminathan 	int i;
3300bafec742SSukumar Swaminathan 	struct ob_mac_iocb_req *mac_iocb_ptr = tx_ring->wq_dma.vaddr;
3301bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_ring_desc = tx_ring->wq_desc;
3302bafec742SSukumar Swaminathan 
3303bafec742SSukumar Swaminathan 	for (i = 0; i < tx_ring->wq_len; i++) {
3304bafec742SSukumar Swaminathan 		tx_ring_desc->index = i;
3305bafec742SSukumar Swaminathan 		tx_ring_desc->queue_entry = mac_iocb_ptr;
3306bafec742SSukumar Swaminathan 		mac_iocb_ptr++;
3307bafec742SSukumar Swaminathan 		tx_ring_desc++;
3308bafec742SSukumar Swaminathan 	}
3309bafec742SSukumar Swaminathan 	tx_ring->tx_free_count = tx_ring->wq_len;
3310bafec742SSukumar Swaminathan 	tx_ring->queue_stopped = 0;
3311bafec742SSukumar Swaminathan }
3312bafec742SSukumar Swaminathan 
3313bafec742SSukumar Swaminathan /*
3314bafec742SSukumar Swaminathan  * Free one tx ring resources
3315bafec742SSukumar Swaminathan  */
3316bafec742SSukumar Swaminathan static void
3317bafec742SSukumar Swaminathan ql_free_tx_resources(struct tx_ring *tx_ring)
3318bafec742SSukumar Swaminathan {
3319bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_ring_desc;
3320bafec742SSukumar Swaminathan 	int i, j;
3321bafec742SSukumar Swaminathan 
3322bafec742SSukumar Swaminathan 	ql_free_phys(&tx_ring->wq_dma.dma_handle, &tx_ring->wq_dma.acc_handle);
3323bafec742SSukumar Swaminathan 	bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
3324bafec742SSukumar Swaminathan 
3325bafec742SSukumar Swaminathan 	if (tx_ring->wq_desc != NULL) {
3326bafec742SSukumar Swaminathan 		tx_ring_desc = tx_ring->wq_desc;
3327bafec742SSukumar Swaminathan 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3328bafec742SSukumar Swaminathan 			for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3329bafec742SSukumar Swaminathan 				if (tx_ring_desc->tx_dma_handle[j]) {
3330bafec742SSukumar Swaminathan 					/*
3331bafec742SSukumar Swaminathan 					 * The unbinding will happen in tx
3332bafec742SSukumar Swaminathan 					 * completion, here we just free the
3333bafec742SSukumar Swaminathan 					 * handles
3334bafec742SSukumar Swaminathan 					 */
3335bafec742SSukumar Swaminathan 					ddi_dma_free_handle(
3336bafec742SSukumar Swaminathan 					    &(tx_ring_desc->tx_dma_handle[j]));
3337bafec742SSukumar Swaminathan 					tx_ring_desc->tx_dma_handle[j] = NULL;
3338bafec742SSukumar Swaminathan 				}
3339bafec742SSukumar Swaminathan 			}
3340bafec742SSukumar Swaminathan 			if (tx_ring_desc->oal != NULL) {
3341bafec742SSukumar Swaminathan 				tx_ring_desc->oal_dma_addr = 0;
3342bafec742SSukumar Swaminathan 				tx_ring_desc->oal = NULL;
3343bafec742SSukumar Swaminathan 				tx_ring_desc->copy_buffer = NULL;
3344bafec742SSukumar Swaminathan 				tx_ring_desc->copy_buffer_dma_addr = 0;
3345bafec742SSukumar Swaminathan 
3346bafec742SSukumar Swaminathan 				ql_free_phys(&tx_ring_desc->oal_dma.dma_handle,
3347bafec742SSukumar Swaminathan 				    &tx_ring_desc->oal_dma.acc_handle);
3348bafec742SSukumar Swaminathan 			}
3349bafec742SSukumar Swaminathan 		}
3350bafec742SSukumar Swaminathan 		kmem_free(tx_ring->wq_desc,
3351bafec742SSukumar Swaminathan 		    tx_ring->wq_len * sizeof (struct tx_ring_desc));
3352bafec742SSukumar Swaminathan 		tx_ring->wq_desc = NULL;
3353bafec742SSukumar Swaminathan 	}
3354bafec742SSukumar Swaminathan 	/* free the wqicb struct */
3355bafec742SSukumar Swaminathan 	if (tx_ring->wqicb_dma.dma_handle) {
3356bafec742SSukumar Swaminathan 		ql_free_phys(&tx_ring->wqicb_dma.dma_handle,
3357bafec742SSukumar Swaminathan 		    &tx_ring->wqicb_dma.acc_handle);
3358bafec742SSukumar Swaminathan 		bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3359bafec742SSukumar Swaminathan 	}
3360bafec742SSukumar Swaminathan }
3361bafec742SSukumar Swaminathan 
3362bafec742SSukumar Swaminathan /*
3363bafec742SSukumar Swaminathan  * Allocate work (request) queue memory and transmit
3364bafec742SSukumar Swaminathan  * descriptors for this transmit ring
3365bafec742SSukumar Swaminathan  */
3366bafec742SSukumar Swaminathan static int
3367bafec742SSukumar Swaminathan ql_alloc_tx_resources(qlge_t *qlge, struct tx_ring *tx_ring)
3368bafec742SSukumar Swaminathan {
3369bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
3370bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_ring_desc;
3371bafec742SSukumar Swaminathan 	int i, j;
3372bafec742SSukumar Swaminathan 	uint32_t length;
3373bafec742SSukumar Swaminathan 
3374bafec742SSukumar Swaminathan 	/* allocate dma buffers for obiocbs */
3375bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &tx_ring->wq_dma.dma_handle,
3376bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
3377bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3378bafec742SSukumar Swaminathan 	    &tx_ring->wq_dma.acc_handle,
3379bafec742SSukumar Swaminathan 	    (size_t)tx_ring->wq_size,	/* mem size */
3380bafec742SSukumar Swaminathan 	    (size_t)128, /* alignment:128 bytes boundary */
3381bafec742SSukumar Swaminathan 	    (caddr_t *)&tx_ring->wq_dma.vaddr,
3382bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
3383bafec742SSukumar Swaminathan 		bzero(&tx_ring->wq_dma, sizeof (&tx_ring->wq_dma));
3384bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): reqQ allocation failed.",
3385bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3386bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3387bafec742SSukumar Swaminathan 	}
3388bafec742SSukumar Swaminathan 	tx_ring->wq_dma.dma_addr = dma_cookie.dmac_laddress;
3389bafec742SSukumar Swaminathan 
3390bafec742SSukumar Swaminathan 	tx_ring->wq_desc =
3391bafec742SSukumar Swaminathan 	    kmem_zalloc(tx_ring->wq_len * sizeof (struct tx_ring_desc),
3392bafec742SSukumar Swaminathan 	    KM_NOSLEEP);
3393bafec742SSukumar Swaminathan 	if (tx_ring->wq_desc == NULL) {
3394bafec742SSukumar Swaminathan 		goto err;
3395bafec742SSukumar Swaminathan 	} else {
3396bafec742SSukumar Swaminathan 		tx_ring_desc = tx_ring->wq_desc;
3397bafec742SSukumar Swaminathan 		/*
3398bafec742SSukumar Swaminathan 		 * Allocate a large enough structure to hold the following
3399bafec742SSukumar Swaminathan 		 * 1. oal buffer MAX_SGELEMENTS * sizeof (oal_entry) bytes
3400bafec742SSukumar Swaminathan 		 * 2. copy buffer of QL_MAX_COPY_LENGTH bytes
3401bafec742SSukumar Swaminathan 		 */
3402bafec742SSukumar Swaminathan 		length = (sizeof (struct oal_entry) * MAX_SG_ELEMENTS)
3403bafec742SSukumar Swaminathan 		    + QL_MAX_COPY_LENGTH;
3404accf27a5SSukumar Swaminathan 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3405bafec742SSukumar Swaminathan 
3406bafec742SSukumar Swaminathan 			if (ql_alloc_phys(qlge->dip,
3407bafec742SSukumar Swaminathan 			    &tx_ring_desc->oal_dma.dma_handle,
3408bafec742SSukumar Swaminathan 			    &ql_desc_acc_attr,
3409bafec742SSukumar Swaminathan 			    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3410bafec742SSukumar Swaminathan 			    &tx_ring_desc->oal_dma.acc_handle,
3411bafec742SSukumar Swaminathan 			    (size_t)length,	/* mem size */
3412bafec742SSukumar Swaminathan 			    (size_t)0, /* default alignment:8 bytes boundary */
3413bafec742SSukumar Swaminathan 			    (caddr_t *)&tx_ring_desc->oal_dma.vaddr,
3414bafec742SSukumar Swaminathan 			    &dma_cookie) != 0) {
3415bafec742SSukumar Swaminathan 				bzero(&tx_ring_desc->oal_dma,
3416bafec742SSukumar Swaminathan 				    sizeof (tx_ring_desc->oal_dma));
3417bafec742SSukumar Swaminathan 				cmn_err(CE_WARN, "%s(%d): reqQ tx buf &"
3418bafec742SSukumar Swaminathan 				    "oal alloc failed.",
3419bafec742SSukumar Swaminathan 				    __func__, qlge->instance);
3420bafec742SSukumar Swaminathan 				return (DDI_FAILURE);
3421bafec742SSukumar Swaminathan 			}
3422bafec742SSukumar Swaminathan 
3423bafec742SSukumar Swaminathan 			tx_ring_desc->oal = tx_ring_desc->oal_dma.vaddr;
3424bafec742SSukumar Swaminathan 			tx_ring_desc->oal_dma_addr = dma_cookie.dmac_laddress;
3425bafec742SSukumar Swaminathan 			tx_ring_desc->copy_buffer =
3426bafec742SSukumar Swaminathan 			    (caddr_t)((uint8_t *)tx_ring_desc->oal
3427bafec742SSukumar Swaminathan 			    + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3428bafec742SSukumar Swaminathan 			tx_ring_desc->copy_buffer_dma_addr =
3429bafec742SSukumar Swaminathan 			    (tx_ring_desc->oal_dma_addr
3430bafec742SSukumar Swaminathan 			    + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3431bafec742SSukumar Swaminathan 
3432bafec742SSukumar Swaminathan 			/* Allocate dma handles for transmit buffers */
3433bafec742SSukumar Swaminathan 			for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3434bafec742SSukumar Swaminathan 				if (ddi_dma_alloc_handle(qlge->dip,
3435bafec742SSukumar Swaminathan 				    &tx_mapping_dma_attr,
3436bafec742SSukumar Swaminathan 				    DDI_DMA_DONTWAIT,
3437bafec742SSukumar Swaminathan 				    0, &tx_ring_desc->tx_dma_handle[j])
3438bafec742SSukumar Swaminathan 				    != DDI_SUCCESS) {
3439bafec742SSukumar Swaminathan 					cmn_err(CE_WARN,
3440bafec742SSukumar Swaminathan 					    "!%s: ddi_dma_alloc_handle: "
3441bafec742SSukumar Swaminathan 					    "tx_dma_handle "
3442bafec742SSukumar Swaminathan 					    "alloc failed", __func__);
3443bafec742SSukumar Swaminathan 					goto err;
3444bafec742SSukumar Swaminathan 				}
3445bafec742SSukumar Swaminathan 			}
3446bafec742SSukumar Swaminathan 		}
3447bafec742SSukumar Swaminathan 	}
3448bafec742SSukumar Swaminathan 	/* alloc a wqicb control block to load this tx ring to hw */
3449bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &tx_ring->wqicb_dma.dma_handle,
3450bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
3451bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3452bafec742SSukumar Swaminathan 	    &tx_ring->wqicb_dma.acc_handle,
3453bafec742SSukumar Swaminathan 	    (size_t)sizeof (struct wqicb_t),	/* mem size */
3454bafec742SSukumar Swaminathan 	    (size_t)0, /* alignment:128 bytes boundary */
3455bafec742SSukumar Swaminathan 	    (caddr_t *)&tx_ring->wqicb_dma.vaddr,
3456bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
3457bafec742SSukumar Swaminathan 		bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3458bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): wqicb allocation failed.",
3459bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3460bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3461bafec742SSukumar Swaminathan 	}
3462bafec742SSukumar Swaminathan 	tx_ring->wqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3463bafec742SSukumar Swaminathan 
3464bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
3465bafec742SSukumar Swaminathan 
3466bafec742SSukumar Swaminathan err:
3467bafec742SSukumar Swaminathan 	ql_free_tx_resources(tx_ring);
3468bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
3469bafec742SSukumar Swaminathan }
3470bafec742SSukumar Swaminathan 
3471bafec742SSukumar Swaminathan /*
3472bafec742SSukumar Swaminathan  * Free one rx ring resources
3473bafec742SSukumar Swaminathan  */
3474bafec742SSukumar Swaminathan static void
3475bafec742SSukumar Swaminathan ql_free_rx_resources(struct rx_ring *rx_ring)
3476bafec742SSukumar Swaminathan {
3477bafec742SSukumar Swaminathan 	/* Free the small buffer queue. */
3478bafec742SSukumar Swaminathan 	if (rx_ring->sbq_dma.dma_handle) {
3479bafec742SSukumar Swaminathan 		ql_free_phys(&rx_ring->sbq_dma.dma_handle,
3480bafec742SSukumar Swaminathan 		    &rx_ring->sbq_dma.acc_handle);
3481bafec742SSukumar Swaminathan 		bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3482bafec742SSukumar Swaminathan 	}
3483bafec742SSukumar Swaminathan 
3484bafec742SSukumar Swaminathan 	/* Free the small buffer queue control blocks. */
3485bafec742SSukumar Swaminathan 	kmem_free(rx_ring->sbq_desc, rx_ring->sbq_len *
3486bafec742SSukumar Swaminathan 	    sizeof (struct bq_desc));
3487bafec742SSukumar Swaminathan 	rx_ring->sbq_desc = NULL;
3488bafec742SSukumar Swaminathan 
3489bafec742SSukumar Swaminathan 	/* Free the large buffer queue. */
3490bafec742SSukumar Swaminathan 	if (rx_ring->lbq_dma.dma_handle) {
3491bafec742SSukumar Swaminathan 		ql_free_phys(&rx_ring->lbq_dma.dma_handle,
3492bafec742SSukumar Swaminathan 		    &rx_ring->lbq_dma.acc_handle);
3493bafec742SSukumar Swaminathan 		bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3494bafec742SSukumar Swaminathan 	}
3495bafec742SSukumar Swaminathan 
3496bafec742SSukumar Swaminathan 	/* Free the large buffer queue control blocks. */
3497bafec742SSukumar Swaminathan 	kmem_free(rx_ring->lbq_desc, rx_ring->lbq_len *
3498bafec742SSukumar Swaminathan 	    sizeof (struct bq_desc));
3499bafec742SSukumar Swaminathan 	rx_ring->lbq_desc = NULL;
3500bafec742SSukumar Swaminathan 
3501bafec742SSukumar Swaminathan 	/* Free cqicb struct */
3502bafec742SSukumar Swaminathan 	if (rx_ring->cqicb_dma.dma_handle) {
3503bafec742SSukumar Swaminathan 		ql_free_phys(&rx_ring->cqicb_dma.dma_handle,
3504bafec742SSukumar Swaminathan 		    &rx_ring->cqicb_dma.acc_handle);
3505bafec742SSukumar Swaminathan 		bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3506bafec742SSukumar Swaminathan 	}
3507bafec742SSukumar Swaminathan 	/* Free the rx queue. */
3508bafec742SSukumar Swaminathan 	if (rx_ring->cq_dma.dma_handle) {
3509bafec742SSukumar Swaminathan 		ql_free_phys(&rx_ring->cq_dma.dma_handle,
3510bafec742SSukumar Swaminathan 		    &rx_ring->cq_dma.acc_handle);
3511bafec742SSukumar Swaminathan 		bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3512bafec742SSukumar Swaminathan 	}
3513bafec742SSukumar Swaminathan }
3514bafec742SSukumar Swaminathan 
3515bafec742SSukumar Swaminathan /*
3516bafec742SSukumar Swaminathan  * Allocate queues and buffers for this completions queue based
3517bafec742SSukumar Swaminathan  * on the values in the parameter structure.
3518bafec742SSukumar Swaminathan  */
3519bafec742SSukumar Swaminathan static int
3520bafec742SSukumar Swaminathan ql_alloc_rx_resources(qlge_t *qlge, struct rx_ring *rx_ring)
3521bafec742SSukumar Swaminathan {
3522bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
3523bafec742SSukumar Swaminathan 
3524bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &rx_ring->cq_dma.dma_handle,
3525bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
3526bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3527bafec742SSukumar Swaminathan 	    &rx_ring->cq_dma.acc_handle,
3528bafec742SSukumar Swaminathan 	    (size_t)rx_ring->cq_size,  /* mem size */
3529bafec742SSukumar Swaminathan 	    (size_t)128, /* alignment:128 bytes boundary */
3530bafec742SSukumar Swaminathan 	    (caddr_t *)&rx_ring->cq_dma.vaddr,
3531bafec742SSukumar Swaminathan 	    &dma_cookie) != 0)	{
3532bafec742SSukumar Swaminathan 		bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3533bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): rspQ allocation failed.",
3534bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3535bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3536bafec742SSukumar Swaminathan 	}
3537bafec742SSukumar Swaminathan 	rx_ring->cq_dma.dma_addr = dma_cookie.dmac_laddress;
3538bafec742SSukumar Swaminathan 
3539bafec742SSukumar Swaminathan 	if (rx_ring->sbq_len != 0) {
3540bafec742SSukumar Swaminathan 		/*
3541bafec742SSukumar Swaminathan 		 * Allocate small buffer queue.
3542bafec742SSukumar Swaminathan 		 */
3543bafec742SSukumar Swaminathan 		if (ql_alloc_phys(qlge->dip, &rx_ring->sbq_dma.dma_handle,
3544bafec742SSukumar Swaminathan 		    &ql_desc_acc_attr,
3545bafec742SSukumar Swaminathan 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3546bafec742SSukumar Swaminathan 		    &rx_ring->sbq_dma.acc_handle,
3547bafec742SSukumar Swaminathan 		    (size_t)rx_ring->sbq_size,  /* mem size */
3548bafec742SSukumar Swaminathan 		    (size_t)128, /* alignment:128 bytes boundary */
3549bafec742SSukumar Swaminathan 		    (caddr_t *)&rx_ring->sbq_dma.vaddr,
3550bafec742SSukumar Swaminathan 		    &dma_cookie) != 0) {
3551bafec742SSukumar Swaminathan 			bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3552bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
3553bafec742SSukumar Swaminathan 			    "%s(%d): small buffer queue allocation failed.",
3554bafec742SSukumar Swaminathan 			    __func__, qlge->instance);
3555bafec742SSukumar Swaminathan 			goto err_mem;
3556bafec742SSukumar Swaminathan 		}
3557bafec742SSukumar Swaminathan 		rx_ring->sbq_dma.dma_addr = dma_cookie.dmac_laddress;
3558bafec742SSukumar Swaminathan 
3559bafec742SSukumar Swaminathan 		/*
3560bafec742SSukumar Swaminathan 		 * Allocate small buffer queue control blocks.
3561bafec742SSukumar Swaminathan 		 */
3562bafec742SSukumar Swaminathan 		rx_ring->sbq_desc =
3563bafec742SSukumar Swaminathan 		    kmem_zalloc(rx_ring->sbq_len * sizeof (struct bq_desc),
3564bafec742SSukumar Swaminathan 		    KM_NOSLEEP);
3565bafec742SSukumar Swaminathan 		if (rx_ring->sbq_desc == NULL) {
3566bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
3567bafec742SSukumar Swaminathan 			    "sbq control block allocation failed.");
3568bafec742SSukumar Swaminathan 			goto err_mem;
3569bafec742SSukumar Swaminathan 		}
3570bafec742SSukumar Swaminathan 
3571bafec742SSukumar Swaminathan 		ql_init_sbq_ring(rx_ring);
3572bafec742SSukumar Swaminathan 	}
3573bafec742SSukumar Swaminathan 
3574bafec742SSukumar Swaminathan 	if (rx_ring->lbq_len != 0) {
3575bafec742SSukumar Swaminathan 		/*
3576bafec742SSukumar Swaminathan 		 * Allocate large buffer queue.
3577bafec742SSukumar Swaminathan 		 */
3578bafec742SSukumar Swaminathan 		if (ql_alloc_phys(qlge->dip, &rx_ring->lbq_dma.dma_handle,
3579bafec742SSukumar Swaminathan 		    &ql_desc_acc_attr,
3580bafec742SSukumar Swaminathan 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3581bafec742SSukumar Swaminathan 		    &rx_ring->lbq_dma.acc_handle,
3582bafec742SSukumar Swaminathan 		    (size_t)rx_ring->lbq_size,  /* mem size */
3583bafec742SSukumar Swaminathan 		    (size_t)128, /* alignment:128 bytes boundary */
3584bafec742SSukumar Swaminathan 		    (caddr_t *)&rx_ring->lbq_dma.vaddr,
3585bafec742SSukumar Swaminathan 		    &dma_cookie) != 0) {
3586bafec742SSukumar Swaminathan 			bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3587bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): lbq allocation failed.",
3588bafec742SSukumar Swaminathan 			    __func__, qlge->instance);
3589bafec742SSukumar Swaminathan 			goto err_mem;
3590bafec742SSukumar Swaminathan 		}
3591bafec742SSukumar Swaminathan 		rx_ring->lbq_dma.dma_addr = dma_cookie.dmac_laddress;
3592bafec742SSukumar Swaminathan 
3593bafec742SSukumar Swaminathan 		/*
3594bafec742SSukumar Swaminathan 		 * Allocate large buffer queue control blocks.
3595bafec742SSukumar Swaminathan 		 */
3596bafec742SSukumar Swaminathan 		rx_ring->lbq_desc =
3597bafec742SSukumar Swaminathan 		    kmem_zalloc(rx_ring->lbq_len * sizeof (struct bq_desc),
3598bafec742SSukumar Swaminathan 		    KM_NOSLEEP);
3599bafec742SSukumar Swaminathan 		if (rx_ring->lbq_desc == NULL) {
3600bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
3601bafec742SSukumar Swaminathan 			    "Large buffer queue control block allocation "
3602bafec742SSukumar Swaminathan 			    "failed.");
3603bafec742SSukumar Swaminathan 			goto err_mem;
3604bafec742SSukumar Swaminathan 		}
3605bafec742SSukumar Swaminathan 		ql_init_lbq_ring(rx_ring);
3606bafec742SSukumar Swaminathan 	}
3607bafec742SSukumar Swaminathan 
3608bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &rx_ring->cqicb_dma.dma_handle,
3609bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
3610bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3611bafec742SSukumar Swaminathan 	    &rx_ring->cqicb_dma.acc_handle,
3612bafec742SSukumar Swaminathan 	    (size_t)sizeof (struct cqicb_t),  /* mem size */
3613bafec742SSukumar Swaminathan 	    (size_t)0, /* alignment:128 bytes boundary */
3614bafec742SSukumar Swaminathan 	    (caddr_t *)&rx_ring->cqicb_dma.vaddr,
3615bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
3616bafec742SSukumar Swaminathan 		bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3617bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): cqicb allocation failed.",
3618bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3619bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3620bafec742SSukumar Swaminathan 	}
3621bafec742SSukumar Swaminathan 	rx_ring->cqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3622bafec742SSukumar Swaminathan 
3623bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
3624bafec742SSukumar Swaminathan 
3625bafec742SSukumar Swaminathan err_mem:
3626bafec742SSukumar Swaminathan 	ql_free_rx_resources(rx_ring);
3627bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
3628bafec742SSukumar Swaminathan }
3629bafec742SSukumar Swaminathan 
3630bafec742SSukumar Swaminathan /*
3631bafec742SSukumar Swaminathan  * Frees tx/rx queues memory resources
3632bafec742SSukumar Swaminathan  */
3633bafec742SSukumar Swaminathan static void
3634bafec742SSukumar Swaminathan ql_free_mem_resources(qlge_t *qlge)
3635bafec742SSukumar Swaminathan {
3636bafec742SSukumar Swaminathan 	int i;
3637bafec742SSukumar Swaminathan 
3638bafec742SSukumar Swaminathan 	if (qlge->ricb_dma.dma_handle) {
3639bafec742SSukumar Swaminathan 		/* free the ricb struct */
3640bafec742SSukumar Swaminathan 		ql_free_phys(&qlge->ricb_dma.dma_handle,
3641bafec742SSukumar Swaminathan 		    &qlge->ricb_dma.acc_handle);
3642bafec742SSukumar Swaminathan 		bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3643bafec742SSukumar Swaminathan 	}
3644bafec742SSukumar Swaminathan 
3645bafec742SSukumar Swaminathan 	ql_free_rx_buffers(qlge);
3646bafec742SSukumar Swaminathan 
3647bafec742SSukumar Swaminathan 	ql_free_ioctl_dma_buf(qlge);
3648bafec742SSukumar Swaminathan 
3649bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++)
3650bafec742SSukumar Swaminathan 		ql_free_tx_resources(&qlge->tx_ring[i]);
3651bafec742SSukumar Swaminathan 
3652bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++)
3653bafec742SSukumar Swaminathan 		ql_free_rx_resources(&qlge->rx_ring[i]);
3654bafec742SSukumar Swaminathan 
3655bafec742SSukumar Swaminathan 	ql_free_shadow_space(qlge);
3656bafec742SSukumar Swaminathan }
3657bafec742SSukumar Swaminathan 
3658bafec742SSukumar Swaminathan /*
3659bafec742SSukumar Swaminathan  * Allocate buffer queues, large buffers and small buffers etc
3660bafec742SSukumar Swaminathan  *
3661bafec742SSukumar Swaminathan  * This API is called in the gld_attach member function. It is called
3662bafec742SSukumar Swaminathan  * only once.  Later reset,reboot should not re-allocate all rings and
3663bafec742SSukumar Swaminathan  * buffers.
3664bafec742SSukumar Swaminathan  */
3665bafec742SSukumar Swaminathan static int
3666bafec742SSukumar Swaminathan ql_alloc_mem_resources(qlge_t *qlge)
3667bafec742SSukumar Swaminathan {
3668bafec742SSukumar Swaminathan 	int i;
3669bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
3670bafec742SSukumar Swaminathan 
3671bafec742SSukumar Swaminathan 	/* Allocate space for our shadow registers */
3672bafec742SSukumar Swaminathan 	if (ql_alloc_shadow_space(qlge))
3673bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3674bafec742SSukumar Swaminathan 
3675bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
3676bafec742SSukumar Swaminathan 		if (ql_alloc_rx_resources(qlge, &qlge->rx_ring[i]) != 0) {
3677bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "RX resource allocation failed.");
3678bafec742SSukumar Swaminathan 			goto err_mem;
3679bafec742SSukumar Swaminathan 		}
3680bafec742SSukumar Swaminathan 	}
3681bafec742SSukumar Swaminathan 	/* Allocate tx queue resources */
3682bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
3683bafec742SSukumar Swaminathan 		if (ql_alloc_tx_resources(qlge, &qlge->tx_ring[i]) != 0) {
3684bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Tx resource allocation failed.");
3685bafec742SSukumar Swaminathan 			goto err_mem;
3686bafec742SSukumar Swaminathan 		}
3687bafec742SSukumar Swaminathan 	}
3688bafec742SSukumar Swaminathan 
3689bafec742SSukumar Swaminathan 	if (ql_alloc_ioctl_dma_buf(qlge) != DDI_SUCCESS) {
3690bafec742SSukumar Swaminathan 		goto err_mem;
3691bafec742SSukumar Swaminathan 	}
3692bafec742SSukumar Swaminathan 
3693bafec742SSukumar Swaminathan 	if (ql_alloc_rx_buffers(qlge) != DDI_SUCCESS) {
3694bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "?%s(%d): ql_alloc_rx_buffers failed",
3695bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3696bafec742SSukumar Swaminathan 		goto err_mem;
3697bafec742SSukumar Swaminathan 	}
3698bafec742SSukumar Swaminathan 
3699bafec742SSukumar Swaminathan 	qlge->sequence |= INIT_ALLOC_RX_BUF;
3700bafec742SSukumar Swaminathan 
3701bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip, &qlge->ricb_dma.dma_handle,
3702bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
3703bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3704bafec742SSukumar Swaminathan 	    &qlge->ricb_dma.acc_handle,
3705bafec742SSukumar Swaminathan 	    (size_t)sizeof (struct ricb),  /* mem size */
3706bafec742SSukumar Swaminathan 	    (size_t)0, /* alignment:128 bytes boundary */
3707bafec742SSukumar Swaminathan 	    (caddr_t *)&qlge->ricb_dma.vaddr,
3708bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
3709bafec742SSukumar Swaminathan 		bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3710bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): ricb allocation failed.",
3711bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3712bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3713bafec742SSukumar Swaminathan 	}
3714bafec742SSukumar Swaminathan 	qlge->ricb_dma.dma_addr = dma_cookie.dmac_laddress;
3715bafec742SSukumar Swaminathan 
3716bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
3717bafec742SSukumar Swaminathan 
3718bafec742SSukumar Swaminathan err_mem:
3719bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
3720bafec742SSukumar Swaminathan }
3721bafec742SSukumar Swaminathan 
3722bafec742SSukumar Swaminathan 
3723bafec742SSukumar Swaminathan /*
3724bafec742SSukumar Swaminathan  * Function used to allocate physical memory and zero it.
3725bafec742SSukumar Swaminathan  */
3726bafec742SSukumar Swaminathan 
3727bafec742SSukumar Swaminathan static int
3728accf27a5SSukumar Swaminathan ql_alloc_phys_rbuf(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
3729accf27a5SSukumar Swaminathan     ddi_device_acc_attr_t *device_acc_attr,
3730accf27a5SSukumar Swaminathan     uint_t dma_flags,
3731accf27a5SSukumar Swaminathan     ddi_acc_handle_t *acc_handle,
3732accf27a5SSukumar Swaminathan     size_t size,
3733accf27a5SSukumar Swaminathan     size_t alignment,
3734accf27a5SSukumar Swaminathan     caddr_t *vaddr,
3735accf27a5SSukumar Swaminathan     ddi_dma_cookie_t *dma_cookie)
3736accf27a5SSukumar Swaminathan {
3737accf27a5SSukumar Swaminathan 	size_t rlen;
3738accf27a5SSukumar Swaminathan 	uint_t cnt;
3739accf27a5SSukumar Swaminathan 
3740accf27a5SSukumar Swaminathan 	/*
3741accf27a5SSukumar Swaminathan 	 * Workaround for SUN XMITS buffer must end and start on 8 byte
3742accf27a5SSukumar Swaminathan 	 * boundary. Else, hardware will overrun the buffer. Simple fix is
3743accf27a5SSukumar Swaminathan 	 * to make sure buffer has enough room for overrun.
3744accf27a5SSukumar Swaminathan 	 */
3745accf27a5SSukumar Swaminathan 	if (size & 7) {
3746accf27a5SSukumar Swaminathan 		size += 8 - (size & 7);
3747accf27a5SSukumar Swaminathan 	}
3748accf27a5SSukumar Swaminathan 
3749accf27a5SSukumar Swaminathan 	/* Adjust the alignment if requested */
3750accf27a5SSukumar Swaminathan 	if (alignment) {
3751accf27a5SSukumar Swaminathan 		dma_attr.dma_attr_align = alignment;
3752accf27a5SSukumar Swaminathan 	}
3753accf27a5SSukumar Swaminathan 
3754accf27a5SSukumar Swaminathan 	/*
3755accf27a5SSukumar Swaminathan 	 * Allocate DMA handle
3756accf27a5SSukumar Swaminathan 	 */
3757accf27a5SSukumar Swaminathan 	if (ddi_dma_alloc_handle(dip, &dma_attr_rbuf, DDI_DMA_DONTWAIT, NULL,
3758accf27a5SSukumar Swaminathan 	    dma_handle) != DDI_SUCCESS) {
3759accf27a5SSukumar Swaminathan 		cmn_err(CE_WARN, QL_BANG "%s:  ddi_dma_alloc_handle FAILED",
3760accf27a5SSukumar Swaminathan 		    __func__);
3761accf27a5SSukumar Swaminathan 		return (QL_ERROR);
3762accf27a5SSukumar Swaminathan 	}
3763accf27a5SSukumar Swaminathan 	/*
3764accf27a5SSukumar Swaminathan 	 * Allocate DMA memory
3765accf27a5SSukumar Swaminathan 	 */
3766accf27a5SSukumar Swaminathan 	if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
3767accf27a5SSukumar Swaminathan 	    dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING),
3768accf27a5SSukumar Swaminathan 	    DDI_DMA_DONTWAIT,
3769accf27a5SSukumar Swaminathan 	    NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
3770accf27a5SSukumar Swaminathan 		cmn_err(CE_WARN, "alloc_phys: DMA Memory alloc Failed");
3771accf27a5SSukumar Swaminathan 		ddi_dma_free_handle(dma_handle);
3772accf27a5SSukumar Swaminathan 		return (QL_ERROR);
3773accf27a5SSukumar Swaminathan 	}
3774accf27a5SSukumar Swaminathan 
3775accf27a5SSukumar Swaminathan 	if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
3776accf27a5SSukumar Swaminathan 	    dma_flags, DDI_DMA_DONTWAIT, NULL,
3777accf27a5SSukumar Swaminathan 	    dma_cookie, &cnt) != DDI_DMA_MAPPED) {
3778accf27a5SSukumar Swaminathan 		ddi_dma_mem_free(acc_handle);
3779accf27a5SSukumar Swaminathan 
3780accf27a5SSukumar Swaminathan 		ddi_dma_free_handle(dma_handle);
3781accf27a5SSukumar Swaminathan 		cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
3782accf27a5SSukumar Swaminathan 		    __func__);
3783accf27a5SSukumar Swaminathan 		return (QL_ERROR);
3784accf27a5SSukumar Swaminathan 	}
3785accf27a5SSukumar Swaminathan 
3786accf27a5SSukumar Swaminathan 	if (cnt != 1) {
3787accf27a5SSukumar Swaminathan 
3788accf27a5SSukumar Swaminathan 		ql_free_phys(dma_handle, acc_handle);
3789accf27a5SSukumar Swaminathan 
3790accf27a5SSukumar Swaminathan 		cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
3791accf27a5SSukumar Swaminathan 		    __func__);
3792accf27a5SSukumar Swaminathan 		return (QL_ERROR);
3793accf27a5SSukumar Swaminathan 	}
3794accf27a5SSukumar Swaminathan 
3795accf27a5SSukumar Swaminathan 	bzero((caddr_t)*vaddr, rlen);
3796accf27a5SSukumar Swaminathan 
3797accf27a5SSukumar Swaminathan 	return (0);
3798accf27a5SSukumar Swaminathan }
3799accf27a5SSukumar Swaminathan 
3800accf27a5SSukumar Swaminathan /*
3801accf27a5SSukumar Swaminathan  * Function used to allocate physical memory and zero it.
3802accf27a5SSukumar Swaminathan  */
3803accf27a5SSukumar Swaminathan static int
3804bafec742SSukumar Swaminathan ql_alloc_phys(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
3805bafec742SSukumar Swaminathan     ddi_device_acc_attr_t *device_acc_attr,
3806bafec742SSukumar Swaminathan     uint_t dma_flags,
3807bafec742SSukumar Swaminathan     ddi_acc_handle_t *acc_handle,
3808bafec742SSukumar Swaminathan     size_t size,
3809bafec742SSukumar Swaminathan     size_t alignment,
3810bafec742SSukumar Swaminathan     caddr_t *vaddr,
3811bafec742SSukumar Swaminathan     ddi_dma_cookie_t *dma_cookie)
3812bafec742SSukumar Swaminathan {
3813bafec742SSukumar Swaminathan 	size_t rlen;
3814bafec742SSukumar Swaminathan 	uint_t cnt;
3815bafec742SSukumar Swaminathan 
3816bafec742SSukumar Swaminathan 	/*
3817bafec742SSukumar Swaminathan 	 * Workaround for SUN XMITS buffer must end and start on 8 byte
3818bafec742SSukumar Swaminathan 	 * boundary. Else, hardware will overrun the buffer. Simple fix is
3819bafec742SSukumar Swaminathan 	 * to make sure buffer has enough room for overrun.
3820bafec742SSukumar Swaminathan 	 */
3821bafec742SSukumar Swaminathan 	if (size & 7) {
3822bafec742SSukumar Swaminathan 		size += 8 - (size & 7);
3823bafec742SSukumar Swaminathan 	}
3824bafec742SSukumar Swaminathan 
3825bafec742SSukumar Swaminathan 	/* Adjust the alignment if requested */
3826bafec742SSukumar Swaminathan 	if (alignment) {
3827bafec742SSukumar Swaminathan 		dma_attr.dma_attr_align = alignment;
3828bafec742SSukumar Swaminathan 	}
3829bafec742SSukumar Swaminathan 
3830bafec742SSukumar Swaminathan 	/*
3831bafec742SSukumar Swaminathan 	 * Allocate DMA handle
3832bafec742SSukumar Swaminathan 	 */
3833accf27a5SSukumar Swaminathan 	if (ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_DONTWAIT, NULL,
3834bafec742SSukumar Swaminathan 	    dma_handle) != DDI_SUCCESS) {
3835bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, QL_BANG "%s:  ddi_dma_alloc_handle FAILED",
3836bafec742SSukumar Swaminathan 		    __func__);
3837bafec742SSukumar Swaminathan 		return (QL_ERROR);
3838bafec742SSukumar Swaminathan 	}
3839bafec742SSukumar Swaminathan 	/*
3840bafec742SSukumar Swaminathan 	 * Allocate DMA memory
3841bafec742SSukumar Swaminathan 	 */
3842bafec742SSukumar Swaminathan 	if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
3843accf27a5SSukumar Swaminathan 	    dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING),
3844accf27a5SSukumar Swaminathan 	    DDI_DMA_DONTWAIT,
3845bafec742SSukumar Swaminathan 	    NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
3846accf27a5SSukumar Swaminathan 		cmn_err(CE_WARN, "alloc_phys: DMA Memory alloc Failed");
3847bafec742SSukumar Swaminathan 		ddi_dma_free_handle(dma_handle);
3848bafec742SSukumar Swaminathan 		return (QL_ERROR);
3849bafec742SSukumar Swaminathan 	}
3850bafec742SSukumar Swaminathan 
3851bafec742SSukumar Swaminathan 	if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
3852accf27a5SSukumar Swaminathan 	    dma_flags, DDI_DMA_DONTWAIT, NULL,
3853bafec742SSukumar Swaminathan 	    dma_cookie, &cnt) != DDI_DMA_MAPPED) {
3854bafec742SSukumar Swaminathan 		ddi_dma_mem_free(acc_handle);
3855bafec742SSukumar Swaminathan 
3856bafec742SSukumar Swaminathan 		ddi_dma_free_handle(dma_handle);
3857bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
3858bafec742SSukumar Swaminathan 		    __func__);
3859bafec742SSukumar Swaminathan 		return (QL_ERROR);
3860bafec742SSukumar Swaminathan 	}
3861bafec742SSukumar Swaminathan 
3862bafec742SSukumar Swaminathan 	if (cnt != 1) {
3863bafec742SSukumar Swaminathan 
3864bafec742SSukumar Swaminathan 		ql_free_phys(dma_handle, acc_handle);
3865bafec742SSukumar Swaminathan 
3866bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
3867bafec742SSukumar Swaminathan 		    __func__);
3868bafec742SSukumar Swaminathan 		return (QL_ERROR);
3869bafec742SSukumar Swaminathan 	}
3870bafec742SSukumar Swaminathan 
3871bafec742SSukumar Swaminathan 	bzero((caddr_t)*vaddr, rlen);
3872bafec742SSukumar Swaminathan 
3873bafec742SSukumar Swaminathan 	return (0);
3874bafec742SSukumar Swaminathan }
3875bafec742SSukumar Swaminathan 
3876bafec742SSukumar Swaminathan /*
3877bafec742SSukumar Swaminathan  * Add interrupt handlers based on the interrupt type.
3878bafec742SSukumar Swaminathan  * Before adding the interrupt handlers, the interrupt vectors should
3879bafec742SSukumar Swaminathan  * have been allocated, and the rx/tx rings have also been allocated.
3880bafec742SSukumar Swaminathan  */
3881bafec742SSukumar Swaminathan static int
3882bafec742SSukumar Swaminathan ql_add_intr_handlers(qlge_t *qlge)
3883bafec742SSukumar Swaminathan {
3884bafec742SSukumar Swaminathan 	int vector = 0;
3885bafec742SSukumar Swaminathan 	int rc, i;
3886bafec742SSukumar Swaminathan 	uint32_t value;
3887bafec742SSukumar Swaminathan 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
3888bafec742SSukumar Swaminathan 
3889bafec742SSukumar Swaminathan 	switch (qlge->intr_type) {
3890bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_MSIX:
3891bafec742SSukumar Swaminathan 		/*
3892bafec742SSukumar Swaminathan 		 * Add interrupt handler for rx and tx rings: vector[0 -
3893bafec742SSukumar Swaminathan 		 * (qlge->intr_cnt -1)].
3894bafec742SSukumar Swaminathan 		 */
3895bafec742SSukumar Swaminathan 		value = 0;
3896bafec742SSukumar Swaminathan 		for (vector = 0; vector < qlge->intr_cnt; vector++) {
3897bafec742SSukumar Swaminathan 			ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3898bafec742SSukumar Swaminathan 
3899bafec742SSukumar Swaminathan 			/*
3900bafec742SSukumar Swaminathan 			 * associate interrupt vector with interrupt handler
3901bafec742SSukumar Swaminathan 			 */
3902bafec742SSukumar Swaminathan 			rc = ddi_intr_add_handler(qlge->htable[vector],
3903bafec742SSukumar Swaminathan 			    (ddi_intr_handler_t *)intr_ctx->handler,
3904bafec742SSukumar Swaminathan 			    (void *)&qlge->rx_ring[vector], NULL);
3905bafec742SSukumar Swaminathan 
3906accf27a5SSukumar Swaminathan 			QL_PRINT(DBG_INIT, ("rx_ring[%d] 0x%p\n",
3907accf27a5SSukumar Swaminathan 			    vector, &qlge->rx_ring[vector]));
3908bafec742SSukumar Swaminathan 			if (rc != DDI_SUCCESS) {
3909bafec742SSukumar Swaminathan 				QL_PRINT(DBG_INIT,
3910bafec742SSukumar Swaminathan 				    ("Add rx interrupt handler failed. "
3911bafec742SSukumar Swaminathan 				    "return: %d, vector: %d", rc, vector));
3912bafec742SSukumar Swaminathan 				for (vector--; vector >= 0; vector--) {
3913bafec742SSukumar Swaminathan 					(void) ddi_intr_remove_handler(
3914bafec742SSukumar Swaminathan 					    qlge->htable[vector]);
3915bafec742SSukumar Swaminathan 				}
3916bafec742SSukumar Swaminathan 				return (DDI_FAILURE);
3917bafec742SSukumar Swaminathan 			}
3918bafec742SSukumar Swaminathan 			intr_ctx++;
3919bafec742SSukumar Swaminathan 		}
3920bafec742SSukumar Swaminathan 		break;
3921bafec742SSukumar Swaminathan 
3922bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_MSI:
3923bafec742SSukumar Swaminathan 		/*
3924bafec742SSukumar Swaminathan 		 * Add interrupt handlers for the only vector
3925bafec742SSukumar Swaminathan 		 */
3926bafec742SSukumar Swaminathan 		ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3927bafec742SSukumar Swaminathan 
3928bafec742SSukumar Swaminathan 		rc = ddi_intr_add_handler(qlge->htable[vector],
3929bafec742SSukumar Swaminathan 		    ql_isr,
3930bafec742SSukumar Swaminathan 		    (caddr_t)&qlge->rx_ring[0], NULL);
3931bafec742SSukumar Swaminathan 
3932bafec742SSukumar Swaminathan 		if (rc != DDI_SUCCESS) {
3933bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT,
3934bafec742SSukumar Swaminathan 			    ("Add MSI interrupt handler failed: %d\n", rc));
3935bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
3936bafec742SSukumar Swaminathan 		}
3937bafec742SSukumar Swaminathan 		break;
3938bafec742SSukumar Swaminathan 
3939bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_FIXED:
3940bafec742SSukumar Swaminathan 		/*
3941bafec742SSukumar Swaminathan 		 * Add interrupt handlers for the only vector
3942bafec742SSukumar Swaminathan 		 */
3943bafec742SSukumar Swaminathan 		ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3944bafec742SSukumar Swaminathan 
3945bafec742SSukumar Swaminathan 		rc = ddi_intr_add_handler(qlge->htable[vector],
3946bafec742SSukumar Swaminathan 		    ql_isr,
3947bafec742SSukumar Swaminathan 		    (caddr_t)&qlge->rx_ring[0], NULL);
3948bafec742SSukumar Swaminathan 
3949bafec742SSukumar Swaminathan 		if (rc != DDI_SUCCESS) {
3950bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT,
3951bafec742SSukumar Swaminathan 			    ("Add legacy interrupt handler failed: %d\n", rc));
3952bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
3953bafec742SSukumar Swaminathan 		}
3954bafec742SSukumar Swaminathan 		break;
3955bafec742SSukumar Swaminathan 
3956bafec742SSukumar Swaminathan 	default:
3957bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3958bafec742SSukumar Swaminathan 	}
3959bafec742SSukumar Swaminathan 
3960bafec742SSukumar Swaminathan 	/* Enable interrupts */
3961bafec742SSukumar Swaminathan 	/* Block enable */
3962bafec742SSukumar Swaminathan 	if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
3963bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Block enabling %d interrupt(s)\n",
3964bafec742SSukumar Swaminathan 		    qlge->intr_cnt));
3965bafec742SSukumar Swaminathan 		(void) ddi_intr_block_enable(qlge->htable, qlge->intr_cnt);
3966bafec742SSukumar Swaminathan 	} else { /* Non block enable */
3967bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->intr_cnt; i++) {
3968accf27a5SSukumar Swaminathan 			QL_PRINT(DBG_INIT, ("Non Block Enabling interrupt %d "
3969bafec742SSukumar Swaminathan 			    "handle 0x%x\n", i, qlge->htable[i]));
3970bafec742SSukumar Swaminathan 			(void) ddi_intr_enable(qlge->htable[i]);
3971bafec742SSukumar Swaminathan 		}
3972bafec742SSukumar Swaminathan 	}
3973bafec742SSukumar Swaminathan 	qlge->sequence |= INIT_INTR_ENABLED;
3974bafec742SSukumar Swaminathan 
3975bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
3976bafec742SSukumar Swaminathan }
3977bafec742SSukumar Swaminathan 
3978bafec742SSukumar Swaminathan /*
3979bafec742SSukumar Swaminathan  * Here we build the intr_ctx structures based on
3980bafec742SSukumar Swaminathan  * our rx_ring count and intr vector count.
3981bafec742SSukumar Swaminathan  * The intr_ctx structure is used to hook each vector
3982bafec742SSukumar Swaminathan  * to possibly different handlers.
3983bafec742SSukumar Swaminathan  */
3984bafec742SSukumar Swaminathan static void
3985bafec742SSukumar Swaminathan ql_resolve_queues_to_irqs(qlge_t *qlge)
3986bafec742SSukumar Swaminathan {
3987bafec742SSukumar Swaminathan 	int i = 0;
3988bafec742SSukumar Swaminathan 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
3989bafec742SSukumar Swaminathan 
3990bafec742SSukumar Swaminathan 	if (qlge->intr_type == DDI_INTR_TYPE_MSIX) {
3991bafec742SSukumar Swaminathan 		/*
3992bafec742SSukumar Swaminathan 		 * Each rx_ring has its own intr_ctx since we
3993bafec742SSukumar Swaminathan 		 * have separate vectors for each queue.
3994bafec742SSukumar Swaminathan 		 * This only true when MSI-X is enabled.
3995bafec742SSukumar Swaminathan 		 */
3996bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->intr_cnt; i++, intr_ctx++) {
3997bafec742SSukumar Swaminathan 			qlge->rx_ring[i].irq = i;
3998bafec742SSukumar Swaminathan 			intr_ctx->intr = i;
3999bafec742SSukumar Swaminathan 			intr_ctx->qlge = qlge;
4000bafec742SSukumar Swaminathan 
4001bafec742SSukumar Swaminathan 			/*
4002bafec742SSukumar Swaminathan 			 * We set up each vectors enable/disable/read bits so
4003bafec742SSukumar Swaminathan 			 * there's no bit/mask calculations in critical path.
4004bafec742SSukumar Swaminathan 			 */
4005bafec742SSukumar Swaminathan 			intr_ctx->intr_en_mask =
4006bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4007bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
4008bafec742SSukumar Swaminathan 			    INTR_EN_IHD | i;
4009bafec742SSukumar Swaminathan 			intr_ctx->intr_dis_mask =
4010bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4011bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
4012bafec742SSukumar Swaminathan 			    INTR_EN_IHD | i;
4013bafec742SSukumar Swaminathan 			intr_ctx->intr_read_mask =
4014bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4015bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
4016bafec742SSukumar Swaminathan 			    | i;
4017bafec742SSukumar Swaminathan 
4018bafec742SSukumar Swaminathan 			if (i == 0) {
4019bafec742SSukumar Swaminathan 				/*
4020bafec742SSukumar Swaminathan 				 * Default queue handles bcast/mcast plus
4021bafec742SSukumar Swaminathan 				 * async events.
4022bafec742SSukumar Swaminathan 				 */
4023bafec742SSukumar Swaminathan 				intr_ctx->handler = ql_isr;
4024bafec742SSukumar Swaminathan 			} else if (qlge->rx_ring[i].type == TX_Q) {
4025bafec742SSukumar Swaminathan 				/*
4026bafec742SSukumar Swaminathan 				 * Outbound queue is for outbound completions
4027bafec742SSukumar Swaminathan 				 * only.
4028bafec742SSukumar Swaminathan 				 */
4029accf27a5SSukumar Swaminathan 				if (qlge->isr_stride)
4030accf27a5SSukumar Swaminathan 					intr_ctx->handler = ql_msix_isr;
4031accf27a5SSukumar Swaminathan 				else
4032bafec742SSukumar Swaminathan 					intr_ctx->handler = ql_msix_tx_isr;
4033bafec742SSukumar Swaminathan 			} else {
4034bafec742SSukumar Swaminathan 				/*
4035bafec742SSukumar Swaminathan 				 * Inbound queues handle unicast frames only.
4036bafec742SSukumar Swaminathan 				 */
4037accf27a5SSukumar Swaminathan 				if (qlge->isr_stride)
4038accf27a5SSukumar Swaminathan 					intr_ctx->handler = ql_msix_isr;
4039accf27a5SSukumar Swaminathan 				else
4040accf27a5SSukumar Swaminathan 					intr_ctx->handler = ql_msix_rx_isr;
4041accf27a5SSukumar Swaminathan 			}
4042accf27a5SSukumar Swaminathan 		}
4043accf27a5SSukumar Swaminathan 		i = qlge->intr_cnt;
4044accf27a5SSukumar Swaminathan 		for (; i < qlge->rx_ring_count; i++, intr_ctx++) {
4045accf27a5SSukumar Swaminathan 			int iv = i - qlge->isr_stride;
4046accf27a5SSukumar Swaminathan 			qlge->rx_ring[i].irq = iv;
4047accf27a5SSukumar Swaminathan 			intr_ctx->intr = iv;
4048accf27a5SSukumar Swaminathan 			intr_ctx->qlge = qlge;
4049accf27a5SSukumar Swaminathan 
4050accf27a5SSukumar Swaminathan 			/*
4051accf27a5SSukumar Swaminathan 			 * We set up each vectors enable/disable/read bits so
4052accf27a5SSukumar Swaminathan 			 * there's no bit/mask calculations in critical path.
4053accf27a5SSukumar Swaminathan 			 */
4054accf27a5SSukumar Swaminathan 			intr_ctx->intr_en_mask =
4055accf27a5SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4056accf27a5SSukumar Swaminathan 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
4057accf27a5SSukumar Swaminathan 			    INTR_EN_IHD | iv;
4058accf27a5SSukumar Swaminathan 			intr_ctx->intr_dis_mask =
4059accf27a5SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4060accf27a5SSukumar Swaminathan 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
4061accf27a5SSukumar Swaminathan 			    INTR_EN_IHD | iv;
4062accf27a5SSukumar Swaminathan 			intr_ctx->intr_read_mask =
4063accf27a5SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4064accf27a5SSukumar Swaminathan 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
4065accf27a5SSukumar Swaminathan 			    | iv;
4066accf27a5SSukumar Swaminathan 
4067accf27a5SSukumar Swaminathan 			if (qlge->rx_ring[i].type == TX_Q) {
4068accf27a5SSukumar Swaminathan 				/*
4069accf27a5SSukumar Swaminathan 				 * Outbound queue is for outbound completions
4070accf27a5SSukumar Swaminathan 				 * only.
4071accf27a5SSukumar Swaminathan 				 */
4072accf27a5SSukumar Swaminathan 				intr_ctx->handler = ql_msix_isr;
4073accf27a5SSukumar Swaminathan 			} else {
4074accf27a5SSukumar Swaminathan 				/*
4075accf27a5SSukumar Swaminathan 				 * Inbound queues handle unicast frames only.
4076accf27a5SSukumar Swaminathan 				 */
4077bafec742SSukumar Swaminathan 				intr_ctx->handler = ql_msix_rx_isr;
4078bafec742SSukumar Swaminathan 			}
4079bafec742SSukumar Swaminathan 		}
4080bafec742SSukumar Swaminathan 	} else {
4081bafec742SSukumar Swaminathan 		/*
4082bafec742SSukumar Swaminathan 		 * All rx_rings use the same intr_ctx since
4083bafec742SSukumar Swaminathan 		 * there is only one vector.
4084bafec742SSukumar Swaminathan 		 */
4085bafec742SSukumar Swaminathan 		intr_ctx->intr = 0;
4086bafec742SSukumar Swaminathan 		intr_ctx->qlge = qlge;
4087bafec742SSukumar Swaminathan 		/*
4088bafec742SSukumar Swaminathan 		 * We set up each vectors enable/disable/read bits so
4089bafec742SSukumar Swaminathan 		 * there's no bit/mask calculations in the critical path.
4090bafec742SSukumar Swaminathan 		 */
4091bafec742SSukumar Swaminathan 		intr_ctx->intr_en_mask =
4092bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4093bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_ENABLE;
4094bafec742SSukumar Swaminathan 		intr_ctx->intr_dis_mask =
4095bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4096bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_DISABLE;
4097bafec742SSukumar Swaminathan 		intr_ctx->intr_read_mask =
4098bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4099bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_READ;
4100bafec742SSukumar Swaminathan 		/*
4101bafec742SSukumar Swaminathan 		 * Single interrupt means one handler for all rings.
4102bafec742SSukumar Swaminathan 		 */
4103bafec742SSukumar Swaminathan 		intr_ctx->handler = ql_isr;
4104bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->rx_ring_count; i++)
4105bafec742SSukumar Swaminathan 			qlge->rx_ring[i].irq = 0;
4106bafec742SSukumar Swaminathan 	}
4107bafec742SSukumar Swaminathan }
4108bafec742SSukumar Swaminathan 
4109bafec742SSukumar Swaminathan 
4110bafec742SSukumar Swaminathan /*
4111bafec742SSukumar Swaminathan  * Free allocated interrupts.
4112bafec742SSukumar Swaminathan  */
4113bafec742SSukumar Swaminathan static void
4114bafec742SSukumar Swaminathan ql_free_irq_vectors(qlge_t *qlge)
4115bafec742SSukumar Swaminathan {
4116bafec742SSukumar Swaminathan 	int i;
4117bafec742SSukumar Swaminathan 	int rc;
4118bafec742SSukumar Swaminathan 
4119bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_INTR_ENABLED) {
4120bafec742SSukumar Swaminathan 		/* Disable all interrupts */
4121bafec742SSukumar Swaminathan 		if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
4122bafec742SSukumar Swaminathan 			/* Call ddi_intr_block_disable() */
4123bafec742SSukumar Swaminathan 			(void) ddi_intr_block_disable(qlge->htable,
4124bafec742SSukumar Swaminathan 			    qlge->intr_cnt);
4125bafec742SSukumar Swaminathan 		} else {
4126bafec742SSukumar Swaminathan 			for (i = 0; i < qlge->intr_cnt; i++) {
4127bafec742SSukumar Swaminathan 				(void) ddi_intr_disable(qlge->htable[i]);
4128bafec742SSukumar Swaminathan 			}
4129bafec742SSukumar Swaminathan 		}
4130bafec742SSukumar Swaminathan 
4131bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_INTR_ENABLED;
4132bafec742SSukumar Swaminathan 	}
4133bafec742SSukumar Swaminathan 
4134bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->intr_cnt; i++) {
4135bafec742SSukumar Swaminathan 
4136bafec742SSukumar Swaminathan 		if (qlge->sequence & INIT_ADD_INTERRUPT)
4137bafec742SSukumar Swaminathan 			(void) ddi_intr_remove_handler(qlge->htable[i]);
4138bafec742SSukumar Swaminathan 
4139bafec742SSukumar Swaminathan 		if (qlge->sequence & INIT_INTR_ALLOC) {
4140bafec742SSukumar Swaminathan 			rc = ddi_intr_free(qlge->htable[i]);
4141bafec742SSukumar Swaminathan 			if (rc != DDI_SUCCESS) {
4142bafec742SSukumar Swaminathan 				/* EMPTY */
4143bafec742SSukumar Swaminathan 				QL_PRINT(DBG_INIT, ("Free intr failed: %d",
4144bafec742SSukumar Swaminathan 				    rc));
4145bafec742SSukumar Swaminathan 			}
4146bafec742SSukumar Swaminathan 		}
4147bafec742SSukumar Swaminathan 	}
4148bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_INTR_ALLOC)
4149bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_INTR_ALLOC;
4150bafec742SSukumar Swaminathan 
4151bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_ADD_INTERRUPT)
4152bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_ADD_INTERRUPT;
4153bafec742SSukumar Swaminathan 
4154bafec742SSukumar Swaminathan 	if (qlge->htable) {
4155bafec742SSukumar Swaminathan 		kmem_free(qlge->htable, qlge->intr_size);
4156bafec742SSukumar Swaminathan 		qlge->htable = NULL;
4157bafec742SSukumar Swaminathan 	}
4158bafec742SSukumar Swaminathan }
4159bafec742SSukumar Swaminathan 
4160bafec742SSukumar Swaminathan /*
4161bafec742SSukumar Swaminathan  * Allocate interrupt vectors
4162bafec742SSukumar Swaminathan  * For legacy and MSI, only 1 handle is needed.
4163bafec742SSukumar Swaminathan  * For MSI-X, if fewer than 2 vectors are available, return failure.
4164bafec742SSukumar Swaminathan  * Upon success, this maps the vectors to rx and tx rings for
4165bafec742SSukumar Swaminathan  * interrupts.
4166bafec742SSukumar Swaminathan  */
4167bafec742SSukumar Swaminathan static int
4168bafec742SSukumar Swaminathan ql_request_irq_vectors(qlge_t *qlge, int intr_type)
4169bafec742SSukumar Swaminathan {
4170bafec742SSukumar Swaminathan 	dev_info_t *devinfo;
4171bafec742SSukumar Swaminathan 	uint32_t request, orig;
4172bafec742SSukumar Swaminathan 	int count, avail, actual;
4173bafec742SSukumar Swaminathan 	int minimum;
4174bafec742SSukumar Swaminathan 	int rc;
4175bafec742SSukumar Swaminathan 
4176bafec742SSukumar Swaminathan 	devinfo = qlge->dip;
4177bafec742SSukumar Swaminathan 
4178bafec742SSukumar Swaminathan 	switch (intr_type) {
4179bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_FIXED:
4180bafec742SSukumar Swaminathan 		request = 1;	/* Request 1 legacy interrupt handle */
4181bafec742SSukumar Swaminathan 		minimum = 1;
4182bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("interrupt type: legacy\n"));
4183bafec742SSukumar Swaminathan 		break;
4184bafec742SSukumar Swaminathan 
4185bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_MSI:
4186bafec742SSukumar Swaminathan 		request = 1;	/* Request 1 MSI interrupt handle */
4187bafec742SSukumar Swaminathan 		minimum = 1;
4188bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("interrupt type: MSI\n"));
4189bafec742SSukumar Swaminathan 		break;
4190bafec742SSukumar Swaminathan 
4191bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_MSIX:
4192bafec742SSukumar Swaminathan 		/*
4193bafec742SSukumar Swaminathan 		 * Ideal number of vectors for the adapter is
4194bafec742SSukumar Swaminathan 		 * # rss rings + tx completion rings for default completion
4195bafec742SSukumar Swaminathan 		 * queue.
4196bafec742SSukumar Swaminathan 		 */
4197bafec742SSukumar Swaminathan 		request = qlge->rx_ring_count;
4198bafec742SSukumar Swaminathan 
4199bafec742SSukumar Swaminathan 		orig = request;
4200bafec742SSukumar Swaminathan 		if (request > (MAX_RX_RINGS))
4201bafec742SSukumar Swaminathan 			request = MAX_RX_RINGS;
4202bafec742SSukumar Swaminathan 		minimum = 2;
4203bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("interrupt type: MSI-X\n"));
4204bafec742SSukumar Swaminathan 		break;
4205bafec742SSukumar Swaminathan 
4206bafec742SSukumar Swaminathan 	default:
4207bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Invalid parameter\n"));
4208bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
4209bafec742SSukumar Swaminathan 	}
4210bafec742SSukumar Swaminathan 
4211bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("interrupt handles requested: %d  minimum: %d\n",
4212bafec742SSukumar Swaminathan 	    request, minimum));
4213bafec742SSukumar Swaminathan 
4214bafec742SSukumar Swaminathan 	/*
4215bafec742SSukumar Swaminathan 	 * Get number of supported interrupts
4216bafec742SSukumar Swaminathan 	 */
4217bafec742SSukumar Swaminathan 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
4218bafec742SSukumar Swaminathan 	if ((rc != DDI_SUCCESS) || (count < minimum)) {
4219bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Get interrupt number failed. Return: %d, "
4220bafec742SSukumar Swaminathan 		    "count: %d\n", rc, count));
4221bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
4222bafec742SSukumar Swaminathan 	}
4223bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("interrupts supported: %d\n", count));
4224bafec742SSukumar Swaminathan 
4225bafec742SSukumar Swaminathan 	/*
4226bafec742SSukumar Swaminathan 	 * Get number of available interrupts
4227bafec742SSukumar Swaminathan 	 */
4228bafec742SSukumar Swaminathan 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
4229bafec742SSukumar Swaminathan 	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
4230bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT,
4231bafec742SSukumar Swaminathan 		    ("Get interrupt available number failed. Return:"
4232bafec742SSukumar Swaminathan 		    " %d, available: %d\n", rc, avail));
4233bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
4234bafec742SSukumar Swaminathan 	}
4235bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("interrupts available: %d\n", avail));
4236bafec742SSukumar Swaminathan 
4237bafec742SSukumar Swaminathan 	if (avail < request) {
4238bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Request %d handles, %d available\n",
4239bafec742SSukumar Swaminathan 		    request, avail));
4240bafec742SSukumar Swaminathan 		request = avail;
4241bafec742SSukumar Swaminathan 	}
4242bafec742SSukumar Swaminathan 
4243bafec742SSukumar Swaminathan 	actual = 0;
4244bafec742SSukumar Swaminathan 	qlge->intr_cnt = 0;
4245bafec742SSukumar Swaminathan 
4246bafec742SSukumar Swaminathan 	/*
4247bafec742SSukumar Swaminathan 	 * Allocate an array of interrupt handles
4248bafec742SSukumar Swaminathan 	 */
4249bafec742SSukumar Swaminathan 	qlge->intr_size = (size_t)(request * sizeof (ddi_intr_handle_t));
4250bafec742SSukumar Swaminathan 	qlge->htable = kmem_alloc(qlge->intr_size, KM_SLEEP);
4251bafec742SSukumar Swaminathan 
4252bafec742SSukumar Swaminathan 	rc = ddi_intr_alloc(devinfo, qlge->htable, intr_type, 0,
4253bafec742SSukumar Swaminathan 	    (int)request, &actual, DDI_INTR_ALLOC_NORMAL);
4254bafec742SSukumar Swaminathan 	if (rc != DDI_SUCCESS) {
4255bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d) Allocate interrupts failed. return:"
4256bafec742SSukumar Swaminathan 		    " %d, request: %d, actual: %d",
4257bafec742SSukumar Swaminathan 		    __func__, qlge->instance, rc, request, actual);
4258bafec742SSukumar Swaminathan 		goto ql_intr_alloc_fail;
4259bafec742SSukumar Swaminathan 	}
4260bafec742SSukumar Swaminathan 	qlge->intr_cnt = actual;
4261bafec742SSukumar Swaminathan 
4262bafec742SSukumar Swaminathan 	qlge->sequence |= INIT_INTR_ALLOC;
4263bafec742SSukumar Swaminathan 
4264bafec742SSukumar Swaminathan 	/*
4265bafec742SSukumar Swaminathan 	 * If the actual number of vectors is less than the minumum
4266bafec742SSukumar Swaminathan 	 * then fail.
4267bafec742SSukumar Swaminathan 	 */
4268bafec742SSukumar Swaminathan 	if (actual < minimum) {
4269bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
4270bafec742SSukumar Swaminathan 		    "Insufficient interrupt handles available: %d", actual);
4271bafec742SSukumar Swaminathan 		goto ql_intr_alloc_fail;
4272bafec742SSukumar Swaminathan 	}
4273bafec742SSukumar Swaminathan 
4274bafec742SSukumar Swaminathan 	/*
4275bafec742SSukumar Swaminathan 	 * For MSI-X, actual might force us to reduce number of tx & rx rings
4276bafec742SSukumar Swaminathan 	 */
4277bafec742SSukumar Swaminathan 	if ((intr_type == DDI_INTR_TYPE_MSIX) && (orig > actual)) {
4278accf27a5SSukumar Swaminathan 		if (actual >= (orig / 2)) {
4279accf27a5SSukumar Swaminathan 			count = orig / 2;
4280accf27a5SSukumar Swaminathan 			qlge->rss_ring_count = count;
4281accf27a5SSukumar Swaminathan 			qlge->tx_ring_count = count;
4282accf27a5SSukumar Swaminathan 			qlge->isr_stride = count;
4283accf27a5SSukumar Swaminathan 		} else if (actual >= (orig / 4)) {
4284accf27a5SSukumar Swaminathan 			count = orig / 4;
4285accf27a5SSukumar Swaminathan 			qlge->rss_ring_count = count;
4286accf27a5SSukumar Swaminathan 			qlge->tx_ring_count = count;
4287accf27a5SSukumar Swaminathan 			qlge->isr_stride = count;
4288accf27a5SSukumar Swaminathan 		} else if (actual >= (orig / 8)) {
4289accf27a5SSukumar Swaminathan 			count = orig / 8;
4290accf27a5SSukumar Swaminathan 			qlge->rss_ring_count = count;
4291accf27a5SSukumar Swaminathan 			qlge->tx_ring_count = count;
4292accf27a5SSukumar Swaminathan 			qlge->isr_stride = count;
4293accf27a5SSukumar Swaminathan 		} else if (actual < MAX_RX_RINGS) {
4294bafec742SSukumar Swaminathan 			qlge->tx_ring_count = 1;
4295bafec742SSukumar Swaminathan 			qlge->rss_ring_count = actual - 1;
4296accf27a5SSukumar Swaminathan 		}
4297accf27a5SSukumar Swaminathan 		qlge->intr_cnt = count;
4298bafec742SSukumar Swaminathan 		qlge->rx_ring_count = qlge->tx_ring_count +
4299bafec742SSukumar Swaminathan 		    qlge->rss_ring_count;
4300bafec742SSukumar Swaminathan 	}
4301accf27a5SSukumar Swaminathan 	cmn_err(CE_NOTE, "!qlge(%d) tx %d, rss %d, stride %d\n", qlge->instance,
4302accf27a5SSukumar Swaminathan 	    qlge->tx_ring_count, qlge->rss_ring_count, qlge->isr_stride);
4303accf27a5SSukumar Swaminathan 
4304bafec742SSukumar Swaminathan 	/*
4305bafec742SSukumar Swaminathan 	 * Get priority for first vector, assume remaining are all the same
4306bafec742SSukumar Swaminathan 	 */
4307bafec742SSukumar Swaminathan 	rc = ddi_intr_get_pri(qlge->htable[0], &qlge->intr_pri);
4308bafec742SSukumar Swaminathan 	if (rc != DDI_SUCCESS) {
4309bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Get interrupt priority failed: %d\n", rc));
4310bafec742SSukumar Swaminathan 		goto ql_intr_alloc_fail;
4311bafec742SSukumar Swaminathan 	}
4312bafec742SSukumar Swaminathan 
4313bafec742SSukumar Swaminathan 	rc = ddi_intr_get_cap(qlge->htable[0], &qlge->intr_cap);
4314bafec742SSukumar Swaminathan 	if (rc != DDI_SUCCESS) {
4315bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Get interrupt cap failed: %d\n", rc));
4316bafec742SSukumar Swaminathan 		goto ql_intr_alloc_fail;
4317bafec742SSukumar Swaminathan 	}
4318bafec742SSukumar Swaminathan 
4319bafec742SSukumar Swaminathan 	qlge->intr_type = intr_type;
4320bafec742SSukumar Swaminathan 
4321bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
4322bafec742SSukumar Swaminathan 
4323bafec742SSukumar Swaminathan ql_intr_alloc_fail:
4324bafec742SSukumar Swaminathan 	ql_free_irq_vectors(qlge);
4325bafec742SSukumar Swaminathan 
4326bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
4327bafec742SSukumar Swaminathan }
4328bafec742SSukumar Swaminathan 
4329bafec742SSukumar Swaminathan /*
4330bafec742SSukumar Swaminathan  * Allocate interrupt vector(s) for one of the following interrupt types, MSI-X,
4331bafec742SSukumar Swaminathan  * MSI or Legacy. In MSI and Legacy modes we only support a single receive and
4332bafec742SSukumar Swaminathan  * transmit queue.
4333bafec742SSukumar Swaminathan  */
4334bafec742SSukumar Swaminathan int
4335bafec742SSukumar Swaminathan ql_alloc_irqs(qlge_t *qlge)
4336bafec742SSukumar Swaminathan {
4337bafec742SSukumar Swaminathan 	int intr_types;
4338bafec742SSukumar Swaminathan 	int rval;
4339bafec742SSukumar Swaminathan 
4340bafec742SSukumar Swaminathan 	/*
4341bafec742SSukumar Swaminathan 	 * Get supported interrupt types
4342bafec742SSukumar Swaminathan 	 */
4343bafec742SSukumar Swaminathan 	if (ddi_intr_get_supported_types(qlge->dip, &intr_types)
4344bafec742SSukumar Swaminathan 	    != DDI_SUCCESS) {
4345bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d):ddi_intr_get_supported_types failed",
4346bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
4347bafec742SSukumar Swaminathan 
4348bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
4349bafec742SSukumar Swaminathan 	}
4350bafec742SSukumar Swaminathan 
4351bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("%s(%d) Interrupt types supported %d\n",
4352bafec742SSukumar Swaminathan 	    __func__, qlge->instance, intr_types));
4353bafec742SSukumar Swaminathan 
4354bafec742SSukumar Swaminathan 	/* Install MSI-X interrupts */
4355bafec742SSukumar Swaminathan 	if ((intr_types & DDI_INTR_TYPE_MSIX) != 0) {
4356bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt supported %d\n",
4357bafec742SSukumar Swaminathan 		    __func__, qlge->instance, intr_types));
4358bafec742SSukumar Swaminathan 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSIX);
4359bafec742SSukumar Swaminathan 		if (rval == DDI_SUCCESS) {
4360bafec742SSukumar Swaminathan 			return (rval);
4361bafec742SSukumar Swaminathan 		}
4362bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt allocation failed,"
4363bafec742SSukumar Swaminathan 		    " trying MSI interrupts ...\n", __func__, qlge->instance));
4364bafec742SSukumar Swaminathan 	}
4365bafec742SSukumar Swaminathan 
4366bafec742SSukumar Swaminathan 	/*
4367bafec742SSukumar Swaminathan 	 * We will have 2 completion queues in MSI / Legacy mode,
4368bafec742SSukumar Swaminathan 	 * Queue 0 for default completions
4369bafec742SSukumar Swaminathan 	 * Queue 1 for transmit completions
4370bafec742SSukumar Swaminathan 	 */
4371bafec742SSukumar Swaminathan 	qlge->rss_ring_count = 1; /* Default completion queue (0) for all */
4372bafec742SSukumar Swaminathan 	qlge->tx_ring_count = 1; /* Single tx completion queue */
4373bafec742SSukumar Swaminathan 	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
4374bafec742SSukumar Swaminathan 
4375bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("%s(%d) Falling back to single completion queue \n",
4376bafec742SSukumar Swaminathan 	    __func__, qlge->instance));
4377bafec742SSukumar Swaminathan 	/*
4378bafec742SSukumar Swaminathan 	 * Add the h/w interrupt handler and initialise mutexes
4379bafec742SSukumar Swaminathan 	 */
4380bafec742SSukumar Swaminathan 	rval = DDI_FAILURE;
4381bafec742SSukumar Swaminathan 
4382bafec742SSukumar Swaminathan 	/*
4383bafec742SSukumar Swaminathan 	 * If OS supports MSIX interrupt but fails to allocate, then try
4384bafec742SSukumar Swaminathan 	 * MSI interrupt. If MSI interrupt allocation fails also, then roll
4385bafec742SSukumar Swaminathan 	 * back to fixed interrupt.
4386bafec742SSukumar Swaminathan 	 */
4387bafec742SSukumar Swaminathan 	if (intr_types & DDI_INTR_TYPE_MSI) {
4388bafec742SSukumar Swaminathan 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSI);
4389bafec742SSukumar Swaminathan 		if (rval == DDI_SUCCESS) {
4390bafec742SSukumar Swaminathan 			qlge->intr_type = DDI_INTR_TYPE_MSI;
4391bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT, ("%s(%d) use MSI Interrupt \n",
4392bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
4393bafec742SSukumar Swaminathan 		}
4394bafec742SSukumar Swaminathan 	}
4395bafec742SSukumar Swaminathan 
4396bafec742SSukumar Swaminathan 	/* Try Fixed interrupt Legacy mode */
4397bafec742SSukumar Swaminathan 	if (rval != DDI_SUCCESS) {
4398bafec742SSukumar Swaminathan 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_FIXED);
4399bafec742SSukumar Swaminathan 		if (rval != DDI_SUCCESS) {
4400bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d):Legacy mode interrupt "
4401bafec742SSukumar Swaminathan 			    "allocation failed",
4402bafec742SSukumar Swaminathan 			    __func__, qlge->instance);
4403bafec742SSukumar Swaminathan 		} else {
4404bafec742SSukumar Swaminathan 			qlge->intr_type = DDI_INTR_TYPE_FIXED;
4405bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT, ("%s(%d) use Fixed Interrupt \n",
4406bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
4407bafec742SSukumar Swaminathan 		}
4408bafec742SSukumar Swaminathan 	}
4409bafec742SSukumar Swaminathan 
4410bafec742SSukumar Swaminathan 	return (rval);
4411bafec742SSukumar Swaminathan }
4412bafec742SSukumar Swaminathan 
4413bafec742SSukumar Swaminathan static void
4414bafec742SSukumar Swaminathan ql_free_rx_tx_locks(qlge_t *qlge)
4415bafec742SSukumar Swaminathan {
4416bafec742SSukumar Swaminathan 	int i;
4417bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
4418bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
4419bafec742SSukumar Swaminathan 
4420bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
4421bafec742SSukumar Swaminathan 		tx_ring = &qlge->tx_ring[i];
4422bafec742SSukumar Swaminathan 		mutex_destroy(&tx_ring->tx_lock);
4423bafec742SSukumar Swaminathan 	}
4424bafec742SSukumar Swaminathan 
4425bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
4426bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
4427bafec742SSukumar Swaminathan 		mutex_destroy(&rx_ring->rx_lock);
4428bafec742SSukumar Swaminathan 		mutex_destroy(&rx_ring->sbq_lock);
4429bafec742SSukumar Swaminathan 		mutex_destroy(&rx_ring->lbq_lock);
4430bafec742SSukumar Swaminathan 	}
4431bafec742SSukumar Swaminathan }
4432bafec742SSukumar Swaminathan 
4433bafec742SSukumar Swaminathan /*
4434bafec742SSukumar Swaminathan  * Frees all resources allocated during attach.
4435bafec742SSukumar Swaminathan  *
4436bafec742SSukumar Swaminathan  * Input:
4437bafec742SSukumar Swaminathan  * dip = pointer to device information structure.
4438bafec742SSukumar Swaminathan  * sequence = bits indicating resources to free.
4439bafec742SSukumar Swaminathan  *
4440bafec742SSukumar Swaminathan  * Context:
4441bafec742SSukumar Swaminathan  * Kernel context.
4442bafec742SSukumar Swaminathan  */
4443bafec742SSukumar Swaminathan static void
4444accf27a5SSukumar Swaminathan ql_free_resources(qlge_t *qlge)
4445bafec742SSukumar Swaminathan {
4446bafec742SSukumar Swaminathan 
4447bafec742SSukumar Swaminathan 	/* Disable driver timer */
4448bafec742SSukumar Swaminathan 	ql_stop_timer(qlge);
4449bafec742SSukumar Swaminathan 
4450bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_MAC_REGISTERED) {
44510662fbf4SSukumar Swaminathan 		(void) mac_unregister(qlge->mh);
4452bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_MAC_REGISTERED;
4453bafec742SSukumar Swaminathan 	}
4454bafec742SSukumar Swaminathan 
4455bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_MAC_ALLOC) {
4456bafec742SSukumar Swaminathan 		/* Nothing to do, macp is already freed */
4457bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_MAC_ALLOC;
4458bafec742SSukumar Swaminathan 	}
4459bafec742SSukumar Swaminathan 
4460bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_PCI_CONFIG_SETUP) {
4461bafec742SSukumar Swaminathan 		pci_config_teardown(&qlge->pci_handle);
4462bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_PCI_CONFIG_SETUP;
4463bafec742SSukumar Swaminathan 	}
4464bafec742SSukumar Swaminathan 
4465bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_ADD_INTERRUPT) {
4466bafec742SSukumar Swaminathan 		ql_free_irq_vectors(qlge);
4467bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_ADD_INTERRUPT;
4468bafec742SSukumar Swaminathan 	}
4469bafec742SSukumar Swaminathan 
4470bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_ADD_SOFT_INTERRUPT) {
4471bafec742SSukumar Swaminathan 		(void) ddi_intr_remove_softint(qlge->mpi_event_intr_hdl);
4472bafec742SSukumar Swaminathan 		(void) ddi_intr_remove_softint(qlge->mpi_reset_intr_hdl);
4473bafec742SSukumar Swaminathan 		(void) ddi_intr_remove_softint(qlge->asic_reset_intr_hdl);
4474bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_ADD_SOFT_INTERRUPT;
4475bafec742SSukumar Swaminathan 	}
4476bafec742SSukumar Swaminathan 
4477bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_KSTATS) {
4478bafec742SSukumar Swaminathan 		ql_fini_kstats(qlge);
4479bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_KSTATS;
4480bafec742SSukumar Swaminathan 	}
4481bafec742SSukumar Swaminathan 
4482bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_MUTEX) {
4483bafec742SSukumar Swaminathan 		mutex_destroy(&qlge->gen_mutex);
4484bafec742SSukumar Swaminathan 		mutex_destroy(&qlge->hw_mutex);
4485bafec742SSukumar Swaminathan 		mutex_destroy(&qlge->mbx_mutex);
4486bafec742SSukumar Swaminathan 		cv_destroy(&qlge->cv_mbx_intr);
4487bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_MUTEX;
4488bafec742SSukumar Swaminathan 	}
4489bafec742SSukumar Swaminathan 
4490bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_LOCKS_CREATED) {
4491bafec742SSukumar Swaminathan 		ql_free_rx_tx_locks(qlge);
4492bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_LOCKS_CREATED;
4493bafec742SSukumar Swaminathan 	}
4494bafec742SSukumar Swaminathan 
4495bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_MEMORY_ALLOC) {
4496bafec742SSukumar Swaminathan 		ql_free_mem_resources(qlge);
4497bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_MEMORY_ALLOC;
4498bafec742SSukumar Swaminathan 	}
4499bafec742SSukumar Swaminathan 
4500bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_REGS_SETUP) {
4501bafec742SSukumar Swaminathan 		ddi_regs_map_free(&qlge->dev_handle);
4502bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_REGS_SETUP;
4503bafec742SSukumar Swaminathan 	}
4504bafec742SSukumar Swaminathan 
4505bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_DOORBELL_REGS_SETUP) {
4506bafec742SSukumar Swaminathan 		ddi_regs_map_free(&qlge->dev_doorbell_reg_handle);
4507bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_DOORBELL_REGS_SETUP;
4508bafec742SSukumar Swaminathan 	}
4509bafec742SSukumar Swaminathan 
4510bafec742SSukumar Swaminathan 	/*
4511bafec742SSukumar Swaminathan 	 * free flash flt table that allocated in attach stage
4512bafec742SSukumar Swaminathan 	 */
4513bafec742SSukumar Swaminathan 	if ((qlge->flt.ql_flt_entry_ptr != NULL)&&
4514bafec742SSukumar Swaminathan 	    (qlge->flt.header.length != 0)) {
4515bafec742SSukumar Swaminathan 		kmem_free(qlge->flt.ql_flt_entry_ptr, qlge->flt.header.length);
4516bafec742SSukumar Swaminathan 		qlge->flt.ql_flt_entry_ptr = NULL;
4517bafec742SSukumar Swaminathan 	}
4518bafec742SSukumar Swaminathan 
4519accf27a5SSukumar Swaminathan 	if (qlge->sequence & INIT_FM) {
4520accf27a5SSukumar Swaminathan 		ql_fm_fini(qlge);
4521accf27a5SSukumar Swaminathan 		qlge->sequence &= ~INIT_FM;
4522accf27a5SSukumar Swaminathan 	}
4523accf27a5SSukumar Swaminathan 
4524accf27a5SSukumar Swaminathan 	ddi_prop_remove_all(qlge->dip);
4525accf27a5SSukumar Swaminathan 	ddi_set_driver_private(qlge->dip, NULL);
4526accf27a5SSukumar Swaminathan 
4527bafec742SSukumar Swaminathan 	/* finally, free qlge structure */
4528bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_SOFTSTATE_ALLOC) {
4529bafec742SSukumar Swaminathan 		kmem_free(qlge, sizeof (qlge_t));
4530bafec742SSukumar Swaminathan 	}
4531bafec742SSukumar Swaminathan }
4532bafec742SSukumar Swaminathan 
4533bafec742SSukumar Swaminathan /*
4534bafec742SSukumar Swaminathan  * Set promiscuous mode of the driver
4535bafec742SSukumar Swaminathan  * Caller must catch HW_LOCK
4536bafec742SSukumar Swaminathan  */
4537bafec742SSukumar Swaminathan void
4538bafec742SSukumar Swaminathan ql_set_promiscuous(qlge_t *qlge, int mode)
4539bafec742SSukumar Swaminathan {
4540bafec742SSukumar Swaminathan 	if (mode) {
45410662fbf4SSukumar Swaminathan 		(void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4542bafec742SSukumar Swaminathan 		    RT_IDX_VALID, 1);
4543bafec742SSukumar Swaminathan 	} else {
45440662fbf4SSukumar Swaminathan 		(void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4545bafec742SSukumar Swaminathan 		    RT_IDX_VALID, 0);
4546bafec742SSukumar Swaminathan 	}
4547bafec742SSukumar Swaminathan }
4548bafec742SSukumar Swaminathan /*
4549bafec742SSukumar Swaminathan  * Write 'data1' to Mac Protocol Address Index Register and
4550bafec742SSukumar Swaminathan  * 'data2' to Mac Protocol Address Data Register
4551bafec742SSukumar Swaminathan  *  Assuming that the Mac Protocol semaphore lock has been acquired.
4552bafec742SSukumar Swaminathan  */
4553bafec742SSukumar Swaminathan static int
4554bafec742SSukumar Swaminathan ql_write_mac_proto_regs(qlge_t *qlge, uint32_t data1, uint32_t data2)
4555bafec742SSukumar Swaminathan {
4556bafec742SSukumar Swaminathan 	int return_value = DDI_SUCCESS;
4557bafec742SSukumar Swaminathan 
4558bafec742SSukumar Swaminathan 	if (ql_wait_reg_bit(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
4559bafec742SSukumar Swaminathan 	    MAC_PROTOCOL_ADDRESS_INDEX_MW, BIT_SET, 5) != DDI_SUCCESS) {
4560bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Wait for MAC_PROTOCOL Address Register "
4561bafec742SSukumar Swaminathan 		    "timeout.");
4562bafec742SSukumar Swaminathan 		return_value = DDI_FAILURE;
4563bafec742SSukumar Swaminathan 		goto out;
4564bafec742SSukumar Swaminathan 	}
4565bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX /* A8 */, data1);
4566bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA /* 0xAC */, data2);
4567bafec742SSukumar Swaminathan out:
4568bafec742SSukumar Swaminathan 	return (return_value);
4569bafec742SSukumar Swaminathan }
4570bafec742SSukumar Swaminathan /*
4571bafec742SSukumar Swaminathan  * Enable the 'index'ed multicast address in the host memory's multicast_list
4572bafec742SSukumar Swaminathan  */
4573bafec742SSukumar Swaminathan int
4574bafec742SSukumar Swaminathan ql_add_multicast_address(qlge_t *qlge, int index)
4575bafec742SSukumar Swaminathan {
4576bafec742SSukumar Swaminathan 	int rtn_val = DDI_FAILURE;
4577bafec742SSukumar Swaminathan 	uint32_t offset;
4578bafec742SSukumar Swaminathan 	uint32_t value1, value2;
4579bafec742SSukumar Swaminathan 
4580bafec742SSukumar Swaminathan 	/* Acquire the required semaphore */
4581bafec742SSukumar Swaminathan 	if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4582bafec742SSukumar Swaminathan 		return (rtn_val);
4583bafec742SSukumar Swaminathan 	}
4584bafec742SSukumar Swaminathan 
4585bafec742SSukumar Swaminathan 	/* Program Offset0 - lower 32 bits of the MAC address */
4586bafec742SSukumar Swaminathan 	offset = 0;
4587bafec742SSukumar Swaminathan 	value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4588bafec742SSukumar Swaminathan 	    (index << 4) | offset;
4589bafec742SSukumar Swaminathan 	value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4590bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4591bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4592bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4593bafec742SSukumar Swaminathan 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS)
4594bafec742SSukumar Swaminathan 		goto out;
4595bafec742SSukumar Swaminathan 
4596bafec742SSukumar Swaminathan 	/* Program offset1: upper 16 bits of the MAC address */
4597bafec742SSukumar Swaminathan 	offset = 1;
4598bafec742SSukumar Swaminathan 	value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4599bafec742SSukumar Swaminathan 	    (index<<4) | offset;
4600bafec742SSukumar Swaminathan 	value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[0] << 8)
4601bafec742SSukumar Swaminathan 	    |qlge->multicast_list[index].addr.ether_addr_octet[1]);
4602bafec742SSukumar Swaminathan 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4603bafec742SSukumar Swaminathan 		goto out;
4604bafec742SSukumar Swaminathan 	}
4605bafec742SSukumar Swaminathan 	rtn_val = DDI_SUCCESS;
4606bafec742SSukumar Swaminathan out:
4607bafec742SSukumar Swaminathan 	ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4608bafec742SSukumar Swaminathan 	return (rtn_val);
4609bafec742SSukumar Swaminathan }
4610bafec742SSukumar Swaminathan 
4611bafec742SSukumar Swaminathan /*
4612bafec742SSukumar Swaminathan  * Disable the 'index'ed multicast address in the host memory's multicast_list
4613bafec742SSukumar Swaminathan  */
4614bafec742SSukumar Swaminathan int
4615bafec742SSukumar Swaminathan ql_remove_multicast_address(qlge_t *qlge, int index)
4616bafec742SSukumar Swaminathan {
4617bafec742SSukumar Swaminathan 	int rtn_val = DDI_FAILURE;
4618bafec742SSukumar Swaminathan 	uint32_t offset;
4619bafec742SSukumar Swaminathan 	uint32_t value1, value2;
4620bafec742SSukumar Swaminathan 
4621bafec742SSukumar Swaminathan 	/* Acquire the required semaphore */
4622bafec742SSukumar Swaminathan 	if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4623bafec742SSukumar Swaminathan 		return (rtn_val);
4624bafec742SSukumar Swaminathan 	}
4625bafec742SSukumar Swaminathan 	/* Program Offset0 - lower 32 bits of the MAC address */
4626bafec742SSukumar Swaminathan 	offset = 0;
4627bafec742SSukumar Swaminathan 	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4628bafec742SSukumar Swaminathan 	value2 =
4629bafec742SSukumar Swaminathan 	    ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4630bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4631bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4632bafec742SSukumar Swaminathan 	    |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4633bafec742SSukumar Swaminathan 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4634bafec742SSukumar Swaminathan 		goto out;
4635bafec742SSukumar Swaminathan 	}
4636bafec742SSukumar Swaminathan 	/* Program offset1: upper 16 bits of the MAC address */
4637bafec742SSukumar Swaminathan 	offset = 1;
4638bafec742SSukumar Swaminathan 	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4639bafec742SSukumar Swaminathan 	value2 = 0;
4640bafec742SSukumar Swaminathan 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4641bafec742SSukumar Swaminathan 		goto out;
4642bafec742SSukumar Swaminathan 	}
4643bafec742SSukumar Swaminathan 	rtn_val = DDI_SUCCESS;
4644bafec742SSukumar Swaminathan out:
4645bafec742SSukumar Swaminathan 	ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4646bafec742SSukumar Swaminathan 	return (rtn_val);
4647bafec742SSukumar Swaminathan }
4648bafec742SSukumar Swaminathan 
4649bafec742SSukumar Swaminathan /*
4650bafec742SSukumar Swaminathan  * Add a new multicast address to the list of supported list
4651bafec742SSukumar Swaminathan  * This API is called after OS called gld_set_multicast (GLDv2)
4652bafec742SSukumar Swaminathan  * or m_multicst (GLDv3)
4653bafec742SSukumar Swaminathan  *
4654bafec742SSukumar Swaminathan  * Restriction:
4655bafec742SSukumar Swaminathan  * The number of maximum multicast address is limited by hardware.
4656bafec742SSukumar Swaminathan  */
4657bafec742SSukumar Swaminathan int
4658bafec742SSukumar Swaminathan ql_add_to_multicast_list(qlge_t *qlge, uint8_t *ep)
4659bafec742SSukumar Swaminathan {
4660bafec742SSukumar Swaminathan 	uint32_t index = qlge->multicast_list_count;
4661bafec742SSukumar Swaminathan 	int rval = DDI_SUCCESS;
4662bafec742SSukumar Swaminathan 	int status;
4663bafec742SSukumar Swaminathan 
4664bafec742SSukumar Swaminathan 	if ((ep[0] & 01) == 0) {
4665bafec742SSukumar Swaminathan 		rval = EINVAL;
4666bafec742SSukumar Swaminathan 		goto exit;
4667bafec742SSukumar Swaminathan 	}
4668bafec742SSukumar Swaminathan 
4669bafec742SSukumar Swaminathan 	/* if there is an availabe space in multicast_list, then add it */
4670bafec742SSukumar Swaminathan 	if (index < MAX_MULTICAST_LIST_SIZE) {
4671bafec742SSukumar Swaminathan 		bcopy(ep, qlge->multicast_list[index].addr.ether_addr_octet,
4672bafec742SSukumar Swaminathan 		    ETHERADDRL);
4673bafec742SSukumar Swaminathan 		/* increment the total number of addresses in multicast list */
46740662fbf4SSukumar Swaminathan 		(void) ql_add_multicast_address(qlge, index);
4675bafec742SSukumar Swaminathan 		qlge->multicast_list_count++;
4676bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD,
4677bafec742SSukumar Swaminathan 		    ("%s(%d): added to index of multicast list= 0x%x, "
4678bafec742SSukumar Swaminathan 		    "total %d\n", __func__, qlge->instance, index,
4679bafec742SSukumar Swaminathan 		    qlge->multicast_list_count));
4680bafec742SSukumar Swaminathan 
4681bafec742SSukumar Swaminathan 		if (index > MAX_MULTICAST_HW_SIZE) {
4682bafec742SSukumar Swaminathan 			if (!qlge->multicast_promisc) {
4683bafec742SSukumar Swaminathan 				status = ql_set_routing_reg(qlge,
4684bafec742SSukumar Swaminathan 				    RT_IDX_ALLMULTI_SLOT,
4685bafec742SSukumar Swaminathan 				    RT_IDX_MCAST, 1);
4686bafec742SSukumar Swaminathan 				if (status) {
4687bafec742SSukumar Swaminathan 					cmn_err(CE_WARN,
4688bafec742SSukumar Swaminathan 					    "Failed to init routing reg "
4689bafec742SSukumar Swaminathan 					    "for mcast promisc mode.");
4690bafec742SSukumar Swaminathan 					rval = ENOENT;
4691bafec742SSukumar Swaminathan 					goto exit;
4692bafec742SSukumar Swaminathan 				}
4693bafec742SSukumar Swaminathan 				qlge->multicast_promisc = B_TRUE;
4694bafec742SSukumar Swaminathan 			}
4695bafec742SSukumar Swaminathan 		}
4696bafec742SSukumar Swaminathan 	} else {
4697bafec742SSukumar Swaminathan 		rval = ENOENT;
4698bafec742SSukumar Swaminathan 	}
4699bafec742SSukumar Swaminathan exit:
4700bafec742SSukumar Swaminathan 	return (rval);
4701bafec742SSukumar Swaminathan }
4702bafec742SSukumar Swaminathan 
4703bafec742SSukumar Swaminathan /*
4704bafec742SSukumar Swaminathan  * Remove an old multicast address from the list of supported multicast
4705bafec742SSukumar Swaminathan  * addresses. This API is called after OS called gld_set_multicast (GLDv2)
4706bafec742SSukumar Swaminathan  * or m_multicst (GLDv3)
4707bafec742SSukumar Swaminathan  * The number of maximum multicast address is limited by hardware.
4708bafec742SSukumar Swaminathan  */
4709bafec742SSukumar Swaminathan int
4710bafec742SSukumar Swaminathan ql_remove_from_multicast_list(qlge_t *qlge, uint8_t *ep)
4711bafec742SSukumar Swaminathan {
4712bafec742SSukumar Swaminathan 	uint32_t total = qlge->multicast_list_count;
4713bafec742SSukumar Swaminathan 	int i = 0;
4714bafec742SSukumar Swaminathan 	int rmv_index = 0;
4715bafec742SSukumar Swaminathan 	size_t length = sizeof (ql_multicast_addr);
4716bafec742SSukumar Swaminathan 	int status;
4717bafec742SSukumar Swaminathan 
4718bafec742SSukumar Swaminathan 	for (i = 0; i < total; i++) {
4719bafec742SSukumar Swaminathan 		if (bcmp(ep, &qlge->multicast_list[i].addr, ETHERADDRL) != 0) {
4720bafec742SSukumar Swaminathan 			continue;
4721bafec742SSukumar Swaminathan 		}
4722bafec742SSukumar Swaminathan 
4723bafec742SSukumar Swaminathan 		rmv_index = i;
4724bafec742SSukumar Swaminathan 		/* block move the reset of other multicast address forward */
4725bafec742SSukumar Swaminathan 		length = ((total -1) -i) * sizeof (ql_multicast_addr);
4726bafec742SSukumar Swaminathan 		if (length > 0) {
4727bafec742SSukumar Swaminathan 			bcopy(&qlge->multicast_list[i+1],
4728bafec742SSukumar Swaminathan 			    &qlge->multicast_list[i], length);
4729bafec742SSukumar Swaminathan 		}
4730bafec742SSukumar Swaminathan 		qlge->multicast_list_count--;
4731bafec742SSukumar Swaminathan 		if (qlge->multicast_list_count <= MAX_MULTICAST_HW_SIZE) {
4732bafec742SSukumar Swaminathan 			/*
4733bafec742SSukumar Swaminathan 			 * there is a deletion in multicast list table,
4734bafec742SSukumar Swaminathan 			 * re-enable them
4735bafec742SSukumar Swaminathan 			 */
4736bafec742SSukumar Swaminathan 			for (i = rmv_index; i < qlge->multicast_list_count;
4737bafec742SSukumar Swaminathan 			    i++) {
47380662fbf4SSukumar Swaminathan 				(void) ql_add_multicast_address(qlge, i);
4739bafec742SSukumar Swaminathan 			}
4740bafec742SSukumar Swaminathan 			/* and disable the last one */
47410662fbf4SSukumar Swaminathan 			(void) ql_remove_multicast_address(qlge, i);
4742bafec742SSukumar Swaminathan 
4743bafec742SSukumar Swaminathan 			/* disable multicast promiscuous mode */
4744bafec742SSukumar Swaminathan 			if (qlge->multicast_promisc) {
4745bafec742SSukumar Swaminathan 				status = ql_set_routing_reg(qlge,
4746bafec742SSukumar Swaminathan 				    RT_IDX_ALLMULTI_SLOT,
4747bafec742SSukumar Swaminathan 				    RT_IDX_MCAST, 0);
4748bafec742SSukumar Swaminathan 				if (status) {
4749bafec742SSukumar Swaminathan 					cmn_err(CE_WARN,
4750bafec742SSukumar Swaminathan 					    "Failed to init routing reg for "
4751bafec742SSukumar Swaminathan 					    "mcast promisc mode.");
4752bafec742SSukumar Swaminathan 					goto exit;
4753bafec742SSukumar Swaminathan 				}
4754bafec742SSukumar Swaminathan 				/* write to config register */
4755bafec742SSukumar Swaminathan 				qlge->multicast_promisc = B_FALSE;
4756bafec742SSukumar Swaminathan 			}
4757bafec742SSukumar Swaminathan 		}
4758bafec742SSukumar Swaminathan 		break;
4759bafec742SSukumar Swaminathan 	}
4760bafec742SSukumar Swaminathan exit:
4761bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
4762bafec742SSukumar Swaminathan }
4763bafec742SSukumar Swaminathan 
4764bafec742SSukumar Swaminathan /*
4765bafec742SSukumar Swaminathan  * Read a XGMAC register
4766bafec742SSukumar Swaminathan  */
4767bafec742SSukumar Swaminathan int
4768bafec742SSukumar Swaminathan ql_read_xgmac_reg(qlge_t *qlge, uint32_t addr, uint32_t *val)
4769bafec742SSukumar Swaminathan {
4770bafec742SSukumar Swaminathan 	int rtn_val = DDI_FAILURE;
4771bafec742SSukumar Swaminathan 
4772bafec742SSukumar Swaminathan 	/* wait for XGMAC Address register RDY bit set */
4773bafec742SSukumar Swaminathan 	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4774bafec742SSukumar Swaminathan 	    BIT_SET, 10) != DDI_SUCCESS) {
4775bafec742SSukumar Swaminathan 		goto out;
4776bafec742SSukumar Swaminathan 	}
4777bafec742SSukumar Swaminathan 	/* start rx transaction */
4778bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_XGMAC_ADDRESS, addr|XGMAC_ADDRESS_READ_TRANSACT);
4779bafec742SSukumar Swaminathan 
4780bafec742SSukumar Swaminathan 	/*
4781bafec742SSukumar Swaminathan 	 * wait for XGMAC Address register RDY bit set,
4782bafec742SSukumar Swaminathan 	 * which indicates data is ready
4783bafec742SSukumar Swaminathan 	 */
4784bafec742SSukumar Swaminathan 	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4785bafec742SSukumar Swaminathan 	    BIT_SET, 10) != DDI_SUCCESS) {
4786bafec742SSukumar Swaminathan 		goto out;
4787bafec742SSukumar Swaminathan 	}
4788bafec742SSukumar Swaminathan 	/* read data from XGAMC_DATA register */
4789bafec742SSukumar Swaminathan 	*val = ql_read_reg(qlge, REG_XGMAC_DATA);
4790bafec742SSukumar Swaminathan 	rtn_val = DDI_SUCCESS;
4791bafec742SSukumar Swaminathan out:
4792bafec742SSukumar Swaminathan 	return (rtn_val);
4793bafec742SSukumar Swaminathan }
4794bafec742SSukumar Swaminathan 
4795bafec742SSukumar Swaminathan /*
4796bafec742SSukumar Swaminathan  * Implement checksum offload for IPv4 IP packets
4797bafec742SSukumar Swaminathan  */
4798bafec742SSukumar Swaminathan static void
4799bafec742SSukumar Swaminathan ql_hw_csum_setup(qlge_t *qlge, uint32_t pflags, caddr_t bp,
4800bafec742SSukumar Swaminathan     struct ob_mac_iocb_req *mac_iocb_ptr)
4801bafec742SSukumar Swaminathan {
4802bafec742SSukumar Swaminathan 	struct ip *iphdr = NULL;
4803bafec742SSukumar Swaminathan 	struct ether_header *ethhdr;
4804bafec742SSukumar Swaminathan 	struct ether_vlan_header *ethvhdr;
4805bafec742SSukumar Swaminathan 	struct tcphdr *tcp_hdr;
4806bafec742SSukumar Swaminathan 	uint32_t etherType;
4807bafec742SSukumar Swaminathan 	int mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
4808bafec742SSukumar Swaminathan 	int ip_hdr_off, tcp_udp_hdr_off, hdr_off;
4809bafec742SSukumar Swaminathan 
4810bafec742SSukumar Swaminathan 	ethhdr  = (struct ether_header *)((void *)bp);
4811bafec742SSukumar Swaminathan 	ethvhdr = (struct ether_vlan_header *)((void *)bp);
4812bafec742SSukumar Swaminathan 	/* Is this vlan packet? */
4813bafec742SSukumar Swaminathan 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
4814bafec742SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_vlan_header);
4815bafec742SSukumar Swaminathan 		etherType = ntohs(ethvhdr->ether_type);
4816bafec742SSukumar Swaminathan 	} else {
4817bafec742SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_header);
4818bafec742SSukumar Swaminathan 		etherType = ntohs(ethhdr->ether_type);
4819bafec742SSukumar Swaminathan 	}
4820bafec742SSukumar Swaminathan 	/* Is this IPv4 or IPv6 packet? */
4821bafec742SSukumar Swaminathan 	if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len)) ==
4822bafec742SSukumar Swaminathan 	    IPV4_VERSION) {
4823bafec742SSukumar Swaminathan 		if (etherType == ETHERTYPE_IP /* 0800 */) {
4824bafec742SSukumar Swaminathan 			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
4825bafec742SSukumar Swaminathan 		} else {
4826bafec742SSukumar Swaminathan 			/* EMPTY */
4827bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX,
4828bafec742SSukumar Swaminathan 			    ("%s(%d) : IPv4 None IP packet type 0x%x\n",
4829bafec742SSukumar Swaminathan 			    __func__, qlge->instance, etherType));
4830bafec742SSukumar Swaminathan 		}
4831bafec742SSukumar Swaminathan 	}
4832bafec742SSukumar Swaminathan 	/* ipV4 packets */
4833bafec742SSukumar Swaminathan 	if (iphdr != NULL) {
4834bafec742SSukumar Swaminathan 
4835bafec742SSukumar Swaminathan 		ip_hdr_len = IPH_HDR_LENGTH(iphdr);
4836bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX,
4837bafec742SSukumar Swaminathan 		    ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH:"
4838bafec742SSukumar Swaminathan 		    " %d bytes \n", __func__, qlge->instance, ip_hdr_len));
4839bafec742SSukumar Swaminathan 
4840bafec742SSukumar Swaminathan 		ip_hdr_off = mac_hdr_len;
4841bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
4842bafec742SSukumar Swaminathan 		    __func__, qlge->instance, ip_hdr_len));
4843bafec742SSukumar Swaminathan 
4844bafec742SSukumar Swaminathan 		mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
4845bafec742SSukumar Swaminathan 		    OB_MAC_IOCB_REQ_IPv4);
4846bafec742SSukumar Swaminathan 
4847bafec742SSukumar Swaminathan 		if (pflags & HCK_IPV4_HDRCKSUM) {
4848bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX, ("%s(%d) : Do IPv4 header checksum\n",
4849bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
4850bafec742SSukumar Swaminathan 			mac_iocb_ptr->opcode = OPCODE_OB_MAC_OFFLOAD_IOCB;
4851bafec742SSukumar Swaminathan 			mac_iocb_ptr->flag2 = (uint8_t)(mac_iocb_ptr->flag2 |
4852bafec742SSukumar Swaminathan 			    OB_MAC_IOCB_REQ_IC);
4853bafec742SSukumar Swaminathan 			iphdr->ip_sum = 0;
4854bafec742SSukumar Swaminathan 			mac_iocb_ptr->hdr_off = (uint16_t)
4855bafec742SSukumar Swaminathan 			    cpu_to_le16(ip_hdr_off);
4856bafec742SSukumar Swaminathan 		}
4857bafec742SSukumar Swaminathan 		if (pflags & HCK_FULLCKSUM) {
4858bafec742SSukumar Swaminathan 			if (iphdr->ip_p == IPPROTO_TCP) {
4859bafec742SSukumar Swaminathan 				tcp_hdr =
4860bafec742SSukumar Swaminathan 				    (struct tcphdr *)(void *)
4861bafec742SSukumar Swaminathan 				    ((uint8_t *)(void *)iphdr + ip_hdr_len);
4862bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d) : Do TCP checksum\n",
4863bafec742SSukumar Swaminathan 				    __func__, qlge->instance));
4864bafec742SSukumar Swaminathan 				mac_iocb_ptr->opcode =
4865bafec742SSukumar Swaminathan 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4866bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag1 =
4867bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag1 |
4868bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_TC);
4869bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag2 =
4870bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag2 |
4871bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_IC);
4872bafec742SSukumar Swaminathan 				iphdr->ip_sum = 0;
4873bafec742SSukumar Swaminathan 				tcp_udp_hdr_off = mac_hdr_len+ip_hdr_len;
4874bafec742SSukumar Swaminathan 				tcp_udp_hdr_len = tcp_hdr->th_off*4;
4875bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
4876bafec742SSukumar Swaminathan 				    __func__, qlge->instance, tcp_udp_hdr_len));
4877bafec742SSukumar Swaminathan 				hdr_off = ip_hdr_off;
4878bafec742SSukumar Swaminathan 				tcp_udp_hdr_off <<= 6;
4879bafec742SSukumar Swaminathan 				hdr_off |= tcp_udp_hdr_off;
4880bafec742SSukumar Swaminathan 				mac_iocb_ptr->hdr_off =
4881bafec742SSukumar Swaminathan 				    (uint16_t)cpu_to_le16(hdr_off);
4882bafec742SSukumar Swaminathan 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4883bafec742SSukumar Swaminathan 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
4884bafec742SSukumar Swaminathan 				    tcp_udp_hdr_len);
4885bafec742SSukumar Swaminathan 
4886bafec742SSukumar Swaminathan 				/*
4887bafec742SSukumar Swaminathan 				 * if the chip is unable to do pseudo header
4888bafec742SSukumar Swaminathan 				 * cksum calculation, do it in then put the
4889bafec742SSukumar Swaminathan 				 * result to the data passed to the chip
4890bafec742SSukumar Swaminathan 				 */
4891bafec742SSukumar Swaminathan 				if (qlge->cfg_flags &
4892bafec742SSukumar Swaminathan 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4893bafec742SSukumar Swaminathan 					ql_pseudo_cksum((uint8_t *)iphdr);
4894bafec742SSukumar Swaminathan 				}
4895bafec742SSukumar Swaminathan 			} else if (iphdr->ip_p == IPPROTO_UDP) {
4896bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d) : Do UDP checksum\n",
4897bafec742SSukumar Swaminathan 				    __func__, qlge->instance));
4898bafec742SSukumar Swaminathan 				mac_iocb_ptr->opcode =
4899bafec742SSukumar Swaminathan 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4900bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag1 =
4901bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag1 |
4902bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_UC);
4903bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag2 =
4904bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag2 |
4905bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_IC);
4906bafec742SSukumar Swaminathan 				iphdr->ip_sum = 0;
4907bafec742SSukumar Swaminathan 				tcp_udp_hdr_off = mac_hdr_len + ip_hdr_len;
4908bafec742SSukumar Swaminathan 				tcp_udp_hdr_len = sizeof (struct udphdr);
4909bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
4910bafec742SSukumar Swaminathan 				    __func__, qlge->instance, tcp_udp_hdr_len));
4911bafec742SSukumar Swaminathan 				hdr_off = ip_hdr_off;
4912bafec742SSukumar Swaminathan 				tcp_udp_hdr_off <<= 6;
4913bafec742SSukumar Swaminathan 				hdr_off |= tcp_udp_hdr_off;
4914bafec742SSukumar Swaminathan 				mac_iocb_ptr->hdr_off =
4915bafec742SSukumar Swaminathan 				    (uint16_t)cpu_to_le16(hdr_off);
4916bafec742SSukumar Swaminathan 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4917bafec742SSukumar Swaminathan 				    cpu_to_le16(mac_hdr_len + ip_hdr_len
4918bafec742SSukumar Swaminathan 				    + tcp_udp_hdr_len);
4919bafec742SSukumar Swaminathan 
4920bafec742SSukumar Swaminathan 				/*
4921bafec742SSukumar Swaminathan 				 * if the chip is unable to calculate pseudo
4922bafec742SSukumar Swaminathan 				 * hdr cksum,do it in then put the result to
4923bafec742SSukumar Swaminathan 				 * the data passed to the chip
4924bafec742SSukumar Swaminathan 				 */
4925bafec742SSukumar Swaminathan 				if (qlge->cfg_flags &
4926bafec742SSukumar Swaminathan 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4927bafec742SSukumar Swaminathan 					ql_pseudo_cksum((uint8_t *)iphdr);
4928bafec742SSukumar Swaminathan 				}
4929bafec742SSukumar Swaminathan 			}
4930bafec742SSukumar Swaminathan 		}
4931bafec742SSukumar Swaminathan 	}
4932bafec742SSukumar Swaminathan }
4933bafec742SSukumar Swaminathan 
4934bafec742SSukumar Swaminathan /*
4935bafec742SSukumar Swaminathan  * For TSO/LSO:
4936bafec742SSukumar Swaminathan  * MAC frame transmission with TCP large segment offload is performed in the
4937bafec742SSukumar Swaminathan  * same way as the MAC frame transmission with checksum offload with the
4938bafec742SSukumar Swaminathan  * exception that the maximum TCP segment size (MSS) must be specified to
4939bafec742SSukumar Swaminathan  * allow the chip to segment the data into legal sized frames.
4940bafec742SSukumar Swaminathan  * The host also needs to calculate a pseudo-header checksum over the
4941bafec742SSukumar Swaminathan  * following fields:
4942bafec742SSukumar Swaminathan  * Source IP Address, Destination IP Address, and the Protocol.
4943bafec742SSukumar Swaminathan  * The TCP length is not included in the pseudo-header calculation.
 * The pseudo-header checksum is placed in the TCP checksum field of the
4945bafec742SSukumar Swaminathan  * prototype header.
4946bafec742SSukumar Swaminathan  */
4947bafec742SSukumar Swaminathan static void
4948bafec742SSukumar Swaminathan ql_lso_pseudo_cksum(uint8_t *buf)
4949bafec742SSukumar Swaminathan {
4950bafec742SSukumar Swaminathan 	uint32_t cksum;
4951bafec742SSukumar Swaminathan 	uint16_t iphl;
4952bafec742SSukumar Swaminathan 	uint16_t proto;
4953bafec742SSukumar Swaminathan 
4954bafec742SSukumar Swaminathan 	/*
4955bafec742SSukumar Swaminathan 	 * Calculate the LSO pseudo-header checksum.
4956bafec742SSukumar Swaminathan 	 */
4957bafec742SSukumar Swaminathan 	iphl = (uint16_t)(4 * (buf[0] & 0xF));
4958bafec742SSukumar Swaminathan 	cksum = proto = buf[9];
4959bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[12])<<8) + buf[13];
4960bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[14])<<8) + buf[15];
4961bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[16])<<8) + buf[17];
4962bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[18])<<8) + buf[19];
4963bafec742SSukumar Swaminathan 	cksum = (cksum>>16) + (cksum & 0xFFFF);
4964bafec742SSukumar Swaminathan 	cksum = (cksum>>16) + (cksum & 0xFFFF);
4965bafec742SSukumar Swaminathan 
4966bafec742SSukumar Swaminathan 	/*
4967bafec742SSukumar Swaminathan 	 * Point it to the TCP/UDP header, and
4968bafec742SSukumar Swaminathan 	 * update the checksum field.
4969bafec742SSukumar Swaminathan 	 */
4970bafec742SSukumar Swaminathan 	buf += iphl + ((proto == IPPROTO_TCP) ?
4971bafec742SSukumar Swaminathan 	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
4972bafec742SSukumar Swaminathan 
4973bafec742SSukumar Swaminathan 	*(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
4974bafec742SSukumar Swaminathan }
4975bafec742SSukumar Swaminathan 
4976bafec742SSukumar Swaminathan /*
4977accf27a5SSukumar Swaminathan  * For IPv4 IP packets, distribute the tx packets evenly among tx rings
4978accf27a5SSukumar Swaminathan  */
/* Shorthand integer types used by the hash routine below. */
typedef	uint32_t	ub4; /* unsigned 4-byte quantities */
typedef	uint8_t		ub1;

/* Table size for n index bits, and the corresponding index mask. */
#define	hashsize(n)	((ub4)1<<(n))
#define	hashmask(n)	(hashsize(n)-1)

/*
 * Mixing step of Bob Jenkins' lookup2 hash: reversibly mixes the three
 * 32-bit state words a, b and c so that every bit of the input key
 * affects every bit of the final hash value.
 */
#define	mix(a, b, c) \
{ \
	a -= b; a -= c; a ^= (c>>13); \
	b -= c; b -= a; b ^= (a<<8); \
	c -= a; c -= b; c ^= (b>>13); \
	a -= b; a -= c; a ^= (c>>12);  \
	b -= c; b -= a; b ^= (a<<16); \
	c -= a; c -= b; c ^= (b>>5); \
	a -= b; a -= c; a ^= (c>>3);  \
	b -= c; b -= a; b ^= (a<<10); \
	c -= a; c -= b; c ^= (b>>15); \
}
4997accf27a5SSukumar Swaminathan 
4998accf27a5SSukumar Swaminathan ub4
4999accf27a5SSukumar Swaminathan hash(k, length, initval)
5000accf27a5SSukumar Swaminathan register ub1 *k;	/* the key */
5001accf27a5SSukumar Swaminathan register ub4 length;	/* the length of the key */
5002accf27a5SSukumar Swaminathan register ub4 initval;	/* the previous hash, or an arbitrary value */
5003accf27a5SSukumar Swaminathan {
5004accf27a5SSukumar Swaminathan 	register ub4 a, b, c, len;
5005accf27a5SSukumar Swaminathan 
5006accf27a5SSukumar Swaminathan 	/* Set up the internal state */
5007accf27a5SSukumar Swaminathan 	len = length;
5008accf27a5SSukumar Swaminathan 	a = b = 0x9e3779b9;	/* the golden ratio; an arbitrary value */
5009accf27a5SSukumar Swaminathan 	c = initval;		/* the previous hash value */
5010accf27a5SSukumar Swaminathan 
5011accf27a5SSukumar Swaminathan 	/* handle most of the key */
5012accf27a5SSukumar Swaminathan 	while (len >= 12) {
5013accf27a5SSukumar Swaminathan 		a += (k[0] +((ub4)k[1]<<8) +((ub4)k[2]<<16) +((ub4)k[3]<<24));
5014accf27a5SSukumar Swaminathan 		b += (k[4] +((ub4)k[5]<<8) +((ub4)k[6]<<16) +((ub4)k[7]<<24));
5015accf27a5SSukumar Swaminathan 		c += (k[8] +((ub4)k[9]<<8) +((ub4)k[10]<<16)+((ub4)k[11]<<24));
5016accf27a5SSukumar Swaminathan 		mix(a, b, c);
5017accf27a5SSukumar Swaminathan 		k += 12;
5018accf27a5SSukumar Swaminathan 		len -= 12;
5019accf27a5SSukumar Swaminathan 	}
5020accf27a5SSukumar Swaminathan 
5021accf27a5SSukumar Swaminathan 	/* handle the last 11 bytes */
5022accf27a5SSukumar Swaminathan 	c += length;
5023accf27a5SSukumar Swaminathan 	/* all the case statements fall through */
5024accf27a5SSukumar Swaminathan 	switch (len) {
5025accf27a5SSukumar Swaminathan 		/* FALLTHRU */
5026accf27a5SSukumar Swaminathan 	case 11: c += ((ub4)k[10]<<24);
5027accf27a5SSukumar Swaminathan 		/* FALLTHRU */
5028accf27a5SSukumar Swaminathan 	case 10: c += ((ub4)k[9]<<16);
5029accf27a5SSukumar Swaminathan 		/* FALLTHRU */
5030accf27a5SSukumar Swaminathan 	case 9 : c += ((ub4)k[8]<<8);
5031accf27a5SSukumar Swaminathan 	/* the first byte of c is reserved for the length */
5032accf27a5SSukumar Swaminathan 		/* FALLTHRU */
5033accf27a5SSukumar Swaminathan 	case 8 : b += ((ub4)k[7]<<24);
5034accf27a5SSukumar Swaminathan 		/* FALLTHRU */
5035accf27a5SSukumar Swaminathan 	case 7 : b += ((ub4)k[6]<<16);
5036accf27a5SSukumar Swaminathan 		/* FALLTHRU */
5037accf27a5SSukumar Swaminathan 	case 6 : b += ((ub4)k[5]<<8);
5038accf27a5SSukumar Swaminathan 		/* FALLTHRU */
5039accf27a5SSukumar Swaminathan 	case 5 : b += k[4];
5040accf27a5SSukumar Swaminathan 		/* FALLTHRU */
5041accf27a5SSukumar Swaminathan 	case 4 : a += ((ub4)k[3]<<24);
5042accf27a5SSukumar Swaminathan 		/* FALLTHRU */
5043accf27a5SSukumar Swaminathan 	case 3 : a += ((ub4)k[2]<<16);
5044accf27a5SSukumar Swaminathan 		/* FALLTHRU */
5045accf27a5SSukumar Swaminathan 	case 2 : a += ((ub4)k[1]<<8);
5046accf27a5SSukumar Swaminathan 		/* FALLTHRU */
5047accf27a5SSukumar Swaminathan 	case 1 : a += k[0];
5048accf27a5SSukumar Swaminathan 	/* case 0: nothing left to add */
5049accf27a5SSukumar Swaminathan 	}
5050accf27a5SSukumar Swaminathan 	mix(a, b, c);
5051accf27a5SSukumar Swaminathan 	/* report the result */
5052accf27a5SSukumar Swaminathan 	return (c);
5053accf27a5SSukumar Swaminathan }
5054accf27a5SSukumar Swaminathan 
5055accf27a5SSukumar Swaminathan uint8_t
5056accf27a5SSukumar Swaminathan ql_tx_hashing(qlge_t *qlge, caddr_t bp)
5057accf27a5SSukumar Swaminathan {
5058accf27a5SSukumar Swaminathan 	struct ip *iphdr = NULL;
5059accf27a5SSukumar Swaminathan 	struct ether_header *ethhdr;
5060accf27a5SSukumar Swaminathan 	struct ether_vlan_header *ethvhdr;
5061accf27a5SSukumar Swaminathan 	struct tcphdr *tcp_hdr;
5062accf27a5SSukumar Swaminathan 	struct udphdr *udp_hdr;
5063accf27a5SSukumar Swaminathan 	uint32_t etherType;
5064accf27a5SSukumar Swaminathan 	int mac_hdr_len, ip_hdr_len;
5065accf27a5SSukumar Swaminathan 	uint32_t h = 0; /* 0 by default */
5066accf27a5SSukumar Swaminathan 	uint8_t tx_ring_id = 0;
5067accf27a5SSukumar Swaminathan 	uint32_t ip_src_addr = 0;
5068accf27a5SSukumar Swaminathan 	uint32_t ip_desc_addr = 0;
5069accf27a5SSukumar Swaminathan 	uint16_t src_port = 0;
5070accf27a5SSukumar Swaminathan 	uint16_t dest_port = 0;
5071accf27a5SSukumar Swaminathan 	uint8_t key[12];
5072accf27a5SSukumar Swaminathan 	QL_PRINT(DBG_TX, ("%s(%d) entered \n", __func__, qlge->instance));
5073accf27a5SSukumar Swaminathan 
5074accf27a5SSukumar Swaminathan 	ethhdr = (struct ether_header *)((void *)bp);
5075accf27a5SSukumar Swaminathan 	ethvhdr = (struct ether_vlan_header *)((void *)bp);
5076accf27a5SSukumar Swaminathan 
5077accf27a5SSukumar Swaminathan 	if (qlge->tx_ring_count == 1)
5078accf27a5SSukumar Swaminathan 		return (tx_ring_id);
5079accf27a5SSukumar Swaminathan 
5080accf27a5SSukumar Swaminathan 	/* Is this vlan packet? */
5081accf27a5SSukumar Swaminathan 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
5082accf27a5SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_vlan_header);
5083accf27a5SSukumar Swaminathan 		etherType = ntohs(ethvhdr->ether_type);
5084accf27a5SSukumar Swaminathan 	} else {
5085accf27a5SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_header);
5086accf27a5SSukumar Swaminathan 		etherType = ntohs(ethhdr->ether_type);
5087accf27a5SSukumar Swaminathan 	}
5088accf27a5SSukumar Swaminathan 	/* Is this IPv4 or IPv6 packet? */
5089accf27a5SSukumar Swaminathan 	if (etherType == ETHERTYPE_IP /* 0800 */) {
5090accf27a5SSukumar Swaminathan 		if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len))
5091accf27a5SSukumar Swaminathan 		    == IPV4_VERSION) {
5092accf27a5SSukumar Swaminathan 			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
5093accf27a5SSukumar Swaminathan 		}
5094accf27a5SSukumar Swaminathan 		if (((unsigned long)iphdr) & 0x3) {
5095accf27a5SSukumar Swaminathan 			/*  IP hdr not 4-byte aligned */
5096accf27a5SSukumar Swaminathan 			return (tx_ring_id);
5097accf27a5SSukumar Swaminathan 		}
5098accf27a5SSukumar Swaminathan 	}
5099accf27a5SSukumar Swaminathan 	/* ipV4 packets */
5100accf27a5SSukumar Swaminathan 	if (iphdr) {
5101accf27a5SSukumar Swaminathan 
5102accf27a5SSukumar Swaminathan 		ip_hdr_len = IPH_HDR_LENGTH(iphdr);
5103accf27a5SSukumar Swaminathan 		ip_src_addr = iphdr->ip_src.s_addr;
5104accf27a5SSukumar Swaminathan 		ip_desc_addr = iphdr->ip_dst.s_addr;
5105accf27a5SSukumar Swaminathan 
5106accf27a5SSukumar Swaminathan 		if (iphdr->ip_p == IPPROTO_TCP) {
5107accf27a5SSukumar Swaminathan 			tcp_hdr = (struct tcphdr *)(void *)
5108accf27a5SSukumar Swaminathan 			    ((uint8_t *)iphdr + ip_hdr_len);
5109accf27a5SSukumar Swaminathan 			src_port = tcp_hdr->th_sport;
5110accf27a5SSukumar Swaminathan 			dest_port = tcp_hdr->th_dport;
5111accf27a5SSukumar Swaminathan 		} else if (iphdr->ip_p == IPPROTO_UDP) {
5112accf27a5SSukumar Swaminathan 			udp_hdr = (struct udphdr *)(void *)
5113accf27a5SSukumar Swaminathan 			    ((uint8_t *)iphdr + ip_hdr_len);
5114accf27a5SSukumar Swaminathan 			src_port = udp_hdr->uh_sport;
5115accf27a5SSukumar Swaminathan 			dest_port = udp_hdr->uh_dport;
5116accf27a5SSukumar Swaminathan 		}
5117accf27a5SSukumar Swaminathan 		key[0] = (uint8_t)((ip_src_addr) &0xFF);
5118accf27a5SSukumar Swaminathan 		key[1] = (uint8_t)((ip_src_addr >> 8) &0xFF);
5119accf27a5SSukumar Swaminathan 		key[2] = (uint8_t)((ip_src_addr >> 16) &0xFF);
5120accf27a5SSukumar Swaminathan 		key[3] = (uint8_t)((ip_src_addr >> 24) &0xFF);
5121accf27a5SSukumar Swaminathan 		key[4] = (uint8_t)((ip_desc_addr) &0xFF);
5122accf27a5SSukumar Swaminathan 		key[5] = (uint8_t)((ip_desc_addr >> 8) &0xFF);
5123accf27a5SSukumar Swaminathan 		key[6] = (uint8_t)((ip_desc_addr >> 16) &0xFF);
5124accf27a5SSukumar Swaminathan 		key[7] = (uint8_t)((ip_desc_addr >> 24) &0xFF);
5125accf27a5SSukumar Swaminathan 		key[8] = (uint8_t)((src_port) &0xFF);
5126accf27a5SSukumar Swaminathan 		key[9] = (uint8_t)((src_port >> 8) &0xFF);
5127accf27a5SSukumar Swaminathan 		key[10] = (uint8_t)((dest_port) &0xFF);
5128accf27a5SSukumar Swaminathan 		key[11] = (uint8_t)((dest_port >> 8) &0xFF);
5129accf27a5SSukumar Swaminathan 		h = hash(key, 12, 0); /* return 32 bit */
5130accf27a5SSukumar Swaminathan 		tx_ring_id = (h & (qlge->tx_ring_count - 1));
5131accf27a5SSukumar Swaminathan 		if (tx_ring_id >= qlge->tx_ring_count) {
5132accf27a5SSukumar Swaminathan 			cmn_err(CE_WARN, "%s bad tx_ring_id %d\n",
5133accf27a5SSukumar Swaminathan 			    __func__, tx_ring_id);
5134accf27a5SSukumar Swaminathan 			tx_ring_id = 0;
5135accf27a5SSukumar Swaminathan 		}
5136accf27a5SSukumar Swaminathan 	}
5137accf27a5SSukumar Swaminathan 	return (tx_ring_id);
5138accf27a5SSukumar Swaminathan }
5139accf27a5SSukumar Swaminathan 
5140accf27a5SSukumar Swaminathan /*
5141bafec742SSukumar Swaminathan  * Tell the hardware to do Large Send Offload (LSO)
5142bafec742SSukumar Swaminathan  *
5143bafec742SSukumar Swaminathan  * Some fields in ob_mac_iocb need to be set so hardware can know what is
5144bafec742SSukumar Swaminathan  * the incoming packet, TCP or UDP, whether a VLAN tag needs to be inserted
5145bafec742SSukumar Swaminathan  * in the right place of the packet etc, thus, hardware can process the
5146bafec742SSukumar Swaminathan  * packet correctly.
5147bafec742SSukumar Swaminathan  */
5148bafec742SSukumar Swaminathan static void
5149bafec742SSukumar Swaminathan ql_hw_lso_setup(qlge_t *qlge, uint32_t mss, caddr_t bp,
5150bafec742SSukumar Swaminathan     struct ob_mac_iocb_req *mac_iocb_ptr)
5151bafec742SSukumar Swaminathan {
5152bafec742SSukumar Swaminathan 	struct ip *iphdr = NULL;
5153bafec742SSukumar Swaminathan 	struct ether_header *ethhdr;
5154bafec742SSukumar Swaminathan 	struct ether_vlan_header *ethvhdr;
5155bafec742SSukumar Swaminathan 	struct tcphdr *tcp_hdr;
5156bafec742SSukumar Swaminathan 	struct udphdr *udp_hdr;
5157bafec742SSukumar Swaminathan 	uint32_t etherType;
5158bafec742SSukumar Swaminathan 	uint16_t mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
5159bafec742SSukumar Swaminathan 	uint16_t ip_hdr_off, tcp_udp_hdr_off, hdr_off;
5160bafec742SSukumar Swaminathan 
5161bafec742SSukumar Swaminathan 	ethhdr = (struct ether_header *)(void *)bp;
5162bafec742SSukumar Swaminathan 	ethvhdr = (struct ether_vlan_header *)(void *)bp;
5163bafec742SSukumar Swaminathan 
5164bafec742SSukumar Swaminathan 	/* Is this vlan packet? */
5165bafec742SSukumar Swaminathan 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
5166bafec742SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_vlan_header);
5167bafec742SSukumar Swaminathan 		etherType = ntohs(ethvhdr->ether_type);
5168bafec742SSukumar Swaminathan 	} else {
5169bafec742SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_header);
5170bafec742SSukumar Swaminathan 		etherType = ntohs(ethhdr->ether_type);
5171bafec742SSukumar Swaminathan 	}
5172bafec742SSukumar Swaminathan 	/* Is this IPv4 or IPv6 packet? */
5173bafec742SSukumar Swaminathan 	if (IPH_HDR_VERSION((ipha_t *)(void *)(bp + mac_hdr_len)) ==
5174bafec742SSukumar Swaminathan 	    IPV4_VERSION) {
5175bafec742SSukumar Swaminathan 		if (etherType == ETHERTYPE_IP /* 0800 */) {
5176bafec742SSukumar Swaminathan 			iphdr 	= (struct ip *)(void *)(bp+mac_hdr_len);
5177bafec742SSukumar Swaminathan 		} else {
5178bafec742SSukumar Swaminathan 			/* EMPTY */
5179bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX, ("%s(%d) : IPv4 None IP packet"
5180bafec742SSukumar Swaminathan 			    " type 0x%x\n",
5181bafec742SSukumar Swaminathan 			    __func__, qlge->instance, etherType));
5182bafec742SSukumar Swaminathan 		}
5183bafec742SSukumar Swaminathan 	}
5184bafec742SSukumar Swaminathan 
5185bafec742SSukumar Swaminathan 	if (iphdr != NULL) { /* ipV4 packets */
5186bafec742SSukumar Swaminathan 		ip_hdr_len = (uint16_t)IPH_HDR_LENGTH(iphdr);
5187bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX,
5188bafec742SSukumar Swaminathan 		    ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH: %d"
5189bafec742SSukumar Swaminathan 		    " bytes \n", __func__, qlge->instance, ip_hdr_len));
5190bafec742SSukumar Swaminathan 
5191bafec742SSukumar Swaminathan 		ip_hdr_off = mac_hdr_len;
5192bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
5193bafec742SSukumar Swaminathan 		    __func__, qlge->instance, ip_hdr_len));
5194bafec742SSukumar Swaminathan 
5195bafec742SSukumar Swaminathan 		mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
5196bafec742SSukumar Swaminathan 		    OB_MAC_IOCB_REQ_IPv4);
5197bafec742SSukumar Swaminathan 		if (qlge->cfg_flags & CFG_CKSUM_FULL_IPv4) {
5198bafec742SSukumar Swaminathan 			if (iphdr->ip_p == IPPROTO_TCP) {
5199bafec742SSukumar Swaminathan 				tcp_hdr = (struct tcphdr *)(void *)
5200bafec742SSukumar Swaminathan 				    ((uint8_t *)(void *)iphdr +
5201bafec742SSukumar Swaminathan 				    ip_hdr_len);
5202bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on TCP "
5203bafec742SSukumar Swaminathan 				    "packet\n",
5204bafec742SSukumar Swaminathan 				    __func__, qlge->instance));
5205bafec742SSukumar Swaminathan 				mac_iocb_ptr->opcode =
5206bafec742SSukumar Swaminathan 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
5207bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag1 =
5208bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag1 |
5209bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_LSO);
5210bafec742SSukumar Swaminathan 				iphdr->ip_sum = 0;
5211bafec742SSukumar Swaminathan 				tcp_udp_hdr_off =
5212bafec742SSukumar Swaminathan 				    (uint16_t)(mac_hdr_len+ip_hdr_len);
5213bafec742SSukumar Swaminathan 				tcp_udp_hdr_len =
5214bafec742SSukumar Swaminathan 				    (uint16_t)(tcp_hdr->th_off*4);
5215bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
5216bafec742SSukumar Swaminathan 				    __func__, qlge->instance, tcp_udp_hdr_len));
5217bafec742SSukumar Swaminathan 				hdr_off = ip_hdr_off;
5218bafec742SSukumar Swaminathan 				tcp_udp_hdr_off <<= 6;
5219bafec742SSukumar Swaminathan 				hdr_off |= tcp_udp_hdr_off;
5220bafec742SSukumar Swaminathan 				mac_iocb_ptr->hdr_off =
5221bafec742SSukumar Swaminathan 				    (uint16_t)cpu_to_le16(hdr_off);
5222bafec742SSukumar Swaminathan 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
5223bafec742SSukumar Swaminathan 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
5224bafec742SSukumar Swaminathan 				    tcp_udp_hdr_len);
5225bafec742SSukumar Swaminathan 				mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
5226bafec742SSukumar Swaminathan 
5227bafec742SSukumar Swaminathan 				/*
5228bafec742SSukumar Swaminathan 				 * if the chip is unable to calculate pseudo
5229bafec742SSukumar Swaminathan 				 * header checksum, do it in then put the result
5230bafec742SSukumar Swaminathan 				 * to the data passed to the chip
5231bafec742SSukumar Swaminathan 				 */
5232bafec742SSukumar Swaminathan 				if (qlge->cfg_flags &
5233bafec742SSukumar Swaminathan 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
5234bafec742SSukumar Swaminathan 					ql_lso_pseudo_cksum((uint8_t *)iphdr);
5235bafec742SSukumar Swaminathan 			} else if (iphdr->ip_p == IPPROTO_UDP) {
5236bafec742SSukumar Swaminathan 				udp_hdr = (struct udphdr *)(void *)
5237bafec742SSukumar Swaminathan 				    ((uint8_t *)(void *)iphdr
5238bafec742SSukumar Swaminathan 				    + ip_hdr_len);
5239bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on UDP "
5240bafec742SSukumar Swaminathan 				    "packet\n",
5241bafec742SSukumar Swaminathan 				    __func__, qlge->instance));
5242bafec742SSukumar Swaminathan 				mac_iocb_ptr->opcode =
5243bafec742SSukumar Swaminathan 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
5244bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag1 =
5245bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag1 |
5246bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_LSO);
5247bafec742SSukumar Swaminathan 				iphdr->ip_sum = 0;
5248bafec742SSukumar Swaminathan 				tcp_udp_hdr_off =
5249bafec742SSukumar Swaminathan 				    (uint16_t)(mac_hdr_len+ip_hdr_len);
5250bafec742SSukumar Swaminathan 				tcp_udp_hdr_len =
5251bafec742SSukumar Swaminathan 				    (uint16_t)(udp_hdr->uh_ulen*4);
5252bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
5253bafec742SSukumar Swaminathan 				    __func__, qlge->instance, tcp_udp_hdr_len));
5254bafec742SSukumar Swaminathan 				hdr_off = ip_hdr_off;
5255bafec742SSukumar Swaminathan 				tcp_udp_hdr_off <<= 6;
5256bafec742SSukumar Swaminathan 				hdr_off |= tcp_udp_hdr_off;
5257bafec742SSukumar Swaminathan 				mac_iocb_ptr->hdr_off =
5258bafec742SSukumar Swaminathan 				    (uint16_t)cpu_to_le16(hdr_off);
5259bafec742SSukumar Swaminathan 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
5260bafec742SSukumar Swaminathan 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
5261bafec742SSukumar Swaminathan 				    tcp_udp_hdr_len);
5262bafec742SSukumar Swaminathan 				mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
5263bafec742SSukumar Swaminathan 
5264bafec742SSukumar Swaminathan 				/*
5265bafec742SSukumar Swaminathan 				 * if the chip is unable to do pseudo header
5266bafec742SSukumar Swaminathan 				 * checksum calculation, do it here then put the
5267bafec742SSukumar Swaminathan 				 * result to the data passed to the chip
5268bafec742SSukumar Swaminathan 				 */
5269bafec742SSukumar Swaminathan 				if (qlge->cfg_flags &
5270bafec742SSukumar Swaminathan 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
5271bafec742SSukumar Swaminathan 					ql_lso_pseudo_cksum((uint8_t *)iphdr);
5272bafec742SSukumar Swaminathan 			}
5273bafec742SSukumar Swaminathan 		}
5274bafec742SSukumar Swaminathan 	}
5275bafec742SSukumar Swaminathan }
5276bafec742SSukumar Swaminathan 
5277bafec742SSukumar Swaminathan /*
5278bafec742SSukumar Swaminathan  * Generic packet sending function which is used to send one packet.
5279bafec742SSukumar Swaminathan  */
5280bafec742SSukumar Swaminathan int
5281bafec742SSukumar Swaminathan ql_send_common(struct tx_ring *tx_ring, mblk_t *mp)
5282bafec742SSukumar Swaminathan {
5283bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_cb;
5284bafec742SSukumar Swaminathan 	struct ob_mac_iocb_req *mac_iocb_ptr;
5285bafec742SSukumar Swaminathan 	mblk_t *tp;
5286bafec742SSukumar Swaminathan 	size_t msg_len = 0;
5287bafec742SSukumar Swaminathan 	size_t off;
5288bafec742SSukumar Swaminathan 	caddr_t bp;
5289bafec742SSukumar Swaminathan 	size_t nbyte, total_len;
5290bafec742SSukumar Swaminathan 	uint_t i = 0;
5291bafec742SSukumar Swaminathan 	int j = 0, frags = 0;
5292bafec742SSukumar Swaminathan 	uint32_t phy_addr_low, phy_addr_high;
5293bafec742SSukumar Swaminathan 	uint64_t phys_addr;
5294bafec742SSukumar Swaminathan 	clock_t now;
5295bafec742SSukumar Swaminathan 	uint32_t pflags = 0;
5296bafec742SSukumar Swaminathan 	uint32_t mss = 0;
5297bafec742SSukumar Swaminathan 	enum tx_mode_t tx_mode;
5298bafec742SSukumar Swaminathan 	struct oal_entry *oal_entry;
5299bafec742SSukumar Swaminathan 	int status;
5300bafec742SSukumar Swaminathan 	uint_t ncookies, oal_entries, max_oal_entries;
5301bafec742SSukumar Swaminathan 	size_t max_seg_len = 0;
5302bafec742SSukumar Swaminathan 	boolean_t use_lso = B_FALSE;
5303bafec742SSukumar Swaminathan 	struct oal_entry *tx_entry = NULL;
5304bafec742SSukumar Swaminathan 	struct oal_entry *last_oal_entry;
5305bafec742SSukumar Swaminathan 	qlge_t *qlge = tx_ring->qlge;
5306bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
5307bafec742SSukumar Swaminathan 	size_t tx_buf_len = QL_MAX_COPY_LENGTH;
5308bafec742SSukumar Swaminathan 	int force_pullup = 0;
5309bafec742SSukumar Swaminathan 
5310bafec742SSukumar Swaminathan 	tp = mp;
5311bafec742SSukumar Swaminathan 	total_len = msg_len = 0;
5312bafec742SSukumar Swaminathan 	max_oal_entries = TX_DESC_PER_IOCB + MAX_SG_ELEMENTS-1;
5313bafec742SSukumar Swaminathan 
5314bafec742SSukumar Swaminathan 	/* Calculate number of data and segments in the incoming message */
5315bafec742SSukumar Swaminathan 	for (tp = mp; tp != NULL; tp = tp->b_cont) {
5316bafec742SSukumar Swaminathan 		nbyte = MBLKL(tp);
5317bafec742SSukumar Swaminathan 		total_len += nbyte;
5318bafec742SSukumar Swaminathan 		max_seg_len = max(nbyte, max_seg_len);
5319bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("Requested sending data in %d segments, "
5320bafec742SSukumar Swaminathan 		    "total length: %d\n", frags, nbyte));
5321bafec742SSukumar Swaminathan 		frags++;
5322bafec742SSukumar Swaminathan 	}
5323bafec742SSukumar Swaminathan 
5324bafec742SSukumar Swaminathan 	if (total_len >= QL_LSO_MAX) {
5325bafec742SSukumar Swaminathan 		freemsg(mp);
5326bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
5327bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "%s: quit, packet oversize %d\n",
5328bafec742SSukumar Swaminathan 		    __func__, (int)total_len);
5329bafec742SSukumar Swaminathan #endif
5330bafec742SSukumar Swaminathan 		return (NULL);
5331bafec742SSukumar Swaminathan 	}
5332bafec742SSukumar Swaminathan 
5333bafec742SSukumar Swaminathan 	bp = (caddr_t)mp->b_rptr;
5334bafec742SSukumar Swaminathan 	if (bp[0] & 1) {
5335bafec742SSukumar Swaminathan 		if (bcmp(bp, ql_ether_broadcast_addr.ether_addr_octet,
5336bafec742SSukumar Swaminathan 		    ETHERADDRL) == 0) {
5337bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX, ("Broadcast packet\n"));
5338bafec742SSukumar Swaminathan 			tx_ring->brdcstxmt++;
5339bafec742SSukumar Swaminathan 		} else {
5340bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX, ("multicast packet\n"));
5341bafec742SSukumar Swaminathan 			tx_ring->multixmt++;
5342bafec742SSukumar Swaminathan 		}
5343bafec742SSukumar Swaminathan 	}
5344bafec742SSukumar Swaminathan 
5345bafec742SSukumar Swaminathan 	tx_ring->obytes += total_len;
5346bafec742SSukumar Swaminathan 	tx_ring->opackets ++;
5347bafec742SSukumar Swaminathan 
5348bafec742SSukumar Swaminathan 	QL_PRINT(DBG_TX, ("total requested sending data length: %d, in %d segs,"
5349bafec742SSukumar Swaminathan 	    " max seg len: %d\n", total_len, frags, max_seg_len));
5350bafec742SSukumar Swaminathan 
5351bafec742SSukumar Swaminathan 	/* claim a free slot in tx ring */
5352bafec742SSukumar Swaminathan 	tx_cb = &tx_ring->wq_desc[tx_ring->prod_idx];
5353bafec742SSukumar Swaminathan 
5354bafec742SSukumar Swaminathan 	/* get the tx descriptor */
5355bafec742SSukumar Swaminathan 	mac_iocb_ptr = tx_cb->queue_entry;
5356bafec742SSukumar Swaminathan 
5357accf27a5SSukumar Swaminathan 	bzero((void *)mac_iocb_ptr, 20);
5358bafec742SSukumar Swaminathan 
5359bafec742SSukumar Swaminathan 	ASSERT(tx_cb->mp == NULL);
5360bafec742SSukumar Swaminathan 
5361bafec742SSukumar Swaminathan 	/*
5362bafec742SSukumar Swaminathan 	 * Decide to use DMA map or copy mode.
5363bafec742SSukumar Swaminathan 	 * DMA map mode must be used when the total msg length is more than the
5364bafec742SSukumar Swaminathan 	 * tx buffer length.
5365bafec742SSukumar Swaminathan 	 */
5366bafec742SSukumar Swaminathan 
5367bafec742SSukumar Swaminathan 	if (total_len > tx_buf_len)
5368bafec742SSukumar Swaminathan 		tx_mode = USE_DMA;
5369bafec742SSukumar Swaminathan 	else if	(max_seg_len > QL_MAX_COPY_LENGTH)
5370bafec742SSukumar Swaminathan 		tx_mode = USE_DMA;
5371bafec742SSukumar Swaminathan 	else
5372bafec742SSukumar Swaminathan 		tx_mode = USE_COPY;
5373bafec742SSukumar Swaminathan 
5374bafec742SSukumar Swaminathan 	if (qlge->chksum_cap) {
53750dc2366fSVenugopal Iyer 		mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &pflags);
5376bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("checksum flag is :0x%x, card capability "
5377bafec742SSukumar Swaminathan 		    "is 0x%x \n", pflags, qlge->chksum_cap));
5378bafec742SSukumar Swaminathan 		if (qlge->lso_enable) {
5379bafec742SSukumar Swaminathan 			uint32_t lso_flags = 0;
53800dc2366fSVenugopal Iyer 			mac_lso_get(mp, &mss, &lso_flags);
5381bafec742SSukumar Swaminathan 			use_lso = (lso_flags == HW_LSO);
5382bafec742SSukumar Swaminathan 		}
5383bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("mss :%d, use_lso %x \n",
5384bafec742SSukumar Swaminathan 		    mss, use_lso));
5385bafec742SSukumar Swaminathan 	}
5386bafec742SSukumar Swaminathan 
5387bafec742SSukumar Swaminathan do_pullup:
5388bafec742SSukumar Swaminathan 
5389bafec742SSukumar Swaminathan 	/* concatenate all frags into one large packet if too fragmented */
5390bafec742SSukumar Swaminathan 	if (((tx_mode == USE_DMA)&&(frags > QL_MAX_TX_DMA_HANDLES)) ||
5391bafec742SSukumar Swaminathan 	    force_pullup) {
5392bafec742SSukumar Swaminathan 		mblk_t *mp1;
5393bafec742SSukumar Swaminathan 		if ((mp1 = msgpullup(mp, -1)) != NULL) {
5394bafec742SSukumar Swaminathan 			freemsg(mp);
5395bafec742SSukumar Swaminathan 			mp = mp1;
5396bafec742SSukumar Swaminathan 			frags = 1;
5397bafec742SSukumar Swaminathan 		} else {
5398bafec742SSukumar Swaminathan 			tx_ring->tx_fail_dma_bind++;
5399bafec742SSukumar Swaminathan 			goto bad;
5400bafec742SSukumar Swaminathan 		}
5401bafec742SSukumar Swaminathan 	}
5402bafec742SSukumar Swaminathan 
5403bafec742SSukumar Swaminathan 	tx_cb->tx_bytes = (uint32_t)total_len;
5404bafec742SSukumar Swaminathan 	tx_cb->mp = mp;
5405bafec742SSukumar Swaminathan 	tx_cb->tx_dma_handle_used = 0;
5406bafec742SSukumar Swaminathan 
5407bafec742SSukumar Swaminathan 	if (tx_mode == USE_DMA) {
5408bafec742SSukumar Swaminathan 		msg_len = total_len;
5409bafec742SSukumar Swaminathan 
5410bafec742SSukumar Swaminathan 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
5411bafec742SSukumar Swaminathan 		mac_iocb_ptr->tid = tx_ring->prod_idx;
5412bafec742SSukumar Swaminathan 		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
5413bafec742SSukumar Swaminathan 		mac_iocb_ptr->txq_idx = tx_ring->wq_id;
5414bafec742SSukumar Swaminathan 
5415bafec742SSukumar Swaminathan 		tx_entry = &mac_iocb_ptr->oal_entry[0];
5416bafec742SSukumar Swaminathan 		oal_entry = NULL;
5417bafec742SSukumar Swaminathan 
5418bafec742SSukumar Swaminathan 		for (tp = mp, oal_entries = j = 0; tp != NULL;
5419bafec742SSukumar Swaminathan 		    tp = tp->b_cont) {
5420bafec742SSukumar Swaminathan 			/* if too many tx dma handles needed */
5421bafec742SSukumar Swaminathan 			if (j >= QL_MAX_TX_DMA_HANDLES) {
5422bafec742SSukumar Swaminathan 				tx_ring->tx_no_dma_handle++;
5423bafec742SSukumar Swaminathan 				if (!force_pullup) {
5424bafec742SSukumar Swaminathan 					force_pullup = 1;
5425bafec742SSukumar Swaminathan 					goto do_pullup;
5426bafec742SSukumar Swaminathan 				} else {
5427bafec742SSukumar Swaminathan 					goto bad;
5428bafec742SSukumar Swaminathan 				}
5429bafec742SSukumar Swaminathan 			}
5430bafec742SSukumar Swaminathan 			nbyte = (uint16_t)MBLKL(tp);
5431bafec742SSukumar Swaminathan 			if (nbyte == 0)
5432bafec742SSukumar Swaminathan 				continue;
5433bafec742SSukumar Swaminathan 
5434bafec742SSukumar Swaminathan 			status = ddi_dma_addr_bind_handle(
5435bafec742SSukumar Swaminathan 			    tx_cb->tx_dma_handle[j], NULL,
5436bafec742SSukumar Swaminathan 			    (caddr_t)tp->b_rptr, nbyte,
5437bafec742SSukumar Swaminathan 			    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
5438bafec742SSukumar Swaminathan 			    0, &dma_cookie, &ncookies);
5439bafec742SSukumar Swaminathan 
5440bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX, ("map sending data segment: %d, "
5441bafec742SSukumar Swaminathan 			    "length: %d, spans in %d cookies\n",
5442bafec742SSukumar Swaminathan 			    j, nbyte, ncookies));
5443bafec742SSukumar Swaminathan 
5444bafec742SSukumar Swaminathan 			if (status != DDI_DMA_MAPPED) {
5445bafec742SSukumar Swaminathan 				goto bad;
5446bafec742SSukumar Swaminathan 			}
5447bafec742SSukumar Swaminathan 			/*
5448bafec742SSukumar Swaminathan 			 * Each fragment can span several cookies. One cookie
5449bafec742SSukumar Swaminathan 			 * will use one tx descriptor to transmit.
5450bafec742SSukumar Swaminathan 			 */
5451bafec742SSukumar Swaminathan 			for (i = ncookies; i > 0; i--, tx_entry++,
5452bafec742SSukumar Swaminathan 			    oal_entries++) {
5453bafec742SSukumar Swaminathan 				/*
5454bafec742SSukumar Swaminathan 				 * The number of TX descriptors that can be
5455bafec742SSukumar Swaminathan 				 *  saved in tx iocb and oal list is limited
5456bafec742SSukumar Swaminathan 				 */
5457bafec742SSukumar Swaminathan 				if (oal_entries > max_oal_entries) {
5458bafec742SSukumar Swaminathan 					tx_ring->tx_no_dma_cookie++;
5459bafec742SSukumar Swaminathan 					if (!force_pullup) {
5460bafec742SSukumar Swaminathan 						force_pullup = 1;
5461bafec742SSukumar Swaminathan 						goto do_pullup;
5462bafec742SSukumar Swaminathan 					} else {
5463bafec742SSukumar Swaminathan 						goto bad;
5464bafec742SSukumar Swaminathan 					}
5465bafec742SSukumar Swaminathan 				}
5466bafec742SSukumar Swaminathan 
5467bafec742SSukumar Swaminathan 				if ((oal_entries == TX_DESC_PER_IOCB) &&
5468bafec742SSukumar Swaminathan 				    !oal_entry) {
5469bafec742SSukumar Swaminathan 					/*
5470bafec742SSukumar Swaminathan 					 * Time to switch to an oal list
5471bafec742SSukumar Swaminathan 					 * The last entry should be copied
5472bafec742SSukumar Swaminathan 					 * to first entry in the oal list
5473bafec742SSukumar Swaminathan 					 */
5474bafec742SSukumar Swaminathan 					oal_entry = tx_cb->oal;
5475bafec742SSukumar Swaminathan 					tx_entry =
5476bafec742SSukumar Swaminathan 					    &mac_iocb_ptr->oal_entry[
5477bafec742SSukumar Swaminathan 					    TX_DESC_PER_IOCB-1];
5478bafec742SSukumar Swaminathan 					bcopy(tx_entry, oal_entry,
5479bafec742SSukumar Swaminathan 					    sizeof (*oal_entry));
5480bafec742SSukumar Swaminathan 
5481bafec742SSukumar Swaminathan 					/*
5482bafec742SSukumar Swaminathan 					 * last entry should be updated to
5483bafec742SSukumar Swaminathan 					 * point to the extended oal list itself
5484bafec742SSukumar Swaminathan 					 */
5485bafec742SSukumar Swaminathan 					tx_entry->buf_addr_low =
5486bafec742SSukumar Swaminathan 					    cpu_to_le32(
5487bafec742SSukumar Swaminathan 					    LS_64BITS(tx_cb->oal_dma_addr));
5488bafec742SSukumar Swaminathan 					tx_entry->buf_addr_high =
5489bafec742SSukumar Swaminathan 					    cpu_to_le32(
5490bafec742SSukumar Swaminathan 					    MS_64BITS(tx_cb->oal_dma_addr));
5491bafec742SSukumar Swaminathan 					/*
5492bafec742SSukumar Swaminathan 					 * Point tx_entry to the oal list
5493bafec742SSukumar Swaminathan 					 * second entry
5494bafec742SSukumar Swaminathan 					 */
5495bafec742SSukumar Swaminathan 					tx_entry = &oal_entry[1];
5496bafec742SSukumar Swaminathan 				}
5497bafec742SSukumar Swaminathan 
5498bafec742SSukumar Swaminathan 				tx_entry->buf_len =
5499bafec742SSukumar Swaminathan 				    (uint32_t)cpu_to_le32(dma_cookie.dmac_size);
5500bafec742SSukumar Swaminathan 				phys_addr = dma_cookie.dmac_laddress;
5501bafec742SSukumar Swaminathan 				tx_entry->buf_addr_low =
5502bafec742SSukumar Swaminathan 				    cpu_to_le32(LS_64BITS(phys_addr));
5503bafec742SSukumar Swaminathan 				tx_entry->buf_addr_high =
5504bafec742SSukumar Swaminathan 				    cpu_to_le32(MS_64BITS(phys_addr));
5505bafec742SSukumar Swaminathan 
5506bafec742SSukumar Swaminathan 				last_oal_entry = tx_entry;
5507bafec742SSukumar Swaminathan 
5508bafec742SSukumar Swaminathan 				if (i > 1)
5509bafec742SSukumar Swaminathan 					ddi_dma_nextcookie(
5510bafec742SSukumar Swaminathan 					    tx_cb->tx_dma_handle[j],
5511bafec742SSukumar Swaminathan 					    &dma_cookie);
5512bafec742SSukumar Swaminathan 			}
5513bafec742SSukumar Swaminathan 			j++;
5514bafec742SSukumar Swaminathan 		}
5515bafec742SSukumar Swaminathan 		/*
5516bafec742SSukumar Swaminathan 		 * if OAL is used, the last oal entry in tx iocb indicates
5517bafec742SSukumar Swaminathan 		 * number of additional address/len pairs in OAL
5518bafec742SSukumar Swaminathan 		 */
5519bafec742SSukumar Swaminathan 		if (oal_entries > TX_DESC_PER_IOCB) {
5520bafec742SSukumar Swaminathan 			tx_entry = &mac_iocb_ptr->oal_entry[TX_DESC_PER_IOCB-1];
5521bafec742SSukumar Swaminathan 			tx_entry->buf_len = (uint32_t)
5522bafec742SSukumar Swaminathan 			    (cpu_to_le32((sizeof (struct oal_entry) *
5523bafec742SSukumar Swaminathan 			    (oal_entries -TX_DESC_PER_IOCB+1))|OAL_CONT_ENTRY));
5524bafec742SSukumar Swaminathan 		}
5525bafec742SSukumar Swaminathan 		last_oal_entry->buf_len = cpu_to_le32(
5526bafec742SSukumar Swaminathan 		    le32_to_cpu(last_oal_entry->buf_len)|OAL_LAST_ENTRY);
5527bafec742SSukumar Swaminathan 
5528bafec742SSukumar Swaminathan 		tx_cb->tx_dma_handle_used = j;
5529bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("total tx_dma_handle_used %d cookies %d \n",
5530bafec742SSukumar Swaminathan 		    j, oal_entries));
5531bafec742SSukumar Swaminathan 
5532bafec742SSukumar Swaminathan 		bp = (caddr_t)mp->b_rptr;
5533bafec742SSukumar Swaminathan 	}
5534bafec742SSukumar Swaminathan 	if (tx_mode == USE_COPY) {
5535bafec742SSukumar Swaminathan 		bp = tx_cb->copy_buffer;
5536bafec742SSukumar Swaminathan 		off = 0;
5537bafec742SSukumar Swaminathan 		nbyte = 0;
5538bafec742SSukumar Swaminathan 		frags = 0;
5539bafec742SSukumar Swaminathan 		/*
5540bafec742SSukumar Swaminathan 		 * Copy up to tx_buf_len of the transmit data
5541bafec742SSukumar Swaminathan 		 * from mp to tx buffer
5542bafec742SSukumar Swaminathan 		 */
5543bafec742SSukumar Swaminathan 		for (tp = mp; tp != NULL; tp = tp->b_cont) {
5544bafec742SSukumar Swaminathan 			nbyte = MBLKL(tp);
5545bafec742SSukumar Swaminathan 			if ((off + nbyte) <= tx_buf_len) {
5546bafec742SSukumar Swaminathan 				bcopy(tp->b_rptr, &bp[off], nbyte);
5547bafec742SSukumar Swaminathan 				off += nbyte;
5548bafec742SSukumar Swaminathan 				frags ++;
5549bafec742SSukumar Swaminathan 			}
5550bafec742SSukumar Swaminathan 		}
5551bafec742SSukumar Swaminathan 
5552bafec742SSukumar Swaminathan 		msg_len = off;
5553bafec742SSukumar Swaminathan 
5554bafec742SSukumar Swaminathan 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
5555bafec742SSukumar Swaminathan 		mac_iocb_ptr->tid = tx_ring->prod_idx;
5556bafec742SSukumar Swaminathan 		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
5557bafec742SSukumar Swaminathan 		mac_iocb_ptr->txq_idx = tx_ring->wq_id;
5558bafec742SSukumar Swaminathan 
5559bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("Copy Mode:actual sent data length is: %d, "
5560bafec742SSukumar Swaminathan 		    "from %d segaments\n", msg_len, frags));
5561bafec742SSukumar Swaminathan 
5562bafec742SSukumar Swaminathan 		phys_addr = tx_cb->copy_buffer_dma_addr;
5563bafec742SSukumar Swaminathan 		phy_addr_low = cpu_to_le32(LS_64BITS(phys_addr));
5564bafec742SSukumar Swaminathan 		phy_addr_high = cpu_to_le32(MS_64BITS(phys_addr));
5565bafec742SSukumar Swaminathan 
5566bafec742SSukumar Swaminathan 		QL_DUMP(DBG_TX, "\t requested sending data:\n",
5567bafec742SSukumar Swaminathan 		    (uint8_t *)tx_cb->copy_buffer, 8, total_len);
5568bafec742SSukumar Swaminathan 
5569bafec742SSukumar Swaminathan 		mac_iocb_ptr->oal_entry[0].buf_len = (uint32_t)
5570bafec742SSukumar Swaminathan 		    cpu_to_le32(msg_len | OAL_LAST_ENTRY);
5571bafec742SSukumar Swaminathan 		mac_iocb_ptr->oal_entry[0].buf_addr_low  = phy_addr_low;
5572bafec742SSukumar Swaminathan 		mac_iocb_ptr->oal_entry[0].buf_addr_high = phy_addr_high;
5573bafec742SSukumar Swaminathan 
5574bafec742SSukumar Swaminathan 		freemsg(mp); /* no need, we have copied */
5575bafec742SSukumar Swaminathan 		tx_cb->mp = NULL;
5576bafec742SSukumar Swaminathan 	} /* End of Copy Mode */
5577bafec742SSukumar Swaminathan 
5578bafec742SSukumar Swaminathan 	/* Do TSO/LSO on TCP packet? */
5579bafec742SSukumar Swaminathan 	if (use_lso && mss) {
5580bafec742SSukumar Swaminathan 		ql_hw_lso_setup(qlge, mss, bp, mac_iocb_ptr);
5581bafec742SSukumar Swaminathan 	} else if (pflags & qlge->chksum_cap) {
5582bafec742SSukumar Swaminathan 		/* Do checksum offloading */
5583bafec742SSukumar Swaminathan 		ql_hw_csum_setup(qlge, pflags, bp, mac_iocb_ptr);
5584bafec742SSukumar Swaminathan 	}
5585bafec742SSukumar Swaminathan 
5586bafec742SSukumar Swaminathan 	/* let device know the latest outbound IOCB */
5587bafec742SSukumar Swaminathan 	(void) ddi_dma_sync(tx_ring->wq_dma.dma_handle,
5588bafec742SSukumar Swaminathan 	    (off_t)((uintptr_t)mac_iocb_ptr - (uintptr_t)tx_ring->wq_dma.vaddr),
5589bafec742SSukumar Swaminathan 	    (size_t)sizeof (*mac_iocb_ptr), DDI_DMA_SYNC_FORDEV);
5590bafec742SSukumar Swaminathan 
5591bafec742SSukumar Swaminathan 	if (tx_mode == USE_DMA) {
5592bafec742SSukumar Swaminathan 		/* let device know the latest outbound OAL if necessary */
5593bafec742SSukumar Swaminathan 		if (oal_entries > TX_DESC_PER_IOCB) {
5594bafec742SSukumar Swaminathan 			(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
5595bafec742SSukumar Swaminathan 			    (off_t)0,
5596bafec742SSukumar Swaminathan 			    (sizeof (struct oal_entry) *
5597bafec742SSukumar Swaminathan 			    (oal_entries -TX_DESC_PER_IOCB+1)),
5598bafec742SSukumar Swaminathan 			    DDI_DMA_SYNC_FORDEV);
5599bafec742SSukumar Swaminathan 		}
5600bafec742SSukumar Swaminathan 	} else { /* for USE_COPY mode, tx buffer has changed */
5601bafec742SSukumar Swaminathan 		/* let device know the latest change */
5602bafec742SSukumar Swaminathan 		(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
5603bafec742SSukumar Swaminathan 		/* copy buf offset */
5604bafec742SSukumar Swaminathan 		    (off_t)(sizeof (oal_entry) * MAX_SG_ELEMENTS),
5605bafec742SSukumar Swaminathan 		    msg_len, DDI_DMA_SYNC_FORDEV);
5606bafec742SSukumar Swaminathan 	}
5607bafec742SSukumar Swaminathan 
5608bafec742SSukumar Swaminathan 	/* save how the packet was sent */
5609bafec742SSukumar Swaminathan 	tx_cb->tx_type = tx_mode;
5610bafec742SSukumar Swaminathan 
5611bafec742SSukumar Swaminathan 	QL_DUMP_REQ_PKT(qlge, mac_iocb_ptr, tx_cb->oal, oal_entries);
5612bafec742SSukumar Swaminathan 	/* reduce the number of available tx slot */
5613bafec742SSukumar Swaminathan 	atomic_dec_32(&tx_ring->tx_free_count);
5614bafec742SSukumar Swaminathan 
5615bafec742SSukumar Swaminathan 	tx_ring->prod_idx++;
5616bafec742SSukumar Swaminathan 	if (tx_ring->prod_idx >= tx_ring->wq_len)
5617bafec742SSukumar Swaminathan 		tx_ring->prod_idx = 0;
5618bafec742SSukumar Swaminathan 
5619bafec742SSukumar Swaminathan 	now = ddi_get_lbolt();
5620bafec742SSukumar Swaminathan 	qlge->last_tx_time = now;
5621bafec742SSukumar Swaminathan 
5622bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
5623bafec742SSukumar Swaminathan 
5624bafec742SSukumar Swaminathan bad:
5625bafec742SSukumar Swaminathan 	/*
5626bafec742SSukumar Swaminathan 	 * if for any reason driver can not send, delete
5627bafec742SSukumar Swaminathan 	 * the message pointer, mp
5628bafec742SSukumar Swaminathan 	 */
5629bafec742SSukumar Swaminathan 	now = ddi_get_lbolt();
5630bafec742SSukumar Swaminathan 	freemsg(mp);
5631bafec742SSukumar Swaminathan 	mp = NULL;
5632accf27a5SSukumar Swaminathan 	tx_cb->mp = NULL;
5633bafec742SSukumar Swaminathan 	for (i = 0; i < j; i++)
5634bafec742SSukumar Swaminathan 		(void) ddi_dma_unbind_handle(tx_cb->tx_dma_handle[i]);
5635bafec742SSukumar Swaminathan 
5636bafec742SSukumar Swaminathan 	QL_PRINT(DBG_TX, ("%s(%d) failed at 0x%x",
5637bafec742SSukumar Swaminathan 	    __func__, qlge->instance, (int)now));
5638bafec742SSukumar Swaminathan 
5639bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
5640bafec742SSukumar Swaminathan }
5641bafec742SSukumar Swaminathan 
5642bafec742SSukumar Swaminathan 
5643bafec742SSukumar Swaminathan /*
5644bafec742SSukumar Swaminathan  * Initializes hardware and driver software flags before the driver
5645bafec742SSukumar Swaminathan  * is finally ready to work.
5646bafec742SSukumar Swaminathan  */
5647bafec742SSukumar Swaminathan int
5648bafec742SSukumar Swaminathan ql_do_start(qlge_t *qlge)
5649bafec742SSukumar Swaminathan {
5650bafec742SSukumar Swaminathan 	int i;
5651bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
5652bafec742SSukumar Swaminathan 	uint16_t lbq_buf_size;
5653bafec742SSukumar Swaminathan 	int rings_done;
5654bafec742SSukumar Swaminathan 
5655bafec742SSukumar Swaminathan 	ASSERT(qlge != NULL);
5656bafec742SSukumar Swaminathan 
5657bafec742SSukumar Swaminathan 	mutex_enter(&qlge->hw_mutex);
5658bafec742SSukumar Swaminathan 
5659bafec742SSukumar Swaminathan 	/* Reset adapter */
56600662fbf4SSukumar Swaminathan 	(void) ql_asic_reset(qlge);
5661bafec742SSukumar Swaminathan 
5662bafec742SSukumar Swaminathan 	lbq_buf_size = (uint16_t)
5663accf27a5SSukumar Swaminathan 	    ((qlge->mtu == ETHERMTU)? LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE);
5664bafec742SSukumar Swaminathan 	if (qlge->rx_ring[0].lbq_buf_size != lbq_buf_size) {
5665bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
5666bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "realloc buffers old: %d new: %d\n",
5667bafec742SSukumar Swaminathan 		    qlge->rx_ring[0].lbq_buf_size, lbq_buf_size);
5668bafec742SSukumar Swaminathan #endif
5669bafec742SSukumar Swaminathan 		/*
5670bafec742SSukumar Swaminathan 		 * Check if any ring has buffers still with upper layers
5671bafec742SSukumar Swaminathan 		 * If buffers are pending with upper layers, we use the
5672bafec742SSukumar Swaminathan 		 * existing buffers and don't reallocate new ones
5673bafec742SSukumar Swaminathan 		 * Unfortunately there is no way to evict buffers from
5674bafec742SSukumar Swaminathan 		 * upper layers. Using buffers with the current size may
5675bafec742SSukumar Swaminathan 		 * cause slightly sub-optimal performance, but that seems
5676bafec742SSukumar Swaminathan 		 * to be the easiest way to handle this situation.
5677bafec742SSukumar Swaminathan 		 */
5678bafec742SSukumar Swaminathan 		rings_done = 0;
5679bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->rx_ring_count; i++) {
5680bafec742SSukumar Swaminathan 			rx_ring = &qlge->rx_ring[i];
5681bafec742SSukumar Swaminathan 			if (rx_ring->rx_indicate == 0)
5682bafec742SSukumar Swaminathan 				rings_done++;
5683bafec742SSukumar Swaminathan 			else
5684bafec742SSukumar Swaminathan 				break;
5685bafec742SSukumar Swaminathan 		}
5686bafec742SSukumar Swaminathan 		/*
5687bafec742SSukumar Swaminathan 		 * No buffers pending with upper layers;
5688bafec742SSukumar Swaminathan 		 * reallocte them for new MTU size
5689bafec742SSukumar Swaminathan 		 */
5690bafec742SSukumar Swaminathan 		if (rings_done >= qlge->rx_ring_count) {
5691bafec742SSukumar Swaminathan 			/* free large buffer pool */
5692bafec742SSukumar Swaminathan 			for (i = 0; i < qlge->rx_ring_count; i++) {
5693bafec742SSukumar Swaminathan 				rx_ring = &qlge->rx_ring[i];
5694bafec742SSukumar Swaminathan 				if (rx_ring->type != TX_Q) {
5695bafec742SSukumar Swaminathan 					ql_free_sbq_buffers(rx_ring);
5696bafec742SSukumar Swaminathan 					ql_free_lbq_buffers(rx_ring);
5697bafec742SSukumar Swaminathan 				}
5698bafec742SSukumar Swaminathan 			}
5699bafec742SSukumar Swaminathan 			/* reallocate large buffer pool */
5700bafec742SSukumar Swaminathan 			for (i = 0; i < qlge->rx_ring_count; i++) {
5701bafec742SSukumar Swaminathan 				rx_ring = &qlge->rx_ring[i];
5702bafec742SSukumar Swaminathan 				if (rx_ring->type != TX_Q) {
57030662fbf4SSukumar Swaminathan 					(void) ql_alloc_sbufs(qlge, rx_ring);
57040662fbf4SSukumar Swaminathan 					(void) ql_alloc_lbufs(qlge, rx_ring);
5705bafec742SSukumar Swaminathan 				}
5706bafec742SSukumar Swaminathan 			}
5707bafec742SSukumar Swaminathan 		}
5708bafec742SSukumar Swaminathan 	}
5709bafec742SSukumar Swaminathan 
5710bafec742SSukumar Swaminathan 	if (ql_bringup_adapter(qlge) != DDI_SUCCESS) {
5711bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "qlge bringup adapter failed");
5712bafec742SSukumar Swaminathan 		mutex_exit(&qlge->hw_mutex);
5713accf27a5SSukumar Swaminathan 		if (qlge->fm_enable) {
5714accf27a5SSukumar Swaminathan 			atomic_or_32(&qlge->flags, ADAPTER_ERROR);
5715accf27a5SSukumar Swaminathan 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_LOST);
5716accf27a5SSukumar Swaminathan 		}
5717bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
5718bafec742SSukumar Swaminathan 	}
5719bafec742SSukumar Swaminathan 
5720bafec742SSukumar Swaminathan 	mutex_exit(&qlge->hw_mutex);
5721accf27a5SSukumar Swaminathan 	/* if adapter is up successfully but was bad before */
5722accf27a5SSukumar Swaminathan 	if (qlge->flags & ADAPTER_ERROR) {
5723accf27a5SSukumar Swaminathan 		atomic_and_32(&qlge->flags, ~ADAPTER_ERROR);
5724accf27a5SSukumar Swaminathan 		if (qlge->fm_enable) {
5725accf27a5SSukumar Swaminathan 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_RESTORED);
5726accf27a5SSukumar Swaminathan 		}
5727accf27a5SSukumar Swaminathan 	}
5728bafec742SSukumar Swaminathan 
5729bafec742SSukumar Swaminathan 	/* Get current link state */
5730bafec742SSukumar Swaminathan 	qlge->port_link_state = ql_get_link_state(qlge);
5731bafec742SSukumar Swaminathan 
5732bafec742SSukumar Swaminathan 	if (qlge->port_link_state == LS_UP) {
5733bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5734bafec742SSukumar Swaminathan 		    __func__, qlge->instance));
5735bafec742SSukumar Swaminathan 		/* If driver detects a carrier on */
5736bafec742SSukumar Swaminathan 		CARRIER_ON(qlge);
5737bafec742SSukumar Swaminathan 	} else {
5738bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5739bafec742SSukumar Swaminathan 		    __func__, qlge->instance));
5740bafec742SSukumar Swaminathan 		/* If driver detects a lack of carrier */
5741bafec742SSukumar Swaminathan 		CARRIER_OFF(qlge);
5742bafec742SSukumar Swaminathan 	}
5743bafec742SSukumar Swaminathan 	qlge->mac_flags = QL_MAC_STARTED;
5744bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
5745bafec742SSukumar Swaminathan }
5746bafec742SSukumar Swaminathan 
5747bafec742SSukumar Swaminathan /*
5748bafec742SSukumar Swaminathan  * Stop currently running driver
5749bafec742SSukumar Swaminathan  * Driver needs to stop routing new packets to driver and wait until
5750bafec742SSukumar Swaminathan  * all pending tx/rx buffers to be free-ed.
5751bafec742SSukumar Swaminathan  */
5752bafec742SSukumar Swaminathan int
5753bafec742SSukumar Swaminathan ql_do_stop(qlge_t *qlge)
5754bafec742SSukumar Swaminathan {
5755bafec742SSukumar Swaminathan 	int rc = DDI_FAILURE;
5756bafec742SSukumar Swaminathan 	uint32_t i, j, k;
5757bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc, *lbq_desc;
5758bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
5759bafec742SSukumar Swaminathan 
5760bafec742SSukumar Swaminathan 	ASSERT(qlge != NULL);
5761bafec742SSukumar Swaminathan 
5762bafec742SSukumar Swaminathan 	CARRIER_OFF(qlge);
5763bafec742SSukumar Swaminathan 
5764bafec742SSukumar Swaminathan 	rc = ql_bringdown_adapter(qlge);
5765bafec742SSukumar Swaminathan 	if (rc != DDI_SUCCESS) {
5766bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "qlge bringdown adapter failed.");
5767bafec742SSukumar Swaminathan 	} else
5768bafec742SSukumar Swaminathan 		rc = DDI_SUCCESS;
5769bafec742SSukumar Swaminathan 
5770bafec742SSukumar Swaminathan 	for (k = 0; k < qlge->rx_ring_count; k++) {
5771bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[k];
5772bafec742SSukumar Swaminathan 		if (rx_ring->type != TX_Q) {
5773bafec742SSukumar Swaminathan 			j = rx_ring->lbq_use_head;
5774bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
5775bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "ring %d: move %d lbufs in use list"
5776bafec742SSukumar Swaminathan 			    " to free list %d\n total %d\n",
5777bafec742SSukumar Swaminathan 			    k, rx_ring->lbuf_in_use_count,
5778bafec742SSukumar Swaminathan 			    rx_ring->lbuf_free_count,
5779bafec742SSukumar Swaminathan 			    rx_ring->lbuf_in_use_count +
5780bafec742SSukumar Swaminathan 			    rx_ring->lbuf_free_count);
5781bafec742SSukumar Swaminathan #endif
5782bafec742SSukumar Swaminathan 			for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
5783bafec742SSukumar Swaminathan 				lbq_desc = rx_ring->lbuf_in_use[j];
5784bafec742SSukumar Swaminathan 				j++;
5785bafec742SSukumar Swaminathan 				if (j >= rx_ring->lbq_len) {
5786bafec742SSukumar Swaminathan 					j = 0;
5787bafec742SSukumar Swaminathan 				}
5788bafec742SSukumar Swaminathan 				if (lbq_desc->mp) {
5789bafec742SSukumar Swaminathan 					atomic_inc_32(&rx_ring->rx_indicate);
5790bafec742SSukumar Swaminathan 					freemsg(lbq_desc->mp);
5791bafec742SSukumar Swaminathan 				}
5792bafec742SSukumar Swaminathan 			}
5793bafec742SSukumar Swaminathan 			rx_ring->lbq_use_head = j;
5794bafec742SSukumar Swaminathan 			rx_ring->lbq_use_tail = j;
5795bafec742SSukumar Swaminathan 			rx_ring->lbuf_in_use_count = 0;
5796bafec742SSukumar Swaminathan 			j = rx_ring->sbq_use_head;
5797bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
5798bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "ring %d: move %d sbufs in use list,"
5799bafec742SSukumar Swaminathan 			    " to free list %d\n total %d \n",
5800bafec742SSukumar Swaminathan 			    k, rx_ring->sbuf_in_use_count,
5801bafec742SSukumar Swaminathan 			    rx_ring->sbuf_free_count,
5802bafec742SSukumar Swaminathan 			    rx_ring->sbuf_in_use_count +
5803bafec742SSukumar Swaminathan 			    rx_ring->sbuf_free_count);
5804bafec742SSukumar Swaminathan #endif
5805bafec742SSukumar Swaminathan 			for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
5806bafec742SSukumar Swaminathan 				sbq_desc = rx_ring->sbuf_in_use[j];
5807bafec742SSukumar Swaminathan 				j++;
5808bafec742SSukumar Swaminathan 				if (j >= rx_ring->sbq_len) {
5809bafec742SSukumar Swaminathan 					j = 0;
5810bafec742SSukumar Swaminathan 				}
5811bafec742SSukumar Swaminathan 				if (sbq_desc->mp) {
5812bafec742SSukumar Swaminathan 					atomic_inc_32(&rx_ring->rx_indicate);
5813bafec742SSukumar Swaminathan 					freemsg(sbq_desc->mp);
5814bafec742SSukumar Swaminathan 				}
5815bafec742SSukumar Swaminathan 			}
5816bafec742SSukumar Swaminathan 			rx_ring->sbq_use_head = j;
5817bafec742SSukumar Swaminathan 			rx_ring->sbq_use_tail = j;
5818bafec742SSukumar Swaminathan 			rx_ring->sbuf_in_use_count = 0;
5819bafec742SSukumar Swaminathan 		}
5820bafec742SSukumar Swaminathan 	}
5821bafec742SSukumar Swaminathan 
5822bafec742SSukumar Swaminathan 	qlge->mac_flags = QL_MAC_STOPPED;
5823bafec742SSukumar Swaminathan 
5824bafec742SSukumar Swaminathan 	return (rc);
5825bafec742SSukumar Swaminathan }
5826bafec742SSukumar Swaminathan 
5827bafec742SSukumar Swaminathan /*
5828bafec742SSukumar Swaminathan  * Support
5829bafec742SSukumar Swaminathan  */
5830bafec742SSukumar Swaminathan 
5831bafec742SSukumar Swaminathan void
5832bafec742SSukumar Swaminathan ql_disable_isr(qlge_t *qlge)
5833bafec742SSukumar Swaminathan {
5834bafec742SSukumar Swaminathan 	/*
5835bafec742SSukumar Swaminathan 	 * disable the hardware interrupt
5836bafec742SSukumar Swaminathan 	 */
5837bafec742SSukumar Swaminathan 	ISP_DISABLE_GLOBAL_INTRS(qlge);
5838bafec742SSukumar Swaminathan 
5839bafec742SSukumar Swaminathan 	qlge->flags &= ~INTERRUPTS_ENABLED;
5840bafec742SSukumar Swaminathan }
5841bafec742SSukumar Swaminathan 
5842bafec742SSukumar Swaminathan 
5843bafec742SSukumar Swaminathan 
5844bafec742SSukumar Swaminathan /*
5845bafec742SSukumar Swaminathan  * busy wait for 'usecs' microseconds.
5846bafec742SSukumar Swaminathan  */
5847bafec742SSukumar Swaminathan void
5848bafec742SSukumar Swaminathan qlge_delay(clock_t usecs)
5849bafec742SSukumar Swaminathan {
5850bafec742SSukumar Swaminathan 	drv_usecwait(usecs);
5851bafec742SSukumar Swaminathan }
5852bafec742SSukumar Swaminathan 
5853bafec742SSukumar Swaminathan /*
5854bafec742SSukumar Swaminathan  * retrieve firmware details.
5855bafec742SSukumar Swaminathan  */
5856bafec742SSukumar Swaminathan 
5857bafec742SSukumar Swaminathan pci_cfg_t *
5858bafec742SSukumar Swaminathan ql_get_pci_config(qlge_t *qlge)
5859bafec742SSukumar Swaminathan {
5860bafec742SSukumar Swaminathan 	return (&(qlge->pci_cfg));
5861bafec742SSukumar Swaminathan }
5862bafec742SSukumar Swaminathan 
5863bafec742SSukumar Swaminathan /*
5864bafec742SSukumar Swaminathan  * Get current Link status
5865bafec742SSukumar Swaminathan  */
5866bafec742SSukumar Swaminathan static uint32_t
5867bafec742SSukumar Swaminathan ql_get_link_state(qlge_t *qlge)
5868bafec742SSukumar Swaminathan {
5869bafec742SSukumar Swaminathan 	uint32_t bitToCheck = 0;
5870bafec742SSukumar Swaminathan 	uint32_t temp, linkState;
5871bafec742SSukumar Swaminathan 
5872bafec742SSukumar Swaminathan 	if (qlge->func_number == qlge->fn0_net) {
5873bafec742SSukumar Swaminathan 		bitToCheck = STS_PL0;
5874bafec742SSukumar Swaminathan 	} else {
5875bafec742SSukumar Swaminathan 		bitToCheck = STS_PL1;
5876bafec742SSukumar Swaminathan 	}
5877bafec742SSukumar Swaminathan 	temp = ql_read_reg(qlge, REG_STATUS);
5878bafec742SSukumar Swaminathan 	QL_PRINT(DBG_GLD, ("%s(%d) chip status reg: 0x%x\n",
5879bafec742SSukumar Swaminathan 	    __func__, qlge->instance, temp));
5880bafec742SSukumar Swaminathan 
5881bafec742SSukumar Swaminathan 	if (temp & bitToCheck) {
5882bafec742SSukumar Swaminathan 		linkState = LS_UP;
5883bafec742SSukumar Swaminathan 	} else {
5884bafec742SSukumar Swaminathan 		linkState = LS_DOWN;
5885bafec742SSukumar Swaminathan 	}
5886bafec742SSukumar Swaminathan 	if (CFG_IST(qlge, CFG_CHIP_8100)) {
5887bafec742SSukumar Swaminathan 		/* for Schultz, link Speed is fixed to 10G, full duplex */
5888bafec742SSukumar Swaminathan 		qlge->speed  = SPEED_10G;
5889bafec742SSukumar Swaminathan 		qlge->duplex = 1;
5890bafec742SSukumar Swaminathan 	}
5891bafec742SSukumar Swaminathan 	return (linkState);
5892bafec742SSukumar Swaminathan }
5893bafec742SSukumar Swaminathan /*
5894bafec742SSukumar Swaminathan  * Get current link status and report to OS
5895bafec742SSukumar Swaminathan  */
5896bafec742SSukumar Swaminathan static void
5897bafec742SSukumar Swaminathan ql_get_and_report_link_state(qlge_t *qlge)
5898bafec742SSukumar Swaminathan {
5899bafec742SSukumar Swaminathan 	uint32_t cur_link_state;
5900bafec742SSukumar Swaminathan 
5901bafec742SSukumar Swaminathan 	/* Get current link state */
5902bafec742SSukumar Swaminathan 	cur_link_state = ql_get_link_state(qlge);
5903bafec742SSukumar Swaminathan 	/* if link state has changed */
5904bafec742SSukumar Swaminathan 	if (cur_link_state != qlge->port_link_state) {
5905bafec742SSukumar Swaminathan 
5906bafec742SSukumar Swaminathan 		qlge->port_link_state = cur_link_state;
5907bafec742SSukumar Swaminathan 
5908bafec742SSukumar Swaminathan 		if (qlge->port_link_state == LS_UP) {
5909bafec742SSukumar Swaminathan 			QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5910bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
5911bafec742SSukumar Swaminathan 			/* If driver detects a carrier on */
5912bafec742SSukumar Swaminathan 			CARRIER_ON(qlge);
5913bafec742SSukumar Swaminathan 		} else {
5914bafec742SSukumar Swaminathan 			QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5915bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
5916bafec742SSukumar Swaminathan 			/* If driver detects a lack of carrier */
5917bafec742SSukumar Swaminathan 			CARRIER_OFF(qlge);
5918bafec742SSukumar Swaminathan 		}
5919bafec742SSukumar Swaminathan 	}
5920bafec742SSukumar Swaminathan }
5921bafec742SSukumar Swaminathan 
5922bafec742SSukumar Swaminathan /*
5923bafec742SSukumar Swaminathan  * timer callback function executed after timer expires
5924bafec742SSukumar Swaminathan  */
5925bafec742SSukumar Swaminathan static void
5926bafec742SSukumar Swaminathan ql_timer(void* arg)
5927bafec742SSukumar Swaminathan {
5928bafec742SSukumar Swaminathan 	ql_get_and_report_link_state((qlge_t *)arg);
5929bafec742SSukumar Swaminathan }
5930bafec742SSukumar Swaminathan 
5931bafec742SSukumar Swaminathan /*
5932bafec742SSukumar Swaminathan  * stop the running timer if activated
5933bafec742SSukumar Swaminathan  */
5934bafec742SSukumar Swaminathan static void
5935bafec742SSukumar Swaminathan ql_stop_timer(qlge_t *qlge)
5936bafec742SSukumar Swaminathan {
5937bafec742SSukumar Swaminathan 	timeout_id_t timer_id;
5938bafec742SSukumar Swaminathan 	/* Disable driver timer */
5939bafec742SSukumar Swaminathan 	if (qlge->ql_timer_timeout_id != NULL) {
5940bafec742SSukumar Swaminathan 		timer_id = qlge->ql_timer_timeout_id;
5941bafec742SSukumar Swaminathan 		qlge->ql_timer_timeout_id = NULL;
5942bafec742SSukumar Swaminathan 		(void) untimeout(timer_id);
5943bafec742SSukumar Swaminathan 	}
5944bafec742SSukumar Swaminathan }
5945bafec742SSukumar Swaminathan 
5946bafec742SSukumar Swaminathan /*
5947bafec742SSukumar Swaminathan  * stop then restart timer
5948bafec742SSukumar Swaminathan  */
5949bafec742SSukumar Swaminathan void
5950bafec742SSukumar Swaminathan ql_restart_timer(qlge_t *qlge)
5951bafec742SSukumar Swaminathan {
5952bafec742SSukumar Swaminathan 	ql_stop_timer(qlge);
5953bafec742SSukumar Swaminathan 	qlge->ql_timer_ticks = TICKS_PER_SEC / 4;
5954bafec742SSukumar Swaminathan 	qlge->ql_timer_timeout_id = timeout(ql_timer,
5955bafec742SSukumar Swaminathan 	    (void *)qlge, qlge->ql_timer_ticks);
5956bafec742SSukumar Swaminathan }
5957bafec742SSukumar Swaminathan 
5958bafec742SSukumar Swaminathan /* ************************************************************************* */
5959bafec742SSukumar Swaminathan /*
5960bafec742SSukumar Swaminathan  *		Hardware K-Stats Data Structures and Subroutines
5961bafec742SSukumar Swaminathan  */
5962bafec742SSukumar Swaminathan /* ************************************************************************* */
5963bafec742SSukumar Swaminathan static const ql_ksindex_t ql_kstats_hw[] = {
5964bafec742SSukumar Swaminathan 	/* PCI related hardware information */
5965bafec742SSukumar Swaminathan 	{ 0, "Vendor Id"			},
5966bafec742SSukumar Swaminathan 	{ 1, "Device Id"			},
5967bafec742SSukumar Swaminathan 	{ 2, "Command"				},
5968bafec742SSukumar Swaminathan 	{ 3, "Status"				},
5969bafec742SSukumar Swaminathan 	{ 4, "Revision Id"			},
5970bafec742SSukumar Swaminathan 	{ 5, "Cache Line Size"			},
5971bafec742SSukumar Swaminathan 	{ 6, "Latency Timer"			},
5972bafec742SSukumar Swaminathan 	{ 7, "Header Type"			},
5973bafec742SSukumar Swaminathan 	{ 9, "I/O base addr"			},
5974bafec742SSukumar Swaminathan 	{ 10, "Control Reg Base addr low"	},
5975bafec742SSukumar Swaminathan 	{ 11, "Control Reg Base addr high"	},
5976bafec742SSukumar Swaminathan 	{ 12, "Doorbell Reg Base addr low"	},
5977bafec742SSukumar Swaminathan 	{ 13, "Doorbell Reg Base addr high"	},
5978bafec742SSukumar Swaminathan 	{ 14, "Subsystem Vendor Id"		},
5979bafec742SSukumar Swaminathan 	{ 15, "Subsystem Device ID"		},
5980bafec742SSukumar Swaminathan 	{ 16, "PCIe Device Control"		},
5981bafec742SSukumar Swaminathan 	{ 17, "PCIe Link Status"		},
5982bafec742SSukumar Swaminathan 
5983bafec742SSukumar Swaminathan 	{ -1,	NULL				},
5984bafec742SSukumar Swaminathan };
5985bafec742SSukumar Swaminathan 
5986bafec742SSukumar Swaminathan /*
5987bafec742SSukumar Swaminathan  * kstat update function for PCI registers
5988bafec742SSukumar Swaminathan  */
5989bafec742SSukumar Swaminathan static int
5990bafec742SSukumar Swaminathan ql_kstats_get_pci_regs(kstat_t *ksp, int flag)
5991bafec742SSukumar Swaminathan {
5992bafec742SSukumar Swaminathan 	qlge_t *qlge;
5993bafec742SSukumar Swaminathan 	kstat_named_t *knp;
5994bafec742SSukumar Swaminathan 
5995bafec742SSukumar Swaminathan 	if (flag != KSTAT_READ)
5996bafec742SSukumar Swaminathan 		return (EACCES);
5997bafec742SSukumar Swaminathan 
5998bafec742SSukumar Swaminathan 	qlge = ksp->ks_private;
5999bafec742SSukumar Swaminathan 	knp = ksp->ks_data;
6000bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.vendor_id;
6001bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.device_id;
6002bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.command;
6003bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.status;
6004bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.revision;
6005bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.cache_line_size;
6006bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.latency_timer;
6007bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.header_type;
6008bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.io_base_address;
6009bafec742SSukumar Swaminathan 	(knp++)->value.ui32 =
6010bafec742SSukumar Swaminathan 	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_lower;
6011bafec742SSukumar Swaminathan 	(knp++)->value.ui32 =
6012bafec742SSukumar Swaminathan 	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_upper;
6013bafec742SSukumar Swaminathan 	(knp++)->value.ui32 =
6014bafec742SSukumar Swaminathan 	    qlge->pci_cfg.pci_doorbell_mem_base_address_lower;
6015bafec742SSukumar Swaminathan 	(knp++)->value.ui32 =
6016bafec742SSukumar Swaminathan 	    qlge->pci_cfg.pci_doorbell_mem_base_address_upper;
6017bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.sub_vendor_id;
6018bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.sub_device_id;
6019bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.pcie_device_control;
6020bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.link_status;
6021bafec742SSukumar Swaminathan 
6022bafec742SSukumar Swaminathan 	return (0);
6023bafec742SSukumar Swaminathan }
6024bafec742SSukumar Swaminathan 
6025bafec742SSukumar Swaminathan static const ql_ksindex_t ql_kstats_mii[] = {
6026bafec742SSukumar Swaminathan 	/* MAC/MII related hardware information */
6027bafec742SSukumar Swaminathan 	{ 0, "mtu"},
6028bafec742SSukumar Swaminathan 
6029bafec742SSukumar Swaminathan 	{ -1, NULL},
6030bafec742SSukumar Swaminathan };
6031bafec742SSukumar Swaminathan 
6032bafec742SSukumar Swaminathan 
6033bafec742SSukumar Swaminathan /*
6034bafec742SSukumar Swaminathan  * kstat update function for MII related information.
6035bafec742SSukumar Swaminathan  */
6036bafec742SSukumar Swaminathan static int
6037bafec742SSukumar Swaminathan ql_kstats_mii_update(kstat_t *ksp, int flag)
6038bafec742SSukumar Swaminathan {
6039bafec742SSukumar Swaminathan 	qlge_t *qlge;
6040bafec742SSukumar Swaminathan 	kstat_named_t *knp;
6041bafec742SSukumar Swaminathan 
6042bafec742SSukumar Swaminathan 	if (flag != KSTAT_READ)
6043bafec742SSukumar Swaminathan 		return (EACCES);
6044bafec742SSukumar Swaminathan 
6045bafec742SSukumar Swaminathan 	qlge = ksp->ks_private;
6046bafec742SSukumar Swaminathan 	knp = ksp->ks_data;
6047bafec742SSukumar Swaminathan 
6048bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->mtu;
6049bafec742SSukumar Swaminathan 
6050bafec742SSukumar Swaminathan 	return (0);
6051bafec742SSukumar Swaminathan }
6052bafec742SSukumar Swaminathan 
6053bafec742SSukumar Swaminathan static const ql_ksindex_t ql_kstats_reg[] = {
6054bafec742SSukumar Swaminathan 	/* Register information */
6055bafec742SSukumar Swaminathan 	{ 0, "System (0x08)"			},
6056bafec742SSukumar Swaminathan 	{ 1, "Reset/Fail Over(0x0Ch"		},
6057bafec742SSukumar Swaminathan 	{ 2, "Function Specific Control(0x10)"	},
6058bafec742SSukumar Swaminathan 	{ 3, "Status (0x30)"			},
6059bafec742SSukumar Swaminathan 	{ 4, "Intr Enable (0x34)"		},
6060bafec742SSukumar Swaminathan 	{ 5, "Intr Status1 (0x3C)"		},
6061bafec742SSukumar Swaminathan 	{ 6, "Error Status (0x54)"		},
6062bafec742SSukumar Swaminathan 	{ 7, "XGMAC Flow Control(0x11C)"	},
6063bafec742SSukumar Swaminathan 	{ 8, "XGMAC Tx Pause Frames(0x230)"	},
6064bafec742SSukumar Swaminathan 	{ 9, "XGMAC Rx Pause Frames(0x388)"	},
6065bafec742SSukumar Swaminathan 	{ 10, "XGMAC Rx FIFO Drop Count(0x5B8)"	},
6066bafec742SSukumar Swaminathan 	{ 11, "interrupts actually allocated"	},
6067bafec742SSukumar Swaminathan 	{ 12, "interrupts on rx ring 0"		},
6068bafec742SSukumar Swaminathan 	{ 13, "interrupts on rx ring 1"		},
6069bafec742SSukumar Swaminathan 	{ 14, "interrupts on rx ring 2"		},
6070bafec742SSukumar Swaminathan 	{ 15, "interrupts on rx ring 3"		},
6071bafec742SSukumar Swaminathan 	{ 16, "interrupts on rx ring 4"		},
6072bafec742SSukumar Swaminathan 	{ 17, "interrupts on rx ring 5"		},
6073bafec742SSukumar Swaminathan 	{ 18, "interrupts on rx ring 6"		},
6074bafec742SSukumar Swaminathan 	{ 19, "interrupts on rx ring 7"		},
6075bafec742SSukumar Swaminathan 	{ 20, "polls on rx ring 0"		},
6076bafec742SSukumar Swaminathan 	{ 21, "polls on rx ring 1"		},
6077bafec742SSukumar Swaminathan 	{ 22, "polls on rx ring 2"		},
6078bafec742SSukumar Swaminathan 	{ 23, "polls on rx ring 3"		},
6079bafec742SSukumar Swaminathan 	{ 24, "polls on rx ring 4"		},
6080bafec742SSukumar Swaminathan 	{ 25, "polls on rx ring 5"		},
6081bafec742SSukumar Swaminathan 	{ 26, "polls on rx ring 6"		},
6082bafec742SSukumar Swaminathan 	{ 27, "polls on rx ring 7"		},
6083bafec742SSukumar Swaminathan 	{ 28, "tx no resource on ring 0"	},
6084bafec742SSukumar Swaminathan 	{ 29, "tx dma bind fail on ring 0"	},
6085bafec742SSukumar Swaminathan 	{ 30, "tx dma no handle on ring 0"	},
6086bafec742SSukumar Swaminathan 	{ 31, "tx dma no cookie on ring 0"	},
6087bafec742SSukumar Swaminathan 	{ 32, "MPI firmware major version"	},
6088bafec742SSukumar Swaminathan 	{ 33, "MPI firmware minor version"	},
6089bafec742SSukumar Swaminathan 	{ 34, "MPI firmware sub version"	},
6090accf27a5SSukumar Swaminathan 	{ 35, "rx no resource"			},
6091bafec742SSukumar Swaminathan 
6092bafec742SSukumar Swaminathan 	{ -1, NULL},
6093bafec742SSukumar Swaminathan };
6094bafec742SSukumar Swaminathan 
6095bafec742SSukumar Swaminathan 
6096bafec742SSukumar Swaminathan /*
6097bafec742SSukumar Swaminathan  * kstat update function for device register set
6098bafec742SSukumar Swaminathan  */
6099bafec742SSukumar Swaminathan static int
6100bafec742SSukumar Swaminathan ql_kstats_get_reg_and_dev_stats(kstat_t *ksp, int flag)
6101bafec742SSukumar Swaminathan {
6102bafec742SSukumar Swaminathan 	qlge_t *qlge;
6103bafec742SSukumar Swaminathan 	kstat_named_t *knp;
6104bafec742SSukumar Swaminathan 	uint32_t val32;
6105bafec742SSukumar Swaminathan 	int i = 0;
6106bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
6107accf27a5SSukumar Swaminathan 	struct rx_ring *rx_ring;
6108bafec742SSukumar Swaminathan 
6109bafec742SSukumar Swaminathan 	if (flag != KSTAT_READ)
6110bafec742SSukumar Swaminathan 		return (EACCES);
6111bafec742SSukumar Swaminathan 
6112bafec742SSukumar Swaminathan 	qlge = ksp->ks_private;
6113bafec742SSukumar Swaminathan 	knp = ksp->ks_data;
6114bafec742SSukumar Swaminathan 
6115bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_SYSTEM);
6116bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_RESET_FAILOVER);
6117bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL);
6118bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_STATUS);
6119bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_ENABLE);
6120bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
6121bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_ERROR_STATUS);
6122bafec742SSukumar Swaminathan 
6123bafec742SSukumar Swaminathan 	if (ql_sem_spinlock(qlge, qlge->xgmac_sem_mask)) {
6124bafec742SSukumar Swaminathan 		return (0);
6125bafec742SSukumar Swaminathan 	}
61260662fbf4SSukumar Swaminathan 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_FLOW_CONTROL, &val32);
6127bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
6128bafec742SSukumar Swaminathan 
61290662fbf4SSukumar Swaminathan 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_TX_PAUSE_PKTS, &val32);
6130bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
6131bafec742SSukumar Swaminathan 
61320662fbf4SSukumar Swaminathan 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_PAUSE_PKTS, &val32);
6133bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
6134bafec742SSukumar Swaminathan 
61350662fbf4SSukumar Swaminathan 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_FIFO_DROPS, &val32);
6136bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
6137bafec742SSukumar Swaminathan 
6138bafec742SSukumar Swaminathan 	ql_sem_unlock(qlge, qlge->xgmac_sem_mask);
6139bafec742SSukumar Swaminathan 
6140bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->intr_cnt;
6141bafec742SSukumar Swaminathan 
6142bafec742SSukumar Swaminathan 	for (i = 0; i < 8; i++) {
6143bafec742SSukumar Swaminathan 		(knp++)->value.ui32 = qlge->rx_interrupts[i];
6144bafec742SSukumar Swaminathan 	}
6145bafec742SSukumar Swaminathan 
6146bafec742SSukumar Swaminathan 	for (i = 0; i < 8; i++) {
6147bafec742SSukumar Swaminathan 		(knp++)->value.ui32 = qlge->rx_polls[i];
6148bafec742SSukumar Swaminathan 	}
6149bafec742SSukumar Swaminathan 
6150bafec742SSukumar Swaminathan 	tx_ring = &qlge->tx_ring[0];
6151bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = tx_ring->defer;
6152bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = tx_ring->tx_fail_dma_bind;
6153bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = tx_ring->tx_no_dma_handle;
6154bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = tx_ring->tx_no_dma_cookie;
6155bafec742SSukumar Swaminathan 
6156bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->fw_version_info.major_version;
6157bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->fw_version_info.minor_version;
6158bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->fw_version_info.sub_minor_version;
6159bafec742SSukumar Swaminathan 
6160accf27a5SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
6161accf27a5SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
6162accf27a5SSukumar Swaminathan 		val32 += rx_ring->rx_packets_dropped_no_buffer;
6163accf27a5SSukumar Swaminathan 	}
6164accf27a5SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
6165accf27a5SSukumar Swaminathan 
6166bafec742SSukumar Swaminathan 	return (0);
6167bafec742SSukumar Swaminathan }
6168bafec742SSukumar Swaminathan 
6169bafec742SSukumar Swaminathan 
6170bafec742SSukumar Swaminathan static kstat_t *
6171bafec742SSukumar Swaminathan ql_setup_named_kstat(qlge_t *qlge, int instance, char *name,
6172bafec742SSukumar Swaminathan     const ql_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
6173bafec742SSukumar Swaminathan {
6174bafec742SSukumar Swaminathan 	kstat_t *ksp;
6175bafec742SSukumar Swaminathan 	kstat_named_t *knp;
6176bafec742SSukumar Swaminathan 	char *np;
6177bafec742SSukumar Swaminathan 	int type;
6178bafec742SSukumar Swaminathan 
6179bafec742SSukumar Swaminathan 	size /= sizeof (ql_ksindex_t);
6180bafec742SSukumar Swaminathan 	ksp = kstat_create(ADAPTER_NAME, instance, name, "net",
6181bafec742SSukumar Swaminathan 	    KSTAT_TYPE_NAMED, ((uint32_t)size) - 1, KSTAT_FLAG_PERSISTENT);
6182bafec742SSukumar Swaminathan 	if (ksp == NULL)
6183bafec742SSukumar Swaminathan 		return (NULL);
6184bafec742SSukumar Swaminathan 
6185bafec742SSukumar Swaminathan 	ksp->ks_private = qlge;
6186bafec742SSukumar Swaminathan 	ksp->ks_update = update;
6187bafec742SSukumar Swaminathan 	for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
6188bafec742SSukumar Swaminathan 		switch (*np) {
6189bafec742SSukumar Swaminathan 		default:
6190bafec742SSukumar Swaminathan 			type = KSTAT_DATA_UINT32;
6191bafec742SSukumar Swaminathan 			break;
6192bafec742SSukumar Swaminathan 		case '&':
6193bafec742SSukumar Swaminathan 			np += 1;
6194bafec742SSukumar Swaminathan 			type = KSTAT_DATA_CHAR;
6195bafec742SSukumar Swaminathan 			break;
6196bafec742SSukumar Swaminathan 		}
6197bafec742SSukumar Swaminathan 		kstat_named_init(knp, np, (uint8_t)type);
6198bafec742SSukumar Swaminathan 	}
6199bafec742SSukumar Swaminathan 	kstat_install(ksp);
6200bafec742SSukumar Swaminathan 
6201bafec742SSukumar Swaminathan 	return (ksp);
6202bafec742SSukumar Swaminathan }
6203bafec742SSukumar Swaminathan 
6204bafec742SSukumar Swaminathan /*
6205bafec742SSukumar Swaminathan  * Setup various kstat
6206bafec742SSukumar Swaminathan  */
6207bafec742SSukumar Swaminathan int
6208bafec742SSukumar Swaminathan ql_init_kstats(qlge_t *qlge)
6209bafec742SSukumar Swaminathan {
6210bafec742SSukumar Swaminathan 	/* Hardware KStats */
6211bafec742SSukumar Swaminathan 	qlge->ql_kstats[QL_KSTAT_CHIP] = ql_setup_named_kstat(qlge,
6212bafec742SSukumar Swaminathan 	    qlge->instance, "chip", ql_kstats_hw,
6213bafec742SSukumar Swaminathan 	    sizeof (ql_kstats_hw), ql_kstats_get_pci_regs);
6214bafec742SSukumar Swaminathan 	if (qlge->ql_kstats[QL_KSTAT_CHIP] == NULL) {
6215bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
6216bafec742SSukumar Swaminathan 	}
6217bafec742SSukumar Swaminathan 
6218bafec742SSukumar Swaminathan 	/* MII KStats */
6219bafec742SSukumar Swaminathan 	qlge->ql_kstats[QL_KSTAT_LINK] = ql_setup_named_kstat(qlge,
6220bafec742SSukumar Swaminathan 	    qlge->instance, "mii", ql_kstats_mii,
6221bafec742SSukumar Swaminathan 	    sizeof (ql_kstats_mii), ql_kstats_mii_update);
6222bafec742SSukumar Swaminathan 	if (qlge->ql_kstats[QL_KSTAT_LINK] == NULL) {
6223bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
6224bafec742SSukumar Swaminathan 	}
6225bafec742SSukumar Swaminathan 
6226bafec742SSukumar Swaminathan 	/* REG KStats */
6227bafec742SSukumar Swaminathan 	qlge->ql_kstats[QL_KSTAT_REG] = ql_setup_named_kstat(qlge,
6228bafec742SSukumar Swaminathan 	    qlge->instance, "reg", ql_kstats_reg,
6229bafec742SSukumar Swaminathan 	    sizeof (ql_kstats_reg), ql_kstats_get_reg_and_dev_stats);
6230bafec742SSukumar Swaminathan 	if (qlge->ql_kstats[QL_KSTAT_REG] == NULL) {
6231bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
6232bafec742SSukumar Swaminathan 	}
6233bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
6234bafec742SSukumar Swaminathan }
6235bafec742SSukumar Swaminathan 
6236bafec742SSukumar Swaminathan /*
6237bafec742SSukumar Swaminathan  * delete all kstat
6238bafec742SSukumar Swaminathan  */
6239bafec742SSukumar Swaminathan void
6240bafec742SSukumar Swaminathan ql_fini_kstats(qlge_t *qlge)
6241bafec742SSukumar Swaminathan {
6242bafec742SSukumar Swaminathan 	int i;
6243bafec742SSukumar Swaminathan 
6244bafec742SSukumar Swaminathan 	for (i = 0; i < QL_KSTAT_COUNT; i++) {
6245bafec742SSukumar Swaminathan 		if (qlge->ql_kstats[i] != NULL)
6246bafec742SSukumar Swaminathan 			kstat_delete(qlge->ql_kstats[i]);
6247bafec742SSukumar Swaminathan 	}
6248bafec742SSukumar Swaminathan }
6249bafec742SSukumar Swaminathan 
6250bafec742SSukumar Swaminathan /* ************************************************************************* */
6251bafec742SSukumar Swaminathan /*
6252bafec742SSukumar Swaminathan  *                                 kstat end
6253bafec742SSukumar Swaminathan  */
6254bafec742SSukumar Swaminathan /* ************************************************************************* */
6255bafec742SSukumar Swaminathan 
6256bafec742SSukumar Swaminathan /*
6257bafec742SSukumar Swaminathan  * Setup the parameters for receive and transmit rings including buffer sizes
6258bafec742SSukumar Swaminathan  * and completion queue sizes
6259bafec742SSukumar Swaminathan  */
6260bafec742SSukumar Swaminathan static int
6261bafec742SSukumar Swaminathan ql_setup_rings(qlge_t *qlge)
6262bafec742SSukumar Swaminathan {
6263bafec742SSukumar Swaminathan 	uint8_t i;
6264bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
6265bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
6266bafec742SSukumar Swaminathan 	uint16_t lbq_buf_size;
6267bafec742SSukumar Swaminathan 
6268bafec742SSukumar Swaminathan 	lbq_buf_size = (uint16_t)
6269accf27a5SSukumar Swaminathan 	    ((qlge->mtu == ETHERMTU)? LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE);
6270bafec742SSukumar Swaminathan 
6271bafec742SSukumar Swaminathan 	/*
6272bafec742SSukumar Swaminathan 	 * rx_ring[0] is always the default queue.
6273bafec742SSukumar Swaminathan 	 */
6274bafec742SSukumar Swaminathan 	/*
6275bafec742SSukumar Swaminathan 	 * qlge->rx_ring_count:
6276bafec742SSukumar Swaminathan 	 * Total number of rx_rings. This includes a number
6277bafec742SSukumar Swaminathan 	 * of outbound completion handler rx_rings, and a
6278bafec742SSukumar Swaminathan 	 * number of inbound completion handler rx_rings.
6279bafec742SSukumar Swaminathan 	 * rss is only enabled if we have more than 1 rx completion
6280bafec742SSukumar Swaminathan 	 * queue. If we have a single rx completion queue
6281bafec742SSukumar Swaminathan 	 * then all rx completions go to this queue and
6282bafec742SSukumar Swaminathan 	 * the last completion queue
6283bafec742SSukumar Swaminathan 	 */
6284bafec742SSukumar Swaminathan 
6285bafec742SSukumar Swaminathan 	qlge->tx_ring_first_cq_id = qlge->rss_ring_count;
6286bafec742SSukumar Swaminathan 
6287bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
6288bafec742SSukumar Swaminathan 		tx_ring = &qlge->tx_ring[i];
6289bafec742SSukumar Swaminathan 		bzero((void *)tx_ring, sizeof (*tx_ring));
6290bafec742SSukumar Swaminathan 		tx_ring->qlge = qlge;
6291bafec742SSukumar Swaminathan 		tx_ring->wq_id = i;
6292bafec742SSukumar Swaminathan 		tx_ring->wq_len = qlge->tx_ring_size;
6293bafec742SSukumar Swaminathan 		tx_ring->wq_size = (uint32_t)(
6294bafec742SSukumar Swaminathan 		    tx_ring->wq_len * sizeof (struct ob_mac_iocb_req));
6295bafec742SSukumar Swaminathan 
6296bafec742SSukumar Swaminathan 		/*
6297bafec742SSukumar Swaminathan 		 * The completion queue ID for the tx rings start
6298bafec742SSukumar Swaminathan 		 * immediately after the last rss completion queue.
6299bafec742SSukumar Swaminathan 		 */
6300bafec742SSukumar Swaminathan 		tx_ring->cq_id = (uint16_t)(i + qlge->tx_ring_first_cq_id);
6301bafec742SSukumar Swaminathan 	}
6302bafec742SSukumar Swaminathan 
6303bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
6304bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
6305bafec742SSukumar Swaminathan 		bzero((void *)rx_ring, sizeof (*rx_ring));
6306bafec742SSukumar Swaminathan 		rx_ring->qlge = qlge;
6307bafec742SSukumar Swaminathan 		rx_ring->cq_id = i;
6308bafec742SSukumar Swaminathan 		if (i != 0)
6309bafec742SSukumar Swaminathan 			rx_ring->cpu = (i) % qlge->rx_ring_count;
6310bafec742SSukumar Swaminathan 		else
6311bafec742SSukumar Swaminathan 			rx_ring->cpu = 0;
6312bafec742SSukumar Swaminathan 
6313bafec742SSukumar Swaminathan 		if (i < qlge->rss_ring_count) {
6314bafec742SSukumar Swaminathan 			/*
6315bafec742SSukumar Swaminathan 			 * Inbound completions (RSS) queues
6316bafec742SSukumar Swaminathan 			 * Default queue is queue 0 which handles
6317bafec742SSukumar Swaminathan 			 * unicast plus bcast/mcast and async events.
6318bafec742SSukumar Swaminathan 			 * Other inbound queues handle unicast frames only.
6319bafec742SSukumar Swaminathan 			 */
6320bafec742SSukumar Swaminathan 			rx_ring->cq_len = qlge->rx_ring_size;
6321bafec742SSukumar Swaminathan 			rx_ring->cq_size = (uint32_t)
6322bafec742SSukumar Swaminathan 			    (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
6323bafec742SSukumar Swaminathan 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
6324bafec742SSukumar Swaminathan 			rx_ring->lbq_size = (uint32_t)
6325bafec742SSukumar Swaminathan 			    (rx_ring->lbq_len * sizeof (uint64_t));
6326bafec742SSukumar Swaminathan 			rx_ring->lbq_buf_size = lbq_buf_size;
6327bafec742SSukumar Swaminathan 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
6328bafec742SSukumar Swaminathan 			rx_ring->sbq_size = (uint32_t)
6329bafec742SSukumar Swaminathan 			    (rx_ring->sbq_len * sizeof (uint64_t));
6330bafec742SSukumar Swaminathan 			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
6331bafec742SSukumar Swaminathan 			rx_ring->type = RX_Q;
6332bafec742SSukumar Swaminathan 
6333bafec742SSukumar Swaminathan 			QL_PRINT(DBG_GLD,
6334bafec742SSukumar Swaminathan 			    ("%s(%d)Allocating rss completion queue %d "
6335bafec742SSukumar Swaminathan 			    "on cpu %d\n", __func__, qlge->instance,
6336bafec742SSukumar Swaminathan 			    rx_ring->cq_id, rx_ring->cpu));
6337bafec742SSukumar Swaminathan 		} else {
6338bafec742SSukumar Swaminathan 			/*
6339bafec742SSukumar Swaminathan 			 * Outbound queue handles outbound completions only
6340bafec742SSukumar Swaminathan 			 */
6341bafec742SSukumar Swaminathan 			/* outbound cq is same size as tx_ring it services. */
6342accf27a5SSukumar Swaminathan 			QL_PRINT(DBG_INIT, ("rx_ring 0x%p i %d\n", rx_ring, i));
6343bafec742SSukumar Swaminathan 			rx_ring->cq_len = qlge->tx_ring_size;
6344bafec742SSukumar Swaminathan 			rx_ring->cq_size = (uint32_t)
6345bafec742SSukumar Swaminathan 			    (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
6346bafec742SSukumar Swaminathan 			rx_ring->lbq_len = 0;
6347bafec742SSukumar Swaminathan 			rx_ring->lbq_size = 0;
6348bafec742SSukumar Swaminathan 			rx_ring->lbq_buf_size = 0;
6349bafec742SSukumar Swaminathan 			rx_ring->sbq_len = 0;
6350bafec742SSukumar Swaminathan 			rx_ring->sbq_size = 0;
6351bafec742SSukumar Swaminathan 			rx_ring->sbq_buf_size = 0;
6352bafec742SSukumar Swaminathan 			rx_ring->type = TX_Q;
6353bafec742SSukumar Swaminathan 
6354bafec742SSukumar Swaminathan 			QL_PRINT(DBG_GLD,
6355bafec742SSukumar Swaminathan 			    ("%s(%d)Allocating TX completion queue %d on"
6356bafec742SSukumar Swaminathan 			    " cpu %d\n", __func__, qlge->instance,
6357bafec742SSukumar Swaminathan 			    rx_ring->cq_id, rx_ring->cpu));
6358bafec742SSukumar Swaminathan 		}
6359bafec742SSukumar Swaminathan 	}
6360bafec742SSukumar Swaminathan 
6361bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
6362bafec742SSukumar Swaminathan }
6363bafec742SSukumar Swaminathan 
6364bafec742SSukumar Swaminathan static int
6365bafec742SSukumar Swaminathan ql_start_rx_ring(qlge_t *qlge, struct rx_ring *rx_ring)
6366bafec742SSukumar Swaminathan {
6367bafec742SSukumar Swaminathan 	struct cqicb_t *cqicb = (struct cqicb_t *)rx_ring->cqicb_dma.vaddr;
6368bafec742SSukumar Swaminathan 	void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
6369bafec742SSukumar Swaminathan 	    (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
6370bafec742SSukumar Swaminathan 	/* first shadow area is used by wqicb's host copy of consumer index */
6371bafec742SSukumar Swaminathan 	    + sizeof (uint64_t);
6372bafec742SSukumar Swaminathan 	uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
6373bafec742SSukumar Swaminathan 	    (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
6374bafec742SSukumar Swaminathan 	    + sizeof (uint64_t);
6375bafec742SSukumar Swaminathan 	/* lrg/sml bufq pointers */
6376bafec742SSukumar Swaminathan 	uint8_t *buf_q_base_reg =
6377bafec742SSukumar Swaminathan 	    (uint8_t *)qlge->buf_q_ptr_base_addr_dma_attr.vaddr +
6378bafec742SSukumar Swaminathan 	    (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
6379bafec742SSukumar Swaminathan 	uint64_t buf_q_base_reg_dma =
6380bafec742SSukumar Swaminathan 	    qlge->buf_q_ptr_base_addr_dma_attr.dma_addr +
6381bafec742SSukumar Swaminathan 	    (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
6382bafec742SSukumar Swaminathan 	caddr_t doorbell_area =
6383bafec742SSukumar Swaminathan 	    qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * (128 + rx_ring->cq_id));
6384bafec742SSukumar Swaminathan 	int err = 0;
6385bafec742SSukumar Swaminathan 	uint16_t bq_len;
6386bafec742SSukumar Swaminathan 	uint64_t tmp;
6387bafec742SSukumar Swaminathan 	uint64_t *base_indirect_ptr;
6388bafec742SSukumar Swaminathan 	int page_entries;
6389bafec742SSukumar Swaminathan 
6390bafec742SSukumar Swaminathan 	/* Set up the shadow registers for this ring. */
6391bafec742SSukumar Swaminathan 	rx_ring->prod_idx_sh_reg = shadow_reg;
6392bafec742SSukumar Swaminathan 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
6393accf27a5SSukumar Swaminathan 	rx_ring->prod_idx_sh_reg_offset = (off_t)(((rx_ring->cq_id *
6394accf27a5SSukumar Swaminathan 	    sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE) + sizeof (uint64_t)));
6395bafec742SSukumar Swaminathan 
6396bafec742SSukumar Swaminathan 	rx_ring->lbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
6397bafec742SSukumar Swaminathan 	rx_ring->lbq_base_indirect_dma = buf_q_base_reg_dma;
6398bafec742SSukumar Swaminathan 
6399bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("%s rx ring(%d): prod_idx virtual addr = 0x%lx,"
6400bafec742SSukumar Swaminathan 	    " phys_addr 0x%lx\n", __func__, rx_ring->cq_id,
6401bafec742SSukumar Swaminathan 	    rx_ring->prod_idx_sh_reg, rx_ring->prod_idx_sh_reg_dma));
6402bafec742SSukumar Swaminathan 
6403bafec742SSukumar Swaminathan 	buf_q_base_reg += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
6404bafec742SSukumar Swaminathan 	buf_q_base_reg_dma += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
6405bafec742SSukumar Swaminathan 	rx_ring->sbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
6406bafec742SSukumar Swaminathan 	rx_ring->sbq_base_indirect_dma = buf_q_base_reg_dma;
6407bafec742SSukumar Swaminathan 
6408bafec742SSukumar Swaminathan 	/* PCI doorbell mem area + 0x00 for consumer index register */
6409bafec742SSukumar Swaminathan 	rx_ring->cnsmr_idx_db_reg = (uint32_t *)(void *)doorbell_area;
6410bafec742SSukumar Swaminathan 	rx_ring->cnsmr_idx = 0;
6411bafec742SSukumar Swaminathan 	*rx_ring->prod_idx_sh_reg = 0;
6412bafec742SSukumar Swaminathan 	rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
6413bafec742SSukumar Swaminathan 
6414bafec742SSukumar Swaminathan 	/* PCI doorbell mem area + 0x04 for valid register */
6415bafec742SSukumar Swaminathan 	rx_ring->valid_db_reg = (uint32_t *)(void *)
6416bafec742SSukumar Swaminathan 	    ((uint8_t *)(void *)doorbell_area + 0x04);
6417bafec742SSukumar Swaminathan 
6418bafec742SSukumar Swaminathan 	/* PCI doorbell mem area + 0x18 for large buffer consumer */
6419bafec742SSukumar Swaminathan 	rx_ring->lbq_prod_idx_db_reg = (uint32_t *)(void *)
6420bafec742SSukumar Swaminathan 	    ((uint8_t *)(void *)doorbell_area + 0x18);
6421bafec742SSukumar Swaminathan 
6422bafec742SSukumar Swaminathan 	/* PCI doorbell mem area + 0x1c */
6423bafec742SSukumar Swaminathan 	rx_ring->sbq_prod_idx_db_reg = (uint32_t *)(void *)
6424bafec742SSukumar Swaminathan 	    ((uint8_t *)(void *)doorbell_area + 0x1c);
6425bafec742SSukumar Swaminathan 
6426bafec742SSukumar Swaminathan 	bzero((void *)cqicb, sizeof (*cqicb));
6427bafec742SSukumar Swaminathan 
6428bafec742SSukumar Swaminathan 	cqicb->msix_vect = (uint8_t)rx_ring->irq;
6429bafec742SSukumar Swaminathan 
6430bafec742SSukumar Swaminathan 	bq_len = (uint16_t)((rx_ring->cq_len == 65536) ?
6431bafec742SSukumar Swaminathan 	    (uint16_t)0 : (uint16_t)rx_ring->cq_len);
6432bafec742SSukumar Swaminathan 	cqicb->len = (uint16_t)cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
6433bafec742SSukumar Swaminathan 
6434bafec742SSukumar Swaminathan 	cqicb->cq_base_addr_lo =
6435bafec742SSukumar Swaminathan 	    cpu_to_le32(LS_64BITS(rx_ring->cq_dma.dma_addr));
6436bafec742SSukumar Swaminathan 	cqicb->cq_base_addr_hi =
6437bafec742SSukumar Swaminathan 	    cpu_to_le32(MS_64BITS(rx_ring->cq_dma.dma_addr));
6438bafec742SSukumar Swaminathan 
6439bafec742SSukumar Swaminathan 	cqicb->prod_idx_addr_lo =
6440bafec742SSukumar Swaminathan 	    cpu_to_le32(LS_64BITS(rx_ring->prod_idx_sh_reg_dma));
6441bafec742SSukumar Swaminathan 	cqicb->prod_idx_addr_hi =
6442bafec742SSukumar Swaminathan 	    cpu_to_le32(MS_64BITS(rx_ring->prod_idx_sh_reg_dma));
6443bafec742SSukumar Swaminathan 
6444bafec742SSukumar Swaminathan 	/*
6445bafec742SSukumar Swaminathan 	 * Set up the control block load flags.
6446bafec742SSukumar Swaminathan 	 */
6447bafec742SSukumar Swaminathan 	cqicb->flags = FLAGS_LC | /* Load queue base address */
6448bafec742SSukumar Swaminathan 	    FLAGS_LV | /* Load MSI-X vector */
6449bafec742SSukumar Swaminathan 	    FLAGS_LI;  /* Load irq delay values */
6450bafec742SSukumar Swaminathan 	if (rx_ring->lbq_len) {
6451bafec742SSukumar Swaminathan 		/* Load lbq values */
6452bafec742SSukumar Swaminathan 		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LL);
6453bafec742SSukumar Swaminathan 		tmp = (uint64_t)rx_ring->lbq_dma.dma_addr;
6454bafec742SSukumar Swaminathan 		base_indirect_ptr = (uint64_t *)rx_ring->lbq_base_indirect;
6455bafec742SSukumar Swaminathan 		page_entries = 0;
6456bafec742SSukumar Swaminathan 		do {
6457bafec742SSukumar Swaminathan 			*base_indirect_ptr = cpu_to_le64(tmp);
6458bafec742SSukumar Swaminathan 			tmp += VM_PAGE_SIZE;
6459bafec742SSukumar Swaminathan 			base_indirect_ptr++;
6460bafec742SSukumar Swaminathan 			page_entries++;
6461bafec742SSukumar Swaminathan 		} while (page_entries < (int)(
6462bafec742SSukumar Swaminathan 		    ((rx_ring->lbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
6463bafec742SSukumar Swaminathan 
6464bafec742SSukumar Swaminathan 		cqicb->lbq_addr_lo =
6465bafec742SSukumar Swaminathan 		    cpu_to_le32(LS_64BITS(rx_ring->lbq_base_indirect_dma));
6466bafec742SSukumar Swaminathan 		cqicb->lbq_addr_hi =
6467bafec742SSukumar Swaminathan 		    cpu_to_le32(MS_64BITS(rx_ring->lbq_base_indirect_dma));
6468bafec742SSukumar Swaminathan 		bq_len = (uint16_t)((rx_ring->lbq_buf_size == 65536) ?
6469bafec742SSukumar Swaminathan 		    (uint16_t)0 : (uint16_t)rx_ring->lbq_buf_size);
6470bafec742SSukumar Swaminathan 		cqicb->lbq_buf_size = (uint16_t)cpu_to_le16(bq_len);
6471bafec742SSukumar Swaminathan 		bq_len = (uint16_t)((rx_ring->lbq_len == 65536) ? (uint16_t)0 :
6472bafec742SSukumar Swaminathan 		    (uint16_t)rx_ring->lbq_len);
6473bafec742SSukumar Swaminathan 		cqicb->lbq_len = (uint16_t)cpu_to_le16(bq_len);
6474bafec742SSukumar Swaminathan 		rx_ring->lbq_prod_idx = 0;
6475bafec742SSukumar Swaminathan 		rx_ring->lbq_curr_idx = 0;
6476bafec742SSukumar Swaminathan 	}
6477bafec742SSukumar Swaminathan 	if (rx_ring->sbq_len) {
6478bafec742SSukumar Swaminathan 		/* Load sbq values */
6479bafec742SSukumar Swaminathan 		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LS);
6480bafec742SSukumar Swaminathan 		tmp = (uint64_t)rx_ring->sbq_dma.dma_addr;
6481bafec742SSukumar Swaminathan 		base_indirect_ptr = (uint64_t *)rx_ring->sbq_base_indirect;
6482bafec742SSukumar Swaminathan 		page_entries = 0;
6483bafec742SSukumar Swaminathan 
6484bafec742SSukumar Swaminathan 		do {
6485bafec742SSukumar Swaminathan 			*base_indirect_ptr = cpu_to_le64(tmp);
6486bafec742SSukumar Swaminathan 			tmp += VM_PAGE_SIZE;
6487bafec742SSukumar Swaminathan 			base_indirect_ptr++;
6488bafec742SSukumar Swaminathan 			page_entries++;
6489bafec742SSukumar Swaminathan 		} while (page_entries < (uint32_t)
6490bafec742SSukumar Swaminathan 		    (((rx_ring->sbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
6491bafec742SSukumar Swaminathan 
6492bafec742SSukumar Swaminathan 		cqicb->sbq_addr_lo =
6493bafec742SSukumar Swaminathan 		    cpu_to_le32(LS_64BITS(rx_ring->sbq_base_indirect_dma));
6494bafec742SSukumar Swaminathan 		cqicb->sbq_addr_hi =
6495bafec742SSukumar Swaminathan 		    cpu_to_le32(MS_64BITS(rx_ring->sbq_base_indirect_dma));
6496bafec742SSukumar Swaminathan 		cqicb->sbq_buf_size = (uint16_t)
6497bafec742SSukumar Swaminathan 		    cpu_to_le16((uint16_t)(rx_ring->sbq_buf_size/2));
6498bafec742SSukumar Swaminathan 		bq_len = (uint16_t)((rx_ring->sbq_len == 65536) ?
6499bafec742SSukumar Swaminathan 		    (uint16_t)0 : (uint16_t)rx_ring->sbq_len);
6500bafec742SSukumar Swaminathan 		cqicb->sbq_len = (uint16_t)cpu_to_le16(bq_len);
6501bafec742SSukumar Swaminathan 		rx_ring->sbq_prod_idx = 0;
6502bafec742SSukumar Swaminathan 		rx_ring->sbq_curr_idx = 0;
6503bafec742SSukumar Swaminathan 	}
6504bafec742SSukumar Swaminathan 	switch (rx_ring->type) {
6505bafec742SSukumar Swaminathan 	case TX_Q:
6506bafec742SSukumar Swaminathan 		cqicb->irq_delay = (uint16_t)
6507bafec742SSukumar Swaminathan 		    cpu_to_le16(qlge->tx_coalesce_usecs);
6508bafec742SSukumar Swaminathan 		cqicb->pkt_delay = (uint16_t)
6509bafec742SSukumar Swaminathan 		    cpu_to_le16(qlge->tx_max_coalesced_frames);
6510bafec742SSukumar Swaminathan 		break;
6511bafec742SSukumar Swaminathan 
6512bafec742SSukumar Swaminathan 	case DEFAULT_Q:
6513accf27a5SSukumar Swaminathan 		cqicb->irq_delay = (uint16_t)
6514accf27a5SSukumar Swaminathan 		    cpu_to_le16(qlge->rx_coalesce_usecs);
6515accf27a5SSukumar Swaminathan 		cqicb->pkt_delay = (uint16_t)
6516accf27a5SSukumar Swaminathan 		    cpu_to_le16(qlge->rx_max_coalesced_frames);
6517bafec742SSukumar Swaminathan 		break;
6518bafec742SSukumar Swaminathan 
6519bafec742SSukumar Swaminathan 	case RX_Q:
6520bafec742SSukumar Swaminathan 		/*
6521bafec742SSukumar Swaminathan 		 * Inbound completion handling rx_rings run in
6522bafec742SSukumar Swaminathan 		 * separate NAPI contexts.
6523bafec742SSukumar Swaminathan 		 */
6524bafec742SSukumar Swaminathan 		cqicb->irq_delay = (uint16_t)
6525bafec742SSukumar Swaminathan 		    cpu_to_le16(qlge->rx_coalesce_usecs);
6526bafec742SSukumar Swaminathan 		cqicb->pkt_delay = (uint16_t)
6527bafec742SSukumar Swaminathan 		    cpu_to_le16(qlge->rx_max_coalesced_frames);
6528bafec742SSukumar Swaminathan 		break;
6529bafec742SSukumar Swaminathan 	default:
6530bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Invalid rx_ring->type = %d.",
6531bafec742SSukumar Swaminathan 		    rx_ring->type);
6532bafec742SSukumar Swaminathan 	}
6533bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("Initializing rx completion queue %d.\n",
6534bafec742SSukumar Swaminathan 	    rx_ring->cq_id));
6535bafec742SSukumar Swaminathan 	/* QL_DUMP_CQICB(qlge, cqicb); */
6536bafec742SSukumar Swaminathan 	err = ql_write_cfg(qlge, CFG_LCQ, rx_ring->cqicb_dma.dma_addr,
6537bafec742SSukumar Swaminathan 	    rx_ring->cq_id);
6538bafec742SSukumar Swaminathan 	if (err) {
6539bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Failed to load CQICB.");
6540bafec742SSukumar Swaminathan 		return (err);
6541bafec742SSukumar Swaminathan 	}
6542bafec742SSukumar Swaminathan 
6543bafec742SSukumar Swaminathan 	rx_ring->rx_packets_dropped_no_buffer = 0;
6544bafec742SSukumar Swaminathan 	rx_ring->rx_pkt_dropped_mac_unenabled = 0;
6545bafec742SSukumar Swaminathan 	rx_ring->rx_failed_sbq_allocs = 0;
6546bafec742SSukumar Swaminathan 	rx_ring->rx_failed_lbq_allocs = 0;
6547bafec742SSukumar Swaminathan 	rx_ring->rx_packets = 0;
6548bafec742SSukumar Swaminathan 	rx_ring->rx_bytes = 0;
6549bafec742SSukumar Swaminathan 	rx_ring->frame_too_long = 0;
6550bafec742SSukumar Swaminathan 	rx_ring->frame_too_short = 0;
6551bafec742SSukumar Swaminathan 	rx_ring->fcs_err = 0;
6552bafec742SSukumar Swaminathan 
6553bafec742SSukumar Swaminathan 	return (err);
6554bafec742SSukumar Swaminathan }
6555bafec742SSukumar Swaminathan 
6556bafec742SSukumar Swaminathan /*
6557bafec742SSukumar Swaminathan  * start RSS
6558bafec742SSukumar Swaminathan  */
6559bafec742SSukumar Swaminathan static int
6560bafec742SSukumar Swaminathan ql_start_rss(qlge_t *qlge)
6561bafec742SSukumar Swaminathan {
6562bafec742SSukumar Swaminathan 	struct ricb *ricb = (struct ricb *)qlge->ricb_dma.vaddr;
6563bafec742SSukumar Swaminathan 	int status = 0;
6564bafec742SSukumar Swaminathan 	int i;
6565bafec742SSukumar Swaminathan 	uint8_t *hash_id = (uint8_t *)ricb->hash_cq_id;
6566bafec742SSukumar Swaminathan 
6567bafec742SSukumar Swaminathan 	bzero((void *)ricb, sizeof (*ricb));
6568bafec742SSukumar Swaminathan 
6569bafec742SSukumar Swaminathan 	ricb->base_cq = RSS_L4K;
6570bafec742SSukumar Swaminathan 	ricb->flags =
6571bafec742SSukumar Swaminathan 	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
6572bafec742SSukumar Swaminathan 	    RSS_RT6);
6573bafec742SSukumar Swaminathan 	ricb->mask = (uint16_t)cpu_to_le16(RSS_HASH_CQ_ID_MAX - 1);
6574bafec742SSukumar Swaminathan 
6575bafec742SSukumar Swaminathan 	/*
6576bafec742SSukumar Swaminathan 	 * Fill out the Indirection Table.
6577bafec742SSukumar Swaminathan 	 */
6578bafec742SSukumar Swaminathan 	for (i = 0; i < RSS_HASH_CQ_ID_MAX; i++)
6579bafec742SSukumar Swaminathan 		hash_id[i] = (uint8_t)(i & (qlge->rss_ring_count - 1));
6580bafec742SSukumar Swaminathan 
6581bafec742SSukumar Swaminathan 	(void) memcpy(&ricb->ipv6_hash_key[0], key_data, 40);
6582bafec742SSukumar Swaminathan 	(void) memcpy(&ricb->ipv4_hash_key[0], key_data, 16);
6583bafec742SSukumar Swaminathan 
6584bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("Initializing RSS.\n"));
6585bafec742SSukumar Swaminathan 
6586bafec742SSukumar Swaminathan 	status = ql_write_cfg(qlge, CFG_LR, qlge->ricb_dma.dma_addr, 0);
6587bafec742SSukumar Swaminathan 	if (status) {
6588bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Failed to load RICB.");
6589bafec742SSukumar Swaminathan 		return (status);
6590bafec742SSukumar Swaminathan 	}
6591bafec742SSukumar Swaminathan 
6592bafec742SSukumar Swaminathan 	return (status);
6593bafec742SSukumar Swaminathan }
6594bafec742SSukumar Swaminathan 
6595bafec742SSukumar Swaminathan /*
6596bafec742SSukumar Swaminathan  * load a tx ring control block to hw and start this ring
6597bafec742SSukumar Swaminathan  */
6598bafec742SSukumar Swaminathan static int
6599bafec742SSukumar Swaminathan ql_start_tx_ring(qlge_t *qlge, struct tx_ring *tx_ring)
6600bafec742SSukumar Swaminathan {
6601bafec742SSukumar Swaminathan 	struct wqicb_t *wqicb = (struct wqicb_t *)tx_ring->wqicb_dma.vaddr;
6602bafec742SSukumar Swaminathan 	caddr_t doorbell_area =
6603bafec742SSukumar Swaminathan 	    qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * tx_ring->wq_id);
6604bafec742SSukumar Swaminathan 	void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
6605bafec742SSukumar Swaminathan 	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
6606bafec742SSukumar Swaminathan 	uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
6607bafec742SSukumar Swaminathan 	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
6608bafec742SSukumar Swaminathan 	int err = 0;
6609bafec742SSukumar Swaminathan 
6610bafec742SSukumar Swaminathan 	/*
6611bafec742SSukumar Swaminathan 	 * Assign doorbell registers for this tx_ring.
6612bafec742SSukumar Swaminathan 	 */
6613bafec742SSukumar Swaminathan 
6614bafec742SSukumar Swaminathan 	/* TX PCI doorbell mem area for tx producer index */
6615bafec742SSukumar Swaminathan 	tx_ring->prod_idx_db_reg = (uint32_t *)(void *)doorbell_area;
6616bafec742SSukumar Swaminathan 	tx_ring->prod_idx = 0;
6617bafec742SSukumar Swaminathan 	/* TX PCI doorbell mem area + 0x04 */
6618bafec742SSukumar Swaminathan 	tx_ring->valid_db_reg = (uint32_t *)(void *)
6619bafec742SSukumar Swaminathan 	    ((uint8_t *)(void *)doorbell_area + 0x04);
6620bafec742SSukumar Swaminathan 
6621bafec742SSukumar Swaminathan 	/*
6622bafec742SSukumar Swaminathan 	 * Assign shadow registers for this tx_ring.
6623bafec742SSukumar Swaminathan 	 */
6624bafec742SSukumar Swaminathan 	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
6625bafec742SSukumar Swaminathan 	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
6626bafec742SSukumar Swaminathan 	*tx_ring->cnsmr_idx_sh_reg = 0;
6627bafec742SSukumar Swaminathan 
6628bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("%s tx ring(%d): cnsmr_idx virtual addr = 0x%lx,"
6629bafec742SSukumar Swaminathan 	    " phys_addr 0x%lx\n",
6630bafec742SSukumar Swaminathan 	    __func__, tx_ring->wq_id, tx_ring->cnsmr_idx_sh_reg,
6631bafec742SSukumar Swaminathan 	    tx_ring->cnsmr_idx_sh_reg_dma));
6632bafec742SSukumar Swaminathan 
6633bafec742SSukumar Swaminathan 	wqicb->len =
6634bafec742SSukumar Swaminathan 	    (uint16_t)cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
6635bafec742SSukumar Swaminathan 	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
6636bafec742SSukumar Swaminathan 	    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
6637bafec742SSukumar Swaminathan 	wqicb->cq_id_rss = (uint16_t)cpu_to_le16(tx_ring->cq_id);
6638bafec742SSukumar Swaminathan 	wqicb->rid = 0;
6639bafec742SSukumar Swaminathan 	wqicb->wq_addr_lo = cpu_to_le32(LS_64BITS(tx_ring->wq_dma.dma_addr));
6640bafec742SSukumar Swaminathan 	wqicb->wq_addr_hi = cpu_to_le32(MS_64BITS(tx_ring->wq_dma.dma_addr));
6641bafec742SSukumar Swaminathan 	wqicb->cnsmr_idx_addr_lo =
6642bafec742SSukumar Swaminathan 	    cpu_to_le32(LS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
6643bafec742SSukumar Swaminathan 	wqicb->cnsmr_idx_addr_hi =
6644bafec742SSukumar Swaminathan 	    cpu_to_le32(MS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
6645bafec742SSukumar Swaminathan 
6646bafec742SSukumar Swaminathan 	ql_init_tx_ring(tx_ring);
6647bafec742SSukumar Swaminathan 	/* QL_DUMP_WQICB(qlge, wqicb); */
6648bafec742SSukumar Swaminathan 	err = ql_write_cfg(qlge, CFG_LRQ, tx_ring->wqicb_dma.dma_addr,
6649bafec742SSukumar Swaminathan 	    tx_ring->wq_id);
6650bafec742SSukumar Swaminathan 
6651bafec742SSukumar Swaminathan 	if (err) {
6652bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Failed to load WQICB.");
6653bafec742SSukumar Swaminathan 		return (err);
6654bafec742SSukumar Swaminathan 	}
6655bafec742SSukumar Swaminathan 	return (err);
6656bafec742SSukumar Swaminathan }
6657bafec742SSukumar Swaminathan 
6658bafec742SSukumar Swaminathan /*
6659bafec742SSukumar Swaminathan  * Set up a MAC, multicast or VLAN address for the
6660bafec742SSukumar Swaminathan  * inbound frame matching.
6661bafec742SSukumar Swaminathan  */
6662bafec742SSukumar Swaminathan int
6663bafec742SSukumar Swaminathan ql_set_mac_addr_reg(qlge_t *qlge, uint8_t *addr, uint32_t type,
6664bafec742SSukumar Swaminathan     uint16_t index)
6665bafec742SSukumar Swaminathan {
6666bafec742SSukumar Swaminathan 	uint32_t offset = 0;
6667bafec742SSukumar Swaminathan 	int status = DDI_SUCCESS;
6668bafec742SSukumar Swaminathan 
6669bafec742SSukumar Swaminathan 	switch (type) {
6670bafec742SSukumar Swaminathan 	case MAC_ADDR_TYPE_MULTI_MAC:
6671bafec742SSukumar Swaminathan 	case MAC_ADDR_TYPE_CAM_MAC: {
6672bafec742SSukumar Swaminathan 		uint32_t cam_output;
6673bafec742SSukumar Swaminathan 		uint32_t upper = (addr[0] << 8) | addr[1];
6674bafec742SSukumar Swaminathan 		uint32_t lower =
6675bafec742SSukumar Swaminathan 		    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
6676bafec742SSukumar Swaminathan 		    (addr[5]);
6677bafec742SSukumar Swaminathan 
6678bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Adding %s ", (type ==
6679bafec742SSukumar Swaminathan 		    MAC_ADDR_TYPE_MULTI_MAC) ?
6680bafec742SSukumar Swaminathan 		    "MULTICAST" : "UNICAST"));
6681bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT,
6682bafec742SSukumar Swaminathan 		    ("addr %02x %02x %02x %02x %02x %02x at index %d in "
6683bafec742SSukumar Swaminathan 		    "the CAM.\n",
6684bafec742SSukumar Swaminathan 		    addr[0], addr[1], addr[2], addr[3], addr[4],
6685bafec742SSukumar Swaminathan 		    addr[5], index));
6686bafec742SSukumar Swaminathan 
6687bafec742SSukumar Swaminathan 		status = ql_wait_reg_rdy(qlge,
6688bafec742SSukumar Swaminathan 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6689bafec742SSukumar Swaminathan 		if (status)
6690bafec742SSukumar Swaminathan 			goto exit;
6691bafec742SSukumar Swaminathan 		/* offset 0 - lower 32 bits of the MAC address */
6692bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6693bafec742SSukumar Swaminathan 		    (offset++) |
6694bafec742SSukumar Swaminathan 		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
6695bafec742SSukumar Swaminathan 		    type);	/* type */
6696bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, lower);
6697bafec742SSukumar Swaminathan 		status = ql_wait_reg_rdy(qlge,
6698bafec742SSukumar Swaminathan 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6699bafec742SSukumar Swaminathan 		if (status)
6700bafec742SSukumar Swaminathan 			goto exit;
6701bafec742SSukumar Swaminathan 		/* offset 1 - upper 16 bits of the MAC address */
6702bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6703bafec742SSukumar Swaminathan 		    (offset++) |
6704bafec742SSukumar Swaminathan 		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
6705bafec742SSukumar Swaminathan 		    type);	/* type */
6706bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, upper);
6707bafec742SSukumar Swaminathan 		status = ql_wait_reg_rdy(qlge,
6708bafec742SSukumar Swaminathan 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6709bafec742SSukumar Swaminathan 		if (status)
6710bafec742SSukumar Swaminathan 			goto exit;
6711bafec742SSukumar Swaminathan 		/* offset 2 - CQ ID associated with this MAC address */
6712bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6713bafec742SSukumar Swaminathan 		    (offset) | (index << MAC_ADDR_IDX_SHIFT) |	/* index */
6714bafec742SSukumar Swaminathan 		    type);	/* type */
6715bafec742SSukumar Swaminathan 		/*
6716bafec742SSukumar Swaminathan 		 * This field should also include the queue id
6717bafec742SSukumar Swaminathan 		 * and possibly the function id.  Right now we hardcode
6718bafec742SSukumar Swaminathan 		 * the route field to NIC core.
6719bafec742SSukumar Swaminathan 		 */
6720bafec742SSukumar Swaminathan 		if (type == MAC_ADDR_TYPE_CAM_MAC) {
6721bafec742SSukumar Swaminathan 			cam_output = (CAM_OUT_ROUTE_NIC |
6722bafec742SSukumar Swaminathan 			    (qlge->func_number << CAM_OUT_FUNC_SHIFT) |
6723bafec742SSukumar Swaminathan 			    (0 <<
6724bafec742SSukumar Swaminathan 			    CAM_OUT_CQ_ID_SHIFT));
6725bafec742SSukumar Swaminathan 
6726bafec742SSukumar Swaminathan 			/* route to NIC core */
6727bafec742SSukumar Swaminathan 			ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA,
6728bafec742SSukumar Swaminathan 			    cam_output);
6729bafec742SSukumar Swaminathan 			}
6730bafec742SSukumar Swaminathan 		break;
6731bafec742SSukumar Swaminathan 		}
6732bafec742SSukumar Swaminathan 	default:
6733bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
6734bafec742SSukumar Swaminathan 		    "Address type %d not yet supported.", type);
6735bafec742SSukumar Swaminathan 		status = DDI_FAILURE;
6736bafec742SSukumar Swaminathan 	}
6737bafec742SSukumar Swaminathan exit:
6738bafec742SSukumar Swaminathan 	return (status);
6739bafec742SSukumar Swaminathan }
6740bafec742SSukumar Swaminathan 
6741bafec742SSukumar Swaminathan /*
6742bafec742SSukumar Swaminathan  * The NIC function for this chip has 16 routing indexes.  Each one can be used
6743bafec742SSukumar Swaminathan  * to route different frame types to various inbound queues.  We send broadcast
6744bafec742SSukumar Swaminathan  * multicast/error frames to the default queue for slow handling,
6745bafec742SSukumar Swaminathan  * and CAM hit/RSS frames to the fast handling queues.
6746bafec742SSukumar Swaminathan  */
6747bafec742SSukumar Swaminathan static int
6748bafec742SSukumar Swaminathan ql_set_routing_reg(qlge_t *qlge, uint32_t index, uint32_t mask, int enable)
6749bafec742SSukumar Swaminathan {
6750bafec742SSukumar Swaminathan 	int status;
6751bafec742SSukumar Swaminathan 	uint32_t value = 0;
6752bafec742SSukumar Swaminathan 
6753bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT,
6754bafec742SSukumar Swaminathan 	    ("%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
6755bafec742SSukumar Swaminathan 	    (enable ? "Adding" : "Removing"),
6756bafec742SSukumar Swaminathan 	    ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
6757bafec742SSukumar Swaminathan 	    ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
6758bafec742SSukumar Swaminathan 	    ((index ==
6759bafec742SSukumar Swaminathan 	    RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
6760bafec742SSukumar Swaminathan 	    ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
6761bafec742SSukumar Swaminathan 	    ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
6762bafec742SSukumar Swaminathan 	    ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
6763bafec742SSukumar Swaminathan 	    ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
6764bafec742SSukumar Swaminathan 	    ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
6765bafec742SSukumar Swaminathan 	    ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
6766bafec742SSukumar Swaminathan 	    ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
6767bafec742SSukumar Swaminathan 	    ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
6768bafec742SSukumar Swaminathan 	    ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
6769bafec742SSukumar Swaminathan 	    ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
6770bafec742SSukumar Swaminathan 	    ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
6771bafec742SSukumar Swaminathan 	    ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
6772bafec742SSukumar Swaminathan 	    ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
6773bafec742SSukumar Swaminathan 	    (enable ? "to" : "from")));
6774bafec742SSukumar Swaminathan 
6775bafec742SSukumar Swaminathan 	switch (mask) {
6776bafec742SSukumar Swaminathan 	case RT_IDX_CAM_HIT:
6777bafec742SSukumar Swaminathan 		value = RT_IDX_DST_CAM_Q | /* dest */
6778bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ | /* type */
6779bafec742SSukumar Swaminathan 		    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */
6780bafec742SSukumar Swaminathan 		break;
6781bafec742SSukumar Swaminathan 
6782bafec742SSukumar Swaminathan 	case RT_IDX_VALID: /* Promiscuous Mode frames. */
6783bafec742SSukumar Swaminathan 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6784bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6785bafec742SSukumar Swaminathan 		    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */
6786bafec742SSukumar Swaminathan 		break;
6787bafec742SSukumar Swaminathan 
6788bafec742SSukumar Swaminathan 	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
6789bafec742SSukumar Swaminathan 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6790bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6791bafec742SSukumar Swaminathan 		    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
6792bafec742SSukumar Swaminathan 		break;
6793bafec742SSukumar Swaminathan 
6794bafec742SSukumar Swaminathan 	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
6795bafec742SSukumar Swaminathan 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6796bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6797bafec742SSukumar Swaminathan 		    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */
6798bafec742SSukumar Swaminathan 		break;
6799bafec742SSukumar Swaminathan 
6800bafec742SSukumar Swaminathan 	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
6801bafec742SSukumar Swaminathan 		value = RT_IDX_DST_CAM_Q |	/* dest */
6802bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6803bafec742SSukumar Swaminathan 		    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */
6804bafec742SSukumar Swaminathan 		break;
6805bafec742SSukumar Swaminathan 
6806bafec742SSukumar Swaminathan 	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
6807bafec742SSukumar Swaminathan 		value = RT_IDX_DST_CAM_Q |	/* dest */
6808bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6809bafec742SSukumar Swaminathan 		    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6810bafec742SSukumar Swaminathan 		break;
6811bafec742SSukumar Swaminathan 
6812bafec742SSukumar Swaminathan 	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
6813bafec742SSukumar Swaminathan 		value = RT_IDX_DST_RSS |	/* dest */
6814bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6815bafec742SSukumar Swaminathan 		    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6816bafec742SSukumar Swaminathan 		break;
6817bafec742SSukumar Swaminathan 
6818bafec742SSukumar Swaminathan 	case 0:	/* Clear the E-bit on an entry. */
6819bafec742SSukumar Swaminathan 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6820bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6821bafec742SSukumar Swaminathan 		    (index << RT_IDX_IDX_SHIFT); /* index */
6822bafec742SSukumar Swaminathan 		break;
6823bafec742SSukumar Swaminathan 
6824bafec742SSukumar Swaminathan 	default:
6825bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Mask type %d not yet supported.",
6826bafec742SSukumar Swaminathan 		    mask);
6827bafec742SSukumar Swaminathan 		status = -EPERM;
6828bafec742SSukumar Swaminathan 		goto exit;
6829bafec742SSukumar Swaminathan 	}
6830bafec742SSukumar Swaminathan 
6831bafec742SSukumar Swaminathan 	if (value != 0) {
6832bafec742SSukumar Swaminathan 		status = ql_wait_reg_rdy(qlge, REG_ROUTING_INDEX, RT_IDX_MW, 0);
6833bafec742SSukumar Swaminathan 		if (status)
6834bafec742SSukumar Swaminathan 			goto exit;
6835bafec742SSukumar Swaminathan 		value |= (enable ? RT_IDX_E : 0);
6836bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_ROUTING_INDEX, value);
6837bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_ROUTING_DATA, enable ? mask : 0);
6838bafec742SSukumar Swaminathan 	}
6839bafec742SSukumar Swaminathan 
6840bafec742SSukumar Swaminathan exit:
6841bafec742SSukumar Swaminathan 	return (status);
6842bafec742SSukumar Swaminathan }
6843bafec742SSukumar Swaminathan 
6844bafec742SSukumar Swaminathan /*
6845bafec742SSukumar Swaminathan  * Clear all the entries in the routing table.
6846bafec742SSukumar Swaminathan  * Caller must get semaphore in advance.
6847bafec742SSukumar Swaminathan  */
6848bafec742SSukumar Swaminathan 
6849bafec742SSukumar Swaminathan static int
6850bafec742SSukumar Swaminathan ql_stop_routing(qlge_t *qlge)
6851bafec742SSukumar Swaminathan {
6852bafec742SSukumar Swaminathan 	int status = 0;
6853bafec742SSukumar Swaminathan 	int i;
6854bafec742SSukumar Swaminathan 	/* Clear all the entries in the routing table. */
6855bafec742SSukumar Swaminathan 	for (i = 0; i < 16; i++) {
6856bafec742SSukumar Swaminathan 		status = ql_set_routing_reg(qlge, i, 0, 0);
6857bafec742SSukumar Swaminathan 		if (status) {
6858bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Stop routing failed. ");
6859bafec742SSukumar Swaminathan 		}
6860bafec742SSukumar Swaminathan 	}
6861bafec742SSukumar Swaminathan 	return (status);
6862bafec742SSukumar Swaminathan }
6863bafec742SSukumar Swaminathan 
6864bafec742SSukumar Swaminathan /* Initialize the frame-to-queue routing. */
6865bafec742SSukumar Swaminathan static int
6866bafec742SSukumar Swaminathan ql_route_initialize(qlge_t *qlge)
6867bafec742SSukumar Swaminathan {
6868bafec742SSukumar Swaminathan 	int status = 0;
6869bafec742SSukumar Swaminathan 
6870bafec742SSukumar Swaminathan 	status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
6871bafec742SSukumar Swaminathan 	if (status != DDI_SUCCESS)
6872bafec742SSukumar Swaminathan 		return (status);
6873bafec742SSukumar Swaminathan 
6874bafec742SSukumar Swaminathan 	/* Clear all the entries in the routing table. */
6875bafec742SSukumar Swaminathan 	status = ql_stop_routing(qlge);
6876bafec742SSukumar Swaminathan 	if (status) {
6877bafec742SSukumar Swaminathan 		goto exit;
6878bafec742SSukumar Swaminathan 	}
6879bafec742SSukumar Swaminathan 	status = ql_set_routing_reg(qlge, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
6880bafec742SSukumar Swaminathan 	if (status) {
6881bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
6882bafec742SSukumar Swaminathan 		    "Failed to init routing register for broadcast packets.");
6883bafec742SSukumar Swaminathan 		goto exit;
6884bafec742SSukumar Swaminathan 	}
6885bafec742SSukumar Swaminathan 	/*
6886bafec742SSukumar Swaminathan 	 * If we have more than one inbound queue, then turn on RSS in the
6887bafec742SSukumar Swaminathan 	 * routing block.
6888bafec742SSukumar Swaminathan 	 */
6889bafec742SSukumar Swaminathan 	if (qlge->rss_ring_count > 1) {
6890bafec742SSukumar Swaminathan 		status = ql_set_routing_reg(qlge, RT_IDX_RSS_MATCH_SLOT,
6891bafec742SSukumar Swaminathan 		    RT_IDX_RSS_MATCH, 1);
6892bafec742SSukumar Swaminathan 		if (status) {
6893bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
6894bafec742SSukumar Swaminathan 			    "Failed to init routing register for MATCH RSS "
6895bafec742SSukumar Swaminathan 			    "packets.");
6896bafec742SSukumar Swaminathan 			goto exit;
6897bafec742SSukumar Swaminathan 		}
6898bafec742SSukumar Swaminathan 	}
6899bafec742SSukumar Swaminathan 
6900bafec742SSukumar Swaminathan 	status = ql_set_routing_reg(qlge, RT_IDX_CAM_HIT_SLOT,
6901bafec742SSukumar Swaminathan 	    RT_IDX_CAM_HIT, 1);
6902bafec742SSukumar Swaminathan 	if (status) {
6903bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
6904bafec742SSukumar Swaminathan 		    "Failed to init routing register for CAM packets.");
6905bafec742SSukumar Swaminathan 		goto exit;
6906bafec742SSukumar Swaminathan 	}
6907bafec742SSukumar Swaminathan 
6908bafec742SSukumar Swaminathan 	status = ql_set_routing_reg(qlge, RT_IDX_MCAST_MATCH_SLOT,
6909bafec742SSukumar Swaminathan 	    RT_IDX_MCAST_MATCH, 1);
6910bafec742SSukumar Swaminathan 	if (status) {
6911bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
6912bafec742SSukumar Swaminathan 		    "Failed to init routing register for Multicast "
6913bafec742SSukumar Swaminathan 		    "packets.");
6914bafec742SSukumar Swaminathan 	}
6915bafec742SSukumar Swaminathan 
6916bafec742SSukumar Swaminathan exit:
6917bafec742SSukumar Swaminathan 	ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
6918bafec742SSukumar Swaminathan 	return (status);
6919bafec742SSukumar Swaminathan }
6920bafec742SSukumar Swaminathan 
6921bafec742SSukumar Swaminathan /*
6922bafec742SSukumar Swaminathan  * Initialize hardware
6923bafec742SSukumar Swaminathan  */
6924bafec742SSukumar Swaminathan static int
6925bafec742SSukumar Swaminathan ql_device_initialize(qlge_t *qlge)
6926bafec742SSukumar Swaminathan {
6927accf27a5SSukumar Swaminathan 	uint32_t value, mask;
6928bafec742SSukumar Swaminathan 	int i;
6929bafec742SSukumar Swaminathan 	int status = 0;
6930bafec742SSukumar Swaminathan 	uint16_t pause = PAUSE_MODE_DISABLED;
6931bafec742SSukumar Swaminathan 	boolean_t update_port_config = B_FALSE;
6932accf27a5SSukumar Swaminathan 	uint32_t pause_bit_mask;
6933accf27a5SSukumar Swaminathan 	boolean_t dcbx_enable = B_FALSE;
6934accf27a5SSukumar Swaminathan 	uint32_t dcbx_bit_mask = 0x10;
6935bafec742SSukumar Swaminathan 	/*
6936bafec742SSukumar Swaminathan 	 * Set up the System register to halt on errors.
6937bafec742SSukumar Swaminathan 	 */
6938bafec742SSukumar Swaminathan 	value = SYS_EFE | SYS_FAE;
6939bafec742SSukumar Swaminathan 	mask = value << 16;
6940bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_SYSTEM, mask | value);
6941bafec742SSukumar Swaminathan 
6942bafec742SSukumar Swaminathan 	/* Set the default queue. */
6943bafec742SSukumar Swaminathan 	value = NIC_RCV_CFG_DFQ;
6944bafec742SSukumar Swaminathan 	mask = NIC_RCV_CFG_DFQ_MASK;
6945bafec742SSukumar Swaminathan 
6946bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_NIC_RECEIVE_CONFIGURATION, mask | value);
6947bafec742SSukumar Swaminathan 
6948bafec742SSukumar Swaminathan 	/* Enable the MPI interrupt. */
6949bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_INTERRUPT_MASK, (INTR_MASK_PI << 16)
6950bafec742SSukumar Swaminathan 	    | INTR_MASK_PI);
6951bafec742SSukumar Swaminathan 	/* Enable the function, set pagesize, enable error checking. */
6952bafec742SSukumar Swaminathan 	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
6953bafec742SSukumar Swaminathan 	    FSC_EC | FSC_VM_PAGE_4K | FSC_DBRST_1024;
6954bafec742SSukumar Swaminathan 	/* Set/clear header splitting. */
6955bafec742SSukumar Swaminathan 	if (CFG_IST(qlge, CFG_ENABLE_SPLIT_HEADER)) {
6956bafec742SSukumar Swaminathan 		value |= FSC_SH;
6957bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_SPLIT_HEADER, SMALL_BUFFER_SIZE);
6958bafec742SSukumar Swaminathan 	}
6959bafec742SSukumar Swaminathan 	mask = FSC_VM_PAGESIZE_MASK |
6960bafec742SSukumar Swaminathan 	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
6961bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL, mask | value);
6962bafec742SSukumar Swaminathan 	/*
6963bafec742SSukumar Swaminathan 	 * check current port max frame size, if different from OS setting,
6964bafec742SSukumar Swaminathan 	 * then we need to change
6965bafec742SSukumar Swaminathan 	 */
6966accf27a5SSukumar Swaminathan 	qlge->max_frame_size =
6967bafec742SSukumar Swaminathan 	    (qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE;
6968bafec742SSukumar Swaminathan 
6969accf27a5SSukumar Swaminathan 	mutex_enter(&qlge->mbx_mutex);
6970accf27a5SSukumar Swaminathan 	status = ql_get_port_cfg(qlge);
6971accf27a5SSukumar Swaminathan 	mutex_exit(&qlge->mbx_mutex);
6972accf27a5SSukumar Swaminathan 
6973accf27a5SSukumar Swaminathan 	if (status == DDI_SUCCESS) {
6974accf27a5SSukumar Swaminathan 		/* if current frame size is smaller than required size */
6975accf27a5SSukumar Swaminathan 		if (qlge->port_cfg_info.max_frame_size <
6976accf27a5SSukumar Swaminathan 		    qlge->max_frame_size) {
6977bafec742SSukumar Swaminathan 			QL_PRINT(DBG_MBX,
6978bafec742SSukumar Swaminathan 			    ("update frame size, current %d, new %d\n",
6979bafec742SSukumar Swaminathan 			    qlge->port_cfg_info.max_frame_size,
6980accf27a5SSukumar Swaminathan 			    qlge->max_frame_size));
6981bafec742SSukumar Swaminathan 			qlge->port_cfg_info.max_frame_size =
6982accf27a5SSukumar Swaminathan 			    qlge->max_frame_size;
6983accf27a5SSukumar Swaminathan 			qlge->port_cfg_info.link_cfg |= ENABLE_JUMBO;
6984bafec742SSukumar Swaminathan 			update_port_config = B_TRUE;
6985bafec742SSukumar Swaminathan 		}
6986accf27a5SSukumar Swaminathan 
6987bafec742SSukumar Swaminathan 		if (qlge->port_cfg_info.link_cfg & STD_PAUSE)
6988bafec742SSukumar Swaminathan 			pause = PAUSE_MODE_STANDARD;
6989bafec742SSukumar Swaminathan 		else if (qlge->port_cfg_info.link_cfg & PP_PAUSE)
6990bafec742SSukumar Swaminathan 			pause = PAUSE_MODE_PER_PRIORITY;
6991accf27a5SSukumar Swaminathan 
6992bafec742SSukumar Swaminathan 		if (pause != qlge->pause) {
6993accf27a5SSukumar Swaminathan 			pause_bit_mask = 0x60;	/* bit 5-6 */
6994accf27a5SSukumar Swaminathan 			/* clear pause bits */
6995accf27a5SSukumar Swaminathan 			qlge->port_cfg_info.link_cfg &= ~pause_bit_mask;
6996accf27a5SSukumar Swaminathan 			if (qlge->pause == PAUSE_MODE_STANDARD)
6997accf27a5SSukumar Swaminathan 				qlge->port_cfg_info.link_cfg |= STD_PAUSE;
6998accf27a5SSukumar Swaminathan 			else if (qlge->pause == PAUSE_MODE_PER_PRIORITY)
6999accf27a5SSukumar Swaminathan 				qlge->port_cfg_info.link_cfg |= PP_PAUSE;
7000bafec742SSukumar Swaminathan 			update_port_config = B_TRUE;
7001bafec742SSukumar Swaminathan 		}
7002accf27a5SSukumar Swaminathan 
7003accf27a5SSukumar Swaminathan 		if (qlge->port_cfg_info.link_cfg & DCBX_ENABLE)
7004accf27a5SSukumar Swaminathan 			dcbx_enable = B_TRUE;
7005accf27a5SSukumar Swaminathan 		if (dcbx_enable != qlge->dcbx_enable) {
7006accf27a5SSukumar Swaminathan 			qlge->port_cfg_info.link_cfg &= ~dcbx_bit_mask;
7007accf27a5SSukumar Swaminathan 			if (qlge->dcbx_enable)
7008accf27a5SSukumar Swaminathan 				qlge->port_cfg_info.link_cfg |= DCBX_ENABLE;
7009accf27a5SSukumar Swaminathan 		}
7010accf27a5SSukumar Swaminathan 
7011bafec742SSukumar Swaminathan 		update_port_config = B_TRUE;
7012bafec742SSukumar Swaminathan 
7013bafec742SSukumar Swaminathan 		/* if need to update port configuration */
7014accf27a5SSukumar Swaminathan 		if (update_port_config) {
7015accf27a5SSukumar Swaminathan 			mutex_enter(&qlge->mbx_mutex);
7016accf27a5SSukumar Swaminathan 			(void) ql_set_mpi_port_config(qlge,
7017accf27a5SSukumar Swaminathan 			    qlge->port_cfg_info);
7018accf27a5SSukumar Swaminathan 			mutex_exit(&qlge->mbx_mutex);
7019accf27a5SSukumar Swaminathan 		}
7020bafec742SSukumar Swaminathan 	} else
7021bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "ql_get_port_cfg failed");
7022bafec742SSukumar Swaminathan 
7023bafec742SSukumar Swaminathan 	/* Start up the rx queues. */
7024bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
7025bafec742SSukumar Swaminathan 		status = ql_start_rx_ring(qlge, &qlge->rx_ring[i]);
7026bafec742SSukumar Swaminathan 		if (status) {
7027bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
7028bafec742SSukumar Swaminathan 			    "Failed to start rx ring[%d]", i);
7029bafec742SSukumar Swaminathan 			return (status);
7030bafec742SSukumar Swaminathan 		}
7031bafec742SSukumar Swaminathan 	}
7032bafec742SSukumar Swaminathan 
7033bafec742SSukumar Swaminathan 	/*
7034bafec742SSukumar Swaminathan 	 * If there is more than one inbound completion queue
7035bafec742SSukumar Swaminathan 	 * then download a RICB to configure RSS.
7036bafec742SSukumar Swaminathan 	 */
7037bafec742SSukumar Swaminathan 	if (qlge->rss_ring_count > 1) {
7038bafec742SSukumar Swaminathan 		status = ql_start_rss(qlge);
7039bafec742SSukumar Swaminathan 		if (status) {
7040bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Failed to start RSS.");
7041bafec742SSukumar Swaminathan 			return (status);
7042bafec742SSukumar Swaminathan 		}
7043bafec742SSukumar Swaminathan 	}
7044bafec742SSukumar Swaminathan 
7045bafec742SSukumar Swaminathan 	/* Start up the tx queues. */
7046bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
7047bafec742SSukumar Swaminathan 		status = ql_start_tx_ring(qlge, &qlge->tx_ring[i]);
7048bafec742SSukumar Swaminathan 		if (status) {
7049bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
7050bafec742SSukumar Swaminathan 			    "Failed to start tx ring[%d]", i);
7051bafec742SSukumar Swaminathan 			return (status);
7052bafec742SSukumar Swaminathan 		}
7053bafec742SSukumar Swaminathan 	}
7054bafec742SSukumar Swaminathan 	qlge->selected_tx_ring = 0;
7055bafec742SSukumar Swaminathan 	/* Set the frame routing filter. */
7056bafec742SSukumar Swaminathan 	status = ql_route_initialize(qlge);
7057bafec742SSukumar Swaminathan 	if (status) {
7058bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
7059bafec742SSukumar Swaminathan 		    "Failed to init CAM/Routing tables.");
7060bafec742SSukumar Swaminathan 		return (status);
7061bafec742SSukumar Swaminathan 	}
7062bafec742SSukumar Swaminathan 
7063bafec742SSukumar Swaminathan 	return (status);
7064bafec742SSukumar Swaminathan }
7065bafec742SSukumar Swaminathan /*
7066bafec742SSukumar Swaminathan  * Issue soft reset to chip.
7067bafec742SSukumar Swaminathan  */
7068bafec742SSukumar Swaminathan static int
7069bafec742SSukumar Swaminathan ql_asic_reset(qlge_t *qlge)
7070bafec742SSukumar Swaminathan {
7071bafec742SSukumar Swaminathan 	int status = DDI_SUCCESS;
7072bafec742SSukumar Swaminathan 
7073bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_RESET_FAILOVER, FUNCTION_RESET_MASK
7074bafec742SSukumar Swaminathan 	    |FUNCTION_RESET);
7075bafec742SSukumar Swaminathan 
7076accf27a5SSukumar Swaminathan 	if (ql_wait_reg_bit(qlge, REG_RESET_FAILOVER, FUNCTION_RESET,
7077accf27a5SSukumar Swaminathan 	    BIT_RESET, 0) != DDI_SUCCESS) {
7078bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
7079bafec742SSukumar Swaminathan 		    "TIMEOUT!!! errored out of resetting the chip!");
7080bafec742SSukumar Swaminathan 		status = DDI_FAILURE;
7081bafec742SSukumar Swaminathan 	}
7082bafec742SSukumar Swaminathan 
7083bafec742SSukumar Swaminathan 	return (status);
7084bafec742SSukumar Swaminathan }
7085bafec742SSukumar Swaminathan 
7086bafec742SSukumar Swaminathan /*
7087bafec742SSukumar Swaminathan  * If there are more than MIN_BUFFERS_ARM_COUNT small buffer descriptors in
7088bafec742SSukumar Swaminathan  * its free list, move xMIN_BUFFERS_ARM_COUNT descriptors to its in use list
7089bafec742SSukumar Swaminathan  * to be used by hardware.
7090bafec742SSukumar Swaminathan  */
7091bafec742SSukumar Swaminathan static void
7092bafec742SSukumar Swaminathan ql_arm_sbuf(qlge_t *qlge, struct rx_ring *rx_ring)
7093bafec742SSukumar Swaminathan {
7094bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
7095bafec742SSukumar Swaminathan 	int i;
7096bafec742SSukumar Swaminathan 	uint64_t *sbq_entry = rx_ring->sbq_dma.vaddr;
7097bafec742SSukumar Swaminathan 	uint32_t arm_count;
7098bafec742SSukumar Swaminathan 
7099bafec742SSukumar Swaminathan 	if (rx_ring->sbuf_free_count > rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT)
7100bafec742SSukumar Swaminathan 		arm_count = (rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT);
7101bafec742SSukumar Swaminathan 	else {
7102bafec742SSukumar Swaminathan 		/* Adjust to a multiple of 16 */
7103bafec742SSukumar Swaminathan 		arm_count = (rx_ring->sbuf_free_count / 16) * 16;
7104bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
7105bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "adjust sbuf arm_count %d\n", arm_count);
7106bafec742SSukumar Swaminathan #endif
7107bafec742SSukumar Swaminathan 	}
7108bafec742SSukumar Swaminathan 	for (i = 0; i < arm_count; i++) {
7109bafec742SSukumar Swaminathan 		sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
7110bafec742SSukumar Swaminathan 		if (sbq_desc == NULL)
7111bafec742SSukumar Swaminathan 			break;
7112bafec742SSukumar Swaminathan 		/* Arm asic */
7113bafec742SSukumar Swaminathan 		*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
7114bafec742SSukumar Swaminathan 		sbq_entry++;
7115bafec742SSukumar Swaminathan 
7116bafec742SSukumar Swaminathan 		/* link the descriptors to in_use_list */
7117bafec742SSukumar Swaminathan 		ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
7118bafec742SSukumar Swaminathan 		rx_ring->sbq_prod_idx++;
7119bafec742SSukumar Swaminathan 	}
7120bafec742SSukumar Swaminathan 	ql_update_sbq_prod_idx(qlge, rx_ring);
7121bafec742SSukumar Swaminathan }
7122bafec742SSukumar Swaminathan 
7123bafec742SSukumar Swaminathan /*
7124bafec742SSukumar Swaminathan  * If there are more than MIN_BUFFERS_ARM_COUNT large buffer descriptors in
7125bafec742SSukumar Swaminathan  * its free list, move xMIN_BUFFERS_ARM_COUNT descriptors to its in use list
7126bafec742SSukumar Swaminathan  * to be used by hardware.
7127bafec742SSukumar Swaminathan  */
7128bafec742SSukumar Swaminathan static void
7129bafec742SSukumar Swaminathan ql_arm_lbuf(qlge_t *qlge, struct rx_ring *rx_ring)
7130bafec742SSukumar Swaminathan {
7131bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
7132bafec742SSukumar Swaminathan 	int i;
7133bafec742SSukumar Swaminathan 	uint64_t *lbq_entry = rx_ring->lbq_dma.vaddr;
7134bafec742SSukumar Swaminathan 	uint32_t arm_count;
7135bafec742SSukumar Swaminathan 
7136bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_free_count > rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT)
7137bafec742SSukumar Swaminathan 		arm_count = (rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT);
7138bafec742SSukumar Swaminathan 	else {
7139bafec742SSukumar Swaminathan 		/* Adjust to a multiple of 16 */
7140bafec742SSukumar Swaminathan 		arm_count = (rx_ring->lbuf_free_count / 16) * 16;
7141bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
7142bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "adjust lbuf arm_count %d\n", arm_count);
7143bafec742SSukumar Swaminathan #endif
7144bafec742SSukumar Swaminathan 	}
7145bafec742SSukumar Swaminathan 	for (i = 0; i < arm_count; i++) {
7146bafec742SSukumar Swaminathan 		lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
7147bafec742SSukumar Swaminathan 		if (lbq_desc == NULL)
7148bafec742SSukumar Swaminathan 			break;
7149bafec742SSukumar Swaminathan 		/* Arm asic */
7150bafec742SSukumar Swaminathan 		*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
7151bafec742SSukumar Swaminathan 		lbq_entry++;
7152bafec742SSukumar Swaminathan 
7153bafec742SSukumar Swaminathan 		/* link the descriptors to in_use_list */
7154bafec742SSukumar Swaminathan 		ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
7155bafec742SSukumar Swaminathan 		rx_ring->lbq_prod_idx++;
7156bafec742SSukumar Swaminathan 	}
7157bafec742SSukumar Swaminathan 	ql_update_lbq_prod_idx(qlge, rx_ring);
7158bafec742SSukumar Swaminathan }
7159bafec742SSukumar Swaminathan 
7160bafec742SSukumar Swaminathan 
/*
 * Bring the adapter online: run device initialization, arm the small
 * and large receive buffers, enable the request and response queues
 * via their doorbells, mark every ring started, and finally turn on
 * completion and global interrupts.
 *
 * NOTE(review): the mutex_exit/mutex_enter pair near the end implies
 * the caller holds qlge->hw_mutex on entry; it is dropped briefly so
 * the completion-interrupt path can take it — confirm against callers.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE after resetting the asic if
 * device initialization fails.
 */
static int
ql_bringup_adapter(qlge_t *qlge)
{
	int i;

	if (ql_device_initialize(qlge) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "?%s(%d): ql_device_initialize failed",
		    __func__, qlge->instance);
		goto err_bringup;
	}
	qlge->sequence |= INIT_ADAPTER_UP;

#ifdef QLGE_TRACK_BUFFER_USAGE
	/* Seed the low-water statistics to their maximum values. */
	for (i = 0; i < qlge->rx_ring_count; i++) {
		if (qlge->rx_ring[i].type != TX_Q) {
			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
		}
		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
	}
#endif
	/* Arm buffers */
	for (i = 0; i < qlge->rx_ring_count; i++) {
		if (qlge->rx_ring[i].type != TX_Q) {
			ql_arm_sbuf(qlge, &qlge->rx_ring[i]);
			ql_arm_lbuf(qlge, &qlge->rx_ring[i]);
		}
	}

	/* Enable work/request queues */
	for (i = 0; i < qlge->tx_ring_count; i++) {
		if (qlge->tx_ring[i].valid_db_reg)
			ql_write_doorbell_reg(qlge,
			    qlge->tx_ring[i].valid_db_reg,
			    REQ_Q_VALID);
	}

	/* Enable completion queues */
	for (i = 0; i < qlge->rx_ring_count; i++) {
		if (qlge->rx_ring[i].valid_db_reg)
			ql_write_doorbell_reg(qlge,
			    qlge->rx_ring[i].valid_db_reg,
			    RSP_Q_VALID);
	}

	/* Mark all tx rings started, under each ring's own lock. */
	for (i = 0; i < qlge->tx_ring_count; i++) {
		mutex_enter(&qlge->tx_ring[i].tx_lock);
		qlge->tx_ring[i].mac_flags = QL_MAC_STARTED;
		mutex_exit(&qlge->tx_ring[i].tx_lock);
	}

	/* Mark all rx rings started, under each ring's own lock. */
	for (i = 0; i < qlge->rx_ring_count; i++) {
		mutex_enter(&qlge->rx_ring[i].rx_lock);
		qlge->rx_ring[i].mac_flags = QL_MAC_STARTED;
		mutex_exit(&qlge->rx_ring[i].rx_lock);
	}

	/* This mutex will get re-acquired in enable_completion interrupt */
	mutex_exit(&qlge->hw_mutex);
	/* Traffic can start flowing now */
	ql_enable_all_completion_interrupts(qlge);
	mutex_enter(&qlge->hw_mutex);

	ql_enable_global_interrupt(qlge);

	qlge->sequence |= ADAPTER_INIT;
	return (DDI_SUCCESS);

err_bringup:
	/* Best effort: put the asic back into reset before failing. */
	(void) ql_asic_reset(qlge);
	return (DDI_FAILURE);
}
7238bafec742SSukumar Swaminathan 
7239bafec742SSukumar Swaminathan /*
7240bafec742SSukumar Swaminathan  * Initialize mutexes of each rx/tx rings
7241bafec742SSukumar Swaminathan  */
7242bafec742SSukumar Swaminathan static int
7243bafec742SSukumar Swaminathan ql_init_rx_tx_locks(qlge_t *qlge)
7244bafec742SSukumar Swaminathan {
7245bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
7246bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
7247bafec742SSukumar Swaminathan 	int i;
7248bafec742SSukumar Swaminathan 
7249bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
7250bafec742SSukumar Swaminathan 		tx_ring = &qlge->tx_ring[i];
7251bafec742SSukumar Swaminathan 		mutex_init(&tx_ring->tx_lock, NULL, MUTEX_DRIVER,
7252bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
7253bafec742SSukumar Swaminathan 	}
7254bafec742SSukumar Swaminathan 
7255bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
7256bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
7257bafec742SSukumar Swaminathan 		mutex_init(&rx_ring->rx_lock, NULL, MUTEX_DRIVER,
7258bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
7259bafec742SSukumar Swaminathan 		mutex_init(&rx_ring->sbq_lock, NULL, MUTEX_DRIVER,
7260bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
7261bafec742SSukumar Swaminathan 		mutex_init(&rx_ring->lbq_lock, NULL, MUTEX_DRIVER,
7262bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
7263bafec742SSukumar Swaminathan 	}
7264bafec742SSukumar Swaminathan 
7265bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
7266bafec742SSukumar Swaminathan }
7267bafec742SSukumar Swaminathan 
/*ARGSUSED*/
/*
 * FMA error-handler callback.  Simply call pci_ereport_post which
 * generates ereports for errors that occur in the PCI local bus
 * configuration status registers, then hand the resulting fault
 * status back to the fault management framework.
 *
 * NOTE(review): fme_status is read only after pci_ereport_post(),
 * which presumably may update it — keep this ordering.
 */
static int
ql_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
7279accf27a5SSukumar Swaminathan 
/*
 * Register this instance's fault management (FMA) capabilities with
 * the IO Fault Services framework and set up whatever each granted
 * capability requires: PCI ereport support, the error callback, and
 * flag-error access/DMA attributes.  qlge->fm_capabilities is both
 * input and output — ddi_fm_init() may clear bits the parent nexus
 * does not support.
 */
static void
ql_fm_init(qlge_t *qlge)
{
	ddi_iblock_cookie_t iblk;

	QL_PRINT(DBG_INIT, ("ql_fm_init(%d) entered, FMA capability %x\n",
	    qlge->instance, qlge->fm_capabilities));
	/*
	 * Register capabilities with IO Fault Services. The capabilities
	 * set above may not be supported by the parent nexus, in that case
	 * some capability bits may be cleared.
	 */
	if (qlge->fm_capabilities)
		ddi_fm_init(qlge->dip, &qlge->fm_capabilities, &iblk);

	/*
	 * Initialize pci ereport capabilities if ereport capable
	 */
	if (DDI_FM_EREPORT_CAP(qlge->fm_capabilities) ||
	    DDI_FM_ERRCB_CAP(qlge->fm_capabilities)) {
		pci_ereport_setup(qlge->dip);
	}

	/* Register error callback if error callback capable */
	if (DDI_FM_ERRCB_CAP(qlge->fm_capabilities)) {
		ddi_fm_handler_register(qlge->dip,
		    ql_fm_error_cb, (void*) qlge);
	}

	/*
	 * DDI_FLGERR_ACC indicates:
	 *  Driver will check its access handle(s) for faults on
	 *   a regular basis by calling ddi_fm_acc_err_get
	 *  Driver is able to cope with incorrect results of I/O
	 *   operations resulted from an I/O fault
	 */
	if (DDI_FM_ACC_ERR_CAP(qlge->fm_capabilities)) {
		/* NOTE: ql_dev_acc_attr is shared file-scope state. */
		ql_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	}

	/*
	 * DDI_DMA_FLAGERR indicates:
	 *  Driver will check its DMA handle(s) for faults on a
	 *   regular basis using ddi_fm_dma_err_get
	 *  Driver is able to cope with incorrect results of DMA
	 *   operations resulted from an I/O fault
	 */
	if (DDI_FM_DMA_ERR_CAP(qlge->fm_capabilities)) {
		/* NOTE: these DMA attributes are shared file-scope state. */
		tx_mapping_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	}
	QL_PRINT(DBG_INIT, ("ql_fm_init(%d) done\n",
	    qlge->instance));
}
7334accf27a5SSukumar Swaminathan 
7335accf27a5SSukumar Swaminathan static void
7336accf27a5SSukumar Swaminathan ql_fm_fini(qlge_t *qlge)
7337accf27a5SSukumar Swaminathan {
7338accf27a5SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("ql_fm_fini(%d) entered\n",
7339accf27a5SSukumar Swaminathan 	    qlge->instance));
7340accf27a5SSukumar Swaminathan 	/* Only unregister FMA capabilities if we registered some */
7341accf27a5SSukumar Swaminathan 	if (qlge->fm_capabilities) {
7342accf27a5SSukumar Swaminathan 
7343accf27a5SSukumar Swaminathan 		/*
7344accf27a5SSukumar Swaminathan 		 * Release any resources allocated by pci_ereport_setup()
7345accf27a5SSukumar Swaminathan 		 */
7346accf27a5SSukumar Swaminathan 		if (DDI_FM_EREPORT_CAP(qlge->fm_capabilities) ||
7347accf27a5SSukumar Swaminathan 		    DDI_FM_ERRCB_CAP(qlge->fm_capabilities))
7348accf27a5SSukumar Swaminathan 			pci_ereport_teardown(qlge->dip);
7349accf27a5SSukumar Swaminathan 
7350accf27a5SSukumar Swaminathan 		/*
7351accf27a5SSukumar Swaminathan 		 * Un-register error callback if error callback capable
7352accf27a5SSukumar Swaminathan 		 */
7353accf27a5SSukumar Swaminathan 		if (DDI_FM_ERRCB_CAP(qlge->fm_capabilities))
7354accf27a5SSukumar Swaminathan 			ddi_fm_handler_unregister(qlge->dip);
7355accf27a5SSukumar Swaminathan 
7356accf27a5SSukumar Swaminathan 		/* Unregister from IO Fault Services */
7357accf27a5SSukumar Swaminathan 		ddi_fm_fini(qlge->dip);
7358accf27a5SSukumar Swaminathan 	}
7359accf27a5SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("ql_fm_fini(%d) done\n",
7360accf27a5SSukumar Swaminathan 	    qlge->instance));
7361accf27a5SSukumar Swaminathan }
7362bafec742SSukumar Swaminathan /*
7363bafec742SSukumar Swaminathan  * ql_attach - Driver attach.
7364bafec742SSukumar Swaminathan  */
7365bafec742SSukumar Swaminathan static int
7366bafec742SSukumar Swaminathan ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
7367bafec742SSukumar Swaminathan {
7368bafec742SSukumar Swaminathan 	int instance;
7369accf27a5SSukumar Swaminathan 	qlge_t *qlge = NULL;
7370bafec742SSukumar Swaminathan 	int rval;
7371bafec742SSukumar Swaminathan 	uint16_t w;
7372bafec742SSukumar Swaminathan 	mac_register_t *macp = NULL;
7373accf27a5SSukumar Swaminathan 	uint32_t data;
7374accf27a5SSukumar Swaminathan 
7375bafec742SSukumar Swaminathan 	rval = DDI_FAILURE;
7376bafec742SSukumar Swaminathan 
7377bafec742SSukumar Swaminathan 	/* first get the instance */
7378bafec742SSukumar Swaminathan 	instance = ddi_get_instance(dip);
7379bafec742SSukumar Swaminathan 
7380bafec742SSukumar Swaminathan 	switch (cmd) {
7381bafec742SSukumar Swaminathan 	case DDI_ATTACH:
7382bafec742SSukumar Swaminathan 		/*
7383bafec742SSukumar Swaminathan 		 * Allocate our per-device-instance structure
7384bafec742SSukumar Swaminathan 		 */
7385bafec742SSukumar Swaminathan 		qlge = (qlge_t *)kmem_zalloc(sizeof (*qlge), KM_SLEEP);
7386bafec742SSukumar Swaminathan 		ASSERT(qlge != NULL);
7387bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_SOFTSTATE_ALLOC;
7388bafec742SSukumar Swaminathan 
7389bafec742SSukumar Swaminathan 		qlge->dip = dip;
7390bafec742SSukumar Swaminathan 		qlge->instance = instance;
7391accf27a5SSukumar Swaminathan 		/* Set up the coalescing parameters. */
7392accf27a5SSukumar Swaminathan 		qlge->ql_dbgprnt = 0;
7393accf27a5SSukumar Swaminathan #if QL_DEBUG
7394accf27a5SSukumar Swaminathan 		qlge->ql_dbgprnt = QL_DEBUG;
7395accf27a5SSukumar Swaminathan #endif /* QL_DEBUG */
7396accf27a5SSukumar Swaminathan 
7397accf27a5SSukumar Swaminathan 		/*
7398accf27a5SSukumar Swaminathan 		 * Initialize for fma support
7399accf27a5SSukumar Swaminathan 		 */
7400accf27a5SSukumar Swaminathan 		/* fault management (fm) capabilities. */
7401accf27a5SSukumar Swaminathan 		qlge->fm_capabilities =
7402accf27a5SSukumar Swaminathan 		    DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE;
7403accf27a5SSukumar Swaminathan 		data = ql_get_prop(qlge, "fm-capable");
7404accf27a5SSukumar Swaminathan 		if (data <= 0xf) {
7405accf27a5SSukumar Swaminathan 			qlge->fm_capabilities = data;
7406accf27a5SSukumar Swaminathan 		}
7407accf27a5SSukumar Swaminathan 		ql_fm_init(qlge);
7408accf27a5SSukumar Swaminathan 		qlge->sequence |= INIT_FM;
7409accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("ql_attach(%d): fma init done\n",
7410accf27a5SSukumar Swaminathan 		    qlge->instance));
7411bafec742SSukumar Swaminathan 
7412bafec742SSukumar Swaminathan 		/*
7413bafec742SSukumar Swaminathan 		 * Setup the ISP8x00 registers address mapping to be
7414bafec742SSukumar Swaminathan 		 * accessed by this particular driver.
7415bafec742SSukumar Swaminathan 		 * 0x0   Configuration Space
7416bafec742SSukumar Swaminathan 		 * 0x1   I/O Space
7417bafec742SSukumar Swaminathan 		 * 0x2   1st Memory Space address - Control Register Set
7418bafec742SSukumar Swaminathan 		 * 0x3   2nd Memory Space address - Doorbell Memory Space
7419bafec742SSukumar Swaminathan 		 */
7420bafec742SSukumar Swaminathan 		w = 2;
7421bafec742SSukumar Swaminathan 		if (ddi_regs_map_setup(dip, w, (caddr_t *)&qlge->iobase, 0,
7422bafec742SSukumar Swaminathan 		    sizeof (dev_reg_t), &ql_dev_acc_attr,
7423bafec742SSukumar Swaminathan 		    &qlge->dev_handle) != DDI_SUCCESS) {
7424bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): Unable to map device "
7425bafec742SSukumar Swaminathan 			    "registers", ADAPTER_NAME, instance);
7426bafec742SSukumar Swaminathan 			break;
7427bafec742SSukumar Swaminathan 		}
7428bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("ql_attach: I/O base = 0x%x\n",
7429bafec742SSukumar Swaminathan 		    qlge->iobase));
7430bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_REGS_SETUP;
7431bafec742SSukumar Swaminathan 
7432bafec742SSukumar Swaminathan 		/* map Doorbell memory space */
7433bafec742SSukumar Swaminathan 		w = 3;
7434bafec742SSukumar Swaminathan 		if (ddi_regs_map_setup(dip, w,
7435bafec742SSukumar Swaminathan 		    (caddr_t *)&qlge->doorbell_reg_iobase, 0,
7436bafec742SSukumar Swaminathan 		    0x100000 /* sizeof (dev_doorbell_reg_t) */,
7437bafec742SSukumar Swaminathan 		    &ql_dev_acc_attr,
7438bafec742SSukumar Swaminathan 		    &qlge->dev_doorbell_reg_handle) != DDI_SUCCESS) {
7439bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): Unable to map Doorbell "
7440bafec742SSukumar Swaminathan 			    "registers",
7441bafec742SSukumar Swaminathan 			    ADAPTER_NAME, instance);
7442bafec742SSukumar Swaminathan 			break;
7443bafec742SSukumar Swaminathan 		}
7444bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("ql_attach: Doorbell I/O base = 0x%x\n",
7445bafec742SSukumar Swaminathan 		    qlge->doorbell_reg_iobase));
7446bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_DOORBELL_REGS_SETUP;
7447bafec742SSukumar Swaminathan 
7448bafec742SSukumar Swaminathan 		/*
7449bafec742SSukumar Swaminathan 		 * Allocate a macinfo structure for this instance
7450bafec742SSukumar Swaminathan 		 */
7451bafec742SSukumar Swaminathan 		if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
7452bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): mac_alloc failed",
7453bafec742SSukumar Swaminathan 			    __func__, instance);
7454accf27a5SSukumar Swaminathan 			break;
7455bafec742SSukumar Swaminathan 		}
7456bafec742SSukumar Swaminathan 		/* save adapter status to dip private data */
7457bafec742SSukumar Swaminathan 		ddi_set_driver_private(dip, qlge);
7458bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d): Allocate macinfo structure done\n",
7459bafec742SSukumar Swaminathan 		    ADAPTER_NAME, instance));
7460bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_MAC_ALLOC;
7461bafec742SSukumar Swaminathan 
7462bafec742SSukumar Swaminathan 		/*
7463bafec742SSukumar Swaminathan 		 * Attach this instance of the device
7464bafec742SSukumar Swaminathan 		 */
7465bafec742SSukumar Swaminathan 		/* Setup PCI Local Bus Configuration resource. */
7466bafec742SSukumar Swaminathan 		if (pci_config_setup(dip, &qlge->pci_handle) != DDI_SUCCESS) {
7467bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d):Unable to get PCI resources",
7468bafec742SSukumar Swaminathan 			    ADAPTER_NAME, instance);
7469accf27a5SSukumar Swaminathan 			if (qlge->fm_enable) {
7470accf27a5SSukumar Swaminathan 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
7471accf27a5SSukumar Swaminathan 				ddi_fm_service_impact(qlge->dip,
7472accf27a5SSukumar Swaminathan 				    DDI_SERVICE_LOST);
7473accf27a5SSukumar Swaminathan 			}
7474bafec742SSukumar Swaminathan 			break;
7475bafec742SSukumar Swaminathan 		}
7476bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_PCI_CONFIG_SETUP;
7477accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("ql_attach(%d): pci_config_setup done\n",
7478accf27a5SSukumar Swaminathan 		    instance));
7479bafec742SSukumar Swaminathan 
7480bafec742SSukumar Swaminathan 		if (ql_init_instance(qlge) != DDI_SUCCESS) {
7481bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): Unable to initialize device "
7482bafec742SSukumar Swaminathan 			    "instance", ADAPTER_NAME, instance);
7483accf27a5SSukumar Swaminathan 			if (qlge->fm_enable) {
7484accf27a5SSukumar Swaminathan 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
7485accf27a5SSukumar Swaminathan 				ddi_fm_service_impact(qlge->dip,
7486accf27a5SSukumar Swaminathan 				    DDI_SERVICE_LOST);
7487accf27a5SSukumar Swaminathan 			}
7488bafec742SSukumar Swaminathan 			break;
7489bafec742SSukumar Swaminathan 		}
7490accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("ql_attach(%d): ql_init_instance done\n",
7491accf27a5SSukumar Swaminathan 		    instance));
7492bafec742SSukumar Swaminathan 
7493bafec742SSukumar Swaminathan 		/* Setup interrupt vectors */
7494bafec742SSukumar Swaminathan 		if (ql_alloc_irqs(qlge) != DDI_SUCCESS) {
7495bafec742SSukumar Swaminathan 			break;
7496bafec742SSukumar Swaminathan 		}
7497bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_INTR_ALLOC;
7498accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("ql_attach(%d): ql_alloc_irqs done\n",
7499accf27a5SSukumar Swaminathan 		    instance));
7500bafec742SSukumar Swaminathan 
7501bafec742SSukumar Swaminathan 		/* Configure queues */
7502bafec742SSukumar Swaminathan 		if (ql_setup_rings(qlge) != DDI_SUCCESS) {
7503bafec742SSukumar Swaminathan 			break;
7504bafec742SSukumar Swaminathan 		}
7505bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_SETUP_RINGS;
7506accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("ql_attach(%d): setup rings done\n",
7507accf27a5SSukumar Swaminathan 		    instance));
7508accf27a5SSukumar Swaminathan 
7509accf27a5SSukumar Swaminathan 		/*
7510accf27a5SSukumar Swaminathan 		 * Allocate memory resources
7511accf27a5SSukumar Swaminathan 		 */
7512accf27a5SSukumar Swaminathan 		if (ql_alloc_mem_resources(qlge) != DDI_SUCCESS) {
7513accf27a5SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): memory allocation failed",
7514accf27a5SSukumar Swaminathan 			    __func__, qlge->instance);
7515accf27a5SSukumar Swaminathan 			break;
7516accf27a5SSukumar Swaminathan 		}
7517accf27a5SSukumar Swaminathan 		qlge->sequence |= INIT_MEMORY_ALLOC;
7518accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("ql_alloc_mem_resources(%d) done\n",
7519accf27a5SSukumar Swaminathan 		    instance));
7520accf27a5SSukumar Swaminathan 
7521bafec742SSukumar Swaminathan 		/*
7522bafec742SSukumar Swaminathan 		 * Map queues to interrupt vectors
7523bafec742SSukumar Swaminathan 		 */
7524bafec742SSukumar Swaminathan 		ql_resolve_queues_to_irqs(qlge);
7525bafec742SSukumar Swaminathan 
7526bafec742SSukumar Swaminathan 		/* Initialize mutex, need the interrupt priority */
75270662fbf4SSukumar Swaminathan 		(void) ql_init_rx_tx_locks(qlge);
7528bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_LOCKS_CREATED;
7529accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d): ql_init_rx_tx_locks done\n",
7530accf27a5SSukumar Swaminathan 		    ADAPTER_NAME, instance));
7531bafec742SSukumar Swaminathan 
7532bafec742SSukumar Swaminathan 		/*
7533bafec742SSukumar Swaminathan 		 * Use a soft interrupt to do something that we do not want
7534bafec742SSukumar Swaminathan 		 * to do in regular network functions or with mutexs being held
7535bafec742SSukumar Swaminathan 		 */
7536bafec742SSukumar Swaminathan 		if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_event_intr_hdl,
7537bafec742SSukumar Swaminathan 		    DDI_INTR_SOFTPRI_MIN, ql_mpi_event_work, (caddr_t)qlge)
7538bafec742SSukumar Swaminathan 		    != DDI_SUCCESS) {
7539bafec742SSukumar Swaminathan 			break;
7540bafec742SSukumar Swaminathan 		}
7541bafec742SSukumar Swaminathan 
7542bafec742SSukumar Swaminathan 		if (ddi_intr_add_softint(qlge->dip, &qlge->asic_reset_intr_hdl,
7543bafec742SSukumar Swaminathan 		    DDI_INTR_SOFTPRI_MIN, ql_asic_reset_work, (caddr_t)qlge)
7544bafec742SSukumar Swaminathan 		    != DDI_SUCCESS) {
7545bafec742SSukumar Swaminathan 			break;
7546bafec742SSukumar Swaminathan 		}
7547bafec742SSukumar Swaminathan 
7548bafec742SSukumar Swaminathan 		if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_reset_intr_hdl,
7549bafec742SSukumar Swaminathan 		    DDI_INTR_SOFTPRI_MIN, ql_mpi_reset_work, (caddr_t)qlge)
7550bafec742SSukumar Swaminathan 		    != DDI_SUCCESS) {
7551bafec742SSukumar Swaminathan 			break;
7552bafec742SSukumar Swaminathan 		}
7553bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_ADD_SOFT_INTERRUPT;
7554accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d): ddi_intr_add_softint done\n",
7555accf27a5SSukumar Swaminathan 		    ADAPTER_NAME, instance));
7556bafec742SSukumar Swaminathan 
7557bafec742SSukumar Swaminathan 		/*
7558bafec742SSukumar Swaminathan 		 * mutex to protect the adapter state structure.
7559bafec742SSukumar Swaminathan 		 * initialize mutexes according to the interrupt priority
7560bafec742SSukumar Swaminathan 		 */
7561bafec742SSukumar Swaminathan 		mutex_init(&qlge->gen_mutex, NULL, MUTEX_DRIVER,
7562bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
7563bafec742SSukumar Swaminathan 		mutex_init(&qlge->hw_mutex, NULL, MUTEX_DRIVER,
7564bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
7565bafec742SSukumar Swaminathan 		mutex_init(&qlge->mbx_mutex, NULL, MUTEX_DRIVER,
7566bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
7567bafec742SSukumar Swaminathan 
7568bafec742SSukumar Swaminathan 		/* Mailbox wait and interrupt conditional variable. */
7569bafec742SSukumar Swaminathan 		cv_init(&qlge->cv_mbx_intr, NULL, CV_DRIVER, NULL);
7570bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_MUTEX;
7571accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d): mutex_init done\n",
7572accf27a5SSukumar Swaminathan 		    ADAPTER_NAME, instance));
7573bafec742SSukumar Swaminathan 
7574bafec742SSukumar Swaminathan 		/*
7575bafec742SSukumar Swaminathan 		 * KStats
7576bafec742SSukumar Swaminathan 		 */
7577bafec742SSukumar Swaminathan 		if (ql_init_kstats(qlge) != DDI_SUCCESS) {
7578bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): KState initialization failed",
7579bafec742SSukumar Swaminathan 			    ADAPTER_NAME, instance);
7580bafec742SSukumar Swaminathan 			break;
7581bafec742SSukumar Swaminathan 		}
7582bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_KSTATS;
7583accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d): ql_init_kstats done\n",
7584accf27a5SSukumar Swaminathan 		    ADAPTER_NAME, instance));
7585bafec742SSukumar Swaminathan 
7586bafec742SSukumar Swaminathan 		/*
7587bafec742SSukumar Swaminathan 		 * Initialize gld macinfo structure
7588bafec742SSukumar Swaminathan 		 */
7589bafec742SSukumar Swaminathan 		ql_gld3_init(qlge, macp);
7590accf27a5SSukumar Swaminathan 		/*
7591accf27a5SSukumar Swaminathan 		 * Add interrupt handlers
7592accf27a5SSukumar Swaminathan 		 */
7593accf27a5SSukumar Swaminathan 		if (ql_add_intr_handlers(qlge) != DDI_SUCCESS) {
7594accf27a5SSukumar Swaminathan 			cmn_err(CE_WARN, "Failed to add interrupt "
7595accf27a5SSukumar Swaminathan 			    "handlers");
7596accf27a5SSukumar Swaminathan 			break;
7597accf27a5SSukumar Swaminathan 		}
7598accf27a5SSukumar Swaminathan 		qlge->sequence |= INIT_ADD_INTERRUPT;
7599accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d): Add interrupt handler done\n",
7600accf27a5SSukumar Swaminathan 		    ADAPTER_NAME, instance));
7601bafec742SSukumar Swaminathan 
7602accf27a5SSukumar Swaminathan 		/*
7603accf27a5SSukumar Swaminathan 		 * MAC Register
7604accf27a5SSukumar Swaminathan 		 */
7605bafec742SSukumar Swaminathan 		if (mac_register(macp, &qlge->mh) != DDI_SUCCESS) {
7606bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): mac_register failed",
7607bafec742SSukumar Swaminathan 			    __func__, instance);
7608bafec742SSukumar Swaminathan 			break;
7609bafec742SSukumar Swaminathan 		}
7610bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_MAC_REGISTERED;
7611bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("%s(%d): mac_register done\n",
7612bafec742SSukumar Swaminathan 		    ADAPTER_NAME, instance));
7613bafec742SSukumar Swaminathan 
7614bafec742SSukumar Swaminathan 		mac_free(macp);
7615bafec742SSukumar Swaminathan 		macp = NULL;
7616bafec742SSukumar Swaminathan 
7617bafec742SSukumar Swaminathan 		qlge->mac_flags = QL_MAC_ATTACHED;
7618bafec742SSukumar Swaminathan 
7619bafec742SSukumar Swaminathan 		ddi_report_dev(dip);
7620bafec742SSukumar Swaminathan 
7621bafec742SSukumar Swaminathan 		rval = DDI_SUCCESS;
7622accf27a5SSukumar Swaminathan 
7623bafec742SSukumar Swaminathan 	break;
7624bafec742SSukumar Swaminathan /*
7625bafec742SSukumar Swaminathan  * DDI_RESUME
7626bafec742SSukumar Swaminathan  * When called  with  cmd  set  to  DDI_RESUME,  attach()  must
7627bafec742SSukumar Swaminathan  * restore  the hardware state of a device (power may have been
7628bafec742SSukumar Swaminathan  * removed from the device), allow  pending  requests  to  con-
7629bafec742SSukumar Swaminathan  * tinue,  and  service  new requests. In this case, the driver
7630bafec742SSukumar Swaminathan  * must not  make  any  assumptions  about  the  state  of  the
7631bafec742SSukumar Swaminathan  * hardware,  but  must  restore the state of the device except
7632bafec742SSukumar Swaminathan  * for the power level of components.
7633bafec742SSukumar Swaminathan  *
7634bafec742SSukumar Swaminathan  */
7635bafec742SSukumar Swaminathan 	case DDI_RESUME:
7636bafec742SSukumar Swaminathan 
7637bafec742SSukumar Swaminathan 		if ((qlge = (qlge_t *)QL_GET_DEV(dip)) == NULL)
7638bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
7639bafec742SSukumar Swaminathan 
7640bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("%s(%d)-DDI_RESUME\n",
7641bafec742SSukumar Swaminathan 		    __func__, qlge->instance));
7642bafec742SSukumar Swaminathan 
7643bafec742SSukumar Swaminathan 		mutex_enter(&qlge->gen_mutex);
7644bafec742SSukumar Swaminathan 		rval = ql_do_start(qlge);
7645bafec742SSukumar Swaminathan 		mutex_exit(&qlge->gen_mutex);
7646bafec742SSukumar Swaminathan 		break;
7647bafec742SSukumar Swaminathan 
7648bafec742SSukumar Swaminathan 	default:
7649bafec742SSukumar Swaminathan 		break;
7650bafec742SSukumar Swaminathan 	}
7651accf27a5SSukumar Swaminathan 
7652accf27a5SSukumar Swaminathan 	/* if failed to attach */
7653accf27a5SSukumar Swaminathan 	if ((cmd == DDI_ATTACH) && (rval != DDI_SUCCESS) && (qlge != NULL)) {
7654accf27a5SSukumar Swaminathan 		cmn_err(CE_WARN, "qlge driver attach failed, sequence %x",
7655accf27a5SSukumar Swaminathan 		    qlge->sequence);
7656accf27a5SSukumar Swaminathan 		ql_free_resources(qlge);
7657accf27a5SSukumar Swaminathan 	}
7658accf27a5SSukumar Swaminathan 
7659bafec742SSukumar Swaminathan 	return (rval);
7660bafec742SSukumar Swaminathan }
7661bafec742SSukumar Swaminathan 
7662bafec742SSukumar Swaminathan /*
7663bafec742SSukumar Swaminathan  * Unbind all pending tx dma handles during driver bring down
7664bafec742SSukumar Swaminathan  */
7665bafec742SSukumar Swaminathan static void
7666bafec742SSukumar Swaminathan ql_unbind_pending_tx_dma_handle(struct tx_ring *tx_ring)
7667bafec742SSukumar Swaminathan {
7668bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_ring_desc;
7669bafec742SSukumar Swaminathan 	int i, j;
7670bafec742SSukumar Swaminathan 
7671bafec742SSukumar Swaminathan 	if (tx_ring->wq_desc) {
7672bafec742SSukumar Swaminathan 		tx_ring_desc = tx_ring->wq_desc;
7673bafec742SSukumar Swaminathan 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
7674bafec742SSukumar Swaminathan 			for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
7675bafec742SSukumar Swaminathan 				if (tx_ring_desc->tx_dma_handle[j]) {
7676bafec742SSukumar Swaminathan 					(void) ddi_dma_unbind_handle(
7677bafec742SSukumar Swaminathan 					    tx_ring_desc->tx_dma_handle[j]);
7678bafec742SSukumar Swaminathan 				}
7679bafec742SSukumar Swaminathan 			}
7680bafec742SSukumar Swaminathan 			tx_ring_desc->tx_dma_handle_used = 0;
7681bafec742SSukumar Swaminathan 		} /* end of for loop */
7682bafec742SSukumar Swaminathan 	}
7683bafec742SSukumar Swaminathan }
7684bafec742SSukumar Swaminathan /*
7685bafec742SSukumar Swaminathan  * Wait for all the packets sent to the chip to finish transmission
7686bafec742SSukumar Swaminathan  * to prevent buffers to be unmapped before or during a transmit operation
7687bafec742SSukumar Swaminathan  */
7688bafec742SSukumar Swaminathan static int
7689bafec742SSukumar Swaminathan ql_wait_tx_quiesce(qlge_t *qlge)
7690bafec742SSukumar Swaminathan {
7691bafec742SSukumar Swaminathan 	int count = MAX_TX_WAIT_COUNT, i;
7692bafec742SSukumar Swaminathan 	int rings_done;
7693bafec742SSukumar Swaminathan 	volatile struct tx_ring *tx_ring;
7694bafec742SSukumar Swaminathan 	uint32_t consumer_idx;
7695bafec742SSukumar Swaminathan 	uint32_t producer_idx;
7696bafec742SSukumar Swaminathan 	uint32_t temp;
7697bafec742SSukumar Swaminathan 	int done = 0;
7698bafec742SSukumar Swaminathan 	int rval = DDI_FAILURE;
7699bafec742SSukumar Swaminathan 
7700bafec742SSukumar Swaminathan 	while (!done) {
7701bafec742SSukumar Swaminathan 		rings_done = 0;
7702bafec742SSukumar Swaminathan 
7703bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->tx_ring_count; i++) {
7704bafec742SSukumar Swaminathan 			tx_ring = &qlge->tx_ring[i];
7705bafec742SSukumar Swaminathan 			temp = ql_read_doorbell_reg(qlge,
7706bafec742SSukumar Swaminathan 			    tx_ring->prod_idx_db_reg);
7707bafec742SSukumar Swaminathan 			producer_idx = temp & 0x0000ffff;
7708bafec742SSukumar Swaminathan 			consumer_idx = (temp >> 16);
7709bafec742SSukumar Swaminathan 
7710accf27a5SSukumar Swaminathan 			if (qlge->isr_stride) {
7711accf27a5SSukumar Swaminathan 				struct rx_ring *ob_ring;
7712accf27a5SSukumar Swaminathan 				ob_ring = &qlge->rx_ring[tx_ring->cq_id];
7713accf27a5SSukumar Swaminathan 				if (producer_idx != ob_ring->cnsmr_idx) {
7714accf27a5SSukumar Swaminathan 					cmn_err(CE_NOTE, " force clean \n");
7715accf27a5SSukumar Swaminathan 					(void) ql_clean_outbound_rx_ring(
7716accf27a5SSukumar Swaminathan 					    ob_ring);
7717accf27a5SSukumar Swaminathan 				}
7718accf27a5SSukumar Swaminathan 			}
7719bafec742SSukumar Swaminathan 			/*
7720bafec742SSukumar Swaminathan 			 * Get the pending iocb count, ones which have not been
7721bafec742SSukumar Swaminathan 			 * pulled down by the chip
7722bafec742SSukumar Swaminathan 			 */
7723bafec742SSukumar Swaminathan 			if (producer_idx >= consumer_idx)
7724bafec742SSukumar Swaminathan 				temp = (producer_idx - consumer_idx);
7725bafec742SSukumar Swaminathan 			else
7726bafec742SSukumar Swaminathan 				temp = (tx_ring->wq_len - consumer_idx) +
7727bafec742SSukumar Swaminathan 				    producer_idx;
7728bafec742SSukumar Swaminathan 
7729bafec742SSukumar Swaminathan 			if ((tx_ring->tx_free_count + temp) >= tx_ring->wq_len)
7730bafec742SSukumar Swaminathan 				rings_done++;
7731bafec742SSukumar Swaminathan 			else {
7732bafec742SSukumar Swaminathan 				done = 1;
7733bafec742SSukumar Swaminathan 				break;
7734bafec742SSukumar Swaminathan 			}
7735bafec742SSukumar Swaminathan 		}
7736bafec742SSukumar Swaminathan 
7737bafec742SSukumar Swaminathan 		/* If all the rings are done */
7738bafec742SSukumar Swaminathan 		if (rings_done >= qlge->tx_ring_count) {
7739bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
7740bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "%s(%d) done successfully \n",
7741bafec742SSukumar Swaminathan 			    __func__, qlge->instance);
7742bafec742SSukumar Swaminathan #endif
7743bafec742SSukumar Swaminathan 			rval = DDI_SUCCESS;
7744bafec742SSukumar Swaminathan 			break;
7745bafec742SSukumar Swaminathan 		}
7746bafec742SSukumar Swaminathan 
7747bafec742SSukumar Swaminathan 		qlge_delay(100);
7748bafec742SSukumar Swaminathan 
7749bafec742SSukumar Swaminathan 		count--;
7750bafec742SSukumar Swaminathan 		if (!count) {
7751bafec742SSukumar Swaminathan 
7752bafec742SSukumar Swaminathan 			count = MAX_TX_WAIT_COUNT;
7753bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
7754bafec742SSukumar Swaminathan 			volatile struct rx_ring *rx_ring;
7755bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "%s(%d): Waiting for %d pending"
7756bafec742SSukumar Swaminathan 			    " Transmits on queue %d to complete .\n",
7757bafec742SSukumar Swaminathan 			    __func__, qlge->instance,
7758bafec742SSukumar Swaminathan 			    (qlge->tx_ring[i].wq_len -
7759bafec742SSukumar Swaminathan 			    qlge->tx_ring[i].tx_free_count),
7760bafec742SSukumar Swaminathan 			    i);
7761bafec742SSukumar Swaminathan 
7762bafec742SSukumar Swaminathan 			rx_ring = &qlge->rx_ring[i+1];
7763bafec742SSukumar Swaminathan 			temp = ql_read_doorbell_reg(qlge,
7764bafec742SSukumar Swaminathan 			    rx_ring->cnsmr_idx_db_reg);
7765bafec742SSukumar Swaminathan 			consumer_idx = temp & 0x0000ffff;
7766bafec742SSukumar Swaminathan 			producer_idx = (temp >> 16);
7767bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "%s(%d): Transmit completion queue %d,"
7768bafec742SSukumar Swaminathan 			    " Producer %d, Consumer %d\n",
7769bafec742SSukumar Swaminathan 			    __func__, qlge->instance,
7770bafec742SSukumar Swaminathan 			    i+1,
7771bafec742SSukumar Swaminathan 			    producer_idx, consumer_idx);
7772bafec742SSukumar Swaminathan 
7773bafec742SSukumar Swaminathan 			temp = ql_read_doorbell_reg(qlge,
7774bafec742SSukumar Swaminathan 			    tx_ring->prod_idx_db_reg);
7775bafec742SSukumar Swaminathan 			producer_idx = temp & 0x0000ffff;
7776bafec742SSukumar Swaminathan 			consumer_idx = (temp >> 16);
7777bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "%s(%d): Transmit request queue %d,"
7778bafec742SSukumar Swaminathan 			    " Producer %d, Consumer %d\n",
7779bafec742SSukumar Swaminathan 			    __func__, qlge->instance, i,
7780bafec742SSukumar Swaminathan 			    producer_idx, consumer_idx);
7781bafec742SSukumar Swaminathan #endif
7782bafec742SSukumar Swaminathan 
7783bafec742SSukumar Swaminathan 			/* For now move on */
7784bafec742SSukumar Swaminathan 			break;
7785bafec742SSukumar Swaminathan 		}
7786bafec742SSukumar Swaminathan 	}
7787bafec742SSukumar Swaminathan 	/* Stop the request queue */
7788bafec742SSukumar Swaminathan 	mutex_enter(&qlge->hw_mutex);
7789bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
7790bafec742SSukumar Swaminathan 		if (qlge->tx_ring[i].valid_db_reg) {
7791bafec742SSukumar Swaminathan 			ql_write_doorbell_reg(qlge,
7792bafec742SSukumar Swaminathan 			    qlge->tx_ring[i].valid_db_reg, 0);
7793bafec742SSukumar Swaminathan 		}
7794bafec742SSukumar Swaminathan 	}
7795bafec742SSukumar Swaminathan 	mutex_exit(&qlge->hw_mutex);
7796bafec742SSukumar Swaminathan 	return (rval);
7797bafec742SSukumar Swaminathan }
7798bafec742SSukumar Swaminathan 
7799bafec742SSukumar Swaminathan /*
7800bafec742SSukumar Swaminathan  * Wait for all the receives indicated to the stack to come back
7801bafec742SSukumar Swaminathan  */
7802bafec742SSukumar Swaminathan static int
7803bafec742SSukumar Swaminathan ql_wait_rx_complete(qlge_t *qlge)
7804bafec742SSukumar Swaminathan {
7805bafec742SSukumar Swaminathan 	int i;
7806bafec742SSukumar Swaminathan 	/* Disable all the completion queues */
7807bafec742SSukumar Swaminathan 	mutex_enter(&qlge->hw_mutex);
7808bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
7809bafec742SSukumar Swaminathan 		if (qlge->rx_ring[i].valid_db_reg) {
7810bafec742SSukumar Swaminathan 			ql_write_doorbell_reg(qlge,
7811bafec742SSukumar Swaminathan 			    qlge->rx_ring[i].valid_db_reg, 0);
7812bafec742SSukumar Swaminathan 		}
7813bafec742SSukumar Swaminathan 	}
7814bafec742SSukumar Swaminathan 	mutex_exit(&qlge->hw_mutex);
7815bafec742SSukumar Swaminathan 
7816bafec742SSukumar Swaminathan 	/* Wait for OS to return all rx buffers */
7817bafec742SSukumar Swaminathan 	qlge_delay(QL_ONE_SEC_DELAY);
7818bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
7819bafec742SSukumar Swaminathan }
7820bafec742SSukumar Swaminathan 
7821bafec742SSukumar Swaminathan /*
7822bafec742SSukumar Swaminathan  * stop the driver
7823bafec742SSukumar Swaminathan  */
7824bafec742SSukumar Swaminathan static int
7825bafec742SSukumar Swaminathan ql_bringdown_adapter(qlge_t *qlge)
7826bafec742SSukumar Swaminathan {
7827bafec742SSukumar Swaminathan 	int i;
7828bafec742SSukumar Swaminathan 	int status = DDI_SUCCESS;
7829bafec742SSukumar Swaminathan 
7830bafec742SSukumar Swaminathan 	qlge->mac_flags = QL_MAC_BRINGDOWN;
7831bafec742SSukumar Swaminathan 	if (qlge->sequence & ADAPTER_INIT) {
7832bafec742SSukumar Swaminathan 		/* stop forwarding external packets to driver */
7833bafec742SSukumar Swaminathan 		status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
7834bafec742SSukumar Swaminathan 		if (status)
7835bafec742SSukumar Swaminathan 			return (status);
78360662fbf4SSukumar Swaminathan 		(void) ql_stop_routing(qlge);
7837bafec742SSukumar Swaminathan 		ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
7838bafec742SSukumar Swaminathan 		/*
7839bafec742SSukumar Swaminathan 		 * Set the flag for receive and transmit
7840bafec742SSukumar Swaminathan 		 * operations to cease
7841bafec742SSukumar Swaminathan 		 */
7842bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->tx_ring_count; i++) {
7843bafec742SSukumar Swaminathan 			mutex_enter(&qlge->tx_ring[i].tx_lock);
7844bafec742SSukumar Swaminathan 			qlge->tx_ring[i].mac_flags = QL_MAC_STOPPED;
7845bafec742SSukumar Swaminathan 			mutex_exit(&qlge->tx_ring[i].tx_lock);
7846bafec742SSukumar Swaminathan 		}
7847bafec742SSukumar Swaminathan 
7848bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->rx_ring_count; i++) {
7849bafec742SSukumar Swaminathan 			mutex_enter(&qlge->rx_ring[i].rx_lock);
7850bafec742SSukumar Swaminathan 			qlge->rx_ring[i].mac_flags = QL_MAC_STOPPED;
7851bafec742SSukumar Swaminathan 			mutex_exit(&qlge->rx_ring[i].rx_lock);
7852bafec742SSukumar Swaminathan 		}
7853bafec742SSukumar Swaminathan 
7854bafec742SSukumar Swaminathan 		/*
7855bafec742SSukumar Swaminathan 		 * Need interrupts to be running while the transmit
7856bafec742SSukumar Swaminathan 		 * completions are cleared. Wait for the packets
7857bafec742SSukumar Swaminathan 		 * queued to the chip to be sent out
7858bafec742SSukumar Swaminathan 		 */
7859bafec742SSukumar Swaminathan 		(void) ql_wait_tx_quiesce(qlge);
7860bafec742SSukumar Swaminathan 		/* Interrupts not needed from now */
7861bafec742SSukumar Swaminathan 		ql_disable_all_completion_interrupts(qlge);
7862bafec742SSukumar Swaminathan 
7863bafec742SSukumar Swaminathan 		mutex_enter(&qlge->hw_mutex);
7864bafec742SSukumar Swaminathan 		/* Disable Global interrupt */
7865bafec742SSukumar Swaminathan 		ql_disable_global_interrupt(qlge);
7866bafec742SSukumar Swaminathan 		mutex_exit(&qlge->hw_mutex);
7867bafec742SSukumar Swaminathan 
7868bafec742SSukumar Swaminathan 		/* Wait for all the indicated packets to come back */
7869bafec742SSukumar Swaminathan 		status = ql_wait_rx_complete(qlge);
7870bafec742SSukumar Swaminathan 
7871bafec742SSukumar Swaminathan 		mutex_enter(&qlge->hw_mutex);
7872bafec742SSukumar Swaminathan 		/* Reset adapter */
78730662fbf4SSukumar Swaminathan 		(void) ql_asic_reset(qlge);
7874bafec742SSukumar Swaminathan 		/*
7875bafec742SSukumar Swaminathan 		 * Unbind all tx dma handles to prevent pending tx descriptors'
7876bafec742SSukumar Swaminathan 		 * dma handles from being re-used.
7877bafec742SSukumar Swaminathan 		 */
7878bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->tx_ring_count; i++) {
7879bafec742SSukumar Swaminathan 			ql_unbind_pending_tx_dma_handle(&qlge->tx_ring[i]);
7880bafec742SSukumar Swaminathan 		}
7881bafec742SSukumar Swaminathan 
7882bafec742SSukumar Swaminathan 		qlge->sequence &= ~ADAPTER_INIT;
7883bafec742SSukumar Swaminathan 
7884bafec742SSukumar Swaminathan 		mutex_exit(&qlge->hw_mutex);
7885bafec742SSukumar Swaminathan 	}
7886bafec742SSukumar Swaminathan 	return (status);
7887bafec742SSukumar Swaminathan }
7888bafec742SSukumar Swaminathan 
7889bafec742SSukumar Swaminathan /*
7890bafec742SSukumar Swaminathan  * ql_detach
7891bafec742SSukumar Swaminathan  * Used to remove all the states associated with a given
7892bafec742SSukumar Swaminathan  * instances of a device node prior to the removal of that
7893bafec742SSukumar Swaminathan  * instance from the system.
7894bafec742SSukumar Swaminathan  */
7895bafec742SSukumar Swaminathan static int
7896bafec742SSukumar Swaminathan ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
7897bafec742SSukumar Swaminathan {
7898bafec742SSukumar Swaminathan 	qlge_t *qlge;
7899bafec742SSukumar Swaminathan 	int rval;
7900bafec742SSukumar Swaminathan 
7901bafec742SSukumar Swaminathan 	rval = DDI_SUCCESS;
7902bafec742SSukumar Swaminathan 
7903bafec742SSukumar Swaminathan 	switch (cmd) {
7904bafec742SSukumar Swaminathan 	case DDI_DETACH:
7905bafec742SSukumar Swaminathan 
7906bafec742SSukumar Swaminathan 		if ((qlge = QL_GET_DEV(dip)) == NULL)
7907bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
7908bafec742SSukumar Swaminathan 		rval = ql_bringdown_adapter(qlge);
7909bafec742SSukumar Swaminathan 		if (rval != DDI_SUCCESS)
7910bafec742SSukumar Swaminathan 			break;
7911bafec742SSukumar Swaminathan 
7912bafec742SSukumar Swaminathan 		qlge->mac_flags = QL_MAC_DETACH;
7913bafec742SSukumar Swaminathan 
7914bafec742SSukumar Swaminathan 		/* free memory resources */
7915bafec742SSukumar Swaminathan 		if (qlge->sequence & INIT_MEMORY_ALLOC) {
7916bafec742SSukumar Swaminathan 			ql_free_mem_resources(qlge);
7917bafec742SSukumar Swaminathan 			qlge->sequence &= ~INIT_MEMORY_ALLOC;
7918bafec742SSukumar Swaminathan 		}
7919accf27a5SSukumar Swaminathan 		ql_free_resources(qlge);
7920bafec742SSukumar Swaminathan 
7921bafec742SSukumar Swaminathan 		break;
7922bafec742SSukumar Swaminathan 
7923bafec742SSukumar Swaminathan 	case DDI_SUSPEND:
7924bafec742SSukumar Swaminathan 		if ((qlge = QL_GET_DEV(dip)) == NULL)
7925bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
7926bafec742SSukumar Swaminathan 
7927bafec742SSukumar Swaminathan 		mutex_enter(&qlge->gen_mutex);
7928bafec742SSukumar Swaminathan 		if ((qlge->mac_flags == QL_MAC_ATTACHED) ||
7929bafec742SSukumar Swaminathan 		    (qlge->mac_flags == QL_MAC_STARTED)) {
79300662fbf4SSukumar Swaminathan 			(void) ql_do_stop(qlge);
7931bafec742SSukumar Swaminathan 		}
7932bafec742SSukumar Swaminathan 		qlge->mac_flags = QL_MAC_SUSPENDED;
7933bafec742SSukumar Swaminathan 		mutex_exit(&qlge->gen_mutex);
7934bafec742SSukumar Swaminathan 
7935bafec742SSukumar Swaminathan 		break;
7936bafec742SSukumar Swaminathan 	default:
7937bafec742SSukumar Swaminathan 		rval = DDI_FAILURE;
7938bafec742SSukumar Swaminathan 		break;
7939bafec742SSukumar Swaminathan 	}
7940bafec742SSukumar Swaminathan 
7941bafec742SSukumar Swaminathan 	return (rval);
7942bafec742SSukumar Swaminathan }
7943bafec742SSukumar Swaminathan 
7944bafec742SSukumar Swaminathan /*
7945bafec742SSukumar Swaminathan  * quiesce(9E) entry point.
7946bafec742SSukumar Swaminathan  *
7947bafec742SSukumar Swaminathan  * This function is called when the system is single-threaded at high
7948bafec742SSukumar Swaminathan  * PIL with preemption disabled. Therefore, this function must not be
7949bafec742SSukumar Swaminathan  * blocked.
7950bafec742SSukumar Swaminathan  *
7951bafec742SSukumar Swaminathan  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
7952bafec742SSukumar Swaminathan  */
7953bafec742SSukumar Swaminathan int
7954bafec742SSukumar Swaminathan ql_quiesce(dev_info_t *dip)
7955bafec742SSukumar Swaminathan {
7956bafec742SSukumar Swaminathan 	qlge_t *qlge;
7957bafec742SSukumar Swaminathan 	int i;
7958bafec742SSukumar Swaminathan 
7959bafec742SSukumar Swaminathan 	if ((qlge = QL_GET_DEV(dip)) == NULL)
7960bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
7961bafec742SSukumar Swaminathan 
7962bafec742SSukumar Swaminathan 	if (CFG_IST(qlge, CFG_CHIP_8100)) {
7963bafec742SSukumar Swaminathan 		/* stop forwarding external packets to driver */
79640662fbf4SSukumar Swaminathan 		(void) ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
79650662fbf4SSukumar Swaminathan 		(void) ql_stop_routing(qlge);
7966bafec742SSukumar Swaminathan 		ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
7967bafec742SSukumar Swaminathan 		/* Stop all the request queues */
7968bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->tx_ring_count; i++) {
7969bafec742SSukumar Swaminathan 			if (qlge->tx_ring[i].valid_db_reg) {
7970bafec742SSukumar Swaminathan 				ql_write_doorbell_reg(qlge,
7971bafec742SSukumar Swaminathan 				    qlge->tx_ring[i].valid_db_reg, 0);
7972bafec742SSukumar Swaminathan 			}
7973bafec742SSukumar Swaminathan 		}
7974bafec742SSukumar Swaminathan 		qlge_delay(QL_ONE_SEC_DELAY/4);
7975bafec742SSukumar Swaminathan 		/* Interrupts not needed from now */
7976bafec742SSukumar Swaminathan 		/* Disable MPI interrupt */
7977bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_INTERRUPT_MASK,
7978bafec742SSukumar Swaminathan 		    (INTR_MASK_PI << 16));
7979bafec742SSukumar Swaminathan 		ql_disable_global_interrupt(qlge);
7980bafec742SSukumar Swaminathan 
7981bafec742SSukumar Swaminathan 		/* Disable all the rx completion queues */
7982bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->rx_ring_count; i++) {
7983bafec742SSukumar Swaminathan 			if (qlge->rx_ring[i].valid_db_reg) {
7984bafec742SSukumar Swaminathan 				ql_write_doorbell_reg(qlge,
7985bafec742SSukumar Swaminathan 				    qlge->rx_ring[i].valid_db_reg, 0);
7986bafec742SSukumar Swaminathan 			}
7987bafec742SSukumar Swaminathan 		}
7988bafec742SSukumar Swaminathan 		qlge_delay(QL_ONE_SEC_DELAY/4);
7989bafec742SSukumar Swaminathan 		qlge->mac_flags = QL_MAC_STOPPED;
7990bafec742SSukumar Swaminathan 		/* Reset adapter */
79910662fbf4SSukumar Swaminathan 		(void) ql_asic_reset(qlge);
7992bafec742SSukumar Swaminathan 		qlge_delay(100);
7993bafec742SSukumar Swaminathan 	}
7994bafec742SSukumar Swaminathan 
7995bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
7996bafec742SSukumar Swaminathan }
7997bafec742SSukumar Swaminathan 
7998bafec742SSukumar Swaminathan QL_STREAM_OPS(ql_ops, ql_attach, ql_detach);
7999bafec742SSukumar Swaminathan 
8000bafec742SSukumar Swaminathan /*
8001bafec742SSukumar Swaminathan  * Loadable Driver Interface Structures.
8002bafec742SSukumar Swaminathan  * Declare and initialize the module configuration section...
8003bafec742SSukumar Swaminathan  */
8004bafec742SSukumar Swaminathan static struct modldrv modldrv = {
8005bafec742SSukumar Swaminathan 	&mod_driverops,		/* type of module: driver */
8006bafec742SSukumar Swaminathan 	version,		/* name of module */
8007bafec742SSukumar Swaminathan 	&ql_ops			/* driver dev_ops */
8008bafec742SSukumar Swaminathan };
8009bafec742SSukumar Swaminathan 
8010bafec742SSukumar Swaminathan static struct modlinkage modlinkage = {
8011bafec742SSukumar Swaminathan 	MODREV_1, 	&modldrv,	NULL
8012bafec742SSukumar Swaminathan };
8013bafec742SSukumar Swaminathan 
8014bafec742SSukumar Swaminathan /*
8015bafec742SSukumar Swaminathan  * Loadable Module Routines
8016bafec742SSukumar Swaminathan  */
8017bafec742SSukumar Swaminathan 
8018bafec742SSukumar Swaminathan /*
8019bafec742SSukumar Swaminathan  * _init
8020bafec742SSukumar Swaminathan  * Initializes a loadable module. It is called before any other
8021bafec742SSukumar Swaminathan  * routine in a loadable module.
8022bafec742SSukumar Swaminathan  */
8023bafec742SSukumar Swaminathan int
8024bafec742SSukumar Swaminathan _init(void)
8025bafec742SSukumar Swaminathan {
8026bafec742SSukumar Swaminathan 	int rval;
8027bafec742SSukumar Swaminathan 
8028bafec742SSukumar Swaminathan 	mac_init_ops(&ql_ops, ADAPTER_NAME);
8029bafec742SSukumar Swaminathan 	rval = mod_install(&modlinkage);
8030bafec742SSukumar Swaminathan 	if (rval != DDI_SUCCESS) {
8031bafec742SSukumar Swaminathan 		mac_fini_ops(&ql_ops);
8032bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "?Unable to install/attach driver '%s'",
8033bafec742SSukumar Swaminathan 		    ADAPTER_NAME);
8034bafec742SSukumar Swaminathan 	}
8035bafec742SSukumar Swaminathan 
8036bafec742SSukumar Swaminathan 	return (rval);
8037bafec742SSukumar Swaminathan }
8038bafec742SSukumar Swaminathan 
8039bafec742SSukumar Swaminathan /*
8040bafec742SSukumar Swaminathan  * _fini
8041bafec742SSukumar Swaminathan  * Prepares a module for unloading. It is called when the system
8042bafec742SSukumar Swaminathan  * wants to unload a module. If the module determines that it can
8043bafec742SSukumar Swaminathan  * be unloaded, then _fini() returns the value returned by
8044bafec742SSukumar Swaminathan  * mod_remove(). Upon successful return from _fini() no other
8045bafec742SSukumar Swaminathan  * routine in the module will be called before _init() is called.
8046bafec742SSukumar Swaminathan  */
8047bafec742SSukumar Swaminathan int
8048bafec742SSukumar Swaminathan _fini(void)
8049bafec742SSukumar Swaminathan {
8050bafec742SSukumar Swaminathan 	int rval;
8051bafec742SSukumar Swaminathan 
8052bafec742SSukumar Swaminathan 	rval = mod_remove(&modlinkage);
8053bafec742SSukumar Swaminathan 	if (rval == DDI_SUCCESS) {
8054bafec742SSukumar Swaminathan 		mac_fini_ops(&ql_ops);
8055bafec742SSukumar Swaminathan 	}
8056bafec742SSukumar Swaminathan 
8057bafec742SSukumar Swaminathan 	return (rval);
8058bafec742SSukumar Swaminathan }
8059bafec742SSukumar Swaminathan 
8060bafec742SSukumar Swaminathan /*
8061bafec742SSukumar Swaminathan  * _info
8062bafec742SSukumar Swaminathan  * Returns information about loadable module.
8063bafec742SSukumar Swaminathan  */
8064bafec742SSukumar Swaminathan int
8065bafec742SSukumar Swaminathan _info(struct modinfo *modinfop)
8066bafec742SSukumar Swaminathan {
8067bafec742SSukumar Swaminathan 	return (mod_info(&modlinkage, modinfop));
8068bafec742SSukumar Swaminathan }
8069