xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge.c (revision 93a18d6d401e844455263f926578e9d2aa6b47ec)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 QLogic Corporation. All rights reserved.
24  */
25 
26 #include <qlge.h>
27 #include <sys/atomic.h>
28 #include <sys/strsubr.h>
29 #include <sys/pattr.h>
30 #include <netinet/in.h>
31 #include <netinet/ip.h>
32 #include <netinet/ip6.h>
33 #include <netinet/tcp.h>
34 #include <netinet/udp.h>
35 #include <inet/ip.h>
36 
37 
38 
39 /*
40  * Local variables
41  */
42 static struct ether_addr ql_ether_broadcast_addr =
43 	{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
44 static char version[] = "QLogic GLDv3 Driver " VERSIONSTR;
45 
46 /*
47  * Local function prototypes
48  */
49 static void ql_free_resources(dev_info_t *, qlge_t *);
50 static void ql_fini_kstats(qlge_t *);
51 static uint32_t ql_get_link_state(qlge_t *);
52 static void ql_read_conf(qlge_t *);
53 static int ql_alloc_phys(dev_info_t *, ddi_dma_handle_t *,
54     ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
55     size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
56 static void ql_free_phys(ddi_dma_handle_t *, ddi_acc_handle_t *);
57 static int ql_set_routing_reg(qlge_t *, uint32_t, uint32_t, int);
58 static int ql_route_initialize(qlge_t *);
59 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
60 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
61 static int ql_bringdown_adapter(qlge_t *);
62 static int ql_bringup_adapter(qlge_t *);
63 static int ql_asic_reset(qlge_t *);
64 static void ql_wake_mpi_reset_soft_intr(qlge_t *);
65 static void ql_stop_timer(qlge_t *qlge);
66 
67 /*
68  * TX dma maping handlers allow multiple sscatter-gather lists
69  */
70 ddi_dma_attr_t  tx_mapping_dma_attr = {
71 	DMA_ATTR_V0,			/* dma_attr_version */
72 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
73 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
74 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
75 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
76 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
77 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
78 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
79 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
80 	QL_MAX_TX_DMA_HANDLES,		/* s/g list length */
81 	QL_DMA_GRANULARITY,		/* granularity of device */
82 	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
83 };
84 
85 /*
86  * Receive buffers and Request/Response queues do not allow scatter-gather lists
87  */
88 ddi_dma_attr_t  dma_attr = {
89 	DMA_ATTR_V0,			/* dma_attr_version */
90 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
91 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
92 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
93 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
94 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
95 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
96 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
97 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
98 	1,				/* s/g list length, i.e no sg list */
99 	QL_DMA_GRANULARITY,		/* granularity of device */
100 	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
101 };
102 
103 /*
104  * DMA access attribute structure.
105  */
106 /* device register access from host */
107 ddi_device_acc_attr_t ql_dev_acc_attr = {
108 	DDI_DEVICE_ATTR_V0,
109 	DDI_STRUCTURE_LE_ACC,
110 	DDI_STRICTORDER_ACC
111 };
112 
113 /* host ring descriptors */
114 ddi_device_acc_attr_t ql_desc_acc_attr = {
115 	DDI_DEVICE_ATTR_V0,
116 	DDI_NEVERSWAP_ACC,
117 	DDI_STRICTORDER_ACC
118 };
119 
120 /* host ring buffer */
121 ddi_device_acc_attr_t ql_buf_acc_attr = {
122 	DDI_DEVICE_ATTR_V0,
123 	DDI_NEVERSWAP_ACC,
124 	DDI_STRICTORDER_ACC
125 };
126 
127 /*
128  * Hash key table for Receive Side Scaling (RSS) support
129  */
130 const uint8_t key_data[] = {
131 	0x23, 0x64, 0xa1, 0xaa, 0x37, 0xc0, 0xed, 0x05, 0x2b, 0x36,
132 	0x50, 0x5c, 0x45, 0x1e, 0x7e, 0xc8, 0x5d, 0x2a, 0x54, 0x2f,
133 	0xe4, 0x3d, 0x0f, 0xbb, 0x91, 0xd9, 0x25, 0x60, 0xd4, 0xf8,
134 	0x12, 0xa0, 0x59, 0x4b, 0x9e, 0x8a, 0x51, 0xda, 0xcd, 0x49};
135 
136 /*
137  * Shadow Registers:
138  * Outbound queues have a consumer index that is maintained by the chip.
139  * Inbound queues have a producer index that is maintained by the chip.
140  * For lower overhead, these registers are "shadowed" to host memory
141  * which allows the device driver to track the queue progress without
142  * PCI reads. When an entry is placed on an inbound queue, the chip will
143  * update the relevant index register and then copy the value to the
144  * shadow register in host memory.
145  */
146 
147 static inline unsigned int
148 ql_read_sh_reg(const volatile void *addr)
149 {
150 	return (*(volatile uint32_t *)addr);
151 }
152 
153 /*
154  * Read 32 bit atomically
155  */
156 uint32_t
157 ql_atomic_read_32(volatile uint32_t *target)
158 {
159 	/*
160 	 * atomic_add_32_nv returns the new value after the add,
161 	 * we are adding 0 so we should get the original value
162 	 */
163 	return (atomic_add_32_nv(target, 0));
164 }
165 
166 /*
167  * Set 32 bit atomically
168  */
169 void
170 ql_atomic_set_32(volatile uint32_t *target, uint32_t newval)
171 {
172 	(void) atomic_swap_32(target, newval);
173 }
174 
175 
176 /*
177  * Setup device PCI configuration registers.
178  * Kernel context.
179  */
180 static void
181 ql_pci_config(qlge_t *qlge)
182 {
183 	uint16_t w;
184 
185 	qlge->vendor_id = (uint16_t)pci_config_get16(qlge->pci_handle,
186 	    PCI_CONF_VENID);
187 	qlge->device_id = (uint16_t)pci_config_get16(qlge->pci_handle,
188 	    PCI_CONF_DEVID);
189 
190 	/*
191 	 * we want to respect framework's setting of PCI
192 	 * configuration space command register and also
193 	 * want to make sure that all bits of interest to us
194 	 * are properly set in PCI Command register(0x04).
195 	 * PCI_COMM_IO		0x1	 I/O access enable
196 	 * PCI_COMM_MAE		0x2	 Memory access enable
197 	 * PCI_COMM_ME		0x4	 bus master enable
198 	 * PCI_COMM_MEMWR_INVAL	0x10	 memory write and invalidate enable.
199 	 */
200 	w = (uint16_t)pci_config_get16(qlge->pci_handle, PCI_CONF_COMM);
201 	w = (uint16_t)(w & (~PCI_COMM_IO));
202 	w = (uint16_t)(w | PCI_COMM_MAE | PCI_COMM_ME |
203 	    /* PCI_COMM_MEMWR_INVAL | */
204 	    PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);
205 
206 	pci_config_put16(qlge->pci_handle, PCI_CONF_COMM, w);
207 
208 	ql_dump_pci_config(qlge);
209 }
210 
211 /*
212  * This routine parforms the neccessary steps to set GLD mac information
213  * such as Function number, xgmac mask and shift bits
214  */
215 static int
216 ql_set_mac_info(qlge_t *qlge)
217 {
218 	uint32_t value;
219 	int rval = DDI_SUCCESS;
220 	uint32_t fn0_net, fn1_net;
221 
222 	/* set default value */
223 	qlge->fn0_net = FN0_NET;
224 	qlge->fn1_net = FN1_NET;
225 
226 	if (ql_read_processor_data(qlge, MPI_REG, &value) != DDI_SUCCESS) {
227 		cmn_err(CE_WARN, "%s(%d) read MPI register failed",
228 		    __func__, qlge->instance);
229 	} else {
230 		fn0_net = (value >> 1) & 0x07;
231 		fn1_net = (value >> 5) & 0x07;
232 		if ((fn0_net > 4) || (fn1_net > 4) || (fn0_net == fn1_net)) {
233 			cmn_err(CE_WARN, "%s(%d) bad mpi register value %x, \n"
234 			    "nic0 function number %d,"
235 			    "nic1 function number %d "
236 			    "use default\n",
237 			    __func__, qlge->instance, value, fn0_net, fn1_net);
238 		} else {
239 			qlge->fn0_net = fn0_net;
240 			qlge->fn1_net = fn1_net;
241 		}
242 	}
243 
244 	/* Get the function number that the driver is associated with */
245 	value = ql_read_reg(qlge, REG_STATUS);
246 	qlge->func_number = (uint8_t)((value >> 6) & 0x03);
247 	QL_PRINT(DBG_INIT, ("status register is:%x, func_number: %d\n",
248 	    value, qlge->func_number));
249 
250 	/* The driver is loaded on a non-NIC function? */
251 	if ((qlge->func_number != qlge->fn0_net) &&
252 	    (qlge->func_number != qlge->fn1_net)) {
253 		cmn_err(CE_WARN,
254 		    "Invalid function number = 0x%x\n", qlge->func_number);
255 		return (DDI_FAILURE);
256 	}
257 	/* network port 0? */
258 	if (qlge->func_number == qlge->fn0_net) {
259 		qlge->xgmac_sem_mask = QL_PORT0_XGMAC_SEM_MASK;
260 		qlge->xgmac_sem_bits = QL_PORT0_XGMAC_SEM_BITS;
261 	} else {
262 		qlge->xgmac_sem_mask = QL_PORT1_XGMAC_SEM_MASK;
263 		qlge->xgmac_sem_bits = QL_PORT1_XGMAC_SEM_BITS;
264 	}
265 
266 	return (rval);
267 
268 }
269 
270 /*
271  * write to doorbell register
272  */
273 void
274 ql_write_doorbell_reg(qlge_t *qlge, uint32_t *addr, uint32_t data)
275 {
276 	ddi_put32(qlge->dev_doorbell_reg_handle, addr, data);
277 }
278 
279 /*
280  * read from doorbell register
281  */
282 uint32_t
283 ql_read_doorbell_reg(qlge_t *qlge, uint32_t *addr)
284 {
285 	uint32_t ret;
286 
287 	ret = ddi_get32(qlge->dev_doorbell_reg_handle, addr);
288 
289 	return	(ret);
290 }
291 
292 /*
293  * This function waits for a specific bit to come ready
294  * in a given register.  It is used mostly by the initialize
295  * process, but is also used in kernel thread API such as
296  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
297  */
298 static int
299 ql_wait_reg_rdy(qlge_t *qlge, uint32_t reg, uint32_t bit, uint32_t err_bit)
300 {
301 	uint32_t temp;
302 	int count = UDELAY_COUNT;
303 
304 	while (count) {
305 		temp = ql_read_reg(qlge, reg);
306 
307 		/* check for errors */
308 		if ((temp & err_bit) != 0) {
309 			break;
310 		} else if ((temp & bit) != 0)
311 			return (DDI_SUCCESS);
312 		qlge_delay(UDELAY_DELAY);
313 		count--;
314 	}
315 	cmn_err(CE_WARN,
316 	    "Waiting for reg %x to come ready failed.", reg);
317 	return (DDI_FAILURE);
318 }
319 
320 /*
321  * The CFG register is used to download TX and RX control blocks
322  * to the chip. This function waits for an operation to complete.
323  */
324 static int
325 ql_wait_cfg(qlge_t *qlge, uint32_t bit)
326 {
327 	int count = UDELAY_COUNT;
328 	uint32_t temp;
329 
330 	while (count) {
331 		temp = ql_read_reg(qlge, REG_CONFIGURATION);
332 		if ((temp & CFG_LE) != 0) {
333 			break;
334 		}
335 		if ((temp & bit) == 0)
336 			return (DDI_SUCCESS);
337 		qlge_delay(UDELAY_DELAY);
338 		count--;
339 	}
340 	cmn_err(CE_WARN,
341 	    "Waiting for cfg register bit %x failed.", bit);
342 	return (DDI_FAILURE);
343 }
344 
345 
346 /*
347  * Used to issue init control blocks to hw. Maps control block,
348  * sets address, triggers download, waits for completion.
349  */
350 static int
351 ql_write_cfg(qlge_t *qlge, uint32_t bit, uint64_t phy_addr, uint16_t q_id)
352 {
353 	int status = DDI_SUCCESS;
354 	uint32_t mask;
355 	uint32_t value;
356 
357 	status = ql_sem_spinlock(qlge, SEM_ICB_MASK);
358 	if (status != DDI_SUCCESS) {
359 		goto exit;
360 	}
361 	status = ql_wait_cfg(qlge, bit);
362 	if (status != DDI_SUCCESS) {
363 		goto exit;
364 	}
365 
366 	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_LOWER, LS_64BITS(phy_addr));
367 	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_UPPER, MS_64BITS(phy_addr));
368 
369 	mask = CFG_Q_MASK | (bit << 16);
370 	value = bit | (q_id << CFG_Q_SHIFT);
371 	ql_write_reg(qlge, REG_CONFIGURATION, (mask | value));
372 
373 	/*
374 	 * Wait for the bit to clear after signaling hw.
375 	 */
376 	status = ql_wait_cfg(qlge, bit);
377 	ql_sem_unlock(qlge, SEM_ICB_MASK); /* does flush too */
378 
379 exit:
380 	return (status);
381 }
382 
383 /*
384  * Initialize adapter instance
385  */
386 static int
387 ql_init_instance(qlge_t *qlge)
388 {
389 	int i;
390 
391 	/* Default value */
392 	qlge->mac_flags = QL_MAC_INIT;
393 	qlge->mtu = ETHERMTU;		/* set normal size as default */
394 	qlge->page_size = VM_PAGE_SIZE;	/* default page size */
395 	/* Set up the default ring sizes. */
396 	qlge->tx_ring_size = NUM_TX_RING_ENTRIES;
397 	qlge->rx_ring_size = NUM_RX_RING_ENTRIES;
398 
399 	/* Set up the coalescing parameters. */
400 	qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT;
401 	qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT;
402 	qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT;
403 	qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT;
404 	qlge->payload_copy_thresh = DFLT_PAYLOAD_COPY_THRESH;
405 	qlge->ql_dbgprnt = 0;
406 #if QL_DEBUG
407 	qlge->ql_dbgprnt = QL_DEBUG;
408 #endif /* QL_DEBUG */
409 
410 	/*
411 	 * TODO: Should be obtained from configuration or based off
412 	 * number of active cpus SJP 4th Mar. 09
413 	 */
414 	qlge->tx_ring_count = 1;
415 	qlge->rss_ring_count = 4;
416 	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
417 
418 	for (i = 0; i < MAX_RX_RINGS; i++) {
419 		qlge->rx_polls[i] = 0;
420 		qlge->rx_interrupts[i] = 0;
421 	}
422 
423 	/*
424 	 * Set up the operating parameters.
425 	 */
426 	qlge->multicast_list_count = 0;
427 
428 	/*
429 	 * Set up the max number of unicast list
430 	 */
431 	qlge->unicst_total = MAX_UNICAST_LIST_SIZE;
432 	qlge->unicst_avail = MAX_UNICAST_LIST_SIZE;
433 
434 	/*
435 	 * read user defined properties in .conf file
436 	 */
437 	ql_read_conf(qlge); /* mtu, pause, LSO etc */
438 
439 	QL_PRINT(DBG_INIT, ("mtu is %d \n", qlge->mtu));
440 
441 	/* choose Memory Space mapping and get Vendor Id, Device ID etc */
442 	ql_pci_config(qlge);
443 	qlge->ip_hdr_offset = 0;
444 
445 	if (qlge->device_id == 0x8000) {
446 		/* Schultz card */
447 		qlge->cfg_flags |= CFG_CHIP_8100;
448 		/* enable just ipv4 chksum offload for Schultz */
449 		qlge->cfg_flags |= CFG_CKSUM_FULL_IPv4;
450 		/*
451 		 * Schultz firmware does not do pseduo IP header checksum
452 		 * calculation, needed to be done by driver
453 		 */
454 		qlge->cfg_flags |= CFG_HW_UNABLE_PSEUDO_HDR_CKSUM;
455 		if (qlge->lso_enable)
456 			qlge->cfg_flags |= CFG_LSO;
457 		qlge->cfg_flags |= CFG_SUPPORT_SCATTER_GATHER;
458 		/* Schultz must split packet header */
459 		qlge->cfg_flags |= CFG_ENABLE_SPLIT_HEADER;
460 		qlge->max_read_mbx = 5;
461 		qlge->ip_hdr_offset = 2;
462 	}
463 
464 	/* Set Function Number and some of the iocb mac information */
465 	if (ql_set_mac_info(qlge) != DDI_SUCCESS)
466 		return (DDI_FAILURE);
467 
468 	/* Read network settings from NVRAM */
469 	/* After nvram is read successfully, update dev_addr */
470 	if (ql_get_flash_params(qlge) == DDI_SUCCESS) {
471 		QL_PRINT(DBG_INIT, ("mac%d address is \n", qlge->func_number));
472 		for (i = 0; i < ETHERADDRL; i++) {
473 			qlge->dev_addr.ether_addr_octet[i] =
474 			    qlge->nic_config.factory_MAC[i];
475 		}
476 	} else {
477 		cmn_err(CE_WARN, "%s(%d): Failed to read flash memory",
478 		    __func__, qlge->instance);
479 		return (DDI_FAILURE);
480 	}
481 
482 	bcopy(qlge->dev_addr.ether_addr_octet,
483 	    qlge->unicst_addr[0].addr.ether_addr_octet,
484 	    ETHERADDRL);
485 	QL_DUMP(DBG_INIT, "\t flash mac address dump:\n",
486 	    &qlge->dev_addr.ether_addr_octet[0], 8, ETHERADDRL);
487 
488 	qlge->port_link_state = LS_DOWN;
489 
490 	return (DDI_SUCCESS);
491 }
492 
493 
494 /*
495  * This hardware semaphore provides the mechanism for exclusive access to
496  * resources shared between the NIC driver, MPI firmware,
497  * FCOE firmware and the FC driver.
498  */
499 static int
500 ql_sem_trylock(qlge_t *qlge, uint32_t sem_mask)
501 {
502 	uint32_t sem_bits = 0;
503 
504 	switch (sem_mask) {
505 	case SEM_XGMAC0_MASK:
506 		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
507 		break;
508 	case SEM_XGMAC1_MASK:
509 		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
510 		break;
511 	case SEM_ICB_MASK:
512 		sem_bits = SEM_SET << SEM_ICB_SHIFT;
513 		break;
514 	case SEM_MAC_ADDR_MASK:
515 		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
516 		break;
517 	case SEM_FLASH_MASK:
518 		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
519 		break;
520 	case SEM_PROBE_MASK:
521 		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
522 		break;
523 	case SEM_RT_IDX_MASK:
524 		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
525 		break;
526 	case SEM_PROC_REG_MASK:
527 		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
528 		break;
529 	default:
530 		cmn_err(CE_WARN, "Bad Semaphore mask!.");
531 		return (DDI_FAILURE);
532 	}
533 
534 	ql_write_reg(qlge, REG_SEMAPHORE, sem_bits | sem_mask);
535 	return (!(ql_read_reg(qlge, REG_SEMAPHORE) & sem_bits));
536 }
537 
538 /*
539  * Lock a specific bit of Semaphore register to gain
540  * access to a particular shared register
541  */
542 int
543 ql_sem_spinlock(qlge_t *qlge, uint32_t sem_mask)
544 {
545 	unsigned int wait_count = 30;
546 
547 	while (wait_count) {
548 		if (!ql_sem_trylock(qlge, sem_mask))
549 			return (DDI_SUCCESS);
550 		qlge_delay(100);
551 		wait_count--;
552 	}
553 	cmn_err(CE_WARN, "%s(%d) sem_mask 0x%x lock timeout ",
554 	    __func__, qlge->instance, sem_mask);
555 	return (DDI_FAILURE);
556 }
557 
558 /*
559  * Unock a specific bit of Semaphore register to release
560  * access to a particular shared register
561  */
562 void
563 ql_sem_unlock(qlge_t *qlge, uint32_t sem_mask)
564 {
565 	ql_write_reg(qlge, REG_SEMAPHORE, sem_mask);
566 	(void) ql_read_reg(qlge, REG_SEMAPHORE);	/* flush */
567 }
568 
569 /*
570  * Get property value from configuration file.
571  *
572  * string = property string pointer.
573  *
574  * Returns:
575  * 0xFFFFFFFF = no property else property value.
576  */
577 static uint32_t
578 ql_get_prop(qlge_t *qlge, char *string)
579 {
580 	char buf[256];
581 	uint32_t data;
582 
583 	/* Get adapter instance parameter. */
584 	(void) sprintf(buf, "hba%d-%s", qlge->instance, string);
585 	data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0, buf,
586 	    (int)0xffffffff);
587 
588 	/* Adapter instance parameter found? */
589 	if (data == 0xffffffff) {
590 		/* No, get default parameter. */
591 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0,
592 		    string, (int)0xffffffff);
593 	}
594 
595 	return (data);
596 }
597 
598 /*
599  * Read user setting from configuration file.
600  */
601 static void
602 ql_read_conf(qlge_t *qlge)
603 {
604 	uint32_t data;
605 
606 	/* clear configuration flags */
607 	qlge->cfg_flags = 0;
608 
609 	/* Get default rx_copy enable/disable. */
610 	if ((data = ql_get_prop(qlge, "force-rx-copy")) == 0xffffffff ||
611 	    data == 0) {
612 		qlge->cfg_flags &= ~CFG_RX_COPY_MODE;
613 		qlge->rx_copy = B_FALSE;
614 		QL_PRINT(DBG_INIT, ("rx copy mode disabled\n"));
615 	} else if (data == 1) {
616 		qlge->cfg_flags |= CFG_RX_COPY_MODE;
617 		qlge->rx_copy = B_TRUE;
618 		QL_PRINT(DBG_INIT, ("rx copy mode enabled\n"));
619 	}
620 
621 	/* Get mtu packet size. */
622 	data = ql_get_prop(qlge, "mtu");
623 	if ((data == ETHERMTU) || (data == JUMBO_MTU)) {
624 		if (qlge->mtu != data) {
625 			qlge->mtu = data;
626 			cmn_err(CE_NOTE, "new mtu is %d\n", qlge->mtu);
627 		}
628 	}
629 
630 	/* Get pause mode, default is Per Priority mode. */
631 	qlge->pause = PAUSE_MODE_PER_PRIORITY;
632 	data = ql_get_prop(qlge, "pause");
633 	if (data <= PAUSE_MODE_PER_PRIORITY) {
634 		if (qlge->pause != data) {
635 			qlge->pause = data;
636 			cmn_err(CE_NOTE, "new pause mode %d\n", qlge->pause);
637 		}
638 	}
639 
640 	/* Get tx_max_coalesced_frames. */
641 	qlge->tx_max_coalesced_frames = 5;
642 	data = ql_get_prop(qlge, "tx_max_coalesced_frames");
643 	/* if data is valid */
644 	if ((data != 0xffffffff) && data) {
645 		if (qlge->tx_max_coalesced_frames != data) {
646 			qlge->tx_max_coalesced_frames = (uint16_t)data;
647 		}
648 	}
649 
650 	/* Get split header payload_copy_thresh. */
651 	qlge->payload_copy_thresh = 6;
652 	data = ql_get_prop(qlge, "payload_copy_thresh");
653 	/* if data is valid */
654 	if ((data != 0xffffffff) && (data != 0)) {
655 		if (qlge->payload_copy_thresh != data) {
656 			qlge->payload_copy_thresh = data;
657 		}
658 	}
659 
660 	/* large send offload (LSO) capability. */
661 	qlge->lso_enable = 1;
662 	data = ql_get_prop(qlge, "lso_enable");
663 	/* if data is valid */
664 	if (data != 0xffffffff) {
665 		if (qlge->lso_enable != data) {
666 			qlge->lso_enable = (uint16_t)data;
667 		}
668 	}
669 }
670 
671 /*
672  * Enable global interrupt
673  */
674 static void
675 ql_enable_global_interrupt(qlge_t *qlge)
676 {
677 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE,
678 	    (INTR_EN_EI << 16) | INTR_EN_EI);
679 	qlge->flags |= INTERRUPTS_ENABLED;
680 }
681 
682 /*
683  * Disable global interrupt
684  */
685 static void
686 ql_disable_global_interrupt(qlge_t *qlge)
687 {
688 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, (INTR_EN_EI << 16));
689 	qlge->flags &= ~INTERRUPTS_ENABLED;
690 }
691 
692 /*
693  * Enable one ring interrupt
694  */
695 void
696 ql_enable_completion_interrupt(qlge_t *qlge, uint32_t intr)
697 {
698 	struct intr_ctx *ctx = qlge->intr_ctx + intr;
699 
700 	QL_PRINT(DBG_INTR, ("%s(%d): To enable intr %d, irq_cnt %d \n",
701 	    __func__, qlge->instance, intr, ctx->irq_cnt));
702 
703 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
704 		/*
705 		 * Always enable if we're MSIX multi interrupts and
706 		 * it's not the default (zeroeth) interrupt.
707 		 */
708 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
709 		return;
710 	}
711 
712 	if (!atomic_dec_32_nv(&ctx->irq_cnt)) {
713 		mutex_enter(&qlge->hw_mutex);
714 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
715 		mutex_exit(&qlge->hw_mutex);
716 		QL_PRINT(DBG_INTR,
717 		    ("%s(%d): write %x to intr enable register \n",
718 		    __func__, qlge->instance, ctx->intr_en_mask));
719 	}
720 }
721 
722 /*
723  * ql_forced_disable_completion_interrupt
724  * Used by call from OS, may be called without
725  * a pending interrupt so force the disable
726  */
727 uint32_t
728 ql_forced_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
729 {
730 	uint32_t var = 0;
731 	struct intr_ctx *ctx = qlge->intr_ctx + intr;
732 
733 	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
734 	    __func__, qlge->instance, intr, ctx->irq_cnt));
735 
736 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
737 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
738 		var = ql_read_reg(qlge, REG_STATUS);
739 		return (var);
740 	}
741 
742 	mutex_enter(&qlge->hw_mutex);
743 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
744 	var = ql_read_reg(qlge, REG_STATUS);
745 	mutex_exit(&qlge->hw_mutex);
746 
747 	return (var);
748 }
749 
750 /*
751  * Disable a completion interrupt
752  */
753 void
754 ql_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
755 {
756 	struct intr_ctx *ctx;
757 
758 	ctx = qlge->intr_ctx + intr;
759 	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
760 	    __func__, qlge->instance, intr, ctx->irq_cnt));
761 	/*
762 	 * HW disables for us if we're MSIX multi interrupts and
763 	 * it's not the default (zeroeth) interrupt.
764 	 */
765 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && (intr != 0))
766 		return;
767 
768 	if (ql_atomic_read_32(&ctx->irq_cnt) == 0) {
769 		mutex_enter(&qlge->hw_mutex);
770 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
771 		mutex_exit(&qlge->hw_mutex);
772 	}
773 	atomic_inc_32(&ctx->irq_cnt);
774 }
775 
776 /*
777  * Enable all completion interrupts
778  */
779 static void
780 ql_enable_all_completion_interrupts(qlge_t *qlge)
781 {
782 	int i;
783 	uint32_t value = 1;
784 
785 	for (i = 0; i < qlge->intr_cnt; i++) {
786 		/*
787 		 * Set the count to 1 for Legacy / MSI interrupts or for the
788 		 * default interrupt (0)
789 		 */
790 		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0) {
791 			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
792 		}
793 		ql_enable_completion_interrupt(qlge, i);
794 	}
795 }
796 
797 /*
798  * Disable all completion interrupts
799  */
800 static void
801 ql_disable_all_completion_interrupts(qlge_t *qlge)
802 {
803 	int i;
804 	uint32_t value = 0;
805 
806 	for (i = 0; i < qlge->intr_cnt; i++) {
807 
808 		/*
809 		 * Set the count to 0 for Legacy / MSI interrupts or for the
810 		 * default interrupt (0)
811 		 */
812 		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0)
813 			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
814 
815 		ql_disable_completion_interrupt(qlge, i);
816 	}
817 }
818 
819 /*
820  * Update small buffer queue producer index
821  */
822 static void
823 ql_update_sbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
824 {
825 	/* Update the buffer producer index */
826 	QL_PRINT(DBG_RX, ("sbq: updating prod idx = %d.\n",
827 	    rx_ring->sbq_prod_idx));
828 	ql_write_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg,
829 	    rx_ring->sbq_prod_idx);
830 }
831 
832 /*
833  * Update large buffer queue producer index
834  */
835 static void
836 ql_update_lbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
837 {
838 	/* Update the buffer producer index */
839 	QL_PRINT(DBG_RX, ("lbq: updating prod idx = %d.\n",
840 	    rx_ring->lbq_prod_idx));
841 	ql_write_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg,
842 	    rx_ring->lbq_prod_idx);
843 }
844 
845 /*
846  * Adds a small buffer descriptor to end of its in use list,
847  * assumes sbq_lock is already taken
848  */
849 static void
850 ql_add_sbuf_to_in_use_list(struct rx_ring *rx_ring,
851     struct bq_desc *sbq_desc)
852 {
853 	uint32_t inuse_idx = rx_ring->sbq_use_tail;
854 
855 	rx_ring->sbuf_in_use[inuse_idx] = sbq_desc;
856 	inuse_idx++;
857 	if (inuse_idx >= rx_ring->sbq_len)
858 		inuse_idx = 0;
859 	rx_ring->sbq_use_tail = inuse_idx;
860 	atomic_inc_32(&rx_ring->sbuf_in_use_count);
861 	ASSERT(rx_ring->sbuf_in_use_count <= rx_ring->sbq_len);
862 }
863 
864 /*
865  * Get a small buffer descriptor from its in use list
866  */
867 static struct bq_desc *
868 ql_get_sbuf_from_in_use_list(struct rx_ring *rx_ring)
869 {
870 	struct bq_desc *sbq_desc = NULL;
871 	uint32_t inuse_idx;
872 
873 	/* Pick from head of in use list */
874 	inuse_idx = rx_ring->sbq_use_head;
875 	sbq_desc = rx_ring->sbuf_in_use[inuse_idx];
876 	rx_ring->sbuf_in_use[inuse_idx] = NULL;
877 
878 	if (sbq_desc != NULL) {
879 		inuse_idx++;
880 		if (inuse_idx >= rx_ring->sbq_len)
881 			inuse_idx = 0;
882 		rx_ring->sbq_use_head = inuse_idx;
883 		atomic_dec_32(&rx_ring->sbuf_in_use_count);
884 		atomic_inc_32(&rx_ring->rx_indicate);
885 		sbq_desc->upl_inuse = 1;
886 		/* if mp is NULL */
887 		if (sbq_desc->mp == NULL) {
888 			/* try to remap mp again */
889 			sbq_desc->mp =
890 			    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
891 			    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
892 		}
893 	}
894 
895 	return (sbq_desc);
896 }
897 
898 /*
899  * Add a small buffer descriptor to its free list
900  */
901 static void
902 ql_add_sbuf_to_free_list(struct rx_ring *rx_ring,
903     struct bq_desc *sbq_desc)
904 {
905 	uint32_t free_idx;
906 
907 	/* Add to the end of free list */
908 	free_idx = rx_ring->sbq_free_tail;
909 	rx_ring->sbuf_free[free_idx] = sbq_desc;
910 	ASSERT(rx_ring->sbuf_free_count <= rx_ring->sbq_len);
911 	free_idx++;
912 	if (free_idx >= rx_ring->sbq_len)
913 		free_idx = 0;
914 	rx_ring->sbq_free_tail = free_idx;
915 	atomic_inc_32(&rx_ring->sbuf_free_count);
916 }
917 
918 /*
919  * Get a small buffer descriptor from its free list
920  */
921 static struct bq_desc *
922 ql_get_sbuf_from_free_list(struct rx_ring *rx_ring)
923 {
924 	struct bq_desc *sbq_desc;
925 	uint32_t free_idx;
926 
927 	free_idx = rx_ring->sbq_free_head;
928 	/* Pick from top of free list */
929 	sbq_desc = rx_ring->sbuf_free[free_idx];
930 	rx_ring->sbuf_free[free_idx] = NULL;
931 	if (sbq_desc != NULL) {
932 		free_idx++;
933 		if (free_idx >= rx_ring->sbq_len)
934 			free_idx = 0;
935 		rx_ring->sbq_free_head = free_idx;
936 		atomic_dec_32(&rx_ring->sbuf_free_count);
937 		ASSERT(rx_ring->sbuf_free_count != 0);
938 	}
939 	return (sbq_desc);
940 }
941 
942 /*
943  * Add a large buffer descriptor to its in use list
944  */
945 static void
946 ql_add_lbuf_to_in_use_list(struct rx_ring *rx_ring,
947     struct bq_desc *lbq_desc)
948 {
949 	uint32_t inuse_idx;
950 
951 	inuse_idx = rx_ring->lbq_use_tail;
952 
953 	rx_ring->lbuf_in_use[inuse_idx] = lbq_desc;
954 	inuse_idx++;
955 	if (inuse_idx >= rx_ring->lbq_len)
956 		inuse_idx = 0;
957 	rx_ring->lbq_use_tail = inuse_idx;
958 	atomic_inc_32(&rx_ring->lbuf_in_use_count);
959 }
960 
961 /*
962  * Get a large buffer descriptor from in use list
963  */
964 static struct bq_desc *
965 ql_get_lbuf_from_in_use_list(struct rx_ring *rx_ring)
966 {
967 	struct bq_desc *lbq_desc;
968 	uint32_t inuse_idx;
969 
970 	/* Pick from head of in use list */
971 	inuse_idx = rx_ring->lbq_use_head;
972 	lbq_desc = rx_ring->lbuf_in_use[inuse_idx];
973 	rx_ring->lbuf_in_use[inuse_idx] = NULL;
974 
975 	if (lbq_desc != NULL) {
976 		inuse_idx++;
977 		if (inuse_idx >= rx_ring->lbq_len)
978 			inuse_idx = 0;
979 		rx_ring->lbq_use_head = inuse_idx;
980 		atomic_dec_32(&rx_ring->lbuf_in_use_count);
981 		atomic_inc_32(&rx_ring->rx_indicate);
982 		lbq_desc->upl_inuse = 1;
983 
984 		/* if mp is NULL */
985 		if (lbq_desc->mp == NULL) {
986 			/* try to remap mp again */
987 			lbq_desc->mp =
988 			    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
989 			    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
990 		}
991 	}
992 	return (lbq_desc);
993 }
994 
995 /*
996  * Add a large buffer descriptor to free list
997  */
998 static void
999 ql_add_lbuf_to_free_list(struct rx_ring *rx_ring,
1000     struct bq_desc *lbq_desc)
1001 {
1002 	uint32_t free_idx;
1003 
1004 	/* Add to the end of free list */
1005 	free_idx = rx_ring->lbq_free_tail;
1006 	rx_ring->lbuf_free[free_idx] = lbq_desc;
1007 	free_idx++;
1008 	if (free_idx >= rx_ring->lbq_len)
1009 		free_idx = 0;
1010 	rx_ring->lbq_free_tail = free_idx;
1011 	atomic_inc_32(&rx_ring->lbuf_free_count);
1012 	ASSERT(rx_ring->lbuf_free_count <= rx_ring->lbq_len);
1013 }
1014 
1015 /*
1016  * Get a large buffer descriptor from its free list
1017  */
1018 static struct bq_desc *
1019 ql_get_lbuf_from_free_list(struct rx_ring *rx_ring)
1020 {
1021 	struct bq_desc *lbq_desc;
1022 	uint32_t free_idx;
1023 
1024 	free_idx = rx_ring->lbq_free_head;
1025 	/* Pick from head of free list */
1026 	lbq_desc = rx_ring->lbuf_free[free_idx];
1027 	rx_ring->lbuf_free[free_idx] = NULL;
1028 
1029 	if (lbq_desc != NULL) {
1030 		free_idx++;
1031 		if (free_idx >= rx_ring->lbq_len)
1032 			free_idx = 0;
1033 		rx_ring->lbq_free_head = free_idx;
1034 		atomic_dec_32(&rx_ring->lbuf_free_count);
1035 		ASSERT(rx_ring->lbuf_free_count != 0);
1036 	}
1037 	return (lbq_desc);
1038 }
1039 
1040 /*
1041  * Add a small buffer descriptor to free list
1042  */
1043 static void
1044 ql_refill_sbuf_free_list(struct bq_desc *sbq_desc, boolean_t alloc_memory)
1045 {
1046 	struct rx_ring *rx_ring = sbq_desc->rx_ring;
1047 	uint64_t *sbq_entry;
1048 	qlge_t *qlge = (qlge_t *)rx_ring->qlge;
1049 	/*
1050 	 * Sync access
1051 	 */
1052 	mutex_enter(&rx_ring->sbq_lock);
1053 
1054 	sbq_desc->upl_inuse = 0;
1055 
1056 	/*
1057 	 * If we are freeing the buffers as a result of adapter unload, get out
1058 	 */
1059 	if ((sbq_desc->free_buf != NULL) ||
1060 	    (qlge->mac_flags == QL_MAC_DETACH)) {
1061 		if (sbq_desc->free_buf == NULL)
1062 			atomic_dec_32(&rx_ring->rx_indicate);
1063 		mutex_exit(&rx_ring->sbq_lock);
1064 		return;
1065 	}
1066 #ifdef QLGE_LOAD_UNLOAD
1067 	if (rx_ring->rx_indicate == 0)
1068 		cmn_err(CE_WARN, "sbq: indicate wrong");
1069 #endif
1070 #ifdef QLGE_TRACK_BUFFER_USAGE
1071 	uint32_t sb_consumer_idx;
1072 	uint32_t sb_producer_idx;
1073 	uint32_t num_free_buffers;
1074 	uint32_t temp;
1075 
1076 	temp = ql_read_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg);
1077 	sb_producer_idx = temp & 0x0000ffff;
1078 	sb_consumer_idx = (temp >> 16);
1079 
1080 	if (sb_consumer_idx > sb_producer_idx)
1081 		num_free_buffers = NUM_SMALL_BUFFERS -
1082 		    (sb_consumer_idx - sb_producer_idx);
1083 	else
1084 		num_free_buffers = sb_producer_idx - sb_consumer_idx;
1085 
1086 	if (num_free_buffers < qlge->rx_sb_low_count[rx_ring->cq_id])
1087 		qlge->rx_sb_low_count[rx_ring->cq_id] = num_free_buffers;
1088 
1089 #endif
1090 
1091 	ASSERT(sbq_desc->mp == NULL);
1092 
1093 #ifdef QLGE_LOAD_UNLOAD
1094 	if (rx_ring->rx_indicate > 0xFF000000)
1095 		cmn_err(CE_WARN, "sbq: indicate(%d) wrong: %d mac_flags %d,"
1096 		    " sbq_desc index %d.",
1097 		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
1098 		    sbq_desc->index);
1099 #endif
1100 	if (alloc_memory) {
1101 		sbq_desc->mp =
1102 		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1103 		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1104 		if (sbq_desc->mp == NULL) {
1105 			rx_ring->rx_failed_sbq_allocs++;
1106 		}
1107 	}
1108 
1109 	/* Got the packet from the stack decrement rx_indicate count */
1110 	atomic_dec_32(&rx_ring->rx_indicate);
1111 
1112 	ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1113 
1114 	/* Rearm if possible */
1115 	if ((rx_ring->sbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1116 	    (qlge->mac_flags == QL_MAC_STARTED)) {
1117 		sbq_entry = rx_ring->sbq_dma.vaddr;
1118 		sbq_entry += rx_ring->sbq_prod_idx;
1119 
1120 		while (rx_ring->sbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1121 			/* Get first one from free list */
1122 			sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
1123 
1124 			*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
1125 			sbq_entry++;
1126 			rx_ring->sbq_prod_idx++;
1127 			if (rx_ring->sbq_prod_idx >= rx_ring->sbq_len) {
1128 				rx_ring->sbq_prod_idx = 0;
1129 				sbq_entry = rx_ring->sbq_dma.vaddr;
1130 			}
1131 			/* Add to end of in use list */
1132 			ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
1133 		}
1134 
1135 		/* Update small buffer queue producer index */
1136 		ql_update_sbq_prod_idx(qlge, rx_ring);
1137 	}
1138 
1139 	mutex_exit(&rx_ring->sbq_lock);
1140 	QL_PRINT(DBG_RX_RING, ("%s(%d) exited, sbuf_free_count %d\n",
1141 	    __func__, qlge->instance, rx_ring->sbuf_free_count));
1142 }
1143 
1144 /*
1145  * rx recycle call back function
1146  */
1147 static void
1148 ql_release_to_sbuf_free_list(caddr_t p)
1149 {
1150 	struct bq_desc *sbq_desc = (struct bq_desc *)(void *)p;
1151 
1152 	if (sbq_desc == NULL)
1153 		return;
1154 	ql_refill_sbuf_free_list(sbq_desc, B_TRUE);
1155 }
1156 
1157 /*
1158  * Add a large buffer descriptor to free list
1159  */
1160 static void
1161 ql_refill_lbuf_free_list(struct bq_desc *lbq_desc, boolean_t alloc_memory)
1162 {
1163 	struct rx_ring *rx_ring = lbq_desc->rx_ring;
1164 	uint64_t *lbq_entry;
1165 	qlge_t *qlge = rx_ring->qlge;
1166 
1167 	/* Sync access */
1168 	mutex_enter(&rx_ring->lbq_lock);
1169 
1170 	lbq_desc->upl_inuse = 0;
1171 	/*
1172 	 * If we are freeing the buffers as a result of adapter unload, get out
1173 	 */
1174 	if ((lbq_desc->free_buf != NULL) ||
1175 	    (qlge->mac_flags == QL_MAC_DETACH)) {
1176 		if (lbq_desc->free_buf == NULL)
1177 			atomic_dec_32(&rx_ring->rx_indicate);
1178 		mutex_exit(&rx_ring->lbq_lock);
1179 		return;
1180 	}
1181 #ifdef QLGE_LOAD_UNLOAD
1182 	if (rx_ring->rx_indicate == 0)
1183 		cmn_err(CE_WARN, "lbq: indicate wrong");
1184 #endif
1185 #ifdef QLGE_TRACK_BUFFER_USAGE
1186 	uint32_t lb_consumer_idx;
1187 	uint32_t lb_producer_idx;
1188 	uint32_t num_free_buffers;
1189 	uint32_t temp;
1190 
1191 	temp = ql_read_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg);
1192 
1193 	lb_producer_idx = temp & 0x0000ffff;
1194 	lb_consumer_idx = (temp >> 16);
1195 
1196 	if (lb_consumer_idx > lb_producer_idx)
1197 		num_free_buffers = NUM_LARGE_BUFFERS -
1198 		    (lb_consumer_idx - lb_producer_idx);
1199 	else
1200 		num_free_buffers = lb_producer_idx - lb_consumer_idx;
1201 
1202 	if (num_free_buffers < qlge->rx_lb_low_count[rx_ring->cq_id]) {
1203 		qlge->rx_lb_low_count[rx_ring->cq_id] = num_free_buffers;
1204 	}
1205 #endif
1206 
1207 	ASSERT(lbq_desc->mp == NULL);
1208 #ifdef QLGE_LOAD_UNLOAD
1209 	if (rx_ring->rx_indicate > 0xFF000000)
1210 		cmn_err(CE_WARN, "lbq: indicate(%d) wrong: %d mac_flags %d,"
1211 		    "lbq_desc index %d",
1212 		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
1213 		    lbq_desc->index);
1214 #endif
1215 	if (alloc_memory) {
1216 		lbq_desc->mp =
1217 		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1218 		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1219 		if (lbq_desc->mp == NULL) {
1220 			rx_ring->rx_failed_lbq_allocs++;
1221 		}
1222 	}
1223 
1224 	/* Got the packet from the stack decrement rx_indicate count */
1225 	atomic_dec_32(&rx_ring->rx_indicate);
1226 
1227 	ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1228 
1229 	/* Rearm if possible */
1230 	if ((rx_ring->lbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1231 	    (qlge->mac_flags == QL_MAC_STARTED)) {
1232 		lbq_entry = rx_ring->lbq_dma.vaddr;
1233 		lbq_entry += rx_ring->lbq_prod_idx;
1234 		while (rx_ring->lbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1235 			/* Get first one from free list */
1236 			lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
1237 
1238 			*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
1239 			lbq_entry++;
1240 			rx_ring->lbq_prod_idx++;
1241 			if (rx_ring->lbq_prod_idx >= rx_ring->lbq_len) {
1242 				rx_ring->lbq_prod_idx = 0;
1243 				lbq_entry = rx_ring->lbq_dma.vaddr;
1244 			}
1245 
1246 			/* Add to end of in use list */
1247 			ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
1248 		}
1249 
1250 		/* Update large buffer queue producer index */
1251 		ql_update_lbq_prod_idx(rx_ring->qlge, rx_ring);
1252 	}
1253 
1254 	mutex_exit(&rx_ring->lbq_lock);
1255 	QL_PRINT(DBG_RX_RING, ("%s exitd, lbuf_free_count %d\n",
1256 	    __func__, rx_ring->lbuf_free_count));
1257 }
1258 /*
1259  * rx recycle call back function
1260  */
1261 static void
1262 ql_release_to_lbuf_free_list(caddr_t p)
1263 {
1264 	struct bq_desc *lbq_desc = (struct bq_desc *)(void *)p;
1265 
1266 	if (lbq_desc == NULL)
1267 		return;
1268 	ql_refill_lbuf_free_list(lbq_desc, B_TRUE);
1269 }
1270 
1271 /*
1272  * free small buffer queue buffers
1273  */
1274 static void
1275 ql_free_sbq_buffers(struct rx_ring *rx_ring)
1276 {
1277 	struct bq_desc *sbq_desc;
1278 	uint32_t i;
1279 	uint32_t j = rx_ring->sbq_free_head;
1280 	int  force_cnt = 0;
1281 
1282 	for (i = 0; i < rx_ring->sbuf_free_count; i++) {
1283 		sbq_desc = rx_ring->sbuf_free[j];
1284 		sbq_desc->free_buf = 1;
1285 		j++;
1286 		if (j >= rx_ring->sbq_len) {
1287 			j = 0;
1288 		}
1289 		if (sbq_desc->mp != NULL) {
1290 			freemsg(sbq_desc->mp);
1291 			sbq_desc->mp = NULL;
1292 		}
1293 	}
1294 	rx_ring->sbuf_free_count = 0;
1295 
1296 	j = rx_ring->sbq_use_head;
1297 	for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
1298 		sbq_desc = rx_ring->sbuf_in_use[j];
1299 		sbq_desc->free_buf = 1;
1300 		j++;
1301 		if (j >= rx_ring->sbq_len) {
1302 			j = 0;
1303 		}
1304 		if (sbq_desc->mp != NULL) {
1305 			freemsg(sbq_desc->mp);
1306 			sbq_desc->mp = NULL;
1307 		}
1308 	}
1309 	rx_ring->sbuf_in_use_count = 0;
1310 
1311 	sbq_desc = &rx_ring->sbq_desc[0];
1312 	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1313 		/*
1314 		 * Set flag so that the callback does not allocate a new buffer
1315 		 */
1316 		sbq_desc->free_buf = 1;
1317 		if (sbq_desc->upl_inuse != 0) {
1318 			force_cnt++;
1319 		}
1320 		if (sbq_desc->bd_dma.dma_handle != NULL) {
1321 			ql_free_phys(&sbq_desc->bd_dma.dma_handle,
1322 			    &sbq_desc->bd_dma.acc_handle);
1323 			sbq_desc->bd_dma.dma_handle = NULL;
1324 			sbq_desc->bd_dma.acc_handle = NULL;
1325 		}
1326 	}
1327 #ifdef QLGE_LOAD_UNLOAD
1328 	cmn_err(CE_NOTE, "sbq: free %d inuse %d force %d\n",
1329 	    rx_ring->sbuf_free_count, rx_ring->sbuf_in_use_count, force_cnt);
1330 #endif
1331 	if (rx_ring->sbuf_in_use != NULL) {
1332 		kmem_free(rx_ring->sbuf_in_use, (rx_ring->sbq_len *
1333 		    sizeof (struct bq_desc *)));
1334 		rx_ring->sbuf_in_use = NULL;
1335 	}
1336 
1337 	if (rx_ring->sbuf_free != NULL) {
1338 		kmem_free(rx_ring->sbuf_free, (rx_ring->sbq_len *
1339 		    sizeof (struct bq_desc *)));
1340 		rx_ring->sbuf_free = NULL;
1341 	}
1342 }
1343 
1344 /* Allocate small buffers */
1345 static int
1346 ql_alloc_sbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1347 {
1348 	struct bq_desc *sbq_desc;
1349 	int i;
1350 	ddi_dma_cookie_t dma_cookie;
1351 
1352 	rx_ring->sbuf_free = kmem_zalloc(rx_ring->sbq_len *
1353 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1354 	if (rx_ring->sbuf_free == NULL) {
1355 		cmn_err(CE_WARN,
1356 		    "!%s: sbuf_free_list alloc: failed",
1357 		    __func__);
1358 		rx_ring->sbuf_free_count = 0;
1359 		goto alloc_sbuf_err;
1360 	}
1361 
1362 	rx_ring->sbuf_in_use = kmem_zalloc(rx_ring->sbq_len *
1363 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1364 	if (rx_ring->sbuf_in_use == NULL) {
1365 		cmn_err(CE_WARN,
1366 		    "!%s: sbuf_inuse_list alloc: failed",
1367 		    __func__);
1368 		rx_ring->sbuf_in_use_count = 0;
1369 		goto alloc_sbuf_err;
1370 	}
1371 	rx_ring->sbq_use_head = 0;
1372 	rx_ring->sbq_use_tail = 0;
1373 	rx_ring->sbq_free_head = 0;
1374 	rx_ring->sbq_free_tail = 0;
1375 	sbq_desc = &rx_ring->sbq_desc[0];
1376 
1377 	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1378 		/* Allocate buffer */
1379 		if (ql_alloc_phys(qlge->dip, &sbq_desc->bd_dma.dma_handle,
1380 		    &ql_buf_acc_attr,
1381 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1382 		    &sbq_desc->bd_dma.acc_handle,
1383 		    (size_t)rx_ring->sbq_buf_size,	/* mem size */
1384 		    (size_t)0,				/* default alignment */
1385 		    (caddr_t *)&sbq_desc->bd_dma.vaddr,
1386 		    &dma_cookie) != 0) {
1387 			cmn_err(CE_WARN,
1388 			    "!%s: ddi_dma_alloc_handle: failed",
1389 			    __func__);
1390 			goto alloc_sbuf_err;
1391 		}
1392 
1393 		/* Set context for Return buffer callback */
1394 		sbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1395 		sbq_desc->rx_recycle.free_func = ql_release_to_sbuf_free_list;
1396 		sbq_desc->rx_recycle.free_arg  = (caddr_t)sbq_desc;
1397 		sbq_desc->rx_ring = rx_ring;
1398 		sbq_desc->upl_inuse = 0;
1399 		sbq_desc->free_buf = 0;
1400 
1401 		sbq_desc->mp =
1402 		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1403 		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1404 		if (sbq_desc->mp == NULL) {
1405 			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1406 			goto alloc_sbuf_err;
1407 		}
1408 		ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1409 	}
1410 
1411 	return (DDI_SUCCESS);
1412 
1413 alloc_sbuf_err:
1414 	ql_free_sbq_buffers(rx_ring);
1415 	return (DDI_FAILURE);
1416 }
1417 
1418 static void
1419 ql_free_lbq_buffers(struct rx_ring *rx_ring)
1420 {
1421 	struct bq_desc *lbq_desc;
1422 	uint32_t i, j;
1423 	int force_cnt = 0;
1424 
1425 	j = rx_ring->lbq_free_head;
1426 	for (i = 0; i < rx_ring->lbuf_free_count; i++) {
1427 		lbq_desc = rx_ring->lbuf_free[j];
1428 		lbq_desc->free_buf = 1;
1429 		j++;
1430 		if (j >= rx_ring->lbq_len)
1431 			j = 0;
1432 		if (lbq_desc->mp != NULL) {
1433 			freemsg(lbq_desc->mp);
1434 			lbq_desc->mp = NULL;
1435 		}
1436 	}
1437 	rx_ring->lbuf_free_count = 0;
1438 
1439 	j = rx_ring->lbq_use_head;
1440 	for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
1441 		lbq_desc = rx_ring->lbuf_in_use[j];
1442 		lbq_desc->free_buf = 1;
1443 		j++;
1444 		if (j >= rx_ring->lbq_len) {
1445 			j = 0;
1446 		}
1447 		if (lbq_desc->mp != NULL) {
1448 			freemsg(lbq_desc->mp);
1449 			lbq_desc->mp = NULL;
1450 		}
1451 	}
1452 	rx_ring->lbuf_in_use_count = 0;
1453 
1454 	lbq_desc = &rx_ring->lbq_desc[0];
1455 	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1456 		/* Set flag so that callback will not allocate a new buffer */
1457 		lbq_desc->free_buf = 1;
1458 		if (lbq_desc->upl_inuse != 0) {
1459 			force_cnt++;
1460 		}
1461 		if (lbq_desc->bd_dma.dma_handle != NULL) {
1462 			ql_free_phys(&lbq_desc->bd_dma.dma_handle,
1463 			    &lbq_desc->bd_dma.acc_handle);
1464 			lbq_desc->bd_dma.dma_handle = NULL;
1465 			lbq_desc->bd_dma.acc_handle = NULL;
1466 		}
1467 	}
1468 #ifdef QLGE_LOAD_UNLOAD
1469 	if (force_cnt) {
1470 		cmn_err(CE_WARN, "lbq: free %d inuse %d force %d",
1471 		    rx_ring->lbuf_free_count, rx_ring->lbuf_in_use_count,
1472 		    force_cnt);
1473 	}
1474 #endif
1475 	if (rx_ring->lbuf_in_use != NULL) {
1476 		kmem_free(rx_ring->lbuf_in_use, (rx_ring->lbq_len *
1477 		    sizeof (struct bq_desc *)));
1478 		rx_ring->lbuf_in_use = NULL;
1479 	}
1480 
1481 	if (rx_ring->lbuf_free != NULL) {
1482 		kmem_free(rx_ring->lbuf_free, (rx_ring->lbq_len *
1483 		    sizeof (struct bq_desc *)));
1484 		rx_ring->lbuf_free = NULL;
1485 	}
1486 }
1487 
1488 /* Allocate large buffers */
1489 static int
1490 ql_alloc_lbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1491 {
1492 	struct bq_desc *lbq_desc;
1493 	ddi_dma_cookie_t dma_cookie;
1494 	int i;
1495 	uint32_t lbq_buf_size;
1496 
1497 	rx_ring->lbuf_free = kmem_zalloc(rx_ring->lbq_len *
1498 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1499 	if (rx_ring->lbuf_free == NULL) {
1500 		cmn_err(CE_WARN,
1501 		    "!%s: lbuf_free_list alloc: failed",
1502 		    __func__);
1503 		rx_ring->lbuf_free_count = 0;
1504 		goto alloc_lbuf_err;
1505 	}
1506 
1507 	rx_ring->lbuf_in_use = kmem_zalloc(rx_ring->lbq_len *
1508 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1509 
1510 	if (rx_ring->lbuf_in_use == NULL) {
1511 		cmn_err(CE_WARN,
1512 		    "!%s: lbuf_inuse_list alloc: failed",
1513 		    __func__);
1514 		rx_ring->lbuf_in_use_count = 0;
1515 		goto alloc_lbuf_err;
1516 	}
1517 	rx_ring->lbq_use_head = 0;
1518 	rx_ring->lbq_use_tail = 0;
1519 	rx_ring->lbq_free_head = 0;
1520 	rx_ring->lbq_free_tail = 0;
1521 
1522 	lbq_buf_size = (qlge->mtu == ETHERMTU) ?
1523 	    NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE;
1524 
1525 	lbq_desc = &rx_ring->lbq_desc[0];
1526 	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1527 		rx_ring->lbq_buf_size = lbq_buf_size;
1528 		/* Allocate buffer */
1529 		if (ql_alloc_phys(qlge->dip, &lbq_desc->bd_dma.dma_handle,
1530 		    &ql_buf_acc_attr,
1531 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1532 		    &lbq_desc->bd_dma.acc_handle,
1533 		    (size_t)rx_ring->lbq_buf_size,  /* mem size */
1534 		    (size_t)0, /* default alignment */
1535 		    (caddr_t *)&lbq_desc->bd_dma.vaddr,
1536 		    &dma_cookie) != 0) {
1537 			cmn_err(CE_WARN,
1538 			    "!%s: ddi_dma_alloc_handle: failed",
1539 			    __func__);
1540 			goto alloc_lbuf_err;
1541 		}
1542 
1543 		/* Set context for Return buffer callback */
1544 		lbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1545 		lbq_desc->rx_recycle.free_func = ql_release_to_lbuf_free_list;
1546 		lbq_desc->rx_recycle.free_arg  = (caddr_t)lbq_desc;
1547 		lbq_desc->rx_ring = rx_ring;
1548 		lbq_desc->upl_inuse = 0;
1549 		lbq_desc->free_buf = 0;
1550 
1551 		lbq_desc->mp =
1552 		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1553 		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1554 		if (lbq_desc->mp == NULL) {
1555 			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1556 			goto alloc_lbuf_err;
1557 		}
1558 		ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1559 	} /* For all large buffers */
1560 
1561 	return (DDI_SUCCESS);
1562 
1563 alloc_lbuf_err:
1564 	ql_free_lbq_buffers(rx_ring);
1565 	return (DDI_FAILURE);
1566 }
1567 
1568 /*
1569  * Free rx buffers
1570  */
1571 static void
1572 ql_free_rx_buffers(qlge_t *qlge)
1573 {
1574 	int i;
1575 	struct rx_ring *rx_ring;
1576 
1577 	for (i = 0; i < qlge->rx_ring_count; i++) {
1578 		rx_ring = &qlge->rx_ring[i];
1579 		if (rx_ring->type != TX_Q) {
1580 			ql_free_lbq_buffers(rx_ring);
1581 			ql_free_sbq_buffers(rx_ring);
1582 		}
1583 	}
1584 }
1585 
1586 /*
1587  * Allocate rx buffers
1588  */
1589 static int
1590 ql_alloc_rx_buffers(qlge_t *qlge)
1591 {
1592 	struct rx_ring *rx_ring;
1593 	int i;
1594 
1595 	for (i = 0; i < qlge->rx_ring_count; i++) {
1596 		rx_ring = &qlge->rx_ring[i];
1597 		if (rx_ring->type != TX_Q) {
1598 			if (ql_alloc_sbufs(qlge, rx_ring) != DDI_SUCCESS)
1599 				goto alloc_err;
1600 			if (ql_alloc_lbufs(qlge, rx_ring) != DDI_SUCCESS)
1601 				goto alloc_err;
1602 		}
1603 	}
1604 #ifdef QLGE_TRACK_BUFFER_USAGE
1605 	for (i = 0; i < qlge->rx_ring_count; i++) {
1606 		if (qlge->rx_ring[i].type == RX_Q) {
1607 			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
1608 			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
1609 		}
1610 		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
1611 	}
1612 #endif
1613 	return (DDI_SUCCESS);
1614 
1615 alloc_err:
1616 
1617 	return (DDI_FAILURE);
1618 }
1619 
1620 /*
1621  * Initialize large buffer queue ring
1622  */
1623 static void
1624 ql_init_lbq_ring(struct rx_ring *rx_ring)
1625 {
1626 	uint16_t i;
1627 	struct bq_desc *lbq_desc;
1628 
1629 	bzero(rx_ring->lbq_desc, rx_ring->lbq_len * sizeof (struct bq_desc));
1630 	for (i = 0; i < rx_ring->lbq_len; i++) {
1631 		lbq_desc = &rx_ring->lbq_desc[i];
1632 		lbq_desc->index = i;
1633 	}
1634 }
1635 
1636 /*
1637  * Initialize small buffer queue ring
1638  */
1639 static void
1640 ql_init_sbq_ring(struct rx_ring *rx_ring)
1641 {
1642 	uint16_t i;
1643 	struct bq_desc *sbq_desc;
1644 
1645 	bzero(rx_ring->sbq_desc, rx_ring->sbq_len * sizeof (struct bq_desc));
1646 	for (i = 0; i < rx_ring->sbq_len; i++) {
1647 		sbq_desc = &rx_ring->sbq_desc[i];
1648 		sbq_desc->index = i;
1649 	}
1650 }
1651 
1652 /*
1653  * Calculate the pseudo-header checksum if hardware can not do
1654  */
1655 static void
1656 ql_pseudo_cksum(uint8_t *buf)
1657 {
1658 	uint32_t cksum;
1659 	uint16_t iphl;
1660 	uint16_t proto;
1661 
1662 	iphl = (uint16_t)(4 * (buf[0] & 0xF));
1663 	cksum = (((uint16_t)buf[2])<<8) + buf[3] - iphl;
1664 	cksum += proto = buf[9];
1665 	cksum += (((uint16_t)buf[12])<<8) + buf[13];
1666 	cksum += (((uint16_t)buf[14])<<8) + buf[15];
1667 	cksum += (((uint16_t)buf[16])<<8) + buf[17];
1668 	cksum += (((uint16_t)buf[18])<<8) + buf[19];
1669 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1670 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1671 
1672 	/*
1673 	 * Point it to the TCP/UDP header, and
1674 	 * update the checksum field.
1675 	 */
1676 	buf += iphl + ((proto == IPPROTO_TCP) ?
1677 	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
1678 
1679 	*(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
1680 
1681 }
1682 
1683 /*
1684  * Transmit an incoming packet.
1685  */
1686 mblk_t *
1687 ql_ring_tx(void *arg, mblk_t *mp)
1688 {
1689 	struct tx_ring *tx_ring = (struct tx_ring *)arg;
1690 	qlge_t *qlge = tx_ring->qlge;
1691 	mblk_t *next;
1692 	int rval;
1693 	uint32_t tx_count = 0;
1694 
1695 	if (qlge->port_link_state == LS_DOWN) {
1696 		/* can not send message while link is down */
1697 		mblk_t *tp;
1698 		cmn_err(CE_WARN, "tx failed due to link down");
1699 
1700 		while (mp != NULL) {
1701 			tp = mp->b_next;
1702 			mp->b_next = NULL;
1703 			freemsg(mp);
1704 			mp = tp;
1705 		}
1706 		goto exit;
1707 	}
1708 
1709 	mutex_enter(&tx_ring->tx_lock);
1710 	/* if mac is not started, driver is not ready, can not send */
1711 	if (tx_ring->mac_flags != QL_MAC_STARTED) {
1712 		cmn_err(CE_WARN, "%s(%d)ring not started, mode %d "
1713 		    " return packets",
1714 		    __func__, qlge->instance, tx_ring->mac_flags);
1715 		mutex_exit(&tx_ring->tx_lock);
1716 		goto exit;
1717 	}
1718 
1719 	/* we must try to send all */
1720 	while (mp != NULL) {
1721 		/*
1722 		 * if number of available slots is less than a threshold,
1723 		 * then quit
1724 		 */
1725 		if (tx_ring->tx_free_count <= TX_STOP_THRESHOLD) {
1726 			tx_ring->queue_stopped = 1;
1727 			rval = DDI_FAILURE;
1728 #ifdef QLGE_LOAD_UNLOAD
1729 			cmn_err(CE_WARN, "%s(%d) no resources",
1730 			    __func__, qlge->instance);
1731 #endif
1732 			tx_ring->defer++;
1733 			/*
1734 			 * If we return the buffer back we are expected to call
1735 			 * mac_tx_ring_update() when resources are available
1736 			 */
1737 			break;
1738 		}
1739 
1740 		next = mp->b_next;
1741 		mp->b_next = NULL;
1742 
1743 		rval = ql_send_common(tx_ring, mp);
1744 
1745 		if (rval != DDI_SUCCESS) {
1746 			mp->b_next = next;
1747 			break;
1748 		}
1749 		tx_count++;
1750 		mp = next;
1751 	}
1752 
1753 	/*
1754 	 * After all msg blocks are mapped or copied to tx buffer,
1755 	 * trigger the hardware to send!
1756 	 */
1757 	if (tx_count > 0) {
1758 		ql_write_doorbell_reg(tx_ring->qlge, tx_ring->prod_idx_db_reg,
1759 		    tx_ring->prod_idx);
1760 	}
1761 
1762 	mutex_exit(&tx_ring->tx_lock);
1763 exit:
1764 	return (mp);
1765 }
1766 
1767 
1768 /*
1769  * This function builds an mblk list for the given inbound
1770  * completion.
1771  */
1772 
1773 static mblk_t *
1774 ql_build_rx_mp(qlge_t *qlge, struct rx_ring *rx_ring,
1775     struct ib_mac_iocb_rsp *ib_mac_rsp)
1776 {
1777 	mblk_t *mp = NULL;
1778 	mblk_t *mp1 = NULL;	/* packet header */
1779 	mblk_t *mp2 = NULL;	/* packet content */
1780 	struct bq_desc *lbq_desc;
1781 	struct bq_desc *sbq_desc;
1782 	uint32_t err_flag = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK);
1783 	uint32_t payload_len = le32_to_cpu(ib_mac_rsp->data_len);
1784 	uint32_t header_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1785 	uint32_t pkt_len = payload_len + header_len;
1786 	uint32_t done;
1787 	uint64_t *curr_ial_ptr;
1788 	uint32_t ial_data_addr_low;
1789 	uint32_t actual_data_addr_low;
1790 	mblk_t *mp_ial = NULL;	/* ial chained packets */
1791 	uint32_t size;
1792 
1793 	/*
1794 	 * Check if error flags are set
1795 	 */
1796 	if (err_flag != 0) {
1797 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_OVERSIZE) != 0)
1798 			rx_ring->frame_too_long++;
1799 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_UNDERSIZE) != 0)
1800 			rx_ring->frame_too_short++;
1801 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_CRC) != 0)
1802 			rx_ring->fcs_err++;
1803 #ifdef QLGE_LOAD_UNLOAD
1804 		cmn_err(CE_WARN, "bad packet, type 0x%x", err_flag);
1805 #endif
1806 		QL_DUMP(DBG_RX, "qlge_ring_rx: bad response iocb dump\n",
1807 		    (uint8_t *)ib_mac_rsp, 8,
1808 		    (size_t)sizeof (struct ib_mac_iocb_rsp));
1809 	}
1810 
1811 	/* header should not be in large buffer */
1812 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL) {
1813 		cmn_err(CE_WARN, "header in large buffer or invalid!");
1814 		err_flag |= 1;
1815 	}
1816 	/*
1817 	 * Handle the header buffer if present.
1818 	 * packet header must be valid and saved in one small buffer
1819 	 * broadcast/multicast packets' headers not splitted
1820 	 */
1821 	if ((ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) &&
1822 	    (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1823 		QL_PRINT(DBG_RX, ("Header of %d bytes in small buffer.\n",
1824 		    header_len));
1825 		/* Sync access */
1826 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
1827 
1828 		ASSERT(sbq_desc != NULL);
1829 
1830 		/*
1831 		 * Validate addresses from the ASIC with the
1832 		 * expected sbuf address
1833 		 */
1834 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
1835 		    != ib_mac_rsp->hdr_addr) {
1836 			/* Small buffer address mismatch */
1837 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
1838 			    " in wrong small buffer",
1839 			    __func__, qlge->instance, rx_ring->cq_id);
1840 			goto fetal_error;
1841 		}
1842 		/* get this packet */
1843 		mp1 = sbq_desc->mp;
1844 		if ((err_flag != 0)|| (mp1 == NULL)) {
1845 			/* failed on this packet, put it back for re-arming */
1846 #ifdef QLGE_LOAD_UNLOAD
1847 			cmn_err(CE_WARN, "get header from small buffer fail");
1848 #endif
1849 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1850 			mp1 = NULL;
1851 		} else {
1852 			/* Flush DMA'd data */
1853 			(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
1854 			    0, header_len, DDI_DMA_SYNC_FORKERNEL);
1855 
1856 			if ((qlge->ip_hdr_offset != 0)&&
1857 			    (header_len < SMALL_BUFFER_SIZE)) {
1858 				/*
1859 				 * copy entire header to a 2 bytes boundary
1860 				 * address for 8100 adapters so that the IP
1861 				 * header can be on a 4 byte boundary address
1862 				 */
1863 				bcopy(mp1->b_rptr,
1864 				    (mp1->b_rptr + SMALL_BUFFER_SIZE +
1865 				    qlge->ip_hdr_offset),
1866 				    header_len);
1867 				mp1->b_rptr += SMALL_BUFFER_SIZE +
1868 				    qlge->ip_hdr_offset;
1869 			}
1870 
1871 			/*
1872 			 * Adjust the mp payload_len to match
1873 			 * the packet header payload_len
1874 			 */
1875 			mp1->b_wptr = mp1->b_rptr + header_len;
1876 			mp1->b_next = mp1->b_cont = NULL;
1877 			QL_DUMP(DBG_RX, "\t RX packet header dump:\n",
1878 			    (uint8_t *)mp1->b_rptr, 8, header_len);
1879 		}
1880 	}
1881 
1882 	/*
1883 	 * packet data or whole packet can be in small or one or
1884 	 * several large buffer(s)
1885 	 */
1886 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1887 		/*
1888 		 * The data is in a single small buffer.
1889 		 */
1890 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
1891 
1892 		ASSERT(sbq_desc != NULL);
1893 
1894 		QL_PRINT(DBG_RX,
1895 		    ("%d bytes in a single small buffer, sbq_desc = %p, "
1896 		    "sbq_desc->bd_dma.dma_addr = %x,"
1897 		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
1898 		    payload_len, sbq_desc, sbq_desc->bd_dma.dma_addr,
1899 		    ib_mac_rsp->data_addr, sbq_desc->mp));
1900 
1901 		/*
1902 		 * Validate  addresses from the ASIC with the
1903 		 * expected sbuf address
1904 		 */
1905 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
1906 		    != ib_mac_rsp->data_addr) {
1907 			/* Small buffer address mismatch */
1908 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
1909 			    " in wrong small buffer",
1910 			    __func__, qlge->instance, rx_ring->cq_id);
1911 			goto fetal_error;
1912 		}
1913 		/* get this packet */
1914 		mp2 = sbq_desc->mp;
1915 		if ((err_flag != 0) || (mp2 == NULL)) {
1916 #ifdef QLGE_LOAD_UNLOAD
1917 			/* failed on this packet, put it back for re-arming */
1918 			cmn_err(CE_WARN, "ignore bad data from small buffer");
1919 #endif
1920 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1921 			mp2 = NULL;
1922 		} else {
1923 			/* Adjust the buffer length to match the payload_len */
1924 			mp2->b_wptr = mp2->b_rptr + payload_len;
1925 			mp2->b_next = mp2->b_cont = NULL;
1926 			/* Flush DMA'd data */
1927 			(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
1928 			    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
1929 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
1930 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
1931 			/*
1932 			 * if payload is too small , copy to
1933 			 * the end of packet header
1934 			 */
1935 			if ((mp1 != NULL) &&
1936 			    (payload_len <= qlge->payload_copy_thresh) &&
1937 			    (pkt_len <
1938 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
1939 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
1940 				mp1->b_wptr += payload_len;
1941 				freemsg(mp2);
1942 				mp2 = NULL;
1943 			}
1944 		}
1945 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1946 		/*
1947 		 * The data is in a single large buffer.
1948 		 */
1949 		lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
1950 
1951 		QL_PRINT(DBG_RX,
1952 		    ("%d bytes in a single large buffer, lbq_desc = %p, "
1953 		    "lbq_desc->bd_dma.dma_addr = %x,"
1954 		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
1955 		    payload_len, lbq_desc, lbq_desc->bd_dma.dma_addr,
1956 		    ib_mac_rsp->data_addr, lbq_desc->mp));
1957 
1958 		ASSERT(lbq_desc != NULL);
1959 
1960 		/*
1961 		 * Validate  addresses from the ASIC with
1962 		 * the expected lbuf address
1963 		 */
1964 		if (cpu_to_le64(lbq_desc->bd_dma.dma_addr)
1965 		    != ib_mac_rsp->data_addr) {
1966 			/* Large buffer address mismatch */
1967 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
1968 			    " in wrong large buffer",
1969 			    __func__, qlge->instance, rx_ring->cq_id);
1970 			goto fetal_error;
1971 		}
1972 		mp2 = lbq_desc->mp;
1973 		if ((err_flag != 0) || (mp2 == NULL)) {
1974 #ifdef QLGE_LOAD_UNLOAD
1975 			cmn_err(CE_WARN, "ignore bad data from large buffer");
1976 #endif
1977 			/* failed on this packet, put it back for re-arming */
1978 			ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
1979 			mp2 = NULL;
1980 		} else {
1981 			/*
1982 			 * Adjust the buffer length to match
1983 			 * the packet payload_len
1984 			 */
1985 			mp2->b_wptr = mp2->b_rptr + payload_len;
1986 			mp2->b_next = mp2->b_cont = NULL;
1987 			/* Flush DMA'd data */
1988 			(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
1989 			    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
1990 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
1991 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
1992 			/*
1993 			 * if payload is too small , copy to
1994 			 * the end of packet header
1995 			 */
1996 			if ((mp1 != NULL) &&
1997 			    (payload_len <= qlge->payload_copy_thresh) &&
1998 			    (pkt_len<
1999 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2000 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2001 				mp1->b_wptr += payload_len;
2002 				freemsg(mp2);
2003 				mp2 = NULL;
2004 			}
2005 		}
2006 	} else if (payload_len) {
2007 		/*
2008 		 * payload available but not in sml nor lrg buffer,
2009 		 * so, it is saved in IAL
2010 		 */
2011 #ifdef QLGE_LOAD_UNLOAD
2012 		cmn_err(CE_NOTE, "packet chained in IAL \n");
2013 #endif
2014 		/* lrg buf addresses are saved in one small buffer */
2015 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2016 		curr_ial_ptr = (uint64_t *)sbq_desc->bd_dma.vaddr;
2017 		done = 0;
2018 		while (!done) {
2019 			ial_data_addr_low =
2020 			    (uint32_t)(le64_to_cpu(*curr_ial_ptr) &
2021 			    0xFFFFFFFE);
2022 			/* check if this is the last packet fragment */
2023 			done = (uint32_t)(le64_to_cpu(*curr_ial_ptr) & 1);
2024 			curr_ial_ptr++;
2025 			/*
2026 			 * The data is in one or several large buffer(s).
2027 			 */
2028 			lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2029 			actual_data_addr_low =
2030 			    (uint32_t)(lbq_desc->bd_dma.dma_addr &
2031 			    0xFFFFFFFE);
2032 			if (ial_data_addr_low != actual_data_addr_low) {
2033 				cmn_err(CE_WARN,
2034 				    "packet saved in wrong ial lrg buffer"
2035 				    " expected %x, actual %lx",
2036 				    ial_data_addr_low,
2037 				    (uintptr_t)lbq_desc->bd_dma.dma_addr);
2038 				goto fetal_error;
2039 			}
2040 
2041 			if (mp_ial == NULL) {
2042 				mp_ial = mp2 = lbq_desc->mp;
2043 			} else {
2044 				mp2->b_cont = lbq_desc->mp;
2045 				mp2 = lbq_desc->mp;
2046 			}
2047 			mp2->b_next = NULL;
2048 			mp2->b_cont = NULL;
2049 			size = (payload_len < rx_ring->lbq_buf_size)?
2050 			    payload_len : rx_ring->lbq_buf_size;
2051 			mp2->b_wptr = mp2->b_rptr + size;
2052 			/* Flush DMA'd data */
2053 			(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2054 			    0, size, DDI_DMA_SYNC_FORKERNEL);
2055 			payload_len -= size;
2056 			QL_DUMP(DBG_RX, "\t Mac data dump:\n",
2057 			    (uint8_t *)mp2->b_rptr, 8, size);
2058 		}
2059 		mp2 = mp_ial;
2060 		freemsg(sbq_desc->mp);
2061 	}
2062 	/*
2063 	 * some packets' hdr not split, then send mp2 upstream, otherwise,
2064 	 * concatenate message block mp2 to the tail of message header, mp1
2065 	 */
2066 	if (!err_flag) {
2067 		if (mp1) {
2068 			if (mp2) {
2069 				QL_PRINT(DBG_RX, ("packet in mp1 and mp2\n"));
2070 				linkb(mp1, mp2); /* mp1->b_cont = mp2; */
2071 				mp = mp1;
2072 			} else {
2073 				QL_PRINT(DBG_RX, ("packet in mp1 only\n"));
2074 				mp = mp1;
2075 			}
2076 		} else if (mp2) {
2077 			QL_PRINT(DBG_RX, ("packet in mp2 only\n"));
2078 			mp = mp2;
2079 		}
2080 	}
2081 	return (mp);
2082 
2083 fetal_error:
2084 	/* Fetal Error! */
2085 	*mp->b_wptr = 0;
2086 	return (mp);
2087 
2088 }
2089 
2090 /*
2091  * Bump completion queue consumer index.
2092  */
2093 static void
2094 ql_update_cq(struct rx_ring *rx_ring)
2095 {
2096 	rx_ring->cnsmr_idx++;
2097 	rx_ring->curr_entry++;
2098 	if (rx_ring->cnsmr_idx >= rx_ring->cq_len) {
2099 		rx_ring->cnsmr_idx = 0;
2100 		rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
2101 	}
2102 }
2103 
2104 /*
2105  * Update completion queue consumer index.
2106  */
2107 static void
2108 ql_write_cq_idx(struct rx_ring *rx_ring)
2109 {
2110 	qlge_t *qlge = rx_ring->qlge;
2111 
2112 	ql_write_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg,
2113 	    rx_ring->cnsmr_idx);
2114 }
2115 
2116 /*
2117  * Processes a SYS-Chip Event Notification Completion Event.
2118  * The incoming notification event that describes a link up/down
2119  * or some sorts of error happens.
2120  */
2121 static void
2122 ql_process_chip_ae_intr(qlge_t *qlge,
2123     struct ib_sys_event_iocb_rsp *ib_sys_event_rsp_ptr)
2124 {
2125 	uint8_t eventType = ib_sys_event_rsp_ptr->event_type;
2126 	uint32_t soft_req = 0;
2127 
2128 	switch (eventType) {
2129 		case SYS_EVENT_PORT_LINK_UP:	/* 0x0h */
2130 			QL_PRINT(DBG_MBX, ("Port Link Up\n"));
2131 			break;
2132 
2133 		case SYS_EVENT_PORT_LINK_DOWN:	/* 0x1h */
2134 			QL_PRINT(DBG_MBX, ("Port Link Down\n"));
2135 			break;
2136 
2137 		case SYS_EVENT_MULTIPLE_CAM_HITS : /* 0x6h */
2138 			cmn_err(CE_WARN, "A multiple CAM hits look up error "
2139 			    "occurred");
2140 			soft_req |= NEED_HW_RESET;
2141 			break;
2142 
2143 		case SYS_EVENT_SOFT_ECC_ERR:	/* 0x7h */
2144 			cmn_err(CE_WARN, "Soft ECC error detected");
2145 			soft_req |= NEED_HW_RESET;
2146 			break;
2147 
2148 		case SYS_EVENT_MGMT_FATAL_ERR:	/* 0x8h */
2149 			cmn_err(CE_WARN, "Management (MPI) Processor fatal"
2150 			    " error occured");
2151 			soft_req |= NEED_MPI_RESET;
2152 			break;
2153 
2154 		case SYS_EVENT_MAC_INTERRUPT:	/* 0x9h */
2155 			QL_PRINT(DBG_MBX, ("MAC Interrupt"));
2156 			break;
2157 
2158 		case SYS_EVENT_PCI_ERR_READING_SML_LRG_BUF:	/* 0x40h */
2159 			cmn_err(CE_WARN, "PCI Error reading small/large "
2160 			    "buffers occured");
2161 			soft_req |= NEED_HW_RESET;
2162 			break;
2163 
2164 		default:
2165 			QL_PRINT(DBG_RX, ("%s(%d) unknown Sys Event: "
2166 			    "type 0x%x occured",
2167 			    __func__, qlge->instance, eventType));
2168 			break;
2169 	}
2170 
2171 	if ((soft_req & NEED_MPI_RESET) != 0) {
2172 		ql_wake_mpi_reset_soft_intr(qlge);
2173 	} else if ((soft_req & NEED_HW_RESET) != 0) {
2174 		ql_wake_asic_reset_soft_intr(qlge);
2175 	}
2176 }
2177 
2178 /*
2179  * set received packet checksum flag
2180  */
2181 void
2182 ql_set_rx_cksum(mblk_t *mp, struct ib_mac_iocb_rsp *net_rsp)
2183 {
2184 	uint32_t flags;
2185 
2186 	/* Not TCP or UDP packet? nothing more to do */
2187 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) == 0) &&
2188 	    ((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) == 0))
2189 	return;
2190 
2191 	/* No CKO support for IPv6 */
2192 	if ((net_rsp->flags3 & IB_MAC_IOCB_RSP_V6) != 0)
2193 		return;
2194 
2195 	/*
2196 	 * If checksum error, don't set flags; stack will calculate
2197 	 * checksum, detect the error and update statistics
2198 	 */
2199 	if (((net_rsp->flags1 & IB_MAC_IOCB_RSP_TE) != 0) ||
2200 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_IE) != 0))
2201 		return;
2202 
2203 	/* TCP or UDP packet and checksum valid */
2204 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) != 0) &&
2205 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
2206 		flags = HCK_FULLCKSUM | HCK_FULLCKSUM_OK;
2207 		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, flags, 0);
2208 	}
2209 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) != 0) &&
2210 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
2211 		flags = HCK_FULLCKSUM | HCK_FULLCKSUM_OK;
2212 		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, flags, 0);
2213 	}
2214 }
2215 
2216 /*
2217  * This function goes through h/w descriptor in one specified rx ring,
2218  * receives the data if the descriptor status shows the data is ready.
2219  * It returns a chain of mblks containing the received data, to be
2220  * passed up to mac_rx_ring().
2221  */
2222 mblk_t *
2223 ql_ring_rx(struct rx_ring *rx_ring, int poll_bytes)
2224 {
2225 	qlge_t *qlge = rx_ring->qlge;
2226 	uint32_t prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2227 	struct ib_mac_iocb_rsp *net_rsp;
2228 	mblk_t *mp;
2229 	mblk_t *mblk_head;
2230 	mblk_t **mblk_tail;
2231 	uint32_t received_bytes = 0;
2232 	boolean_t done = B_FALSE;
2233 	uint32_t length;
2234 
2235 #ifdef QLGE_TRACK_BUFFER_USAGE
2236 	uint32_t consumer_idx;
2237 	uint32_t producer_idx;
2238 	uint32_t num_free_entries;
2239 	uint32_t temp;
2240 
2241 	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2242 	consumer_idx = temp & 0x0000ffff;
2243 	producer_idx = (temp >> 16);
2244 
2245 	if (consumer_idx > producer_idx)
2246 		num_free_entries = (consumer_idx - producer_idx);
2247 	else
2248 		num_free_entries = NUM_RX_RING_ENTRIES - (
2249 		    producer_idx - consumer_idx);
2250 
2251 	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2252 		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2253 
2254 #endif
2255 	mblk_head = NULL;
2256 	mblk_tail = &mblk_head;
2257 
2258 	while (!done && (prod != rx_ring->cnsmr_idx)) {
2259 		QL_PRINT(DBG_RX,
2260 		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n",
2261 		    __func__, rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2262 
2263 		net_rsp = (struct ib_mac_iocb_rsp *)rx_ring->curr_entry;
2264 		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2265 		    (off_t)((uintptr_t)net_rsp -
2266 		    (uintptr_t)rx_ring->cq_dma.vaddr),
2267 		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2268 		QL_DUMP(DBG_RX, "qlge_ring_rx: rx completion iocb\n",
2269 		    rx_ring->curr_entry, 8, (size_t)sizeof (*net_rsp));
2270 
2271 		switch (net_rsp->opcode) {
2272 
2273 		case OPCODE_IB_MAC_IOCB:
2274 			/* Adding length of pkt header and payload */
2275 			length = le32_to_cpu(net_rsp->data_len) +
2276 			    le32_to_cpu(net_rsp->hdr_len);
2277 			if ((poll_bytes != QLGE_POLL_ALL) &&
2278 			    ((received_bytes + length) > poll_bytes)) {
2279 				done = B_TRUE;
2280 				continue;
2281 			}
2282 			received_bytes += length;
2283 
2284 			mp = ql_build_rx_mp(qlge, rx_ring, net_rsp);
2285 			if (mp != NULL) {
2286 				if (rx_ring->mac_flags != QL_MAC_STARTED) {
2287 					/*
2288 					 * Increment number of packets we have
2289 					 * indicated to the stack, should be
2290 					 * decremented when we get it back
2291 					 * or when freemsg is called
2292 					 */
2293 					ASSERT(rx_ring->rx_indicate
2294 					    <= rx_ring->cq_len);
2295 #ifdef QLGE_LOAD_UNLOAD
2296 					cmn_err(CE_WARN, "%s do not send to OS,"
2297 					    " mac_flags %d, indicate %d",
2298 					    __func__, rx_ring->mac_flags,
2299 					    rx_ring->rx_indicate);
2300 #endif
2301 					QL_PRINT(DBG_RX,
2302 					    ("cq_id = %d, packet "
2303 					    "dropped, mac not "
2304 					    "enabled.\n",
2305 					    rx_ring->cq_id));
2306 					rx_ring->rx_pkt_dropped_mac_unenabled++;
2307 
2308 					/* rx_lock is expected to be held */
2309 					mutex_exit(&rx_ring->rx_lock);
2310 					freemsg(mp);
2311 					mutex_enter(&rx_ring->rx_lock);
2312 					mp = NULL;
2313 				}
2314 
2315 				if (mp != NULL) {
2316 					/*
2317 					 * IP full packet has been
2318 					 * successfully verified by
2319 					 * H/W and is correct
2320 					 */
2321 					ql_set_rx_cksum(mp, net_rsp);
2322 
2323 					rx_ring->rx_packets++;
2324 					rx_ring->rx_bytes += length;
2325 					*mblk_tail = mp;
2326 					mblk_tail = &mp->b_next;
2327 				}
2328 			} else {
2329 				QL_PRINT(DBG_RX,
2330 				    ("cq_id = %d, packet dropped\n",
2331 				    rx_ring->cq_id));
2332 				rx_ring->rx_packets_dropped_no_buffer++;
2333 			}
2334 			break;
2335 
2336 		case OPCODE_IB_SYS_EVENT_IOCB:
2337 			ql_process_chip_ae_intr(qlge,
2338 			    (struct ib_sys_event_iocb_rsp *)
2339 			    net_rsp);
2340 			break;
2341 
2342 		default:
2343 			cmn_err(CE_WARN,
2344 			    "%s Ring(%d)Hit default case, not handled!"
2345 			    " dropping the packet, "
2346 			    "opcode = %x.", __func__, rx_ring->cq_id,
2347 			    net_rsp->opcode);
2348 			break;
2349 		}
2350 		/* increment cnsmr_idx and curr_entry */
2351 		ql_update_cq(rx_ring);
2352 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2353 
2354 	}
2355 	/* update cnsmr_idx */
2356 	ql_write_cq_idx(rx_ring);
2357 	/* do not enable interrupt for polling mode */
2358 	if (poll_bytes == QLGE_POLL_ALL)
2359 		ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
2360 	return (mblk_head);
2361 }
2362 
2363 /* Process an outbound completion from an rx ring. */
2364 static void
2365 ql_process_mac_tx_intr(qlge_t *qlge, struct ob_mac_iocb_rsp *mac_rsp)
2366 {
2367 	struct tx_ring *tx_ring;
2368 	struct tx_ring_desc *tx_ring_desc;
2369 	int j;
2370 
2371 	tx_ring = &qlge->tx_ring[mac_rsp->txq_idx];
2372 	tx_ring_desc = tx_ring->wq_desc;
2373 	tx_ring_desc += mac_rsp->tid;
2374 
2375 	if (tx_ring_desc->tx_type == USE_DMA) {
2376 		QL_PRINT(DBG_TX, ("%s(%d): tx type USE_DMA\n",
2377 		    __func__, qlge->instance));
2378 
2379 		/*
2380 		 * Release the DMA resource that is used for
2381 		 * DMA binding.
2382 		 */
2383 		for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
2384 			(void) ddi_dma_unbind_handle(
2385 			    tx_ring_desc->tx_dma_handle[j]);
2386 		}
2387 
2388 		tx_ring_desc->tx_dma_handle_used = 0;
2389 		/*
2390 		 * Free the mblk after sending completed
2391 		 */
2392 		if (tx_ring_desc->mp != NULL) {
2393 			freemsg(tx_ring_desc->mp);
2394 			tx_ring_desc->mp = NULL;
2395 		}
2396 	}
2397 
2398 	tx_ring->obytes += tx_ring_desc->tx_bytes;
2399 	tx_ring->opackets++;
2400 
2401 	if (mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | OB_MAC_IOCB_RSP_S |
2402 	    OB_MAC_IOCB_RSP_L | OB_MAC_IOCB_RSP_B)) {
2403 		tx_ring->errxmt++;
2404 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2405 			/* EMPTY */
2406 			QL_PRINT(DBG_TX,
2407 			    ("Total descriptor length did not match "
2408 			    "transfer length.\n"));
2409 		}
2410 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2411 			/* EMPTY */
2412 			QL_PRINT(DBG_TX,
2413 			    ("Frame too short to be legal, not sent.\n"));
2414 		}
2415 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2416 			/* EMPTY */
2417 			QL_PRINT(DBG_TX,
2418 			    ("Frame too long, but sent anyway.\n"));
2419 		}
2420 		if (mac_rsp->flags3 & OB_MAC_IOCB_RSP_B) {
2421 			/* EMPTY */
2422 			QL_PRINT(DBG_TX,
2423 			    ("PCI backplane error. Frame not sent.\n"));
2424 		}
2425 	}
2426 	atomic_inc_32(&tx_ring->tx_free_count);
2427 }
2428 
2429 /*
2430  * clean up tx completion iocbs
2431  */
2432 static int
2433 ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2434 {
2435 	qlge_t *qlge = rx_ring->qlge;
2436 	uint32_t prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2437 	struct ob_mac_iocb_rsp *net_rsp = NULL;
2438 	int count = 0;
2439 	struct tx_ring *tx_ring;
2440 	boolean_t resume_tx = B_FALSE;
2441 
2442 	mutex_enter(&rx_ring->rx_lock);
2443 #ifdef QLGE_TRACK_BUFFER_USAGE
2444 	{
2445 	uint32_t consumer_idx;
2446 	uint32_t producer_idx;
2447 	uint32_t num_free_entries;
2448 	uint32_t temp;
2449 
2450 	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2451 	consumer_idx = temp & 0x0000ffff;
2452 	producer_idx = (temp >> 16);
2453 
2454 	if (consumer_idx > producer_idx)
2455 		num_free_entries = (consumer_idx - producer_idx);
2456 	else
2457 		num_free_entries = NUM_RX_RING_ENTRIES -
2458 		    (producer_idx - consumer_idx);
2459 
2460 	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2461 		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2462 
2463 	}
2464 #endif
2465 	/* While there are entries in the completion queue. */
2466 	while (prod != rx_ring->cnsmr_idx) {
2467 
2468 		QL_PRINT(DBG_RX,
2469 		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n", __func__,
2470 		    rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2471 
2472 		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2473 		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2474 		    (off_t)((uintptr_t)net_rsp -
2475 		    (uintptr_t)rx_ring->cq_dma.vaddr),
2476 		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2477 
2478 		QL_DUMP(DBG_RX, "ql_clean_outbound_rx_ring: "
2479 		    "response packet data\n",
2480 		    rx_ring->curr_entry, 8,
2481 		    (size_t)sizeof (*net_rsp));
2482 
2483 		switch (net_rsp->opcode) {
2484 
2485 		case OPCODE_OB_MAC_OFFLOAD_IOCB:
2486 		case OPCODE_OB_MAC_IOCB:
2487 			ql_process_mac_tx_intr(qlge, net_rsp);
2488 			break;
2489 
2490 		default:
2491 			cmn_err(CE_WARN,
2492 			    "%s Hit default case, not handled! "
2493 			    "dropping the packet,"
2494 			    " opcode = %x.",
2495 			    __func__, net_rsp->opcode);
2496 			break;
2497 		}
2498 		count++;
2499 		ql_update_cq(rx_ring);
2500 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2501 	}
2502 	ql_write_cq_idx(rx_ring);
2503 
2504 	mutex_exit(&rx_ring->rx_lock);
2505 
2506 	tx_ring = &qlge->tx_ring[net_rsp->txq_idx];
2507 
2508 	mutex_enter(&tx_ring->tx_lock);
2509 
2510 	if (tx_ring->queue_stopped &&
2511 	    (tx_ring->tx_free_count > TX_RESUME_THRESHOLD)) {
2512 		/*
2513 		 * The queue got stopped because the tx_ring was full.
2514 		 * Wake it up, because it's now at least 25% empty.
2515 		 */
2516 		tx_ring->queue_stopped = 0;
2517 		resume_tx = B_TRUE;
2518 	}
2519 
2520 	mutex_exit(&tx_ring->tx_lock);
2521 	/* Don't hold the lock during OS callback */
2522 	if (resume_tx)
2523 		RESUME_TX(tx_ring);
2524 	return (count);
2525 }
2526 
2527 /*
2528  * reset asic when error happens
2529  */
2530 /* ARGSUSED */
2531 static uint_t
2532 ql_asic_reset_work(caddr_t arg1, caddr_t arg2)
2533 {
2534 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2535 	int status;
2536 
2537 	mutex_enter(&qlge->gen_mutex);
2538 	status = ql_bringdown_adapter(qlge);
2539 	if (status != DDI_SUCCESS)
2540 		goto error;
2541 
2542 	status = ql_bringup_adapter(qlge);
2543 	if (status != DDI_SUCCESS)
2544 		goto error;
2545 	mutex_exit(&qlge->gen_mutex);
2546 	return (DDI_INTR_CLAIMED);
2547 
2548 error:
2549 	mutex_exit(&qlge->gen_mutex);
2550 	cmn_err(CE_WARN,
2551 	    "qlge up/down cycle failed, closing device");
2552 	return (DDI_INTR_CLAIMED);
2553 }
2554 
2555 /*
2556  * Reset MPI
2557  */
2558 /* ARGSUSED */
2559 static uint_t
2560 ql_mpi_reset_work(caddr_t arg1, caddr_t arg2)
2561 {
2562 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2563 
2564 	(void) ql_reset_mpi_risc(qlge);
2565 	return (DDI_INTR_CLAIMED);
2566 }
2567 
2568 /*
2569  * Process MPI mailbox messages
2570  */
2571 /* ARGSUSED */
2572 static uint_t
2573 ql_mpi_event_work(caddr_t arg1, caddr_t arg2)
2574 {
2575 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2576 
2577 	ql_do_mpi_intr(qlge);
2578 	return (DDI_INTR_CLAIMED);
2579 }
2580 
2581 /* Fire up a handler to reset the MPI processor. */
2582 void
2583 ql_wake_asic_reset_soft_intr(qlge_t *qlge)
2584 {
2585 	(void) ddi_intr_trigger_softint(qlge->asic_reset_intr_hdl, NULL);
2586 }
2587 
2588 static void
2589 ql_wake_mpi_reset_soft_intr(qlge_t *qlge)
2590 {
2591 	(void) ddi_intr_trigger_softint(qlge->mpi_reset_intr_hdl, NULL);
2592 }
2593 
2594 static void
2595 ql_wake_mpi_event_soft_intr(qlge_t *qlge)
2596 {
2597 	(void) ddi_intr_trigger_softint(qlge->mpi_event_intr_hdl, NULL);
2598 }
2599 
2600 /*
2601  * This handles a fatal error, MPI activity, and the default
2602  * rx_ring in an MSI-X multiple interrupt vector environment.
2603  * In MSI/Legacy environment it also process the rest of
2604  * the rx_rings.
2605  */
2606 /* ARGSUSED */
2607 static uint_t
2608 ql_isr(caddr_t arg1, caddr_t arg2)
2609 {
2610 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
2611 	qlge_t *qlge = rx_ring->qlge;
2612 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
2613 	uint32_t var, prod;
2614 	int i;
2615 	int work_done = 0;
2616 
2617 	mblk_t *mp;
2618 
2619 	_NOTE(ARGUNUSED(arg2));
2620 
2621 	++qlge->rx_interrupts[rx_ring->cq_id];
2622 
2623 	if (ql_atomic_read_32(&qlge->intr_ctx[0].irq_cnt)) {
2624 		ql_write_reg(qlge, REG_RSVD7, 0xfeed0002);
2625 		var = ql_read_reg(qlge, REG_ERROR_STATUS);
2626 		var = ql_read_reg(qlge, REG_STATUS);
2627 		var = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
2628 		return (DDI_INTR_CLAIMED);
2629 	}
2630 
2631 	ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2632 
2633 	/*
2634 	 * Check the default queue and wake handler if active.
2635 	 */
2636 	rx_ring = &qlge->rx_ring[0];
2637 	prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2638 	QL_PRINT(DBG_INTR, ("rx-ring[0] prod index 0x%x, consumer 0x%x ",
2639 	    prod, rx_ring->cnsmr_idx));
2640 	/* check if interrupt is due to incoming packet */
2641 	if (prod != rx_ring->cnsmr_idx) {
2642 		QL_PRINT(DBG_INTR, ("Waking handler for rx_ring[0].\n"));
2643 		ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2644 		mutex_enter(&rx_ring->rx_lock);
2645 		mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2646 		mutex_exit(&rx_ring->rx_lock);
2647 
2648 		if (mp != NULL)
2649 			RX_UPSTREAM(rx_ring, mp);
2650 		work_done++;
2651 	} else {
2652 		/*
2653 		 * If interrupt is not due to incoming packet, read status
2654 		 * register to see if error happens or mailbox interrupt.
2655 		 */
2656 		var = ql_read_reg(qlge, REG_STATUS);
2657 		if ((var & STATUS_FE) != 0) {
2658 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
2659 
2660 			cmn_err(CE_WARN, "Got fatal error, STS = %x.", var);
2661 			var = ql_read_reg(qlge, REG_ERROR_STATUS);
2662 			cmn_err(CE_WARN,
2663 			    "Resetting chip. Error Status Register = 0x%x",
2664 			    var);
2665 			ql_wake_asic_reset_soft_intr(qlge);
2666 			return (DDI_INTR_CLAIMED);
2667 		}
2668 
2669 		/*
2670 		 * Check MPI processor activity.
2671 		 */
2672 		if ((var & STATUS_PI) != 0) {
2673 			/*
2674 			 * We've got an async event or mailbox completion.
2675 			 * Handle it and clear the source of the interrupt.
2676 			 */
2677 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
2678 
2679 			QL_PRINT(DBG_INTR, ("Got MPI processor interrupt.\n"));
2680 			ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2681 			ql_wake_mpi_event_soft_intr(qlge);
2682 			work_done++;
2683 		}
2684 	}
2685 
2686 	if (qlge->intr_type != DDI_INTR_TYPE_MSIX) {
2687 		/*
2688 		 * Start the DPC for each active queue.
2689 		 */
2690 		for (i = 1; i < qlge->rx_ring_count; i++) {
2691 			rx_ring = &qlge->rx_ring[i];
2692 
2693 			if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2694 			    rx_ring->cnsmr_idx) {
2695 				QL_PRINT(DBG_INTR,
2696 				    ("Waking handler for rx_ring[%d].\n", i));
2697 
2698 				ql_disable_completion_interrupt(qlge,
2699 				    rx_ring->irq);
2700 				if (rx_ring->type == TX_Q) {
2701 					(void) ql_clean_outbound_rx_ring(
2702 					    rx_ring);
2703 					ql_enable_completion_interrupt(
2704 					    rx_ring->qlge, rx_ring->irq);
2705 				} else {
2706 					mutex_enter(&rx_ring->rx_lock);
2707 					mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2708 					mutex_exit(&rx_ring->rx_lock);
2709 					if (mp != NULL)
2710 						RX_UPSTREAM(rx_ring, mp);
2711 #ifdef QLGE_LOAD_UNLOAD
2712 					if (rx_ring->mac_flags ==
2713 					    QL_MAC_STOPPED)
2714 						cmn_err(CE_NOTE,
2715 						    "%s rx_indicate(%d) %d\n",
2716 						    __func__, i,
2717 						    rx_ring->rx_indicate);
2718 #endif
2719 				}
2720 				work_done++;
2721 			}
2722 		}
2723 	}
2724 
2725 	ql_enable_completion_interrupt(qlge, intr_ctx->intr);
2726 
2727 	return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
2728 }
2729 
2730 /*
2731  * MSI-X Multiple Vector Interrupt Handler for outbound (TX) completions.
2732  */
2733 /* ARGSUSED */
2734 static uint_t
2735 ql_msix_tx_isr(caddr_t arg1, caddr_t arg2)
2736 {
2737 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
2738 	qlge_t *qlge = rx_ring->qlge;
2739 	_NOTE(ARGUNUSED(arg2));
2740 
2741 	++qlge->rx_interrupts[rx_ring->cq_id];
2742 	(void) ql_clean_outbound_rx_ring(rx_ring);
2743 	ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
2744 
2745 	return (DDI_INTR_CLAIMED);
2746 }
2747 
2748 /*
2749  * Poll n_bytes of chained incoming packets
2750  */
2751 mblk_t *
2752 ql_ring_rx_poll(void *arg, int n_bytes)
2753 {
2754 	struct rx_ring *rx_ring = (struct rx_ring *)arg;
2755 	qlge_t *qlge = rx_ring->qlge;
2756 	mblk_t *mp = NULL;
2757 	uint32_t var;
2758 
2759 	ASSERT(n_bytes >= 0);
2760 	QL_PRINT(DBG_GLD, ("%s for ring(%d) to read max %d bytes\n",
2761 	    __func__, rx_ring->cq_id, n_bytes));
2762 
2763 	++qlge->rx_polls[rx_ring->cq_id];
2764 
2765 	if (n_bytes == 0)
2766 		return (mp);
2767 	mutex_enter(&rx_ring->rx_lock);
2768 	mp = ql_ring_rx(rx_ring, n_bytes);
2769 	mutex_exit(&rx_ring->rx_lock);
2770 
2771 	if ((rx_ring->cq_id == 0) && (mp == NULL)) {
2772 		var = ql_read_reg(qlge, REG_STATUS);
2773 		/*
2774 		 * Check for fatal error.
2775 		 */
2776 		if ((var & STATUS_FE) != 0) {
2777 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
2778 			var = ql_read_reg(qlge, REG_ERROR_STATUS);
2779 			cmn_err(CE_WARN, "Got fatal error %x.", var);
2780 			ql_wake_asic_reset_soft_intr(qlge);
2781 		}
2782 		/*
2783 		 * Check MPI processor activity.
2784 		 */
2785 		if ((var & STATUS_PI) != 0) {
2786 			/*
2787 			 * We've got an async event or mailbox completion.
2788 			 * Handle it and clear the source of the interrupt.
2789 			 */
2790 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
2791 			ql_do_mpi_intr(qlge);
2792 		}
2793 	}
2794 
2795 	return (mp);
2796 }
2797 
2798 /*
2799  * MSI-X Multiple Vector Interrupt Handler for inbound (RX) completions.
2800  */
2801 /* ARGSUSED */
2802 static uint_t
2803 ql_msix_rx_isr(caddr_t arg1, caddr_t arg2)
2804 {
2805 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
2806 	qlge_t *qlge = rx_ring->qlge;
2807 	mblk_t *mp;
2808 	_NOTE(ARGUNUSED(arg2));
2809 
2810 	QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
2811 
2812 	++qlge->rx_interrupts[rx_ring->cq_id];
2813 
2814 	mutex_enter(&rx_ring->rx_lock);
2815 	mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2816 	mutex_exit(&rx_ring->rx_lock);
2817 
2818 	if (mp != NULL)
2819 		RX_UPSTREAM(rx_ring, mp);
2820 
2821 	return (DDI_INTR_CLAIMED);
2822 }
2823 
2824 
2825 /*
2826  *
2827  * Allocate DMA Buffer for ioctl service
2828  *
2829  */
2830 static int
2831 ql_alloc_ioctl_dma_buf(qlge_t *qlge)
2832 {
2833 	uint64_t phy_addr;
2834 	uint64_t alloc_size;
2835 	ddi_dma_cookie_t dma_cookie;
2836 
2837 	alloc_size = qlge->ioctl_buf_dma_attr.mem_len =
2838 	    max(WCS_MPI_CODE_RAM_LENGTH, MEMC_MPI_RAM_LENGTH);
2839 	if (ql_alloc_phys(qlge->dip, &qlge->ioctl_buf_dma_attr.dma_handle,
2840 	    &ql_buf_acc_attr,
2841 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2842 	    &qlge->ioctl_buf_dma_attr.acc_handle,
2843 	    (size_t)alloc_size,  /* mem size */
2844 	    (size_t)0,  /* alignment */
2845 	    (caddr_t *)&qlge->ioctl_buf_dma_attr.vaddr,
2846 	    &dma_cookie) != 0) {
2847 		cmn_err(CE_WARN, "%s(%d): ioctl DMA allocation failed.",
2848 		    __func__, qlge->instance);
2849 		return (DDI_FAILURE);
2850 	}
2851 
2852 	phy_addr = dma_cookie.dmac_laddress;
2853 
2854 	if (qlge->ioctl_buf_dma_attr.vaddr == NULL) {
2855 		cmn_err(CE_WARN, "%s(%d): failed.", __func__, qlge->instance);
2856 		return (DDI_FAILURE);
2857 	}
2858 
2859 	qlge->ioctl_buf_dma_attr.dma_addr = phy_addr;
2860 
2861 	QL_PRINT(DBG_MBX, ("%s: ioctl_dma_buf_virt_addr = 0x%lx, "
2862 	    "phy_addr = 0x%lx\n",
2863 	    __func__, qlge->ioctl_buf_dma_attr.vaddr, phy_addr));
2864 
2865 	return (DDI_SUCCESS);
2866 }
2867 
2868 
2869 /*
2870  * Function to free physical memory.
2871  */
2872 static void
2873 ql_free_phys(ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *acc_handle)
2874 {
2875 	if (dma_handle != NULL) {
2876 		(void) ddi_dma_unbind_handle(*dma_handle);
2877 		if (acc_handle != NULL)
2878 			ddi_dma_mem_free(acc_handle);
2879 		ddi_dma_free_handle(dma_handle);
2880 	}
2881 }
2882 
2883 /*
2884  * Function to free ioctl dma buffer.
2885  */
2886 static void
2887 ql_free_ioctl_dma_buf(qlge_t *qlge)
2888 {
2889 	if (qlge->ioctl_buf_dma_attr.dma_handle != NULL) {
2890 		ql_free_phys(&qlge->ioctl_buf_dma_attr.dma_handle,
2891 		    &qlge->ioctl_buf_dma_attr.acc_handle);
2892 
2893 		qlge->ioctl_buf_dma_attr.vaddr = NULL;
2894 		qlge->ioctl_buf_dma_attr.dma_handle = NULL;
2895 	}
2896 }
2897 
2898 /*
2899  * Free shadow register space used for request and completion queues
2900  */
2901 static void
2902 ql_free_shadow_space(qlge_t *qlge)
2903 {
2904 	if (qlge->host_copy_shadow_dma_attr.dma_handle != NULL) {
2905 		ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
2906 		    &qlge->host_copy_shadow_dma_attr.acc_handle);
2907 		bzero(&qlge->host_copy_shadow_dma_attr,
2908 		    sizeof (qlge->host_copy_shadow_dma_attr));
2909 	}
2910 
2911 	if (qlge->buf_q_ptr_base_addr_dma_attr.dma_handle != NULL) {
2912 		ql_free_phys(&qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
2913 		    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle);
2914 		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
2915 		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
2916 	}
2917 }
2918 
2919 /*
2920  * Allocate shadow register space for request and completion queues
2921  */
2922 static int
2923 ql_alloc_shadow_space(qlge_t *qlge)
2924 {
2925 	ddi_dma_cookie_t dma_cookie;
2926 
2927 	if (ql_alloc_phys(qlge->dip,
2928 	    &qlge->host_copy_shadow_dma_attr.dma_handle,
2929 	    &ql_dev_acc_attr,
2930 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2931 	    &qlge->host_copy_shadow_dma_attr.acc_handle,
2932 	    (size_t)VM_PAGE_SIZE,  /* mem size */
2933 	    (size_t)4, /* 4 bytes alignment */
2934 	    (caddr_t *)&qlge->host_copy_shadow_dma_attr.vaddr,
2935 	    &dma_cookie) != 0) {
2936 		bzero(&qlge->host_copy_shadow_dma_attr,
2937 		    sizeof (qlge->host_copy_shadow_dma_attr));
2938 
2939 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory for "
2940 		    "response shadow registers", __func__, qlge->instance);
2941 		return (DDI_FAILURE);
2942 	}
2943 
2944 	qlge->host_copy_shadow_dma_attr.dma_addr = dma_cookie.dmac_laddress;
2945 
2946 	if (ql_alloc_phys(qlge->dip,
2947 	    &qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
2948 	    &ql_desc_acc_attr,
2949 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2950 	    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle,
2951 	    (size_t)VM_PAGE_SIZE,  /* mem size */
2952 	    (size_t)4, /* 4 bytes alignment */
2953 	    (caddr_t *)&qlge->buf_q_ptr_base_addr_dma_attr.vaddr,
2954 	    &dma_cookie) != 0) {
2955 		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
2956 		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
2957 
2958 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory "
2959 		    "for request shadow registers",
2960 		    __func__, qlge->instance);
2961 		goto err_wqp_sh_area;
2962 	}
2963 	qlge->buf_q_ptr_base_addr_dma_attr.dma_addr = dma_cookie.dmac_laddress;
2964 
2965 	return (DDI_SUCCESS);
2966 
2967 err_wqp_sh_area:
2968 	ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
2969 	    &qlge->host_copy_shadow_dma_attr.acc_handle);
2970 	bzero(&qlge->host_copy_shadow_dma_attr,
2971 	    sizeof (qlge->host_copy_shadow_dma_attr));
2972 
2973 	return (DDI_FAILURE);
2974 }
2975 
2976 /*
2977  * Initialize a tx ring
2978  */
2979 static void
2980 ql_init_tx_ring(struct tx_ring *tx_ring)
2981 {
2982 	int i;
2983 	struct ob_mac_iocb_req *mac_iocb_ptr = tx_ring->wq_dma.vaddr;
2984 	struct tx_ring_desc *tx_ring_desc = tx_ring->wq_desc;
2985 
2986 	for (i = 0; i < tx_ring->wq_len; i++) {
2987 		tx_ring_desc->index = i;
2988 		tx_ring_desc->queue_entry = mac_iocb_ptr;
2989 		mac_iocb_ptr++;
2990 		tx_ring_desc++;
2991 	}
2992 	tx_ring->tx_free_count = tx_ring->wq_len;
2993 	tx_ring->queue_stopped = 0;
2994 }
2995 
2996 /*
2997  * Free one tx ring resources
2998  */
2999 static void
3000 ql_free_tx_resources(struct tx_ring *tx_ring)
3001 {
3002 	struct tx_ring_desc *tx_ring_desc;
3003 	int i, j;
3004 
3005 	ql_free_phys(&tx_ring->wq_dma.dma_handle, &tx_ring->wq_dma.acc_handle);
3006 	bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
3007 
3008 	if (tx_ring->wq_desc != NULL) {
3009 		tx_ring_desc = tx_ring->wq_desc;
3010 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3011 			for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3012 				if (tx_ring_desc->tx_dma_handle[j]) {
3013 					/*
3014 					 * The unbinding will happen in tx
3015 					 * completion, here we just free the
3016 					 * handles
3017 					 */
3018 					ddi_dma_free_handle(
3019 					    &(tx_ring_desc->tx_dma_handle[j]));
3020 					tx_ring_desc->tx_dma_handle[j] = NULL;
3021 				}
3022 			}
3023 			if (tx_ring_desc->oal != NULL) {
3024 				tx_ring_desc->oal_dma_addr = 0;
3025 				tx_ring_desc->oal = NULL;
3026 				tx_ring_desc->copy_buffer = NULL;
3027 				tx_ring_desc->copy_buffer_dma_addr = 0;
3028 
3029 				ql_free_phys(&tx_ring_desc->oal_dma.dma_handle,
3030 				    &tx_ring_desc->oal_dma.acc_handle);
3031 			}
3032 		}
3033 		kmem_free(tx_ring->wq_desc,
3034 		    tx_ring->wq_len * sizeof (struct tx_ring_desc));
3035 		tx_ring->wq_desc = NULL;
3036 	}
3037 	/* free the wqicb struct */
3038 	if (tx_ring->wqicb_dma.dma_handle) {
3039 		ql_free_phys(&tx_ring->wqicb_dma.dma_handle,
3040 		    &tx_ring->wqicb_dma.acc_handle);
3041 		bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3042 	}
3043 }
3044 
3045 /*
3046  * Allocate work (request) queue memory and transmit
3047  * descriptors for this transmit ring
3048  */
3049 static int
3050 ql_alloc_tx_resources(qlge_t *qlge, struct tx_ring *tx_ring)
3051 {
3052 	ddi_dma_cookie_t dma_cookie;
3053 	struct tx_ring_desc *tx_ring_desc;
3054 	int i, j;
3055 	uint32_t length;
3056 
3057 	/* allocate dma buffers for obiocbs */
3058 	if (ql_alloc_phys(qlge->dip, &tx_ring->wq_dma.dma_handle,
3059 	    &ql_desc_acc_attr,
3060 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3061 	    &tx_ring->wq_dma.acc_handle,
3062 	    (size_t)tx_ring->wq_size,	/* mem size */
3063 	    (size_t)128, /* alignment:128 bytes boundary */
3064 	    (caddr_t *)&tx_ring->wq_dma.vaddr,
3065 	    &dma_cookie) != 0) {
3066 		bzero(&tx_ring->wq_dma, sizeof (&tx_ring->wq_dma));
3067 		cmn_err(CE_WARN, "%s(%d): reqQ allocation failed.",
3068 		    __func__, qlge->instance);
3069 		return (DDI_FAILURE);
3070 	}
3071 	tx_ring->wq_dma.dma_addr = dma_cookie.dmac_laddress;
3072 
3073 	tx_ring->wq_desc =
3074 	    kmem_zalloc(tx_ring->wq_len * sizeof (struct tx_ring_desc),
3075 	    KM_NOSLEEP);
3076 	if (tx_ring->wq_desc == NULL) {
3077 		goto err;
3078 	} else {
3079 		tx_ring_desc = tx_ring->wq_desc;
3080 		/*
3081 		 * Allocate a large enough structure to hold the following
3082 		 * 1. oal buffer MAX_SGELEMENTS * sizeof (oal_entry) bytes
3083 		 * 2. copy buffer of QL_MAX_COPY_LENGTH bytes
3084 		 */
3085 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3086 			length = (sizeof (struct oal_entry) * MAX_SG_ELEMENTS)
3087 			    + QL_MAX_COPY_LENGTH;
3088 
3089 			if (ql_alloc_phys(qlge->dip,
3090 			    &tx_ring_desc->oal_dma.dma_handle,
3091 			    &ql_desc_acc_attr,
3092 			    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3093 			    &tx_ring_desc->oal_dma.acc_handle,
3094 			    (size_t)length,	/* mem size */
3095 			    (size_t)0, /* default alignment:8 bytes boundary */
3096 			    (caddr_t *)&tx_ring_desc->oal_dma.vaddr,
3097 			    &dma_cookie) != 0) {
3098 				bzero(&tx_ring_desc->oal_dma,
3099 				    sizeof (tx_ring_desc->oal_dma));
3100 				cmn_err(CE_WARN, "%s(%d): reqQ tx buf &"
3101 				    "oal alloc failed.",
3102 				    __func__, qlge->instance);
3103 				return (DDI_FAILURE);
3104 			}
3105 
3106 			tx_ring_desc->oal = tx_ring_desc->oal_dma.vaddr;
3107 			tx_ring_desc->oal_dma_addr = dma_cookie.dmac_laddress;
3108 			tx_ring_desc->copy_buffer =
3109 			    (caddr_t)((uint8_t *)tx_ring_desc->oal
3110 			    + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3111 			tx_ring_desc->copy_buffer_dma_addr =
3112 			    (tx_ring_desc->oal_dma_addr
3113 			    + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3114 
3115 			/* Allocate dma handles for transmit buffers */
3116 			for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3117 				if (ddi_dma_alloc_handle(qlge->dip,
3118 				    &tx_mapping_dma_attr,
3119 				    DDI_DMA_DONTWAIT,
3120 				    0, &tx_ring_desc->tx_dma_handle[j])
3121 				    != DDI_SUCCESS) {
3122 					cmn_err(CE_WARN,
3123 					    "!%s: ddi_dma_alloc_handle: "
3124 					    "tx_dma_handle "
3125 					    "alloc failed", __func__);
3126 					goto err;
3127 				}
3128 			}
3129 		}
3130 	}
3131 	/* alloc a wqicb control block to load this tx ring to hw */
3132 	if (ql_alloc_phys(qlge->dip, &tx_ring->wqicb_dma.dma_handle,
3133 	    &ql_desc_acc_attr,
3134 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3135 	    &tx_ring->wqicb_dma.acc_handle,
3136 	    (size_t)sizeof (struct wqicb_t),	/* mem size */
3137 	    (size_t)0, /* alignment:128 bytes boundary */
3138 	    (caddr_t *)&tx_ring->wqicb_dma.vaddr,
3139 	    &dma_cookie) != 0) {
3140 		bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3141 		cmn_err(CE_WARN, "%s(%d): wqicb allocation failed.",
3142 		    __func__, qlge->instance);
3143 		return (DDI_FAILURE);
3144 	}
3145 	tx_ring->wqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3146 
3147 	return (DDI_SUCCESS);
3148 
3149 err:
3150 	ql_free_tx_resources(tx_ring);
3151 	return (DDI_FAILURE);
3152 }
3153 
3154 /*
3155  * Free one rx ring resources
3156  */
3157 static void
3158 ql_free_rx_resources(struct rx_ring *rx_ring)
3159 {
3160 	/* Free the small buffer queue. */
3161 	if (rx_ring->sbq_dma.dma_handle) {
3162 		ql_free_phys(&rx_ring->sbq_dma.dma_handle,
3163 		    &rx_ring->sbq_dma.acc_handle);
3164 		bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3165 	}
3166 
3167 	/* Free the small buffer queue control blocks. */
3168 	kmem_free(rx_ring->sbq_desc, rx_ring->sbq_len *
3169 	    sizeof (struct bq_desc));
3170 	rx_ring->sbq_desc = NULL;
3171 
3172 	/* Free the large buffer queue. */
3173 	if (rx_ring->lbq_dma.dma_handle) {
3174 		ql_free_phys(&rx_ring->lbq_dma.dma_handle,
3175 		    &rx_ring->lbq_dma.acc_handle);
3176 		bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3177 	}
3178 
3179 	/* Free the large buffer queue control blocks. */
3180 	kmem_free(rx_ring->lbq_desc, rx_ring->lbq_len *
3181 	    sizeof (struct bq_desc));
3182 	rx_ring->lbq_desc = NULL;
3183 
3184 	/* Free cqicb struct */
3185 	if (rx_ring->cqicb_dma.dma_handle) {
3186 		ql_free_phys(&rx_ring->cqicb_dma.dma_handle,
3187 		    &rx_ring->cqicb_dma.acc_handle);
3188 		bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3189 	}
3190 	/* Free the rx queue. */
3191 	if (rx_ring->cq_dma.dma_handle) {
3192 		ql_free_phys(&rx_ring->cq_dma.dma_handle,
3193 		    &rx_ring->cq_dma.acc_handle);
3194 		bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3195 	}
3196 }
3197 
3198 /*
3199  * Allocate queues and buffers for this completions queue based
3200  * on the values in the parameter structure.
3201  */
3202 static int
3203 ql_alloc_rx_resources(qlge_t *qlge, struct rx_ring *rx_ring)
3204 {
3205 	ddi_dma_cookie_t dma_cookie;
3206 
3207 	if (ql_alloc_phys(qlge->dip, &rx_ring->cq_dma.dma_handle,
3208 	    &ql_desc_acc_attr,
3209 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3210 	    &rx_ring->cq_dma.acc_handle,
3211 	    (size_t)rx_ring->cq_size,  /* mem size */
3212 	    (size_t)128, /* alignment:128 bytes boundary */
3213 	    (caddr_t *)&rx_ring->cq_dma.vaddr,
3214 	    &dma_cookie) != 0)	{
3215 		bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3216 		cmn_err(CE_WARN, "%s(%d): rspQ allocation failed.",
3217 		    __func__, qlge->instance);
3218 		return (DDI_FAILURE);
3219 	}
3220 	rx_ring->cq_dma.dma_addr = dma_cookie.dmac_laddress;
3221 
3222 	if (rx_ring->sbq_len != 0) {
3223 		/*
3224 		 * Allocate small buffer queue.
3225 		 */
3226 		if (ql_alloc_phys(qlge->dip, &rx_ring->sbq_dma.dma_handle,
3227 		    &ql_desc_acc_attr,
3228 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3229 		    &rx_ring->sbq_dma.acc_handle,
3230 		    (size_t)rx_ring->sbq_size,  /* mem size */
3231 		    (size_t)128, /* alignment:128 bytes boundary */
3232 		    (caddr_t *)&rx_ring->sbq_dma.vaddr,
3233 		    &dma_cookie) != 0) {
3234 			bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3235 			cmn_err(CE_WARN,
3236 			    "%s(%d): small buffer queue allocation failed.",
3237 			    __func__, qlge->instance);
3238 			goto err_mem;
3239 		}
3240 		rx_ring->sbq_dma.dma_addr = dma_cookie.dmac_laddress;
3241 
3242 		/*
3243 		 * Allocate small buffer queue control blocks.
3244 		 */
3245 		rx_ring->sbq_desc =
3246 		    kmem_zalloc(rx_ring->sbq_len * sizeof (struct bq_desc),
3247 		    KM_NOSLEEP);
3248 		if (rx_ring->sbq_desc == NULL) {
3249 			cmn_err(CE_WARN,
3250 			    "sbq control block allocation failed.");
3251 			goto err_mem;
3252 		}
3253 
3254 		ql_init_sbq_ring(rx_ring);
3255 	}
3256 
3257 	if (rx_ring->lbq_len != 0) {
3258 		/*
3259 		 * Allocate large buffer queue.
3260 		 */
3261 		if (ql_alloc_phys(qlge->dip, &rx_ring->lbq_dma.dma_handle,
3262 		    &ql_desc_acc_attr,
3263 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3264 		    &rx_ring->lbq_dma.acc_handle,
3265 		    (size_t)rx_ring->lbq_size,  /* mem size */
3266 		    (size_t)128, /* alignment:128 bytes boundary */
3267 		    (caddr_t *)&rx_ring->lbq_dma.vaddr,
3268 		    &dma_cookie) != 0) {
3269 			bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3270 			cmn_err(CE_WARN, "%s(%d): lbq allocation failed.",
3271 			    __func__, qlge->instance);
3272 			goto err_mem;
3273 		}
3274 		rx_ring->lbq_dma.dma_addr = dma_cookie.dmac_laddress;
3275 
3276 		/*
3277 		 * Allocate large buffer queue control blocks.
3278 		 */
3279 		rx_ring->lbq_desc =
3280 		    kmem_zalloc(rx_ring->lbq_len * sizeof (struct bq_desc),
3281 		    KM_NOSLEEP);
3282 		if (rx_ring->lbq_desc == NULL) {
3283 			cmn_err(CE_WARN,
3284 			    "Large buffer queue control block allocation "
3285 			    "failed.");
3286 			goto err_mem;
3287 		}
3288 		ql_init_lbq_ring(rx_ring);
3289 	}
3290 
3291 	if (ql_alloc_phys(qlge->dip, &rx_ring->cqicb_dma.dma_handle,
3292 	    &ql_desc_acc_attr,
3293 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3294 	    &rx_ring->cqicb_dma.acc_handle,
3295 	    (size_t)sizeof (struct cqicb_t),  /* mem size */
3296 	    (size_t)0, /* alignment:128 bytes boundary */
3297 	    (caddr_t *)&rx_ring->cqicb_dma.vaddr,
3298 	    &dma_cookie) != 0) {
3299 		bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3300 		cmn_err(CE_WARN, "%s(%d): cqicb allocation failed.",
3301 		    __func__, qlge->instance);
3302 		return (DDI_FAILURE);
3303 	}
3304 	rx_ring->cqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3305 
3306 	return (DDI_SUCCESS);
3307 
3308 err_mem:
3309 	ql_free_rx_resources(rx_ring);
3310 	return (DDI_FAILURE);
3311 }
3312 
3313 /*
3314  * Frees tx/rx queues memory resources
3315  */
3316 static void
3317 ql_free_mem_resources(qlge_t *qlge)
3318 {
3319 	int i;
3320 
3321 	if (qlge->ricb_dma.dma_handle) {
3322 		/* free the ricb struct */
3323 		ql_free_phys(&qlge->ricb_dma.dma_handle,
3324 		    &qlge->ricb_dma.acc_handle);
3325 		bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3326 	}
3327 
3328 	ql_free_rx_buffers(qlge);
3329 
3330 	ql_free_ioctl_dma_buf(qlge);
3331 
3332 	for (i = 0; i < qlge->tx_ring_count; i++)
3333 		ql_free_tx_resources(&qlge->tx_ring[i]);
3334 
3335 	for (i = 0; i < qlge->rx_ring_count; i++)
3336 		ql_free_rx_resources(&qlge->rx_ring[i]);
3337 
3338 	ql_free_shadow_space(qlge);
3339 }
3340 
3341 /*
3342  * Allocate buffer queues, large buffers and small buffers etc
3343  *
3344  * This API is called in the gld_attach member function. It is called
3345  * only once.  Later reset,reboot should not re-allocate all rings and
3346  * buffers.
3347  */
3348 static int
3349 ql_alloc_mem_resources(qlge_t *qlge)
3350 {
3351 	int i;
3352 	ddi_dma_cookie_t dma_cookie;
3353 
3354 	/* Allocate space for our shadow registers */
3355 	if (ql_alloc_shadow_space(qlge))
3356 		return (DDI_FAILURE);
3357 
3358 	for (i = 0; i < qlge->rx_ring_count; i++) {
3359 		if (ql_alloc_rx_resources(qlge, &qlge->rx_ring[i]) != 0) {
3360 			cmn_err(CE_WARN, "RX resource allocation failed.");
3361 			goto err_mem;
3362 		}
3363 	}
3364 	/* Allocate tx queue resources */
3365 	for (i = 0; i < qlge->tx_ring_count; i++) {
3366 		if (ql_alloc_tx_resources(qlge, &qlge->tx_ring[i]) != 0) {
3367 			cmn_err(CE_WARN, "Tx resource allocation failed.");
3368 			goto err_mem;
3369 		}
3370 	}
3371 
3372 	if (ql_alloc_ioctl_dma_buf(qlge) != DDI_SUCCESS) {
3373 		goto err_mem;
3374 	}
3375 
3376 	if (ql_alloc_rx_buffers(qlge) != DDI_SUCCESS) {
3377 		cmn_err(CE_WARN, "?%s(%d): ql_alloc_rx_buffers failed",
3378 		    __func__, qlge->instance);
3379 		goto err_mem;
3380 	}
3381 
3382 	qlge->sequence |= INIT_ALLOC_RX_BUF;
3383 
3384 	if (ql_alloc_phys(qlge->dip, &qlge->ricb_dma.dma_handle,
3385 	    &ql_desc_acc_attr,
3386 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3387 	    &qlge->ricb_dma.acc_handle,
3388 	    (size_t)sizeof (struct ricb),  /* mem size */
3389 	    (size_t)0, /* alignment:128 bytes boundary */
3390 	    (caddr_t *)&qlge->ricb_dma.vaddr,
3391 	    &dma_cookie) != 0) {
3392 		bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3393 		cmn_err(CE_WARN, "%s(%d): ricb allocation failed.",
3394 		    __func__, qlge->instance);
3395 		return (DDI_FAILURE);
3396 	}
3397 	qlge->ricb_dma.dma_addr = dma_cookie.dmac_laddress;
3398 
3399 	return (DDI_SUCCESS);
3400 
3401 err_mem:
3402 	ql_free_mem_resources(qlge);
3403 	return (DDI_FAILURE);
3404 }
3405 
3406 
3407 /*
3408  * Function used to allocate physical memory and zero it.
3409  */
3410 
3411 static int
3412 ql_alloc_phys(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
3413     ddi_device_acc_attr_t *device_acc_attr,
3414     uint_t dma_flags,
3415     ddi_acc_handle_t *acc_handle,
3416     size_t size,
3417     size_t alignment,
3418     caddr_t *vaddr,
3419     ddi_dma_cookie_t *dma_cookie)
3420 {
3421 	size_t rlen;
3422 	uint_t cnt;
3423 
3424 	/*
3425 	 * Workaround for SUN XMITS buffer must end and start on 8 byte
3426 	 * boundary. Else, hardware will overrun the buffer. Simple fix is
3427 	 * to make sure buffer has enough room for overrun.
3428 	 */
3429 	if (size & 7) {
3430 		size += 8 - (size & 7);
3431 	}
3432 
3433 	/* Adjust the alignment if requested */
3434 	if (alignment) {
3435 		dma_attr.dma_attr_align = alignment;
3436 	}
3437 
3438 	/*
3439 	 * Allocate DMA handle
3440 	 */
3441 	if (ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_SLEEP, NULL,
3442 	    dma_handle) != DDI_SUCCESS) {
3443 		cmn_err(CE_WARN, QL_BANG "%s:  ddi_dma_alloc_handle FAILED",
3444 		    __func__);
3445 		return (QL_ERROR);
3446 	}
3447 	/*
3448 	 * Allocate DMA memory
3449 	 */
3450 	if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
3451 	    dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING), DDI_DMA_SLEEP,
3452 	    NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
3453 		ddi_dma_free_handle(dma_handle);
3454 	}
3455 	if (vaddr == NULL) {
3456 		cmn_err(CE_WARN, "alloc_phys: Memory alloc Failed");
3457 		ddi_dma_free_handle(dma_handle);
3458 		return (QL_ERROR);
3459 	}
3460 
3461 	if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
3462 	    dma_flags, DDI_DMA_SLEEP, NULL,
3463 	    dma_cookie, &cnt) != DDI_DMA_MAPPED) {
3464 		ddi_dma_mem_free(acc_handle);
3465 
3466 		ddi_dma_free_handle(dma_handle);
3467 		cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
3468 		    __func__);
3469 		return (QL_ERROR);
3470 	}
3471 
3472 	if (cnt != 1) {
3473 
3474 		ql_free_phys(dma_handle, acc_handle);
3475 
3476 		cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
3477 		    __func__);
3478 		return (QL_ERROR);
3479 	}
3480 
3481 	bzero((caddr_t)*vaddr, rlen);
3482 
3483 	return (0);
3484 }
3485 
3486 /*
3487  * Add interrupt handlers based on the interrupt type.
3488  * Before adding the interrupt handlers, the interrupt vectors should
3489  * have been allocated, and the rx/tx rings have also been allocated.
3490  */
3491 static int
3492 ql_add_intr_handlers(qlge_t *qlge)
3493 {
3494 	int vector = 0;
3495 	int rc, i;
3496 	uint32_t value;
3497 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
3498 
3499 	switch (qlge->intr_type) {
3500 	case DDI_INTR_TYPE_MSIX:
3501 		/*
3502 		 * Add interrupt handler for rx and tx rings: vector[0 -
3503 		 * (qlge->intr_cnt -1)].
3504 		 */
3505 		value = 0;
3506 		for (vector = 0; vector < qlge->intr_cnt; vector++) {
3507 			ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3508 
3509 			/*
3510 			 * associate interrupt vector with interrupt handler
3511 			 */
3512 			rc = ddi_intr_add_handler(qlge->htable[vector],
3513 			    (ddi_intr_handler_t *)intr_ctx->handler,
3514 			    (void *)&qlge->rx_ring[vector], NULL);
3515 
3516 			if (rc != DDI_SUCCESS) {
3517 				QL_PRINT(DBG_INIT,
3518 				    ("Add rx interrupt handler failed. "
3519 				    "return: %d, vector: %d", rc, vector));
3520 				for (vector--; vector >= 0; vector--) {
3521 					(void) ddi_intr_remove_handler(
3522 					    qlge->htable[vector]);
3523 				}
3524 				return (DDI_FAILURE);
3525 			}
3526 			intr_ctx++;
3527 		}
3528 		break;
3529 
3530 	case DDI_INTR_TYPE_MSI:
3531 		/*
3532 		 * Add interrupt handlers for the only vector
3533 		 */
3534 		ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3535 
3536 		rc = ddi_intr_add_handler(qlge->htable[vector],
3537 		    ql_isr,
3538 		    (caddr_t)&qlge->rx_ring[0], NULL);
3539 
3540 		if (rc != DDI_SUCCESS) {
3541 			QL_PRINT(DBG_INIT,
3542 			    ("Add MSI interrupt handler failed: %d\n", rc));
3543 			return (DDI_FAILURE);
3544 		}
3545 		break;
3546 
3547 	case DDI_INTR_TYPE_FIXED:
3548 		/*
3549 		 * Add interrupt handlers for the only vector
3550 		 */
3551 		ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3552 
3553 		rc = ddi_intr_add_handler(qlge->htable[vector],
3554 		    ql_isr,
3555 		    (caddr_t)&qlge->rx_ring[0], NULL);
3556 
3557 		if (rc != DDI_SUCCESS) {
3558 			QL_PRINT(DBG_INIT,
3559 			    ("Add legacy interrupt handler failed: %d\n", rc));
3560 			return (DDI_FAILURE);
3561 		}
3562 		break;
3563 
3564 	default:
3565 		return (DDI_FAILURE);
3566 	}
3567 
3568 	/* Enable interrupts */
3569 	/* Block enable */
3570 	if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
3571 		QL_PRINT(DBG_INIT, ("Block enabling %d interrupt(s)\n",
3572 		    qlge->intr_cnt));
3573 		(void) ddi_intr_block_enable(qlge->htable, qlge->intr_cnt);
3574 	} else { /* Non block enable */
3575 		for (i = 0; i < qlge->intr_cnt; i++) {
3576 			QL_PRINT(DBG_INIT, ("Non Block Enabling interrupt %d\n,"
3577 			    "handle 0x%x\n", i, qlge->htable[i]));
3578 			(void) ddi_intr_enable(qlge->htable[i]);
3579 		}
3580 	}
3581 	qlge->sequence |= INIT_INTR_ENABLED;
3582 
3583 	return (DDI_SUCCESS);
3584 }
3585 
3586 /*
3587  * Here we build the intr_ctx structures based on
3588  * our rx_ring count and intr vector count.
3589  * The intr_ctx structure is used to hook each vector
3590  * to possibly different handlers.
3591  */
3592 static void
3593 ql_resolve_queues_to_irqs(qlge_t *qlge)
3594 {
3595 	int i = 0;
3596 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
3597 
3598 	if (qlge->intr_type == DDI_INTR_TYPE_MSIX) {
3599 		/*
3600 		 * Each rx_ring has its own intr_ctx since we
3601 		 * have separate vectors for each queue.
3602 		 * This only true when MSI-X is enabled.
3603 		 */
3604 		for (i = 0; i < qlge->intr_cnt; i++, intr_ctx++) {
3605 			qlge->rx_ring[i].irq = i;
3606 			intr_ctx->intr = i;
3607 			intr_ctx->qlge = qlge;
3608 
3609 			/*
3610 			 * We set up each vectors enable/disable/read bits so
3611 			 * there's no bit/mask calculations in critical path.
3612 			 */
3613 			intr_ctx->intr_en_mask =
3614 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3615 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
3616 			    INTR_EN_IHD | i;
3617 			intr_ctx->intr_dis_mask =
3618 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3619 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3620 			    INTR_EN_IHD | i;
3621 			intr_ctx->intr_read_mask =
3622 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3623 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
3624 			    | i;
3625 
3626 			if (i == 0) {
3627 				/*
3628 				 * Default queue handles bcast/mcast plus
3629 				 * async events.
3630 				 */
3631 				intr_ctx->handler = ql_isr;
3632 			} else if (qlge->rx_ring[i].type == TX_Q) {
3633 				/*
3634 				 * Outbound queue is for outbound completions
3635 				 * only.
3636 				 */
3637 				intr_ctx->handler = ql_msix_tx_isr;
3638 			} else {
3639 				/*
3640 				 * Inbound queues handle unicast frames only.
3641 				 */
3642 				intr_ctx->handler = ql_msix_rx_isr;
3643 			}
3644 		}
3645 	} else {
3646 		/*
3647 		 * All rx_rings use the same intr_ctx since
3648 		 * there is only one vector.
3649 		 */
3650 		intr_ctx->intr = 0;
3651 		intr_ctx->qlge = qlge;
3652 		/*
3653 		 * We set up each vectors enable/disable/read bits so
3654 		 * there's no bit/mask calculations in the critical path.
3655 		 */
3656 		intr_ctx->intr_en_mask =
3657 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3658 		    INTR_EN_TYPE_ENABLE;
3659 		intr_ctx->intr_dis_mask =
3660 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3661 		    INTR_EN_TYPE_DISABLE;
3662 		intr_ctx->intr_read_mask =
3663 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3664 		    INTR_EN_TYPE_READ;
3665 		/*
3666 		 * Single interrupt means one handler for all rings.
3667 		 */
3668 		intr_ctx->handler = ql_isr;
3669 		for (i = 0; i < qlge->rx_ring_count; i++)
3670 			qlge->rx_ring[i].irq = 0;
3671 	}
3672 }
3673 
3674 
3675 /*
3676  * Free allocated interrupts.
3677  */
3678 static void
3679 ql_free_irq_vectors(qlge_t *qlge)
3680 {
3681 	int i;
3682 	int rc;
3683 
3684 	if (qlge->sequence & INIT_INTR_ENABLED) {
3685 		/* Disable all interrupts */
3686 		if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
3687 			/* Call ddi_intr_block_disable() */
3688 			(void) ddi_intr_block_disable(qlge->htable,
3689 			    qlge->intr_cnt);
3690 		} else {
3691 			for (i = 0; i < qlge->intr_cnt; i++) {
3692 				(void) ddi_intr_disable(qlge->htable[i]);
3693 			}
3694 		}
3695 
3696 		qlge->sequence &= ~INIT_INTR_ENABLED;
3697 	}
3698 
3699 	for (i = 0; i < qlge->intr_cnt; i++) {
3700 
3701 		if (qlge->sequence & INIT_ADD_INTERRUPT)
3702 			(void) ddi_intr_remove_handler(qlge->htable[i]);
3703 
3704 		if (qlge->sequence & INIT_INTR_ALLOC) {
3705 			rc = ddi_intr_free(qlge->htable[i]);
3706 			if (rc != DDI_SUCCESS) {
3707 				/* EMPTY */
3708 				QL_PRINT(DBG_INIT, ("Free intr failed: %d",
3709 				    rc));
3710 			}
3711 		}
3712 	}
3713 	if (qlge->sequence & INIT_INTR_ALLOC)
3714 		qlge->sequence &= ~INIT_INTR_ALLOC;
3715 
3716 	if (qlge->sequence & INIT_ADD_INTERRUPT)
3717 		qlge->sequence &= ~INIT_ADD_INTERRUPT;
3718 
3719 	if (qlge->htable) {
3720 		kmem_free(qlge->htable, qlge->intr_size);
3721 		qlge->htable = NULL;
3722 	}
3723 }
3724 
3725 /*
3726  * Allocate interrupt vectors
3727  * For legacy and MSI, only 1 handle is needed.
3728  * For MSI-X, if fewer than 2 vectors are available, return failure.
3729  * Upon success, this maps the vectors to rx and tx rings for
3730  * interrupts.
3731  */
3732 static int
3733 ql_request_irq_vectors(qlge_t *qlge, int intr_type)
3734 {
3735 	dev_info_t *devinfo;
3736 	uint32_t request, orig;
3737 	int count, avail, actual;
3738 	int minimum;
3739 	int rc;
3740 
3741 	devinfo = qlge->dip;
3742 
3743 	switch (intr_type) {
3744 	case DDI_INTR_TYPE_FIXED:
3745 		request = 1;	/* Request 1 legacy interrupt handle */
3746 		minimum = 1;
3747 		QL_PRINT(DBG_INIT, ("interrupt type: legacy\n"));
3748 		break;
3749 
3750 	case DDI_INTR_TYPE_MSI:
3751 		request = 1;	/* Request 1 MSI interrupt handle */
3752 		minimum = 1;
3753 		QL_PRINT(DBG_INIT, ("interrupt type: MSI\n"));
3754 		break;
3755 
3756 	case DDI_INTR_TYPE_MSIX:
3757 		/*
3758 		 * Ideal number of vectors for the adapter is
3759 		 * # rss rings + tx completion rings for default completion
3760 		 * queue.
3761 		 */
3762 		request = qlge->rx_ring_count;
3763 
3764 		orig = request;
3765 		if (request > (MAX_RX_RINGS))
3766 			request = MAX_RX_RINGS;
3767 		minimum = 2;
3768 		QL_PRINT(DBG_INIT, ("interrupt type: MSI-X\n"));
3769 		break;
3770 
3771 	default:
3772 		QL_PRINT(DBG_INIT, ("Invalid parameter\n"));
3773 		return (DDI_FAILURE);
3774 	}
3775 
3776 	QL_PRINT(DBG_INIT, ("interrupt handles requested: %d  minimum: %d\n",
3777 	    request, minimum));
3778 
3779 	/*
3780 	 * Get number of supported interrupts
3781 	 */
3782 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
3783 	if ((rc != DDI_SUCCESS) || (count < minimum)) {
3784 		QL_PRINT(DBG_INIT, ("Get interrupt number failed. Return: %d, "
3785 		    "count: %d\n", rc, count));
3786 		return (DDI_FAILURE);
3787 	}
3788 	QL_PRINT(DBG_INIT, ("interrupts supported: %d\n", count));
3789 
3790 	/*
3791 	 * Get number of available interrupts
3792 	 */
3793 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
3794 	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
3795 		QL_PRINT(DBG_INIT,
3796 		    ("Get interrupt available number failed. Return:"
3797 		    " %d, available: %d\n", rc, avail));
3798 		return (DDI_FAILURE);
3799 	}
3800 	QL_PRINT(DBG_INIT, ("interrupts available: %d\n", avail));
3801 
3802 	if (avail < request) {
3803 		QL_PRINT(DBG_INIT, ("Request %d handles, %d available\n",
3804 		    request, avail));
3805 		request = avail;
3806 	}
3807 
3808 	actual = 0;
3809 	qlge->intr_cnt = 0;
3810 
3811 	/*
3812 	 * Allocate an array of interrupt handles
3813 	 */
3814 	qlge->intr_size = (size_t)(request * sizeof (ddi_intr_handle_t));
3815 	qlge->htable = kmem_alloc(qlge->intr_size, KM_SLEEP);
3816 
3817 	rc = ddi_intr_alloc(devinfo, qlge->htable, intr_type, 0,
3818 	    (int)request, &actual, DDI_INTR_ALLOC_NORMAL);
3819 	if (rc != DDI_SUCCESS) {
3820 		cmn_err(CE_WARN, "%s(%d) Allocate interrupts failed. return:"
3821 		    " %d, request: %d, actual: %d",
3822 		    __func__, qlge->instance, rc, request, actual);
3823 		goto ql_intr_alloc_fail;
3824 	}
3825 	qlge->intr_cnt = actual;
3826 
3827 	qlge->sequence |= INIT_INTR_ALLOC;
3828 
3829 	/*
3830 	 * If the actual number of vectors is less than the minumum
3831 	 * then fail.
3832 	 */
3833 	if (actual < minimum) {
3834 		cmn_err(CE_WARN,
3835 		    "Insufficient interrupt handles available: %d", actual);
3836 		goto ql_intr_alloc_fail;
3837 	}
3838 
3839 	/*
3840 	 * For MSI-X, actual might force us to reduce number of tx & rx rings
3841 	 */
3842 	if ((intr_type == DDI_INTR_TYPE_MSIX) && (orig > actual)) {
3843 		if (actual < MAX_RX_RINGS) {
3844 			qlge->tx_ring_count = 1;
3845 			qlge->rss_ring_count = actual - 1;
3846 			qlge->rx_ring_count = qlge->tx_ring_count +
3847 			    qlge->rss_ring_count;
3848 		}
3849 	}
3850 	/*
3851 	 * Get priority for first vector, assume remaining are all the same
3852 	 */
3853 	rc = ddi_intr_get_pri(qlge->htable[0], &qlge->intr_pri);
3854 	if (rc != DDI_SUCCESS) {
3855 		QL_PRINT(DBG_INIT, ("Get interrupt priority failed: %d\n", rc));
3856 		goto ql_intr_alloc_fail;
3857 	}
3858 
3859 	rc = ddi_intr_get_cap(qlge->htable[0], &qlge->intr_cap);
3860 	if (rc != DDI_SUCCESS) {
3861 		QL_PRINT(DBG_INIT, ("Get interrupt cap failed: %d\n", rc));
3862 		goto ql_intr_alloc_fail;
3863 	}
3864 
3865 	qlge->intr_type = intr_type;
3866 
3867 	return (DDI_SUCCESS);
3868 
3869 ql_intr_alloc_fail:
3870 	ql_free_irq_vectors(qlge);
3871 
3872 	return (DDI_FAILURE);
3873 }
3874 
3875 /*
3876  * Allocate interrupt vector(s) for one of the following interrupt types, MSI-X,
3877  * MSI or Legacy. In MSI and Legacy modes we only support a single receive and
3878  * transmit queue.
3879  */
3880 int
3881 ql_alloc_irqs(qlge_t *qlge)
3882 {
3883 	int intr_types;
3884 	int rval;
3885 
3886 	/*
3887 	 * Get supported interrupt types
3888 	 */
3889 	if (ddi_intr_get_supported_types(qlge->dip, &intr_types)
3890 	    != DDI_SUCCESS) {
3891 		cmn_err(CE_WARN, "%s(%d):ddi_intr_get_supported_types failed",
3892 		    __func__, qlge->instance);
3893 
3894 		return (DDI_FAILURE);
3895 	}
3896 
3897 	QL_PRINT(DBG_INIT, ("%s(%d) Interrupt types supported %d\n",
3898 	    __func__, qlge->instance, intr_types));
3899 
3900 	/* Install MSI-X interrupts */
3901 	if ((intr_types & DDI_INTR_TYPE_MSIX) != 0) {
3902 		QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt supported %d\n",
3903 		    __func__, qlge->instance, intr_types));
3904 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSIX);
3905 		if (rval == DDI_SUCCESS) {
3906 			return (rval);
3907 		}
3908 		QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt allocation failed,"
3909 		    " trying MSI interrupts ...\n", __func__, qlge->instance));
3910 	}
3911 
3912 	/*
3913 	 * We will have 2 completion queues in MSI / Legacy mode,
3914 	 * Queue 0 for default completions
3915 	 * Queue 1 for transmit completions
3916 	 */
3917 	qlge->rss_ring_count = 1; /* Default completion queue (0) for all */
3918 	qlge->tx_ring_count = 1; /* Single tx completion queue */
3919 	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
3920 
3921 	QL_PRINT(DBG_INIT, ("%s(%d) Falling back to single completion queue \n",
3922 	    __func__, qlge->instance));
3923 	/*
3924 	 * Add the h/w interrupt handler and initialise mutexes
3925 	 */
3926 	rval = DDI_FAILURE;
3927 
3928 	/*
3929 	 * If OS supports MSIX interrupt but fails to allocate, then try
3930 	 * MSI interrupt. If MSI interrupt allocation fails also, then roll
3931 	 * back to fixed interrupt.
3932 	 */
3933 	if (intr_types & DDI_INTR_TYPE_MSI) {
3934 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSI);
3935 		if (rval == DDI_SUCCESS) {
3936 			qlge->intr_type = DDI_INTR_TYPE_MSI;
3937 			QL_PRINT(DBG_INIT, ("%s(%d) use MSI Interrupt \n",
3938 			    __func__, qlge->instance));
3939 		}
3940 	}
3941 
3942 	/* Try Fixed interrupt Legacy mode */
3943 	if (rval != DDI_SUCCESS) {
3944 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_FIXED);
3945 		if (rval != DDI_SUCCESS) {
3946 			cmn_err(CE_WARN, "%s(%d):Legacy mode interrupt "
3947 			    "allocation failed",
3948 			    __func__, qlge->instance);
3949 		} else {
3950 			qlge->intr_type = DDI_INTR_TYPE_FIXED;
3951 			QL_PRINT(DBG_INIT, ("%s(%d) use Fixed Interrupt \n",
3952 			    __func__, qlge->instance));
3953 		}
3954 	}
3955 
3956 	return (rval);
3957 }
3958 
3959 static void
3960 ql_free_rx_tx_locks(qlge_t *qlge)
3961 {
3962 	int i;
3963 	struct rx_ring *rx_ring;
3964 	struct tx_ring *tx_ring;
3965 
3966 	for (i = 0; i < qlge->tx_ring_count; i++) {
3967 		tx_ring = &qlge->tx_ring[i];
3968 		mutex_destroy(&tx_ring->tx_lock);
3969 	}
3970 
3971 	for (i = 0; i < qlge->rx_ring_count; i++) {
3972 		rx_ring = &qlge->rx_ring[i];
3973 		mutex_destroy(&rx_ring->rx_lock);
3974 		mutex_destroy(&rx_ring->sbq_lock);
3975 		mutex_destroy(&rx_ring->lbq_lock);
3976 	}
3977 }
3978 
3979 /*
3980  * Frees all resources allocated during attach.
3981  *
3982  * Input:
3983  * dip = pointer to device information structure.
3984  * sequence = bits indicating resources to free.
3985  *
3986  * Context:
3987  * Kernel context.
3988  */
3989 static void
3990 ql_free_resources(dev_info_t *dip, qlge_t *qlge)
3991 {
3992 
3993 	/* Disable driver timer */
3994 	ql_stop_timer(qlge);
3995 
3996 	if (qlge->sequence & INIT_MAC_REGISTERED) {
3997 		(void) mac_unregister(qlge->mh);
3998 		qlge->sequence &= ~INIT_MAC_REGISTERED;
3999 	}
4000 
4001 	if (qlge->sequence & INIT_MAC_ALLOC) {
4002 		/* Nothing to do, macp is already freed */
4003 		qlge->sequence &= ~INIT_MAC_ALLOC;
4004 	}
4005 
4006 	if (qlge->sequence & INIT_PCI_CONFIG_SETUP) {
4007 		pci_config_teardown(&qlge->pci_handle);
4008 		qlge->sequence &= ~INIT_PCI_CONFIG_SETUP;
4009 	}
4010 
4011 	if (qlge->sequence & INIT_ADD_INTERRUPT) {
4012 		ql_free_irq_vectors(qlge);
4013 		qlge->sequence &= ~INIT_ADD_INTERRUPT;
4014 	}
4015 
4016 	if (qlge->sequence & INIT_ADD_SOFT_INTERRUPT) {
4017 		(void) ddi_intr_remove_softint(qlge->mpi_event_intr_hdl);
4018 		(void) ddi_intr_remove_softint(qlge->mpi_reset_intr_hdl);
4019 		(void) ddi_intr_remove_softint(qlge->asic_reset_intr_hdl);
4020 		qlge->sequence &= ~INIT_ADD_SOFT_INTERRUPT;
4021 	}
4022 
4023 	if (qlge->sequence & INIT_KSTATS) {
4024 		ql_fini_kstats(qlge);
4025 		qlge->sequence &= ~INIT_KSTATS;
4026 	}
4027 
4028 	if (qlge->sequence & INIT_MUTEX) {
4029 		mutex_destroy(&qlge->gen_mutex);
4030 		mutex_destroy(&qlge->hw_mutex);
4031 		mutex_destroy(&qlge->mbx_mutex);
4032 		cv_destroy(&qlge->cv_mbx_intr);
4033 		qlge->sequence &= ~INIT_MUTEX;
4034 	}
4035 
4036 	if (qlge->sequence & INIT_LOCKS_CREATED) {
4037 		ql_free_rx_tx_locks(qlge);
4038 		qlge->sequence &= ~INIT_LOCKS_CREATED;
4039 	}
4040 
4041 	if (qlge->sequence & INIT_MEMORY_ALLOC) {
4042 		ql_free_mem_resources(qlge);
4043 		qlge->sequence &= ~INIT_MEMORY_ALLOC;
4044 	}
4045 
4046 	if (qlge->sequence & INIT_REGS_SETUP) {
4047 		ddi_regs_map_free(&qlge->dev_handle);
4048 		qlge->sequence &= ~INIT_REGS_SETUP;
4049 	}
4050 
4051 	if (qlge->sequence & INIT_DOORBELL_REGS_SETUP) {
4052 		ddi_regs_map_free(&qlge->dev_doorbell_reg_handle);
4053 		qlge->sequence &= ~INIT_DOORBELL_REGS_SETUP;
4054 	}
4055 
4056 	/*
4057 	 * free flash flt table that allocated in attach stage
4058 	 */
4059 	if ((qlge->flt.ql_flt_entry_ptr != NULL)&&
4060 	    (qlge->flt.header.length != 0)) {
4061 		kmem_free(qlge->flt.ql_flt_entry_ptr, qlge->flt.header.length);
4062 		qlge->flt.ql_flt_entry_ptr = NULL;
4063 	}
4064 
4065 	/* finally, free qlge structure */
4066 	if (qlge->sequence & INIT_SOFTSTATE_ALLOC) {
4067 		kmem_free(qlge, sizeof (qlge_t));
4068 	}
4069 
4070 	ddi_prop_remove_all(dip);
4071 	ddi_set_driver_private(dip, NULL);
4072 
4073 }
4074 
4075 /*
4076  * Set promiscuous mode of the driver
4077  * Caller must catch HW_LOCK
4078  */
4079 void
4080 ql_set_promiscuous(qlge_t *qlge, int mode)
4081 {
4082 	if (mode) {
4083 		(void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4084 		    RT_IDX_VALID, 1);
4085 	} else {
4086 		(void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4087 		    RT_IDX_VALID, 0);
4088 	}
4089 }
4090 /*
4091  * Write 'data1' to Mac Protocol Address Index Register and
4092  * 'data2' to Mac Protocol Address Data Register
4093  *  Assuming that the Mac Protocol semaphore lock has been acquired.
4094  */
4095 static int
4096 ql_write_mac_proto_regs(qlge_t *qlge, uint32_t data1, uint32_t data2)
4097 {
4098 	int return_value = DDI_SUCCESS;
4099 
4100 	if (ql_wait_reg_bit(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
4101 	    MAC_PROTOCOL_ADDRESS_INDEX_MW, BIT_SET, 5) != DDI_SUCCESS) {
4102 		cmn_err(CE_WARN, "Wait for MAC_PROTOCOL Address Register "
4103 		    "timeout.");
4104 		return_value = DDI_FAILURE;
4105 		goto out;
4106 	}
4107 	ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX /* A8 */, data1);
4108 	ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA /* 0xAC */, data2);
4109 out:
4110 	return (return_value);
4111 }
4112 /*
4113  * Enable the 'index'ed multicast address in the host memory's multicast_list
4114  */
4115 int
4116 ql_add_multicast_address(qlge_t *qlge, int index)
4117 {
4118 	int rtn_val = DDI_FAILURE;
4119 	uint32_t offset;
4120 	uint32_t value1, value2;
4121 
4122 	/* Acquire the required semaphore */
4123 	if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4124 		return (rtn_val);
4125 	}
4126 
4127 	/* Program Offset0 - lower 32 bits of the MAC address */
4128 	offset = 0;
4129 	value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4130 	    (index << 4) | offset;
4131 	value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4132 	    |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4133 	    |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4134 	    |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4135 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS)
4136 		goto out;
4137 
4138 	/* Program offset1: upper 16 bits of the MAC address */
4139 	offset = 1;
4140 	value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4141 	    (index<<4) | offset;
4142 	value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[0] << 8)
4143 	    |qlge->multicast_list[index].addr.ether_addr_octet[1]);
4144 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4145 		goto out;
4146 	}
4147 	rtn_val = DDI_SUCCESS;
4148 out:
4149 	ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4150 	return (rtn_val);
4151 }
4152 
4153 /*
4154  * Disable the 'index'ed multicast address in the host memory's multicast_list
4155  */
4156 int
4157 ql_remove_multicast_address(qlge_t *qlge, int index)
4158 {
4159 	int rtn_val = DDI_FAILURE;
4160 	uint32_t offset;
4161 	uint32_t value1, value2;
4162 
4163 	/* Acquire the required semaphore */
4164 	if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4165 		return (rtn_val);
4166 	}
4167 	/* Program Offset0 - lower 32 bits of the MAC address */
4168 	offset = 0;
4169 	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4170 	value2 =
4171 	    ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4172 	    |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4173 	    |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4174 	    |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4175 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4176 		goto out;
4177 	}
4178 	/* Program offset1: upper 16 bits of the MAC address */
4179 	offset = 1;
4180 	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4181 	value2 = 0;
4182 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4183 		goto out;
4184 	}
4185 	rtn_val = DDI_SUCCESS;
4186 out:
4187 	ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4188 	return (rtn_val);
4189 }
4190 
4191 /*
4192  * Add a new multicast address to the list of supported list
4193  * This API is called after OS called gld_set_multicast (GLDv2)
4194  * or m_multicst (GLDv3)
4195  *
4196  * Restriction:
4197  * The number of maximum multicast address is limited by hardware.
4198  */
4199 int
4200 ql_add_to_multicast_list(qlge_t *qlge, uint8_t *ep)
4201 {
4202 	uint32_t index = qlge->multicast_list_count;
4203 	int rval = DDI_SUCCESS;
4204 	int status;
4205 
4206 	if ((ep[0] & 01) == 0) {
4207 		rval = EINVAL;
4208 		goto exit;
4209 	}
4210 
4211 	/* if there is an availabe space in multicast_list, then add it */
4212 	if (index < MAX_MULTICAST_LIST_SIZE) {
4213 		bcopy(ep, qlge->multicast_list[index].addr.ether_addr_octet,
4214 		    ETHERADDRL);
4215 		/* increment the total number of addresses in multicast list */
4216 		(void) ql_add_multicast_address(qlge, index);
4217 		qlge->multicast_list_count++;
4218 		QL_PRINT(DBG_GLD,
4219 		    ("%s(%d): added to index of multicast list= 0x%x, "
4220 		    "total %d\n", __func__, qlge->instance, index,
4221 		    qlge->multicast_list_count));
4222 
4223 		if (index > MAX_MULTICAST_HW_SIZE) {
4224 			if (!qlge->multicast_promisc) {
4225 				status = ql_set_routing_reg(qlge,
4226 				    RT_IDX_ALLMULTI_SLOT,
4227 				    RT_IDX_MCAST, 1);
4228 				if (status) {
4229 					cmn_err(CE_WARN,
4230 					    "Failed to init routing reg "
4231 					    "for mcast promisc mode.");
4232 					rval = ENOENT;
4233 					goto exit;
4234 				}
4235 				qlge->multicast_promisc = B_TRUE;
4236 			}
4237 		}
4238 	} else {
4239 		rval = ENOENT;
4240 	}
4241 exit:
4242 	return (rval);
4243 }
4244 
4245 /*
4246  * Remove an old multicast address from the list of supported multicast
4247  * addresses. This API is called after OS called gld_set_multicast (GLDv2)
4248  * or m_multicst (GLDv3)
4249  * The number of maximum multicast address is limited by hardware.
4250  */
4251 int
4252 ql_remove_from_multicast_list(qlge_t *qlge, uint8_t *ep)
4253 {
4254 	uint32_t total = qlge->multicast_list_count;
4255 	int i = 0;
4256 	int rmv_index = 0;
4257 	size_t length = sizeof (ql_multicast_addr);
4258 	int status;
4259 
4260 	for (i = 0; i < total; i++) {
4261 		if (bcmp(ep, &qlge->multicast_list[i].addr, ETHERADDRL) != 0) {
4262 			continue;
4263 		}
4264 
4265 		rmv_index = i;
4266 		/* block move the reset of other multicast address forward */
4267 		length = ((total -1) -i) * sizeof (ql_multicast_addr);
4268 		if (length > 0) {
4269 			bcopy(&qlge->multicast_list[i+1],
4270 			    &qlge->multicast_list[i], length);
4271 		}
4272 		qlge->multicast_list_count--;
4273 		if (qlge->multicast_list_count <= MAX_MULTICAST_HW_SIZE) {
4274 			/*
4275 			 * there is a deletion in multicast list table,
4276 			 * re-enable them
4277 			 */
4278 			for (i = rmv_index; i < qlge->multicast_list_count;
4279 			    i++) {
4280 				(void) ql_add_multicast_address(qlge, i);
4281 			}
4282 			/* and disable the last one */
4283 			(void) ql_remove_multicast_address(qlge, i);
4284 
4285 			/* disable multicast promiscuous mode */
4286 			if (qlge->multicast_promisc) {
4287 				status = ql_set_routing_reg(qlge,
4288 				    RT_IDX_ALLMULTI_SLOT,
4289 				    RT_IDX_MCAST, 0);
4290 				if (status) {
4291 					cmn_err(CE_WARN,
4292 					    "Failed to init routing reg for "
4293 					    "mcast promisc mode.");
4294 					goto exit;
4295 				}
4296 				/* write to config register */
4297 				qlge->multicast_promisc = B_FALSE;
4298 			}
4299 		}
4300 		break;
4301 	}
4302 exit:
4303 	return (DDI_SUCCESS);
4304 }
4305 
4306 /*
4307  * Read a XGMAC register
4308  */
4309 int
4310 ql_read_xgmac_reg(qlge_t *qlge, uint32_t addr, uint32_t *val)
4311 {
4312 	int rtn_val = DDI_FAILURE;
4313 
4314 	/* wait for XGMAC Address register RDY bit set */
4315 	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4316 	    BIT_SET, 10) != DDI_SUCCESS) {
4317 		goto out;
4318 	}
4319 	/* start rx transaction */
4320 	ql_write_reg(qlge, REG_XGMAC_ADDRESS, addr|XGMAC_ADDRESS_READ_TRANSACT);
4321 
4322 	/*
4323 	 * wait for XGMAC Address register RDY bit set,
4324 	 * which indicates data is ready
4325 	 */
4326 	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4327 	    BIT_SET, 10) != DDI_SUCCESS) {
4328 		goto out;
4329 	}
4330 	/* read data from XGAMC_DATA register */
4331 	*val = ql_read_reg(qlge, REG_XGMAC_DATA);
4332 	rtn_val = DDI_SUCCESS;
4333 out:
4334 	return (rtn_val);
4335 }
4336 
4337 /*
4338  * Implement checksum offload for IPv4 IP packets
4339  */
4340 static void
4341 ql_hw_csum_setup(qlge_t *qlge, uint32_t pflags, caddr_t bp,
4342     struct ob_mac_iocb_req *mac_iocb_ptr)
4343 {
4344 	struct ip *iphdr = NULL;
4345 	struct ether_header *ethhdr;
4346 	struct ether_vlan_header *ethvhdr;
4347 	struct tcphdr *tcp_hdr;
4348 	uint32_t etherType;
4349 	int mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
4350 	int ip_hdr_off, tcp_udp_hdr_off, hdr_off;
4351 
4352 	ethhdr  = (struct ether_header *)((void *)bp);
4353 	ethvhdr = (struct ether_vlan_header *)((void *)bp);
4354 	/* Is this vlan packet? */
4355 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
4356 		mac_hdr_len = sizeof (struct ether_vlan_header);
4357 		etherType = ntohs(ethvhdr->ether_type);
4358 	} else {
4359 		mac_hdr_len = sizeof (struct ether_header);
4360 		etherType = ntohs(ethhdr->ether_type);
4361 	}
4362 	/* Is this IPv4 or IPv6 packet? */
4363 	if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len)) ==
4364 	    IPV4_VERSION) {
4365 		if (etherType == ETHERTYPE_IP /* 0800 */) {
4366 			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
4367 		} else {
4368 			/* EMPTY */
4369 			QL_PRINT(DBG_TX,
4370 			    ("%s(%d) : IPv4 None IP packet type 0x%x\n",
4371 			    __func__, qlge->instance, etherType));
4372 		}
4373 	}
4374 	/* ipV4 packets */
4375 	if (iphdr != NULL) {
4376 
4377 		ip_hdr_len = IPH_HDR_LENGTH(iphdr);
4378 		QL_PRINT(DBG_TX,
4379 		    ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH:"
4380 		    " %d bytes \n", __func__, qlge->instance, ip_hdr_len));
4381 
4382 		ip_hdr_off = mac_hdr_len;
4383 		QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
4384 		    __func__, qlge->instance, ip_hdr_len));
4385 
4386 		mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
4387 		    OB_MAC_IOCB_REQ_IPv4);
4388 
4389 		if (pflags & HCK_IPV4_HDRCKSUM) {
4390 			QL_PRINT(DBG_TX, ("%s(%d) : Do IPv4 header checksum\n",
4391 			    __func__, qlge->instance));
4392 			mac_iocb_ptr->opcode = OPCODE_OB_MAC_OFFLOAD_IOCB;
4393 			mac_iocb_ptr->flag2 = (uint8_t)(mac_iocb_ptr->flag2 |
4394 			    OB_MAC_IOCB_REQ_IC);
4395 			iphdr->ip_sum = 0;
4396 			mac_iocb_ptr->hdr_off = (uint16_t)
4397 			    cpu_to_le16(ip_hdr_off);
4398 		}
4399 		if (pflags & HCK_FULLCKSUM) {
4400 			if (iphdr->ip_p == IPPROTO_TCP) {
4401 				tcp_hdr =
4402 				    (struct tcphdr *)(void *)
4403 				    ((uint8_t *)(void *)iphdr + ip_hdr_len);
4404 				QL_PRINT(DBG_TX, ("%s(%d) : Do TCP checksum\n",
4405 				    __func__, qlge->instance));
4406 				mac_iocb_ptr->opcode =
4407 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4408 				mac_iocb_ptr->flag1 =
4409 				    (uint8_t)(mac_iocb_ptr->flag1 |
4410 				    OB_MAC_IOCB_REQ_TC);
4411 				mac_iocb_ptr->flag2 =
4412 				    (uint8_t)(mac_iocb_ptr->flag2 |
4413 				    OB_MAC_IOCB_REQ_IC);
4414 				iphdr->ip_sum = 0;
4415 				tcp_udp_hdr_off = mac_hdr_len+ip_hdr_len;
4416 				tcp_udp_hdr_len = tcp_hdr->th_off*4;
4417 				QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
4418 				    __func__, qlge->instance, tcp_udp_hdr_len));
4419 				hdr_off = ip_hdr_off;
4420 				tcp_udp_hdr_off <<= 6;
4421 				hdr_off |= tcp_udp_hdr_off;
4422 				mac_iocb_ptr->hdr_off =
4423 				    (uint16_t)cpu_to_le16(hdr_off);
4424 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4425 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
4426 				    tcp_udp_hdr_len);
4427 
4428 				/*
4429 				 * if the chip is unable to do pseudo header
4430 				 * cksum calculation, do it in then put the
4431 				 * result to the data passed to the chip
4432 				 */
4433 				if (qlge->cfg_flags &
4434 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4435 					ql_pseudo_cksum((uint8_t *)iphdr);
4436 				}
4437 			} else if (iphdr->ip_p == IPPROTO_UDP) {
4438 				QL_PRINT(DBG_TX, ("%s(%d) : Do UDP checksum\n",
4439 				    __func__, qlge->instance));
4440 				mac_iocb_ptr->opcode =
4441 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4442 				mac_iocb_ptr->flag1 =
4443 				    (uint8_t)(mac_iocb_ptr->flag1 |
4444 				    OB_MAC_IOCB_REQ_UC);
4445 				mac_iocb_ptr->flag2 =
4446 				    (uint8_t)(mac_iocb_ptr->flag2 |
4447 				    OB_MAC_IOCB_REQ_IC);
4448 				iphdr->ip_sum = 0;
4449 				tcp_udp_hdr_off = mac_hdr_len + ip_hdr_len;
4450 				tcp_udp_hdr_len = sizeof (struct udphdr);
4451 				QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
4452 				    __func__, qlge->instance, tcp_udp_hdr_len));
4453 				hdr_off = ip_hdr_off;
4454 				tcp_udp_hdr_off <<= 6;
4455 				hdr_off |= tcp_udp_hdr_off;
4456 				mac_iocb_ptr->hdr_off =
4457 				    (uint16_t)cpu_to_le16(hdr_off);
4458 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4459 				    cpu_to_le16(mac_hdr_len + ip_hdr_len
4460 				    + tcp_udp_hdr_len);
4461 
4462 				/*
4463 				 * if the chip is unable to calculate pseudo
4464 				 * hdr cksum,do it in then put the result to
4465 				 * the data passed to the chip
4466 				 */
4467 				if (qlge->cfg_flags &
4468 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4469 					ql_pseudo_cksum((uint8_t *)iphdr);
4470 				}
4471 			}
4472 		}
4473 	}
4474 }
4475 
4476 /*
4477  * For TSO/LSO:
4478  * MAC frame transmission with TCP large segment offload is performed in the
4479  * same way as the MAC frame transmission with checksum offload with the
4480  * exception that the maximum TCP segment size (MSS) must be specified to
4481  * allow the chip to segment the data into legal sized frames.
4482  * The host also needs to calculate a pseudo-header checksum over the
4483  * following fields:
4484  * Source IP Address, Destination IP Address, and the Protocol.
4485  * The TCP length is not included in the pseudo-header calculation.
4486  * The pseudo-header checksum is place in the TCP checksum field of the
4487  * prototype header.
4488  */
4489 static void
4490 ql_lso_pseudo_cksum(uint8_t *buf)
4491 {
4492 	uint32_t cksum;
4493 	uint16_t iphl;
4494 	uint16_t proto;
4495 
4496 	/*
4497 	 * Calculate the LSO pseudo-header checksum.
4498 	 */
4499 	iphl = (uint16_t)(4 * (buf[0] & 0xF));
4500 	cksum = proto = buf[9];
4501 	cksum += (((uint16_t)buf[12])<<8) + buf[13];
4502 	cksum += (((uint16_t)buf[14])<<8) + buf[15];
4503 	cksum += (((uint16_t)buf[16])<<8) + buf[17];
4504 	cksum += (((uint16_t)buf[18])<<8) + buf[19];
4505 	cksum = (cksum>>16) + (cksum & 0xFFFF);
4506 	cksum = (cksum>>16) + (cksum & 0xFFFF);
4507 
4508 	/*
4509 	 * Point it to the TCP/UDP header, and
4510 	 * update the checksum field.
4511 	 */
4512 	buf += iphl + ((proto == IPPROTO_TCP) ?
4513 	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
4514 
4515 	*(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
4516 }
4517 
4518 /*
4519  * Tell the hardware to do Large Send Offload (LSO)
4520  *
4521  * Some fields in ob_mac_iocb need to be set so hardware can know what is
4522  * the incoming packet, TCP or UDP, whether a VLAN tag needs to be inserted
4523  * in the right place of the packet etc, thus, hardware can process the
4524  * packet correctly.
4525  */
4526 static void
4527 ql_hw_lso_setup(qlge_t *qlge, uint32_t mss, caddr_t bp,
4528     struct ob_mac_iocb_req *mac_iocb_ptr)
4529 {
4530 	struct ip *iphdr = NULL;
4531 	struct ether_header *ethhdr;
4532 	struct ether_vlan_header *ethvhdr;
4533 	struct tcphdr *tcp_hdr;
4534 	struct udphdr *udp_hdr;
4535 	uint32_t etherType;
4536 	uint16_t mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
4537 	uint16_t ip_hdr_off, tcp_udp_hdr_off, hdr_off;
4538 
4539 	ethhdr = (struct ether_header *)(void *)bp;
4540 	ethvhdr = (struct ether_vlan_header *)(void *)bp;
4541 
4542 	/* Is this vlan packet? */
4543 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
4544 		mac_hdr_len = sizeof (struct ether_vlan_header);
4545 		etherType = ntohs(ethvhdr->ether_type);
4546 	} else {
4547 		mac_hdr_len = sizeof (struct ether_header);
4548 		etherType = ntohs(ethhdr->ether_type);
4549 	}
4550 	/* Is this IPv4 or IPv6 packet? */
4551 	if (IPH_HDR_VERSION((ipha_t *)(void *)(bp + mac_hdr_len)) ==
4552 	    IPV4_VERSION) {
4553 		if (etherType == ETHERTYPE_IP /* 0800 */) {
4554 			iphdr 	= (struct ip *)(void *)(bp+mac_hdr_len);
4555 		} else {
4556 			/* EMPTY */
4557 			QL_PRINT(DBG_TX, ("%s(%d) : IPv4 None IP packet"
4558 			    " type 0x%x\n",
4559 			    __func__, qlge->instance, etherType));
4560 		}
4561 	}
4562 
4563 	if (iphdr != NULL) { /* ipV4 packets */
4564 		ip_hdr_len = (uint16_t)IPH_HDR_LENGTH(iphdr);
4565 		QL_PRINT(DBG_TX,
4566 		    ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH: %d"
4567 		    " bytes \n", __func__, qlge->instance, ip_hdr_len));
4568 
4569 		ip_hdr_off = mac_hdr_len;
4570 		QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
4571 		    __func__, qlge->instance, ip_hdr_len));
4572 
4573 		mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
4574 		    OB_MAC_IOCB_REQ_IPv4);
4575 		if (qlge->cfg_flags & CFG_CKSUM_FULL_IPv4) {
4576 			if (iphdr->ip_p == IPPROTO_TCP) {
4577 				tcp_hdr = (struct tcphdr *)(void *)
4578 				    ((uint8_t *)(void *)iphdr +
4579 				    ip_hdr_len);
4580 				QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on TCP "
4581 				    "packet\n",
4582 				    __func__, qlge->instance));
4583 				mac_iocb_ptr->opcode =
4584 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4585 				mac_iocb_ptr->flag1 =
4586 				    (uint8_t)(mac_iocb_ptr->flag1 |
4587 				    OB_MAC_IOCB_REQ_LSO);
4588 				iphdr->ip_sum = 0;
4589 				tcp_udp_hdr_off =
4590 				    (uint16_t)(mac_hdr_len+ip_hdr_len);
4591 				tcp_udp_hdr_len =
4592 				    (uint16_t)(tcp_hdr->th_off*4);
4593 				QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
4594 				    __func__, qlge->instance, tcp_udp_hdr_len));
4595 				hdr_off = ip_hdr_off;
4596 				tcp_udp_hdr_off <<= 6;
4597 				hdr_off |= tcp_udp_hdr_off;
4598 				mac_iocb_ptr->hdr_off =
4599 				    (uint16_t)cpu_to_le16(hdr_off);
4600 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4601 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
4602 				    tcp_udp_hdr_len);
4603 				mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
4604 
4605 				/*
4606 				 * if the chip is unable to calculate pseudo
4607 				 * header checksum, do it in then put the result
4608 				 * to the data passed to the chip
4609 				 */
4610 				if (qlge->cfg_flags &
4611 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
4612 					ql_lso_pseudo_cksum((uint8_t *)iphdr);
4613 			} else if (iphdr->ip_p == IPPROTO_UDP) {
4614 				udp_hdr = (struct udphdr *)(void *)
4615 				    ((uint8_t *)(void *)iphdr
4616 				    + ip_hdr_len);
4617 				QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on UDP "
4618 				    "packet\n",
4619 				    __func__, qlge->instance));
4620 				mac_iocb_ptr->opcode =
4621 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4622 				mac_iocb_ptr->flag1 =
4623 				    (uint8_t)(mac_iocb_ptr->flag1 |
4624 				    OB_MAC_IOCB_REQ_LSO);
4625 				iphdr->ip_sum = 0;
4626 				tcp_udp_hdr_off =
4627 				    (uint16_t)(mac_hdr_len+ip_hdr_len);
4628 				tcp_udp_hdr_len =
4629 				    (uint16_t)(udp_hdr->uh_ulen*4);
4630 				QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
4631 				    __func__, qlge->instance, tcp_udp_hdr_len));
4632 				hdr_off = ip_hdr_off;
4633 				tcp_udp_hdr_off <<= 6;
4634 				hdr_off |= tcp_udp_hdr_off;
4635 				mac_iocb_ptr->hdr_off =
4636 				    (uint16_t)cpu_to_le16(hdr_off);
4637 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4638 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
4639 				    tcp_udp_hdr_len);
4640 				mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
4641 
4642 				/*
4643 				 * if the chip is unable to do pseudo header
4644 				 * checksum calculation, do it here then put the
4645 				 * result to the data passed to the chip
4646 				 */
4647 				if (qlge->cfg_flags &
4648 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
4649 					ql_lso_pseudo_cksum((uint8_t *)iphdr);
4650 			}
4651 		}
4652 	}
4653 }
4654 
4655 /*
4656  * Generic packet sending function which is used to send one packet.
4657  */
4658 int
4659 ql_send_common(struct tx_ring *tx_ring, mblk_t *mp)
4660 {
4661 	struct tx_ring_desc *tx_cb;
4662 	struct ob_mac_iocb_req *mac_iocb_ptr;
4663 	mblk_t *tp;
4664 	size_t msg_len = 0;
4665 	size_t off;
4666 	caddr_t bp;
4667 	size_t nbyte, total_len;
4668 	uint_t i = 0;
4669 	int j = 0, frags = 0;
4670 	uint32_t phy_addr_low, phy_addr_high;
4671 	uint64_t phys_addr;
4672 	clock_t now;
4673 	uint32_t pflags = 0;
4674 	uint32_t mss = 0;
4675 	enum tx_mode_t tx_mode;
4676 	struct oal_entry *oal_entry;
4677 	int status;
4678 	uint_t ncookies, oal_entries, max_oal_entries;
4679 	size_t max_seg_len = 0;
4680 	boolean_t use_lso = B_FALSE;
4681 	struct oal_entry *tx_entry = NULL;
4682 	struct oal_entry *last_oal_entry;
4683 	qlge_t *qlge = tx_ring->qlge;
4684 	ddi_dma_cookie_t dma_cookie;
4685 	size_t tx_buf_len = QL_MAX_COPY_LENGTH;
4686 	int force_pullup = 0;
4687 
4688 	tp = mp;
4689 	total_len = msg_len = 0;
4690 	max_oal_entries = TX_DESC_PER_IOCB + MAX_SG_ELEMENTS-1;
4691 
4692 	/* Calculate number of data and segments in the incoming message */
4693 	for (tp = mp; tp != NULL; tp = tp->b_cont) {
4694 		nbyte = MBLKL(tp);
4695 		total_len += nbyte;
4696 		max_seg_len = max(nbyte, max_seg_len);
4697 		QL_PRINT(DBG_TX, ("Requested sending data in %d segments, "
4698 		    "total length: %d\n", frags, nbyte));
4699 		frags++;
4700 	}
4701 
4702 	if (total_len >= QL_LSO_MAX) {
4703 		freemsg(mp);
4704 #ifdef QLGE_LOAD_UNLOAD
4705 		cmn_err(CE_NOTE, "%s: quit, packet oversize %d\n",
4706 		    __func__, (int)total_len);
4707 #endif
4708 		return (NULL);
4709 	}
4710 
4711 	bp = (caddr_t)mp->b_rptr;
4712 	if (bp[0] & 1) {
4713 		if (bcmp(bp, ql_ether_broadcast_addr.ether_addr_octet,
4714 		    ETHERADDRL) == 0) {
4715 			QL_PRINT(DBG_TX, ("Broadcast packet\n"));
4716 			tx_ring->brdcstxmt++;
4717 		} else {
4718 			QL_PRINT(DBG_TX, ("multicast packet\n"));
4719 			tx_ring->multixmt++;
4720 		}
4721 	}
4722 
4723 	tx_ring->obytes += total_len;
4724 	tx_ring->opackets ++;
4725 
4726 	QL_PRINT(DBG_TX, ("total requested sending data length: %d, in %d segs,"
4727 	    " max seg len: %d\n", total_len, frags, max_seg_len));
4728 
4729 	/* claim a free slot in tx ring */
4730 	tx_cb = &tx_ring->wq_desc[tx_ring->prod_idx];
4731 
4732 	/* get the tx descriptor */
4733 	mac_iocb_ptr = tx_cb->queue_entry;
4734 
4735 	bzero((void *)mac_iocb_ptr, sizeof (*mac_iocb_ptr));
4736 
4737 	ASSERT(tx_cb->mp == NULL);
4738 
4739 	/*
4740 	 * Decide to use DMA map or copy mode.
4741 	 * DMA map mode must be used when the total msg length is more than the
4742 	 * tx buffer length.
4743 	 */
4744 
4745 	if (total_len > tx_buf_len)
4746 		tx_mode = USE_DMA;
4747 	else if	(max_seg_len > QL_MAX_COPY_LENGTH)
4748 		tx_mode = USE_DMA;
4749 	else
4750 		tx_mode = USE_COPY;
4751 
4752 	if (qlge->chksum_cap) {
4753 		hcksum_retrieve(mp, NULL, NULL, NULL,
4754 		    NULL, NULL, NULL, &pflags);
4755 		QL_PRINT(DBG_TX, ("checksum flag is :0x%x, card capability "
4756 		    "is 0x%x \n", pflags, qlge->chksum_cap));
4757 		if (qlge->lso_enable) {
4758 			uint32_t lso_flags = 0;
4759 			lso_info_get(mp, &mss, &lso_flags);
4760 			use_lso = (lso_flags == HW_LSO);
4761 		}
4762 		QL_PRINT(DBG_TX, ("mss :%d, use_lso %x \n",
4763 		    mss, use_lso));
4764 	}
4765 
4766 do_pullup:
4767 
4768 	/* concatenate all frags into one large packet if too fragmented */
4769 	if (((tx_mode == USE_DMA)&&(frags > QL_MAX_TX_DMA_HANDLES)) ||
4770 	    force_pullup) {
4771 		mblk_t *mp1;
4772 		if ((mp1 = msgpullup(mp, -1)) != NULL) {
4773 			freemsg(mp);
4774 			mp = mp1;
4775 			frags = 1;
4776 		} else {
4777 			tx_ring->tx_fail_dma_bind++;
4778 			goto bad;
4779 		}
4780 	}
4781 
4782 	tx_cb->tx_bytes = (uint32_t)total_len;
4783 	tx_cb->mp = mp;
4784 	tx_cb->tx_dma_handle_used = 0;
4785 
4786 	if (tx_mode == USE_DMA) {
4787 		msg_len = total_len;
4788 
4789 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
4790 		mac_iocb_ptr->tid = tx_ring->prod_idx;
4791 		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
4792 		mac_iocb_ptr->txq_idx = tx_ring->wq_id;
4793 
4794 		tx_entry = &mac_iocb_ptr->oal_entry[0];
4795 		oal_entry = NULL;
4796 
4797 		for (tp = mp, oal_entries = j = 0; tp != NULL;
4798 		    tp = tp->b_cont) {
4799 			/* if too many tx dma handles needed */
4800 			if (j >= QL_MAX_TX_DMA_HANDLES) {
4801 				tx_ring->tx_no_dma_handle++;
4802 				if (!force_pullup) {
4803 					force_pullup = 1;
4804 					goto do_pullup;
4805 				} else {
4806 					goto bad;
4807 				}
4808 			}
4809 			nbyte = (uint16_t)MBLKL(tp);
4810 			if (nbyte == 0)
4811 				continue;
4812 
4813 			status = ddi_dma_addr_bind_handle(
4814 			    tx_cb->tx_dma_handle[j], NULL,
4815 			    (caddr_t)tp->b_rptr, nbyte,
4816 			    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
4817 			    0, &dma_cookie, &ncookies);
4818 
4819 			QL_PRINT(DBG_TX, ("map sending data segment: %d, "
4820 			    "length: %d, spans in %d cookies\n",
4821 			    j, nbyte, ncookies));
4822 
4823 			if (status != DDI_DMA_MAPPED) {
4824 				goto bad;
4825 			}
4826 			/*
4827 			 * Each fragment can span several cookies. One cookie
4828 			 * will use one tx descriptor to transmit.
4829 			 */
4830 			for (i = ncookies; i > 0; i--, tx_entry++,
4831 			    oal_entries++) {
4832 				/*
4833 				 * The number of TX descriptors that can be
4834 				 *  saved in tx iocb and oal list is limited
4835 				 */
4836 				if (oal_entries > max_oal_entries) {
4837 					tx_ring->tx_no_dma_cookie++;
4838 					if (!force_pullup) {
4839 						force_pullup = 1;
4840 						goto do_pullup;
4841 					} else {
4842 						goto bad;
4843 					}
4844 				}
4845 
4846 				if ((oal_entries == TX_DESC_PER_IOCB) &&
4847 				    !oal_entry) {
4848 					/*
4849 					 * Time to switch to an oal list
4850 					 * The last entry should be copied
4851 					 * to first entry in the oal list
4852 					 */
4853 					oal_entry = tx_cb->oal;
4854 					tx_entry =
4855 					    &mac_iocb_ptr->oal_entry[
4856 					    TX_DESC_PER_IOCB-1];
4857 					bcopy(tx_entry, oal_entry,
4858 					    sizeof (*oal_entry));
4859 
4860 					/*
4861 					 * last entry should be updated to
4862 					 * point to the extended oal list itself
4863 					 */
4864 					tx_entry->buf_addr_low =
4865 					    cpu_to_le32(
4866 					    LS_64BITS(tx_cb->oal_dma_addr));
4867 					tx_entry->buf_addr_high =
4868 					    cpu_to_le32(
4869 					    MS_64BITS(tx_cb->oal_dma_addr));
4870 					/*
4871 					 * Point tx_entry to the oal list
4872 					 * second entry
4873 					 */
4874 					tx_entry = &oal_entry[1];
4875 				}
4876 
4877 				tx_entry->buf_len =
4878 				    (uint32_t)cpu_to_le32(dma_cookie.dmac_size);
4879 				phys_addr = dma_cookie.dmac_laddress;
4880 				tx_entry->buf_addr_low =
4881 				    cpu_to_le32(LS_64BITS(phys_addr));
4882 				tx_entry->buf_addr_high =
4883 				    cpu_to_le32(MS_64BITS(phys_addr));
4884 
4885 				last_oal_entry = tx_entry;
4886 
4887 				if (i > 1)
4888 					ddi_dma_nextcookie(
4889 					    tx_cb->tx_dma_handle[j],
4890 					    &dma_cookie);
4891 			}
4892 			j++;
4893 		}
4894 		/*
4895 		 * if OAL is used, the last oal entry in tx iocb indicates
4896 		 * number of additional address/len pairs in OAL
4897 		 */
4898 		if (oal_entries > TX_DESC_PER_IOCB) {
4899 			tx_entry = &mac_iocb_ptr->oal_entry[TX_DESC_PER_IOCB-1];
4900 			tx_entry->buf_len = (uint32_t)
4901 			    (cpu_to_le32((sizeof (struct oal_entry) *
4902 			    (oal_entries -TX_DESC_PER_IOCB+1))|OAL_CONT_ENTRY));
4903 		}
4904 		last_oal_entry->buf_len = cpu_to_le32(
4905 		    le32_to_cpu(last_oal_entry->buf_len)|OAL_LAST_ENTRY);
4906 
4907 		tx_cb->tx_dma_handle_used = j;
4908 		QL_PRINT(DBG_TX, ("total tx_dma_handle_used %d cookies %d \n",
4909 		    j, oal_entries));
4910 
4911 		bp = (caddr_t)mp->b_rptr;
4912 	}
4913 	if (tx_mode == USE_COPY) {
4914 		bp = tx_cb->copy_buffer;
4915 		off = 0;
4916 		nbyte = 0;
4917 		frags = 0;
4918 		/*
4919 		 * Copy up to tx_buf_len of the transmit data
4920 		 * from mp to tx buffer
4921 		 */
4922 		for (tp = mp; tp != NULL; tp = tp->b_cont) {
4923 			nbyte = MBLKL(tp);
4924 			if ((off + nbyte) <= tx_buf_len) {
4925 				bcopy(tp->b_rptr, &bp[off], nbyte);
4926 				off += nbyte;
4927 				frags ++;
4928 			}
4929 		}
4930 
4931 		msg_len = off;
4932 
4933 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
4934 		mac_iocb_ptr->tid = tx_ring->prod_idx;
4935 		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
4936 		mac_iocb_ptr->txq_idx = tx_ring->wq_id;
4937 
4938 		QL_PRINT(DBG_TX, ("Copy Mode:actual sent data length is: %d, "
4939 		    "from %d segaments\n", msg_len, frags));
4940 
4941 		phys_addr = tx_cb->copy_buffer_dma_addr;
4942 		phy_addr_low = cpu_to_le32(LS_64BITS(phys_addr));
4943 		phy_addr_high = cpu_to_le32(MS_64BITS(phys_addr));
4944 
4945 		QL_DUMP(DBG_TX, "\t requested sending data:\n",
4946 		    (uint8_t *)tx_cb->copy_buffer, 8, total_len);
4947 
4948 		mac_iocb_ptr->oal_entry[0].buf_len = (uint32_t)
4949 		    cpu_to_le32(msg_len | OAL_LAST_ENTRY);
4950 		mac_iocb_ptr->oal_entry[0].buf_addr_low  = phy_addr_low;
4951 		mac_iocb_ptr->oal_entry[0].buf_addr_high = phy_addr_high;
4952 
4953 		freemsg(mp); /* no need, we have copied */
4954 		tx_cb->mp = NULL;
4955 	} /* End of Copy Mode */
4956 
4957 	/* Do TSO/LSO on TCP packet? */
4958 	if (use_lso && mss) {
4959 		ql_hw_lso_setup(qlge, mss, bp, mac_iocb_ptr);
4960 	} else if (pflags & qlge->chksum_cap) {
4961 		/* Do checksum offloading */
4962 		ql_hw_csum_setup(qlge, pflags, bp, mac_iocb_ptr);
4963 	}
4964 
4965 	/* let device know the latest outbound IOCB */
4966 	(void) ddi_dma_sync(tx_ring->wq_dma.dma_handle,
4967 	    (off_t)((uintptr_t)mac_iocb_ptr - (uintptr_t)tx_ring->wq_dma.vaddr),
4968 	    (size_t)sizeof (*mac_iocb_ptr), DDI_DMA_SYNC_FORDEV);
4969 
4970 	if (tx_mode == USE_DMA) {
4971 		/* let device know the latest outbound OAL if necessary */
4972 		if (oal_entries > TX_DESC_PER_IOCB) {
4973 			(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
4974 			    (off_t)0,
4975 			    (sizeof (struct oal_entry) *
4976 			    (oal_entries -TX_DESC_PER_IOCB+1)),
4977 			    DDI_DMA_SYNC_FORDEV);
4978 		}
4979 	} else { /* for USE_COPY mode, tx buffer has changed */
4980 		/* let device know the latest change */
4981 		(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
4982 		/* copy buf offset */
4983 		    (off_t)(sizeof (oal_entry) * MAX_SG_ELEMENTS),
4984 		    msg_len, DDI_DMA_SYNC_FORDEV);
4985 	}
4986 
4987 	/* save how the packet was sent */
4988 	tx_cb->tx_type = tx_mode;
4989 
4990 	QL_DUMP_REQ_PKT(qlge, mac_iocb_ptr, tx_cb->oal, oal_entries);
4991 	/* reduce the number of available tx slot */
4992 	atomic_dec_32(&tx_ring->tx_free_count);
4993 
4994 	tx_ring->prod_idx++;
4995 	if (tx_ring->prod_idx >= tx_ring->wq_len)
4996 		tx_ring->prod_idx = 0;
4997 
4998 	now = ddi_get_lbolt();
4999 	qlge->last_tx_time = now;
5000 
5001 	return (DDI_SUCCESS);
5002 
5003 bad:
5004 	/*
5005 	 * if for any reason driver can not send, delete
5006 	 * the message pointer, mp
5007 	 */
5008 	now = ddi_get_lbolt();
5009 	freemsg(mp);
5010 	mp = NULL;
5011 	for (i = 0; i < j; i++)
5012 		(void) ddi_dma_unbind_handle(tx_cb->tx_dma_handle[i]);
5013 
5014 	QL_PRINT(DBG_TX, ("%s(%d) failed at 0x%x",
5015 	    __func__, qlge->instance, (int)now));
5016 
5017 	return (DDI_SUCCESS);
5018 }
5019 
5020 
5021 /*
5022  * Initializes hardware and driver software flags before the driver
5023  * is finally ready to work.
5024  */
5025 int
5026 ql_do_start(qlge_t *qlge)
5027 {
5028 	int i;
5029 	struct rx_ring *rx_ring;
5030 	uint16_t lbq_buf_size;
5031 	int rings_done;
5032 
5033 	ASSERT(qlge != NULL);
5034 
5035 	mutex_enter(&qlge->hw_mutex);
5036 
5037 	/* Reset adapter */
5038 	(void) ql_asic_reset(qlge);
5039 
5040 	lbq_buf_size = (uint16_t)
5041 	    ((qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE);
5042 	if (qlge->rx_ring[0].lbq_buf_size != lbq_buf_size) {
5043 #ifdef QLGE_LOAD_UNLOAD
5044 		cmn_err(CE_NOTE, "realloc buffers old: %d new: %d\n",
5045 		    qlge->rx_ring[0].lbq_buf_size, lbq_buf_size);
5046 #endif
5047 		/*
5048 		 * Check if any ring has buffers still with upper layers
5049 		 * If buffers are pending with upper layers, we use the
5050 		 * existing buffers and don't reallocate new ones
5051 		 * Unfortunately there is no way to evict buffers from
5052 		 * upper layers. Using buffers with the current size may
5053 		 * cause slightly sub-optimal performance, but that seems
5054 		 * to be the easiest way to handle this situation.
5055 		 */
5056 		rings_done = 0;
5057 		for (i = 0; i < qlge->rx_ring_count; i++) {
5058 			rx_ring = &qlge->rx_ring[i];
5059 			if (rx_ring->rx_indicate == 0)
5060 				rings_done++;
5061 			else
5062 				break;
5063 		}
5064 		/*
5065 		 * No buffers pending with upper layers;
5066 		 * reallocte them for new MTU size
5067 		 */
5068 		if (rings_done >= qlge->rx_ring_count) {
5069 			/* free large buffer pool */
5070 			for (i = 0; i < qlge->rx_ring_count; i++) {
5071 				rx_ring = &qlge->rx_ring[i];
5072 				if (rx_ring->type != TX_Q) {
5073 					ql_free_sbq_buffers(rx_ring);
5074 					ql_free_lbq_buffers(rx_ring);
5075 				}
5076 			}
5077 			/* reallocate large buffer pool */
5078 			for (i = 0; i < qlge->rx_ring_count; i++) {
5079 				rx_ring = &qlge->rx_ring[i];
5080 				if (rx_ring->type != TX_Q) {
5081 					(void) ql_alloc_sbufs(qlge, rx_ring);
5082 					(void) ql_alloc_lbufs(qlge, rx_ring);
5083 				}
5084 			}
5085 		}
5086 	}
5087 
5088 	if (ql_bringup_adapter(qlge) != DDI_SUCCESS) {
5089 		cmn_err(CE_WARN, "qlge bringup adapter failed");
5090 		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
5091 		mutex_exit(&qlge->hw_mutex);
5092 		return (DDI_FAILURE);
5093 	}
5094 
5095 	mutex_exit(&qlge->hw_mutex);
5096 
5097 	/* Get current link state */
5098 	qlge->port_link_state = ql_get_link_state(qlge);
5099 
5100 	if (qlge->port_link_state == LS_UP) {
5101 		QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5102 		    __func__, qlge->instance));
5103 		/* If driver detects a carrier on */
5104 		CARRIER_ON(qlge);
5105 	} else {
5106 		QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5107 		    __func__, qlge->instance));
5108 		/* If driver detects a lack of carrier */
5109 		CARRIER_OFF(qlge);
5110 	}
5111 	qlge->mac_flags = QL_MAC_STARTED;
5112 	return (DDI_SUCCESS);
5113 }
5114 
5115 /*
5116  * Stop currently running driver
5117  * Driver needs to stop routing new packets to driver and wait until
5118  * all pending tx/rx buffers to be free-ed.
5119  */
5120 int
5121 ql_do_stop(qlge_t *qlge)
5122 {
5123 	int rc = DDI_FAILURE;
5124 	uint32_t i, j, k;
5125 	struct bq_desc *sbq_desc, *lbq_desc;
5126 	struct rx_ring *rx_ring;
5127 
5128 	ASSERT(qlge != NULL);
5129 
5130 	CARRIER_OFF(qlge);
5131 
5132 	rc = ql_bringdown_adapter(qlge);
5133 	if (rc != DDI_SUCCESS) {
5134 		cmn_err(CE_WARN, "qlge bringdown adapter failed.");
5135 	} else
5136 		rc = DDI_SUCCESS;
5137 
5138 	for (k = 0; k < qlge->rx_ring_count; k++) {
5139 		rx_ring = &qlge->rx_ring[k];
5140 		if (rx_ring->type != TX_Q) {
5141 			j = rx_ring->lbq_use_head;
5142 #ifdef QLGE_LOAD_UNLOAD
5143 			cmn_err(CE_NOTE, "ring %d: move %d lbufs in use list"
5144 			    " to free list %d\n total %d\n",
5145 			    k, rx_ring->lbuf_in_use_count,
5146 			    rx_ring->lbuf_free_count,
5147 			    rx_ring->lbuf_in_use_count +
5148 			    rx_ring->lbuf_free_count);
5149 #endif
5150 			for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
5151 				lbq_desc = rx_ring->lbuf_in_use[j];
5152 				j++;
5153 				if (j >= rx_ring->lbq_len) {
5154 					j = 0;
5155 				}
5156 				if (lbq_desc->mp) {
5157 					atomic_inc_32(&rx_ring->rx_indicate);
5158 					freemsg(lbq_desc->mp);
5159 				}
5160 			}
5161 			rx_ring->lbq_use_head = j;
5162 			rx_ring->lbq_use_tail = j;
5163 			rx_ring->lbuf_in_use_count = 0;
5164 			j = rx_ring->sbq_use_head;
5165 #ifdef QLGE_LOAD_UNLOAD
5166 			cmn_err(CE_NOTE, "ring %d: move %d sbufs in use list,"
5167 			    " to free list %d\n total %d \n",
5168 			    k, rx_ring->sbuf_in_use_count,
5169 			    rx_ring->sbuf_free_count,
5170 			    rx_ring->sbuf_in_use_count +
5171 			    rx_ring->sbuf_free_count);
5172 #endif
5173 			for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
5174 				sbq_desc = rx_ring->sbuf_in_use[j];
5175 				j++;
5176 				if (j >= rx_ring->sbq_len) {
5177 					j = 0;
5178 				}
5179 				if (sbq_desc->mp) {
5180 					atomic_inc_32(&rx_ring->rx_indicate);
5181 					freemsg(sbq_desc->mp);
5182 				}
5183 			}
5184 			rx_ring->sbq_use_head = j;
5185 			rx_ring->sbq_use_tail = j;
5186 			rx_ring->sbuf_in_use_count = 0;
5187 		}
5188 	}
5189 
5190 	qlge->mac_flags = QL_MAC_STOPPED;
5191 
5192 	return (rc);
5193 }
5194 
5195 /*
5196  * Support
5197  */
5198 
5199 void
5200 ql_disable_isr(qlge_t *qlge)
5201 {
5202 	/*
5203 	 * disable the hardware interrupt
5204 	 */
5205 	ISP_DISABLE_GLOBAL_INTRS(qlge);
5206 
5207 	qlge->flags &= ~INTERRUPTS_ENABLED;
5208 }
5209 
5210 
5211 
5212 /*
5213  * busy wait for 'usecs' microseconds.
5214  */
5215 void
5216 qlge_delay(clock_t usecs)
5217 {
5218 	drv_usecwait(usecs);
5219 }
5220 
5221 /*
5222  * retrieve firmware details.
5223  */
5224 
5225 pci_cfg_t *
5226 ql_get_pci_config(qlge_t *qlge)
5227 {
5228 	return (&(qlge->pci_cfg));
5229 }
5230 
5231 /*
5232  * Get current Link status
5233  */
5234 static uint32_t
5235 ql_get_link_state(qlge_t *qlge)
5236 {
5237 	uint32_t bitToCheck = 0;
5238 	uint32_t temp, linkState;
5239 
5240 	if (qlge->func_number == qlge->fn0_net) {
5241 		bitToCheck = STS_PL0;
5242 	} else {
5243 		bitToCheck = STS_PL1;
5244 	}
5245 	temp = ql_read_reg(qlge, REG_STATUS);
5246 	QL_PRINT(DBG_GLD, ("%s(%d) chip status reg: 0x%x\n",
5247 	    __func__, qlge->instance, temp));
5248 
5249 	if (temp & bitToCheck) {
5250 		linkState = LS_UP;
5251 	} else {
5252 		linkState = LS_DOWN;
5253 	}
5254 	if (CFG_IST(qlge, CFG_CHIP_8100)) {
5255 		/* for Schultz, link Speed is fixed to 10G, full duplex */
5256 		qlge->speed  = SPEED_10G;
5257 		qlge->duplex = 1;
5258 	}
5259 	return (linkState);
5260 }
5261 /*
5262  * Get current link status and report to OS
5263  */
5264 static void
5265 ql_get_and_report_link_state(qlge_t *qlge)
5266 {
5267 	uint32_t cur_link_state;
5268 
5269 	/* Get current link state */
5270 	cur_link_state = ql_get_link_state(qlge);
5271 	/* if link state has changed */
5272 	if (cur_link_state != qlge->port_link_state) {
5273 
5274 		qlge->port_link_state = cur_link_state;
5275 
5276 		if (qlge->port_link_state == LS_UP) {
5277 			QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5278 			    __func__, qlge->instance));
5279 			/* If driver detects a carrier on */
5280 			CARRIER_ON(qlge);
5281 		} else {
5282 			QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5283 			    __func__, qlge->instance));
5284 			/* If driver detects a lack of carrier */
5285 			CARRIER_OFF(qlge);
5286 		}
5287 	}
5288 }
5289 
5290 /*
5291  * timer callback function executed after timer expires
5292  */
5293 static void
5294 ql_timer(void* arg)
5295 {
5296 	ql_get_and_report_link_state((qlge_t *)arg);
5297 }
5298 
5299 /*
5300  * stop the running timer if activated
5301  */
5302 static void
5303 ql_stop_timer(qlge_t *qlge)
5304 {
5305 	timeout_id_t timer_id;
5306 	/* Disable driver timer */
5307 	if (qlge->ql_timer_timeout_id != NULL) {
5308 		timer_id = qlge->ql_timer_timeout_id;
5309 		qlge->ql_timer_timeout_id = NULL;
5310 		(void) untimeout(timer_id);
5311 	}
5312 }
5313 
5314 /*
5315  * stop then restart timer
5316  */
5317 void
5318 ql_restart_timer(qlge_t *qlge)
5319 {
5320 	ql_stop_timer(qlge);
5321 	qlge->ql_timer_ticks = TICKS_PER_SEC / 4;
5322 	qlge->ql_timer_timeout_id = timeout(ql_timer,
5323 	    (void *)qlge, qlge->ql_timer_ticks);
5324 }
5325 
5326 /* ************************************************************************* */
5327 /*
5328  *		Hardware K-Stats Data Structures and Subroutines
5329  */
5330 /* ************************************************************************* */
5331 static const ql_ksindex_t ql_kstats_hw[] = {
5332 	/* PCI related hardware information */
5333 	{ 0, "Vendor Id"			},
5334 	{ 1, "Device Id"			},
5335 	{ 2, "Command"				},
5336 	{ 3, "Status"				},
5337 	{ 4, "Revision Id"			},
5338 	{ 5, "Cache Line Size"			},
5339 	{ 6, "Latency Timer"			},
5340 	{ 7, "Header Type"			},
5341 	{ 9, "I/O base addr"			},
5342 	{ 10, "Control Reg Base addr low"	},
5343 	{ 11, "Control Reg Base addr high"	},
5344 	{ 12, "Doorbell Reg Base addr low"	},
5345 	{ 13, "Doorbell Reg Base addr high"	},
5346 	{ 14, "Subsystem Vendor Id"		},
5347 	{ 15, "Subsystem Device ID"		},
5348 	{ 16, "PCIe Device Control"		},
5349 	{ 17, "PCIe Link Status"		},
5350 
5351 	{ -1,	NULL				},
5352 };
5353 
5354 /*
5355  * kstat update function for PCI registers
5356  */
5357 static int
5358 ql_kstats_get_pci_regs(kstat_t *ksp, int flag)
5359 {
5360 	qlge_t *qlge;
5361 	kstat_named_t *knp;
5362 
5363 	if (flag != KSTAT_READ)
5364 		return (EACCES);
5365 
5366 	qlge = ksp->ks_private;
5367 	knp = ksp->ks_data;
5368 	(knp++)->value.ui32 = qlge->pci_cfg.vendor_id;
5369 	(knp++)->value.ui32 = qlge->pci_cfg.device_id;
5370 	(knp++)->value.ui32 = qlge->pci_cfg.command;
5371 	(knp++)->value.ui32 = qlge->pci_cfg.status;
5372 	(knp++)->value.ui32 = qlge->pci_cfg.revision;
5373 	(knp++)->value.ui32 = qlge->pci_cfg.cache_line_size;
5374 	(knp++)->value.ui32 = qlge->pci_cfg.latency_timer;
5375 	(knp++)->value.ui32 = qlge->pci_cfg.header_type;
5376 	(knp++)->value.ui32 = qlge->pci_cfg.io_base_address;
5377 	(knp++)->value.ui32 =
5378 	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_lower;
5379 	(knp++)->value.ui32 =
5380 	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_upper;
5381 	(knp++)->value.ui32 =
5382 	    qlge->pci_cfg.pci_doorbell_mem_base_address_lower;
5383 	(knp++)->value.ui32 =
5384 	    qlge->pci_cfg.pci_doorbell_mem_base_address_upper;
5385 	(knp++)->value.ui32 = qlge->pci_cfg.sub_vendor_id;
5386 	(knp++)->value.ui32 = qlge->pci_cfg.sub_device_id;
5387 	(knp++)->value.ui32 = qlge->pci_cfg.pcie_device_control;
5388 	(knp++)->value.ui32 = qlge->pci_cfg.link_status;
5389 
5390 	return (0);
5391 }
5392 
5393 static const ql_ksindex_t ql_kstats_mii[] = {
5394 	/* MAC/MII related hardware information */
5395 	{ 0, "mtu"},
5396 
5397 	{ -1, NULL},
5398 };
5399 
5400 
5401 /*
5402  * kstat update function for MII related information.
5403  */
5404 static int
5405 ql_kstats_mii_update(kstat_t *ksp, int flag)
5406 {
5407 	qlge_t *qlge;
5408 	kstat_named_t *knp;
5409 
5410 	if (flag != KSTAT_READ)
5411 		return (EACCES);
5412 
5413 	qlge = ksp->ks_private;
5414 	knp = ksp->ks_data;
5415 
5416 	(knp++)->value.ui32 = qlge->mtu;
5417 
5418 	return (0);
5419 }
5420 
5421 static const ql_ksindex_t ql_kstats_reg[] = {
5422 	/* Register information */
5423 	{ 0, "System (0x08)"			},
5424 	{ 1, "Reset/Fail Over(0x0Ch"		},
5425 	{ 2, "Function Specific Control(0x10)"	},
5426 	{ 3, "Status (0x30)"			},
5427 	{ 4, "Intr Enable (0x34)"		},
5428 	{ 5, "Intr Status1 (0x3C)"		},
5429 	{ 6, "Error Status (0x54)"		},
5430 	{ 7, "XGMAC Flow Control(0x11C)"	},
5431 	{ 8, "XGMAC Tx Pause Frames(0x230)"	},
5432 	{ 9, "XGMAC Rx Pause Frames(0x388)"	},
5433 	{ 10, "XGMAC Rx FIFO Drop Count(0x5B8)"	},
5434 	{ 11, "interrupts actually allocated"	},
5435 	{ 12, "interrupts on rx ring 0"		},
5436 	{ 13, "interrupts on rx ring 1"		},
5437 	{ 14, "interrupts on rx ring 2"		},
5438 	{ 15, "interrupts on rx ring 3"		},
5439 	{ 16, "interrupts on rx ring 4"		},
5440 	{ 17, "interrupts on rx ring 5"		},
5441 	{ 18, "interrupts on rx ring 6"		},
5442 	{ 19, "interrupts on rx ring 7"		},
5443 	{ 20, "polls on rx ring 0"		},
5444 	{ 21, "polls on rx ring 1"		},
5445 	{ 22, "polls on rx ring 2"		},
5446 	{ 23, "polls on rx ring 3"		},
5447 	{ 24, "polls on rx ring 4"		},
5448 	{ 25, "polls on rx ring 5"		},
5449 	{ 26, "polls on rx ring 6"		},
5450 	{ 27, "polls on rx ring 7"		},
5451 	{ 28, "tx no resource on ring 0"	},
5452 	{ 29, "tx dma bind fail on ring 0"	},
5453 	{ 30, "tx dma no handle on ring 0"	},
5454 	{ 31, "tx dma no cookie on ring 0"	},
5455 	{ 32, "MPI firmware major version"},
5456 	{ 33, "MPI firmware minor version"},
5457 	{ 34, "MPI firmware sub version"},
5458 
5459 	{ -1, NULL},
5460 };
5461 
5462 
5463 /*
5464  * kstat update function for device register set
5465  */
5466 static int
5467 ql_kstats_get_reg_and_dev_stats(kstat_t *ksp, int flag)
5468 {
5469 	qlge_t *qlge;
5470 	kstat_named_t *knp;
5471 	uint32_t val32;
5472 	int i = 0;
5473 	struct tx_ring *tx_ring;
5474 
5475 	if (flag != KSTAT_READ)
5476 		return (EACCES);
5477 
5478 	qlge = ksp->ks_private;
5479 	knp = ksp->ks_data;
5480 
5481 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_SYSTEM);
5482 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_RESET_FAILOVER);
5483 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL);
5484 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_STATUS);
5485 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_ENABLE);
5486 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
5487 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_ERROR_STATUS);
5488 
5489 	if (ql_sem_spinlock(qlge, qlge->xgmac_sem_mask)) {
5490 		return (0);
5491 	}
5492 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_FLOW_CONTROL, &val32);
5493 	(knp++)->value.ui32 = val32;
5494 
5495 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_TX_PAUSE_PKTS, &val32);
5496 	(knp++)->value.ui32 = val32;
5497 
5498 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_PAUSE_PKTS, &val32);
5499 	(knp++)->value.ui32 = val32;
5500 
5501 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_FIFO_DROPS, &val32);
5502 	(knp++)->value.ui32 = val32;
5503 
5504 	ql_sem_unlock(qlge, qlge->xgmac_sem_mask);
5505 
5506 	(knp++)->value.ui32 = qlge->intr_cnt;
5507 
5508 	for (i = 0; i < 8; i++) {
5509 		(knp++)->value.ui32 = qlge->rx_interrupts[i];
5510 	}
5511 
5512 	for (i = 0; i < 8; i++) {
5513 		(knp++)->value.ui32 = qlge->rx_polls[i];
5514 	}
5515 
5516 	tx_ring = &qlge->tx_ring[0];
5517 	(knp++)->value.ui32 = tx_ring->defer;
5518 	(knp++)->value.ui32 = tx_ring->tx_fail_dma_bind;
5519 	(knp++)->value.ui32 = tx_ring->tx_no_dma_handle;
5520 	(knp++)->value.ui32 = tx_ring->tx_no_dma_cookie;
5521 
5522 	(knp++)->value.ui32 = qlge->fw_version_info.major_version;
5523 	(knp++)->value.ui32 = qlge->fw_version_info.minor_version;
5524 	(knp++)->value.ui32 = qlge->fw_version_info.sub_minor_version;
5525 
5526 	return (0);
5527 }
5528 
5529 
5530 static kstat_t *
5531 ql_setup_named_kstat(qlge_t *qlge, int instance, char *name,
5532     const ql_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
5533 {
5534 	kstat_t *ksp;
5535 	kstat_named_t *knp;
5536 	char *np;
5537 	int type;
5538 
5539 	size /= sizeof (ql_ksindex_t);
5540 	ksp = kstat_create(ADAPTER_NAME, instance, name, "net",
5541 	    KSTAT_TYPE_NAMED, ((uint32_t)size) - 1, KSTAT_FLAG_PERSISTENT);
5542 	if (ksp == NULL)
5543 		return (NULL);
5544 
5545 	ksp->ks_private = qlge;
5546 	ksp->ks_update = update;
5547 	for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
5548 		switch (*np) {
5549 		default:
5550 			type = KSTAT_DATA_UINT32;
5551 			break;
5552 		case '&':
5553 			np += 1;
5554 			type = KSTAT_DATA_CHAR;
5555 			break;
5556 		}
5557 		kstat_named_init(knp, np, (uint8_t)type);
5558 	}
5559 	kstat_install(ksp);
5560 
5561 	return (ksp);
5562 }
5563 
5564 /*
5565  * Setup various kstat
5566  */
5567 int
5568 ql_init_kstats(qlge_t *qlge)
5569 {
5570 	/* Hardware KStats */
5571 	qlge->ql_kstats[QL_KSTAT_CHIP] = ql_setup_named_kstat(qlge,
5572 	    qlge->instance, "chip", ql_kstats_hw,
5573 	    sizeof (ql_kstats_hw), ql_kstats_get_pci_regs);
5574 	if (qlge->ql_kstats[QL_KSTAT_CHIP] == NULL) {
5575 		return (DDI_FAILURE);
5576 	}
5577 
5578 	/* MII KStats */
5579 	qlge->ql_kstats[QL_KSTAT_LINK] = ql_setup_named_kstat(qlge,
5580 	    qlge->instance, "mii", ql_kstats_mii,
5581 	    sizeof (ql_kstats_mii), ql_kstats_mii_update);
5582 	if (qlge->ql_kstats[QL_KSTAT_LINK] == NULL) {
5583 		return (DDI_FAILURE);
5584 	}
5585 
5586 	/* REG KStats */
5587 	qlge->ql_kstats[QL_KSTAT_REG] = ql_setup_named_kstat(qlge,
5588 	    qlge->instance, "reg", ql_kstats_reg,
5589 	    sizeof (ql_kstats_reg), ql_kstats_get_reg_and_dev_stats);
5590 	if (qlge->ql_kstats[QL_KSTAT_REG] == NULL) {
5591 		return (DDI_FAILURE);
5592 	}
5593 	return (DDI_SUCCESS);
5594 }
5595 
5596 /*
5597  * delete all kstat
5598  */
5599 void
5600 ql_fini_kstats(qlge_t *qlge)
5601 {
5602 	int i;
5603 
5604 	for (i = 0; i < QL_KSTAT_COUNT; i++) {
5605 		if (qlge->ql_kstats[i] != NULL)
5606 			kstat_delete(qlge->ql_kstats[i]);
5607 	}
5608 }
5609 
5610 /* ************************************************************************* */
5611 /*
5612  *                                 kstat end
5613  */
5614 /* ************************************************************************* */
5615 
5616 /*
5617  * Setup the parameters for receive and transmit rings including buffer sizes
5618  * and completion queue sizes
5619  */
5620 static int
5621 ql_setup_rings(qlge_t *qlge)
5622 {
5623 	uint8_t i;
5624 	struct rx_ring *rx_ring;
5625 	struct tx_ring *tx_ring;
5626 	uint16_t lbq_buf_size;
5627 
5628 	lbq_buf_size = (uint16_t)
5629 	    ((qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE);
5630 
5631 	/*
5632 	 * rx_ring[0] is always the default queue.
5633 	 */
5634 	/*
5635 	 * qlge->rx_ring_count:
5636 	 * Total number of rx_rings. This includes a number
5637 	 * of outbound completion handler rx_rings, and a
5638 	 * number of inbound completion handler rx_rings.
5639 	 * rss is only enabled if we have more than 1 rx completion
5640 	 * queue. If we have a single rx completion queue
5641 	 * then all rx completions go to this queue and
5642 	 * the last completion queue
5643 	 */
5644 
5645 	qlge->tx_ring_first_cq_id = qlge->rss_ring_count;
5646 
5647 	for (i = 0; i < qlge->tx_ring_count; i++) {
5648 		tx_ring = &qlge->tx_ring[i];
5649 		bzero((void *)tx_ring, sizeof (*tx_ring));
5650 		tx_ring->qlge = qlge;
5651 		tx_ring->wq_id = i;
5652 		tx_ring->wq_len = qlge->tx_ring_size;
5653 		tx_ring->wq_size = (uint32_t)(
5654 		    tx_ring->wq_len * sizeof (struct ob_mac_iocb_req));
5655 
5656 		/*
5657 		 * The completion queue ID for the tx rings start
5658 		 * immediately after the last rss completion queue.
5659 		 */
5660 		tx_ring->cq_id = (uint16_t)(i + qlge->tx_ring_first_cq_id);
5661 	}
5662 
5663 	for (i = 0; i < qlge->rx_ring_count; i++) {
5664 		rx_ring = &qlge->rx_ring[i];
5665 		bzero((void *)rx_ring, sizeof (*rx_ring));
5666 		rx_ring->qlge = qlge;
5667 		rx_ring->cq_id = i;
5668 		if (i != 0)
5669 			rx_ring->cpu = (i) % qlge->rx_ring_count;
5670 		else
5671 			rx_ring->cpu = 0;
5672 
5673 		if (i < qlge->rss_ring_count) {
5674 			/*
5675 			 * Inbound completions (RSS) queues
5676 			 * Default queue is queue 0 which handles
5677 			 * unicast plus bcast/mcast and async events.
5678 			 * Other inbound queues handle unicast frames only.
5679 			 */
5680 			rx_ring->cq_len = qlge->rx_ring_size;
5681 			rx_ring->cq_size = (uint32_t)
5682 			    (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
5683 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
5684 			rx_ring->lbq_size = (uint32_t)
5685 			    (rx_ring->lbq_len * sizeof (uint64_t));
5686 			rx_ring->lbq_buf_size = lbq_buf_size;
5687 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
5688 			rx_ring->sbq_size = (uint32_t)
5689 			    (rx_ring->sbq_len * sizeof (uint64_t));
5690 			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
5691 			rx_ring->type = RX_Q;
5692 
5693 			QL_PRINT(DBG_GLD,
5694 			    ("%s(%d)Allocating rss completion queue %d "
5695 			    "on cpu %d\n", __func__, qlge->instance,
5696 			    rx_ring->cq_id, rx_ring->cpu));
5697 		} else {
5698 			/*
5699 			 * Outbound queue handles outbound completions only
5700 			 */
5701 			/* outbound cq is same size as tx_ring it services. */
5702 			rx_ring->cq_len = qlge->tx_ring_size;
5703 			rx_ring->cq_size = (uint32_t)
5704 			    (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
5705 			rx_ring->lbq_len = 0;
5706 			rx_ring->lbq_size = 0;
5707 			rx_ring->lbq_buf_size = 0;
5708 			rx_ring->sbq_len = 0;
5709 			rx_ring->sbq_size = 0;
5710 			rx_ring->sbq_buf_size = 0;
5711 			rx_ring->type = TX_Q;
5712 
5713 			QL_PRINT(DBG_GLD,
5714 			    ("%s(%d)Allocating TX completion queue %d on"
5715 			    " cpu %d\n", __func__, qlge->instance,
5716 			    rx_ring->cq_id, rx_ring->cpu));
5717 		}
5718 	}
5719 
5720 	return (DDI_SUCCESS);
5721 }
5722 
5723 static int
5724 ql_start_rx_ring(qlge_t *qlge, struct rx_ring *rx_ring)
5725 {
5726 	struct cqicb_t *cqicb = (struct cqicb_t *)rx_ring->cqicb_dma.vaddr;
5727 	void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
5728 	    (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
5729 	/* first shadow area is used by wqicb's host copy of consumer index */
5730 	    + sizeof (uint64_t);
5731 	uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
5732 	    (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
5733 	    + sizeof (uint64_t);
5734 	/* lrg/sml bufq pointers */
5735 	uint8_t *buf_q_base_reg =
5736 	    (uint8_t *)qlge->buf_q_ptr_base_addr_dma_attr.vaddr +
5737 	    (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
5738 	uint64_t buf_q_base_reg_dma =
5739 	    qlge->buf_q_ptr_base_addr_dma_attr.dma_addr +
5740 	    (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
5741 	caddr_t doorbell_area =
5742 	    qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * (128 + rx_ring->cq_id));
5743 	int err = 0;
5744 	uint16_t bq_len;
5745 	uint64_t tmp;
5746 	uint64_t *base_indirect_ptr;
5747 	int page_entries;
5748 
5749 	/* Set up the shadow registers for this ring. */
5750 	rx_ring->prod_idx_sh_reg = shadow_reg;
5751 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
5752 
5753 	rx_ring->lbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
5754 	rx_ring->lbq_base_indirect_dma = buf_q_base_reg_dma;
5755 
5756 	QL_PRINT(DBG_INIT, ("%s rx ring(%d): prod_idx virtual addr = 0x%lx,"
5757 	    " phys_addr 0x%lx\n", __func__, rx_ring->cq_id,
5758 	    rx_ring->prod_idx_sh_reg, rx_ring->prod_idx_sh_reg_dma));
5759 
5760 	buf_q_base_reg += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
5761 	buf_q_base_reg_dma += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
5762 	rx_ring->sbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
5763 	rx_ring->sbq_base_indirect_dma = buf_q_base_reg_dma;
5764 
5765 	/* PCI doorbell mem area + 0x00 for consumer index register */
5766 	rx_ring->cnsmr_idx_db_reg = (uint32_t *)(void *)doorbell_area;
5767 	rx_ring->cnsmr_idx = 0;
5768 	*rx_ring->prod_idx_sh_reg = 0;
5769 	rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
5770 
5771 	/* PCI doorbell mem area + 0x04 for valid register */
5772 	rx_ring->valid_db_reg = (uint32_t *)(void *)
5773 	    ((uint8_t *)(void *)doorbell_area + 0x04);
5774 
5775 	/* PCI doorbell mem area + 0x18 for large buffer consumer */
5776 	rx_ring->lbq_prod_idx_db_reg = (uint32_t *)(void *)
5777 	    ((uint8_t *)(void *)doorbell_area + 0x18);
5778 
5779 	/* PCI doorbell mem area + 0x1c */
5780 	rx_ring->sbq_prod_idx_db_reg = (uint32_t *)(void *)
5781 	    ((uint8_t *)(void *)doorbell_area + 0x1c);
5782 
5783 	bzero((void *)cqicb, sizeof (*cqicb));
5784 
5785 	cqicb->msix_vect = (uint8_t)rx_ring->irq;
5786 
5787 	bq_len = (uint16_t)((rx_ring->cq_len == 65536) ?
5788 	    (uint16_t)0 : (uint16_t)rx_ring->cq_len);
5789 	cqicb->len = (uint16_t)cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
5790 
5791 	cqicb->cq_base_addr_lo =
5792 	    cpu_to_le32(LS_64BITS(rx_ring->cq_dma.dma_addr));
5793 	cqicb->cq_base_addr_hi =
5794 	    cpu_to_le32(MS_64BITS(rx_ring->cq_dma.dma_addr));
5795 
5796 	cqicb->prod_idx_addr_lo =
5797 	    cpu_to_le32(LS_64BITS(rx_ring->prod_idx_sh_reg_dma));
5798 	cqicb->prod_idx_addr_hi =
5799 	    cpu_to_le32(MS_64BITS(rx_ring->prod_idx_sh_reg_dma));
5800 
5801 	/*
5802 	 * Set up the control block load flags.
5803 	 */
5804 	cqicb->flags = FLAGS_LC | /* Load queue base address */
5805 	    FLAGS_LV | /* Load MSI-X vector */
5806 	    FLAGS_LI;  /* Load irq delay values */
5807 	if (rx_ring->lbq_len) {
5808 		/* Load lbq values */
5809 		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LL);
5810 		tmp = (uint64_t)rx_ring->lbq_dma.dma_addr;
5811 		base_indirect_ptr = (uint64_t *)rx_ring->lbq_base_indirect;
5812 		page_entries = 0;
5813 		do {
5814 			*base_indirect_ptr = cpu_to_le64(tmp);
5815 			tmp += VM_PAGE_SIZE;
5816 			base_indirect_ptr++;
5817 			page_entries++;
5818 		} while (page_entries < (int)(
5819 		    ((rx_ring->lbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
5820 
5821 		cqicb->lbq_addr_lo =
5822 		    cpu_to_le32(LS_64BITS(rx_ring->lbq_base_indirect_dma));
5823 		cqicb->lbq_addr_hi =
5824 		    cpu_to_le32(MS_64BITS(rx_ring->lbq_base_indirect_dma));
5825 		bq_len = (uint16_t)((rx_ring->lbq_buf_size == 65536) ?
5826 		    (uint16_t)0 : (uint16_t)rx_ring->lbq_buf_size);
5827 		cqicb->lbq_buf_size = (uint16_t)cpu_to_le16(bq_len);
5828 		bq_len = (uint16_t)((rx_ring->lbq_len == 65536) ? (uint16_t)0 :
5829 		    (uint16_t)rx_ring->lbq_len);
5830 		cqicb->lbq_len = (uint16_t)cpu_to_le16(bq_len);
5831 		rx_ring->lbq_prod_idx = 0;
5832 		rx_ring->lbq_curr_idx = 0;
5833 	}
5834 	if (rx_ring->sbq_len) {
5835 		/* Load sbq values */
5836 		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LS);
5837 		tmp = (uint64_t)rx_ring->sbq_dma.dma_addr;
5838 		base_indirect_ptr = (uint64_t *)rx_ring->sbq_base_indirect;
5839 		page_entries = 0;
5840 
5841 		do {
5842 			*base_indirect_ptr = cpu_to_le64(tmp);
5843 			tmp += VM_PAGE_SIZE;
5844 			base_indirect_ptr++;
5845 			page_entries++;
5846 		} while (page_entries < (uint32_t)
5847 		    (((rx_ring->sbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
5848 
5849 		cqicb->sbq_addr_lo =
5850 		    cpu_to_le32(LS_64BITS(rx_ring->sbq_base_indirect_dma));
5851 		cqicb->sbq_addr_hi =
5852 		    cpu_to_le32(MS_64BITS(rx_ring->sbq_base_indirect_dma));
5853 		cqicb->sbq_buf_size = (uint16_t)
5854 		    cpu_to_le16((uint16_t)(rx_ring->sbq_buf_size/2));
5855 		bq_len = (uint16_t)((rx_ring->sbq_len == 65536) ?
5856 		    (uint16_t)0 : (uint16_t)rx_ring->sbq_len);
5857 		cqicb->sbq_len = (uint16_t)cpu_to_le16(bq_len);
5858 		rx_ring->sbq_prod_idx = 0;
5859 		rx_ring->sbq_curr_idx = 0;
5860 	}
5861 	switch (rx_ring->type) {
5862 	case TX_Q:
5863 		cqicb->irq_delay = (uint16_t)
5864 		    cpu_to_le16(qlge->tx_coalesce_usecs);
5865 		cqicb->pkt_delay = (uint16_t)
5866 		    cpu_to_le16(qlge->tx_max_coalesced_frames);
5867 		break;
5868 
5869 	case DEFAULT_Q:
5870 		cqicb->irq_delay = 0;
5871 		cqicb->pkt_delay = 0;
5872 		break;
5873 
5874 	case RX_Q:
5875 		/*
5876 		 * Inbound completion handling rx_rings run in
5877 		 * separate NAPI contexts.
5878 		 */
5879 		cqicb->irq_delay = (uint16_t)
5880 		    cpu_to_le16(qlge->rx_coalesce_usecs);
5881 		cqicb->pkt_delay = (uint16_t)
5882 		    cpu_to_le16(qlge->rx_max_coalesced_frames);
5883 		break;
5884 	default:
5885 		cmn_err(CE_WARN, "Invalid rx_ring->type = %d.",
5886 		    rx_ring->type);
5887 	}
5888 	QL_PRINT(DBG_INIT, ("Initializing rx completion queue %d.\n",
5889 	    rx_ring->cq_id));
5890 	/* QL_DUMP_CQICB(qlge, cqicb); */
5891 	err = ql_write_cfg(qlge, CFG_LCQ, rx_ring->cqicb_dma.dma_addr,
5892 	    rx_ring->cq_id);
5893 	if (err) {
5894 		cmn_err(CE_WARN, "Failed to load CQICB.");
5895 		return (err);
5896 	}
5897 
5898 	rx_ring->rx_packets_dropped_no_buffer = 0;
5899 	rx_ring->rx_pkt_dropped_mac_unenabled = 0;
5900 	rx_ring->rx_failed_sbq_allocs = 0;
5901 	rx_ring->rx_failed_lbq_allocs = 0;
5902 	rx_ring->rx_packets = 0;
5903 	rx_ring->rx_bytes = 0;
5904 	rx_ring->frame_too_long = 0;
5905 	rx_ring->frame_too_short = 0;
5906 	rx_ring->fcs_err = 0;
5907 
5908 	return (err);
5909 }
5910 
5911 /*
5912  * start RSS
5913  */
5914 static int
5915 ql_start_rss(qlge_t *qlge)
5916 {
5917 	struct ricb *ricb = (struct ricb *)qlge->ricb_dma.vaddr;
5918 	int status = 0;
5919 	int i;
5920 	uint8_t *hash_id = (uint8_t *)ricb->hash_cq_id;
5921 
5922 	bzero((void *)ricb, sizeof (*ricb));
5923 
5924 	ricb->base_cq = RSS_L4K;
5925 	ricb->flags =
5926 	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
5927 	    RSS_RT6);
5928 	ricb->mask = (uint16_t)cpu_to_le16(RSS_HASH_CQ_ID_MAX - 1);
5929 
5930 	/*
5931 	 * Fill out the Indirection Table.
5932 	 */
5933 	for (i = 0; i < RSS_HASH_CQ_ID_MAX; i++)
5934 		hash_id[i] = (uint8_t)(i & (qlge->rss_ring_count - 1));
5935 
5936 	(void) memcpy(&ricb->ipv6_hash_key[0], key_data, 40);
5937 	(void) memcpy(&ricb->ipv4_hash_key[0], key_data, 16);
5938 
5939 	QL_PRINT(DBG_INIT, ("Initializing RSS.\n"));
5940 
5941 	status = ql_write_cfg(qlge, CFG_LR, qlge->ricb_dma.dma_addr, 0);
5942 	if (status) {
5943 		cmn_err(CE_WARN, "Failed to load RICB.");
5944 		return (status);
5945 	}
5946 
5947 	return (status);
5948 }
5949 
5950 /*
5951  * load a tx ring control block to hw and start this ring
5952  */
5953 static int
5954 ql_start_tx_ring(qlge_t *qlge, struct tx_ring *tx_ring)
5955 {
5956 	struct wqicb_t *wqicb = (struct wqicb_t *)tx_ring->wqicb_dma.vaddr;
5957 	caddr_t doorbell_area =
5958 	    qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * tx_ring->wq_id);
5959 	void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
5960 	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
5961 	uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
5962 	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
5963 	int err = 0;
5964 
5965 	/*
5966 	 * Assign doorbell registers for this tx_ring.
5967 	 */
5968 
5969 	/* TX PCI doorbell mem area for tx producer index */
5970 	tx_ring->prod_idx_db_reg = (uint32_t *)(void *)doorbell_area;
5971 	tx_ring->prod_idx = 0;
5972 	/* TX PCI doorbell mem area + 0x04 */
5973 	tx_ring->valid_db_reg = (uint32_t *)(void *)
5974 	    ((uint8_t *)(void *)doorbell_area + 0x04);
5975 
5976 	/*
5977 	 * Assign shadow registers for this tx_ring.
5978 	 */
5979 	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
5980 	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
5981 	*tx_ring->cnsmr_idx_sh_reg = 0;
5982 
5983 	QL_PRINT(DBG_INIT, ("%s tx ring(%d): cnsmr_idx virtual addr = 0x%lx,"
5984 	    " phys_addr 0x%lx\n",
5985 	    __func__, tx_ring->wq_id, tx_ring->cnsmr_idx_sh_reg,
5986 	    tx_ring->cnsmr_idx_sh_reg_dma));
5987 
5988 	wqicb->len =
5989 	    (uint16_t)cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
5990 	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
5991 	    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
5992 	wqicb->cq_id_rss = (uint16_t)cpu_to_le16(tx_ring->cq_id);
5993 	wqicb->rid = 0;
5994 	wqicb->wq_addr_lo = cpu_to_le32(LS_64BITS(tx_ring->wq_dma.dma_addr));
5995 	wqicb->wq_addr_hi = cpu_to_le32(MS_64BITS(tx_ring->wq_dma.dma_addr));
5996 	wqicb->cnsmr_idx_addr_lo =
5997 	    cpu_to_le32(LS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
5998 	wqicb->cnsmr_idx_addr_hi =
5999 	    cpu_to_le32(MS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
6000 
6001 	ql_init_tx_ring(tx_ring);
6002 	/* QL_DUMP_WQICB(qlge, wqicb); */
6003 	err = ql_write_cfg(qlge, CFG_LRQ, tx_ring->wqicb_dma.dma_addr,
6004 	    tx_ring->wq_id);
6005 
6006 	if (err) {
6007 		cmn_err(CE_WARN, "Failed to load WQICB.");
6008 		return (err);
6009 	}
6010 	return (err);
6011 }
6012 
6013 /*
6014  * Set up a MAC, multicast or VLAN address for the
6015  * inbound frame matching.
6016  */
6017 int
6018 ql_set_mac_addr_reg(qlge_t *qlge, uint8_t *addr, uint32_t type,
6019     uint16_t index)
6020 {
6021 	uint32_t offset = 0;
6022 	int status = DDI_SUCCESS;
6023 
6024 	switch (type) {
6025 	case MAC_ADDR_TYPE_MULTI_MAC:
6026 	case MAC_ADDR_TYPE_CAM_MAC: {
6027 		uint32_t cam_output;
6028 		uint32_t upper = (addr[0] << 8) | addr[1];
6029 		uint32_t lower =
6030 		    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
6031 		    (addr[5]);
6032 
6033 		QL_PRINT(DBG_INIT, ("Adding %s ", (type ==
6034 		    MAC_ADDR_TYPE_MULTI_MAC) ?
6035 		    "MULTICAST" : "UNICAST"));
6036 		QL_PRINT(DBG_INIT,
6037 		    ("addr %02x %02x %02x %02x %02x %02x at index %d in "
6038 		    "the CAM.\n",
6039 		    addr[0], addr[1], addr[2], addr[3], addr[4],
6040 		    addr[5], index));
6041 
6042 		status = ql_wait_reg_rdy(qlge,
6043 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6044 		if (status)
6045 			goto exit;
6046 		/* offset 0 - lower 32 bits of the MAC address */
6047 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6048 		    (offset++) |
6049 		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
6050 		    type);	/* type */
6051 		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, lower);
6052 		status = ql_wait_reg_rdy(qlge,
6053 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6054 		if (status)
6055 			goto exit;
6056 		/* offset 1 - upper 16 bits of the MAC address */
6057 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6058 		    (offset++) |
6059 		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
6060 		    type);	/* type */
6061 		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, upper);
6062 		status = ql_wait_reg_rdy(qlge,
6063 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6064 		if (status)
6065 			goto exit;
6066 		/* offset 2 - CQ ID associated with this MAC address */
6067 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6068 		    (offset) | (index << MAC_ADDR_IDX_SHIFT) |	/* index */
6069 		    type);	/* type */
6070 		/*
6071 		 * This field should also include the queue id
6072 		 * and possibly the function id.  Right now we hardcode
6073 		 * the route field to NIC core.
6074 		 */
6075 		if (type == MAC_ADDR_TYPE_CAM_MAC) {
6076 			cam_output = (CAM_OUT_ROUTE_NIC |
6077 			    (qlge->func_number << CAM_OUT_FUNC_SHIFT) |
6078 			    (0 <<
6079 			    CAM_OUT_CQ_ID_SHIFT));
6080 
6081 			/* route to NIC core */
6082 			ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA,
6083 			    cam_output);
6084 			}
6085 		break;
6086 		}
6087 	default:
6088 		cmn_err(CE_WARN,
6089 		    "Address type %d not yet supported.", type);
6090 		status = DDI_FAILURE;
6091 	}
6092 exit:
6093 	return (status);
6094 }
6095 
6096 /*
6097  * The NIC function for this chip has 16 routing indexes.  Each one can be used
6098  * to route different frame types to various inbound queues.  We send broadcast
6099  * multicast/error frames to the default queue for slow handling,
6100  * and CAM hit/RSS frames to the fast handling queues.
6101  */
6102 static int
6103 ql_set_routing_reg(qlge_t *qlge, uint32_t index, uint32_t mask, int enable)
6104 {
6105 	int status;
6106 	uint32_t value = 0;
6107 
6108 	QL_PRINT(DBG_INIT,
6109 	    ("%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
6110 	    (enable ? "Adding" : "Removing"),
6111 	    ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
6112 	    ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
6113 	    ((index ==
6114 	    RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
6115 	    ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
6116 	    ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
6117 	    ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
6118 	    ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
6119 	    ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
6120 	    ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
6121 	    ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
6122 	    ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
6123 	    ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
6124 	    ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
6125 	    ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
6126 	    ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
6127 	    ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
6128 	    (enable ? "to" : "from")));
6129 
6130 	switch (mask) {
6131 	case RT_IDX_CAM_HIT:
6132 		value = RT_IDX_DST_CAM_Q | /* dest */
6133 		    RT_IDX_TYPE_NICQ | /* type */
6134 		    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */
6135 		break;
6136 
6137 	case RT_IDX_VALID: /* Promiscuous Mode frames. */
6138 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6139 		    RT_IDX_TYPE_NICQ |	/* type */
6140 		    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */
6141 		break;
6142 
6143 	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
6144 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6145 		    RT_IDX_TYPE_NICQ |	/* type */
6146 		    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
6147 		break;
6148 
6149 	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
6150 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6151 		    RT_IDX_TYPE_NICQ |	/* type */
6152 		    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */
6153 		break;
6154 
6155 	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
6156 		value = RT_IDX_DST_CAM_Q |	/* dest */
6157 		    RT_IDX_TYPE_NICQ |	/* type */
6158 		    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */
6159 		break;
6160 
6161 	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
6162 		value = RT_IDX_DST_CAM_Q |	/* dest */
6163 		    RT_IDX_TYPE_NICQ |	/* type */
6164 		    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6165 		break;
6166 
6167 	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
6168 		value = RT_IDX_DST_RSS |	/* dest */
6169 		    RT_IDX_TYPE_NICQ |	/* type */
6170 		    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6171 		break;
6172 
6173 	case 0:	/* Clear the E-bit on an entry. */
6174 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6175 		    RT_IDX_TYPE_NICQ |	/* type */
6176 		    (index << RT_IDX_IDX_SHIFT); /* index */
6177 		break;
6178 
6179 	default:
6180 		cmn_err(CE_WARN, "Mask type %d not yet supported.",
6181 		    mask);
6182 		status = -EPERM;
6183 		goto exit;
6184 	}
6185 
6186 	if (value != 0) {
6187 		status = ql_wait_reg_rdy(qlge, REG_ROUTING_INDEX, RT_IDX_MW, 0);
6188 		if (status)
6189 			goto exit;
6190 		value |= (enable ? RT_IDX_E : 0);
6191 		ql_write_reg(qlge, REG_ROUTING_INDEX, value);
6192 		ql_write_reg(qlge, REG_ROUTING_DATA, enable ? mask : 0);
6193 	}
6194 
6195 exit:
6196 	return (status);
6197 }
6198 
6199 /*
6200  * Clear all the entries in the routing table.
6201  * Caller must get semaphore in advance.
6202  */
6203 
6204 static int
6205 ql_stop_routing(qlge_t *qlge)
6206 {
6207 	int status = 0;
6208 	int i;
6209 	/* Clear all the entries in the routing table. */
6210 	for (i = 0; i < 16; i++) {
6211 		status = ql_set_routing_reg(qlge, i, 0, 0);
6212 		if (status) {
6213 			cmn_err(CE_WARN, "Stop routing failed. ");
6214 		}
6215 	}
6216 	return (status);
6217 }
6218 
6219 /* Initialize the frame-to-queue routing. */
6220 static int
6221 ql_route_initialize(qlge_t *qlge)
6222 {
6223 	int status = 0;
6224 
6225 	status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
6226 	if (status != DDI_SUCCESS)
6227 		return (status);
6228 
6229 	/* Clear all the entries in the routing table. */
6230 	status = ql_stop_routing(qlge);
6231 	if (status) {
6232 		goto exit;
6233 	}
6234 	status = ql_set_routing_reg(qlge, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
6235 	if (status) {
6236 		cmn_err(CE_WARN,
6237 		    "Failed to init routing register for broadcast packets.");
6238 		goto exit;
6239 	}
6240 	/*
6241 	 * If we have more than one inbound queue, then turn on RSS in the
6242 	 * routing block.
6243 	 */
6244 	if (qlge->rss_ring_count > 1) {
6245 		status = ql_set_routing_reg(qlge, RT_IDX_RSS_MATCH_SLOT,
6246 		    RT_IDX_RSS_MATCH, 1);
6247 		if (status) {
6248 			cmn_err(CE_WARN,
6249 			    "Failed to init routing register for MATCH RSS "
6250 			    "packets.");
6251 			goto exit;
6252 		}
6253 	}
6254 
6255 	status = ql_set_routing_reg(qlge, RT_IDX_CAM_HIT_SLOT,
6256 	    RT_IDX_CAM_HIT, 1);
6257 	if (status) {
6258 		cmn_err(CE_WARN,
6259 		    "Failed to init routing register for CAM packets.");
6260 		goto exit;
6261 	}
6262 
6263 	status = ql_set_routing_reg(qlge, RT_IDX_MCAST_MATCH_SLOT,
6264 	    RT_IDX_MCAST_MATCH, 1);
6265 	if (status) {
6266 		cmn_err(CE_WARN,
6267 		    "Failed to init routing register for Multicast "
6268 		    "packets.");
6269 	}
6270 
6271 exit:
6272 	ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
6273 	return (status);
6274 }
6275 
6276 /*
6277  * Initialize hardware
6278  */
6279 static int
6280 ql_device_initialize(qlge_t *qlge)
6281 {
6282 	uint32_t value, mask, required_max_frame_size;
6283 	int i;
6284 	int status = 0;
6285 	uint16_t pause = PAUSE_MODE_DISABLED;
6286 	boolean_t update_port_config = B_FALSE;
6287 	/*
6288 	 * Set up the System register to halt on errors.
6289 	 */
6290 	value = SYS_EFE | SYS_FAE;
6291 	mask = value << 16;
6292 	ql_write_reg(qlge, REG_SYSTEM, mask | value);
6293 
6294 	/* Set the default queue. */
6295 	value = NIC_RCV_CFG_DFQ;
6296 	mask = NIC_RCV_CFG_DFQ_MASK;
6297 
6298 	ql_write_reg(qlge, REG_NIC_RECEIVE_CONFIGURATION, mask | value);
6299 
6300 	/* Enable the MPI interrupt. */
6301 	ql_write_reg(qlge, REG_INTERRUPT_MASK, (INTR_MASK_PI << 16)
6302 	    | INTR_MASK_PI);
6303 	/* Enable the function, set pagesize, enable error checking. */
6304 	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
6305 	    FSC_EC | FSC_VM_PAGE_4K | FSC_DBRST_1024;
6306 	/* Set/clear header splitting. */
6307 	if (CFG_IST(qlge, CFG_ENABLE_SPLIT_HEADER)) {
6308 		value |= FSC_SH;
6309 		ql_write_reg(qlge, REG_SPLIT_HEADER, SMALL_BUFFER_SIZE);
6310 	}
6311 	mask = FSC_VM_PAGESIZE_MASK |
6312 	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
6313 	ql_write_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL, mask | value);
6314 	/*
6315 	 * check current port max frame size, if different from OS setting,
6316 	 * then we need to change
6317 	 */
6318 	required_max_frame_size =
6319 	    (qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE;
6320 
6321 	if (ql_get_port_cfg(qlge) == DDI_SUCCESS) {
6322 		/* if correct frame size but different from required size */
6323 		if (qlge->port_cfg_info.max_frame_size !=
6324 		    required_max_frame_size) {
6325 			QL_PRINT(DBG_MBX,
6326 			    ("update frame size, current %d, new %d\n",
6327 			    qlge->port_cfg_info.max_frame_size,
6328 			    required_max_frame_size));
6329 			qlge->port_cfg_info.max_frame_size =
6330 			    required_max_frame_size;
6331 			update_port_config = B_TRUE;
6332 		}
6333 		if (qlge->port_cfg_info.link_cfg & STD_PAUSE)
6334 			pause = PAUSE_MODE_STANDARD;
6335 		else if (qlge->port_cfg_info.link_cfg & PP_PAUSE)
6336 			pause = PAUSE_MODE_PER_PRIORITY;
6337 		if (pause != qlge->pause) {
6338 			update_port_config = B_TRUE;
6339 		}
6340 		/*
6341 		 * Always update port config for now to work around
6342 		 * a hardware bug
6343 		 */
6344 		update_port_config = B_TRUE;
6345 
6346 		/* if need to update port configuration */
6347 		if (update_port_config)
6348 			(void) ql_set_port_cfg(qlge);
6349 	} else
6350 		cmn_err(CE_WARN, "ql_get_port_cfg failed");
6351 
6352 	/* Start up the rx queues. */
6353 	for (i = 0; i < qlge->rx_ring_count; i++) {
6354 		status = ql_start_rx_ring(qlge, &qlge->rx_ring[i]);
6355 		if (status) {
6356 			cmn_err(CE_WARN,
6357 			    "Failed to start rx ring[%d]", i);
6358 			return (status);
6359 		}
6360 	}
6361 
6362 	/*
6363 	 * If there is more than one inbound completion queue
6364 	 * then download a RICB to configure RSS.
6365 	 */
6366 	if (qlge->rss_ring_count > 1) {
6367 		status = ql_start_rss(qlge);
6368 		if (status) {
6369 			cmn_err(CE_WARN, "Failed to start RSS.");
6370 			return (status);
6371 		}
6372 	}
6373 
6374 	/* Start up the tx queues. */
6375 	for (i = 0; i < qlge->tx_ring_count; i++) {
6376 		status = ql_start_tx_ring(qlge, &qlge->tx_ring[i]);
6377 		if (status) {
6378 			cmn_err(CE_WARN,
6379 			    "Failed to start tx ring[%d]", i);
6380 			return (status);
6381 		}
6382 	}
6383 	qlge->selected_tx_ring = 0;
6384 	/* Set the frame routing filter. */
6385 	status = ql_route_initialize(qlge);
6386 	if (status) {
6387 		cmn_err(CE_WARN,
6388 		    "Failed to init CAM/Routing tables.");
6389 		return (status);
6390 	}
6391 
6392 	return (status);
6393 }
6394 
6395 /*
6396  * Issue soft reset to chip.
6397  */
6398 static int
6399 ql_asic_reset(qlge_t *qlge)
6400 {
6401 	uint32_t value;
6402 	int max_wait_time = 3;
6403 	int status = DDI_SUCCESS;
6404 
6405 	ql_write_reg(qlge, REG_RESET_FAILOVER, FUNCTION_RESET_MASK
6406 	    |FUNCTION_RESET);
6407 
6408 	max_wait_time = 3;
6409 	do {
6410 		value =  ql_read_reg(qlge, REG_RESET_FAILOVER);
6411 		if ((value & FUNCTION_RESET) == 0)
6412 			break;
6413 		qlge_delay(QL_ONE_SEC_DELAY);
6414 	} while ((--max_wait_time));
6415 
6416 	if (max_wait_time == 0) {
6417 		cmn_err(CE_WARN,
6418 		    "TIMEOUT!!! errored out of resetting the chip!");
6419 		status = DDI_FAILURE;
6420 	}
6421 
6422 	return (status);
6423 }
6424 
6425 /*
6426  * If there are more than MIN_BUFFERS_ARM_COUNT small buffer descriptors in
6427  * its free list, move xMIN_BUFFERS_ARM_COUNT descriptors to its in use list
6428  * to be used by hardware.
6429  */
6430 static void
6431 ql_arm_sbuf(qlge_t *qlge, struct rx_ring *rx_ring)
6432 {
6433 	struct bq_desc *sbq_desc;
6434 	int i;
6435 	uint64_t *sbq_entry = rx_ring->sbq_dma.vaddr;
6436 	uint32_t arm_count;
6437 
6438 	if (rx_ring->sbuf_free_count > rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT)
6439 		arm_count = (rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT);
6440 	else {
6441 		/* Adjust to a multiple of 16 */
6442 		arm_count = (rx_ring->sbuf_free_count / 16) * 16;
6443 #ifdef QLGE_LOAD_UNLOAD
6444 		cmn_err(CE_NOTE, "adjust sbuf arm_count %d\n", arm_count);
6445 #endif
6446 	}
6447 	for (i = 0; i < arm_count; i++) {
6448 		sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
6449 		if (sbq_desc == NULL)
6450 			break;
6451 		/* Arm asic */
6452 		*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
6453 		sbq_entry++;
6454 
6455 		/* link the descriptors to in_use_list */
6456 		ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
6457 		rx_ring->sbq_prod_idx++;
6458 	}
6459 	ql_update_sbq_prod_idx(qlge, rx_ring);
6460 }
6461 
6462 /*
6463  * If there are more than MIN_BUFFERS_ARM_COUNT large buffer descriptors in
6464  * its free list, move xMIN_BUFFERS_ARM_COUNT descriptors to its in use list
6465  * to be used by hardware.
6466  */
6467 static void
6468 ql_arm_lbuf(qlge_t *qlge, struct rx_ring *rx_ring)
6469 {
6470 	struct bq_desc *lbq_desc;
6471 	int i;
6472 	uint64_t *lbq_entry = rx_ring->lbq_dma.vaddr;
6473 	uint32_t arm_count;
6474 
6475 	if (rx_ring->lbuf_free_count > rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT)
6476 		arm_count = (rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT);
6477 	else {
6478 		/* Adjust to a multiple of 16 */
6479 		arm_count = (rx_ring->lbuf_free_count / 16) * 16;
6480 #ifdef QLGE_LOAD_UNLOAD
6481 		cmn_err(CE_NOTE, "adjust lbuf arm_count %d\n", arm_count);
6482 #endif
6483 	}
6484 	for (i = 0; i < arm_count; i++) {
6485 		lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
6486 		if (lbq_desc == NULL)
6487 			break;
6488 		/* Arm asic */
6489 		*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
6490 		lbq_entry++;
6491 
6492 		/* link the descriptors to in_use_list */
6493 		ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
6494 		rx_ring->lbq_prod_idx++;
6495 	}
6496 	ql_update_lbq_prod_idx(qlge, rx_ring);
6497 }
6498 
6499 
6500 /*
6501  * Initializes the adapter by configuring request and response queues,
6502  * allocates and ARMs small and large receive buffers to the
6503  * hardware
6504  */
6505 static int
6506 ql_bringup_adapter(qlge_t *qlge)
6507 {
6508 	int i;
6509 
6510 	if (ql_device_initialize(qlge) != DDI_SUCCESS) {
6511 		cmn_err(CE_WARN, "?%s(%d): ql_device_initialize failed",
6512 		    __func__, qlge->instance);
6513 		goto err_bringup;
6514 	}
6515 	qlge->sequence |= INIT_ADAPTER_UP;
6516 
6517 #ifdef QLGE_TRACK_BUFFER_USAGE
6518 	for (i = 0; i < qlge->rx_ring_count; i++) {
6519 		if (qlge->rx_ring[i].type != TX_Q) {
6520 			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
6521 			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
6522 		}
6523 		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
6524 	}
6525 #endif
6526 	/* Arm buffers */
6527 	for (i = 0; i < qlge->rx_ring_count; i++) {
6528 		if (qlge->rx_ring[i].type != TX_Q) {
6529 			ql_arm_sbuf(qlge, &qlge->rx_ring[i]);
6530 			ql_arm_lbuf(qlge, &qlge->rx_ring[i]);
6531 		}
6532 	}
6533 
6534 	/* Enable work/request queues */
6535 	for (i = 0; i < qlge->tx_ring_count; i++) {
6536 		if (qlge->tx_ring[i].valid_db_reg)
6537 			ql_write_doorbell_reg(qlge,
6538 			    qlge->tx_ring[i].valid_db_reg,
6539 			    REQ_Q_VALID);
6540 	}
6541 
6542 	/* Enable completion queues */
6543 	for (i = 0; i < qlge->rx_ring_count; i++) {
6544 		if (qlge->rx_ring[i].valid_db_reg)
6545 			ql_write_doorbell_reg(qlge,
6546 			    qlge->rx_ring[i].valid_db_reg,
6547 			    RSP_Q_VALID);
6548 	}
6549 
6550 	for (i = 0; i < qlge->tx_ring_count; i++) {
6551 		mutex_enter(&qlge->tx_ring[i].tx_lock);
6552 		qlge->tx_ring[i].mac_flags = QL_MAC_STARTED;
6553 		mutex_exit(&qlge->tx_ring[i].tx_lock);
6554 	}
6555 
6556 	for (i = 0; i < qlge->rx_ring_count; i++) {
6557 		mutex_enter(&qlge->rx_ring[i].rx_lock);
6558 		qlge->rx_ring[i].mac_flags = QL_MAC_STARTED;
6559 		mutex_exit(&qlge->rx_ring[i].rx_lock);
6560 	}
6561 
6562 	/* This mutex will get re-acquired in enable_completion interrupt */
6563 	mutex_exit(&qlge->hw_mutex);
6564 	/* Traffic can start flowing now */
6565 	ql_enable_all_completion_interrupts(qlge);
6566 	mutex_enter(&qlge->hw_mutex);
6567 
6568 	ql_enable_global_interrupt(qlge);
6569 
6570 	qlge->sequence |= ADAPTER_INIT;
6571 	return (DDI_SUCCESS);
6572 
6573 err_bringup:
6574 	(void) ql_asic_reset(qlge);
6575 	return (DDI_FAILURE);
6576 }
6577 
6578 /*
6579  * Initialize mutexes of each rx/tx rings
6580  */
6581 static int
6582 ql_init_rx_tx_locks(qlge_t *qlge)
6583 {
6584 	struct tx_ring *tx_ring;
6585 	struct rx_ring *rx_ring;
6586 	int i;
6587 
6588 	for (i = 0; i < qlge->tx_ring_count; i++) {
6589 		tx_ring = &qlge->tx_ring[i];
6590 		mutex_init(&tx_ring->tx_lock, NULL, MUTEX_DRIVER,
6591 		    DDI_INTR_PRI(qlge->intr_pri));
6592 	}
6593 
6594 	for (i = 0; i < qlge->rx_ring_count; i++) {
6595 		rx_ring = &qlge->rx_ring[i];
6596 		mutex_init(&rx_ring->rx_lock, NULL, MUTEX_DRIVER,
6597 		    DDI_INTR_PRI(qlge->intr_pri));
6598 		mutex_init(&rx_ring->sbq_lock, NULL, MUTEX_DRIVER,
6599 		    DDI_INTR_PRI(qlge->intr_pri));
6600 		mutex_init(&rx_ring->lbq_lock, NULL, MUTEX_DRIVER,
6601 		    DDI_INTR_PRI(qlge->intr_pri));
6602 	}
6603 
6604 	return (DDI_SUCCESS);
6605 }
6606 
6607 /*
6608  * ql_attach - Driver attach.
6609  */
6610 static int
6611 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
6612 {
6613 	int instance;
6614 	qlge_t *qlge;
6615 	int rval;
6616 	uint16_t w;
6617 	mac_register_t *macp = NULL;
6618 	rval = DDI_FAILURE;
6619 
6620 	/* first get the instance */
6621 	instance = ddi_get_instance(dip);
6622 
6623 	switch (cmd) {
6624 	case DDI_ATTACH:
6625 		/*
6626 		 * Check that hardware is installed in a DMA-capable slot
6627 		 */
6628 		if (ddi_slaveonly(dip) == DDI_SUCCESS) {
6629 			cmn_err(CE_WARN, "?%s(%d): Not installed in a "
6630 			    "DMA-capable slot", ADAPTER_NAME, instance);
6631 			break;
6632 		}
6633 
6634 		/*
6635 		 * No support for high-level interrupts
6636 		 */
6637 		if (ddi_intr_hilevel(dip, 0) != 0) {
6638 			cmn_err(CE_WARN, "?%s(%d): No support for high-level"
6639 			    " intrs", ADAPTER_NAME, instance);
6640 			break;
6641 		}
6642 
6643 		/*
6644 		 * Allocate our per-device-instance structure
6645 		 */
6646 
6647 		qlge = (qlge_t *)kmem_zalloc(sizeof (*qlge), KM_SLEEP);
6648 		ASSERT(qlge != NULL);
6649 
6650 		qlge->sequence |= INIT_SOFTSTATE_ALLOC;
6651 
6652 		qlge->dip = dip;
6653 		qlge->instance = instance;
6654 
6655 		/*
6656 		 * Setup the ISP8x00 registers address mapping to be
6657 		 * accessed by this particular driver.
6658 		 * 0x0   Configuration Space
6659 		 * 0x1   I/O Space
6660 		 * 0x2   1st Memory Space address - Control Register Set
6661 		 * 0x3   2nd Memory Space address - Doorbell Memory Space
6662 		 */
6663 
6664 		w = 2;
6665 		if (ddi_regs_map_setup(dip, w, (caddr_t *)&qlge->iobase, 0,
6666 		    sizeof (dev_reg_t), &ql_dev_acc_attr,
6667 		    &qlge->dev_handle) != DDI_SUCCESS) {
6668 			cmn_err(CE_WARN, "%s(%d): Unable to map device "
6669 			    "registers", ADAPTER_NAME, instance);
6670 			ql_free_resources(dip, qlge);
6671 			break;
6672 		}
6673 
6674 		QL_PRINT(DBG_GLD, ("ql_attach: I/O base = 0x%x\n",
6675 		    qlge->iobase));
6676 
6677 		qlge->sequence |= INIT_REGS_SETUP;
6678 
6679 		/* map Doorbell memory space */
6680 		w = 3;
6681 		if (ddi_regs_map_setup(dip, w,
6682 		    (caddr_t *)&qlge->doorbell_reg_iobase, 0,
6683 		    0x100000 /* sizeof (dev_doorbell_reg_t) */,
6684 		    &ql_dev_acc_attr,
6685 		    &qlge->dev_doorbell_reg_handle) != DDI_SUCCESS) {
6686 			cmn_err(CE_WARN, "%s(%d): Unable to map Doorbell "
6687 			    "registers",
6688 			    ADAPTER_NAME, instance);
6689 			ql_free_resources(dip, qlge);
6690 			break;
6691 		}
6692 
6693 		QL_PRINT(DBG_GLD, ("ql_attach: Doorbell I/O base = 0x%x\n",
6694 		    qlge->doorbell_reg_iobase));
6695 
6696 		qlge->sequence |= INIT_DOORBELL_REGS_SETUP;
6697 
6698 		/*
6699 		 * Allocate a macinfo structure for this instance
6700 		 */
6701 		if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
6702 			cmn_err(CE_WARN, "%s(%d): mac_alloc failed",
6703 			    __func__, instance);
6704 			ql_free_resources(dip, qlge);
6705 			return (NULL);
6706 		}
6707 		/* save adapter status to dip private data */
6708 		ddi_set_driver_private(dip, qlge);
6709 		QL_PRINT(DBG_INIT, ("%s(%d): Allocate macinfo structure done\n",
6710 		    ADAPTER_NAME, instance));
6711 
6712 		qlge->sequence |= INIT_MAC_ALLOC;
6713 
6714 		/*
6715 		 * Attach this instance of the device
6716 		 */
6717 		/* Setup PCI Local Bus Configuration resource. */
6718 		if (pci_config_setup(dip, &qlge->pci_handle) != DDI_SUCCESS) {
6719 			cmn_err(CE_WARN, "%s(%d):Unable to get PCI resources",
6720 			    ADAPTER_NAME, instance);
6721 			ql_free_resources(dip, qlge);
6722 			break;
6723 		}
6724 
6725 		qlge->sequence |= INIT_PCI_CONFIG_SETUP;
6726 
6727 		if (ql_init_instance(qlge) != DDI_SUCCESS) {
6728 			cmn_err(CE_WARN, "%s(%d): Unable to initialize device "
6729 			    "instance", ADAPTER_NAME, instance);
6730 			ql_free_resources(dip, qlge);
6731 			break;
6732 		}
6733 
6734 		/* Setup interrupt vectors */
6735 		if (ql_alloc_irqs(qlge) != DDI_SUCCESS) {
6736 			ql_free_resources(dip, qlge);
6737 			break;
6738 		}
6739 		qlge->sequence |= INIT_INTR_ALLOC;
6740 
6741 		/* Configure queues */
6742 		if (ql_setup_rings(qlge) != DDI_SUCCESS) {
6743 			ql_free_resources(dip, qlge);
6744 			break;
6745 		}
6746 
6747 		qlge->sequence |= INIT_SETUP_RINGS;
6748 		/*
6749 		 * Map queues to interrupt vectors
6750 		 */
6751 		ql_resolve_queues_to_irqs(qlge);
6752 		/*
6753 		 * Add interrupt handlers
6754 		 */
6755 		if (ql_add_intr_handlers(qlge) != DDI_SUCCESS) {
6756 			cmn_err(CE_WARN, "Failed to add interrupt "
6757 			    "handlers");
6758 			ql_free_resources(dip, qlge);
6759 			break;
6760 		}
6761 
6762 		qlge->sequence |= INIT_ADD_INTERRUPT;
6763 		QL_PRINT(DBG_GLD, ("%s(%d): Add interrupt handler done\n",
6764 		    ADAPTER_NAME, instance));
6765 
6766 		/* Initialize mutex, need the interrupt priority */
6767 		(void) ql_init_rx_tx_locks(qlge);
6768 
6769 		qlge->sequence |= INIT_LOCKS_CREATED;
6770 
6771 		/*
6772 		 * Use a soft interrupt to do something that we do not want
6773 		 * to do in regular network functions or with mutexs being held
6774 		 */
6775 		if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_event_intr_hdl,
6776 		    DDI_INTR_SOFTPRI_MIN, ql_mpi_event_work, (caddr_t)qlge)
6777 		    != DDI_SUCCESS) {
6778 			ql_free_resources(dip, qlge);
6779 			break;
6780 		}
6781 
6782 		if (ddi_intr_add_softint(qlge->dip, &qlge->asic_reset_intr_hdl,
6783 		    DDI_INTR_SOFTPRI_MIN, ql_asic_reset_work, (caddr_t)qlge)
6784 		    != DDI_SUCCESS) {
6785 			ql_free_resources(dip, qlge);
6786 			break;
6787 		}
6788 
6789 		if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_reset_intr_hdl,
6790 		    DDI_INTR_SOFTPRI_MIN, ql_mpi_reset_work, (caddr_t)qlge)
6791 		    != DDI_SUCCESS) {
6792 			ql_free_resources(dip, qlge);
6793 			break;
6794 		}
6795 
6796 		qlge->sequence |= INIT_ADD_SOFT_INTERRUPT;
6797 
6798 		/*
6799 		 * mutex to protect the adapter state structure.
6800 		 * initialize mutexes according to the interrupt priority
6801 		 */
6802 		mutex_init(&qlge->gen_mutex, NULL, MUTEX_DRIVER,
6803 		    DDI_INTR_PRI(qlge->intr_pri));
6804 		mutex_init(&qlge->hw_mutex, NULL, MUTEX_DRIVER,
6805 		    DDI_INTR_PRI(qlge->intr_pri));
6806 		mutex_init(&qlge->mbx_mutex, NULL, MUTEX_DRIVER,
6807 		    DDI_INTR_PRI(qlge->intr_pri));
6808 
6809 		/* Mailbox wait and interrupt conditional variable. */
6810 		cv_init(&qlge->cv_mbx_intr, NULL, CV_DRIVER, NULL);
6811 
6812 		qlge->sequence |= INIT_MUTEX;
6813 
6814 		/*
6815 		 * KStats
6816 		 */
6817 		if (ql_init_kstats(qlge) != DDI_SUCCESS) {
6818 			cmn_err(CE_WARN, "%s(%d): KState initialization failed",
6819 			    ADAPTER_NAME, instance);
6820 			ql_free_resources(dip, qlge);
6821 			break;
6822 		}
6823 		qlge->sequence |= INIT_KSTATS;
6824 
6825 		/*
6826 		 * Initialize gld macinfo structure
6827 		 */
6828 		ql_gld3_init(qlge, macp);
6829 
6830 		if (mac_register(macp, &qlge->mh) != DDI_SUCCESS) {
6831 			cmn_err(CE_WARN, "%s(%d): mac_register failed",
6832 			    __func__, instance);
6833 			ql_free_resources(dip, qlge);
6834 			break;
6835 		}
6836 		qlge->sequence |= INIT_MAC_REGISTERED;
6837 		QL_PRINT(DBG_GLD, ("%s(%d): mac_register done\n",
6838 		    ADAPTER_NAME, instance));
6839 
6840 		mac_free(macp);
6841 		macp = NULL;
6842 
6843 		qlge->mac_flags = QL_MAC_ATTACHED;
6844 
6845 		/*
6846 		 * Allocate memory resources
6847 		 */
6848 		if (ql_alloc_mem_resources(qlge) != DDI_SUCCESS) {
6849 			cmn_err(CE_WARN, "%s(%d): memory allocation failed",
6850 			    __func__, qlge->instance);
6851 			ql_free_mem_resources(qlge);
6852 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
6853 			return (DDI_FAILURE);
6854 		}
6855 		qlge->sequence |= INIT_MEMORY_ALLOC;
6856 
6857 		ddi_report_dev(dip);
6858 
6859 		rval = DDI_SUCCESS;
6860 	break;
6861 /*
6862  * DDI_RESUME
6863  * When called  with  cmd  set  to  DDI_RESUME,  attach()  must
6864  * restore  the hardware state of a device (power may have been
6865  * removed from the device), allow  pending  requests  to  con-
6866  * tinue,  and  service  new requests. In this case, the driver
6867  * must not  make  any  assumptions  about  the  state  of  the
6868  * hardware,  but  must  restore the state of the device except
6869  * for the power level of components.
6870  *
6871  */
6872 	case DDI_RESUME:
6873 
6874 		if ((qlge = (qlge_t *)QL_GET_DEV(dip)) == NULL)
6875 			return (DDI_FAILURE);
6876 
6877 		QL_PRINT(DBG_GLD, ("%s(%d)-DDI_RESUME\n",
6878 		    __func__, qlge->instance));
6879 
6880 		mutex_enter(&qlge->gen_mutex);
6881 		rval = ql_do_start(qlge);
6882 		mutex_exit(&qlge->gen_mutex);
6883 		break;
6884 
6885 	default:
6886 		break;
6887 	}
6888 	return (rval);
6889 }
6890 
6891 /*
6892  * Unbind all pending tx dma handles during driver bring down
6893  */
6894 static void
6895 ql_unbind_pending_tx_dma_handle(struct tx_ring *tx_ring)
6896 {
6897 	struct tx_ring_desc *tx_ring_desc;
6898 	int i, j;
6899 
6900 	if (tx_ring->wq_desc) {
6901 		tx_ring_desc = tx_ring->wq_desc;
6902 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
6903 			for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
6904 				if (tx_ring_desc->tx_dma_handle[j]) {
6905 					(void) ddi_dma_unbind_handle(
6906 					    tx_ring_desc->tx_dma_handle[j]);
6907 				}
6908 			}
6909 			tx_ring_desc->tx_dma_handle_used = 0;
6910 		} /* end of for loop */
6911 	}
6912 }
6913 /*
6914  * Wait for all the packets sent to the chip to finish transmission
6915  * to prevent buffers to be unmapped before or during a transmit operation
6916  */
6917 static int
6918 ql_wait_tx_quiesce(qlge_t *qlge)
6919 {
6920 	int count = MAX_TX_WAIT_COUNT, i;
6921 	int rings_done;
6922 	volatile struct tx_ring *tx_ring;
6923 	uint32_t consumer_idx;
6924 	uint32_t producer_idx;
6925 	uint32_t temp;
6926 	int done = 0;
6927 	int rval = DDI_FAILURE;
6928 
6929 	while (!done) {
6930 		rings_done = 0;
6931 
6932 		for (i = 0; i < qlge->tx_ring_count; i++) {
6933 			tx_ring = &qlge->tx_ring[i];
6934 			temp = ql_read_doorbell_reg(qlge,
6935 			    tx_ring->prod_idx_db_reg);
6936 			producer_idx = temp & 0x0000ffff;
6937 			consumer_idx = (temp >> 16);
6938 
6939 			/*
6940 			 * Get the pending iocb count, ones which have not been
6941 			 * pulled down by the chip
6942 			 */
6943 			if (producer_idx >= consumer_idx)
6944 				temp = (producer_idx - consumer_idx);
6945 			else
6946 				temp = (tx_ring->wq_len - consumer_idx) +
6947 				    producer_idx;
6948 
6949 			if ((tx_ring->tx_free_count + temp) >= tx_ring->wq_len)
6950 				rings_done++;
6951 			else {
6952 				done = 1;
6953 				break;
6954 			}
6955 		}
6956 
6957 		/* If all the rings are done */
6958 		if (rings_done >= qlge->tx_ring_count) {
6959 #ifdef QLGE_LOAD_UNLOAD
6960 			cmn_err(CE_NOTE, "%s(%d) done successfully \n",
6961 			    __func__, qlge->instance);
6962 #endif
6963 			rval = DDI_SUCCESS;
6964 			break;
6965 		}
6966 
6967 		qlge_delay(100);
6968 
6969 		count--;
6970 		if (!count) {
6971 
6972 			count = MAX_TX_WAIT_COUNT;
6973 #ifdef QLGE_LOAD_UNLOAD
6974 			volatile struct rx_ring *rx_ring;
6975 			cmn_err(CE_NOTE, "%s(%d): Waiting for %d pending"
6976 			    " Transmits on queue %d to complete .\n",
6977 			    __func__, qlge->instance,
6978 			    (qlge->tx_ring[i].wq_len -
6979 			    qlge->tx_ring[i].tx_free_count),
6980 			    i);
6981 
6982 			rx_ring = &qlge->rx_ring[i+1];
6983 			temp = ql_read_doorbell_reg(qlge,
6984 			    rx_ring->cnsmr_idx_db_reg);
6985 			consumer_idx = temp & 0x0000ffff;
6986 			producer_idx = (temp >> 16);
6987 			cmn_err(CE_NOTE, "%s(%d): Transmit completion queue %d,"
6988 			    " Producer %d, Consumer %d\n",
6989 			    __func__, qlge->instance,
6990 			    i+1,
6991 			    producer_idx, consumer_idx);
6992 
6993 			temp = ql_read_doorbell_reg(qlge,
6994 			    tx_ring->prod_idx_db_reg);
6995 			producer_idx = temp & 0x0000ffff;
6996 			consumer_idx = (temp >> 16);
6997 			cmn_err(CE_NOTE, "%s(%d): Transmit request queue %d,"
6998 			    " Producer %d, Consumer %d\n",
6999 			    __func__, qlge->instance, i,
7000 			    producer_idx, consumer_idx);
7001 #endif
7002 
7003 			/* For now move on */
7004 			break;
7005 		}
7006 	}
7007 	/* Stop the request queue */
7008 	mutex_enter(&qlge->hw_mutex);
7009 	for (i = 0; i < qlge->tx_ring_count; i++) {
7010 		if (qlge->tx_ring[i].valid_db_reg) {
7011 			ql_write_doorbell_reg(qlge,
7012 			    qlge->tx_ring[i].valid_db_reg, 0);
7013 		}
7014 	}
7015 	mutex_exit(&qlge->hw_mutex);
7016 	return (rval);
7017 }
7018 
7019 /*
7020  * Wait for all the receives indicated to the stack to come back
7021  */
7022 static int
7023 ql_wait_rx_complete(qlge_t *qlge)
7024 {
7025 	int i;
7026 	/* Disable all the completion queues */
7027 	mutex_enter(&qlge->hw_mutex);
7028 	for (i = 0; i < qlge->rx_ring_count; i++) {
7029 		if (qlge->rx_ring[i].valid_db_reg) {
7030 			ql_write_doorbell_reg(qlge,
7031 			    qlge->rx_ring[i].valid_db_reg, 0);
7032 		}
7033 	}
7034 	mutex_exit(&qlge->hw_mutex);
7035 
7036 	/* Wait for OS to return all rx buffers */
7037 	qlge_delay(QL_ONE_SEC_DELAY);
7038 	return (DDI_SUCCESS);
7039 }
7040 
7041 /*
7042  * stop the driver
7043  */
7044 static int
7045 ql_bringdown_adapter(qlge_t *qlge)
7046 {
7047 	int i;
7048 	int status = DDI_SUCCESS;
7049 
7050 	qlge->mac_flags = QL_MAC_BRINGDOWN;
7051 	if (qlge->sequence & ADAPTER_INIT) {
7052 		/* stop forwarding external packets to driver */
7053 		status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
7054 		if (status)
7055 			return (status);
7056 		(void) ql_stop_routing(qlge);
7057 		ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
7058 		/*
7059 		 * Set the flag for receive and transmit
7060 		 * operations to cease
7061 		 */
7062 		for (i = 0; i < qlge->tx_ring_count; i++) {
7063 			mutex_enter(&qlge->tx_ring[i].tx_lock);
7064 			qlge->tx_ring[i].mac_flags = QL_MAC_STOPPED;
7065 			mutex_exit(&qlge->tx_ring[i].tx_lock);
7066 		}
7067 
7068 		for (i = 0; i < qlge->rx_ring_count; i++) {
7069 			mutex_enter(&qlge->rx_ring[i].rx_lock);
7070 			qlge->rx_ring[i].mac_flags = QL_MAC_STOPPED;
7071 			mutex_exit(&qlge->rx_ring[i].rx_lock);
7072 		}
7073 
7074 		/*
7075 		 * Need interrupts to be running while the transmit
7076 		 * completions are cleared. Wait for the packets
7077 		 * queued to the chip to be sent out
7078 		 */
7079 		(void) ql_wait_tx_quiesce(qlge);
7080 		/* Interrupts not needed from now */
7081 		ql_disable_all_completion_interrupts(qlge);
7082 
7083 		mutex_enter(&qlge->hw_mutex);
7084 		/* Disable Global interrupt */
7085 		ql_disable_global_interrupt(qlge);
7086 		mutex_exit(&qlge->hw_mutex);
7087 
7088 		/* Wait for all the indicated packets to come back */
7089 		status = ql_wait_rx_complete(qlge);
7090 
7091 		mutex_enter(&qlge->hw_mutex);
7092 		/* Reset adapter */
7093 		(void) ql_asic_reset(qlge);
7094 		/*
7095 		 * Unbind all tx dma handles to prevent pending tx descriptors'
7096 		 * dma handles from being re-used.
7097 		 */
7098 		for (i = 0; i < qlge->tx_ring_count; i++) {
7099 			ql_unbind_pending_tx_dma_handle(&qlge->tx_ring[i]);
7100 		}
7101 
7102 		qlge->sequence &= ~ADAPTER_INIT;
7103 
7104 		mutex_exit(&qlge->hw_mutex);
7105 	}
7106 	return (status);
7107 }
7108 
7109 /*
7110  * ql_detach
7111  * Used to remove all the states associated with a given
7112  * instances of a device node prior to the removal of that
7113  * instance from the system.
7114  */
7115 static int
7116 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
7117 {
7118 	qlge_t *qlge;
7119 	int rval;
7120 
7121 	rval = DDI_SUCCESS;
7122 
7123 	switch (cmd) {
7124 	case DDI_DETACH:
7125 
7126 		if ((qlge = QL_GET_DEV(dip)) == NULL)
7127 			return (DDI_FAILURE);
7128 		rval = ql_bringdown_adapter(qlge);
7129 		if (rval != DDI_SUCCESS)
7130 			break;
7131 
7132 		qlge->mac_flags = QL_MAC_DETACH;
7133 
7134 		/* free memory resources */
7135 		if (qlge->sequence & INIT_MEMORY_ALLOC) {
7136 			ql_free_mem_resources(qlge);
7137 			qlge->sequence &= ~INIT_MEMORY_ALLOC;
7138 		}
7139 		ql_free_resources(dip, qlge);
7140 
7141 		break;
7142 
7143 	case DDI_SUSPEND:
7144 		if ((qlge = QL_GET_DEV(dip)) == NULL)
7145 			return (DDI_FAILURE);
7146 
7147 		mutex_enter(&qlge->gen_mutex);
7148 		if ((qlge->mac_flags == QL_MAC_ATTACHED) ||
7149 		    (qlge->mac_flags == QL_MAC_STARTED)) {
7150 			(void) ql_do_stop(qlge);
7151 		}
7152 		qlge->mac_flags = QL_MAC_SUSPENDED;
7153 		mutex_exit(&qlge->gen_mutex);
7154 
7155 		break;
7156 	default:
7157 		rval = DDI_FAILURE;
7158 		break;
7159 	}
7160 
7161 	return (rval);
7162 }
7163 
7164 /*
7165  * quiesce(9E) entry point.
7166  *
7167  * This function is called when the system is single-threaded at high
7168  * PIL with preemption disabled. Therefore, this function must not be
7169  * blocked.
7170  *
7171  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
7172  */
7173 int
7174 ql_quiesce(dev_info_t *dip)
7175 {
7176 	qlge_t *qlge;
7177 	int i;
7178 
7179 	if ((qlge = QL_GET_DEV(dip)) == NULL)
7180 		return (DDI_FAILURE);
7181 
7182 	if (CFG_IST(qlge, CFG_CHIP_8100)) {
7183 		/* stop forwarding external packets to driver */
7184 		(void) ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
7185 		(void) ql_stop_routing(qlge);
7186 		ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
7187 		/* Stop all the request queues */
7188 		for (i = 0; i < qlge->tx_ring_count; i++) {
7189 			if (qlge->tx_ring[i].valid_db_reg) {
7190 				ql_write_doorbell_reg(qlge,
7191 				    qlge->tx_ring[i].valid_db_reg, 0);
7192 			}
7193 		}
7194 		qlge_delay(QL_ONE_SEC_DELAY/4);
7195 		/* Interrupts not needed from now */
7196 		/* Disable MPI interrupt */
7197 		ql_write_reg(qlge, REG_INTERRUPT_MASK,
7198 		    (INTR_MASK_PI << 16));
7199 		ql_disable_global_interrupt(qlge);
7200 
7201 		/* Disable all the rx completion queues */
7202 		for (i = 0; i < qlge->rx_ring_count; i++) {
7203 			if (qlge->rx_ring[i].valid_db_reg) {
7204 				ql_write_doorbell_reg(qlge,
7205 				    qlge->rx_ring[i].valid_db_reg, 0);
7206 			}
7207 		}
7208 		qlge_delay(QL_ONE_SEC_DELAY/4);
7209 		qlge->mac_flags = QL_MAC_STOPPED;
7210 		/* Reset adapter */
7211 		(void) ql_asic_reset(qlge);
7212 		qlge_delay(100);
7213 	}
7214 
7215 	return (DDI_SUCCESS);
7216 }
7217 
7218 QL_STREAM_OPS(ql_ops, ql_attach, ql_detach);
7219 
7220 /*
7221  * Loadable Driver Interface Structures.
7222  * Declare and initialize the module configuration section...
7223  */
7224 static struct modldrv modldrv = {
7225 	&mod_driverops,		/* type of module: driver */
7226 	version,		/* name of module */
7227 	&ql_ops			/* driver dev_ops */
7228 };
7229 
7230 static struct modlinkage modlinkage = {
7231 	MODREV_1, 	&modldrv,	NULL
7232 };
7233 
7234 /*
7235  * Loadable Module Routines
7236  */
7237 
7238 /*
7239  * _init
7240  * Initializes a loadable module. It is called before any other
7241  * routine in a loadable module.
7242  */
7243 int
7244 _init(void)
7245 {
7246 	int rval;
7247 
7248 	mac_init_ops(&ql_ops, ADAPTER_NAME);
7249 	rval = mod_install(&modlinkage);
7250 	if (rval != DDI_SUCCESS) {
7251 		mac_fini_ops(&ql_ops);
7252 		cmn_err(CE_WARN, "?Unable to install/attach driver '%s'",
7253 		    ADAPTER_NAME);
7254 	}
7255 
7256 	return (rval);
7257 }
7258 
7259 /*
7260  * _fini
7261  * Prepares a module for unloading. It is called when the system
7262  * wants to unload a module. If the module determines that it can
7263  * be unloaded, then _fini() returns the value returned by
7264  * mod_remove(). Upon successful return from _fini() no other
7265  * routine in the module will be called before _init() is called.
7266  */
7267 int
7268 _fini(void)
7269 {
7270 	int rval;
7271 
7272 	rval = mod_remove(&modlinkage);
7273 	if (rval == DDI_SUCCESS) {
7274 		mac_fini_ops(&ql_ops);
7275 	}
7276 
7277 	return (rval);
7278 }
7279 
7280 /*
7281  * _info
7282  * Returns information about loadable module.
7283  */
7284 int
7285 _info(struct modinfo *modinfop)
7286 {
7287 	return (mod_info(&modlinkage, modinfop));
7288 }
7289