xref: /titanic_41/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge.c (revision 803376f094adaaf2e4d9aa1c1d0efd56b642d838)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 QLogic Corporation. All rights reserved.
24  */
25 
26 #include <qlge.h>
27 #include <sys/atomic.h>
28 #include <sys/strsubr.h>
29 #include <sys/pattr.h>
30 #include <netinet/in.h>
31 #include <netinet/ip.h>
32 #include <netinet/ip6.h>
33 #include <netinet/tcp.h>
34 #include <netinet/udp.h>
35 #include <inet/ip.h>
36 
37 
38 
39 /*
40  * Local variables
41  */
42 static struct ether_addr ql_ether_broadcast_addr =
43 	{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
44 static char version[] = "GLDv3 QLogic 81XX " VERSIONSTR;
45 
46 /*
47  * Local function prototypes
48  */
49 static void ql_free_resources(qlge_t *);
50 static void ql_fini_kstats(qlge_t *);
51 static uint32_t ql_get_link_state(qlge_t *);
52 static void ql_read_conf(qlge_t *);
53 static int ql_alloc_phys(dev_info_t *, ddi_dma_handle_t *,
54     ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
55     size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
56 static int ql_alloc_phys_rbuf(dev_info_t *, ddi_dma_handle_t *,
57     ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
58     size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
59 static void ql_free_phys(ddi_dma_handle_t *, ddi_acc_handle_t *);
60 static int ql_set_routing_reg(qlge_t *, uint32_t, uint32_t, int);
61 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
62 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
63 static int ql_bringdown_adapter(qlge_t *);
64 static int ql_bringup_adapter(qlge_t *);
65 static int ql_asic_reset(qlge_t *);
66 static void ql_wake_mpi_reset_soft_intr(qlge_t *);
67 static void ql_stop_timer(qlge_t *qlge);
68 static void ql_fm_fini(qlge_t *qlge);
69 int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring);
70 
71 /*
72  * TX dma maping handlers allow multiple sscatter-gather lists
73  */
74 ddi_dma_attr_t  tx_mapping_dma_attr = {
75 	DMA_ATTR_V0,			/* dma_attr_version */
76 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
77 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
78 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
79 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
80 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
81 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
82 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
83 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
84 	QL_MAX_TX_DMA_HANDLES,		/* s/g list length */
85 	QL_DMA_GRANULARITY,		/* granularity of device */
86 	DDI_DMA_RELAXED_ORDERING	/* DMA transfer flags */
87 };
88 
89 /*
90  * Receive buffers and Request/Response queues do not allow scatter-gather lists
91  */
92 ddi_dma_attr_t  dma_attr = {
93 	DMA_ATTR_V0,			/* dma_attr_version */
94 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
95 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
96 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
97 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
98 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
99 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
100 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
101 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
102 	1,				/* s/g list length, i.e no sg list */
103 	QL_DMA_GRANULARITY,		/* granularity of device */
104 	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
105 };
106 /*
107  * Receive buffers do not allow scatter-gather lists
108  */
109 ddi_dma_attr_t  dma_attr_rbuf = {
110 	DMA_ATTR_V0,			/* dma_attr_version */
111 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
112 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
113 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
114 	0x1,				/* DMA address alignment, default - 8 */
115 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
116 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
117 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
118 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
119 	1,				/* s/g list length, i.e no sg list */
120 	QL_DMA_GRANULARITY,		/* granularity of device */
121 	DDI_DMA_RELAXED_ORDERING	/* DMA transfer flags */
122 };
123 /*
124  * DMA access attribute structure.
125  */
126 /* device register access from host */
127 ddi_device_acc_attr_t ql_dev_acc_attr = {
128 	DDI_DEVICE_ATTR_V0,
129 	DDI_STRUCTURE_LE_ACC,
130 	DDI_STRICTORDER_ACC
131 };
132 
133 /* host ring descriptors */
134 ddi_device_acc_attr_t ql_desc_acc_attr = {
135 	DDI_DEVICE_ATTR_V0,
136 	DDI_NEVERSWAP_ACC,
137 	DDI_STRICTORDER_ACC
138 };
139 
140 /* host ring buffer */
141 ddi_device_acc_attr_t ql_buf_acc_attr = {
142 	DDI_DEVICE_ATTR_V0,
143 	DDI_NEVERSWAP_ACC,
144 	DDI_STRICTORDER_ACC
145 };
146 
147 /*
148  * Hash key table for Receive Side Scaling (RSS) support
149  */
150 const uint8_t key_data[] = {
151 	0x23, 0x64, 0xa1, 0xaa, 0x37, 0xc0, 0xed, 0x05, 0x2b, 0x36,
152 	0x50, 0x5c, 0x45, 0x1e, 0x7e, 0xc8, 0x5d, 0x2a, 0x54, 0x2f,
153 	0xe4, 0x3d, 0x0f, 0xbb, 0x91, 0xd9, 0x25, 0x60, 0xd4, 0xf8,
154 	0x12, 0xa0, 0x59, 0x4b, 0x9e, 0x8a, 0x51, 0xda, 0xcd, 0x49};
155 
156 /*
157  * Shadow Registers:
158  * Outbound queues have a consumer index that is maintained by the chip.
159  * Inbound queues have a producer index that is maintained by the chip.
160  * For lower overhead, these registers are "shadowed" to host memory
161  * which allows the device driver to track the queue progress without
162  * PCI reads. When an entry is placed on an inbound queue, the chip will
163  * update the relevant index register and then copy the value to the
164  * shadow register in host memory.
165  * Currently, ql_read_sh_reg only read Inbound queues'producer index.
166  */
167 
168 static inline unsigned int
ql_read_sh_reg(qlge_t * qlge,struct rx_ring * rx_ring)169 ql_read_sh_reg(qlge_t *qlge, struct rx_ring *rx_ring)
170 {
171 	uint32_t rtn;
172 
173 	/* re-synchronize shadow prod index dma buffer before reading */
174 	(void) ddi_dma_sync(qlge->host_copy_shadow_dma_attr.dma_handle,
175 	    rx_ring->prod_idx_sh_reg_offset,
176 	    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
177 
178 	rtn = ddi_get32(qlge->host_copy_shadow_dma_attr.acc_handle,
179 	    (uint32_t *)rx_ring->prod_idx_sh_reg);
180 
181 	return (rtn);
182 }
183 
184 /*
185  * Read 32 bit atomically
186  */
187 uint32_t
ql_atomic_read_32(volatile uint32_t * target)188 ql_atomic_read_32(volatile uint32_t *target)
189 {
190 	/*
191 	 * atomic_add_32_nv returns the new value after the add,
192 	 * we are adding 0 so we should get the original value
193 	 */
194 	return (atomic_add_32_nv(target, 0));
195 }
196 
197 /*
198  * Set 32 bit atomically
199  */
200 void
ql_atomic_set_32(volatile uint32_t * target,uint32_t newval)201 ql_atomic_set_32(volatile uint32_t *target, uint32_t newval)
202 {
203 	(void) atomic_swap_32(target, newval);
204 }
205 
206 
207 /*
208  * Setup device PCI configuration registers.
209  * Kernel context.
210  */
211 static void
ql_pci_config(qlge_t * qlge)212 ql_pci_config(qlge_t *qlge)
213 {
214 	uint16_t w;
215 
216 	qlge->vendor_id = (uint16_t)pci_config_get16(qlge->pci_handle,
217 	    PCI_CONF_VENID);
218 	qlge->device_id = (uint16_t)pci_config_get16(qlge->pci_handle,
219 	    PCI_CONF_DEVID);
220 
221 	/*
222 	 * we want to respect framework's setting of PCI
223 	 * configuration space command register and also
224 	 * want to make sure that all bits of interest to us
225 	 * are properly set in PCI Command register(0x04).
226 	 * PCI_COMM_IO		0x1	 I/O access enable
227 	 * PCI_COMM_MAE		0x2	 Memory access enable
228 	 * PCI_COMM_ME		0x4	 bus master enable
229 	 * PCI_COMM_MEMWR_INVAL	0x10	 memory write and invalidate enable.
230 	 */
231 	w = (uint16_t)pci_config_get16(qlge->pci_handle, PCI_CONF_COMM);
232 	w = (uint16_t)(w & (~PCI_COMM_IO));
233 	w = (uint16_t)(w | PCI_COMM_MAE | PCI_COMM_ME |
234 	    /* PCI_COMM_MEMWR_INVAL | */
235 	    PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);
236 
237 	pci_config_put16(qlge->pci_handle, PCI_CONF_COMM, w);
238 
239 	w = pci_config_get16(qlge->pci_handle, 0x54);
240 	w = (uint16_t)(w & (~0x7000));
241 	w = (uint16_t)(w | 0x5000);
242 	pci_config_put16(qlge->pci_handle, 0x54, w);
243 
244 	ql_dump_pci_config(qlge);
245 }
246 
247 /*
248  * This routine parforms the neccessary steps to set GLD mac information
249  * such as Function number, xgmac mask and shift bits
250  */
251 static int
ql_set_mac_info(qlge_t * qlge)252 ql_set_mac_info(qlge_t *qlge)
253 {
254 	uint32_t value;
255 	int rval = DDI_FAILURE;
256 	uint32_t fn0_net, fn1_net;
257 
258 	/* set default value */
259 	qlge->fn0_net = FN0_NET;
260 	qlge->fn1_net = FN1_NET;
261 
262 	if (ql_read_processor_data(qlge, MPI_REG, &value) != DDI_SUCCESS) {
263 		cmn_err(CE_WARN, "%s(%d) read MPI register failed",
264 		    __func__, qlge->instance);
265 		goto exit;
266 	} else {
267 		fn0_net = (value >> 1) & 0x07;
268 		fn1_net = (value >> 5) & 0x07;
269 		if ((fn0_net > 4) || (fn1_net > 4) || (fn0_net == fn1_net)) {
270 			cmn_err(CE_WARN, "%s(%d) bad mpi register value %x, \n"
271 			    "nic0 function number %d,"
272 			    "nic1 function number %d "
273 			    "use default\n",
274 			    __func__, qlge->instance, value, fn0_net, fn1_net);
275 			goto exit;
276 		} else {
277 			qlge->fn0_net = fn0_net;
278 			qlge->fn1_net = fn1_net;
279 		}
280 	}
281 
282 	/* Get the function number that the driver is associated with */
283 	value = ql_read_reg(qlge, REG_STATUS);
284 	qlge->func_number = (uint8_t)((value >> 6) & 0x03);
285 	QL_PRINT(DBG_INIT, ("status register is:%x, func_number: %d\n",
286 	    value, qlge->func_number));
287 
288 	/* The driver is loaded on a non-NIC function? */
289 	if ((qlge->func_number != qlge->fn0_net) &&
290 	    (qlge->func_number != qlge->fn1_net)) {
291 		cmn_err(CE_WARN,
292 		    "Invalid function number = 0x%x\n", qlge->func_number);
293 		goto exit;
294 	}
295 	/* network port 0? */
296 	if (qlge->func_number == qlge->fn0_net) {
297 		qlge->xgmac_sem_mask = QL_PORT0_XGMAC_SEM_MASK;
298 		qlge->xgmac_sem_bits = QL_PORT0_XGMAC_SEM_BITS;
299 	} else {
300 		qlge->xgmac_sem_mask = QL_PORT1_XGMAC_SEM_MASK;
301 		qlge->xgmac_sem_bits = QL_PORT1_XGMAC_SEM_BITS;
302 	}
303 	rval = DDI_SUCCESS;
304 exit:
305 	return (rval);
306 
307 }
308 
309 /*
310  * write to doorbell register
311  */
312 void
ql_write_doorbell_reg(qlge_t * qlge,uint32_t * addr,uint32_t data)313 ql_write_doorbell_reg(qlge_t *qlge, uint32_t *addr, uint32_t data)
314 {
315 	ddi_put32(qlge->dev_doorbell_reg_handle, addr, data);
316 }
317 
318 /*
319  * read from doorbell register
320  */
321 uint32_t
ql_read_doorbell_reg(qlge_t * qlge,uint32_t * addr)322 ql_read_doorbell_reg(qlge_t *qlge, uint32_t *addr)
323 {
324 	uint32_t ret;
325 
326 	ret = ddi_get32(qlge->dev_doorbell_reg_handle, addr);
327 
328 	return	(ret);
329 }
330 
331 /*
332  * This function waits for a specific bit to come ready
333  * in a given register.  It is used mostly by the initialize
334  * process, but is also used in kernel thread API such as
335  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
336  */
337 static int
ql_wait_reg_rdy(qlge_t * qlge,uint32_t reg,uint32_t bit,uint32_t err_bit)338 ql_wait_reg_rdy(qlge_t *qlge, uint32_t reg, uint32_t bit, uint32_t err_bit)
339 {
340 	uint32_t temp;
341 	int count = UDELAY_COUNT;
342 
343 	while (count) {
344 		temp = ql_read_reg(qlge, reg);
345 
346 		/* check for errors */
347 		if ((temp & err_bit) != 0) {
348 			break;
349 		} else if ((temp & bit) != 0)
350 			return (DDI_SUCCESS);
351 		qlge_delay(UDELAY_DELAY);
352 		count--;
353 	}
354 	cmn_err(CE_WARN,
355 	    "Waiting for reg %x to come ready failed.", reg);
356 	if (qlge->fm_enable) {
357 		ql_fm_ereport(qlge, DDI_FM_DEVICE_NO_RESPONSE);
358 		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
359 	}
360 	return (DDI_FAILURE);
361 }
362 
363 /*
364  * The CFG register is used to download TX and RX control blocks
365  * to the chip. This function waits for an operation to complete.
366  */
367 static int
ql_wait_cfg(qlge_t * qlge,uint32_t bit)368 ql_wait_cfg(qlge_t *qlge, uint32_t bit)
369 {
370 	return (ql_wait_reg_bit(qlge, REG_CONFIGURATION, bit, BIT_RESET, 0));
371 }
372 
373 
374 /*
375  * Used to issue init control blocks to hw. Maps control block,
376  * sets address, triggers download, waits for completion.
377  */
378 static int
ql_write_cfg(qlge_t * qlge,uint32_t bit,uint64_t phy_addr,uint16_t q_id)379 ql_write_cfg(qlge_t *qlge, uint32_t bit, uint64_t phy_addr, uint16_t q_id)
380 {
381 	int status = DDI_SUCCESS;
382 	uint32_t mask;
383 	uint32_t value;
384 
385 	status = ql_sem_spinlock(qlge, SEM_ICB_MASK);
386 	if (status != DDI_SUCCESS) {
387 		goto exit;
388 	}
389 	status = ql_wait_cfg(qlge, bit);
390 	if (status != DDI_SUCCESS) {
391 		goto exit;
392 	}
393 
394 	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_LOWER, LS_64BITS(phy_addr));
395 	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_UPPER, MS_64BITS(phy_addr));
396 
397 	mask = CFG_Q_MASK | (bit << 16);
398 	value = bit | (q_id << CFG_Q_SHIFT);
399 	ql_write_reg(qlge, REG_CONFIGURATION, (mask | value));
400 
401 	/*
402 	 * Wait for the bit to clear after signaling hw.
403 	 */
404 	status = ql_wait_cfg(qlge, bit);
405 	ql_sem_unlock(qlge, SEM_ICB_MASK); /* does flush too */
406 
407 exit:
408 	return (status);
409 }
410 
411 /*
412  * Initialize adapter instance
413  */
414 static int
ql_init_instance(qlge_t * qlge)415 ql_init_instance(qlge_t *qlge)
416 {
417 	int i;
418 
419 	/* Default value */
420 	qlge->mac_flags = QL_MAC_INIT;
421 	qlge->mtu = ETHERMTU;		/* set normal size as default */
422 	qlge->page_size = VM_PAGE_SIZE;	/* default page size */
423 
424 	for (i = 0; i < MAX_RX_RINGS; i++) {
425 		qlge->rx_polls[i] = 0;
426 		qlge->rx_interrupts[i] = 0;
427 	}
428 
429 	/*
430 	 * Set up the operating parameters.
431 	 */
432 	qlge->multicast_list_count = 0;
433 
434 	/*
435 	 * Set up the max number of unicast list
436 	 */
437 	qlge->unicst_total = MAX_UNICAST_LIST_SIZE;
438 	qlge->unicst_avail = MAX_UNICAST_LIST_SIZE;
439 
440 	/*
441 	 * read user defined properties in .conf file
442 	 */
443 	ql_read_conf(qlge); /* mtu, pause, LSO etc */
444 	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
445 
446 	QL_PRINT(DBG_INIT, ("mtu is %d \n", qlge->mtu));
447 
448 	/* choose Memory Space mapping and get Vendor Id, Device ID etc */
449 	ql_pci_config(qlge);
450 	qlge->ip_hdr_offset = 0;
451 
452 	if (qlge->device_id == 0x8000) {
453 		/* Schultz card */
454 		qlge->cfg_flags |= CFG_CHIP_8100;
455 		/* enable just ipv4 chksum offload for Schultz */
456 		qlge->cfg_flags |= CFG_CKSUM_FULL_IPv4;
457 		/*
458 		 * Schultz firmware does not do pseduo IP header checksum
459 		 * calculation, needed to be done by driver
460 		 */
461 		qlge->cfg_flags |= CFG_HW_UNABLE_PSEUDO_HDR_CKSUM;
462 		if (qlge->lso_enable)
463 			qlge->cfg_flags |= CFG_LSO;
464 		qlge->cfg_flags |= CFG_SUPPORT_SCATTER_GATHER;
465 		/* Schultz must split packet header */
466 		qlge->cfg_flags |= CFG_ENABLE_SPLIT_HEADER;
467 		qlge->max_read_mbx = 5;
468 		qlge->ip_hdr_offset = 2;
469 	}
470 
471 	/* Set Function Number and some of the iocb mac information */
472 	if (ql_set_mac_info(qlge) != DDI_SUCCESS)
473 		return (DDI_FAILURE);
474 
475 	/* Read network settings from NVRAM */
476 	/* After nvram is read successfully, update dev_addr */
477 	if (ql_get_flash_params(qlge) == DDI_SUCCESS) {
478 		QL_PRINT(DBG_INIT, ("mac%d address is \n", qlge->func_number));
479 		for (i = 0; i < ETHERADDRL; i++) {
480 			qlge->dev_addr.ether_addr_octet[i] =
481 			    qlge->nic_config.factory_MAC[i];
482 		}
483 	} else {
484 		cmn_err(CE_WARN, "%s(%d): Failed to read flash memory",
485 		    __func__, qlge->instance);
486 		return (DDI_FAILURE);
487 	}
488 
489 	bcopy(qlge->dev_addr.ether_addr_octet,
490 	    qlge->unicst_addr[0].addr.ether_addr_octet,
491 	    ETHERADDRL);
492 	QL_DUMP(DBG_INIT, "\t flash mac address dump:\n",
493 	    &qlge->dev_addr.ether_addr_octet[0], 8, ETHERADDRL);
494 
495 	qlge->port_link_state = LS_DOWN;
496 
497 	return (DDI_SUCCESS);
498 }
499 
500 
501 /*
502  * This hardware semaphore provides the mechanism for exclusive access to
503  * resources shared between the NIC driver, MPI firmware,
504  * FCOE firmware and the FC driver.
505  */
506 static int
ql_sem_trylock(qlge_t * qlge,uint32_t sem_mask)507 ql_sem_trylock(qlge_t *qlge, uint32_t sem_mask)
508 {
509 	uint32_t sem_bits = 0;
510 
511 	switch (sem_mask) {
512 	case SEM_XGMAC0_MASK:
513 		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
514 		break;
515 	case SEM_XGMAC1_MASK:
516 		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
517 		break;
518 	case SEM_ICB_MASK:
519 		sem_bits = SEM_SET << SEM_ICB_SHIFT;
520 		break;
521 	case SEM_MAC_ADDR_MASK:
522 		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
523 		break;
524 	case SEM_FLASH_MASK:
525 		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
526 		break;
527 	case SEM_PROBE_MASK:
528 		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
529 		break;
530 	case SEM_RT_IDX_MASK:
531 		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
532 		break;
533 	case SEM_PROC_REG_MASK:
534 		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
535 		break;
536 	default:
537 		cmn_err(CE_WARN, "Bad Semaphore mask!.");
538 		return (DDI_FAILURE);
539 	}
540 
541 	ql_write_reg(qlge, REG_SEMAPHORE, sem_bits | sem_mask);
542 	return (!(ql_read_reg(qlge, REG_SEMAPHORE) & sem_bits));
543 }
544 
545 /*
546  * Lock a specific bit of Semaphore register to gain
547  * access to a particular shared register
548  */
549 int
ql_sem_spinlock(qlge_t * qlge,uint32_t sem_mask)550 ql_sem_spinlock(qlge_t *qlge, uint32_t sem_mask)
551 {
552 	unsigned int wait_count = 30;
553 
554 	while (wait_count) {
555 		if (!ql_sem_trylock(qlge, sem_mask))
556 			return (DDI_SUCCESS);
557 		qlge_delay(100);
558 		wait_count--;
559 	}
560 	cmn_err(CE_WARN, "%s(%d) sem_mask 0x%x lock timeout ",
561 	    __func__, qlge->instance, sem_mask);
562 	return (DDI_FAILURE);
563 }
564 
565 /*
566  * Unock a specific bit of Semaphore register to release
567  * access to a particular shared register
568  */
569 void
ql_sem_unlock(qlge_t * qlge,uint32_t sem_mask)570 ql_sem_unlock(qlge_t *qlge, uint32_t sem_mask)
571 {
572 	ql_write_reg(qlge, REG_SEMAPHORE, sem_mask);
573 	(void) ql_read_reg(qlge, REG_SEMAPHORE);	/* flush */
574 }
575 
576 /*
577  * Get property value from configuration file.
578  *
579  * string = property string pointer.
580  *
581  * Returns:
582  * 0xFFFFFFFF = no property else property value.
583  */
584 static uint32_t
ql_get_prop(qlge_t * qlge,char * string)585 ql_get_prop(qlge_t *qlge, char *string)
586 {
587 	char buf[256];
588 	uint32_t data;
589 
590 	/* Get adapter instance parameter. */
591 	(void) sprintf(buf, "hba%d-%s", qlge->instance, string);
592 	data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0, buf,
593 	    (int)0xffffffff);
594 
595 	/* Adapter instance parameter found? */
596 	if (data == 0xffffffff) {
597 		/* No, get default parameter. */
598 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0,
599 		    string, (int)0xffffffff);
600 	}
601 
602 	return (data);
603 }
604 
605 /*
606  * Read user setting from configuration file.
607  */
608 static void
ql_read_conf(qlge_t * qlge)609 ql_read_conf(qlge_t *qlge)
610 {
611 	uint32_t data;
612 
613 	/* clear configuration flags */
614 	qlge->cfg_flags = 0;
615 
616 	/* Set up the default ring sizes. */
617 	qlge->tx_ring_size = NUM_TX_RING_ENTRIES;
618 	data = ql_get_prop(qlge, "tx_ring_size");
619 	/* if data is valid */
620 	if ((data != 0xffffffff) && data) {
621 		if (qlge->tx_ring_size != data) {
622 			qlge->tx_ring_size = (uint16_t)data;
623 		}
624 	}
625 
626 	qlge->rx_ring_size = NUM_RX_RING_ENTRIES;
627 	data = ql_get_prop(qlge, "rx_ring_size");
628 	/* if data is valid */
629 	if ((data != 0xffffffff) && data) {
630 		if (qlge->rx_ring_size != data) {
631 			qlge->rx_ring_size = (uint16_t)data;
632 		}
633 	}
634 
635 	qlge->tx_ring_count = 8;
636 	data = ql_get_prop(qlge, "tx_ring_count");
637 	/* if data is valid */
638 	if ((data != 0xffffffff) && data) {
639 		if (qlge->tx_ring_count != data) {
640 			qlge->tx_ring_count = (uint16_t)data;
641 		}
642 	}
643 
644 	qlge->rss_ring_count = 8;
645 	data = ql_get_prop(qlge, "rss_ring_count");
646 	/* if data is valid */
647 	if ((data != 0xffffffff) && data) {
648 		if (qlge->rss_ring_count != data) {
649 			qlge->rss_ring_count = (uint16_t)data;
650 		}
651 	}
652 
653 	/* Get default rx_copy enable/disable. */
654 	if ((data = ql_get_prop(qlge, "force-rx-copy")) == 0xffffffff ||
655 	    data == 0) {
656 		qlge->rx_copy = B_FALSE;
657 		QL_PRINT(DBG_INIT, ("rx copy mode disabled\n"));
658 	} else if (data == 1) {
659 		qlge->rx_copy = B_TRUE;
660 		QL_PRINT(DBG_INIT, ("rx copy mode enabled\n"));
661 	}
662 
663 	qlge->rx_copy_threshold = qlge->rx_ring_size / 4;
664 	data = ql_get_prop(qlge, "rx_copy_threshold");
665 	if ((data != 0xffffffff) && (data != 0)) {
666 		qlge->rx_copy_threshold = data;
667 		cmn_err(CE_NOTE, "!new rx_copy_threshold %d \n",
668 		    qlge->rx_copy_threshold);
669 	}
670 
671 	/* Get mtu packet size. */
672 	data = ql_get_prop(qlge, "mtu");
673 	if ((data == ETHERMTU) || (data == JUMBO_MTU)) {
674 		if (qlge->mtu != data) {
675 			qlge->mtu = data;
676 			cmn_err(CE_NOTE, "new mtu is %d\n", qlge->mtu);
677 		}
678 	}
679 
680 	if (qlge->mtu == JUMBO_MTU) {
681 		qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT_JUMBO;
682 		qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT_JUMBO;
683 		qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT_JUMBO;
684 		qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT_JUMBO;
685 	}
686 
687 
688 	/* Get pause mode, default is Per Priority mode. */
689 	qlge->pause = PAUSE_MODE_PER_PRIORITY;
690 	data = ql_get_prop(qlge, "pause");
691 	if (data <= PAUSE_MODE_PER_PRIORITY) {
692 		if (qlge->pause != data) {
693 			qlge->pause = data;
694 			cmn_err(CE_NOTE, "new pause mode %d\n", qlge->pause);
695 		}
696 	}
697 	/* Receive interrupt delay */
698 	qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT;
699 	data = ql_get_prop(qlge, "rx_intr_delay");
700 	/* if data is valid */
701 	if ((data != 0xffffffff) && data) {
702 		if (qlge->rx_coalesce_usecs != data) {
703 			qlge->rx_coalesce_usecs = (uint16_t)data;
704 		}
705 	}
706 	/* Rx inter-packet delay. */
707 	qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT;
708 	data = ql_get_prop(qlge, "rx_ipkt_delay");
709 	/* if data is valid */
710 	if ((data != 0xffffffff) && data) {
711 		if (qlge->rx_max_coalesced_frames != data) {
712 			qlge->rx_max_coalesced_frames = (uint16_t)data;
713 		}
714 	}
715 	/* Transmit interrupt delay */
716 	qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT;
717 	data = ql_get_prop(qlge, "tx_intr_delay");
718 	/* if data is valid */
719 	if ((data != 0xffffffff) && data) {
720 		if (qlge->tx_coalesce_usecs != data) {
721 			qlge->tx_coalesce_usecs = (uint16_t)data;
722 		}
723 	}
724 	/* Tx inter-packet delay. */
725 	qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT;
726 	data = ql_get_prop(qlge, "tx_ipkt_delay");
727 	/* if data is valid */
728 	if ((data != 0xffffffff) && data) {
729 		if (qlge->tx_max_coalesced_frames != data) {
730 			qlge->tx_max_coalesced_frames = (uint16_t)data;
731 		}
732 	}
733 
734 	/* Get split header payload_copy_thresh. */
735 	qlge->payload_copy_thresh = DFLT_PAYLOAD_COPY_THRESH;
736 	data = ql_get_prop(qlge, "payload_copy_thresh");
737 	/* if data is valid */
738 	if ((data != 0xffffffff) && (data != 0)) {
739 		if (qlge->payload_copy_thresh != data) {
740 			qlge->payload_copy_thresh = data;
741 		}
742 	}
743 
744 	/* large send offload (LSO) capability. */
745 	qlge->lso_enable = 1;
746 	data = ql_get_prop(qlge, "lso_enable");
747 	/* if data is valid */
748 	if ((data == 0) || (data == 1)) {
749 		if (qlge->lso_enable != data) {
750 			qlge->lso_enable = (uint16_t)data;
751 		}
752 	}
753 
754 	/* dcbx capability. */
755 	qlge->dcbx_enable = 1;
756 	data = ql_get_prop(qlge, "dcbx_enable");
757 	/* if data is valid */
758 	if ((data == 0) || (data == 1)) {
759 		if (qlge->dcbx_enable != data) {
760 			qlge->dcbx_enable = (uint16_t)data;
761 		}
762 	}
763 	/* fault management enable */
764 	qlge->fm_enable = B_TRUE;
765 	data = ql_get_prop(qlge, "fm-enable");
766 	if ((data == 0x1) || (data == 0)) {
767 		qlge->fm_enable = (boolean_t)data;
768 	}
769 
770 }
771 
772 /*
773  * Enable global interrupt
774  */
775 static void
ql_enable_global_interrupt(qlge_t * qlge)776 ql_enable_global_interrupt(qlge_t *qlge)
777 {
778 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE,
779 	    (INTR_EN_EI << 16) | INTR_EN_EI);
780 	qlge->flags |= INTERRUPTS_ENABLED;
781 }
782 
783 /*
784  * Disable global interrupt
785  */
786 static void
ql_disable_global_interrupt(qlge_t * qlge)787 ql_disable_global_interrupt(qlge_t *qlge)
788 {
789 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, (INTR_EN_EI << 16));
790 	qlge->flags &= ~INTERRUPTS_ENABLED;
791 }
792 
793 /*
794  * Enable one ring interrupt
795  */
796 void
ql_enable_completion_interrupt(qlge_t * qlge,uint32_t intr)797 ql_enable_completion_interrupt(qlge_t *qlge, uint32_t intr)
798 {
799 	struct intr_ctx *ctx = qlge->intr_ctx + intr;
800 
801 	QL_PRINT(DBG_INTR, ("%s(%d): To enable intr %d, irq_cnt %d \n",
802 	    __func__, qlge->instance, intr, ctx->irq_cnt));
803 
804 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
805 		/*
806 		 * Always enable if we're MSIX multi interrupts and
807 		 * it's not the default (zeroeth) interrupt.
808 		 */
809 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
810 		return;
811 	}
812 
813 	if (!atomic_dec_32_nv(&ctx->irq_cnt)) {
814 		mutex_enter(&qlge->hw_mutex);
815 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
816 		mutex_exit(&qlge->hw_mutex);
817 		QL_PRINT(DBG_INTR,
818 		    ("%s(%d): write %x to intr enable register \n",
819 		    __func__, qlge->instance, ctx->intr_en_mask));
820 	}
821 }
822 
823 /*
824  * ql_forced_disable_completion_interrupt
825  * Used by call from OS, may be called without
826  * a pending interrupt so force the disable
827  */
828 uint32_t
ql_forced_disable_completion_interrupt(qlge_t * qlge,uint32_t intr)829 ql_forced_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
830 {
831 	uint32_t var = 0;
832 	struct intr_ctx *ctx = qlge->intr_ctx + intr;
833 
834 	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
835 	    __func__, qlge->instance, intr, ctx->irq_cnt));
836 
837 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
838 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
839 		var = ql_read_reg(qlge, REG_STATUS);
840 		return (var);
841 	}
842 
843 	mutex_enter(&qlge->hw_mutex);
844 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
845 	var = ql_read_reg(qlge, REG_STATUS);
846 	mutex_exit(&qlge->hw_mutex);
847 
848 	return (var);
849 }
850 
851 /*
852  * Disable a completion interrupt
853  */
854 void
ql_disable_completion_interrupt(qlge_t * qlge,uint32_t intr)855 ql_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
856 {
857 	struct intr_ctx *ctx;
858 
859 	ctx = qlge->intr_ctx + intr;
860 	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
861 	    __func__, qlge->instance, intr, ctx->irq_cnt));
862 	/*
863 	 * HW disables for us if we're MSIX multi interrupts and
864 	 * it's not the default (zeroeth) interrupt.
865 	 */
866 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && (intr != 0))
867 		return;
868 
869 	if (ql_atomic_read_32(&ctx->irq_cnt) == 0) {
870 		mutex_enter(&qlge->hw_mutex);
871 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
872 		mutex_exit(&qlge->hw_mutex);
873 	}
874 	atomic_inc_32(&ctx->irq_cnt);
875 }
876 
877 /*
878  * Enable all completion interrupts
879  */
880 static void
ql_enable_all_completion_interrupts(qlge_t * qlge)881 ql_enable_all_completion_interrupts(qlge_t *qlge)
882 {
883 	int i;
884 	uint32_t value = 1;
885 
886 	for (i = 0; i < qlge->intr_cnt; i++) {
887 		/*
888 		 * Set the count to 1 for Legacy / MSI interrupts or for the
889 		 * default interrupt (0)
890 		 */
891 		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0) {
892 			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
893 		}
894 		ql_enable_completion_interrupt(qlge, i);
895 	}
896 }
897 
898 /*
899  * Disable all completion interrupts
900  */
901 static void
ql_disable_all_completion_interrupts(qlge_t * qlge)902 ql_disable_all_completion_interrupts(qlge_t *qlge)
903 {
904 	int i;
905 	uint32_t value = 0;
906 
907 	for (i = 0; i < qlge->intr_cnt; i++) {
908 
909 		/*
910 		 * Set the count to 0 for Legacy / MSI interrupts or for the
911 		 * default interrupt (0)
912 		 */
913 		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0)
914 			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
915 
916 		ql_disable_completion_interrupt(qlge, i);
917 	}
918 }
919 
920 /*
921  * Update small buffer queue producer index
922  */
923 static void
ql_update_sbq_prod_idx(qlge_t * qlge,struct rx_ring * rx_ring)924 ql_update_sbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
925 {
926 	/* Update the buffer producer index */
927 	QL_PRINT(DBG_RX, ("sbq: updating prod idx = %d.\n",
928 	    rx_ring->sbq_prod_idx));
929 	ql_write_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg,
930 	    rx_ring->sbq_prod_idx);
931 }
932 
933 /*
934  * Update large buffer queue producer index
935  */
936 static void
ql_update_lbq_prod_idx(qlge_t * qlge,struct rx_ring * rx_ring)937 ql_update_lbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
938 {
939 	/* Update the buffer producer index */
940 	QL_PRINT(DBG_RX, ("lbq: updating prod idx = %d.\n",
941 	    rx_ring->lbq_prod_idx));
942 	ql_write_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg,
943 	    rx_ring->lbq_prod_idx);
944 }
945 
946 /*
947  * Adds a small buffer descriptor to end of its in use list,
948  * assumes sbq_lock is already taken
949  */
950 static void
ql_add_sbuf_to_in_use_list(struct rx_ring * rx_ring,struct bq_desc * sbq_desc)951 ql_add_sbuf_to_in_use_list(struct rx_ring *rx_ring,
952     struct bq_desc *sbq_desc)
953 {
954 	uint32_t inuse_idx = rx_ring->sbq_use_tail;
955 
956 	rx_ring->sbuf_in_use[inuse_idx] = sbq_desc;
957 	inuse_idx++;
958 	if (inuse_idx >= rx_ring->sbq_len)
959 		inuse_idx = 0;
960 	rx_ring->sbq_use_tail = inuse_idx;
961 	atomic_inc_32(&rx_ring->sbuf_in_use_count);
962 	ASSERT(rx_ring->sbuf_in_use_count <= rx_ring->sbq_len);
963 }
964 
965 /*
966  * Get a small buffer descriptor from its in use list
967  */
968 static struct bq_desc *
ql_get_sbuf_from_in_use_list(struct rx_ring * rx_ring)969 ql_get_sbuf_from_in_use_list(struct rx_ring *rx_ring)
970 {
971 	struct bq_desc *sbq_desc = NULL;
972 	uint32_t inuse_idx;
973 
974 	/* Pick from head of in use list */
975 	inuse_idx = rx_ring->sbq_use_head;
976 	sbq_desc = rx_ring->sbuf_in_use[inuse_idx];
977 	rx_ring->sbuf_in_use[inuse_idx] = NULL;
978 
979 	if (sbq_desc != NULL) {
980 		inuse_idx++;
981 		if (inuse_idx >= rx_ring->sbq_len)
982 			inuse_idx = 0;
983 		rx_ring->sbq_use_head = inuse_idx;
984 		atomic_dec_32(&rx_ring->sbuf_in_use_count);
985 		atomic_inc_32(&rx_ring->rx_indicate);
986 		sbq_desc->upl_inuse = 1;
987 		/* if mp is NULL */
988 		if (sbq_desc->mp == NULL) {
989 			/* try to remap mp again */
990 			sbq_desc->mp =
991 			    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
992 			    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
993 		}
994 	}
995 
996 	return (sbq_desc);
997 }
998 
999 /*
1000  * Add a small buffer descriptor to its free list
1001  */
1002 static void
ql_add_sbuf_to_free_list(struct rx_ring * rx_ring,struct bq_desc * sbq_desc)1003 ql_add_sbuf_to_free_list(struct rx_ring *rx_ring,
1004     struct bq_desc *sbq_desc)
1005 {
1006 	uint32_t free_idx;
1007 
1008 	/* Add to the end of free list */
1009 	free_idx = rx_ring->sbq_free_tail;
1010 	rx_ring->sbuf_free[free_idx] = sbq_desc;
1011 	ASSERT(rx_ring->sbuf_free_count <= rx_ring->sbq_len);
1012 	free_idx++;
1013 	if (free_idx >= rx_ring->sbq_len)
1014 		free_idx = 0;
1015 	rx_ring->sbq_free_tail = free_idx;
1016 	atomic_inc_32(&rx_ring->sbuf_free_count);
1017 }
1018 
1019 /*
1020  * Get a small buffer descriptor from its free list
1021  */
1022 static struct bq_desc *
ql_get_sbuf_from_free_list(struct rx_ring * rx_ring)1023 ql_get_sbuf_from_free_list(struct rx_ring *rx_ring)
1024 {
1025 	struct bq_desc *sbq_desc;
1026 	uint32_t free_idx;
1027 
1028 	free_idx = rx_ring->sbq_free_head;
1029 	/* Pick from top of free list */
1030 	sbq_desc = rx_ring->sbuf_free[free_idx];
1031 	rx_ring->sbuf_free[free_idx] = NULL;
1032 	if (sbq_desc != NULL) {
1033 		free_idx++;
1034 		if (free_idx >= rx_ring->sbq_len)
1035 			free_idx = 0;
1036 		rx_ring->sbq_free_head = free_idx;
1037 		atomic_dec_32(&rx_ring->sbuf_free_count);
1038 	}
1039 	return (sbq_desc);
1040 }
1041 
1042 /*
1043  * Add a large buffer descriptor to its in use list
1044  */
1045 static void
ql_add_lbuf_to_in_use_list(struct rx_ring * rx_ring,struct bq_desc * lbq_desc)1046 ql_add_lbuf_to_in_use_list(struct rx_ring *rx_ring,
1047     struct bq_desc *lbq_desc)
1048 {
1049 	uint32_t inuse_idx;
1050 
1051 	inuse_idx = rx_ring->lbq_use_tail;
1052 
1053 	rx_ring->lbuf_in_use[inuse_idx] = lbq_desc;
1054 	inuse_idx++;
1055 	if (inuse_idx >= rx_ring->lbq_len)
1056 		inuse_idx = 0;
1057 	rx_ring->lbq_use_tail = inuse_idx;
1058 	atomic_inc_32(&rx_ring->lbuf_in_use_count);
1059 }
1060 
1061 /*
1062  * Get a large buffer descriptor from in use list
1063  */
1064 static struct bq_desc *
ql_get_lbuf_from_in_use_list(struct rx_ring * rx_ring)1065 ql_get_lbuf_from_in_use_list(struct rx_ring *rx_ring)
1066 {
1067 	struct bq_desc *lbq_desc;
1068 	uint32_t inuse_idx;
1069 
1070 	/* Pick from head of in use list */
1071 	inuse_idx = rx_ring->lbq_use_head;
1072 	lbq_desc = rx_ring->lbuf_in_use[inuse_idx];
1073 	rx_ring->lbuf_in_use[inuse_idx] = NULL;
1074 
1075 	if (lbq_desc != NULL) {
1076 		inuse_idx++;
1077 		if (inuse_idx >= rx_ring->lbq_len)
1078 			inuse_idx = 0;
1079 		rx_ring->lbq_use_head = inuse_idx;
1080 		atomic_dec_32(&rx_ring->lbuf_in_use_count);
1081 		atomic_inc_32(&rx_ring->rx_indicate);
1082 		lbq_desc->upl_inuse = 1;
1083 
1084 		/* if mp is NULL */
1085 		if (lbq_desc->mp == NULL) {
1086 			/* try to remap mp again */
1087 			lbq_desc->mp =
1088 			    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1089 			    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1090 		}
1091 	}
1092 	return (lbq_desc);
1093 }
1094 
1095 /*
1096  * Add a large buffer descriptor to free list
1097  */
1098 static void
ql_add_lbuf_to_free_list(struct rx_ring * rx_ring,struct bq_desc * lbq_desc)1099 ql_add_lbuf_to_free_list(struct rx_ring *rx_ring,
1100     struct bq_desc *lbq_desc)
1101 {
1102 	uint32_t free_idx;
1103 
1104 	/* Add to the end of free list */
1105 	free_idx = rx_ring->lbq_free_tail;
1106 	rx_ring->lbuf_free[free_idx] = lbq_desc;
1107 	free_idx++;
1108 	if (free_idx >= rx_ring->lbq_len)
1109 		free_idx = 0;
1110 	rx_ring->lbq_free_tail = free_idx;
1111 	atomic_inc_32(&rx_ring->lbuf_free_count);
1112 	ASSERT(rx_ring->lbuf_free_count <= rx_ring->lbq_len);
1113 }
1114 
1115 /*
1116  * Get a large buffer descriptor from its free list
1117  */
1118 static struct bq_desc *
ql_get_lbuf_from_free_list(struct rx_ring * rx_ring)1119 ql_get_lbuf_from_free_list(struct rx_ring *rx_ring)
1120 {
1121 	struct bq_desc *lbq_desc;
1122 	uint32_t free_idx;
1123 
1124 	free_idx = rx_ring->lbq_free_head;
1125 	/* Pick from head of free list */
1126 	lbq_desc = rx_ring->lbuf_free[free_idx];
1127 	rx_ring->lbuf_free[free_idx] = NULL;
1128 
1129 	if (lbq_desc != NULL) {
1130 		free_idx++;
1131 		if (free_idx >= rx_ring->lbq_len)
1132 			free_idx = 0;
1133 		rx_ring->lbq_free_head = free_idx;
1134 		atomic_dec_32(&rx_ring->lbuf_free_count);
1135 	}
1136 	return (lbq_desc);
1137 }
1138 
1139 /*
1140  * Add a small buffer descriptor to free list
1141  */
1142 static void
ql_refill_sbuf_free_list(struct bq_desc * sbq_desc,boolean_t alloc_memory)1143 ql_refill_sbuf_free_list(struct bq_desc *sbq_desc, boolean_t alloc_memory)
1144 {
1145 	struct rx_ring *rx_ring = sbq_desc->rx_ring;
1146 	uint64_t *sbq_entry;
1147 	qlge_t *qlge = (qlge_t *)rx_ring->qlge;
1148 	/*
1149 	 * Sync access
1150 	 */
1151 	mutex_enter(&rx_ring->sbq_lock);
1152 
1153 	sbq_desc->upl_inuse = 0;
1154 
1155 	/*
1156 	 * If we are freeing the buffers as a result of adapter unload, get out
1157 	 */
1158 	if ((sbq_desc->free_buf != NULL) ||
1159 	    (qlge->mac_flags == QL_MAC_DETACH)) {
1160 		if (sbq_desc->free_buf == NULL)
1161 			atomic_dec_32(&rx_ring->rx_indicate);
1162 		mutex_exit(&rx_ring->sbq_lock);
1163 		return;
1164 	}
1165 #ifdef QLGE_LOAD_UNLOAD
1166 	if (rx_ring->rx_indicate == 0)
1167 		cmn_err(CE_WARN, "sbq: indicate wrong");
1168 #endif
1169 #ifdef QLGE_TRACK_BUFFER_USAGE
1170 	uint32_t sb_consumer_idx;
1171 	uint32_t sb_producer_idx;
1172 	uint32_t num_free_buffers;
1173 	uint32_t temp;
1174 
1175 	temp = ql_read_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg);
1176 	sb_producer_idx = temp & 0x0000ffff;
1177 	sb_consumer_idx = (temp >> 16);
1178 
1179 	if (sb_consumer_idx > sb_producer_idx)
1180 		num_free_buffers = NUM_SMALL_BUFFERS -
1181 		    (sb_consumer_idx - sb_producer_idx);
1182 	else
1183 		num_free_buffers = sb_producer_idx - sb_consumer_idx;
1184 
1185 	if (num_free_buffers < qlge->rx_sb_low_count[rx_ring->cq_id])
1186 		qlge->rx_sb_low_count[rx_ring->cq_id] = num_free_buffers;
1187 
1188 #endif
1189 
1190 #ifdef QLGE_LOAD_UNLOAD
1191 	if (rx_ring->rx_indicate > 0xFF000000)
1192 		cmn_err(CE_WARN, "sbq: indicate(%d) wrong: %d mac_flags %d,"
1193 		    " sbq_desc index %d.",
1194 		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
1195 		    sbq_desc->index);
1196 #endif
1197 	if (alloc_memory) {
1198 		sbq_desc->mp =
1199 		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1200 		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1201 		if (sbq_desc->mp == NULL) {
1202 			rx_ring->rx_failed_sbq_allocs++;
1203 		}
1204 	}
1205 
1206 	/* Got the packet from the stack decrement rx_indicate count */
1207 	atomic_dec_32(&rx_ring->rx_indicate);
1208 
1209 	ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1210 
1211 	/* Rearm if possible */
1212 	if ((rx_ring->sbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1213 	    (qlge->mac_flags == QL_MAC_STARTED)) {
1214 		sbq_entry = rx_ring->sbq_dma.vaddr;
1215 		sbq_entry += rx_ring->sbq_prod_idx;
1216 
1217 		while (rx_ring->sbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1218 			/* Get first one from free list */
1219 			sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
1220 
1221 			*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
1222 			sbq_entry++;
1223 			rx_ring->sbq_prod_idx++;
1224 			if (rx_ring->sbq_prod_idx >= rx_ring->sbq_len) {
1225 				rx_ring->sbq_prod_idx = 0;
1226 				sbq_entry = rx_ring->sbq_dma.vaddr;
1227 			}
1228 			/* Add to end of in use list */
1229 			ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
1230 		}
1231 
1232 		/* Update small buffer queue producer index */
1233 		ql_update_sbq_prod_idx(qlge, rx_ring);
1234 	}
1235 
1236 	mutex_exit(&rx_ring->sbq_lock);
1237 	QL_PRINT(DBG_RX_RING, ("%s(%d) exited, sbuf_free_count %d\n",
1238 	    __func__, qlge->instance, rx_ring->sbuf_free_count));
1239 }
1240 
1241 /*
1242  * rx recycle call back function
1243  */
1244 static void
ql_release_to_sbuf_free_list(caddr_t p)1245 ql_release_to_sbuf_free_list(caddr_t p)
1246 {
1247 	struct bq_desc *sbq_desc = (struct bq_desc *)(void *)p;
1248 
1249 	if (sbq_desc == NULL)
1250 		return;
1251 	ql_refill_sbuf_free_list(sbq_desc, B_TRUE);
1252 }
1253 
1254 /*
1255  * Add a large buffer descriptor to free list
1256  */
1257 static void
ql_refill_lbuf_free_list(struct bq_desc * lbq_desc,boolean_t alloc_memory)1258 ql_refill_lbuf_free_list(struct bq_desc *lbq_desc, boolean_t alloc_memory)
1259 {
1260 	struct rx_ring *rx_ring = lbq_desc->rx_ring;
1261 	uint64_t *lbq_entry;
1262 	qlge_t *qlge = rx_ring->qlge;
1263 
1264 	/* Sync access */
1265 	mutex_enter(&rx_ring->lbq_lock);
1266 
1267 	lbq_desc->upl_inuse = 0;
1268 	/*
1269 	 * If we are freeing the buffers as a result of adapter unload, get out
1270 	 */
1271 	if ((lbq_desc->free_buf != NULL) ||
1272 	    (qlge->mac_flags == QL_MAC_DETACH)) {
1273 		if (lbq_desc->free_buf == NULL)
1274 			atomic_dec_32(&rx_ring->rx_indicate);
1275 		mutex_exit(&rx_ring->lbq_lock);
1276 		return;
1277 	}
1278 #ifdef QLGE_LOAD_UNLOAD
1279 	if (rx_ring->rx_indicate == 0)
1280 		cmn_err(CE_WARN, "lbq: indicate wrong");
1281 #endif
1282 #ifdef QLGE_TRACK_BUFFER_USAGE
1283 	uint32_t lb_consumer_idx;
1284 	uint32_t lb_producer_idx;
1285 	uint32_t num_free_buffers;
1286 	uint32_t temp;
1287 
1288 	temp = ql_read_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg);
1289 
1290 	lb_producer_idx = temp & 0x0000ffff;
1291 	lb_consumer_idx = (temp >> 16);
1292 
1293 	if (lb_consumer_idx > lb_producer_idx)
1294 		num_free_buffers = NUM_LARGE_BUFFERS -
1295 		    (lb_consumer_idx - lb_producer_idx);
1296 	else
1297 		num_free_buffers = lb_producer_idx - lb_consumer_idx;
1298 
1299 	if (num_free_buffers < qlge->rx_lb_low_count[rx_ring->cq_id]) {
1300 		qlge->rx_lb_low_count[rx_ring->cq_id] = num_free_buffers;
1301 	}
1302 #endif
1303 
1304 #ifdef QLGE_LOAD_UNLOAD
1305 	if (rx_ring->rx_indicate > 0xFF000000)
1306 		cmn_err(CE_WARN, "lbq: indicate(%d) wrong: %d mac_flags %d,"
1307 		    "lbq_desc index %d",
1308 		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
1309 		    lbq_desc->index);
1310 #endif
1311 	if (alloc_memory) {
1312 		lbq_desc->mp =
1313 		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1314 		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1315 		if (lbq_desc->mp == NULL) {
1316 			rx_ring->rx_failed_lbq_allocs++;
1317 		}
1318 	}
1319 
1320 	/* Got the packet from the stack decrement rx_indicate count */
1321 	atomic_dec_32(&rx_ring->rx_indicate);
1322 
1323 	ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1324 
1325 	/* Rearm if possible */
1326 	if ((rx_ring->lbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1327 	    (qlge->mac_flags == QL_MAC_STARTED)) {
1328 		lbq_entry = rx_ring->lbq_dma.vaddr;
1329 		lbq_entry += rx_ring->lbq_prod_idx;
1330 		while (rx_ring->lbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1331 			/* Get first one from free list */
1332 			lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
1333 
1334 			*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
1335 			lbq_entry++;
1336 			rx_ring->lbq_prod_idx++;
1337 			if (rx_ring->lbq_prod_idx >= rx_ring->lbq_len) {
1338 				rx_ring->lbq_prod_idx = 0;
1339 				lbq_entry = rx_ring->lbq_dma.vaddr;
1340 			}
1341 
1342 			/* Add to end of in use list */
1343 			ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
1344 		}
1345 
1346 		/* Update large buffer queue producer index */
1347 		ql_update_lbq_prod_idx(rx_ring->qlge, rx_ring);
1348 	}
1349 
1350 	mutex_exit(&rx_ring->lbq_lock);
1351 	QL_PRINT(DBG_RX_RING, ("%s exitd, lbuf_free_count %d\n",
1352 	    __func__, rx_ring->lbuf_free_count));
1353 }
1354 /*
1355  * rx recycle call back function
1356  */
1357 static void
ql_release_to_lbuf_free_list(caddr_t p)1358 ql_release_to_lbuf_free_list(caddr_t p)
1359 {
1360 	struct bq_desc *lbq_desc = (struct bq_desc *)(void *)p;
1361 
1362 	if (lbq_desc == NULL)
1363 		return;
1364 	ql_refill_lbuf_free_list(lbq_desc, B_TRUE);
1365 }
1366 
1367 /*
1368  * free small buffer queue buffers
1369  */
1370 static void
ql_free_sbq_buffers(struct rx_ring * rx_ring)1371 ql_free_sbq_buffers(struct rx_ring *rx_ring)
1372 {
1373 	struct bq_desc *sbq_desc;
1374 	uint32_t i;
1375 	uint32_t j = rx_ring->sbq_free_head;
1376 	int  force_cnt = 0;
1377 
1378 	for (i = 0; i < rx_ring->sbuf_free_count; i++) {
1379 		sbq_desc = rx_ring->sbuf_free[j];
1380 		sbq_desc->free_buf = 1;
1381 		j++;
1382 		if (j >= rx_ring->sbq_len) {
1383 			j = 0;
1384 		}
1385 		if (sbq_desc->mp != NULL) {
1386 			freemsg(sbq_desc->mp);
1387 			sbq_desc->mp = NULL;
1388 		}
1389 	}
1390 	rx_ring->sbuf_free_count = 0;
1391 
1392 	j = rx_ring->sbq_use_head;
1393 	for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
1394 		sbq_desc = rx_ring->sbuf_in_use[j];
1395 		sbq_desc->free_buf = 1;
1396 		j++;
1397 		if (j >= rx_ring->sbq_len) {
1398 			j = 0;
1399 		}
1400 		if (sbq_desc->mp != NULL) {
1401 			freemsg(sbq_desc->mp);
1402 			sbq_desc->mp = NULL;
1403 		}
1404 	}
1405 	rx_ring->sbuf_in_use_count = 0;
1406 
1407 	sbq_desc = &rx_ring->sbq_desc[0];
1408 	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1409 		/*
1410 		 * Set flag so that the callback does not allocate a new buffer
1411 		 */
1412 		sbq_desc->free_buf = 1;
1413 		if (sbq_desc->upl_inuse != 0) {
1414 			force_cnt++;
1415 		}
1416 		if (sbq_desc->bd_dma.dma_handle != NULL) {
1417 			ql_free_phys(&sbq_desc->bd_dma.dma_handle,
1418 			    &sbq_desc->bd_dma.acc_handle);
1419 			sbq_desc->bd_dma.dma_handle = NULL;
1420 			sbq_desc->bd_dma.acc_handle = NULL;
1421 		}
1422 	}
1423 #ifdef QLGE_LOAD_UNLOAD
1424 	cmn_err(CE_NOTE, "sbq: free %d inuse %d force %d\n",
1425 	    rx_ring->sbuf_free_count, rx_ring->sbuf_in_use_count, force_cnt);
1426 #endif
1427 	if (rx_ring->sbuf_in_use != NULL) {
1428 		kmem_free(rx_ring->sbuf_in_use, (rx_ring->sbq_len *
1429 		    sizeof (struct bq_desc *)));
1430 		rx_ring->sbuf_in_use = NULL;
1431 	}
1432 
1433 	if (rx_ring->sbuf_free != NULL) {
1434 		kmem_free(rx_ring->sbuf_free, (rx_ring->sbq_len *
1435 		    sizeof (struct bq_desc *)));
1436 		rx_ring->sbuf_free = NULL;
1437 	}
1438 }
1439 
1440 /* Allocate small buffers */
1441 static int
ql_alloc_sbufs(qlge_t * qlge,struct rx_ring * rx_ring)1442 ql_alloc_sbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1443 {
1444 	struct bq_desc *sbq_desc;
1445 	int i;
1446 	ddi_dma_cookie_t dma_cookie;
1447 
1448 	rx_ring->sbq_use_head = 0;
1449 	rx_ring->sbq_use_tail = 0;
1450 	rx_ring->sbuf_in_use_count = 0;
1451 	rx_ring->sbq_free_head = 0;
1452 	rx_ring->sbq_free_tail = 0;
1453 	rx_ring->sbuf_free_count = 0;
1454 	rx_ring->sbuf_free = kmem_zalloc(rx_ring->sbq_len *
1455 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1456 	if (rx_ring->sbuf_free == NULL) {
1457 		cmn_err(CE_WARN,
1458 		    "!%s: sbuf_free_list alloc: failed",
1459 		    __func__);
1460 		goto alloc_sbuf_err;
1461 	}
1462 
1463 	rx_ring->sbuf_in_use = kmem_zalloc(rx_ring->sbq_len *
1464 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1465 	if (rx_ring->sbuf_in_use == NULL) {
1466 		cmn_err(CE_WARN,
1467 		    "!%s: sbuf_inuse_list alloc: failed",
1468 		    __func__);
1469 		goto alloc_sbuf_err;
1470 	}
1471 
1472 	sbq_desc = &rx_ring->sbq_desc[0];
1473 
1474 	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1475 		/* Allocate buffer */
1476 		if (ql_alloc_phys_rbuf(qlge->dip, &sbq_desc->bd_dma.dma_handle,
1477 		    &ql_buf_acc_attr,
1478 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1479 		    &sbq_desc->bd_dma.acc_handle,
1480 		    (size_t)rx_ring->sbq_buf_size,	/* mem size */
1481 		    (size_t)0,				/* default alignment */
1482 		    (caddr_t *)&sbq_desc->bd_dma.vaddr,
1483 		    &dma_cookie) != 0) {
1484 			cmn_err(CE_WARN,
1485 			    "!%s: ddi_dma_alloc_handle: failed",
1486 			    __func__);
1487 			goto alloc_sbuf_err;
1488 		}
1489 
1490 		/* Set context for Return buffer callback */
1491 		sbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1492 		sbq_desc->rx_recycle.free_func = ql_release_to_sbuf_free_list;
1493 		sbq_desc->rx_recycle.free_arg  = (caddr_t)sbq_desc;
1494 		sbq_desc->rx_ring = rx_ring;
1495 		sbq_desc->upl_inuse = 0;
1496 		sbq_desc->free_buf = 0;
1497 
1498 		sbq_desc->mp =
1499 		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1500 		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1501 		if (sbq_desc->mp == NULL) {
1502 			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1503 			goto alloc_sbuf_err;
1504 		}
1505 		ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1506 	}
1507 
1508 	return (DDI_SUCCESS);
1509 
1510 alloc_sbuf_err:
1511 	ql_free_sbq_buffers(rx_ring);
1512 	return (DDI_FAILURE);
1513 }
1514 
1515 static void
ql_free_lbq_buffers(struct rx_ring * rx_ring)1516 ql_free_lbq_buffers(struct rx_ring *rx_ring)
1517 {
1518 	struct bq_desc *lbq_desc;
1519 	uint32_t i, j;
1520 	int force_cnt = 0;
1521 
1522 	j = rx_ring->lbq_free_head;
1523 	for (i = 0; i < rx_ring->lbuf_free_count; i++) {
1524 		lbq_desc = rx_ring->lbuf_free[j];
1525 		lbq_desc->free_buf = 1;
1526 		j++;
1527 		if (j >= rx_ring->lbq_len)
1528 			j = 0;
1529 		if (lbq_desc->mp != NULL) {
1530 			freemsg(lbq_desc->mp);
1531 			lbq_desc->mp = NULL;
1532 		}
1533 	}
1534 	rx_ring->lbuf_free_count = 0;
1535 
1536 	j = rx_ring->lbq_use_head;
1537 	for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
1538 		lbq_desc = rx_ring->lbuf_in_use[j];
1539 		lbq_desc->free_buf = 1;
1540 		j++;
1541 		if (j >= rx_ring->lbq_len) {
1542 			j = 0;
1543 		}
1544 		if (lbq_desc->mp != NULL) {
1545 			freemsg(lbq_desc->mp);
1546 			lbq_desc->mp = NULL;
1547 		}
1548 	}
1549 	rx_ring->lbuf_in_use_count = 0;
1550 
1551 	lbq_desc = &rx_ring->lbq_desc[0];
1552 	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1553 		/* Set flag so that callback will not allocate a new buffer */
1554 		lbq_desc->free_buf = 1;
1555 		if (lbq_desc->upl_inuse != 0) {
1556 			force_cnt++;
1557 		}
1558 		if (lbq_desc->bd_dma.dma_handle != NULL) {
1559 			ql_free_phys(&lbq_desc->bd_dma.dma_handle,
1560 			    &lbq_desc->bd_dma.acc_handle);
1561 			lbq_desc->bd_dma.dma_handle = NULL;
1562 			lbq_desc->bd_dma.acc_handle = NULL;
1563 		}
1564 	}
1565 #ifdef QLGE_LOAD_UNLOAD
1566 	if (force_cnt) {
1567 		cmn_err(CE_WARN, "lbq: free %d inuse %d force %d",
1568 		    rx_ring->lbuf_free_count, rx_ring->lbuf_in_use_count,
1569 		    force_cnt);
1570 	}
1571 #endif
1572 	if (rx_ring->lbuf_in_use != NULL) {
1573 		kmem_free(rx_ring->lbuf_in_use, (rx_ring->lbq_len *
1574 		    sizeof (struct bq_desc *)));
1575 		rx_ring->lbuf_in_use = NULL;
1576 	}
1577 
1578 	if (rx_ring->lbuf_free != NULL) {
1579 		kmem_free(rx_ring->lbuf_free, (rx_ring->lbq_len *
1580 		    sizeof (struct bq_desc *)));
1581 		rx_ring->lbuf_free = NULL;
1582 	}
1583 }
1584 
1585 /* Allocate large buffers */
1586 static int
ql_alloc_lbufs(qlge_t * qlge,struct rx_ring * rx_ring)1587 ql_alloc_lbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1588 {
1589 	struct bq_desc *lbq_desc;
1590 	ddi_dma_cookie_t dma_cookie;
1591 	int i;
1592 	uint32_t lbq_buf_size;
1593 
1594 	rx_ring->lbq_use_head = 0;
1595 	rx_ring->lbq_use_tail = 0;
1596 	rx_ring->lbuf_in_use_count = 0;
1597 	rx_ring->lbq_free_head = 0;
1598 	rx_ring->lbq_free_tail = 0;
1599 	rx_ring->lbuf_free_count = 0;
1600 	rx_ring->lbuf_free = kmem_zalloc(rx_ring->lbq_len *
1601 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1602 	if (rx_ring->lbuf_free == NULL) {
1603 		cmn_err(CE_WARN,
1604 		    "!%s: lbuf_free_list alloc: failed",
1605 		    __func__);
1606 		goto alloc_lbuf_err;
1607 	}
1608 
1609 	rx_ring->lbuf_in_use = kmem_zalloc(rx_ring->lbq_len *
1610 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1611 
1612 	if (rx_ring->lbuf_in_use == NULL) {
1613 		cmn_err(CE_WARN,
1614 		    "!%s: lbuf_inuse_list alloc: failed",
1615 		    __func__);
1616 		goto alloc_lbuf_err;
1617 	}
1618 
1619 	lbq_buf_size = (qlge->mtu == ETHERMTU) ?
1620 	    LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE;
1621 
1622 	lbq_desc = &rx_ring->lbq_desc[0];
1623 	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1624 		rx_ring->lbq_buf_size = lbq_buf_size;
1625 		/* Allocate buffer */
1626 		if (ql_alloc_phys_rbuf(qlge->dip, &lbq_desc->bd_dma.dma_handle,
1627 		    &ql_buf_acc_attr,
1628 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1629 		    &lbq_desc->bd_dma.acc_handle,
1630 		    (size_t)rx_ring->lbq_buf_size,  /* mem size */
1631 		    (size_t)0, /* default alignment */
1632 		    (caddr_t *)&lbq_desc->bd_dma.vaddr,
1633 		    &dma_cookie) != 0) {
1634 			cmn_err(CE_WARN,
1635 			    "!%s: ddi_dma_alloc_handle: failed",
1636 			    __func__);
1637 			goto alloc_lbuf_err;
1638 		}
1639 
1640 		/* Set context for Return buffer callback */
1641 		lbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1642 		lbq_desc->rx_recycle.free_func = ql_release_to_lbuf_free_list;
1643 		lbq_desc->rx_recycle.free_arg  = (caddr_t)lbq_desc;
1644 		lbq_desc->rx_ring = rx_ring;
1645 		lbq_desc->upl_inuse = 0;
1646 		lbq_desc->free_buf = 0;
1647 
1648 		lbq_desc->mp =
1649 		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1650 		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1651 		if (lbq_desc->mp == NULL) {
1652 			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1653 			goto alloc_lbuf_err;
1654 		}
1655 		ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1656 	} /* For all large buffers */
1657 
1658 	return (DDI_SUCCESS);
1659 
1660 alloc_lbuf_err:
1661 	ql_free_lbq_buffers(rx_ring);
1662 	return (DDI_FAILURE);
1663 }
1664 
1665 /*
1666  * Free rx buffers
1667  */
1668 static void
ql_free_rx_buffers(qlge_t * qlge)1669 ql_free_rx_buffers(qlge_t *qlge)
1670 {
1671 	int i;
1672 	struct rx_ring *rx_ring;
1673 
1674 	for (i = 0; i < qlge->rx_ring_count; i++) {
1675 		rx_ring = &qlge->rx_ring[i];
1676 		if (rx_ring->type != TX_Q) {
1677 			ql_free_lbq_buffers(rx_ring);
1678 			ql_free_sbq_buffers(rx_ring);
1679 		}
1680 	}
1681 }
1682 
1683 /*
1684  * Allocate rx buffers
1685  */
1686 static int
ql_alloc_rx_buffers(qlge_t * qlge)1687 ql_alloc_rx_buffers(qlge_t *qlge)
1688 {
1689 	struct rx_ring *rx_ring;
1690 	int i;
1691 
1692 	for (i = 0; i < qlge->rx_ring_count; i++) {
1693 		rx_ring = &qlge->rx_ring[i];
1694 		if (rx_ring->type != TX_Q) {
1695 			if (ql_alloc_sbufs(qlge, rx_ring) != DDI_SUCCESS)
1696 				goto alloc_err;
1697 			if (ql_alloc_lbufs(qlge, rx_ring) != DDI_SUCCESS)
1698 				goto alloc_err;
1699 		}
1700 	}
1701 #ifdef QLGE_TRACK_BUFFER_USAGE
1702 	for (i = 0; i < qlge->rx_ring_count; i++) {
1703 		if (qlge->rx_ring[i].type == RX_Q) {
1704 			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
1705 			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
1706 		}
1707 		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
1708 	}
1709 #endif
1710 	return (DDI_SUCCESS);
1711 
1712 alloc_err:
1713 	ql_free_rx_buffers(qlge);
1714 	return (DDI_FAILURE);
1715 }
1716 
1717 /*
1718  * Initialize large buffer queue ring
1719  */
1720 static void
ql_init_lbq_ring(struct rx_ring * rx_ring)1721 ql_init_lbq_ring(struct rx_ring *rx_ring)
1722 {
1723 	uint16_t i;
1724 	struct bq_desc *lbq_desc;
1725 
1726 	bzero(rx_ring->lbq_desc, rx_ring->lbq_len * sizeof (struct bq_desc));
1727 	for (i = 0; i < rx_ring->lbq_len; i++) {
1728 		lbq_desc = &rx_ring->lbq_desc[i];
1729 		lbq_desc->index = i;
1730 	}
1731 }
1732 
1733 /*
1734  * Initialize small buffer queue ring
1735  */
1736 static void
ql_init_sbq_ring(struct rx_ring * rx_ring)1737 ql_init_sbq_ring(struct rx_ring *rx_ring)
1738 {
1739 	uint16_t i;
1740 	struct bq_desc *sbq_desc;
1741 
1742 	bzero(rx_ring->sbq_desc, rx_ring->sbq_len * sizeof (struct bq_desc));
1743 	for (i = 0; i < rx_ring->sbq_len; i++) {
1744 		sbq_desc = &rx_ring->sbq_desc[i];
1745 		sbq_desc->index = i;
1746 	}
1747 }
1748 
1749 /*
1750  * Calculate the pseudo-header checksum if hardware can not do
1751  */
1752 static void
ql_pseudo_cksum(uint8_t * buf)1753 ql_pseudo_cksum(uint8_t *buf)
1754 {
1755 	uint32_t cksum;
1756 	uint16_t iphl;
1757 	uint16_t proto;
1758 
1759 	iphl = (uint16_t)(4 * (buf[0] & 0xF));
1760 	cksum = (((uint16_t)buf[2])<<8) + buf[3] - iphl;
1761 	cksum += proto = buf[9];
1762 	cksum += (((uint16_t)buf[12])<<8) + buf[13];
1763 	cksum += (((uint16_t)buf[14])<<8) + buf[15];
1764 	cksum += (((uint16_t)buf[16])<<8) + buf[17];
1765 	cksum += (((uint16_t)buf[18])<<8) + buf[19];
1766 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1767 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1768 
1769 	/*
1770 	 * Point it to the TCP/UDP header, and
1771 	 * update the checksum field.
1772 	 */
1773 	buf += iphl + ((proto == IPPROTO_TCP) ?
1774 	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
1775 
1776 	*(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
1777 
1778 }
1779 
1780 /*
1781  * Transmit an incoming packet.
1782  */
1783 mblk_t *
ql_ring_tx(void * arg,mblk_t * mp)1784 ql_ring_tx(void *arg, mblk_t *mp)
1785 {
1786 	struct tx_ring *tx_ring = (struct tx_ring *)arg;
1787 	qlge_t *qlge = tx_ring->qlge;
1788 	mblk_t *next;
1789 	int rval;
1790 	uint32_t tx_count = 0;
1791 
1792 	if (qlge->port_link_state == LS_DOWN) {
1793 		/* can not send message while link is down */
1794 		mblk_t *tp;
1795 
1796 		while (mp != NULL) {
1797 			tp = mp->b_next;
1798 			mp->b_next = NULL;
1799 			freemsg(mp);
1800 			mp = tp;
1801 		}
1802 		goto exit;
1803 	}
1804 
1805 	mutex_enter(&tx_ring->tx_lock);
1806 	/* if mac is not started, driver is not ready, can not send */
1807 	if (tx_ring->mac_flags != QL_MAC_STARTED) {
1808 		cmn_err(CE_WARN, "%s(%d)ring not started, mode %d "
1809 		    " return packets",
1810 		    __func__, qlge->instance, tx_ring->mac_flags);
1811 		mutex_exit(&tx_ring->tx_lock);
1812 		goto exit;
1813 	}
1814 
1815 	/* we must try to send all */
1816 	while (mp != NULL) {
1817 		/*
1818 		 * if number of available slots is less than a threshold,
1819 		 * then quit
1820 		 */
1821 		if (tx_ring->tx_free_count <= TX_STOP_THRESHOLD) {
1822 			tx_ring->queue_stopped = 1;
1823 			rval = DDI_FAILURE;
1824 #ifdef QLGE_LOAD_UNLOAD
1825 			cmn_err(CE_WARN, "%s(%d) no resources",
1826 			    __func__, qlge->instance);
1827 #endif
1828 			tx_ring->defer++;
1829 			/*
1830 			 * If we return the buffer back we are expected to call
1831 			 * mac_tx_ring_update() when resources are available
1832 			 */
1833 			break;
1834 		}
1835 
1836 		next = mp->b_next;
1837 		mp->b_next = NULL;
1838 
1839 		rval = ql_send_common(tx_ring, mp);
1840 
1841 		if (rval != DDI_SUCCESS) {
1842 			mp->b_next = next;
1843 			break;
1844 		}
1845 		tx_count++;
1846 		mp = next;
1847 	}
1848 
1849 	/*
1850 	 * After all msg blocks are mapped or copied to tx buffer,
1851 	 * trigger the hardware to send!
1852 	 */
1853 	if (tx_count > 0) {
1854 		ql_write_doorbell_reg(tx_ring->qlge, tx_ring->prod_idx_db_reg,
1855 		    tx_ring->prod_idx);
1856 	}
1857 
1858 	mutex_exit(&tx_ring->tx_lock);
1859 exit:
1860 	return (mp);
1861 }
1862 
1863 
1864 /*
1865  * This function builds an mblk list for the given inbound
1866  * completion.
1867  */
1868 
1869 static mblk_t *
ql_build_rx_mp(qlge_t * qlge,struct rx_ring * rx_ring,struct ib_mac_iocb_rsp * ib_mac_rsp)1870 ql_build_rx_mp(qlge_t *qlge, struct rx_ring *rx_ring,
1871     struct ib_mac_iocb_rsp *ib_mac_rsp)
1872 {
1873 	mblk_t *mp = NULL;
1874 	mblk_t *mp1 = NULL;	/* packet header */
1875 	mblk_t *mp2 = NULL;	/* packet content */
1876 	struct bq_desc *lbq_desc;
1877 	struct bq_desc *sbq_desc;
1878 	uint32_t err_flag = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK);
1879 	uint32_t payload_len = le32_to_cpu(ib_mac_rsp->data_len);
1880 	uint32_t header_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1881 	uint32_t pkt_len = payload_len + header_len;
1882 	uint32_t done;
1883 	uint64_t *curr_ial_ptr;
1884 	uint32_t ial_data_addr_low;
1885 	uint32_t actual_data_addr_low;
1886 	mblk_t *mp_ial = NULL;	/* ial chained packets */
1887 	uint32_t size;
1888 	uint32_t cp_offset;
1889 	boolean_t rx_copy = B_FALSE;
1890 	mblk_t *tp = NULL;
1891 
1892 	/*
1893 	 * Check if error flags are set
1894 	 */
1895 	if (err_flag != 0) {
1896 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_OVERSIZE) != 0)
1897 			rx_ring->frame_too_long++;
1898 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_UNDERSIZE) != 0)
1899 			rx_ring->frame_too_short++;
1900 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_CRC) != 0)
1901 			rx_ring->fcs_err++;
1902 #ifdef QLGE_LOAD_UNLOAD
1903 		cmn_err(CE_WARN, "bad packet, type 0x%x", err_flag);
1904 #endif
1905 		QL_DUMP(DBG_RX, "qlge_ring_rx: bad response iocb dump\n",
1906 		    (uint8_t *)ib_mac_rsp, 8,
1907 		    (size_t)sizeof (struct ib_mac_iocb_rsp));
1908 	}
1909 
1910 	/* header should not be in large buffer */
1911 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL) {
1912 		cmn_err(CE_WARN, "header in large buffer or invalid!");
1913 		err_flag |= 1;
1914 	}
1915 	/* if whole packet is too big than rx buffer size */
1916 	if (pkt_len > qlge->max_frame_size) {
1917 		cmn_err(CE_WARN, "ql_build_rx_mpframe too long(%d)!", pkt_len);
1918 		err_flag |= 1;
1919 	}
1920 	if (qlge->rx_copy ||
1921 	    (rx_ring->sbuf_in_use_count <= qlge->rx_copy_threshold) ||
1922 	    (rx_ring->lbuf_in_use_count <= qlge->rx_copy_threshold)) {
1923 		rx_copy = B_TRUE;
1924 	}
1925 
1926 	/* if using rx copy mode, we need to allocate a big enough buffer */
1927 	if (rx_copy) {
1928 		qlge->stats.norcvbuf++;
1929 		tp = allocb(payload_len + header_len + qlge->ip_hdr_offset,
1930 		    BPRI_MED);
1931 		if (tp == NULL) {
1932 			cmn_err(CE_WARN, "rx copy failed to allocate memory");
1933 		} else {
1934 			tp->b_rptr += qlge->ip_hdr_offset;
1935 		}
1936 	}
1937 	/*
1938 	 * Handle the header buffer if present.
1939 	 * packet header must be valid and saved in one small buffer
1940 	 * broadcast/multicast packets' headers not splitted
1941 	 */
1942 	if ((ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) &&
1943 	    (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1944 		QL_PRINT(DBG_RX, ("Header of %d bytes in small buffer.\n",
1945 		    header_len));
1946 		/* Sync access */
1947 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
1948 
1949 		ASSERT(sbq_desc != NULL);
1950 
1951 		/*
1952 		 * Validate addresses from the ASIC with the
1953 		 * expected sbuf address
1954 		 */
1955 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
1956 		    != ib_mac_rsp->hdr_addr) {
1957 			/* Small buffer address mismatch */
1958 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
1959 			    " in wrong small buffer",
1960 			    __func__, qlge->instance, rx_ring->cq_id);
1961 			goto fatal_error;
1962 		}
1963 		/* get this packet */
1964 		mp1 = sbq_desc->mp;
1965 		/* Flush DMA'd data */
1966 		(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
1967 		    0, header_len, DDI_DMA_SYNC_FORKERNEL);
1968 
1969 		if ((err_flag != 0)|| (mp1 == NULL)) {
1970 			/* failed on this packet, put it back for re-arming */
1971 #ifdef QLGE_LOAD_UNLOAD
1972 			cmn_err(CE_WARN, "get header from small buffer fail");
1973 #endif
1974 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1975 			mp1 = NULL;
1976 		} else if (rx_copy) {
1977 			if (tp != NULL) {
1978 				bcopy(sbq_desc->bd_dma.vaddr, tp->b_rptr,
1979 				    header_len);
1980 			}
1981 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1982 			mp1 = NULL;
1983 		} else {
1984 			if ((qlge->ip_hdr_offset != 0)&&
1985 			    (header_len < SMALL_BUFFER_SIZE)) {
1986 				/*
1987 				 * copy entire header to a 2 bytes boundary
1988 				 * address for 8100 adapters so that the IP
1989 				 * header can be on a 4 byte boundary address
1990 				 */
1991 				bcopy(mp1->b_rptr,
1992 				    (mp1->b_rptr + SMALL_BUFFER_SIZE +
1993 				    qlge->ip_hdr_offset),
1994 				    header_len);
1995 				mp1->b_rptr += SMALL_BUFFER_SIZE +
1996 				    qlge->ip_hdr_offset;
1997 			}
1998 
1999 			/*
2000 			 * Adjust the mp payload_len to match
2001 			 * the packet header payload_len
2002 			 */
2003 			mp1->b_wptr = mp1->b_rptr + header_len;
2004 			mp1->b_next = mp1->b_cont = NULL;
2005 			QL_DUMP(DBG_RX, "\t RX packet header dump:\n",
2006 			    (uint8_t *)mp1->b_rptr, 8, header_len);
2007 		}
2008 	}
2009 
2010 	/*
2011 	 * packet data or whole packet can be in small or one or
2012 	 * several large buffer(s)
2013 	 */
2014 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2015 		/*
2016 		 * The data is in a single small buffer.
2017 		 */
2018 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2019 
2020 		ASSERT(sbq_desc != NULL);
2021 
2022 		QL_PRINT(DBG_RX,
2023 		    ("%d bytes in a single small buffer, sbq_desc = %p, "
2024 		    "sbq_desc->bd_dma.dma_addr = %x,"
2025 		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
2026 		    payload_len, sbq_desc, sbq_desc->bd_dma.dma_addr,
2027 		    ib_mac_rsp->data_addr, sbq_desc->mp));
2028 
2029 		/*
2030 		 * Validate  addresses from the ASIC with the
2031 		 * expected sbuf address
2032 		 */
2033 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
2034 		    != ib_mac_rsp->data_addr) {
2035 			/* Small buffer address mismatch */
2036 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
2037 			    " in wrong small buffer",
2038 			    __func__, qlge->instance, rx_ring->cq_id);
2039 			goto fatal_error;
2040 		}
2041 		/* get this packet */
2042 		mp2 = sbq_desc->mp;
2043 		(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
2044 		    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
2045 		if ((err_flag != 0) || (mp2 == NULL)) {
2046 #ifdef QLGE_LOAD_UNLOAD
2047 			/* failed on this packet, put it back for re-arming */
2048 			cmn_err(CE_WARN, "ignore bad data from small buffer");
2049 #endif
2050 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2051 			mp2 = NULL;
2052 		} else if (rx_copy) {
2053 			if (tp != NULL) {
2054 				bcopy(sbq_desc->bd_dma.vaddr,
2055 				    tp->b_rptr + header_len, payload_len);
2056 				tp->b_wptr =
2057 				    tp->b_rptr + header_len + payload_len;
2058 			}
2059 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2060 			mp2 = NULL;
2061 		} else {
2062 			/* Adjust the buffer length to match the payload_len */
2063 			mp2->b_wptr = mp2->b_rptr + payload_len;
2064 			mp2->b_next = mp2->b_cont = NULL;
2065 			/* Flush DMA'd data */
2066 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
2067 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
2068 			/*
2069 			 * if payload is too small , copy to
2070 			 * the end of packet header
2071 			 */
2072 			if ((mp1 != NULL) &&
2073 			    (payload_len <= qlge->payload_copy_thresh) &&
2074 			    (pkt_len <
2075 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2076 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2077 				mp1->b_wptr += payload_len;
2078 				freemsg(mp2);
2079 				mp2 = NULL;
2080 			}
2081 		}
2082 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2083 		/*
2084 		 * The data is in a single large buffer.
2085 		 */
2086 		lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2087 
2088 		QL_PRINT(DBG_RX,
2089 		    ("%d bytes in a single large buffer, lbq_desc = %p, "
2090 		    "lbq_desc->bd_dma.dma_addr = %x,"
2091 		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
2092 		    payload_len, lbq_desc, lbq_desc->bd_dma.dma_addr,
2093 		    ib_mac_rsp->data_addr, lbq_desc->mp));
2094 
2095 		ASSERT(lbq_desc != NULL);
2096 
2097 		/*
2098 		 * Validate  addresses from the ASIC with
2099 		 * the expected lbuf address
2100 		 */
2101 		if (cpu_to_le64(lbq_desc->bd_dma.dma_addr)
2102 		    != ib_mac_rsp->data_addr) {
2103 			/* Large buffer address mismatch */
2104 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
2105 			    " in wrong large buffer",
2106 			    __func__, qlge->instance, rx_ring->cq_id);
2107 			goto fatal_error;
2108 		}
2109 		mp2 = lbq_desc->mp;
2110 		/* Flush DMA'd data */
2111 		(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2112 		    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
2113 		if ((err_flag != 0) || (mp2 == NULL)) {
2114 #ifdef QLGE_LOAD_UNLOAD
2115 			cmn_err(CE_WARN, "ignore bad data from large buffer");
2116 #endif
2117 			/* failed on this packet, put it back for re-arming */
2118 			ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2119 			mp2 = NULL;
2120 		} else if (rx_copy) {
2121 			if (tp != NULL) {
2122 				bcopy(lbq_desc->bd_dma.vaddr,
2123 				    tp->b_rptr + header_len, payload_len);
2124 				tp->b_wptr =
2125 				    tp->b_rptr + header_len + payload_len;
2126 			}
2127 			ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2128 			mp2 = NULL;
2129 		} else {
2130 			/*
2131 			 * Adjust the buffer length to match
2132 			 * the packet payload_len
2133 			 */
2134 			mp2->b_wptr = mp2->b_rptr + payload_len;
2135 			mp2->b_next = mp2->b_cont = NULL;
2136 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
2137 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
2138 			/*
2139 			 * if payload is too small , copy to
2140 			 * the end of packet header
2141 			 */
2142 			if ((mp1 != NULL) &&
2143 			    (payload_len <= qlge->payload_copy_thresh) &&
2144 			    (pkt_len<
2145 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2146 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2147 				mp1->b_wptr += payload_len;
2148 				freemsg(mp2);
2149 				mp2 = NULL;
2150 			}
2151 		}
2152 	} else if (payload_len) { /* ial case */
2153 		/*
2154 		 * payload available but not in sml nor lrg buffer,
2155 		 * so, it is saved in IAL
2156 		 */
2157 #ifdef QLGE_LOAD_UNLOAD
2158 		cmn_err(CE_NOTE, "packet chained in IAL \n");
2159 #endif
2160 		/* lrg buf addresses are saved in one small buffer */
2161 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2162 		curr_ial_ptr = (uint64_t *)sbq_desc->bd_dma.vaddr;
2163 		done = 0;
2164 		cp_offset = 0;
2165 
2166 		while (!done) {
2167 			ial_data_addr_low =
2168 			    (uint32_t)(le64_to_cpu(*curr_ial_ptr) &
2169 			    0xFFFFFFFE);
2170 			/* check if this is the last packet fragment */
2171 			done = (uint32_t)(le64_to_cpu(*curr_ial_ptr) & 1);
2172 			curr_ial_ptr++;
2173 			/*
2174 			 * The data is in one or several large buffer(s).
2175 			 */
2176 			lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2177 			actual_data_addr_low =
2178 			    (uint32_t)(lbq_desc->bd_dma.dma_addr &
2179 			    0xFFFFFFFE);
2180 			if (ial_data_addr_low != actual_data_addr_low) {
2181 				cmn_err(CE_WARN,
2182 				    "packet saved in wrong ial lrg buffer"
2183 				    " expected %x, actual %lx",
2184 				    ial_data_addr_low,
2185 				    (uintptr_t)lbq_desc->bd_dma.dma_addr);
2186 				goto fatal_error;
2187 			}
2188 
2189 			size = (payload_len < rx_ring->lbq_buf_size)?
2190 			    payload_len : rx_ring->lbq_buf_size;
2191 			payload_len -= size;
2192 			mp2 = lbq_desc->mp;
2193 			if ((err_flag != 0) || (mp2 == NULL)) {
2194 #ifdef QLGE_LOAD_UNLOAD
2195 				cmn_err(CE_WARN,
2196 				    "ignore bad data from large buffer");
2197 #endif
2198 				ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2199 				mp2 = NULL;
2200 			} else if (rx_copy) {
2201 				if (tp != NULL) {
2202 					(void) ddi_dma_sync(
2203 					    lbq_desc->bd_dma.dma_handle,
2204 					    0, size, DDI_DMA_SYNC_FORKERNEL);
2205 					bcopy(lbq_desc->bd_dma.vaddr,
2206 					    tp->b_rptr + header_len + cp_offset,
2207 					    size);
2208 					tp->b_wptr =
2209 					    tp->b_rptr + size + cp_offset +
2210 					    header_len;
2211 					cp_offset += size;
2212 				}
2213 				ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2214 				mp2 = NULL;
2215 			} else {
2216 				if (mp_ial == NULL) {
2217 					mp_ial = mp2;
2218 				} else {
2219 					linkb(mp_ial, mp2);
2220 				}
2221 
2222 				mp2->b_next = NULL;
2223 				mp2->b_cont = NULL;
2224 				mp2->b_wptr = mp2->b_rptr + size;
2225 				/* Flush DMA'd data */
2226 				(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2227 				    0, size, DDI_DMA_SYNC_FORKERNEL);
2228 				QL_PRINT(DBG_RX, ("ial %d payload received \n",
2229 				    size));
2230 				QL_DUMP(DBG_RX, "\t Mac data dump:\n",
2231 				    (uint8_t *)mp2->b_rptr, 8, size);
2232 			}
2233 		}
2234 		if (err_flag != 0) {
2235 #ifdef QLGE_LOAD_UNLOAD
2236 			/* failed on this packet, put it back for re-arming */
2237 			cmn_err(CE_WARN, "ignore bad data from small buffer");
2238 #endif
2239 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2240 		} else {
2241 			mp2 = mp_ial;
2242 			freemsg(sbq_desc->mp);
2243 		}
2244 	}
2245 	/*
2246 	 * some packets' hdr not split, then send mp2 upstream, otherwise,
2247 	 * concatenate message block mp2 to the tail of message header, mp1
2248 	 */
2249 	if (!err_flag) {
2250 		if (rx_copy) {
2251 			if (tp != NULL) {
2252 				tp->b_next = NULL;
2253 				tp->b_cont = NULL;
2254 				tp->b_wptr = tp->b_rptr +
2255 				    header_len + payload_len;
2256 			}
2257 			mp = tp;
2258 		} else {
2259 			if (mp1) {
2260 				if (mp2) {
2261 					QL_PRINT(DBG_RX,
2262 					    ("packet in mp1 and mp2\n"));
2263 					/* mp1->b_cont = mp2; */
2264 					linkb(mp1, mp2);
2265 					mp = mp1;
2266 				} else {
2267 					QL_PRINT(DBG_RX,
2268 					    ("packet in mp1 only\n"));
2269 					mp = mp1;
2270 				}
2271 			} else if (mp2) {
2272 				QL_PRINT(DBG_RX, ("packet in mp2 only\n"));
2273 				mp = mp2;
2274 			}
2275 		}
2276 	}
2277 	return (mp);
2278 
2279 fatal_error:
2280 	/* fatal Error! */
2281 	if (qlge->fm_enable) {
2282 		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2283 		ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2284 		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2285 	}
2286 	if (tp) {
2287 		freemsg(tp);
2288 	}
2289 
2290 	/* *mp->b_wptr = 0; */
2291 	ql_wake_asic_reset_soft_intr(qlge);
2292 	return (NULL);
2293 
2294 }
2295 
2296 /*
2297  * Bump completion queue consumer index.
2298  */
2299 static void
ql_update_cq(struct rx_ring * rx_ring)2300 ql_update_cq(struct rx_ring *rx_ring)
2301 {
2302 	rx_ring->cnsmr_idx++;
2303 	rx_ring->curr_entry++;
2304 	if (rx_ring->cnsmr_idx >= rx_ring->cq_len) {
2305 		rx_ring->cnsmr_idx = 0;
2306 		rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
2307 	}
2308 }
2309 
2310 /*
2311  * Update completion queue consumer index.
2312  */
2313 static void
ql_write_cq_idx(struct rx_ring * rx_ring)2314 ql_write_cq_idx(struct rx_ring *rx_ring)
2315 {
2316 	qlge_t *qlge = rx_ring->qlge;
2317 
2318 	ql_write_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg,
2319 	    rx_ring->cnsmr_idx);
2320 }
2321 
2322 /*
2323  * Processes a SYS-Chip Event Notification Completion Event.
2324  * The incoming notification event that describes a link up/down
2325  * or some sorts of error happens.
2326  */
2327 static void
ql_process_chip_ae_intr(qlge_t * qlge,struct ib_sys_event_iocb_rsp * ib_sys_event_rsp_ptr)2328 ql_process_chip_ae_intr(qlge_t *qlge,
2329     struct ib_sys_event_iocb_rsp *ib_sys_event_rsp_ptr)
2330 {
2331 	uint8_t eventType = ib_sys_event_rsp_ptr->event_type;
2332 	uint32_t soft_req = 0;
2333 
2334 	switch (eventType) {
2335 		case SYS_EVENT_PORT_LINK_UP:	/* 0x0h */
2336 			QL_PRINT(DBG_MBX, ("Port Link Up\n"));
2337 			break;
2338 
2339 		case SYS_EVENT_PORT_LINK_DOWN:	/* 0x1h */
2340 			QL_PRINT(DBG_MBX, ("Port Link Down\n"));
2341 			break;
2342 
2343 		case SYS_EVENT_MULTIPLE_CAM_HITS : /* 0x6h */
2344 			cmn_err(CE_WARN, "A multiple CAM hits look up error "
2345 			    "occurred");
2346 			soft_req |= NEED_HW_RESET;
2347 			break;
2348 
2349 		case SYS_EVENT_SOFT_ECC_ERR:	/* 0x7h */
2350 			cmn_err(CE_WARN, "Soft ECC error detected");
2351 			soft_req |= NEED_HW_RESET;
2352 			break;
2353 
2354 		case SYS_EVENT_MGMT_FATAL_ERR:	/* 0x8h */
2355 			cmn_err(CE_WARN, "Management (MPI) Processor fatal"
2356 			    " error occured");
2357 			soft_req |= NEED_MPI_RESET;
2358 			break;
2359 
2360 		case SYS_EVENT_MAC_INTERRUPT:	/* 0x9h */
2361 			QL_PRINT(DBG_MBX, ("MAC Interrupt"));
2362 			break;
2363 
2364 		case SYS_EVENT_PCI_ERR_READING_SML_LRG_BUF:	/* 0x40h */
2365 			cmn_err(CE_WARN, "PCI Error reading small/large "
2366 			    "buffers occured");
2367 			soft_req |= NEED_HW_RESET;
2368 			break;
2369 
2370 		default:
2371 			QL_PRINT(DBG_RX, ("%s(%d) unknown Sys Event: "
2372 			    "type 0x%x occured",
2373 			    __func__, qlge->instance, eventType));
2374 			break;
2375 	}
2376 
2377 	if ((soft_req & NEED_MPI_RESET) != 0) {
2378 		ql_wake_mpi_reset_soft_intr(qlge);
2379 		if (qlge->fm_enable) {
2380 			ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2381 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2382 		}
2383 	} else if ((soft_req & NEED_HW_RESET) != 0) {
2384 		ql_wake_asic_reset_soft_intr(qlge);
2385 		if (qlge->fm_enable) {
2386 			ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2387 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2388 		}
2389 	}
2390 }
2391 
2392 /*
2393  * set received packet checksum flag
2394  */
2395 void
ql_set_rx_cksum(mblk_t * mp,struct ib_mac_iocb_rsp * net_rsp)2396 ql_set_rx_cksum(mblk_t *mp, struct ib_mac_iocb_rsp *net_rsp)
2397 {
2398 	uint32_t flags;
2399 
2400 	/* Not TCP or UDP packet? nothing more to do */
2401 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) == 0) &&
2402 	    ((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) == 0))
2403 	return;
2404 
2405 	/* No CKO support for IPv6 */
2406 	if ((net_rsp->flags3 & IB_MAC_IOCB_RSP_V6) != 0)
2407 		return;
2408 
2409 	/*
2410 	 * If checksum error, don't set flags; stack will calculate
2411 	 * checksum, detect the error and update statistics
2412 	 */
2413 	if (((net_rsp->flags1 & IB_MAC_IOCB_RSP_TE) != 0) ||
2414 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_IE) != 0))
2415 		return;
2416 
2417 	/* TCP or UDP packet and checksum valid */
2418 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) != 0) &&
2419 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
2420 		flags = HCK_FULLCKSUM_OK;
2421 		mac_hcksum_set(mp, 0, 0, 0, 0, flags);
2422 	}
2423 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) != 0) &&
2424 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
2425 		flags = HCK_FULLCKSUM_OK;
2426 		mac_hcksum_set(mp, 0, 0, 0, 0, flags);
2427 	}
2428 }
2429 
2430 /*
2431  * This function goes through h/w descriptor in one specified rx ring,
2432  * receives the data if the descriptor status shows the data is ready.
2433  * It returns a chain of mblks containing the received data, to be
2434  * passed up to mac_rx_ring().
2435  */
2436 mblk_t *
ql_ring_rx(struct rx_ring * rx_ring,int poll_bytes)2437 ql_ring_rx(struct rx_ring *rx_ring, int poll_bytes)
2438 {
2439 	qlge_t *qlge = rx_ring->qlge;
2440 	uint32_t prod = ql_read_sh_reg(qlge, rx_ring);
2441 	struct ib_mac_iocb_rsp *net_rsp;
2442 	mblk_t *mp;
2443 	mblk_t *mblk_head;
2444 	mblk_t **mblk_tail;
2445 	uint32_t received_bytes = 0;
2446 	uint32_t length;
2447 #ifdef QLGE_PERFORMANCE
2448 	uint32_t pkt_ct = 0;
2449 #endif
2450 
2451 #ifdef QLGE_TRACK_BUFFER_USAGE
2452 	uint32_t consumer_idx;
2453 	uint32_t producer_idx;
2454 	uint32_t num_free_entries;
2455 	uint32_t temp;
2456 
2457 	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2458 	consumer_idx = temp & 0x0000ffff;
2459 	producer_idx = (temp >> 16);
2460 
2461 	if (consumer_idx > producer_idx)
2462 		num_free_entries = (consumer_idx - producer_idx);
2463 	else
2464 		num_free_entries = NUM_RX_RING_ENTRIES - (
2465 		    producer_idx - consumer_idx);
2466 
2467 	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2468 		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2469 
2470 #endif
2471 	mblk_head = NULL;
2472 	mblk_tail = &mblk_head;
2473 
2474 	while ((prod != rx_ring->cnsmr_idx)) {
2475 		QL_PRINT(DBG_RX,
2476 		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n",
2477 		    __func__, rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2478 
2479 		net_rsp = (struct ib_mac_iocb_rsp *)rx_ring->curr_entry;
2480 		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2481 		    (off_t)((uintptr_t)net_rsp -
2482 		    (uintptr_t)rx_ring->cq_dma.vaddr),
2483 		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2484 		QL_DUMP(DBG_RX, "qlge_ring_rx: rx completion iocb\n",
2485 		    rx_ring->curr_entry, 8, (size_t)sizeof (*net_rsp));
2486 
2487 		switch (net_rsp->opcode) {
2488 
2489 		case OPCODE_IB_MAC_IOCB:
2490 			/* Adding length of pkt header and payload */
2491 			length = le32_to_cpu(net_rsp->data_len) +
2492 			    le32_to_cpu(net_rsp->hdr_len);
2493 			if ((poll_bytes != QLGE_POLL_ALL) &&
2494 			    ((received_bytes + length) > poll_bytes)) {
2495 				continue;
2496 			}
2497 			received_bytes += length;
2498 
2499 #ifdef QLGE_PERFORMANCE
2500 			pkt_ct++;
2501 #endif
2502 			mp = ql_build_rx_mp(qlge, rx_ring, net_rsp);
2503 			if (mp != NULL) {
2504 				if (rx_ring->mac_flags != QL_MAC_STARTED) {
2505 					/*
2506 					 * Increment number of packets we have
2507 					 * indicated to the stack, should be
2508 					 * decremented when we get it back
2509 					 * or when freemsg is called
2510 					 */
2511 					ASSERT(rx_ring->rx_indicate
2512 					    <= rx_ring->cq_len);
2513 #ifdef QLGE_LOAD_UNLOAD
2514 					cmn_err(CE_WARN, "%s do not send to OS,"
2515 					    " mac_flags %d, indicate %d",
2516 					    __func__, rx_ring->mac_flags,
2517 					    rx_ring->rx_indicate);
2518 #endif
2519 					QL_PRINT(DBG_RX,
2520 					    ("cq_id = %d, packet "
2521 					    "dropped, mac not "
2522 					    "enabled.\n",
2523 					    rx_ring->cq_id));
2524 					rx_ring->rx_pkt_dropped_mac_unenabled++;
2525 
2526 					/* rx_lock is expected to be held */
2527 					mutex_exit(&rx_ring->rx_lock);
2528 					freemsg(mp);
2529 					mutex_enter(&rx_ring->rx_lock);
2530 					mp = NULL;
2531 				}
2532 
2533 				if (mp != NULL) {
2534 					/*
2535 					 * IP full packet has been
2536 					 * successfully verified by
2537 					 * H/W and is correct
2538 					 */
2539 					ql_set_rx_cksum(mp, net_rsp);
2540 
2541 					rx_ring->rx_packets++;
2542 					rx_ring->rx_bytes += length;
2543 					*mblk_tail = mp;
2544 					mblk_tail = &mp->b_next;
2545 				}
2546 			} else {
2547 				QL_PRINT(DBG_RX,
2548 				    ("cq_id = %d, packet dropped\n",
2549 				    rx_ring->cq_id));
2550 				rx_ring->rx_packets_dropped_no_buffer++;
2551 			}
2552 			break;
2553 
2554 		case OPCODE_IB_SYS_EVENT_IOCB:
2555 			ql_process_chip_ae_intr(qlge,
2556 			    (struct ib_sys_event_iocb_rsp *)
2557 			    net_rsp);
2558 			break;
2559 
2560 		default:
2561 			cmn_err(CE_WARN,
2562 			    "%s Ring(%d)Hit default case, not handled!"
2563 			    " dropping the packet, "
2564 			    "opcode = %x.", __func__, rx_ring->cq_id,
2565 			    net_rsp->opcode);
2566 			break;
2567 		}
2568 		/* increment cnsmr_idx and curr_entry */
2569 		ql_update_cq(rx_ring);
2570 		prod = ql_read_sh_reg(qlge, rx_ring);
2571 
2572 	}
2573 
2574 #ifdef QLGE_PERFORMANCE
2575 	if (pkt_ct >= 7)
2576 		rx_ring->hist[7]++;
2577 	else if (pkt_ct == 6)
2578 		rx_ring->hist[6]++;
2579 	else if (pkt_ct == 5)
2580 		rx_ring->hist[5]++;
2581 	else if (pkt_ct == 4)
2582 		rx_ring->hist[4]++;
2583 	else if (pkt_ct == 3)
2584 		rx_ring->hist[3]++;
2585 	else if (pkt_ct == 2)
2586 		rx_ring->hist[2]++;
2587 	else if (pkt_ct == 1)
2588 		rx_ring->hist[1]++;
2589 	else if (pkt_ct == 0)
2590 		rx_ring->hist[0]++;
2591 #endif
2592 
2593 	/* update cnsmr_idx */
2594 	ql_write_cq_idx(rx_ring);
2595 	/* do not enable interrupt for polling mode */
2596 	if (poll_bytes == QLGE_POLL_ALL)
2597 		ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
2598 	return (mblk_head);
2599 }
2600 
2601 /* Process an outbound completion from an rx ring. */
2602 static void
ql_process_mac_tx_intr(qlge_t * qlge,struct ob_mac_iocb_rsp * mac_rsp)2603 ql_process_mac_tx_intr(qlge_t *qlge, struct ob_mac_iocb_rsp *mac_rsp)
2604 {
2605 	struct tx_ring *tx_ring;
2606 	struct tx_ring_desc *tx_ring_desc;
2607 	int j;
2608 
2609 	tx_ring = &qlge->tx_ring[mac_rsp->txq_idx];
2610 	tx_ring_desc = tx_ring->wq_desc;
2611 	tx_ring_desc += mac_rsp->tid;
2612 
2613 	if (tx_ring_desc->tx_type == USE_DMA) {
2614 		QL_PRINT(DBG_TX, ("%s(%d): tx type USE_DMA\n",
2615 		    __func__, qlge->instance));
2616 
2617 		/*
2618 		 * Release the DMA resource that is used for
2619 		 * DMA binding.
2620 		 */
2621 		for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
2622 			(void) ddi_dma_unbind_handle(
2623 			    tx_ring_desc->tx_dma_handle[j]);
2624 		}
2625 
2626 		tx_ring_desc->tx_dma_handle_used = 0;
2627 		/*
2628 		 * Free the mblk after sending completed
2629 		 */
2630 		if (tx_ring_desc->mp != NULL) {
2631 			freemsg(tx_ring_desc->mp);
2632 			tx_ring_desc->mp = NULL;
2633 		}
2634 	}
2635 
2636 	tx_ring->obytes += tx_ring_desc->tx_bytes;
2637 	tx_ring->opackets++;
2638 
2639 	if (mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | OB_MAC_IOCB_RSP_S |
2640 	    OB_MAC_IOCB_RSP_L | OB_MAC_IOCB_RSP_B)) {
2641 		tx_ring->errxmt++;
2642 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2643 			/* EMPTY */
2644 			QL_PRINT(DBG_TX,
2645 			    ("Total descriptor length did not match "
2646 			    "transfer length.\n"));
2647 		}
2648 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2649 			/* EMPTY */
2650 			QL_PRINT(DBG_TX,
2651 			    ("Frame too short to be legal, not sent.\n"));
2652 		}
2653 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2654 			/* EMPTY */
2655 			QL_PRINT(DBG_TX,
2656 			    ("Frame too long, but sent anyway.\n"));
2657 		}
2658 		if (mac_rsp->flags3 & OB_MAC_IOCB_RSP_B) {
2659 			/* EMPTY */
2660 			QL_PRINT(DBG_TX,
2661 			    ("PCI backplane error. Frame not sent.\n"));
2662 		}
2663 	}
2664 	atomic_inc_32(&tx_ring->tx_free_count);
2665 }
2666 
2667 /*
2668  * clean up tx completion iocbs
2669  */
2670 int
ql_clean_outbound_rx_ring(struct rx_ring * rx_ring)2671 ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2672 {
2673 	qlge_t *qlge = rx_ring->qlge;
2674 	uint32_t prod = ql_read_sh_reg(qlge, rx_ring);
2675 	struct ob_mac_iocb_rsp *net_rsp = NULL;
2676 	int count = 0;
2677 	struct tx_ring *tx_ring;
2678 	boolean_t resume_tx = B_FALSE;
2679 
2680 	mutex_enter(&rx_ring->rx_lock);
2681 #ifdef QLGE_TRACK_BUFFER_USAGE
2682 	{
2683 	uint32_t consumer_idx;
2684 	uint32_t producer_idx;
2685 	uint32_t num_free_entries;
2686 	uint32_t temp;
2687 
2688 	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2689 	consumer_idx = temp & 0x0000ffff;
2690 	producer_idx = (temp >> 16);
2691 
2692 	if (consumer_idx > producer_idx)
2693 		num_free_entries = (consumer_idx - producer_idx);
2694 	else
2695 		num_free_entries = NUM_RX_RING_ENTRIES -
2696 		    (producer_idx - consumer_idx);
2697 
2698 	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2699 		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2700 
2701 	}
2702 #endif
2703 	/* While there are entries in the completion queue. */
2704 	while (prod != rx_ring->cnsmr_idx) {
2705 
2706 		QL_PRINT(DBG_RX,
2707 		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n", __func__,
2708 		    rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2709 
2710 		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2711 		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2712 		    (off_t)((uintptr_t)net_rsp -
2713 		    (uintptr_t)rx_ring->cq_dma.vaddr),
2714 		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2715 
2716 		QL_DUMP(DBG_RX, "ql_clean_outbound_rx_ring: "
2717 		    "response packet data\n",
2718 		    rx_ring->curr_entry, 8,
2719 		    (size_t)sizeof (*net_rsp));
2720 
2721 		switch (net_rsp->opcode) {
2722 
2723 		case OPCODE_OB_MAC_OFFLOAD_IOCB:
2724 		case OPCODE_OB_MAC_IOCB:
2725 			ql_process_mac_tx_intr(qlge, net_rsp);
2726 			break;
2727 
2728 		default:
2729 			cmn_err(CE_WARN,
2730 			    "%s Hit default case, not handled! "
2731 			    "dropping the packet,"
2732 			    " opcode = %x.",
2733 			    __func__, net_rsp->opcode);
2734 			break;
2735 		}
2736 		count++;
2737 		ql_update_cq(rx_ring);
2738 		prod = ql_read_sh_reg(qlge, rx_ring);
2739 	}
2740 	ql_write_cq_idx(rx_ring);
2741 
2742 	mutex_exit(&rx_ring->rx_lock);
2743 
2744 	net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2745 	tx_ring = &qlge->tx_ring[net_rsp->txq_idx];
2746 
2747 	mutex_enter(&tx_ring->tx_lock);
2748 
2749 	if (tx_ring->queue_stopped &&
2750 	    (tx_ring->tx_free_count > TX_RESUME_THRESHOLD)) {
2751 		/*
2752 		 * The queue got stopped because the tx_ring was full.
2753 		 * Wake it up, because it's now at least 25% empty.
2754 		 */
2755 		tx_ring->queue_stopped = 0;
2756 		resume_tx = B_TRUE;
2757 	}
2758 
2759 	mutex_exit(&tx_ring->tx_lock);
2760 	/* Don't hold the lock during OS callback */
2761 	if (resume_tx)
2762 		RESUME_TX(tx_ring);
2763 	return (count);
2764 }
2765 
2766 /*
2767  * reset asic when error happens
2768  */
2769 /* ARGSUSED */
2770 static uint_t
ql_asic_reset_work(caddr_t arg1,caddr_t arg2)2771 ql_asic_reset_work(caddr_t arg1, caddr_t arg2)
2772 {
2773 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2774 	int status;
2775 
2776 	mutex_enter(&qlge->gen_mutex);
2777 	(void) ql_do_stop(qlge);
2778 	/*
2779 	 * Write default ethernet address to chip register Mac
2780 	 * Address slot 0 and Enable Primary Mac Function.
2781 	 */
2782 	mutex_enter(&qlge->hw_mutex);
2783 	(void) ql_unicst_set(qlge,
2784 	    (uint8_t *)qlge->unicst_addr[0].addr.ether_addr_octet, 0);
2785 	mutex_exit(&qlge->hw_mutex);
2786 	qlge->mac_flags = QL_MAC_INIT;
2787 	status = ql_do_start(qlge);
2788 	if (status != DDI_SUCCESS)
2789 		goto error;
2790 	qlge->mac_flags = QL_MAC_STARTED;
2791 	mutex_exit(&qlge->gen_mutex);
2792 	ddi_fm_service_impact(qlge->dip, DDI_SERVICE_RESTORED);
2793 
2794 	return (DDI_INTR_CLAIMED);
2795 
2796 error:
2797 	mutex_exit(&qlge->gen_mutex);
2798 	cmn_err(CE_WARN,
2799 	    "qlge up/down cycle failed, closing device");
2800 	if (qlge->fm_enable) {
2801 		ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2802 		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_LOST);
2803 		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2804 	}
2805 	return (DDI_INTR_CLAIMED);
2806 }
2807 
2808 /*
2809  * Reset MPI
2810  */
2811 /* ARGSUSED */
2812 static uint_t
ql_mpi_reset_work(caddr_t arg1,caddr_t arg2)2813 ql_mpi_reset_work(caddr_t arg1, caddr_t arg2)
2814 {
2815 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2816 
2817 	(void) ql_reset_mpi_risc(qlge);
2818 	return (DDI_INTR_CLAIMED);
2819 }
2820 
2821 /*
2822  * Process MPI mailbox messages
2823  */
2824 /* ARGSUSED */
2825 static uint_t
ql_mpi_event_work(caddr_t arg1,caddr_t arg2)2826 ql_mpi_event_work(caddr_t arg1, caddr_t arg2)
2827 {
2828 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2829 
2830 	ql_do_mpi_intr(qlge);
2831 	return (DDI_INTR_CLAIMED);
2832 }
2833 
2834 /* Fire up a handler to reset the MPI processor. */
2835 void
ql_wake_asic_reset_soft_intr(qlge_t * qlge)2836 ql_wake_asic_reset_soft_intr(qlge_t *qlge)
2837 {
2838 	(void) ddi_intr_trigger_softint(qlge->asic_reset_intr_hdl, NULL);
2839 }
2840 
2841 static void
ql_wake_mpi_reset_soft_intr(qlge_t * qlge)2842 ql_wake_mpi_reset_soft_intr(qlge_t *qlge)
2843 {
2844 	(void) ddi_intr_trigger_softint(qlge->mpi_reset_intr_hdl, NULL);
2845 }
2846 
2847 static void
ql_wake_mpi_event_soft_intr(qlge_t * qlge)2848 ql_wake_mpi_event_soft_intr(qlge_t *qlge)
2849 {
2850 	(void) ddi_intr_trigger_softint(qlge->mpi_event_intr_hdl, NULL);
2851 }
2852 
2853 /*
2854  * This handles a fatal error, MPI activity, and the default
2855  * rx_ring in an MSI-X multiple interrupt vector environment.
2856  * In MSI/Legacy environment it also process the rest of
2857  * the rx_rings.
2858  */
2859 /* ARGSUSED */
2860 static uint_t
ql_isr(caddr_t arg1,caddr_t arg2)2861 ql_isr(caddr_t arg1, caddr_t arg2)
2862 {
2863 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
2864 	struct rx_ring *ob_ring;
2865 	qlge_t *qlge = rx_ring->qlge;
2866 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
2867 	uint32_t var, prod;
2868 	int i;
2869 	int work_done = 0;
2870 
2871 	mblk_t *mp;
2872 
2873 	_NOTE(ARGUNUSED(arg2));
2874 
2875 	++qlge->rx_interrupts[rx_ring->cq_id];
2876 
2877 	if (ql_atomic_read_32(&qlge->intr_ctx[0].irq_cnt)) {
2878 		ql_write_reg(qlge, REG_RSVD7, 0xfeed0002);
2879 		var = ql_read_reg(qlge, REG_ERROR_STATUS);
2880 		var = ql_read_reg(qlge, REG_STATUS);
2881 		var = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
2882 		return (DDI_INTR_CLAIMED);
2883 	}
2884 
2885 	ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2886 
2887 	/*
2888 	 * process send completes on first stride tx ring if available
2889 	 */
2890 	if (qlge->isr_stride) {
2891 		ob_ring = &qlge->rx_ring[qlge->isr_stride];
2892 		if (ql_read_sh_reg(qlge, ob_ring) !=
2893 		    ob_ring->cnsmr_idx) {
2894 			(void) ql_clean_outbound_rx_ring(ob_ring);
2895 		}
2896 	}
2897 	/*
2898 	 * Check the default queue and wake handler if active.
2899 	 */
2900 	rx_ring = &qlge->rx_ring[0];
2901 	prod = ql_read_sh_reg(qlge, rx_ring);
2902 	QL_PRINT(DBG_INTR, ("rx-ring[0] prod index 0x%x, consumer 0x%x ",
2903 	    prod, rx_ring->cnsmr_idx));
2904 	/* check if interrupt is due to incoming packet */
2905 	if (prod != rx_ring->cnsmr_idx) {
2906 		QL_PRINT(DBG_INTR, ("Waking handler for rx_ring[0].\n"));
2907 		ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2908 		mutex_enter(&rx_ring->rx_lock);
2909 		mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2910 		mutex_exit(&rx_ring->rx_lock);
2911 
2912 		if (mp != NULL)
2913 			RX_UPSTREAM(rx_ring, mp);
2914 		work_done++;
2915 	} else {
2916 		/*
2917 		 * If interrupt is not due to incoming packet, read status
2918 		 * register to see if error happens or mailbox interrupt.
2919 		 */
2920 		var = ql_read_reg(qlge, REG_STATUS);
2921 		if ((var & STATUS_FE) != 0) {
2922 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
2923 			if (qlge->fm_enable) {
2924 				atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2925 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2926 				ddi_fm_service_impact(qlge->dip,
2927 				    DDI_SERVICE_LOST);
2928 			}
2929 			cmn_err(CE_WARN, "Got fatal error, STS = %x.", var);
2930 			var = ql_read_reg(qlge, REG_ERROR_STATUS);
2931 			cmn_err(CE_WARN,
2932 			    "Resetting chip. Error Status Register = 0x%x",
2933 			    var);
2934 			ql_wake_asic_reset_soft_intr(qlge);
2935 			return (DDI_INTR_CLAIMED);
2936 		}
2937 
2938 		/*
2939 		 * Check MPI processor activity.
2940 		 */
2941 		if ((var & STATUS_PI) != 0) {
2942 			/*
2943 			 * We've got an async event or mailbox completion.
2944 			 * Handle it and clear the source of the interrupt.
2945 			 */
2946 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
2947 
2948 			QL_PRINT(DBG_INTR, ("Got MPI processor interrupt.\n"));
2949 			ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2950 			ql_wake_mpi_event_soft_intr(qlge);
2951 			work_done++;
2952 		}
2953 	}
2954 
2955 
2956 	if (qlge->intr_type != DDI_INTR_TYPE_MSIX) {
2957 		/*
2958 		 * Start the DPC for each active queue.
2959 		 */
2960 		for (i = 1; i < qlge->rx_ring_count; i++) {
2961 			rx_ring = &qlge->rx_ring[i];
2962 
2963 			if (ql_read_sh_reg(qlge, rx_ring) !=
2964 			    rx_ring->cnsmr_idx) {
2965 				QL_PRINT(DBG_INTR,
2966 				    ("Waking handler for rx_ring[%d].\n", i));
2967 
2968 				ql_disable_completion_interrupt(qlge,
2969 				    rx_ring->irq);
2970 				if (rx_ring->type == TX_Q) {
2971 					(void) ql_clean_outbound_rx_ring(
2972 					    rx_ring);
2973 					ql_enable_completion_interrupt(
2974 					    rx_ring->qlge, rx_ring->irq);
2975 				} else {
2976 					mutex_enter(&rx_ring->rx_lock);
2977 					mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2978 					mutex_exit(&rx_ring->rx_lock);
2979 					if (mp != NULL)
2980 						RX_UPSTREAM(rx_ring, mp);
2981 #ifdef QLGE_LOAD_UNLOAD
2982 					if (rx_ring->mac_flags ==
2983 					    QL_MAC_STOPPED)
2984 						cmn_err(CE_NOTE,
2985 						    "%s rx_indicate(%d) %d\n",
2986 						    __func__, i,
2987 						    rx_ring->rx_indicate);
2988 #endif
2989 				}
2990 				work_done++;
2991 			}
2992 		}
2993 	}
2994 
2995 	ql_enable_completion_interrupt(qlge, intr_ctx->intr);
2996 
2997 	return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
2998 }
2999 
3000 /*
3001  * MSI-X Multiple Vector Interrupt Handler for outbound (TX) completions.
3002  */
3003 /* ARGSUSED */
3004 static uint_t
ql_msix_tx_isr(caddr_t arg1,caddr_t arg2)3005 ql_msix_tx_isr(caddr_t arg1, caddr_t arg2)
3006 {
3007 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3008 	qlge_t *qlge = rx_ring->qlge;
3009 	_NOTE(ARGUNUSED(arg2));
3010 
3011 	++qlge->rx_interrupts[rx_ring->cq_id];
3012 	(void) ql_clean_outbound_rx_ring(rx_ring);
3013 	ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
3014 
3015 	return (DDI_INTR_CLAIMED);
3016 }
3017 
3018 /*
3019  * MSI-X Multiple Vector Interrupt Handler
3020  */
3021 /* ARGSUSED */
3022 static uint_t
ql_msix_isr(caddr_t arg1,caddr_t arg2)3023 ql_msix_isr(caddr_t arg1, caddr_t arg2)
3024 {
3025 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3026 	struct rx_ring *ob_ring;
3027 	qlge_t *qlge = rx_ring->qlge;
3028 	mblk_t *mp;
3029 	_NOTE(ARGUNUSED(arg2));
3030 
3031 	QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
3032 
3033 	ql_disable_completion_interrupt(qlge, rx_ring->irq);
3034 
3035 	/*
3036 	 * process send completes on stride tx ring if available
3037 	 */
3038 	if (qlge->isr_stride) {
3039 		ob_ring = rx_ring + qlge->isr_stride;
3040 		if (ql_read_sh_reg(qlge, ob_ring) !=
3041 		    ob_ring->cnsmr_idx) {
3042 			++qlge->rx_interrupts[ob_ring->cq_id];
3043 			(void) ql_clean_outbound_rx_ring(ob_ring);
3044 		}
3045 	}
3046 
3047 	++qlge->rx_interrupts[rx_ring->cq_id];
3048 
3049 	mutex_enter(&rx_ring->rx_lock);
3050 	mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
3051 	mutex_exit(&rx_ring->rx_lock);
3052 
3053 	if (mp != NULL)
3054 		RX_UPSTREAM(rx_ring, mp);
3055 
3056 	return (DDI_INTR_CLAIMED);
3057 }
3058 
3059 /*
3060  * Poll n_bytes of chained incoming packets
3061  */
3062 mblk_t *
ql_ring_rx_poll(void * arg,int n_bytes)3063 ql_ring_rx_poll(void *arg, int n_bytes)
3064 {
3065 	struct rx_ring *rx_ring = (struct rx_ring *)arg;
3066 	qlge_t *qlge = rx_ring->qlge;
3067 	mblk_t *mp = NULL;
3068 	uint32_t var;
3069 
3070 	ASSERT(n_bytes >= 0);
3071 	QL_PRINT(DBG_GLD, ("%s for ring(%d) to read max %d bytes\n",
3072 	    __func__, rx_ring->cq_id, n_bytes));
3073 
3074 	++qlge->rx_polls[rx_ring->cq_id];
3075 
3076 	if (n_bytes == 0)
3077 		return (mp);
3078 	mutex_enter(&rx_ring->rx_lock);
3079 	mp = ql_ring_rx(rx_ring, n_bytes);
3080 	mutex_exit(&rx_ring->rx_lock);
3081 
3082 	if ((rx_ring->cq_id == 0) && (mp == NULL)) {
3083 		var = ql_read_reg(qlge, REG_STATUS);
3084 		/*
3085 		 * Check for fatal error.
3086 		 */
3087 		if ((var & STATUS_FE) != 0) {
3088 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
3089 			var = ql_read_reg(qlge, REG_ERROR_STATUS);
3090 			cmn_err(CE_WARN, "Got fatal error %x.", var);
3091 			ql_wake_asic_reset_soft_intr(qlge);
3092 			if (qlge->fm_enable) {
3093 				atomic_or_32(&qlge->flags, ADAPTER_ERROR);
3094 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
3095 				ddi_fm_service_impact(qlge->dip,
3096 				    DDI_SERVICE_LOST);
3097 			}
3098 		}
3099 		/*
3100 		 * Check MPI processor activity.
3101 		 */
3102 		if ((var & STATUS_PI) != 0) {
3103 			/*
3104 			 * We've got an async event or mailbox completion.
3105 			 * Handle it and clear the source of the interrupt.
3106 			 */
3107 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
3108 			ql_do_mpi_intr(qlge);
3109 		}
3110 	}
3111 
3112 	return (mp);
3113 }
3114 
3115 /*
3116  * MSI-X Multiple Vector Interrupt Handler for inbound (RX) completions.
3117  */
3118 /* ARGSUSED */
3119 static uint_t
ql_msix_rx_isr(caddr_t arg1,caddr_t arg2)3120 ql_msix_rx_isr(caddr_t arg1, caddr_t arg2)
3121 {
3122 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3123 	qlge_t *qlge = rx_ring->qlge;
3124 	mblk_t *mp;
3125 	_NOTE(ARGUNUSED(arg2));
3126 
3127 	QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
3128 
3129 	++qlge->rx_interrupts[rx_ring->cq_id];
3130 
3131 	mutex_enter(&rx_ring->rx_lock);
3132 	mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
3133 	mutex_exit(&rx_ring->rx_lock);
3134 
3135 	if (mp != NULL)
3136 		RX_UPSTREAM(rx_ring, mp);
3137 
3138 	return (DDI_INTR_CLAIMED);
3139 }
3140 
3141 
3142 /*
3143  *
3144  * Allocate DMA Buffer for ioctl service
3145  *
3146  */
3147 static int
ql_alloc_ioctl_dma_buf(qlge_t * qlge)3148 ql_alloc_ioctl_dma_buf(qlge_t *qlge)
3149 {
3150 	uint64_t phy_addr;
3151 	uint64_t alloc_size;
3152 	ddi_dma_cookie_t dma_cookie;
3153 
3154 	alloc_size = qlge->ioctl_buf_dma_attr.mem_len =
3155 	    max(WCS_MPI_CODE_RAM_LENGTH, MEMC_MPI_RAM_LENGTH);
3156 	if (ql_alloc_phys(qlge->dip, &qlge->ioctl_buf_dma_attr.dma_handle,
3157 	    &ql_buf_acc_attr,
3158 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3159 	    &qlge->ioctl_buf_dma_attr.acc_handle,
3160 	    (size_t)alloc_size,  /* mem size */
3161 	    (size_t)0,  /* alignment */
3162 	    (caddr_t *)&qlge->ioctl_buf_dma_attr.vaddr,
3163 	    &dma_cookie) != 0) {
3164 		cmn_err(CE_WARN, "%s(%d): ioctl DMA allocation failed.",
3165 		    __func__, qlge->instance);
3166 		return (DDI_FAILURE);
3167 	}
3168 
3169 	phy_addr = dma_cookie.dmac_laddress;
3170 
3171 	if (qlge->ioctl_buf_dma_attr.vaddr == NULL) {
3172 		cmn_err(CE_WARN, "%s(%d): failed.", __func__, qlge->instance);
3173 		return (DDI_FAILURE);
3174 	}
3175 
3176 	qlge->ioctl_buf_dma_attr.dma_addr = phy_addr;
3177 
3178 	QL_PRINT(DBG_MBX, ("%s: ioctl_dma_buf_virt_addr = 0x%lx, "
3179 	    "phy_addr = 0x%lx\n",
3180 	    __func__, qlge->ioctl_buf_dma_attr.vaddr, phy_addr));
3181 
3182 	return (DDI_SUCCESS);
3183 }
3184 
3185 
3186 /*
3187  * Function to free physical memory.
3188  */
3189 static void
ql_free_phys(ddi_dma_handle_t * dma_handle,ddi_acc_handle_t * acc_handle)3190 ql_free_phys(ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *acc_handle)
3191 {
3192 	if (*dma_handle != NULL) {
3193 		(void) ddi_dma_unbind_handle(*dma_handle);
3194 		if (*acc_handle != NULL)
3195 			ddi_dma_mem_free(acc_handle);
3196 		ddi_dma_free_handle(dma_handle);
3197 		*acc_handle = NULL;
3198 		*dma_handle = NULL;
3199 	}
3200 }
3201 
3202 /*
3203  * Function to free ioctl dma buffer.
3204  */
3205 static void
ql_free_ioctl_dma_buf(qlge_t * qlge)3206 ql_free_ioctl_dma_buf(qlge_t *qlge)
3207 {
3208 	if (qlge->ioctl_buf_dma_attr.dma_handle != NULL) {
3209 		ql_free_phys(&qlge->ioctl_buf_dma_attr.dma_handle,
3210 		    &qlge->ioctl_buf_dma_attr.acc_handle);
3211 
3212 		qlge->ioctl_buf_dma_attr.vaddr = NULL;
3213 		qlge->ioctl_buf_dma_attr.dma_handle = NULL;
3214 	}
3215 }
3216 
3217 /*
3218  * Free shadow register space used for request and completion queues
3219  */
3220 static void
ql_free_shadow_space(qlge_t * qlge)3221 ql_free_shadow_space(qlge_t *qlge)
3222 {
3223 	if (qlge->host_copy_shadow_dma_attr.dma_handle != NULL) {
3224 		ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
3225 		    &qlge->host_copy_shadow_dma_attr.acc_handle);
3226 		bzero(&qlge->host_copy_shadow_dma_attr,
3227 		    sizeof (qlge->host_copy_shadow_dma_attr));
3228 	}
3229 
3230 	if (qlge->buf_q_ptr_base_addr_dma_attr.dma_handle != NULL) {
3231 		ql_free_phys(&qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
3232 		    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle);
3233 		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
3234 		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
3235 	}
3236 }
3237 
3238 /*
3239  * Allocate shadow register space for request and completion queues
3240  */
3241 static int
ql_alloc_shadow_space(qlge_t * qlge)3242 ql_alloc_shadow_space(qlge_t *qlge)
3243 {
3244 	ddi_dma_cookie_t dma_cookie;
3245 
3246 	if (ql_alloc_phys(qlge->dip,
3247 	    &qlge->host_copy_shadow_dma_attr.dma_handle,
3248 	    &ql_dev_acc_attr,
3249 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3250 	    &qlge->host_copy_shadow_dma_attr.acc_handle,
3251 	    (size_t)VM_PAGE_SIZE,  /* mem size */
3252 	    (size_t)4, /* 4 bytes alignment */
3253 	    (caddr_t *)&qlge->host_copy_shadow_dma_attr.vaddr,
3254 	    &dma_cookie) != 0) {
3255 		bzero(&qlge->host_copy_shadow_dma_attr,
3256 		    sizeof (qlge->host_copy_shadow_dma_attr));
3257 
3258 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory for "
3259 		    "response shadow registers", __func__, qlge->instance);
3260 		return (DDI_FAILURE);
3261 	}
3262 
3263 	qlge->host_copy_shadow_dma_attr.dma_addr = dma_cookie.dmac_laddress;
3264 
3265 	if (ql_alloc_phys(qlge->dip,
3266 	    &qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
3267 	    &ql_desc_acc_attr,
3268 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3269 	    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle,
3270 	    (size_t)VM_PAGE_SIZE,  /* mem size */
3271 	    (size_t)4, /* 4 bytes alignment */
3272 	    (caddr_t *)&qlge->buf_q_ptr_base_addr_dma_attr.vaddr,
3273 	    &dma_cookie) != 0) {
3274 		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
3275 		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
3276 
3277 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory "
3278 		    "for request shadow registers",
3279 		    __func__, qlge->instance);
3280 		goto err_wqp_sh_area;
3281 	}
3282 	qlge->buf_q_ptr_base_addr_dma_attr.dma_addr = dma_cookie.dmac_laddress;
3283 
3284 	return (DDI_SUCCESS);
3285 
3286 err_wqp_sh_area:
3287 	ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
3288 	    &qlge->host_copy_shadow_dma_attr.acc_handle);
3289 	bzero(&qlge->host_copy_shadow_dma_attr,
3290 	    sizeof (qlge->host_copy_shadow_dma_attr));
3291 
3292 	return (DDI_FAILURE);
3293 }
3294 
3295 /*
3296  * Initialize a tx ring
3297  */
3298 static void
ql_init_tx_ring(struct tx_ring * tx_ring)3299 ql_init_tx_ring(struct tx_ring *tx_ring)
3300 {
3301 	int i;
3302 	struct ob_mac_iocb_req *mac_iocb_ptr = tx_ring->wq_dma.vaddr;
3303 	struct tx_ring_desc *tx_ring_desc = tx_ring->wq_desc;
3304 
3305 	for (i = 0; i < tx_ring->wq_len; i++) {
3306 		tx_ring_desc->index = i;
3307 		tx_ring_desc->queue_entry = mac_iocb_ptr;
3308 		mac_iocb_ptr++;
3309 		tx_ring_desc++;
3310 	}
3311 	tx_ring->tx_free_count = tx_ring->wq_len;
3312 	tx_ring->queue_stopped = 0;
3313 }
3314 
3315 /*
3316  * Free one tx ring resources
3317  */
3318 static void
ql_free_tx_resources(struct tx_ring * tx_ring)3319 ql_free_tx_resources(struct tx_ring *tx_ring)
3320 {
3321 	struct tx_ring_desc *tx_ring_desc;
3322 	int i, j;
3323 
3324 	if (tx_ring->wq_dma.dma_handle != NULL) {
3325 		ql_free_phys(&tx_ring->wq_dma.dma_handle,
3326 		    &tx_ring->wq_dma.acc_handle);
3327 		bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
3328 	}
3329 	if (tx_ring->wq_desc != NULL) {
3330 		tx_ring_desc = tx_ring->wq_desc;
3331 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3332 			for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3333 				if (tx_ring_desc->tx_dma_handle[j]) {
3334 					/*
3335 					 * The unbinding will happen in tx
3336 					 * completion, here we just free the
3337 					 * handles
3338 					 */
3339 					ddi_dma_free_handle(
3340 					    &(tx_ring_desc->tx_dma_handle[j]));
3341 					tx_ring_desc->tx_dma_handle[j] = NULL;
3342 				}
3343 			}
3344 			if (tx_ring_desc->oal != NULL) {
3345 				tx_ring_desc->oal_dma_addr = 0;
3346 				tx_ring_desc->oal = NULL;
3347 				tx_ring_desc->copy_buffer = NULL;
3348 				tx_ring_desc->copy_buffer_dma_addr = 0;
3349 
3350 				ql_free_phys(&tx_ring_desc->oal_dma.dma_handle,
3351 				    &tx_ring_desc->oal_dma.acc_handle);
3352 			}
3353 		}
3354 		kmem_free(tx_ring->wq_desc,
3355 		    tx_ring->wq_len * sizeof (struct tx_ring_desc));
3356 		tx_ring->wq_desc = NULL;
3357 	}
3358 	/* free the wqicb struct */
3359 	if (tx_ring->wqicb_dma.dma_handle) {
3360 		ql_free_phys(&tx_ring->wqicb_dma.dma_handle,
3361 		    &tx_ring->wqicb_dma.acc_handle);
3362 		bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3363 	}
3364 }
3365 
3366 /*
3367  * Allocate work (request) queue memory and transmit
3368  * descriptors for this transmit ring
3369  */
3370 static int
ql_alloc_tx_resources(qlge_t * qlge,struct tx_ring * tx_ring)3371 ql_alloc_tx_resources(qlge_t *qlge, struct tx_ring *tx_ring)
3372 {
3373 	ddi_dma_cookie_t dma_cookie;
3374 	struct tx_ring_desc *tx_ring_desc;
3375 	int i, j;
3376 	uint32_t length;
3377 
3378 	/* allocate dma buffers for obiocbs */
3379 	if (ql_alloc_phys(qlge->dip, &tx_ring->wq_dma.dma_handle,
3380 	    &ql_desc_acc_attr,
3381 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3382 	    &tx_ring->wq_dma.acc_handle,
3383 	    (size_t)tx_ring->wq_size,	/* mem size */
3384 	    (size_t)128, /* alignment:128 bytes boundary */
3385 	    (caddr_t *)&tx_ring->wq_dma.vaddr,
3386 	    &dma_cookie) != 0) {
3387 		bzero(&tx_ring->wq_dma, sizeof (&tx_ring->wq_dma));
3388 		cmn_err(CE_WARN, "%s(%d): reqQ allocation failed.",
3389 		    __func__, qlge->instance);
3390 		return (DDI_FAILURE);
3391 	}
3392 	tx_ring->wq_dma.dma_addr = dma_cookie.dmac_laddress;
3393 
3394 	tx_ring->wq_desc =
3395 	    kmem_zalloc(tx_ring->wq_len * sizeof (struct tx_ring_desc),
3396 	    KM_NOSLEEP);
3397 	if (tx_ring->wq_desc == NULL) {
3398 		goto err;
3399 	} else {
3400 		tx_ring_desc = tx_ring->wq_desc;
3401 		/*
3402 		 * Allocate a large enough structure to hold the following
3403 		 * 1. oal buffer MAX_SGELEMENTS * sizeof (oal_entry) bytes
3404 		 * 2. copy buffer of QL_MAX_COPY_LENGTH bytes
3405 		 */
3406 		length = (sizeof (struct oal_entry) * MAX_SG_ELEMENTS)
3407 		    + QL_MAX_COPY_LENGTH;
3408 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3409 
3410 			if (ql_alloc_phys(qlge->dip,
3411 			    &tx_ring_desc->oal_dma.dma_handle,
3412 			    &ql_desc_acc_attr,
3413 			    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3414 			    &tx_ring_desc->oal_dma.acc_handle,
3415 			    (size_t)length,	/* mem size */
3416 			    (size_t)0, /* default alignment:8 bytes boundary */
3417 			    (caddr_t *)&tx_ring_desc->oal_dma.vaddr,
3418 			    &dma_cookie) != 0) {
3419 				bzero(&tx_ring_desc->oal_dma,
3420 				    sizeof (tx_ring_desc->oal_dma));
3421 				cmn_err(CE_WARN, "%s(%d): reqQ tx buf &"
3422 				    "oal alloc failed.",
3423 				    __func__, qlge->instance);
3424 				goto err;
3425 			}
3426 
3427 			tx_ring_desc->oal = tx_ring_desc->oal_dma.vaddr;
3428 			tx_ring_desc->oal_dma_addr = dma_cookie.dmac_laddress;
3429 			tx_ring_desc->copy_buffer =
3430 			    (caddr_t)((uint8_t *)tx_ring_desc->oal
3431 			    + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3432 			tx_ring_desc->copy_buffer_dma_addr =
3433 			    (tx_ring_desc->oal_dma_addr
3434 			    + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3435 
3436 			/* Allocate dma handles for transmit buffers */
3437 			for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3438 				if (ddi_dma_alloc_handle(qlge->dip,
3439 				    &tx_mapping_dma_attr,
3440 				    DDI_DMA_DONTWAIT,
3441 				    0, &tx_ring_desc->tx_dma_handle[j])
3442 				    != DDI_SUCCESS) {
3443 					tx_ring_desc->tx_dma_handle[j] = NULL;
3444 					cmn_err(CE_WARN,
3445 					    "!%s: ddi_dma_alloc_handle: "
3446 					    "tx_dma_handle "
3447 					    "alloc failed", __func__);
3448 					ql_free_phys(
3449 					    &tx_ring_desc->oal_dma.dma_handle,
3450 					    &tx_ring_desc->oal_dma.acc_handle);
3451 					goto err;
3452 				}
3453 			}
3454 		}
3455 	}
3456 	/* alloc a wqicb control block to load this tx ring to hw */
3457 	if (ql_alloc_phys(qlge->dip, &tx_ring->wqicb_dma.dma_handle,
3458 	    &ql_desc_acc_attr,
3459 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3460 	    &tx_ring->wqicb_dma.acc_handle,
3461 	    (size_t)sizeof (struct wqicb_t),	/* mem size */
3462 	    (size_t)0, /* alignment:128 bytes boundary */
3463 	    (caddr_t *)&tx_ring->wqicb_dma.vaddr,
3464 	    &dma_cookie) != 0) {
3465 		bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3466 		cmn_err(CE_WARN, "%s(%d): wqicb allocation failed.",
3467 		    __func__, qlge->instance);
3468 		goto err;
3469 	}
3470 	tx_ring->wqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3471 
3472 	return (DDI_SUCCESS);
3473 
3474 err:
3475 	ql_free_tx_resources(tx_ring);
3476 	return (DDI_FAILURE);
3477 }
3478 
3479 /*
3480  * Free one rx ring resources
3481  */
3482 static void
ql_free_rx_resources(struct rx_ring * rx_ring)3483 ql_free_rx_resources(struct rx_ring *rx_ring)
3484 {
3485 	/* Free the small buffer queue. */
3486 	if (rx_ring->sbq_dma.dma_handle) {
3487 		ql_free_phys(&rx_ring->sbq_dma.dma_handle,
3488 		    &rx_ring->sbq_dma.acc_handle);
3489 		bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3490 	}
3491 
3492 	/* Free the small buffer queue control blocks. */
3493 	if (rx_ring->sbq_desc != NULL) {
3494 		kmem_free(rx_ring->sbq_desc, rx_ring->sbq_len *
3495 		    sizeof (struct bq_desc));
3496 		rx_ring->sbq_desc = NULL;
3497 	}
3498 
3499 	/* Free the large buffer queue. */
3500 	if (rx_ring->lbq_dma.dma_handle) {
3501 		ql_free_phys(&rx_ring->lbq_dma.dma_handle,
3502 		    &rx_ring->lbq_dma.acc_handle);
3503 		bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3504 	}
3505 
3506 	/* Free the large buffer queue control blocks. */
3507 	if (rx_ring->lbq_desc != NULL) {
3508 		kmem_free(rx_ring->lbq_desc, rx_ring->lbq_len *
3509 		    sizeof (struct bq_desc));
3510 		rx_ring->lbq_desc = NULL;
3511 	}
3512 
3513 	/* Free cqicb struct */
3514 	if (rx_ring->cqicb_dma.dma_handle) {
3515 		ql_free_phys(&rx_ring->cqicb_dma.dma_handle,
3516 		    &rx_ring->cqicb_dma.acc_handle);
3517 		bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3518 	}
3519 	/* Free the rx queue. */
3520 	if (rx_ring->cq_dma.dma_handle) {
3521 		ql_free_phys(&rx_ring->cq_dma.dma_handle,
3522 		    &rx_ring->cq_dma.acc_handle);
3523 		bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3524 	}
3525 }
3526 
3527 /*
3528  * Allocate queues and buffers for this completions queue based
3529  * on the values in the parameter structure.
3530  */
3531 static int
ql_alloc_rx_resources(qlge_t * qlge,struct rx_ring * rx_ring)3532 ql_alloc_rx_resources(qlge_t *qlge, struct rx_ring *rx_ring)
3533 {
3534 	ddi_dma_cookie_t dma_cookie;
3535 
3536 	if (ql_alloc_phys(qlge->dip, &rx_ring->cq_dma.dma_handle,
3537 	    &ql_desc_acc_attr,
3538 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3539 	    &rx_ring->cq_dma.acc_handle,
3540 	    (size_t)rx_ring->cq_size,  /* mem size */
3541 	    (size_t)128, /* alignment:128 bytes boundary */
3542 	    (caddr_t *)&rx_ring->cq_dma.vaddr,
3543 	    &dma_cookie) != 0)	{
3544 		bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3545 		cmn_err(CE_WARN, "%s(%d): rspQ allocation failed.",
3546 		    __func__, qlge->instance);
3547 		return (DDI_FAILURE);
3548 	}
3549 	rx_ring->cq_dma.dma_addr = dma_cookie.dmac_laddress;
3550 
3551 	if (rx_ring->sbq_len != 0) {
3552 		/*
3553 		 * Allocate small buffer queue.
3554 		 */
3555 		if (ql_alloc_phys(qlge->dip, &rx_ring->sbq_dma.dma_handle,
3556 		    &ql_desc_acc_attr,
3557 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3558 		    &rx_ring->sbq_dma.acc_handle,
3559 		    (size_t)rx_ring->sbq_size,  /* mem size */
3560 		    (size_t)128, /* alignment:128 bytes boundary */
3561 		    (caddr_t *)&rx_ring->sbq_dma.vaddr,
3562 		    &dma_cookie) != 0) {
3563 			bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3564 			cmn_err(CE_WARN,
3565 			    "%s(%d): small buffer queue allocation failed.",
3566 			    __func__, qlge->instance);
3567 			goto err_mem;
3568 		}
3569 		rx_ring->sbq_dma.dma_addr = dma_cookie.dmac_laddress;
3570 
3571 		/*
3572 		 * Allocate small buffer queue control blocks.
3573 		 */
3574 		rx_ring->sbq_desc =
3575 		    kmem_zalloc(rx_ring->sbq_len * sizeof (struct bq_desc),
3576 		    KM_NOSLEEP);
3577 		if (rx_ring->sbq_desc == NULL) {
3578 			cmn_err(CE_WARN,
3579 			    "sbq control block allocation failed.");
3580 			goto err_mem;
3581 		}
3582 
3583 		ql_init_sbq_ring(rx_ring);
3584 	}
3585 
3586 	if (rx_ring->lbq_len != 0) {
3587 		/*
3588 		 * Allocate large buffer queue.
3589 		 */
3590 		if (ql_alloc_phys(qlge->dip, &rx_ring->lbq_dma.dma_handle,
3591 		    &ql_desc_acc_attr,
3592 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3593 		    &rx_ring->lbq_dma.acc_handle,
3594 		    (size_t)rx_ring->lbq_size,  /* mem size */
3595 		    (size_t)128, /* alignment:128 bytes boundary */
3596 		    (caddr_t *)&rx_ring->lbq_dma.vaddr,
3597 		    &dma_cookie) != 0) {
3598 			bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3599 			cmn_err(CE_WARN, "%s(%d): lbq allocation failed.",
3600 			    __func__, qlge->instance);
3601 			goto err_mem;
3602 		}
3603 		rx_ring->lbq_dma.dma_addr = dma_cookie.dmac_laddress;
3604 
3605 		/*
3606 		 * Allocate large buffer queue control blocks.
3607 		 */
3608 		rx_ring->lbq_desc =
3609 		    kmem_zalloc(rx_ring->lbq_len * sizeof (struct bq_desc),
3610 		    KM_NOSLEEP);
3611 		if (rx_ring->lbq_desc == NULL) {
3612 			cmn_err(CE_WARN,
3613 			    "Large buffer queue control block allocation "
3614 			    "failed.");
3615 			goto err_mem;
3616 		}
3617 		ql_init_lbq_ring(rx_ring);
3618 	}
3619 
3620 	if (ql_alloc_phys(qlge->dip, &rx_ring->cqicb_dma.dma_handle,
3621 	    &ql_desc_acc_attr,
3622 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3623 	    &rx_ring->cqicb_dma.acc_handle,
3624 	    (size_t)sizeof (struct cqicb_t),  /* mem size */
3625 	    (size_t)0, /* alignment:128 bytes boundary */
3626 	    (caddr_t *)&rx_ring->cqicb_dma.vaddr,
3627 	    &dma_cookie) != 0) {
3628 		bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3629 		cmn_err(CE_WARN, "%s(%d): cqicb allocation failed.",
3630 		    __func__, qlge->instance);
3631 		goto err_mem;
3632 	}
3633 	rx_ring->cqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3634 
3635 	return (DDI_SUCCESS);
3636 
3637 err_mem:
3638 	ql_free_rx_resources(rx_ring);
3639 	return (DDI_FAILURE);
3640 }
3641 
3642 /*
3643  * Frees tx/rx queues memory resources
3644  */
3645 static void
ql_free_mem_resources(qlge_t * qlge)3646 ql_free_mem_resources(qlge_t *qlge)
3647 {
3648 	int i;
3649 
3650 	if (qlge->ricb_dma.dma_handle) {
3651 		/* free the ricb struct */
3652 		ql_free_phys(&qlge->ricb_dma.dma_handle,
3653 		    &qlge->ricb_dma.acc_handle);
3654 		bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3655 	}
3656 
3657 	ql_free_rx_buffers(qlge);
3658 
3659 	ql_free_ioctl_dma_buf(qlge);
3660 
3661 	for (i = 0; i < qlge->tx_ring_count; i++)
3662 		ql_free_tx_resources(&qlge->tx_ring[i]);
3663 
3664 	for (i = 0; i < qlge->rx_ring_count; i++)
3665 		ql_free_rx_resources(&qlge->rx_ring[i]);
3666 
3667 	ql_free_shadow_space(qlge);
3668 }
3669 
3670 /*
3671  * Allocate buffer queues, large buffers and small buffers etc
3672  *
3673  * This API is called in the gld_attach member function. It is called
3674  * only once.  Later reset,reboot should not re-allocate all rings and
3675  * buffers.
3676  */
3677 static int
ql_alloc_mem_resources(qlge_t * qlge)3678 ql_alloc_mem_resources(qlge_t *qlge)
3679 {
3680 	int i;
3681 	ddi_dma_cookie_t dma_cookie;
3682 
3683 	/* Allocate space for our shadow registers */
3684 	if (ql_alloc_shadow_space(qlge))
3685 		return (DDI_FAILURE);
3686 
3687 	for (i = 0; i < qlge->rx_ring_count; i++) {
3688 		if (ql_alloc_rx_resources(qlge, &qlge->rx_ring[i]) != 0) {
3689 			cmn_err(CE_WARN, "RX resource allocation failed.");
3690 			goto err_mem;
3691 		}
3692 	}
3693 	/* Allocate tx queue resources */
3694 	for (i = 0; i < qlge->tx_ring_count; i++) {
3695 		if (ql_alloc_tx_resources(qlge, &qlge->tx_ring[i]) != 0) {
3696 			cmn_err(CE_WARN, "Tx resource allocation failed.");
3697 			goto err_mem;
3698 		}
3699 	}
3700 
3701 	if (ql_alloc_ioctl_dma_buf(qlge) != DDI_SUCCESS) {
3702 		goto err_mem;
3703 	}
3704 
3705 	if (ql_alloc_rx_buffers(qlge) != DDI_SUCCESS) {
3706 		cmn_err(CE_WARN, "?%s(%d): ql_alloc_rx_buffers failed",
3707 		    __func__, qlge->instance);
3708 		goto err_mem;
3709 	}
3710 
3711 	qlge->sequence |= INIT_ALLOC_RX_BUF;
3712 
3713 	if (ql_alloc_phys(qlge->dip, &qlge->ricb_dma.dma_handle,
3714 	    &ql_desc_acc_attr,
3715 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3716 	    &qlge->ricb_dma.acc_handle,
3717 	    (size_t)sizeof (struct ricb),  /* mem size */
3718 	    (size_t)0, /* alignment:128 bytes boundary */
3719 	    (caddr_t *)&qlge->ricb_dma.vaddr,
3720 	    &dma_cookie) != 0) {
3721 		bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3722 		cmn_err(CE_WARN, "%s(%d): ricb allocation failed.",
3723 		    __func__, qlge->instance);
3724 		goto err_mem;
3725 	}
3726 	qlge->ricb_dma.dma_addr = dma_cookie.dmac_laddress;
3727 
3728 	return (DDI_SUCCESS);
3729 
3730 err_mem:
3731 	ql_free_mem_resources(qlge);
3732 	return (DDI_FAILURE);
3733 }
3734 
3735 
3736 /*
3737  * Function used to allocate physical memory and zero it.
3738  */
3739 
3740 static int
ql_alloc_phys_rbuf(dev_info_t * dip,ddi_dma_handle_t * dma_handle,ddi_device_acc_attr_t * device_acc_attr,uint_t dma_flags,ddi_acc_handle_t * acc_handle,size_t size,size_t alignment,caddr_t * vaddr,ddi_dma_cookie_t * dma_cookie)3741 ql_alloc_phys_rbuf(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
3742     ddi_device_acc_attr_t *device_acc_attr,
3743     uint_t dma_flags,
3744     ddi_acc_handle_t *acc_handle,
3745     size_t size,
3746     size_t alignment,
3747     caddr_t *vaddr,
3748     ddi_dma_cookie_t *dma_cookie)
3749 {
3750 	size_t rlen;
3751 	uint_t cnt;
3752 
3753 	/*
3754 	 * Workaround for SUN XMITS buffer must end and start on 8 byte
3755 	 * boundary. Else, hardware will overrun the buffer. Simple fix is
3756 	 * to make sure buffer has enough room for overrun.
3757 	 */
3758 	if (size & 7) {
3759 		size += 8 - (size & 7);
3760 	}
3761 
3762 	/* Adjust the alignment if requested */
3763 	if (alignment) {
3764 		dma_attr.dma_attr_align = alignment;
3765 	}
3766 
3767 	/*
3768 	 * Allocate DMA handle
3769 	 */
3770 	if (ddi_dma_alloc_handle(dip, &dma_attr_rbuf, DDI_DMA_DONTWAIT, NULL,
3771 	    dma_handle) != DDI_SUCCESS) {
3772 		cmn_err(CE_WARN, QL_BANG "%s:  ddi_dma_alloc_handle FAILED",
3773 		    __func__);
3774 		*dma_handle = NULL;
3775 		return (QL_ERROR);
3776 	}
3777 	/*
3778 	 * Allocate DMA memory
3779 	 */
3780 	if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
3781 	    dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING),
3782 	    DDI_DMA_DONTWAIT,
3783 	    NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
3784 		cmn_err(CE_WARN, "alloc_phys: DMA Memory alloc Failed");
3785 		ddi_dma_free_handle(dma_handle);
3786 		*acc_handle = NULL;
3787 		*dma_handle = NULL;
3788 		return (QL_ERROR);
3789 	}
3790 
3791 	if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
3792 	    dma_flags, DDI_DMA_DONTWAIT, NULL,
3793 	    dma_cookie, &cnt) != DDI_DMA_MAPPED) {
3794 		ddi_dma_mem_free(acc_handle);
3795 
3796 		ddi_dma_free_handle(dma_handle);
3797 		cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
3798 		    __func__);
3799 		*acc_handle = NULL;
3800 		*dma_handle = NULL;
3801 		return (QL_ERROR);
3802 	}
3803 
3804 	if (cnt != 1) {
3805 
3806 		ql_free_phys(dma_handle, acc_handle);
3807 
3808 		cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
3809 		    __func__);
3810 		return (QL_ERROR);
3811 	}
3812 
3813 	bzero((caddr_t)*vaddr, rlen);
3814 
3815 	return (0);
3816 }
3817 
3818 /*
3819  * Function used to allocate physical memory and zero it.
3820  */
3821 static int
ql_alloc_phys(dev_info_t * dip,ddi_dma_handle_t * dma_handle,ddi_device_acc_attr_t * device_acc_attr,uint_t dma_flags,ddi_acc_handle_t * acc_handle,size_t size,size_t alignment,caddr_t * vaddr,ddi_dma_cookie_t * dma_cookie)3822 ql_alloc_phys(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
3823     ddi_device_acc_attr_t *device_acc_attr,
3824     uint_t dma_flags,
3825     ddi_acc_handle_t *acc_handle,
3826     size_t size,
3827     size_t alignment,
3828     caddr_t *vaddr,
3829     ddi_dma_cookie_t *dma_cookie)
3830 {
3831 	size_t rlen;
3832 	uint_t cnt;
3833 
3834 	/*
3835 	 * Workaround for SUN XMITS buffer must end and start on 8 byte
3836 	 * boundary. Else, hardware will overrun the buffer. Simple fix is
3837 	 * to make sure buffer has enough room for overrun.
3838 	 */
3839 	if (size & 7) {
3840 		size += 8 - (size & 7);
3841 	}
3842 
3843 	/* Adjust the alignment if requested */
3844 	if (alignment) {
3845 		dma_attr.dma_attr_align = alignment;
3846 	}
3847 
3848 	/*
3849 	 * Allocate DMA handle
3850 	 */
3851 	if (ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_DONTWAIT, NULL,
3852 	    dma_handle) != DDI_SUCCESS) {
3853 		cmn_err(CE_WARN, QL_BANG "%s:  ddi_dma_alloc_handle FAILED",
3854 		    __func__);
3855 		*dma_handle = NULL;
3856 		return (QL_ERROR);
3857 	}
3858 	/*
3859 	 * Allocate DMA memory
3860 	 */
3861 	if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
3862 	    dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING),
3863 	    DDI_DMA_DONTWAIT,
3864 	    NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
3865 		cmn_err(CE_WARN, "alloc_phys: DMA Memory alloc Failed");
3866 		ddi_dma_free_handle(dma_handle);
3867 		*acc_handle = NULL;
3868 		*dma_handle = NULL;
3869 		return (QL_ERROR);
3870 	}
3871 
3872 	if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
3873 	    dma_flags, DDI_DMA_DONTWAIT, NULL,
3874 	    dma_cookie, &cnt) != DDI_DMA_MAPPED) {
3875 		ddi_dma_mem_free(acc_handle);
3876 		ddi_dma_free_handle(dma_handle);
3877 		cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
3878 		    __func__);
3879 		*acc_handle = NULL;
3880 		*dma_handle = NULL;
3881 		return (QL_ERROR);
3882 	}
3883 
3884 	if (cnt != 1) {
3885 
3886 		ql_free_phys(dma_handle, acc_handle);
3887 
3888 		cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
3889 		    __func__);
3890 		return (QL_ERROR);
3891 	}
3892 
3893 	bzero((caddr_t)*vaddr, rlen);
3894 
3895 	return (0);
3896 }
3897 
3898 /*
3899  * Add interrupt handlers based on the interrupt type.
3900  * Before adding the interrupt handlers, the interrupt vectors should
3901  * have been allocated, and the rx/tx rings have also been allocated.
3902  */
3903 static int
ql_add_intr_handlers(qlge_t * qlge)3904 ql_add_intr_handlers(qlge_t *qlge)
3905 {
3906 	int vector = 0;
3907 	int rc, i;
3908 	uint32_t value;
3909 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
3910 
3911 	switch (qlge->intr_type) {
3912 	case DDI_INTR_TYPE_MSIX:
3913 		/*
3914 		 * Add interrupt handler for rx and tx rings: vector[0 -
3915 		 * (qlge->intr_cnt -1)].
3916 		 */
3917 		value = 0;
3918 		for (vector = 0; vector < qlge->intr_cnt; vector++) {
3919 			ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3920 
3921 			/*
3922 			 * associate interrupt vector with interrupt handler
3923 			 */
3924 			rc = ddi_intr_add_handler(qlge->htable[vector],
3925 			    (ddi_intr_handler_t *)intr_ctx->handler,
3926 			    (void *)&qlge->rx_ring[vector], NULL);
3927 
3928 			QL_PRINT(DBG_INIT, ("rx_ring[%d] 0x%p\n",
3929 			    vector, &qlge->rx_ring[vector]));
3930 			if (rc != DDI_SUCCESS) {
3931 				QL_PRINT(DBG_INIT,
3932 				    ("Add rx interrupt handler failed. "
3933 				    "return: %d, vector: %d", rc, vector));
3934 				for (vector--; vector >= 0; vector--) {
3935 					(void) ddi_intr_remove_handler(
3936 					    qlge->htable[vector]);
3937 				}
3938 				return (DDI_FAILURE);
3939 			}
3940 			intr_ctx++;
3941 		}
3942 		break;
3943 
3944 	case DDI_INTR_TYPE_MSI:
3945 		/*
3946 		 * Add interrupt handlers for the only vector
3947 		 */
3948 		ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3949 
3950 		rc = ddi_intr_add_handler(qlge->htable[vector],
3951 		    ql_isr,
3952 		    (caddr_t)&qlge->rx_ring[0], NULL);
3953 
3954 		if (rc != DDI_SUCCESS) {
3955 			QL_PRINT(DBG_INIT,
3956 			    ("Add MSI interrupt handler failed: %d\n", rc));
3957 			return (DDI_FAILURE);
3958 		}
3959 		break;
3960 
3961 	case DDI_INTR_TYPE_FIXED:
3962 		/*
3963 		 * Add interrupt handlers for the only vector
3964 		 */
3965 		ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3966 
3967 		rc = ddi_intr_add_handler(qlge->htable[vector],
3968 		    ql_isr,
3969 		    (caddr_t)&qlge->rx_ring[0], NULL);
3970 
3971 		if (rc != DDI_SUCCESS) {
3972 			QL_PRINT(DBG_INIT,
3973 			    ("Add legacy interrupt handler failed: %d\n", rc));
3974 			return (DDI_FAILURE);
3975 		}
3976 		break;
3977 
3978 	default:
3979 		return (DDI_FAILURE);
3980 	}
3981 
3982 	/* Enable interrupts */
3983 	/* Block enable */
3984 	if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
3985 		QL_PRINT(DBG_INIT, ("Block enabling %d interrupt(s)\n",
3986 		    qlge->intr_cnt));
3987 		(void) ddi_intr_block_enable(qlge->htable, qlge->intr_cnt);
3988 	} else { /* Non block enable */
3989 		for (i = 0; i < qlge->intr_cnt; i++) {
3990 			QL_PRINT(DBG_INIT, ("Non Block Enabling interrupt %d "
3991 			    "handle 0x%x\n", i, qlge->htable[i]));
3992 			(void) ddi_intr_enable(qlge->htable[i]);
3993 		}
3994 	}
3995 	qlge->sequence |= INIT_INTR_ENABLED;
3996 
3997 	return (DDI_SUCCESS);
3998 }
3999 
4000 /*
4001  * Here we build the intr_ctx structures based on
4002  * our rx_ring count and intr vector count.
4003  * The intr_ctx structure is used to hook each vector
4004  * to possibly different handlers.
4005  */
4006 static void
ql_resolve_queues_to_irqs(qlge_t * qlge)4007 ql_resolve_queues_to_irqs(qlge_t *qlge)
4008 {
4009 	int i = 0;
4010 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
4011 
4012 	if (qlge->intr_type == DDI_INTR_TYPE_MSIX) {
4013 		/*
4014 		 * Each rx_ring has its own intr_ctx since we
4015 		 * have separate vectors for each queue.
4016 		 * This only true when MSI-X is enabled.
4017 		 */
4018 		for (i = 0; i < qlge->intr_cnt; i++, intr_ctx++) {
4019 			qlge->rx_ring[i].irq = i;
4020 			intr_ctx->intr = i;
4021 			intr_ctx->qlge = qlge;
4022 
4023 			/*
4024 			 * We set up each vectors enable/disable/read bits so
4025 			 * there's no bit/mask calculations in critical path.
4026 			 */
4027 			intr_ctx->intr_en_mask =
4028 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4029 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
4030 			    INTR_EN_IHD | i;
4031 			intr_ctx->intr_dis_mask =
4032 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4033 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
4034 			    INTR_EN_IHD | i;
4035 			intr_ctx->intr_read_mask =
4036 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4037 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
4038 			    | i;
4039 
4040 			if (i == 0) {
4041 				/*
4042 				 * Default queue handles bcast/mcast plus
4043 				 * async events.
4044 				 */
4045 				intr_ctx->handler = ql_isr;
4046 			} else if (qlge->rx_ring[i].type == TX_Q) {
4047 				/*
4048 				 * Outbound queue is for outbound completions
4049 				 * only.
4050 				 */
4051 				if (qlge->isr_stride)
4052 					intr_ctx->handler = ql_msix_isr;
4053 				else
4054 					intr_ctx->handler = ql_msix_tx_isr;
4055 			} else {
4056 				/*
4057 				 * Inbound queues handle unicast frames only.
4058 				 */
4059 				if (qlge->isr_stride)
4060 					intr_ctx->handler = ql_msix_isr;
4061 				else
4062 					intr_ctx->handler = ql_msix_rx_isr;
4063 			}
4064 		}
4065 		i = qlge->intr_cnt;
4066 		for (; i < qlge->rx_ring_count; i++, intr_ctx++) {
4067 			int iv = i - qlge->isr_stride;
4068 			qlge->rx_ring[i].irq = iv;
4069 			intr_ctx->intr = iv;
4070 			intr_ctx->qlge = qlge;
4071 
4072 			/*
4073 			 * We set up each vectors enable/disable/read bits so
4074 			 * there's no bit/mask calculations in critical path.
4075 			 */
4076 			intr_ctx->intr_en_mask =
4077 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4078 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
4079 			    INTR_EN_IHD | iv;
4080 			intr_ctx->intr_dis_mask =
4081 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4082 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
4083 			    INTR_EN_IHD | iv;
4084 			intr_ctx->intr_read_mask =
4085 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4086 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
4087 			    | iv;
4088 
4089 			if (qlge->rx_ring[i].type == TX_Q) {
4090 				/*
4091 				 * Outbound queue is for outbound completions
4092 				 * only.
4093 				 */
4094 				intr_ctx->handler = ql_msix_isr;
4095 			} else {
4096 				/*
4097 				 * Inbound queues handle unicast frames only.
4098 				 */
4099 				intr_ctx->handler = ql_msix_rx_isr;
4100 			}
4101 		}
4102 	} else {
4103 		/*
4104 		 * All rx_rings use the same intr_ctx since
4105 		 * there is only one vector.
4106 		 */
4107 		intr_ctx->intr = 0;
4108 		intr_ctx->qlge = qlge;
4109 		/*
4110 		 * We set up each vectors enable/disable/read bits so
4111 		 * there's no bit/mask calculations in the critical path.
4112 		 */
4113 		intr_ctx->intr_en_mask =
4114 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4115 		    INTR_EN_TYPE_ENABLE;
4116 		intr_ctx->intr_dis_mask =
4117 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4118 		    INTR_EN_TYPE_DISABLE;
4119 		intr_ctx->intr_read_mask =
4120 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4121 		    INTR_EN_TYPE_READ;
4122 		/*
4123 		 * Single interrupt means one handler for all rings.
4124 		 */
4125 		intr_ctx->handler = ql_isr;
4126 		for (i = 0; i < qlge->rx_ring_count; i++)
4127 			qlge->rx_ring[i].irq = 0;
4128 	}
4129 }
4130 
4131 
4132 /*
4133  * Free allocated interrupts.
4134  */
4135 static void
ql_free_irq_vectors(qlge_t * qlge)4136 ql_free_irq_vectors(qlge_t *qlge)
4137 {
4138 	int i;
4139 	int rc;
4140 
4141 	if (qlge->sequence & INIT_INTR_ENABLED) {
4142 		/* Disable all interrupts */
4143 		if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
4144 			/* Call ddi_intr_block_disable() */
4145 			(void) ddi_intr_block_disable(qlge->htable,
4146 			    qlge->intr_cnt);
4147 		} else {
4148 			for (i = 0; i < qlge->intr_cnt; i++) {
4149 				(void) ddi_intr_disable(qlge->htable[i]);
4150 			}
4151 		}
4152 
4153 		qlge->sequence &= ~INIT_INTR_ENABLED;
4154 	}
4155 
4156 	for (i = 0; i < qlge->intr_cnt; i++) {
4157 
4158 		if (qlge->sequence & INIT_ADD_INTERRUPT)
4159 			(void) ddi_intr_remove_handler(qlge->htable[i]);
4160 
4161 		if (qlge->sequence & INIT_INTR_ALLOC) {
4162 			rc = ddi_intr_free(qlge->htable[i]);
4163 			if (rc != DDI_SUCCESS) {
4164 				/* EMPTY */
4165 				QL_PRINT(DBG_INIT, ("Free intr failed: %d",
4166 				    rc));
4167 			}
4168 		}
4169 	}
4170 	if (qlge->sequence & INIT_INTR_ALLOC)
4171 		qlge->sequence &= ~INIT_INTR_ALLOC;
4172 
4173 	if (qlge->sequence & INIT_ADD_INTERRUPT)
4174 		qlge->sequence &= ~INIT_ADD_INTERRUPT;
4175 
4176 	if (qlge->htable) {
4177 		kmem_free(qlge->htable, qlge->intr_size);
4178 		qlge->htable = NULL;
4179 	}
4180 }
4181 
4182 /*
4183  * Allocate interrupt vectors
4184  * For legacy and MSI, only 1 handle is needed.
4185  * For MSI-X, if fewer than 2 vectors are available, return failure.
4186  * Upon success, this maps the vectors to rx and tx rings for
4187  * interrupts.
4188  */
4189 static int
ql_request_irq_vectors(qlge_t * qlge,int intr_type)4190 ql_request_irq_vectors(qlge_t *qlge, int intr_type)
4191 {
4192 	dev_info_t *devinfo;
4193 	uint32_t request, orig;
4194 	int count, avail, actual;
4195 	int minimum;
4196 	int rc;
4197 
4198 	devinfo = qlge->dip;
4199 
4200 	switch (intr_type) {
4201 	case DDI_INTR_TYPE_FIXED:
4202 		request = 1;	/* Request 1 legacy interrupt handle */
4203 		minimum = 1;
4204 		QL_PRINT(DBG_INIT, ("interrupt type: legacy\n"));
4205 		break;
4206 
4207 	case DDI_INTR_TYPE_MSI:
4208 		request = 1;	/* Request 1 MSI interrupt handle */
4209 		minimum = 1;
4210 		QL_PRINT(DBG_INIT, ("interrupt type: MSI\n"));
4211 		break;
4212 
4213 	case DDI_INTR_TYPE_MSIX:
4214 		/*
4215 		 * Ideal number of vectors for the adapter is
4216 		 * # rss rings + tx completion rings for default completion
4217 		 * queue.
4218 		 */
4219 		request = qlge->rx_ring_count;
4220 
4221 		orig = request;
4222 		if (request > (MAX_RX_RINGS))
4223 			request = MAX_RX_RINGS;
4224 		minimum = 2;
4225 		QL_PRINT(DBG_INIT, ("interrupt type: MSI-X\n"));
4226 		break;
4227 
4228 	default:
4229 		QL_PRINT(DBG_INIT, ("Invalid parameter\n"));
4230 		return (DDI_FAILURE);
4231 	}
4232 
4233 	QL_PRINT(DBG_INIT, ("interrupt handles requested: %d  minimum: %d\n",
4234 	    request, minimum));
4235 
4236 	/*
4237 	 * Get number of supported interrupts
4238 	 */
4239 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
4240 	if ((rc != DDI_SUCCESS) || (count < minimum)) {
4241 		QL_PRINT(DBG_INIT, ("Get interrupt number failed. Return: %d, "
4242 		    "count: %d\n", rc, count));
4243 		return (DDI_FAILURE);
4244 	}
4245 	QL_PRINT(DBG_INIT, ("interrupts supported: %d\n", count));
4246 
4247 	/*
4248 	 * Get number of available interrupts
4249 	 */
4250 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
4251 	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
4252 		QL_PRINT(DBG_INIT,
4253 		    ("Get interrupt available number failed. Return:"
4254 		    " %d, available: %d\n", rc, avail));
4255 		return (DDI_FAILURE);
4256 	}
4257 	QL_PRINT(DBG_INIT, ("interrupts available: %d\n", avail));
4258 
4259 	if (avail < request) {
4260 		QL_PRINT(DBG_INIT, ("Request %d handles, %d available\n",
4261 		    request, avail));
4262 		request = avail;
4263 	}
4264 
4265 	actual = 0;
4266 	qlge->intr_cnt = 0;
4267 
4268 	/*
4269 	 * Allocate an array of interrupt handles
4270 	 */
4271 	qlge->intr_size = (size_t)(request * sizeof (ddi_intr_handle_t));
4272 	qlge->htable = kmem_alloc(qlge->intr_size, KM_SLEEP);
4273 
4274 	rc = ddi_intr_alloc(devinfo, qlge->htable, intr_type, 0,
4275 	    (int)request, &actual, DDI_INTR_ALLOC_NORMAL);
4276 	if (rc != DDI_SUCCESS) {
4277 		cmn_err(CE_WARN, "%s(%d) Allocate interrupts failed. return:"
4278 		    " %d, request: %d, actual: %d",
4279 		    __func__, qlge->instance, rc, request, actual);
4280 		goto ql_intr_alloc_fail;
4281 	}
4282 	qlge->intr_cnt = actual;
4283 
4284 	qlge->sequence |= INIT_INTR_ALLOC;
4285 
4286 	/*
4287 	 * If the actual number of vectors is less than the minumum
4288 	 * then fail.
4289 	 */
4290 	if (actual < minimum) {
4291 		cmn_err(CE_WARN,
4292 		    "Insufficient interrupt handles available: %d", actual);
4293 		goto ql_intr_alloc_fail;
4294 	}
4295 
4296 	/*
4297 	 * For MSI-X, actual might force us to reduce number of tx & rx rings
4298 	 */
4299 	if ((intr_type == DDI_INTR_TYPE_MSIX) && (orig > actual)) {
4300 		if (actual >= (orig / 2)) {
4301 			count = orig / 2;
4302 			qlge->rss_ring_count = count;
4303 			qlge->tx_ring_count = count;
4304 			qlge->isr_stride = count;
4305 		} else if (actual >= (orig / 4)) {
4306 			count = orig / 4;
4307 			qlge->rss_ring_count = count;
4308 			qlge->tx_ring_count = count;
4309 			qlge->isr_stride = count;
4310 		} else if (actual >= (orig / 8)) {
4311 			count = orig / 8;
4312 			qlge->rss_ring_count = count;
4313 			qlge->tx_ring_count = count;
4314 			qlge->isr_stride = count;
4315 		} else if (actual < MAX_RX_RINGS) {
4316 			qlge->tx_ring_count = 1;
4317 			qlge->rss_ring_count = actual - 1;
4318 		}
4319 		qlge->intr_cnt = count;
4320 		qlge->rx_ring_count = qlge->tx_ring_count +
4321 		    qlge->rss_ring_count;
4322 	}
4323 	cmn_err(CE_NOTE, "!qlge(%d) tx %d, rss %d, stride %d\n", qlge->instance,
4324 	    qlge->tx_ring_count, qlge->rss_ring_count, qlge->isr_stride);
4325 
4326 	/*
4327 	 * Get priority for first vector, assume remaining are all the same
4328 	 */
4329 	rc = ddi_intr_get_pri(qlge->htable[0], &qlge->intr_pri);
4330 	if (rc != DDI_SUCCESS) {
4331 		QL_PRINT(DBG_INIT, ("Get interrupt priority failed: %d\n", rc));
4332 		goto ql_intr_alloc_fail;
4333 	}
4334 
4335 	rc = ddi_intr_get_cap(qlge->htable[0], &qlge->intr_cap);
4336 	if (rc != DDI_SUCCESS) {
4337 		QL_PRINT(DBG_INIT, ("Get interrupt cap failed: %d\n", rc));
4338 		goto ql_intr_alloc_fail;
4339 	}
4340 
4341 	qlge->intr_type = intr_type;
4342 
4343 	return (DDI_SUCCESS);
4344 
4345 ql_intr_alloc_fail:
4346 	ql_free_irq_vectors(qlge);
4347 
4348 	return (DDI_FAILURE);
4349 }
4350 
4351 /*
4352  * Allocate interrupt vector(s) for one of the following interrupt types, MSI-X,
4353  * MSI or Legacy. In MSI and Legacy modes we only support a single receive and
4354  * transmit queue.
4355  */
4356 int
ql_alloc_irqs(qlge_t * qlge)4357 ql_alloc_irqs(qlge_t *qlge)
4358 {
4359 	int intr_types;
4360 	int rval;
4361 
4362 	/*
4363 	 * Get supported interrupt types
4364 	 */
4365 	if (ddi_intr_get_supported_types(qlge->dip, &intr_types)
4366 	    != DDI_SUCCESS) {
4367 		cmn_err(CE_WARN, "%s(%d):ddi_intr_get_supported_types failed",
4368 		    __func__, qlge->instance);
4369 
4370 		return (DDI_FAILURE);
4371 	}
4372 
4373 	QL_PRINT(DBG_INIT, ("%s(%d) Interrupt types supported %d\n",
4374 	    __func__, qlge->instance, intr_types));
4375 
4376 	/* Install MSI-X interrupts */
4377 	if ((intr_types & DDI_INTR_TYPE_MSIX) != 0) {
4378 		QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt supported %d\n",
4379 		    __func__, qlge->instance, intr_types));
4380 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSIX);
4381 		if (rval == DDI_SUCCESS) {
4382 			return (rval);
4383 		}
4384 		QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt allocation failed,"
4385 		    " trying MSI interrupts ...\n", __func__, qlge->instance));
4386 	}
4387 
4388 	/*
4389 	 * We will have 2 completion queues in MSI / Legacy mode,
4390 	 * Queue 0 for default completions
4391 	 * Queue 1 for transmit completions
4392 	 */
4393 	qlge->rss_ring_count = 1; /* Default completion queue (0) for all */
4394 	qlge->tx_ring_count = 1; /* Single tx completion queue */
4395 	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
4396 
4397 	QL_PRINT(DBG_INIT, ("%s(%d) Falling back to single completion queue \n",
4398 	    __func__, qlge->instance));
4399 	/*
4400 	 * Add the h/w interrupt handler and initialise mutexes
4401 	 */
4402 	rval = DDI_FAILURE;
4403 
4404 	/*
4405 	 * If OS supports MSIX interrupt but fails to allocate, then try
4406 	 * MSI interrupt. If MSI interrupt allocation fails also, then roll
4407 	 * back to fixed interrupt.
4408 	 */
4409 	if (intr_types & DDI_INTR_TYPE_MSI) {
4410 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSI);
4411 		if (rval == DDI_SUCCESS) {
4412 			qlge->intr_type = DDI_INTR_TYPE_MSI;
4413 			QL_PRINT(DBG_INIT, ("%s(%d) use MSI Interrupt \n",
4414 			    __func__, qlge->instance));
4415 		}
4416 	}
4417 
4418 	/* Try Fixed interrupt Legacy mode */
4419 	if (rval != DDI_SUCCESS) {
4420 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_FIXED);
4421 		if (rval != DDI_SUCCESS) {
4422 			cmn_err(CE_WARN, "%s(%d):Legacy mode interrupt "
4423 			    "allocation failed",
4424 			    __func__, qlge->instance);
4425 		} else {
4426 			qlge->intr_type = DDI_INTR_TYPE_FIXED;
4427 			QL_PRINT(DBG_INIT, ("%s(%d) use Fixed Interrupt \n",
4428 			    __func__, qlge->instance));
4429 		}
4430 	}
4431 
4432 	return (rval);
4433 }
4434 
4435 static void
ql_free_rx_tx_locks(qlge_t * qlge)4436 ql_free_rx_tx_locks(qlge_t *qlge)
4437 {
4438 	int i;
4439 	struct rx_ring *rx_ring;
4440 	struct tx_ring *tx_ring;
4441 
4442 	for (i = 0; i < qlge->tx_ring_count; i++) {
4443 		tx_ring = &qlge->tx_ring[i];
4444 		mutex_destroy(&tx_ring->tx_lock);
4445 	}
4446 
4447 	for (i = 0; i < qlge->rx_ring_count; i++) {
4448 		rx_ring = &qlge->rx_ring[i];
4449 		mutex_destroy(&rx_ring->rx_lock);
4450 		mutex_destroy(&rx_ring->sbq_lock);
4451 		mutex_destroy(&rx_ring->lbq_lock);
4452 	}
4453 }
4454 
4455 /*
4456  * Frees all resources allocated during attach.
4457  *
4458  * Input:
4459  * dip = pointer to device information structure.
4460  * sequence = bits indicating resources to free.
4461  *
4462  * Context:
4463  * Kernel context.
4464  */
4465 static void
ql_free_resources(qlge_t * qlge)4466 ql_free_resources(qlge_t *qlge)
4467 {
4468 
4469 	/* Disable driver timer */
4470 	ql_stop_timer(qlge);
4471 
4472 	if (qlge->sequence & INIT_MAC_REGISTERED) {
4473 		(void) mac_unregister(qlge->mh);
4474 		qlge->sequence &= ~INIT_MAC_REGISTERED;
4475 	}
4476 
4477 	if (qlge->sequence & INIT_MAC_ALLOC) {
4478 		/* Nothing to do, macp is already freed */
4479 		qlge->sequence &= ~INIT_MAC_ALLOC;
4480 	}
4481 
4482 	if (qlge->sequence & INIT_PCI_CONFIG_SETUP) {
4483 		pci_config_teardown(&qlge->pci_handle);
4484 		qlge->sequence &= ~INIT_PCI_CONFIG_SETUP;
4485 	}
4486 
4487 	if (qlge->sequence & INIT_INTR_ALLOC) {
4488 		ql_free_irq_vectors(qlge);
4489 		qlge->sequence &= ~INIT_ADD_INTERRUPT;
4490 	}
4491 
4492 	if (qlge->sequence & INIT_ADD_SOFT_INTERRUPT) {
4493 		(void) ddi_intr_remove_softint(qlge->mpi_event_intr_hdl);
4494 		(void) ddi_intr_remove_softint(qlge->mpi_reset_intr_hdl);
4495 		(void) ddi_intr_remove_softint(qlge->asic_reset_intr_hdl);
4496 		qlge->sequence &= ~INIT_ADD_SOFT_INTERRUPT;
4497 	}
4498 
4499 	if (qlge->sequence & INIT_KSTATS) {
4500 		ql_fini_kstats(qlge);
4501 		qlge->sequence &= ~INIT_KSTATS;
4502 	}
4503 
4504 	if (qlge->sequence & INIT_MUTEX) {
4505 		mutex_destroy(&qlge->gen_mutex);
4506 		mutex_destroy(&qlge->hw_mutex);
4507 		mutex_destroy(&qlge->mbx_mutex);
4508 		cv_destroy(&qlge->cv_mbx_intr);
4509 		qlge->sequence &= ~INIT_MUTEX;
4510 	}
4511 
4512 	if (qlge->sequence & INIT_LOCKS_CREATED) {
4513 		ql_free_rx_tx_locks(qlge);
4514 		qlge->sequence &= ~INIT_LOCKS_CREATED;
4515 	}
4516 
4517 	if (qlge->sequence & INIT_MEMORY_ALLOC) {
4518 		ql_free_mem_resources(qlge);
4519 		qlge->sequence &= ~INIT_MEMORY_ALLOC;
4520 	}
4521 
4522 	if (qlge->sequence & INIT_REGS_SETUP) {
4523 		ddi_regs_map_free(&qlge->dev_handle);
4524 		qlge->sequence &= ~INIT_REGS_SETUP;
4525 	}
4526 
4527 	if (qlge->sequence & INIT_DOORBELL_REGS_SETUP) {
4528 		ddi_regs_map_free(&qlge->dev_doorbell_reg_handle);
4529 		qlge->sequence &= ~INIT_DOORBELL_REGS_SETUP;
4530 	}
4531 
4532 	/*
4533 	 * free flash flt table that allocated in attach stage
4534 	 */
4535 	if ((qlge->flt.ql_flt_entry_ptr != NULL)&&
4536 	    (qlge->flt.header.length != 0)) {
4537 		kmem_free(qlge->flt.ql_flt_entry_ptr, qlge->flt.header.length);
4538 		qlge->flt.ql_flt_entry_ptr = NULL;
4539 	}
4540 
4541 	if (qlge->sequence & INIT_FM) {
4542 		ql_fm_fini(qlge);
4543 		qlge->sequence &= ~INIT_FM;
4544 	}
4545 
4546 	ddi_prop_remove_all(qlge->dip);
4547 	ddi_set_driver_private(qlge->dip, NULL);
4548 
4549 	/* finally, free qlge structure */
4550 	if (qlge->sequence & INIT_SOFTSTATE_ALLOC) {
4551 		kmem_free(qlge, sizeof (qlge_t));
4552 	}
4553 }
4554 
4555 /*
4556  * Set promiscuous mode of the driver
4557  * Caller must catch HW_LOCK
4558  */
4559 void
ql_set_promiscuous(qlge_t * qlge,int mode)4560 ql_set_promiscuous(qlge_t *qlge, int mode)
4561 {
4562 	if (mode) {
4563 		(void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4564 		    RT_IDX_VALID, 1);
4565 	} else {
4566 		(void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4567 		    RT_IDX_VALID, 0);
4568 	}
4569 }
4570 /*
4571  * Write 'data1' to Mac Protocol Address Index Register and
4572  * 'data2' to Mac Protocol Address Data Register
4573  *  Assuming that the Mac Protocol semaphore lock has been acquired.
4574  */
4575 static int
ql_write_mac_proto_regs(qlge_t * qlge,uint32_t data1,uint32_t data2)4576 ql_write_mac_proto_regs(qlge_t *qlge, uint32_t data1, uint32_t data2)
4577 {
4578 	int return_value = DDI_SUCCESS;
4579 
4580 	if (ql_wait_reg_bit(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
4581 	    MAC_PROTOCOL_ADDRESS_INDEX_MW, BIT_SET, 5) != DDI_SUCCESS) {
4582 		cmn_err(CE_WARN, "Wait for MAC_PROTOCOL Address Register "
4583 		    "timeout.");
4584 		return_value = DDI_FAILURE;
4585 		goto out;
4586 	}
4587 	ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX /* A8 */, data1);
4588 	ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA /* 0xAC */, data2);
4589 out:
4590 	return (return_value);
4591 }
4592 /*
4593  * Enable the 'index'ed multicast address in the host memory's multicast_list
4594  */
4595 int
ql_add_multicast_address(qlge_t * qlge,int index)4596 ql_add_multicast_address(qlge_t *qlge, int index)
4597 {
4598 	int rtn_val = DDI_FAILURE;
4599 	uint32_t offset;
4600 	uint32_t value1, value2;
4601 
4602 	/* Acquire the required semaphore */
4603 	if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4604 		return (rtn_val);
4605 	}
4606 
4607 	/* Program Offset0 - lower 32 bits of the MAC address */
4608 	offset = 0;
4609 	value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4610 	    (index << 4) | offset;
4611 	value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4612 	    |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4613 	    |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4614 	    |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4615 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS)
4616 		goto out;
4617 
4618 	/* Program offset1: upper 16 bits of the MAC address */
4619 	offset = 1;
4620 	value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4621 	    (index<<4) | offset;
4622 	value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[0] << 8)
4623 	    |qlge->multicast_list[index].addr.ether_addr_octet[1]);
4624 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4625 		goto out;
4626 	}
4627 	rtn_val = DDI_SUCCESS;
4628 out:
4629 	ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4630 	return (rtn_val);
4631 }
4632 
4633 /*
4634  * Disable the 'index'ed multicast address in the host memory's multicast_list
4635  */
4636 int
ql_remove_multicast_address(qlge_t * qlge,int index)4637 ql_remove_multicast_address(qlge_t *qlge, int index)
4638 {
4639 	int rtn_val = DDI_FAILURE;
4640 	uint32_t offset;
4641 	uint32_t value1, value2;
4642 
4643 	/* Acquire the required semaphore */
4644 	if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4645 		return (rtn_val);
4646 	}
4647 	/* Program Offset0 - lower 32 bits of the MAC address */
4648 	offset = 0;
4649 	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4650 	value2 =
4651 	    ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4652 	    |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4653 	    |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4654 	    |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4655 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4656 		goto out;
4657 	}
4658 	/* Program offset1: upper 16 bits of the MAC address */
4659 	offset = 1;
4660 	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4661 	value2 = 0;
4662 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4663 		goto out;
4664 	}
4665 	rtn_val = DDI_SUCCESS;
4666 out:
4667 	ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4668 	return (rtn_val);
4669 }
4670 
4671 /*
4672  * Add a new multicast address to the list of supported list
4673  * This API is called after OS called gld_set_multicast (GLDv2)
4674  * or m_multicst (GLDv3)
4675  *
4676  * Restriction:
4677  * The number of maximum multicast address is limited by hardware.
4678  */
4679 int
ql_add_to_multicast_list(qlge_t * qlge,uint8_t * ep)4680 ql_add_to_multicast_list(qlge_t *qlge, uint8_t *ep)
4681 {
4682 	uint32_t index = qlge->multicast_list_count;
4683 	int rval = DDI_SUCCESS;
4684 	int status;
4685 
4686 	if ((ep[0] & 01) == 0) {
4687 		rval = EINVAL;
4688 		goto exit;
4689 	}
4690 
4691 	/* if there is an availabe space in multicast_list, then add it */
4692 	if (index < MAX_MULTICAST_LIST_SIZE) {
4693 		bcopy(ep, qlge->multicast_list[index].addr.ether_addr_octet,
4694 		    ETHERADDRL);
4695 		/* increment the total number of addresses in multicast list */
4696 		(void) ql_add_multicast_address(qlge, index);
4697 		qlge->multicast_list_count++;
4698 		QL_PRINT(DBG_GLD,
4699 		    ("%s(%d): added to index of multicast list= 0x%x, "
4700 		    "total %d\n", __func__, qlge->instance, index,
4701 		    qlge->multicast_list_count));
4702 
4703 		if (index > MAX_MULTICAST_HW_SIZE) {
4704 			if (!qlge->multicast_promisc) {
4705 				status = ql_set_routing_reg(qlge,
4706 				    RT_IDX_ALLMULTI_SLOT,
4707 				    RT_IDX_MCAST, 1);
4708 				if (status) {
4709 					cmn_err(CE_WARN,
4710 					    "Failed to init routing reg "
4711 					    "for mcast promisc mode.");
4712 					rval = ENOENT;
4713 					goto exit;
4714 				}
4715 				qlge->multicast_promisc = B_TRUE;
4716 			}
4717 		}
4718 	} else {
4719 		rval = ENOENT;
4720 	}
4721 exit:
4722 	return (rval);
4723 }
4724 
4725 /*
4726  * Remove an old multicast address from the list of supported multicast
4727  * addresses. This API is called after OS called gld_set_multicast (GLDv2)
4728  * or m_multicst (GLDv3)
4729  * The number of maximum multicast address is limited by hardware.
4730  */
4731 int
ql_remove_from_multicast_list(qlge_t * qlge,uint8_t * ep)4732 ql_remove_from_multicast_list(qlge_t *qlge, uint8_t *ep)
4733 {
4734 	uint32_t total = qlge->multicast_list_count;
4735 	int i = 0;
4736 	int rmv_index = 0;
4737 	size_t length = sizeof (ql_multicast_addr);
4738 	int status;
4739 
4740 	for (i = 0; i < total; i++) {
4741 		if (bcmp(ep, &qlge->multicast_list[i].addr, ETHERADDRL) != 0) {
4742 			continue;
4743 		}
4744 
4745 		rmv_index = i;
4746 		/* block move the reset of other multicast address forward */
4747 		length = ((total -1) -i) * sizeof (ql_multicast_addr);
4748 		if (length > 0) {
4749 			bcopy(&qlge->multicast_list[i+1],
4750 			    &qlge->multicast_list[i], length);
4751 		}
4752 		qlge->multicast_list_count--;
4753 		if (qlge->multicast_list_count <= MAX_MULTICAST_HW_SIZE) {
4754 			/*
4755 			 * there is a deletion in multicast list table,
4756 			 * re-enable them
4757 			 */
4758 			for (i = rmv_index; i < qlge->multicast_list_count;
4759 			    i++) {
4760 				(void) ql_add_multicast_address(qlge, i);
4761 			}
4762 			/* and disable the last one */
4763 			(void) ql_remove_multicast_address(qlge, i);
4764 
4765 			/* disable multicast promiscuous mode */
4766 			if (qlge->multicast_promisc) {
4767 				status = ql_set_routing_reg(qlge,
4768 				    RT_IDX_ALLMULTI_SLOT,
4769 				    RT_IDX_MCAST, 0);
4770 				if (status) {
4771 					cmn_err(CE_WARN,
4772 					    "Failed to init routing reg for "
4773 					    "mcast promisc mode.");
4774 					goto exit;
4775 				}
4776 				/* write to config register */
4777 				qlge->multicast_promisc = B_FALSE;
4778 			}
4779 		}
4780 		break;
4781 	}
4782 exit:
4783 	return (DDI_SUCCESS);
4784 }
4785 
4786 /*
4787  * Read a XGMAC register
4788  */
4789 int
ql_read_xgmac_reg(qlge_t * qlge,uint32_t addr,uint32_t * val)4790 ql_read_xgmac_reg(qlge_t *qlge, uint32_t addr, uint32_t *val)
4791 {
4792 	int rtn_val = DDI_FAILURE;
4793 
4794 	/* wait for XGMAC Address register RDY bit set */
4795 	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4796 	    BIT_SET, 10) != DDI_SUCCESS) {
4797 		goto out;
4798 	}
4799 	/* start rx transaction */
4800 	ql_write_reg(qlge, REG_XGMAC_ADDRESS, addr|XGMAC_ADDRESS_READ_TRANSACT);
4801 
4802 	/*
4803 	 * wait for XGMAC Address register RDY bit set,
4804 	 * which indicates data is ready
4805 	 */
4806 	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4807 	    BIT_SET, 10) != DDI_SUCCESS) {
4808 		goto out;
4809 	}
4810 	/* read data from XGAMC_DATA register */
4811 	*val = ql_read_reg(qlge, REG_XGMAC_DATA);
4812 	rtn_val = DDI_SUCCESS;
4813 out:
4814 	return (rtn_val);
4815 }
4816 
4817 /*
4818  * Implement checksum offload for IPv4 IP packets
4819  */
4820 static void
ql_hw_csum_setup(qlge_t * qlge,uint32_t pflags,caddr_t bp,struct ob_mac_iocb_req * mac_iocb_ptr)4821 ql_hw_csum_setup(qlge_t *qlge, uint32_t pflags, caddr_t bp,
4822     struct ob_mac_iocb_req *mac_iocb_ptr)
4823 {
4824 	struct ip *iphdr = NULL;
4825 	struct ether_header *ethhdr;
4826 	struct ether_vlan_header *ethvhdr;
4827 	struct tcphdr *tcp_hdr;
4828 	uint32_t etherType;
4829 	int mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
4830 	int ip_hdr_off, tcp_udp_hdr_off, hdr_off;
4831 
4832 	ethhdr  = (struct ether_header *)((void *)bp);
4833 	ethvhdr = (struct ether_vlan_header *)((void *)bp);
4834 	/* Is this vlan packet? */
4835 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
4836 		mac_hdr_len = sizeof (struct ether_vlan_header);
4837 		etherType = ntohs(ethvhdr->ether_type);
4838 	} else {
4839 		mac_hdr_len = sizeof (struct ether_header);
4840 		etherType = ntohs(ethhdr->ether_type);
4841 	}
4842 	/* Is this IPv4 or IPv6 packet? */
4843 	if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len)) ==
4844 	    IPV4_VERSION) {
4845 		if (etherType == ETHERTYPE_IP /* 0800 */) {
4846 			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
4847 		} else {
4848 			/* EMPTY */
4849 			QL_PRINT(DBG_TX,
4850 			    ("%s(%d) : IPv4 None IP packet type 0x%x\n",
4851 			    __func__, qlge->instance, etherType));
4852 		}
4853 	}
4854 	/* ipV4 packets */
4855 	if (iphdr != NULL) {
4856 
4857 		ip_hdr_len = IPH_HDR_LENGTH(iphdr);
4858 		QL_PRINT(DBG_TX,
4859 		    ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH:"
4860 		    " %d bytes \n", __func__, qlge->instance, ip_hdr_len));
4861 
4862 		ip_hdr_off = mac_hdr_len;
4863 		QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
4864 		    __func__, qlge->instance, ip_hdr_len));
4865 
4866 		mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
4867 		    OB_MAC_IOCB_REQ_IPv4);
4868 
4869 		if (pflags & HCK_IPV4_HDRCKSUM) {
4870 			QL_PRINT(DBG_TX, ("%s(%d) : Do IPv4 header checksum\n",
4871 			    __func__, qlge->instance));
4872 			mac_iocb_ptr->opcode = OPCODE_OB_MAC_OFFLOAD_IOCB;
4873 			mac_iocb_ptr->flag2 = (uint8_t)(mac_iocb_ptr->flag2 |
4874 			    OB_MAC_IOCB_REQ_IC);
4875 			iphdr->ip_sum = 0;
4876 			mac_iocb_ptr->hdr_off = (uint16_t)
4877 			    cpu_to_le16(ip_hdr_off);
4878 		}
4879 		if (pflags & HCK_FULLCKSUM) {
4880 			if (iphdr->ip_p == IPPROTO_TCP) {
4881 				tcp_hdr =
4882 				    (struct tcphdr *)(void *)
4883 				    ((uint8_t *)(void *)iphdr + ip_hdr_len);
4884 				QL_PRINT(DBG_TX, ("%s(%d) : Do TCP checksum\n",
4885 				    __func__, qlge->instance));
4886 				mac_iocb_ptr->opcode =
4887 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4888 				mac_iocb_ptr->flag1 =
4889 				    (uint8_t)(mac_iocb_ptr->flag1 |
4890 				    OB_MAC_IOCB_REQ_TC);
4891 				mac_iocb_ptr->flag2 =
4892 				    (uint8_t)(mac_iocb_ptr->flag2 |
4893 				    OB_MAC_IOCB_REQ_IC);
4894 				iphdr->ip_sum = 0;
4895 				tcp_udp_hdr_off = mac_hdr_len+ip_hdr_len;
4896 				tcp_udp_hdr_len = tcp_hdr->th_off*4;
4897 				QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
4898 				    __func__, qlge->instance, tcp_udp_hdr_len));
4899 				hdr_off = ip_hdr_off;
4900 				tcp_udp_hdr_off <<= 6;
4901 				hdr_off |= tcp_udp_hdr_off;
4902 				mac_iocb_ptr->hdr_off =
4903 				    (uint16_t)cpu_to_le16(hdr_off);
4904 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4905 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
4906 				    tcp_udp_hdr_len);
4907 
4908 				/*
4909 				 * if the chip is unable to do pseudo header
4910 				 * cksum calculation, do it in then put the
4911 				 * result to the data passed to the chip
4912 				 */
4913 				if (qlge->cfg_flags &
4914 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4915 					ql_pseudo_cksum((uint8_t *)iphdr);
4916 				}
4917 			} else if (iphdr->ip_p == IPPROTO_UDP) {
4918 				QL_PRINT(DBG_TX, ("%s(%d) : Do UDP checksum\n",
4919 				    __func__, qlge->instance));
4920 				mac_iocb_ptr->opcode =
4921 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4922 				mac_iocb_ptr->flag1 =
4923 				    (uint8_t)(mac_iocb_ptr->flag1 |
4924 				    OB_MAC_IOCB_REQ_UC);
4925 				mac_iocb_ptr->flag2 =
4926 				    (uint8_t)(mac_iocb_ptr->flag2 |
4927 				    OB_MAC_IOCB_REQ_IC);
4928 				iphdr->ip_sum = 0;
4929 				tcp_udp_hdr_off = mac_hdr_len + ip_hdr_len;
4930 				tcp_udp_hdr_len = sizeof (struct udphdr);
4931 				QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
4932 				    __func__, qlge->instance, tcp_udp_hdr_len));
4933 				hdr_off = ip_hdr_off;
4934 				tcp_udp_hdr_off <<= 6;
4935 				hdr_off |= tcp_udp_hdr_off;
4936 				mac_iocb_ptr->hdr_off =
4937 				    (uint16_t)cpu_to_le16(hdr_off);
4938 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4939 				    cpu_to_le16(mac_hdr_len + ip_hdr_len
4940 				    + tcp_udp_hdr_len);
4941 
4942 				/*
4943 				 * if the chip is unable to calculate pseudo
4944 				 * hdr cksum,do it in then put the result to
4945 				 * the data passed to the chip
4946 				 */
4947 				if (qlge->cfg_flags &
4948 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4949 					ql_pseudo_cksum((uint8_t *)iphdr);
4950 				}
4951 			}
4952 		}
4953 	}
4954 }
4955 
4956 /*
4957  * For TSO/LSO:
4958  * MAC frame transmission with TCP large segment offload is performed in the
4959  * same way as the MAC frame transmission with checksum offload with the
4960  * exception that the maximum TCP segment size (MSS) must be specified to
4961  * allow the chip to segment the data into legal sized frames.
4962  * The host also needs to calculate a pseudo-header checksum over the
4963  * following fields:
4964  * Source IP Address, Destination IP Address, and the Protocol.
4965  * The TCP length is not included in the pseudo-header calculation.
4966  * The pseudo-header checksum is place in the TCP checksum field of the
4967  * prototype header.
4968  */
4969 static void
ql_lso_pseudo_cksum(uint8_t * buf)4970 ql_lso_pseudo_cksum(uint8_t *buf)
4971 {
4972 	uint32_t cksum;
4973 	uint16_t iphl;
4974 	uint16_t proto;
4975 
4976 	/*
4977 	 * Calculate the LSO pseudo-header checksum.
4978 	 */
4979 	iphl = (uint16_t)(4 * (buf[0] & 0xF));
4980 	cksum = proto = buf[9];
4981 	cksum += (((uint16_t)buf[12])<<8) + buf[13];
4982 	cksum += (((uint16_t)buf[14])<<8) + buf[15];
4983 	cksum += (((uint16_t)buf[16])<<8) + buf[17];
4984 	cksum += (((uint16_t)buf[18])<<8) + buf[19];
4985 	cksum = (cksum>>16) + (cksum & 0xFFFF);
4986 	cksum = (cksum>>16) + (cksum & 0xFFFF);
4987 
4988 	/*
4989 	 * Point it to the TCP/UDP header, and
4990 	 * update the checksum field.
4991 	 */
4992 	buf += iphl + ((proto == IPPROTO_TCP) ?
4993 	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
4994 
4995 	*(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
4996 }
4997 
4998 /*
4999  * For IPv4 IP packets, distribute the tx packets evenly among tx rings
5000  */
5001 typedef	uint32_t	ub4; /* unsigned 4-byte quantities */
5002 typedef	uint8_t		ub1;
5003 
5004 #define	hashsize(n)	((ub4)1<<(n))
5005 #define	hashmask(n)	(hashsize(n)-1)
5006 
5007 #define	mix(a, b, c) \
5008 { \
5009 	a -= b; a -= c; a ^= (c>>13); \
5010 	b -= c; b -= a; b ^= (a<<8); \
5011 	c -= a; c -= b; c ^= (b>>13); \
5012 	a -= b; a -= c; a ^= (c>>12);  \
5013 	b -= c; b -= a; b ^= (a<<16); \
5014 	c -= a; c -= b; c ^= (b>>5); \
5015 	a -= b; a -= c; a ^= (c>>3);  \
5016 	b -= c; b -= a; b ^= (a<<10); \
5017 	c -= a; c -= b; c ^= (b>>15); \
5018 }
5019 
5020 ub4
hash(k,length,initval)5021 hash(k, length, initval)
5022 register ub1 *k;	/* the key */
5023 register ub4 length;	/* the length of the key */
5024 register ub4 initval;	/* the previous hash, or an arbitrary value */
5025 {
5026 	register ub4 a, b, c, len;
5027 
5028 	/* Set up the internal state */
5029 	len = length;
5030 	a = b = 0x9e3779b9;	/* the golden ratio; an arbitrary value */
5031 	c = initval;		/* the previous hash value */
5032 
5033 	/* handle most of the key */
5034 	while (len >= 12) {
5035 		a += (k[0] +((ub4)k[1]<<8) +((ub4)k[2]<<16) +((ub4)k[3]<<24));
5036 		b += (k[4] +((ub4)k[5]<<8) +((ub4)k[6]<<16) +((ub4)k[7]<<24));
5037 		c += (k[8] +((ub4)k[9]<<8) +((ub4)k[10]<<16)+((ub4)k[11]<<24));
5038 		mix(a, b, c);
5039 		k += 12;
5040 		len -= 12;
5041 	}
5042 
5043 	/* handle the last 11 bytes */
5044 	c += length;
5045 	/* all the case statements fall through */
5046 	switch (len) {
5047 		/* FALLTHRU */
5048 	case 11: c += ((ub4)k[10]<<24);
5049 		/* FALLTHRU */
5050 	case 10: c += ((ub4)k[9]<<16);
5051 		/* FALLTHRU */
5052 	case 9 : c += ((ub4)k[8]<<8);
5053 	/* the first byte of c is reserved for the length */
5054 		/* FALLTHRU */
5055 	case 8 : b += ((ub4)k[7]<<24);
5056 		/* FALLTHRU */
5057 	case 7 : b += ((ub4)k[6]<<16);
5058 		/* FALLTHRU */
5059 	case 6 : b += ((ub4)k[5]<<8);
5060 		/* FALLTHRU */
5061 	case 5 : b += k[4];
5062 		/* FALLTHRU */
5063 	case 4 : a += ((ub4)k[3]<<24);
5064 		/* FALLTHRU */
5065 	case 3 : a += ((ub4)k[2]<<16);
5066 		/* FALLTHRU */
5067 	case 2 : a += ((ub4)k[1]<<8);
5068 		/* FALLTHRU */
5069 	case 1 : a += k[0];
5070 	/* case 0: nothing left to add */
5071 	}
5072 	mix(a, b, c);
5073 	/* report the result */
5074 	return (c);
5075 }
5076 
5077 uint8_t
ql_tx_hashing(qlge_t * qlge,caddr_t bp)5078 ql_tx_hashing(qlge_t *qlge, caddr_t bp)
5079 {
5080 	struct ip *iphdr = NULL;
5081 	struct ether_header *ethhdr;
5082 	struct ether_vlan_header *ethvhdr;
5083 	struct tcphdr *tcp_hdr;
5084 	struct udphdr *udp_hdr;
5085 	uint32_t etherType;
5086 	int mac_hdr_len, ip_hdr_len;
5087 	uint32_t h = 0; /* 0 by default */
5088 	uint8_t tx_ring_id = 0;
5089 	uint32_t ip_src_addr = 0;
5090 	uint32_t ip_desc_addr = 0;
5091 	uint16_t src_port = 0;
5092 	uint16_t dest_port = 0;
5093 	uint8_t key[12];
5094 	QL_PRINT(DBG_TX, ("%s(%d) entered \n", __func__, qlge->instance));
5095 
5096 	ethhdr = (struct ether_header *)((void *)bp);
5097 	ethvhdr = (struct ether_vlan_header *)((void *)bp);
5098 
5099 	if (qlge->tx_ring_count == 1)
5100 		return (tx_ring_id);
5101 
5102 	/* Is this vlan packet? */
5103 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
5104 		mac_hdr_len = sizeof (struct ether_vlan_header);
5105 		etherType = ntohs(ethvhdr->ether_type);
5106 	} else {
5107 		mac_hdr_len = sizeof (struct ether_header);
5108 		etherType = ntohs(ethhdr->ether_type);
5109 	}
5110 	/* Is this IPv4 or IPv6 packet? */
5111 	if (etherType == ETHERTYPE_IP /* 0800 */) {
5112 		if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len))
5113 		    == IPV4_VERSION) {
5114 			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
5115 		}
5116 		if (((unsigned long)iphdr) & 0x3) {
5117 			/*  IP hdr not 4-byte aligned */
5118 			return (tx_ring_id);
5119 		}
5120 	}
5121 	/* ipV4 packets */
5122 	if (iphdr) {
5123 
5124 		ip_hdr_len = IPH_HDR_LENGTH(iphdr);
5125 		ip_src_addr = iphdr->ip_src.s_addr;
5126 		ip_desc_addr = iphdr->ip_dst.s_addr;
5127 
5128 		if (iphdr->ip_p == IPPROTO_TCP) {
5129 			tcp_hdr = (struct tcphdr *)(void *)
5130 			    ((uint8_t *)iphdr + ip_hdr_len);
5131 			src_port = tcp_hdr->th_sport;
5132 			dest_port = tcp_hdr->th_dport;
5133 		} else if (iphdr->ip_p == IPPROTO_UDP) {
5134 			udp_hdr = (struct udphdr *)(void *)
5135 			    ((uint8_t *)iphdr + ip_hdr_len);
5136 			src_port = udp_hdr->uh_sport;
5137 			dest_port = udp_hdr->uh_dport;
5138 		}
5139 		key[0] = (uint8_t)((ip_src_addr) &0xFF);
5140 		key[1] = (uint8_t)((ip_src_addr >> 8) &0xFF);
5141 		key[2] = (uint8_t)((ip_src_addr >> 16) &0xFF);
5142 		key[3] = (uint8_t)((ip_src_addr >> 24) &0xFF);
5143 		key[4] = (uint8_t)((ip_desc_addr) &0xFF);
5144 		key[5] = (uint8_t)((ip_desc_addr >> 8) &0xFF);
5145 		key[6] = (uint8_t)((ip_desc_addr >> 16) &0xFF);
5146 		key[7] = (uint8_t)((ip_desc_addr >> 24) &0xFF);
5147 		key[8] = (uint8_t)((src_port) &0xFF);
5148 		key[9] = (uint8_t)((src_port >> 8) &0xFF);
5149 		key[10] = (uint8_t)((dest_port) &0xFF);
5150 		key[11] = (uint8_t)((dest_port >> 8) &0xFF);
5151 		h = hash(key, 12, 0); /* return 32 bit */
5152 		tx_ring_id = (h & (qlge->tx_ring_count - 1));
5153 		if (tx_ring_id >= qlge->tx_ring_count) {
5154 			cmn_err(CE_WARN, "%s bad tx_ring_id %d\n",
5155 			    __func__, tx_ring_id);
5156 			tx_ring_id = 0;
5157 		}
5158 	}
5159 	return (tx_ring_id);
5160 }
5161 
5162 /*
5163  * Tell the hardware to do Large Send Offload (LSO)
5164  *
5165  * Some fields in ob_mac_iocb need to be set so hardware can know what is
5166  * the incoming packet, TCP or UDP, whether a VLAN tag needs to be inserted
5167  * in the right place of the packet etc, thus, hardware can process the
5168  * packet correctly.
5169  */
5170 static void
ql_hw_lso_setup(qlge_t * qlge,uint32_t mss,caddr_t bp,struct ob_mac_iocb_req * mac_iocb_ptr)5171 ql_hw_lso_setup(qlge_t *qlge, uint32_t mss, caddr_t bp,
5172     struct ob_mac_iocb_req *mac_iocb_ptr)
5173 {
5174 	struct ip *iphdr = NULL;
5175 	struct ether_header *ethhdr;
5176 	struct ether_vlan_header *ethvhdr;
5177 	struct tcphdr *tcp_hdr;
5178 	struct udphdr *udp_hdr;
5179 	uint32_t etherType;
5180 	uint16_t mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
5181 	uint16_t ip_hdr_off, tcp_udp_hdr_off, hdr_off;
5182 
5183 	ethhdr = (struct ether_header *)(void *)bp;
5184 	ethvhdr = (struct ether_vlan_header *)(void *)bp;
5185 
5186 	/* Is this vlan packet? */
5187 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
5188 		mac_hdr_len = sizeof (struct ether_vlan_header);
5189 		etherType = ntohs(ethvhdr->ether_type);
5190 	} else {
5191 		mac_hdr_len = sizeof (struct ether_header);
5192 		etherType = ntohs(ethhdr->ether_type);
5193 	}
5194 	/* Is this IPv4 or IPv6 packet? */
5195 	if (IPH_HDR_VERSION((ipha_t *)(void *)(bp + mac_hdr_len)) ==
5196 	    IPV4_VERSION) {
5197 		if (etherType == ETHERTYPE_IP /* 0800 */) {
5198 			iphdr 	= (struct ip *)(void *)(bp+mac_hdr_len);
5199 		} else {
5200 			/* EMPTY */
5201 			QL_PRINT(DBG_TX, ("%s(%d) : IPv4 None IP packet"
5202 			    " type 0x%x\n",
5203 			    __func__, qlge->instance, etherType));
5204 		}
5205 	}
5206 
5207 	if (iphdr != NULL) { /* ipV4 packets */
5208 		ip_hdr_len = (uint16_t)IPH_HDR_LENGTH(iphdr);
5209 		QL_PRINT(DBG_TX,
5210 		    ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH: %d"
5211 		    " bytes \n", __func__, qlge->instance, ip_hdr_len));
5212 
5213 		ip_hdr_off = mac_hdr_len;
5214 		QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
5215 		    __func__, qlge->instance, ip_hdr_len));
5216 
5217 		mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
5218 		    OB_MAC_IOCB_REQ_IPv4);
5219 		if (qlge->cfg_flags & CFG_CKSUM_FULL_IPv4) {
5220 			if (iphdr->ip_p == IPPROTO_TCP) {
5221 				tcp_hdr = (struct tcphdr *)(void *)
5222 				    ((uint8_t *)(void *)iphdr +
5223 				    ip_hdr_len);
5224 				QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on TCP "
5225 				    "packet\n",
5226 				    __func__, qlge->instance));
5227 				mac_iocb_ptr->opcode =
5228 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
5229 				mac_iocb_ptr->flag1 =
5230 				    (uint8_t)(mac_iocb_ptr->flag1 |
5231 				    OB_MAC_IOCB_REQ_LSO);
5232 				iphdr->ip_sum = 0;
5233 				tcp_udp_hdr_off =
5234 				    (uint16_t)(mac_hdr_len+ip_hdr_len);
5235 				tcp_udp_hdr_len =
5236 				    (uint16_t)(tcp_hdr->th_off*4);
5237 				QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
5238 				    __func__, qlge->instance, tcp_udp_hdr_len));
5239 				hdr_off = ip_hdr_off;
5240 				tcp_udp_hdr_off <<= 6;
5241 				hdr_off |= tcp_udp_hdr_off;
5242 				mac_iocb_ptr->hdr_off =
5243 				    (uint16_t)cpu_to_le16(hdr_off);
5244 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
5245 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
5246 				    tcp_udp_hdr_len);
5247 				mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
5248 
5249 				/*
5250 				 * if the chip is unable to calculate pseudo
5251 				 * header checksum, do it in then put the result
5252 				 * to the data passed to the chip
5253 				 */
5254 				if (qlge->cfg_flags &
5255 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
5256 					ql_lso_pseudo_cksum((uint8_t *)iphdr);
5257 			} else if (iphdr->ip_p == IPPROTO_UDP) {
5258 				udp_hdr = (struct udphdr *)(void *)
5259 				    ((uint8_t *)(void *)iphdr
5260 				    + ip_hdr_len);
5261 				QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on UDP "
5262 				    "packet\n",
5263 				    __func__, qlge->instance));
5264 				mac_iocb_ptr->opcode =
5265 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
5266 				mac_iocb_ptr->flag1 =
5267 				    (uint8_t)(mac_iocb_ptr->flag1 |
5268 				    OB_MAC_IOCB_REQ_LSO);
5269 				iphdr->ip_sum = 0;
5270 				tcp_udp_hdr_off =
5271 				    (uint16_t)(mac_hdr_len+ip_hdr_len);
5272 				tcp_udp_hdr_len =
5273 				    (uint16_t)(udp_hdr->uh_ulen*4);
5274 				QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
5275 				    __func__, qlge->instance, tcp_udp_hdr_len));
5276 				hdr_off = ip_hdr_off;
5277 				tcp_udp_hdr_off <<= 6;
5278 				hdr_off |= tcp_udp_hdr_off;
5279 				mac_iocb_ptr->hdr_off =
5280 				    (uint16_t)cpu_to_le16(hdr_off);
5281 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
5282 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
5283 				    tcp_udp_hdr_len);
5284 				mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
5285 
5286 				/*
5287 				 * if the chip is unable to do pseudo header
5288 				 * checksum calculation, do it here then put the
5289 				 * result to the data passed to the chip
5290 				 */
5291 				if (qlge->cfg_flags &
5292 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
5293 					ql_lso_pseudo_cksum((uint8_t *)iphdr);
5294 			}
5295 		}
5296 	}
5297 }
5298 
5299 /*
5300  * Generic packet sending function which is used to send one packet.
5301  */
5302 int
ql_send_common(struct tx_ring * tx_ring,mblk_t * mp)5303 ql_send_common(struct tx_ring *tx_ring, mblk_t *mp)
5304 {
5305 	struct tx_ring_desc *tx_cb;
5306 	struct ob_mac_iocb_req *mac_iocb_ptr;
5307 	mblk_t *tp;
5308 	size_t msg_len = 0;
5309 	size_t off;
5310 	caddr_t bp;
5311 	size_t nbyte, total_len;
5312 	uint_t i = 0;
5313 	int j = 0, frags = 0;
5314 	uint32_t phy_addr_low, phy_addr_high;
5315 	uint64_t phys_addr;
5316 	clock_t now;
5317 	uint32_t pflags = 0;
5318 	uint32_t mss = 0;
5319 	enum tx_mode_t tx_mode;
5320 	struct oal_entry *oal_entry;
5321 	int status;
5322 	uint_t ncookies, oal_entries, max_oal_entries;
5323 	size_t max_seg_len = 0;
5324 	boolean_t use_lso = B_FALSE;
5325 	struct oal_entry *tx_entry = NULL;
5326 	struct oal_entry *last_oal_entry;
5327 	qlge_t *qlge = tx_ring->qlge;
5328 	ddi_dma_cookie_t dma_cookie;
5329 	size_t tx_buf_len = QL_MAX_COPY_LENGTH;
5330 	int force_pullup = 0;
5331 
5332 	tp = mp;
5333 	total_len = msg_len = 0;
5334 	max_oal_entries = TX_DESC_PER_IOCB + MAX_SG_ELEMENTS-1;
5335 
5336 	/* Calculate number of data and segments in the incoming message */
5337 	for (tp = mp; tp != NULL; tp = tp->b_cont) {
5338 		nbyte = MBLKL(tp);
5339 		total_len += nbyte;
5340 		max_seg_len = max(nbyte, max_seg_len);
5341 		QL_PRINT(DBG_TX, ("Requested sending data in %d segments, "
5342 		    "total length: %d\n", frags, nbyte));
5343 		frags++;
5344 	}
5345 
5346 	if (total_len >= QL_LSO_MAX) {
5347 		freemsg(mp);
5348 #ifdef QLGE_LOAD_UNLOAD
5349 		cmn_err(CE_NOTE, "%s: quit, packet oversize %d\n",
5350 		    __func__, (int)total_len);
5351 #endif
5352 		return (NULL);
5353 	}
5354 
5355 	bp = (caddr_t)mp->b_rptr;
5356 	if (bp[0] & 1) {
5357 		if (bcmp(bp, ql_ether_broadcast_addr.ether_addr_octet,
5358 		    ETHERADDRL) == 0) {
5359 			QL_PRINT(DBG_TX, ("Broadcast packet\n"));
5360 			tx_ring->brdcstxmt++;
5361 		} else {
5362 			QL_PRINT(DBG_TX, ("multicast packet\n"));
5363 			tx_ring->multixmt++;
5364 		}
5365 	}
5366 
5367 	tx_ring->obytes += total_len;
5368 	tx_ring->opackets ++;
5369 
5370 	QL_PRINT(DBG_TX, ("total requested sending data length: %d, in %d segs,"
5371 	    " max seg len: %d\n", total_len, frags, max_seg_len));
5372 
5373 	/* claim a free slot in tx ring */
5374 	tx_cb = &tx_ring->wq_desc[tx_ring->prod_idx];
5375 
5376 	/* get the tx descriptor */
5377 	mac_iocb_ptr = tx_cb->queue_entry;
5378 
5379 	bzero((void *)mac_iocb_ptr, 20);
5380 
5381 	ASSERT(tx_cb->mp == NULL);
5382 
5383 	/*
5384 	 * Decide to use DMA map or copy mode.
5385 	 * DMA map mode must be used when the total msg length is more than the
5386 	 * tx buffer length.
5387 	 */
5388 
5389 	if (total_len > tx_buf_len)
5390 		tx_mode = USE_DMA;
5391 	else if	(max_seg_len > QL_MAX_COPY_LENGTH)
5392 		tx_mode = USE_DMA;
5393 	else
5394 		tx_mode = USE_COPY;
5395 
5396 	if (qlge->chksum_cap) {
5397 		mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &pflags);
5398 		QL_PRINT(DBG_TX, ("checksum flag is :0x%x, card capability "
5399 		    "is 0x%x \n", pflags, qlge->chksum_cap));
5400 		if (qlge->lso_enable) {
5401 			uint32_t lso_flags = 0;
5402 			mac_lso_get(mp, &mss, &lso_flags);
5403 			use_lso = (lso_flags == HW_LSO);
5404 		}
5405 		QL_PRINT(DBG_TX, ("mss :%d, use_lso %x \n",
5406 		    mss, use_lso));
5407 	}
5408 
5409 do_pullup:
5410 
5411 	/* concatenate all frags into one large packet if too fragmented */
5412 	if (((tx_mode == USE_DMA)&&(frags > QL_MAX_TX_DMA_HANDLES)) ||
5413 	    force_pullup) {
5414 		mblk_t *mp1;
5415 		if ((mp1 = msgpullup(mp, -1)) != NULL) {
5416 			freemsg(mp);
5417 			mp = mp1;
5418 			frags = 1;
5419 		} else {
5420 			tx_ring->tx_fail_dma_bind++;
5421 			goto bad;
5422 		}
5423 	}
5424 
5425 	tx_cb->tx_bytes = (uint32_t)total_len;
5426 	tx_cb->mp = mp;
5427 	tx_cb->tx_dma_handle_used = 0;
5428 
5429 	if (tx_mode == USE_DMA) {
5430 		msg_len = total_len;
5431 
5432 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
5433 		mac_iocb_ptr->tid = tx_ring->prod_idx;
5434 		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
5435 		mac_iocb_ptr->txq_idx = tx_ring->wq_id;
5436 
5437 		tx_entry = &mac_iocb_ptr->oal_entry[0];
5438 		oal_entry = NULL;
5439 
5440 		for (tp = mp, oal_entries = j = 0; tp != NULL;
5441 		    tp = tp->b_cont) {
5442 			/* if too many tx dma handles needed */
5443 			if (j >= QL_MAX_TX_DMA_HANDLES) {
5444 				tx_ring->tx_no_dma_handle++;
5445 				if (!force_pullup) {
5446 					force_pullup = 1;
5447 					goto do_pullup;
5448 				} else {
5449 					goto bad;
5450 				}
5451 			}
5452 			nbyte = (uint16_t)MBLKL(tp);
5453 			if (nbyte == 0)
5454 				continue;
5455 
5456 			status = ddi_dma_addr_bind_handle(
5457 			    tx_cb->tx_dma_handle[j], NULL,
5458 			    (caddr_t)tp->b_rptr, nbyte,
5459 			    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
5460 			    0, &dma_cookie, &ncookies);
5461 
5462 			QL_PRINT(DBG_TX, ("map sending data segment: %d, "
5463 			    "length: %d, spans in %d cookies\n",
5464 			    j, nbyte, ncookies));
5465 
5466 			if (status != DDI_DMA_MAPPED) {
5467 				goto bad;
5468 			}
5469 			/*
5470 			 * Each fragment can span several cookies. One cookie
5471 			 * will use one tx descriptor to transmit.
5472 			 */
5473 			for (i = ncookies; i > 0; i--, tx_entry++,
5474 			    oal_entries++) {
5475 				/*
5476 				 * The number of TX descriptors that can be
5477 				 *  saved in tx iocb and oal list is limited
5478 				 */
5479 				if (oal_entries > max_oal_entries) {
5480 					tx_ring->tx_no_dma_cookie++;
5481 					if (!force_pullup) {
5482 						force_pullup = 1;
5483 						goto do_pullup;
5484 					} else {
5485 						goto bad;
5486 					}
5487 				}
5488 
5489 				if ((oal_entries == TX_DESC_PER_IOCB) &&
5490 				    !oal_entry) {
5491 					/*
5492 					 * Time to switch to an oal list
5493 					 * The last entry should be copied
5494 					 * to first entry in the oal list
5495 					 */
5496 					oal_entry = tx_cb->oal;
5497 					tx_entry =
5498 					    &mac_iocb_ptr->oal_entry[
5499 					    TX_DESC_PER_IOCB-1];
5500 					bcopy(tx_entry, oal_entry,
5501 					    sizeof (*oal_entry));
5502 
5503 					/*
5504 					 * last entry should be updated to
5505 					 * point to the extended oal list itself
5506 					 */
5507 					tx_entry->buf_addr_low =
5508 					    cpu_to_le32(
5509 					    LS_64BITS(tx_cb->oal_dma_addr));
5510 					tx_entry->buf_addr_high =
5511 					    cpu_to_le32(
5512 					    MS_64BITS(tx_cb->oal_dma_addr));
5513 					/*
5514 					 * Point tx_entry to the oal list
5515 					 * second entry
5516 					 */
5517 					tx_entry = &oal_entry[1];
5518 				}
5519 
5520 				tx_entry->buf_len =
5521 				    (uint32_t)cpu_to_le32(dma_cookie.dmac_size);
5522 				phys_addr = dma_cookie.dmac_laddress;
5523 				tx_entry->buf_addr_low =
5524 				    cpu_to_le32(LS_64BITS(phys_addr));
5525 				tx_entry->buf_addr_high =
5526 				    cpu_to_le32(MS_64BITS(phys_addr));
5527 
5528 				last_oal_entry = tx_entry;
5529 
5530 				if (i > 1)
5531 					ddi_dma_nextcookie(
5532 					    tx_cb->tx_dma_handle[j],
5533 					    &dma_cookie);
5534 			}
5535 			j++;
5536 		}
5537 		/*
5538 		 * if OAL is used, the last oal entry in tx iocb indicates
5539 		 * number of additional address/len pairs in OAL
5540 		 */
5541 		if (oal_entries > TX_DESC_PER_IOCB) {
5542 			tx_entry = &mac_iocb_ptr->oal_entry[TX_DESC_PER_IOCB-1];
5543 			tx_entry->buf_len = (uint32_t)
5544 			    (cpu_to_le32((sizeof (struct oal_entry) *
5545 			    (oal_entries -TX_DESC_PER_IOCB+1))|OAL_CONT_ENTRY));
5546 		}
5547 		last_oal_entry->buf_len = cpu_to_le32(
5548 		    le32_to_cpu(last_oal_entry->buf_len)|OAL_LAST_ENTRY);
5549 
5550 		tx_cb->tx_dma_handle_used = j;
5551 		QL_PRINT(DBG_TX, ("total tx_dma_handle_used %d cookies %d \n",
5552 		    j, oal_entries));
5553 
5554 		bp = (caddr_t)mp->b_rptr;
5555 	}
5556 	if (tx_mode == USE_COPY) {
5557 		bp = tx_cb->copy_buffer;
5558 		off = 0;
5559 		nbyte = 0;
5560 		frags = 0;
5561 		/*
5562 		 * Copy up to tx_buf_len of the transmit data
5563 		 * from mp to tx buffer
5564 		 */
5565 		for (tp = mp; tp != NULL; tp = tp->b_cont) {
5566 			nbyte = MBLKL(tp);
5567 			if ((off + nbyte) <= tx_buf_len) {
5568 				bcopy(tp->b_rptr, &bp[off], nbyte);
5569 				off += nbyte;
5570 				frags ++;
5571 			}
5572 		}
5573 
5574 		msg_len = off;
5575 
5576 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
5577 		mac_iocb_ptr->tid = tx_ring->prod_idx;
5578 		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
5579 		mac_iocb_ptr->txq_idx = tx_ring->wq_id;
5580 
5581 		QL_PRINT(DBG_TX, ("Copy Mode:actual sent data length is: %d, "
5582 		    "from %d segaments\n", msg_len, frags));
5583 
5584 		phys_addr = tx_cb->copy_buffer_dma_addr;
5585 		phy_addr_low = cpu_to_le32(LS_64BITS(phys_addr));
5586 		phy_addr_high = cpu_to_le32(MS_64BITS(phys_addr));
5587 
5588 		QL_DUMP(DBG_TX, "\t requested sending data:\n",
5589 		    (uint8_t *)tx_cb->copy_buffer, 8, total_len);
5590 
5591 		mac_iocb_ptr->oal_entry[0].buf_len = (uint32_t)
5592 		    cpu_to_le32(msg_len | OAL_LAST_ENTRY);
5593 		mac_iocb_ptr->oal_entry[0].buf_addr_low  = phy_addr_low;
5594 		mac_iocb_ptr->oal_entry[0].buf_addr_high = phy_addr_high;
5595 
5596 		freemsg(mp); /* no need, we have copied */
5597 		tx_cb->mp = NULL;
5598 	} /* End of Copy Mode */
5599 
5600 	/* Do TSO/LSO on TCP packet? */
5601 	if (use_lso && mss) {
5602 		ql_hw_lso_setup(qlge, mss, bp, mac_iocb_ptr);
5603 	} else if (pflags & qlge->chksum_cap) {
5604 		/* Do checksum offloading */
5605 		ql_hw_csum_setup(qlge, pflags, bp, mac_iocb_ptr);
5606 	}
5607 
5608 	/* let device know the latest outbound IOCB */
5609 	(void) ddi_dma_sync(tx_ring->wq_dma.dma_handle,
5610 	    (off_t)((uintptr_t)mac_iocb_ptr - (uintptr_t)tx_ring->wq_dma.vaddr),
5611 	    (size_t)sizeof (*mac_iocb_ptr), DDI_DMA_SYNC_FORDEV);
5612 
5613 	if (tx_mode == USE_DMA) {
5614 		/* let device know the latest outbound OAL if necessary */
5615 		if (oal_entries > TX_DESC_PER_IOCB) {
5616 			(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
5617 			    (off_t)0,
5618 			    (sizeof (struct oal_entry) *
5619 			    (oal_entries -TX_DESC_PER_IOCB+1)),
5620 			    DDI_DMA_SYNC_FORDEV);
5621 		}
5622 	} else { /* for USE_COPY mode, tx buffer has changed */
5623 		/* let device know the latest change */
5624 		(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
5625 		/* copy buf offset */
5626 		    (off_t)(sizeof (oal_entry) * MAX_SG_ELEMENTS),
5627 		    msg_len, DDI_DMA_SYNC_FORDEV);
5628 	}
5629 
5630 	/* save how the packet was sent */
5631 	tx_cb->tx_type = tx_mode;
5632 
5633 	QL_DUMP_REQ_PKT(qlge, mac_iocb_ptr, tx_cb->oal, oal_entries);
5634 	/* reduce the number of available tx slot */
5635 	atomic_dec_32(&tx_ring->tx_free_count);
5636 
5637 	tx_ring->prod_idx++;
5638 	if (tx_ring->prod_idx >= tx_ring->wq_len)
5639 		tx_ring->prod_idx = 0;
5640 
5641 	now = ddi_get_lbolt();
5642 	qlge->last_tx_time = now;
5643 
5644 	return (DDI_SUCCESS);
5645 
5646 bad:
5647 	/*
5648 	 * if for any reason driver can not send, delete
5649 	 * the message pointer, mp
5650 	 */
5651 	now = ddi_get_lbolt();
5652 	freemsg(mp);
5653 	mp = NULL;
5654 	tx_cb->mp = NULL;
5655 	for (i = 0; i < j; i++)
5656 		(void) ddi_dma_unbind_handle(tx_cb->tx_dma_handle[i]);
5657 
5658 	QL_PRINT(DBG_TX, ("%s(%d) failed at 0x%x",
5659 	    __func__, qlge->instance, (int)now));
5660 
5661 	return (DDI_SUCCESS);
5662 }
5663 
5664 
5665 /*
5666  * Initializes hardware and driver software flags before the driver
5667  * is finally ready to work.
5668  */
5669 int
ql_do_start(qlge_t * qlge)5670 ql_do_start(qlge_t *qlge)
5671 {
5672 	int i;
5673 	struct rx_ring *rx_ring;
5674 	uint16_t lbq_buf_size;
5675 	int rings_done;
5676 
5677 	ASSERT(qlge != NULL);
5678 
5679 	mutex_enter(&qlge->hw_mutex);
5680 
5681 	/* Reset adapter */
5682 	(void) ql_asic_reset(qlge);
5683 
5684 	lbq_buf_size = (uint16_t)
5685 	    ((qlge->mtu == ETHERMTU)? LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE);
5686 	if (qlge->rx_ring[0].lbq_buf_size != lbq_buf_size) {
5687 #ifdef QLGE_LOAD_UNLOAD
5688 		cmn_err(CE_NOTE, "realloc buffers old: %d new: %d\n",
5689 		    qlge->rx_ring[0].lbq_buf_size, lbq_buf_size);
5690 #endif
5691 		/*
5692 		 * Check if any ring has buffers still with upper layers
5693 		 * If buffers are pending with upper layers, we use the
5694 		 * existing buffers and don't reallocate new ones
5695 		 * Unfortunately there is no way to evict buffers from
5696 		 * upper layers. Using buffers with the current size may
5697 		 * cause slightly sub-optimal performance, but that seems
5698 		 * to be the easiest way to handle this situation.
5699 		 */
5700 		rings_done = 0;
5701 		for (i = 0; i < qlge->rx_ring_count; i++) {
5702 			rx_ring = &qlge->rx_ring[i];
5703 			if (rx_ring->rx_indicate == 0)
5704 				rings_done++;
5705 			else
5706 				break;
5707 		}
5708 		/*
5709 		 * No buffers pending with upper layers;
5710 		 * reallocte them for new MTU size
5711 		 */
5712 		if (rings_done >= qlge->rx_ring_count) {
5713 			/* free large buffer pool */
5714 			for (i = 0; i < qlge->rx_ring_count; i++) {
5715 				rx_ring = &qlge->rx_ring[i];
5716 				if (rx_ring->type != TX_Q) {
5717 					ql_free_sbq_buffers(rx_ring);
5718 					ql_free_lbq_buffers(rx_ring);
5719 				}
5720 			}
5721 			/* reallocate large buffer pool */
5722 			for (i = 0; i < qlge->rx_ring_count; i++) {
5723 				rx_ring = &qlge->rx_ring[i];
5724 				if (rx_ring->type != TX_Q) {
5725 					(void) ql_alloc_sbufs(qlge, rx_ring);
5726 					(void) ql_alloc_lbufs(qlge, rx_ring);
5727 				}
5728 			}
5729 		}
5730 	}
5731 
5732 	if (ql_bringup_adapter(qlge) != DDI_SUCCESS) {
5733 		cmn_err(CE_WARN, "qlge bringup adapter failed");
5734 		mutex_exit(&qlge->hw_mutex);
5735 		if (qlge->fm_enable) {
5736 			atomic_or_32(&qlge->flags, ADAPTER_ERROR);
5737 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_LOST);
5738 		}
5739 		return (DDI_FAILURE);
5740 	}
5741 
5742 	mutex_exit(&qlge->hw_mutex);
5743 	/* if adapter is up successfully but was bad before */
5744 	if (qlge->flags & ADAPTER_ERROR) {
5745 		atomic_and_32(&qlge->flags, ~ADAPTER_ERROR);
5746 		if (qlge->fm_enable) {
5747 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_RESTORED);
5748 		}
5749 	}
5750 
5751 	/* Get current link state */
5752 	qlge->port_link_state = ql_get_link_state(qlge);
5753 
5754 	if (qlge->port_link_state == LS_UP) {
5755 		QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5756 		    __func__, qlge->instance));
5757 		/* If driver detects a carrier on */
5758 		CARRIER_ON(qlge);
5759 	} else {
5760 		QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5761 		    __func__, qlge->instance));
5762 		/* If driver detects a lack of carrier */
5763 		CARRIER_OFF(qlge);
5764 	}
5765 	qlge->mac_flags = QL_MAC_STARTED;
5766 	return (DDI_SUCCESS);
5767 }
5768 
5769 /*
5770  * Stop currently running driver
5771  * Driver needs to stop routing new packets to driver and wait until
5772  * all pending tx/rx buffers to be free-ed.
5773  */
5774 int
ql_do_stop(qlge_t * qlge)5775 ql_do_stop(qlge_t *qlge)
5776 {
5777 	int rc = DDI_FAILURE;
5778 	uint32_t i, j, k;
5779 	struct bq_desc *sbq_desc, *lbq_desc;
5780 	struct rx_ring *rx_ring;
5781 
5782 	ASSERT(qlge != NULL);
5783 
5784 	CARRIER_OFF(qlge);
5785 
5786 	rc = ql_bringdown_adapter(qlge);
5787 	if (rc != DDI_SUCCESS) {
5788 		cmn_err(CE_WARN, "qlge bringdown adapter failed.");
5789 	} else
5790 		rc = DDI_SUCCESS;
5791 
5792 	for (k = 0; k < qlge->rx_ring_count; k++) {
5793 		rx_ring = &qlge->rx_ring[k];
5794 		if (rx_ring->type != TX_Q) {
5795 			j = rx_ring->lbq_use_head;
5796 #ifdef QLGE_LOAD_UNLOAD
5797 			cmn_err(CE_NOTE, "ring %d: move %d lbufs in use list"
5798 			    " to free list %d\n total %d\n",
5799 			    k, rx_ring->lbuf_in_use_count,
5800 			    rx_ring->lbuf_free_count,
5801 			    rx_ring->lbuf_in_use_count +
5802 			    rx_ring->lbuf_free_count);
5803 #endif
5804 			for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
5805 				lbq_desc = rx_ring->lbuf_in_use[j];
5806 				j++;
5807 				if (j >= rx_ring->lbq_len) {
5808 					j = 0;
5809 				}
5810 				if (lbq_desc->mp) {
5811 					atomic_inc_32(&rx_ring->rx_indicate);
5812 					freemsg(lbq_desc->mp);
5813 				}
5814 			}
5815 			rx_ring->lbq_use_head = j;
5816 			rx_ring->lbq_use_tail = j;
5817 			rx_ring->lbuf_in_use_count = 0;
5818 			j = rx_ring->sbq_use_head;
5819 #ifdef QLGE_LOAD_UNLOAD
5820 			cmn_err(CE_NOTE, "ring %d: move %d sbufs in use list,"
5821 			    " to free list %d\n total %d \n",
5822 			    k, rx_ring->sbuf_in_use_count,
5823 			    rx_ring->sbuf_free_count,
5824 			    rx_ring->sbuf_in_use_count +
5825 			    rx_ring->sbuf_free_count);
5826 #endif
5827 			for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
5828 				sbq_desc = rx_ring->sbuf_in_use[j];
5829 				j++;
5830 				if (j >= rx_ring->sbq_len) {
5831 					j = 0;
5832 				}
5833 				if (sbq_desc->mp) {
5834 					atomic_inc_32(&rx_ring->rx_indicate);
5835 					freemsg(sbq_desc->mp);
5836 				}
5837 			}
5838 			rx_ring->sbq_use_head = j;
5839 			rx_ring->sbq_use_tail = j;
5840 			rx_ring->sbuf_in_use_count = 0;
5841 		}
5842 	}
5843 
5844 	qlge->mac_flags = QL_MAC_STOPPED;
5845 
5846 	return (rc);
5847 }
5848 
5849 /*
5850  * Support
5851  */
5852 
5853 void
ql_disable_isr(qlge_t * qlge)5854 ql_disable_isr(qlge_t *qlge)
5855 {
5856 	/*
5857 	 * disable the hardware interrupt
5858 	 */
5859 	ISP_DISABLE_GLOBAL_INTRS(qlge);
5860 
5861 	qlge->flags &= ~INTERRUPTS_ENABLED;
5862 }
5863 
5864 
5865 
5866 /*
5867  * busy wait for 'usecs' microseconds.
5868  */
5869 void
qlge_delay(clock_t usecs)5870 qlge_delay(clock_t usecs)
5871 {
5872 	drv_usecwait(usecs);
5873 }
5874 
5875 /*
5876  * retrieve firmware details.
5877  */
5878 
5879 pci_cfg_t *
ql_get_pci_config(qlge_t * qlge)5880 ql_get_pci_config(qlge_t *qlge)
5881 {
5882 	return (&(qlge->pci_cfg));
5883 }
5884 
5885 /*
5886  * Get current Link status
5887  */
5888 static uint32_t
ql_get_link_state(qlge_t * qlge)5889 ql_get_link_state(qlge_t *qlge)
5890 {
5891 	uint32_t bitToCheck = 0;
5892 	uint32_t temp, linkState;
5893 
5894 	if (qlge->func_number == qlge->fn0_net) {
5895 		bitToCheck = STS_PL0;
5896 	} else {
5897 		bitToCheck = STS_PL1;
5898 	}
5899 	temp = ql_read_reg(qlge, REG_STATUS);
5900 	QL_PRINT(DBG_GLD, ("%s(%d) chip status reg: 0x%x\n",
5901 	    __func__, qlge->instance, temp));
5902 
5903 	if (temp & bitToCheck) {
5904 		linkState = LS_UP;
5905 	} else {
5906 		linkState = LS_DOWN;
5907 	}
5908 	if (CFG_IST(qlge, CFG_CHIP_8100)) {
5909 		/* for Schultz, link Speed is fixed to 10G, full duplex */
5910 		qlge->speed  = SPEED_10G;
5911 		qlge->duplex = 1;
5912 	}
5913 	return (linkState);
5914 }
5915 /*
5916  * Get current link status and report to OS
5917  */
5918 static void
ql_get_and_report_link_state(qlge_t * qlge)5919 ql_get_and_report_link_state(qlge_t *qlge)
5920 {
5921 	uint32_t cur_link_state;
5922 
5923 	/* Get current link state */
5924 	cur_link_state = ql_get_link_state(qlge);
5925 	/* if link state has changed */
5926 	if (cur_link_state != qlge->port_link_state) {
5927 
5928 		qlge->port_link_state = cur_link_state;
5929 
5930 		if (qlge->port_link_state == LS_UP) {
5931 			QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5932 			    __func__, qlge->instance));
5933 			/* If driver detects a carrier on */
5934 			CARRIER_ON(qlge);
5935 		} else {
5936 			QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5937 			    __func__, qlge->instance));
5938 			/* If driver detects a lack of carrier */
5939 			CARRIER_OFF(qlge);
5940 		}
5941 	}
5942 }
5943 
5944 /*
5945  * timer callback function executed after timer expires
5946  */
5947 static void
ql_timer(void * arg)5948 ql_timer(void* arg)
5949 {
5950 	ql_get_and_report_link_state((qlge_t *)arg);
5951 }
5952 
5953 /*
5954  * stop the running timer if activated
5955  */
5956 static void
ql_stop_timer(qlge_t * qlge)5957 ql_stop_timer(qlge_t *qlge)
5958 {
5959 	timeout_id_t timer_id;
5960 	/* Disable driver timer */
5961 	if (qlge->ql_timer_timeout_id != NULL) {
5962 		timer_id = qlge->ql_timer_timeout_id;
5963 		qlge->ql_timer_timeout_id = NULL;
5964 		(void) untimeout(timer_id);
5965 	}
5966 }
5967 
5968 /*
5969  * stop then restart timer
5970  */
5971 void
ql_restart_timer(qlge_t * qlge)5972 ql_restart_timer(qlge_t *qlge)
5973 {
5974 	ql_stop_timer(qlge);
5975 	qlge->ql_timer_ticks = TICKS_PER_SEC / 4;
5976 	qlge->ql_timer_timeout_id = timeout(ql_timer,
5977 	    (void *)qlge, qlge->ql_timer_ticks);
5978 }
5979 
5980 /* ************************************************************************* */
5981 /*
5982  *		Hardware K-Stats Data Structures and Subroutines
5983  */
5984 /* ************************************************************************* */
5985 static const ql_ksindex_t ql_kstats_hw[] = {
5986 	/* PCI related hardware information */
5987 	{ 0, "Vendor Id"			},
5988 	{ 1, "Device Id"			},
5989 	{ 2, "Command"				},
5990 	{ 3, "Status"				},
5991 	{ 4, "Revision Id"			},
5992 	{ 5, "Cache Line Size"			},
5993 	{ 6, "Latency Timer"			},
5994 	{ 7, "Header Type"			},
5995 	{ 9, "I/O base addr"			},
5996 	{ 10, "Control Reg Base addr low"	},
5997 	{ 11, "Control Reg Base addr high"	},
5998 	{ 12, "Doorbell Reg Base addr low"	},
5999 	{ 13, "Doorbell Reg Base addr high"	},
6000 	{ 14, "Subsystem Vendor Id"		},
6001 	{ 15, "Subsystem Device ID"		},
6002 	{ 16, "PCIe Device Control"		},
6003 	{ 17, "PCIe Link Status"		},
6004 
6005 	{ -1,	NULL				},
6006 };
6007 
6008 /*
6009  * kstat update function for PCI registers
6010  */
6011 static int
ql_kstats_get_pci_regs(kstat_t * ksp,int flag)6012 ql_kstats_get_pci_regs(kstat_t *ksp, int flag)
6013 {
6014 	qlge_t *qlge;
6015 	kstat_named_t *knp;
6016 
6017 	if (flag != KSTAT_READ)
6018 		return (EACCES);
6019 
6020 	qlge = ksp->ks_private;
6021 	knp = ksp->ks_data;
6022 	(knp++)->value.ui32 = qlge->pci_cfg.vendor_id;
6023 	(knp++)->value.ui32 = qlge->pci_cfg.device_id;
6024 	(knp++)->value.ui32 = qlge->pci_cfg.command;
6025 	(knp++)->value.ui32 = qlge->pci_cfg.status;
6026 	(knp++)->value.ui32 = qlge->pci_cfg.revision;
6027 	(knp++)->value.ui32 = qlge->pci_cfg.cache_line_size;
6028 	(knp++)->value.ui32 = qlge->pci_cfg.latency_timer;
6029 	(knp++)->value.ui32 = qlge->pci_cfg.header_type;
6030 	(knp++)->value.ui32 = qlge->pci_cfg.io_base_address;
6031 	(knp++)->value.ui32 =
6032 	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_lower;
6033 	(knp++)->value.ui32 =
6034 	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_upper;
6035 	(knp++)->value.ui32 =
6036 	    qlge->pci_cfg.pci_doorbell_mem_base_address_lower;
6037 	(knp++)->value.ui32 =
6038 	    qlge->pci_cfg.pci_doorbell_mem_base_address_upper;
6039 	(knp++)->value.ui32 = qlge->pci_cfg.sub_vendor_id;
6040 	(knp++)->value.ui32 = qlge->pci_cfg.sub_device_id;
6041 	(knp++)->value.ui32 = qlge->pci_cfg.pcie_device_control;
6042 	(knp++)->value.ui32 = qlge->pci_cfg.link_status;
6043 
6044 	return (0);
6045 }
6046 
6047 static const ql_ksindex_t ql_kstats_mii[] = {
6048 	/* MAC/MII related hardware information */
6049 	{ 0, "mtu"},
6050 
6051 	{ -1, NULL},
6052 };
6053 
6054 
6055 /*
6056  * kstat update function for MII related information.
6057  */
6058 static int
ql_kstats_mii_update(kstat_t * ksp,int flag)6059 ql_kstats_mii_update(kstat_t *ksp, int flag)
6060 {
6061 	qlge_t *qlge;
6062 	kstat_named_t *knp;
6063 
6064 	if (flag != KSTAT_READ)
6065 		return (EACCES);
6066 
6067 	qlge = ksp->ks_private;
6068 	knp = ksp->ks_data;
6069 
6070 	(knp++)->value.ui32 = qlge->mtu;
6071 
6072 	return (0);
6073 }
6074 
6075 static const ql_ksindex_t ql_kstats_reg[] = {
6076 	/* Register information */
6077 	{ 0, "System (0x08)"			},
6078 	{ 1, "Reset/Fail Over(0x0Ch"		},
6079 	{ 2, "Function Specific Control(0x10)"	},
6080 	{ 3, "Status (0x30)"			},
6081 	{ 4, "Intr Enable (0x34)"		},
6082 	{ 5, "Intr Status1 (0x3C)"		},
6083 	{ 6, "Error Status (0x54)"		},
6084 	{ 7, "XGMAC Flow Control(0x11C)"	},
6085 	{ 8, "XGMAC Tx Pause Frames(0x230)"	},
6086 	{ 9, "XGMAC Rx Pause Frames(0x388)"	},
6087 	{ 10, "XGMAC Rx FIFO Drop Count(0x5B8)"	},
6088 	{ 11, "interrupts actually allocated"	},
6089 	{ 12, "interrupts on rx ring 0"		},
6090 	{ 13, "interrupts on rx ring 1"		},
6091 	{ 14, "interrupts on rx ring 2"		},
6092 	{ 15, "interrupts on rx ring 3"		},
6093 	{ 16, "interrupts on rx ring 4"		},
6094 	{ 17, "interrupts on rx ring 5"		},
6095 	{ 18, "interrupts on rx ring 6"		},
6096 	{ 19, "interrupts on rx ring 7"		},
6097 	{ 20, "polls on rx ring 0"		},
6098 	{ 21, "polls on rx ring 1"		},
6099 	{ 22, "polls on rx ring 2"		},
6100 	{ 23, "polls on rx ring 3"		},
6101 	{ 24, "polls on rx ring 4"		},
6102 	{ 25, "polls on rx ring 5"		},
6103 	{ 26, "polls on rx ring 6"		},
6104 	{ 27, "polls on rx ring 7"		},
6105 	{ 28, "tx no resource on ring 0"	},
6106 	{ 29, "tx dma bind fail on ring 0"	},
6107 	{ 30, "tx dma no handle on ring 0"	},
6108 	{ 31, "tx dma no cookie on ring 0"	},
6109 	{ 32, "MPI firmware major version"	},
6110 	{ 33, "MPI firmware minor version"	},
6111 	{ 34, "MPI firmware sub version"	},
6112 	{ 35, "rx no resource"			},
6113 
6114 	{ -1, NULL},
6115 };
6116 
6117 
6118 /*
6119  * kstat update function for device register set
6120  */
6121 static int
ql_kstats_get_reg_and_dev_stats(kstat_t * ksp,int flag)6122 ql_kstats_get_reg_and_dev_stats(kstat_t *ksp, int flag)
6123 {
6124 	qlge_t *qlge;
6125 	kstat_named_t *knp;
6126 	uint32_t val32;
6127 	int i = 0;
6128 	struct tx_ring *tx_ring;
6129 	struct rx_ring *rx_ring;
6130 
6131 	if (flag != KSTAT_READ)
6132 		return (EACCES);
6133 
6134 	qlge = ksp->ks_private;
6135 	knp = ksp->ks_data;
6136 
6137 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_SYSTEM);
6138 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_RESET_FAILOVER);
6139 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL);
6140 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_STATUS);
6141 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_ENABLE);
6142 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
6143 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_ERROR_STATUS);
6144 
6145 	if (ql_sem_spinlock(qlge, qlge->xgmac_sem_mask)) {
6146 		return (0);
6147 	}
6148 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_FLOW_CONTROL, &val32);
6149 	(knp++)->value.ui32 = val32;
6150 
6151 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_TX_PAUSE_PKTS, &val32);
6152 	(knp++)->value.ui32 = val32;
6153 
6154 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_PAUSE_PKTS, &val32);
6155 	(knp++)->value.ui32 = val32;
6156 
6157 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_FIFO_DROPS, &val32);
6158 	(knp++)->value.ui32 = val32;
6159 
6160 	ql_sem_unlock(qlge, qlge->xgmac_sem_mask);
6161 
6162 	(knp++)->value.ui32 = qlge->intr_cnt;
6163 
6164 	for (i = 0; i < 8; i++) {
6165 		(knp++)->value.ui32 = qlge->rx_interrupts[i];
6166 	}
6167 
6168 	for (i = 0; i < 8; i++) {
6169 		(knp++)->value.ui32 = qlge->rx_polls[i];
6170 	}
6171 
6172 	tx_ring = &qlge->tx_ring[0];
6173 	(knp++)->value.ui32 = tx_ring->defer;
6174 	(knp++)->value.ui32 = tx_ring->tx_fail_dma_bind;
6175 	(knp++)->value.ui32 = tx_ring->tx_no_dma_handle;
6176 	(knp++)->value.ui32 = tx_ring->tx_no_dma_cookie;
6177 
6178 	(knp++)->value.ui32 = qlge->fw_version_info.major_version;
6179 	(knp++)->value.ui32 = qlge->fw_version_info.minor_version;
6180 	(knp++)->value.ui32 = qlge->fw_version_info.sub_minor_version;
6181 
6182 	for (i = 0; i < qlge->rx_ring_count; i++) {
6183 		rx_ring = &qlge->rx_ring[i];
6184 		val32 += rx_ring->rx_packets_dropped_no_buffer;
6185 	}
6186 	(knp++)->value.ui32 = val32;
6187 
6188 	return (0);
6189 }
6190 
6191 
6192 static kstat_t *
ql_setup_named_kstat(qlge_t * qlge,int instance,char * name,const ql_ksindex_t * ksip,size_t size,int (* update)(kstat_t *,int))6193 ql_setup_named_kstat(qlge_t *qlge, int instance, char *name,
6194     const ql_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
6195 {
6196 	kstat_t *ksp;
6197 	kstat_named_t *knp;
6198 	char *np;
6199 	int type;
6200 
6201 	size /= sizeof (ql_ksindex_t);
6202 	ksp = kstat_create(ADAPTER_NAME, instance, name, "net",
6203 	    KSTAT_TYPE_NAMED, ((uint32_t)size) - 1, KSTAT_FLAG_PERSISTENT);
6204 	if (ksp == NULL)
6205 		return (NULL);
6206 
6207 	ksp->ks_private = qlge;
6208 	ksp->ks_update = update;
6209 	for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
6210 		switch (*np) {
6211 		default:
6212 			type = KSTAT_DATA_UINT32;
6213 			break;
6214 		case '&':
6215 			np += 1;
6216 			type = KSTAT_DATA_CHAR;
6217 			break;
6218 		}
6219 		kstat_named_init(knp, np, (uint8_t)type);
6220 	}
6221 	kstat_install(ksp);
6222 
6223 	return (ksp);
6224 }
6225 
6226 /*
6227  * Setup various kstat
6228  */
6229 int
ql_init_kstats(qlge_t * qlge)6230 ql_init_kstats(qlge_t *qlge)
6231 {
6232 	/* Hardware KStats */
6233 	qlge->ql_kstats[QL_KSTAT_CHIP] = ql_setup_named_kstat(qlge,
6234 	    qlge->instance, "chip", ql_kstats_hw,
6235 	    sizeof (ql_kstats_hw), ql_kstats_get_pci_regs);
6236 	if (qlge->ql_kstats[QL_KSTAT_CHIP] == NULL) {
6237 		return (DDI_FAILURE);
6238 	}
6239 
6240 	/* MII KStats */
6241 	qlge->ql_kstats[QL_KSTAT_LINK] = ql_setup_named_kstat(qlge,
6242 	    qlge->instance, "mii", ql_kstats_mii,
6243 	    sizeof (ql_kstats_mii), ql_kstats_mii_update);
6244 	if (qlge->ql_kstats[QL_KSTAT_LINK] == NULL) {
6245 		return (DDI_FAILURE);
6246 	}
6247 
6248 	/* REG KStats */
6249 	qlge->ql_kstats[QL_KSTAT_REG] = ql_setup_named_kstat(qlge,
6250 	    qlge->instance, "reg", ql_kstats_reg,
6251 	    sizeof (ql_kstats_reg), ql_kstats_get_reg_and_dev_stats);
6252 	if (qlge->ql_kstats[QL_KSTAT_REG] == NULL) {
6253 		return (DDI_FAILURE);
6254 	}
6255 	return (DDI_SUCCESS);
6256 }
6257 
6258 /*
6259  * delete all kstat
6260  */
6261 void
ql_fini_kstats(qlge_t * qlge)6262 ql_fini_kstats(qlge_t *qlge)
6263 {
6264 	int i;
6265 
6266 	for (i = 0; i < QL_KSTAT_COUNT; i++) {
6267 		if (qlge->ql_kstats[i] != NULL)
6268 			kstat_delete(qlge->ql_kstats[i]);
6269 	}
6270 }
6271 
6272 /* ************************************************************************* */
6273 /*
6274  *                                 kstat end
6275  */
6276 /* ************************************************************************* */
6277 
6278 /*
6279  * Setup the parameters for receive and transmit rings including buffer sizes
6280  * and completion queue sizes
6281  */
6282 static int
ql_setup_rings(qlge_t * qlge)6283 ql_setup_rings(qlge_t *qlge)
6284 {
6285 	uint8_t i;
6286 	struct rx_ring *rx_ring;
6287 	struct tx_ring *tx_ring;
6288 	uint16_t lbq_buf_size;
6289 
6290 	lbq_buf_size = (uint16_t)
6291 	    ((qlge->mtu == ETHERMTU)? LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE);
6292 
6293 	/*
6294 	 * rx_ring[0] is always the default queue.
6295 	 */
6296 	/*
6297 	 * qlge->rx_ring_count:
6298 	 * Total number of rx_rings. This includes a number
6299 	 * of outbound completion handler rx_rings, and a
6300 	 * number of inbound completion handler rx_rings.
6301 	 * rss is only enabled if we have more than 1 rx completion
6302 	 * queue. If we have a single rx completion queue
6303 	 * then all rx completions go to this queue and
6304 	 * the last completion queue
6305 	 */
6306 
6307 	qlge->tx_ring_first_cq_id = qlge->rss_ring_count;
6308 
6309 	for (i = 0; i < qlge->tx_ring_count; i++) {
6310 		tx_ring = &qlge->tx_ring[i];
6311 		bzero((void *)tx_ring, sizeof (*tx_ring));
6312 		tx_ring->qlge = qlge;
6313 		tx_ring->wq_id = i;
6314 		tx_ring->wq_len = qlge->tx_ring_size;
6315 		tx_ring->wq_size = (uint32_t)(
6316 		    tx_ring->wq_len * sizeof (struct ob_mac_iocb_req));
6317 
6318 		/*
6319 		 * The completion queue ID for the tx rings start
6320 		 * immediately after the last rss completion queue.
6321 		 */
6322 		tx_ring->cq_id = (uint16_t)(i + qlge->tx_ring_first_cq_id);
6323 	}
6324 
6325 	for (i = 0; i < qlge->rx_ring_count; i++) {
6326 		rx_ring = &qlge->rx_ring[i];
6327 		bzero((void *)rx_ring, sizeof (*rx_ring));
6328 		rx_ring->qlge = qlge;
6329 		rx_ring->cq_id = i;
6330 		if (i != 0)
6331 			rx_ring->cpu = (i) % qlge->rx_ring_count;
6332 		else
6333 			rx_ring->cpu = 0;
6334 
6335 		if (i < qlge->rss_ring_count) {
6336 			/*
6337 			 * Inbound completions (RSS) queues
6338 			 * Default queue is queue 0 which handles
6339 			 * unicast plus bcast/mcast and async events.
6340 			 * Other inbound queues handle unicast frames only.
6341 			 */
6342 			rx_ring->cq_len = qlge->rx_ring_size;
6343 			rx_ring->cq_size = (uint32_t)
6344 			    (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
6345 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
6346 			rx_ring->lbq_size = (uint32_t)
6347 			    (rx_ring->lbq_len * sizeof (uint64_t));
6348 			rx_ring->lbq_buf_size = lbq_buf_size;
6349 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
6350 			rx_ring->sbq_size = (uint32_t)
6351 			    (rx_ring->sbq_len * sizeof (uint64_t));
6352 			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
6353 			rx_ring->type = RX_Q;
6354 
6355 			QL_PRINT(DBG_GLD,
6356 			    ("%s(%d)Allocating rss completion queue %d "
6357 			    "on cpu %d\n", __func__, qlge->instance,
6358 			    rx_ring->cq_id, rx_ring->cpu));
6359 		} else {
6360 			/*
6361 			 * Outbound queue handles outbound completions only
6362 			 */
6363 			/* outbound cq is same size as tx_ring it services. */
6364 			QL_PRINT(DBG_INIT, ("rx_ring 0x%p i %d\n", rx_ring, i));
6365 			rx_ring->cq_len = qlge->tx_ring_size;
6366 			rx_ring->cq_size = (uint32_t)
6367 			    (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
6368 			rx_ring->lbq_len = 0;
6369 			rx_ring->lbq_size = 0;
6370 			rx_ring->lbq_buf_size = 0;
6371 			rx_ring->sbq_len = 0;
6372 			rx_ring->sbq_size = 0;
6373 			rx_ring->sbq_buf_size = 0;
6374 			rx_ring->type = TX_Q;
6375 
6376 			QL_PRINT(DBG_GLD,
6377 			    ("%s(%d)Allocating TX completion queue %d on"
6378 			    " cpu %d\n", __func__, qlge->instance,
6379 			    rx_ring->cq_id, rx_ring->cpu));
6380 		}
6381 	}
6382 
6383 	return (DDI_SUCCESS);
6384 }
6385 
6386 static int
ql_start_rx_ring(qlge_t * qlge,struct rx_ring * rx_ring)6387 ql_start_rx_ring(qlge_t *qlge, struct rx_ring *rx_ring)
6388 {
6389 	struct cqicb_t *cqicb = (struct cqicb_t *)rx_ring->cqicb_dma.vaddr;
6390 	void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
6391 	    (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
6392 	/* first shadow area is used by wqicb's host copy of consumer index */
6393 	    + sizeof (uint64_t);
6394 	uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
6395 	    (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
6396 	    + sizeof (uint64_t);
6397 	/* lrg/sml bufq pointers */
6398 	uint8_t *buf_q_base_reg =
6399 	    (uint8_t *)qlge->buf_q_ptr_base_addr_dma_attr.vaddr +
6400 	    (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
6401 	uint64_t buf_q_base_reg_dma =
6402 	    qlge->buf_q_ptr_base_addr_dma_attr.dma_addr +
6403 	    (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
6404 	caddr_t doorbell_area =
6405 	    qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * (128 + rx_ring->cq_id));
6406 	int err = 0;
6407 	uint16_t bq_len;
6408 	uint64_t tmp;
6409 	uint64_t *base_indirect_ptr;
6410 	int page_entries;
6411 
6412 	/* Set up the shadow registers for this ring. */
6413 	rx_ring->prod_idx_sh_reg = shadow_reg;
6414 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
6415 	rx_ring->prod_idx_sh_reg_offset = (off_t)(((rx_ring->cq_id *
6416 	    sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE) + sizeof (uint64_t)));
6417 
6418 	rx_ring->lbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
6419 	rx_ring->lbq_base_indirect_dma = buf_q_base_reg_dma;
6420 
6421 	QL_PRINT(DBG_INIT, ("%s rx ring(%d): prod_idx virtual addr = 0x%lx,"
6422 	    " phys_addr 0x%lx\n", __func__, rx_ring->cq_id,
6423 	    rx_ring->prod_idx_sh_reg, rx_ring->prod_idx_sh_reg_dma));
6424 
6425 	buf_q_base_reg += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
6426 	buf_q_base_reg_dma += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
6427 	rx_ring->sbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
6428 	rx_ring->sbq_base_indirect_dma = buf_q_base_reg_dma;
6429 
6430 	/* PCI doorbell mem area + 0x00 for consumer index register */
6431 	rx_ring->cnsmr_idx_db_reg = (uint32_t *)(void *)doorbell_area;
6432 	rx_ring->cnsmr_idx = 0;
6433 	*rx_ring->prod_idx_sh_reg = 0;
6434 	rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
6435 
6436 	/* PCI doorbell mem area + 0x04 for valid register */
6437 	rx_ring->valid_db_reg = (uint32_t *)(void *)
6438 	    ((uint8_t *)(void *)doorbell_area + 0x04);
6439 
6440 	/* PCI doorbell mem area + 0x18 for large buffer consumer */
6441 	rx_ring->lbq_prod_idx_db_reg = (uint32_t *)(void *)
6442 	    ((uint8_t *)(void *)doorbell_area + 0x18);
6443 
6444 	/* PCI doorbell mem area + 0x1c */
6445 	rx_ring->sbq_prod_idx_db_reg = (uint32_t *)(void *)
6446 	    ((uint8_t *)(void *)doorbell_area + 0x1c);
6447 
6448 	bzero((void *)cqicb, sizeof (*cqicb));
6449 
6450 	cqicb->msix_vect = (uint8_t)rx_ring->irq;
6451 
6452 	bq_len = (uint16_t)((rx_ring->cq_len == 65536) ?
6453 	    (uint16_t)0 : (uint16_t)rx_ring->cq_len);
6454 	cqicb->len = (uint16_t)cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
6455 
6456 	cqicb->cq_base_addr_lo =
6457 	    cpu_to_le32(LS_64BITS(rx_ring->cq_dma.dma_addr));
6458 	cqicb->cq_base_addr_hi =
6459 	    cpu_to_le32(MS_64BITS(rx_ring->cq_dma.dma_addr));
6460 
6461 	cqicb->prod_idx_addr_lo =
6462 	    cpu_to_le32(LS_64BITS(rx_ring->prod_idx_sh_reg_dma));
6463 	cqicb->prod_idx_addr_hi =
6464 	    cpu_to_le32(MS_64BITS(rx_ring->prod_idx_sh_reg_dma));
6465 
6466 	/*
6467 	 * Set up the control block load flags.
6468 	 */
6469 	cqicb->flags = FLAGS_LC | /* Load queue base address */
6470 	    FLAGS_LV | /* Load MSI-X vector */
6471 	    FLAGS_LI;  /* Load irq delay values */
6472 	if (rx_ring->lbq_len) {
6473 		/* Load lbq values */
6474 		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LL);
6475 		tmp = (uint64_t)rx_ring->lbq_dma.dma_addr;
6476 		base_indirect_ptr = (uint64_t *)rx_ring->lbq_base_indirect;
6477 		page_entries = 0;
6478 		do {
6479 			*base_indirect_ptr = cpu_to_le64(tmp);
6480 			tmp += VM_PAGE_SIZE;
6481 			base_indirect_ptr++;
6482 			page_entries++;
6483 		} while (page_entries < (int)(
6484 		    ((rx_ring->lbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
6485 
6486 		cqicb->lbq_addr_lo =
6487 		    cpu_to_le32(LS_64BITS(rx_ring->lbq_base_indirect_dma));
6488 		cqicb->lbq_addr_hi =
6489 		    cpu_to_le32(MS_64BITS(rx_ring->lbq_base_indirect_dma));
6490 		bq_len = (uint16_t)((rx_ring->lbq_buf_size == 65536) ?
6491 		    (uint16_t)0 : (uint16_t)rx_ring->lbq_buf_size);
6492 		cqicb->lbq_buf_size = (uint16_t)cpu_to_le16(bq_len);
6493 		bq_len = (uint16_t)((rx_ring->lbq_len == 65536) ? (uint16_t)0 :
6494 		    (uint16_t)rx_ring->lbq_len);
6495 		cqicb->lbq_len = (uint16_t)cpu_to_le16(bq_len);
6496 		rx_ring->lbq_prod_idx = 0;
6497 		rx_ring->lbq_curr_idx = 0;
6498 	}
6499 	if (rx_ring->sbq_len) {
6500 		/* Load sbq values */
6501 		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LS);
6502 		tmp = (uint64_t)rx_ring->sbq_dma.dma_addr;
6503 		base_indirect_ptr = (uint64_t *)rx_ring->sbq_base_indirect;
6504 		page_entries = 0;
6505 
6506 		do {
6507 			*base_indirect_ptr = cpu_to_le64(tmp);
6508 			tmp += VM_PAGE_SIZE;
6509 			base_indirect_ptr++;
6510 			page_entries++;
6511 		} while (page_entries < (uint32_t)
6512 		    (((rx_ring->sbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
6513 
6514 		cqicb->sbq_addr_lo =
6515 		    cpu_to_le32(LS_64BITS(rx_ring->sbq_base_indirect_dma));
6516 		cqicb->sbq_addr_hi =
6517 		    cpu_to_le32(MS_64BITS(rx_ring->sbq_base_indirect_dma));
6518 		cqicb->sbq_buf_size = (uint16_t)
6519 		    cpu_to_le16((uint16_t)(rx_ring->sbq_buf_size/2));
6520 		bq_len = (uint16_t)((rx_ring->sbq_len == 65536) ?
6521 		    (uint16_t)0 : (uint16_t)rx_ring->sbq_len);
6522 		cqicb->sbq_len = (uint16_t)cpu_to_le16(bq_len);
6523 		rx_ring->sbq_prod_idx = 0;
6524 		rx_ring->sbq_curr_idx = 0;
6525 	}
6526 	switch (rx_ring->type) {
6527 	case TX_Q:
6528 		cqicb->irq_delay = (uint16_t)
6529 		    cpu_to_le16(qlge->tx_coalesce_usecs);
6530 		cqicb->pkt_delay = (uint16_t)
6531 		    cpu_to_le16(qlge->tx_max_coalesced_frames);
6532 		break;
6533 
6534 	case DEFAULT_Q:
6535 		cqicb->irq_delay = (uint16_t)
6536 		    cpu_to_le16(qlge->rx_coalesce_usecs);
6537 		cqicb->pkt_delay = (uint16_t)
6538 		    cpu_to_le16(qlge->rx_max_coalesced_frames);
6539 		break;
6540 
6541 	case RX_Q:
6542 		/*
6543 		 * Inbound completion handling rx_rings run in
6544 		 * separate NAPI contexts.
6545 		 */
6546 		cqicb->irq_delay = (uint16_t)
6547 		    cpu_to_le16(qlge->rx_coalesce_usecs);
6548 		cqicb->pkt_delay = (uint16_t)
6549 		    cpu_to_le16(qlge->rx_max_coalesced_frames);
6550 		break;
6551 	default:
6552 		cmn_err(CE_WARN, "Invalid rx_ring->type = %d.",
6553 		    rx_ring->type);
6554 	}
6555 	QL_PRINT(DBG_INIT, ("Initializing rx completion queue %d.\n",
6556 	    rx_ring->cq_id));
6557 	/* QL_DUMP_CQICB(qlge, cqicb); */
6558 	err = ql_write_cfg(qlge, CFG_LCQ, rx_ring->cqicb_dma.dma_addr,
6559 	    rx_ring->cq_id);
6560 	if (err) {
6561 		cmn_err(CE_WARN, "Failed to load CQICB.");
6562 		return (err);
6563 	}
6564 
6565 	rx_ring->rx_packets_dropped_no_buffer = 0;
6566 	rx_ring->rx_pkt_dropped_mac_unenabled = 0;
6567 	rx_ring->rx_failed_sbq_allocs = 0;
6568 	rx_ring->rx_failed_lbq_allocs = 0;
6569 	rx_ring->rx_packets = 0;
6570 	rx_ring->rx_bytes = 0;
6571 	rx_ring->frame_too_long = 0;
6572 	rx_ring->frame_too_short = 0;
6573 	rx_ring->fcs_err = 0;
6574 
6575 	return (err);
6576 }
6577 
6578 /*
6579  * start RSS
6580  */
6581 static int
ql_start_rss(qlge_t * qlge)6582 ql_start_rss(qlge_t *qlge)
6583 {
6584 	struct ricb *ricb = (struct ricb *)qlge->ricb_dma.vaddr;
6585 	int status = 0;
6586 	int i;
6587 	uint8_t *hash_id = (uint8_t *)ricb->hash_cq_id;
6588 
6589 	bzero((void *)ricb, sizeof (*ricb));
6590 
6591 	ricb->base_cq = RSS_L4K;
6592 	ricb->flags =
6593 	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
6594 	    RSS_RT6);
6595 	ricb->mask = (uint16_t)cpu_to_le16(RSS_HASH_CQ_ID_MAX - 1);
6596 
6597 	/*
6598 	 * Fill out the Indirection Table.
6599 	 */
6600 	for (i = 0; i < RSS_HASH_CQ_ID_MAX; i++)
6601 		hash_id[i] = (uint8_t)(i & (qlge->rss_ring_count - 1));
6602 
6603 	(void) memcpy(&ricb->ipv6_hash_key[0], key_data, 40);
6604 	(void) memcpy(&ricb->ipv4_hash_key[0], key_data, 16);
6605 
6606 	QL_PRINT(DBG_INIT, ("Initializing RSS.\n"));
6607 
6608 	status = ql_write_cfg(qlge, CFG_LR, qlge->ricb_dma.dma_addr, 0);
6609 	if (status) {
6610 		cmn_err(CE_WARN, "Failed to load RICB.");
6611 		return (status);
6612 	}
6613 
6614 	return (status);
6615 }
6616 
6617 /*
6618  * load a tx ring control block to hw and start this ring
6619  */
6620 static int
ql_start_tx_ring(qlge_t * qlge,struct tx_ring * tx_ring)6621 ql_start_tx_ring(qlge_t *qlge, struct tx_ring *tx_ring)
6622 {
6623 	struct wqicb_t *wqicb = (struct wqicb_t *)tx_ring->wqicb_dma.vaddr;
6624 	caddr_t doorbell_area =
6625 	    qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * tx_ring->wq_id);
6626 	void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
6627 	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
6628 	uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
6629 	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
6630 	int err = 0;
6631 
6632 	/*
6633 	 * Assign doorbell registers for this tx_ring.
6634 	 */
6635 
6636 	/* TX PCI doorbell mem area for tx producer index */
6637 	tx_ring->prod_idx_db_reg = (uint32_t *)(void *)doorbell_area;
6638 	tx_ring->prod_idx = 0;
6639 	/* TX PCI doorbell mem area + 0x04 */
6640 	tx_ring->valid_db_reg = (uint32_t *)(void *)
6641 	    ((uint8_t *)(void *)doorbell_area + 0x04);
6642 
6643 	/*
6644 	 * Assign shadow registers for this tx_ring.
6645 	 */
6646 	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
6647 	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
6648 	*tx_ring->cnsmr_idx_sh_reg = 0;
6649 
6650 	QL_PRINT(DBG_INIT, ("%s tx ring(%d): cnsmr_idx virtual addr = 0x%lx,"
6651 	    " phys_addr 0x%lx\n",
6652 	    __func__, tx_ring->wq_id, tx_ring->cnsmr_idx_sh_reg,
6653 	    tx_ring->cnsmr_idx_sh_reg_dma));
6654 
6655 	wqicb->len =
6656 	    (uint16_t)cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
6657 	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
6658 	    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
6659 	wqicb->cq_id_rss = (uint16_t)cpu_to_le16(tx_ring->cq_id);
6660 	wqicb->rid = 0;
6661 	wqicb->wq_addr_lo = cpu_to_le32(LS_64BITS(tx_ring->wq_dma.dma_addr));
6662 	wqicb->wq_addr_hi = cpu_to_le32(MS_64BITS(tx_ring->wq_dma.dma_addr));
6663 	wqicb->cnsmr_idx_addr_lo =
6664 	    cpu_to_le32(LS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
6665 	wqicb->cnsmr_idx_addr_hi =
6666 	    cpu_to_le32(MS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
6667 
6668 	ql_init_tx_ring(tx_ring);
6669 	/* QL_DUMP_WQICB(qlge, wqicb); */
6670 	err = ql_write_cfg(qlge, CFG_LRQ, tx_ring->wqicb_dma.dma_addr,
6671 	    tx_ring->wq_id);
6672 
6673 	if (err) {
6674 		cmn_err(CE_WARN, "Failed to load WQICB.");
6675 		return (err);
6676 	}
6677 	return (err);
6678 }
6679 
6680 /*
6681  * Set up a MAC, multicast or VLAN address for the
6682  * inbound frame matching.
6683  */
6684 int
ql_set_mac_addr_reg(qlge_t * qlge,uint8_t * addr,uint32_t type,uint16_t index)6685 ql_set_mac_addr_reg(qlge_t *qlge, uint8_t *addr, uint32_t type,
6686     uint16_t index)
6687 {
6688 	uint32_t offset = 0;
6689 	int status = DDI_SUCCESS;
6690 
6691 	switch (type) {
6692 	case MAC_ADDR_TYPE_MULTI_MAC:
6693 	case MAC_ADDR_TYPE_CAM_MAC: {
6694 		uint32_t cam_output;
6695 		uint32_t upper = (addr[0] << 8) | addr[1];
6696 		uint32_t lower =
6697 		    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
6698 		    (addr[5]);
6699 
6700 		QL_PRINT(DBG_INIT, ("Adding %s ", (type ==
6701 		    MAC_ADDR_TYPE_MULTI_MAC) ?
6702 		    "MULTICAST" : "UNICAST"));
6703 		QL_PRINT(DBG_INIT,
6704 		    ("addr %02x %02x %02x %02x %02x %02x at index %d in "
6705 		    "the CAM.\n",
6706 		    addr[0], addr[1], addr[2], addr[3], addr[4],
6707 		    addr[5], index));
6708 
6709 		status = ql_wait_reg_rdy(qlge,
6710 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6711 		if (status)
6712 			goto exit;
6713 		/* offset 0 - lower 32 bits of the MAC address */
6714 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6715 		    (offset++) |
6716 		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
6717 		    type);	/* type */
6718 		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, lower);
6719 		status = ql_wait_reg_rdy(qlge,
6720 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6721 		if (status)
6722 			goto exit;
6723 		/* offset 1 - upper 16 bits of the MAC address */
6724 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6725 		    (offset++) |
6726 		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
6727 		    type);	/* type */
6728 		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, upper);
6729 		status = ql_wait_reg_rdy(qlge,
6730 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6731 		if (status)
6732 			goto exit;
6733 		/* offset 2 - CQ ID associated with this MAC address */
6734 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6735 		    (offset) | (index << MAC_ADDR_IDX_SHIFT) |	/* index */
6736 		    type);	/* type */
6737 		/*
6738 		 * This field should also include the queue id
6739 		 * and possibly the function id.  Right now we hardcode
6740 		 * the route field to NIC core.
6741 		 */
6742 		if (type == MAC_ADDR_TYPE_CAM_MAC) {
6743 			cam_output = (CAM_OUT_ROUTE_NIC |
6744 			    (qlge->func_number << CAM_OUT_FUNC_SHIFT) |
6745 			    (0 <<
6746 			    CAM_OUT_CQ_ID_SHIFT));
6747 
6748 			/* route to NIC core */
6749 			ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA,
6750 			    cam_output);
6751 			}
6752 		break;
6753 		}
6754 	default:
6755 		cmn_err(CE_WARN,
6756 		    "Address type %d not yet supported.", type);
6757 		status = DDI_FAILURE;
6758 	}
6759 exit:
6760 	return (status);
6761 }
6762 
6763 /*
6764  * The NIC function for this chip has 16 routing indexes.  Each one can be used
6765  * to route different frame types to various inbound queues.  We send broadcast
6766  * multicast/error frames to the default queue for slow handling,
6767  * and CAM hit/RSS frames to the fast handling queues.
6768  */
6769 static int
ql_set_routing_reg(qlge_t * qlge,uint32_t index,uint32_t mask,int enable)6770 ql_set_routing_reg(qlge_t *qlge, uint32_t index, uint32_t mask, int enable)
6771 {
6772 	int status;
6773 	uint32_t value = 0;
6774 
6775 	QL_PRINT(DBG_INIT,
6776 	    ("%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
6777 	    (enable ? "Adding" : "Removing"),
6778 	    ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
6779 	    ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
6780 	    ((index ==
6781 	    RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
6782 	    ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
6783 	    ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
6784 	    ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
6785 	    ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
6786 	    ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
6787 	    ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
6788 	    ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
6789 	    ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
6790 	    ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
6791 	    ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
6792 	    ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
6793 	    ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
6794 	    ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
6795 	    (enable ? "to" : "from")));
6796 
6797 	switch (mask) {
6798 	case RT_IDX_CAM_HIT:
6799 		value = RT_IDX_DST_CAM_Q | /* dest */
6800 		    RT_IDX_TYPE_NICQ | /* type */
6801 		    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */
6802 		break;
6803 
6804 	case RT_IDX_VALID: /* Promiscuous Mode frames. */
6805 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6806 		    RT_IDX_TYPE_NICQ |	/* type */
6807 		    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */
6808 		break;
6809 
6810 	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
6811 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6812 		    RT_IDX_TYPE_NICQ |	/* type */
6813 		    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
6814 		break;
6815 
6816 	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
6817 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6818 		    RT_IDX_TYPE_NICQ |	/* type */
6819 		    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */
6820 		break;
6821 
6822 	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
6823 		value = RT_IDX_DST_CAM_Q |	/* dest */
6824 		    RT_IDX_TYPE_NICQ |	/* type */
6825 		    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */
6826 		break;
6827 
6828 	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
6829 		value = RT_IDX_DST_CAM_Q |	/* dest */
6830 		    RT_IDX_TYPE_NICQ |	/* type */
6831 		    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6832 		break;
6833 
6834 	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
6835 		value = RT_IDX_DST_RSS |	/* dest */
6836 		    RT_IDX_TYPE_NICQ |	/* type */
6837 		    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6838 		break;
6839 
6840 	case 0:	/* Clear the E-bit on an entry. */
6841 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6842 		    RT_IDX_TYPE_NICQ |	/* type */
6843 		    (index << RT_IDX_IDX_SHIFT); /* index */
6844 		break;
6845 
6846 	default:
6847 		cmn_err(CE_WARN, "Mask type %d not yet supported.",
6848 		    mask);
6849 		status = -EPERM;
6850 		goto exit;
6851 	}
6852 
6853 	if (value != 0) {
6854 		status = ql_wait_reg_rdy(qlge, REG_ROUTING_INDEX, RT_IDX_MW, 0);
6855 		if (status)
6856 			goto exit;
6857 		value |= (enable ? RT_IDX_E : 0);
6858 		ql_write_reg(qlge, REG_ROUTING_INDEX, value);
6859 		ql_write_reg(qlge, REG_ROUTING_DATA, enable ? mask : 0);
6860 	}
6861 
6862 exit:
6863 	return (status);
6864 }
6865 
6866 /*
6867  * Clear all the entries in the routing table.
6868  * Caller must get semaphore in advance.
6869  */
6870 
6871 static int
ql_stop_routing(qlge_t * qlge)6872 ql_stop_routing(qlge_t *qlge)
6873 {
6874 	int status = 0;
6875 	int i;
6876 	/* Clear all the entries in the routing table. */
6877 	for (i = 0; i < 16; i++) {
6878 		status = ql_set_routing_reg(qlge, i, 0, 0);
6879 		if (status) {
6880 			cmn_err(CE_WARN, "Stop routing failed. ");
6881 		}
6882 	}
6883 	return (status);
6884 }
6885 
6886 /* Initialize the frame-to-queue routing. */
6887 int
ql_route_initialize(qlge_t * qlge)6888 ql_route_initialize(qlge_t *qlge)
6889 {
6890 	int status = 0;
6891 
6892 	status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
6893 	if (status != DDI_SUCCESS)
6894 		return (status);
6895 
6896 	/* Clear all the entries in the routing table. */
6897 	status = ql_stop_routing(qlge);
6898 	if (status) {
6899 		goto exit;
6900 	}
6901 	status = ql_set_routing_reg(qlge, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
6902 	if (status) {
6903 		cmn_err(CE_WARN,
6904 		    "Failed to init routing register for broadcast packets.");
6905 		goto exit;
6906 	}
6907 	/*
6908 	 * If we have more than one inbound queue, then turn on RSS in the
6909 	 * routing block.
6910 	 */
6911 	if (qlge->rss_ring_count > 1) {
6912 		status = ql_set_routing_reg(qlge, RT_IDX_RSS_MATCH_SLOT,
6913 		    RT_IDX_RSS_MATCH, 1);
6914 		if (status) {
6915 			cmn_err(CE_WARN,
6916 			    "Failed to init routing register for MATCH RSS "
6917 			    "packets.");
6918 			goto exit;
6919 		}
6920 	}
6921 
6922 	status = ql_set_routing_reg(qlge, RT_IDX_CAM_HIT_SLOT,
6923 	    RT_IDX_CAM_HIT, 1);
6924 	if (status) {
6925 		cmn_err(CE_WARN,
6926 		    "Failed to init routing register for CAM packets.");
6927 		goto exit;
6928 	}
6929 
6930 	status = ql_set_routing_reg(qlge, RT_IDX_MCAST_MATCH_SLOT,
6931 	    RT_IDX_MCAST_MATCH, 1);
6932 	if (status) {
6933 		cmn_err(CE_WARN,
6934 		    "Failed to init routing register for Multicast "
6935 		    "packets.");
6936 	}
6937 
6938 exit:
6939 	ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
6940 	return (status);
6941 }
6942 
6943 /*
6944  * Initialize hardware
6945  */
6946 static int
ql_device_initialize(qlge_t * qlge)6947 ql_device_initialize(qlge_t *qlge)
6948 {
6949 	uint32_t value, mask;
6950 	int i;
6951 	int status = 0;
6952 	uint16_t pause = PAUSE_MODE_DISABLED;
6953 	boolean_t update_port_config = B_FALSE;
6954 	uint32_t pause_bit_mask;
6955 	boolean_t dcbx_enable = B_FALSE;
6956 	uint32_t dcbx_bit_mask = 0x10;
6957 	/*
6958 	 * Set up the System register to halt on errors.
6959 	 */
6960 	value = SYS_EFE | SYS_FAE;
6961 	mask = value << 16;
6962 	ql_write_reg(qlge, REG_SYSTEM, mask | value);
6963 
6964 	/* Set the default queue. */
6965 	value = NIC_RCV_CFG_DFQ;
6966 	mask = NIC_RCV_CFG_DFQ_MASK;
6967 
6968 	ql_write_reg(qlge, REG_NIC_RECEIVE_CONFIGURATION, mask | value);
6969 
6970 	/* Enable the MPI interrupt. */
6971 	ql_write_reg(qlge, REG_INTERRUPT_MASK, (INTR_MASK_PI << 16)
6972 	    | INTR_MASK_PI);
6973 	/* Enable the function, set pagesize, enable error checking. */
6974 	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
6975 	    FSC_EC | FSC_VM_PAGE_4K | FSC_DBRST_1024;
6976 	/* Set/clear header splitting. */
6977 	if (CFG_IST(qlge, CFG_ENABLE_SPLIT_HEADER)) {
6978 		value |= FSC_SH;
6979 		ql_write_reg(qlge, REG_SPLIT_HEADER, SMALL_BUFFER_SIZE);
6980 	}
6981 	mask = FSC_VM_PAGESIZE_MASK |
6982 	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
6983 	ql_write_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL, mask | value);
6984 	/*
6985 	 * check current port max frame size, if different from OS setting,
6986 	 * then we need to change
6987 	 */
6988 	qlge->max_frame_size =
6989 	    (qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE;
6990 
6991 	mutex_enter(&qlge->mbx_mutex);
6992 	status = ql_get_port_cfg(qlge);
6993 	mutex_exit(&qlge->mbx_mutex);
6994 
6995 	if (status == DDI_SUCCESS) {
6996 		/* if current frame size is smaller than required size */
6997 		if (qlge->port_cfg_info.max_frame_size <
6998 		    qlge->max_frame_size) {
6999 			QL_PRINT(DBG_MBX,
7000 			    ("update frame size, current %d, new %d\n",
7001 			    qlge->port_cfg_info.max_frame_size,
7002 			    qlge->max_frame_size));
7003 			qlge->port_cfg_info.max_frame_size =
7004 			    qlge->max_frame_size;
7005 			qlge->port_cfg_info.link_cfg |= ENABLE_JUMBO;
7006 			update_port_config = B_TRUE;
7007 		}
7008 
7009 		if (qlge->port_cfg_info.link_cfg & STD_PAUSE)
7010 			pause = PAUSE_MODE_STANDARD;
7011 		else if (qlge->port_cfg_info.link_cfg & PP_PAUSE)
7012 			pause = PAUSE_MODE_PER_PRIORITY;
7013 
7014 		if (pause != qlge->pause) {
7015 			pause_bit_mask = 0x60;	/* bit 5-6 */
7016 			/* clear pause bits */
7017 			qlge->port_cfg_info.link_cfg &= ~pause_bit_mask;
7018 			if (qlge->pause == PAUSE_MODE_STANDARD)
7019 				qlge->port_cfg_info.link_cfg |= STD_PAUSE;
7020 			else if (qlge->pause == PAUSE_MODE_PER_PRIORITY)
7021 				qlge->port_cfg_info.link_cfg |= PP_PAUSE;
7022 			update_port_config = B_TRUE;
7023 		}
7024 
7025 		if (qlge->port_cfg_info.link_cfg & DCBX_ENABLE)
7026 			dcbx_enable = B_TRUE;
7027 		if (dcbx_enable != qlge->dcbx_enable) {
7028 			qlge->port_cfg_info.link_cfg &= ~dcbx_bit_mask;
7029 			if (qlge->dcbx_enable)
7030 				qlge->port_cfg_info.link_cfg |= DCBX_ENABLE;
7031 		}
7032 
7033 		update_port_config = B_TRUE;
7034 
7035 		/* if need to update port configuration */
7036 		if (update_port_config) {
7037 			mutex_enter(&qlge->mbx_mutex);
7038 			(void) ql_set_mpi_port_config(qlge,
7039 			    qlge->port_cfg_info);
7040 			mutex_exit(&qlge->mbx_mutex);
7041 		}
7042 	} else
7043 		cmn_err(CE_WARN, "ql_get_port_cfg failed");
7044 
7045 	/* Start up the rx queues. */
7046 	for (i = 0; i < qlge->rx_ring_count; i++) {
7047 		status = ql_start_rx_ring(qlge, &qlge->rx_ring[i]);
7048 		if (status) {
7049 			cmn_err(CE_WARN,
7050 			    "Failed to start rx ring[%d]", i);
7051 			return (status);
7052 		}
7053 	}
7054 
7055 	/*
7056 	 * If there is more than one inbound completion queue
7057 	 * then download a RICB to configure RSS.
7058 	 */
7059 	if (qlge->rss_ring_count > 1) {
7060 		status = ql_start_rss(qlge);
7061 		if (status) {
7062 			cmn_err(CE_WARN, "Failed to start RSS.");
7063 			return (status);
7064 		}
7065 	}
7066 
7067 	/* Start up the tx queues. */
7068 	for (i = 0; i < qlge->tx_ring_count; i++) {
7069 		status = ql_start_tx_ring(qlge, &qlge->tx_ring[i]);
7070 		if (status) {
7071 			cmn_err(CE_WARN,
7072 			    "Failed to start tx ring[%d]", i);
7073 			return (status);
7074 		}
7075 	}
7076 	qlge->selected_tx_ring = 0;
7077 	/* Set the frame routing filter. */
7078 	status = ql_route_initialize(qlge);
7079 	if (status) {
7080 		cmn_err(CE_WARN,
7081 		    "Failed to init CAM/Routing tables.");
7082 		return (status);
7083 	}
7084 
7085 	return (status);
7086 }
7087 /*
7088  * Issue soft reset to chip.
7089  */
7090 static int
ql_asic_reset(qlge_t * qlge)7091 ql_asic_reset(qlge_t *qlge)
7092 {
7093 	int status = DDI_SUCCESS;
7094 
7095 	ql_write_reg(qlge, REG_RESET_FAILOVER, FUNCTION_RESET_MASK
7096 	    |FUNCTION_RESET);
7097 
7098 	if (ql_wait_reg_bit(qlge, REG_RESET_FAILOVER, FUNCTION_RESET,
7099 	    BIT_RESET, 0) != DDI_SUCCESS) {
7100 		cmn_err(CE_WARN,
7101 		    "TIMEOUT!!! errored out of resetting the chip!");
7102 		status = DDI_FAILURE;
7103 	}
7104 
7105 	return (status);
7106 }
7107 
7108 /*
7109  * If there are more than MIN_BUFFERS_ARM_COUNT small buffer descriptors in
7110  * its free list, move xMIN_BUFFERS_ARM_COUNT descriptors to its in use list
7111  * to be used by hardware.
7112  */
7113 static void
ql_arm_sbuf(qlge_t * qlge,struct rx_ring * rx_ring)7114 ql_arm_sbuf(qlge_t *qlge, struct rx_ring *rx_ring)
7115 {
7116 	struct bq_desc *sbq_desc;
7117 	int i;
7118 	uint64_t *sbq_entry = rx_ring->sbq_dma.vaddr;
7119 	uint32_t arm_count;
7120 
7121 	if (rx_ring->sbuf_free_count > rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT)
7122 		arm_count = (rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT);
7123 	else {
7124 		/* Adjust to a multiple of 16 */
7125 		arm_count = (rx_ring->sbuf_free_count / 16) * 16;
7126 #ifdef QLGE_LOAD_UNLOAD
7127 		cmn_err(CE_NOTE, "adjust sbuf arm_count %d\n", arm_count);
7128 #endif
7129 	}
7130 	for (i = 0; i < arm_count; i++) {
7131 		sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
7132 		if (sbq_desc == NULL)
7133 			break;
7134 		/* Arm asic */
7135 		*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
7136 		sbq_entry++;
7137 
7138 		/* link the descriptors to in_use_list */
7139 		ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
7140 		rx_ring->sbq_prod_idx++;
7141 	}
7142 	ql_update_sbq_prod_idx(qlge, rx_ring);
7143 }
7144 
7145 /*
7146  * If there are more than MIN_BUFFERS_ARM_COUNT large buffer descriptors in
7147  * its free list, move xMIN_BUFFERS_ARM_COUNT descriptors to its in use list
7148  * to be used by hardware.
7149  */
7150 static void
ql_arm_lbuf(qlge_t * qlge,struct rx_ring * rx_ring)7151 ql_arm_lbuf(qlge_t *qlge, struct rx_ring *rx_ring)
7152 {
7153 	struct bq_desc *lbq_desc;
7154 	int i;
7155 	uint64_t *lbq_entry = rx_ring->lbq_dma.vaddr;
7156 	uint32_t arm_count;
7157 
7158 	if (rx_ring->lbuf_free_count > rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT)
7159 		arm_count = (rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT);
7160 	else {
7161 		/* Adjust to a multiple of 16 */
7162 		arm_count = (rx_ring->lbuf_free_count / 16) * 16;
7163 #ifdef QLGE_LOAD_UNLOAD
7164 		cmn_err(CE_NOTE, "adjust lbuf arm_count %d\n", arm_count);
7165 #endif
7166 	}
7167 	for (i = 0; i < arm_count; i++) {
7168 		lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
7169 		if (lbq_desc == NULL)
7170 			break;
7171 		/* Arm asic */
7172 		*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
7173 		lbq_entry++;
7174 
7175 		/* link the descriptors to in_use_list */
7176 		ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
7177 		rx_ring->lbq_prod_idx++;
7178 	}
7179 	ql_update_lbq_prod_idx(qlge, rx_ring);
7180 }
7181 
7182 
7183 /*
7184  * Initializes the adapter by configuring request and response queues,
7185  * allocates and ARMs small and large receive buffers to the
7186  * hardware
7187  */
7188 static int
ql_bringup_adapter(qlge_t * qlge)7189 ql_bringup_adapter(qlge_t *qlge)
7190 {
7191 	int i;
7192 
7193 	if (ql_device_initialize(qlge) != DDI_SUCCESS) {
7194 		cmn_err(CE_WARN, "?%s(%d): ql_device_initialize failed",
7195 		    __func__, qlge->instance);
7196 		goto err_bringup;
7197 	}
7198 	qlge->sequence |= INIT_ADAPTER_UP;
7199 
7200 #ifdef QLGE_TRACK_BUFFER_USAGE
7201 	for (i = 0; i < qlge->rx_ring_count; i++) {
7202 		if (qlge->rx_ring[i].type != TX_Q) {
7203 			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
7204 			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
7205 		}
7206 		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
7207 	}
7208 #endif
7209 	/* Arm buffers */
7210 	for (i = 0; i < qlge->rx_ring_count; i++) {
7211 		if (qlge->rx_ring[i].type != TX_Q) {
7212 			ql_arm_sbuf(qlge, &qlge->rx_ring[i]);
7213 			ql_arm_lbuf(qlge, &qlge->rx_ring[i]);
7214 		}
7215 	}
7216 
7217 	/* Enable work/request queues */
7218 	for (i = 0; i < qlge->tx_ring_count; i++) {
7219 		if (qlge->tx_ring[i].valid_db_reg)
7220 			ql_write_doorbell_reg(qlge,
7221 			    qlge->tx_ring[i].valid_db_reg,
7222 			    REQ_Q_VALID);
7223 	}
7224 
7225 	/* Enable completion queues */
7226 	for (i = 0; i < qlge->rx_ring_count; i++) {
7227 		if (qlge->rx_ring[i].valid_db_reg)
7228 			ql_write_doorbell_reg(qlge,
7229 			    qlge->rx_ring[i].valid_db_reg,
7230 			    RSP_Q_VALID);
7231 	}
7232 
7233 	for (i = 0; i < qlge->tx_ring_count; i++) {
7234 		mutex_enter(&qlge->tx_ring[i].tx_lock);
7235 		qlge->tx_ring[i].mac_flags = QL_MAC_STARTED;
7236 		mutex_exit(&qlge->tx_ring[i].tx_lock);
7237 	}
7238 
7239 	for (i = 0; i < qlge->rx_ring_count; i++) {
7240 		mutex_enter(&qlge->rx_ring[i].rx_lock);
7241 		qlge->rx_ring[i].mac_flags = QL_MAC_STARTED;
7242 		mutex_exit(&qlge->rx_ring[i].rx_lock);
7243 	}
7244 
7245 	/* This mutex will get re-acquired in enable_completion interrupt */
7246 	mutex_exit(&qlge->hw_mutex);
7247 	/* Traffic can start flowing now */
7248 	ql_enable_all_completion_interrupts(qlge);
7249 	mutex_enter(&qlge->hw_mutex);
7250 
7251 	ql_enable_global_interrupt(qlge);
7252 
7253 	qlge->sequence |= ADAPTER_INIT;
7254 	return (DDI_SUCCESS);
7255 
7256 err_bringup:
7257 	(void) ql_asic_reset(qlge);
7258 	return (DDI_FAILURE);
7259 }
7260 
7261 /*
7262  * Initialize mutexes of each rx/tx rings
7263  */
7264 static int
ql_init_rx_tx_locks(qlge_t * qlge)7265 ql_init_rx_tx_locks(qlge_t *qlge)
7266 {
7267 	struct tx_ring *tx_ring;
7268 	struct rx_ring *rx_ring;
7269 	int i;
7270 
7271 	for (i = 0; i < qlge->tx_ring_count; i++) {
7272 		tx_ring = &qlge->tx_ring[i];
7273 		mutex_init(&tx_ring->tx_lock, NULL, MUTEX_DRIVER,
7274 		    DDI_INTR_PRI(qlge->intr_pri));
7275 	}
7276 
7277 	for (i = 0; i < qlge->rx_ring_count; i++) {
7278 		rx_ring = &qlge->rx_ring[i];
7279 		mutex_init(&rx_ring->rx_lock, NULL, MUTEX_DRIVER,
7280 		    DDI_INTR_PRI(qlge->intr_pri));
7281 		mutex_init(&rx_ring->sbq_lock, NULL, MUTEX_DRIVER,
7282 		    DDI_INTR_PRI(qlge->intr_pri));
7283 		mutex_init(&rx_ring->lbq_lock, NULL, MUTEX_DRIVER,
7284 		    DDI_INTR_PRI(qlge->intr_pri));
7285 	}
7286 
7287 	return (DDI_SUCCESS);
7288 }
7289 
7290 /*ARGSUSED*/
7291 /*
7292  * Simply call pci_ereport_post which generates ereports for errors
7293  * that occur in the PCI local bus configuration status registers.
7294  */
7295 static int
ql_fm_error_cb(dev_info_t * dip,ddi_fm_error_t * err,const void * impl_data)7296 ql_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
7297 {
7298 	pci_ereport_post(dip, err, NULL);
7299 	return (err->fme_status);
7300 }
7301 
7302 static void
ql_fm_init(qlge_t * qlge)7303 ql_fm_init(qlge_t *qlge)
7304 {
7305 	ddi_iblock_cookie_t iblk;
7306 
7307 	QL_PRINT(DBG_INIT, ("ql_fm_init(%d) entered, FMA capability %x\n",
7308 	    qlge->instance, qlge->fm_capabilities));
7309 	/*
7310 	 * Register capabilities with IO Fault Services. The capabilities
7311 	 * set above may not be supported by the parent nexus, in that case
7312 	 * some capability bits may be cleared.
7313 	 */
7314 	if (qlge->fm_capabilities)
7315 		ddi_fm_init(qlge->dip, &qlge->fm_capabilities, &iblk);
7316 
7317 	/*
7318 	 * Initialize pci ereport capabilities if ereport capable
7319 	 */
7320 	if (DDI_FM_EREPORT_CAP(qlge->fm_capabilities) ||
7321 	    DDI_FM_ERRCB_CAP(qlge->fm_capabilities)) {
7322 		pci_ereport_setup(qlge->dip);
7323 	}
7324 
7325 	/* Register error callback if error callback capable */
7326 	if (DDI_FM_ERRCB_CAP(qlge->fm_capabilities)) {
7327 		ddi_fm_handler_register(qlge->dip,
7328 		    ql_fm_error_cb, (void*) qlge);
7329 	}
7330 
7331 	/*
7332 	 * DDI_FLGERR_ACC indicates:
7333 	 *  Driver will check its access handle(s) for faults on
7334 	 *   a regular basis by calling ddi_fm_acc_err_get
7335 	 *  Driver is able to cope with incorrect results of I/O
7336 	 *   operations resulted from an I/O fault
7337 	 */
7338 	if (DDI_FM_ACC_ERR_CAP(qlge->fm_capabilities)) {
7339 		ql_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
7340 	}
7341 
7342 	/*
7343 	 * DDI_DMA_FLAGERR indicates:
7344 	 *  Driver will check its DMA handle(s) for faults on a
7345 	 *   regular basis using ddi_fm_dma_err_get
7346 	 *  Driver is able to cope with incorrect results of DMA
7347 	 *   operations resulted from an I/O fault
7348 	 */
7349 	if (DDI_FM_DMA_ERR_CAP(qlge->fm_capabilities)) {
7350 		tx_mapping_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7351 		dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7352 	}
7353 	QL_PRINT(DBG_INIT, ("ql_fm_init(%d) done\n",
7354 	    qlge->instance));
7355 }
7356 
7357 static void
ql_fm_fini(qlge_t * qlge)7358 ql_fm_fini(qlge_t *qlge)
7359 {
7360 	QL_PRINT(DBG_INIT, ("ql_fm_fini(%d) entered\n",
7361 	    qlge->instance));
7362 	/* Only unregister FMA capabilities if we registered some */
7363 	if (qlge->fm_capabilities) {
7364 
7365 		/*
7366 		 * Release any resources allocated by pci_ereport_setup()
7367 		 */
7368 		if (DDI_FM_EREPORT_CAP(qlge->fm_capabilities) ||
7369 		    DDI_FM_ERRCB_CAP(qlge->fm_capabilities))
7370 			pci_ereport_teardown(qlge->dip);
7371 
7372 		/*
7373 		 * Un-register error callback if error callback capable
7374 		 */
7375 		if (DDI_FM_ERRCB_CAP(qlge->fm_capabilities))
7376 			ddi_fm_handler_unregister(qlge->dip);
7377 
7378 		/* Unregister from IO Fault Services */
7379 		ddi_fm_fini(qlge->dip);
7380 	}
7381 	QL_PRINT(DBG_INIT, ("ql_fm_fini(%d) done\n",
7382 	    qlge->instance));
7383 }
7384 /*
7385  * ql_attach - Driver attach.
7386  */
7387 static int
ql_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)7388 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
7389 {
7390 	int instance;
7391 	qlge_t *qlge = NULL;
7392 	int rval;
7393 	uint16_t w;
7394 	mac_register_t *macp = NULL;
7395 	uint32_t data;
7396 
7397 	rval = DDI_FAILURE;
7398 
7399 	/* first get the instance */
7400 	instance = ddi_get_instance(dip);
7401 
7402 	switch (cmd) {
7403 	case DDI_ATTACH:
7404 		/*
7405 		 * Allocate our per-device-instance structure
7406 		 */
7407 		qlge = (qlge_t *)kmem_zalloc(sizeof (*qlge), KM_SLEEP);
7408 		ASSERT(qlge != NULL);
7409 		qlge->sequence |= INIT_SOFTSTATE_ALLOC;
7410 
7411 		qlge->dip = dip;
7412 		qlge->instance = instance;
7413 		/* Set up the coalescing parameters. */
7414 		qlge->ql_dbgprnt = 0;
7415 #if QL_DEBUG
7416 		qlge->ql_dbgprnt = QL_DEBUG;
7417 #endif /* QL_DEBUG */
7418 
7419 		/*
7420 		 * Initialize for fma support
7421 		 */
7422 		/* fault management (fm) capabilities. */
7423 		qlge->fm_capabilities =
7424 		    DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE;
7425 		data = ql_get_prop(qlge, "fm-capable");
7426 		if (data <= 0xf) {
7427 			qlge->fm_capabilities = data;
7428 		}
7429 		ql_fm_init(qlge);
7430 		qlge->sequence |= INIT_FM;
7431 		QL_PRINT(DBG_INIT, ("ql_attach(%d): fma init done\n",
7432 		    qlge->instance));
7433 
7434 		/*
7435 		 * Setup the ISP8x00 registers address mapping to be
7436 		 * accessed by this particular driver.
7437 		 * 0x0   Configuration Space
7438 		 * 0x1   I/O Space
7439 		 * 0x2   1st Memory Space address - Control Register Set
7440 		 * 0x3   2nd Memory Space address - Doorbell Memory Space
7441 		 */
7442 		w = 2;
7443 		if (ddi_regs_map_setup(dip, w, (caddr_t *)&qlge->iobase, 0,
7444 		    sizeof (dev_reg_t), &ql_dev_acc_attr,
7445 		    &qlge->dev_handle) != DDI_SUCCESS) {
7446 			cmn_err(CE_WARN, "%s(%d): Unable to map device "
7447 			    "registers", ADAPTER_NAME, instance);
7448 			break;
7449 		}
7450 		QL_PRINT(DBG_GLD, ("ql_attach: I/O base = 0x%x\n",
7451 		    qlge->iobase));
7452 		qlge->sequence |= INIT_REGS_SETUP;
7453 
7454 		/* map Doorbell memory space */
7455 		w = 3;
7456 		if (ddi_regs_map_setup(dip, w,
7457 		    (caddr_t *)&qlge->doorbell_reg_iobase, 0,
7458 		    0x100000 /* sizeof (dev_doorbell_reg_t) */,
7459 		    &ql_dev_acc_attr,
7460 		    &qlge->dev_doorbell_reg_handle) != DDI_SUCCESS) {
7461 			cmn_err(CE_WARN, "%s(%d): Unable to map Doorbell "
7462 			    "registers",
7463 			    ADAPTER_NAME, instance);
7464 			break;
7465 		}
7466 		QL_PRINT(DBG_GLD, ("ql_attach: Doorbell I/O base = 0x%x\n",
7467 		    qlge->doorbell_reg_iobase));
7468 		qlge->sequence |= INIT_DOORBELL_REGS_SETUP;
7469 
7470 		/*
7471 		 * Allocate a macinfo structure for this instance
7472 		 */
7473 		if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
7474 			cmn_err(CE_WARN, "%s(%d): mac_alloc failed",
7475 			    __func__, instance);
7476 			break;
7477 		}
7478 		/* save adapter status to dip private data */
7479 		ddi_set_driver_private(dip, qlge);
7480 		QL_PRINT(DBG_INIT, ("%s(%d): Allocate macinfo structure done\n",
7481 		    ADAPTER_NAME, instance));
7482 		qlge->sequence |= INIT_MAC_ALLOC;
7483 
7484 		/*
7485 		 * Attach this instance of the device
7486 		 */
7487 		/* Setup PCI Local Bus Configuration resource. */
7488 		if (pci_config_setup(dip, &qlge->pci_handle) != DDI_SUCCESS) {
7489 			cmn_err(CE_WARN, "%s(%d):Unable to get PCI resources",
7490 			    ADAPTER_NAME, instance);
7491 			if (qlge->fm_enable) {
7492 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
7493 				ddi_fm_service_impact(qlge->dip,
7494 				    DDI_SERVICE_LOST);
7495 			}
7496 			break;
7497 		}
7498 		qlge->sequence |= INIT_PCI_CONFIG_SETUP;
7499 		QL_PRINT(DBG_GLD, ("ql_attach(%d): pci_config_setup done\n",
7500 		    instance));
7501 
7502 		if (ql_init_instance(qlge) != DDI_SUCCESS) {
7503 			cmn_err(CE_WARN, "%s(%d): Unable to initialize device "
7504 			    "instance", ADAPTER_NAME, instance);
7505 			if (qlge->fm_enable) {
7506 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
7507 				ddi_fm_service_impact(qlge->dip,
7508 				    DDI_SERVICE_LOST);
7509 			}
7510 			break;
7511 		}
7512 		QL_PRINT(DBG_GLD, ("ql_attach(%d): ql_init_instance done\n",
7513 		    instance));
7514 
7515 		/* Setup interrupt vectors */
7516 		if (ql_alloc_irqs(qlge) != DDI_SUCCESS) {
7517 			break;
7518 		}
7519 		qlge->sequence |= INIT_INTR_ALLOC;
7520 		QL_PRINT(DBG_GLD, ("ql_attach(%d): ql_alloc_irqs done\n",
7521 		    instance));
7522 
7523 		/* Configure queues */
7524 		if (ql_setup_rings(qlge) != DDI_SUCCESS) {
7525 			break;
7526 		}
7527 		qlge->sequence |= INIT_SETUP_RINGS;
7528 		QL_PRINT(DBG_GLD, ("ql_attach(%d): setup rings done\n",
7529 		    instance));
7530 
7531 		/*
7532 		 * Allocate memory resources
7533 		 */
7534 		if (ql_alloc_mem_resources(qlge) != DDI_SUCCESS) {
7535 			cmn_err(CE_WARN, "%s(%d): memory allocation failed",
7536 			    __func__, qlge->instance);
7537 			break;
7538 		}
7539 		qlge->sequence |= INIT_MEMORY_ALLOC;
7540 		QL_PRINT(DBG_GLD, ("ql_alloc_mem_resources(%d) done\n",
7541 		    instance));
7542 
7543 		/*
7544 		 * Map queues to interrupt vectors
7545 		 */
7546 		ql_resolve_queues_to_irqs(qlge);
7547 
7548 		/* Initialize mutex, need the interrupt priority */
7549 		(void) ql_init_rx_tx_locks(qlge);
7550 		qlge->sequence |= INIT_LOCKS_CREATED;
7551 		QL_PRINT(DBG_INIT, ("%s(%d): ql_init_rx_tx_locks done\n",
7552 		    ADAPTER_NAME, instance));
7553 
7554 		/*
7555 		 * Use a soft interrupt to do something that we do not want
7556 		 * to do in regular network functions or with mutexs being held
7557 		 */
7558 		if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_event_intr_hdl,
7559 		    DDI_INTR_SOFTPRI_MIN, ql_mpi_event_work, (caddr_t)qlge)
7560 		    != DDI_SUCCESS) {
7561 			break;
7562 		}
7563 
7564 		if (ddi_intr_add_softint(qlge->dip, &qlge->asic_reset_intr_hdl,
7565 		    DDI_INTR_SOFTPRI_MIN, ql_asic_reset_work, (caddr_t)qlge)
7566 		    != DDI_SUCCESS) {
7567 			break;
7568 		}
7569 
7570 		if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_reset_intr_hdl,
7571 		    DDI_INTR_SOFTPRI_MIN, ql_mpi_reset_work, (caddr_t)qlge)
7572 		    != DDI_SUCCESS) {
7573 			break;
7574 		}
7575 		qlge->sequence |= INIT_ADD_SOFT_INTERRUPT;
7576 		QL_PRINT(DBG_INIT, ("%s(%d): ddi_intr_add_softint done\n",
7577 		    ADAPTER_NAME, instance));
7578 
7579 		/*
7580 		 * mutex to protect the adapter state structure.
7581 		 * initialize mutexes according to the interrupt priority
7582 		 */
7583 		mutex_init(&qlge->gen_mutex, NULL, MUTEX_DRIVER,
7584 		    DDI_INTR_PRI(qlge->intr_pri));
7585 		mutex_init(&qlge->hw_mutex, NULL, MUTEX_DRIVER,
7586 		    DDI_INTR_PRI(qlge->intr_pri));
7587 		mutex_init(&qlge->mbx_mutex, NULL, MUTEX_DRIVER,
7588 		    DDI_INTR_PRI(qlge->intr_pri));
7589 
7590 		/* Mailbox wait and interrupt conditional variable. */
7591 		cv_init(&qlge->cv_mbx_intr, NULL, CV_DRIVER, NULL);
7592 		qlge->sequence |= INIT_MUTEX;
7593 		QL_PRINT(DBG_INIT, ("%s(%d): mutex_init done\n",
7594 		    ADAPTER_NAME, instance));
7595 
7596 		/*
7597 		 * KStats
7598 		 */
7599 		if (ql_init_kstats(qlge) != DDI_SUCCESS) {
7600 			cmn_err(CE_WARN, "%s(%d): KState initialization failed",
7601 			    ADAPTER_NAME, instance);
7602 			break;
7603 		}
7604 		qlge->sequence |= INIT_KSTATS;
7605 		QL_PRINT(DBG_INIT, ("%s(%d): ql_init_kstats done\n",
7606 		    ADAPTER_NAME, instance));
7607 
7608 		/*
7609 		 * Initialize gld macinfo structure
7610 		 */
7611 		ql_gld3_init(qlge, macp);
7612 		/*
7613 		 * Add interrupt handlers
7614 		 */
7615 		if (ql_add_intr_handlers(qlge) != DDI_SUCCESS) {
7616 			cmn_err(CE_WARN, "Failed to add interrupt "
7617 			    "handlers");
7618 			break;
7619 		}
7620 		qlge->sequence |= INIT_ADD_INTERRUPT;
7621 		QL_PRINT(DBG_INIT, ("%s(%d): Add interrupt handler done\n",
7622 		    ADAPTER_NAME, instance));
7623 
7624 		/*
7625 		 * MAC Register
7626 		 */
7627 		if (mac_register(macp, &qlge->mh) != DDI_SUCCESS) {
7628 			cmn_err(CE_WARN, "%s(%d): mac_register failed",
7629 			    __func__, instance);
7630 			break;
7631 		}
7632 		qlge->sequence |= INIT_MAC_REGISTERED;
7633 		QL_PRINT(DBG_GLD, ("%s(%d): mac_register done\n",
7634 		    ADAPTER_NAME, instance));
7635 
7636 		mac_free(macp);
7637 		macp = NULL;
7638 
7639 		qlge->mac_flags = QL_MAC_ATTACHED;
7640 
7641 		ddi_report_dev(dip);
7642 
7643 		rval = DDI_SUCCESS;
7644 
7645 	break;
7646 /*
7647  * DDI_RESUME
7648  * When called  with  cmd  set  to  DDI_RESUME,  attach()  must
7649  * restore  the hardware state of a device (power may have been
7650  * removed from the device), allow  pending  requests  to  con-
7651  * tinue,  and  service  new requests. In this case, the driver
7652  * must not  make  any  assumptions  about  the  state  of  the
7653  * hardware,  but  must  restore the state of the device except
7654  * for the power level of components.
7655  *
7656  */
7657 	case DDI_RESUME:
7658 
7659 		if ((qlge = (qlge_t *)QL_GET_DEV(dip)) == NULL)
7660 			return (DDI_FAILURE);
7661 
7662 		QL_PRINT(DBG_GLD, ("%s(%d)-DDI_RESUME\n",
7663 		    __func__, qlge->instance));
7664 
7665 		mutex_enter(&qlge->gen_mutex);
7666 		rval = ql_do_start(qlge);
7667 		mutex_exit(&qlge->gen_mutex);
7668 		break;
7669 
7670 	default:
7671 		break;
7672 	}
7673 
7674 	/* if failed to attach */
7675 	if ((cmd == DDI_ATTACH) && (rval != DDI_SUCCESS) && (qlge != NULL)) {
7676 		cmn_err(CE_WARN, "qlge driver attach failed, sequence %x",
7677 		    qlge->sequence);
7678 		ql_free_resources(qlge);
7679 	}
7680 
7681 	return (rval);
7682 }
7683 
7684 /*
7685  * Unbind all pending tx dma handles during driver bring down
7686  */
7687 static void
ql_unbind_pending_tx_dma_handle(struct tx_ring * tx_ring)7688 ql_unbind_pending_tx_dma_handle(struct tx_ring *tx_ring)
7689 {
7690 	struct tx_ring_desc *tx_ring_desc;
7691 	int i, j;
7692 
7693 	if (tx_ring->wq_desc) {
7694 		tx_ring_desc = tx_ring->wq_desc;
7695 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
7696 			for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
7697 				if (tx_ring_desc->tx_dma_handle[j]) {
7698 					(void) ddi_dma_unbind_handle(
7699 					    tx_ring_desc->tx_dma_handle[j]);
7700 				}
7701 			}
7702 			tx_ring_desc->tx_dma_handle_used = 0;
7703 		} /* end of for loop */
7704 	}
7705 }
7706 /*
7707  * Wait for all the packets sent to the chip to finish transmission
7708  * to prevent buffers to be unmapped before or during a transmit operation
7709  */
7710 static int
ql_wait_tx_quiesce(qlge_t * qlge)7711 ql_wait_tx_quiesce(qlge_t *qlge)
7712 {
7713 	int count = MAX_TX_WAIT_COUNT, i;
7714 	int rings_done;
7715 	volatile struct tx_ring *tx_ring;
7716 	uint32_t consumer_idx;
7717 	uint32_t producer_idx;
7718 	uint32_t temp;
7719 	int done = 0;
7720 	int rval = DDI_FAILURE;
7721 
7722 	while (!done) {
7723 		rings_done = 0;
7724 
7725 		for (i = 0; i < qlge->tx_ring_count; i++) {
7726 			tx_ring = &qlge->tx_ring[i];
7727 			temp = ql_read_doorbell_reg(qlge,
7728 			    tx_ring->prod_idx_db_reg);
7729 			producer_idx = temp & 0x0000ffff;
7730 			consumer_idx = (temp >> 16);
7731 
7732 			if (qlge->isr_stride) {
7733 				struct rx_ring *ob_ring;
7734 				ob_ring = &qlge->rx_ring[tx_ring->cq_id];
7735 				if (producer_idx != ob_ring->cnsmr_idx) {
7736 					cmn_err(CE_NOTE, " force clean \n");
7737 					(void) ql_clean_outbound_rx_ring(
7738 					    ob_ring);
7739 				}
7740 			}
7741 			/*
7742 			 * Get the pending iocb count, ones which have not been
7743 			 * pulled down by the chip
7744 			 */
7745 			if (producer_idx >= consumer_idx)
7746 				temp = (producer_idx - consumer_idx);
7747 			else
7748 				temp = (tx_ring->wq_len - consumer_idx) +
7749 				    producer_idx;
7750 
7751 			if ((tx_ring->tx_free_count + temp) >= tx_ring->wq_len)
7752 				rings_done++;
7753 			else {
7754 				done = 1;
7755 				break;
7756 			}
7757 		}
7758 
7759 		/* If all the rings are done */
7760 		if (rings_done >= qlge->tx_ring_count) {
7761 #ifdef QLGE_LOAD_UNLOAD
7762 			cmn_err(CE_NOTE, "%s(%d) done successfully \n",
7763 			    __func__, qlge->instance);
7764 #endif
7765 			rval = DDI_SUCCESS;
7766 			break;
7767 		}
7768 
7769 		qlge_delay(100);
7770 
7771 		count--;
7772 		if (!count) {
7773 
7774 			count = MAX_TX_WAIT_COUNT;
7775 #ifdef QLGE_LOAD_UNLOAD
7776 			volatile struct rx_ring *rx_ring;
7777 			cmn_err(CE_NOTE, "%s(%d): Waiting for %d pending"
7778 			    " Transmits on queue %d to complete .\n",
7779 			    __func__, qlge->instance,
7780 			    (qlge->tx_ring[i].wq_len -
7781 			    qlge->tx_ring[i].tx_free_count),
7782 			    i);
7783 
7784 			rx_ring = &qlge->rx_ring[i+1];
7785 			temp = ql_read_doorbell_reg(qlge,
7786 			    rx_ring->cnsmr_idx_db_reg);
7787 			consumer_idx = temp & 0x0000ffff;
7788 			producer_idx = (temp >> 16);
7789 			cmn_err(CE_NOTE, "%s(%d): Transmit completion queue %d,"
7790 			    " Producer %d, Consumer %d\n",
7791 			    __func__, qlge->instance,
7792 			    i+1,
7793 			    producer_idx, consumer_idx);
7794 
7795 			temp = ql_read_doorbell_reg(qlge,
7796 			    tx_ring->prod_idx_db_reg);
7797 			producer_idx = temp & 0x0000ffff;
7798 			consumer_idx = (temp >> 16);
7799 			cmn_err(CE_NOTE, "%s(%d): Transmit request queue %d,"
7800 			    " Producer %d, Consumer %d\n",
7801 			    __func__, qlge->instance, i,
7802 			    producer_idx, consumer_idx);
7803 #endif
7804 
7805 			/* For now move on */
7806 			break;
7807 		}
7808 	}
7809 	/* Stop the request queue */
7810 	mutex_enter(&qlge->hw_mutex);
7811 	for (i = 0; i < qlge->tx_ring_count; i++) {
7812 		if (qlge->tx_ring[i].valid_db_reg) {
7813 			ql_write_doorbell_reg(qlge,
7814 			    qlge->tx_ring[i].valid_db_reg, 0);
7815 		}
7816 	}
7817 	mutex_exit(&qlge->hw_mutex);
7818 	return (rval);
7819 }
7820 
7821 /*
7822  * Wait for all the receives indicated to the stack to come back
7823  */
7824 static int
ql_wait_rx_complete(qlge_t * qlge)7825 ql_wait_rx_complete(qlge_t *qlge)
7826 {
7827 	int i;
7828 	/* Disable all the completion queues */
7829 	mutex_enter(&qlge->hw_mutex);
7830 	for (i = 0; i < qlge->rx_ring_count; i++) {
7831 		if (qlge->rx_ring[i].valid_db_reg) {
7832 			ql_write_doorbell_reg(qlge,
7833 			    qlge->rx_ring[i].valid_db_reg, 0);
7834 		}
7835 	}
7836 	mutex_exit(&qlge->hw_mutex);
7837 
7838 	/* Wait for OS to return all rx buffers */
7839 	qlge_delay(QL_ONE_SEC_DELAY);
7840 	return (DDI_SUCCESS);
7841 }
7842 
7843 /*
7844  * stop the driver
7845  */
7846 static int
ql_bringdown_adapter(qlge_t * qlge)7847 ql_bringdown_adapter(qlge_t *qlge)
7848 {
7849 	int i;
7850 	int status = DDI_SUCCESS;
7851 
7852 	qlge->mac_flags = QL_MAC_BRINGDOWN;
7853 	if (qlge->sequence & ADAPTER_INIT) {
7854 		/* stop forwarding external packets to driver */
7855 		status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
7856 		if (status)
7857 			return (status);
7858 		(void) ql_stop_routing(qlge);
7859 		ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
7860 		/*
7861 		 * Set the flag for receive and transmit
7862 		 * operations to cease
7863 		 */
7864 		for (i = 0; i < qlge->tx_ring_count; i++) {
7865 			mutex_enter(&qlge->tx_ring[i].tx_lock);
7866 			qlge->tx_ring[i].mac_flags = QL_MAC_STOPPED;
7867 			mutex_exit(&qlge->tx_ring[i].tx_lock);
7868 		}
7869 
7870 		for (i = 0; i < qlge->rx_ring_count; i++) {
7871 			mutex_enter(&qlge->rx_ring[i].rx_lock);
7872 			qlge->rx_ring[i].mac_flags = QL_MAC_STOPPED;
7873 			mutex_exit(&qlge->rx_ring[i].rx_lock);
7874 		}
7875 
7876 		/*
7877 		 * Need interrupts to be running while the transmit
7878 		 * completions are cleared. Wait for the packets
7879 		 * queued to the chip to be sent out
7880 		 */
7881 		(void) ql_wait_tx_quiesce(qlge);
7882 		/* Interrupts not needed from now */
7883 		ql_disable_all_completion_interrupts(qlge);
7884 
7885 		mutex_enter(&qlge->hw_mutex);
7886 		/* Disable Global interrupt */
7887 		ql_disable_global_interrupt(qlge);
7888 		mutex_exit(&qlge->hw_mutex);
7889 
7890 		/* Wait for all the indicated packets to come back */
7891 		status = ql_wait_rx_complete(qlge);
7892 
7893 		mutex_enter(&qlge->hw_mutex);
7894 		/* Reset adapter */
7895 		(void) ql_asic_reset(qlge);
7896 		/*
7897 		 * Unbind all tx dma handles to prevent pending tx descriptors'
7898 		 * dma handles from being re-used.
7899 		 */
7900 		for (i = 0; i < qlge->tx_ring_count; i++) {
7901 			ql_unbind_pending_tx_dma_handle(&qlge->tx_ring[i]);
7902 		}
7903 
7904 		qlge->sequence &= ~ADAPTER_INIT;
7905 
7906 		mutex_exit(&qlge->hw_mutex);
7907 	}
7908 	return (status);
7909 }
7910 
7911 /*
7912  * ql_detach
7913  * Used to remove all the states associated with a given
7914  * instances of a device node prior to the removal of that
7915  * instance from the system.
7916  */
7917 static int
ql_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)7918 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
7919 {
7920 	qlge_t *qlge;
7921 	int rval;
7922 
7923 	rval = DDI_SUCCESS;
7924 
7925 	switch (cmd) {
7926 	case DDI_DETACH:
7927 
7928 		if ((qlge = QL_GET_DEV(dip)) == NULL)
7929 			return (DDI_FAILURE);
7930 		rval = ql_bringdown_adapter(qlge);
7931 		if (rval != DDI_SUCCESS)
7932 			break;
7933 
7934 		qlge->mac_flags = QL_MAC_DETACH;
7935 
7936 		/* free memory resources */
7937 		if (qlge->sequence & INIT_MEMORY_ALLOC) {
7938 			ql_free_mem_resources(qlge);
7939 			qlge->sequence &= ~INIT_MEMORY_ALLOC;
7940 		}
7941 		ql_free_resources(qlge);
7942 
7943 		break;
7944 
7945 	case DDI_SUSPEND:
7946 		if ((qlge = QL_GET_DEV(dip)) == NULL)
7947 			return (DDI_FAILURE);
7948 
7949 		mutex_enter(&qlge->gen_mutex);
7950 		if ((qlge->mac_flags == QL_MAC_ATTACHED) ||
7951 		    (qlge->mac_flags == QL_MAC_STARTED)) {
7952 			(void) ql_do_stop(qlge);
7953 		}
7954 		qlge->mac_flags = QL_MAC_SUSPENDED;
7955 		mutex_exit(&qlge->gen_mutex);
7956 
7957 		break;
7958 	default:
7959 		rval = DDI_FAILURE;
7960 		break;
7961 	}
7962 
7963 	return (rval);
7964 }
7965 
7966 /*
7967  * quiesce(9E) entry point.
7968  *
7969  * This function is called when the system is single-threaded at high
7970  * PIL with preemption disabled. Therefore, this function must not be
7971  * blocked.
7972  *
7973  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
7974  */
7975 int
ql_quiesce(dev_info_t * dip)7976 ql_quiesce(dev_info_t *dip)
7977 {
7978 	qlge_t *qlge;
7979 	int i;
7980 
7981 	if ((qlge = QL_GET_DEV(dip)) == NULL)
7982 		return (DDI_FAILURE);
7983 
7984 	if (CFG_IST(qlge, CFG_CHIP_8100)) {
7985 		/* stop forwarding external packets to driver */
7986 		(void) ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
7987 		(void) ql_stop_routing(qlge);
7988 		ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
7989 		/* Stop all the request queues */
7990 		for (i = 0; i < qlge->tx_ring_count; i++) {
7991 			if (qlge->tx_ring[i].valid_db_reg) {
7992 				ql_write_doorbell_reg(qlge,
7993 				    qlge->tx_ring[i].valid_db_reg, 0);
7994 			}
7995 		}
7996 		qlge_delay(QL_ONE_SEC_DELAY/4);
7997 		/* Interrupts not needed from now */
7998 		/* Disable MPI interrupt */
7999 		ql_write_reg(qlge, REG_INTERRUPT_MASK,
8000 		    (INTR_MASK_PI << 16));
8001 		ql_disable_global_interrupt(qlge);
8002 
8003 		/* Disable all the rx completion queues */
8004 		for (i = 0; i < qlge->rx_ring_count; i++) {
8005 			if (qlge->rx_ring[i].valid_db_reg) {
8006 				ql_write_doorbell_reg(qlge,
8007 				    qlge->rx_ring[i].valid_db_reg, 0);
8008 			}
8009 		}
8010 		qlge_delay(QL_ONE_SEC_DELAY/4);
8011 		qlge->mac_flags = QL_MAC_STOPPED;
8012 		/* Reset adapter */
8013 		(void) ql_asic_reset(qlge);
8014 		qlge_delay(100);
8015 	}
8016 
8017 	return (DDI_SUCCESS);
8018 }
8019 
8020 QL_STREAM_OPS(ql_ops, ql_attach, ql_detach);
8021 
8022 /*
8023  * Loadable Driver Interface Structures.
8024  * Declare and initialize the module configuration section...
8025  */
8026 static struct modldrv modldrv = {
8027 	&mod_driverops,		/* type of module: driver */
8028 	version,		/* name of module */
8029 	&ql_ops			/* driver dev_ops */
8030 };
8031 
8032 static struct modlinkage modlinkage = {
8033 	MODREV_1, 	&modldrv,	NULL
8034 };
8035 
8036 /*
8037  * Loadable Module Routines
8038  */
8039 
8040 /*
8041  * _init
8042  * Initializes a loadable module. It is called before any other
8043  * routine in a loadable module.
8044  */
8045 int
_init(void)8046 _init(void)
8047 {
8048 	int rval;
8049 
8050 	mac_init_ops(&ql_ops, ADAPTER_NAME);
8051 	rval = mod_install(&modlinkage);
8052 	if (rval != DDI_SUCCESS) {
8053 		mac_fini_ops(&ql_ops);
8054 		cmn_err(CE_WARN, "?Unable to install/attach driver '%s'",
8055 		    ADAPTER_NAME);
8056 	}
8057 
8058 	return (rval);
8059 }
8060 
8061 /*
8062  * _fini
8063  * Prepares a module for unloading. It is called when the system
8064  * wants to unload a module. If the module determines that it can
8065  * be unloaded, then _fini() returns the value returned by
8066  * mod_remove(). Upon successful return from _fini() no other
8067  * routine in the module will be called before _init() is called.
8068  */
8069 int
_fini(void)8070 _fini(void)
8071 {
8072 	int rval;
8073 
8074 	rval = mod_remove(&modlinkage);
8075 	if (rval == DDI_SUCCESS) {
8076 		mac_fini_ops(&ql_ops);
8077 	}
8078 
8079 	return (rval);
8080 }
8081 
8082 /*
8083  * _info
8084  * Returns information about loadable module.
8085  */
8086 int
_info(struct modinfo * modinfop)8087 _info(struct modinfo *modinfop)
8088 {
8089 	return (mod_info(&modlinkage, modinfop));
8090 }
8091