1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 QLogic Corporation. All rights reserved.
24 */
25
26 /*
27 * Copyright (c) 2018, Joyent, Inc.
28 */
29
30 #include <qlge.h>
31 #include <sys/atomic.h>
32 #include <sys/strsubr.h>
33 #include <sys/pattr.h>
34 #include <netinet/in.h>
35 #include <netinet/ip.h>
36 #include <netinet/ip6.h>
37 #include <netinet/tcp.h>
38 #include <netinet/udp.h>
39 #include <inet/ip.h>
40
41
42
43 /*
44 * Local variables
45 */
46 static struct ether_addr ql_ether_broadcast_addr =
47 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
48 static char version[] = "GLDv3 QLogic 81XX " VERSIONSTR;
49
50 /*
51 * Local function prototypes
52 */
53 static void ql_free_resources(qlge_t *);
54 static void ql_fini_kstats(qlge_t *);
55 static uint32_t ql_get_link_state(qlge_t *);
56 static void ql_read_conf(qlge_t *);
57 static int ql_alloc_phys(dev_info_t *, ddi_dma_handle_t *,
58 ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
59 size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
60 static int ql_alloc_phys_rbuf(dev_info_t *, ddi_dma_handle_t *,
61 ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
62 size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
63 static void ql_free_phys(ddi_dma_handle_t *, ddi_acc_handle_t *);
64 static int ql_set_routing_reg(qlge_t *, uint32_t, uint32_t, int);
65 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
66 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
67 static int ql_bringdown_adapter(qlge_t *);
68 static int ql_bringup_adapter(qlge_t *);
69 static int ql_asic_reset(qlge_t *);
70 static void ql_wake_mpi_reset_soft_intr(qlge_t *);
71 static void ql_stop_timer(qlge_t *qlge);
72 static void ql_fm_fini(qlge_t *qlge);
73 int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring);
74
75 /*
76 * TX DMA mapping handles allow multiple scatter-gather lists
77 */
78 ddi_dma_attr_t tx_mapping_dma_attr = {
79 DMA_ATTR_V0, /* dma_attr_version */
80 QL_DMA_LOW_ADDRESS, /* low DMA address range */
81 QL_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */
82 QL_DMA_XFER_COUNTER, /* DMA counter register */
83 QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment, default - 8 */
84 QL_DMA_BURSTSIZES, /* DMA burstsizes */
85 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
86 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
87 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
88 QL_MAX_TX_DMA_HANDLES, /* s/g list length */
89 QL_DMA_GRANULARITY, /* granularity of device */
90 DDI_DMA_RELAXED_ORDERING /* DMA transfer flags */
91 };
92
93 /*
94 * Receive buffers and Request/Response queues do not allow scatter-gather lists
95 */
96 ddi_dma_attr_t dma_attr = {
97 DMA_ATTR_V0, /* dma_attr_version */
98 QL_DMA_LOW_ADDRESS, /* low DMA address range */
99 QL_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */
100 QL_DMA_XFER_COUNTER, /* DMA counter register */
101 QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment, default - 8 */
102 QL_DMA_BURSTSIZES, /* DMA burstsizes */
103 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
104 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
105 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
106 1, /* s/g list length, i.e no sg list */
107 QL_DMA_GRANULARITY, /* granularity of device */
108 QL_DMA_XFER_FLAGS /* DMA transfer flags */
109 };
110 /*
111 * Receive buffers do not allow scatter-gather lists
112 */
113 ddi_dma_attr_t dma_attr_rbuf = {
114 DMA_ATTR_V0, /* dma_attr_version */
115 QL_DMA_LOW_ADDRESS, /* low DMA address range */
116 QL_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */
117 QL_DMA_XFER_COUNTER, /* DMA counter register */
118 0x1, /* DMA address alignment, byte aligned */
119 QL_DMA_BURSTSIZES, /* DMA burstsizes */
120 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
121 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
122 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
123 1, /* s/g list length, i.e no sg list */
124 QL_DMA_GRANULARITY, /* granularity of device */
125 DDI_DMA_RELAXED_ORDERING /* DMA transfer flags */
126 };
127 /*
128 * DMA access attribute structure.
129 */
130 /* device register access from host */
131 ddi_device_acc_attr_t ql_dev_acc_attr = {
132 DDI_DEVICE_ATTR_V0,
133 DDI_STRUCTURE_LE_ACC,
134 DDI_STRICTORDER_ACC
135 };
136
137 /* host ring descriptors */
138 ddi_device_acc_attr_t ql_desc_acc_attr = {
139 DDI_DEVICE_ATTR_V0,
140 DDI_NEVERSWAP_ACC,
141 DDI_STRICTORDER_ACC
142 };
143
144 /* host ring buffer */
145 ddi_device_acc_attr_t ql_buf_acc_attr = {
146 DDI_DEVICE_ATTR_V0,
147 DDI_NEVERSWAP_ACC,
148 DDI_STRICTORDER_ACC
149 };
150
151 /*
152 * Hash key table for Receive Side Scaling (RSS) support
153 */
154 const uint8_t key_data[] = {
155 0x23, 0x64, 0xa1, 0xaa, 0x37, 0xc0, 0xed, 0x05, 0x2b, 0x36,
156 0x50, 0x5c, 0x45, 0x1e, 0x7e, 0xc8, 0x5d, 0x2a, 0x54, 0x2f,
157 0xe4, 0x3d, 0x0f, 0xbb, 0x91, 0xd9, 0x25, 0x60, 0xd4, 0xf8,
158 0x12, 0xa0, 0x59, 0x4b, 0x9e, 0x8a, 0x51, 0xda, 0xcd, 0x49};
159
160 /*
161 * Shadow Registers:
162 * Outbound queues have a consumer index that is maintained by the chip.
163 * Inbound queues have a producer index that is maintained by the chip.
164 * For lower overhead, these registers are "shadowed" to host memory
165 * which allows the device driver to track the queue progress without
166 * PCI reads. When an entry is placed on an inbound queue, the chip will
167 * update the relevant index register and then copy the value to the
168 * shadow register in host memory.
169 * Currently, ql_read_sh_reg only reads the inbound queues' producer index.
170 */
171
172 static inline unsigned int
173 ql_read_sh_reg(qlge_t *qlge, struct rx_ring *rx_ring)
174 {
175 uint32_t rtn;
176
177 /* re-synchronize shadow prod index dma buffer before reading */
178 (void) ddi_dma_sync(qlge->host_copy_shadow_dma_attr.dma_handle,
179 rx_ring->prod_idx_sh_reg_offset,
180 sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
181
182 rtn = ddi_get32(qlge->host_copy_shadow_dma_attr.acc_handle,
183 (uint32_t *)rx_ring->prod_idx_sh_reg);
184
185 return (rtn);
186 }
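/*
 * Consumer-side sketch (index field name is illustrative, not taken
 * verbatim from this file): the rx completion handler typically loops
 * against the shadowed producer index, e.g.
 *	while (rx_ring->cnsmr_idx != ql_read_sh_reg(qlge, rx_ring))
 *		(process one completion entry and advance cnsmr_idx)
 * which avoids a PCI read per completion.
 */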
187
188 /*
189 * Read 32 bit atomically
190 */
191 uint32_t
192 ql_atomic_read_32(volatile uint32_t *target)
193 {
194 /*
195 * atomic_add_32_nv returns the new value after the add,
196 * we are adding 0 so we should get the original value
197 */
198 return (atomic_add_32_nv(target, 0));
199 }
200
201 /*
202 * Set 32 bit atomically
203 */
204 void
205 ql_atomic_set_32(volatile uint32_t *target, uint32_t newval)
206 {
207 (void) atomic_swap_32(target, newval);
208 }
209
210
211 /*
212 * Setup device PCI configuration registers.
213 * Kernel context.
214 */
215 static void
216 ql_pci_config(qlge_t *qlge)
217 {
218 uint16_t w;
219
220 qlge->vendor_id = (uint16_t)pci_config_get16(qlge->pci_handle,
221 PCI_CONF_VENID);
222 qlge->device_id = (uint16_t)pci_config_get16(qlge->pci_handle,
223 PCI_CONF_DEVID);
224
225 /*
226 * we want to respect framework's setting of PCI
227 * configuration space command register and also
228 * want to make sure that all bits of interest to us
229 * are properly set in PCI Command register(0x04).
230 * PCI_COMM_IO 0x1 I/O access enable
231 * PCI_COMM_MAE 0x2 Memory access enable
232 * PCI_COMM_ME 0x4 bus master enable
233 * PCI_COMM_MEMWR_INVAL 0x10 memory write and invalidate enable.
234 */
235 w = (uint16_t)pci_config_get16(qlge->pci_handle, PCI_CONF_COMM);
236 w = (uint16_t)(w & (~PCI_COMM_IO));
237 w = (uint16_t)(w | PCI_COMM_MAE | PCI_COMM_ME |
238 /* PCI_COMM_MEMWR_INVAL | */
239 PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);
240
241 pci_config_put16(qlge->pci_handle, PCI_CONF_COMM, w);
242
243 w = pci_config_get16(qlge->pci_handle, 0x54);
244 w = (uint16_t)(w & (~0x7000));
245 w = (uint16_t)(w | 0x5000);
246 pci_config_put16(qlge->pci_handle, 0x54, w);
247
248 ql_dump_pci_config(qlge);
249 }
250
251 /*
252 * This routine performs the necessary steps to set GLD MAC information
253 * such as the function number, xgmac mask and shift bits.
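 *
 * A worked decode (hypothetical register value): an MPI_REG value of
 * 0x42 yields fn0_net = (0x42 >> 1) & 0x7 = 1 and
 * fn1_net = (0x42 >> 5) & 0x7 = 2, i.e. NIC0 on PCI function 1 and
 * NIC1 on PCI function 2; invalid or equal values fall back to the
 * FN0_NET/FN1_NET defaults.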
254 */
255 static int
256 ql_set_mac_info(qlge_t *qlge)
257 {
258 uint32_t value;
259 int rval = DDI_FAILURE;
260 uint32_t fn0_net, fn1_net;
261
262 /* set default value */
263 qlge->fn0_net = FN0_NET;
264 qlge->fn1_net = FN1_NET;
265
266 if (ql_read_processor_data(qlge, MPI_REG, &value) != DDI_SUCCESS) {
267 cmn_err(CE_WARN, "%s(%d) read MPI register failed",
268 __func__, qlge->instance);
269 goto exit;
270 } else {
271 fn0_net = (value >> 1) & 0x07;
272 fn1_net = (value >> 5) & 0x07;
273 if ((fn0_net > 4) || (fn1_net > 4) || (fn0_net == fn1_net)) {
274 cmn_err(CE_WARN, "%s(%d) bad mpi register value %x, \n"
275 "nic0 function number %d,"
276 "nic1 function number %d "
277 "use default\n",
278 __func__, qlge->instance, value, fn0_net, fn1_net);
279 goto exit;
280 } else {
281 qlge->fn0_net = fn0_net;
282 qlge->fn1_net = fn1_net;
283 }
284 }
285
286 /* Get the function number that the driver is associated with */
287 value = ql_read_reg(qlge, REG_STATUS);
288 qlge->func_number = (uint8_t)((value >> 6) & 0x03);
289 QL_PRINT(DBG_INIT, ("status register is:%x, func_number: %d\n",
290 value, qlge->func_number));
291
292 /* The driver is loaded on a non-NIC function? */
293 if ((qlge->func_number != qlge->fn0_net) &&
294 (qlge->func_number != qlge->fn1_net)) {
295 cmn_err(CE_WARN,
296 "Invalid function number = 0x%x\n", qlge->func_number);
297 goto exit;
298 }
299 /* network port 0? */
300 if (qlge->func_number == qlge->fn0_net) {
301 qlge->xgmac_sem_mask = QL_PORT0_XGMAC_SEM_MASK;
302 qlge->xgmac_sem_bits = QL_PORT0_XGMAC_SEM_BITS;
303 } else {
304 qlge->xgmac_sem_mask = QL_PORT1_XGMAC_SEM_MASK;
305 qlge->xgmac_sem_bits = QL_PORT1_XGMAC_SEM_BITS;
306 }
307 rval = DDI_SUCCESS;
308 exit:
309 return (rval);
310
311 }
312
313 /*
314 * write to doorbell register
315 */
316 void
317 ql_write_doorbell_reg(qlge_t *qlge, uint32_t *addr, uint32_t data)
318 {
319 ddi_put32(qlge->dev_doorbell_reg_handle, addr, data);
320 }
321
322 /*
323 * read from doorbell register
324 */
325 uint32_t
326 ql_read_doorbell_reg(qlge_t *qlge, uint32_t *addr)
327 {
328 uint32_t ret;
329
330 ret = ddi_get32(qlge->dev_doorbell_reg_handle, addr);
331
332 return (ret);
333 }
334
335 /*
336 * This function waits for a specific bit to come ready
337 * in a given register. It is used mostly by the initialize
338 * process, but is also used in kernel thread API such as
339 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
340 */
341 static int
342 ql_wait_reg_rdy(qlge_t *qlge, uint32_t reg, uint32_t bit, uint32_t err_bit)
343 {
344 uint32_t temp;
345 int count = UDELAY_COUNT;
346
347 while (count) {
348 temp = ql_read_reg(qlge, reg);
349
350 /* check for errors */
351 if ((temp & err_bit) != 0) {
352 break;
353 } else if ((temp & bit) != 0)
354 return (DDI_SUCCESS);
355 qlge_delay(UDELAY_DELAY);
356 count--;
357 }
358 cmn_err(CE_WARN,
359 "Waiting for reg %x to come ready failed.", reg);
360 if (qlge->fm_enable) {
361 ql_fm_ereport(qlge, DDI_FM_DEVICE_NO_RESPONSE);
362 atomic_or_32(&qlge->flags, ADAPTER_ERROR);
363 }
364 return (DDI_FAILURE);
365 }
366
367 /*
368 * The CFG register is used to download TX and RX control blocks
369 * to the chip. This function waits for an operation to complete.
370 */
371 static int
372 ql_wait_cfg(qlge_t *qlge, uint32_t bit)
373 {
374 return (ql_wait_reg_bit(qlge, REG_CONFIGURATION, bit, BIT_RESET, 0));
375 }
376
377
378 /*
379 * Used to issue init control blocks to hw. Maps control block,
380 * sets address, triggers download, waits for completion.
381 */
382 static int
383 ql_write_cfg(qlge_t *qlge, uint32_t bit, uint64_t phy_addr, uint16_t q_id)
384 {
385 int status = DDI_SUCCESS;
386 uint32_t mask;
387 uint32_t value;
388
389 status = ql_sem_spinlock(qlge, SEM_ICB_MASK);
390 if (status != DDI_SUCCESS) {
391 goto exit;
392 }
393 status = ql_wait_cfg(qlge, bit);
394 if (status != DDI_SUCCESS) {
395 goto exit;
396 }
397
398 ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_LOWER, LS_64BITS(phy_addr));
399 ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_UPPER, MS_64BITS(phy_addr));
400
401 mask = CFG_Q_MASK | (bit << 16);
402 value = bit | (q_id << CFG_Q_SHIFT);
403 ql_write_reg(qlge, REG_CONFIGURATION, (mask | value));
404
405 /*
406 * Wait for the bit to clear after signaling hw.
407 */
408 status = ql_wait_cfg(qlge, bit);
409 ql_sem_unlock(qlge, SEM_ICB_MASK); /* does flush too */
410
411 exit:
412 return (status);
413 }
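/*
 * Usage sketch (the bit and field names here are hypothetical, shown
 * only to illustrate the calling convention): to download a
 * completion-queue ICB a caller would pass the ICB's DMA address and
 * queue id, e.g.
 *	(void) ql_write_cfg(qlge, CFG_LCQ, rx_ring->cqicb_dma.dma_addr,
 *	    rx_ring->cq_id);
 * The routine serializes on the ICB semaphore and polls
 * REG_CONFIGURATION until the load bit clears.
 */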
414
415 /*
416 * Initialize adapter instance
417 */
418 static int
419 ql_init_instance(qlge_t *qlge)
420 {
421 int i;
422
423 /* Default value */
424 qlge->mac_flags = QL_MAC_INIT;
425 qlge->mtu = ETHERMTU; /* set normal size as default */
426 qlge->page_size = VM_PAGE_SIZE; /* default page size */
427
428 for (i = 0; i < MAX_RX_RINGS; i++) {
429 qlge->rx_polls[i] = 0;
430 qlge->rx_interrupts[i] = 0;
431 }
432
433 /*
434 * Set up the operating parameters.
435 */
436 qlge->multicast_list_count = 0;
437
438 /*
439 * Set up the max number of unicast list
440 */
441 qlge->unicst_total = MAX_UNICAST_LIST_SIZE;
442 qlge->unicst_avail = MAX_UNICAST_LIST_SIZE;
443
444 /*
445 * read user defined properties in .conf file
446 */
447 ql_read_conf(qlge); /* mtu, pause, LSO etc */
448 qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
449
450 QL_PRINT(DBG_INIT, ("mtu is %d \n", qlge->mtu));
451
452 /* choose Memory Space mapping and get Vendor Id, Device ID etc */
453 ql_pci_config(qlge);
454 qlge->ip_hdr_offset = 0;
455
456 if (qlge->device_id == 0x8000) {
457 /* Schultz card */
458 qlge->cfg_flags |= CFG_CHIP_8100;
459 /* enable just ipv4 chksum offload for Schultz */
460 qlge->cfg_flags |= CFG_CKSUM_FULL_IPv4;
461 /*
462 * Schultz firmware does not do the pseudo IP header checksum
463 * calculation, so it must be done by the driver
464 */
465 qlge->cfg_flags |= CFG_HW_UNABLE_PSEUDO_HDR_CKSUM;
466 if (qlge->lso_enable)
467 qlge->cfg_flags |= CFG_LSO;
468 qlge->cfg_flags |= CFG_SUPPORT_SCATTER_GATHER;
469 /* Schultz must split packet header */
470 qlge->cfg_flags |= CFG_ENABLE_SPLIT_HEADER;
471 qlge->max_read_mbx = 5;
472 qlge->ip_hdr_offset = 2;
473 }
474
475 /* Set Function Number and some of the iocb mac information */
476 if (ql_set_mac_info(qlge) != DDI_SUCCESS)
477 return (DDI_FAILURE);
478
479 /* Read network settings from NVRAM */
480 /* After nvram is read successfully, update dev_addr */
481 if (ql_get_flash_params(qlge) == DDI_SUCCESS) {
482 QL_PRINT(DBG_INIT, ("mac%d address is \n", qlge->func_number));
483 for (i = 0; i < ETHERADDRL; i++) {
484 qlge->dev_addr.ether_addr_octet[i] =
485 qlge->nic_config.factory_MAC[i];
486 }
487 } else {
488 cmn_err(CE_WARN, "%s(%d): Failed to read flash memory",
489 __func__, qlge->instance);
490 return (DDI_FAILURE);
491 }
492
493 bcopy(qlge->dev_addr.ether_addr_octet,
494 qlge->unicst_addr[0].addr.ether_addr_octet,
495 ETHERADDRL);
496 QL_DUMP(DBG_INIT, "\t flash mac address dump:\n",
497 &qlge->dev_addr.ether_addr_octet[0], 8, ETHERADDRL);
498
499 qlge->port_link_state = LS_DOWN;
500
501 return (DDI_SUCCESS);
502 }
503
504
505 /*
506 * This hardware semaphore provides the mechanism for exclusive access to
507 * resources shared between the NIC driver, MPI firmware,
508 * FCOE firmware and the FC driver.
509 */
510 static int
511 ql_sem_trylock(qlge_t *qlge, uint32_t sem_mask)
512 {
513 uint32_t sem_bits = 0;
514
515 switch (sem_mask) {
516 case SEM_XGMAC0_MASK:
517 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
518 break;
519 case SEM_XGMAC1_MASK:
520 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
521 break;
522 case SEM_ICB_MASK:
523 sem_bits = SEM_SET << SEM_ICB_SHIFT;
524 break;
525 case SEM_MAC_ADDR_MASK:
526 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
527 break;
528 case SEM_FLASH_MASK:
529 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
530 break;
531 case SEM_PROBE_MASK:
532 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
533 break;
534 case SEM_RT_IDX_MASK:
535 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
536 break;
537 case SEM_PROC_REG_MASK:
538 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
539 break;
540 default:
541 cmn_err(CE_WARN, "Bad Semaphore mask!.");
542 return (DDI_FAILURE);
543 }
544
545 ql_write_reg(qlge, REG_SEMAPHORE, sem_bits | sem_mask);
546 return (!(ql_read_reg(qlge, REG_SEMAPHORE) & sem_bits));
547 }
548
549 /*
550 * Lock a specific bit of Semaphore register to gain
551 * access to a particular shared register
552 */
553 int
554 ql_sem_spinlock(qlge_t *qlge, uint32_t sem_mask)
555 {
556 unsigned int wait_count = 30;
557
558 while (wait_count) {
559 if (!ql_sem_trylock(qlge, sem_mask))
560 return (DDI_SUCCESS);
561 qlge_delay(100);
562 wait_count--;
563 }
564 cmn_err(CE_WARN, "%s(%d) sem_mask 0x%x lock timeout ",
565 __func__, qlge->instance, sem_mask);
566 return (DDI_FAILURE);
567 }
568
569 /*
570 * Unlock a specific bit of the Semaphore register to release
571 * access to a particular shared register
572 */
573 void
574 ql_sem_unlock(qlge_t *qlge, uint32_t sem_mask)
575 {
576 ql_write_reg(qlge, REG_SEMAPHORE, sem_mask);
577 (void) ql_read_reg(qlge, REG_SEMAPHORE); /* flush */
578 }
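/*
 * Typical pairing (a sketch using one of the masks handled above):
 *	if (ql_sem_spinlock(qlge, SEM_FLASH_MASK) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	(access the shared flash registers)
 *	ql_sem_unlock(qlge, SEM_FLASH_MASK);
 */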
579
580 /*
581 * Get property value from configuration file.
582 *
583 * string = property string pointer.
584 *
585 * Returns:
586 * 0xFFFFFFFF = no property else property value.
587 */
588 static uint32_t
589 ql_get_prop(qlge_t *qlge, char *string)
590 {
591 char buf[256];
592 uint32_t data;
593
594 /* Get adapter instance parameter. */
595 (void) sprintf(buf, "hba%d-%s", qlge->instance, string);
596 data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0, buf,
597 (int)0xffffffff);
598
599 /* Adapter instance parameter found? */
600 if (data == 0xffffffff) {
601 /* No, get default parameter. */
602 data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0,
603 string, (int)0xffffffff);
604 }
605
606 return (data);
607 }
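/*
 * Example (sketch): with "hba0-mtu=9000;" in qlge.conf, instance 0
 * takes the instance-specific value even when a global "mtu=9000;"
 * entry is absent; when neither form exists the caller sees
 * 0xffffffff and keeps its built-in default.
 */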
608
609 /*
610 * Read user setting from configuration file.
611 */
612 static void
613 ql_read_conf(qlge_t *qlge)
614 {
615 uint32_t data;
616
617 /* clear configuration flags */
618 qlge->cfg_flags = 0;
619
620 /* Set up the default ring sizes. */
621 qlge->tx_ring_size = NUM_TX_RING_ENTRIES;
622 data = ql_get_prop(qlge, "tx_ring_size");
623 /* if data is valid */
624 if ((data != 0xffffffff) && data) {
625 if (qlge->tx_ring_size != data) {
626 qlge->tx_ring_size = (uint16_t)data;
627 }
628 }
629
630 qlge->rx_ring_size = NUM_RX_RING_ENTRIES;
631 data = ql_get_prop(qlge, "rx_ring_size");
632 /* if data is valid */
633 if ((data != 0xffffffff) && data) {
634 if (qlge->rx_ring_size != data) {
635 qlge->rx_ring_size = (uint16_t)data;
636 }
637 }
638
639 qlge->tx_ring_count = 8;
640 data = ql_get_prop(qlge, "tx_ring_count");
641 /* if data is valid */
642 if ((data != 0xffffffff) && data) {
643 if (qlge->tx_ring_count != data) {
644 qlge->tx_ring_count = (uint16_t)data;
645 }
646 }
647
648 qlge->rss_ring_count = 8;
649 data = ql_get_prop(qlge, "rss_ring_count");
650 /* if data is valid */
651 if ((data != 0xffffffff) && data) {
652 if (qlge->rss_ring_count != data) {
653 qlge->rss_ring_count = (uint16_t)data;
654 }
655 }
656
657 /* Get default rx_copy enable/disable. */
658 if ((data = ql_get_prop(qlge, "force-rx-copy")) == 0xffffffff ||
659 data == 0) {
660 qlge->rx_copy = B_FALSE;
661 QL_PRINT(DBG_INIT, ("rx copy mode disabled\n"));
662 } else if (data == 1) {
663 qlge->rx_copy = B_TRUE;
664 QL_PRINT(DBG_INIT, ("rx copy mode enabled\n"));
665 }
666
667 qlge->rx_copy_threshold = qlge->rx_ring_size / 4;
668 data = ql_get_prop(qlge, "rx_copy_threshold");
669 if ((data != 0xffffffff) && (data != 0)) {
670 qlge->rx_copy_threshold = data;
671 cmn_err(CE_NOTE, "!new rx_copy_threshold %d \n",
672 qlge->rx_copy_threshold);
673 }
674
675 /* Get mtu packet size. */
676 data = ql_get_prop(qlge, "mtu");
677 if ((data == ETHERMTU) || (data == JUMBO_MTU)) {
678 if (qlge->mtu != data) {
679 qlge->mtu = data;
680 cmn_err(CE_NOTE, "new mtu is %d\n", qlge->mtu);
681 }
682 }
683
684 if (qlge->mtu == JUMBO_MTU) {
685 qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT_JUMBO;
686 qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT_JUMBO;
687 qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT_JUMBO;
688 qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT_JUMBO;
689 }
690
691
692 /* Get pause mode, default is Per Priority mode. */
693 qlge->pause = PAUSE_MODE_PER_PRIORITY;
694 data = ql_get_prop(qlge, "pause");
695 if (data <= PAUSE_MODE_PER_PRIORITY) {
696 if (qlge->pause != data) {
697 qlge->pause = data;
698 cmn_err(CE_NOTE, "new pause mode %d\n", qlge->pause);
699 }
700 }
701 /* Receive interrupt delay */
702 qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT;
703 data = ql_get_prop(qlge, "rx_intr_delay");
704 /* if data is valid */
705 if ((data != 0xffffffff) && data) {
706 if (qlge->rx_coalesce_usecs != data) {
707 qlge->rx_coalesce_usecs = (uint16_t)data;
708 }
709 }
710 /* Rx inter-packet delay. */
711 qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT;
712 data = ql_get_prop(qlge, "rx_ipkt_delay");
713 /* if data is valid */
714 if ((data != 0xffffffff) && data) {
715 if (qlge->rx_max_coalesced_frames != data) {
716 qlge->rx_max_coalesced_frames = (uint16_t)data;
717 }
718 }
719 /* Transmit interrupt delay */
720 qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT;
721 data = ql_get_prop(qlge, "tx_intr_delay");
722 /* if data is valid */
723 if ((data != 0xffffffff) && data) {
724 if (qlge->tx_coalesce_usecs != data) {
725 qlge->tx_coalesce_usecs = (uint16_t)data;
726 }
727 }
728 /* Tx inter-packet delay. */
729 qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT;
730 data = ql_get_prop(qlge, "tx_ipkt_delay");
731 /* if data is valid */
732 if ((data != 0xffffffff) && data) {
733 if (qlge->tx_max_coalesced_frames != data) {
734 qlge->tx_max_coalesced_frames = (uint16_t)data;
735 }
736 }
737
738 /* Get split header payload_copy_thresh. */
739 qlge->payload_copy_thresh = DFLT_PAYLOAD_COPY_THRESH;
740 data = ql_get_prop(qlge, "payload_copy_thresh");
741 /* if data is valid */
742 if ((data != 0xffffffff) && (data != 0)) {
743 if (qlge->payload_copy_thresh != data) {
744 qlge->payload_copy_thresh = data;
745 }
746 }
747
748 /* large send offload (LSO) capability. */
749 qlge->lso_enable = 1;
750 data = ql_get_prop(qlge, "lso_enable");
751 /* if data is valid */
752 if ((data == 0) || (data == 1)) {
753 if (qlge->lso_enable != data) {
754 qlge->lso_enable = (uint16_t)data;
755 }
756 }
757
758 /* dcbx capability. */
759 qlge->dcbx_enable = 1;
760 data = ql_get_prop(qlge, "dcbx_enable");
761 /* if data is valid */
762 if ((data == 0) || (data == 1)) {
763 if (qlge->dcbx_enable != data) {
764 qlge->dcbx_enable = (uint16_t)data;
765 }
766 }
767 /* fault management enable */
768 qlge->fm_enable = B_TRUE;
769 data = ql_get_prop(qlge, "fm-enable");
770 if ((data == 0x1) || (data == 0)) {
771 qlge->fm_enable = (boolean_t)data;
772 }
773
774 }
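/*
 * Illustrative qlge.conf fragment using properties parsed above
 * (values are examples only, not recommendations; mtu must be one of
 * the sizes accepted by ql_read_conf):
 *	mtu=9000;
 *	pause=2;
 *	tx_ring_count=4;
 *	rss_ring_count=4;
 *	lso_enable=1;
 *	fm-enable=1;
 */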
775
776 /*
777 * Enable global interrupt
778 */
779 static void
780 ql_enable_global_interrupt(qlge_t *qlge)
781 {
782 ql_write_reg(qlge, REG_INTERRUPT_ENABLE,
783 (INTR_EN_EI << 16) | INTR_EN_EI);
784 qlge->flags |= INTERRUPTS_ENABLED;
785 }
786
787 /*
788 * Disable global interrupt
789 */
790 static void
791 ql_disable_global_interrupt(qlge_t *qlge)
792 {
793 ql_write_reg(qlge, REG_INTERRUPT_ENABLE, (INTR_EN_EI << 16));
794 qlge->flags &= ~INTERRUPTS_ENABLED;
795 }
796
797 /*
798 * Enable one ring interrupt
799 */
800 void
801 ql_enable_completion_interrupt(qlge_t *qlge, uint32_t intr)
802 {
803 struct intr_ctx *ctx = qlge->intr_ctx + intr;
804
805 QL_PRINT(DBG_INTR, ("%s(%d): To enable intr %d, irq_cnt %d \n",
806 __func__, qlge->instance, intr, ctx->irq_cnt));
807
808 if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
809 /*
810 * Always enable if we're MSIX multi interrupts and
811 * it's not the default (zeroeth) interrupt.
812 */
813 ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
814 return;
815 }
816
817 if (!atomic_dec_32_nv(&ctx->irq_cnt)) {
818 mutex_enter(&qlge->hw_mutex);
819 ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
820 mutex_exit(&qlge->hw_mutex);
821 QL_PRINT(DBG_INTR,
822 ("%s(%d): write %x to intr enable register \n",
823 __func__, qlge->instance, ctx->intr_en_mask));
824 }
825 }
826
827 /*
828 * ql_forced_disable_completion_interrupt
829 * Used by call from OS, may be called without
830 * a pending interrupt so force the disable
831 */
832 uint32_t
833 ql_forced_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
834 {
835 uint32_t var = 0;
836 struct intr_ctx *ctx = qlge->intr_ctx + intr;
837
838 QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
839 __func__, qlge->instance, intr, ctx->irq_cnt));
840
841 if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
842 ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
843 var = ql_read_reg(qlge, REG_STATUS);
844 return (var);
845 }
846
847 mutex_enter(&qlge->hw_mutex);
848 ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
849 var = ql_read_reg(qlge, REG_STATUS);
850 mutex_exit(&qlge->hw_mutex);
851
852 return (var);
853 }
854
855 /*
856 * Disable a completion interrupt
857 */
858 void
859 ql_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
860 {
861 struct intr_ctx *ctx;
862
863 ctx = qlge->intr_ctx + intr;
864 QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
865 __func__, qlge->instance, intr, ctx->irq_cnt));
866 /*
867 * HW disables for us if we're MSIX multi interrupts and
868 * it's not the default (zeroeth) interrupt.
869 */
870 if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && (intr != 0))
871 return;
872
873 if (ql_atomic_read_32(&ctx->irq_cnt) == 0) {
874 mutex_enter(&qlge->hw_mutex);
875 ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
876 mutex_exit(&qlge->hw_mutex);
877 }
878 atomic_inc_32(&ctx->irq_cnt);
879 }
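/*
 * Note on irq_cnt (summary of the two routines above): for INTx/MSI
 * and the default MSI-X vector, disable increments irq_cnt (writing
 * the disable mask only when the count was zero) and enable decrements
 * it, re-arming the interrupt only when the count returns to zero, so
 * nested disable/enable pairs behave as expected.
 */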
880
881 /*
882 * Enable all completion interrupts
883 */
884 static void
885 ql_enable_all_completion_interrupts(qlge_t *qlge)
886 {
887 int i;
888 uint32_t value = 1;
889
890 for (i = 0; i < qlge->intr_cnt; i++) {
891 /*
892 * Set the count to 1 for Legacy / MSI interrupts or for the
893 * default interrupt (0)
894 */
895 if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0) {
896 ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
897 }
898 ql_enable_completion_interrupt(qlge, i);
899 }
900 }
901
902 /*
903 * Disable all completion interrupts
904 */
905 static void
906 ql_disable_all_completion_interrupts(qlge_t *qlge)
907 {
908 int i;
909 uint32_t value = 0;
910
911 for (i = 0; i < qlge->intr_cnt; i++) {
912
913 /*
914 * Set the count to 0 for Legacy / MSI interrupts or for the
915 * default interrupt (0)
916 */
917 if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0)
918 ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
919
920 ql_disable_completion_interrupt(qlge, i);
921 }
922 }
923
924 /*
925 * Update small buffer queue producer index
926 */
927 static void
928 ql_update_sbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
929 {
930 /* Update the buffer producer index */
931 QL_PRINT(DBG_RX, ("sbq: updating prod idx = %d.\n",
932 rx_ring->sbq_prod_idx));
933 ql_write_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg,
934 rx_ring->sbq_prod_idx);
935 }
936
937 /*
938 * Update large buffer queue producer index
939 */
940 static void
941 ql_update_lbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
942 {
943 /* Update the buffer producer index */
944 QL_PRINT(DBG_RX, ("lbq: updating prod idx = %d.\n",
945 rx_ring->lbq_prod_idx));
946 ql_write_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg,
947 rx_ring->lbq_prod_idx);
948 }
949
950 /*
951 * Adds a small buffer descriptor to end of its in use list,
952 * assumes sbq_lock is already taken
953 */
954 static void
955 ql_add_sbuf_to_in_use_list(struct rx_ring *rx_ring,
956 struct bq_desc *sbq_desc)
957 {
958 uint32_t inuse_idx = rx_ring->sbq_use_tail;
959
960 rx_ring->sbuf_in_use[inuse_idx] = sbq_desc;
961 inuse_idx++;
962 if (inuse_idx >= rx_ring->sbq_len)
963 inuse_idx = 0;
964 rx_ring->sbq_use_tail = inuse_idx;
965 atomic_inc_32(&rx_ring->sbuf_in_use_count);
966 ASSERT(rx_ring->sbuf_in_use_count <= rx_ring->sbq_len);
967 }
968
969 /*
970 * Get a small buffer descriptor from its in use list
971 */
972 static struct bq_desc *
973 ql_get_sbuf_from_in_use_list(struct rx_ring *rx_ring)
974 {
975 struct bq_desc *sbq_desc = NULL;
976 uint32_t inuse_idx;
977
978 /* Pick from head of in use list */
979 inuse_idx = rx_ring->sbq_use_head;
980 sbq_desc = rx_ring->sbuf_in_use[inuse_idx];
981 rx_ring->sbuf_in_use[inuse_idx] = NULL;
982
983 if (sbq_desc != NULL) {
984 inuse_idx++;
985 if (inuse_idx >= rx_ring->sbq_len)
986 inuse_idx = 0;
987 rx_ring->sbq_use_head = inuse_idx;
988 atomic_dec_32(&rx_ring->sbuf_in_use_count);
989 atomic_inc_32(&rx_ring->rx_indicate);
990 sbq_desc->upl_inuse = 1;
991 /* if mp is NULL */
992 if (sbq_desc->mp == NULL) {
993 /* try to remap mp again */
994 sbq_desc->mp =
995 desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
996 rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
997 }
998 }
999
1000 return (sbq_desc);
1001 }
1002
1003 /*
1004 * Add a small buffer descriptor to its free list
1005 */
1006 static void
1007 ql_add_sbuf_to_free_list(struct rx_ring *rx_ring,
1008 struct bq_desc *sbq_desc)
1009 {
1010 uint32_t free_idx;
1011
1012 /* Add to the end of free list */
1013 free_idx = rx_ring->sbq_free_tail;
1014 rx_ring->sbuf_free[free_idx] = sbq_desc;
1015 ASSERT(rx_ring->sbuf_free_count <= rx_ring->sbq_len);
1016 free_idx++;
1017 if (free_idx >= rx_ring->sbq_len)
1018 free_idx = 0;
1019 rx_ring->sbq_free_tail = free_idx;
1020 atomic_inc_32(&rx_ring->sbuf_free_count);
1021 }
1022
1023 /*
1024 * Get a small buffer descriptor from its free list
1025 */
1026 static struct bq_desc *
1027 ql_get_sbuf_from_free_list(struct rx_ring *rx_ring)
1028 {
1029 struct bq_desc *sbq_desc;
1030 uint32_t free_idx;
1031
1032 free_idx = rx_ring->sbq_free_head;
1033 /* Pick from top of free list */
1034 sbq_desc = rx_ring->sbuf_free[free_idx];
1035 rx_ring->sbuf_free[free_idx] = NULL;
1036 if (sbq_desc != NULL) {
1037 free_idx++;
1038 if (free_idx >= rx_ring->sbq_len)
1039 free_idx = 0;
1040 rx_ring->sbq_free_head = free_idx;
1041 atomic_dec_32(&rx_ring->sbuf_free_count);
1042 }
1043 return (sbq_desc);
1044 }
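/*
 * Note (summary of the small-buffer helpers above): sbuf_free[] and
 * sbuf_in_use[] are circular FIFOs of descriptor pointers indexed by
 * head/tail and wrapping at sbq_len.  A buffer moves from the free
 * list to the hardware queue (and onto the in-use list) when the ring
 * is re-armed, is handed upstream on completion, and returns to the
 * free list through the rx_recycle callback.
 */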
1045
1046 /*
1047 * Add a large buffer descriptor to its in use list
1048 */
1049 static void
1050 ql_add_lbuf_to_in_use_list(struct rx_ring *rx_ring,
1051 struct bq_desc *lbq_desc)
1052 {
1053 uint32_t inuse_idx;
1054
1055 inuse_idx = rx_ring->lbq_use_tail;
1056
1057 rx_ring->lbuf_in_use[inuse_idx] = lbq_desc;
1058 inuse_idx++;
1059 if (inuse_idx >= rx_ring->lbq_len)
1060 inuse_idx = 0;
1061 rx_ring->lbq_use_tail = inuse_idx;
1062 atomic_inc_32(&rx_ring->lbuf_in_use_count);
1063 }
1064
1065 /*
1066 * Get a large buffer descriptor from in use list
1067 */
1068 static struct bq_desc *
1069 ql_get_lbuf_from_in_use_list(struct rx_ring *rx_ring)
1070 {
1071 struct bq_desc *lbq_desc;
1072 uint32_t inuse_idx;
1073
1074 /* Pick from head of in use list */
1075 inuse_idx = rx_ring->lbq_use_head;
1076 lbq_desc = rx_ring->lbuf_in_use[inuse_idx];
1077 rx_ring->lbuf_in_use[inuse_idx] = NULL;
1078
1079 if (lbq_desc != NULL) {
1080 inuse_idx++;
1081 if (inuse_idx >= rx_ring->lbq_len)
1082 inuse_idx = 0;
1083 rx_ring->lbq_use_head = inuse_idx;
1084 atomic_dec_32(&rx_ring->lbuf_in_use_count);
1085 atomic_inc_32(&rx_ring->rx_indicate);
1086 lbq_desc->upl_inuse = 1;
1087
1088 /* if mp is NULL */
1089 if (lbq_desc->mp == NULL) {
1090 /* try to remap mp again */
1091 lbq_desc->mp =
1092 desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1093 rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1094 }
1095 }
1096 return (lbq_desc);
1097 }
1098
1099 /*
1100 * Add a large buffer descriptor to free list
1101 */
1102 static void
1103 ql_add_lbuf_to_free_list(struct rx_ring *rx_ring,
1104 struct bq_desc *lbq_desc)
1105 {
1106 uint32_t free_idx;
1107
1108 /* Add to the end of free list */
1109 free_idx = rx_ring->lbq_free_tail;
1110 rx_ring->lbuf_free[free_idx] = lbq_desc;
1111 free_idx++;
1112 if (free_idx >= rx_ring->lbq_len)
1113 free_idx = 0;
1114 rx_ring->lbq_free_tail = free_idx;
1115 atomic_inc_32(&rx_ring->lbuf_free_count);
1116 ASSERT(rx_ring->lbuf_free_count <= rx_ring->lbq_len);
1117 }
1118
1119 /*
1120 * Get a large buffer descriptor from its free list
1121 */
1122 static struct bq_desc *
1123 ql_get_lbuf_from_free_list(struct rx_ring *rx_ring)
1124 {
1125 struct bq_desc *lbq_desc;
1126 uint32_t free_idx;
1127
1128 free_idx = rx_ring->lbq_free_head;
1129 /* Pick from head of free list */
1130 lbq_desc = rx_ring->lbuf_free[free_idx];
1131 rx_ring->lbuf_free[free_idx] = NULL;
1132
1133 if (lbq_desc != NULL) {
1134 free_idx++;
1135 if (free_idx >= rx_ring->lbq_len)
1136 free_idx = 0;
1137 rx_ring->lbq_free_head = free_idx;
1138 atomic_dec_32(&rx_ring->lbuf_free_count);
1139 }
1140 return (lbq_desc);
1141 }
1142
1143 /*
1144 * Add a small buffer descriptor to free list
1145 */
1146 static void
1147 ql_refill_sbuf_free_list(struct bq_desc *sbq_desc, boolean_t alloc_memory)
1148 {
1149 struct rx_ring *rx_ring = sbq_desc->rx_ring;
1150 uint64_t *sbq_entry;
1151 qlge_t *qlge = (qlge_t *)rx_ring->qlge;
1152 /*
1153 * Sync access
1154 */
1155 mutex_enter(&rx_ring->sbq_lock);
1156
1157 sbq_desc->upl_inuse = 0;
1158
1159 /*
1160 * If we are freeing the buffers as a result of adapter unload, get out
1161 */
1162 if ((sbq_desc->free_buf != 0) ||
1163 (qlge->mac_flags == QL_MAC_DETACH)) {
1164 if (sbq_desc->free_buf == 0)
1165 atomic_dec_32(&rx_ring->rx_indicate);
1166 mutex_exit(&rx_ring->sbq_lock);
1167 return;
1168 }
1169 #ifdef QLGE_LOAD_UNLOAD
1170 if (rx_ring->rx_indicate == 0)
1171 cmn_err(CE_WARN, "sbq: indicate wrong");
1172 #endif
1173 #ifdef QLGE_TRACK_BUFFER_USAGE
1174 uint32_t sb_consumer_idx;
1175 uint32_t sb_producer_idx;
1176 uint32_t num_free_buffers;
1177 uint32_t temp;
1178
1179 temp = ql_read_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg);
1180 sb_producer_idx = temp & 0x0000ffff;
1181 sb_consumer_idx = (temp >> 16);
1182
1183 if (sb_consumer_idx > sb_producer_idx)
1184 num_free_buffers = NUM_SMALL_BUFFERS -
1185 (sb_consumer_idx - sb_producer_idx);
1186 else
1187 num_free_buffers = sb_producer_idx - sb_consumer_idx;
1188
1189 if (num_free_buffers < qlge->rx_sb_low_count[rx_ring->cq_id])
1190 qlge->rx_sb_low_count[rx_ring->cq_id] = num_free_buffers;
1191
1192 #endif
1193
1194 #ifdef QLGE_LOAD_UNLOAD
1195 if (rx_ring->rx_indicate > 0xFF000000)
1196 cmn_err(CE_WARN, "sbq: indicate(%d) wrong: %d mac_flags %d,"
1197 " sbq_desc index %d.",
1198 rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
1199 sbq_desc->index);
1200 #endif
1201 if (alloc_memory) {
1202 sbq_desc->mp =
1203 desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1204 rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1205 if (sbq_desc->mp == NULL) {
1206 rx_ring->rx_failed_sbq_allocs++;
1207 }
1208 }
1209
1210 /* The buffer came back from the stack; decrement the rx_indicate count */
1211 atomic_dec_32(&rx_ring->rx_indicate);
1212
1213 ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1214
1215 /* Rearm if possible */
1216 if ((rx_ring->sbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1217 (qlge->mac_flags == QL_MAC_STARTED)) {
1218 sbq_entry = rx_ring->sbq_dma.vaddr;
1219 sbq_entry += rx_ring->sbq_prod_idx;
1220
1221 while (rx_ring->sbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1222 /* Get first one from free list */
1223 sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
1224
1225 *sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
1226 sbq_entry++;
1227 rx_ring->sbq_prod_idx++;
1228 if (rx_ring->sbq_prod_idx >= rx_ring->sbq_len) {
1229 rx_ring->sbq_prod_idx = 0;
1230 sbq_entry = rx_ring->sbq_dma.vaddr;
1231 }
1232 /* Add to end of in use list */
1233 ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
1234 }
1235
1236 /* Update small buffer queue producer index */
1237 ql_update_sbq_prod_idx(qlge, rx_ring);
1238 }
1239
1240 mutex_exit(&rx_ring->sbq_lock);
1241 QL_PRINT(DBG_RX_RING, ("%s(%d) exited, sbuf_free_count %d\n",
1242 __func__, qlge->instance, rx_ring->sbuf_free_count));
1243 }
1244
1245 /*
1246 * rx recycle callback for small receive buffers
1247 */
1248 static void
1249 ql_release_to_sbuf_free_list(caddr_t p)
1250 {
1251 struct bq_desc *sbq_desc = (struct bq_desc *)(void *)p;
1252
1253 if (sbq_desc == NULL)
1254 return;
1255 ql_refill_sbuf_free_list(sbq_desc, B_TRUE);
1256 }
1257
1258 /*
1259 * Add a large buffer descriptor to free list
1260 */
1261 static void
1262 ql_refill_lbuf_free_list(struct bq_desc *lbq_desc, boolean_t alloc_memory)
1263 {
1264 struct rx_ring *rx_ring = lbq_desc->rx_ring;
1265 uint64_t *lbq_entry;
1266 qlge_t *qlge = rx_ring->qlge;
1267
1268 /* Sync access */
1269 mutex_enter(&rx_ring->lbq_lock);
1270
1271 lbq_desc->upl_inuse = 0;
1272 /*
1273 * If we are freeing the buffers as a result of adapter unload, get out
1274 */
1275 if ((lbq_desc->free_buf != 0) ||
1276 (qlge->mac_flags == QL_MAC_DETACH)) {
1277 if (lbq_desc->free_buf == 0)
1278 atomic_dec_32(&rx_ring->rx_indicate);
1279 mutex_exit(&rx_ring->lbq_lock);
1280 return;
1281 }
1282 #ifdef QLGE_LOAD_UNLOAD
1283 if (rx_ring->rx_indicate == 0)
1284 cmn_err(CE_WARN, "lbq: indicate wrong");
1285 #endif
1286 #ifdef QLGE_TRACK_BUFFER_USAGE
1287 uint32_t lb_consumer_idx;
1288 uint32_t lb_producer_idx;
1289 uint32_t num_free_buffers;
1290 uint32_t temp;
1291
1292 temp = ql_read_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg);
1293
1294 lb_producer_idx = temp & 0x0000ffff;
1295 lb_consumer_idx = (temp >> 16);
1296
1297 if (lb_consumer_idx > lb_producer_idx)
1298 num_free_buffers = NUM_LARGE_BUFFERS -
1299 (lb_consumer_idx - lb_producer_idx);
1300 else
1301 num_free_buffers = lb_producer_idx - lb_consumer_idx;
1302
1303 if (num_free_buffers < qlge->rx_lb_low_count[rx_ring->cq_id]) {
1304 qlge->rx_lb_low_count[rx_ring->cq_id] = num_free_buffers;
1305 }
1306 #endif
1307
1308 #ifdef QLGE_LOAD_UNLOAD
1309 if (rx_ring->rx_indicate > 0xFF000000)
1310 cmn_err(CE_WARN, "lbq: indicate(%d) wrong: %d mac_flags %d,"
1311 "lbq_desc index %d",
1312 rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
1313 lbq_desc->index);
1314 #endif
1315 if (alloc_memory) {
1316 lbq_desc->mp =
1317 desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1318 rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1319 if (lbq_desc->mp == NULL) {
1320 rx_ring->rx_failed_lbq_allocs++;
1321 }
1322 }
1323
1324 /* The buffer came back from the stack; decrement the rx_indicate count */
1325 atomic_dec_32(&rx_ring->rx_indicate);
1326
1327 ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1328
1329 /* Rearm if possible */
1330 if ((rx_ring->lbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1331 (qlge->mac_flags == QL_MAC_STARTED)) {
1332 lbq_entry = rx_ring->lbq_dma.vaddr;
1333 lbq_entry += rx_ring->lbq_prod_idx;
1334 while (rx_ring->lbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1335 /* Get first one from free list */
1336 lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
1337
1338 *lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
1339 lbq_entry++;
1340 rx_ring->lbq_prod_idx++;
1341 if (rx_ring->lbq_prod_idx >= rx_ring->lbq_len) {
1342 rx_ring->lbq_prod_idx = 0;
1343 lbq_entry = rx_ring->lbq_dma.vaddr;
1344 }
1345
1346 /* Add to end of in use list */
1347 ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
1348 }
1349
1350 /* Update large buffer queue producer index */
1351 ql_update_lbq_prod_idx(rx_ring->qlge, rx_ring);
1352 }
1353
1354 mutex_exit(&rx_ring->lbq_lock);
1355 QL_PRINT(DBG_RX_RING, ("%s exited, lbuf_free_count %d\n",
1356 __func__, rx_ring->lbuf_free_count));
1357 }
1358 /*
1359 * rx recycle callback for large receive buffers
1360 */
1361 static void
1362 ql_release_to_lbuf_free_list(caddr_t p)
1363 {
1364 struct bq_desc *lbq_desc = (struct bq_desc *)(void *)p;
1365
1366 if (lbq_desc == NULL)
1367 return;
1368 ql_refill_lbuf_free_list(lbq_desc, B_TRUE);
1369 }
1370
1371 /*
1372 * free small buffer queue buffers
1373 */
1374 static void
1375 ql_free_sbq_buffers(struct rx_ring *rx_ring)
1376 {
1377 struct bq_desc *sbq_desc;
1378 uint32_t i;
1379 uint32_t j = rx_ring->sbq_free_head;
1380 int force_cnt = 0;
1381
1382 for (i = 0; i < rx_ring->sbuf_free_count; i++) {
1383 sbq_desc = rx_ring->sbuf_free[j];
1384 sbq_desc->free_buf = 1;
1385 j++;
1386 if (j >= rx_ring->sbq_len) {
1387 j = 0;
1388 }
1389 if (sbq_desc->mp != NULL) {
1390 freemsg(sbq_desc->mp);
1391 sbq_desc->mp = NULL;
1392 }
1393 }
1394 rx_ring->sbuf_free_count = 0;
1395
1396 j = rx_ring->sbq_use_head;
1397 for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
1398 sbq_desc = rx_ring->sbuf_in_use[j];
1399 sbq_desc->free_buf = 1;
1400 j++;
1401 if (j >= rx_ring->sbq_len) {
1402 j = 0;
1403 }
1404 if (sbq_desc->mp != NULL) {
1405 freemsg(sbq_desc->mp);
1406 sbq_desc->mp = NULL;
1407 }
1408 }
1409 rx_ring->sbuf_in_use_count = 0;
1410
1411 sbq_desc = &rx_ring->sbq_desc[0];
1412 for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1413 /*
1414 * Set flag so that the callback does not allocate a new buffer
1415 */
1416 sbq_desc->free_buf = 1;
1417 if (sbq_desc->upl_inuse != 0) {
1418 force_cnt++;
1419 }
1420 if (sbq_desc->bd_dma.dma_handle != NULL) {
1421 ql_free_phys(&sbq_desc->bd_dma.dma_handle,
1422 &sbq_desc->bd_dma.acc_handle);
1423 sbq_desc->bd_dma.dma_handle = NULL;
1424 sbq_desc->bd_dma.acc_handle = NULL;
1425 }
1426 }
1427 #ifdef QLGE_LOAD_UNLOAD
1428 cmn_err(CE_NOTE, "sbq: free %d inuse %d force %d\n",
1429 rx_ring->sbuf_free_count, rx_ring->sbuf_in_use_count, force_cnt);
1430 #endif
1431 if (rx_ring->sbuf_in_use != NULL) {
1432 kmem_free(rx_ring->sbuf_in_use, (rx_ring->sbq_len *
1433 sizeof (struct bq_desc *)));
1434 rx_ring->sbuf_in_use = NULL;
1435 }
1436
1437 if (rx_ring->sbuf_free != NULL) {
1438 kmem_free(rx_ring->sbuf_free, (rx_ring->sbq_len *
1439 sizeof (struct bq_desc *)));
1440 rx_ring->sbuf_free = NULL;
1441 }
1442 }
1443
1444 /* Allocate small buffers */
1445 static int
1446 ql_alloc_sbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1447 {
1448 struct bq_desc *sbq_desc;
1449 int i;
1450 ddi_dma_cookie_t dma_cookie;
1451
1452 rx_ring->sbq_use_head = 0;
1453 rx_ring->sbq_use_tail = 0;
1454 rx_ring->sbuf_in_use_count = 0;
1455 rx_ring->sbq_free_head = 0;
1456 rx_ring->sbq_free_tail = 0;
1457 rx_ring->sbuf_free_count = 0;
1458 rx_ring->sbuf_free = kmem_zalloc(rx_ring->sbq_len *
1459 sizeof (struct bq_desc *), KM_NOSLEEP);
1460 if (rx_ring->sbuf_free == NULL) {
1461 cmn_err(CE_WARN,
1462 "!%s: sbuf_free_list alloc: failed",
1463 __func__);
1464 goto alloc_sbuf_err;
1465 }
1466
1467 rx_ring->sbuf_in_use = kmem_zalloc(rx_ring->sbq_len *
1468 sizeof (struct bq_desc *), KM_NOSLEEP);
1469 if (rx_ring->sbuf_in_use == NULL) {
1470 cmn_err(CE_WARN,
1471 "!%s: sbuf_inuse_list alloc: failed",
1472 __func__);
1473 goto alloc_sbuf_err;
1474 }
1475
1476 sbq_desc = &rx_ring->sbq_desc[0];
1477
1478 for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1479 /* Allocate buffer */
1480 if (ql_alloc_phys_rbuf(qlge->dip, &sbq_desc->bd_dma.dma_handle,
1481 &ql_buf_acc_attr,
1482 DDI_DMA_READ | DDI_DMA_STREAMING,
1483 &sbq_desc->bd_dma.acc_handle,
1484 (size_t)rx_ring->sbq_buf_size, /* mem size */
1485 (size_t)0, /* default alignment */
1486 (caddr_t *)&sbq_desc->bd_dma.vaddr,
1487 &dma_cookie) != 0) {
1488 cmn_err(CE_WARN,
1489 "!%s: ddi_dma_alloc_handle: failed",
1490 __func__);
1491 goto alloc_sbuf_err;
1492 }
1493
1494 /* Set context for Return buffer callback */
1495 sbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1496 sbq_desc->rx_recycle.free_func = ql_release_to_sbuf_free_list;
1497 sbq_desc->rx_recycle.free_arg = (caddr_t)sbq_desc;
1498 sbq_desc->rx_ring = rx_ring;
1499 sbq_desc->upl_inuse = 0;
1500 sbq_desc->free_buf = 0;
1501
1502 sbq_desc->mp =
1503 desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1504 rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1505 if (sbq_desc->mp == NULL) {
1506 cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1507 goto alloc_sbuf_err;
1508 }
1509 ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1510 }
1511
1512 return (DDI_SUCCESS);
1513
1514 alloc_sbuf_err:
1515 ql_free_sbq_buffers(rx_ring);
1516 return (DDI_FAILURE);
1517 }
1518
1519 static void
1520 ql_free_lbq_buffers(struct rx_ring *rx_ring)
1521 {
1522 struct bq_desc *lbq_desc;
1523 uint32_t i, j;
1524 int force_cnt = 0;
1525
1526 j = rx_ring->lbq_free_head;
1527 for (i = 0; i < rx_ring->lbuf_free_count; i++) {
1528 lbq_desc = rx_ring->lbuf_free[j];
1529 lbq_desc->free_buf = 1;
1530 j++;
1531 if (j >= rx_ring->lbq_len)
1532 j = 0;
1533 if (lbq_desc->mp != NULL) {
1534 freemsg(lbq_desc->mp);
1535 lbq_desc->mp = NULL;
1536 }
1537 }
1538 rx_ring->lbuf_free_count = 0;
1539
1540 j = rx_ring->lbq_use_head;
1541 for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
1542 lbq_desc = rx_ring->lbuf_in_use[j];
1543 lbq_desc->free_buf = 1;
1544 j++;
1545 if (j >= rx_ring->lbq_len) {
1546 j = 0;
1547 }
1548 if (lbq_desc->mp != NULL) {
1549 freemsg(lbq_desc->mp);
1550 lbq_desc->mp = NULL;
1551 }
1552 }
1553 rx_ring->lbuf_in_use_count = 0;
1554
1555 lbq_desc = &rx_ring->lbq_desc[0];
1556 for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1557 /* Set flag so that callback will not allocate a new buffer */
1558 lbq_desc->free_buf = 1;
1559 if (lbq_desc->upl_inuse != 0) {
1560 force_cnt++;
1561 }
1562 if (lbq_desc->bd_dma.dma_handle != NULL) {
1563 ql_free_phys(&lbq_desc->bd_dma.dma_handle,
1564 &lbq_desc->bd_dma.acc_handle);
1565 lbq_desc->bd_dma.dma_handle = NULL;
1566 lbq_desc->bd_dma.acc_handle = NULL;
1567 }
1568 }
1569 #ifdef QLGE_LOAD_UNLOAD
1570 if (force_cnt) {
1571 cmn_err(CE_WARN, "lbq: free %d inuse %d force %d",
1572 rx_ring->lbuf_free_count, rx_ring->lbuf_in_use_count,
1573 force_cnt);
1574 }
1575 #endif
1576 if (rx_ring->lbuf_in_use != NULL) {
1577 kmem_free(rx_ring->lbuf_in_use, (rx_ring->lbq_len *
1578 sizeof (struct bq_desc *)));
1579 rx_ring->lbuf_in_use = NULL;
1580 }
1581
1582 if (rx_ring->lbuf_free != NULL) {
1583 kmem_free(rx_ring->lbuf_free, (rx_ring->lbq_len *
1584 sizeof (struct bq_desc *)));
1585 rx_ring->lbuf_free = NULL;
1586 }
1587 }
1588
1589 /* Allocate large buffers */
1590 static int
1591 ql_alloc_lbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1592 {
1593 struct bq_desc *lbq_desc;
1594 ddi_dma_cookie_t dma_cookie;
1595 int i;
1596 uint32_t lbq_buf_size;
1597
1598 rx_ring->lbq_use_head = 0;
1599 rx_ring->lbq_use_tail = 0;
1600 rx_ring->lbuf_in_use_count = 0;
1601 rx_ring->lbq_free_head = 0;
1602 rx_ring->lbq_free_tail = 0;
1603 rx_ring->lbuf_free_count = 0;
1604 rx_ring->lbuf_free = kmem_zalloc(rx_ring->lbq_len *
1605 sizeof (struct bq_desc *), KM_NOSLEEP);
1606 if (rx_ring->lbuf_free == NULL) {
1607 cmn_err(CE_WARN,
1608 "!%s: lbuf_free_list alloc: failed",
1609 __func__);
1610 goto alloc_lbuf_err;
1611 }
1612
1613 rx_ring->lbuf_in_use = kmem_zalloc(rx_ring->lbq_len *
1614 sizeof (struct bq_desc *), KM_NOSLEEP);
1615
1616 if (rx_ring->lbuf_in_use == NULL) {
1617 cmn_err(CE_WARN,
1618 "!%s: lbuf_inuse_list alloc: failed",
1619 __func__);
1620 goto alloc_lbuf_err;
1621 }
1622
1623 lbq_buf_size = (qlge->mtu == ETHERMTU) ?
1624 LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE;
1625
1626 lbq_desc = &rx_ring->lbq_desc[0];
1627 for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1628 rx_ring->lbq_buf_size = lbq_buf_size;
1629 /* Allocate buffer */
1630 if (ql_alloc_phys_rbuf(qlge->dip, &lbq_desc->bd_dma.dma_handle,
1631 &ql_buf_acc_attr,
1632 DDI_DMA_READ | DDI_DMA_STREAMING,
1633 &lbq_desc->bd_dma.acc_handle,
1634 (size_t)rx_ring->lbq_buf_size, /* mem size */
1635 (size_t)0, /* default alignment */
1636 (caddr_t *)&lbq_desc->bd_dma.vaddr,
1637 &dma_cookie) != 0) {
1638 cmn_err(CE_WARN,
1639 "!%s: ddi_dma_alloc_handle: failed",
1640 __func__);
1641 goto alloc_lbuf_err;
1642 }
1643
1644 /* Set context for Return buffer callback */
1645 lbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1646 lbq_desc->rx_recycle.free_func = ql_release_to_lbuf_free_list;
1647 lbq_desc->rx_recycle.free_arg = (caddr_t)lbq_desc;
1648 lbq_desc->rx_ring = rx_ring;
1649 lbq_desc->upl_inuse = 0;
1650 lbq_desc->free_buf = 0;
1651
1652 lbq_desc->mp =
1653 desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1654 rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1655 if (lbq_desc->mp == NULL) {
1656 cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1657 goto alloc_lbuf_err;
1658 }
1659 ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1660 } /* For all large buffers */
1661
1662 return (DDI_SUCCESS);
1663
1664 alloc_lbuf_err:
1665 ql_free_lbq_buffers(rx_ring);
1666 return (DDI_FAILURE);
1667 }
1668
1669 /*
1670 * Free rx buffers
1671 */
1672 static void
1673 ql_free_rx_buffers(qlge_t *qlge)
1674 {
1675 int i;
1676 struct rx_ring *rx_ring;
1677
1678 for (i = 0; i < qlge->rx_ring_count; i++) {
1679 rx_ring = &qlge->rx_ring[i];
1680 if (rx_ring->type != TX_Q) {
1681 ql_free_lbq_buffers(rx_ring);
1682 ql_free_sbq_buffers(rx_ring);
1683 }
1684 }
1685 }
1686
1687 /*
1688 * Allocate rx buffers
1689 */
1690 static int
1691 ql_alloc_rx_buffers(qlge_t *qlge)
1692 {
1693 struct rx_ring *rx_ring;
1694 int i;
1695
1696 for (i = 0; i < qlge->rx_ring_count; i++) {
1697 rx_ring = &qlge->rx_ring[i];
1698 if (rx_ring->type != TX_Q) {
1699 if (ql_alloc_sbufs(qlge, rx_ring) != DDI_SUCCESS)
1700 goto alloc_err;
1701 if (ql_alloc_lbufs(qlge, rx_ring) != DDI_SUCCESS)
1702 goto alloc_err;
1703 }
1704 }
1705 #ifdef QLGE_TRACK_BUFFER_USAGE
1706 for (i = 0; i < qlge->rx_ring_count; i++) {
1707 if (qlge->rx_ring[i].type == RX_Q) {
1708 qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
1709 qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
1710 }
1711 qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
1712 }
1713 #endif
1714 return (DDI_SUCCESS);
1715
1716 alloc_err:
1717 ql_free_rx_buffers(qlge);
1718 return (DDI_FAILURE);
1719 }
1720
1721 /*
1722 * Initialize large buffer queue ring
1723 */
1724 static void
1725 ql_init_lbq_ring(struct rx_ring *rx_ring)
1726 {
1727 uint16_t i;
1728 struct bq_desc *lbq_desc;
1729
1730 bzero(rx_ring->lbq_desc, rx_ring->lbq_len * sizeof (struct bq_desc));
1731 for (i = 0; i < rx_ring->lbq_len; i++) {
1732 lbq_desc = &rx_ring->lbq_desc[i];
1733 lbq_desc->index = i;
1734 }
1735 }
1736
1737 /*
1738 * Initialize small buffer queue ring
1739 */
1740 static void
1741 ql_init_sbq_ring(struct rx_ring *rx_ring)
1742 {
1743 uint16_t i;
1744 struct bq_desc *sbq_desc;
1745
1746 bzero(rx_ring->sbq_desc, rx_ring->sbq_len * sizeof (struct bq_desc));
1747 for (i = 0; i < rx_ring->sbq_len; i++) {
1748 sbq_desc = &rx_ring->sbq_desc[i];
1749 sbq_desc->index = i;
1750 }
1751 }
1752
1753 /*
1754 * Calculate the pseudo-header checksum when the hardware cannot do it
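 *
 * Layout note (standard IPv4 header offsets, summarizing the code
 * below): the sum covers the source/destination addresses
 * (buf[12..19]), the protocol byte (buf[9]) and the TCP/UDP length,
 * i.e. the IP total length (buf[2..3]) minus the header length
 * (4 * (buf[0] & 0xF)); the folded 16-bit result is written at the
 * transport-layer checksum offset.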
1755 */
1756 static void
1757 ql_pseudo_cksum(uint8_t *buf)
1758 {
1759 uint32_t cksum;
1760 uint16_t iphl;
1761 uint16_t proto;
1762
1763 iphl = (uint16_t)(4 * (buf[0] & 0xF));
1764 cksum = (((uint16_t)buf[2])<<8) + buf[3] - iphl;
1765 cksum += proto = buf[9];
1766 cksum += (((uint16_t)buf[12])<<8) + buf[13];
1767 cksum += (((uint16_t)buf[14])<<8) + buf[15];
1768 cksum += (((uint16_t)buf[16])<<8) + buf[17];
1769 cksum += (((uint16_t)buf[18])<<8) + buf[19];
1770 cksum = (cksum>>16) + (cksum & 0xFFFF);
1771 cksum = (cksum>>16) + (cksum & 0xFFFF);
1772
1773 /*
1774 * Point it to the TCP/UDP header, and
1775 * update the checksum field.
1776 */
1777 buf += iphl + ((proto == IPPROTO_TCP) ?
1778 TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
1779
1780 *(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
1781
1782 }
1783
1784 /*
1785 * Transmit an incoming packet.
1786 */
1787 mblk_t *
1788 ql_ring_tx(void *arg, mblk_t *mp)
1789 {
1790 struct tx_ring *tx_ring = (struct tx_ring *)arg;
1791 qlge_t *qlge = tx_ring->qlge;
1792 mblk_t *next;
1793 int rval;
1794 uint32_t tx_count = 0;
1795
1796 if (qlge->port_link_state == LS_DOWN) {
1797 /* can not send message while link is down */
1798 mblk_t *tp;
1799
1800 while (mp != NULL) {
1801 tp = mp->b_next;
1802 mp->b_next = NULL;
1803 freemsg(mp);
1804 mp = tp;
1805 }
1806 goto exit;
1807 }
1808
1809 mutex_enter(&tx_ring->tx_lock);
1810 /* if the mac is not started, the driver is not ready and cannot send */
1811 if (tx_ring->mac_flags != QL_MAC_STARTED) {
1812 cmn_err(CE_WARN, "%s(%d) ring not started, mode %d,"
1813 " returning packets",
1814 __func__, qlge->instance, tx_ring->mac_flags);
1815 mutex_exit(&tx_ring->tx_lock);
1816 goto exit;
1817 }
1818
1819 /* we must try to send all */
1820 while (mp != NULL) {
1821 /*
1822 * if number of available slots is less than a threshold,
1823 * then quit
1824 */
1825 if (tx_ring->tx_free_count <= TX_STOP_THRESHOLD) {
1826 tx_ring->queue_stopped = 1;
1827 rval = DDI_FAILURE;
1828 #ifdef QLGE_LOAD_UNLOAD
1829 cmn_err(CE_WARN, "%s(%d) no resources",
1830 __func__, qlge->instance);
1831 #endif
1832 tx_ring->defer++;
1833 /*
1834 * If we return the buffer back we are expected to call
1835 * mac_tx_ring_update() when resources are available
1836 */
1837 break;
1838 }
1839
1840 next = mp->b_next;
1841 mp->b_next = NULL;
1842
1843 rval = ql_send_common(tx_ring, mp);
1844
1845 if (rval != DDI_SUCCESS) {
1846 mp->b_next = next;
1847 break;
1848 }
1849 tx_count++;
1850 mp = next;
1851 }
1852
1853 /*
1854 * After all msg blocks are mapped or copied to tx buffer,
1855 * trigger the hardware to send!
1856 */
1857 if (tx_count > 0) {
1858 ql_write_doorbell_reg(tx_ring->qlge, tx_ring->prod_idx_db_reg,
1859 tx_ring->prod_idx);
1860 }
1861
1862 mutex_exit(&tx_ring->tx_lock);
1863 exit:
1864 return (mp);
1865 }
1866
1867
1868 /*
1869 * This function builds an mblk list for the given inbound
1870 * completion.
1871 */
1872
1873 static mblk_t *
1874 ql_build_rx_mp(qlge_t *qlge, struct rx_ring *rx_ring,
1875 struct ib_mac_iocb_rsp *ib_mac_rsp)
1876 {
1877 mblk_t *mp = NULL;
1878 mblk_t *mp1 = NULL; /* packet header */
1879 mblk_t *mp2 = NULL; /* packet content */
1880 struct bq_desc *lbq_desc;
1881 struct bq_desc *sbq_desc;
1882 uint32_t err_flag = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK);
1883 uint32_t payload_len = le32_to_cpu(ib_mac_rsp->data_len);
1884 uint32_t header_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1885 uint32_t pkt_len = payload_len + header_len;
1886 uint32_t done;
1887 uint64_t *curr_ial_ptr;
1888 uint32_t ial_data_addr_low;
1889 uint32_t actual_data_addr_low;
1890 mblk_t *mp_ial = NULL; /* ial chained packets */
1891 uint32_t size;
1892 uint32_t cp_offset;
1893 boolean_t rx_copy = B_FALSE;
1894 mblk_t *tp = NULL;
1895
1896 /*
1897 * Check if error flags are set
1898 */
1899 if (err_flag != 0) {
1900 if ((err_flag & IB_MAC_IOCB_RSP_ERR_OVERSIZE) != 0)
1901 rx_ring->frame_too_long++;
1902 if ((err_flag & IB_MAC_IOCB_RSP_ERR_UNDERSIZE) != 0)
1903 rx_ring->frame_too_short++;
1904 if ((err_flag & IB_MAC_IOCB_RSP_ERR_CRC) != 0)
1905 rx_ring->fcs_err++;
1906 #ifdef QLGE_LOAD_UNLOAD
1907 cmn_err(CE_WARN, "bad packet, type 0x%x", err_flag);
1908 #endif
1909 QL_DUMP(DBG_RX, "qlge_ring_rx: bad response iocb dump\n",
1910 (uint8_t *)ib_mac_rsp, 8,
1911 (size_t)sizeof (struct ib_mac_iocb_rsp));
1912 }
1913
1914 /* header should not be in large buffer */
1915 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL) {
1916 cmn_err(CE_WARN, "header in large buffer or invalid!");
1917 err_flag |= 1;
1918 }
1919 /* if the whole packet is bigger than the rx buffer size */
1920 if (pkt_len > qlge->max_frame_size) {
1921 cmn_err(CE_WARN, "ql_build_rx_mp: frame too long (%d)!", pkt_len);
1922 err_flag |= 1;
1923 }
1924 if (qlge->rx_copy ||
1925 (rx_ring->sbuf_in_use_count <= qlge->rx_copy_threshold) ||
1926 (rx_ring->lbuf_in_use_count <= qlge->rx_copy_threshold)) {
1927 rx_copy = B_TRUE;
1928 }
1929
1930 /* if using rx copy mode, we need to allocate a big enough buffer */
1931 if (rx_copy) {
1932 qlge->stats.norcvbuf++;
1933 tp = allocb(payload_len + header_len + qlge->ip_hdr_offset,
1934 BPRI_MED);
1935 if (tp == NULL) {
1936 cmn_err(CE_WARN, "rx copy failed to allocate memory");
1937 } else {
1938 tp->b_rptr += qlge->ip_hdr_offset;
1939 }
1940 }
1941 /*
1942 * Handle the header buffer if present.
1943 * the packet header must be valid and saved in one small buffer;
1944 * broadcast/multicast packet headers are not split
1945 */
1946 if ((ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) &&
1947 (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1948 QL_PRINT(DBG_RX, ("Header of %d bytes in small buffer.\n",
1949 header_len));
1950 /* Sync access */
1951 sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
1952
1953 ASSERT(sbq_desc != NULL);
1954
1955 /*
1956 * Validate addresses from the ASIC with the
1957 * expected sbuf address
1958 */
1959 if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
1960 != ib_mac_rsp->hdr_addr) {
1961 /* Small buffer address mismatch */
1962 cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
1963 " in wrong small buffer",
1964 __func__, qlge->instance, rx_ring->cq_id);
1965 goto fatal_error;
1966 }
1967 /* get this packet */
1968 mp1 = sbq_desc->mp;
1969 /* Flush DMA'd data */
1970 (void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
1971 0, header_len, DDI_DMA_SYNC_FORKERNEL);
1972
1973 if ((err_flag != 0) || (mp1 == NULL)) {
1974 /* failed on this packet, put it back for re-arming */
1975 #ifdef QLGE_LOAD_UNLOAD
1976 cmn_err(CE_WARN, "get header from small buffer fail");
1977 #endif
1978 ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1979 mp1 = NULL;
1980 } else if (rx_copy) {
1981 if (tp != NULL) {
1982 bcopy(sbq_desc->bd_dma.vaddr, tp->b_rptr,
1983 header_len);
1984 }
1985 ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1986 mp1 = NULL;
1987 } else {
1988 if ((qlge->ip_hdr_offset != 0) &&
1989 (header_len < SMALL_BUFFER_SIZE)) {
1990 /*
1991 * copy the entire header to a 2-byte boundary
1992 * address for 8100 adapters so that the IP
1993 * header can land on a 4-byte boundary address
1994 */
1995 bcopy(mp1->b_rptr,
1996 (mp1->b_rptr + SMALL_BUFFER_SIZE +
1997 qlge->ip_hdr_offset),
1998 header_len);
1999 mp1->b_rptr += SMALL_BUFFER_SIZE +
2000 qlge->ip_hdr_offset;
2001 }
2002
2003 /*
2004 * Adjust the mp payload_len to match
2005 * the packet header payload_len
2006 */
2007 mp1->b_wptr = mp1->b_rptr + header_len;
2008 mp1->b_next = mp1->b_cont = NULL;
2009 QL_DUMP(DBG_RX, "\t RX packet header dump:\n",
2010 (uint8_t *)mp1->b_rptr, 8, header_len);
2011 }
2012 }
2013
2014 /*
2015 * the packet data (or the whole packet) can be in a small buffer
2016 * or in one or more large buffers
2017 */
2018 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2019 /*
2020 * The data is in a single small buffer.
2021 */
2022 sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2023
2024 ASSERT(sbq_desc != NULL);
2025
2026 QL_PRINT(DBG_RX,
2027 ("%d bytes in a single small buffer, sbq_desc = %p, "
2028 "sbq_desc->bd_dma.dma_addr = %x,"
2029 " ib_mac_rsp->data_addr = %x, mp = %p\n",
2030 payload_len, sbq_desc, sbq_desc->bd_dma.dma_addr,
2031 ib_mac_rsp->data_addr, sbq_desc->mp));
2032
2033 /*
2034 * Validate addresses from the ASIC with the
2035 * expected sbuf address
2036 */
2037 if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
2038 != ib_mac_rsp->data_addr) {
2039 /* Small buffer address mismatch */
2040 cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
2041 " in wrong small buffer",
2042 __func__, qlge->instance, rx_ring->cq_id);
2043 goto fatal_error;
2044 }
2045 /* get this packet */
2046 mp2 = sbq_desc->mp;
2047 (void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
2048 0, payload_len, DDI_DMA_SYNC_FORKERNEL);
2049 if ((err_flag != 0) || (mp2 == NULL)) {
2050 #ifdef QLGE_LOAD_UNLOAD
2051 /* failed on this packet, put it back for re-arming */
2052 cmn_err(CE_WARN, "ignore bad data from small buffer");
2053 #endif
2054 ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2055 mp2 = NULL;
2056 } else if (rx_copy) {
2057 if (tp != NULL) {
2058 bcopy(sbq_desc->bd_dma.vaddr,
2059 tp->b_rptr + header_len, payload_len);
2060 tp->b_wptr =
2061 tp->b_rptr + header_len + payload_len;
2062 }
2063 ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2064 mp2 = NULL;
2065 } else {
2066 /* Adjust the buffer length to match the payload_len */
2067 mp2->b_wptr = mp2->b_rptr + payload_len;
2068 mp2->b_next = mp2->b_cont = NULL;
2069 /* Flush DMA'd data */
2070 QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
2071 (uint8_t *)mp2->b_rptr, 8, payload_len);
2072 /*
2073 * if the payload is small enough, copy it to
2074 * the end of the packet header
2075 */
2076 if ((mp1 != NULL) &&
2077 (payload_len <= qlge->payload_copy_thresh) &&
2078 (pkt_len <
2079 (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2080 bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2081 mp1->b_wptr += payload_len;
2082 freemsg(mp2);
2083 mp2 = NULL;
2084 }
2085 }
2086 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2087 /*
2088 * The data is in a single large buffer.
2089 */
2090 lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2091
2092 QL_PRINT(DBG_RX,
2093 ("%d bytes in a single large buffer, lbq_desc = %p, "
2094 "lbq_desc->bd_dma.dma_addr = %x,"
2095 " ib_mac_rsp->data_addr = %x, mp = %p\n",
2096 payload_len, lbq_desc, lbq_desc->bd_dma.dma_addr,
2097 ib_mac_rsp->data_addr, lbq_desc->mp));
2098
2099 ASSERT(lbq_desc != NULL);
2100
2101 /*
2102 * Validate addresses from the ASIC with
2103 * the expected lbuf address
2104 */
2105 if (cpu_to_le64(lbq_desc->bd_dma.dma_addr)
2106 != ib_mac_rsp->data_addr) {
2107 /* Large buffer address mismatch */
2108 cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
2109 " in wrong large buffer",
2110 __func__, qlge->instance, rx_ring->cq_id);
2111 goto fatal_error;
2112 }
2113 mp2 = lbq_desc->mp;
2114 /* Flush DMA'd data */
2115 (void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2116 0, payload_len, DDI_DMA_SYNC_FORKERNEL);
2117 if ((err_flag != 0) || (mp2 == NULL)) {
2118 #ifdef QLGE_LOAD_UNLOAD
2119 cmn_err(CE_WARN, "ignore bad data from large buffer");
2120 #endif
2121 /* failed on this packet, put it back for re-arming */
2122 ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2123 mp2 = NULL;
2124 } else if (rx_copy) {
2125 if (tp != NULL) {
2126 bcopy(lbq_desc->bd_dma.vaddr,
2127 tp->b_rptr + header_len, payload_len);
2128 tp->b_wptr =
2129 tp->b_rptr + header_len + payload_len;
2130 }
2131 ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2132 mp2 = NULL;
2133 } else {
2134 /*
2135 * Adjust the buffer length to match
2136 * the packet payload_len
2137 */
2138 mp2->b_wptr = mp2->b_rptr + payload_len;
2139 mp2->b_next = mp2->b_cont = NULL;
2140 QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
2141 (uint8_t *)mp2->b_rptr, 8, payload_len);
2142 /*
2143 * if the payload is small enough, copy it to
2144 * the end of the packet header
2145 */
2146 if ((mp1 != NULL) &&
2147 (payload_len <= qlge->payload_copy_thresh) &&
2148 (pkt_len <
2149 (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2150 bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2151 mp1->b_wptr += payload_len;
2152 freemsg(mp2);
2153 mp2 = NULL;
2154 }
2155 }
2156 } else if (payload_len) { /* ial case */
2157 /*
2158 * payload present but in neither a single small nor a single
2159 * large buffer, so it is saved in an IAL
2160 */
2161 #ifdef QLGE_LOAD_UNLOAD
2162 cmn_err(CE_NOTE, "packet chained in IAL \n");
2163 #endif
2164 /* lrg buf addresses are saved in one small buffer */
2165 sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2166 curr_ial_ptr = (uint64_t *)sbq_desc->bd_dma.vaddr;
2167 done = 0;
2168 cp_offset = 0;
2169
2170 while (!done) {
2171 ial_data_addr_low =
2172 (uint32_t)(le64_to_cpu(*curr_ial_ptr) &
2173 0xFFFFFFFE);
2174 /* check if this is the last packet fragment */
2175 done = (uint32_t)(le64_to_cpu(*curr_ial_ptr) & 1);
2176 curr_ial_ptr++;
2177 /*
2178 * The data is in one or several large buffer(s).
2179 */
2180 lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2181 actual_data_addr_low =
2182 (uint32_t)(lbq_desc->bd_dma.dma_addr &
2183 0xFFFFFFFE);
2184 if (ial_data_addr_low != actual_data_addr_low) {
2185 cmn_err(CE_WARN,
2186 "packet saved in wrong ial lrg buffer"
2187 " expected %x, actual %lx",
2188 ial_data_addr_low,
2189 (uintptr_t)lbq_desc->bd_dma.dma_addr);
2190 goto fatal_error;
2191 }
2192
2193 size = (payload_len < rx_ring->lbq_buf_size)?
2194 payload_len : rx_ring->lbq_buf_size;
2195 payload_len -= size;
2196 mp2 = lbq_desc->mp;
2197 if ((err_flag != 0) || (mp2 == NULL)) {
2198 #ifdef QLGE_LOAD_UNLOAD
2199 cmn_err(CE_WARN,
2200 "ignore bad data from large buffer");
2201 #endif
2202 ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2203 mp2 = NULL;
2204 } else if (rx_copy) {
2205 if (tp != NULL) {
2206 (void) ddi_dma_sync(
2207 lbq_desc->bd_dma.dma_handle,
2208 0, size, DDI_DMA_SYNC_FORKERNEL);
2209 bcopy(lbq_desc->bd_dma.vaddr,
2210 tp->b_rptr + header_len + cp_offset,
2211 size);
2212 tp->b_wptr =
2213 tp->b_rptr + size + cp_offset +
2214 header_len;
2215 cp_offset += size;
2216 }
2217 ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2218 mp2 = NULL;
2219 } else {
2220 if (mp_ial == NULL) {
2221 mp_ial = mp2;
2222 } else {
2223 linkb(mp_ial, mp2);
2224 }
2225
2226 mp2->b_next = NULL;
2227 mp2->b_cont = NULL;
2228 mp2->b_wptr = mp2->b_rptr + size;
2229 /* Flush DMA'd data */
2230 (void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2231 0, size, DDI_DMA_SYNC_FORKERNEL);
2232 QL_PRINT(DBG_RX, ("ial %d payload received \n",
2233 size));
2234 QL_DUMP(DBG_RX, "\t Mac data dump:\n",
2235 (uint8_t *)mp2->b_rptr, 8, size);
2236 }
2237 }
2238 if (err_flag != 0) {
2239 #ifdef QLGE_LOAD_UNLOAD
2240 /* failed on this packet, put it back for re-arming */
2241 cmn_err(CE_WARN, "ignore bad data from small buffer");
2242 #endif
2243 ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2244 } else {
2245 mp2 = mp_ial;
2246 freemsg(sbq_desc->mp);
2247 }
2248 }
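
/*
 * Note on the IAL layout decoded above (inferred from the masks used):
 * each 64-bit little-endian entry in the small buffer carries the DMA
 * address of one large buffer, with bit 0 serving as the "last
 * fragment" flag:
 *
 *	addr = (uint32_t)(le64_to_cpu(*entry) & 0xFFFFFFFE);
 *	last = (uint32_t)(le64_to_cpu(*entry) & 1);
 */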
2249 /*
2250 * if the packet header was not split out, send mp2 upstream by itself;
2251 * otherwise concatenate the data block, mp2, to the tail of the header block, mp1
2252 */
2253 if (!err_flag) {
2254 if (rx_copy) {
2255 if (tp != NULL) {
2256 tp->b_next = NULL;
2257 tp->b_cont = NULL;
2258 tp->b_wptr = tp->b_rptr +
2259 header_len + payload_len;
2260 }
2261 mp = tp;
2262 } else {
2263 if (mp1) {
2264 if (mp2) {
2265 QL_PRINT(DBG_RX,
2266 ("packet in mp1 and mp2\n"));
2267 /* mp1->b_cont = mp2; */
2268 linkb(mp1, mp2);
2269 mp = mp1;
2270 } else {
2271 QL_PRINT(DBG_RX,
2272 ("packet in mp1 only\n"));
2273 mp = mp1;
2274 }
2275 } else if (mp2) {
2276 QL_PRINT(DBG_RX, ("packet in mp2 only\n"));
2277 mp = mp2;
2278 }
2279 }
2280 }
2281 return (mp);
2282
2283 fatal_error:
2284 /* fatal Error! */
2285 if (qlge->fm_enable) {
2286 ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2287 ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2288 atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2289 }
2290 if (tp) {
2291 freemsg(tp);
2292 }
2293
2294 /* *mp->b_wptr = 0; */
2295 ql_wake_asic_reset_soft_intr(qlge);
2296 return (NULL);
2297
2298 }
2299
2300 /*
2301 * Bump completion queue consumer index.
2302 */
2303 static void
2304 ql_update_cq(struct rx_ring *rx_ring)
2305 {
2306 rx_ring->cnsmr_idx++;
2307 rx_ring->curr_entry++;
2308 if (rx_ring->cnsmr_idx >= rx_ring->cq_len) {
2309 rx_ring->cnsmr_idx = 0;
2310 rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
2311 }
2312 }
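
/*
 * Sketch of the wrap behaviour above: cnsmr_idx and curr_entry advance
 * in lock step and both wrap when the end of the completion queue is
 * reached, e.g. with a cq_len of 256:
 *
 *	0, 1, ..., 255, 0, 1, ...   (curr_entry returns to cq_dma.vaddr)
 */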
2313
2314 /*
2315 * Update completion queue consumer index.
2316 */
2317 static void
2318 ql_write_cq_idx(struct rx_ring *rx_ring)
2319 {
2320 qlge_t *qlge = rx_ring->qlge;
2321
2322 ql_write_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg,
2323 rx_ring->cnsmr_idx);
2324 }
2325
2326 /*
2327 * Processes a SYS-Chip Event Notification Completion Event.
2328 * The incoming notification describes a link up/down transition
2329 * or one of several error conditions.
2330 */
2331 static void
2332 ql_process_chip_ae_intr(qlge_t *qlge,
2333 struct ib_sys_event_iocb_rsp *ib_sys_event_rsp_ptr)
2334 {
2335 uint8_t eventType = ib_sys_event_rsp_ptr->event_type;
2336 uint32_t soft_req = 0;
2337
2338 switch (eventType) {
2339 case SYS_EVENT_PORT_LINK_UP: /* 0x0h */
2340 QL_PRINT(DBG_MBX, ("Port Link Up\n"));
2341 break;
2342
2343 case SYS_EVENT_PORT_LINK_DOWN: /* 0x1h */
2344 QL_PRINT(DBG_MBX, ("Port Link Down\n"));
2345 break;
2346
2347 case SYS_EVENT_MULTIPLE_CAM_HITS : /* 0x6h */
2348 cmn_err(CE_WARN, "A multiple CAM hit lookup error "
2349 "occurred");
2350 soft_req |= NEED_HW_RESET;
2351 break;
2352
2353 case SYS_EVENT_SOFT_ECC_ERR: /* 0x7h */
2354 cmn_err(CE_WARN, "Soft ECC error detected");
2355 soft_req |= NEED_HW_RESET;
2356 break;
2357
2358 case SYS_EVENT_MGMT_FATAL_ERR: /* 0x8h */
2359 cmn_err(CE_WARN, "Management (MPI) Processor fatal"
2360 " error occurred");
2361 soft_req |= NEED_MPI_RESET;
2362 break;
2363
2364 case SYS_EVENT_MAC_INTERRUPT: /* 0x9h */
2365 QL_PRINT(DBG_MBX, ("MAC Interrupt"));
2366 break;
2367
2368 case SYS_EVENT_PCI_ERR_READING_SML_LRG_BUF: /* 0x40h */
2369 cmn_err(CE_WARN, "PCI Error reading small/large "
2370 "buffers occurred");
2371 soft_req |= NEED_HW_RESET;
2372 break;
2373
2374 default:
2375 QL_PRINT(DBG_RX, ("%s(%d) unknown Sys Event: "
2376 "type 0x%x occurred",
2377 __func__, qlge->instance, eventType));
2378 break;
2379 }
2380
2381 if ((soft_req & NEED_MPI_RESET) != 0) {
2382 ql_wake_mpi_reset_soft_intr(qlge);
2383 if (qlge->fm_enable) {
2384 ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2385 ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2386 }
2387 } else if ((soft_req & NEED_HW_RESET) != 0) {
2388 ql_wake_asic_reset_soft_intr(qlge);
2389 if (qlge->fm_enable) {
2390 ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2391 ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2392 }
2393 }
2394 }
2395
2396 /*
2397 * set received packet checksum flag
2398 */
2399 void
2400 ql_set_rx_cksum(mblk_t *mp, struct ib_mac_iocb_rsp *net_rsp)
2401 {
2402 uint32_t flags;
2403
2404 /* Not TCP or UDP packet? nothing more to do */
2405 if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) == 0) &&
2406 ((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) == 0))
2407 return;
2408
2409 /* No CKO support for IPv6 */
2410 if ((net_rsp->flags3 & IB_MAC_IOCB_RSP_V6) != 0)
2411 return;
2412
2413 /*
2414 * If checksum error, don't set flags; stack will calculate
2415 * checksum, detect the error and update statistics
2416 */
2417 if (((net_rsp->flags1 & IB_MAC_IOCB_RSP_TE) != 0) ||
2418 ((net_rsp->flags1 & IB_MAC_IOCB_RSP_IE) != 0))
2419 return;
2420
2421 /* TCP or UDP packet and checksum valid */
2422 if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) != 0) &&
2423 ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
2424 flags = HCK_FULLCKSUM_OK;
2425 mac_hcksum_set(mp, 0, 0, 0, 0, flags);
2426 }
2427 if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) != 0) &&
2428 ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
2429 flags = HCK_FULLCKSUM_OK;
2430 mac_hcksum_set(mp, 0, 0, 0, 0, flags);
2431 }
2432 }
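
/*
 * Note: HCK_FULLCKSUM_OK reported via mac_hcksum_set() tells the stack
 * that the hardware has already verified the full TCP/UDP checksum, so
 * no software checksum pass is needed for this mblk.
 */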
2433
2434 /*
2435 * This function walks the h/w descriptors of one specified rx ring and
2436 * receives the data when a descriptor indicates it is ready.
2437 * It returns a chain of mblks containing the received data, to be
2438 * passed up to mac_rx_ring().
2439 */
2440 mblk_t *
2441 ql_ring_rx(struct rx_ring *rx_ring, int poll_bytes)
2442 {
2443 qlge_t *qlge = rx_ring->qlge;
2444 uint32_t prod = ql_read_sh_reg(qlge, rx_ring);
2445 struct ib_mac_iocb_rsp *net_rsp;
2446 mblk_t *mp;
2447 mblk_t *mblk_head;
2448 mblk_t **mblk_tail;
2449 uint32_t received_bytes = 0;
2450 uint32_t length;
2451 #ifdef QLGE_PERFORMANCE
2452 uint32_t pkt_ct = 0;
2453 #endif
2454
2455 #ifdef QLGE_TRACK_BUFFER_USAGE
2456 uint32_t consumer_idx;
2457 uint32_t producer_idx;
2458 uint32_t num_free_entries;
2459 uint32_t temp;
2460
2461 temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2462 consumer_idx = temp & 0x0000ffff;
2463 producer_idx = (temp >> 16);
2464
2465 if (consumer_idx > producer_idx)
2466 num_free_entries = (consumer_idx - producer_idx);
2467 else
2468 num_free_entries = NUM_RX_RING_ENTRIES - (
2469 producer_idx - consumer_idx);
2470
2471 if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2472 qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2473
2474 #endif
2475 mblk_head = NULL;
2476 mblk_tail = &mblk_head;
2477
2478 while ((prod != rx_ring->cnsmr_idx)) {
2479 QL_PRINT(DBG_RX,
2480 ("%s cq_id = %d, prod = %d, cnsmr = %d.\n",
2481 __func__, rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2482
2483 net_rsp = (struct ib_mac_iocb_rsp *)rx_ring->curr_entry;
2484 (void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2485 (off_t)((uintptr_t)net_rsp -
2486 (uintptr_t)rx_ring->cq_dma.vaddr),
2487 (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2488 QL_DUMP(DBG_RX, "qlge_ring_rx: rx completion iocb\n",
2489 rx_ring->curr_entry, 8, (size_t)sizeof (*net_rsp));
2490
2491 switch (net_rsp->opcode) {
2492
2493 case OPCODE_IB_MAC_IOCB:
2494 /* Adding length of pkt header and payload */
2495 length = le32_to_cpu(net_rsp->data_len) +
2496 le32_to_cpu(net_rsp->hdr_len);
2497 if ((poll_bytes != QLGE_POLL_ALL) &&
2498 ((received_bytes + length) > poll_bytes)) {
2499 continue;
2500 }
2501 received_bytes += length;
2502
2503 #ifdef QLGE_PERFORMANCE
2504 pkt_ct++;
2505 #endif
2506 mp = ql_build_rx_mp(qlge, rx_ring, net_rsp);
2507 if (mp != NULL) {
2508 if (rx_ring->mac_flags != QL_MAC_STARTED) {
2509 /*
2510 * Increment number of packets we have
2511 * indicated to the stack, should be
2512 * decremented when we get it back
2513 * or when freemsg is called
2514 */
2515 ASSERT(rx_ring->rx_indicate
2516 <= rx_ring->cq_len);
2517 #ifdef QLGE_LOAD_UNLOAD
2518 cmn_err(CE_WARN, "%s do not send to OS,"
2519 " mac_flags %d, indicate %d",
2520 __func__, rx_ring->mac_flags,
2521 rx_ring->rx_indicate);
2522 #endif
2523 QL_PRINT(DBG_RX,
2524 ("cq_id = %d, packet "
2525 "dropped, mac not "
2526 "enabled.\n",
2527 rx_ring->cq_id));
2528 rx_ring->rx_pkt_dropped_mac_unenabled++;
2529
2530 /* rx_lock is expected to be held */
2531 mutex_exit(&rx_ring->rx_lock);
2532 freemsg(mp);
2533 mutex_enter(&rx_ring->rx_lock);
2534 mp = NULL;
2535 }
2536
2537 if (mp != NULL) {
2538 /*
2539 * IP full packet has been
2540 * successfully verified by
2541 * H/W and is correct
2542 */
2543 ql_set_rx_cksum(mp, net_rsp);
2544
2545 rx_ring->rx_packets++;
2546 rx_ring->rx_bytes += length;
2547 *mblk_tail = mp;
2548 mblk_tail = &mp->b_next;
2549 }
2550 } else {
2551 QL_PRINT(DBG_RX,
2552 ("cq_id = %d, packet dropped\n",
2553 rx_ring->cq_id));
2554 rx_ring->rx_packets_dropped_no_buffer++;
2555 }
2556 break;
2557
2558 case OPCODE_IB_SYS_EVENT_IOCB:
2559 ql_process_chip_ae_intr(qlge,
2560 (struct ib_sys_event_iocb_rsp *)
2561 net_rsp);
2562 break;
2563
2564 default:
2565 cmn_err(CE_WARN,
2566 "%s Ring(%d)Hit default case, not handled!"
2567 " dropping the packet, "
2568 "opcode = %x.", __func__, rx_ring->cq_id,
2569 net_rsp->opcode);
2570 break;
2571 }
2572 /* increment cnsmr_idx and curr_entry */
2573 ql_update_cq(rx_ring);
2574 prod = ql_read_sh_reg(qlge, rx_ring);
2575
2576 }
2577
2578 #ifdef QLGE_PERFORMANCE
2579 if (pkt_ct >= 7)
2580 rx_ring->hist[7]++;
2581 else if (pkt_ct == 6)
2582 rx_ring->hist[6]++;
2583 else if (pkt_ct == 5)
2584 rx_ring->hist[5]++;
2585 else if (pkt_ct == 4)
2586 rx_ring->hist[4]++;
2587 else if (pkt_ct == 3)
2588 rx_ring->hist[3]++;
2589 else if (pkt_ct == 2)
2590 rx_ring->hist[2]++;
2591 else if (pkt_ct == 1)
2592 rx_ring->hist[1]++;
2593 else if (pkt_ct == 0)
2594 rx_ring->hist[0]++;
2595 #endif
2596
2597 /* update cnsmr_idx */
2598 ql_write_cq_idx(rx_ring);
2599 /* do not enable interrupt for polling mode */
2600 if (poll_bytes == QLGE_POLL_ALL)
2601 ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
2602 return (mblk_head);
2603 }
2604
2605 /* Process an outbound completion from an rx ring. */
2606 static void
2607 ql_process_mac_tx_intr(qlge_t *qlge, struct ob_mac_iocb_rsp *mac_rsp)
2608 {
2609 struct tx_ring *tx_ring;
2610 struct tx_ring_desc *tx_ring_desc;
2611 int j;
2612
2613 tx_ring = &qlge->tx_ring[mac_rsp->txq_idx];
2614 tx_ring_desc = tx_ring->wq_desc;
2615 tx_ring_desc += mac_rsp->tid;
2616
2617 if (tx_ring_desc->tx_type == USE_DMA) {
2618 QL_PRINT(DBG_TX, ("%s(%d): tx type USE_DMA\n",
2619 __func__, qlge->instance));
2620
2621 /*
2622 * Release the DMA resource that is used for
2623 * DMA binding.
2624 */
2625 for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
2626 (void) ddi_dma_unbind_handle(
2627 tx_ring_desc->tx_dma_handle[j]);
2628 }
2629
2630 tx_ring_desc->tx_dma_handle_used = 0;
2631 /*
2632 * Free the mblk after sending completed
2633 */
2634 if (tx_ring_desc->mp != NULL) {
2635 freemsg(tx_ring_desc->mp);
2636 tx_ring_desc->mp = NULL;
2637 }
2638 }
2639
2640 tx_ring->obytes += tx_ring_desc->tx_bytes;
2641 tx_ring->opackets++;
2642
2643 if (mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | OB_MAC_IOCB_RSP_S |
2644 OB_MAC_IOCB_RSP_L | OB_MAC_IOCB_RSP_B)) {
2645 tx_ring->errxmt++;
2646 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2647 /* EMPTY */
2648 QL_PRINT(DBG_TX,
2649 ("Total descriptor length did not match "
2650 "transfer length.\n"));
2651 }
2652 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2653 /* EMPTY */
2654 QL_PRINT(DBG_TX,
2655 ("Frame too short to be legal, not sent.\n"));
2656 }
2657 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2658 /* EMPTY */
2659 QL_PRINT(DBG_TX,
2660 ("Frame too long, but sent anyway.\n"));
2661 }
2662 if (mac_rsp->flags3 & OB_MAC_IOCB_RSP_B) {
2663 /* EMPTY */
2664 QL_PRINT(DBG_TX,
2665 ("PCI backplane error. Frame not sent.\n"));
2666 }
2667 }
2668 atomic_inc_32(&tx_ring->tx_free_count);
2669 }
2670
2671 /*
2672 * clean up tx completion iocbs
2673 */
2674 int
2675 ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2676 {
2677 qlge_t *qlge = rx_ring->qlge;
2678 uint32_t prod = ql_read_sh_reg(qlge, rx_ring);
2679 struct ob_mac_iocb_rsp *net_rsp = NULL;
2680 int count = 0;
2681 struct tx_ring *tx_ring;
2682 boolean_t resume_tx = B_FALSE;
2683
2684 mutex_enter(&rx_ring->rx_lock);
2685 #ifdef QLGE_TRACK_BUFFER_USAGE
2686 {
2687 uint32_t consumer_idx;
2688 uint32_t producer_idx;
2689 uint32_t num_free_entries;
2690 uint32_t temp;
2691
2692 temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2693 consumer_idx = temp & 0x0000ffff;
2694 producer_idx = (temp >> 16);
2695
2696 if (consumer_idx > producer_idx)
2697 num_free_entries = (consumer_idx - producer_idx);
2698 else
2699 num_free_entries = NUM_RX_RING_ENTRIES -
2700 (producer_idx - consumer_idx);
2701
2702 if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2703 qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2704
2705 }
2706 #endif
2707 /* While there are entries in the completion queue. */
2708 while (prod != rx_ring->cnsmr_idx) {
2709
2710 QL_PRINT(DBG_RX,
2711 ("%s cq_id = %d, prod = %d, cnsmr = %d.\n", __func__,
2712 rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2713
2714 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2715 (void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2716 (off_t)((uintptr_t)net_rsp -
2717 (uintptr_t)rx_ring->cq_dma.vaddr),
2718 (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2719
2720 QL_DUMP(DBG_RX, "ql_clean_outbound_rx_ring: "
2721 "response packet data\n",
2722 rx_ring->curr_entry, 8,
2723 (size_t)sizeof (*net_rsp));
2724
2725 switch (net_rsp->opcode) {
2726
2727 case OPCODE_OB_MAC_OFFLOAD_IOCB:
2728 case OPCODE_OB_MAC_IOCB:
2729 ql_process_mac_tx_intr(qlge, net_rsp);
2730 break;
2731
2732 default:
2733 cmn_err(CE_WARN,
2734 "%s Hit default case, not handled! "
2735 "dropping the packet,"
2736 " opcode = %x.",
2737 __func__, net_rsp->opcode);
2738 break;
2739 }
2740 count++;
2741 ql_update_cq(rx_ring);
2742 prod = ql_read_sh_reg(qlge, rx_ring);
2743 }
2744 ql_write_cq_idx(rx_ring);
2745
2746 mutex_exit(&rx_ring->rx_lock);
2747
2748 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2749 tx_ring = &qlge->tx_ring[net_rsp->txq_idx];
2750
2751 mutex_enter(&tx_ring->tx_lock);
2752
2753 if (tx_ring->queue_stopped &&
2754 (tx_ring->tx_free_count > TX_RESUME_THRESHOLD)) {
2755 /*
2756 * The queue got stopped because the tx_ring was full.
2757 * Wake it up, because it's now at least 25% empty.
2758 */
2759 tx_ring->queue_stopped = 0;
2760 resume_tx = B_TRUE;
2761 }
2762
2763 mutex_exit(&tx_ring->tx_lock);
2764 /* Don't hold the lock during OS callback */
2765 if (resume_tx)
2766 RESUME_TX(tx_ring);
2767 return (count);
2768 }
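
/*
 * Flow-control sketch tying this path to ql_ring_tx(): the send side
 * sets queue_stopped once tx_free_count drops to (or below)
 * TX_STOP_THRESHOLD; the completion path above clears it and calls
 * RESUME_TX() only after tx_free_count climbs back above
 * TX_RESUME_THRESHOLD, giving simple hysteresis between the thresholds.
 */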
2769
2770 /*
2771 * Reset the ASIC when an error occurs
2772 */
2773 /* ARGSUSED */
2774 static uint_t
2775 ql_asic_reset_work(caddr_t arg1, caddr_t arg2)
2776 {
2777 qlge_t *qlge = (qlge_t *)((void *)arg1);
2778 int status;
2779
2780 mutex_enter(&qlge->gen_mutex);
2781 (void) ql_do_stop(qlge);
2782 /*
2783 * Write default ethernet address to chip register Mac
2784 * Address slot 0 and Enable Primary Mac Function.
2785 */
2786 mutex_enter(&qlge->hw_mutex);
2787 (void) ql_unicst_set(qlge,
2788 (uint8_t *)qlge->unicst_addr[0].addr.ether_addr_octet, 0);
2789 mutex_exit(&qlge->hw_mutex);
2790 qlge->mac_flags = QL_MAC_INIT;
2791 status = ql_do_start(qlge);
2792 if (status != DDI_SUCCESS)
2793 goto error;
2794 qlge->mac_flags = QL_MAC_STARTED;
2795 mutex_exit(&qlge->gen_mutex);
2796 ddi_fm_service_impact(qlge->dip, DDI_SERVICE_RESTORED);
2797
2798 return (DDI_INTR_CLAIMED);
2799
2800 error:
2801 mutex_exit(&qlge->gen_mutex);
2802 cmn_err(CE_WARN,
2803 "qlge up/down cycle failed, closing device");
2804 if (qlge->fm_enable) {
2805 ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2806 ddi_fm_service_impact(qlge->dip, DDI_SERVICE_LOST);
2807 atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2808 }
2809 return (DDI_INTR_CLAIMED);
2810 }
2811
2812 /*
2813 * Reset MPI
2814 */
2815 /* ARGSUSED */
2816 static uint_t
2817 ql_mpi_reset_work(caddr_t arg1, caddr_t arg2)
2818 {
2819 qlge_t *qlge = (qlge_t *)((void *)arg1);
2820
2821 (void) ql_reset_mpi_risc(qlge);
2822 return (DDI_INTR_CLAIMED);
2823 }
2824
2825 /*
2826 * Process MPI mailbox messages
2827 */
2828 /* ARGSUSED */
2829 static uint_t
2830 ql_mpi_event_work(caddr_t arg1, caddr_t arg2)
2831 {
2832 qlge_t *qlge = (qlge_t *)((void *)arg1);
2833
2834 ql_do_mpi_intr(qlge);
2835 return (DDI_INTR_CLAIMED);
2836 }
2837
2838 /* Fire up a handler to reset the MPI processor. */
2839 void
2840 ql_wake_asic_reset_soft_intr(qlge_t *qlge)
2841 {
2842 (void) ddi_intr_trigger_softint(qlge->asic_reset_intr_hdl, NULL);
2843 }
2844
2845 static void
2846 ql_wake_mpi_reset_soft_intr(qlge_t *qlge)
2847 {
2848 (void) ddi_intr_trigger_softint(qlge->mpi_reset_intr_hdl, NULL);
2849 }
2850
2851 static void
2852 ql_wake_mpi_event_soft_intr(qlge_t *qlge)
2853 {
2854 (void) ddi_intr_trigger_softint(qlge->mpi_event_intr_hdl, NULL);
2855 }
2856
2857 /*
2858 * This handles a fatal error, MPI activity, and the default
2859 * rx_ring in an MSI-X multiple interrupt vector environment.
2860 * In an MSI/Legacy environment it also processes the rest of
2861 * the rx_rings.
2862 */
2863 /* ARGSUSED */
2864 static uint_t
2865 ql_isr(caddr_t arg1, caddr_t arg2)
2866 {
2867 struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
2868 struct rx_ring *ob_ring;
2869 qlge_t *qlge = rx_ring->qlge;
2870 struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
2871 uint32_t var, prod;
2872 int i;
2873 int work_done = 0;
2874
2875 mblk_t *mp;
2876
2877 _NOTE(ARGUNUSED(arg2));
2878
2879 ++qlge->rx_interrupts[rx_ring->cq_id];
2880
2881 if (ql_atomic_read_32(&qlge->intr_ctx[0].irq_cnt)) {
2882 ql_write_reg(qlge, REG_RSVD7, 0xfeed0002);
2883 var = ql_read_reg(qlge, REG_ERROR_STATUS);
2884 var = ql_read_reg(qlge, REG_STATUS);
2885 var = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
2886 return (DDI_INTR_CLAIMED);
2887 }
2888
2889 ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2890
2891 /*
2892 * process send completions on the first stride tx ring, if available
2893 */
2894 if (qlge->isr_stride) {
2895 ob_ring = &qlge->rx_ring[qlge->isr_stride];
2896 if (ql_read_sh_reg(qlge, ob_ring) !=
2897 ob_ring->cnsmr_idx) {
2898 (void) ql_clean_outbound_rx_ring(ob_ring);
2899 }
2900 }
2901 /*
2902 * Check the default queue and wake handler if active.
2903 */
2904 rx_ring = &qlge->rx_ring[0];
2905 prod = ql_read_sh_reg(qlge, rx_ring);
2906 QL_PRINT(DBG_INTR, ("rx-ring[0] prod index 0x%x, consumer 0x%x ",
2907 prod, rx_ring->cnsmr_idx));
2908 /* check if interrupt is due to incoming packet */
2909 if (prod != rx_ring->cnsmr_idx) {
2910 QL_PRINT(DBG_INTR, ("Waking handler for rx_ring[0].\n"));
2911 ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2912 mutex_enter(&rx_ring->rx_lock);
2913 mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2914 mutex_exit(&rx_ring->rx_lock);
2915
2916 if (mp != NULL)
2917 RX_UPSTREAM(rx_ring, mp);
2918 work_done++;
2919 } else {
2920 /*
2921 * If the interrupt is not due to an incoming packet, read the status
2922 * register to see whether an error or a mailbox interrupt occurred.
2923 */
2924 var = ql_read_reg(qlge, REG_STATUS);
2925 if ((var & STATUS_FE) != 0) {
2926 ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
2927 if (qlge->fm_enable) {
2928 atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2929 ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2930 ddi_fm_service_impact(qlge->dip,
2931 DDI_SERVICE_LOST);
2932 }
2933 cmn_err(CE_WARN, "Got fatal error, STS = %x.", var);
2934 var = ql_read_reg(qlge, REG_ERROR_STATUS);
2935 cmn_err(CE_WARN,
2936 "Resetting chip. Error Status Register = 0x%x",
2937 var);
2938 ql_wake_asic_reset_soft_intr(qlge);
2939 return (DDI_INTR_CLAIMED);
2940 }
2941
2942 /*
2943 * Check MPI processor activity.
2944 */
2945 if ((var & STATUS_PI) != 0) {
2946 /*
2947 * We've got an async event or mailbox completion.
2948 * Handle it and clear the source of the interrupt.
2949 */
2950 ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
2951
2952 QL_PRINT(DBG_INTR, ("Got MPI processor interrupt.\n"));
2953 ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2954 ql_wake_mpi_event_soft_intr(qlge);
2955 work_done++;
2956 }
2957 }
2958
2959
2960 if (qlge->intr_type != DDI_INTR_TYPE_MSIX) {
2961 /*
2962 * Start the DPC for each active queue.
2963 */
2964 for (i = 1; i < qlge->rx_ring_count; i++) {
2965 rx_ring = &qlge->rx_ring[i];
2966
2967 if (ql_read_sh_reg(qlge, rx_ring) !=
2968 rx_ring->cnsmr_idx) {
2969 QL_PRINT(DBG_INTR,
2970 ("Waking handler for rx_ring[%d].\n", i));
2971
2972 ql_disable_completion_interrupt(qlge,
2973 rx_ring->irq);
2974 if (rx_ring->type == TX_Q) {
2975 (void) ql_clean_outbound_rx_ring(
2976 rx_ring);
2977 ql_enable_completion_interrupt(
2978 rx_ring->qlge, rx_ring->irq);
2979 } else {
2980 mutex_enter(&rx_ring->rx_lock);
2981 mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2982 mutex_exit(&rx_ring->rx_lock);
2983 if (mp != NULL)
2984 RX_UPSTREAM(rx_ring, mp);
2985 #ifdef QLGE_LOAD_UNLOAD
2986 if (rx_ring->mac_flags ==
2987 QL_MAC_STOPPED)
2988 cmn_err(CE_NOTE,
2989 "%s rx_indicate(%d) %d\n",
2990 __func__, i,
2991 rx_ring->rx_indicate);
2992 #endif
2993 }
2994 work_done++;
2995 }
2996 }
2997 }
2998
2999 ql_enable_completion_interrupt(qlge, intr_ctx->intr);
3000
3001 return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
3002 }
3003
3004 /*
3005 * MSI-X Multiple Vector Interrupt Handler for outbound (TX) completions.
3006 */
3007 /* ARGSUSED */
3008 static uint_t
3009 ql_msix_tx_isr(caddr_t arg1, caddr_t arg2)
3010 {
3011 struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3012 qlge_t *qlge = rx_ring->qlge;
3013 _NOTE(ARGUNUSED(arg2));
3014
3015 ++qlge->rx_interrupts[rx_ring->cq_id];
3016 (void) ql_clean_outbound_rx_ring(rx_ring);
3017 ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
3018
3019 return (DDI_INTR_CLAIMED);
3020 }
3021
3022 /*
3023 * MSI-X Multiple Vector Interrupt Handler
3024 */
3025 /* ARGSUSED */
3026 static uint_t
3027 ql_msix_isr(caddr_t arg1, caddr_t arg2)
3028 {
3029 struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3030 struct rx_ring *ob_ring;
3031 qlge_t *qlge = rx_ring->qlge;
3032 mblk_t *mp;
3033 _NOTE(ARGUNUSED(arg2));
3034
3035 QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
3036
3037 ql_disable_completion_interrupt(qlge, rx_ring->irq);
3038
3039 /*
3040 * process send completions on the stride tx ring, if available
3041 */
3042 if (qlge->isr_stride) {
3043 ob_ring = rx_ring + qlge->isr_stride;
3044 if (ql_read_sh_reg(qlge, ob_ring) !=
3045 ob_ring->cnsmr_idx) {
3046 ++qlge->rx_interrupts[ob_ring->cq_id];
3047 (void) ql_clean_outbound_rx_ring(ob_ring);
3048 }
3049 }
3050
3051 ++qlge->rx_interrupts[rx_ring->cq_id];
3052
3053 mutex_enter(&rx_ring->rx_lock);
3054 mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
3055 mutex_exit(&rx_ring->rx_lock);
3056
3057 if (mp != NULL)
3058 RX_UPSTREAM(rx_ring, mp);
3059
3060 return (DDI_INTR_CLAIMED);
3061 }
3062
3063 /*
3064 * Poll up to n_bytes of chained incoming packets
3065 */
3066 mblk_t *
3067 ql_ring_rx_poll(void *arg, int n_bytes)
3068 {
3069 struct rx_ring *rx_ring = (struct rx_ring *)arg;
3070 qlge_t *qlge = rx_ring->qlge;
3071 mblk_t *mp = NULL;
3072 uint32_t var;
3073
3074 ASSERT(n_bytes >= 0);
3075 QL_PRINT(DBG_GLD, ("%s for ring(%d) to read max %d bytes\n",
3076 __func__, rx_ring->cq_id, n_bytes));
3077
3078 ++qlge->rx_polls[rx_ring->cq_id];
3079
3080 if (n_bytes == 0)
3081 return (mp);
3082 mutex_enter(&rx_ring->rx_lock);
3083 mp = ql_ring_rx(rx_ring, n_bytes);
3084 mutex_exit(&rx_ring->rx_lock);
3085
3086 if ((rx_ring->cq_id == 0) && (mp == NULL)) {
3087 var = ql_read_reg(qlge, REG_STATUS);
3088 /*
3089 * Check for fatal error.
3090 */
3091 if ((var & STATUS_FE) != 0) {
3092 ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
3093 var = ql_read_reg(qlge, REG_ERROR_STATUS);
3094 cmn_err(CE_WARN, "Got fatal error %x.", var);
3095 ql_wake_asic_reset_soft_intr(qlge);
3096 if (qlge->fm_enable) {
3097 atomic_or_32(&qlge->flags, ADAPTER_ERROR);
3098 ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
3099 ddi_fm_service_impact(qlge->dip,
3100 DDI_SERVICE_LOST);
3101 }
3102 }
3103 /*
3104 * Check MPI processor activity.
3105 */
3106 if ((var & STATUS_PI) != 0) {
3107 /*
3108 * We've got an async event or mailbox completion.
3109 * Handle it and clear the source of the interrupt.
3110 */
3111 ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
3112 ql_do_mpi_intr(qlge);
3113 }
3114 }
3115
3116 return (mp);
3117 }
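
/*
 * Usage sketch: in polling mode the MAC layer calls this entry point
 * with a byte budget instead of relying on interrupts, e.g.
 *
 *	mp = ql_ring_rx_poll(rx_ring, 64 * 1024);
 *
 * A budget of 0 returns NULL immediately, while QLGE_POLL_ALL (used by
 * the interrupt handlers) drains the ring without a byte limit.
 */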
3118
3119 /*
3120 * MSI-X Multiple Vector Interrupt Handler for inbound (RX) completions.
3121 */
3122 /* ARGSUSED */
3123 static uint_t
3124 ql_msix_rx_isr(caddr_t arg1, caddr_t arg2)
3125 {
3126 struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3127 qlge_t *qlge = rx_ring->qlge;
3128 mblk_t *mp;
3129 _NOTE(ARGUNUSED(arg2));
3130
3131 QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
3132
3133 ++qlge->rx_interrupts[rx_ring->cq_id];
3134
3135 mutex_enter(&rx_ring->rx_lock);
3136 mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
3137 mutex_exit(&rx_ring->rx_lock);
3138
3139 if (mp != NULL)
3140 RX_UPSTREAM(rx_ring, mp);
3141
3142 return (DDI_INTR_CLAIMED);
3143 }
3144
3145
3146 /*
3147 *
3148 * Allocate DMA Buffer for ioctl service
3149 *
3150 */
3151 static int
3152 ql_alloc_ioctl_dma_buf(qlge_t *qlge)
3153 {
3154 uint64_t phy_addr;
3155 uint64_t alloc_size;
3156 ddi_dma_cookie_t dma_cookie;
3157
3158 alloc_size = qlge->ioctl_buf_dma_attr.mem_len =
3159 max(WCS_MPI_CODE_RAM_LENGTH, MEMC_MPI_RAM_LENGTH);
3160 if (ql_alloc_phys(qlge->dip, &qlge->ioctl_buf_dma_attr.dma_handle,
3161 &ql_buf_acc_attr,
3162 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3163 &qlge->ioctl_buf_dma_attr.acc_handle,
3164 (size_t)alloc_size, /* mem size */
3165 (size_t)0, /* alignment */
3166 (caddr_t *)&qlge->ioctl_buf_dma_attr.vaddr,
3167 &dma_cookie) != 0) {
3168 cmn_err(CE_WARN, "%s(%d): ioctl DMA allocation failed.",
3169 __func__, qlge->instance);
3170 return (DDI_FAILURE);
3171 }
3172
3173 phy_addr = dma_cookie.dmac_laddress;
3174
3175 if (qlge->ioctl_buf_dma_attr.vaddr == NULL) {
3176 cmn_err(CE_WARN, "%s(%d): failed.", __func__, qlge->instance);
3177 return (DDI_FAILURE);
3178 }
3179
3180 qlge->ioctl_buf_dma_attr.dma_addr = phy_addr;
3181
3182 QL_PRINT(DBG_MBX, ("%s: ioctl_dma_buf_virt_addr = 0x%lx, "
3183 "phy_addr = 0x%lx\n",
3184 __func__, qlge->ioctl_buf_dma_attr.vaddr, phy_addr));
3185
3186 return (DDI_SUCCESS);
3187 }
3188
3189
3190 /*
3191 * Function to free physical memory.
3192 */
3193 static void
3194 ql_free_phys(ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *acc_handle)
3195 {
3196 if (*dma_handle != NULL) {
3197 (void) ddi_dma_unbind_handle(*dma_handle);
3198 if (*acc_handle != NULL)
3199 ddi_dma_mem_free(acc_handle);
3200 ddi_dma_free_handle(dma_handle);
3201 *acc_handle = NULL;
3202 *dma_handle = NULL;
3203 }
3204 }
3205
3206 /*
3207 * Function to free ioctl dma buffer.
3208 */
3209 static void
3210 ql_free_ioctl_dma_buf(qlge_t *qlge)
3211 {
3212 if (qlge->ioctl_buf_dma_attr.dma_handle != NULL) {
3213 ql_free_phys(&qlge->ioctl_buf_dma_attr.dma_handle,
3214 &qlge->ioctl_buf_dma_attr.acc_handle);
3215
3216 qlge->ioctl_buf_dma_attr.vaddr = NULL;
3217 qlge->ioctl_buf_dma_attr.dma_handle = NULL;
3218 }
3219 }
3220
3221 /*
3222 * Free shadow register space used for request and completion queues
3223 */
3224 static void
3225 ql_free_shadow_space(qlge_t *qlge)
3226 {
3227 if (qlge->host_copy_shadow_dma_attr.dma_handle != NULL) {
3228 ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
3229 &qlge->host_copy_shadow_dma_attr.acc_handle);
3230 bzero(&qlge->host_copy_shadow_dma_attr,
3231 sizeof (qlge->host_copy_shadow_dma_attr));
3232 }
3233
3234 if (qlge->buf_q_ptr_base_addr_dma_attr.dma_handle != NULL) {
3235 ql_free_phys(&qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
3236 &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle);
3237 bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
3238 sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
3239 }
3240 }
3241
3242 /*
3243 * Allocate shadow register space for request and completion queues
3244 */
3245 static int
3246 ql_alloc_shadow_space(qlge_t *qlge)
3247 {
3248 ddi_dma_cookie_t dma_cookie;
3249
3250 if (ql_alloc_phys(qlge->dip,
3251 &qlge->host_copy_shadow_dma_attr.dma_handle,
3252 &ql_dev_acc_attr,
3253 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3254 &qlge->host_copy_shadow_dma_attr.acc_handle,
3255 (size_t)VM_PAGE_SIZE, /* mem size */
3256 (size_t)4, /* 4 bytes alignment */
3257 (caddr_t *)&qlge->host_copy_shadow_dma_attr.vaddr,
3258 &dma_cookie) != 0) {
3259 bzero(&qlge->host_copy_shadow_dma_attr,
3260 sizeof (qlge->host_copy_shadow_dma_attr));
3261
3262 cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory for "
3263 "response shadow registers", __func__, qlge->instance);
3264 return (DDI_FAILURE);
3265 }
3266
3267 qlge->host_copy_shadow_dma_attr.dma_addr = dma_cookie.dmac_laddress;
3268
3269 if (ql_alloc_phys(qlge->dip,
3270 &qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
3271 &ql_desc_acc_attr,
3272 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3273 &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle,
3274 (size_t)VM_PAGE_SIZE, /* mem size */
3275 (size_t)4, /* 4 bytes alignment */
3276 (caddr_t *)&qlge->buf_q_ptr_base_addr_dma_attr.vaddr,
3277 &dma_cookie) != 0) {
3278 bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
3279 sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
3280
3281 cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory "
3282 "for request shadow registers",
3283 __func__, qlge->instance);
3284 goto err_wqp_sh_area;
3285 }
3286 qlge->buf_q_ptr_base_addr_dma_attr.dma_addr = dma_cookie.dmac_laddress;
3287
3288 return (DDI_SUCCESS);
3289
3290 err_wqp_sh_area:
3291 ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
3292 &qlge->host_copy_shadow_dma_attr.acc_handle);
3293 bzero(&qlge->host_copy_shadow_dma_attr,
3294 sizeof (qlge->host_copy_shadow_dma_attr));
3295
3296 return (DDI_FAILURE);
3297 }
3298
3299 /*
3300 * Initialize a tx ring
3301 */
3302 static void
3303 ql_init_tx_ring(struct tx_ring *tx_ring)
3304 {
3305 int i;
3306 struct ob_mac_iocb_req *mac_iocb_ptr = tx_ring->wq_dma.vaddr;
3307 struct tx_ring_desc *tx_ring_desc = tx_ring->wq_desc;
3308
3309 for (i = 0; i < tx_ring->wq_len; i++) {
3310 tx_ring_desc->index = i;
3311 tx_ring_desc->queue_entry = mac_iocb_ptr;
3312 mac_iocb_ptr++;
3313 tx_ring_desc++;
3314 }
3315 tx_ring->tx_free_count = tx_ring->wq_len;
3316 tx_ring->queue_stopped = 0;
3317 }
3318
3319 /*
3320 * Free one tx ring resources
3321 */
3322 static void
3323 ql_free_tx_resources(struct tx_ring *tx_ring)
3324 {
3325 struct tx_ring_desc *tx_ring_desc;
3326 int i, j;
3327
3328 if (tx_ring->wq_dma.dma_handle != NULL) {
3329 ql_free_phys(&tx_ring->wq_dma.dma_handle,
3330 &tx_ring->wq_dma.acc_handle);
3331 bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
3332 }
3333 if (tx_ring->wq_desc != NULL) {
3334 tx_ring_desc = tx_ring->wq_desc;
3335 for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3336 for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3337 if (tx_ring_desc->tx_dma_handle[j]) {
3338 /*
3339 * The unbinding will happen in tx
3340 * completion, here we just free the
3341 * handles
3342 */
3343 ddi_dma_free_handle(
3344 &(tx_ring_desc->tx_dma_handle[j]));
3345 tx_ring_desc->tx_dma_handle[j] = NULL;
3346 }
3347 }
3348 if (tx_ring_desc->oal != NULL) {
3349 tx_ring_desc->oal_dma_addr = 0;
3350 tx_ring_desc->oal = NULL;
3351 tx_ring_desc->copy_buffer = NULL;
3352 tx_ring_desc->copy_buffer_dma_addr = 0;
3353
3354 ql_free_phys(&tx_ring_desc->oal_dma.dma_handle,
3355 &tx_ring_desc->oal_dma.acc_handle);
3356 }
3357 }
3358 kmem_free(tx_ring->wq_desc,
3359 tx_ring->wq_len * sizeof (struct tx_ring_desc));
3360 tx_ring->wq_desc = NULL;
3361 }
3362 /* free the wqicb struct */
3363 if (tx_ring->wqicb_dma.dma_handle) {
3364 ql_free_phys(&tx_ring->wqicb_dma.dma_handle,
3365 &tx_ring->wqicb_dma.acc_handle);
3366 bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3367 }
3368 }
3369
3370 /*
3371 * Allocate work (request) queue memory and transmit
3372 * descriptors for this transmit ring
3373 */
3374 static int
3375 ql_alloc_tx_resources(qlge_t *qlge, struct tx_ring *tx_ring)
3376 {
3377 ddi_dma_cookie_t dma_cookie;
3378 struct tx_ring_desc *tx_ring_desc;
3379 int i, j;
3380 uint32_t length;
3381
3382 /* allocate dma buffers for obiocbs */
3383 if (ql_alloc_phys(qlge->dip, &tx_ring->wq_dma.dma_handle,
3384 &ql_desc_acc_attr,
3385 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3386 &tx_ring->wq_dma.acc_handle,
3387 (size_t)tx_ring->wq_size, /* mem size */
3388 (size_t)128, /* alignment:128 bytes boundary */
3389 (caddr_t *)&tx_ring->wq_dma.vaddr,
3390 &dma_cookie) != 0) {
3391 bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
3392 cmn_err(CE_WARN, "%s(%d): reqQ allocation failed.",
3393 __func__, qlge->instance);
3394 return (DDI_FAILURE);
3395 }
3396 tx_ring->wq_dma.dma_addr = dma_cookie.dmac_laddress;
3397
3398 tx_ring->wq_desc =
3399 kmem_zalloc(tx_ring->wq_len * sizeof (struct tx_ring_desc),
3400 KM_NOSLEEP);
3401 if (tx_ring->wq_desc == NULL) {
3402 goto err;
3403 } else {
3404 tx_ring_desc = tx_ring->wq_desc;
3405 /*
3406 * Allocate a large enough structure to hold the following
3407 * 1. oal buffer of MAX_SG_ELEMENTS * sizeof (struct oal_entry) bytes
3408 * 2. copy buffer of QL_MAX_COPY_LENGTH bytes
3409 */
3410 length = (sizeof (struct oal_entry) * MAX_SG_ELEMENTS)
3411 + QL_MAX_COPY_LENGTH;
3412 for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3413
3414 if (ql_alloc_phys(qlge->dip,
3415 &tx_ring_desc->oal_dma.dma_handle,
3416 &ql_desc_acc_attr,
3417 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3418 &tx_ring_desc->oal_dma.acc_handle,
3419 (size_t)length, /* mem size */
3420 (size_t)0, /* default alignment:8 bytes boundary */
3421 (caddr_t *)&tx_ring_desc->oal_dma.vaddr,
3422 &dma_cookie) != 0) {
3423 bzero(&tx_ring_desc->oal_dma,
3424 sizeof (tx_ring_desc->oal_dma));
3425 cmn_err(CE_WARN, "%s(%d): reqQ tx buf &"
3426 "oal alloc failed.",
3427 __func__, qlge->instance);
3428 goto err;
3429 }
3430
3431 tx_ring_desc->oal = tx_ring_desc->oal_dma.vaddr;
3432 tx_ring_desc->oal_dma_addr = dma_cookie.dmac_laddress;
3433 tx_ring_desc->copy_buffer =
3434 (caddr_t)((uint8_t *)tx_ring_desc->oal
3435 + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3436 tx_ring_desc->copy_buffer_dma_addr =
3437 (tx_ring_desc->oal_dma_addr
3438 + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3439
3440 /* Allocate dma handles for transmit buffers */
3441 for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3442 if (ddi_dma_alloc_handle(qlge->dip,
3443 &tx_mapping_dma_attr,
3444 DDI_DMA_DONTWAIT,
3445 0, &tx_ring_desc->tx_dma_handle[j])
3446 != DDI_SUCCESS) {
3447 tx_ring_desc->tx_dma_handle[j] = NULL;
3448 cmn_err(CE_WARN,
3449 "!%s: ddi_dma_alloc_handle: "
3450 "tx_dma_handle "
3451 "alloc failed", __func__);
3452 ql_free_phys(
3453 &tx_ring_desc->oal_dma.dma_handle,
3454 &tx_ring_desc->oal_dma.acc_handle);
3455 goto err;
3456 }
3457 }
3458 }
3459 }
3460 /* alloc a wqicb control block to load this tx ring to hw */
3461 if (ql_alloc_phys(qlge->dip, &tx_ring->wqicb_dma.dma_handle,
3462 &ql_desc_acc_attr,
3463 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3464 &tx_ring->wqicb_dma.acc_handle,
3465 (size_t)sizeof (struct wqicb_t), /* mem size */
3466 (size_t)0, /* default alignment */
3467 (caddr_t *)&tx_ring->wqicb_dma.vaddr,
3468 &dma_cookie) != 0) {
3469 bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3470 cmn_err(CE_WARN, "%s(%d): wqicb allocation failed.",
3471 __func__, qlge->instance);
3472 goto err;
3473 }
3474 tx_ring->wqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3475
3476 return (DDI_SUCCESS);
3477
3478 err:
3479 ql_free_tx_resources(tx_ring);
3480 return (DDI_FAILURE);
3481 }
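
/*
 * Layout sketch of the per-descriptor DMA area allocated above: the oal
 * scatter/gather entries and the bounce/copy buffer share one
 * physically contiguous allocation:
 *
 *	+---------------------------------------+---------------------+
 *	| MAX_SG_ELEMENTS * sizeof (oal_entry)   | QL_MAX_COPY_LENGTH  |
 *	| oal                                    | copy_buffer         |
 *	+---------------------------------------+---------------------+
 *
 * copy_buffer and copy_buffer_dma_addr simply point just past the oal
 * region within the same allocation.
 */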
3482
3483 /*
3484 * Free one rx ring resources
3485 */
3486 static void
3487 ql_free_rx_resources(struct rx_ring *rx_ring)
3488 {
3489 /* Free the small buffer queue. */
3490 if (rx_ring->sbq_dma.dma_handle) {
3491 ql_free_phys(&rx_ring->sbq_dma.dma_handle,
3492 &rx_ring->sbq_dma.acc_handle);
3493 bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3494 }
3495
3496 /* Free the small buffer queue control blocks. */
3497 if (rx_ring->sbq_desc != NULL) {
3498 kmem_free(rx_ring->sbq_desc, rx_ring->sbq_len *
3499 sizeof (struct bq_desc));
3500 rx_ring->sbq_desc = NULL;
3501 }
3502
3503 /* Free the large buffer queue. */
3504 if (rx_ring->lbq_dma.dma_handle) {
3505 ql_free_phys(&rx_ring->lbq_dma.dma_handle,
3506 &rx_ring->lbq_dma.acc_handle);
3507 bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3508 }
3509
3510 /* Free the large buffer queue control blocks. */
3511 if (rx_ring->lbq_desc != NULL) {
3512 kmem_free(rx_ring->lbq_desc, rx_ring->lbq_len *
3513 sizeof (struct bq_desc));
3514 rx_ring->lbq_desc = NULL;
3515 }
3516
3517 /* Free cqicb struct */
3518 if (rx_ring->cqicb_dma.dma_handle) {
3519 ql_free_phys(&rx_ring->cqicb_dma.dma_handle,
3520 &rx_ring->cqicb_dma.acc_handle);
3521 bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3522 }
3523 /* Free the rx queue. */
3524 if (rx_ring->cq_dma.dma_handle) {
3525 ql_free_phys(&rx_ring->cq_dma.dma_handle,
3526 &rx_ring->cq_dma.acc_handle);
3527 bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3528 }
3529 }
3530
3531 /*
3532 * Allocate queues and buffers for this completions queue based
3533 * on the values in the parameter structure.
3534 */
3535 static int
3536 ql_alloc_rx_resources(qlge_t *qlge, struct rx_ring *rx_ring)
3537 {
3538 ddi_dma_cookie_t dma_cookie;
3539
3540 if (ql_alloc_phys(qlge->dip, &rx_ring->cq_dma.dma_handle,
3541 &ql_desc_acc_attr,
3542 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3543 &rx_ring->cq_dma.acc_handle,
3544 (size_t)rx_ring->cq_size, /* mem size */
3545 (size_t)128, /* alignment:128 bytes boundary */
3546 (caddr_t *)&rx_ring->cq_dma.vaddr,
3547 &dma_cookie) != 0) {
3548 bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3549 cmn_err(CE_WARN, "%s(%d): rspQ allocation failed.",
3550 __func__, qlge->instance);
3551 return (DDI_FAILURE);
3552 }
3553 rx_ring->cq_dma.dma_addr = dma_cookie.dmac_laddress;
3554
3555 if (rx_ring->sbq_len != 0) {
3556 /*
3557 * Allocate small buffer queue.
3558 */
3559 if (ql_alloc_phys(qlge->dip, &rx_ring->sbq_dma.dma_handle,
3560 &ql_desc_acc_attr,
3561 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3562 &rx_ring->sbq_dma.acc_handle,
3563 (size_t)rx_ring->sbq_size, /* mem size */
3564 (size_t)128, /* alignment:128 bytes boundary */
3565 (caddr_t *)&rx_ring->sbq_dma.vaddr,
3566 &dma_cookie) != 0) {
3567 bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3568 cmn_err(CE_WARN,
3569 "%s(%d): small buffer queue allocation failed.",
3570 __func__, qlge->instance);
3571 goto err_mem;
3572 }
3573 rx_ring->sbq_dma.dma_addr = dma_cookie.dmac_laddress;
3574
3575 /*
3576 * Allocate small buffer queue control blocks.
3577 */
3578 rx_ring->sbq_desc =
3579 kmem_zalloc(rx_ring->sbq_len * sizeof (struct bq_desc),
3580 KM_NOSLEEP);
3581 if (rx_ring->sbq_desc == NULL) {
3582 cmn_err(CE_WARN,
3583 "sbq control block allocation failed.");
3584 goto err_mem;
3585 }
3586
3587 ql_init_sbq_ring(rx_ring);
3588 }
3589
3590 if (rx_ring->lbq_len != 0) {
3591 /*
3592 * Allocate large buffer queue.
3593 */
3594 if (ql_alloc_phys(qlge->dip, &rx_ring->lbq_dma.dma_handle,
3595 &ql_desc_acc_attr,
3596 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3597 &rx_ring->lbq_dma.acc_handle,
3598 (size_t)rx_ring->lbq_size, /* mem size */
3599 (size_t)128, /* alignment:128 bytes boundary */
3600 (caddr_t *)&rx_ring->lbq_dma.vaddr,
3601 &dma_cookie) != 0) {
3602 bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3603 cmn_err(CE_WARN, "%s(%d): lbq allocation failed.",
3604 __func__, qlge->instance);
3605 goto err_mem;
3606 }
3607 rx_ring->lbq_dma.dma_addr = dma_cookie.dmac_laddress;
3608
3609 /*
3610 * Allocate large buffer queue control blocks.
3611 */
3612 rx_ring->lbq_desc =
3613 kmem_zalloc(rx_ring->lbq_len * sizeof (struct bq_desc),
3614 KM_NOSLEEP);
3615 if (rx_ring->lbq_desc == NULL) {
3616 cmn_err(CE_WARN,
3617 "Large buffer queue control block allocation "
3618 "failed.");
3619 goto err_mem;
3620 }
3621 ql_init_lbq_ring(rx_ring);
3622 }
3623
3624 if (ql_alloc_phys(qlge->dip, &rx_ring->cqicb_dma.dma_handle,
3625 &ql_desc_acc_attr,
3626 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3627 &rx_ring->cqicb_dma.acc_handle,
3628 (size_t)sizeof (struct cqicb_t), /* mem size */
3629 (size_t)0, /* default alignment */
3630 (caddr_t *)&rx_ring->cqicb_dma.vaddr,
3631 &dma_cookie) != 0) {
3632 bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3633 cmn_err(CE_WARN, "%s(%d): cqicb allocation failed.",
3634 __func__, qlge->instance);
3635 goto err_mem;
3636 }
3637 rx_ring->cqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3638
3639 return (DDI_SUCCESS);
3640
3641 err_mem:
3642 ql_free_rx_resources(rx_ring);
3643 return (DDI_FAILURE);
3644 }
3645
3646 /*
3647 * Frees tx/rx queues memory resources
3648 */
3649 static void
3650 ql_free_mem_resources(qlge_t *qlge)
3651 {
3652 int i;
3653
3654 if (qlge->ricb_dma.dma_handle) {
3655 /* free the ricb struct */
3656 ql_free_phys(&qlge->ricb_dma.dma_handle,
3657 &qlge->ricb_dma.acc_handle);
3658 bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3659 }
3660
3661 ql_free_rx_buffers(qlge);
3662
3663 ql_free_ioctl_dma_buf(qlge);
3664
3665 for (i = 0; i < qlge->tx_ring_count; i++)
3666 ql_free_tx_resources(&qlge->tx_ring[i]);
3667
3668 for (i = 0; i < qlge->rx_ring_count; i++)
3669 ql_free_rx_resources(&qlge->rx_ring[i]);
3670
3671 ql_free_shadow_space(qlge);
3672 }
3673
3674 /*
3675 * Allocate buffer queues, large buffers and small buffers etc
3676 *
3677 * This API is called in the gld_attach member function. It is called
3678 * only once. A later reset or reboot should not re-allocate all rings and
3679 * buffers.
3680 */
3681 static int
3682 ql_alloc_mem_resources(qlge_t *qlge)
3683 {
3684 int i;
3685 ddi_dma_cookie_t dma_cookie;
3686
3687 /* Allocate space for our shadow registers */
3688 if (ql_alloc_shadow_space(qlge))
3689 return (DDI_FAILURE);
3690
3691 for (i = 0; i < qlge->rx_ring_count; i++) {
3692 if (ql_alloc_rx_resources(qlge, &qlge->rx_ring[i]) != 0) {
3693 cmn_err(CE_WARN, "RX resource allocation failed.");
3694 goto err_mem;
3695 }
3696 }
3697 /* Allocate tx queue resources */
3698 for (i = 0; i < qlge->tx_ring_count; i++) {
3699 if (ql_alloc_tx_resources(qlge, &qlge->tx_ring[i]) != 0) {
3700 cmn_err(CE_WARN, "Tx resource allocation failed.");
3701 goto err_mem;
3702 }
3703 }
3704
3705 if (ql_alloc_ioctl_dma_buf(qlge) != DDI_SUCCESS) {
3706 goto err_mem;
3707 }
3708
3709 if (ql_alloc_rx_buffers(qlge) != DDI_SUCCESS) {
3710 cmn_err(CE_WARN, "?%s(%d): ql_alloc_rx_buffers failed",
3711 __func__, qlge->instance);
3712 goto err_mem;
3713 }
3714
3715 qlge->sequence |= INIT_ALLOC_RX_BUF;
3716
3717 if (ql_alloc_phys(qlge->dip, &qlge->ricb_dma.dma_handle,
3718 &ql_desc_acc_attr,
3719 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3720 &qlge->ricb_dma.acc_handle,
3721 (size_t)sizeof (struct ricb), /* mem size */
3722 (size_t)0, /* default alignment */
3723 (caddr_t *)&qlge->ricb_dma.vaddr,
3724 &dma_cookie) != 0) {
3725 bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3726 cmn_err(CE_WARN, "%s(%d): ricb allocation failed.",
3727 __func__, qlge->instance);
3728 goto err_mem;
3729 }
3730 qlge->ricb_dma.dma_addr = dma_cookie.dmac_laddress;
3731
3732 return (DDI_SUCCESS);
3733
3734 err_mem:
3735 ql_free_mem_resources(qlge);
3736 return (DDI_FAILURE);
3737 }
3738
3739
3740 /*
3741 * Function used to allocate physical memory and zero it.
3742 */
3743
3744 static int
3745 ql_alloc_phys_rbuf(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
3746 ddi_device_acc_attr_t *device_acc_attr,
3747 uint_t dma_flags,
3748 ddi_acc_handle_t *acc_handle,
3749 size_t size,
3750 size_t alignment,
3751 caddr_t *vaddr,
3752 ddi_dma_cookie_t *dma_cookie)
3753 {
3754 size_t rlen;
3755 uint_t cnt;
3756
3757 /*
3758 * Workaround for SUN XMITS: the buffer must start and end on an 8-byte
3759 * boundary or the hardware will overrun it. The simple fix is to
3760 * round the size up so the buffer has room for the overrun.
3761 */
3762 if (size & 7) {
3763 size += 8 - (size & 7);
3764 }
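
/*
 * For example, a 1514-byte request is padded to 1520 bytes here, so a
 * buffer that must start and end on an 8-byte boundary has room for
 * the hardware overrun described above:
 *
 *	1514 & 7 == 2  ->  size += 8 - 2  ->  1520
 */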
3765
3766 /* Adjust the alignment if requested */
3767 if (alignment) {
3768 dma_attr.dma_attr_align = alignment;
3769 }
3770
3771 /*
3772 * Allocate DMA handle
3773 */
3774 if (ddi_dma_alloc_handle(dip, &dma_attr_rbuf, DDI_DMA_DONTWAIT, NULL,
3775 dma_handle) != DDI_SUCCESS) {
3776 cmn_err(CE_WARN, QL_BANG "%s: ddi_dma_alloc_handle FAILED",
3777 __func__);
3778 *dma_handle = NULL;
3779 return (QL_ERROR);
3780 }
3781 /*
3782 * Allocate DMA memory
3783 */
3784 if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
3785 dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING),
3786 DDI_DMA_DONTWAIT,
3787 NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
3788 cmn_err(CE_WARN, "alloc_phys: DMA Memory alloc Failed");
3789 ddi_dma_free_handle(dma_handle);
3790 *acc_handle = NULL;
3791 *dma_handle = NULL;
3792 return (QL_ERROR);
3793 }
3794
3795 if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
3796 dma_flags, DDI_DMA_DONTWAIT, NULL,
3797 dma_cookie, &cnt) != DDI_DMA_MAPPED) {
3798 ddi_dma_mem_free(acc_handle);
3799
3800 ddi_dma_free_handle(dma_handle);
3801 cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
3802 __func__);
3803 *acc_handle = NULL;
3804 *dma_handle = NULL;
3805 return (QL_ERROR);
3806 }
3807
3808 if (cnt != 1) {
3809
3810 ql_free_phys(dma_handle, acc_handle);
3811
3812 cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
3813 __func__);
3814 return (QL_ERROR);
3815 }
3816
3817 bzero((caddr_t)*vaddr, rlen);
3818
3819 return (0);
3820 }
3821
3822 /*
3823 * Function used to allocate physical memory and zero it.
3824 */
3825 static int
3826 ql_alloc_phys(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
3827 ddi_device_acc_attr_t *device_acc_attr,
3828 uint_t dma_flags,
3829 ddi_acc_handle_t *acc_handle,
3830 size_t size,
3831 size_t alignment,
3832 caddr_t *vaddr,
3833 ddi_dma_cookie_t *dma_cookie)
3834 {
3835 size_t rlen;
3836 uint_t cnt;
3837
3838 /*
3839  * Workaround for SUN XMITS: the buffer must start and end on an
3840  * 8-byte boundary, or the hardware will overrun it. The simple fix
3841  * is to pad the buffer so it has enough room for the overrun.
3842 */
3843 if (size & 7) {
3844 size += 8 - (size & 7);
3845 }
3846
3847 /* Adjust the alignment if requested */
3848 if (alignment) {
3849 dma_attr.dma_attr_align = alignment;
3850 }
3851
3852 /*
3853 * Allocate DMA handle
3854 */
3855 if (ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_DONTWAIT, NULL,
3856 dma_handle) != DDI_SUCCESS) {
3857 cmn_err(CE_WARN, QL_BANG "%s: ddi_dma_alloc_handle FAILED",
3858 __func__);
3859 *dma_handle = NULL;
3860 return (QL_ERROR);
3861 }
3862 /*
3863 * Allocate DMA memory
3864 */
3865 if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
3866 dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING),
3867 DDI_DMA_DONTWAIT,
3868 NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
3869 cmn_err(CE_WARN, "alloc_phys: DMA Memory alloc Failed");
3870 ddi_dma_free_handle(dma_handle);
3871 *acc_handle = NULL;
3872 *dma_handle = NULL;
3873 return (QL_ERROR);
3874 }
3875
3876 if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
3877 dma_flags, DDI_DMA_DONTWAIT, NULL,
3878 dma_cookie, &cnt) != DDI_DMA_MAPPED) {
3879 ddi_dma_mem_free(acc_handle);
3880 ddi_dma_free_handle(dma_handle);
3881 cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
3882 __func__);
3883 *acc_handle = NULL;
3884 *dma_handle = NULL;
3885 return (QL_ERROR);
3886 }
3887
3888 if (cnt != 1) {
3889
3890 ql_free_phys(dma_handle, acc_handle);
3891
3892 cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
3893 __func__);
3894 return (QL_ERROR);
3895 }
3896
3897 bzero((caddr_t)*vaddr, rlen);
3898
3899 return (0);
3900 }
3901
3902 /*
3903 * Add interrupt handlers based on the interrupt type.
3904 * Before adding the interrupt handlers, the interrupt vectors should
3905 * have been allocated, and the rx/tx rings have also been allocated.
3906 */
3907 static int
3908 ql_add_intr_handlers(qlge_t *qlge)
3909 {
3910 int vector = 0;
3911 int rc, i;
3912 	uint32_t value = 0;
3913 struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
3914
3915 switch (qlge->intr_type) {
3916 case DDI_INTR_TYPE_MSIX:
3917 /*
3918 * Add interrupt handler for rx and tx rings: vector[0 -
3919 * (qlge->intr_cnt -1)].
3920 */
3921 value = 0;
3922 for (vector = 0; vector < qlge->intr_cnt; vector++) {
3923 ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3924
3925 /*
3926 * associate interrupt vector with interrupt handler
3927 */
3928 rc = ddi_intr_add_handler(qlge->htable[vector],
3929 (ddi_intr_handler_t *)intr_ctx->handler,
3930 (void *)&qlge->rx_ring[vector], NULL);
3931
3932 QL_PRINT(DBG_INIT, ("rx_ring[%d] 0x%p\n",
3933 vector, &qlge->rx_ring[vector]));
3934 if (rc != DDI_SUCCESS) {
3935 QL_PRINT(DBG_INIT,
3936 ("Add rx interrupt handler failed. "
3937 "return: %d, vector: %d", rc, vector));
3938 for (vector--; vector >= 0; vector--) {
3939 (void) ddi_intr_remove_handler(
3940 qlge->htable[vector]);
3941 }
3942 return (DDI_FAILURE);
3943 }
3944 intr_ctx++;
3945 }
3946 break;
3947
3948 case DDI_INTR_TYPE_MSI:
3949 /*
3950 * Add interrupt handlers for the only vector
3951 */
3952 ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3953
3954 rc = ddi_intr_add_handler(qlge->htable[vector],
3955 ql_isr,
3956 (caddr_t)&qlge->rx_ring[0], NULL);
3957
3958 if (rc != DDI_SUCCESS) {
3959 QL_PRINT(DBG_INIT,
3960 ("Add MSI interrupt handler failed: %d\n", rc));
3961 return (DDI_FAILURE);
3962 }
3963 break;
3964
3965 case DDI_INTR_TYPE_FIXED:
3966 /*
3967 * Add interrupt handlers for the only vector
3968 */
3969 ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3970
3971 rc = ddi_intr_add_handler(qlge->htable[vector],
3972 ql_isr,
3973 (caddr_t)&qlge->rx_ring[0], NULL);
3974
3975 if (rc != DDI_SUCCESS) {
3976 QL_PRINT(DBG_INIT,
3977 ("Add legacy interrupt handler failed: %d\n", rc));
3978 return (DDI_FAILURE);
3979 }
3980 break;
3981
3982 default:
3983 return (DDI_FAILURE);
3984 }
3985
3986 /* Enable interrupts */
3987 /* Block enable */
3988 if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
3989 QL_PRINT(DBG_INIT, ("Block enabling %d interrupt(s)\n",
3990 qlge->intr_cnt));
3991 (void) ddi_intr_block_enable(qlge->htable, qlge->intr_cnt);
3992 } else { /* Non block enable */
3993 for (i = 0; i < qlge->intr_cnt; i++) {
3994 QL_PRINT(DBG_INIT, ("Non Block Enabling interrupt %d "
3995 "handle 0x%x\n", i, qlge->htable[i]));
3996 (void) ddi_intr_enable(qlge->htable[i]);
3997 }
3998 }
3999 qlge->sequence |= INIT_INTR_ENABLED;
4000
4001 return (DDI_SUCCESS);
4002 }
4003
4004 /*
4005 * Here we build the intr_ctx structures based on
4006 * our rx_ring count and intr vector count.
4007 * The intr_ctx structure is used to hook each vector
4008 * to possibly different handlers.
4009 */
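/*
 * Roughly: with MSI-X, vector 0 services the default completion queue
 * (ql_isr, broadcast/multicast plus async events), vectors mapped to
 * TX_Q rings get the tx-completion MSI-X handler, and the remaining
 * vectors get the rx MSI-X handler. With a single MSI/legacy vector,
 * every ring shares ql_isr.
 */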
4010 static void
4011 ql_resolve_queues_to_irqs(qlge_t *qlge)
4012 {
4013 int i = 0;
4014 struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
4015
4016 if (qlge->intr_type == DDI_INTR_TYPE_MSIX) {
4017 /*
4018 * Each rx_ring has its own intr_ctx since we
4019 * have separate vectors for each queue.
4020  * This is only true when MSI-X is enabled.
4021 */
4022 for (i = 0; i < qlge->intr_cnt; i++, intr_ctx++) {
4023 qlge->rx_ring[i].irq = i;
4024 intr_ctx->intr = i;
4025 intr_ctx->qlge = qlge;
4026
4027 /*
4028  * We set up each vector's enable/disable/read bits so
4029  * there are no bit/mask calculations in the critical path.
4030 */
4031 intr_ctx->intr_en_mask =
4032 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4033 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
4034 INTR_EN_IHD | i;
4035 intr_ctx->intr_dis_mask =
4036 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4037 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
4038 INTR_EN_IHD | i;
4039 intr_ctx->intr_read_mask =
4040 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4041 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
4042 | i;
4043
4044 if (i == 0) {
4045 /*
4046 * Default queue handles bcast/mcast plus
4047 * async events.
4048 */
4049 intr_ctx->handler = ql_isr;
4050 } else if (qlge->rx_ring[i].type == TX_Q) {
4051 /*
4052 * Outbound queue is for outbound completions
4053 * only.
4054 */
4055 if (qlge->isr_stride)
4056 intr_ctx->handler = ql_msix_isr;
4057 else
4058 intr_ctx->handler = ql_msix_tx_isr;
4059 } else {
4060 /*
4061 * Inbound queues handle unicast frames only.
4062 */
4063 if (qlge->isr_stride)
4064 intr_ctx->handler = ql_msix_isr;
4065 else
4066 intr_ctx->handler = ql_msix_rx_isr;
4067 }
4068 }
4069 i = qlge->intr_cnt;
4070 for (; i < qlge->rx_ring_count; i++, intr_ctx++) {
4071 int iv = i - qlge->isr_stride;
4072 qlge->rx_ring[i].irq = iv;
4073 intr_ctx->intr = iv;
4074 intr_ctx->qlge = qlge;
4075
4076 /*
4077  * We set up each vector's enable/disable/read bits so
4078  * there are no bit/mask calculations in the critical path.
4079 */
4080 intr_ctx->intr_en_mask =
4081 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4082 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
4083 INTR_EN_IHD | iv;
4084 intr_ctx->intr_dis_mask =
4085 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4086 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
4087 INTR_EN_IHD | iv;
4088 intr_ctx->intr_read_mask =
4089 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4090 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
4091 | iv;
4092
4093 if (qlge->rx_ring[i].type == TX_Q) {
4094 /*
4095 * Outbound queue is for outbound completions
4096 * only.
4097 */
4098 intr_ctx->handler = ql_msix_isr;
4099 } else {
4100 /*
4101 * Inbound queues handle unicast frames only.
4102 */
4103 intr_ctx->handler = ql_msix_rx_isr;
4104 }
4105 }
4106 } else {
4107 /*
4108 * All rx_rings use the same intr_ctx since
4109 * there is only one vector.
4110 */
4111 intr_ctx->intr = 0;
4112 intr_ctx->qlge = qlge;
4113 /*
4114  * We set up each vector's enable/disable/read bits so
4115  * there are no bit/mask calculations in the critical path.
4116 */
4117 intr_ctx->intr_en_mask =
4118 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4119 INTR_EN_TYPE_ENABLE;
4120 intr_ctx->intr_dis_mask =
4121 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4122 INTR_EN_TYPE_DISABLE;
4123 intr_ctx->intr_read_mask =
4124 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4125 INTR_EN_TYPE_READ;
4126 /*
4127 * Single interrupt means one handler for all rings.
4128 */
4129 intr_ctx->handler = ql_isr;
4130 for (i = 0; i < qlge->rx_ring_count; i++)
4131 qlge->rx_ring[i].irq = 0;
4132 }
4133 }
4134
4135
4136 /*
4137 * Free allocated interrupts.
4138 */
4139 static void
4140 ql_free_irq_vectors(qlge_t *qlge)
4141 {
4142 int i;
4143 int rc;
4144
4145 if (qlge->sequence & INIT_INTR_ENABLED) {
4146 /* Disable all interrupts */
4147 if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
4148 /* Call ddi_intr_block_disable() */
4149 (void) ddi_intr_block_disable(qlge->htable,
4150 qlge->intr_cnt);
4151 } else {
4152 for (i = 0; i < qlge->intr_cnt; i++) {
4153 (void) ddi_intr_disable(qlge->htable[i]);
4154 }
4155 }
4156
4157 qlge->sequence &= ~INIT_INTR_ENABLED;
4158 }
4159
4160 for (i = 0; i < qlge->intr_cnt; i++) {
4161
4162 if (qlge->sequence & INIT_ADD_INTERRUPT)
4163 (void) ddi_intr_remove_handler(qlge->htable[i]);
4164
4165 if (qlge->sequence & INIT_INTR_ALLOC) {
4166 rc = ddi_intr_free(qlge->htable[i]);
4167 if (rc != DDI_SUCCESS) {
4168 /* EMPTY */
4169 QL_PRINT(DBG_INIT, ("Free intr failed: %d",
4170 rc));
4171 }
4172 }
4173 }
4174 if (qlge->sequence & INIT_INTR_ALLOC)
4175 qlge->sequence &= ~INIT_INTR_ALLOC;
4176
4177 if (qlge->sequence & INIT_ADD_INTERRUPT)
4178 qlge->sequence &= ~INIT_ADD_INTERRUPT;
4179
4180 if (qlge->htable) {
4181 kmem_free(qlge->htable, qlge->intr_size);
4182 qlge->htable = NULL;
4183 }
4184 }
4185
4186 /*
4187 * Allocate interrupt vectors
4188 * For legacy and MSI, only 1 handle is needed.
4189 * For MSI-X, if fewer than 2 vectors are available, return failure.
4190 * Upon success, this maps the vectors to rx and tx rings for
4191 * interrupts.
4192 */
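/*
 * Note: the "minimum" below is 1 for legacy/MSI and 2 for MSI-X,
 * presumably one vector for the default completion queue plus at
 * least one more for rx/tx completion work.
 */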
4193 static int
4194 ql_request_irq_vectors(qlge_t *qlge, int intr_type)
4195 {
4196 dev_info_t *devinfo;
4197 uint32_t request, orig;
4198 int count, avail, actual;
4199 int minimum;
4200 int rc;
4201
4202 devinfo = qlge->dip;
4203
4204 switch (intr_type) {
4205 case DDI_INTR_TYPE_FIXED:
4206 request = 1; /* Request 1 legacy interrupt handle */
4207 minimum = 1;
4208 QL_PRINT(DBG_INIT, ("interrupt type: legacy\n"));
4209 break;
4210
4211 case DDI_INTR_TYPE_MSI:
4212 request = 1; /* Request 1 MSI interrupt handle */
4213 minimum = 1;
4214 QL_PRINT(DBG_INIT, ("interrupt type: MSI\n"));
4215 break;
4216
4217 case DDI_INTR_TYPE_MSIX:
4218 /*
4219 * Ideal number of vectors for the adapter is
4220 * # rss rings + tx completion rings for default completion
4221 * queue.
4222 */
4223 request = qlge->rx_ring_count;
4224
4225 orig = request;
4226 if (request > (MAX_RX_RINGS))
4227 request = MAX_RX_RINGS;
4228 minimum = 2;
4229 QL_PRINT(DBG_INIT, ("interrupt type: MSI-X\n"));
4230 break;
4231
4232 default:
4233 QL_PRINT(DBG_INIT, ("Invalid parameter\n"));
4234 return (DDI_FAILURE);
4235 }
4236
4237 QL_PRINT(DBG_INIT, ("interrupt handles requested: %d minimum: %d\n",
4238 request, minimum));
4239
4240 /*
4241 * Get number of supported interrupts
4242 */
4243 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
4244 if ((rc != DDI_SUCCESS) || (count < minimum)) {
4245 QL_PRINT(DBG_INIT, ("Get interrupt number failed. Return: %d, "
4246 "count: %d\n", rc, count));
4247 return (DDI_FAILURE);
4248 }
4249 QL_PRINT(DBG_INIT, ("interrupts supported: %d\n", count));
4250
4251 /*
4252 * Get number of available interrupts
4253 */
4254 rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
4255 if ((rc != DDI_SUCCESS) || (avail < minimum)) {
4256 QL_PRINT(DBG_INIT,
4257 ("Get interrupt available number failed. Return:"
4258 " %d, available: %d\n", rc, avail));
4259 return (DDI_FAILURE);
4260 }
4261 QL_PRINT(DBG_INIT, ("interrupts available: %d\n", avail));
4262
4263 if (avail < request) {
4264 QL_PRINT(DBG_INIT, ("Request %d handles, %d available\n",
4265 request, avail));
4266 request = avail;
4267 }
4268
4269 actual = 0;
4270 qlge->intr_cnt = 0;
4271
4272 /*
4273 * Allocate an array of interrupt handles
4274 */
4275 qlge->intr_size = (size_t)(request * sizeof (ddi_intr_handle_t));
4276 qlge->htable = kmem_alloc(qlge->intr_size, KM_SLEEP);
4277
4278 rc = ddi_intr_alloc(devinfo, qlge->htable, intr_type, 0,
4279 (int)request, &actual, DDI_INTR_ALLOC_NORMAL);
4280 if (rc != DDI_SUCCESS) {
4281 cmn_err(CE_WARN, "%s(%d) Allocate interrupts failed. return:"
4282 " %d, request: %d, actual: %d",
4283 __func__, qlge->instance, rc, request, actual);
4284 goto ql_intr_alloc_fail;
4285 }
4286 qlge->intr_cnt = actual;
4287
4288 qlge->sequence |= INIT_INTR_ALLOC;
4289
4290 /*
4291  * If the actual number of vectors is less than the minimum,
4292  * then fail.
4293 */
4294 if (actual < minimum) {
4295 cmn_err(CE_WARN,
4296 "Insufficient interrupt handles available: %d", actual);
4297 goto ql_intr_alloc_fail;
4298 }
4299
4300 /*
4301 * For MSI-X, actual might force us to reduce number of tx & rx rings
4302 */
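	/*
	 * For example (hypothetical numbers): if 16 vectors were requested
	 * but only 10 were granted, the ring counts are halved to 8 rss and
	 * 8 tx rings, intr_cnt becomes 8 and isr_stride 8, so two rings
	 * end up sharing each vector.
	 */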
4303 if ((intr_type == DDI_INTR_TYPE_MSIX) && (orig > actual)) {
4304 if (actual >= (orig / 2)) {
4305 count = orig / 2;
4306 qlge->rss_ring_count = count;
4307 qlge->tx_ring_count = count;
4308 qlge->isr_stride = count;
4309 } else if (actual >= (orig / 4)) {
4310 count = orig / 4;
4311 qlge->rss_ring_count = count;
4312 qlge->tx_ring_count = count;
4313 qlge->isr_stride = count;
4314 } else if (actual >= (orig / 8)) {
4315 count = orig / 8;
4316 qlge->rss_ring_count = count;
4317 qlge->tx_ring_count = count;
4318 qlge->isr_stride = count;
4319 } else if (actual < MAX_RX_RINGS) {
4320 qlge->tx_ring_count = 1;
4321 qlge->rss_ring_count = actual - 1;
4322 }
4323 qlge->intr_cnt = count;
4324 qlge->rx_ring_count = qlge->tx_ring_count +
4325 qlge->rss_ring_count;
4326 }
4327 cmn_err(CE_NOTE, "!qlge(%d) tx %d, rss %d, stride %d\n", qlge->instance,
4328 qlge->tx_ring_count, qlge->rss_ring_count, qlge->isr_stride);
4329
4330 /*
4331 * Get priority for first vector, assume remaining are all the same
4332 */
4333 rc = ddi_intr_get_pri(qlge->htable[0], &qlge->intr_pri);
4334 if (rc != DDI_SUCCESS) {
4335 QL_PRINT(DBG_INIT, ("Get interrupt priority failed: %d\n", rc));
4336 goto ql_intr_alloc_fail;
4337 }
4338
4339 rc = ddi_intr_get_cap(qlge->htable[0], &qlge->intr_cap);
4340 if (rc != DDI_SUCCESS) {
4341 QL_PRINT(DBG_INIT, ("Get interrupt cap failed: %d\n", rc));
4342 goto ql_intr_alloc_fail;
4343 }
4344
4345 qlge->intr_type = intr_type;
4346
4347 return (DDI_SUCCESS);
4348
4349 ql_intr_alloc_fail:
4350 ql_free_irq_vectors(qlge);
4351
4352 return (DDI_FAILURE);
4353 }
4354
4355 /*
4356 * Allocate interrupt vector(s) for one of the following interrupt types, MSI-X,
4357 * MSI or Legacy. In MSI and Legacy modes we only support a single receive and
4358 * transmit queue.
4359 */
4360 int
4361 ql_alloc_irqs(qlge_t *qlge)
4362 {
4363 int intr_types;
4364 int rval;
4365
4366 /*
4367 * Get supported interrupt types
4368 */
4369 if (ddi_intr_get_supported_types(qlge->dip, &intr_types)
4370 != DDI_SUCCESS) {
4371 cmn_err(CE_WARN, "%s(%d):ddi_intr_get_supported_types failed",
4372 __func__, qlge->instance);
4373
4374 return (DDI_FAILURE);
4375 }
4376
4377 QL_PRINT(DBG_INIT, ("%s(%d) Interrupt types supported %d\n",
4378 __func__, qlge->instance, intr_types));
4379
4380 /* Install MSI-X interrupts */
4381 if ((intr_types & DDI_INTR_TYPE_MSIX) != 0) {
4382 QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt supported %d\n",
4383 __func__, qlge->instance, intr_types));
4384 rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSIX);
4385 if (rval == DDI_SUCCESS) {
4386 return (rval);
4387 }
4388 QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt allocation failed,"
4389 " trying MSI interrupts ...\n", __func__, qlge->instance));
4390 }
4391
4392 /*
4393 * We will have 2 completion queues in MSI / Legacy mode,
4394 * Queue 0 for default completions
4395 * Queue 1 for transmit completions
4396 */
4397 qlge->rss_ring_count = 1; /* Default completion queue (0) for all */
4398 qlge->tx_ring_count = 1; /* Single tx completion queue */
4399 qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
4400
4401 QL_PRINT(DBG_INIT, ("%s(%d) Falling back to single completion queue \n",
4402 __func__, qlge->instance));
4403 /*
4404 * Add the h/w interrupt handler and initialise mutexes
4405 */
4406 rval = DDI_FAILURE;
4407
4408 /*
4409  * If the OS supports MSI-X but the allocation above failed, try MSI
4410  * interrupts. If MSI allocation also fails, fall back to fixed
4411  * (legacy) interrupts.
4412 */
4413 if (intr_types & DDI_INTR_TYPE_MSI) {
4414 rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSI);
4415 if (rval == DDI_SUCCESS) {
4416 qlge->intr_type = DDI_INTR_TYPE_MSI;
4417 QL_PRINT(DBG_INIT, ("%s(%d) use MSI Interrupt \n",
4418 __func__, qlge->instance));
4419 }
4420 }
4421
4422 /* Try Fixed interrupt Legacy mode */
4423 if (rval != DDI_SUCCESS) {
4424 rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_FIXED);
4425 if (rval != DDI_SUCCESS) {
4426 cmn_err(CE_WARN, "%s(%d):Legacy mode interrupt "
4427 "allocation failed",
4428 __func__, qlge->instance);
4429 } else {
4430 qlge->intr_type = DDI_INTR_TYPE_FIXED;
4431 QL_PRINT(DBG_INIT, ("%s(%d) use Fixed Interrupt \n",
4432 __func__, qlge->instance));
4433 }
4434 }
4435
4436 return (rval);
4437 }
4438
4439 static void
4440 ql_free_rx_tx_locks(qlge_t *qlge)
4441 {
4442 int i;
4443 struct rx_ring *rx_ring;
4444 struct tx_ring *tx_ring;
4445
4446 for (i = 0; i < qlge->tx_ring_count; i++) {
4447 tx_ring = &qlge->tx_ring[i];
4448 mutex_destroy(&tx_ring->tx_lock);
4449 }
4450
4451 for (i = 0; i < qlge->rx_ring_count; i++) {
4452 rx_ring = &qlge->rx_ring[i];
4453 mutex_destroy(&rx_ring->rx_lock);
4454 mutex_destroy(&rx_ring->sbq_lock);
4455 mutex_destroy(&rx_ring->lbq_lock);
4456 }
4457 }
4458
4459 /*
4460 * Frees all resources allocated during attach.
4461 *
4462 * Input:
4463  *	qlge = adapter state pointer; the qlge->sequence bits indicate
4464  *	which resources were allocated and must be freed.
4465 *
4466 * Context:
4467 * Kernel context.
4468 */
4469 static void
4470 ql_free_resources(qlge_t *qlge)
4471 {
4472
4473 /* Disable driver timer */
4474 ql_stop_timer(qlge);
4475
4476 if (qlge->sequence & INIT_MAC_REGISTERED) {
4477 (void) mac_unregister(qlge->mh);
4478 qlge->sequence &= ~INIT_MAC_REGISTERED;
4479 }
4480
4481 if (qlge->sequence & INIT_MAC_ALLOC) {
4482 /* Nothing to do, macp is already freed */
4483 qlge->sequence &= ~INIT_MAC_ALLOC;
4484 }
4485
4486 if (qlge->sequence & INIT_PCI_CONFIG_SETUP) {
4487 pci_config_teardown(&qlge->pci_handle);
4488 qlge->sequence &= ~INIT_PCI_CONFIG_SETUP;
4489 }
4490
4491 if (qlge->sequence & INIT_INTR_ALLOC) {
4492 ql_free_irq_vectors(qlge);
4493 qlge->sequence &= ~INIT_ADD_INTERRUPT;
4494 }
4495
4496 if (qlge->sequence & INIT_ADD_SOFT_INTERRUPT) {
4497 (void) ddi_intr_remove_softint(qlge->mpi_event_intr_hdl);
4498 (void) ddi_intr_remove_softint(qlge->mpi_reset_intr_hdl);
4499 (void) ddi_intr_remove_softint(qlge->asic_reset_intr_hdl);
4500 qlge->sequence &= ~INIT_ADD_SOFT_INTERRUPT;
4501 }
4502
4503 if (qlge->sequence & INIT_KSTATS) {
4504 ql_fini_kstats(qlge);
4505 qlge->sequence &= ~INIT_KSTATS;
4506 }
4507
4508 if (qlge->sequence & INIT_MUTEX) {
4509 mutex_destroy(&qlge->gen_mutex);
4510 mutex_destroy(&qlge->hw_mutex);
4511 mutex_destroy(&qlge->mbx_mutex);
4512 cv_destroy(&qlge->cv_mbx_intr);
4513 qlge->sequence &= ~INIT_MUTEX;
4514 }
4515
4516 if (qlge->sequence & INIT_LOCKS_CREATED) {
4517 ql_free_rx_tx_locks(qlge);
4518 qlge->sequence &= ~INIT_LOCKS_CREATED;
4519 }
4520
4521 if (qlge->sequence & INIT_MEMORY_ALLOC) {
4522 ql_free_mem_resources(qlge);
4523 qlge->sequence &= ~INIT_MEMORY_ALLOC;
4524 }
4525
4526 if (qlge->sequence & INIT_REGS_SETUP) {
4527 ddi_regs_map_free(&qlge->dev_handle);
4528 qlge->sequence &= ~INIT_REGS_SETUP;
4529 }
4530
4531 if (qlge->sequence & INIT_DOORBELL_REGS_SETUP) {
4532 ddi_regs_map_free(&qlge->dev_doorbell_reg_handle);
4533 qlge->sequence &= ~INIT_DOORBELL_REGS_SETUP;
4534 }
4535
4536 /*
4537  * free the flash FLT table that was allocated during attach
4538 */
4539 	if ((qlge->flt.ql_flt_entry_ptr != NULL) &&
4540 (qlge->flt.header.length != 0)) {
4541 kmem_free(qlge->flt.ql_flt_entry_ptr, qlge->flt.header.length);
4542 qlge->flt.ql_flt_entry_ptr = NULL;
4543 }
4544
4545 if (qlge->sequence & INIT_FM) {
4546 ql_fm_fini(qlge);
4547 qlge->sequence &= ~INIT_FM;
4548 }
4549
4550 ddi_prop_remove_all(qlge->dip);
4551 ddi_set_driver_private(qlge->dip, NULL);
4552
4553 /* finally, free qlge structure */
4554 if (qlge->sequence & INIT_SOFTSTATE_ALLOC) {
4555 kmem_free(qlge, sizeof (qlge_t));
4556 }
4557 }
4558
4559 /*
4560 * Set promiscuous mode of the driver
4561 * Caller must catch HW_LOCK
4562 */
4563 void
4564 ql_set_promiscuous(qlge_t *qlge, int mode)
4565 {
4566 if (mode) {
4567 (void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4568 RT_IDX_VALID, 1);
4569 } else {
4570 (void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4571 RT_IDX_VALID, 0);
4572 }
4573 }
4574 /*
4575 * Write 'data1' to Mac Protocol Address Index Register and
4576 * 'data2' to Mac Protocol Address Data Register
4577 * Assuming that the Mac Protocol semaphore lock has been acquired.
4578 */
4579 static int
4580 ql_write_mac_proto_regs(qlge_t *qlge, uint32_t data1, uint32_t data2)
4581 {
4582 int return_value = DDI_SUCCESS;
4583
4584 if (ql_wait_reg_bit(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
4585 MAC_PROTOCOL_ADDRESS_INDEX_MW, BIT_SET, 5) != DDI_SUCCESS) {
4586 cmn_err(CE_WARN, "Wait for MAC_PROTOCOL Address Register "
4587 "timeout.");
4588 return_value = DDI_FAILURE;
4589 goto out;
4590 }
4591 ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX /* A8 */, data1);
4592 ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA /* 0xAC */, data2);
4593 out:
4594 return (return_value);
4595 }
4596 /*
4597 * Enable the 'index'ed multicast address in the host memory's multicast_list
4598 */
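/*
 * The address-index register value written below packs
 * (enable | type | slot-index << 4 | register-offset): offset 0 carries
 * the low 32 bits of the MAC address in the data register, offset 1
 * carries the upper 16 bits.
 */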
4599 int
4600 ql_add_multicast_address(qlge_t *qlge, int index)
4601 {
4602 int rtn_val = DDI_FAILURE;
4603 uint32_t offset;
4604 uint32_t value1, value2;
4605
4606 /* Acquire the required semaphore */
4607 if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4608 return (rtn_val);
4609 }
4610
4611 /* Program Offset0 - lower 32 bits of the MAC address */
4612 offset = 0;
4613 value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4614 (index << 4) | offset;
4615 value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4616 |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4617 |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4618 |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4619 if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS)
4620 goto out;
4621
4622 /* Program offset1: upper 16 bits of the MAC address */
4623 offset = 1;
4624 value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4625 (index<<4) | offset;
4626 value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[0] << 8)
4627 |qlge->multicast_list[index].addr.ether_addr_octet[1]);
4628 if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4629 goto out;
4630 }
4631 rtn_val = DDI_SUCCESS;
4632 out:
4633 ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4634 return (rtn_val);
4635 }
4636
4637 /*
4638 * Disable the 'index'ed multicast address in the host memory's multicast_list
4639 */
4640 int
4641 ql_remove_multicast_address(qlge_t *qlge, int index)
4642 {
4643 int rtn_val = DDI_FAILURE;
4644 uint32_t offset;
4645 uint32_t value1, value2;
4646
4647 /* Acquire the required semaphore */
4648 if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4649 return (rtn_val);
4650 }
4651 /* Program Offset0 - lower 32 bits of the MAC address */
4652 offset = 0;
4653 value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4654 value2 =
4655 ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4656 |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4657 |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4658 |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4659 if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4660 goto out;
4661 }
4662 /* Program offset1: upper 16 bits of the MAC address */
4663 offset = 1;
4664 value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4665 value2 = 0;
4666 if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4667 goto out;
4668 }
4669 rtn_val = DDI_SUCCESS;
4670 out:
4671 ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4672 return (rtn_val);
4673 }
4674
4675 /*
4676  * Add a new multicast address to the list of supported multicast
4677  * addresses. This API is called after the OS calls gld_set_multicast
4678  * (GLDv2) or m_multicst (GLDv3).
4679  *
4680  * Restriction:
4681  * The maximum number of multicast addresses is limited by the hardware.
4682 */
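/*
 * Once the list grows past MAX_MULTICAST_HW_SIZE entries, the routine
 * below also enables the ALLMULTI routing slot, i.e. the device falls
 * back to multicast-promiscuous mode.
 */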
4683 int
4684 ql_add_to_multicast_list(qlge_t *qlge, uint8_t *ep)
4685 {
4686 uint32_t index = qlge->multicast_list_count;
4687 int rval = DDI_SUCCESS;
4688 int status;
4689
4690 if ((ep[0] & 01) == 0) {
4691 rval = EINVAL;
4692 goto exit;
4693 }
4694
4695 	/* if there is available space in multicast_list, then add it */
4696 if (index < MAX_MULTICAST_LIST_SIZE) {
4697 bcopy(ep, qlge->multicast_list[index].addr.ether_addr_octet,
4698 ETHERADDRL);
4699 /* increment the total number of addresses in multicast list */
4700 (void) ql_add_multicast_address(qlge, index);
4701 qlge->multicast_list_count++;
4702 QL_PRINT(DBG_GLD,
4703 ("%s(%d): added to index of multicast list= 0x%x, "
4704 "total %d\n", __func__, qlge->instance, index,
4705 qlge->multicast_list_count));
4706
4707 if (index > MAX_MULTICAST_HW_SIZE) {
4708 if (!qlge->multicast_promisc) {
4709 status = ql_set_routing_reg(qlge,
4710 RT_IDX_ALLMULTI_SLOT,
4711 RT_IDX_MCAST, 1);
4712 if (status) {
4713 cmn_err(CE_WARN,
4714 "Failed to init routing reg "
4715 "for mcast promisc mode.");
4716 rval = ENOENT;
4717 goto exit;
4718 }
4719 qlge->multicast_promisc = B_TRUE;
4720 }
4721 }
4722 } else {
4723 rval = ENOENT;
4724 }
4725 exit:
4726 return (rval);
4727 }
4728
4729 /*
4730 * Remove an old multicast address from the list of supported multicast
4731  * addresses. This API is called after the OS calls gld_set_multicast
4732  * (GLDv2) or m_multicst (GLDv3).
4733  * The maximum number of multicast addresses is limited by the hardware.
4734 */
4735 int
4736 ql_remove_from_multicast_list(qlge_t *qlge, uint8_t *ep)
4737 {
4738 uint32_t total = qlge->multicast_list_count;
4739 int i = 0;
4740 int rmv_index = 0;
4741 size_t length = sizeof (ql_multicast_addr);
4742 int status;
4743
4744 for (i = 0; i < total; i++) {
4745 if (bcmp(ep, &qlge->multicast_list[i].addr, ETHERADDRL) != 0) {
4746 continue;
4747 }
4748
4749 rmv_index = i;
4750 		/* block-move the rest of the multicast addresses forward */
4751 length = ((total -1) -i) * sizeof (ql_multicast_addr);
4752 if (length > 0) {
4753 bcopy(&qlge->multicast_list[i+1],
4754 &qlge->multicast_list[i], length);
4755 }
4756 qlge->multicast_list_count--;
4757 if (qlge->multicast_list_count <= MAX_MULTICAST_HW_SIZE) {
4758 /*
4759 			 * an entry was removed from the multicast list;
4760 			 * re-program the remaining entries
4761 */
4762 for (i = rmv_index; i < qlge->multicast_list_count;
4763 i++) {
4764 (void) ql_add_multicast_address(qlge, i);
4765 }
4766 /* and disable the last one */
4767 (void) ql_remove_multicast_address(qlge, i);
4768
4769 /* disable multicast promiscuous mode */
4770 if (qlge->multicast_promisc) {
4771 status = ql_set_routing_reg(qlge,
4772 RT_IDX_ALLMULTI_SLOT,
4773 RT_IDX_MCAST, 0);
4774 if (status) {
4775 cmn_err(CE_WARN,
4776 "Failed to init routing reg for "
4777 "mcast promisc mode.");
4778 goto exit;
4779 }
4780 /* write to config register */
4781 qlge->multicast_promisc = B_FALSE;
4782 }
4783 }
4784 break;
4785 }
4786 exit:
4787 return (DDI_SUCCESS);
4788 }
4789
4790 /*
4791 * Read a XGMAC register
4792 */
4793 int
4794 ql_read_xgmac_reg(qlge_t *qlge, uint32_t addr, uint32_t *val)
4795 {
4796 int rtn_val = DDI_FAILURE;
4797
4798 /* wait for XGMAC Address register RDY bit set */
4799 if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4800 BIT_SET, 10) != DDI_SUCCESS) {
4801 goto out;
4802 }
4803 /* start rx transaction */
4804 ql_write_reg(qlge, REG_XGMAC_ADDRESS, addr|XGMAC_ADDRESS_READ_TRANSACT);
4805
4806 /*
4807 * wait for XGMAC Address register RDY bit set,
4808 * which indicates data is ready
4809 */
4810 if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4811 BIT_SET, 10) != DDI_SUCCESS) {
4812 goto out;
4813 }
4814 	/* read data from XGMAC_DATA register */
4815 *val = ql_read_reg(qlge, REG_XGMAC_DATA);
4816 rtn_val = DDI_SUCCESS;
4817 out:
4818 return (rtn_val);
4819 }
4820
4821 /*
4822 * Implement checksum offload for IPv4 IP packets
4823 */
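/*
 * As the code below shows, the hdr_off field packs both offsets into one
 * 16-bit word: the IP header offset (the MAC header length) in the low
 * 6 bits and the TCP/UDP header offset, shifted left by 6, in the bits
 * above it.
 */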
4824 static void
4825 ql_hw_csum_setup(qlge_t *qlge, uint32_t pflags, caddr_t bp,
4826 struct ob_mac_iocb_req *mac_iocb_ptr)
4827 {
4828 struct ip *iphdr = NULL;
4829 struct ether_header *ethhdr;
4830 struct ether_vlan_header *ethvhdr;
4831 struct tcphdr *tcp_hdr;
4832 uint32_t etherType;
4833 int mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
4834 int ip_hdr_off, tcp_udp_hdr_off, hdr_off;
4835
4836 ethhdr = (struct ether_header *)((void *)bp);
4837 ethvhdr = (struct ether_vlan_header *)((void *)bp);
4838 /* Is this vlan packet? */
4839 if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
4840 mac_hdr_len = sizeof (struct ether_vlan_header);
4841 etherType = ntohs(ethvhdr->ether_type);
4842 } else {
4843 mac_hdr_len = sizeof (struct ether_header);
4844 etherType = ntohs(ethhdr->ether_type);
4845 }
4846 /* Is this IPv4 or IPv6 packet? */
4847 if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len)) ==
4848 IPV4_VERSION) {
4849 if (etherType == ETHERTYPE_IP /* 0800 */) {
4850 iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
4851 } else {
4852 /* EMPTY */
4853 QL_PRINT(DBG_TX,
4854 ("%s(%d) : IPv4 None IP packet type 0x%x\n",
4855 __func__, qlge->instance, etherType));
4856 }
4857 }
4858 /* ipV4 packets */
4859 if (iphdr != NULL) {
4860
4861 ip_hdr_len = IPH_HDR_LENGTH(iphdr);
4862 QL_PRINT(DBG_TX,
4863 ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH:"
4864 " %d bytes \n", __func__, qlge->instance, ip_hdr_len));
4865
4866 ip_hdr_off = mac_hdr_len;
4867 QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
4868 __func__, qlge->instance, ip_hdr_len));
4869
4870 mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
4871 OB_MAC_IOCB_REQ_IPv4);
4872
4873 if (pflags & HCK_IPV4_HDRCKSUM) {
4874 QL_PRINT(DBG_TX, ("%s(%d) : Do IPv4 header checksum\n",
4875 __func__, qlge->instance));
4876 mac_iocb_ptr->opcode = OPCODE_OB_MAC_OFFLOAD_IOCB;
4877 mac_iocb_ptr->flag2 = (uint8_t)(mac_iocb_ptr->flag2 |
4878 OB_MAC_IOCB_REQ_IC);
4879 iphdr->ip_sum = 0;
4880 mac_iocb_ptr->hdr_off = (uint16_t)
4881 cpu_to_le16(ip_hdr_off);
4882 }
4883 if (pflags & HCK_FULLCKSUM) {
4884 if (iphdr->ip_p == IPPROTO_TCP) {
4885 tcp_hdr =
4886 (struct tcphdr *)(void *)
4887 ((uint8_t *)(void *)iphdr + ip_hdr_len);
4888 QL_PRINT(DBG_TX, ("%s(%d) : Do TCP checksum\n",
4889 __func__, qlge->instance));
4890 mac_iocb_ptr->opcode =
4891 OPCODE_OB_MAC_OFFLOAD_IOCB;
4892 mac_iocb_ptr->flag1 =
4893 (uint8_t)(mac_iocb_ptr->flag1 |
4894 OB_MAC_IOCB_REQ_TC);
4895 mac_iocb_ptr->flag2 =
4896 (uint8_t)(mac_iocb_ptr->flag2 |
4897 OB_MAC_IOCB_REQ_IC);
4898 iphdr->ip_sum = 0;
4899 tcp_udp_hdr_off = mac_hdr_len+ip_hdr_len;
4900 tcp_udp_hdr_len = tcp_hdr->th_off*4;
4901 QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
4902 __func__, qlge->instance, tcp_udp_hdr_len));
4903 hdr_off = ip_hdr_off;
4904 tcp_udp_hdr_off <<= 6;
4905 hdr_off |= tcp_udp_hdr_off;
4906 mac_iocb_ptr->hdr_off =
4907 (uint16_t)cpu_to_le16(hdr_off);
4908 mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4909 cpu_to_le16(mac_hdr_len + ip_hdr_len +
4910 tcp_udp_hdr_len);
4911
4912 /*
4913 				 * if the chip is unable to do the pseudo-header
4914 				 * checksum calculation, do it in software, then
4915 				 * put the result in the data passed to the chip
4916 */
4917 if (qlge->cfg_flags &
4918 CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4919 ql_pseudo_cksum((uint8_t *)iphdr);
4920 }
4921 } else if (iphdr->ip_p == IPPROTO_UDP) {
4922 QL_PRINT(DBG_TX, ("%s(%d) : Do UDP checksum\n",
4923 __func__, qlge->instance));
4924 mac_iocb_ptr->opcode =
4925 OPCODE_OB_MAC_OFFLOAD_IOCB;
4926 mac_iocb_ptr->flag1 =
4927 (uint8_t)(mac_iocb_ptr->flag1 |
4928 OB_MAC_IOCB_REQ_UC);
4929 mac_iocb_ptr->flag2 =
4930 (uint8_t)(mac_iocb_ptr->flag2 |
4931 OB_MAC_IOCB_REQ_IC);
4932 iphdr->ip_sum = 0;
4933 tcp_udp_hdr_off = mac_hdr_len + ip_hdr_len;
4934 tcp_udp_hdr_len = sizeof (struct udphdr);
4935 QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
4936 __func__, qlge->instance, tcp_udp_hdr_len));
4937 hdr_off = ip_hdr_off;
4938 tcp_udp_hdr_off <<= 6;
4939 hdr_off |= tcp_udp_hdr_off;
4940 mac_iocb_ptr->hdr_off =
4941 (uint16_t)cpu_to_le16(hdr_off);
4942 mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4943 cpu_to_le16(mac_hdr_len + ip_hdr_len
4944 + tcp_udp_hdr_len);
4945
4946 /*
4947 				 * if the chip is unable to calculate the
4948 				 * pseudo-header checksum, do it in software,
4949 				 * then put the result in the data passed to the chip
4950 */
4951 if (qlge->cfg_flags &
4952 CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4953 ql_pseudo_cksum((uint8_t *)iphdr);
4954 }
4955 }
4956 }
4957 }
4958 }
4959
4960 /*
4961 * For TSO/LSO:
4962 * MAC frame transmission with TCP large segment offload is performed in the
4963 * same way as the MAC frame transmission with checksum offload with the
4964 * exception that the maximum TCP segment size (MSS) must be specified to
4965 * allow the chip to segment the data into legal sized frames.
4966 * The host also needs to calculate a pseudo-header checksum over the
4967 * following fields:
4968 * Source IP Address, Destination IP Address, and the Protocol.
4969 * The TCP length is not included in the pseudo-header calculation.
4970 * The pseudo-header checksum is place in the TCP checksum field of the
4971  * The pseudo-header checksum is placed in the TCP checksum field of the
4972 */
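/*
 * ql_lso_pseudo_cksum() below sums the protocol byte (buf[9]) and the
 * source/destination IPv4 addresses (buf[12..19]) as 16-bit big-endian
 * words, folds the carries into 16 bits, and stores the result at the
 * TCP or UDP checksum offset past the IP header.
 */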
4973 static void
4974 ql_lso_pseudo_cksum(uint8_t *buf)
4975 {
4976 uint32_t cksum;
4977 uint16_t iphl;
4978 uint16_t proto;
4979
4980 /*
4981 * Calculate the LSO pseudo-header checksum.
4982 */
4983 iphl = (uint16_t)(4 * (buf[0] & 0xF));
4984 cksum = proto = buf[9];
4985 cksum += (((uint16_t)buf[12])<<8) + buf[13];
4986 cksum += (((uint16_t)buf[14])<<8) + buf[15];
4987 cksum += (((uint16_t)buf[16])<<8) + buf[17];
4988 cksum += (((uint16_t)buf[18])<<8) + buf[19];
4989 cksum = (cksum>>16) + (cksum & 0xFFFF);
4990 cksum = (cksum>>16) + (cksum & 0xFFFF);
4991
4992 /*
4993 * Point it to the TCP/UDP header, and
4994 * update the checksum field.
4995 */
4996 buf += iphl + ((proto == IPPROTO_TCP) ?
4997 TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
4998
4999 *(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
5000 }
5001
5002 /*
5003 * For IPv4 IP packets, distribute the tx packets evenly among tx rings
5004 */
5005 typedef uint32_t ub4; /* unsigned 4-byte quantities */
5006 typedef uint8_t ub1;
5007
5008 #define hashsize(n) ((ub4)1<<(n))
5009 #define hashmask(n) (hashsize(n)-1)
5010
5011 #define mix(a, b, c) \
5012 { \
5013 a -= b; a -= c; a ^= (c>>13); \
5014 b -= c; b -= a; b ^= (a<<8); \
5015 c -= a; c -= b; c ^= (b>>13); \
5016 a -= b; a -= c; a ^= (c>>12); \
5017 b -= c; b -= a; b ^= (a<<16); \
5018 c -= a; c -= b; c ^= (b>>5); \
5019 a -= b; a -= c; a ^= (c>>3); \
5020 b -= c; b -= a; b ^= (a<<10); \
5021 c -= a; c -= b; c ^= (b>>15); \
5022 }
5023
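/*
 * This is the classic Bob Jenkins lookup2-style mix/hash (note the golden
 * ratio constant 0x9e3779b9). ql_tx_hashing() feeds it a 12-byte key built
 * from the source/destination IPv4 addresses and TCP/UDP ports, and masks
 * the result by (tx_ring_count - 1) to pick a tx ring.
 */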
5024 ub4
5025 hash(ub1 *k, ub4 length, ub4 initval)
5026 {
5027 ub4 a, b, c, len;
5028
5029 /* Set up the internal state */
5030 len = length;
5031 a = b = 0x9e3779b9; /* the golden ratio; an arbitrary value */
5032 c = initval; /* the previous hash value */
5033
5034 /* handle most of the key */
5035 while (len >= 12) {
5036 a += (k[0] +((ub4)k[1]<<8) +((ub4)k[2]<<16) +((ub4)k[3]<<24));
5037 b += (k[4] +((ub4)k[5]<<8) +((ub4)k[6]<<16) +((ub4)k[7]<<24));
5038 c += (k[8] +((ub4)k[9]<<8) +((ub4)k[10]<<16)+((ub4)k[11]<<24));
5039 mix(a, b, c);
5040 k += 12;
5041 len -= 12;
5042 }
5043
5044 /* handle the last 11 bytes */
5045 c += length;
5046 /* all the case statements fall through */
5047 switch (len) {
5048 /* FALLTHRU */
5049 case 11: c += ((ub4)k[10]<<24);
5050 /* FALLTHRU */
5051 case 10: c += ((ub4)k[9]<<16);
5052 /* FALLTHRU */
5053 case 9 : c += ((ub4)k[8]<<8);
5054 /* the first byte of c is reserved for the length */
5055 /* FALLTHRU */
5056 case 8 : b += ((ub4)k[7]<<24);
5057 /* FALLTHRU */
5058 case 7 : b += ((ub4)k[6]<<16);
5059 /* FALLTHRU */
5060 case 6 : b += ((ub4)k[5]<<8);
5061 /* FALLTHRU */
5062 case 5 : b += k[4];
5063 /* FALLTHRU */
5064 case 4 : a += ((ub4)k[3]<<24);
5065 /* FALLTHRU */
5066 case 3 : a += ((ub4)k[2]<<16);
5067 /* FALLTHRU */
5068 case 2 : a += ((ub4)k[1]<<8);
5069 /* FALLTHRU */
5070 case 1 : a += k[0];
5071 /* case 0: nothing left to add */
5072 }
5073 mix(a, b, c);
5074 /* report the result */
5075 return (c);
5076 }
5077
5078 uint8_t
5079 ql_tx_hashing(qlge_t *qlge, caddr_t bp)
5080 {
5081 struct ip *iphdr = NULL;
5082 struct ether_header *ethhdr;
5083 struct ether_vlan_header *ethvhdr;
5084 struct tcphdr *tcp_hdr;
5085 struct udphdr *udp_hdr;
5086 uint32_t etherType;
5087 int mac_hdr_len, ip_hdr_len;
5088 uint32_t h = 0; /* 0 by default */
5089 uint8_t tx_ring_id = 0;
5090 uint32_t ip_src_addr = 0;
5091 uint32_t ip_desc_addr = 0;
5092 uint16_t src_port = 0;
5093 uint16_t dest_port = 0;
5094 uint8_t key[12];
5095 QL_PRINT(DBG_TX, ("%s(%d) entered \n", __func__, qlge->instance));
5096
5097 ethhdr = (struct ether_header *)((void *)bp);
5098 ethvhdr = (struct ether_vlan_header *)((void *)bp);
5099
5100 if (qlge->tx_ring_count == 1)
5101 return (tx_ring_id);
5102
5103 /* Is this vlan packet? */
5104 if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
5105 mac_hdr_len = sizeof (struct ether_vlan_header);
5106 etherType = ntohs(ethvhdr->ether_type);
5107 } else {
5108 mac_hdr_len = sizeof (struct ether_header);
5109 etherType = ntohs(ethhdr->ether_type);
5110 }
5111 /* Is this IPv4 or IPv6 packet? */
5112 if (etherType == ETHERTYPE_IP /* 0800 */) {
5113 if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len))
5114 == IPV4_VERSION) {
5115 iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
5116 }
5117 if (((unsigned long)iphdr) & 0x3) {
5118 /* IP hdr not 4-byte aligned */
5119 return (tx_ring_id);
5120 }
5121 }
5122 /* ipV4 packets */
5123 if (iphdr) {
5124
5125 ip_hdr_len = IPH_HDR_LENGTH(iphdr);
5126 ip_src_addr = iphdr->ip_src.s_addr;
5127 ip_desc_addr = iphdr->ip_dst.s_addr;
5128
5129 if (iphdr->ip_p == IPPROTO_TCP) {
5130 tcp_hdr = (struct tcphdr *)(void *)
5131 ((uint8_t *)iphdr + ip_hdr_len);
5132 src_port = tcp_hdr->th_sport;
5133 dest_port = tcp_hdr->th_dport;
5134 } else if (iphdr->ip_p == IPPROTO_UDP) {
5135 udp_hdr = (struct udphdr *)(void *)
5136 ((uint8_t *)iphdr + ip_hdr_len);
5137 src_port = udp_hdr->uh_sport;
5138 dest_port = udp_hdr->uh_dport;
5139 }
5140 key[0] = (uint8_t)((ip_src_addr) &0xFF);
5141 key[1] = (uint8_t)((ip_src_addr >> 8) &0xFF);
5142 key[2] = (uint8_t)((ip_src_addr >> 16) &0xFF);
5143 key[3] = (uint8_t)((ip_src_addr >> 24) &0xFF);
5144 key[4] = (uint8_t)((ip_desc_addr) &0xFF);
5145 key[5] = (uint8_t)((ip_desc_addr >> 8) &0xFF);
5146 key[6] = (uint8_t)((ip_desc_addr >> 16) &0xFF);
5147 key[7] = (uint8_t)((ip_desc_addr >> 24) &0xFF);
5148 key[8] = (uint8_t)((src_port) &0xFF);
5149 key[9] = (uint8_t)((src_port >> 8) &0xFF);
5150 key[10] = (uint8_t)((dest_port) &0xFF);
5151 key[11] = (uint8_t)((dest_port >> 8) &0xFF);
5152 h = hash(key, 12, 0); /* return 32 bit */
5153 tx_ring_id = (h & (qlge->tx_ring_count - 1));
5154 if (tx_ring_id >= qlge->tx_ring_count) {
5155 cmn_err(CE_WARN, "%s bad tx_ring_id %d\n",
5156 __func__, tx_ring_id);
5157 tx_ring_id = 0;
5158 }
5159 }
5160 return (tx_ring_id);
5161 }
5162
5163 /*
5164 * Tell the hardware to do Large Send Offload (LSO)
5165 *
5166 * Some fields in ob_mac_iocb need to be set so hardware can know what is
5167 * the incoming packet, TCP or UDP, whether a VLAN tag needs to be inserted
5168 * in the right place of the packet etc, thus, hardware can process the
5169 * packet correctly.
5170 */
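/*
 * The header-offset bookkeeping here is the same as in ql_hw_csum_setup();
 * the LSO-specific additions are the OB_MAC_IOCB_REQ_LSO flag and the mss
 * field, which tells the hardware the maximum segment size to use when it
 * splits the payload into frames.
 */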
5171 static void
5172 ql_hw_lso_setup(qlge_t *qlge, uint32_t mss, caddr_t bp,
5173 struct ob_mac_iocb_req *mac_iocb_ptr)
5174 {
5175 struct ip *iphdr = NULL;
5176 struct ether_header *ethhdr;
5177 struct ether_vlan_header *ethvhdr;
5178 struct tcphdr *tcp_hdr;
5179 struct udphdr *udp_hdr;
5180 uint32_t etherType;
5181 uint16_t mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
5182 uint16_t ip_hdr_off, tcp_udp_hdr_off, hdr_off;
5183
5184 ethhdr = (struct ether_header *)(void *)bp;
5185 ethvhdr = (struct ether_vlan_header *)(void *)bp;
5186
5187 /* Is this vlan packet? */
5188 if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
5189 mac_hdr_len = sizeof (struct ether_vlan_header);
5190 etherType = ntohs(ethvhdr->ether_type);
5191 } else {
5192 mac_hdr_len = sizeof (struct ether_header);
5193 etherType = ntohs(ethhdr->ether_type);
5194 }
5195 /* Is this IPv4 or IPv6 packet? */
5196 if (IPH_HDR_VERSION((ipha_t *)(void *)(bp + mac_hdr_len)) ==
5197 IPV4_VERSION) {
5198 if (etherType == ETHERTYPE_IP /* 0800 */) {
5199 iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
5200 } else {
5201 /* EMPTY */
5202 QL_PRINT(DBG_TX, ("%s(%d) : IPv4 None IP packet"
5203 " type 0x%x\n",
5204 __func__, qlge->instance, etherType));
5205 }
5206 }
5207
5208 if (iphdr != NULL) { /* ipV4 packets */
5209 ip_hdr_len = (uint16_t)IPH_HDR_LENGTH(iphdr);
5210 QL_PRINT(DBG_TX,
5211 ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH: %d"
5212 " bytes \n", __func__, qlge->instance, ip_hdr_len));
5213
5214 ip_hdr_off = mac_hdr_len;
5215 QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
5216 __func__, qlge->instance, ip_hdr_len));
5217
5218 mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
5219 OB_MAC_IOCB_REQ_IPv4);
5220 if (qlge->cfg_flags & CFG_CKSUM_FULL_IPv4) {
5221 if (iphdr->ip_p == IPPROTO_TCP) {
5222 tcp_hdr = (struct tcphdr *)(void *)
5223 ((uint8_t *)(void *)iphdr +
5224 ip_hdr_len);
5225 QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on TCP "
5226 "packet\n",
5227 __func__, qlge->instance));
5228 mac_iocb_ptr->opcode =
5229 OPCODE_OB_MAC_OFFLOAD_IOCB;
5230 mac_iocb_ptr->flag1 =
5231 (uint8_t)(mac_iocb_ptr->flag1 |
5232 OB_MAC_IOCB_REQ_LSO);
5233 iphdr->ip_sum = 0;
5234 tcp_udp_hdr_off =
5235 (uint16_t)(mac_hdr_len+ip_hdr_len);
5236 tcp_udp_hdr_len =
5237 (uint16_t)(tcp_hdr->th_off*4);
5238 QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
5239 __func__, qlge->instance, tcp_udp_hdr_len));
5240 hdr_off = ip_hdr_off;
5241 tcp_udp_hdr_off <<= 6;
5242 hdr_off |= tcp_udp_hdr_off;
5243 mac_iocb_ptr->hdr_off =
5244 (uint16_t)cpu_to_le16(hdr_off);
5245 mac_iocb_ptr->protocol_hdr_len = (uint16_t)
5246 cpu_to_le16(mac_hdr_len + ip_hdr_len +
5247 tcp_udp_hdr_len);
5248 mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
5249
5250 /*
5251 				 * if the chip is unable to calculate the
5252 				 * pseudo-header checksum, do it in software, then
5253 				 * put the result in the data passed to the chip
5254 */
5255 if (qlge->cfg_flags &
5256 CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
5257 ql_lso_pseudo_cksum((uint8_t *)iphdr);
5258 } else if (iphdr->ip_p == IPPROTO_UDP) {
5259 udp_hdr = (struct udphdr *)(void *)
5260 ((uint8_t *)(void *)iphdr
5261 + ip_hdr_len);
5262 QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on UDP "
5263 "packet\n",
5264 __func__, qlge->instance));
5265 mac_iocb_ptr->opcode =
5266 OPCODE_OB_MAC_OFFLOAD_IOCB;
5267 mac_iocb_ptr->flag1 =
5268 (uint8_t)(mac_iocb_ptr->flag1 |
5269 OB_MAC_IOCB_REQ_LSO);
5270 iphdr->ip_sum = 0;
5271 tcp_udp_hdr_off =
5272 (uint16_t)(mac_hdr_len+ip_hdr_len);
5273 tcp_udp_hdr_len =
5274 (uint16_t)(udp_hdr->uh_ulen*4);
5275 QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
5276 __func__, qlge->instance, tcp_udp_hdr_len));
5277 hdr_off = ip_hdr_off;
5278 tcp_udp_hdr_off <<= 6;
5279 hdr_off |= tcp_udp_hdr_off;
5280 mac_iocb_ptr->hdr_off =
5281 (uint16_t)cpu_to_le16(hdr_off);
5282 mac_iocb_ptr->protocol_hdr_len = (uint16_t)
5283 cpu_to_le16(mac_hdr_len + ip_hdr_len +
5284 tcp_udp_hdr_len);
5285 mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
5286
5287 /*
5288 				 * if the chip is unable to do the pseudo-header
5289 				 * checksum calculation, do it here, then put the
5290 				 * result in the data passed to the chip
5291 */
5292 if (qlge->cfg_flags &
5293 CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
5294 ql_lso_pseudo_cksum((uint8_t *)iphdr);
5295 }
5296 }
5297 }
5298 }
5299
5300 /*
5301 * Generic packet sending function which is used to send one packet.
5302 */
5303 int
5304 ql_send_common(struct tx_ring *tx_ring, mblk_t *mp)
5305 {
5306 struct tx_ring_desc *tx_cb;
5307 struct ob_mac_iocb_req *mac_iocb_ptr;
5308 mblk_t *tp;
5309 size_t msg_len = 0;
5310 size_t off;
5311 caddr_t bp;
5312 size_t nbyte, total_len;
5313 uint_t i = 0;
5314 int j = 0, frags = 0;
5315 uint32_t phy_addr_low, phy_addr_high;
5316 uint64_t phys_addr;
5317 clock_t now;
5318 uint32_t pflags = 0;
5319 uint32_t mss = 0;
5320 enum tx_mode_t tx_mode;
5321 struct oal_entry *oal_entry;
5322 int status;
5323 uint_t ncookies, oal_entries, max_oal_entries;
5324 size_t max_seg_len = 0;
5325 boolean_t use_lso = B_FALSE;
5326 struct oal_entry *tx_entry = NULL;
5327 struct oal_entry *last_oal_entry;
5328 qlge_t *qlge = tx_ring->qlge;
5329 ddi_dma_cookie_t dma_cookie;
5330 size_t tx_buf_len = QL_MAX_COPY_LENGTH;
5331 int force_pullup = 0;
5332
5333 tp = mp;
5334 total_len = msg_len = 0;
5335 max_oal_entries = TX_DESC_PER_IOCB + MAX_SG_ELEMENTS-1;
5336
5337 /* Calculate number of data and segments in the incoming message */
5338 for (tp = mp; tp != NULL; tp = tp->b_cont) {
5339 nbyte = MBLKL(tp);
5340 total_len += nbyte;
5341 max_seg_len = max(nbyte, max_seg_len);
5342 QL_PRINT(DBG_TX, ("Requested sending data in %d segments, "
5343 "total length: %d\n", frags, nbyte));
5344 frags++;
5345 }
5346
5347 if (total_len >= QL_LSO_MAX) {
5348 freemsg(mp);
5349 #ifdef QLGE_LOAD_UNLOAD
5350 cmn_err(CE_NOTE, "%s: quit, packet oversize %d\n",
5351 __func__, (int)total_len);
5352 #endif
5353 return (0);
5354 }
5355
5356 bp = (caddr_t)mp->b_rptr;
5357 if (bp[0] & 1) {
5358 if (bcmp(bp, ql_ether_broadcast_addr.ether_addr_octet,
5359 ETHERADDRL) == 0) {
5360 QL_PRINT(DBG_TX, ("Broadcast packet\n"));
5361 tx_ring->brdcstxmt++;
5362 } else {
5363 QL_PRINT(DBG_TX, ("multicast packet\n"));
5364 tx_ring->multixmt++;
5365 }
5366 }
5367
5368 tx_ring->obytes += total_len;
5369 	tx_ring->opackets++;
5370
5371 QL_PRINT(DBG_TX, ("total requested sending data length: %d, in %d segs,"
5372 " max seg len: %d\n", total_len, frags, max_seg_len));
5373
5374 /* claim a free slot in tx ring */
5375 tx_cb = &tx_ring->wq_desc[tx_ring->prod_idx];
5376
5377 /* get the tx descriptor */
5378 mac_iocb_ptr = tx_cb->queue_entry;
5379
5380 bzero((void *)mac_iocb_ptr, 20);
5381
5382 ASSERT(tx_cb->mp == NULL);
5383
5384 /*
5385 * Decide to use DMA map or copy mode.
5386 * DMA map mode must be used when the total msg length is more than the
5387 * tx buffer length.
5388 */
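	/*
	 * In short: messages that fit in the preallocated copy buffer (and
	 * have no oversized segment) are bcopy'd to avoid the cost of a DMA
	 * bind; everything else is DMA-mapped in place.
	 */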
5389
5390 if (total_len > tx_buf_len)
5391 tx_mode = USE_DMA;
5392 else if (max_seg_len > QL_MAX_COPY_LENGTH)
5393 tx_mode = USE_DMA;
5394 else
5395 tx_mode = USE_COPY;
5396
5397 if (qlge->chksum_cap) {
5398 mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &pflags);
5399 QL_PRINT(DBG_TX, ("checksum flag is :0x%x, card capability "
5400 "is 0x%x \n", pflags, qlge->chksum_cap));
5401 if (qlge->lso_enable) {
5402 uint32_t lso_flags = 0;
5403 mac_lso_get(mp, &mss, &lso_flags);
5404 use_lso = (lso_flags == HW_LSO);
5405 }
5406 QL_PRINT(DBG_TX, ("mss :%d, use_lso %x \n",
5407 mss, use_lso));
5408 }
5409
5410 do_pullup:
5411
5412 /* concatenate all frags into one large packet if too fragmented */
5413 	if (((tx_mode == USE_DMA) && (frags > QL_MAX_TX_DMA_HANDLES)) ||
5414 force_pullup) {
5415 mblk_t *mp1;
5416 if ((mp1 = msgpullup(mp, -1)) != NULL) {
5417 freemsg(mp);
5418 mp = mp1;
5419 frags = 1;
5420 } else {
5421 tx_ring->tx_fail_dma_bind++;
5422 goto bad;
5423 }
5424 }
5425
5426 tx_cb->tx_bytes = (uint32_t)total_len;
5427 tx_cb->mp = mp;
5428 tx_cb->tx_dma_handle_used = 0;
5429
5430 if (tx_mode == USE_DMA) {
5431 msg_len = total_len;
5432
5433 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
5434 mac_iocb_ptr->tid = tx_ring->prod_idx;
5435 mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
5436 mac_iocb_ptr->txq_idx = tx_ring->wq_id;
5437
5438 tx_entry = &mac_iocb_ptr->oal_entry[0];
5439 oal_entry = NULL;
5440
5441 for (tp = mp, oal_entries = j = 0; tp != NULL;
5442 tp = tp->b_cont) {
5443 /* if too many tx dma handles needed */
5444 if (j >= QL_MAX_TX_DMA_HANDLES) {
5445 tx_ring->tx_no_dma_handle++;
5446 if (!force_pullup) {
5447 force_pullup = 1;
5448 goto do_pullup;
5449 } else {
5450 goto bad;
5451 }
5452 }
5453 nbyte = (uint16_t)MBLKL(tp);
5454 if (nbyte == 0)
5455 continue;
5456
5457 status = ddi_dma_addr_bind_handle(
5458 tx_cb->tx_dma_handle[j], NULL,
5459 (caddr_t)tp->b_rptr, nbyte,
5460 DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
5461 0, &dma_cookie, &ncookies);
5462
5463 QL_PRINT(DBG_TX, ("map sending data segment: %d, "
5464 "length: %d, spans in %d cookies\n",
5465 j, nbyte, ncookies));
5466
5467 if (status != DDI_DMA_MAPPED) {
5468 goto bad;
5469 }
5470 /*
5471 * Each fragment can span several cookies. One cookie
5472 * will use one tx descriptor to transmit.
5473 */
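			/*
			 * The first TX_DESC_PER_IOCB descriptors live in the
			 * IOCB itself; once those are used up, the code below
			 * spills the remaining address/length pairs into the
			 * per-descriptor OAL (outbound address list) buffer.
			 */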
5474 for (i = ncookies; i > 0; i--, tx_entry++,
5475 oal_entries++) {
5476 /*
5477 * The number of TX descriptors that can be
5478 * saved in tx iocb and oal list is limited
5479 */
5480 if (oal_entries > max_oal_entries) {
5481 tx_ring->tx_no_dma_cookie++;
5482 if (!force_pullup) {
5483 force_pullup = 1;
5484 goto do_pullup;
5485 } else {
5486 goto bad;
5487 }
5488 }
5489
5490 if ((oal_entries == TX_DESC_PER_IOCB) &&
5491 !oal_entry) {
5492 /*
5493 					 * Time to switch to an OAL list.
5494 					 * The last entry should be copied
5495 					 * to the first entry in the OAL list.
5496 */
5497 oal_entry = tx_cb->oal;
5498 tx_entry =
5499 &mac_iocb_ptr->oal_entry[
5500 TX_DESC_PER_IOCB-1];
5501 bcopy(tx_entry, oal_entry,
5502 sizeof (*oal_entry));
5503
5504 /*
5505 * last entry should be updated to
5506 * point to the extended oal list itself
5507 */
5508 tx_entry->buf_addr_low =
5509 cpu_to_le32(
5510 LS_64BITS(tx_cb->oal_dma_addr));
5511 tx_entry->buf_addr_high =
5512 cpu_to_le32(
5513 MS_64BITS(tx_cb->oal_dma_addr));
5514 /*
5515 * Point tx_entry to the oal list
5516 * second entry
5517 */
5518 tx_entry = &oal_entry[1];
5519 }
5520
5521 tx_entry->buf_len =
5522 (uint32_t)cpu_to_le32(dma_cookie.dmac_size);
5523 phys_addr = dma_cookie.dmac_laddress;
5524 tx_entry->buf_addr_low =
5525 cpu_to_le32(LS_64BITS(phys_addr));
5526 tx_entry->buf_addr_high =
5527 cpu_to_le32(MS_64BITS(phys_addr));
5528
5529 last_oal_entry = tx_entry;
5530
5531 if (i > 1)
5532 ddi_dma_nextcookie(
5533 tx_cb->tx_dma_handle[j],
5534 &dma_cookie);
5535 }
5536 j++;
5537 }
5538 /*
5539 * if OAL is used, the last oal entry in tx iocb indicates
5540 * number of additional address/len pairs in OAL
5541 */
5542 if (oal_entries > TX_DESC_PER_IOCB) {
5543 tx_entry = &mac_iocb_ptr->oal_entry[TX_DESC_PER_IOCB-1];
5544 tx_entry->buf_len = (uint32_t)
5545 (cpu_to_le32((sizeof (struct oal_entry) *
5546 (oal_entries -TX_DESC_PER_IOCB+1))|OAL_CONT_ENTRY));
5547 }
5548 last_oal_entry->buf_len = cpu_to_le32(
5549 le32_to_cpu(last_oal_entry->buf_len)|OAL_LAST_ENTRY);
5550
5551 tx_cb->tx_dma_handle_used = j;
5552 QL_PRINT(DBG_TX, ("total tx_dma_handle_used %d cookies %d \n",
5553 j, oal_entries));
5554
5555 bp = (caddr_t)mp->b_rptr;
5556 }
5557 if (tx_mode == USE_COPY) {
5558 bp = tx_cb->copy_buffer;
5559 off = 0;
5560 nbyte = 0;
5561 frags = 0;
5562 /*
5563 * Copy up to tx_buf_len of the transmit data
5564 * from mp to tx buffer
5565 */
5566 for (tp = mp; tp != NULL; tp = tp->b_cont) {
5567 nbyte = MBLKL(tp);
5568 if ((off + nbyte) <= tx_buf_len) {
5569 bcopy(tp->b_rptr, &bp[off], nbyte);
5570 off += nbyte;
5571 frags ++;
5572 }
5573 }
5574
5575 msg_len = off;
5576
5577 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
5578 mac_iocb_ptr->tid = tx_ring->prod_idx;
5579 mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
5580 mac_iocb_ptr->txq_idx = tx_ring->wq_id;
5581
5582 		QL_PRINT(DBG_TX, ("Copy Mode: actual sent data length is: %d, "
5583 		    "from %d segments\n", msg_len, frags));
5584
5585 phys_addr = tx_cb->copy_buffer_dma_addr;
5586 phy_addr_low = cpu_to_le32(LS_64BITS(phys_addr));
5587 phy_addr_high = cpu_to_le32(MS_64BITS(phys_addr));
5588
5589 QL_DUMP(DBG_TX, "\t requested sending data:\n",
5590 (uint8_t *)tx_cb->copy_buffer, 8, total_len);
5591
5592 mac_iocb_ptr->oal_entry[0].buf_len = (uint32_t)
5593 cpu_to_le32(msg_len | OAL_LAST_ENTRY);
5594 mac_iocb_ptr->oal_entry[0].buf_addr_low = phy_addr_low;
5595 mac_iocb_ptr->oal_entry[0].buf_addr_high = phy_addr_high;
5596
5597 freemsg(mp); /* no need, we have copied */
5598 tx_cb->mp = NULL;
5599 } /* End of Copy Mode */
5600
5601 /* Do TSO/LSO on TCP packet? */
5602 if (use_lso && mss) {
5603 ql_hw_lso_setup(qlge, mss, bp, mac_iocb_ptr);
5604 } else if (pflags & qlge->chksum_cap) {
5605 /* Do checksum offloading */
5606 ql_hw_csum_setup(qlge, pflags, bp, mac_iocb_ptr);
5607 }
5608
5609 /* let device know the latest outbound IOCB */
5610 (void) ddi_dma_sync(tx_ring->wq_dma.dma_handle,
5611 (off_t)((uintptr_t)mac_iocb_ptr - (uintptr_t)tx_ring->wq_dma.vaddr),
5612 (size_t)sizeof (*mac_iocb_ptr), DDI_DMA_SYNC_FORDEV);
5613
5614 if (tx_mode == USE_DMA) {
5615 /* let device know the latest outbound OAL if necessary */
5616 if (oal_entries > TX_DESC_PER_IOCB) {
5617 (void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
5618 (off_t)0,
5619 (sizeof (struct oal_entry) *
5620 (oal_entries -TX_DESC_PER_IOCB+1)),
5621 DDI_DMA_SYNC_FORDEV);
5622 }
5623 } else { /* for USE_COPY mode, tx buffer has changed */
5624 /* let device know the latest change */
5625 (void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
5626 /* copy buf offset */
5627 (off_t)(sizeof (oal_entry) * MAX_SG_ELEMENTS),
5628 msg_len, DDI_DMA_SYNC_FORDEV);
5629 }
5630
5631 /* save how the packet was sent */
5632 tx_cb->tx_type = tx_mode;
5633
5634 QL_DUMP_REQ_PKT(qlge, mac_iocb_ptr, tx_cb->oal, oal_entries);
5635 /* reduce the number of available tx slot */
5636 atomic_dec_32(&tx_ring->tx_free_count);
5637
5638 tx_ring->prod_idx++;
5639 if (tx_ring->prod_idx >= tx_ring->wq_len)
5640 tx_ring->prod_idx = 0;
5641
5642 now = ddi_get_lbolt();
5643 qlge->last_tx_time = now;
5644
5645 return (DDI_SUCCESS);
5646
5647 bad:
5648 /*
5649 * if for any reason the driver cannot send, free
5650 * the message, mp
5651 */
5652 now = ddi_get_lbolt();
5653 freemsg(mp);
5654 mp = NULL;
5655 tx_cb->mp = NULL;
5656 for (i = 0; i < j; i++)
5657 (void) ddi_dma_unbind_handle(tx_cb->tx_dma_handle[i]);
5658
5659 QL_PRINT(DBG_TX, ("%s(%d) failed at 0x%x",
5660 __func__, qlge->instance, (int)now));
5661
5662 return (DDI_SUCCESS);
5663 }
5664
5665
5666 /*
5667 * Initializes hardware and driver software flags before the driver
5668 * is finally ready to work.
5669 */
5670 int
5671 ql_do_start(qlge_t *qlge)
5672 {
5673 int i;
5674 struct rx_ring *rx_ring;
5675 uint16_t lbq_buf_size;
5676 int rings_done;
5677
5678 ASSERT(qlge != NULL);
5679
5680 mutex_enter(&qlge->hw_mutex);
5681
5682 /* Reset adapter */
5683 (void) ql_asic_reset(qlge);
5684
5685 lbq_buf_size = (uint16_t)
5686 ((qlge->mtu == ETHERMTU)? LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE);
5687 if (qlge->rx_ring[0].lbq_buf_size != lbq_buf_size) {
5688 #ifdef QLGE_LOAD_UNLOAD
5689 cmn_err(CE_NOTE, "realloc buffers old: %d new: %d\n",
5690 qlge->rx_ring[0].lbq_buf_size, lbq_buf_size);
5691 #endif
5692 /*
5693 * Check if any ring still has buffers held by upper layers.
5694 * If buffers are pending with upper layers, we keep the
5695 * existing buffers and don't reallocate new ones.
5696 * Unfortunately there is no way to evict buffers from
5697 * upper layers. Using buffers with the current size may
5698 * cause slightly sub-optimal performance, but that seems
5699 * to be the easiest way to handle this situation.
5700 */
5701 rings_done = 0;
5702 for (i = 0; i < qlge->rx_ring_count; i++) {
5703 rx_ring = &qlge->rx_ring[i];
5704 if (rx_ring->rx_indicate == 0)
5705 rings_done++;
5706 else
5707 break;
5708 }
5709 /*
5710 * No buffers pending with upper layers;
5711 * reallocate them for the new MTU size
5712 */
5713 if (rings_done >= qlge->rx_ring_count) {
5714 /* free large buffer pool */
5715 for (i = 0; i < qlge->rx_ring_count; i++) {
5716 rx_ring = &qlge->rx_ring[i];
5717 if (rx_ring->type != TX_Q) {
5718 ql_free_sbq_buffers(rx_ring);
5719 ql_free_lbq_buffers(rx_ring);
5720 }
5721 }
5722 /* reallocate large buffer pool */
5723 for (i = 0; i < qlge->rx_ring_count; i++) {
5724 rx_ring = &qlge->rx_ring[i];
5725 if (rx_ring->type != TX_Q) {
5726 (void) ql_alloc_sbufs(qlge, rx_ring);
5727 (void) ql_alloc_lbufs(qlge, rx_ring);
5728 }
5729 }
5730 }
5731 }
5732
5733 if (ql_bringup_adapter(qlge) != DDI_SUCCESS) {
5734 cmn_err(CE_WARN, "qlge bringup adapter failed");
5735 mutex_exit(&qlge->hw_mutex);
5736 if (qlge->fm_enable) {
5737 atomic_or_32(&qlge->flags, ADAPTER_ERROR);
5738 ddi_fm_service_impact(qlge->dip, DDI_SERVICE_LOST);
5739 }
5740 return (DDI_FAILURE);
5741 }
5742
5743 mutex_exit(&qlge->hw_mutex);
5744 /* if adapter is up successfully but was bad before */
5745 if (qlge->flags & ADAPTER_ERROR) {
5746 atomic_and_32(&qlge->flags, ~ADAPTER_ERROR);
5747 if (qlge->fm_enable) {
5748 ddi_fm_service_impact(qlge->dip, DDI_SERVICE_RESTORED);
5749 }
5750 }
5751
5752 /* Get current link state */
5753 qlge->port_link_state = ql_get_link_state(qlge);
5754
5755 if (qlge->port_link_state == LS_UP) {
5756 QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5757 __func__, qlge->instance));
5758 /* If driver detects a carrier on */
5759 CARRIER_ON(qlge);
5760 } else {
5761 QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5762 __func__, qlge->instance));
5763 /* If driver detects a lack of carrier */
5764 CARRIER_OFF(qlge);
5765 }
5766 qlge->mac_flags = QL_MAC_STARTED;
5767 return (DDI_SUCCESS);
5768 }
5769
5770 /*
5771 * Stop the currently running driver.
5772 * The driver needs to stop routing new packets to itself and wait until
5773 * all pending tx/rx buffers have been freed.
5774 */
5775 int
5776 ql_do_stop(qlge_t *qlge)
5777 {
5778 int rc = DDI_FAILURE;
5779 uint32_t i, j, k;
5780 struct bq_desc *sbq_desc, *lbq_desc;
5781 struct rx_ring *rx_ring;
5782
5783 ASSERT(qlge != NULL);
5784
5785 CARRIER_OFF(qlge);
5786
5787 rc = ql_bringdown_adapter(qlge);
5788 if (rc != DDI_SUCCESS) {
5789 cmn_err(CE_WARN, "qlge bringdown adapter failed.");
5790 } else
5791 rc = DDI_SUCCESS;
5792
5793 for (k = 0; k < qlge->rx_ring_count; k++) {
5794 rx_ring = &qlge->rx_ring[k];
5795 if (rx_ring->type != TX_Q) {
5796 j = rx_ring->lbq_use_head;
5797 #ifdef QLGE_LOAD_UNLOAD
5798 cmn_err(CE_NOTE, "ring %d: move %d lbufs in use list"
5799 " to free list %d\n total %d\n",
5800 k, rx_ring->lbuf_in_use_count,
5801 rx_ring->lbuf_free_count,
5802 rx_ring->lbuf_in_use_count +
5803 rx_ring->lbuf_free_count);
5804 #endif
5805 for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
5806 lbq_desc = rx_ring->lbuf_in_use[j];
5807 j++;
5808 if (j >= rx_ring->lbq_len) {
5809 j = 0;
5810 }
5811 if (lbq_desc->mp) {
5812 atomic_inc_32(&rx_ring->rx_indicate);
5813 freemsg(lbq_desc->mp);
5814 }
5815 }
5816 rx_ring->lbq_use_head = j;
5817 rx_ring->lbq_use_tail = j;
5818 rx_ring->lbuf_in_use_count = 0;
5819 j = rx_ring->sbq_use_head;
5820 #ifdef QLGE_LOAD_UNLOAD
5821 cmn_err(CE_NOTE, "ring %d: move %d sbufs in use list,"
5822 " to free list %d\n total %d \n",
5823 k, rx_ring->sbuf_in_use_count,
5824 rx_ring->sbuf_free_count,
5825 rx_ring->sbuf_in_use_count +
5826 rx_ring->sbuf_free_count);
5827 #endif
5828 for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
5829 sbq_desc = rx_ring->sbuf_in_use[j];
5830 j++;
5831 if (j >= rx_ring->sbq_len) {
5832 j = 0;
5833 }
5834 if (sbq_desc->mp) {
5835 atomic_inc_32(&rx_ring->rx_indicate);
5836 freemsg(sbq_desc->mp);
5837 }
5838 }
5839 rx_ring->sbq_use_head = j;
5840 rx_ring->sbq_use_tail = j;
5841 rx_ring->sbuf_in_use_count = 0;
5842 }
5843 }
5844
5845 qlge->mac_flags = QL_MAC_STOPPED;
5846
5847 return (rc);
5848 }
5849
5850 /*
5851 * Support
5852 */
5853
5854 void
5855 ql_disable_isr(qlge_t *qlge)
5856 {
5857 /*
5858 * disable the hardware interrupt
5859 */
5860 ISP_DISABLE_GLOBAL_INTRS(qlge);
5861
5862 qlge->flags &= ~INTERRUPTS_ENABLED;
5863 }
5864
5865
5866
5867 /*
5868 * busy wait for 'usecs' microseconds.
5869 */
5870 void
5871 qlge_delay(clock_t usecs)
5872 {
5873 drv_usecwait(usecs);
5874 }
5875
5876 /*
5877 * Retrieve the adapter's PCI configuration.
5878 */
5879
5880 pci_cfg_t *
5881 ql_get_pci_config(qlge_t *qlge)
5882 {
5883 return (&(qlge->pci_cfg));
5884 }
5885
5886 /*
5887 * Get current Link status
5888 */
5889 static uint32_t
5890 ql_get_link_state(qlge_t *qlge)
5891 {
5892 uint32_t bitToCheck = 0;
5893 uint32_t temp, linkState;
5894
5895 if (qlge->func_number == qlge->fn0_net) {
5896 bitToCheck = STS_PL0;
5897 } else {
5898 bitToCheck = STS_PL1;
5899 }
5900 temp = ql_read_reg(qlge, REG_STATUS);
5901 QL_PRINT(DBG_GLD, ("%s(%d) chip status reg: 0x%x\n",
5902 __func__, qlge->instance, temp));
5903
5904 if (temp & bitToCheck) {
5905 linkState = LS_UP;
5906 } else {
5907 linkState = LS_DOWN;
5908 }
5909 if (CFG_IST(qlge, CFG_CHIP_8100)) {
5910 /* for Schultz, link Speed is fixed to 10G, full duplex */
5911 qlge->speed = SPEED_10G;
5912 qlge->duplex = 1;
5913 }
5914 return (linkState);
5915 }
5916 /*
5917 * Get current link status and report to OS
5918 */
5919 static void
5920 ql_get_and_report_link_state(qlge_t *qlge)
5921 {
5922 uint32_t cur_link_state;
5923
5924 /* Get current link state */
5925 cur_link_state = ql_get_link_state(qlge);
5926 /* if link state has changed */
5927 if (cur_link_state != qlge->port_link_state) {
5928
5929 qlge->port_link_state = cur_link_state;
5930
5931 if (qlge->port_link_state == LS_UP) {
5932 QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5933 __func__, qlge->instance));
5934 /* If driver detects a carrier on */
5935 CARRIER_ON(qlge);
5936 } else {
5937 QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5938 __func__, qlge->instance));
5939 /* If driver detects a lack of carrier */
5940 CARRIER_OFF(qlge);
5941 }
5942 }
5943 }
5944
5945 /*
5946 * timer callback function executed after timer expires
5947 */
5948 static void
5949 ql_timer(void* arg)
5950 {
5951 ql_get_and_report_link_state((qlge_t *)arg);
5952 }
5953
5954 /*
5955 * stop the running timer if activated
5956 */
5957 static void
5958 ql_stop_timer(qlge_t *qlge)
5959 {
5960 timeout_id_t timer_id;
5961 /* Disable driver timer */
5962 if (qlge->ql_timer_timeout_id != NULL) {
5963 timer_id = qlge->ql_timer_timeout_id;
5964 qlge->ql_timer_timeout_id = NULL;
5965 (void) untimeout(timer_id);
5966 }
5967 }
5968
5969 /*
5970 * stop then restart timer
5971 */
5972 void
5973 ql_restart_timer(qlge_t *qlge)
5974 {
5975 ql_stop_timer(qlge);
5976 qlge->ql_timer_ticks = TICKS_PER_SEC / 4;
5977 qlge->ql_timer_timeout_id = timeout(ql_timer,
5978 (void *)qlge, qlge->ql_timer_ticks);
5979 }
5980
5981 /* ************************************************************************* */
5982 /*
5983 * Hardware K-Stats Data Structures and Subroutines
5984 */
5985 /* ************************************************************************* */
5986 static const ql_ksindex_t ql_kstats_hw[] = {
5987 /* PCI related hardware information */
5988 { 0, "Vendor Id" },
5989 { 1, "Device Id" },
5990 { 2, "Command" },
5991 { 3, "Status" },
5992 { 4, "Revision Id" },
5993 { 5, "Cache Line Size" },
5994 { 6, "Latency Timer" },
5995 { 7, "Header Type" },
5996 { 9, "I/O base addr" },
5997 { 10, "Control Reg Base addr low" },
5998 { 11, "Control Reg Base addr high" },
5999 { 12, "Doorbell Reg Base addr low" },
6000 { 13, "Doorbell Reg Base addr high" },
6001 { 14, "Subsystem Vendor Id" },
6002 { 15, "Subsystem Device ID" },
6003 { 16, "PCIe Device Control" },
6004 { 17, "PCIe Link Status" },
6005
6006 { -1, NULL },
6007 };
6008
6009 /*
6010 * kstat update function for PCI registers
6011 */
6012 static int
6013 ql_kstats_get_pci_regs(kstat_t *ksp, int flag)
6014 {
6015 qlge_t *qlge;
6016 kstat_named_t *knp;
6017
6018 if (flag != KSTAT_READ)
6019 return (EACCES);
6020
6021 qlge = ksp->ks_private;
6022 knp = ksp->ks_data;
6023 (knp++)->value.ui32 = qlge->pci_cfg.vendor_id;
6024 (knp++)->value.ui32 = qlge->pci_cfg.device_id;
6025 (knp++)->value.ui32 = qlge->pci_cfg.command;
6026 (knp++)->value.ui32 = qlge->pci_cfg.status;
6027 (knp++)->value.ui32 = qlge->pci_cfg.revision;
6028 (knp++)->value.ui32 = qlge->pci_cfg.cache_line_size;
6029 (knp++)->value.ui32 = qlge->pci_cfg.latency_timer;
6030 (knp++)->value.ui32 = qlge->pci_cfg.header_type;
6031 (knp++)->value.ui32 = qlge->pci_cfg.io_base_address;
6032 (knp++)->value.ui32 =
6033 qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_lower;
6034 (knp++)->value.ui32 =
6035 qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_upper;
6036 (knp++)->value.ui32 =
6037 qlge->pci_cfg.pci_doorbell_mem_base_address_lower;
6038 (knp++)->value.ui32 =
6039 qlge->pci_cfg.pci_doorbell_mem_base_address_upper;
6040 (knp++)->value.ui32 = qlge->pci_cfg.sub_vendor_id;
6041 (knp++)->value.ui32 = qlge->pci_cfg.sub_device_id;
6042 (knp++)->value.ui32 = qlge->pci_cfg.pcie_device_control;
6043 (knp++)->value.ui32 = qlge->pci_cfg.link_status;
6044
6045 return (0);
6046 }
6047
6048 static const ql_ksindex_t ql_kstats_mii[] = {
6049 /* MAC/MII related hardware information */
6050 { 0, "mtu"},
6051
6052 { -1, NULL},
6053 };
6054
6055
6056 /*
6057 * kstat update function for MII related information.
6058 */
6059 static int
6060 ql_kstats_mii_update(kstat_t *ksp, int flag)
6061 {
6062 qlge_t *qlge;
6063 kstat_named_t *knp;
6064
6065 if (flag != KSTAT_READ)
6066 return (EACCES);
6067
6068 qlge = ksp->ks_private;
6069 knp = ksp->ks_data;
6070
6071 (knp++)->value.ui32 = qlge->mtu;
6072
6073 return (0);
6074 }
6075
6076 static const ql_ksindex_t ql_kstats_reg[] = {
6077 /* Register information */
6078 { 0, "System (0x08)" },
6079 { 1, "Reset/Fail Over(0x0Ch" },
6080 { 2, "Function Specific Control(0x10)" },
6081 { 3, "Status (0x30)" },
6082 { 4, "Intr Enable (0x34)" },
6083 { 5, "Intr Status1 (0x3C)" },
6084 { 6, "Error Status (0x54)" },
6085 { 7, "XGMAC Flow Control(0x11C)" },
6086 { 8, "XGMAC Tx Pause Frames(0x230)" },
6087 { 9, "XGMAC Rx Pause Frames(0x388)" },
6088 { 10, "XGMAC Rx FIFO Drop Count(0x5B8)" },
6089 { 11, "interrupts actually allocated" },
6090 { 12, "interrupts on rx ring 0" },
6091 { 13, "interrupts on rx ring 1" },
6092 { 14, "interrupts on rx ring 2" },
6093 { 15, "interrupts on rx ring 3" },
6094 { 16, "interrupts on rx ring 4" },
6095 { 17, "interrupts on rx ring 5" },
6096 { 18, "interrupts on rx ring 6" },
6097 { 19, "interrupts on rx ring 7" },
6098 { 20, "polls on rx ring 0" },
6099 { 21, "polls on rx ring 1" },
6100 { 22, "polls on rx ring 2" },
6101 { 23, "polls on rx ring 3" },
6102 { 24, "polls on rx ring 4" },
6103 { 25, "polls on rx ring 5" },
6104 { 26, "polls on rx ring 6" },
6105 { 27, "polls on rx ring 7" },
6106 { 28, "tx no resource on ring 0" },
6107 { 29, "tx dma bind fail on ring 0" },
6108 { 30, "tx dma no handle on ring 0" },
6109 { 31, "tx dma no cookie on ring 0" },
6110 { 32, "MPI firmware major version" },
6111 { 33, "MPI firmware minor version" },
6112 { 34, "MPI firmware sub version" },
6113 { 35, "rx no resource" },
6114
6115 { -1, NULL},
6116 };
6117
6118
6119 /*
6120 * kstat update function for device register set
6121 */
6122 static int
6123 ql_kstats_get_reg_and_dev_stats(kstat_t *ksp, int flag)
6124 {
6125 qlge_t *qlge;
6126 kstat_named_t *knp;
6127 uint32_t val32;
6128 int i = 0;
6129 struct tx_ring *tx_ring;
6130 struct rx_ring *rx_ring;
6131
6132 if (flag != KSTAT_READ)
6133 return (EACCES);
6134
6135 qlge = ksp->ks_private;
6136 knp = ksp->ks_data;
6137
6138 (knp++)->value.ui32 = ql_read_reg(qlge, REG_SYSTEM);
6139 (knp++)->value.ui32 = ql_read_reg(qlge, REG_RESET_FAILOVER);
6140 (knp++)->value.ui32 = ql_read_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL);
6141 (knp++)->value.ui32 = ql_read_reg(qlge, REG_STATUS);
6142 (knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_ENABLE);
6143 (knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
6144 (knp++)->value.ui32 = ql_read_reg(qlge, REG_ERROR_STATUS);
6145
6146 if (ql_sem_spinlock(qlge, qlge->xgmac_sem_mask)) {
6147 return (0);
6148 }
6149 (void) ql_read_xgmac_reg(qlge, REG_XGMAC_FLOW_CONTROL, &val32);
6150 (knp++)->value.ui32 = val32;
6151
6152 (void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_TX_PAUSE_PKTS, &val32);
6153 (knp++)->value.ui32 = val32;
6154
6155 (void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_PAUSE_PKTS, &val32);
6156 (knp++)->value.ui32 = val32;
6157
6158 (void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_FIFO_DROPS, &val32);
6159 (knp++)->value.ui32 = val32;
6160
6161 ql_sem_unlock(qlge, qlge->xgmac_sem_mask);
6162
6163 (knp++)->value.ui32 = qlge->intr_cnt;
6164
6165 for (i = 0; i < 8; i++) {
6166 (knp++)->value.ui32 = qlge->rx_interrupts[i];
6167 }
6168
6169 for (i = 0; i < 8; i++) {
6170 (knp++)->value.ui32 = qlge->rx_polls[i];
6171 }
6172
6173 tx_ring = &qlge->tx_ring[0];
6174 (knp++)->value.ui32 = tx_ring->defer;
6175 (knp++)->value.ui32 = tx_ring->tx_fail_dma_bind;
6176 (knp++)->value.ui32 = tx_ring->tx_no_dma_handle;
6177 (knp++)->value.ui32 = tx_ring->tx_no_dma_cookie;
6178
6179 (knp++)->value.ui32 = qlge->fw_version_info.major_version;
6180 (knp++)->value.ui32 = qlge->fw_version_info.minor_version;
6181 (knp++)->value.ui32 = qlge->fw_version_info.sub_minor_version;
6182
/* sum the per-ring "rx no resource" drops, starting from zero */
val32 = 0;
6183 for (i = 0; i < qlge->rx_ring_count; i++) {
6184 rx_ring = &qlge->rx_ring[i];
6185 val32 += rx_ring->rx_packets_dropped_no_buffer;
6186 }
6187 (knp++)->value.ui32 = val32;
6188
6189 return (0);
6190 }
6191
6192
6193 static kstat_t *
6194 ql_setup_named_kstat(qlge_t *qlge, int instance, char *name,
6195 const ql_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
6196 {
6197 kstat_t *ksp;
6198 kstat_named_t *knp;
6199 char *np;
6200 int type;
6201
6202 size /= sizeof (ql_ksindex_t);
6203 ksp = kstat_create(ADAPTER_NAME, instance, name, "net",
6204 KSTAT_TYPE_NAMED, ((uint32_t)size) - 1, KSTAT_FLAG_PERSISTENT);
6205 if (ksp == NULL)
6206 return (NULL);
6207
6208 ksp->ks_private = qlge;
6209 ksp->ks_update = update;
6210 for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
6211 switch (*np) {
6212 default:
6213 type = KSTAT_DATA_UINT32;
6214 break;
6215 case '&':
6216 np += 1;
6217 type = KSTAT_DATA_CHAR;
6218 break;
6219 }
6220 kstat_named_init(knp, np, (uint8_t)type);
6221 }
6222 kstat_install(ksp);
6223
6224 return (ksp);
6225 }
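/*
 * Usage note (hypothetical entries, not present in the tables above): a
 * ql_ksindex_t name that begins with '&' is installed as a
 * KSTAT_DATA_CHAR statistic with the '&' stripped; all other names are
 * installed as KSTAT_DATA_UINT32:
 *
 *	{ 18, "&Firmware Version" }	->  char kstat "Firmware Version"
 *	{ 18, "Firmware Version" }	->  uint32 kstat "Firmware Version"
 */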
6226
6227 /*
6228 * Set up the various kstats
6229 */
6230 int
6231 ql_init_kstats(qlge_t *qlge)
6232 {
6233 /* Hardware KStats */
6234 qlge->ql_kstats[QL_KSTAT_CHIP] = ql_setup_named_kstat(qlge,
6235 qlge->instance, "chip", ql_kstats_hw,
6236 sizeof (ql_kstats_hw), ql_kstats_get_pci_regs);
6237 if (qlge->ql_kstats[QL_KSTAT_CHIP] == NULL) {
6238 return (DDI_FAILURE);
6239 }
6240
6241 /* MII KStats */
6242 qlge->ql_kstats[QL_KSTAT_LINK] = ql_setup_named_kstat(qlge,
6243 qlge->instance, "mii", ql_kstats_mii,
6244 sizeof (ql_kstats_mii), ql_kstats_mii_update);
6245 if (qlge->ql_kstats[QL_KSTAT_LINK] == NULL) {
6246 return (DDI_FAILURE);
6247 }
6248
6249 /* REG KStats */
6250 qlge->ql_kstats[QL_KSTAT_REG] = ql_setup_named_kstat(qlge,
6251 qlge->instance, "reg", ql_kstats_reg,
6252 sizeof (ql_kstats_reg), ql_kstats_get_reg_and_dev_stats);
6253 if (qlge->ql_kstats[QL_KSTAT_REG] == NULL) {
6254 return (DDI_FAILURE);
6255 }
6256 return (DDI_SUCCESS);
6257 }
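/*
 * Sketch of reading these kstats from userland with libkstat
 * (illustrative only; assumes ADAPTER_NAME is "qlge" and instance 0):
 *
 *	#include <stdio.h>
 *	#include <kstat.h>
 *
 *	kstat_ctl_t *kc = kstat_open();
 *	kstat_t *ksp = kstat_lookup(kc, "qlge", 0, "chip");
 *	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1) {
 *		kstat_named_t *kn = kstat_data_lookup(ksp, "Vendor Id");
 *		if (kn != NULL)
 *			(void) printf("vendor 0x%x\n", kn->value.ui32);
 *	}
 *	(void) kstat_close(kc);
 *
 * The "mii" and "reg" kstats installed above are read the same way.
 */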
6258
6259 /*
6260 * delete all kstats
6261 */
6262 void
6263 ql_fini_kstats(qlge_t *qlge)
6264 {
6265 int i;
6266
6267 for (i = 0; i < QL_KSTAT_COUNT; i++) {
6268 if (qlge->ql_kstats[i] != NULL)
6269 kstat_delete(qlge->ql_kstats[i]);
6270 }
6271 }
6272
6273 /* ************************************************************************* */
6274 /*
6275 * kstat end
6276 */
6277 /* ************************************************************************* */
6278
6279 /*
6280 * Setup the parameters for receive and transmit rings including buffer sizes
6281 * and completion queue sizes
6282 */
6283 static int
6284 ql_setup_rings(qlge_t *qlge)
6285 {
6286 uint8_t i;
6287 struct rx_ring *rx_ring;
6288 struct tx_ring *tx_ring;
6289 uint16_t lbq_buf_size;
6290
6291 lbq_buf_size = (uint16_t)
6292 ((qlge->mtu == ETHERMTU)? LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE);
6293
6294 /*
6295 * rx_ring[0] is always the default queue.
6296 */
6297 /*
6298 * qlge->rx_ring_count:
6299 * Total number of rx_rings. This includes a number
6300 * of outbound completion handler rx_rings, and a
6301 * number of inbound completion handler rx_rings.
6302 * RSS is only enabled if we have more than 1 rx completion
6303 * queue. If we have a single rx completion queue,
6304 * then all inbound completions go to that queue;
6305 * outbound completions still use their own queues.
6306 */
6307
6308 qlge->tx_ring_first_cq_id = qlge->rss_ring_count;
6309
6310 for (i = 0; i < qlge->tx_ring_count; i++) {
6311 tx_ring = &qlge->tx_ring[i];
6312 bzero((void *)tx_ring, sizeof (*tx_ring));
6313 tx_ring->qlge = qlge;
6314 tx_ring->wq_id = i;
6315 tx_ring->wq_len = qlge->tx_ring_size;
6316 tx_ring->wq_size = (uint32_t)(
6317 tx_ring->wq_len * sizeof (struct ob_mac_iocb_req));
6318
6319 /*
6320 * The completion queue ID for the tx rings start
6321 * immediately after the last rss completion queue.
6322 */
6323 tx_ring->cq_id = (uint16_t)(i + qlge->tx_ring_first_cq_id);
6324 }
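/*
 * Worked example of the completion queue layout (hypothetical counts;
 * this assumes rx_ring_count == rss_ring_count + tx_ring_count, which
 * is the arrangement set up by this routine): with rss_ring_count == 8
 * and tx_ring_count == 8 there are 16 completion queues in total;
 * queues 0-7 carry inbound (RSS) traffic and queues 8-15 service
 * tx rings 0-7, i.e. tx_ring[i].cq_id == 8 + i.
 */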
6325
6326 for (i = 0; i < qlge->rx_ring_count; i++) {
6327 rx_ring = &qlge->rx_ring[i];
6328 bzero((void *)rx_ring, sizeof (*rx_ring));
6329 rx_ring->qlge = qlge;
6330 rx_ring->cq_id = i;
6331 if (i != 0)
6332 rx_ring->cpu = (i) % qlge->rx_ring_count;
6333 else
6334 rx_ring->cpu = 0;
6335
6336 if (i < qlge->rss_ring_count) {
6337 /*
6338 * Inbound completions (RSS) queues
6339 * Default queue is queue 0 which handles
6340 * unicast plus bcast/mcast and async events.
6341 * Other inbound queues handle unicast frames only.
6342 */
6343 rx_ring->cq_len = qlge->rx_ring_size;
6344 rx_ring->cq_size = (uint32_t)
6345 (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
6346 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
6347 rx_ring->lbq_size = (uint32_t)
6348 (rx_ring->lbq_len * sizeof (uint64_t));
6349 rx_ring->lbq_buf_size = lbq_buf_size;
6350 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
6351 rx_ring->sbq_size = (uint32_t)
6352 (rx_ring->sbq_len * sizeof (uint64_t));
6353 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
6354 rx_ring->type = RX_Q;
6355
6356 QL_PRINT(DBG_GLD,
6357 ("%s(%d)Allocating rss completion queue %d "
6358 "on cpu %d\n", __func__, qlge->instance,
6359 rx_ring->cq_id, rx_ring->cpu));
6360 } else {
6361 /*
6362 * Outbound queue handles outbound completions only
6363 */
6364 /* outbound cq is same size as tx_ring it services. */
6365 QL_PRINT(DBG_INIT, ("rx_ring 0x%p i %d\n", rx_ring, i));
6366 rx_ring->cq_len = qlge->tx_ring_size;
6367 rx_ring->cq_size = (uint32_t)
6368 (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
6369 rx_ring->lbq_len = 0;
6370 rx_ring->lbq_size = 0;
6371 rx_ring->lbq_buf_size = 0;
6372 rx_ring->sbq_len = 0;
6373 rx_ring->sbq_size = 0;
6374 rx_ring->sbq_buf_size = 0;
6375 rx_ring->type = TX_Q;
6376
6377 QL_PRINT(DBG_GLD,
6378 ("%s(%d)Allocating TX completion queue %d on"
6379 " cpu %d\n", __func__, qlge->instance,
6380 rx_ring->cq_id, rx_ring->cpu));
6381 }
6382 }
6383
6384 return (DDI_SUCCESS);
6385 }
6386
6387 static int
6388 ql_start_rx_ring(qlge_t *qlge, struct rx_ring *rx_ring)
6389 {
6390 struct cqicb_t *cqicb = (struct cqicb_t *)rx_ring->cqicb_dma.vaddr;
6391 void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
6392 (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
6393 /* first shadow area is used by wqicb's host copy of consumer index */
6394 + sizeof (uint64_t);
6395 uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
6396 (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
6397 + sizeof (uint64_t);
6398 /* lrg/sml bufq pointers */
6399 uint8_t *buf_q_base_reg =
6400 (uint8_t *)qlge->buf_q_ptr_base_addr_dma_attr.vaddr +
6401 (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
6402 uint64_t buf_q_base_reg_dma =
6403 qlge->buf_q_ptr_base_addr_dma_attr.dma_addr +
6404 (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
6405 caddr_t doorbell_area =
6406 qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * (128 + rx_ring->cq_id));
6407 int err = 0;
6408 uint16_t bq_len;
6409 uint64_t tmp;
6410 uint64_t *base_indirect_ptr;
6411 int page_entries;
6412
6413 /* Set up the shadow registers for this ring. */
6414 rx_ring->prod_idx_sh_reg = shadow_reg;
6415 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
6416 rx_ring->prod_idx_sh_reg_offset = (off_t)(((rx_ring->cq_id *
6417 sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE) + sizeof (uint64_t)));
6418
6419 rx_ring->lbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
6420 rx_ring->lbq_base_indirect_dma = buf_q_base_reg_dma;
6421
6422 QL_PRINT(DBG_INIT, ("%s rx ring(%d): prod_idx virtual addr = 0x%lx,"
6423 " phys_addr 0x%lx\n", __func__, rx_ring->cq_id,
6424 rx_ring->prod_idx_sh_reg, rx_ring->prod_idx_sh_reg_dma));
6425
6426 buf_q_base_reg += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
6427 buf_q_base_reg_dma += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
6428 rx_ring->sbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
6429 rx_ring->sbq_base_indirect_dma = buf_q_base_reg_dma;
6430
6431 /* PCI doorbell mem area + 0x00 for consumer index register */
6432 rx_ring->cnsmr_idx_db_reg = (uint32_t *)(void *)doorbell_area;
6433 rx_ring->cnsmr_idx = 0;
6434 *rx_ring->prod_idx_sh_reg = 0;
6435 rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
6436
6437 /* PCI doorbell mem area + 0x04 for valid register */
6438 rx_ring->valid_db_reg = (uint32_t *)(void *)
6439 ((uint8_t *)(void *)doorbell_area + 0x04);
6440
6441 /* PCI doorbell mem area + 0x18 for large buffer consumer */
6442 rx_ring->lbq_prod_idx_db_reg = (uint32_t *)(void *)
6443 ((uint8_t *)(void *)doorbell_area + 0x18);
6444
6445 /* PCI doorbell mem area + 0x1c */
6446 rx_ring->sbq_prod_idx_db_reg = (uint32_t *)(void *)
6447 ((uint8_t *)(void *)doorbell_area + 0x1c);
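/*
 * Doorbell layout used above, summarized for one completion queue
 * (offsets taken from the assignments in this routine):
 *
 *	page:	doorbell_reg_iobase + VM_PAGE_SIZE * (128 + cq_id)
 *	+0x00	consumer index register
 *	+0x04	valid register
 *	+0x18	large buffer queue producer index
 *	+0x1c	small buffer queue producer index
 */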
6448
6449 bzero((void *)cqicb, sizeof (*cqicb));
6450
6451 cqicb->msix_vect = (uint8_t)rx_ring->irq;
6452
6453 bq_len = (uint16_t)((rx_ring->cq_len == 65536) ?
6454 (uint16_t)0 : (uint16_t)rx_ring->cq_len);
6455 cqicb->len = (uint16_t)cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
6456
6457 cqicb->cq_base_addr_lo =
6458 cpu_to_le32(LS_64BITS(rx_ring->cq_dma.dma_addr));
6459 cqicb->cq_base_addr_hi =
6460 cpu_to_le32(MS_64BITS(rx_ring->cq_dma.dma_addr));
6461
6462 cqicb->prod_idx_addr_lo =
6463 cpu_to_le32(LS_64BITS(rx_ring->prod_idx_sh_reg_dma));
6464 cqicb->prod_idx_addr_hi =
6465 cpu_to_le32(MS_64BITS(rx_ring->prod_idx_sh_reg_dma));
6466
6467 /*
6468 * Set up the control block load flags.
6469 */
6470 cqicb->flags = FLAGS_LC | /* Load queue base address */
6471 FLAGS_LV | /* Load MSI-X vector */
6472 FLAGS_LI; /* Load irq delay values */
6473 if (rx_ring->lbq_len) {
6474 /* Load lbq values */
6475 cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LL);
6476 tmp = (uint64_t)rx_ring->lbq_dma.dma_addr;
6477 base_indirect_ptr = (uint64_t *)rx_ring->lbq_base_indirect;
6478 page_entries = 0;
6479 do {
6480 *base_indirect_ptr = cpu_to_le64(tmp);
6481 tmp += VM_PAGE_SIZE;
6482 base_indirect_ptr++;
6483 page_entries++;
6484 } while (page_entries < (int)(
6485 ((rx_ring->lbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
6486
6487 cqicb->lbq_addr_lo =
6488 cpu_to_le32(LS_64BITS(rx_ring->lbq_base_indirect_dma));
6489 cqicb->lbq_addr_hi =
6490 cpu_to_le32(MS_64BITS(rx_ring->lbq_base_indirect_dma));
6491 bq_len = (uint16_t)((rx_ring->lbq_buf_size == 65536) ?
6492 (uint16_t)0 : (uint16_t)rx_ring->lbq_buf_size);
6493 cqicb->lbq_buf_size = (uint16_t)cpu_to_le16(bq_len);
6494 bq_len = (uint16_t)((rx_ring->lbq_len == 65536) ? (uint16_t)0 :
6495 (uint16_t)rx_ring->lbq_len);
6496 cqicb->lbq_len = (uint16_t)cpu_to_le16(bq_len);
6497 rx_ring->lbq_prod_idx = 0;
6498 rx_ring->lbq_curr_idx = 0;
6499 }
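/*
 * Illustrative arithmetic (hypothetical queue length, and assuming the
 * 4 KB VM_PAGE_SIZE configured elsewhere in this driver): if lbq_len
 * were 4096 entries, the queue of 64-bit buffer pointers would occupy
 * 4096 * sizeof (uint64_t) = 32768 bytes = 8 pages, so the loop above
 * would write 8 page addresses into the lbq indirection list handed to
 * the chip.
 */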
6500 if (rx_ring->sbq_len) {
6501 /* Load sbq values */
6502 cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LS);
6503 tmp = (uint64_t)rx_ring->sbq_dma.dma_addr;
6504 base_indirect_ptr = (uint64_t *)rx_ring->sbq_base_indirect;
6505 page_entries = 0;
6506
6507 do {
6508 *base_indirect_ptr = cpu_to_le64(tmp);
6509 tmp += VM_PAGE_SIZE;
6510 base_indirect_ptr++;
6511 page_entries++;
6512 } while (page_entries < (uint32_t)
6513 (((rx_ring->sbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
6514
6515 cqicb->sbq_addr_lo =
6516 cpu_to_le32(LS_64BITS(rx_ring->sbq_base_indirect_dma));
6517 cqicb->sbq_addr_hi =
6518 cpu_to_le32(MS_64BITS(rx_ring->sbq_base_indirect_dma));
6519 cqicb->sbq_buf_size = (uint16_t)
6520 cpu_to_le16((uint16_t)(rx_ring->sbq_buf_size/2));
6521 bq_len = (uint16_t)((rx_ring->sbq_len == 65536) ?
6522 (uint16_t)0 : (uint16_t)rx_ring->sbq_len);
6523 cqicb->sbq_len = (uint16_t)cpu_to_le16(bq_len);
6524 rx_ring->sbq_prod_idx = 0;
6525 rx_ring->sbq_curr_idx = 0;
6526 }
6527 switch (rx_ring->type) {
6528 case TX_Q:
6529 cqicb->irq_delay = (uint16_t)
6530 cpu_to_le16(qlge->tx_coalesce_usecs);
6531 cqicb->pkt_delay = (uint16_t)
6532 cpu_to_le16(qlge->tx_max_coalesced_frames);
6533 break;
6534
6535 case DEFAULT_Q:
6536 cqicb->irq_delay = (uint16_t)
6537 cpu_to_le16(qlge->rx_coalesce_usecs);
6538 cqicb->pkt_delay = (uint16_t)
6539 cpu_to_le16(qlge->rx_max_coalesced_frames);
6540 break;
6541
6542 case RX_Q:
6543 /*
6544 * Inbound completion handling rx_rings run in
6545 * separate NAPI contexts.
6546 */
6547 cqicb->irq_delay = (uint16_t)
6548 cpu_to_le16(qlge->rx_coalesce_usecs);
6549 cqicb->pkt_delay = (uint16_t)
6550 cpu_to_le16(qlge->rx_max_coalesced_frames);
6551 break;
6552 default:
6553 cmn_err(CE_WARN, "Invalid rx_ring->type = %d.",
6554 rx_ring->type);
6555 }
6556 QL_PRINT(DBG_INIT, ("Initializing rx completion queue %d.\n",
6557 rx_ring->cq_id));
6558 /* QL_DUMP_CQICB(qlge, cqicb); */
6559 err = ql_write_cfg(qlge, CFG_LCQ, rx_ring->cqicb_dma.dma_addr,
6560 rx_ring->cq_id);
6561 if (err) {
6562 cmn_err(CE_WARN, "Failed to load CQICB.");
6563 return (err);
6564 }
6565
6566 rx_ring->rx_packets_dropped_no_buffer = 0;
6567 rx_ring->rx_pkt_dropped_mac_unenabled = 0;
6568 rx_ring->rx_failed_sbq_allocs = 0;
6569 rx_ring->rx_failed_lbq_allocs = 0;
6570 rx_ring->rx_packets = 0;
6571 rx_ring->rx_bytes = 0;
6572 rx_ring->frame_too_long = 0;
6573 rx_ring->frame_too_short = 0;
6574 rx_ring->fcs_err = 0;
6575
6576 return (err);
6577 }
6578
6579 /*
6580 * start RSS
6581 */
6582 static int
6583 ql_start_rss(qlge_t *qlge)
6584 {
6585 struct ricb *ricb = (struct ricb *)qlge->ricb_dma.vaddr;
6586 int status = 0;
6587 int i;
6588 uint8_t *hash_id = (uint8_t *)ricb->hash_cq_id;
6589
6590 bzero((void *)ricb, sizeof (*ricb));
6591
6592 ricb->base_cq = RSS_L4K;
6593 ricb->flags =
6594 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
6595 RSS_RT6);
6596 ricb->mask = (uint16_t)cpu_to_le16(RSS_HASH_CQ_ID_MAX - 1);
6597
6598 /*
6599 * Fill out the Indirection Table.
6600 */
6601 for (i = 0; i < RSS_HASH_CQ_ID_MAX; i++)
6602 hash_id[i] = (uint8_t)(i & (qlge->rss_ring_count - 1));
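/*
 * Example of the fill above (hypothetical ring count): with
 * rss_ring_count == 4 the mask is 3, so the RSS_HASH_CQ_ID_MAX-entry
 * table repeats the pattern 0, 1, 2, 3, 0, 1, 2, 3, ... and hash
 * buckets are spread evenly across the four inbound completion queues.
 * This mapping only spreads evenly when rss_ring_count is a power of
 * two.
 */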
6603
6604 (void) memcpy(&ricb->ipv6_hash_key[0], key_data, 40);
6605 (void) memcpy(&ricb->ipv4_hash_key[0], key_data, 16);
6606
6607 QL_PRINT(DBG_INIT, ("Initializing RSS.\n"));
6608
6609 status = ql_write_cfg(qlge, CFG_LR, qlge->ricb_dma.dma_addr, 0);
6610 if (status) {
6611 cmn_err(CE_WARN, "Failed to load RICB.");
6612 return (status);
6613 }
6614
6615 return (status);
6616 }
6617
6618 /*
6619 * load a tx ring control block to hw and start this ring
6620 */
6621 static int
6622 ql_start_tx_ring(qlge_t *qlge, struct tx_ring *tx_ring)
6623 {
6624 struct wqicb_t *wqicb = (struct wqicb_t *)tx_ring->wqicb_dma.vaddr;
6625 caddr_t doorbell_area =
6626 qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * tx_ring->wq_id);
6627 void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
6628 (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
6629 uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
6630 (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
6631 int err = 0;
6632
6633 /*
6634 * Assign doorbell registers for this tx_ring.
6635 */
6636
6637 /* TX PCI doorbell mem area for tx producer index */
6638 tx_ring->prod_idx_db_reg = (uint32_t *)(void *)doorbell_area;
6639 tx_ring->prod_idx = 0;
6640 /* TX PCI doorbell mem area + 0x04 */
6641 tx_ring->valid_db_reg = (uint32_t *)(void *)
6642 ((uint8_t *)(void *)doorbell_area + 0x04);
6643
6644 /*
6645 * Assign shadow registers for this tx_ring.
6646 */
6647 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
6648 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
6649 *tx_ring->cnsmr_idx_sh_reg = 0;
6650
6651 QL_PRINT(DBG_INIT, ("%s tx ring(%d): cnsmr_idx virtual addr = 0x%lx,"
6652 " phys_addr 0x%lx\n",
6653 __func__, tx_ring->wq_id, tx_ring->cnsmr_idx_sh_reg,
6654 tx_ring->cnsmr_idx_sh_reg_dma));
6655
6656 wqicb->len =
6657 (uint16_t)cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
6658 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
6659 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
6660 wqicb->cq_id_rss = (uint16_t)cpu_to_le16(tx_ring->cq_id);
6661 wqicb->rid = 0;
6662 wqicb->wq_addr_lo = cpu_to_le32(LS_64BITS(tx_ring->wq_dma.dma_addr));
6663 wqicb->wq_addr_hi = cpu_to_le32(MS_64BITS(tx_ring->wq_dma.dma_addr));
6664 wqicb->cnsmr_idx_addr_lo =
6665 cpu_to_le32(LS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
6666 wqicb->cnsmr_idx_addr_hi =
6667 cpu_to_le32(MS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
6668
6669 ql_init_tx_ring(tx_ring);
6670 /* QL_DUMP_WQICB(qlge, wqicb); */
6671 err = ql_write_cfg(qlge, CFG_LRQ, tx_ring->wqicb_dma.dma_addr,
6672 tx_ring->wq_id);
6673
6674 if (err) {
6675 cmn_err(CE_WARN, "Failed to load WQICB.");
6676 return (err);
6677 }
6678 return (err);
6679 }
6680
6681 /*
6682 * Set up a MAC, multicast or VLAN address for the
6683 * inbound frame matching.
6684 */
6685 int
6686 ql_set_mac_addr_reg(qlge_t *qlge, uint8_t *addr, uint32_t type,
6687 uint16_t index)
6688 {
6689 uint32_t offset = 0;
6690 int status = DDI_SUCCESS;
6691
6692 switch (type) {
6693 case MAC_ADDR_TYPE_MULTI_MAC:
6694 case MAC_ADDR_TYPE_CAM_MAC: {
6695 uint32_t cam_output;
6696 uint32_t upper = (addr[0] << 8) | addr[1];
6697 uint32_t lower =
6698 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
6699 (addr[5]);
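/*
 * Packing example (hypothetical address): for 00:c0:dd:11:22:33 the two
 * words written to the CAM below are
 *
 *	upper = (0x00 << 8) | 0xc0 = 0x000000c0
 *	lower = (0xdd << 24) | (0x11 << 16) | (0x22 << 8) | 0x33
 *	      = 0xdd112233
 */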
6700
6701 QL_PRINT(DBG_INIT, ("Adding %s ", (type ==
6702 MAC_ADDR_TYPE_MULTI_MAC) ?
6703 "MULTICAST" : "UNICAST"));
6704 QL_PRINT(DBG_INIT,
6705 ("addr %02x %02x %02x %02x %02x %02x at index %d in "
6706 "the CAM.\n",
6707 addr[0], addr[1], addr[2], addr[3], addr[4],
6708 addr[5], index));
6709
6710 status = ql_wait_reg_rdy(qlge,
6711 REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6712 if (status)
6713 goto exit;
6714 /* offset 0 - lower 32 bits of the MAC address */
6715 ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6716 (offset++) |
6717 (index << MAC_ADDR_IDX_SHIFT) | /* index */
6718 type); /* type */
6719 ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, lower);
6720 status = ql_wait_reg_rdy(qlge,
6721 REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6722 if (status)
6723 goto exit;
6724 /* offset 1 - upper 16 bits of the MAC address */
6725 ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6726 (offset++) |
6727 (index << MAC_ADDR_IDX_SHIFT) | /* index */
6728 type); /* type */
6729 ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, upper);
6730 status = ql_wait_reg_rdy(qlge,
6731 REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6732 if (status)
6733 goto exit;
6734 /* offset 2 - CQ ID associated with this MAC address */
6735 ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6736 (offset) | (index << MAC_ADDR_IDX_SHIFT) | /* index */
6737 type); /* type */
6738 /*
6739 * This field should also include the queue id
6740 * and possibly the function id. Right now we hardcode
6741 * the route field to NIC core.
6742 */
6743 if (type == MAC_ADDR_TYPE_CAM_MAC) {
6744 cam_output = (CAM_OUT_ROUTE_NIC |
6745 (qlge->func_number << CAM_OUT_FUNC_SHIFT) |
6746 (0 <<
6747 CAM_OUT_CQ_ID_SHIFT));
6748
6749 /* route to NIC core */
6750 ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA,
6751 cam_output);
6752 }
6753 break;
6754 }
6755 default:
6756 cmn_err(CE_WARN,
6757 "Address type %d not yet supported.", type);
6758 status = DDI_FAILURE;
6759 }
6760 exit:
6761 return (status);
6762 }
6763
6764 /*
6765 * The NIC function for this chip has 16 routing indexes. Each one can be used
6766 * to route different frame types to various inbound queues. We send
6767 * broadcast/multicast/error frames to the default queue for slow handling,
6768 * and CAM hit/RSS frames to the fast handling queues.
6769 */
6770 static int
6771 ql_set_routing_reg(qlge_t *qlge, uint32_t index, uint32_t mask, int enable)
6772 {
6773 int status;
6774 uint32_t value = 0;
6775
6776 QL_PRINT(DBG_INIT,
6777 ("%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
6778 (enable ? "Adding" : "Removing"),
6779 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
6780 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
6781 ((index ==
6782 RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
6783 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
6784 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
6785 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
6786 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
6787 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
6788 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
6789 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
6790 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
6791 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
6792 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
6793 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
6794 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
6795 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
6796 (enable ? "to" : "from")));
6797
6798 switch (mask) {
6799 case RT_IDX_CAM_HIT:
6800 value = RT_IDX_DST_CAM_Q | /* dest */
6801 RT_IDX_TYPE_NICQ | /* type */
6802 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */
6803 break;
6804
6805 case RT_IDX_VALID: /* Promiscuous Mode frames. */
6806 value = RT_IDX_DST_DFLT_Q | /* dest */
6807 RT_IDX_TYPE_NICQ | /* type */
6808 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */
6809 break;
6810
6811 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
6812 value = RT_IDX_DST_DFLT_Q | /* dest */
6813 RT_IDX_TYPE_NICQ | /* type */
6814 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
6815 break;
6816
6817 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
6818 value = RT_IDX_DST_DFLT_Q | /* dest */
6819 RT_IDX_TYPE_NICQ | /* type */
6820 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */
6821 break;
6822
6823 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
6824 value = RT_IDX_DST_CAM_Q | /* dest */
6825 RT_IDX_TYPE_NICQ | /* type */
6826 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */
6827 break;
6828
6829 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
6830 value = RT_IDX_DST_CAM_Q | /* dest */
6831 RT_IDX_TYPE_NICQ | /* type */
6832 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6833 break;
6834
6835 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
6836 value = RT_IDX_DST_RSS | /* dest */
6837 RT_IDX_TYPE_NICQ | /* type */
6838 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6839 break;
6840
6841 case 0: /* Clear the E-bit on an entry. */
6842 value = RT_IDX_DST_DFLT_Q | /* dest */
6843 RT_IDX_TYPE_NICQ | /* type */
6844 (index << RT_IDX_IDX_SHIFT); /* index */
6845 break;
6846
6847 default:
6848 cmn_err(CE_WARN, "Mask type %d not yet supported.",
6849 mask);
6850 status = -EPERM;
6851 goto exit;
6852 }
6853
6854 if (value != 0) {
6855 status = ql_wait_reg_rdy(qlge, REG_ROUTING_INDEX, RT_IDX_MW, 0);
6856 if (status)
6857 goto exit;
6858 value |= (enable ? RT_IDX_E : 0);
6859 ql_write_reg(qlge, REG_ROUTING_INDEX, value);
6860 ql_write_reg(qlge, REG_ROUTING_DATA, enable ? mask : 0);
6861 }
6862
6863 exit:
6864 return (status);
6865 }
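/*
 * Example of the register sequence performed above when
 * ql_route_initialize() enables broadcast routing with
 * ql_set_routing_reg(qlge, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1):
 *
 *	value = RT_IDX_DST_DFLT_Q | RT_IDX_TYPE_NICQ |
 *	    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT) | RT_IDX_E;
 *	ql_write_reg(qlge, REG_ROUTING_INDEX, value);
 *	ql_write_reg(qlge, REG_ROUTING_DATA, RT_IDX_BCAST);
 *
 * Clearing the slot later (mask 0, enable 0) writes the same index with
 * RT_IDX_E clear and 0 to the data register.
 */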
6866
6867 /*
6868 * Clear all the entries in the routing table.
6869 * Caller must get semaphore in advance.
6870 */
6871
6872 static int
6873 ql_stop_routing(qlge_t *qlge)
6874 {
6875 int status = 0;
6876 int i;
6877 /* Clear all the entries in the routing table. */
6878 for (i = 0; i < 16; i++) {
6879 status = ql_set_routing_reg(qlge, i, 0, 0);
6880 if (status) {
6881 cmn_err(CE_WARN, "Stop routing failed. ");
6882 }
6883 }
6884 return (status);
6885 }
6886
6887 /* Initialize the frame-to-queue routing. */
6888 int
6889 ql_route_initialize(qlge_t *qlge)
6890 {
6891 int status = 0;
6892
6893 status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
6894 if (status != DDI_SUCCESS)
6895 return (status);
6896
6897 /* Clear all the entries in the routing table. */
6898 status = ql_stop_routing(qlge);
6899 if (status) {
6900 goto exit;
6901 }
6902 status = ql_set_routing_reg(qlge, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
6903 if (status) {
6904 cmn_err(CE_WARN,
6905 "Failed to init routing register for broadcast packets.");
6906 goto exit;
6907 }
6908 /*
6909 * If we have more than one inbound queue, then turn on RSS in the
6910 * routing block.
6911 */
6912 if (qlge->rss_ring_count > 1) {
6913 status = ql_set_routing_reg(qlge, RT_IDX_RSS_MATCH_SLOT,
6914 RT_IDX_RSS_MATCH, 1);
6915 if (status) {
6916 cmn_err(CE_WARN,
6917 "Failed to init routing register for MATCH RSS "
6918 "packets.");
6919 goto exit;
6920 }
6921 }
6922
6923 status = ql_set_routing_reg(qlge, RT_IDX_CAM_HIT_SLOT,
6924 RT_IDX_CAM_HIT, 1);
6925 if (status) {
6926 cmn_err(CE_WARN,
6927 "Failed to init routing register for CAM packets.");
6928 goto exit;
6929 }
6930
6931 status = ql_set_routing_reg(qlge, RT_IDX_MCAST_MATCH_SLOT,
6932 RT_IDX_MCAST_MATCH, 1);
6933 if (status) {
6934 cmn_err(CE_WARN,
6935 "Failed to init routing register for Multicast "
6936 "packets.");
6937 }
6938
6939 exit:
6940 ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
6941 return (status);
6942 }
6943
6944 /*
6945 * Initialize hardware
6946 */
6947 static int
6948 ql_device_initialize(qlge_t *qlge)
6949 {
6950 uint32_t value, mask;
6951 int i;
6952 int status = 0;
6953 uint16_t pause = PAUSE_MODE_DISABLED;
6954 boolean_t update_port_config = B_FALSE;
6955 uint32_t pause_bit_mask;
6956 boolean_t dcbx_enable = B_FALSE;
6957 uint32_t dcbx_bit_mask = 0x10;
6958 /*
6959 * Set up the System register to halt on errors.
6960 */
6961 value = SYS_EFE | SYS_FAE;
6962 mask = value << 16;
6963 ql_write_reg(qlge, REG_SYSTEM, mask | value);
6964
6965 /* Set the default queue. */
6966 value = NIC_RCV_CFG_DFQ;
6967 mask = NIC_RCV_CFG_DFQ_MASK;
6968
6969 ql_write_reg(qlge, REG_NIC_RECEIVE_CONFIGURATION, mask | value);
6970
6971 /* Enable the MPI interrupt. */
6972 ql_write_reg(qlge, REG_INTERRUPT_MASK, (INTR_MASK_PI << 16)
6973 | INTR_MASK_PI);
6974 /* Enable the function, set pagesize, enable error checking. */
6975 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
6976 FSC_EC | FSC_VM_PAGE_4K | FSC_DBRST_1024;
6977 /* Set/clear header splitting. */
6978 if (CFG_IST(qlge, CFG_ENABLE_SPLIT_HEADER)) {
6979 value |= FSC_SH;
6980 ql_write_reg(qlge, REG_SPLIT_HEADER, SMALL_BUFFER_SIZE);
6981 }
6982 mask = FSC_VM_PAGESIZE_MASK |
6983 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
6984 ql_write_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL, mask | value);
6985 /*
6986 * check current port max frame size, if different from OS setting,
6987 * then we need to change
6988 */
6989 qlge->max_frame_size =
6990 (qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE;
6991
6992 mutex_enter(&qlge->mbx_mutex);
6993 status = ql_get_port_cfg(qlge);
6994 mutex_exit(&qlge->mbx_mutex);
6995
6996 if (status == DDI_SUCCESS) {
6997 /* if current frame size is smaller than required size */
6998 if (qlge->port_cfg_info.max_frame_size <
6999 qlge->max_frame_size) {
7000 QL_PRINT(DBG_MBX,
7001 ("update frame size, current %d, new %d\n",
7002 qlge->port_cfg_info.max_frame_size,
7003 qlge->max_frame_size));
7004 qlge->port_cfg_info.max_frame_size =
7005 qlge->max_frame_size;
7006 qlge->port_cfg_info.link_cfg |= ENABLE_JUMBO;
7007 update_port_config = B_TRUE;
7008 }
7009
7010 if (qlge->port_cfg_info.link_cfg & STD_PAUSE)
7011 pause = PAUSE_MODE_STANDARD;
7012 else if (qlge->port_cfg_info.link_cfg & PP_PAUSE)
7013 pause = PAUSE_MODE_PER_PRIORITY;
7014
7015 if (pause != qlge->pause) {
7016 pause_bit_mask = 0x60; /* bit 5-6 */
7017 /* clear pause bits */
7018 qlge->port_cfg_info.link_cfg &= ~pause_bit_mask;
7019 if (qlge->pause == PAUSE_MODE_STANDARD)
7020 qlge->port_cfg_info.link_cfg |= STD_PAUSE;
7021 else if (qlge->pause == PAUSE_MODE_PER_PRIORITY)
7022 qlge->port_cfg_info.link_cfg |= PP_PAUSE;
7023 update_port_config = B_TRUE;
7024 }
7025
7026 if (qlge->port_cfg_info.link_cfg & DCBX_ENABLE)
7027 dcbx_enable = B_TRUE;
7028 if (dcbx_enable != qlge->dcbx_enable) {
7029 qlge->port_cfg_info.link_cfg &= ~dcbx_bit_mask;
7030 if (qlge->dcbx_enable)
7031 qlge->port_cfg_info.link_cfg |= DCBX_ENABLE;
7032 }
7033
7034 update_port_config = B_TRUE;
7035
7036 /* if need to update port configuration */
7037 if (update_port_config) {
7038 mutex_enter(&qlge->mbx_mutex);
7039 (void) ql_set_mpi_port_config(qlge,
7040 qlge->port_cfg_info);
7041 mutex_exit(&qlge->mbx_mutex);
7042 }
7043 } else
7044 cmn_err(CE_WARN, "ql_get_port_cfg failed");
7045
7046 /* Start up the rx queues. */
7047 for (i = 0; i < qlge->rx_ring_count; i++) {
7048 status = ql_start_rx_ring(qlge, &qlge->rx_ring[i]);
7049 if (status) {
7050 cmn_err(CE_WARN,
7051 "Failed to start rx ring[%d]", i);
7052 return (status);
7053 }
7054 }
7055
7056 /*
7057 * If there is more than one inbound completion queue
7058 * then download a RICB to configure RSS.
7059 */
7060 if (qlge->rss_ring_count > 1) {
7061 status = ql_start_rss(qlge);
7062 if (status) {
7063 cmn_err(CE_WARN, "Failed to start RSS.");
7064 return (status);
7065 }
7066 }
7067
7068 /* Start up the tx queues. */
7069 for (i = 0; i < qlge->tx_ring_count; i++) {
7070 status = ql_start_tx_ring(qlge, &qlge->tx_ring[i]);
7071 if (status) {
7072 cmn_err(CE_WARN,
7073 "Failed to start tx ring[%d]", i);
7074 return (status);
7075 }
7076 }
7077 qlge->selected_tx_ring = 0;
7078 /* Set the frame routing filter. */
7079 status = ql_route_initialize(qlge);
7080 if (status) {
7081 cmn_err(CE_WARN,
7082 "Failed to init CAM/Routing tables.");
7083 return (status);
7084 }
7085
7086 return (status);
7087 }
7088 /*
7089 * Issue soft reset to chip.
7090 */
7091 static int
7092 ql_asic_reset(qlge_t *qlge)
7093 {
7094 int status = DDI_SUCCESS;
7095
7096 ql_write_reg(qlge, REG_RESET_FAILOVER, FUNCTION_RESET_MASK
7097 |FUNCTION_RESET);
7098
7099 if (ql_wait_reg_bit(qlge, REG_RESET_FAILOVER, FUNCTION_RESET,
7100 BIT_RESET, 0) != DDI_SUCCESS) {
7101 cmn_err(CE_WARN,
7102 "TIMEOUT!!! errored out of resetting the chip!");
7103 status = DDI_FAILURE;
7104 }
7105
7106 return (status);
7107 }
7108
7109 /*
7110 * If nearly all small buffer descriptors (more than sbq_len -
7111 * MIN_BUFFERS_ARM_COUNT) are free, move (sbq_len - MIN_BUFFERS_ARM_COUNT)
7112 * of them to the in-use list for the hardware; otherwise arm a multiple of 16.
7113 */
7114 static void
7115 ql_arm_sbuf(qlge_t *qlge, struct rx_ring *rx_ring)
7116 {
7117 struct bq_desc *sbq_desc;
7118 int i;
7119 uint64_t *sbq_entry = rx_ring->sbq_dma.vaddr;
7120 uint32_t arm_count;
7121
7122 if (rx_ring->sbuf_free_count > rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT)
7123 arm_count = (rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT);
7124 else {
7125 /* Adjust to a multiple of 16 */
7126 arm_count = (rx_ring->sbuf_free_count / 16) * 16;
7127 #ifdef QLGE_LOAD_UNLOAD
7128 cmn_err(CE_NOTE, "adjust sbuf arm_count %d\n", arm_count);
7129 #endif
7130 }
7131 for (i = 0; i < arm_count; i++) {
7132 sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
7133 if (sbq_desc == NULL)
7134 break;
7135 /* Arm asic */
7136 *sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
7137 sbq_entry++;
7138
7139 /* link the descriptors to in_use_list */
7140 ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
7141 rx_ring->sbq_prod_idx++;
7142 }
7143 ql_update_sbq_prod_idx(qlge, rx_ring);
7144 }
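/*
 * Rounding example for the else-branch above (hypothetical count): with
 * only 117 small buffers free, arm_count becomes (117 / 16) * 16 = 112,
 * so buffer addresses are always handed to the chip in groups of 16, as
 * the "Adjust to a multiple of 16" comment intends.
 */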
7145
7146 /*
7147 * If nearly all large buffer descriptors (more than lbq_len -
7148 * MIN_BUFFERS_ARM_COUNT) are free, move (lbq_len - MIN_BUFFERS_ARM_COUNT)
7149 * of them to the in-use list for the hardware; otherwise arm a multiple of 16.
7150 */
7151 static void
7152 ql_arm_lbuf(qlge_t *qlge, struct rx_ring *rx_ring)
7153 {
7154 struct bq_desc *lbq_desc;
7155 int i;
7156 uint64_t *lbq_entry = rx_ring->lbq_dma.vaddr;
7157 uint32_t arm_count;
7158
7159 if (rx_ring->lbuf_free_count > rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT)
7160 arm_count = (rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT);
7161 else {
7162 /* Adjust to a multiple of 16 */
7163 arm_count = (rx_ring->lbuf_free_count / 16) * 16;
7164 #ifdef QLGE_LOAD_UNLOAD
7165 cmn_err(CE_NOTE, "adjust lbuf arm_count %d\n", arm_count);
7166 #endif
7167 }
7168 for (i = 0; i < arm_count; i++) {
7169 lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
7170 if (lbq_desc == NULL)
7171 break;
7172 /* Arm asic */
7173 *lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
7174 lbq_entry++;
7175
7176 /* link the descriptors to in_use_list */
7177 ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
7178 rx_ring->lbq_prod_idx++;
7179 }
7180 ql_update_lbq_prod_idx(qlge, rx_ring);
7181 }
7182
7183
7184 /*
7185 * Initializes the adapter by configuring request and response queues
7186 * and arming small and large receive buffers for use by the
7187 * hardware.
7188 */
7189 static int
7190 ql_bringup_adapter(qlge_t *qlge)
7191 {
7192 int i;
7193
7194 if (ql_device_initialize(qlge) != DDI_SUCCESS) {
7195 cmn_err(CE_WARN, "?%s(%d): ql_device_initialize failed",
7196 __func__, qlge->instance);
7197 goto err_bringup;
7198 }
7199 qlge->sequence |= INIT_ADAPTER_UP;
7200
7201 #ifdef QLGE_TRACK_BUFFER_USAGE
7202 for (i = 0; i < qlge->rx_ring_count; i++) {
7203 if (qlge->rx_ring[i].type != TX_Q) {
7204 qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
7205 qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
7206 }
7207 qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
7208 }
7209 #endif
7210 /* Arm buffers */
7211 for (i = 0; i < qlge->rx_ring_count; i++) {
7212 if (qlge->rx_ring[i].type != TX_Q) {
7213 ql_arm_sbuf(qlge, &qlge->rx_ring[i]);
7214 ql_arm_lbuf(qlge, &qlge->rx_ring[i]);
7215 }
7216 }
7217
7218 /* Enable work/request queues */
7219 for (i = 0; i < qlge->tx_ring_count; i++) {
7220 if (qlge->tx_ring[i].valid_db_reg)
7221 ql_write_doorbell_reg(qlge,
7222 qlge->tx_ring[i].valid_db_reg,
7223 REQ_Q_VALID);
7224 }
7225
7226 /* Enable completion queues */
7227 for (i = 0; i < qlge->rx_ring_count; i++) {
7228 if (qlge->rx_ring[i].valid_db_reg)
7229 ql_write_doorbell_reg(qlge,
7230 qlge->rx_ring[i].valid_db_reg,
7231 RSP_Q_VALID);
7232 }
7233
7234 for (i = 0; i < qlge->tx_ring_count; i++) {
7235 mutex_enter(&qlge->tx_ring[i].tx_lock);
7236 qlge->tx_ring[i].mac_flags = QL_MAC_STARTED;
7237 mutex_exit(&qlge->tx_ring[i].tx_lock);
7238 }
7239
7240 for (i = 0; i < qlge->rx_ring_count; i++) {
7241 mutex_enter(&qlge->rx_ring[i].rx_lock);
7242 qlge->rx_ring[i].mac_flags = QL_MAC_STARTED;
7243 mutex_exit(&qlge->rx_ring[i].rx_lock);
7244 }
7245
7246 /* This mutex will get re-acquired in enable_completion interrupt */
7247 mutex_exit(&qlge->hw_mutex);
7248 /* Traffic can start flowing now */
7249 ql_enable_all_completion_interrupts(qlge);
7250 mutex_enter(&qlge->hw_mutex);
7251
7252 ql_enable_global_interrupt(qlge);
7253
7254 qlge->sequence |= ADAPTER_INIT;
7255 return (DDI_SUCCESS);
7256
7257 err_bringup:
7258 (void) ql_asic_reset(qlge);
7259 return (DDI_FAILURE);
7260 }
7261
7262 /*
7263 * Initialize mutexes of each rx/tx rings
7264 */
7265 static int
7266 ql_init_rx_tx_locks(qlge_t *qlge)
7267 {
7268 struct tx_ring *tx_ring;
7269 struct rx_ring *rx_ring;
7270 int i;
7271
7272 for (i = 0; i < qlge->tx_ring_count; i++) {
7273 tx_ring = &qlge->tx_ring[i];
7274 mutex_init(&tx_ring->tx_lock, NULL, MUTEX_DRIVER,
7275 DDI_INTR_PRI(qlge->intr_pri));
7276 }
7277
7278 for (i = 0; i < qlge->rx_ring_count; i++) {
7279 rx_ring = &qlge->rx_ring[i];
7280 mutex_init(&rx_ring->rx_lock, NULL, MUTEX_DRIVER,
7281 DDI_INTR_PRI(qlge->intr_pri));
7282 mutex_init(&rx_ring->sbq_lock, NULL, MUTEX_DRIVER,
7283 DDI_INTR_PRI(qlge->intr_pri));
7284 mutex_init(&rx_ring->lbq_lock, NULL, MUTEX_DRIVER,
7285 DDI_INTR_PRI(qlge->intr_pri));
7286 }
7287
7288 return (DDI_SUCCESS);
7289 }
7290
7291 /*ARGSUSED*/
7292 /*
7293 * Simply call pci_ereport_post which generates ereports for errors
7294 * that occur in the PCI local bus configuration status registers.
7295 */
7296 static int
7297 ql_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
7298 {
7299 pci_ereport_post(dip, err, NULL);
7300 return (err->fme_status);
7301 }
7302
7303 static void
7304 ql_fm_init(qlge_t *qlge)
7305 {
7306 ddi_iblock_cookie_t iblk;
7307
7308 QL_PRINT(DBG_INIT, ("ql_fm_init(%d) entered, FMA capability %x\n",
7309 qlge->instance, qlge->fm_capabilities));
7310 /*
7311 * Register capabilities with IO Fault Services. The capabilities
7312 * set above may not be supported by the parent nexus, in that case
7313 * some capability bits may be cleared.
7314 */
7315 if (qlge->fm_capabilities)
7316 ddi_fm_init(qlge->dip, &qlge->fm_capabilities, &iblk);
7317
7318 /*
7319 * Initialize pci ereport capabilities if ereport capable
7320 */
7321 if (DDI_FM_EREPORT_CAP(qlge->fm_capabilities) ||
7322 DDI_FM_ERRCB_CAP(qlge->fm_capabilities)) {
7323 pci_ereport_setup(qlge->dip);
7324 }
7325
7326 /* Register error callback if error callback capable */
7327 if (DDI_FM_ERRCB_CAP(qlge->fm_capabilities)) {
7328 ddi_fm_handler_register(qlge->dip,
7329 ql_fm_error_cb, (void*) qlge);
7330 }
7331
7332 /*
7333 * DDI_FLGERR_ACC indicates:
7334 * Driver will check its access handle(s) for faults on
7335 * a regular basis by calling ddi_fm_acc_err_get
7336 * Driver is able to cope with incorrect results of I/O
7337 * operations resulting from an I/O fault
7338 */
7339 if (DDI_FM_ACC_ERR_CAP(qlge->fm_capabilities)) {
7340 ql_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
7341 }
7342
7343 /*
7344 * DDI_DMA_FLAGERR indicates:
7345 * Driver will check its DMA handle(s) for faults on a
7346 * regular basis using ddi_fm_dma_err_get
7347 * Driver is able to cope with incorrect results of DMA
7348 * operations resulting from an I/O fault
7349 */
7350 if (DDI_FM_DMA_ERR_CAP(qlge->fm_capabilities)) {
7351 tx_mapping_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7352 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7353 }
7354 QL_PRINT(DBG_INIT, ("ql_fm_init(%d) done\n",
7355 qlge->instance));
7356 }
7357
7358 static void
7359 ql_fm_fini(qlge_t *qlge)
7360 {
7361 QL_PRINT(DBG_INIT, ("ql_fm_fini(%d) entered\n",
7362 qlge->instance));
7363 /* Only unregister FMA capabilities if we registered some */
7364 if (qlge->fm_capabilities) {
7365
7366 /*
7367 * Release any resources allocated by pci_ereport_setup()
7368 */
7369 if (DDI_FM_EREPORT_CAP(qlge->fm_capabilities) ||
7370 DDI_FM_ERRCB_CAP(qlge->fm_capabilities))
7371 pci_ereport_teardown(qlge->dip);
7372
7373 /*
7374 * Un-register error callback if error callback capable
7375 */
7376 if (DDI_FM_ERRCB_CAP(qlge->fm_capabilities))
7377 ddi_fm_handler_unregister(qlge->dip);
7378
7379 /* Unregister from IO Fault Services */
7380 ddi_fm_fini(qlge->dip);
7381 }
7382 QL_PRINT(DBG_INIT, ("ql_fm_fini(%d) done\n",
7383 qlge->instance));
7384 }
7385 /*
7386 * ql_attach - Driver attach.
7387 */
7388 static int
7389 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
7390 {
7391 int instance;
7392 qlge_t *qlge = NULL;
7393 int rval;
7394 uint16_t w;
7395 mac_register_t *macp = NULL;
7396 uint32_t data;
7397
7398 rval = DDI_FAILURE;
7399
7400 /* first get the instance */
7401 instance = ddi_get_instance(dip);
7402
7403 switch (cmd) {
7404 case DDI_ATTACH:
7405 /*
7406 * Allocate our per-device-instance structure
7407 */
7408 qlge = (qlge_t *)kmem_zalloc(sizeof (*qlge), KM_SLEEP);
7409 ASSERT(qlge != NULL);
7410 qlge->sequence |= INIT_SOFTSTATE_ALLOC;
7411
7412 qlge->dip = dip;
7413 qlge->instance = instance;
7414 /* Set up the coalescing parameters. */
7415 qlge->ql_dbgprnt = 0;
7416 #if QL_DEBUG
7417 qlge->ql_dbgprnt = QL_DEBUG;
7418 #endif /* QL_DEBUG */
7419
7420 /*
7421 * Initialize for fma support
7422 */
7423 /* fault management (fm) capabilities. */
7424 qlge->fm_capabilities =
7425 DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE;
7426 data = ql_get_prop(qlge, "fm-capable");
7427 if (data <= 0xf) {
7428 qlge->fm_capabilities = data;
7429 }
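/*
 * Usage note (assumes the standard <sys/ddifm.h> capability encodings):
 * the "fm-capable" property, typically set in the driver's .conf file,
 * selects which FMA services ql_fm_init() registers. For example
 *
 *	fm-capable=0x1;
 *
 * would leave only DDI_FM_EREPORT_CAPABLE set, while the default chosen
 * above is DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE.
 */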
7430 ql_fm_init(qlge);
7431 qlge->sequence |= INIT_FM;
7432 QL_PRINT(DBG_INIT, ("ql_attach(%d): fma init done\n",
7433 qlge->instance));
7434
7435 /*
7436 * Setup the ISP8x00 registers address mapping to be
7437 * accessed by this particular driver.
7438 * 0x0 Configuration Space
7439 * 0x1 I/O Space
7440 * 0x2 1st Memory Space address - Control Register Set
7441 * 0x3 2nd Memory Space address - Doorbell Memory Space
7442 */
7443 w = 2;
7444 if (ddi_regs_map_setup(dip, w, (caddr_t *)&qlge->iobase, 0,
7445 sizeof (dev_reg_t), &ql_dev_acc_attr,
7446 &qlge->dev_handle) != DDI_SUCCESS) {
7447 cmn_err(CE_WARN, "%s(%d): Unable to map device "
7448 "registers", ADAPTER_NAME, instance);
7449 break;
7450 }
7451 QL_PRINT(DBG_GLD, ("ql_attach: I/O base = 0x%x\n",
7452 qlge->iobase));
7453 qlge->sequence |= INIT_REGS_SETUP;
7454
7455 /* map Doorbell memory space */
7456 w = 3;
7457 if (ddi_regs_map_setup(dip, w,
7458 (caddr_t *)&qlge->doorbell_reg_iobase, 0,
7459 0x100000 /* sizeof (dev_doorbell_reg_t) */,
7460 &ql_dev_acc_attr,
7461 &qlge->dev_doorbell_reg_handle) != DDI_SUCCESS) {
7462 cmn_err(CE_WARN, "%s(%d): Unable to map Doorbell "
7463 "registers",
7464 ADAPTER_NAME, instance);
7465 break;
7466 }
7467 QL_PRINT(DBG_GLD, ("ql_attach: Doorbell I/O base = 0x%x\n",
7468 qlge->doorbell_reg_iobase));
7469 qlge->sequence |= INIT_DOORBELL_REGS_SETUP;
7470
7471 /*
7472 * Allocate a macinfo structure for this instance
7473 */
7474 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
7475 cmn_err(CE_WARN, "%s(%d): mac_alloc failed",
7476 __func__, instance);
7477 break;
7478 }
7479 /* save adapter state pointer in dip private data */
7480 ddi_set_driver_private(dip, qlge);
7481 QL_PRINT(DBG_INIT, ("%s(%d): Allocate macinfo structure done\n",
7482 ADAPTER_NAME, instance));
7483 qlge->sequence |= INIT_MAC_ALLOC;
7484
7485 /*
7486 * Attach this instance of the device
7487 */
7488 /* Setup PCI Local Bus Configuration resource. */
7489 if (pci_config_setup(dip, &qlge->pci_handle) != DDI_SUCCESS) {
7490 cmn_err(CE_WARN, "%s(%d): Unable to get PCI resources",
7491 ADAPTER_NAME, instance);
7492 if (qlge->fm_enable) {
7493 ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
7494 ddi_fm_service_impact(qlge->dip,
7495 DDI_SERVICE_LOST);
7496 }
7497 break;
7498 }
7499 qlge->sequence |= INIT_PCI_CONFIG_SETUP;
7500 QL_PRINT(DBG_GLD, ("ql_attach(%d): pci_config_setup done\n",
7501 instance));
7502
7503 if (ql_init_instance(qlge) != DDI_SUCCESS) {
7504 cmn_err(CE_WARN, "%s(%d): Unable to initialize device "
7505 "instance", ADAPTER_NAME, instance);
7506 if (qlge->fm_enable) {
7507 ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
7508 ddi_fm_service_impact(qlge->dip,
7509 DDI_SERVICE_LOST);
7510 }
7511 break;
7512 }
7513 QL_PRINT(DBG_GLD, ("ql_attach(%d): ql_init_instance done\n",
7514 instance));
7515
7516 /* Setup interrupt vectors */
7517 if (ql_alloc_irqs(qlge) != DDI_SUCCESS) {
7518 break;
7519 }
7520 qlge->sequence |= INIT_INTR_ALLOC;
7521 QL_PRINT(DBG_GLD, ("ql_attach(%d): ql_alloc_irqs done\n",
7522 instance));
7523
7524 /* Configure queues */
7525 if (ql_setup_rings(qlge) != DDI_SUCCESS) {
7526 break;
7527 }
7528 qlge->sequence |= INIT_SETUP_RINGS;
7529 QL_PRINT(DBG_GLD, ("ql_attach(%d): setup rings done\n",
7530 instance));
7531
7532 /*
7533 * Allocate memory resources
7534 */
7535 if (ql_alloc_mem_resources(qlge) != DDI_SUCCESS) {
7536 cmn_err(CE_WARN, "%s(%d): memory allocation failed",
7537 __func__, qlge->instance);
7538 break;
7539 }
7540 qlge->sequence |= INIT_MEMORY_ALLOC;
7541 QL_PRINT(DBG_GLD, ("ql_alloc_mem_resources(%d) done\n",
7542 instance));
7543
7544 /*
7545 * Map queues to interrupt vectors
7546 */
7547 ql_resolve_queues_to_irqs(qlge);
7548
7549 /* Initialize mutex, need the interrupt priority */
7550 (void) ql_init_rx_tx_locks(qlge);
7551 qlge->sequence |= INIT_LOCKS_CREATED;
7552 QL_PRINT(DBG_INIT, ("%s(%d): ql_init_rx_tx_locks done\n",
7553 ADAPTER_NAME, instance));
7554
7555 /*
7556 * Use soft interrupts for work that we do not want to do in the
7557 * regular network functions or while mutexes are held
7558 */
7559 if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_event_intr_hdl,
7560 DDI_INTR_SOFTPRI_MIN, ql_mpi_event_work, (caddr_t)qlge)
7561 != DDI_SUCCESS) {
7562 break;
7563 }
7564
7565 if (ddi_intr_add_softint(qlge->dip, &qlge->asic_reset_intr_hdl,
7566 DDI_INTR_SOFTPRI_MIN, ql_asic_reset_work, (caddr_t)qlge)
7567 != DDI_SUCCESS) {
7568 break;
7569 }
7570
7571 if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_reset_intr_hdl,
7572 DDI_INTR_SOFTPRI_MIN, ql_mpi_reset_work, (caddr_t)qlge)
7573 != DDI_SUCCESS) {
7574 break;
7575 }
7576 qlge->sequence |= INIT_ADD_SOFT_INTERRUPT;
7577 QL_PRINT(DBG_INIT, ("%s(%d): ddi_intr_add_softint done\n",
7578 ADAPTER_NAME, instance));
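		/*
		 * Illustrative sketch (assumption, not code from this file):
		 * the soft interrupts registered above are normally fired
		 * from hard interrupt context with
		 * ddi_intr_trigger_softint(9F), e.g.
		 *
		 *	(void) ddi_intr_trigger_softint(
		 *	    qlge->mpi_reset_intr_hdl, NULL);
		 *
		 * which schedules ql_mpi_reset_work() to run at soft
		 * interrupt level, outside the hard interrupt handler and
		 * without the rx/tx locks held.
		 */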
7579
7580 /*
7581 * Mutexes to protect the adapter state structure;
7582 * initialize them according to the interrupt priority
7583 */
7584 mutex_init(&qlge->gen_mutex, NULL, MUTEX_DRIVER,
7585 DDI_INTR_PRI(qlge->intr_pri));
7586 mutex_init(&qlge->hw_mutex, NULL, MUTEX_DRIVER,
7587 DDI_INTR_PRI(qlge->intr_pri));
7588 mutex_init(&qlge->mbx_mutex, NULL, MUTEX_DRIVER,
7589 DDI_INTR_PRI(qlge->intr_pri));
7590
7591 /* Mailbox wait and interrupt condition variable. */
7592 cv_init(&qlge->cv_mbx_intr, NULL, CV_DRIVER, NULL);
7593 qlge->sequence |= INIT_MUTEX;
7594 QL_PRINT(DBG_INIT, ("%s(%d): mutex_init done\n",
7595 ADAPTER_NAME, instance));
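		/*
		 * Illustrative sketch (assumption): cv_mbx_intr is used with
		 * mbx_mutex in the usual condition-variable pattern, e.g. a
		 * mailbox command waiter does
		 *
		 *	mutex_enter(&qlge->mbx_mutex);
		 *	while (!mailbox_done)
		 *		cv_wait(&qlge->cv_mbx_intr, &qlge->mbx_mutex);
		 *	mutex_exit(&qlge->mbx_mutex);
		 *
		 * and the mailbox interrupt path signals it with
		 * cv_broadcast(&qlge->cv_mbx_intr) while holding the same
		 * mutex; "mailbox_done" is a placeholder condition.
		 */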
7596
7597 /*
7598 * KStats
7599 */
7600 if (ql_init_kstats(qlge) != DDI_SUCCESS) {
7601 cmn_err(CE_WARN, "%s(%d): kstat initialization failed",
7602 ADAPTER_NAME, instance);
7603 break;
7604 }
7605 qlge->sequence |= INIT_KSTATS;
7606 QL_PRINT(DBG_INIT, ("%s(%d): ql_init_kstats done\n",
7607 ADAPTER_NAME, instance));
7608
7609 /*
7610 * Initialize gld macinfo structure
7611 */
7612 ql_gld3_init(qlge, macp);
7613 /*
7614 * Add interrupt handlers
7615 */
7616 if (ql_add_intr_handlers(qlge) != DDI_SUCCESS) {
7617 cmn_err(CE_WARN, "Failed to add interrupt "
7618 "handlers");
7619 break;
7620 }
7621 qlge->sequence |= INIT_ADD_INTERRUPT;
7622 QL_PRINT(DBG_INIT, ("%s(%d): Add interrupt handler done\n",
7623 ADAPTER_NAME, instance));
7624
7625 /*
7626 * MAC Register
7627 */
7628 if (mac_register(macp, &qlge->mh) != DDI_SUCCESS) {
7629 cmn_err(CE_WARN, "%s(%d): mac_register failed",
7630 __func__, instance);
7631 break;
7632 }
7633 qlge->sequence |= INIT_MAC_REGISTERED;
7634 QL_PRINT(DBG_GLD, ("%s(%d): mac_register done\n",
7635 ADAPTER_NAME, instance));
7636
7637 mac_free(macp);
7638 macp = NULL;
7639
7640 qlge->mac_flags = QL_MAC_ATTACHED;
7641
7642 ddi_report_dev(dip);
7643
7644 rval = DDI_SUCCESS;
7645
7646 break;
7647 /*
7648 * DDI_RESUME
7649 * When called with cmd set to DDI_RESUME, attach() must
7650 * restore the hardware state of a device (power may have been
7651 * removed from the device), allow pending requests to
7652 * continue, and service new requests. In this case, the driver
7653 * must not make any assumptions about the state of the
7654 * hardware, but must restore the state of the device except
7655 * for the power level of components.
7656 *
7657 */
7658 case DDI_RESUME:
7659
7660 if ((qlge = (qlge_t *)QL_GET_DEV(dip)) == NULL)
7661 return (DDI_FAILURE);
7662
7663 QL_PRINT(DBG_GLD, ("%s(%d)-DDI_RESUME\n",
7664 __func__, qlge->instance));
7665
7666 mutex_enter(&qlge->gen_mutex);
7667 rval = ql_do_start(qlge);
7668 mutex_exit(&qlge->gen_mutex);
7669 break;
7670
7671 default:
7672 break;
7673 }
7674
7675 /* if failed to attach */
7676 if ((cmd == DDI_ATTACH) && (rval != DDI_SUCCESS) && (qlge != NULL)) {
7677 cmn_err(CE_WARN, "qlge driver attach failed, sequence %x",
7678 qlge->sequence);
7679 ql_free_resources(qlge);
7680 }
7681
7682 return (rval);
7683 }
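/*
 * Note on the failure path above: every setup step that succeeds sets a bit
 * in qlge->sequence (INIT_REGS_SETUP, INIT_PCI_CONFIG_SETUP, ...), so
 * ql_free_resources() can presumably undo only the steps that actually
 * completed.  A sketch of that pattern (an assumption, not the actual
 * implementation):
 *
 *	if (qlge->sequence & INIT_PCI_CONFIG_SETUP) {
 *		pci_config_teardown(&qlge->pci_handle);
 *		qlge->sequence &= ~INIT_PCI_CONFIG_SETUP;
 *	}
 */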
7684
7685 /*
7686 * Unbind all pending tx dma handles during driver bring down
7687 */
7688 static void
7689 ql_unbind_pending_tx_dma_handle(struct tx_ring *tx_ring)
7690 {
7691 struct tx_ring_desc *tx_ring_desc;
7692 int i, j;
7693
7694 if (tx_ring->wq_desc) {
7695 tx_ring_desc = tx_ring->wq_desc;
7696 for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
7697 for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
7698 if (tx_ring_desc->tx_dma_handle[j]) {
7699 (void) ddi_dma_unbind_handle(
7700 tx_ring_desc->tx_dma_handle[j]);
7701 }
7702 }
7703 tx_ring_desc->tx_dma_handle_used = 0;
7704 } /* end of for loop */
7705 }
7706 }
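/*
 * General DDI note (stated here for clarity, not new behaviour): a DMA
 * handle that is still bound must be unbound before it may be freed, i.e.
 * the usual per-handle teardown order is
 *
 *	(void) ddi_dma_unbind_handle(hdl);
 *	ddi_dma_free_handle(&hdl);
 *
 * The routine above only unbinds; the handles themselves are presumably
 * freed later when the tx ring resources are released.
 */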
7707 /*
7708 * Wait for all the packets sent to the chip to finish transmission
7709 * to prevent buffers from being unmapped before or during a transmit operation
7710 */
7711 static int
7712 ql_wait_tx_quiesce(qlge_t *qlge)
7713 {
7714 int count = MAX_TX_WAIT_COUNT, i;
7715 int rings_done;
7716 volatile struct tx_ring *tx_ring;
7717 uint32_t consumer_idx;
7718 uint32_t producer_idx;
7719 uint32_t temp;
7720 int done = 0;
7721 int rval = DDI_FAILURE;
7722
7723 while (!done) {
7724 rings_done = 0;
7725
7726 for (i = 0; i < qlge->tx_ring_count; i++) {
7727 tx_ring = &qlge->tx_ring[i];
7728 temp = ql_read_doorbell_reg(qlge,
7729 tx_ring->prod_idx_db_reg);
7730 producer_idx = temp & 0x0000ffff;
7731 consumer_idx = (temp >> 16);
7732
7733 if (qlge->isr_stride) {
7734 struct rx_ring *ob_ring;
7735 ob_ring = &qlge->rx_ring[tx_ring->cq_id];
7736 if (producer_idx != ob_ring->cnsmr_idx) {
7737 cmn_err(CE_NOTE, " force clean \n");
7738 (void) ql_clean_outbound_rx_ring(
7739 ob_ring);
7740 }
7741 }
7742 /*
7743 * Get the pending IOCB count, i.e. the entries that have
7744 * not yet been pulled down by the chip
7745 */
7746 if (producer_idx >= consumer_idx)
7747 temp = (producer_idx - consumer_idx);
7748 else
7749 temp = (tx_ring->wq_len - consumer_idx) +
7750 producer_idx;
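			/*
			 * Example: with wq_len = 128, producer_idx = 5 and
			 * consumer_idx = 120 the producer has wrapped, so
			 * (128 - 120) + 5 = 13 entries are still pending.
			 */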
7751
7752 if ((tx_ring->tx_free_count + temp) >= tx_ring->wq_len)
7753 rings_done++;
7754 else {
7755 /* this ring is not yet drained; keep waiting */
7756 break;
7757 }
7758 }
7759
7760 /* If all the rings are done */
7761 if (rings_done >= qlge->tx_ring_count) {
7762 #ifdef QLGE_LOAD_UNLOAD
7763 cmn_err(CE_NOTE, "%s(%d) done successfully \n",
7764 __func__, qlge->instance);
7765 #endif
7766 rval = DDI_SUCCESS;
7767 break;
7768 }
7769
7770 qlge_delay(100);
7771
7772 count--;
7773 if (!count) {
7774
7775 count = MAX_TX_WAIT_COUNT;
7776 #ifdef QLGE_LOAD_UNLOAD
7777 volatile struct rx_ring *rx_ring;
7778 cmn_err(CE_NOTE, "%s(%d): Waiting for %d pending"
7779 " Transmits on queue %d to complete .\n",
7780 __func__, qlge->instance,
7781 (qlge->tx_ring[i].wq_len -
7782 qlge->tx_ring[i].tx_free_count),
7783 i);
7784
7785 rx_ring = &qlge->rx_ring[i+1];
7786 temp = ql_read_doorbell_reg(qlge,
7787 rx_ring->cnsmr_idx_db_reg);
7788 consumer_idx = temp & 0x0000ffff;
7789 producer_idx = (temp >> 16);
7790 cmn_err(CE_NOTE, "%s(%d): Transmit completion queue %d,"
7791 " Producer %d, Consumer %d\n",
7792 __func__, qlge->instance,
7793 i+1,
7794 producer_idx, consumer_idx);
7795
7796 temp = ql_read_doorbell_reg(qlge,
7797 tx_ring->prod_idx_db_reg);
7798 producer_idx = temp & 0x0000ffff;
7799 consumer_idx = (temp >> 16);
7800 cmn_err(CE_NOTE, "%s(%d): Transmit request queue %d,"
7801 " Producer %d, Consumer %d\n",
7802 __func__, qlge->instance, i,
7803 producer_idx, consumer_idx);
7804 #endif
7805
7806 /* For now move on */
7807 break;
7808 }
7809 }
7810 /* Stop the request queue */
7811 mutex_enter(&qlge->hw_mutex);
7812 for (i = 0; i < qlge->tx_ring_count; i++) {
7813 if (qlge->tx_ring[i].valid_db_reg) {
7814 ql_write_doorbell_reg(qlge,
7815 qlge->tx_ring[i].valid_db_reg, 0);
7816 }
7817 }
7818 mutex_exit(&qlge->hw_mutex);
7819 return (rval);
7820 }
7821
7822 /*
7823 * Wait for all the rx buffers indicated up to the stack to come back
7824 */
7825 static int
7826 ql_wait_rx_complete(qlge_t *qlge)
7827 {
7828 int i;
7829 /* Disable all the completion queues */
7830 mutex_enter(&qlge->hw_mutex);
7831 for (i = 0; i < qlge->rx_ring_count; i++) {
7832 if (qlge->rx_ring[i].valid_db_reg) {
7833 ql_write_doorbell_reg(qlge,
7834 qlge->rx_ring[i].valid_db_reg, 0);
7835 }
7836 }
7837 mutex_exit(&qlge->hw_mutex);
7838
7839 /* Wait for OS to return all rx buffers */
7840 qlge_delay(QL_ONE_SEC_DELAY);
7841 return (DDI_SUCCESS);
7842 }
7843
7844 /*
7845 * Bring down the adapter: stop rx/tx traffic and reset the chip
7846 */
7847 static int
7848 ql_bringdown_adapter(qlge_t *qlge)
7849 {
7850 int i;
7851 int status = DDI_SUCCESS;
7852
7853 qlge->mac_flags = QL_MAC_BRINGDOWN;
7854 if (qlge->sequence & ADAPTER_INIT) {
7855 /* stop forwarding external packets to driver */
7856 status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
7857 if (status)
7858 return (status);
7859 (void) ql_stop_routing(qlge);
7860 ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
7861 /*
7862 * Set the flag for receive and transmit
7863 * operations to cease
7864 */
7865 for (i = 0; i < qlge->tx_ring_count; i++) {
7866 mutex_enter(&qlge->tx_ring[i].tx_lock);
7867 qlge->tx_ring[i].mac_flags = QL_MAC_STOPPED;
7868 mutex_exit(&qlge->tx_ring[i].tx_lock);
7869 }
7870
7871 for (i = 0; i < qlge->rx_ring_count; i++) {
7872 mutex_enter(&qlge->rx_ring[i].rx_lock);
7873 qlge->rx_ring[i].mac_flags = QL_MAC_STOPPED;
7874 mutex_exit(&qlge->rx_ring[i].rx_lock);
7875 }
7876
7877 /*
7878 * Need interrupts to be running while the transmit
7879 * completions are cleared. Wait for the packets
7880 * queued to the chip to be sent out
7881 */
7882 (void) ql_wait_tx_quiesce(qlge);
7883 /* Interrupts not needed from now */
7884 ql_disable_all_completion_interrupts(qlge);
7885
7886 mutex_enter(&qlge->hw_mutex);
7887 /* Disable Global interrupt */
7888 ql_disable_global_interrupt(qlge);
7889 mutex_exit(&qlge->hw_mutex);
7890
7891 /* Wait for all the indicated packets to come back */
7892 status = ql_wait_rx_complete(qlge);
7893
7894 mutex_enter(&qlge->hw_mutex);
7895 /* Reset adapter */
7896 (void) ql_asic_reset(qlge);
7897 /*
7898 * Unbind all tx dma handles to prevent pending tx descriptors'
7899 * dma handles from being re-used.
7900 */
7901 for (i = 0; i < qlge->tx_ring_count; i++) {
7902 ql_unbind_pending_tx_dma_handle(&qlge->tx_ring[i]);
7903 }
7904
7905 qlge->sequence &= ~ADAPTER_INIT;
7906
7907 mutex_exit(&qlge->hw_mutex);
7908 }
7909 return (status);
7910 }
7911
7912 /*
7913 * ql_detach
7914 * Used to remove all the state associated with a given
7915 * instance of a device node prior to the removal of that
7916 * instance from the system.
7917 */
7918 static int
7919 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
7920 {
7921 qlge_t *qlge;
7922 int rval;
7923
7924 rval = DDI_SUCCESS;
7925
7926 switch (cmd) {
7927 case DDI_DETACH:
7928
7929 if ((qlge = QL_GET_DEV(dip)) == NULL)
7930 return (DDI_FAILURE);
7931 rval = ql_bringdown_adapter(qlge);
7932 if (rval != DDI_SUCCESS)
7933 break;
7934
7935 qlge->mac_flags = QL_MAC_DETACH;
7936
7937 /* free memory resources */
7938 if (qlge->sequence & INIT_MEMORY_ALLOC) {
7939 ql_free_mem_resources(qlge);
7940 qlge->sequence &= ~INIT_MEMORY_ALLOC;
7941 }
7942 ql_free_resources(qlge);
7943
7944 break;
7945
7946 case DDI_SUSPEND:
7947 if ((qlge = QL_GET_DEV(dip)) == NULL)
7948 return (DDI_FAILURE);
7949
7950 mutex_enter(&qlge->gen_mutex);
7951 if ((qlge->mac_flags == QL_MAC_ATTACHED) ||
7952 (qlge->mac_flags == QL_MAC_STARTED)) {
7953 (void) ql_do_stop(qlge);
7954 }
7955 qlge->mac_flags = QL_MAC_SUSPENDED;
7956 mutex_exit(&qlge->gen_mutex);
7957
7958 break;
7959 default:
7960 rval = DDI_FAILURE;
7961 break;
7962 }
7963
7964 return (rval);
7965 }
7966
7967 /*
7968 * quiesce(9E) entry point.
7969 *
7970 * This function is called when the system is single-threaded at high
7971 * PIL with preemption disabled. Therefore, this function must
7972 * not block.
7973 *
7974 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
7975 */
7976 int
7977 ql_quiesce(dev_info_t *dip)
7978 {
7979 qlge_t *qlge;
7980 int i;
7981
7982 if ((qlge = QL_GET_DEV(dip)) == NULL)
7983 return (DDI_FAILURE);
7984
7985 if (CFG_IST(qlge, CFG_CHIP_8100)) {
7986 /* stop forwarding external packets to driver */
7987 (void) ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
7988 (void) ql_stop_routing(qlge);
7989 ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
7990 /* Stop all the request queues */
7991 for (i = 0; i < qlge->tx_ring_count; i++) {
7992 if (qlge->tx_ring[i].valid_db_reg) {
7993 ql_write_doorbell_reg(qlge,
7994 qlge->tx_ring[i].valid_db_reg, 0);
7995 }
7996 }
7997 qlge_delay(QL_ONE_SEC_DELAY/4);
7998 /* Interrupts not needed from now */
7999 /* Disable MPI interrupt */
8000 ql_write_reg(qlge, REG_INTERRUPT_MASK,
8001 (INTR_MASK_PI << 16));
8002 ql_disable_global_interrupt(qlge);
8003
8004 /* Disable all the rx completion queues */
8005 for (i = 0; i < qlge->rx_ring_count; i++) {
8006 if (qlge->rx_ring[i].valid_db_reg) {
8007 ql_write_doorbell_reg(qlge,
8008 qlge->rx_ring[i].valid_db_reg, 0);
8009 }
8010 }
8011 qlge_delay(QL_ONE_SEC_DELAY/4);
8012 qlge->mac_flags = QL_MAC_STOPPED;
8013 /* Reset adapter */
8014 (void) ql_asic_reset(qlge);
8015 qlge_delay(100);
8016 }
8017
8018 return (DDI_SUCCESS);
8019 }
8020
8021 QL_STREAM_OPS(ql_ops, ql_attach, ql_detach);
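/*
 * For reference (assumption based on common practice; the real macro lives
 * in qlge.h): QL_STREAM_OPS presumably wraps DDI_DEFINE_STREAM_OPS roughly
 * as
 *
 *	DDI_DEFINE_STREAM_OPS(ql_ops, nulldev, nulldev, ql_attach,
 *	    ql_detach, nodev, NULL, D_MP, NULL, ql_quiesce);
 *
 * supplying the dev_ops structure referenced by modldrv below and hooking
 * ql_quiesce in as the quiesce(9E) entry point.
 */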
8022
8023 /*
8024 * Loadable Driver Interface Structures.
8025 * Declare and initialize the module configuration section...
8026 */
8027 static struct modldrv modldrv = {
8028 &mod_driverops, /* type of module: driver */
8029 version, /* name of module */
8030 &ql_ops /* driver dev_ops */
8031 };
8032
8033 static struct modlinkage modlinkage = {
8034 MODREV_1, &modldrv, NULL
8035 };
8036
8037 /*
8038 * Loadable Module Routines
8039 */
8040
8041 /*
8042 * _init
8043 * Initializes a loadable module. It is called before any other
8044 * routine in a loadable module.
8045 */
8046 int
8047 _init(void)
8048 {
8049 int rval;
8050
8051 mac_init_ops(&ql_ops, ADAPTER_NAME);
8052 rval = mod_install(&modlinkage);
8053 if (rval != DDI_SUCCESS) {
8054 mac_fini_ops(&ql_ops);
8055 cmn_err(CE_WARN, "?Unable to install/attach driver '%s'",
8056 ADAPTER_NAME);
8057 }
8058
8059 return (rval);
8060 }
8061
8062 /*
8063 * _fini
8064 * Prepares a module for unloading. It is called when the system
8065 * wants to unload a module. If the module determines that it can
8066 * be unloaded, then _fini() returns the value returned by
8067 * mod_remove(). Upon successful return from _fini() no other
8068 * routine in the module will be called before _init() is called.
8069 */
8070 int
8071 _fini(void)
8072 {
8073 int rval;
8074
8075 rval = mod_remove(&modlinkage);
8076 if (rval == DDI_SUCCESS) {
8077 mac_fini_ops(&ql_ops);
8078 }
8079
8080 return (rval);
8081 }
8082
8083 /*
8084 * _info
8085 * Returns information about loadable module.
8086 */
8087 int
8088 _info(struct modinfo *modinfop)
8089 {
8090 return (mod_info(&modlinkage, modinfop));
8091 }
8092