/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * File: ql_ioctl.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_glbl.h"
#include "ql_ioctl.h"
#include "ql_ver.h"
#include "ql_dbg.h"

static int ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log);
static int ql_drvr_state(qla_host_t *ha, qla_driver_state_t *drvr_state);
static uint32_t ql_drvr_state_size(qla_host_t *ha);
static int ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
		struct thread *td);

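/*
 * Character-device switch for the per-port management node; ql_eioctl()
 * handles all of the QLA_* ioctl commands defined in ql_ioctl.h.
 */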
static struct cdevsw qla_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = ql_eioctl,
	.d_name = "qlcnic",
};

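/*
 * ql_make_cdev() creates the control device.  The node is named after
 * the network interface (e.g. /dev/ql0 for interface ql0) and is
 * accessible only by root (mode 0600).
 */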
int
ql_make_cdev(qla_host_t *ha)
{
	ha->ioctl_dev = make_dev(&qla_cdevsw,
				if_getdunit(ha->ifp),
				UID_ROOT,
				GID_WHEEL,
				0600,
				"%s",
				if_name(ha->ifp));

	if (ha->ioctl_dev == NULL)
		return (-1);

	ha->ioctl_dev->si_drv1 = ha;

	return (0);
}

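/*
 * ql_del_cdev() destroys the control device created by ql_make_cdev().
 */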
void
ql_del_cdev(qla_host_t *ha)
{
	if (ha->ioctl_dev != NULL)
		destroy_dev(ha->ioctl_dev);
	return;
}

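/*
 * ql_eioctl() is the ioctl handler for the control device.  A management
 * tool opens the device node and issues the QLA_* commands from
 * ql_ioctl.h; a minimal userland sketch, assuming the interface (and
 * hence the device node) is ql0:
 *
 *	int fd = open("/dev/ql0", O_RDWR);
 *	qla_reg_val_t rv = { .rd = 1, .direct = 1, .reg = 0 };
 *	if (fd >= 0 && ioctl(fd, QLA_RDWR_REG, &rv) == 0)
 *		printf("reg 0x%x = 0x%x\n", rv.reg, rv.val);
 */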
static int
ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td)
{
	qla_host_t *ha;
	int rval = 0;
	device_t pci_dev;
	if_t ifp;
	int count;

	q80_offchip_mem_val_t val;
	qla_rd_pci_ids_t *pci_ids;
	qla_rd_fw_dump_t *fw_dump;
	union {
		qla_reg_val_t *rv;
		qla_rd_flash_t *rdf;
		qla_wr_flash_t *wrf;
		qla_erase_flash_t *erf;
		qla_offchip_mem_val_t *mem;
	} u;

	if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
		return (ENXIO);

	pci_dev = ha->pci_dev;

	switch (cmd) {
	case QLA_RDWR_REG:

		u.rv = (qla_reg_val_t *)data;

		if (u.rv->direct) {
			if (u.rv->rd) {
				u.rv->val = READ_REG32(ha, u.rv->reg);
			} else {
				WRITE_REG32(ha, u.rv->reg, u.rv->val);
			}
		} else {
			if ((rval = ql_rdwr_indreg32(ha, u.rv->reg, &u.rv->val,
			    u.rv->rd)))
				rval = ENXIO;
		}
		break;

	case QLA_RD_FLASH:

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.rdf = (qla_rd_flash_t *)data;
		if ((rval = ql_rd_flash32(ha, u.rdf->off, &u.rdf->data)))
			rval = ENXIO;
		break;

	case QLA_WR_FLASH:

		ifp = ha->ifp;

		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.wrf = (qla_wr_flash_t *)data;
		if ((rval = ql_wr_flash_buffer(ha, u.wrf->off, u.wrf->size,
		    u.wrf->buffer))) {
			printf("flash write failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_ERASE_FLASH:

		ifp = ha->ifp;

		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.erf = (qla_erase_flash_t *)data;
		if ((rval = ql_erase_flash(ha, u.erf->off,
		    u.erf->size))) {
			printf("flash erase failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_RDWR_MS_MEM:
		u.mem = (qla_offchip_mem_val_t *)data;

		if ((rval = ql_rdwr_offchip_mem(ha, u.mem->off, &val,
		    u.mem->rd)))
			rval = ENXIO;
		else {
			u.mem->data_lo = val.data_lo;
			u.mem->data_hi = val.data_hi;
			u.mem->data_ulo = val.data_ulo;
			u.mem->data_uhi = val.data_uhi;
		}

		break;

	case QLA_RD_FW_DUMP_SIZE:

		if (ha->hw.mdump_init == 0) {
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;
		fw_dump->minidump_size = ha->hw.mdump_buffer_size +
			ha->hw.mdump_template_size;
		fw_dump->pci_func = ha->pci_func;

		break;

	case QLA_RD_FW_DUMP:

		if (ha->hw.mdump_init == 0) {
			device_printf(pci_dev,
			    "%s: minidump not initialized\n", __func__);
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;

		if ((fw_dump->minidump == NULL) ||
		    (fw_dump->minidump_size != (ha->hw.mdump_buffer_size +
		     ha->hw.mdump_template_size))) {
			device_printf(pci_dev,
			    "%s: minidump buffer [%p] size = [%d, %d] invalid\n",
			    __func__, fw_dump->minidump,
			    fw_dump->minidump_size,
			    (ha->hw.mdump_buffer_size +
			     ha->hw.mdump_template_size));
			rval = EINVAL;
			break;
		}

		if ((ha->pci_func & 0x1)) {
			device_printf(pci_dev,
			    "%s: minidump allowed only on Port 0\n", __func__);
			rval = ENXIO;
			break;
		}

		fw_dump->saved = 1;

		if (ha->offline) {
			if (ha->enable_minidump)
				ql_minidump(ha);

			fw_dump->saved = 0;
			fw_dump->usec_ts = ha->hw.mdump_usec_ts;

			if (!ha->hw.mdump_done) {
				device_printf(pci_dev,
				    "%s: port offline minidump failed\n",
				    __func__);
				rval = ENXIO;
				break;
			}
		} else {
#define QLA_LOCK_MDUMP_MS_TIMEOUT (QLA_LOCK_DEFAULT_MS_TIMEOUT * 5)
			if (QLA_LOCK(ha, __func__,
			    QLA_LOCK_MDUMP_MS_TIMEOUT, 0) == 0) {
				if (!ha->hw.mdump_done) {
					fw_dump->saved = 0;
					QL_INITIATE_RECOVERY(ha);
					device_printf(pci_dev,
					    "%s: recovery initiated to trigger"
					    " minidump\n", __func__);
				}
				QLA_UNLOCK(ha, __func__);
			} else {
				device_printf(pci_dev,
				    "%s: QLA_LOCK() failed0\n", __func__);
				rval = ENXIO;
				break;
			}

#define QLNX_DUMP_WAIT_SECS	30

			count = QLNX_DUMP_WAIT_SECS * 1000;

			while (count) {
				if (ha->hw.mdump_done)
					break;
				qla_mdelay(__func__, 100);
				count -= 100;
			}

			if (!ha->hw.mdump_done) {
				device_printf(pci_dev,
				    "%s: port online minidump failed\n",
				    __func__);
				rval = ENXIO;
				break;
			}
			fw_dump->usec_ts = ha->hw.mdump_usec_ts;

			if (QLA_LOCK(ha, __func__,
			    QLA_LOCK_MDUMP_MS_TIMEOUT, 0) == 0) {
				ha->hw.mdump_done = 0;
				QLA_UNLOCK(ha, __func__);
			} else {
				device_printf(pci_dev,
				    "%s: QLA_LOCK() failed1\n", __func__);
				rval = ENXIO;
				break;
			}
		}

		if ((rval = copyout(ha->hw.mdump_template,
		    fw_dump->minidump, ha->hw.mdump_template_size))) {
			device_printf(pci_dev,
			    "%s: template copyout failed\n", __func__);
			rval = ENXIO;
			break;
		}

		if ((rval = copyout(ha->hw.mdump_buffer,
		    ((uint8_t *)fw_dump->minidump +
		     ha->hw.mdump_template_size),
		    ha->hw.mdump_buffer_size))) {
			device_printf(pci_dev,
			    "%s: minidump copyout failed\n", __func__);
			rval = ENXIO;
		}
		break;

	case QLA_RD_DRVR_STATE:
		rval = ql_drvr_state(ha, (qla_driver_state_t *)data);
		break;

	case QLA_RD_SLOWPATH_LOG:
		rval = ql_slowpath_log(ha, (qla_sp_log_t *)data);
		break;

	case QLA_RD_PCI_IDS:
		pci_ids = (qla_rd_pci_ids_t *)data;
		pci_ids->ven_id = pci_get_vendor(pci_dev);
		pci_ids->dev_id = pci_get_device(pci_dev);
		pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev);
		pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev);
		pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1);
		break;

	default:
		break;
	}

	return (rval);
}

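/*
 * ql_drvr_state() services QLA_RD_DRVR_STATE.  With a NULL buffer the
 * caller is told the required size; otherwise the current driver state
 * is captured and copied out, and the staging buffer is cleared so the
 * next call captures a fresh snapshot.
 */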
static int
ql_drvr_state(qla_host_t *ha, qla_driver_state_t *state)
{
	int rval = 0;
	uint32_t drvr_state_size;

	drvr_state_size = ql_drvr_state_size(ha);

	if (state->buffer == NULL) {
		state->size = drvr_state_size;
		return (0);
	}

	if (state->size < drvr_state_size)
		return (ENXIO);

	if (ha->hw.drvr_state == NULL)
		return (ENOMEM);

	ql_capture_drvr_state(ha);

	rval = copyout(ha->hw.drvr_state, state->buffer, drvr_state_size);

	bzero(ha->hw.drvr_state, drvr_state_size);

	return (rval);
}

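/*
 * ql_drvr_state_size() returns the total size of the driver-state
 * snapshot: the header, the per-ring software state, and the raw tx,
 * rx and status descriptor rings, each region aligned to 64 bytes.
 */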
static uint32_t
ql_drvr_state_size(qla_host_t *ha)
{
	uint32_t drvr_state_size;
	uint32_t size;

	size = sizeof (qla_drvr_state_hdr_t);
	drvr_state_size = QL_ALIGN(size, 64);

	size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
	drvr_state_size += QL_ALIGN(size, 64);

	size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
	drvr_state_size += QL_ALIGN(size, 64);

	size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
	drvr_state_size += QL_ALIGN(size, 64);

	size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS * ha->hw.num_tx_rings;
	drvr_state_size += QL_ALIGN(size, 64);

	size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS *
	    ha->hw.num_rds_rings;
	drvr_state_size += QL_ALIGN(size, 64);

	size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS *
	    ha->hw.num_sds_rings;
	drvr_state_size += QL_ALIGN(size, 64);

	return (drvr_state_size);
}

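/*
 * ql_get_tx_state() records the software state of every tx ring into
 * the snapshot buffer, one qla_drvr_state_tx_t per ring.
 */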
static void
ql_get_tx_state(qla_host_t *ha, qla_drvr_state_tx_t *tx_state)
{
	int i;

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_state->base_p_addr = ha->hw.tx_cntxt[i].tx_ring_paddr;
		tx_state->cons_p_addr = ha->hw.tx_cntxt[i].tx_cons_paddr;
		tx_state->tx_prod_reg = ha->hw.tx_cntxt[i].tx_prod_reg;
		tx_state->tx_cntxt_id = ha->hw.tx_cntxt[i].tx_cntxt_id;
		tx_state->txr_free = ha->hw.tx_cntxt[i].txr_free;
		tx_state->txr_next = ha->hw.tx_cntxt[i].txr_next;
		tx_state->txr_comp = ha->hw.tx_cntxt[i].txr_comp;
		tx_state++;
	}
	return;
}

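/*
 * ql_get_rx_state() records the software state of every rx (rds) ring.
 */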
static void
ql_get_rx_state(qla_host_t *ha, qla_drvr_state_rx_t *rx_state)
{
	int i;

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		rx_state->prod_std = ha->hw.rds[i].prod_std;
		rx_state->rx_next = ha->hw.rds[i].rx_next;
		rx_state++;
	}
	return;
}

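/*
 * ql_get_sds_state() records the software state of every status (sds)
 * ring.
 */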
static void
ql_get_sds_state(qla_host_t *ha, qla_drvr_state_sds_t *sds_state)
{
	int i;

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		sds_state->sdsr_next = ha->hw.sds[i].sdsr_next;
		sds_state->sds_consumer = ha->hw.sds[i].sds_consumer;
		sds_state++;
	}
	return;
}

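/*
 * ql_capture_drvr_state() fills the preallocated snapshot buffer with a
 * header (version, link state, timestamp), the per-ring software state
 * and a raw copy of the descriptor rings.  A non-zero major version in
 * the header means a previous snapshot has not been read out yet; in
 * that case the buffer is left intact and only hdr->saved is set.
 */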
void
ql_capture_drvr_state(qla_host_t *ha)
{
	uint8_t *state_buffer;
	uint8_t *ptr;
	qla_drvr_state_hdr_t *hdr;
	uint32_t size;
	int i;

	state_buffer = ha->hw.drvr_state;

	if (state_buffer == NULL)
		return;

	hdr = (qla_drvr_state_hdr_t *)state_buffer;

	hdr->saved = 0;

	if (hdr->drvr_version_major) {
		hdr->saved = 1;
		return;
	}

	hdr->usec_ts = qla_get_usec_timestamp();

	hdr->drvr_version_major = QLA_VERSION_MAJOR;
	hdr->drvr_version_minor = QLA_VERSION_MINOR;
	hdr->drvr_version_build = QLA_VERSION_BUILD;

	bcopy(ha->hw.mac_addr, hdr->mac_addr, ETHER_ADDR_LEN);

	hdr->link_speed = ha->hw.link_speed;
	hdr->cable_length = ha->hw.cable_length;
	hdr->cable_oui = ha->hw.cable_oui;
	hdr->link_up = ha->hw.link_up;
	hdr->module_type = ha->hw.module_type;
	hdr->link_faults = ha->hw.link_faults;
	hdr->rcv_intr_coalesce = ha->hw.rcv_intr_coalesce;
	hdr->xmt_intr_coalesce = ha->hw.xmt_intr_coalesce;

	size = sizeof (qla_drvr_state_hdr_t);
	hdr->tx_state_offset = QL_ALIGN(size, 64);

	ptr = state_buffer + hdr->tx_state_offset;

	ql_get_tx_state(ha, (qla_drvr_state_tx_t *)ptr);

	size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
	hdr->rx_state_offset = hdr->tx_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->rx_state_offset;

	ql_get_rx_state(ha, (qla_drvr_state_rx_t *)ptr);

	size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
	hdr->sds_state_offset = hdr->rx_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->sds_state_offset;

	ql_get_sds_state(ha, (qla_drvr_state_sds_t *)ptr);

	size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
	hdr->txr_offset = hdr->sds_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->txr_offset;

	hdr->num_tx_rings = ha->hw.num_tx_rings;
	hdr->txr_size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS;
	hdr->txr_entries = NUM_TX_DESCRIPTORS;

	size = hdr->num_tx_rings * hdr->txr_size;
	bcopy(ha->hw.dma_buf.tx_ring.dma_b, ptr, size);

	hdr->rxr_offset = hdr->txr_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->rxr_offset;

	hdr->rxr_size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS;
	hdr->rxr_entries = NUM_RX_DESCRIPTORS;
	hdr->num_rx_rings = ha->hw.num_rds_rings;

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		bcopy(ha->hw.dma_buf.rds_ring[i].dma_b, ptr, hdr->rxr_size);
		ptr += hdr->rxr_size;
	}

	size = hdr->rxr_size * hdr->num_rx_rings;
	hdr->sds_offset = hdr->rxr_offset + QL_ALIGN(size, 64);
	hdr->sds_ring_size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS;
	hdr->sds_entries = NUM_STATUS_DESCRIPTORS;
	hdr->num_sds_rings = ha->hw.num_sds_rings;

	ptr = state_buffer + hdr->sds_offset;
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		bcopy(ha->hw.dma_buf.sds_ring[i].dma_b, ptr,
		    hdr->sds_ring_size);
		ptr += hdr->sds_ring_size;
	}
	return;
}

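/*
 * ql_alloc_drvr_state_buffer() preallocates the driver-state snapshot
 * buffer.  Allocation failure is tolerated: ql_capture_drvr_state()
 * simply bails out when the buffer is NULL, and ql_drvr_state() returns
 * ENOMEM.
 */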
void
ql_alloc_drvr_state_buffer(qla_host_t *ha)
{
	uint32_t drvr_state_size;

	drvr_state_size = ql_drvr_state_size(ha);

	ha->hw.drvr_state = malloc(drvr_state_size, M_QLA83XXBUF, M_NOWAIT);

	if (ha->hw.drvr_state != NULL)
		bzero(ha->hw.drvr_state, drvr_state_size);

	return;
}

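/*
 * ql_free_drvr_state_buffer() releases the snapshot buffer.
 */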
void
ql_free_drvr_state_buffer(qla_host_t *ha)
{
	if (ha->hw.drvr_state != NULL)
		free(ha->hw.drvr_state, M_QLA83XXBUF);
	return;
}

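/*
 * ql_sp_log() appends one entry to the slowpath log ring.  Each entry
 * stores a format-string index (resolved by the userland reader), a
 * microsecond timestamp and up to five parameters.  The write index
 * wraps via a mask, so NUM_LOG_ENTRIES must be a power of two.
 */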
void
ql_sp_log(qla_host_t *ha, uint16_t fmtstr_idx, uint16_t num_params,
	uint32_t param0, uint32_t param1, uint32_t param2, uint32_t param3,
	uint32_t param4)
{
	qla_sp_log_entry_t *sp_e, *sp_log;

	if (((sp_log = ha->hw.sp_log) == NULL) || ha->hw.sp_log_stop)
		return;

	mtx_lock(&ha->sp_log_lock);

	sp_e = &sp_log[ha->hw.sp_log_index];

	bzero(sp_e, sizeof (qla_sp_log_entry_t));

	sp_e->fmtstr_idx = fmtstr_idx;
	sp_e->num_params = num_params;

	sp_e->usec_ts = qla_get_usec_timestamp();

	sp_e->params[0] = param0;
	sp_e->params[1] = param1;
	sp_e->params[2] = param2;
	sp_e->params[3] = param3;
	sp_e->params[4] = param4;

	ha->hw.sp_log_index = (ha->hw.sp_log_index + 1) & (NUM_LOG_ENTRIES - 1);

	if (ha->hw.sp_log_num_entries < NUM_LOG_ENTRIES)
		ha->hw.sp_log_num_entries++;

	mtx_unlock(&ha->sp_log_lock);

	return;
}

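/*
 * ql_alloc_sp_log_buffer() allocates and resets the slowpath log ring.
 */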
void
ql_alloc_sp_log_buffer(qla_host_t *ha)
{
	uint32_t size;

	size = (sizeof(qla_sp_log_entry_t)) * NUM_LOG_ENTRIES;

	ha->hw.sp_log = malloc(size, M_QLA83XXBUF, M_NOWAIT);

	if (ha->hw.sp_log != NULL)
		bzero(ha->hw.sp_log, size);

	ha->hw.sp_log_index = 0;
	ha->hw.sp_log_num_entries = 0;

	return;
}

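/*
 * ql_free_sp_log_buffer() releases the slowpath log ring.
 */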
void
ql_free_sp_log_buffer(qla_host_t *ha)
{
	if (ha->hw.sp_log != NULL)
		free(ha->hw.sp_log, M_QLA83XXBUF);
	return;
}

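/*
 * ql_slowpath_log() services QLA_RD_SLOWPATH_LOG: it copies the whole
 * log ring to the caller's buffer and reports the next write index and
 * the number of valid entries, so userland can replay the entries in
 * chronological order.
 */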
static int
ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log)
{
	int rval = 0;
	uint32_t size;

	if ((ha->hw.sp_log == NULL) || (log->buffer == NULL))
		return (EINVAL);

	size = (sizeof(qla_sp_log_entry_t) * NUM_LOG_ENTRIES);

	mtx_lock(&ha->sp_log_lock);

	rval = copyout(ha->hw.sp_log, log->buffer, size);

	if (!rval) {
		log->next_idx = ha->hw.sp_log_index;
		log->num_entries = ha->hw.sp_log_num_entries;
	}
	device_printf(ha->pci_dev,
	    "%s: exit [rval = %d][next_idx = %d, %d entries, %d bytes]\n",
	    __func__, rval, log->next_idx, log->num_entries, size);
	mtx_unlock(&ha->sp_log_lock);

	return (rval);
}