/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * File: ql_ioctl.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_glbl.h"
#include "ql_ioctl.h"
#include "ql_ver.h"
#include "ql_dbg.h"

static int ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log);
static int ql_drvr_state(qla_host_t *ha, qla_driver_state_t *drvr_state);
static uint32_t ql_drvr_state_size(qla_host_t *ha);
static int ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
		struct thread *td);

static struct cdevsw qla_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = ql_eioctl,
	.d_name = "qlcnic",
};

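/*
 * Create the character device, named after the network interface, through
 * which userland management tools issue the QLA_* ioctls handled by
 * ql_eioctl() below.
 */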
int
ql_make_cdev(qla_host_t *ha)
{
	ha->ioctl_dev = make_dev(&qla_cdevsw,
				ha->ifp->if_dunit,
				UID_ROOT,
				GID_WHEEL,
				0600,
				"%s",
				if_name(ha->ifp));

	if (ha->ioctl_dev == NULL)
		return (-1);

	ha->ioctl_dev->si_drv1 = ha;

	return (0);
}

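/*
 * Destroy the character device created by ql_make_cdev(), if any.
 */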
void
ql_del_cdev(qla_host_t *ha)
{
	if (ha->ioctl_dev != NULL)
		destroy_dev(ha->ioctl_dev);
	return;
}

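/*
 * Ioctl entry point: dispatch the QLA_* commands (register and flash
 * access, off-chip memory access, firmware minidump retrieval, driver
 * state capture, slowpath log retrieval and PCI id queries).
 */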
static int
ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td)
{
	qla_host_t *ha;
	int rval = 0;
	device_t pci_dev;
	struct ifnet *ifp;
	int count;

	q80_offchip_mem_val_t val;
	qla_rd_pci_ids_t *pci_ids;
	qla_rd_fw_dump_t *fw_dump;
	union {
		qla_reg_val_t *rv;
		qla_rd_flash_t *rdf;
		qla_wr_flash_t *wrf;
		qla_erase_flash_t *erf;
		qla_offchip_mem_val_t *mem;
	} u;

	if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
		return (ENXIO);

	pci_dev = ha->pci_dev;

	switch (cmd) {
	case QLA_RDWR_REG:

		u.rv = (qla_reg_val_t *)data;

		if (u.rv->direct) {
			if (u.rv->rd) {
				u.rv->val = READ_REG32(ha, u.rv->reg);
			} else {
				WRITE_REG32(ha, u.rv->reg, u.rv->val);
			}
		} else {
			if ((rval = ql_rdwr_indreg32(ha, u.rv->reg, &u.rv->val,
				u.rv->rd)))
				rval = ENXIO;
		}
		break;

	case QLA_RD_FLASH:

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.rdf = (qla_rd_flash_t *)data;
		if ((rval = ql_rd_flash32(ha, u.rdf->off, &u.rdf->data)))
			rval = ENXIO;
		break;

	case QLA_WR_FLASH:

		ifp = ha->ifp;

		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.wrf = (qla_wr_flash_t *)data;
		if ((rval = ql_wr_flash_buffer(ha, u.wrf->off, u.wrf->size,
			u.wrf->buffer))) {
			printf("flash write failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_ERASE_FLASH:

		ifp = ha->ifp;

		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.erf = (qla_erase_flash_t *)data;
		if ((rval = ql_erase_flash(ha, u.erf->off,
			u.erf->size))) {
			printf("flash erase failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_RDWR_MS_MEM:
		u.mem = (qla_offchip_mem_val_t *)data;

		if ((rval = ql_rdwr_offchip_mem(ha, u.mem->off, &val,
			u.mem->rd)))
			rval = ENXIO;
		else {
			u.mem->data_lo = val.data_lo;
			u.mem->data_hi = val.data_hi;
			u.mem->data_ulo = val.data_ulo;
			u.mem->data_uhi = val.data_uhi;
		}

		break;

	case QLA_RD_FW_DUMP_SIZE:

		if (ha->hw.mdump_init == 0) {
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;
		fw_dump->minidump_size = ha->hw.mdump_buffer_size +
						ha->hw.mdump_template_size;
		fw_dump->pci_func = ha->pci_func;

		break;

	case QLA_RD_FW_DUMP:

		if (ha->hw.mdump_init == 0) {
			device_printf(pci_dev, "%s: minidump not initialized\n", __func__);
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;

		if ((fw_dump->minidump == NULL) ||
			(fw_dump->minidump_size != (ha->hw.mdump_buffer_size +
				ha->hw.mdump_template_size))) {
			device_printf(pci_dev,
				"%s: minidump buffer [%p] size = [%d, %d] invalid\n", __func__,
				fw_dump->minidump, fw_dump->minidump_size,
				(ha->hw.mdump_buffer_size + ha->hw.mdump_template_size));
			rval = EINVAL;
			break;
		}

		if ((ha->pci_func & 0x1)) {
			device_printf(pci_dev, "%s: minidump allowed only on Port0\n", __func__);
			rval = ENXIO;
			break;
		}

		fw_dump->saved = 1;

		if (ha->offline) {
			if (ha->enable_minidump)
				ql_minidump(ha);

			fw_dump->saved = 0;
			fw_dump->usec_ts = ha->hw.mdump_usec_ts;

			if (!ha->hw.mdump_done) {
				device_printf(pci_dev,
					"%s: port offline minidump failed\n", __func__);
				rval = ENXIO;
				break;
			}
		} else {
#define QLA_LOCK_MDUMP_MS_TIMEOUT (QLA_LOCK_DEFAULT_MS_TIMEOUT * 5)
			if (QLA_LOCK(ha, __func__, QLA_LOCK_MDUMP_MS_TIMEOUT, 0) == 0) {
				if (!ha->hw.mdump_done) {
					fw_dump->saved = 0;
					QL_INITIATE_RECOVERY(ha);
					device_printf(pci_dev,
						"%s: recovery initiated to trigger minidump\n",
						__func__);
				}
				QLA_UNLOCK(ha, __func__);
			} else {
				device_printf(pci_dev, "%s: QLA_LOCK() failed0\n", __func__);
				rval = ENXIO;
				break;
			}

#define QLNX_DUMP_WAIT_SECS	30

			/* Wait up to QLNX_DUMP_WAIT_SECS for the dump to complete. */
			count = QLNX_DUMP_WAIT_SECS * 1000;

			while (count) {
				if (ha->hw.mdump_done)
					break;
				qla_mdelay(__func__, 100);
				count -= 100;
			}

			if (!ha->hw.mdump_done) {
				device_printf(pci_dev,
					"%s: port online minidump failed\n", __func__);
				rval = ENXIO;
				break;
			}
			fw_dump->usec_ts = ha->hw.mdump_usec_ts;

			if (QLA_LOCK(ha, __func__, QLA_LOCK_MDUMP_MS_TIMEOUT, 0) == 0) {
				ha->hw.mdump_done = 0;
				QLA_UNLOCK(ha, __func__);
			} else {
				device_printf(pci_dev, "%s: QLA_LOCK() failed1\n", __func__);
				rval = ENXIO;
				break;
			}
		}

		/* Copy out the minidump template followed by the dump data. */
		if ((rval = copyout(ha->hw.mdump_template,
			fw_dump->minidump, ha->hw.mdump_template_size))) {
			device_printf(pci_dev, "%s: template copyout failed\n", __func__);
			rval = ENXIO;
			break;
		}

		if ((rval = copyout(ha->hw.mdump_buffer,
				((uint8_t *)fw_dump->minidump +
					ha->hw.mdump_template_size),
				ha->hw.mdump_buffer_size))) {
			device_printf(pci_dev, "%s: minidump copyout failed\n", __func__);
			rval = ENXIO;
		}
		break;

	case QLA_RD_DRVR_STATE:
		rval = ql_drvr_state(ha, (qla_driver_state_t *)data);
		break;

	case QLA_RD_SLOWPATH_LOG:
		rval = ql_slowpath_log(ha, (qla_sp_log_t *)data);
		break;

	case QLA_RD_PCI_IDS:
		pci_ids = (qla_rd_pci_ids_t *)data;
		pci_ids->ven_id = pci_get_vendor(pci_dev);
		pci_ids->dev_id = pci_get_device(pci_dev);
		pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev);
		pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev);
		pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1);
		break;

	default:
		break;
	}

	return (rval);
}

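/*
 * Handler for QLA_RD_DRVR_STATE: if the caller passes a NULL buffer,
 * only report the required size; otherwise capture the current driver
 * state and copy it out to the user buffer.
 */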
static int
ql_drvr_state(qla_host_t *ha, qla_driver_state_t *state)
{
	int rval = 0;
	uint32_t drvr_state_size;

	drvr_state_size = ql_drvr_state_size(ha);

	if (state->buffer == NULL) {
		state->size = drvr_state_size;
		return (0);
	}

	if (state->size < drvr_state_size)
		return (ENXIO);

	if (ha->hw.drvr_state == NULL)
		return (ENOMEM);

	ql_capture_drvr_state(ha);

	rval = copyout(ha->hw.drvr_state, state->buffer, drvr_state_size);

	bzero(ha->hw.drvr_state, drvr_state_size);

	return (rval);
}

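/*
 * Size of the driver state snapshot: the header, the per-ring driver
 * software state, and copies of every Tx, Rx and status descriptor ring,
 * each section aligned to a 64-byte boundary.
 */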
static uint32_t
ql_drvr_state_size(qla_host_t *ha)
{
	uint32_t drvr_state_size;
	uint32_t size;

	size = sizeof (qla_drvr_state_hdr_t);
	drvr_state_size = QL_ALIGN(size, 64);

	size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
	drvr_state_size += QL_ALIGN(size, 64);

	size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
	drvr_state_size += QL_ALIGN(size, 64);

	size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
	drvr_state_size += QL_ALIGN(size, 64);

	size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS * ha->hw.num_tx_rings;
	drvr_state_size += QL_ALIGN(size, 64);

	size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS * ha->hw.num_rds_rings;
	drvr_state_size += QL_ALIGN(size, 64);

	size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS *
			ha->hw.num_sds_rings;
	drvr_state_size += QL_ALIGN(size, 64);

	return (drvr_state_size);
}

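/* Snapshot the software state of each Tx ring into the state buffer. */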
static void
ql_get_tx_state(qla_host_t *ha, qla_drvr_state_tx_t *tx_state)
{
	int i;

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_state->base_p_addr = ha->hw.tx_cntxt[i].tx_ring_paddr;
		tx_state->cons_p_addr = ha->hw.tx_cntxt[i].tx_cons_paddr;
		tx_state->tx_prod_reg = ha->hw.tx_cntxt[i].tx_prod_reg;
		tx_state->tx_cntxt_id = ha->hw.tx_cntxt[i].tx_cntxt_id;
		tx_state->txr_free = ha->hw.tx_cntxt[i].txr_free;
		tx_state->txr_next = ha->hw.tx_cntxt[i].txr_next;
		tx_state->txr_comp = ha->hw.tx_cntxt[i].txr_comp;
		tx_state++;
	}
	return;
}

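/* Snapshot the software state of each Rx (receive descriptor) ring. */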
static void
ql_get_rx_state(qla_host_t *ha, qla_drvr_state_rx_t *rx_state)
{
	int i;

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		rx_state->prod_std = ha->hw.rds[i].prod_std;
		rx_state->rx_next = ha->hw.rds[i].rx_next;
		rx_state++;
	}
	return;
}

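/* Snapshot the software state of each status descriptor (SDS) ring. */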
static void
ql_get_sds_state(qla_host_t *ha, qla_drvr_state_sds_t *sds_state)
{
	int i;

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		sds_state->sdsr_next = ha->hw.sds[i].sdsr_next;
		sds_state->sds_consumer = ha->hw.sds[i].sds_consumer;
		sds_state++;
	}
	return;
}

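/*
 * Fill the pre-allocated drvr_state buffer with a header (driver version,
 * link information, section offsets) followed by the per-ring software
 * state and raw copies of the Tx, Rx and status descriptor rings. If the
 * header already holds a capture (drvr_version_major is set), mark it as
 * saved and keep the earlier snapshot.
 */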
void
ql_capture_drvr_state(qla_host_t *ha)
{
	uint8_t *state_buffer;
	uint8_t *ptr;
	qla_drvr_state_hdr_t *hdr;
	uint32_t size;
	int i;

	state_buffer = ha->hw.drvr_state;

	if (state_buffer == NULL)
		return;

	hdr = (qla_drvr_state_hdr_t *)state_buffer;

	hdr->saved = 0;

	if (hdr->drvr_version_major) {
		hdr->saved = 1;
		return;
	}

	hdr->usec_ts = qla_get_usec_timestamp();

	hdr->drvr_version_major = QLA_VERSION_MAJOR;
	hdr->drvr_version_minor = QLA_VERSION_MINOR;
	hdr->drvr_version_build = QLA_VERSION_BUILD;

	bcopy(ha->hw.mac_addr, hdr->mac_addr, ETHER_ADDR_LEN);

	hdr->link_speed = ha->hw.link_speed;
	hdr->cable_length = ha->hw.cable_length;
	hdr->cable_oui = ha->hw.cable_oui;
	hdr->link_up = ha->hw.link_up;
	hdr->module_type = ha->hw.module_type;
	hdr->link_faults = ha->hw.link_faults;
	hdr->rcv_intr_coalesce = ha->hw.rcv_intr_coalesce;
	hdr->xmt_intr_coalesce = ha->hw.xmt_intr_coalesce;

	size = sizeof (qla_drvr_state_hdr_t);
	hdr->tx_state_offset = QL_ALIGN(size, 64);

	ptr = state_buffer + hdr->tx_state_offset;

	ql_get_tx_state(ha, (qla_drvr_state_tx_t *)ptr);

	size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
	hdr->rx_state_offset = hdr->tx_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->rx_state_offset;

	ql_get_rx_state(ha, (qla_drvr_state_rx_t *)ptr);

	size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
	hdr->sds_state_offset = hdr->rx_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->sds_state_offset;

	ql_get_sds_state(ha, (qla_drvr_state_sds_t *)ptr);

	size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
	hdr->txr_offset = hdr->sds_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->txr_offset;

	hdr->num_tx_rings = ha->hw.num_tx_rings;
	hdr->txr_size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS;
	hdr->txr_entries = NUM_TX_DESCRIPTORS;

	size = hdr->num_tx_rings * hdr->txr_size;
	bcopy(ha->hw.dma_buf.tx_ring.dma_b, ptr, size);

	hdr->rxr_offset = hdr->txr_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->rxr_offset;

	hdr->rxr_size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS;
	hdr->rxr_entries = NUM_RX_DESCRIPTORS;
	hdr->num_rx_rings = ha->hw.num_rds_rings;

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		bcopy(ha->hw.dma_buf.rds_ring[i].dma_b, ptr, hdr->rxr_size);
		ptr += hdr->rxr_size;
	}

	size = hdr->rxr_size * hdr->num_rx_rings;
	hdr->sds_offset = hdr->rxr_offset + QL_ALIGN(size, 64);
	hdr->sds_ring_size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS;
	hdr->sds_entries = NUM_STATUS_DESCRIPTORS;
	hdr->num_sds_rings = ha->hw.num_sds_rings;

	ptr = state_buffer + hdr->sds_offset;
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		bcopy(ha->hw.dma_buf.sds_ring[i].dma_b, ptr, hdr->sds_ring_size);
		ptr += hdr->sds_ring_size;
	}
	return;
}

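/* Allocate (and zero) the buffer that holds driver state captures. */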
void
ql_alloc_drvr_state_buffer(qla_host_t *ha)
{
	uint32_t drvr_state_size;

	drvr_state_size = ql_drvr_state_size(ha);

	ha->hw.drvr_state = malloc(drvr_state_size, M_QLA83XXBUF, M_NOWAIT);

	if (ha->hw.drvr_state != NULL)
		bzero(ha->hw.drvr_state, drvr_state_size);

	return;
}

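/* Release the driver state buffer, if it was allocated. */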
void
ql_free_drvr_state_buffer(qla_host_t *ha)
{
	if (ha->hw.drvr_state != NULL)
		free(ha->hw.drvr_state, M_QLA83XXBUF);
	return;
}

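/*
 * Append one entry to the circular slowpath log: a format string index,
 * a microsecond timestamp and up to five parameters. Once NUM_LOG_ENTRIES
 * entries have been recorded the oldest entry is overwritten (the index
 * mask assumes NUM_LOG_ENTRIES is a power of two).
 */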
void
ql_sp_log(qla_host_t *ha, uint16_t fmtstr_idx, uint16_t num_params,
	uint32_t param0, uint32_t param1, uint32_t param2, uint32_t param3,
	uint32_t param4)
{
	qla_sp_log_entry_t *sp_e, *sp_log;

	if (((sp_log = ha->hw.sp_log) == NULL) || ha->hw.sp_log_stop)
		return;

	mtx_lock(&ha->sp_log_lock);

	sp_e = &sp_log[ha->hw.sp_log_index];

	bzero(sp_e, sizeof (qla_sp_log_entry_t));

	sp_e->fmtstr_idx = fmtstr_idx;
	sp_e->num_params = num_params;

	sp_e->usec_ts = qla_get_usec_timestamp();

	sp_e->params[0] = param0;
	sp_e->params[1] = param1;
	sp_e->params[2] = param2;
	sp_e->params[3] = param3;
	sp_e->params[4] = param4;

	ha->hw.sp_log_index = (ha->hw.sp_log_index + 1) & (NUM_LOG_ENTRIES - 1);

	if (ha->hw.sp_log_num_entries < NUM_LOG_ENTRIES)
		ha->hw.sp_log_num_entries++;

	mtx_unlock(&ha->sp_log_lock);

	return;
}

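/* Allocate (and zero) the slowpath log ring and reset its indices. */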
void
ql_alloc_sp_log_buffer(qla_host_t *ha)
{
	uint32_t size;

	size = (sizeof(qla_sp_log_entry_t)) * NUM_LOG_ENTRIES;

	ha->hw.sp_log = malloc(size, M_QLA83XXBUF, M_NOWAIT);

	if (ha->hw.sp_log != NULL)
		bzero(ha->hw.sp_log, size);

	ha->hw.sp_log_index = 0;
	ha->hw.sp_log_num_entries = 0;

	return;
}

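/* Release the slowpath log ring, if it was allocated. */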
void
ql_free_sp_log_buffer(qla_host_t *ha)
{
	if (ha->hw.sp_log != NULL)
		free(ha->hw.sp_log, M_QLA83XXBUF);
	return;
}

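/*
 * Handler for QLA_RD_SLOWPATH_LOG: copy the whole log ring to the user
 * buffer and report the next write index and the number of valid entries.
 */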
static int
ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log)
{
	int rval = 0;
	uint32_t size;

	if ((ha->hw.sp_log == NULL) || (log->buffer == NULL))
		return (EINVAL);

	size = (sizeof(qla_sp_log_entry_t) * NUM_LOG_ENTRIES);

	mtx_lock(&ha->sp_log_lock);

	rval = copyout(ha->hw.sp_log, log->buffer, size);

	if (!rval) {
		log->next_idx = ha->hw.sp_log_index;
		log->num_entries = ha->hw.sp_log_num_entries;
	}
	device_printf(ha->pci_dev,
		"%s: exit [rval = %d][%p, next_idx = %d, %d entries, %d bytes]\n",
		__func__, rval, log->buffer, log->next_idx, log->num_entries, size);
	mtx_unlock(&ha->sp_log_lock);

	return (rval);
}