xref: /freebsd/sys/dev/qlxgbe/ql_ioctl.c (revision d06955f9bdb1416d9196043ed781f9b36dae9adc)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2013-2016 Qlogic Corporation
5  * All rights reserved.
6  *
7  *  Redistribution and use in source and binary forms, with or without
8  *  modification, are permitted provided that the following conditions
9  *  are met:
10  *
11  *  1. Redistributions of source code must retain the above copyright
12  *     notice, this list of conditions and the following disclaimer.
13  *  2. Redistributions in binary form must reproduce the above copyright
14  *     notice, this list of conditions and the following disclaimer in the
15  *     documentation and/or other materials provided with the distribution.
16  *
17  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27  *  POSSIBILITY OF SUCH DAMAGE.
28  */
29 /*
30  * File: ql_ioctl.c
31  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 
38 #include "ql_os.h"
39 #include "ql_hw.h"
40 #include "ql_def.h"
41 #include "ql_inline.h"
42 #include "ql_glbl.h"
43 #include "ql_ioctl.h"
44 #include "ql_ver.h"
45 #include "ql_dbg.h"
46 
47 static int ql_drvr_state(qla_host_t *ha, qla_driver_state_t *drvr_state);
48 static uint32_t ql_drvr_state_size(qla_host_t *ha);
49 static int ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
50 		struct thread *td);
51 
52 static struct cdevsw qla_cdevsw = {
53 	.d_version = D_VERSION,
54 	.d_ioctl = ql_eioctl,
55 	.d_name = "qlcnic",
56 };
57 
58 int
59 ql_make_cdev(qla_host_t *ha)
60 {
61         ha->ioctl_dev = make_dev(&qla_cdevsw,
62 				ha->ifp->if_dunit,
63                                 UID_ROOT,
64                                 GID_WHEEL,
65                                 0600,
66                                 "%s",
67                                 if_name(ha->ifp));
68 
69 	if (ha->ioctl_dev == NULL)
70 		return (-1);
71 
72         ha->ioctl_dev->si_drv1 = ha;
73 
74 	return (0);
75 }
76 
77 void
78 ql_del_cdev(qla_host_t *ha)
79 {
80 	if (ha->ioctl_dev != NULL)
81 		destroy_dev(ha->ioctl_dev);
82 	return;
83 }
84 
/*
 * ioctl entry point for the "qlcnic" management cdev.
 *
 * Dispatches register/flash/off-chip-memory accesses, firmware minidump
 * retrieval, driver-state capture and PCI ID queries.  Returns 0 on
 * success or an errno; most low-level failures are reported as ENXIO.
 */
static int
ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td)
{
	qla_host_t *ha;
	int rval = 0;
	device_t pci_dev;
	struct ifnet *ifp;
	int count;

	q80_offchip_mem_val_t val;
	qla_rd_pci_ids_t *pci_ids;
	qla_rd_fw_dump_t *fw_dump;
	/* One view per ioctl; 'data' is cast according to 'cmd'. */
	union {
		qla_reg_val_t *rv;
		qla_rd_flash_t *rdf;
		qla_wr_flash_t *wrf;
		qla_erase_flash_t *erf;
		qla_offchip_mem_val_t *mem;
	} u;

	/* softc was stashed in si_drv1 by ql_make_cdev(). */
	if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
		return ENXIO;

	pci_dev= ha->pci_dev;

	switch(cmd) {

	case QLA_RDWR_REG:

		u.rv = (qla_reg_val_t *)data;

		/* Direct BAR access vs. indirect (windowed) register access. */
		if (u.rv->direct) {
			if (u.rv->rd) {
				u.rv->val = READ_REG32(ha, u.rv->reg);
			} else {
				WRITE_REG32(ha, u.rv->reg, u.rv->val);
			}
		} else {
			if ((rval = ql_rdwr_indreg32(ha, u.rv->reg, &u.rv->val,
				u.rv->rd)))
				rval = ENXIO;
		}
		break;

	case QLA_RD_FLASH:

		/* Flash layout must have been parsed at attach time. */
		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.rdf = (qla_rd_flash_t *)data;
		if ((rval = ql_rd_flash32(ha, u.rdf->off, &u.rdf->data)))
			rval = ENXIO;
		break;

	case QLA_WR_FLASH:

		ifp = ha->ifp;

		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		/* Refuse to write flash while the interface is up. */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.wrf = (qla_wr_flash_t *)data;
		if ((rval = ql_wr_flash_buffer(ha, u.wrf->off, u.wrf->size,
			u.wrf->buffer))) {
			printf("flash write failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_ERASE_FLASH:

		ifp = ha->ifp;

		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		/* Refuse to erase flash while the interface is up. */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.erf = (qla_erase_flash_t *)data;
		if ((rval = ql_erase_flash(ha, u.erf->off,
			u.erf->size))) {
			printf("flash erase failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_RDWR_MS_MEM:
		u.mem = (qla_offchip_mem_val_t *)data;

		/*
		 * Transfer goes through a local staging struct; on a
		 * successful read the four 32-bit words are copied back
		 * to the user-visible request.
		 */
		if ((rval = ql_rdwr_offchip_mem(ha, u.mem->off, &val,
			u.mem->rd)))
			rval = ENXIO;
		else {
			u.mem->data_lo = val.data_lo;
			u.mem->data_hi = val.data_hi;
			u.mem->data_ulo = val.data_ulo;
			u.mem->data_uhi = val.data_uhi;
		}

		break;

	case QLA_RD_FW_DUMP_SIZE:

		if (ha->hw.mdump_init == 0) {
			rval = EINVAL;
			break;
		}

		/* Report total size: template followed by dump buffer. */
		fw_dump = (qla_rd_fw_dump_t *)data;
		fw_dump->minidump_size = ha->hw.mdump_buffer_size +
						ha->hw.mdump_template_size;
		fw_dump->pci_func = ha->pci_func;

		break;

	case QLA_RD_FW_DUMP:

		if (ha->hw.mdump_init == 0) {
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;

		/* Caller must supply a buffer of exactly the advertised size. */
		if ((fw_dump->minidump == NULL) ||
			(fw_dump->minidump_size != (ha->hw.mdump_buffer_size +
				ha->hw.mdump_template_size))) {
			rval = EINVAL;
			break;
		}

		/*
		 * If no dump has been taken yet, trigger an adapter
		 * recovery, which produces the minidump as a side effect.
		 */
		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
			if (!ha->hw.mdump_done)
				ha->qla_initiate_recovery = 1;
			QLA_UNLOCK(ha, __func__);
		} else {
			rval = ENXIO;
			break;
		}

#define QLNX_DUMP_WAIT_SECS	30

		/* Poll (100ms steps) for dump completion, up to 30 seconds. */
		count = QLNX_DUMP_WAIT_SECS * 1000;

		while (count) {
			if (ha->hw.mdump_done)
				break;
			qla_mdelay(__func__, 100);
			count -= 100;
		}

		if (!ha->hw.mdump_done) {
			rval = ENXIO;
			break;
		}

		/* Consume the dump: clear the flag under the lock. */
		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
			ha->hw.mdump_done = 0;
			QLA_UNLOCK(ha, __func__);
		} else {
			rval = ENXIO;
			break;
		}

		/* Copy out template first, then the dump data right after it. */
		if ((rval = copyout(ha->hw.mdump_template,
			fw_dump->minidump, ha->hw.mdump_template_size))) {
			rval = ENXIO;
			break;
		}

		if ((rval = copyout(ha->hw.mdump_buffer,
				((uint8_t *)fw_dump->minidump +
					ha->hw.mdump_template_size),
				ha->hw.mdump_buffer_size)))
			rval = ENXIO;
		break;

	case QLA_RD_DRVR_STATE:
		rval = ql_drvr_state(ha, (qla_driver_state_t *)data);
		break;

	case QLA_RD_PCI_IDS:
		pci_ids = (qla_rd_pci_ids_t *)data;
		pci_ids->ven_id = pci_get_vendor(pci_dev);
		pci_ids->dev_id = pci_get_device(pci_dev);
		pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev);
		pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev);
		pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1);
		break;

	default:
		/* Unknown commands succeed silently (rval stays 0). */
		break;
	}

	return rval;
}
307 
308 
309 static int
310 ql_drvr_state(qla_host_t *ha, qla_driver_state_t *state)
311 {
312 	int rval = 0;
313 	uint32_t drvr_state_size;
314 	qla_drvr_state_hdr_t *hdr;
315 
316 	drvr_state_size = ql_drvr_state_size(ha);
317 
318 	if (state->buffer == NULL) {
319 		state->size = drvr_state_size;
320 		return (0);
321 	}
322 
323 	if (state->size < drvr_state_size)
324 		return (ENXIO);
325 
326 	if (ha->hw.drvr_state == NULL)
327 		return (ENOMEM);
328 
329 	hdr = ha->hw.drvr_state;
330 
331 	if (!hdr->drvr_version_major)
332 		ql_capture_drvr_state(ha);
333 
334 	rval = copyout(ha->hw.drvr_state, state->buffer, drvr_state_size);
335 
336 	bzero(ha->hw.drvr_state, drvr_state_size);
337 
338 	return (rval);
339 }
340 
341 static uint32_t
342 ql_drvr_state_size(qla_host_t *ha)
343 {
344 	uint32_t drvr_state_size;
345 	uint32_t size;
346 
347 	size = sizeof (qla_drvr_state_hdr_t);
348 	drvr_state_size = QL_ALIGN(size, 64);
349 
350 	size =  ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
351 	drvr_state_size += QL_ALIGN(size, 64);
352 
353 	size =  ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
354 	drvr_state_size += QL_ALIGN(size, 64);
355 
356 	size =  ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
357 	drvr_state_size += QL_ALIGN(size, 64);
358 
359 	size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS * ha->hw.num_tx_rings;
360 	drvr_state_size += QL_ALIGN(size, 64);
361 
362 	size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS * ha->hw.num_rds_rings;
363 	drvr_state_size += QL_ALIGN(size, 64);
364 
365 	size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS *
366 			ha->hw.num_sds_rings;
367 	drvr_state_size += QL_ALIGN(size, 64);
368 
369 	return (drvr_state_size);
370 }
371 
372 static void
373 ql_get_tx_state(qla_host_t *ha, qla_drvr_state_tx_t *tx_state)
374 {
375 	int i;
376 
377 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
378 		tx_state->base_p_addr = ha->hw.tx_cntxt[i].tx_ring_paddr;
379 		tx_state->cons_p_addr = ha->hw.tx_cntxt[i].tx_cons_paddr;
380 		tx_state->tx_prod_reg = ha->hw.tx_cntxt[i].tx_prod_reg;
381 		tx_state->tx_cntxt_id = ha->hw.tx_cntxt[i].tx_cntxt_id;
382 		tx_state->txr_free = ha->hw.tx_cntxt[i].txr_free;
383 		tx_state->txr_next = ha->hw.tx_cntxt[i].txr_next;
384 		tx_state->txr_comp = ha->hw.tx_cntxt[i].txr_comp;
385 		tx_state++;
386 	}
387 	return;
388 }
389 
390 static void
391 ql_get_rx_state(qla_host_t *ha, qla_drvr_state_rx_t *rx_state)
392 {
393 	int i;
394 
395 	for (i = 0; i < ha->hw.num_rds_rings; i++) {
396 		rx_state->prod_std = ha->hw.rds[i].prod_std;
397 		rx_state->rx_next = ha->hw.rds[i].rx_next;
398 		rx_state++;
399 	}
400 	return;
401 }
402 
403 static void
404 ql_get_sds_state(qla_host_t *ha, qla_drvr_state_sds_t *sds_state)
405 {
406 	int i;
407 
408 	for (i = 0; i < ha->hw.num_sds_rings; i++) {
409 		sds_state->sdsr_next = ha->hw.sds[i].sdsr_next;
410 		sds_state->sds_consumer = ha->hw.sds[i].sds_consumer;
411 		sds_state++;
412 	}
413 	return;
414 }
415 
/*
 * Capture a snapshot of the driver's state into ha->hw.drvr_state.
 *
 * Layout (must mirror ql_drvr_state_size()): a qla_drvr_state_hdr_t,
 * then 64-byte-aligned sections of per-ring TX/RX/SDS software state,
 * then the raw TX command rings, RX descriptor rings and status rings
 * copied from their DMA buffers.  Each section offset is recorded in
 * the header.  No-op if the snapshot buffer was never allocated.
 */
void
ql_capture_drvr_state(qla_host_t *ha)
{
	uint8_t *state_buffer;
	uint8_t *ptr;
	uint32_t drvr_state_size;
	qla_drvr_state_hdr_t *hdr;
	uint32_t size;
	int i;

	drvr_state_size = ql_drvr_state_size(ha);

	state_buffer =  ha->hw.drvr_state;

	if (state_buffer == NULL)
		return;

	bzero(state_buffer, drvr_state_size);

	hdr = (qla_drvr_state_hdr_t *)state_buffer;

	/*
	 * A non-zero major version doubles as the "snapshot present"
	 * marker checked by ql_drvr_state().
	 */
	hdr->drvr_version_major = QLA_VERSION_MAJOR;
	hdr->drvr_version_minor = QLA_VERSION_MINOR;
	hdr->drvr_version_build = QLA_VERSION_BUILD;

	bcopy(ha->hw.mac_addr, hdr->mac_addr, ETHER_ADDR_LEN);

	hdr->link_speed = ha->hw.link_speed;
	hdr->cable_length = ha->hw.cable_length;
	hdr->cable_oui = ha->hw.cable_oui;
	hdr->link_up = ha->hw.link_up;
	hdr->module_type = ha->hw.module_type;
	hdr->link_faults = ha->hw.link_faults;
	hdr->rcv_intr_coalesce = ha->hw.rcv_intr_coalesce;
	hdr->xmt_intr_coalesce = ha->hw.xmt_intr_coalesce;

	/* TX software state follows the (aligned) header. */
	size = sizeof (qla_drvr_state_hdr_t);
	hdr->tx_state_offset = QL_ALIGN(size, 64);

	ptr = state_buffer + hdr->tx_state_offset;

	ql_get_tx_state(ha, (qla_drvr_state_tx_t *)ptr);

	/* RX software state follows the TX section. */
	size =  ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
	hdr->rx_state_offset = hdr->tx_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->rx_state_offset;

	ql_get_rx_state(ha, (qla_drvr_state_rx_t *)ptr);

	/* SDS software state follows the RX section. */
	size =  ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
	hdr->sds_state_offset = hdr->rx_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->sds_state_offset;

	ql_get_sds_state(ha, (qla_drvr_state_sds_t *)ptr);

	/* Raw TX command rings (one contiguous DMA buffer for all rings). */
	size =  ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
	hdr->txr_offset = hdr->sds_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->txr_offset;

	hdr->num_tx_rings = ha->hw.num_tx_rings;
	hdr->txr_size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS;
	hdr->txr_entries = NUM_TX_DESCRIPTORS;

	size = hdr->num_tx_rings * hdr->txr_size;
	bcopy(ha->hw.dma_buf.tx_ring.dma_b, ptr, size);

	/* Raw RX descriptor rings (one DMA buffer per ring). */
	hdr->rxr_offset = hdr->txr_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->rxr_offset;

	hdr->rxr_size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS;
	hdr->rxr_entries = NUM_RX_DESCRIPTORS;
	hdr->num_rx_rings = ha->hw.num_rds_rings;

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		bcopy(ha->hw.dma_buf.rds_ring[i].dma_b, ptr, hdr->rxr_size);
		ptr += hdr->rxr_size;
	}

	/* Raw status rings (one DMA buffer per ring). */
	size = hdr->rxr_size * hdr->num_rx_rings;
	hdr->sds_offset = hdr->rxr_offset + QL_ALIGN(size, 64);
	hdr->sds_ring_size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS;
	hdr->sds_entries = NUM_STATUS_DESCRIPTORS;
	hdr->num_sds_rings = ha->hw.num_sds_rings;

	ptr = state_buffer + hdr->sds_offset;
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		bcopy(ha->hw.dma_buf.sds_ring[i].dma_b, ptr, hdr->sds_ring_size);
		ptr += hdr->sds_ring_size;
	}
	return;
}
507 
508 void
509 ql_alloc_drvr_state_buffer(qla_host_t *ha)
510 {
511 	uint32_t drvr_state_size;
512 
513 	drvr_state_size = ql_drvr_state_size(ha);
514 
515 	ha->hw.drvr_state =  malloc(drvr_state_size, M_QLA83XXBUF, M_NOWAIT);
516 
517 	return;
518 }
519 
520 void
521 ql_free_drvr_state_buffer(qla_host_t *ha)
522 {
523 	if (ha->hw.drvr_state != NULL)
524 		free(ha->hw.drvr_state, M_QLA83XXBUF);
525 	return;
526 }
527 
528