xref: /freebsd/sys/dev/qlnx/qlnxe/qlnx_ioctl.c (revision 0183e0151669735d62584fbba9125ed90716af5e)
1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 
29 /*
30  * File: qlnx_ioctl.c
31  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include "qlnx_os.h"
38 #include "bcm_osal.h"
39 
40 #include "reg_addr.h"
41 #include "ecore_gtt_reg_addr.h"
42 #include "ecore.h"
43 #include "ecore_chain.h"
44 #include "ecore_status.h"
45 #include "ecore_hw.h"
46 #include "ecore_rt_defs.h"
47 #include "ecore_init_ops.h"
48 #include "ecore_int.h"
49 #include "ecore_cxt.h"
50 #include "ecore_spq.h"
51 #include "ecore_init_fw_funcs.h"
52 #include "ecore_sp_commands.h"
53 #include "ecore_dev_api.h"
54 #include "ecore_l2_api.h"
55 #include "ecore_mcp.h"
56 #include "ecore_hw_defs.h"
57 #include "mcp_public.h"
58 #include "ecore_iro.h"
59 #include "nvm_cfg.h"
60 #include "ecore_dev_api.h"
61 #include "ecore_dbg_fw_funcs.h"
62 
63 #include "qlnx_ioctl.h"
64 #include "qlnx_def.h"
65 #include "qlnx_ver.h"
66 #include <sys/smp.h>
67 
68 
/* ioctl entry point for the control device (defined below) */
static int qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
		struct thread *td);

/* character-device switch for the per-interface control node; only the
 * ioctl operation is implemented */
static struct cdevsw qlnx_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = qlnx_eioctl,
	.d_name = "qlnxioctl",
};
77 
78 int
79 qlnx_make_cdev(qlnx_host_t *ha)
80 {
81 	ha->ioctl_dev = make_dev(&qlnx_cdevsw,
82 				ha->ifp->if_dunit,
83 				UID_ROOT,
84 				GID_WHEEL,
85 				0600,
86 				"%s",
87 				if_name(ha->ifp));
88 
89 	if (ha->ioctl_dev == NULL)
90 		return (-1);
91 
92 	ha->ioctl_dev->si_drv1 = ha;
93 
94 	return (0);
95 }
96 
97 void
98 qlnx_del_cdev(qlnx_host_t *ha)
99 {
100 	if (ha->ioctl_dev != NULL)
101 		destroy_dev(ha->ioctl_dev);
102 	return;
103 }
104 
/*
 * qlnx_grc_dump - capture a GRC dump for hardware function 'hwfn_index'
 * into the preallocated ha->grcdump[hwfn_index] buffer.
 *
 * On success returns 0 and stores the dump length (in dwords) in
 * *num_dumped_dwords.  If a dump is already cached for this hwfn
 * (ha->grcdump_dwords[] non-zero) the cached length is returned without
 * touching the hardware.  On failure returns EINVAL when no PTT window
 * is available, otherwise the non-zero ecore DBG status from
 * ecore_dbg_grc_dump().
 */
int
qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
{
	int rval = EINVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	if (ha->grcdump_dwords[hwfn_index]) {
		/* the grcdump is already available */
		*num_dumped_dwords = ha->grcdump_dwords[hwfn_index];
		return (0);
	}

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, (ha->pci_dev, "%s : ecore_ptt_acquire failed\n",
			__func__));
		return (rval);
	}

	/* buffer size is in bytes; ecore expects a dword count, hence >> 2 */
	if ((rval = ecore_dbg_grc_dump(p_hwfn, p_ptt,
			ha->grcdump[hwfn_index],
			(ha->grcdump_size[hwfn_index] >> 2),
			num_dumped_dwords)) == DBG_STATUS_OK) {
	 	rval = 0;
		ha->grcdump_taken = 1;
	} else
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s : ecore_dbg_grc_dump failed [%d, 0x%x]\n",
			__func__, hwfn_index, rval));

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}
144 
145 static void
146 qlnx_get_grc_dump_size(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
147 {
148 	int i;
149 
150 	grcdump->pci_func = ha->pci_func;
151 
152 	for (i = 0; i < ha->cdev.num_hwfns; i++)
153 		grcdump->grcdump_size[i] = ha->grcdump_size[i];
154 
155 	return;
156 }
157 
/*
 * qlnx_get_grc_dump - QLNX_GRC_DUMP ioctl handler.  Takes (or reuses) a
 * GRC dump for every hardware function and copies each one out to the
 * user buffers supplied in grcdump->grcdump[].
 *
 * Returns 0 on success; EINVAL when a kernel or user buffer is missing
 * or the user buffer is too small; otherwise the error from
 * qlnx_grc_dump() or copyout().
 */
static int
qlnx_get_grc_dump(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
{
	int		i;
	int		rval = 0;
	uint32_t	dwords = 0;

	grcdump->pci_func = ha->pci_func;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {

		/* validate both the kernel and user-side buffers up front */
		if ((ha->grcdump[i] == NULL) || (grcdump->grcdump[i] == NULL) ||
			(grcdump->grcdump_size[i] < ha->grcdump_size[i]))
			return (EINVAL);

		rval = qlnx_grc_dump(ha, &dwords, i);

		if (rval)
			break;

		grcdump->grcdump_dwords[i] = dwords;

		QL_DPRINT1(ha, (ha->pci_dev, "%s: grcdump_dwords[%d] = 0x%x\n",
			__func__, i, dwords));

		rval = copyout(ha->grcdump[i], grcdump->grcdump[i],
				ha->grcdump_size[i]);

		if (rval)
			break;

		/* invalidate the cached dump so the next call re-captures */
		ha->grcdump_dwords[i] = 0;
	}

	ha->grcdump_taken = 0;

	return (rval);
}
196 
/*
 * qlnx_idle_chk - run the firmware idle-check for hardware function
 * 'hwfn_index' into the preallocated ha->idle_chk[hwfn_index] buffer.
 *
 * On success returns 0 and stores the dump length (in dwords) in
 * *num_dumped_dwords.  A previously captured result (idle_chk_dwords[]
 * non-zero) is reused without touching the hardware.  On failure
 * returns EINVAL when no PTT window is available, otherwise the
 * non-zero ecore DBG status from ecore_dbg_idle_chk_dump().
 */
int
qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
{
	int rval = EINVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	if (ha->idle_chk_dwords[hwfn_index]) {
		/* the idle check is already available */
		*num_dumped_dwords = ha->idle_chk_dwords[hwfn_index];
		return (0);
	}

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s : ecore_ptt_acquire failed\n", __func__));
		return (rval);
	}

	/* buffer size is in bytes; ecore expects a dword count, hence >> 2 */
	if ((rval = ecore_dbg_idle_chk_dump(p_hwfn, p_ptt,
			ha->idle_chk[hwfn_index],
			(ha->idle_chk_size[hwfn_index] >> 2),
			num_dumped_dwords)) == DBG_STATUS_OK) {
	 	rval = 0;
		ha->idle_chk_taken = 1;
	} else
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s : ecore_dbg_idle_chk_dump failed [%d, 0x%x]\n",
			__func__, hwfn_index, rval));

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}
236 
237 static void
238 qlnx_get_idle_chk_size(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
239 {
240 	int i;
241 
242 	idle_chk->pci_func = ha->pci_func;
243 
244 	for (i = 0; i < ha->cdev.num_hwfns; i++)
245 		idle_chk->idle_chk_size[i] = ha->idle_chk_size[i];
246 
247 	return;
248 }
249 
/*
 * qlnx_get_idle_chk - QLNX_IDLE_CHK ioctl handler.  Runs (or reuses)
 * the idle-check for every hardware function and copies each result
 * out to the user buffers supplied in idle_chk->idle_chk[].
 *
 * Returns 0 on success; EINVAL when a kernel or user buffer is missing
 * or the user buffer is too small; otherwise the error from
 * qlnx_idle_chk() or copyout().
 */
static int
qlnx_get_idle_chk(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
{
	int		i;
	int		rval = 0;
	uint32_t	dwords = 0;

	idle_chk->pci_func = ha->pci_func;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {

		/* validate both the kernel and user-side buffers up front */
		if ((ha->idle_chk[i] == NULL) ||
				(idle_chk->idle_chk[i] == NULL) ||
				(idle_chk->idle_chk_size[i] <
					ha->idle_chk_size[i]))
			return (EINVAL);

		rval = qlnx_idle_chk(ha, &dwords, i);

		if (rval)
			break;

		idle_chk->idle_chk_dwords[i] = dwords;

		QL_DPRINT1(ha, (ha->pci_dev, "%s: idle_chk_dwords[%d] = 0x%x\n",
			__func__, i, dwords));

               	rval = copyout(ha->idle_chk[i], idle_chk->idle_chk[i],
				ha->idle_chk_size[i]);

		if (rval)
			break;

		/* invalidate the cached result so the next call re-runs */
		ha->idle_chk_dwords[i] = 0;
	}
	ha->idle_chk_taken = 0;

	return (rval);
}
289 
/*
 * qlnx_get_trace_cmd_size - return the buffer size in bytes needed to
 * capture the debug data selected by 'cmd' (MCP trace, reg/IGU fifo,
 * protection override, or fw asserts) on hardware function
 * 'hwfn_index'.  Returns 0 when the PTT window cannot be acquired, the
 * size query fails, or 'cmd' is unknown.
 */
static uint32_t
qlnx_get_trace_cmd_size(qlnx_host_t *ha, int hwfn_index, uint16_t cmd)
{
	int rval = -1;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	uint32_t num_dwords = 0;

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s: ecore_ptt_acquire [%d, 0x%x]failed\n",
			__func__, hwfn_index, cmd));
		return (0);
	}

	switch (cmd) {

	case QLNX_MCP_TRACE:
		rval = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_REG_FIFO:
		rval = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_IGU_FIFO:
		rval = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_PROTECTION_OVERRIDE:
		rval = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_FW_ASSERTS:
		rval = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;
	}

	/* an unknown cmd leaves rval == -1, so a size of 0 is reported */
	if (rval != DBG_STATUS_OK) {
		QL_DPRINT1(ha, (ha->pci_dev, "%s : cmd = 0x%x failed [0x%x]\n",
			__func__, cmd, rval));
		num_dwords = 0;
	}

	ecore_ptt_release(p_hwfn, p_ptt);

	return ((num_dwords * sizeof (uint32_t)));
}
346 
347 static void
348 qlnx_get_trace_size(qlnx_host_t *ha, qlnx_trace_t *trace)
349 {
350 	int i;
351 
352 	trace->pci_func = ha->pci_func;
353 
354 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
355 		trace->size[i] = qlnx_get_trace_cmd_size(ha, i, trace->cmd);
356 	}
357 
358 	return;
359 }
360 
361 static int
362 qlnx_get_trace(qlnx_host_t *ha, int hwfn_index, qlnx_trace_t *trace)
363 {
364         int rval = -1;
365         struct ecore_hwfn *p_hwfn;
366         struct ecore_ptt *p_ptt;
367 	uint32_t num_dwords = 0;
368 	void *buffer;
369 
370 	buffer = qlnx_zalloc(trace->size[hwfn_index]);
371 	if (buffer == NULL) {
372                 QL_DPRINT1(ha, (ha->pci_dev,
373 			"%s: qlnx_zalloc [%d, 0x%x]failed\n",
374                         __func__, hwfn_index, trace->cmd));
375                 return (ENXIO);
376 	}
377 	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
378 
379         p_hwfn = &ha->cdev.hwfns[hwfn_index];
380         p_ptt = ecore_ptt_acquire(p_hwfn);
381 
382         if (!p_ptt) {
383                 QL_DPRINT1(ha, (ha->pci_dev,
384 			"%s: ecore_ptt_acquire [%d, 0x%x]failed\n",
385                         __func__, hwfn_index, trace->cmd));
386                 return (ENXIO);
387         }
388 
389 	switch (trace->cmd) {
390 
391 	case QLNX_MCP_TRACE:
392         	rval = ecore_dbg_mcp_trace_dump(p_hwfn, p_ptt,
393 				buffer, (trace->size[hwfn_index] >> 2),
394 				&num_dwords);
395 		break;
396 
397 	case QLNX_REG_FIFO:
398         	rval = ecore_dbg_reg_fifo_dump(p_hwfn, p_ptt,
399 				buffer, (trace->size[hwfn_index] >> 2),
400 				&num_dwords);
401 		break;
402 
403 	case QLNX_IGU_FIFO:
404         	rval = ecore_dbg_igu_fifo_dump(p_hwfn, p_ptt,
405 				buffer, (trace->size[hwfn_index] >> 2),
406 				&num_dwords);
407 		break;
408 
409 	case QLNX_PROTECTION_OVERRIDE:
410         	rval = ecore_dbg_protection_override_dump(p_hwfn, p_ptt,
411 				buffer, (trace->size[hwfn_index] >> 2),
412 				&num_dwords);
413 		break;
414 
415 	case QLNX_FW_ASSERTS:
416         	rval = ecore_dbg_fw_asserts_dump(p_hwfn, p_ptt,
417 				buffer, (trace->size[hwfn_index] >> 2),
418 				&num_dwords);
419 		break;
420 	}
421 
422         if (rval != DBG_STATUS_OK) {
423                 QL_DPRINT1(ha, (ha->pci_dev, "%s : cmd = 0x%x failed [0x%x]\n",
424                         __func__, trace->cmd, rval));
425 		num_dwords = 0;
426         }
427 
428         ecore_ptt_release(p_hwfn, p_ptt);
429 
430 	trace->dwords[hwfn_index] = num_dwords;
431 
432 	if (num_dwords) {
433                	rval = copyout(buffer, trace->buffer[hwfn_index],
434 				(num_dwords << 2));
435 	}
436 
437         return (rval);
438 }
439 
440 static int
441 qlnx_reg_rd_wr(qlnx_host_t *ha, qlnx_reg_rd_wr_t *reg_rd_wr)
442 {
443 	int			rval = 0;
444 	struct ecore_hwfn	*p_hwfn;
445 
446 	if (reg_rd_wr->hwfn_index >= QLNX_MAX_HW_FUNCS) {
447 		return (EINVAL);
448 	}
449 
450 	p_hwfn = &ha->cdev.hwfns[reg_rd_wr->hwfn_index];
451 
452 	switch (reg_rd_wr->cmd) {
453 
454 		case QLNX_REG_READ_CMD:
455 			if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
456 				reg_rd_wr->val = qlnx_reg_rd32(p_hwfn,
457 							reg_rd_wr->addr);
458 			}
459 			break;
460 
461 		case QLNX_REG_WRITE_CMD:
462 			if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
463 				qlnx_reg_wr32(p_hwfn, reg_rd_wr->addr,
464 					reg_rd_wr->val);
465 			}
466 			break;
467 
468 		default:
469 			rval = EINVAL;
470 			break;
471 	}
472 
473 	return (rval);
474 }
475 
476 static int
477 qlnx_rd_wr_pci_config(qlnx_host_t *ha, qlnx_pcicfg_rd_wr_t *pci_cfg_rd_wr)
478 {
479 	int rval = 0;
480 
481 	switch (pci_cfg_rd_wr->cmd) {
482 
483 		case QLNX_PCICFG_READ:
484 			pci_cfg_rd_wr->val = pci_read_config(ha->pci_dev,
485 						pci_cfg_rd_wr->reg,
486 						pci_cfg_rd_wr->width);
487 			break;
488 
489 		case QLNX_PCICFG_WRITE:
490 			pci_write_config(ha->pci_dev, pci_cfg_rd_wr->reg,
491 				pci_cfg_rd_wr->val, pci_cfg_rd_wr->width);
492 			break;
493 
494 		default:
495 			rval = EINVAL;
496 			break;
497 	}
498 
499 	return (rval);
500 }
501 
502 static void
503 qlnx_mac_addr(qlnx_host_t *ha, qlnx_perm_mac_addr_t *mac_addr)
504 {
505 	bzero(mac_addr->addr, sizeof(mac_addr->addr));
506 	snprintf(mac_addr->addr, sizeof(mac_addr->addr),
507 		"%02x:%02x:%02x:%02x:%02x:%02x",
508 		ha->primary_mac[0], ha->primary_mac[1], ha->primary_mac[2],
509 		ha->primary_mac[3], ha->primary_mac[4], ha->primary_mac[5]);
510 
511 	return;
512 }
513 
514 static int
515 qlnx_get_regs(qlnx_host_t *ha, qlnx_get_regs_t *regs)
516 {
517 	int		i;
518 	int		rval = 0;
519 	uint32_t	dwords = 0;
520 	uint8_t		*outb;
521 
522 	regs->reg_buf_len = 0;
523 	outb = regs->reg_buf;
524 
525 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
526 
527 		rval = qlnx_grc_dump(ha, &dwords, i);
528 
529 		if (rval)
530 			break;
531 
532 		regs->reg_buf_len += (dwords << 2);
533 
534 		rval = copyout(ha->grcdump[i], outb, ha->grcdump_size[i]);
535 
536 		if (rval)
537 			break;
538 
539 		ha->grcdump_dwords[i] = 0;
540 		outb += regs->reg_buf_len;
541 	}
542 
543 	ha->grcdump_taken = 0;
544 
545 	return (rval);
546 }
547 
/*
 * qlnx_drv_info - QLNX_DRV_INFO ioctl: report driver name and version,
 * MFW and storm firmware versions, flash and register-dump sizes, and
 * the PCI bus location of the adapter.  Always returns 0.
 */
static int
qlnx_drv_info(qlnx_host_t *ha, qlnx_drvinfo_t *drv_info)
{
	int i;
	/* name/version strings defined elsewhere in the driver */
	extern char qlnx_name_str[];
	extern char qlnx_ver_str[];

	bzero(drv_info, sizeof(qlnx_drvinfo_t));

	snprintf(drv_info->drv_name, sizeof(drv_info->drv_name), "%s",
		qlnx_name_str);
	snprintf(drv_info->drv_version, sizeof(drv_info->drv_version), "%s",
		qlnx_ver_str);
	snprintf(drv_info->mfw_version, sizeof(drv_info->mfw_version), "%s",
		ha->mfw_ver);
	snprintf(drv_info->stormfw_version, sizeof(drv_info->stormfw_version),
		"%s", ha->stormfw_ver);

	drv_info->eeprom_dump_len = ha->flash_size;

	/* a full register dump covers the GRC dump of every hw function */
	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		drv_info->reg_dump_len += ha->grcdump_size[i];
	}

	snprintf(drv_info->bus_info, sizeof(drv_info->bus_info),
		"%d:%d:%d", pci_get_bus(ha->pci_dev),
		pci_get_slot(ha->pci_dev), ha->pci_func);

	return (0);
}
578 
579 static int
580 qlnx_dev_settings(qlnx_host_t *ha, qlnx_dev_setting_t *dev_info)
581 {
582 	struct ecore_hwfn *p_hwfn;
583 	struct qlnx_link_output if_link;
584 
585 	p_hwfn = &ha->cdev.hwfns[0];
586 
587 	qlnx_fill_link(p_hwfn, &if_link);
588 
589 	dev_info->supported = if_link.supported_caps;
590 	dev_info->advertising = if_link.advertised_caps;
591 	dev_info->speed = if_link.speed;
592 	dev_info->duplex = if_link.duplex;
593 	dev_info->port = ha->pci_func & 0x1;
594 	dev_info->autoneg = if_link.autoneg;
595 
596 	return (0);
597 }
598 
/*
 * qlnx_write_nvram - copy a user-supplied buffer into the kernel and
 * issue an MCP NVRAM write ('cmd' is ECORE_NVM_WRITE_NVRAM or
 * ECORE_PUT_FILE_DATA) at nvram->offset.
 *
 * Returns EINVAL for a missing/empty user buffer, a copyin() error, or
 * the ecore_mcp_nvm_write() status.
 */
static int
qlnx_write_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram, uint32_t cmd)
{
	uint8_t *buf;
	int ret = 0;

	if ((nvram->data == NULL) || (nvram->data_len == 0))
		return (EINVAL);

	/*
	 * NOTE(review): qlnx_zalloc() result is not NULL-checked here;
	 * presumably it sleeps (M_WAITOK) and cannot fail — confirm.
	 */
	buf = qlnx_zalloc(nvram->data_len);

	ret = copyin(nvram->data, buf, nvram->data_len);

	QL_DPRINT9(ha,
		(ha->pci_dev, "%s: issue cmd = 0x%x data = %p "
		" data_len = 0x%x ret = 0x%x exit\n", __func__,
		cmd, nvram->data, nvram->data_len, ret));

	if (ret == 0) {
		ret = ecore_mcp_nvm_write(&ha->cdev, cmd,
			nvram->offset, buf, nvram->data_len);
	}

	QL_DPRINT9(ha,
		(ha->pci_dev, "%s: cmd = 0x%x data = %p "
		" data_len = 0x%x resp = 0x%x ret = 0x%x exit\n",
		__func__, cmd, nvram->data, nvram->data_len,
		ha->cdev.mcp_nvm_resp, ret));

	free(buf, M_QLNXBUF);

	return (ret);
}
632 
633 static int
634 qlnx_read_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
635 {
636 	uint8_t *buf;
637 	int ret = 0;
638 
639 	if ((nvram->data == NULL) || (nvram->data_len == 0))
640 		return (EINVAL);
641 
642 	buf = qlnx_zalloc(nvram->data_len);
643 
644 	ret = ecore_mcp_nvm_read(&ha->cdev, nvram->offset, buf,
645 		nvram->data_len);
646 
647 	QL_DPRINT9(ha, (ha->pci_dev, "%s: data = %p data_len = 0x%x "
648 		" resp = 0x%x ret = 0x%x exit\n", __func__,
649 		nvram->data, nvram->data_len,
650 		ha->cdev.mcp_nvm_resp, ret));
651 
652 	if (ret == 0) {
653 		ret = copyout(buf, nvram->data, nvram->data_len);
654 	}
655 
656 	free(buf, M_QLNXBUF);
657 
658 	return (ret);
659 }
660 
661 static int
662 qlnx_get_nvram_resp(qlnx_host_t *ha, qlnx_nvram_t *nvram)
663 {
664 	uint8_t *buf;
665 	int ret = 0;
666 
667 	if ((nvram->data == NULL) || (nvram->data_len == 0))
668 		return (EINVAL);
669 
670 	buf = qlnx_zalloc(nvram->data_len);
671 
672 
673 	ret = ecore_mcp_nvm_resp(&ha->cdev, buf);
674 
675 	QL_DPRINT9(ha, (ha->pci_dev, "%s: data = %p data_len = 0x%x "
676 		" resp = 0x%x ret = 0x%x exit\n", __func__,
677 		nvram->data, nvram->data_len,
678 		ha->cdev.mcp_nvm_resp, ret));
679 
680 	if (ret == 0) {
681 		ret = copyout(buf, nvram->data, nvram->data_len);
682 	}
683 
684 	free(buf, M_QLNXBUF);
685 
686 	return (ret);
687 }
688 
/*
 * qlnx_nvram - QLNX_NVRAM ioctl dispatcher: routes the sub-command in
 * nvram->cmd to the appropriate NVRAM read/write/management helper.
 * Returns 0 on success, EINVAL for an unknown sub-command, or the
 * error from the selected helper / ecore call.
 */
static int
qlnx_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
{
	int ret = 0;

	switch (nvram->cmd) {

	case QLNX_NVRAM_CMD_WRITE_NVRAM:
		ret = qlnx_write_nvram(ha, nvram, ECORE_NVM_WRITE_NVRAM);
		break;

	case QLNX_NVRAM_CMD_PUT_FILE_DATA:
		ret = qlnx_write_nvram(ha, nvram, ECORE_PUT_FILE_DATA);
		break;

	case QLNX_NVRAM_CMD_READ_NVRAM:
		ret = qlnx_read_nvram(ha, nvram);
		break;

	case QLNX_NVRAM_CMD_SET_SECURE_MODE:
		/* nvram->offset carries the command argument here */
		ret = ecore_mcp_nvm_set_secure_mode(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, (ha->pci_dev,
			"%s: QLNX_NVRAM_CMD_SET_SECURE_MODE "
			" resp = 0x%x ret = 0x%x exit\n", __func__,
			ha->cdev.mcp_nvm_resp, ret));
		break;

	case QLNX_NVRAM_CMD_DEL_FILE:
		ret = ecore_mcp_nvm_del_file(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, (ha->pci_dev,
			"%s: QLNX_NVRAM_CMD_DEL_FILE "
			" resp = 0x%x ret = 0x%x exit\n", __func__,
			ha->cdev.mcp_nvm_resp, ret));
		break;

	case QLNX_NVRAM_CMD_PUT_FILE_BEGIN:
		ret = ecore_mcp_nvm_put_file_begin(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, (ha->pci_dev,
			"%s: QLNX_NVRAM_CMD_PUT_FILE_BEGIN "
			" resp = 0x%x ret = 0x%x exit\n", __func__,
			ha->cdev.mcp_nvm_resp, ret));
		break;

	case QLNX_NVRAM_CMD_GET_NVRAM_RESP:
		ret = qlnx_get_nvram_resp(ha, nvram);
		break;

	default:
		ret = EINVAL;
		break;
	}

	return (ret);
}
746 
747 static void
748 qlnx_storm_stats(qlnx_host_t *ha, qlnx_storm_stats_dump_t *s_stats)
749 {
750 	int i;
751 	int index;
752 	int ret;
753 	int stats_copied = 0;
754 
755 	s_stats->num_hwfns = ha->cdev.num_hwfns;
756 
757 //	if (ha->storm_stats_index < QLNX_STORM_STATS_SAMPLES_PER_HWFN)
758 //		return;
759 
760 	s_stats->num_samples = ha->storm_stats_index;
761 
762 	for (i = 0; i < ha->cdev.num_hwfns; i++) {
763 
764 		index = (QLNX_STORM_STATS_SAMPLES_PER_HWFN * i);
765 
766 		if (s_stats->buffer[i]) {
767 
768 			ret = copyout(&ha->storm_stats[index],
769 					s_stats->buffer[i],
770 					QLNX_STORM_STATS_BYTES_PER_HWFN);
771 			if (ret) {
772 				printf("%s [%d]: failed\n", __func__, i);
773 			}
774 
775 			if (s_stats->num_samples ==
776 				QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
777 
778 				bzero((void *)&ha->storm_stats[i],
779 					QLNX_STORM_STATS_BYTES_PER_HWFN);
780 
781 				stats_copied = 1;
782 			}
783 		}
784 	}
785 
786 	if (stats_copied)
787 		ha->storm_stats_index = 0;
788 
789 	return;
790 }
791 
792 
/*
 * qlnx_eioctl - ioctl entry point for the qlnx control device.
 * 'data' points at a kernel copy of the per-command argument structure
 * (copied in/out by the cdev framework based on the ioctl encoding).
 * Dispatches to the helper matching 'cmd' and returns 0 or an errno /
 * helper status value.
 */
static int
qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td)
{
	qlnx_host_t	*ha;
	int		rval = 0;
	struct ifnet	*ifp;
	qlnx_trace_t	*trace;
	int		i;

	/* the softc was stashed in si_drv1 by qlnx_make_cdev() */
	if ((ha = (qlnx_host_t *)dev->si_drv1) == NULL)
		return ENXIO;

	ifp = ha->ifp;	/* currently unused beyond this assignment */

	switch (cmd) {

	case QLNX_GRC_DUMP_SIZE:
		qlnx_get_grc_dump_size(ha, (qlnx_grcdump_t *)data);
		break;

	case QLNX_GRC_DUMP:
		rval = qlnx_get_grc_dump(ha, (qlnx_grcdump_t *)data);
		break;

	case QLNX_IDLE_CHK_SIZE:
		qlnx_get_idle_chk_size(ha, (qlnx_idle_chk_t *)data);
		break;

	case QLNX_IDLE_CHK:
		rval = qlnx_get_idle_chk(ha, (qlnx_idle_chk_t *)data);
		break;

	case QLNX_DRV_INFO:
		rval = qlnx_drv_info(ha, (qlnx_drvinfo_t *)data);
		break;

	case QLNX_DEV_SETTING:
		rval = qlnx_dev_settings(ha, (qlnx_dev_setting_t *)data);
		break;

	case QLNX_GET_REGS:
		rval = qlnx_get_regs(ha, (qlnx_get_regs_t *)data);
		break;

	case QLNX_NVRAM:
		rval = qlnx_nvram(ha, (qlnx_nvram_t *)data);
		break;

	case QLNX_RD_WR_REG:
		rval = qlnx_reg_rd_wr(ha, (qlnx_reg_rd_wr_t *)data);
		break;

	case QLNX_RD_WR_PCICFG:
		rval = qlnx_rd_wr_pci_config(ha, (qlnx_pcicfg_rd_wr_t *)data);
		break;

	case QLNX_MAC_ADDR:
		qlnx_mac_addr(ha, (qlnx_perm_mac_addr_t *)data);
		break;

	case QLNX_STORM_STATS:
		qlnx_storm_stats(ha, (qlnx_storm_stats_dump_t *)data);
		break;

	case QLNX_TRACE_SIZE:
		qlnx_get_trace_size(ha, (qlnx_trace_t *)data);
		break;

	case QLNX_TRACE:
		trace = (qlnx_trace_t *)data;

		/* capture each hwfn's trace only when the caller supplied a
		 * command, a buffer, and a non-zero size for that hwfn */
		for (i = 0; i < ha->cdev.num_hwfns; i++) {

			if (trace->size[i] && trace->cmd && trace->buffer[i])
				rval = qlnx_get_trace(ha, i, trace);

			if (rval)
				break;
		}
		break;

	default:
		rval = EINVAL;
		break;
	}

	return (rval);
}
882 
883