/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qlnx_ioctl.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qlnx_os.h"
#include "bcm_osal.h"

#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dbg_fw_funcs.h"

#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_ver.h"
#include <sys/smp.h>

static int qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
		struct thread *td);

static struct cdevsw qlnx_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = qlnx_eioctl,
	.d_name = "qlnxioctl",
};

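/*
 * qlnx_make_cdev() creates a per-port character device, named after the
 * network interface, through which the diagnostic ioctls below are issued.
 * The qlnx_host_t pointer is stashed in si_drv1 for use by qlnx_eioctl().
 */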
int
qlnx_make_cdev(qlnx_host_t *ha)
{
	ha->ioctl_dev = make_dev(&qlnx_cdevsw,
				ha->ifp->if_dunit,
				UID_ROOT,
				GID_WHEEL,
				0600,
				"%s",
				if_name(ha->ifp));

	if (ha->ioctl_dev == NULL)
		return (-1);

	ha->ioctl_dev->si_drv1 = ha;

	return (0);
}

void
qlnx_del_cdev(qlnx_host_t *ha)
{
	if (ha->ioctl_dev != NULL)
		destroy_dev(ha->ioctl_dev);
	return;
}

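/*
 * qlnx_grc_dump() captures the GRC dump for one hw function into the
 * preallocated ha->grcdump[] buffer, unless a dump is already cached
 * (ha->grcdump_dwords[] non-zero), in which case the cached size is returned.
 */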
int
qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
{
	int rval = EINVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	if (ha->grcdump_dwords[hwfn_index]) {
		/* the grcdump is already available */
		*num_dumped_dwords = ha->grcdump_dwords[hwfn_index];
		return (0);
	}

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
		return (rval);
	}

	if ((rval = ecore_dbg_grc_dump(p_hwfn, p_ptt,
			ha->grcdump[hwfn_index],
			(ha->grcdump_size[hwfn_index] >> 2),
			num_dumped_dwords)) == DBG_STATUS_OK) {
		rval = 0;
		ha->grcdump_taken = 1;
	} else
		QL_DPRINT1(ha, "ecore_dbg_grc_dump failed [%d, 0x%x]\n",
			   hwfn_index, rval);

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}

static void
qlnx_get_grc_dump_size(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
{
	int i;

	grcdump->pci_func = ha->pci_func;

	for (i = 0; i < ha->cdev.num_hwfns; i++)
		grcdump->grcdump_size[i] = ha->grcdump_size[i];

	return;
}

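/*
 * qlnx_get_grc_dump() services QLNX_GRC_DUMP: for every hw function it
 * validates the user-supplied buffer, captures (or reuses) the GRC dump and
 * copies it out, then clears the cached dump state.
 */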
static int
qlnx_get_grc_dump(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
{
	int		i;
	int		rval = 0;
	uint32_t	dwords = 0;

	grcdump->pci_func = ha->pci_func;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {

		if ((ha->grcdump[i] == NULL) || (grcdump->grcdump[i] == NULL) ||
			(grcdump->grcdump_size[i] < ha->grcdump_size[i]))
			return (EINVAL);

		rval = qlnx_grc_dump(ha, &dwords, i);

		if (rval)
			break;

		grcdump->grcdump_dwords[i] = dwords;

		QL_DPRINT1(ha, "grcdump_dwords[%d] = 0x%x\n", i, dwords);

		rval = copyout(ha->grcdump[i], grcdump->grcdump[i],
				ha->grcdump_size[i]);

		if (rval)
			break;

		ha->grcdump_dwords[i] = 0;
	}

	ha->grcdump_taken = 0;

	return (rval);
}

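/*
 * qlnx_idle_chk() captures an idle-check dump for one hw function into the
 * preallocated ha->idle_chk[] buffer, reusing a cached dump when one exists.
 */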
int
qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
{
	int rval = EINVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	if (ha->idle_chk_dwords[hwfn_index]) {
		/* the idle check is already available */
		*num_dumped_dwords = ha->idle_chk_dwords[hwfn_index];
		return (0);
	}

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
		return (rval);
	}

	if ((rval = ecore_dbg_idle_chk_dump(p_hwfn, p_ptt,
			ha->idle_chk[hwfn_index],
			(ha->idle_chk_size[hwfn_index] >> 2),
			num_dumped_dwords)) == DBG_STATUS_OK) {
		rval = 0;
		ha->idle_chk_taken = 1;
	} else
		QL_DPRINT1(ha, "ecore_dbg_idle_chk_dump failed [%d, 0x%x]\n",
			   hwfn_index, rval);

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}

static void
qlnx_get_idle_chk_size(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
{
	int i;

	idle_chk->pci_func = ha->pci_func;

	for (i = 0; i < ha->cdev.num_hwfns; i++)
		idle_chk->idle_chk_size[i] = ha->idle_chk_size[i];

	return;
}

static int
qlnx_get_idle_chk(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
{
	int		i;
	int		rval = 0;
	uint32_t	dwords = 0;

	idle_chk->pci_func = ha->pci_func;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {

		if ((ha->idle_chk[i] == NULL) ||
				(idle_chk->idle_chk[i] == NULL) ||
				(idle_chk->idle_chk_size[i] <
					ha->idle_chk_size[i]))
			return (EINVAL);

		rval = qlnx_idle_chk(ha, &dwords, i);

		if (rval)
			break;

		idle_chk->idle_chk_dwords[i] = dwords;

		QL_DPRINT1(ha, "idle_chk_dwords[%d] = 0x%x\n", i, dwords);

		rval = copyout(ha->idle_chk[i], idle_chk->idle_chk[i],
				ha->idle_chk_size[i]);

		if (rval)
			break;

		ha->idle_chk_dwords[i] = 0;
	}
	ha->idle_chk_taken = 0;

	return (rval);
}

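/*
 * qlnx_get_trace_cmd_size() returns, in bytes, the buffer size required for
 * the requested trace/dump type (MCP trace, register/IGU FIFO, protection
 * override or firmware asserts) on a given hw function, or 0 on failure.
 */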
static uint32_t
qlnx_get_trace_cmd_size(qlnx_host_t *ha, int hwfn_index, uint16_t cmd)
{
	int rval = -1;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	uint32_t num_dwords = 0;

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, "ecore_ptt_acquire [%d, 0x%x] failed\n",
			   hwfn_index, cmd);
		return (0);
	}

	switch (cmd) {

	case QLNX_MCP_TRACE:
		rval = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_REG_FIFO:
		rval = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_IGU_FIFO:
		rval = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_PROTECTION_OVERRIDE:
		rval = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_FW_ASSERTS:
		rval = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;
	}

	if (rval != DBG_STATUS_OK) {
		QL_DPRINT1(ha, "cmd = 0x%x failed [0x%x]\n", cmd, rval);
		num_dwords = 0;
	}

	ecore_ptt_release(p_hwfn, p_ptt);

	return ((num_dwords * sizeof (uint32_t)));
}

static void
qlnx_get_trace_size(qlnx_host_t *ha, qlnx_trace_t *trace)
{
	int i;

	trace->pci_func = ha->pci_func;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		trace->size[i] = qlnx_get_trace_cmd_size(ha, i, trace->cmd);
	}

	return;
}

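/*
 * qlnx_get_trace() captures the requested trace/dump type for one hw function
 * into a temporary kernel buffer and copies the result out to the user buffer
 * supplied in the qlnx_trace_t request.
 */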
static int
qlnx_get_trace(qlnx_host_t *ha, int hwfn_index, qlnx_trace_t *trace)
{
	int rval = -1;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	uint32_t num_dwords = 0;
	void *buffer;

	buffer = qlnx_zalloc(trace->size[hwfn_index]);
	if (buffer == NULL) {
		QL_DPRINT1(ha, "qlnx_zalloc [%d, 0x%x] failed\n",
			   hwfn_index, trace->cmd);
		return (ENXIO);
	}
	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, "ecore_ptt_acquire [%d, 0x%x] failed\n",
			   hwfn_index, trace->cmd);
		/* don't leak the dump buffer on the error path */
		free(buffer, M_QLNXBUF);
		return (ENXIO);
	}

	switch (trace->cmd) {

	case QLNX_MCP_TRACE:
		rval = ecore_dbg_mcp_trace_dump(p_hwfn, p_ptt,
				buffer, (trace->size[hwfn_index] >> 2),
				&num_dwords);
		break;

	case QLNX_REG_FIFO:
		rval = ecore_dbg_reg_fifo_dump(p_hwfn, p_ptt,
				buffer, (trace->size[hwfn_index] >> 2),
				&num_dwords);
		break;

	case QLNX_IGU_FIFO:
		rval = ecore_dbg_igu_fifo_dump(p_hwfn, p_ptt,
				buffer, (trace->size[hwfn_index] >> 2),
				&num_dwords);
		break;

	case QLNX_PROTECTION_OVERRIDE:
		rval = ecore_dbg_protection_override_dump(p_hwfn, p_ptt,
				buffer, (trace->size[hwfn_index] >> 2),
				&num_dwords);
		break;

	case QLNX_FW_ASSERTS:
		rval = ecore_dbg_fw_asserts_dump(p_hwfn, p_ptt,
				buffer, (trace->size[hwfn_index] >> 2),
				&num_dwords);
		break;
	}

	if (rval != DBG_STATUS_OK) {
		QL_DPRINT1(ha, "cmd = 0x%x failed [0x%x]\n", trace->cmd, rval);
		num_dwords = 0;
	}

	ecore_ptt_release(p_hwfn, p_ptt);

	trace->dwords[hwfn_index] = num_dwords;

	if (num_dwords) {
		rval = copyout(buffer, trace->buffer[hwfn_index],
				(num_dwords << 2));
	}

	/* free the temporary dump buffer on every exit path */
	free(buffer, M_QLNXBUF);

	return (rval);
}

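/*
 * qlnx_reg_rd_wr() performs a direct 32-bit register read or write on the
 * selected hw function; only QLNX_REG_ACCESS_DIRECT accesses are handled.
 */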
static int
qlnx_reg_rd_wr(qlnx_host_t *ha, qlnx_reg_rd_wr_t *reg_rd_wr)
{
	int			rval = 0;
	struct ecore_hwfn	*p_hwfn;

	if (reg_rd_wr->hwfn_index >= QLNX_MAX_HW_FUNCS) {
		return (EINVAL);
	}

	p_hwfn = &ha->cdev.hwfns[reg_rd_wr->hwfn_index];

	switch (reg_rd_wr->cmd) {

		case QLNX_REG_READ_CMD:
			if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
				reg_rd_wr->val = qlnx_reg_rd32(p_hwfn,
							reg_rd_wr->addr);
			}
			break;

		case QLNX_REG_WRITE_CMD:
			if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
				qlnx_reg_wr32(p_hwfn, reg_rd_wr->addr,
					reg_rd_wr->val);
			}
			break;

		default:
			rval = EINVAL;
			break;
	}

	return (rval);
}

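/*
 * qlnx_rd_wr_pci_config() reads or writes a PCI configuration-space register
 * of the adapter at the requested offset and access width.
 */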
static int
qlnx_rd_wr_pci_config(qlnx_host_t *ha, qlnx_pcicfg_rd_wr_t *pci_cfg_rd_wr)
{
	int rval = 0;

	switch (pci_cfg_rd_wr->cmd) {

		case QLNX_PCICFG_READ:
			pci_cfg_rd_wr->val = pci_read_config(ha->pci_dev,
						pci_cfg_rd_wr->reg,
						pci_cfg_rd_wr->width);
			break;

		case QLNX_PCICFG_WRITE:
			pci_write_config(ha->pci_dev, pci_cfg_rd_wr->reg,
				pci_cfg_rd_wr->val, pci_cfg_rd_wr->width);
			break;

		default:
			rval = EINVAL;
			break;
	}

	return (rval);
}

static void
qlnx_mac_addr(qlnx_host_t *ha, qlnx_perm_mac_addr_t *mac_addr)
{
	bzero(mac_addr->addr, sizeof(mac_addr->addr));
	snprintf(mac_addr->addr, sizeof(mac_addr->addr),
		"%02x:%02x:%02x:%02x:%02x:%02x",
		ha->primary_mac[0], ha->primary_mac[1], ha->primary_mac[2],
		ha->primary_mac[3], ha->primary_mac[4], ha->primary_mac[5]);

	return;
}

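/*
 * qlnx_get_regs() services QLNX_GET_REGS by concatenating the GRC dumps of
 * all hw functions into the single user buffer at regs->reg_buf and reporting
 * the total length in regs->reg_buf_len.
 */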
static int
qlnx_get_regs(qlnx_host_t *ha, qlnx_get_regs_t *regs)
{
	int		i;
	int		rval = 0;
	uint32_t	dwords = 0;
	uint8_t		*outb;

	regs->reg_buf_len = 0;
	outb = regs->reg_buf;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {

		rval = qlnx_grc_dump(ha, &dwords, i);

		if (rval)
			break;

		regs->reg_buf_len += (dwords << 2);

		rval = copyout(ha->grcdump[i], outb, ha->grcdump_size[i]);

		if (rval)
			break;

		ha->grcdump_dwords[i] = 0;

		/* advance by this hwfn's dump length, not the running total */
		outb += (dwords << 2);
	}

	ha->grcdump_taken = 0;

	return (rval);
}

extern char qlnx_name_str[];
extern char qlnx_ver_str[];

static int
qlnx_drv_info(qlnx_host_t *ha, qlnx_drvinfo_t *drv_info)
{
	int i;

	bzero(drv_info, sizeof(qlnx_drvinfo_t));

	snprintf(drv_info->drv_name, sizeof(drv_info->drv_name), "%s",
		qlnx_name_str);
	snprintf(drv_info->drv_version, sizeof(drv_info->drv_version), "%s",
		qlnx_ver_str);
	snprintf(drv_info->mfw_version, sizeof(drv_info->mfw_version), "%s",
		ha->mfw_ver);
	snprintf(drv_info->stormfw_version, sizeof(drv_info->stormfw_version),
		"%s", ha->stormfw_ver);

	drv_info->eeprom_dump_len = ha->flash_size;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		drv_info->reg_dump_len += ha->grcdump_size[i];
	}

	snprintf(drv_info->bus_info, sizeof(drv_info->bus_info),
		"%d:%d:%d", pci_get_bus(ha->pci_dev),
		pci_get_slot(ha->pci_dev), ha->pci_func);

	return (0);
}

static int
qlnx_dev_settings(qlnx_host_t *ha, qlnx_dev_setting_t *dev_info)
{
	struct ecore_hwfn *p_hwfn;
	struct qlnx_link_output if_link;

	p_hwfn = &ha->cdev.hwfns[0];

	qlnx_fill_link(p_hwfn, &if_link);

	dev_info->supported = if_link.supported_caps;
	dev_info->advertising = if_link.advertised_caps;
	dev_info->speed = if_link.speed;
	dev_info->duplex = if_link.duplex;
	dev_info->port = ha->pci_func & 0x1;
	dev_info->autoneg = if_link.autoneg;

	return (0);
}

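/*
 * NVRAM helpers: qlnx_write_nvram() copies user data into a temporary kernel
 * buffer and hands it to the management firmware, qlnx_read_nvram() does the
 * reverse, and qlnx_get_nvram_resp() returns the latest MCP NVM response.
 */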
static int
qlnx_write_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram, uint32_t cmd)
{
	uint8_t *buf;
	int ret = 0;

	if ((nvram->data == NULL) || (nvram->data_len == 0))
		return (EINVAL);

	buf = qlnx_zalloc(nvram->data_len);

	ret = copyin(nvram->data, buf, nvram->data_len);

	QL_DPRINT9(ha, "issue cmd = 0x%x data = %p"
		" data_len = 0x%x ret = 0x%x exit\n",
		cmd, nvram->data, nvram->data_len, ret);

	if (ret == 0) {
		ret = ecore_mcp_nvm_write(&ha->cdev, cmd,
			nvram->offset, buf, nvram->data_len);
	}

	QL_DPRINT9(ha, "cmd = 0x%x data = %p"
		" data_len = 0x%x resp = 0x%x ret = 0x%x exit\n",
		cmd, nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);

	free(buf, M_QLNXBUF);

	return (ret);
}

static int
qlnx_read_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
{
	uint8_t *buf;
	int ret = 0;

	if ((nvram->data == NULL) || (nvram->data_len == 0))
		return (EINVAL);

	buf = qlnx_zalloc(nvram->data_len);

	ret = ecore_mcp_nvm_read(&ha->cdev, nvram->offset, buf,
		nvram->data_len);

	QL_DPRINT9(ha, "data = %p data_len = 0x%x"
		" resp = 0x%x ret = 0x%x exit\n",
		nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);

	if (ret == 0) {
		ret = copyout(buf, nvram->data, nvram->data_len);
	}

	free(buf, M_QLNXBUF);

	return (ret);
}

static int
qlnx_get_nvram_resp(qlnx_host_t *ha, qlnx_nvram_t *nvram)
{
	uint8_t *buf;
	int ret = 0;

	if ((nvram->data == NULL) || (nvram->data_len == 0))
		return (EINVAL);

	buf = qlnx_zalloc(nvram->data_len);

	ret = ecore_mcp_nvm_resp(&ha->cdev, buf);

	QL_DPRINT9(ha, "data = %p data_len = 0x%x"
		" resp = 0x%x ret = 0x%x exit\n",
		nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);

	if (ret == 0) {
		ret = copyout(buf, nvram->data, nvram->data_len);
	}

	free(buf, M_QLNXBUF);

	return (ret);
}

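/*
 * qlnx_nvram() dispatches the QLNX_NVRAM sub-commands (write, read, put-file,
 * delete, secure mode, response query) to the helpers above or directly to
 * the ecore MCP NVM interface.
 */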
static int
qlnx_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
{
	int ret = 0;

	switch (nvram->cmd) {

	case QLNX_NVRAM_CMD_WRITE_NVRAM:
		ret = qlnx_write_nvram(ha, nvram, ECORE_NVM_WRITE_NVRAM);
		break;

	case QLNX_NVRAM_CMD_PUT_FILE_DATA:
		ret = qlnx_write_nvram(ha, nvram, ECORE_PUT_FILE_DATA);
		break;

	case QLNX_NVRAM_CMD_READ_NVRAM:
		ret = qlnx_read_nvram(ha, nvram);
		break;

	case QLNX_NVRAM_CMD_SET_SECURE_MODE:
		ret = ecore_mcp_nvm_set_secure_mode(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_SET_SECURE_MODE"
			" resp = 0x%x ret = 0x%x exit\n",
			ha->cdev.mcp_nvm_resp, ret);
		break;

	case QLNX_NVRAM_CMD_DEL_FILE:
		ret = ecore_mcp_nvm_del_file(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_DEL_FILE"
			" resp = 0x%x ret = 0x%x exit\n",
			ha->cdev.mcp_nvm_resp, ret);
		break;

	case QLNX_NVRAM_CMD_PUT_FILE_BEGIN:
		ret = ecore_mcp_nvm_put_file_begin(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_PUT_FILE_BEGIN"
			" resp = 0x%x ret = 0x%x exit\n",
			ha->cdev.mcp_nvm_resp, ret);
		break;

	case QLNX_NVRAM_CMD_GET_NVRAM_RESP:
		ret = qlnx_get_nvram_resp(ha, nvram);
		break;

	default:
		ret = EINVAL;
		break;
	}

	return (ret);
}

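/*
 * qlnx_storm_stats() copies the storm statistics samples accumulated for each
 * hw function out to the per-hwfn user buffers and, once a full sample set
 * has been returned, resets the sample index so collection starts over.
 */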
static void
qlnx_storm_stats(qlnx_host_t *ha, qlnx_storm_stats_dump_t *s_stats)
{
	int i;
	int index;
	int ret;
	int stats_copied = 0;

	s_stats->num_hwfns = ha->cdev.num_hwfns;

//	if (ha->storm_stats_index < QLNX_STORM_STATS_SAMPLES_PER_HWFN)
//		return;

	s_stats->num_samples = ha->storm_stats_index;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {

		index = (QLNX_STORM_STATS_SAMPLES_PER_HWFN * i);

		if (s_stats->buffer[i]) {

			ret = copyout(&ha->storm_stats[index],
					s_stats->buffer[i],
					QLNX_STORM_STATS_BYTES_PER_HWFN);
			if (ret) {
				printf("%s [%d]: failed\n", __func__, i);
			}

			if (s_stats->num_samples ==
				QLNX_STORM_STATS_SAMPLES_PER_HWFN) {

				/* clear the samples that were just copied out */
				bzero((void *)&ha->storm_stats[index],
					QLNX_STORM_STATS_BYTES_PER_HWFN);

				stats_copied = 1;
			}
		}
	}

	if (stats_copied)
		ha->storm_stats_index = 0;

	return;
}

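/*
 * qlnx_eioctl() is the ioctl entry point of the diagnostic character device;
 * it dispatches the QLNX_* commands defined in qlnx_ioctl.h to the handlers
 * above.
 *
 * Illustrative userland usage (a minimal sketch only, not part of the driver;
 * the device path and error handling are assumptions -- the node is created
 * with the name returned by if_name() on the port's ifnet):
 *
 *	qlnx_grcdump_t gd;
 *	int i, fd;
 *
 *	fd = open("/dev/<ifname>", O_RDWR);	// e.g. the port's ifnet name
 *	memset(&gd, 0, sizeof(gd));
 *	ioctl(fd, QLNX_GRC_DUMP_SIZE, &gd);	// query per-hwfn dump sizes
 *	for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
 *		if (gd.grcdump_size[i])
 *			gd.grcdump[i] = malloc(gd.grcdump_size[i]);
 *	}
 *	ioctl(fd, QLNX_GRC_DUMP, &gd);		// capture and copy out the dumps
 *	close(fd);
 */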
static int
qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td)
{
	qlnx_host_t	*ha;
	int		rval = 0;
	struct ifnet	*ifp;
	qlnx_trace_t	*trace;
	int		i;

	if ((ha = (qlnx_host_t *)dev->si_drv1) == NULL)
		return (ENXIO);

	ifp = ha->ifp;

	switch (cmd) {

	case QLNX_GRC_DUMP_SIZE:
		qlnx_get_grc_dump_size(ha, (qlnx_grcdump_t *)data);
		break;

	case QLNX_GRC_DUMP:
		rval = qlnx_get_grc_dump(ha, (qlnx_grcdump_t *)data);
		break;

	case QLNX_IDLE_CHK_SIZE:
		qlnx_get_idle_chk_size(ha, (qlnx_idle_chk_t *)data);
		break;

	case QLNX_IDLE_CHK:
		rval = qlnx_get_idle_chk(ha, (qlnx_idle_chk_t *)data);
		break;

	case QLNX_DRV_INFO:
		rval = qlnx_drv_info(ha, (qlnx_drvinfo_t *)data);
		break;

	case QLNX_DEV_SETTING:
		rval = qlnx_dev_settings(ha, (qlnx_dev_setting_t *)data);
		break;

	case QLNX_GET_REGS:
		rval = qlnx_get_regs(ha, (qlnx_get_regs_t *)data);
		break;

	case QLNX_NVRAM:
		rval = qlnx_nvram(ha, (qlnx_nvram_t *)data);
		break;

	case QLNX_RD_WR_REG:
		rval = qlnx_reg_rd_wr(ha, (qlnx_reg_rd_wr_t *)data);
		break;

	case QLNX_RD_WR_PCICFG:
		rval = qlnx_rd_wr_pci_config(ha, (qlnx_pcicfg_rd_wr_t *)data);
		break;

	case QLNX_MAC_ADDR:
		qlnx_mac_addr(ha, (qlnx_perm_mac_addr_t *)data);
		break;

	case QLNX_STORM_STATS:
		qlnx_storm_stats(ha, (qlnx_storm_stats_dump_t *)data);
		break;

	case QLNX_TRACE_SIZE:
		qlnx_get_trace_size(ha, (qlnx_trace_t *)data);
		break;

	case QLNX_TRACE:
		trace = (qlnx_trace_t *)data;

		for (i = 0; i < ha->cdev.num_hwfns; i++) {

			if (trace->size[i] && trace->cmd && trace->buffer[i])
				rval = qlnx_get_trace(ha, i, trace);

			if (rval)
				break;
		}
		break;

	default:
		rval = EINVAL;
		break;
	}

	return (rval);
}