1 /*
2 * Copyright (c) 2017-2018 Cavium, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 /*
29 * File: qlnx_ioctl.c
30 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31 */
32
33 #include <sys/cdefs.h>
34 #include "qlnx_os.h"
35 #include "bcm_osal.h"
36
37 #include "reg_addr.h"
38 #include "ecore_gtt_reg_addr.h"
39 #include "ecore.h"
40 #include "ecore_chain.h"
41 #include "ecore_status.h"
42 #include "ecore_hw.h"
43 #include "ecore_rt_defs.h"
44 #include "ecore_init_ops.h"
45 #include "ecore_int.h"
46 #include "ecore_cxt.h"
47 #include "ecore_spq.h"
48 #include "ecore_init_fw_funcs.h"
49 #include "ecore_sp_commands.h"
50 #include "ecore_dev_api.h"
51 #include "ecore_l2_api.h"
52 #include "ecore_mcp.h"
53 #include "ecore_hw_defs.h"
54 #include "mcp_public.h"
55 #include "ecore_iro.h"
56 #include "nvm_cfg.h"
57 #include "ecore_dev_api.h"
58 #include "ecore_dbg_fw_funcs.h"
59 #include "ecore_dcbx_api.h"
60
61 #include "qlnx_ioctl.h"
62 #include "qlnx_def.h"
63 #include "qlnx_ver.h"
64 #include <sys/smp.h>
65
/* Forward declaration; the ioctl dispatcher is defined at the bottom of this file. */
static int qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td);

/*
 * Character-device switch for the per-interface control node created in
 * qlnx_make_cdev(); only an ioctl entry point is exposed.
 */
static struct cdevsw qlnx_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = qlnx_eioctl,
	.d_name = "qlnxioctl",
};
74
75 int
qlnx_make_cdev(qlnx_host_t * ha)76 qlnx_make_cdev(qlnx_host_t *ha)
77 {
78 ha->ioctl_dev = make_dev(&qlnx_cdevsw,
79 if_getdunit(ha->ifp),
80 UID_ROOT,
81 GID_WHEEL,
82 0600,
83 "%s",
84 if_name(ha->ifp));
85
86 if (ha->ioctl_dev == NULL)
87 return (-1);
88
89 ha->ioctl_dev->si_drv1 = ha;
90
91 return (0);
92 }
93
94 void
qlnx_del_cdev(qlnx_host_t * ha)95 qlnx_del_cdev(qlnx_host_t *ha)
96 {
97 if (ha->ioctl_dev != NULL)
98 destroy_dev(ha->ioctl_dev);
99 return;
100 }
101
/*
 * Capture a GRC (global register context) dump for one hw function into
 * the preallocated ha->grcdump[hwfn_index] buffer.
 *
 * On success returns 0 and sets *num_dumped_dwords to the dump length in
 * dwords.  If a dump is already cached for this hwfn, the cached length is
 * returned without touching the hardware.  On failure the ecore DBG status
 * code is returned (non-zero, but NOT an errno value).
 */
int
qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
{
	int rval = EINVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	if (ha->grcdump_dwords[hwfn_index]) {
		/* the grcdump is already available */
		*num_dumped_dwords = ha->grcdump_dwords[hwfn_index];
		return (0);
	}

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	/* acquire a register-access window; NULL means none are free */
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
		return (rval);
	}

	/* buffer size is kept in bytes; the ecore API takes dwords */
	if ((rval = ecore_dbg_grc_dump(p_hwfn, p_ptt,
			ha->grcdump[hwfn_index],
			(ha->grcdump_size[hwfn_index] >> 2),
			num_dumped_dwords)) == DBG_STATUS_OK) {
		rval = 0;
		ha->grcdump_taken = 1;
	} else
		QL_DPRINT1(ha,"ecore_dbg_grc_dump failed [%d, 0x%x]\n",
			hwfn_index, rval);

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}
139
140 static void
qlnx_get_grc_dump_size(qlnx_host_t * ha,qlnx_grcdump_t * grcdump)141 qlnx_get_grc_dump_size(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
142 {
143 int i;
144
145 grcdump->pci_func = ha->pci_func;
146
147 for (i = 0; i < ha->cdev.num_hwfns; i++)
148 grcdump->grcdump_size[i] = ha->grcdump_size[i];
149
150 return;
151 }
152
153 static int
qlnx_get_grc_dump(qlnx_host_t * ha,qlnx_grcdump_t * grcdump)154 qlnx_get_grc_dump(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
155 {
156 int i;
157 int rval = 0;
158 uint32_t dwords = 0;
159
160 grcdump->pci_func = ha->pci_func;
161
162 for (i = 0; i < ha->cdev.num_hwfns; i++) {
163 if ((ha->grcdump[i] == NULL) || (grcdump->grcdump[i] == NULL) ||
164 (grcdump->grcdump_size[i] < ha->grcdump_size[i]))
165 return (EINVAL);
166
167 rval = qlnx_grc_dump(ha, &dwords, i);
168
169 if (rval)
170 break;
171
172 grcdump->grcdump_dwords[i] = dwords;
173
174 QL_DPRINT1(ha,"grcdump_dwords[%d] = 0x%x\n", i, dwords);
175
176 rval = copyout(ha->grcdump[i], grcdump->grcdump[i],
177 ha->grcdump_size[i]);
178
179 if (rval)
180 break;
181
182 ha->grcdump_dwords[i] = 0;
183 }
184
185 ha->grcdump_taken = 0;
186
187 return (rval);
188 }
189
/*
 * Run (or reuse) the firmware idle-check dump for one hw function into the
 * preallocated ha->idle_chk[hwfn_index] buffer.
 *
 * On success returns 0 and sets *num_dumped_dwords to the dump length in
 * dwords; if a dump is already cached, its length is returned without
 * touching the hardware.  On failure the ecore DBG status code is returned
 * (non-zero, but NOT an errno value).
 */
int
qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
{
	int rval = EINVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	if (ha->idle_chk_dwords[hwfn_index]) {
		/* the idle check is already available */
		*num_dumped_dwords = ha->idle_chk_dwords[hwfn_index];
		return (0);
	}

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	/* acquire a register-access window; NULL means none are free */
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
		return (rval);
	}

	/* buffer size is kept in bytes; the ecore API takes dwords */
	if ((rval = ecore_dbg_idle_chk_dump(p_hwfn, p_ptt,
			ha->idle_chk[hwfn_index],
			(ha->idle_chk_size[hwfn_index] >> 2),
			num_dumped_dwords)) == DBG_STATUS_OK) {
		rval = 0;
		ha->idle_chk_taken = 1;
	} else
		QL_DPRINT1(ha,"ecore_dbg_idle_chk_dump failed [%d, 0x%x]\n",
			hwfn_index, rval);

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}
227
228 static void
qlnx_get_idle_chk_size(qlnx_host_t * ha,qlnx_idle_chk_t * idle_chk)229 qlnx_get_idle_chk_size(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
230 {
231 int i;
232
233 idle_chk->pci_func = ha->pci_func;
234
235 for (i = 0; i < ha->cdev.num_hwfns; i++)
236 idle_chk->idle_chk_size[i] = ha->idle_chk_size[i];
237
238 return;
239 }
240
/*
 * QLNX_IDLE_CHK handler: take (or reuse) the idle-check dump per hw
 * function and copy each one out to the user-supplied buffers.
 *
 * Returns 0 on success, EINVAL if any buffer is missing or too small,
 * or the copyout()/dump status code on failure.  The cached-dump state
 * is cleared before returning.
 */
static int
qlnx_get_idle_chk(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
{
	int i;
	int rval = 0;
	uint32_t dwords = 0;

	idle_chk->pci_func = ha->pci_func;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		/* both kernel and user buffers must exist and be large enough */
		if ((ha->idle_chk[i] == NULL) ||
			(idle_chk->idle_chk[i] == NULL) ||
			(idle_chk->idle_chk_size[i] <
				ha->idle_chk_size[i]))
			return (EINVAL);

		rval = qlnx_idle_chk(ha, &dwords, i);

		if (rval)
			break;

		idle_chk->idle_chk_dwords[i] = dwords;

		QL_DPRINT1(ha,"idle_chk_dwords[%d] = 0x%x\n", i, dwords);

		rval = copyout(ha->idle_chk[i], idle_chk->idle_chk[i],
				ha->idle_chk_size[i]);

		if (rval)
			break;

		/* mark the cached dump as consumed */
		ha->idle_chk_dwords[i] = 0;
	}
	ha->idle_chk_taken = 0;

	return (rval);
}
278
/*
 * Query the buffer size (in bytes) required to hold the dump produced by
 * the given trace command on one hw function.  Returns 0 on any failure
 * (no free ptt window, unknown command, or a DBG-status error).
 */
static uint32_t
qlnx_get_trace_cmd_size(qlnx_host_t *ha, int hwfn_index, uint16_t cmd)
{
	int rval = -1;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	uint32_t num_dwords = 0;

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	/* acquire a register-access window; NULL means none are free */
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, "ecore_ptt_acquire [%d, 0x%x]failed\n",
			hwfn_index, cmd);
		return (0);
	}

	/* an unknown cmd leaves rval = -1, reported as a failure below */
	switch (cmd) {
	case QLNX_MCP_TRACE:
		rval = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_REG_FIFO:
		rval = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_IGU_FIFO:
		rval = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_PROTECTION_OVERRIDE:
		rval = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_FW_ASSERTS:
		rval = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;
	}

	if (rval != DBG_STATUS_OK) {
		QL_DPRINT1(ha,"cmd = 0x%x failed [0x%x]\n", cmd, rval);
		num_dwords = 0;
	}

	ecore_ptt_release(p_hwfn, p_ptt);

	/* convert dwords to bytes for the caller */
	return ((num_dwords * sizeof (uint32_t)));
}
332
333 static void
qlnx_get_trace_size(qlnx_host_t * ha,qlnx_trace_t * trace)334 qlnx_get_trace_size(qlnx_host_t *ha, qlnx_trace_t *trace)
335 {
336 int i;
337
338 trace->pci_func = ha->pci_func;
339
340 for (i = 0; i < ha->cdev.num_hwfns; i++) {
341 trace->size[i] = qlnx_get_trace_cmd_size(ha, i, trace->cmd);
342 }
343
344 return;
345 }
346
347 static int
qlnx_get_trace(qlnx_host_t * ha,int hwfn_index,qlnx_trace_t * trace)348 qlnx_get_trace(qlnx_host_t *ha, int hwfn_index, qlnx_trace_t *trace)
349 {
350 int rval = -1;
351 struct ecore_hwfn *p_hwfn;
352 struct ecore_ptt *p_ptt;
353 uint32_t num_dwords = 0;
354 void *buffer;
355
356 buffer = qlnx_zalloc(trace->size[hwfn_index]);
357 if (buffer == NULL) {
358 QL_DPRINT1(ha,"qlnx_zalloc [%d, 0x%x]failed\n",
359 hwfn_index, trace->cmd);
360 return (ENXIO);
361 }
362 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
363
364 p_hwfn = &ha->cdev.hwfns[hwfn_index];
365 p_ptt = ecore_ptt_acquire(p_hwfn);
366
367 if (!p_ptt) {
368 QL_DPRINT1(ha, "ecore_ptt_acquire [%d, 0x%x]failed\n",
369 hwfn_index, trace->cmd);
370 return (ENXIO);
371 }
372
373 switch (trace->cmd) {
374 case QLNX_MCP_TRACE:
375 rval = ecore_dbg_mcp_trace_dump(p_hwfn, p_ptt,
376 buffer, (trace->size[hwfn_index] >> 2),
377 &num_dwords);
378 break;
379
380 case QLNX_REG_FIFO:
381 rval = ecore_dbg_reg_fifo_dump(p_hwfn, p_ptt,
382 buffer, (trace->size[hwfn_index] >> 2),
383 &num_dwords);
384 break;
385
386 case QLNX_IGU_FIFO:
387 rval = ecore_dbg_igu_fifo_dump(p_hwfn, p_ptt,
388 buffer, (trace->size[hwfn_index] >> 2),
389 &num_dwords);
390 break;
391
392 case QLNX_PROTECTION_OVERRIDE:
393 rval = ecore_dbg_protection_override_dump(p_hwfn, p_ptt,
394 buffer, (trace->size[hwfn_index] >> 2),
395 &num_dwords);
396 break;
397
398 case QLNX_FW_ASSERTS:
399 rval = ecore_dbg_fw_asserts_dump(p_hwfn, p_ptt,
400 buffer, (trace->size[hwfn_index] >> 2),
401 &num_dwords);
402 break;
403 }
404
405 if (rval != DBG_STATUS_OK) {
406 QL_DPRINT1(ha,"cmd = 0x%x failed [0x%x]\n", trace->cmd, rval);
407 num_dwords = 0;
408 }
409
410 ecore_ptt_release(p_hwfn, p_ptt);
411
412 trace->dwords[hwfn_index] = num_dwords;
413
414 if (num_dwords) {
415 rval = copyout(buffer, trace->buffer[hwfn_index],
416 (num_dwords << 2));
417 }
418
419 return (rval);
420 }
421
422 static int
qlnx_reg_rd_wr(qlnx_host_t * ha,qlnx_reg_rd_wr_t * reg_rd_wr)423 qlnx_reg_rd_wr(qlnx_host_t *ha, qlnx_reg_rd_wr_t *reg_rd_wr)
424 {
425 int rval = 0;
426 struct ecore_hwfn *p_hwfn;
427
428 if (reg_rd_wr->hwfn_index >= QLNX_MAX_HW_FUNCS) {
429 return (EINVAL);
430 }
431
432 p_hwfn = &ha->cdev.hwfns[reg_rd_wr->hwfn_index];
433
434 switch (reg_rd_wr->cmd) {
435 case QLNX_REG_READ_CMD:
436 if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
437 reg_rd_wr->val = qlnx_reg_rd32(p_hwfn,
438 reg_rd_wr->addr);
439 }
440 break;
441
442 case QLNX_REG_WRITE_CMD:
443 if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
444 qlnx_reg_wr32(p_hwfn, reg_rd_wr->addr,
445 reg_rd_wr->val);
446 }
447 break;
448
449 default:
450 rval = EINVAL;
451 break;
452 }
453
454 return (rval);
455 }
456
457 static int
qlnx_rd_wr_pci_config(qlnx_host_t * ha,qlnx_pcicfg_rd_wr_t * pci_cfg_rd_wr)458 qlnx_rd_wr_pci_config(qlnx_host_t *ha, qlnx_pcicfg_rd_wr_t *pci_cfg_rd_wr)
459 {
460 int rval = 0;
461
462 switch (pci_cfg_rd_wr->cmd) {
463 case QLNX_PCICFG_READ:
464 pci_cfg_rd_wr->val = pci_read_config(ha->pci_dev,
465 pci_cfg_rd_wr->reg,
466 pci_cfg_rd_wr->width);
467 break;
468
469 case QLNX_PCICFG_WRITE:
470 pci_write_config(ha->pci_dev, pci_cfg_rd_wr->reg,
471 pci_cfg_rd_wr->val, pci_cfg_rd_wr->width);
472 break;
473
474 default:
475 rval = EINVAL;
476 break;
477 }
478
479 return (rval);
480 }
481
482 static void
qlnx_mac_addr(qlnx_host_t * ha,qlnx_perm_mac_addr_t * mac_addr)483 qlnx_mac_addr(qlnx_host_t *ha, qlnx_perm_mac_addr_t *mac_addr)
484 {
485 bzero(mac_addr->addr, sizeof(mac_addr->addr));
486 snprintf(mac_addr->addr, sizeof(mac_addr->addr),
487 "%02x:%02x:%02x:%02x:%02x:%02x",
488 ha->primary_mac[0], ha->primary_mac[1], ha->primary_mac[2],
489 ha->primary_mac[3], ha->primary_mac[4], ha->primary_mac[5]);
490
491 return;
492 }
493
494 static int
qlnx_get_regs(qlnx_host_t * ha,qlnx_get_regs_t * regs)495 qlnx_get_regs(qlnx_host_t *ha, qlnx_get_regs_t *regs)
496 {
497 int i;
498 int rval = 0;
499 uint32_t dwords = 0;
500 uint8_t *outb;
501
502 regs->reg_buf_len = 0;
503 outb = regs->reg_buf;
504
505 for (i = 0; i < ha->cdev.num_hwfns; i++) {
506 rval = qlnx_grc_dump(ha, &dwords, i);
507
508 if (rval)
509 break;
510
511 regs->reg_buf_len += (dwords << 2);
512
513 rval = copyout(ha->grcdump[i], outb, ha->grcdump_size[i]);
514
515 if (rval)
516 break;
517
518 ha->grcdump_dwords[i] = 0;
519 outb += regs->reg_buf_len;
520 }
521
522 ha->grcdump_taken = 0;
523
524 return (rval);
525 }
526
527 extern char qlnx_name_str[];
528 extern char qlnx_ver_str[];
529
530 static int
qlnx_drv_info(qlnx_host_t * ha,qlnx_drvinfo_t * drv_info)531 qlnx_drv_info(qlnx_host_t *ha, qlnx_drvinfo_t *drv_info)
532 {
533 int i;
534
535 bzero(drv_info, sizeof(qlnx_drvinfo_t));
536
537 snprintf(drv_info->drv_name, sizeof(drv_info->drv_name), "%s",
538 qlnx_name_str);
539 snprintf(drv_info->drv_version, sizeof(drv_info->drv_version), "%s",
540 qlnx_ver_str);
541 snprintf(drv_info->mfw_version, sizeof(drv_info->mfw_version), "%s",
542 ha->mfw_ver);
543 snprintf(drv_info->stormfw_version, sizeof(drv_info->stormfw_version),
544 "%s", ha->stormfw_ver);
545
546 drv_info->eeprom_dump_len = ha->flash_size;
547
548 for (i = 0; i < ha->cdev.num_hwfns; i++) {
549 drv_info->reg_dump_len += ha->grcdump_size[i];
550 }
551
552 snprintf(drv_info->bus_info, sizeof(drv_info->bus_info),
553 "%d:%d:%d", pci_get_bus(ha->pci_dev),
554 pci_get_slot(ha->pci_dev), ha->pci_func);
555
556 return (0);
557 }
558
559 static int
qlnx_dev_settings(qlnx_host_t * ha,qlnx_dev_setting_t * dev_info)560 qlnx_dev_settings(qlnx_host_t *ha, qlnx_dev_setting_t *dev_info)
561 {
562 struct ecore_hwfn *p_hwfn;
563 struct qlnx_link_output if_link;
564
565 p_hwfn = &ha->cdev.hwfns[0];
566
567 qlnx_fill_link(ha, p_hwfn, &if_link);
568
569 dev_info->supported = if_link.supported_caps;
570 dev_info->advertising = if_link.advertised_caps;
571 dev_info->speed = if_link.speed;
572 dev_info->duplex = if_link.duplex;
573 dev_info->port = ha->pci_func & 0x1;
574 dev_info->autoneg = if_link.autoneg;
575
576 return (0);
577 }
578
/*
 * Copy a user-supplied buffer into the kernel and issue an NVRAM/MFW
 * write with the given command (ECORE_NVM_WRITE_NVRAM or
 * ECORE_PUT_FILE_DATA).
 *
 * Returns 0 on success, EINVAL for missing data, or the copyin()/
 * ecore_mcp_nvm_write() error code.
 * NOTE(review): qlnx_zalloc() is assumed to never return NULL here
 * (it is used unchecked throughout this file) — confirm its allocation
 * flags.
 */
static int
qlnx_write_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram, uint32_t cmd)
{
	uint8_t *buf;
	int ret = 0;

	if ((nvram->data == NULL) || (nvram->data_len == 0))
		return (EINVAL);

	buf = qlnx_zalloc(nvram->data_len);

	ret = copyin(nvram->data, buf, nvram->data_len);

	QL_DPRINT9(ha, "issue cmd = 0x%x data = %p \
		data_len = 0x%x ret = 0x%x exit\n",
		cmd, nvram->data, nvram->data_len, ret);

	if (ret == 0) {
		ret = ecore_mcp_nvm_write(&ha->cdev, cmd,
			nvram->offset, buf, nvram->data_len);
	}

	QL_DPRINT9(ha, "cmd = 0x%x data = %p \
		data_len = 0x%x resp = 0x%x ret = 0x%x exit\n",
		cmd, nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);

	free(buf, M_QLNXBUF);

	return (ret);
}
609
/*
 * Read nvram->data_len bytes from NVRAM at nvram->offset into a temporary
 * kernel buffer and copy the result out to nvram->data.
 *
 * Returns 0 on success, EINVAL for missing data, or the
 * ecore_mcp_nvm_read()/copyout() error code.
 */
static int
qlnx_read_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
{
	uint8_t *buf;
	int ret = 0;

	if ((nvram->data == NULL) || (nvram->data_len == 0))
		return (EINVAL);

	buf = qlnx_zalloc(nvram->data_len);

	ret = ecore_mcp_nvm_read(&ha->cdev, nvram->offset, buf,
		nvram->data_len);

	QL_DPRINT9(ha, " data = %p data_len = 0x%x \
		resp = 0x%x ret = 0x%x exit\n",
		nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);

	if (ret == 0) {
		ret = copyout(buf, nvram->data, nvram->data_len);
	}

	free(buf, M_QLNXBUF);

	return (ret);
}
636
/*
 * Retrieve the management firmware's response to the last NVRAM command
 * into a temporary kernel buffer and copy it out to nvram->data.
 *
 * Returns 0 on success, EINVAL for missing data, or the
 * ecore_mcp_nvm_resp()/copyout() error code.
 */
static int
qlnx_get_nvram_resp(qlnx_host_t *ha, qlnx_nvram_t *nvram)
{
	uint8_t *buf;
	int ret = 0;

	if ((nvram->data == NULL) || (nvram->data_len == 0))
		return (EINVAL);

	buf = qlnx_zalloc(nvram->data_len);

	ret = ecore_mcp_nvm_resp(&ha->cdev, buf);

	QL_DPRINT9(ha, "data = %p data_len = 0x%x \
		resp = 0x%x ret = 0x%x exit\n",
		nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);

	if (ret == 0) {
		ret = copyout(buf, nvram->data, nvram->data_len);
	}

	free(buf, M_QLNXBUF);

	return (ret);
}
662
/*
 * QLNX_NVRAM handler: dispatch an NVRAM sub-command (read, write,
 * put-file, delete, secure mode, response query) to the appropriate
 * helper or ecore MCP call.
 *
 * Returns 0 on success, EINVAL for an unknown sub-command, or the
 * error code of the underlying operation.
 */
static int
qlnx_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
{
	int ret = 0;

	switch (nvram->cmd) {
	case QLNX_NVRAM_CMD_WRITE_NVRAM:
		ret = qlnx_write_nvram(ha, nvram, ECORE_NVM_WRITE_NVRAM);
		break;

	case QLNX_NVRAM_CMD_PUT_FILE_DATA:
		ret = qlnx_write_nvram(ha, nvram, ECORE_PUT_FILE_DATA);
		break;

	case QLNX_NVRAM_CMD_READ_NVRAM:
		ret = qlnx_read_nvram(ha, nvram);
		break;

	case QLNX_NVRAM_CMD_SET_SECURE_MODE:
		ret = ecore_mcp_nvm_set_secure_mode(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_SET_SECURE_MODE \
			resp = 0x%x ret = 0x%x exit\n",
			ha->cdev.mcp_nvm_resp, ret);
		break;

	case QLNX_NVRAM_CMD_DEL_FILE:
		ret = ecore_mcp_nvm_del_file(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_DEL_FILE \
			resp = 0x%x ret = 0x%x exit\n",
			ha->cdev.mcp_nvm_resp, ret);
		break;

	case QLNX_NVRAM_CMD_PUT_FILE_BEGIN:
		ret = ecore_mcp_nvm_put_file_begin(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_PUT_FILE_BEGIN \
			resp = 0x%x ret = 0x%x exit\n",
			ha->cdev.mcp_nvm_resp, ret);
		break;

	case QLNX_NVRAM_CMD_GET_NVRAM_RESP:
		ret = qlnx_get_nvram_resp(ha, nvram);
		break;

	default:
		ret = EINVAL;
		break;
	}

	return (ret);
}
716
717 static void
qlnx_storm_stats(qlnx_host_t * ha,qlnx_storm_stats_dump_t * s_stats)718 qlnx_storm_stats(qlnx_host_t *ha, qlnx_storm_stats_dump_t *s_stats)
719 {
720 int i;
721 int index;
722 int ret;
723 int stats_copied = 0;
724
725 s_stats->num_hwfns = ha->cdev.num_hwfns;
726
727 // if (ha->storm_stats_index < QLNX_STORM_STATS_SAMPLES_PER_HWFN)
728 // return;
729
730 s_stats->num_samples = ha->storm_stats_index;
731
732 for (i = 0; i < ha->cdev.num_hwfns; i++) {
733 index = (QLNX_STORM_STATS_SAMPLES_PER_HWFN * i);
734
735 if (s_stats->buffer[i]) {
736 ret = copyout(&ha->storm_stats[index],
737 s_stats->buffer[i],
738 QLNX_STORM_STATS_BYTES_PER_HWFN);
739 if (ret) {
740 printf("%s [%d]: failed\n", __func__, i);
741 }
742
743 if (s_stats->num_samples ==
744 QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
745 bzero((void *)&ha->storm_stats[i],
746 QLNX_STORM_STATS_BYTES_PER_HWFN);
747
748 stats_copied = 1;
749 }
750 }
751 }
752
753 if (stats_copied)
754 ha->storm_stats_index = 0;
755
756 return;
757 }
758
759 #ifdef QLNX_USER_LLDP
760
/*
 * Enable or disable the management firmware's LLDP agent on the nearest
 * bridge, building the mandatory Chassis-ID and Port-ID TLVs from the
 * LLDP MAC address reported by the MFW.
 *
 * Returns 0/ECORE_SUCCESS on success, -1 or the ecore error on failure.
 */
static int
qlnx_lldp_configure(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt, uint32_t enable)
{
	int ret = 0;
	uint8_t lldp_mac[6] = {0};
	struct ecore_lldp_config_params lldp_params;
	struct ecore_lldp_sys_tlvs tlv_params;

	ret = ecore_mcp_get_lldp_mac(p_hwfn, p_ptt, lldp_mac);

	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: ecore_mcp_get_lldp_mac failed\n", __func__);
		return (-1);
	}

	bzero(&lldp_params, sizeof(struct ecore_lldp_config_params));
	bzero(&tlv_params, sizeof(struct ecore_lldp_sys_tlvs));

	lldp_params.agent = ECORE_LLDP_NEAREST_BRIDGE;
	lldp_params.tx_interval = 30; //Default value used as suggested by MFW
	lldp_params.tx_hold = 4; //Default value used as suggested by MFW
	lldp_params.tx_credit = 5; //Default value used as suggested by MFW
	lldp_params.rx_enable = enable ? 1 : 0;
	lldp_params.tx_enable = enable ? 1 : 0;

	/*
	 * Pack the Chassis-ID TLV header into the first dword:
	 * byte 0 = TLV type (shifted into the 7-bit type field),
	 * byte 1 = TLV length (subtype octet + MAC length),
	 * byte 2 = subtype (MAC address), bytes 3.. = the MAC itself.
	 */
	lldp_params.chassis_id_tlv[0] = 0;
	lldp_params.chassis_id_tlv[0] |= (QLNX_LLDP_TYPE_CHASSIS_ID << 1);
	lldp_params.chassis_id_tlv[0] |=
		((QLNX_LLDP_CHASSIS_ID_SUBTYPE_OCTETS +
			QLNX_LLDP_CHASSIS_ID_MAC_ADDR_LEN) << 8);
	lldp_params.chassis_id_tlv[0] |= (QLNX_LLDP_CHASSIS_ID_SUBTYPE_MAC << 16);
	lldp_params.chassis_id_tlv[0] |= lldp_mac[0] << 24;
	lldp_params.chassis_id_tlv[1] = lldp_mac[1] | (lldp_mac[2] << 8) |
		(lldp_mac[3] << 16) | (lldp_mac[4] << 24);
	lldp_params.chassis_id_tlv[2] = lldp_mac[5];

	/* Port-ID TLV is packed with the same layout as the Chassis-ID TLV. */
	lldp_params.port_id_tlv[0] = 0;
	lldp_params.port_id_tlv[0] |= (QLNX_LLDP_TYPE_PORT_ID << 1);
	lldp_params.port_id_tlv[0] |=
		((QLNX_LLDP_PORT_ID_SUBTYPE_OCTETS +
			QLNX_LLDP_PORT_ID_MAC_ADDR_LEN) << 8);
	lldp_params.port_id_tlv[0] |= (QLNX_LLDP_PORT_ID_SUBTYPE_MAC << 16);
	lldp_params.port_id_tlv[0] |= lldp_mac[0] << 24;
	lldp_params.port_id_tlv[1] = lldp_mac[1] | (lldp_mac[2] << 8) |
		(lldp_mac[3] << 16) | (lldp_mac[4] << 24);
	lldp_params.port_id_tlv[2] = lldp_mac[5];

	ret = ecore_lldp_set_params(p_hwfn, p_ptt, &lldp_params);

	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: ecore_lldp_set_params failed\n", __func__);
		return (-1);
	}

	//If LLDP is disable then disable discard_mandatory_tlv flag
	if (!enable) {
		tlv_params.discard_mandatory_tlv = false;
		tlv_params.buf_size = 0;
		ret = ecore_lldp_set_system_tlvs(p_hwfn, p_ptt, &tlv_params);
	}

	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: ecore_lldp_set_system_tlvs failed\n", __func__);
	}

	return (ret);
}
832
833 static int
qlnx_register_default_lldp_tlvs(qlnx_host_t * ha,struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt)834 qlnx_register_default_lldp_tlvs(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn,
835 struct ecore_ptt *p_ptt)
836 {
837 int ret = 0;
838
839 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
840 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_CHASSIS_ID);
841 if (ret != ECORE_SUCCESS) {
842 device_printf(ha->pci_dev,
843 "%s: QLNX_LLDP_TYPE_CHASSIS_ID failed\n", __func__);
844 goto qlnx_register_default_lldp_tlvs_exit;
845 }
846
847 //register Port ID TLV
848 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
849 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_PORT_ID);
850 if (ret != ECORE_SUCCESS) {
851 device_printf(ha->pci_dev,
852 "%s: QLNX_LLDP_TYPE_PORT_ID failed\n", __func__);
853 goto qlnx_register_default_lldp_tlvs_exit;
854 }
855
856 //register TTL TLV
857 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
858 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_TTL);
859 if (ret != ECORE_SUCCESS) {
860 device_printf(ha->pci_dev,
861 "%s: QLNX_LLDP_TYPE_TTL failed\n", __func__);
862 goto qlnx_register_default_lldp_tlvs_exit;
863 }
864
865 //register Port Description TLV
866 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
867 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_PORT_DESC);
868 if (ret != ECORE_SUCCESS) {
869 device_printf(ha->pci_dev,
870 "%s: QLNX_LLDP_TYPE_PORT_DESC failed\n", __func__);
871 goto qlnx_register_default_lldp_tlvs_exit;
872 }
873
874 //register System Name TLV
875 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
876 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_NAME);
877 if (ret != ECORE_SUCCESS) {
878 device_printf(ha->pci_dev,
879 "%s: QLNX_LLDP_TYPE_SYS_NAME failed\n", __func__);
880 goto qlnx_register_default_lldp_tlvs_exit;
881 }
882
883 //register System Description TLV
884 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
885 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_DESC);
886 if (ret != ECORE_SUCCESS) {
887 device_printf(ha->pci_dev,
888 "%s: QLNX_LLDP_TYPE_SYS_DESC failed\n", __func__);
889 goto qlnx_register_default_lldp_tlvs_exit;
890 }
891
892 //register System Capabilities TLV
893 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
894 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_CAPS);
895 if (ret != ECORE_SUCCESS) {
896 device_printf(ha->pci_dev,
897 "%s: QLNX_LLDP_TYPE_SYS_CAPS failed\n", __func__);
898 goto qlnx_register_default_lldp_tlvs_exit;
899 }
900
901 //register Management Address TLV
902 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
903 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_MGMT_ADDR);
904 if (ret != ECORE_SUCCESS) {
905 device_printf(ha->pci_dev,
906 "%s: QLNX_LLDP_TYPE_MGMT_ADDR failed\n", __func__);
907 goto qlnx_register_default_lldp_tlvs_exit;
908 }
909
910 //register Organizationally Specific TLVs
911 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
912 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_ORG_SPECIFIC);
913 if (ret != ECORE_SUCCESS) {
914 device_printf(ha->pci_dev,
915 "%s: QLNX_LLDP_TYPE_ORG_SPECIFIC failed\n", __func__);
916 }
917
918 qlnx_register_default_lldp_tlvs_exit:
919 return (ret);
920 }
921
/*
 * QLNX_SET_LLDP_TLVS handler: restart the firmware LLDP agent with the
 * default TLV set and, optionally, apply caller-supplied system TLVs.
 *
 * Sequence: disable LLDP, re-register the default TLV types, re-enable
 * LLDP, then push the user's system TLVs (if lldp_tlvs is non-NULL).
 * Returns 0 on success, ENXIO if no ptt window is available, or the
 * first failing step's error code.
 */
int
qlnx_set_lldp_tlvx(qlnx_host_t *ha, qlnx_lldp_sys_tlvs_t *lldp_tlvs)
{
	int ret = 0;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	struct ecore_lldp_sys_tlvs tlv_params;

	/* all LLDP configuration goes through the first hw function */
	p_hwfn = &ha->cdev.hwfns[0];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		device_printf(ha->pci_dev,
			"%s: ecore_ptt_acquire failed\n", __func__);
		return (ENXIO);
	}

	ret = qlnx_lldp_configure(ha, p_hwfn, p_ptt, 0);

	if (ret) {
		device_printf(ha->pci_dev,
			"%s: qlnx_lldp_configure disable failed\n", __func__);
		goto qlnx_set_lldp_tlvx_exit;
	}

	ret = qlnx_register_default_lldp_tlvs(ha, p_hwfn, p_ptt);

	if (ret) {
		device_printf(ha->pci_dev,
			"%s: qlnx_register_default_lldp_tlvs failed\n",
			__func__);
		goto qlnx_set_lldp_tlvx_exit;
	}

	ret = qlnx_lldp_configure(ha, p_hwfn, p_ptt, 1);

	if (ret) {
		device_printf(ha->pci_dev,
			"%s: qlnx_lldp_configure enable failed\n", __func__);
		goto qlnx_set_lldp_tlvx_exit;
	}

	if (lldp_tlvs != NULL) {
		bzero(&tlv_params, sizeof(struct ecore_lldp_sys_tlvs));

		tlv_params.discard_mandatory_tlv =
			(lldp_tlvs->discard_mandatory_tlv ? true: false);
		tlv_params.buf_size = lldp_tlvs->buf_size;
		/*
		 * NOTE(review): buf_size comes from userland and is copied
		 * unchecked into tlv_params.buf — confirm it is bounded by
		 * sizeof(tlv_params.buf) at the ioctl layer.
		 */
		memcpy(tlv_params.buf, lldp_tlvs->buf, lldp_tlvs->buf_size);

		ret = ecore_lldp_set_system_tlvs(p_hwfn, p_ptt, &tlv_params);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: ecore_lldp_set_system_tlvs failed\n",
				__func__);
		}
	}
qlnx_set_lldp_tlvx_exit:

	ecore_ptt_release(p_hwfn, p_ptt);
	return (ret);
}
985
986 #endif /* #ifdef QLNX_USER_LLDP */
987
/*
 * ioctl entry point for the qlnx control device.  Recovers the softc from
 * si_drv1 and dispatches the command to the appropriate helper; the
 * kernel has already copied the argument structure into 'data'.
 *
 * Returns 0 on success, ENXIO if the device has no softc, EINVAL for an
 * unknown command, or the helper's error code.
 */
static int
qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td)
{
	qlnx_host_t *ha;
	int rval = 0;
	qlnx_trace_t *trace;
	int i;

	if ((ha = (qlnx_host_t *)dev->si_drv1) == NULL)
		return ENXIO;

	switch (cmd) {
	case QLNX_GRC_DUMP_SIZE:
		qlnx_get_grc_dump_size(ha, (qlnx_grcdump_t *)data);
		break;

	case QLNX_GRC_DUMP:
		rval = qlnx_get_grc_dump(ha, (qlnx_grcdump_t *)data);
		break;

	case QLNX_IDLE_CHK_SIZE:
		qlnx_get_idle_chk_size(ha, (qlnx_idle_chk_t *)data);
		break;

	case QLNX_IDLE_CHK:
		rval = qlnx_get_idle_chk(ha, (qlnx_idle_chk_t *)data);
		break;

	case QLNX_DRV_INFO:
		rval = qlnx_drv_info(ha, (qlnx_drvinfo_t *)data);
		break;

	case QLNX_DEV_SETTING:
		rval = qlnx_dev_settings(ha, (qlnx_dev_setting_t *)data);
		break;

	case QLNX_GET_REGS:
		rval = qlnx_get_regs(ha, (qlnx_get_regs_t *)data);
		break;

	case QLNX_NVRAM:
		rval = qlnx_nvram(ha, (qlnx_nvram_t *)data);
		break;

	case QLNX_RD_WR_REG:
		rval = qlnx_reg_rd_wr(ha, (qlnx_reg_rd_wr_t *)data);
		break;

	case QLNX_RD_WR_PCICFG:
		rval = qlnx_rd_wr_pci_config(ha, (qlnx_pcicfg_rd_wr_t *)data);
		break;

	case QLNX_MAC_ADDR:
		qlnx_mac_addr(ha, (qlnx_perm_mac_addr_t *)data);
		break;

	case QLNX_STORM_STATS:
		qlnx_storm_stats(ha, (qlnx_storm_stats_dump_t *)data);
		break;

	case QLNX_TRACE_SIZE:
		qlnx_get_trace_size(ha, (qlnx_trace_t *)data);
		break;

	case QLNX_TRACE:
		trace = (qlnx_trace_t *)data;

		/* dump each hwfn the caller provided a buffer for */
		for (i = 0; i < ha->cdev.num_hwfns; i++) {
			if (trace->size[i] && trace->cmd && trace->buffer[i])
				rval = qlnx_get_trace(ha, i, trace);

			if (rval)
				break;
		}
		break;

#ifdef QLNX_USER_LLDP
	case QLNX_SET_LLDP_TLVS:
		rval = qlnx_set_lldp_tlvx(ha, (qlnx_lldp_sys_tlvs_t *)data);
		break;
#endif /* #ifdef QLNX_USER_LLDP */

	default:
		rval = EINVAL;
		break;
	}

	return (rval);
}
1078