1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2013-2016 Qlogic Corporation
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*
31 * File: ql_hw.c
32 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33 * Content: Contains Hardware dependent functions
34 */
35
36 #include <sys/cdefs.h>
37 #include "ql_os.h"
38 #include "ql_hw.h"
39 #include "ql_def.h"
40 #include "ql_inline.h"
41 #include "ql_ver.h"
42 #include "ql_glbl.h"
43 #include "ql_dbg.h"
44 #include "ql_minidump.h"
45
46 /*
47 * Static Functions
48 */
49
50 static void qla_del_rcv_cntxt(qla_host_t *ha);
51 static int qla_init_rcv_cntxt(qla_host_t *ha);
52 static int qla_del_xmt_cntxt(qla_host_t *ha);
53 static int qla_init_xmt_cntxt(qla_host_t *ha);
54 static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
55 uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
56 static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
57 uint32_t num_intrs, uint32_t create);
58 static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
59 static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
60 int tenable, int rcv);
61 static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
62 static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);
63
64 static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
65 uint8_t *hdr);
66 static int qla_hw_add_all_mcast(qla_host_t *ha);
67 static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);
68
69 static int qla_init_nic_func(qla_host_t *ha);
70 static int qla_stop_nic_func(qla_host_t *ha);
71 static int qla_query_fw_dcbx_caps(qla_host_t *ha);
72 static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
73 static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
74 static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
75 static int qla_get_cam_search_mode(qla_host_t *ha);
76
77 static void ql_minidump_free(qla_host_t *ha);
78
79 #ifdef QL_DBG
80
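/*
 * qla_stop_pegs (debug only): halts the firmware PEG (protocol engine)
 * processors by storing 1 into each of the five PEG CRB registers through
 * ql_rdwr_indreg32(). Reached from the "peg_stop" sysctl below and used
 * for fault-injection / error-recovery testing.
 */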
81 static void
82 qla_stop_pegs(qla_host_t *ha)
83 {
84 uint32_t val = 1;
85
86 ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
87 ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
88 ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
89 ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
90 ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
91 device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
92 }
93
94 static int
95 qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
96 {
97 int err, ret = 0;
98 qla_host_t *ha;
99
100 err = sysctl_handle_int(oidp, &ret, 0, req);
101
102 if (err || !req->newptr)
103 return (err);
104
105 if (ret == 1) {
106 ha = (qla_host_t *)arg1;
107 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
108 qla_stop_pegs(ha);
109 QLA_UNLOCK(ha, __func__);
110 }
111 }
112
113 return err;
114 }
115 #endif /* #ifdef QL_DBG */
116
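/*
 * qla_validate_set_port_cfg_bit: sanity-checks a value written to the
 * "port_cfg" sysctl. Nibble 0 (DCBX enable) must be 0 or 1; nibble 1
 * (pause type) and nibble 2 (std-pause direction) must each be 0, 1 or 2.
 */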
117 static int
118 qla_validate_set_port_cfg_bit(uint32_t bits)
119 {
120 if ((bits & 0xF) > 1)
121 return (-1);
122
123 if (((bits >> 4) & 0xF) > 2)
124 return (-1);
125
126 if (((bits >> 8) & 0xF) > 2)
127 return (-1);
128
129 return (0);
130 }
131
132 static int
133 qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
134 {
135 int err, ret = 0;
136 qla_host_t *ha;
137 uint32_t cfg_bits;
138
139 err = sysctl_handle_int(oidp, &ret, 0, req);
140
141 if (err || !req->newptr)
142 return (err);
143
144 ha = (qla_host_t *)arg1;
145
146 if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) {
147 err = qla_get_port_config(ha, &cfg_bits);
148
149 if (err)
150 goto qla_sysctl_set_port_cfg_exit;
151
152 if (ret & 0x1) {
153 cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
154 } else {
155 cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
156 }
157
158 ret = ret >> 4;
159 cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;
160
161 if ((ret & 0xF) == 0) {
162 cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
163 } else if ((ret & 0xF) == 1){
164 cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
165 } else {
166 cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
167 }
168
169 ret = ret >> 4;
170 cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;
171
172 if (ret == 0) {
173 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
174 } else if (ret == 1){
175 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
176 } else {
177 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
178 }
179
180 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
181 err = qla_set_port_config(ha, cfg_bits);
182 QLA_UNLOCK(ha, __func__);
183 } else {
184 device_printf(ha->pci_dev, "%s: failed\n", __func__);
185 }
186 } else {
187 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
188 err = qla_get_port_config(ha, &cfg_bits);
189 QLA_UNLOCK(ha, __func__);
190 } else {
191 device_printf(ha->pci_dev, "%s: failed\n", __func__);
192 }
193 }
194
195 qla_sysctl_set_port_cfg_exit:
196 return err;
197 }
198
199 static int
200 qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
201 {
202 int err, ret = 0;
203 qla_host_t *ha;
204
205 err = sysctl_handle_int(oidp, &ret, 0, req);
206
207 if (err || !req->newptr)
208 return (err);
209
210 ha = (qla_host_t *)arg1;
211
212 if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
213 (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {
214 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
215 err = qla_set_cam_search_mode(ha, (uint32_t)ret);
216 QLA_UNLOCK(ha, __func__);
217 } else {
218 device_printf(ha->pci_dev, "%s: failed\n", __func__);
219 }
220
221 } else {
222 device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
223 }
224
225 return (err);
226 }
227
228 static int
229 qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
230 {
231 int err, ret = 0;
232 qla_host_t *ha;
233
234 err = sysctl_handle_int(oidp, &ret, 0, req);
235
236 if (err || !req->newptr)
237 return (err);
238
239 ha = (qla_host_t *)arg1;
240 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
241 err = qla_get_cam_search_mode(ha);
242 QLA_UNLOCK(ha, __func__);
243 } else {
244 device_printf(ha->pci_dev, "%s: failed\n", __func__);
245 }
246
247 return (err);
248 }
249
250 static void
251 qlnx_add_hw_mac_stats_sysctls(qla_host_t *ha)
252 {
253 struct sysctl_ctx_list *ctx;
254 struct sysctl_oid_list *children;
255 struct sysctl_oid *ctx_oid;
256
257 ctx = device_get_sysctl_ctx(ha->pci_dev);
258 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
259
260 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_mac",
261 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_mac");
262 children = SYSCTL_CHILDREN(ctx_oid);
263
264 SYSCTL_ADD_QUAD(ctx, children,
265 OID_AUTO, "xmt_frames",
266 CTLFLAG_RD, &ha->hw.mac.xmt_frames,
267 "xmt_frames");
268
269 SYSCTL_ADD_QUAD(ctx, children,
270 OID_AUTO, "xmt_bytes",
271 CTLFLAG_RD, &ha->hw.mac.xmt_bytes,
272 "xmt_frames");
273
274 SYSCTL_ADD_QUAD(ctx, children,
275 OID_AUTO, "xmt_mcast_pkts",
276 CTLFLAG_RD, &ha->hw.mac.xmt_mcast_pkts,
277 "xmt_mcast_pkts");
278
279 SYSCTL_ADD_QUAD(ctx, children,
280 OID_AUTO, "xmt_bcast_pkts",
281 CTLFLAG_RD, &ha->hw.mac.xmt_bcast_pkts,
282 "xmt_bcast_pkts");
283
284 SYSCTL_ADD_QUAD(ctx, children,
285 OID_AUTO, "xmt_pause_frames",
286 CTLFLAG_RD, &ha->hw.mac.xmt_pause_frames,
287 "xmt_pause_frames");
288
289 SYSCTL_ADD_QUAD(ctx, children,
290 OID_AUTO, "xmt_cntrl_pkts",
291 CTLFLAG_RD, &ha->hw.mac.xmt_cntrl_pkts,
292 "xmt_cntrl_pkts");
293
294 SYSCTL_ADD_QUAD(ctx, children,
295 OID_AUTO, "xmt_pkt_lt_64bytes",
296 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_64bytes,
297 "xmt_pkt_lt_64bytes");
298
299 SYSCTL_ADD_QUAD(ctx, children,
300 OID_AUTO, "xmt_pkt_lt_127bytes",
301 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_127bytes,
302 "xmt_pkt_lt_127bytes");
303
304 SYSCTL_ADD_QUAD(ctx, children,
305 OID_AUTO, "xmt_pkt_lt_255bytes",
306 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_255bytes,
307 "xmt_pkt_lt_255bytes");
308
309 SYSCTL_ADD_QUAD(ctx, children,
310 OID_AUTO, "xmt_pkt_lt_511bytes",
311 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_511bytes,
312 "xmt_pkt_lt_511bytes");
313
314 SYSCTL_ADD_QUAD(ctx, children,
315 OID_AUTO, "xmt_pkt_lt_1023bytes",
316 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1023bytes,
317 "xmt_pkt_lt_1023bytes");
318
319 SYSCTL_ADD_QUAD(ctx, children,
320 OID_AUTO, "xmt_pkt_lt_1518bytes",
321 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1518bytes,
322 "xmt_pkt_lt_1518bytes");
323
324 SYSCTL_ADD_QUAD(ctx, children,
325 OID_AUTO, "xmt_pkt_gt_1518bytes",
326 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_gt_1518bytes,
327 "xmt_pkt_gt_1518bytes");
328
329 SYSCTL_ADD_QUAD(ctx, children,
330 OID_AUTO, "rcv_frames",
331 CTLFLAG_RD, &ha->hw.mac.rcv_frames,
332 "rcv_frames");
333
334 SYSCTL_ADD_QUAD(ctx, children,
335 OID_AUTO, "rcv_bytes",
336 CTLFLAG_RD, &ha->hw.mac.rcv_bytes,
337 "rcv_bytes");
338
339 SYSCTL_ADD_QUAD(ctx, children,
340 OID_AUTO, "rcv_mcast_pkts",
341 CTLFLAG_RD, &ha->hw.mac.rcv_mcast_pkts,
342 "rcv_mcast_pkts");
343
344 SYSCTL_ADD_QUAD(ctx, children,
345 OID_AUTO, "rcv_bcast_pkts",
346 CTLFLAG_RD, &ha->hw.mac.rcv_bcast_pkts,
347 "rcv_bcast_pkts");
348
349 SYSCTL_ADD_QUAD(ctx, children,
350 OID_AUTO, "rcv_pause_frames",
351 CTLFLAG_RD, &ha->hw.mac.rcv_pause_frames,
352 "rcv_pause_frames");
353
354 SYSCTL_ADD_QUAD(ctx, children,
355 OID_AUTO, "rcv_cntrl_pkts",
356 CTLFLAG_RD, &ha->hw.mac.rcv_cntrl_pkts,
357 "rcv_cntrl_pkts");
358
359 SYSCTL_ADD_QUAD(ctx, children,
360 OID_AUTO, "rcv_pkt_lt_64bytes",
361 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_64bytes,
362 "rcv_pkt_lt_64bytes");
363
364 SYSCTL_ADD_QUAD(ctx, children,
365 OID_AUTO, "rcv_pkt_lt_127bytes",
366 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_127bytes,
367 "rcv_pkt_lt_127bytes");
368
369 SYSCTL_ADD_QUAD(ctx, children,
370 OID_AUTO, "rcv_pkt_lt_255bytes",
371 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_255bytes,
372 "rcv_pkt_lt_255bytes");
373
374 SYSCTL_ADD_QUAD(ctx, children,
375 OID_AUTO, "rcv_pkt_lt_511bytes",
376 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_511bytes,
377 "rcv_pkt_lt_511bytes");
378
379 SYSCTL_ADD_QUAD(ctx, children,
380 OID_AUTO, "rcv_pkt_lt_1023bytes",
381 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1023bytes,
382 "rcv_pkt_lt_1023bytes");
383
384 SYSCTL_ADD_QUAD(ctx, children,
385 OID_AUTO, "rcv_pkt_lt_1518bytes",
386 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1518bytes,
387 "rcv_pkt_lt_1518bytes");
388
389 SYSCTL_ADD_QUAD(ctx, children,
390 OID_AUTO, "rcv_pkt_gt_1518bytes",
391 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_gt_1518bytes,
392 "rcv_pkt_gt_1518bytes");
393
394 SYSCTL_ADD_QUAD(ctx, children,
395 OID_AUTO, "rcv_len_error",
396 CTLFLAG_RD, &ha->hw.mac.rcv_len_error,
397 "rcv_len_error");
398
399 SYSCTL_ADD_QUAD(ctx, children,
400 OID_AUTO, "rcv_len_small",
401 CTLFLAG_RD, &ha->hw.mac.rcv_len_small,
402 "rcv_len_small");
403
404 SYSCTL_ADD_QUAD(ctx, children,
405 OID_AUTO, "rcv_len_large",
406 CTLFLAG_RD, &ha->hw.mac.rcv_len_large,
407 "rcv_len_large");
408
409 SYSCTL_ADD_QUAD(ctx, children,
410 OID_AUTO, "rcv_jabber",
411 CTLFLAG_RD, &ha->hw.mac.rcv_jabber,
412 "rcv_jabber");
413
414 SYSCTL_ADD_QUAD(ctx, children,
415 OID_AUTO, "rcv_dropped",
416 CTLFLAG_RD, &ha->hw.mac.rcv_dropped,
417 "rcv_dropped");
418
419 SYSCTL_ADD_QUAD(ctx, children,
420 OID_AUTO, "fcs_error",
421 CTLFLAG_RD, &ha->hw.mac.fcs_error,
422 "fcs_error");
423
424 SYSCTL_ADD_QUAD(ctx, children,
425 OID_AUTO, "align_error",
426 CTLFLAG_RD, &ha->hw.mac.align_error,
427 "align_error");
428
429 SYSCTL_ADD_QUAD(ctx, children,
430 OID_AUTO, "eswitched_frames",
431 CTLFLAG_RD, &ha->hw.mac.eswitched_frames,
432 "eswitched_frames");
433
434 SYSCTL_ADD_QUAD(ctx, children,
435 OID_AUTO, "eswitched_bytes",
436 CTLFLAG_RD, &ha->hw.mac.eswitched_bytes,
437 "eswitched_bytes");
438
439 SYSCTL_ADD_QUAD(ctx, children,
440 OID_AUTO, "eswitched_mcast_frames",
441 CTLFLAG_RD, &ha->hw.mac.eswitched_mcast_frames,
442 "eswitched_mcast_frames");
443
444 SYSCTL_ADD_QUAD(ctx, children,
445 OID_AUTO, "eswitched_bcast_frames",
446 CTLFLAG_RD, &ha->hw.mac.eswitched_bcast_frames,
447 "eswitched_bcast_frames");
448
449 SYSCTL_ADD_QUAD(ctx, children,
450 OID_AUTO, "eswitched_ucast_frames",
451 CTLFLAG_RD, &ha->hw.mac.eswitched_ucast_frames,
452 "eswitched_ucast_frames");
453
454 SYSCTL_ADD_QUAD(ctx, children,
455 OID_AUTO, "eswitched_err_free_frames",
456 CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_frames,
457 "eswitched_err_free_frames");
458
459 SYSCTL_ADD_QUAD(ctx, children,
460 OID_AUTO, "eswitched_err_free_bytes",
461 CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_bytes,
462 "eswitched_err_free_bytes");
463
464 return;
465 }
466
467 static void
468 qlnx_add_hw_rcv_stats_sysctls(qla_host_t *ha)
469 {
470 struct sysctl_ctx_list *ctx;
471 struct sysctl_oid_list *children;
472 struct sysctl_oid *ctx_oid;
473
474 ctx = device_get_sysctl_ctx(ha->pci_dev);
475 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
476
477 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_rcv",
478 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_rcv");
479 children = SYSCTL_CHILDREN(ctx_oid);
480
481 SYSCTL_ADD_QUAD(ctx, children,
482 OID_AUTO, "total_bytes",
483 CTLFLAG_RD, &ha->hw.rcv.total_bytes,
484 "total_bytes");
485
486 SYSCTL_ADD_QUAD(ctx, children,
487 OID_AUTO, "total_pkts",
488 CTLFLAG_RD, &ha->hw.rcv.total_pkts,
489 "total_pkts");
490
491 SYSCTL_ADD_QUAD(ctx, children,
492 OID_AUTO, "lro_pkt_count",
493 CTLFLAG_RD, &ha->hw.rcv.lro_pkt_count,
494 "lro_pkt_count");
495
496 SYSCTL_ADD_QUAD(ctx, children,
497 OID_AUTO, "sw_pkt_count",
498 CTLFLAG_RD, &ha->hw.rcv.sw_pkt_count,
499 "sw_pkt_count");
500
501 SYSCTL_ADD_QUAD(ctx, children,
502 OID_AUTO, "ip_chksum_err",
503 CTLFLAG_RD, &ha->hw.rcv.ip_chksum_err,
504 "ip_chksum_err");
505
506 SYSCTL_ADD_QUAD(ctx, children,
507 OID_AUTO, "pkts_wo_acntxts",
508 CTLFLAG_RD, &ha->hw.rcv.pkts_wo_acntxts,
509 "pkts_wo_acntxts");
510
511 SYSCTL_ADD_QUAD(ctx, children,
512 OID_AUTO, "pkts_dropped_no_sds_card",
513 CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_card,
514 "pkts_dropped_no_sds_card");
515
516 SYSCTL_ADD_QUAD(ctx, children,
517 OID_AUTO, "pkts_dropped_no_sds_host",
518 CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_host,
519 "pkts_dropped_no_sds_host");
520
521 SYSCTL_ADD_QUAD(ctx, children,
522 OID_AUTO, "oversized_pkts",
523 CTLFLAG_RD, &ha->hw.rcv.oversized_pkts,
524 "oversized_pkts");
525
526 SYSCTL_ADD_QUAD(ctx, children,
527 OID_AUTO, "pkts_dropped_no_rds",
528 CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_rds,
529 "pkts_dropped_no_rds");
530
531 SYSCTL_ADD_QUAD(ctx, children,
532 OID_AUTO, "unxpctd_mcast_pkts",
533 CTLFLAG_RD, &ha->hw.rcv.unxpctd_mcast_pkts,
534 "unxpctd_mcast_pkts");
535
536 SYSCTL_ADD_QUAD(ctx, children,
537 OID_AUTO, "re1_fbq_error",
538 CTLFLAG_RD, &ha->hw.rcv.re1_fbq_error,
539 "re1_fbq_error");
540
541 SYSCTL_ADD_QUAD(ctx, children,
542 OID_AUTO, "invalid_mac_addr",
543 CTLFLAG_RD, &ha->hw.rcv.invalid_mac_addr,
544 "invalid_mac_addr");
545
546 SYSCTL_ADD_QUAD(ctx, children,
547 OID_AUTO, "rds_prime_trys",
548 CTLFLAG_RD, &ha->hw.rcv.rds_prime_trys,
549 "rds_prime_trys");
550
551 SYSCTL_ADD_QUAD(ctx, children,
552 OID_AUTO, "rds_prime_success",
553 CTLFLAG_RD, &ha->hw.rcv.rds_prime_success,
554 "rds_prime_success");
555
556 SYSCTL_ADD_QUAD(ctx, children,
557 OID_AUTO, "lro_flows_added",
558 CTLFLAG_RD, &ha->hw.rcv.lro_flows_added,
559 "lro_flows_added");
560
561 SYSCTL_ADD_QUAD(ctx, children,
562 OID_AUTO, "lro_flows_deleted",
563 CTLFLAG_RD, &ha->hw.rcv.lro_flows_deleted,
564 "lro_flows_deleted");
565
566 SYSCTL_ADD_QUAD(ctx, children,
567 OID_AUTO, "lro_flows_active",
568 CTLFLAG_RD, &ha->hw.rcv.lro_flows_active,
569 "lro_flows_active");
570
571 SYSCTL_ADD_QUAD(ctx, children,
572 OID_AUTO, "pkts_droped_unknown",
573 CTLFLAG_RD, &ha->hw.rcv.pkts_droped_unknown,
574 "pkts_droped_unknown");
575
576 SYSCTL_ADD_QUAD(ctx, children,
577 OID_AUTO, "pkts_cnt_oversized",
578 CTLFLAG_RD, &ha->hw.rcv.pkts_cnt_oversized,
579 "pkts_cnt_oversized");
580
581 return;
582 }
583
584 static void
585 qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha)
586 {
587 struct sysctl_ctx_list *ctx;
588 struct sysctl_oid_list *children;
589 struct sysctl_oid_list *node_children;
590 struct sysctl_oid *ctx_oid;
591 int i;
592 uint8_t name_str[16];
593
594 ctx = device_get_sysctl_ctx(ha->pci_dev);
595 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
596
597 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_xmt",
598 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_xmt");
599 children = SYSCTL_CHILDREN(ctx_oid);
600
601 for (i = 0; i < ha->hw.num_tx_rings; i++) {
602 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
603 snprintf(name_str, sizeof(name_str), "%d", i);
604
605 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
606 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
607 node_children = SYSCTL_CHILDREN(ctx_oid);
608
609 /* Tx Related */
610
611 SYSCTL_ADD_QUAD(ctx, node_children,
612 OID_AUTO, "total_bytes",
613 CTLFLAG_RD, &ha->hw.xmt[i].total_bytes,
614 "total_bytes");
615
616 SYSCTL_ADD_QUAD(ctx, node_children,
617 OID_AUTO, "total_pkts",
618 CTLFLAG_RD, &ha->hw.xmt[i].total_pkts,
619 "total_pkts");
620
621 SYSCTL_ADD_QUAD(ctx, node_children,
622 OID_AUTO, "errors",
623 CTLFLAG_RD, &ha->hw.xmt[i].errors,
624 "errors");
625
626 SYSCTL_ADD_QUAD(ctx, node_children,
627 OID_AUTO, "pkts_dropped",
628 CTLFLAG_RD, &ha->hw.xmt[i].pkts_dropped,
629 "pkts_dropped");
630
631 SYSCTL_ADD_QUAD(ctx, node_children,
632 OID_AUTO, "switch_pkts",
633 CTLFLAG_RD, &ha->hw.xmt[i].switch_pkts,
634 "switch_pkts");
635
636 SYSCTL_ADD_QUAD(ctx, node_children,
637 OID_AUTO, "num_buffers",
638 CTLFLAG_RD, &ha->hw.xmt[i].num_buffers,
639 "num_buffers");
640 }
641
642 return;
643 }
644
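/*
 * qlnx_add_hw_mbx_cmpl_stats_sysctls: exports the mailbox-completion-time
 * histogram kept in ha->hw.mbx_comp_msecs[]. qla_mbx_cmd() fills buckets
 * 0-14 in 200ms steps, bucket 15 for 3-4s and bucket 16 for anything at
 * or above 4s; the last two buckets count host/firmware mailbox control
 * register timeouts.
 */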
645 static void
646 qlnx_add_hw_mbx_cmpl_stats_sysctls(qla_host_t *ha)
647 {
648 struct sysctl_ctx_list *ctx;
649 struct sysctl_oid_list *node_children;
650
651 ctx = device_get_sysctl_ctx(ha->pci_dev);
652 node_children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
653
654 SYSCTL_ADD_QUAD(ctx, node_children,
655 OID_AUTO, "mbx_completion_time_lt_200ms",
656 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[0],
657 "mbx_completion_time_lt_200ms");
658
659 SYSCTL_ADD_QUAD(ctx, node_children,
660 OID_AUTO, "mbx_completion_time_200ms_400ms",
661 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[1],
662 "mbx_completion_time_200ms_400ms");
663
664 SYSCTL_ADD_QUAD(ctx, node_children,
665 OID_AUTO, "mbx_completion_time_400ms_600ms",
666 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[2],
667 "mbx_completion_time_400ms_600ms");
668
669 SYSCTL_ADD_QUAD(ctx, node_children,
670 OID_AUTO, "mbx_completion_time_600ms_800ms",
671 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[3],
672 "mbx_completion_time_600ms_800ms");
673
674 SYSCTL_ADD_QUAD(ctx, node_children,
675 OID_AUTO, "mbx_completion_time_800ms_1000ms",
676 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[4],
677 "mbx_completion_time_800ms_1000ms");
678
679 SYSCTL_ADD_QUAD(ctx, node_children,
680 OID_AUTO, "mbx_completion_time_1000ms_1200ms",
681 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[5],
682 "mbx_completion_time_1000ms_1200ms");
683
684 SYSCTL_ADD_QUAD(ctx, node_children,
685 OID_AUTO, "mbx_completion_time_1200ms_1400ms",
686 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[6],
687 "mbx_completion_time_1200ms_1400ms");
688
689 SYSCTL_ADD_QUAD(ctx, node_children,
690 OID_AUTO, "mbx_completion_time_1400ms_1600ms",
691 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[7],
692 "mbx_completion_time_1400ms_1600ms");
693
694 SYSCTL_ADD_QUAD(ctx, node_children,
695 OID_AUTO, "mbx_completion_time_1600ms_1800ms",
696 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[8],
697 "mbx_completion_time_1600ms_1800ms");
698
699 SYSCTL_ADD_QUAD(ctx, node_children,
700 OID_AUTO, "mbx_completion_time_1800ms_2000ms",
701 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[9],
702 "mbx_completion_time_1800ms_2000ms");
703
704 SYSCTL_ADD_QUAD(ctx, node_children,
705 OID_AUTO, "mbx_completion_time_2000ms_2200ms",
706 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[10],
707 "mbx_completion_time_2000ms_2200ms");
708
709 SYSCTL_ADD_QUAD(ctx, node_children,
710 OID_AUTO, "mbx_completion_time_2200ms_2400ms",
711 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[11],
712 "mbx_completion_time_2200ms_2400ms");
713
714 SYSCTL_ADD_QUAD(ctx, node_children,
715 OID_AUTO, "mbx_completion_time_2400ms_2600ms",
716 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[12],
717 "mbx_completion_time_2400ms_2600ms");
718
719 SYSCTL_ADD_QUAD(ctx, node_children,
720 OID_AUTO, "mbx_completion_time_2600ms_2800ms",
721 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[13],
722 "mbx_completion_time_2600ms_2800ms");
723
724 SYSCTL_ADD_QUAD(ctx, node_children,
725 OID_AUTO, "mbx_completion_time_2800ms_3000ms",
726 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[14],
727 "mbx_completion_time_2800ms_3000ms");
728
729 SYSCTL_ADD_QUAD(ctx, node_children,
730 OID_AUTO, "mbx_completion_time_3000ms_4000ms",
731 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[15],
732 "mbx_completion_time_3000ms_4000ms");
733
734 SYSCTL_ADD_QUAD(ctx, node_children,
735 OID_AUTO, "mbx_completion_time_4000ms_5000ms",
736 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[16],
737 "mbx_completion_time_4000ms_5000ms");
738
739 SYSCTL_ADD_QUAD(ctx, node_children,
740 OID_AUTO, "mbx_completion_host_mbx_cntrl_timeout",
741 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[17],
742 "mbx_completion_host_mbx_cntrl_timeout");
743
744 SYSCTL_ADD_QUAD(ctx, node_children,
745 OID_AUTO, "mbx_completion_fw_mbx_cntrl_timeout",
746 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[18],
747 "mbx_completion_fw_mbx_cntrl_timeout");
748 return;
749 }
750
751 static void
752 qlnx_add_hw_stats_sysctls(qla_host_t *ha)
753 {
754 qlnx_add_hw_mac_stats_sysctls(ha);
755 qlnx_add_hw_rcv_stats_sysctls(ha);
756 qlnx_add_hw_xmt_stats_sysctls(ha);
757 qlnx_add_hw_mbx_cmpl_stats_sysctls(ha);
758
759 return;
760 }
761
762 static void
763 qlnx_add_drvr_sds_stats(qla_host_t *ha)
764 {
765 struct sysctl_ctx_list *ctx;
766 struct sysctl_oid_list *children;
767 struct sysctl_oid_list *node_children;
768 struct sysctl_oid *ctx_oid;
769 int i;
770 uint8_t name_str[16];
771
772 ctx = device_get_sysctl_ctx(ha->pci_dev);
773 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
774
775 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_sds",
776 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_drvr_sds");
777 children = SYSCTL_CHILDREN(ctx_oid);
778
779 for (i = 0; i < ha->hw.num_sds_rings; i++) {
780 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
781 snprintf(name_str, sizeof(name_str), "%d", i);
782
783 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
784 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
785 node_children = SYSCTL_CHILDREN(ctx_oid);
786
787 SYSCTL_ADD_QUAD(ctx, node_children,
788 OID_AUTO, "intr_count",
789 CTLFLAG_RD, &ha->hw.sds[i].intr_count,
790 "intr_count");
791
792 SYSCTL_ADD_UINT(ctx, node_children,
793 OID_AUTO, "rx_free",
794 CTLFLAG_RD, &ha->hw.sds[i].rx_free,
795 ha->hw.sds[i].rx_free, "rx_free");
796 }
797
798 return;
799 }
800 static void
801 qlnx_add_drvr_rds_stats(qla_host_t *ha)
802 {
803 struct sysctl_ctx_list *ctx;
804 struct sysctl_oid_list *children;
805 struct sysctl_oid_list *node_children;
806 struct sysctl_oid *ctx_oid;
807 int i;
808 uint8_t name_str[16];
809
810 ctx = device_get_sysctl_ctx(ha->pci_dev);
811 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
812
813 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_rds",
814 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_drvr_rds");
815 children = SYSCTL_CHILDREN(ctx_oid);
816
817 for (i = 0; i < ha->hw.num_rds_rings; i++) {
818 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
819 snprintf(name_str, sizeof(name_str), "%d", i);
820
821 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
822 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
823 node_children = SYSCTL_CHILDREN(ctx_oid);
824
825 SYSCTL_ADD_QUAD(ctx, node_children,
826 OID_AUTO, "count",
827 CTLFLAG_RD, &ha->hw.rds[i].count,
828 "count");
829
830 SYSCTL_ADD_QUAD(ctx, node_children,
831 OID_AUTO, "lro_pkt_count",
832 CTLFLAG_RD, &ha->hw.rds[i].lro_pkt_count,
833 "lro_pkt_count");
834
835 SYSCTL_ADD_QUAD(ctx, node_children,
836 OID_AUTO, "lro_bytes",
837 CTLFLAG_RD, &ha->hw.rds[i].lro_bytes,
838 "lro_bytes");
839 }
840
841 return;
842 }
843
844 static void
845 qlnx_add_drvr_tx_stats(qla_host_t *ha)
846 {
847 struct sysctl_ctx_list *ctx;
848 struct sysctl_oid_list *children;
849 struct sysctl_oid_list *node_children;
850 struct sysctl_oid *ctx_oid;
851 int i;
852 uint8_t name_str[16];
853
854 ctx = device_get_sysctl_ctx(ha->pci_dev);
855 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
856
857 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_xmt",
858 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_drvr_xmt");
859 children = SYSCTL_CHILDREN(ctx_oid);
860
861 for (i = 0; i < ha->hw.num_tx_rings; i++) {
862 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
863 snprintf(name_str, sizeof(name_str), "%d", i);
864
865 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
866 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
867 node_children = SYSCTL_CHILDREN(ctx_oid);
868
869 SYSCTL_ADD_QUAD(ctx, node_children,
870 OID_AUTO, "count",
871 CTLFLAG_RD, &ha->tx_ring[i].count,
872 "count");
873
874 #ifdef QL_ENABLE_ISCSI_TLV
875 SYSCTL_ADD_QUAD(ctx, node_children,
876 OID_AUTO, "iscsi_pkt_count",
877 CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count,
878 "iscsi_pkt_count");
879 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
880 }
881
882 return;
883 }
884
885 static void
886 qlnx_add_drvr_stats_sysctls(qla_host_t *ha)
887 {
888 qlnx_add_drvr_sds_stats(ha);
889 qlnx_add_drvr_rds_stats(ha);
890 qlnx_add_drvr_tx_stats(ha);
891 return;
892 }
893
894 /*
895 * Name: ql_hw_add_sysctls
896 * Function: Add P3Plus specific sysctls
897 */
898 void
899 ql_hw_add_sysctls(qla_host_t *ha)
900 {
901 device_t dev;
902
903 dev = ha->pci_dev;
904
905 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
906 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
907 OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
908 ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");
909
910 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
911 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
912 OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
913 ha->hw.num_sds_rings, "Number of Status Descriptor Rings");
914
915 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
916 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
917 OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
918 ha->hw.num_tx_rings, "Number of Transmit Rings");
919
920 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
921 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
922 OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
923 ha->txr_idx, "Tx Ring Used");
924
925 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
926 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
927 OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
928 ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");
929
930 ha->hw.sds_cidx_thres = 32;
931 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
932 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
933 OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
934 ha->hw.sds_cidx_thres,
935 "Number of SDS entries to process before updating"
936 " SDS Ring Consumer Index");
937
938 ha->hw.rds_pidx_thres = 32;
939 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
940 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
941 OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
942 ha->hw.rds_pidx_thres,
943 "Number of Rcv Rings Entries to post before updating"
944 " RDS Ring Producer Index");
945
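/* default rcv coalescing: bits 15:0 = 256 packets, bits 31:16 = 3 usec wait */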
946 ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
947 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
948 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
949 OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
950 &ha->hw.rcv_intr_coalesce,
951 ha->hw.rcv_intr_coalesce,
952 "Rcv Intr Coalescing Parameters\n"
953 "\tbits 15:0 max packets\n"
954 "\tbits 31:16 max micro-seconds to wait\n"
955 "\tplease run\n"
956 "\tifconfig <if> down && ifconfig <if> up\n"
957 "\tto take effect \n");
958
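/* default xmt coalescing: 64 packets, 64 usec wait */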
959 ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
960 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
961 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
962 OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
963 &ha->hw.xmt_intr_coalesce,
964 ha->hw.xmt_intr_coalesce,
965 "Xmt Intr Coalescing Parameters\n"
966 "\tbits 15:0 max packets\n"
967 "\tbits 31:16 max micro-seconds to wait\n"
968 "\tplease run\n"
969 "\tifconfig <if> down && ifconfig <if> up\n"
970 "\tto take effect \n");
971
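/*
 * Example (the sysctl node path depends on how the device attaches, e.g.
 * dev.<driver>.<unit>.port_cfg):
 *   sysctl dev.<driver>.<unit>.port_cfg=0x011
 * requests DCBX enabled (bits 0-3 = 1), standard pause (bits 4-7 = 1) in
 * both directions (bits 8-11 = 0). Writing a value outside the valid
 * ranges makes the handler call qla_get_port_config() instead.
 */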
972 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
973 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
974 "port_cfg", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
975 (void *)ha, 0, qla_sysctl_port_cfg, "I",
976 "Set Port Configuration if values below "
977 "otherwise Get Port Configuration\n"
978 "\tBits 0-3 ; 1 = DCBX Enable; 0 = DCBX Disable\n"
979 "\tBits 4-7 : 0 = no pause; 1 = std ; 2 = ppm \n"
980 "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
981 " 1 = xmt only; 2 = rcv only;\n");
982
983 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
984 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
985 "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
986 (void *)ha, 0, qla_sysctl_set_cam_search_mode, "I",
987 "Set CAM Search Mode"
988 "\t 1 = search mode internal\n"
989 "\t 2 = search mode auto\n");
990
991 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
992 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
993 "get_cam_search_mode",
994 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
995 qla_sysctl_get_cam_search_mode, "I",
996 "Get CAM Search Mode"
997 "\t 1 = search mode internal\n"
998 "\t 2 = search mode auto\n");
999
1000 ha->hw.enable_9kb = 1;
1001
1002 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1003 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1004 OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
1005 ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");
1006
1007 ha->hw.enable_hw_lro = 1;
1008
1009 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1010 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1011 OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro,
1012 ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true \n"
1013 "\t 1 : Hardware LRO if LRO is enabled\n"
1014 "\t 0 : Software LRO if LRO is enabled\n"
1015 "\t Any change requires ifconfig down/up to take effect\n"
1016 "\t Note that LRO may be turned off/on via ifconfig\n");
1017
1018 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1019 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1020 OID_AUTO, "sp_log_index", CTLFLAG_RW, &ha->hw.sp_log_index,
1021 ha->hw.sp_log_index, "sp_log_index");
1022
1023 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1024 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1025 OID_AUTO, "sp_log_stop", CTLFLAG_RW, &ha->hw.sp_log_stop,
1026 ha->hw.sp_log_stop, "sp_log_stop");
1027
1028 ha->hw.sp_log_stop_events = 0;
1029
1030 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1031 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1032 OID_AUTO, "sp_log_stop_events", CTLFLAG_RW,
1033 &ha->hw.sp_log_stop_events,
1034 ha->hw.sp_log_stop_events, "Slow path event log is stopped"
1035 " when OR of the following events occur \n"
1036 "\t 0x01 : Heart beat Failure\n"
1037 "\t 0x02 : Temperature Failure\n"
1038 "\t 0x04 : HW Initialization Failure\n"
1039 "\t 0x08 : Interface Initialization Failure\n"
1040 "\t 0x10 : Error Recovery Failure\n");
1041
1042 ha->hw.mdump_active = 0;
1043 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1044 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1045 OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
1046 ha->hw.mdump_active,
1047 "Minidump retrieval is Active");
1048
1049 ha->hw.mdump_done = 0;
1050 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1051 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1052 OID_AUTO, "mdump_done", CTLFLAG_RW,
1053 &ha->hw.mdump_done, ha->hw.mdump_done,
1054 "Minidump has been done and available for retrieval");
1055
1056 ha->hw.mdump_capture_mask = 0xF;
1057 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1058 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1059 OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
1060 &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
1061 "Minidump capture mask");
1062 #ifdef QL_DBG
1063
1064 ha->err_inject = 0;
1065 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1066 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1067 OID_AUTO, "err_inject",
1068 CTLFLAG_RW, &ha->err_inject, ha->err_inject,
1069 "Error to be injected\n"
1070 "\t\t\t 0: No Errors\n"
1071 "\t\t\t 1: rcv: rxb struct invalid\n"
1072 "\t\t\t 2: rcv: mp == NULL\n"
1073 "\t\t\t 3: lro: rxb struct invalid\n"
1074 "\t\t\t 4: lro: mp == NULL\n"
1075 "\t\t\t 5: rcv: num handles invalid\n"
1076 "\t\t\t 6: reg: indirect reg rd_wr failure\n"
1077 "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
1078 "\t\t\t 8: mbx: mailbox command failure\n"
1079 "\t\t\t 9: heartbeat failure\n"
1080 "\t\t\t A: temperature failure\n"
1081 "\t\t\t 11: m_getcl or m_getjcl failure\n"
1082 "\t\t\t 13: Invalid Descriptor Count in SGL Receive\n"
1083 "\t\t\t 14: Invalid Descriptor Count in LRO Receive\n"
1084 "\t\t\t 15: peer port error recovery failure\n"
1085 "\t\t\t 16: tx_buf[next_prod_index].mbuf != NULL\n" );
1086
1087 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1088 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
1089 "peg_stop", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1090 (void *)ha, 0, qla_sysctl_stop_pegs, "I", "Peg Stop");
1091
1092 #endif /* #ifdef QL_DBG */
1093
1094 ha->hw.user_pri_nic = 0;
1095 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1096 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1097 OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
1098 ha->hw.user_pri_nic,
1099 "VLAN Tag User Priority for Normal Ethernet Packets");
1100
1101 ha->hw.user_pri_iscsi = 4;
1102 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1103 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1104 OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
1105 ha->hw.user_pri_iscsi,
1106 "VLAN Tag User Priority for iSCSI Packets");
1107
1108 qlnx_add_hw_stats_sysctls(ha);
1109 qlnx_add_drvr_stats_sysctls(ha);
1110
1111 return;
1112 }
1113
1114 void
1115 ql_hw_link_status(qla_host_t *ha)
1116 {
1117 device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);
1118
1119 if (ha->hw.link_up) {
1120 device_printf(ha->pci_dev, "link Up\n");
1121 } else {
1122 device_printf(ha->pci_dev, "link Down\n");
1123 }
1124
1125 if (ha->hw.fduplex) {
1126 device_printf(ha->pci_dev, "Full Duplex\n");
1127 } else {
1128 device_printf(ha->pci_dev, "Half Duplex\n");
1129 }
1130
1131 if (ha->hw.autoneg) {
1132 device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
1133 } else {
1134 device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
1135 }
1136
1137 switch (ha->hw.link_speed) {
1138 case 0x710:
1139 device_printf(ha->pci_dev, "link speed\t\t 10Gps\n");
1140 break;
1141
1142 case 0x3E8:
1143 device_printf(ha->pci_dev, "link speed\t\t 1Gps\n");
1144 break;
1145
1146 case 0x64:
1147 device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
1148 break;
1149
1150 default:
1151 device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
1152 break;
1153 }
1154
1155 switch (ha->hw.module_type) {
1156 case 0x01:
1157 device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
1158 break;
1159
1160 case 0x02:
1161 device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
1162 break;
1163
1164 case 0x03:
1165 device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
1166 break;
1167
1168 case 0x04:
1169 device_printf(ha->pci_dev,
1170 "Module Type 10GE Passive Copper(Compliant)[%d m]\n",
1171 ha->hw.cable_length);
1172 break;
1173
1174 case 0x05:
1175 device_printf(ha->pci_dev, "Module Type 10GE Active"
1176 " Limiting Copper(Compliant)[%d m]\n",
1177 ha->hw.cable_length);
1178 break;
1179
1180 case 0x06:
1181 device_printf(ha->pci_dev,
1182 "Module Type 10GE Passive Copper"
1183 " (Legacy, Best Effort)[%d m]\n",
1184 ha->hw.cable_length);
1185 break;
1186
1187 case 0x07:
1188 device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
1189 break;
1190
1191 case 0x08:
1192 device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
1193 break;
1194
1195 case 0x09:
1196 device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
1197 break;
1198
1199 case 0x0A:
1200 device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
1201 break;
1202
1203 case 0x0B:
1204 device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
1205 "(Legacy, Best Effort)\n");
1206 break;
1207
1208 default:
1209 device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
1210 ha->hw.module_type);
1211 break;
1212 }
1213
1214 if (ha->hw.link_faults == 1)
1215 device_printf(ha->pci_dev, "SFP Power Fault\n");
1216 }
1217
1218 /*
1219 * Name: ql_free_dma
1220 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
1221 */
1222 void
1223 ql_free_dma(qla_host_t *ha)
1224 {
1225 uint32_t i;
1226
1227 if (ha->hw.dma_buf.flags.sds_ring) {
1228 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1229 ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
1230 }
1231 ha->hw.dma_buf.flags.sds_ring = 0;
1232 }
1233
1234 if (ha->hw.dma_buf.flags.rds_ring) {
1235 for (i = 0; i < ha->hw.num_rds_rings; i++) {
1236 ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
1237 }
1238 ha->hw.dma_buf.flags.rds_ring = 0;
1239 }
1240
1241 if (ha->hw.dma_buf.flags.tx_ring) {
1242 ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
1243 ha->hw.dma_buf.flags.tx_ring = 0;
1244 }
1245 ql_minidump_free(ha);
1246 }
1247
1248 /*
1249 * Name: ql_alloc_dma
1250 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
1251 */
1252 int
1253 ql_alloc_dma(qla_host_t *ha)
1254 {
1255 device_t dev;
1256 uint32_t i, j, size, tx_ring_size;
1257 qla_hw_t *hw;
1258 qla_hw_tx_cntxt_t *tx_cntxt;
1259 uint8_t *vaddr;
1260 bus_addr_t paddr;
1261
1262 dev = ha->pci_dev;
1263
1264 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
1265
1266 hw = &ha->hw;
1267 /*
1268 * Allocate Transmit Ring
1269 */
1270 tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
1271 size = (tx_ring_size * ha->hw.num_tx_rings);
1272
1273 hw->dma_buf.tx_ring.alignment = 8;
1274 hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
1275
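/*
 * All tx rings share one contiguous DMA allocation; the extra PAGE_SIZE
 * leaves room for the per-ring 32-bit tx consumer index words that are
 * carved out right after the ring area in the second loop below.
 */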
1276 if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
1277 device_printf(dev, "%s: tx ring alloc failed\n", __func__);
1278 goto ql_alloc_dma_exit;
1279 }
1280
1281 vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
1282 paddr = hw->dma_buf.tx_ring.dma_addr;
1283
1284 for (i = 0; i < ha->hw.num_tx_rings; i++) {
1285 tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
1286
1287 tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
1288 tx_cntxt->tx_ring_paddr = paddr;
1289
1290 vaddr += tx_ring_size;
1291 paddr += tx_ring_size;
1292 }
1293
1294 for (i = 0; i < ha->hw.num_tx_rings; i++) {
1295 tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
1296
1297 tx_cntxt->tx_cons = (uint32_t *)vaddr;
1298 tx_cntxt->tx_cons_paddr = paddr;
1299
1300 vaddr += sizeof (uint32_t);
1301 paddr += sizeof (uint32_t);
1302 }
1303
1304 ha->hw.dma_buf.flags.tx_ring = 1;
1305
1306 QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
1307 __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
1308 hw->dma_buf.tx_ring.dma_b));
1309 /*
1310 * Allocate Receive Descriptor Rings
1311 */
1312
1313 for (i = 0; i < hw->num_rds_rings; i++) {
1314 hw->dma_buf.rds_ring[i].alignment = 8;
1315 hw->dma_buf.rds_ring[i].size =
1316 (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
1317
1318 if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
1319 device_printf(dev, "%s: rds ring[%d] alloc failed\n",
1320 __func__, i);
1321
1322 for (j = 0; j < i; j++)
1323 ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);
1324
1325 goto ql_alloc_dma_exit;
1326 }
1327 QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
1328 __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
1329 hw->dma_buf.rds_ring[i].dma_b));
1330 }
1331
1332 hw->dma_buf.flags.rds_ring = 1;
1333
1334 /*
1335 * Allocate Status Descriptor Rings
1336 */
1337
1338 for (i = 0; i < hw->num_sds_rings; i++) {
1339 hw->dma_buf.sds_ring[i].alignment = 8;
1340 hw->dma_buf.sds_ring[i].size =
1341 (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;
1342
1343 if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
1344 device_printf(dev, "%s: sds ring alloc failed\n",
1345 __func__);
1346
1347 for (j = 0; j < i; j++)
1348 ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);
1349
1350 goto ql_alloc_dma_exit;
1351 }
1352 QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
1353 __func__, i,
1354 (void *)(hw->dma_buf.sds_ring[i].dma_addr),
1355 hw->dma_buf.sds_ring[i].dma_b));
1356 }
1357 for (i = 0; i < hw->num_sds_rings; i++) {
1358 hw->sds[i].sds_ring_base =
1359 (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
1360 }
1361
1362 hw->dma_buf.flags.sds_ring = 1;
1363
1364 return 0;
1365
1366 ql_alloc_dma_exit:
1367 ql_free_dma(ha);
1368 return -1;
1369 }
1370
1371 #define Q8_MBX_MSEC_DELAY 5000
1372
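/*
 * Name: qla_mbx_cmd
 * Function: Issues a mailbox command to the firmware and collects its
 * response. Sequence: wait for Q8_HOST_MBOX_CNTRL to clear, write the
 * n_hmbox request words into the host mailbox registers, set
 * Q8_HOST_MBOX_CNTRL to 1, poll Q8_FW_MBOX_CNTRL for completion, read
 * n_fwmbox response words, then clear the firmware control and mailbox
 * interrupt mask registers. Completion time is binned into
 * ha->hw.mbx_comp_msecs[]. A control-register timeout or a pending
 * recovery request aborts the command; timeouts also trigger
 * QL_INITIATE_RECOVERY().
 */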
1373 static int
1374 qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
1375 uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
1376 {
1377 uint32_t i;
1378 uint32_t data;
1379 int ret = 0;
1380 uint64_t start_usecs;
1381 uint64_t end_usecs;
1382 uint64_t msecs_200;
1383
1384 ql_sp_log(ha, 0, 5, no_pause, h_mbox[0], h_mbox[1], h_mbox[2], h_mbox[3]);
1385
1386 if (ha->offline || ha->qla_initiate_recovery) {
1387 ql_sp_log(ha, 1, 2, ha->offline, ha->qla_initiate_recovery, 0, 0, 0);
1388 goto exit_qla_mbx_cmd;
1389 }
1390
1391 if (((ha->err_inject & 0xFFFF) == INJCT_MBX_CMD_FAILURE) &&
1392 (((ha->err_inject & ~0xFFFF) == ((h_mbox[0] & 0xFFFF) << 16))||
1393 !(ha->err_inject & ~0xFFFF))) {
1394 ret = -3;
1395 QL_INITIATE_RECOVERY(ha);
1396 goto exit_qla_mbx_cmd;
1397 }
1398
1399 start_usecs = qla_get_usec_timestamp();
1400
1401 if (no_pause)
1402 i = 1000;
1403 else
1404 i = Q8_MBX_MSEC_DELAY;
1405
1406 while (i) {
1407 if (ha->qla_initiate_recovery) {
1408 ql_sp_log(ha, 2, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
1409 return (-1);
1410 }
1411
1412 data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
1413 if (data == 0)
1414 break;
1415 if (no_pause) {
1416 DELAY(1000);
1417 } else {
1418 qla_mdelay(__func__, 1);
1419 }
1420 i--;
1421 }
1422
1423 if (i == 0) {
1424 device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
1425 __func__, data);
1426 ql_sp_log(ha, 3, 1, data, 0, 0, 0, 0);
1427 ret = -1;
1428 ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 2)]++;
1429 QL_INITIATE_RECOVERY(ha);
1430 goto exit_qla_mbx_cmd;
1431 }
1432
1433 for (i = 0; i < n_hmbox; i++) {
1434 WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
1435 h_mbox++;
1436 }
1437
1438 WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);
1439
1440 i = Q8_MBX_MSEC_DELAY;
1441 while (i) {
1442 if (ha->qla_initiate_recovery) {
1443 ql_sp_log(ha, 4, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
1444 return (-1);
1445 }
1446
1447 data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
1448
1449 if ((data & 0x3) == 1) {
1450 data = READ_REG32(ha, Q8_FW_MBOX0);
1451 if ((data & 0xF000) != 0x8000)
1452 break;
1453 }
1454 if (no_pause) {
1455 DELAY(1000);
1456 } else {
1457 qla_mdelay(__func__, 1);
1458 }
1459 i--;
1460 }
1461 if (i == 0) {
1462 device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
1463 __func__, data);
1464 ql_sp_log(ha, 5, 1, data, 0, 0, 0, 0);
1465 ret = -2;
1466 ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 1)]++;
1467 QL_INITIATE_RECOVERY(ha);
1468 goto exit_qla_mbx_cmd;
1469 }
1470
1471 for (i = 0; i < n_fwmbox; i++) {
1472 if (ha->qla_initiate_recovery) {
1473 ql_sp_log(ha, 6, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
1474 return (-1);
1475 }
1476
1477 *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
1478 }
1479
1480 WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
1481 WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
1482
1483 end_usecs = qla_get_usec_timestamp();
1484
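/*
 * Bin the round-trip time into 200ms buckets: indices 0-14 cover
 * 0-3s, index 15 covers 3-4s and index 16 anything longer.
 */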
1485 if (end_usecs > start_usecs) {
1486 msecs_200 = (end_usecs - start_usecs)/(1000 * 200);
1487
1488 if (msecs_200 < 15)
1489 ha->hw.mbx_comp_msecs[msecs_200]++;
1490 else if (msecs_200 < 20)
1491 ha->hw.mbx_comp_msecs[15]++;
1492 else {
1493 device_printf(ha->pci_dev, "%s: [%ld, %ld] %ld\n", __func__,
1494 start_usecs, end_usecs, msecs_200);
1495 ha->hw.mbx_comp_msecs[16]++;
1496 }
1497 }
1498 ql_sp_log(ha, 7, 5, fw_mbox[0], fw_mbox[1], fw_mbox[2], fw_mbox[3], fw_mbox[4]);
1499
1500 exit_qla_mbx_cmd:
1501 return (ret);
1502 }
1503
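/*
 * Name: qla_get_nic_partition
 * Function: Issues Q8_MBX_GET_NIC_PARTITION and parses the response:
 * bits 31:25 of mbox[0] carry the status, bit 7 of mbox[16] indicates
 * 9KB receive buffer support and bits 31:16 of mbox[6] give the number
 * of receive queues assigned to this function.
 */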
1504 int
1505 qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
1506 uint32_t *num_rcvq)
1507 {
1508 uint32_t *mbox, err;
1509 device_t dev = ha->pci_dev;
1510
1511 bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));
1512
1513 mbox = ha->hw.mbox;
1514
1515 mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);
1516
1517 if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
1518 device_printf(dev, "%s: failed0\n", __func__);
1519 return (-1);
1520 }
1521 err = mbox[0] >> 25;
1522
1523 if (supports_9kb != NULL) {
1524 if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
1525 *supports_9kb = 1;
1526 else
1527 *supports_9kb = 0;
1528 }
1529
1530 if (num_rcvq != NULL)
1531 *num_rcvq = ((mbox[6] >> 16) & 0xFFFF);
1532
1533 if ((err != 1) && (err != 0)) {
1534 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1535 return (-1);
1536 }
1537 return 0;
1538 }
1539
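/*
 * Name: qla_config_intr_cntxt
 * Function: Creates or deletes MSI-X interrupt contexts for the interrupts
 * in [start_idx, start_idx + num_intrs). On create, the firmware-assigned
 * intr_id/intr_src of each successful entry is saved in ha->hw for later
 * use (including deletion).
 */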
1540 static int
1541 qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
1542 uint32_t create)
1543 {
1544 uint32_t i, err;
1545 device_t dev = ha->pci_dev;
1546 q80_config_intr_t *c_intr;
1547 q80_config_intr_rsp_t *c_intr_rsp;
1548
1549 c_intr = (q80_config_intr_t *)ha->hw.mbox;
1550 bzero(c_intr, (sizeof (q80_config_intr_t)));
1551
1552 c_intr->opcode = Q8_MBX_CONFIG_INTR;
1553
1554 c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
1555 c_intr->count_version |= Q8_MBX_CMD_VERSION;
1556
1557 c_intr->nentries = num_intrs;
1558
1559 for (i = 0; i < num_intrs; i++) {
1560 if (create) {
1561 c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
1562 c_intr->intr[i].msix_index = start_idx + 1 + i;
1563 } else {
1564 c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
1565 c_intr->intr[i].msix_index =
1566 ha->hw.intr_id[(start_idx + i)];
1567 }
1568
1569 c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
1570 }
1571
1572 if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
1573 (sizeof (q80_config_intr_t) >> 2),
1574 ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
1575 device_printf(dev, "%s: %s failed0\n", __func__,
1576 (create ? "create" : "delete"));
1577 return (-1);
1578 }
1579
1580 c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;
1581
1582 err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);
1583
1584 if (err) {
1585 device_printf(dev, "%s: %s failed1 [0x%08x, %d]\n", __func__,
1586 (create ? "create" : "delete"), err, c_intr_rsp->nentries);
1587
1588 for (i = 0; i < c_intr_rsp->nentries; i++) {
1589 device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
1590 __func__, i,
1591 c_intr_rsp->intr[i].status,
1592 c_intr_rsp->intr[i].intr_id,
1593 c_intr_rsp->intr[i].intr_src);
1594 }
1595
1596 return (-1);
1597 }
1598
1599 for (i = 0; ((i < num_intrs) && create); i++) {
1600 if (!c_intr_rsp->intr[i].status) {
1601 ha->hw.intr_id[(start_idx + i)] =
1602 c_intr_rsp->intr[i].intr_id;
1603 ha->hw.intr_src[(start_idx + i)] =
1604 c_intr_rsp->intr[i].intr_src;
1605 }
1606 }
1607
1608 return (0);
1609 }
1610
1611 /*
1612 * Name: qla_config_rss
1613 * Function: Configure RSS for the context/interface.
1614 */
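/* 40-byte RSS hash key (five 64-bit words) copied into the receive context */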
1615 static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
1616 0x8030f20c77cb2da3ULL,
1617 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
1618 0x255b0ec26d5a56daULL };
1619
1620 static int
1621 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
1622 {
1623 q80_config_rss_t *c_rss;
1624 q80_config_rss_rsp_t *c_rss_rsp;
1625 uint32_t err, i;
1626 device_t dev = ha->pci_dev;
1627
1628 c_rss = (q80_config_rss_t *)ha->hw.mbox;
1629 bzero(c_rss, (sizeof (q80_config_rss_t)));
1630
1631 c_rss->opcode = Q8_MBX_CONFIG_RSS;
1632
1633 c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
1634 c_rss->count_version |= Q8_MBX_CMD_VERSION;
1635
1636 c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
1637 Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
1638 //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
1639 // Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
1640
1641 c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
1642 c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
1643
1644 c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
1645
1646 c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
1647 c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
1648
1649 c_rss->cntxt_id = cntxt_id;
1650
1651 for (i = 0; i < 5; i++) {
1652 c_rss->rss_key[i] = rss_key[i];
1653 }
1654
1655 if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
1656 (sizeof (q80_config_rss_t) >> 2),
1657 ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
1658 device_printf(dev, "%s: failed0\n", __func__);
1659 return (-1);
1660 }
1661 c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
1662
1663 err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
1664
1665 if (err) {
1666 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1667 return (-1);
1668 }
1669 return 0;
1670 }
1671
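/*
 * Name: qla_set_rss_ind_table
 * Function: Programs 'count' RSS indirection table entries, starting at
 * 'start_idx', for the given receive context.
 */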
1672 static int
1673 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
1674 uint16_t cntxt_id, uint8_t *ind_table)
1675 {
1676 q80_config_rss_ind_table_t *c_rss_ind;
1677 q80_config_rss_ind_table_rsp_t *c_rss_ind_rsp;
1678 uint32_t err;
1679 device_t dev = ha->pci_dev;
1680
1681 if ((count > Q8_RSS_IND_TBL_SIZE) ||
1682 ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
1683 device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
1684 start_idx, count);
1685 return (-1);
1686 }
1687
1688 c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
1689 bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
1690
1691 c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
1692 c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
1693 c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
1694
1695 c_rss_ind->start_idx = start_idx;
1696 c_rss_ind->end_idx = start_idx + count - 1;
1697 c_rss_ind->cntxt_id = cntxt_id;
1698 bcopy(ind_table, c_rss_ind->ind_table, count);
1699
1700 if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
1701 (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
1702 (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
1703 device_printf(dev, "%s: failed0\n", __func__);
1704 return (-1);
1705 }
1706
1707 c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
1708 err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
1709
1710 if (err) {
1711 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1712 return (-1);
1713 }
1714 return 0;
1715 }
1716
1717 /*
1718 * Name: qla_config_intr_coalesce
1719 * Function: Configure Interrupt Coalescing.
1720 */
1721 static int
1722 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
1723 int rcv)
1724 {
1725 q80_config_intr_coalesc_t *intrc;
1726 q80_config_intr_coalesc_rsp_t *intrc_rsp;
1727 uint32_t err, i;
1728 device_t dev = ha->pci_dev;
1729
1730 intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
1731 bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
1732
1733 intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
1734 intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
1735 intrc->count_version |= Q8_MBX_CMD_VERSION;
1736
1737 if (rcv) {
1738 intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
1739 intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
1740 intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
1741 } else {
1742 intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
1743 intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
1744 intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
1745 }
1746
1747 intrc->cntxt_id = cntxt_id;
1748
1749 if (tenable) {
1750 intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
1751 intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
1752
1753 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1754 intrc->sds_ring_mask |= (1 << i);
1755 }
1756 intrc->ms_timeout = 1000;
1757 }
1758
1759 if (qla_mbx_cmd(ha, (uint32_t *)intrc,
1760 (sizeof (q80_config_intr_coalesc_t) >> 2),
1761 ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
1762 device_printf(dev, "%s: failed0\n", __func__);
1763 return (-1);
1764 }
1765 intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
1766
1767 err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
1768
1769 if (err) {
1770 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1771 return (-1);
1772 }
1773
1774 return 0;
1775 }
1776
1777 /*
1778 * Name: qla_config_mac_addr
1779 * Function: binds a MAC address to the context/interface.
1780 * Can be unicast, multicast or broadcast.
1781 */
1782 static int
1783 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
1784 uint32_t num_mac)
1785 {
1786 q80_config_mac_addr_t *cmac;
1787 q80_config_mac_addr_rsp_t *cmac_rsp;
1788 uint32_t err;
1789 device_t dev = ha->pci_dev;
1790 int i;
1791 uint8_t *mac_cpy = mac_addr;
1792
1793 if (num_mac > Q8_MAX_MAC_ADDRS) {
1794 device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
1795 __func__, (add_mac ? "Add" : "Del"), num_mac);
1796 return (-1);
1797 }
1798
1799 cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
1800 bzero(cmac, (sizeof (q80_config_mac_addr_t)));
1801
1802 cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
1803 cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
1804 cmac->count_version |= Q8_MBX_CMD_VERSION;
1805
1806 if (add_mac)
1807 cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
1808 else
1809 cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
1810
1811 cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
1812
1813 cmac->nmac_entries = num_mac;
1814 cmac->cntxt_id = ha->hw.rcv_cntxt_id;
1815
1816 for (i = 0; i < num_mac; i++) {
1817 bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN);
1818 mac_addr = mac_addr + ETHER_ADDR_LEN;
1819 }
1820
1821 if (qla_mbx_cmd(ha, (uint32_t *)cmac,
1822 (sizeof (q80_config_mac_addr_t) >> 2),
1823 ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
1824 device_printf(dev, "%s: %s failed0\n", __func__,
1825 (add_mac ? "Add" : "Del"));
1826 return (-1);
1827 }
1828 cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
1829
1830 err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
1831
1832 if (err) {
1833 device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
1834 (add_mac ? "Add" : "Del"), err);
1835 for (i = 0; i < num_mac; i++) {
1836 device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
1837 __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
1838 mac_cpy[3], mac_cpy[4], mac_cpy[5]);
1839 mac_cpy += ETHER_ADDR_LEN;
1840 }
1841 return (-1);
1842 }
1843
1844 return 0;
1845 }
1846
1847 /*
1848 * Name: qla_set_mac_rcv_mode
1849  * Function: Enable/Disable AllMulticast and Promiscuous Modes.
1850 */
1851 static int
1852 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
1853 {
1854 q80_config_mac_rcv_mode_t *rcv_mode;
1855 uint32_t err;
1856 q80_config_mac_rcv_mode_rsp_t *rcv_mode_rsp;
1857 device_t dev = ha->pci_dev;
1858
1859 rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
1860 bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
1861
1862 rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
1863 rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
1864 rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
1865
1866 rcv_mode->mode = mode;
1867
1868 rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
1869
1870 if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
1871 (sizeof (q80_config_mac_rcv_mode_t) >> 2),
1872 ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
1873 device_printf(dev, "%s: failed0\n", __func__);
1874 return (-1);
1875 }
1876 rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
1877
1878 err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
1879
1880 if (err) {
1881 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1882 return (-1);
1883 }
1884
1885 return 0;
1886 }
1887
1888 int
1889 ql_set_promisc(qla_host_t *ha)
1890 {
1891 int ret;
1892
1893 ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1894 ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1895 return (ret);
1896 }
1897
1898 void
1899 qla_reset_promisc(qla_host_t *ha)
1900 {
1901 ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1902 (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1903 }
1904
1905 int
1906 ql_set_allmulti(qla_host_t *ha)
1907 {
1908 int ret;
1909
1910 ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
1911 ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1912 return (ret);
1913 }
1914
1915 void
1916 qla_reset_allmulti(qla_host_t *ha)
1917 {
1918 ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
1919 (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1920 }
1921
1922 /*
1923 * Name: ql_set_max_mtu
1924 * Function:
1925 * Sets the maximum transfer unit size for the specified rcv context.
1926 */
1927 int
1928 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1929 {
1930 device_t dev;
1931 q80_set_max_mtu_t *max_mtu;
1932 q80_set_max_mtu_rsp_t *max_mtu_rsp;
1933 uint32_t err;
1934
1935 dev = ha->pci_dev;
1936
1937 max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
1938 bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
1939
1940 max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
1941 max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
1942 max_mtu->count_version |= Q8_MBX_CMD_VERSION;
1943
1944 max_mtu->cntxt_id = cntxt_id;
1945 max_mtu->mtu = mtu;
1946
1947 if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1948 (sizeof (q80_set_max_mtu_t) >> 2),
1949 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1950 device_printf(dev, "%s: failed\n", __func__);
1951 return -1;
1952 }
1953
1954 max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1955
1956 err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1957
1958 if (err) {
1959 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1960 }
1961
1962 return 0;
1963 }
1964
1965 static int
1966 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1967 {
1968 device_t dev;
1969 q80_link_event_t *lnk;
1970 q80_link_event_rsp_t *lnk_rsp;
1971 uint32_t err;
1972
1973 dev = ha->pci_dev;
1974
1975 lnk = (q80_link_event_t *)ha->hw.mbox;
1976 bzero(lnk, (sizeof (q80_link_event_t)));
1977
1978 lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
1979 lnk->count_version = (sizeof (q80_link_event_t) >> 2);
1980 lnk->count_version |= Q8_MBX_CMD_VERSION;
1981
1982 lnk->cntxt_id = cntxt_id;
1983 lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
1984
1985 if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
1986 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
1987 device_printf(dev, "%s: failed\n", __func__);
1988 return -1;
1989 }
1990
1991 lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
1992
1993 err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
1994
1995 if (err) {
1996 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1997 }
1998
1999 return 0;
2000 }
2001
2002 static int
2003 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
2004 {
2005 device_t dev;
2006 q80_config_fw_lro_t *fw_lro;
2007 q80_config_fw_lro_rsp_t *fw_lro_rsp;
2008 uint32_t err;
2009
2010 dev = ha->pci_dev;
2011
2012 fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
2013 bzero(fw_lro, sizeof(q80_config_fw_lro_t));
2014
2015 fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
2016 fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
2017 fw_lro->count_version |= Q8_MBX_CMD_VERSION;
2018
2019 fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
2020 fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
2021
2022 fw_lro->cntxt_id = cntxt_id;
2023
2024 if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
2025 (sizeof (q80_config_fw_lro_t) >> 2),
2026 ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
2027 device_printf(dev, "%s: failed\n", __func__);
2028 return -1;
2029 }
2030
2031 fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
2032
2033 err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
2034
2035 if (err) {
2036 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2037 }
2038
2039 return 0;
2040 }
2041
2042 static int
2043 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
2044 {
2045 device_t dev;
2046 q80_hw_config_t *hw_config;
2047 q80_hw_config_rsp_t *hw_config_rsp;
2048 uint32_t err;
2049
2050 dev = ha->pci_dev;
2051
2052 hw_config = (q80_hw_config_t *)ha->hw.mbox;
2053 bzero(hw_config, sizeof (q80_hw_config_t));
2054
2055 hw_config->opcode = Q8_MBX_HW_CONFIG;
2056 hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
2057 hw_config->count_version |= Q8_MBX_CMD_VERSION;
2058
2059 hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;
2060
2061 hw_config->u.set_cam_search_mode.mode = search_mode;
2062
2063 if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
2064 (sizeof (q80_hw_config_t) >> 2),
2065 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
2066 device_printf(dev, "%s: failed\n", __func__);
2067 return -1;
2068 }
2069 hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
2070
2071 err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
2072
2073 if (err) {
2074 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2075 }
2076
2077 return 0;
2078 }
2079
2080 static int
2081 qla_get_cam_search_mode(qla_host_t *ha)
2082 {
2083 device_t dev;
2084 q80_hw_config_t *hw_config;
2085 q80_hw_config_rsp_t *hw_config_rsp;
2086 uint32_t err;
2087
2088 dev = ha->pci_dev;
2089
2090 hw_config = (q80_hw_config_t *)ha->hw.mbox;
2091 bzero(hw_config, sizeof (q80_hw_config_t));
2092
2093 hw_config->opcode = Q8_MBX_HW_CONFIG;
2094 hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
2095 hw_config->count_version |= Q8_MBX_CMD_VERSION;
2096
2097 hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;
2098
2099 if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
2100 (sizeof (q80_hw_config_t) >> 2),
2101 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
2102 device_printf(dev, "%s: failed\n", __func__);
2103 return -1;
2104 }
2105 hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
2106
2107 err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
2108
2109 if (err) {
2110 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2111 } else {
2112 device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
2113 hw_config_rsp->u.get_cam_search_mode.mode);
2114 }
2115
2116 return 0;
2117 }
2118
2119 static int
2120 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
2121 {
2122 device_t dev;
2123 q80_get_stats_t *stat;
2124 q80_get_stats_rsp_t *stat_rsp;
2125 uint32_t err;
2126
2127 dev = ha->pci_dev;
2128
2129 stat = (q80_get_stats_t *)ha->hw.mbox;
2130 bzero(stat, (sizeof (q80_get_stats_t)));
2131
2132 stat->opcode = Q8_MBX_GET_STATS;
2133 stat->count_version = 2;
2134 stat->count_version |= Q8_MBX_CMD_VERSION;
2135
2136 stat->cmd = cmd;
2137
2138 if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
2139 ha->hw.mbox, (rsp_size >> 2), 0)) {
2140 device_printf(dev, "%s: failed\n", __func__);
2141 return -1;
2142 }
2143
2144 stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
2145
2146 err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
2147
2148 if (err) {
2149 return -1;
2150 }
2151
2152 return 0;
2153 }
2154
2155 void
2156 ql_get_stats(qla_host_t *ha)
2157 {
2158 q80_get_stats_rsp_t *stat_rsp;
2159 q80_mac_stats_t *mstat;
2160 q80_xmt_stats_t *xstat;
2161 q80_rcv_stats_t *rstat;
2162 uint32_t cmd;
2163 int i;
2164 if_t ifp = ha->ifp;
2165
2166 if (ifp == NULL)
2167 return;
2168
2169 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) != 0) {
2170 device_printf(ha->pci_dev, "%s: failed\n", __func__);
2171 return;
2172 }
2173
2174 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
2175 QLA_UNLOCK(ha, __func__);
2176 return;
2177 }
2178
2179 stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
2180 /*
2181 * Get MAC Statistics
2182 */
2183 cmd = Q8_GET_STATS_CMD_TYPE_MAC;
2184 // cmd |= Q8_GET_STATS_CMD_CLEAR;
2185
2186 cmd |= ((ha->pci_func & 0x1) << 16);
2187
2188 if (ha->qla_watchdog_pause || (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) ||
2189 ha->offline)
2190 goto ql_get_stats_exit;
2191
2192 if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2193 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
2194 bcopy(mstat, &ha->hw.mac, sizeof(q80_mac_stats_t));
2195 } else {
2196 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
2197 __func__, ha->hw.mbox[0]);
2198 }
2199 /*
2200 * Get RCV Statistics
2201 */
2202 cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
2203 // cmd |= Q8_GET_STATS_CMD_CLEAR;
2204 cmd |= (ha->hw.rcv_cntxt_id << 16);
2205
2206 if (ha->qla_watchdog_pause || (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) ||
2207 ha->offline)
2208 goto ql_get_stats_exit;
2209
2210 if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2211 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
2212 bcopy(rstat, &ha->hw.rcv, sizeof(q80_rcv_stats_t));
2213 } else {
2214 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
2215 __func__, ha->hw.mbox[0]);
2216 }
2217
2218 if (ha->qla_watchdog_pause || (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) ||
2219 ha->offline)
2220 goto ql_get_stats_exit;
2221 /*
2222 * Get XMT Statistics
2223 */
2224 for (i = 0 ; (i < ha->hw.num_tx_rings); i++) {
2225 if (ha->qla_watchdog_pause ||
2226 (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) ||
2227 ha->offline)
2228 goto ql_get_stats_exit;
2229
2230 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
2231 // cmd |= Q8_GET_STATS_CMD_CLEAR;
2232 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
2233
2234 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
2235 == 0) {
2236 xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
2237 bcopy(xstat, &ha->hw.xmt[i], sizeof(q80_xmt_stats_t));
2238 } else {
2239 device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
2240 __func__, ha->hw.mbox[0]);
2241 }
2242 }
2243
2244 ql_get_stats_exit:
2245 QLA_UNLOCK(ha, __func__);
2246
2247 return;
2248 }
2249
2250 /*
2251 * Name: qla_tx_tso
2252 * Function: Checks if the packet to be transmitted is a candidate for
2253 * Large TCP Segment Offload. If yes, the appropriate fields in the Tx
2254 * Ring Structure are plugged in.
2255 */
2256 static int
2257 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
2258 {
2259 struct ether_vlan_header *eh;
2260 struct ip *ip = NULL;
2261 struct ip6_hdr *ip6 = NULL;
2262 struct tcphdr *th = NULL;
2263 uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
2264 uint16_t etype, opcode, offload = 1;
2265
2266 eh = mtod(mp, struct ether_vlan_header *);
2267
2268 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2269 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2270 etype = ntohs(eh->evl_proto);
2271 } else {
2272 ehdrlen = ETHER_HDR_LEN;
2273 etype = ntohs(eh->evl_encap_proto);
2274 }
2275
2276 hdrlen = 0;
2277
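	/*
	 * If the Ethernet/IP/TCP headers do not all fit in the first mbuf,
	 * they are staged into the caller supplied hdr buffer so they can
	 * be parsed here and later copied into the tx cmd descriptors.
	 */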
2278 switch (etype) {
2279 case ETHERTYPE_IP:
2280
2281 tcp_opt_off = ehdrlen + sizeof(struct ip) +
2282 sizeof(struct tcphdr);
2283
2284 if (mp->m_len < tcp_opt_off) {
2285 m_copydata(mp, 0, tcp_opt_off, hdr);
2286 ip = (struct ip *)(hdr + ehdrlen);
2287 } else {
2288 ip = (struct ip *)(mp->m_data + ehdrlen);
2289 }
2290
2291 ip_hlen = ip->ip_hl << 2;
2292 opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
2293
2294
2295 if ((ip->ip_p != IPPROTO_TCP) ||
2296 (ip_hlen != sizeof (struct ip))){
2297 /* IP Options are not supported */
2298
2299 offload = 0;
2300 } else
2301 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2302
2303 break;
2304
2305 case ETHERTYPE_IPV6:
2306
2307 tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
2308 sizeof (struct tcphdr);
2309
2310 if (mp->m_len < tcp_opt_off) {
2311 m_copydata(mp, 0, tcp_opt_off, hdr);
2312 ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
2313 } else {
2314 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2315 }
2316
2317 ip_hlen = sizeof(struct ip6_hdr);
2318 opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
2319
2320 if (ip6->ip6_nxt != IPPROTO_TCP) {
2321 //device_printf(dev, "%s: ipv6\n", __func__);
2322 offload = 0;
2323 } else
2324 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
2325 break;
2326
2327 default:
2328 QL_DPRINT8(ha, (ha->pci_dev, "%s: type!=ip\n", __func__));
2329 offload = 0;
2330 break;
2331 }
2332
2333 if (!offload)
2334 return (-1);
2335
2336 tcp_hlen = th->th_off << 2;
2337 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2338
2339 if (mp->m_len < hdrlen) {
2340 if (mp->m_len < tcp_opt_off) {
2341 if (tcp_hlen > sizeof(struct tcphdr)) {
2342 m_copydata(mp, tcp_opt_off,
2343 (tcp_hlen - sizeof(struct tcphdr)),
2344 &hdr[tcp_opt_off]);
2345 }
2346 } else {
2347 m_copydata(mp, 0, hdrlen, hdr);
2348 }
2349 }
2350
2351 tx_cmd->mss = mp->m_pkthdr.tso_segsz;
2352
2353 tx_cmd->flags_opcode = opcode ;
2354 tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
2355 tx_cmd->total_hdr_len = hdrlen;
2356
2357 	/* Multicast check: least significant bit of the most significant address byte is 1 */
2358 if (eh->evl_dhost[0] & 0x01) {
2359 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
2360 }
2361
2362 if (mp->m_len < hdrlen) {
2363 printf("%d\n", hdrlen);
2364 return (1);
2365 }
2366
2367 return (0);
2368 }
2369
2370 /*
2371 * Name: qla_tx_chksum
2372 * Function: Checks if the packet to be transmitted is a candidate for
2373 * TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
2374 * Ring Structure are plugged in.
2375 */
2376 static int
2377 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
2378 uint32_t *tcp_hdr_off)
2379 {
2380 struct ether_vlan_header *eh;
2381 struct ip *ip;
2382 struct ip6_hdr *ip6;
2383 uint32_t ehdrlen, ip_hlen;
2384 uint16_t etype, opcode, offload = 1;
2385 uint8_t buf[sizeof(struct ip6_hdr)];
2386
2387 *op_code = 0;
2388
2389 if ((mp->m_pkthdr.csum_flags &
2390 (CSUM_TCP|CSUM_UDP|CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0)
2391 return (-1);
2392
2393 eh = mtod(mp, struct ether_vlan_header *);
2394
2395 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2396 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2397 etype = ntohs(eh->evl_proto);
2398 } else {
2399 ehdrlen = ETHER_HDR_LEN;
2400 etype = ntohs(eh->evl_encap_proto);
2401 }
2402
2403
2404 switch (etype) {
2405 case ETHERTYPE_IP:
2406 ip = (struct ip *)(mp->m_data + ehdrlen);
2407
2408 ip_hlen = sizeof (struct ip);
2409
2410 if (mp->m_len < (ehdrlen + ip_hlen)) {
2411 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2412 ip = (struct ip *)buf;
2413 }
2414
2415 if (ip->ip_p == IPPROTO_TCP)
2416 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
2417 else if (ip->ip_p == IPPROTO_UDP)
2418 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
2419 else {
2420 //device_printf(dev, "%s: ipv4\n", __func__);
2421 offload = 0;
2422 }
2423 break;
2424
2425 case ETHERTYPE_IPV6:
2426 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2427
2428 ip_hlen = sizeof(struct ip6_hdr);
2429
2430 if (mp->m_len < (ehdrlen + ip_hlen)) {
2431 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2432 buf);
2433 ip6 = (struct ip6_hdr *)buf;
2434 }
2435
2436 if (ip6->ip6_nxt == IPPROTO_TCP)
2437 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
2438 else if (ip6->ip6_nxt == IPPROTO_UDP)
2439 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
2440 else {
2441 //device_printf(dev, "%s: ipv6\n", __func__);
2442 offload = 0;
2443 }
2444 break;
2445
2446 default:
2447 offload = 0;
2448 break;
2449 }
2450 if (!offload)
2451 return (-1);
2452
2453 *op_code = opcode;
2454 *tcp_hdr_off = (ip_hlen + ehdrlen);
2455
2456 return (0);
2457 }
2458
2459 #define QLA_TX_MIN_FREE 2
2460 /*
2461 * Name: ql_hw_send
2462 * Function: Transmits a packet. It first checks if the packet is a
2463 * candidate for Large TCP Segment Offload and then for UDP/TCP checksum
2464  * offload. If neither of these criteria is met, it is transmitted
2465 * as a regular ethernet frame.
2466 */
2467 int
2468 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
2469 uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
2470 {
2471 struct ether_vlan_header *eh;
2472 qla_hw_t *hw = &ha->hw;
2473 q80_tx_cmd_t *tx_cmd, tso_cmd;
2474 bus_dma_segment_t *c_seg;
2475 uint32_t num_tx_cmds, hdr_len = 0;
2476 uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
2477 device_t dev;
2478 int i, ret;
2479 uint8_t *src = NULL, *dst = NULL;
2480 uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
2481 uint32_t op_code = 0;
2482 uint32_t tcp_hdr_off = 0;
2483
2484 dev = ha->pci_dev;
2485
2486 /*
2487 	 * Always make sure there is at least one empty slot in the tx_ring;
2488 	 * the tx_ring is considered full when only one entry is available.
2489 */
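	/*
	 * Each tx cmd descriptor holds up to Q8_TX_CMD_MAX_SEGMENTS DMA
	 * segments (buf1..buf4 below); round nsegs up to that granularity.
	 * The shift by 2 assumes Q8_TX_CMD_MAX_SEGMENTS is 4.
	 */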
2490 num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
2491
2492 total_length = mp->m_pkthdr.len;
2493 if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
2494 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
2495 __func__, total_length);
2496 return (EINVAL);
2497 }
2498 eh = mtod(mp, struct ether_vlan_header *);
2499
2500 if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2501 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
2502
2503 src = frame_hdr;
2504 ret = qla_tx_tso(ha, mp, &tso_cmd, src);
2505
2506 if (!(ret & ~1)) {
2507 /* find the additional tx_cmd descriptors required */
2508
2509 if (mp->m_flags & M_VLANTAG)
2510 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
2511
2512 hdr_len = tso_cmd.total_hdr_len;
2513
2514 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2515 bytes = QL_MIN(bytes, hdr_len);
2516
2517 num_tx_cmds++;
2518 hdr_len -= bytes;
2519
2520 while (hdr_len) {
2521 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2522 hdr_len -= bytes;
2523 num_tx_cmds++;
2524 }
2525 hdr_len = tso_cmd.total_hdr_len;
2526
2527 if (ret == 0)
2528 src = (uint8_t *)eh;
2529 } else
2530 return (EINVAL);
2531 } else {
2532 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
2533 }
2534
2535 if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
2536 ql_hw_tx_done_locked(ha, txr_idx);
2537 if (hw->tx_cntxt[txr_idx].txr_free <=
2538 (num_tx_cmds + QLA_TX_MIN_FREE)) {
2539 QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
2540 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
2541 __func__));
2542 return (-1);
2543 }
2544 }
2545
2546 for (i = 0; i < num_tx_cmds; i++) {
2547 int j;
2548
2549 j = (tx_idx+i) & (NUM_TX_DESCRIPTORS - 1);
2550
2551 if (NULL != ha->tx_ring[txr_idx].tx_buf[j].m_head) {
2552 QL_ASSERT(ha, 0, \
2553 ("%s [%d]: txr_idx = %d tx_idx = %d mbuf = %p\n",\
2554 __func__, __LINE__, txr_idx, j,\
2555 ha->tx_ring[txr_idx].tx_buf[j].m_head));
2556 return (EINVAL);
2557 }
2558 }
2559
2560 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
2561
2562 if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
2563 if (nsegs > ha->hw.max_tx_segs)
2564 ha->hw.max_tx_segs = nsegs;
2565
2566 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2567
2568 if (op_code) {
2569 tx_cmd->flags_opcode = op_code;
2570 tx_cmd->tcp_hdr_off = tcp_hdr_off;
2571
2572 } else {
2573 tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
2574 }
2575 } else {
2576 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
2577 ha->tx_tso_frames++;
2578 }
2579
2580 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2581 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
2582
2583 if (iscsi_pdu)
2584 eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
2585
2586 } else if (mp->m_flags & M_VLANTAG) {
2587 if (hdr_len) { /* TSO */
2588 tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
2589 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
2590 tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
2591 } else
2592 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
2593
2594 ha->hw_vlan_tx_frames++;
2595 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
2596
2597 if (iscsi_pdu) {
2598 tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
2599 mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
2600 }
2601 }
2602
2603 tx_cmd->n_bufs = (uint8_t)nsegs;
2604 tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
2605 tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
2606 tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
2607
2608 c_seg = segs;
2609
2610 while (1) {
2611 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
2612 switch (i) {
2613 case 0:
2614 tx_cmd->buf1_addr = c_seg->ds_addr;
2615 tx_cmd->buf1_len = c_seg->ds_len;
2616 break;
2617
2618 case 1:
2619 tx_cmd->buf2_addr = c_seg->ds_addr;
2620 tx_cmd->buf2_len = c_seg->ds_len;
2621 break;
2622
2623 case 2:
2624 tx_cmd->buf3_addr = c_seg->ds_addr;
2625 tx_cmd->buf3_len = c_seg->ds_len;
2626 break;
2627
2628 case 3:
2629 tx_cmd->buf4_addr = c_seg->ds_addr;
2630 tx_cmd->buf4_len = c_seg->ds_len;
2631 break;
2632 }
2633
2634 c_seg++;
2635 nsegs--;
2636 }
2637
2638 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2639 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2640 (NUM_TX_DESCRIPTORS - 1);
2641 tx_cmd_count++;
2642
2643 if (!nsegs)
2644 break;
2645
2646 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2647 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2648 }
2649
2650 if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2651 /* TSO : Copy the header in the following tx cmd descriptors */
2652
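		/*
		 * The first continuation descriptor has room for
		 * (sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN) header bytes;
		 * any remaining header bytes spill into whole descriptors in
		 * the while loop further below.
		 */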
2653 txr_next = hw->tx_cntxt[txr_idx].txr_next;
2654
2655 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2656 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2657
2658 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2659 bytes = QL_MIN(bytes, hdr_len);
2660
2661 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
2662
2663 if (mp->m_flags & M_VLANTAG) {
2664 /* first copy the src/dst MAC addresses */
2665 bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2666 dst += (ETHER_ADDR_LEN * 2);
2667 src += (ETHER_ADDR_LEN * 2);
2668
2669 *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2670 dst += 2;
2671 *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2672 dst += 2;
2673
2674 /* bytes left in src header */
2675 hdr_len -= ((ETHER_ADDR_LEN * 2) +
2676 ETHER_VLAN_ENCAP_LEN);
2677
2678 /* bytes left in TxCmd Entry */
2679 bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2680
2681 bcopy(src, dst, bytes);
2682 src += bytes;
2683 hdr_len -= bytes;
2684 } else {
2685 bcopy(src, dst, bytes);
2686 src += bytes;
2687 hdr_len -= bytes;
2688 }
2689
2690 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2691 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2692 (NUM_TX_DESCRIPTORS - 1);
2693 tx_cmd_count++;
2694
2695 while (hdr_len) {
2696 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2697 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2698
2699 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2700
2701 bcopy(src, tx_cmd, bytes);
2702 src += bytes;
2703 hdr_len -= bytes;
2704
2705 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2706 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2707 (NUM_TX_DESCRIPTORS - 1);
2708 tx_cmd_count++;
2709 }
2710 }
2711
2712 hw->tx_cntxt[txr_idx].txr_free =
2713 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2714
2715 QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2716 txr_idx);
2717 QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2718
2719 return (0);
2720 }
2721
2722 #define Q8_CONFIG_IND_TBL_SIZE 32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
2723 static int
2724 qla_config_rss_ind_table(qla_host_t *ha)
2725 {
2726 uint32_t i, count;
2727 uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2728
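	/*
	 * Distribute the indirection table entries across the SDS rings in
	 * round robin fashion, then program the full table in chunks of
	 * Q8_CONFIG_IND_TBL_SIZE entries per mailbox command.
	 */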
2729 for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2730 rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2731 }
2732
2733 for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
2734 i = i + Q8_CONFIG_IND_TBL_SIZE) {
2735 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2736 count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2737 } else {
2738 count = Q8_CONFIG_IND_TBL_SIZE;
2739 }
2740
2741 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2742 rss_ind_tbl))
2743 return (-1);
2744 }
2745
2746 return (0);
2747 }
2748
2749 static int
2750 qla_config_soft_lro(qla_host_t *ha)
2751 {
2752 #if defined(INET) || defined(INET6)
2753 int i;
2754 qla_hw_t *hw = &ha->hw;
2755 struct lro_ctrl *lro;
2756
2757 for (i = 0; i < hw->num_sds_rings; i++) {
2758 lro = &hw->sds[i].lro;
2759
2760 bzero(lro, sizeof(struct lro_ctrl));
2761
2762 if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
2763 device_printf(ha->pci_dev,
2764 "%s: tcp_lro_init_args [%d] failed\n",
2765 __func__, i);
2766 return (-1);
2767 }
2768
2769 lro->ifp = ha->ifp;
2770 }
2771
2772 QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
2773 #endif
2774 return (0);
2775 }
2776
2777 static void
2778 qla_drain_soft_lro(qla_host_t *ha)
2779 {
2780 #if defined(INET) || defined(INET6)
2781 int i;
2782 qla_hw_t *hw = &ha->hw;
2783 struct lro_ctrl *lro;
2784
2785 for (i = 0; i < hw->num_sds_rings; i++) {
2786 lro = &hw->sds[i].lro;
2787
2788 tcp_lro_flush_all(lro);
2789 }
2790 #endif
2791
2792 return;
2793 }
2794
2795 static void
2796 qla_free_soft_lro(qla_host_t *ha)
2797 {
2798 #if defined(INET) || defined(INET6)
2799 int i;
2800 qla_hw_t *hw = &ha->hw;
2801 struct lro_ctrl *lro;
2802
2803 for (i = 0; i < hw->num_sds_rings; i++) {
2804 lro = &hw->sds[i].lro;
2805 tcp_lro_free(lro);
2806 }
2807 #endif
2808
2809 return;
2810 }
2811
2812 /*
2813 * Name: ql_del_hw_if
2814 * Function: Destroys the hardware specific entities corresponding to an
2815 * Ethernet Interface
2816 */
2817 void
2818 ql_del_hw_if(qla_host_t *ha)
2819 {
2820 uint32_t i;
2821 uint32_t num_msix;
2822
2823 (void)qla_stop_nic_func(ha);
2824
2825 qla_del_rcv_cntxt(ha);
2826
2827 if(qla_del_xmt_cntxt(ha))
2828 goto ql_del_hw_if_exit;
2829
2830 if (ha->hw.flags.init_intr_cnxt) {
2831 for (i = 0; i < ha->hw.num_sds_rings; ) {
2832 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2833 num_msix = Q8_MAX_INTR_VECTORS;
2834 else
2835 num_msix = ha->hw.num_sds_rings - i;
2836
2837 if (qla_config_intr_cntxt(ha, i, num_msix, 0))
2838 break;
2839
2840 i += num_msix;
2841 }
2842
2843 ha->hw.flags.init_intr_cnxt = 0;
2844 }
2845
2846 ql_del_hw_if_exit:
2847 if (ha->hw.enable_soft_lro) {
2848 qla_drain_soft_lro(ha);
2849 qla_free_soft_lro(ha);
2850 }
2851
2852 return;
2853 }
2854
2855 void
2856 qla_confirm_9kb_enable(qla_host_t *ha)
2857 {
2858 // uint32_t supports_9kb = 0;
2859
2860 ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2861
2862 /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2863 WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2864 WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2865
2866 #if 0
2867 qla_get_nic_partition(ha, &supports_9kb, NULL);
2868
2869 if (!supports_9kb)
2870 #endif
2871 ha->hw.enable_9kb = 0;
2872
2873 return;
2874 }
2875
2876 /*
2877 * Name: ql_init_hw_if
2878 * Function: Creates the hardware specific entities corresponding to an
2879 * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2880 * corresponding to the interface. Enables LRO if allowed.
2881 */
2882 int
2883 ql_init_hw_if(qla_host_t *ha)
2884 {
2885 uint32_t i;
2886 uint8_t bcast_mac[6];
2887 qla_rdesc_t *rdesc;
2888 uint32_t num_msix;
2889
2890 for (i = 0; i < ha->hw.num_sds_rings; i++) {
2891 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2892 ha->hw.dma_buf.sds_ring[i].size);
2893 }
2894
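	/*
	 * Interrupt contexts are created in batches of at most
	 * Q8_MAX_INTR_VECTORS; if a batch fails, any batches already
	 * created are torn down before returning.
	 */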
2895 for (i = 0; i < ha->hw.num_sds_rings; ) {
2896 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2897 num_msix = Q8_MAX_INTR_VECTORS;
2898 else
2899 num_msix = ha->hw.num_sds_rings - i;
2900
2901 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2902 if (i > 0) {
2903 num_msix = i;
2904
2905 for (i = 0; i < num_msix; ) {
2906 qla_config_intr_cntxt(ha, i,
2907 Q8_MAX_INTR_VECTORS, 0);
2908 i += Q8_MAX_INTR_VECTORS;
2909 }
2910 }
2911 return (-1);
2912 }
2913
2914 i = i + num_msix;
2915 }
2916
2917 ha->hw.flags.init_intr_cnxt = 1;
2918
2919 /*
2920 * Create Receive Context
2921 */
2922 if (qla_init_rcv_cntxt(ha)) {
2923 return (-1);
2924 }
2925
2926 for (i = 0; i < ha->hw.num_rds_rings; i++) {
2927 rdesc = &ha->hw.rds[i];
2928 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2929 rdesc->rx_in = 0;
2930 /* Update the RDS Producer Indices */
2931 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2932 rdesc->rx_next);
2933 }
2934
2935 /*
2936 * Create Transmit Context
2937 */
2938 if (qla_init_xmt_cntxt(ha)) {
2939 qla_del_rcv_cntxt(ha);
2940 return (-1);
2941 }
2942 ha->hw.max_tx_segs = 0;
2943
2944 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
2945 return(-1);
2946
2947 ha->hw.flags.unicast_mac = 1;
2948
2949 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2950 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2951
2952 if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
2953 return (-1);
2954
2955 ha->hw.flags.bcast_mac = 1;
2956
2957 /*
2958 * program any cached multicast addresses
2959 */
2960 if (qla_hw_add_all_mcast(ha))
2961 return (-1);
2962
2963 if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id))
2964 return (-1);
2965
2966 if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2967 return (-1);
2968
2969 if (qla_config_rss_ind_table(ha))
2970 return (-1);
2971
2972 if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2973 return (-1);
2974
2975 if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2976 return (-1);
2977
2978 if (if_getcapenable(ha->ifp) & IFCAP_LRO) {
2979 if (ha->hw.enable_hw_lro) {
2980 ha->hw.enable_soft_lro = 0;
2981
2982 if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2983 return (-1);
2984 } else {
2985 ha->hw.enable_soft_lro = 1;
2986
2987 if (qla_config_soft_lro(ha))
2988 return (-1);
2989 }
2990 }
2991
2992 if (qla_init_nic_func(ha))
2993 return (-1);
2994
2995 if (qla_query_fw_dcbx_caps(ha))
2996 return (-1);
2997
2998 for (i = 0; i < ha->hw.num_sds_rings; i++)
2999 QL_ENABLE_INTERRUPTS(ha, i);
3000
3001 return (0);
3002 }
3003
3004 static int
3005 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
3006 {
3007 device_t dev = ha->pci_dev;
3008 q80_rq_map_sds_to_rds_t *map_rings;
3009 q80_rsp_map_sds_to_rds_t *map_rings_rsp;
3010 uint32_t i, err;
3011 qla_hw_t *hw = &ha->hw;
3012
3013 map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
3014 bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
3015
3016 map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
3017 map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
3018 map_rings->count_version |= Q8_MBX_CMD_VERSION;
3019
3020 map_rings->cntxt_id = hw->rcv_cntxt_id;
3021 map_rings->num_rings = num_idx;
3022
3023 for (i = 0; i < num_idx; i++) {
3024 map_rings->sds_rds[i].sds_ring = i + start_idx;
3025 map_rings->sds_rds[i].rds_ring = i + start_idx;
3026 }
3027
3028 if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
3029 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
3030 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
3031 device_printf(dev, "%s: failed0\n", __func__);
3032 return (-1);
3033 }
3034
3035 map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
3036
3037 err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
3038
3039 if (err) {
3040 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3041 return (-1);
3042 }
3043
3044 return (0);
3045 }
3046
3047 /*
3048 * Name: qla_init_rcv_cntxt
3049 * Function: Creates the Receive Context.
3050 */
3051 static int
3052 qla_init_rcv_cntxt(qla_host_t *ha)
3053 {
3054 q80_rq_rcv_cntxt_t *rcntxt;
3055 q80_rsp_rcv_cntxt_t *rcntxt_rsp;
3056 q80_stat_desc_t *sdesc;
3057 int i, j;
3058 qla_hw_t *hw = &ha->hw;
3059 device_t dev;
3060 uint32_t err;
3061 uint32_t rcntxt_sds_rings;
3062 uint32_t rcntxt_rds_rings;
3063 uint32_t max_idx;
3064
3065 dev = ha->pci_dev;
3066
3067 /*
3068 * Create Receive Context
3069 */
3070
3071 for (i = 0; i < hw->num_sds_rings; i++) {
3072 sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
3073
3074 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
3075 			sdesc[j].data[0] = 1ULL;
3076 			sdesc[j].data[1] = 1ULL;
3077 }
3078 }
3079
3080 rcntxt_sds_rings = hw->num_sds_rings;
3081 if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
3082 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
3083
3084 rcntxt_rds_rings = hw->num_rds_rings;
3085
3086 if (hw->num_rds_rings > MAX_RDS_RING_SETS)
3087 rcntxt_rds_rings = MAX_RDS_RING_SETS;
3088
3089 rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
3090 bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
3091
3092 rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
3093 rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
3094 rcntxt->count_version |= Q8_MBX_CMD_VERSION;
3095
3096 rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
3097 Q8_RCV_CNTXT_CAP0_LRO |
3098 Q8_RCV_CNTXT_CAP0_HW_LRO |
3099 Q8_RCV_CNTXT_CAP0_RSS |
3100 Q8_RCV_CNTXT_CAP0_SGL_LRO;
3101
3102 if (ha->hw.enable_9kb)
3103 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
3104 else
3105 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
3106
3107 if (ha->hw.num_rds_rings > 1) {
3108 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
3109 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
3110 } else
3111 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
3112
3113 rcntxt->nsds_rings = rcntxt_sds_rings;
3114
3115 rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
3116
3117 rcntxt->rcv_vpid = 0;
3118
3119 for (i = 0; i < rcntxt_sds_rings; i++) {
3120 rcntxt->sds[i].paddr =
3121 qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
3122 rcntxt->sds[i].size =
3123 qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
3124 rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
3125 rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
3126 }
3127
3128 for (i = 0; i < rcntxt_rds_rings; i++) {
3129 rcntxt->rds[i].paddr_std =
3130 qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
3131
3132 if (ha->hw.enable_9kb)
3133 rcntxt->rds[i].std_bsize =
3134 qla_host_to_le64(MJUM9BYTES);
3135 else
3136 rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
3137
3138 rcntxt->rds[i].std_nentries =
3139 qla_host_to_le32(NUM_RX_DESCRIPTORS);
3140 }
3141
3142 if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3143 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
3144 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
3145 device_printf(dev, "%s: failed0\n", __func__);
3146 return (-1);
3147 }
3148
3149 rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
3150
3151 err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3152
3153 if (err) {
3154 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3155 return (-1);
3156 }
3157
3158 for (i = 0; i < rcntxt_sds_rings; i++) {
3159 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
3160 }
3161
3162 for (i = 0; i < rcntxt_rds_rings; i++) {
3163 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
3164 }
3165
3166 hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
3167
3168 ha->hw.flags.init_rx_cnxt = 1;
3169
3170 if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
3171 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
3172 if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
3173 max_idx = MAX_RCNTXT_SDS_RINGS;
3174 else
3175 max_idx = hw->num_sds_rings - i;
3176
3177 err = qla_add_rcv_rings(ha, i, max_idx);
3178 if (err)
3179 return -1;
3180
3181 i += max_idx;
3182 }
3183 }
3184
3185 if (hw->num_rds_rings > 1) {
3186 for (i = 0; i < hw->num_rds_rings; ) {
3187 if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
3188 max_idx = MAX_SDS_TO_RDS_MAP;
3189 else
3190 max_idx = hw->num_rds_rings - i;
3191
3192 err = qla_map_sds_to_rds(ha, i, max_idx);
3193 if (err)
3194 return -1;
3195
3196 i += max_idx;
3197 }
3198 }
3199
3200 return (0);
3201 }
3202
3203 static int
3204 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
3205 {
3206 device_t dev = ha->pci_dev;
3207 q80_rq_add_rcv_rings_t *add_rcv;
3208 q80_rsp_add_rcv_rings_t *add_rcv_rsp;
3209 uint32_t i,j, err;
3210 qla_hw_t *hw = &ha->hw;
3211
3212 add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
3213 bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
3214
3215 add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
3216 add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
3217 add_rcv->count_version |= Q8_MBX_CMD_VERSION;
3218
3219 add_rcv->nrds_sets_rings = nsds | (1 << 5);
3220 add_rcv->nsds_rings = nsds;
3221 add_rcv->cntxt_id = hw->rcv_cntxt_id;
3222
3223 for (i = 0; i < nsds; i++) {
3224 j = i + sds_idx;
3225
3226 add_rcv->sds[i].paddr =
3227 qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
3228
3229 add_rcv->sds[i].size =
3230 qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
3231
3232 add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
3233 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
3234 }
3235
3236 for (i = 0; (i < nsds); i++) {
3237 j = i + sds_idx;
3238
3239 add_rcv->rds[i].paddr_std =
3240 qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
3241
3242 if (ha->hw.enable_9kb)
3243 add_rcv->rds[i].std_bsize =
3244 qla_host_to_le64(MJUM9BYTES);
3245 else
3246 add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
3247
3248 add_rcv->rds[i].std_nentries =
3249 qla_host_to_le32(NUM_RX_DESCRIPTORS);
3250 }
3251
3252 if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
3253 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
3254 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
3255 device_printf(dev, "%s: failed0\n", __func__);
3256 return (-1);
3257 }
3258
3259 add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
3260
3261 err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
3262
3263 if (err) {
3264 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3265 return (-1);
3266 }
3267
3268 for (i = 0; i < nsds; i++) {
3269 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
3270 }
3271
3272 for (i = 0; i < nsds; i++) {
3273 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
3274 }
3275
3276 return (0);
3277 }
3278
3279 /*
3280 * Name: qla_del_rcv_cntxt
3281 * Function: Destroys the Receive Context.
3282 */
3283 static void
3284 qla_del_rcv_cntxt(qla_host_t *ha)
3285 {
3286 device_t dev = ha->pci_dev;
3287 q80_rcv_cntxt_destroy_t *rcntxt;
3288 q80_rcv_cntxt_destroy_rsp_t *rcntxt_rsp;
3289 uint32_t err;
3290 uint8_t bcast_mac[6];
3291
3292 if (!ha->hw.flags.init_rx_cnxt)
3293 return;
3294
3295 if (qla_hw_del_all_mcast(ha))
3296 return;
3297
3298 if (ha->hw.flags.bcast_mac) {
3299 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
3300 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
3301
3302 if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
3303 return;
3304 ha->hw.flags.bcast_mac = 0;
3305 }
3306
3307 if (ha->hw.flags.unicast_mac) {
3308 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
3309 return;
3310 ha->hw.flags.unicast_mac = 0;
3311 }
3312
3313 rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
3314 bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
3315
3316 rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
3317 rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
3318 rcntxt->count_version |= Q8_MBX_CMD_VERSION;
3319
3320 rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
3321
3322 if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3323 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
3324 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
3325 device_printf(dev, "%s: failed0\n", __func__);
3326 return;
3327 }
3328 rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
3329
3330 err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3331
3332 if (err) {
3333 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3334 }
3335
3336 ha->hw.flags.init_rx_cnxt = 0;
3337 return;
3338 }
3339
3340 /*
3341 * Name: qla_init_xmt_cntxt
3342 * Function: Creates the Transmit Context.
3343 */
3344 static int
3345 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3346 {
3347 device_t dev;
3348 qla_hw_t *hw = &ha->hw;
3349 q80_rq_tx_cntxt_t *tcntxt;
3350 q80_rsp_tx_cntxt_t *tcntxt_rsp;
3351 uint32_t err;
3352 qla_hw_tx_cntxt_t *hw_tx_cntxt;
3353 uint32_t intr_idx;
3354
3355 hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3356
3357 dev = ha->pci_dev;
3358
3359 /*
3360 * Create Transmit Context
3361 */
3362 tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
3363 bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
3364
3365 tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
3366 tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
3367 tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3368
3369 intr_idx = txr_idx;
3370
3371 #ifdef QL_ENABLE_ISCSI_TLV
3372
3373 tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
3374 Q8_TX_CNTXT_CAP0_TC;
3375
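	/*
	 * With iSCSI TLV support, the upper half of the tx rings is placed
	 * in traffic class 1 and shares MSI-X vectors with the lower half.
	 */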
3376 if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
3377 tcntxt->traffic_class = 1;
3378 }
3379
3380 intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
3381
3382 #else
3383 tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
3384
3385 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
3386
3387 tcntxt->ntx_rings = 1;
3388
3389 tcntxt->tx_ring[0].paddr =
3390 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
3391 tcntxt->tx_ring[0].tx_consumer =
3392 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
3393 tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
3394
3395 tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
3396 tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
3397
3398 hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
3399 hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
3400 *(hw_tx_cntxt->tx_cons) = 0;
3401
3402 if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3403 (sizeof (q80_rq_tx_cntxt_t) >> 2),
3404 ha->hw.mbox,
3405 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
3406 device_printf(dev, "%s: failed0\n", __func__);
3407 return (-1);
3408 }
3409 tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
3410
3411 err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3412
3413 if (err) {
3414 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3415 return -1;
3416 }
3417
3418 hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
3419 hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
3420
3421 if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
3422 return (-1);
3423
3424 return (0);
3425 }
3426
3427 /*
3428 * Name: qla_del_xmt_cntxt
3429 * Function: Destroys the Transmit Context.
3430 */
3431 static int
3432 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3433 {
3434 device_t dev = ha->pci_dev;
3435 q80_tx_cntxt_destroy_t *tcntxt;
3436 q80_tx_cntxt_destroy_rsp_t *tcntxt_rsp;
3437 uint32_t err;
3438
3439 tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
3440 bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
3441
3442 tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
3443 tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
3444 tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3445
3446 tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
3447
3448 if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3449 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
3450 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
3451 device_printf(dev, "%s: failed0\n", __func__);
3452 return (-1);
3453 }
3454 tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
3455
3456 err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3457
3458 if (err) {
3459 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3460 return (-1);
3461 }
3462
3463 return (0);
3464 }
3465 static int
3466 qla_del_xmt_cntxt(qla_host_t *ha)
3467 {
3468 uint32_t i;
3469 int ret = 0;
3470
3471 if (!ha->hw.flags.init_tx_cnxt)
3472 return (ret);
3473
3474 for (i = 0; i < ha->hw.num_tx_rings; i++) {
3475 if ((ret = qla_del_xmt_cntxt_i(ha, i)) != 0)
3476 break;
3477 }
3478 ha->hw.flags.init_tx_cnxt = 0;
3479
3480 return (ret);
3481 }
3482
3483 static int
3484 qla_init_xmt_cntxt(qla_host_t *ha)
3485 {
3486 uint32_t i, j;
3487
3488 for (i = 0; i < ha->hw.num_tx_rings; i++) {
3489 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
3490 for (j = 0; j < i; j++) {
3491 if (qla_del_xmt_cntxt_i(ha, j))
3492 break;
3493 }
3494 return (-1);
3495 }
3496 }
3497 ha->hw.flags.init_tx_cnxt = 1;
3498 return (0);
3499 }
3500
3501 static int
3502 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
3503 {
3504 int i, nmcast;
3505 uint32_t count = 0;
3506 uint8_t *mcast;
3507
3508 nmcast = ha->hw.nmcast;
3509
3510 QL_DPRINT2(ha, (ha->pci_dev,
3511 "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
3512
3513 mcast = ha->hw.mac_addr_arr;
3514 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3515
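	/*
	 * Gather the valid (non zero) cached multicast addresses into
	 * mac_addr_arr and program them Q8_MAX_MAC_ADDRS entries at a time.
	 */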
3516 for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
3517 if ((ha->hw.mcast[i].addr[0] != 0) ||
3518 (ha->hw.mcast[i].addr[1] != 0) ||
3519 (ha->hw.mcast[i].addr[2] != 0) ||
3520 (ha->hw.mcast[i].addr[3] != 0) ||
3521 (ha->hw.mcast[i].addr[4] != 0) ||
3522 (ha->hw.mcast[i].addr[5] != 0)) {
3523 bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
3524 mcast = mcast + ETHER_ADDR_LEN;
3525 count++;
3526
3527 device_printf(ha->pci_dev,
3528 "%s: %x:%x:%x:%x:%x:%x \n",
3529 __func__, ha->hw.mcast[i].addr[0],
3530 ha->hw.mcast[i].addr[1], ha->hw.mcast[i].addr[2],
3531 ha->hw.mcast[i].addr[3], ha->hw.mcast[i].addr[4],
3532 ha->hw.mcast[i].addr[5]);
3533
3534 if (count == Q8_MAX_MAC_ADDRS) {
3535 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3536 add_mcast, count)) {
3537 device_printf(ha->pci_dev,
3538 "%s: failed\n", __func__);
3539 return (-1);
3540 }
3541
3542 count = 0;
3543 mcast = ha->hw.mac_addr_arr;
3544 memset(mcast, 0,
3545 (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3546 }
3547
3548 nmcast--;
3549 }
3550 }
3551
3552 if (count) {
3553 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
3554 count)) {
3555 device_printf(ha->pci_dev, "%s: failed\n", __func__);
3556 return (-1);
3557 }
3558 }
3559 QL_DPRINT2(ha, (ha->pci_dev,
3560 "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
3561
3562 return 0;
3563 }
3564
3565 static int
3566 qla_hw_add_all_mcast(qla_host_t *ha)
3567 {
3568 int ret;
3569
3570 ret = qla_hw_all_mcast(ha, 1);
3571
3572 return (ret);
3573 }
3574
3575 int
3576 qla_hw_del_all_mcast(qla_host_t *ha)
3577 {
3578 int ret;
3579
3580 ret = qla_hw_all_mcast(ha, 0);
3581
3582 bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
3583 ha->hw.nmcast = 0;
3584
3585 return (ret);
3586 }
3587
3588 static int
3589 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
3590 {
3591 int i;
3592
3593 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3594 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
3595 			return (0); /* it has already been added */
3596 }
3597 return (-1);
3598 }
3599
3600 static int
3601 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3602 {
3603 int i;
3604
3605 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3606 if ((ha->hw.mcast[i].addr[0] == 0) &&
3607 (ha->hw.mcast[i].addr[1] == 0) &&
3608 (ha->hw.mcast[i].addr[2] == 0) &&
3609 (ha->hw.mcast[i].addr[3] == 0) &&
3610 (ha->hw.mcast[i].addr[4] == 0) &&
3611 (ha->hw.mcast[i].addr[5] == 0)) {
3612 bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
3613 ha->hw.nmcast++;
3614
3615 mta = mta + ETHER_ADDR_LEN;
3616 nmcast--;
3617
3618 if (nmcast == 0)
3619 break;
3620 }
3621 }
3622 return 0;
3623 }
3624
3625 static int
3626 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3627 {
3628 int i;
3629
3630 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3631 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
3632 ha->hw.mcast[i].addr[0] = 0;
3633 ha->hw.mcast[i].addr[1] = 0;
3634 ha->hw.mcast[i].addr[2] = 0;
3635 ha->hw.mcast[i].addr[3] = 0;
3636 ha->hw.mcast[i].addr[4] = 0;
3637 ha->hw.mcast[i].addr[5] = 0;
3638
3639 ha->hw.nmcast--;
3640
3641 mta = mta + ETHER_ADDR_LEN;
3642 nmcast--;
3643
3644 if (nmcast == 0)
3645 break;
3646 }
3647 }
3648 return 0;
3649 }
3650
3651 /*
3652 * Name: ql_hw_set_multi
3653  * Function: Sets the Multicast Addresses provided by the host O.S. into the
3654 * hardware (for the given interface)
3655 */
3656 int
3657 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
3658 uint32_t add_mac)
3659 {
3660 uint8_t *mta = mcast_addr;
3661 int i;
3662 int ret = 0;
3663 uint32_t count = 0;
3664 uint8_t *mcast;
3665
3666 mcast = ha->hw.mac_addr_arr;
3667 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3668
3669 for (i = 0; i < mcnt; i++) {
3670 if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
3671 if (add_mac) {
3672 if (qla_hw_mac_addr_present(ha, mta) != 0) {
3673 bcopy(mta, mcast, ETHER_ADDR_LEN);
3674 mcast = mcast + ETHER_ADDR_LEN;
3675 count++;
3676 }
3677 } else {
3678 if (qla_hw_mac_addr_present(ha, mta) == 0) {
3679 bcopy(mta, mcast, ETHER_ADDR_LEN);
3680 mcast = mcast + ETHER_ADDR_LEN;
3681 count++;
3682 }
3683 }
3684 }
3685 if (count == Q8_MAX_MAC_ADDRS) {
3686 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3687 add_mac, count)) {
3688 device_printf(ha->pci_dev, "%s: failed\n",
3689 __func__);
3690 return (-1);
3691 }
3692
3693 if (add_mac) {
3694 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
3695 count);
3696 } else {
3697 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
3698 count);
3699 }
3700
3701 count = 0;
3702 mcast = ha->hw.mac_addr_arr;
3703 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3704 }
3705
3706 mta += Q8_MAC_ADDR_LEN;
3707 }
3708
3709 if (count) {
3710 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
3711 count)) {
3712 device_printf(ha->pci_dev, "%s: failed\n", __func__);
3713 return (-1);
3714 }
3715 if (add_mac) {
3716 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
3717 } else {
3718 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
3719 }
3720 }
3721
3722 return (ret);
3723 }
3724
3725 /*
3726 * Name: ql_hw_tx_done_locked
3727 * Function: Handle Transmit Completions
3728 */
3729 void
3730 ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
3731 {
3732 qla_tx_buf_t *txb;
3733 qla_hw_t *hw = &ha->hw;
3734 uint32_t comp_idx, comp_count = 0;
3735 qla_hw_tx_cntxt_t *hw_tx_cntxt;
3736
3737 hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3738
3739 /* retrieve index of last entry in tx ring completed */
3740 comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
3741
3742 while (comp_idx != hw_tx_cntxt->txr_comp) {
3743 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
3744
3745 hw_tx_cntxt->txr_comp++;
3746 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
3747 hw_tx_cntxt->txr_comp = 0;
3748
3749 comp_count++;
3750
3751 if (txb->m_head) {
3752 if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
3753
3754 bus_dmamap_sync(ha->tx_tag, txb->map,
3755 BUS_DMASYNC_POSTWRITE);
3756 bus_dmamap_unload(ha->tx_tag, txb->map);
3757 m_freem(txb->m_head);
3758
3759 txb->m_head = NULL;
3760 }
3761 }
3762
3763 hw_tx_cntxt->txr_free += comp_count;
3764
3765 if (hw_tx_cntxt->txr_free > NUM_TX_DESCRIPTORS)
3766 		device_printf(ha->pci_dev, "%s [%d]: txr_idx = %d txr_free = %d "
3767 		    "txr_next = %d txr_comp = %d\n", __func__, __LINE__,
3768 txr_idx, hw_tx_cntxt->txr_free,
3769 hw_tx_cntxt->txr_next, hw_tx_cntxt->txr_comp);
3770
3771 QL_ASSERT(ha, (hw_tx_cntxt->txr_free <= NUM_TX_DESCRIPTORS), \
3772 ("%s [%d]: txr_idx = %d txr_free = %d txr_next = %d txr_comp = %d\n",\
3773 __func__, __LINE__, txr_idx, hw_tx_cntxt->txr_free, \
3774 hw_tx_cntxt->txr_next, hw_tx_cntxt->txr_comp));
3775
3776 return;
3777 }
3778
3779 void
3780 ql_update_link_state(qla_host_t *ha)
3781 {
3782 uint32_t link_state = 0;
3783 uint32_t prev_link_state;
3784
3785 prev_link_state = ha->hw.link_up;
3786
3787 if (if_getdrvflags(ha->ifp) & IFF_DRV_RUNNING) {
3788 link_state = READ_REG32(ha, Q8_LINK_STATE);
3789
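		/*
		 * Q8_LINK_STATE packs a 4 bit state per PCI function; a value
		 * of 1 in this function's nibble is treated as link up.
		 */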
3790 if (ha->pci_func == 0) {
3791 link_state = (((link_state & 0xF) == 1)? 1 : 0);
3792 } else {
3793 link_state = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
3794 }
3795 }
3796
3797 atomic_store_rel_8(&ha->hw.link_up, (uint8_t)link_state);
3798
3799 if (prev_link_state != ha->hw.link_up) {
3800 if (ha->hw.link_up) {
3801 if_link_state_change(ha->ifp, LINK_STATE_UP);
3802 } else {
3803 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3804 }
3805 }
3806 return;
3807 }
3808
3809 int
3810 ql_hw_check_health(qla_host_t *ha)
3811 {
3812 uint32_t val;
3813
3814 ha->hw.health_count++;
3815
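	/*
	 * Only perform the temperature and firmware heartbeat checks once
	 * every 500 invocations of this routine.
	 */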
3816 if (ha->hw.health_count < 500)
3817 return 0;
3818
3819 ha->hw.health_count = 0;
3820
3821 val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3822
3823 if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3824 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3825 device_printf(ha->pci_dev, "%s: Temperature Alert"
3826 " at ts_usecs %ld ts_reg = 0x%08x\n",
3827 __func__, qla_get_usec_timestamp(), val);
3828
3829 if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_TEMP_FAILURE)
3830 ha->hw.sp_log_stop = -1;
3831
3832 QL_INITIATE_RECOVERY(ha);
3833 return -1;
3834 }
3835
3836 val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3837
3838 if ((val != ha->hw.hbeat_value) &&
3839 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3840 ha->hw.hbeat_value = val;
3841 ha->hw.hbeat_failure = 0;
3842 return 0;
3843 }
3844
3845 ha->hw.hbeat_failure++;
3846
3847 if ((ha->dbg_level & 0x8000) && (ha->hw.hbeat_failure == 1))
3848 		device_printf(ha->pci_dev, "%s: Heartbeat Failure 1 [0x%08x]\n",
3849 __func__, val);
3850 if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */
3851 return 0;
3852 else {
3853 uint32_t peg_halt_status1;
3854 uint32_t peg_halt_status2;
3855
3856 peg_halt_status1 = READ_REG32(ha, Q8_PEG_HALT_STATUS1);
3857 peg_halt_status2 = READ_REG32(ha, Q8_PEG_HALT_STATUS2);
3858
3859 device_printf(ha->pci_dev,
3860 		    "%s: Heartbeat Failure at ts_usecs = %ld "
3861 "fw_heart_beat = 0x%08x "
3862 "peg_halt_status1 = 0x%08x "
3863 "peg_halt_status2 = 0x%08x\n",
3864 __func__, qla_get_usec_timestamp(), val,
3865 peg_halt_status1, peg_halt_status2);
3866
3867 if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_HBEAT_FAILURE)
3868 ha->hw.sp_log_stop = -1;
3869 }
3870 QL_INITIATE_RECOVERY(ha);
3871
3872 return -1;
3873 }
3874
3875 static int
3876 qla_init_nic_func(qla_host_t *ha)
3877 {
3878 device_t dev;
3879 q80_init_nic_func_t *init_nic;
3880 q80_init_nic_func_rsp_t *init_nic_rsp;
3881 uint32_t err;
3882
3883 dev = ha->pci_dev;
3884
3885 init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3886 bzero(init_nic, sizeof(q80_init_nic_func_t));
3887
3888 init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3889 init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3890 init_nic->count_version |= Q8_MBX_CMD_VERSION;
3891
3892 init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3893 init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3894 init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3895
3896 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3897 if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3898 (sizeof (q80_init_nic_func_t) >> 2),
3899 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3900 device_printf(dev, "%s: failed\n", __func__);
3901 return -1;
3902 }
3903
3904 init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3905 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3906
3907 err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3908
3909 if (err) {
3910 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3911 } else {
3912 device_printf(dev, "%s: successful\n", __func__);
3913 }
3914
3915 return 0;
3916 }
3917
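/*
 * Issue the Stop NIC Function mailbox command and deregister the DCBX
 * and SFP change async event notifications.
 */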
3918 static int
3919 qla_stop_nic_func(qla_host_t *ha)
3920 {
3921 device_t dev;
3922 q80_stop_nic_func_t *stop_nic;
3923 q80_stop_nic_func_rsp_t *stop_nic_rsp;
3924 uint32_t err;
3925
3926 dev = ha->pci_dev;
3927
3928 stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3929 bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3930
3931 stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3932 stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3933 stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3934
3935 stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3936 stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3937
3938 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3939 if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3940 (sizeof (q80_stop_nic_func_t) >> 2),
3941 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3942 device_printf(dev, "%s: failed\n", __func__);
3943 return -1;
3944 }
3945
3946 stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3947 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3948
3949 err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3950
3951 if (err) {
3952 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3953 }
3954
3955 return 0;
3956 }
3957
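/*
 * Query the firmware DCBX capabilities via the mailbox interface; the
 * request and response are dumped for debugging and any error status in
 * the response is logged.
 */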
3958 static int
3959 qla_query_fw_dcbx_caps(qla_host_t *ha)
3960 {
3961 device_t dev;
3962 q80_query_fw_dcbx_caps_t *fw_dcbx;
3963 q80_query_fw_dcbx_caps_rsp_t *fw_dcbx_rsp;
3964 uint32_t err;
3965
3966 dev = ha->pci_dev;
3967
3968 fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3969 bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3970
3971 fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3972 fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3973 fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3974
3975 ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3976 if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3977 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3978 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3979 device_printf(dev, "%s: failed\n", __func__);
3980 return -1;
3981 }
3982
3983 fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3984 ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3985 sizeof (q80_query_fw_dcbx_caps_rsp_t));
3986
3987 err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3988
3989 if (err) {
3990 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3991 }
3992
3993 return 0;
3994 }
3995
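/*
 * Acknowledge an Inter-Driver Communication (IDC) async event by echoing
 * mailbox registers 1-4 back to the firmware, then wait up to 30 seconds
 * (300 x 100ms) for the completion flag (hw.imd_compl) to be set.
 */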
3996 static int
3997 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3998 uint32_t aen_mb3, uint32_t aen_mb4)
3999 {
4000 device_t dev;
4001 q80_idc_ack_t *idc_ack;
4002 q80_idc_ack_rsp_t *idc_ack_rsp;
4003 uint32_t err;
4004 int count = 300;
4005
4006 dev = ha->pci_dev;
4007
4008 idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
4009 bzero(idc_ack, sizeof(q80_idc_ack_t));
4010
4011 idc_ack->opcode = Q8_MBX_IDC_ACK;
4012 idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
4013 idc_ack->count_version |= Q8_MBX_CMD_VERSION;
4014
4015 idc_ack->aen_mb1 = aen_mb1;
4016 idc_ack->aen_mb2 = aen_mb2;
4017 idc_ack->aen_mb3 = aen_mb3;
4018 idc_ack->aen_mb4 = aen_mb4;
4019
4020 ha->hw.imd_compl= 0;
4021
4022 if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
4023 (sizeof (q80_idc_ack_t) >> 2),
4024 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
4025 device_printf(dev, "%s: failed\n", __func__);
4026 return -1;
4027 }
4028
4029 idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
4030
4031 err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
4032
4033 if (err) {
4034 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4035 return(-1);
4036 }
4037
4038 while (count && !ha->hw.imd_compl) {
4039 qla_mdelay(__func__, 100);
4040 count--;
4041 }
4042
4043 if (!count)
4044 return -1;
4045 else
4046 device_printf(dev, "%s: count %d\n", __func__, count);
4047
4048 return (0);
4049 }
4050
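/*
 * Set the physical port configuration (pause and DCBX bits).  If the
 * firmware returns an intermediate IDC response, wait up to 30 seconds
 * for the completion flag before declaring failure.
 */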
4051 static int
4052 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
4053 {
4054 device_t dev;
4055 q80_set_port_cfg_t *pcfg;
4056 q80_set_port_cfg_rsp_t *pfg_rsp;
4057 uint32_t err;
4058 int count = 300;
4059
4060 dev = ha->pci_dev;
4061
4062 pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
4063 bzero(pcfg, sizeof(q80_set_port_cfg_t));
4064
4065 pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
4066 pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
4067 pcfg->count_version |= Q8_MBX_CMD_VERSION;
4068
4069 pcfg->cfg_bits = cfg_bits;
4070
4071 device_printf(dev, "%s: cfg_bits"
4072 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4073 " [0x%x, 0x%x, 0x%x]\n", __func__,
4074 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
4075 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4076 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
4077
4078 ha->hw.imd_compl= 0;
4079
4080 if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
4081 (sizeof (q80_set_port_cfg_t) >> 2),
4082 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
4083 device_printf(dev, "%s: failed\n", __func__);
4084 return -1;
4085 }
4086
4087 pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
4088
4089 err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
4090
4091 if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
4092 while (count && !ha->hw.imd_compl) {
4093 qla_mdelay(__func__, 100);
4094 count--;
4095 }
4096 if (count) {
4097 device_printf(dev, "%s: count %d\n", __func__, count);
4098
4099 err = 0;
4100 }
4101 }
4102
4103 if (err) {
4104 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4105 return(-1);
4106 }
4107
4108 return (0);
4109 }
4110
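/*
 * Return the size of the firmware minidump template.  When built without
 * QL_LDFLASH_FW the size comes from the built-in ql83xx_minidump image
 * header; otherwise it is queried from the firmware via a mailbox command.
 */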
4111 static int
4112 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
4113 {
4114 uint32_t err;
4115 device_t dev = ha->pci_dev;
4116 q80_config_md_templ_size_t *md_size;
4117 q80_config_md_templ_size_rsp_t *md_size_rsp;
4118
4119 #ifndef QL_LDFLASH_FW
4120
4121 ql_minidump_template_hdr_t *hdr;
4122
4123 hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
4124 *size = hdr->size_of_template;
4125 return (0);
4126
4127 #endif /* #ifndef QL_LDFLASH_FW */
4128
4129 md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
4130 bzero(md_size, sizeof(q80_config_md_templ_size_t));
4131
4132 md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
4133 md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
4134 md_size->count_version |= Q8_MBX_CMD_VERSION;
4135
4136 if (qla_mbx_cmd(ha, (uint32_t *) md_size,
4137 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
4138 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
4139 device_printf(dev, "%s: failed\n", __func__);
4140
4141 return (-1);
4142 }
4143
4144 md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
4145
4146 err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
4147
4148 if (err) {
4149 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4150 return(-1);
4151 }
4152
4153 *size = md_size_rsp->templ_size;
4154
4155 return (0);
4156 }
4157
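/*
 * Retrieve the current physical port configuration via the mailbox
 * interface, log the pause/DCBX settings and return the raw cfg_bits.
 */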
4158 static int
4159 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
4160 {
4161 device_t dev;
4162 q80_get_port_cfg_t *pcfg;
4163 q80_get_port_cfg_rsp_t *pcfg_rsp;
4164 uint32_t err;
4165
4166 dev = ha->pci_dev;
4167
4168 pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
4169 bzero(pcfg, sizeof(q80_get_port_cfg_t));
4170
4171 pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
4172 pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
4173 pcfg->count_version |= Q8_MBX_CMD_VERSION;
4174
4175 if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
4176 (sizeof (q80_get_port_cfg_t) >> 2),
4177 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
4178 device_printf(dev, "%s: failed\n", __func__);
4179 return -1;
4180 }
4181
4182 pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
4183
4184 err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
4185
4186 if (err) {
4187 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4188 return(-1);
4189 }
4190
4191 device_printf(dev, "%s: [cfg_bits, port type]"
4192 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4193 " [0x%x, 0x%x, 0x%x]\n", __func__,
4194 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
4195 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
4196 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4197 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
4198 );
4199
4200 *cfg_bits = pcfg_rsp->cfg_bits;
4201
4202 return (0);
4203 }
4204
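/*
 * Returns 0 if the mbuf carries a TCP segment with source or destination
 * port 3260 (iSCSI), -1 otherwise.  Handles VLAN tagged frames and both
 * IPv4 and IPv6, copying header bytes out of the mbuf when they are not
 * contiguous.
 */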
4205 int
4206 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
4207 {
4208 struct ether_vlan_header *eh;
4209 uint16_t etype;
4210 struct ip *ip = NULL;
4211 struct ip6_hdr *ip6 = NULL;
4212 struct tcphdr *th = NULL;
4213 uint32_t hdrlen;
4214 uint32_t offset;
4215 uint8_t buf[sizeof(struct ip6_hdr)];
4216
4217 eh = mtod(mp, struct ether_vlan_header *);
4218
4219 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4220 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4221 etype = ntohs(eh->evl_proto);
4222 } else {
4223 hdrlen = ETHER_HDR_LEN;
4224 etype = ntohs(eh->evl_encap_proto);
4225 }
4226
4227 if (etype == ETHERTYPE_IP) {
4228 offset = (hdrlen + sizeof (struct ip));
4229
4230 if (mp->m_len >= offset) {
4231 ip = (struct ip *)(mp->m_data + hdrlen);
4232 } else {
4233 m_copydata(mp, hdrlen, sizeof (struct ip), buf);
4234 ip = (struct ip *)buf;
4235 }
4236
4237 if (ip->ip_p == IPPROTO_TCP) {
4238 hdrlen += ip->ip_hl << 2;
4239 offset = hdrlen + 4;
4240
4241 if (mp->m_len >= offset) {
4242 th = (struct tcphdr *)(mp->m_data + hdrlen);
4243 } else {
4244 m_copydata(mp, hdrlen, 4, buf);
4245 th = (struct tcphdr *)buf;
4246 }
4247 }
4248
4249 } else if (etype == ETHERTYPE_IPV6) {
4250 offset = (hdrlen + sizeof (struct ip6_hdr));
4251
4252 if (mp->m_len >= offset) {
4253 ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
4254 } else {
4255 m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
4256 ip6 = (struct ip6_hdr *)buf;
4257 }
4258
4259 if (ip6->ip6_nxt == IPPROTO_TCP) {
4260 hdrlen += sizeof(struct ip6_hdr);
4261 offset = hdrlen + 4;
4262
4263 if (mp->m_len >= offset) {
4264 th = (struct tcphdr *)(mp->m_data + hdrlen);
4265 } else {
4266 m_copydata(mp, hdrlen, 4, buf);
4267 th = (struct tcphdr *)buf;
4268 }
4269 }
4270 }
4271
4272 if (th != NULL) {
4273 if ((th->th_sport == htons(3260)) ||
4274 (th->th_dport == htons(3260)))
4275 return 0;
4276 }
4277 return (-1);
4278 }
4279
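/*
 * Handle deferred async events; currently only IDC request events
 * (aen_mb0 == 0x8101) are processed, by sending an IDC acknowledgement.
 */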
4280 void
4281 qla_hw_async_event(qla_host_t *ha)
4282 {
4283 switch (ha->hw.aen_mb0) {
4284 case 0x8101:
4285 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
4286 ha->hw.aen_mb3, ha->hw.aen_mb4);
4287
4288 break;
4289
4290 default:
4291 break;
4292 }
4293
4294 return;
4295 }
4296
4297 #ifdef QL_LDFLASH_FW
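/*
 * Ask the firmware to DMA the minidump template into the pre-allocated
 * minidump DMA buffer.
 */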
4298 static int
4299 ql_get_minidump_template(qla_host_t *ha)
4300 {
4301 uint32_t err;
4302 device_t dev = ha->pci_dev;
4303 q80_config_md_templ_cmd_t *md_templ;
4304 q80_config_md_templ_cmd_rsp_t *md_templ_rsp;
4305
4306 md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
4307 bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
4308
4309 md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
4310 md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
4311 md_templ->count_version |= Q8_MBX_CMD_VERSION;
4312
4313 md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
4314 md_templ->buff_size = ha->hw.dma_buf.minidump.size;
4315
4316 if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
4317 (sizeof(q80_config_md_templ_cmd_t) >> 2),
4318 ha->hw.mbox,
4319 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
4320 device_printf(dev, "%s: failed\n", __func__);
4321
4322 return (-1);
4323 }
4324
4325 md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
4326
4327 err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
4328
4329 if (err) {
4330 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4331 return (-1);
4332 }
4333
4334 return (0);
4335
4336 }
4337 #endif /* #ifdef QL_LDFLASH_FW */
4338
4339 /*
4340 * Minidump related functionality
4341 */
4342
4343 static int ql_parse_template(qla_host_t *ha);
4344
4345 static uint32_t ql_rdcrb(qla_host_t *ha,
4346 ql_minidump_entry_rdcrb_t *crb_entry,
4347 uint32_t * data_buff);
4348
4349 static uint32_t ql_pollrd(qla_host_t *ha,
4350 ql_minidump_entry_pollrd_t *entry,
4351 uint32_t * data_buff);
4352
4353 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
4354 ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
4355 uint32_t *data_buff);
4356
4357 static uint32_t ql_L2Cache(qla_host_t *ha,
4358 ql_minidump_entry_cache_t *cacheEntry,
4359 uint32_t * data_buff);
4360
4361 static uint32_t ql_L1Cache(qla_host_t *ha,
4362 ql_minidump_entry_cache_t *cacheEntry,
4363 uint32_t *data_buff);
4364
4365 static uint32_t ql_rdocm(qla_host_t *ha,
4366 ql_minidump_entry_rdocm_t *ocmEntry,
4367 uint32_t *data_buff);
4368
4369 static uint32_t ql_rdmem(qla_host_t *ha,
4370 ql_minidump_entry_rdmem_t *mem_entry,
4371 uint32_t *data_buff);
4372
4373 static uint32_t ql_rdrom(qla_host_t *ha,
4374 ql_minidump_entry_rdrom_t *romEntry,
4375 uint32_t *data_buff);
4376
4377 static uint32_t ql_rdmux(qla_host_t *ha,
4378 ql_minidump_entry_mux_t *muxEntry,
4379 uint32_t *data_buff);
4380
4381 static uint32_t ql_rdmux2(qla_host_t *ha,
4382 ql_minidump_entry_mux2_t *muxEntry,
4383 uint32_t *data_buff);
4384
4385 static uint32_t ql_rdqueue(qla_host_t *ha,
4386 ql_minidump_entry_queue_t *queueEntry,
4387 uint32_t *data_buff);
4388
4389 static uint32_t ql_cntrl(qla_host_t *ha,
4390 ql_minidump_template_hdr_t *template_hdr,
4391 ql_minidump_entry_cntrl_t *crbEntry);
4392
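/*
 * Compute the capture buffer size by summing the template header's
 * capture_size_array entries whose bits are selected by the configured
 * capture mask.
 */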
4393 static uint32_t
4394 ql_minidump_size(qla_host_t *ha)
4395 {
4396 uint32_t i, k;
4397 uint32_t size = 0;
4398 ql_minidump_template_hdr_t *hdr;
4399
4400 hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
4401
4402 i = 0x2;
4403
4404 for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
4405 if (i & ha->hw.mdump_capture_mask)
4406 size += hdr->capture_size_array[k];
4407 i = i << 1;
4408 }
4409 return (size);
4410 }
4411
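/*
 * Free the minidump capture buffer, if allocated.
 */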
4412 static void
4413 ql_free_minidump_buffer(qla_host_t *ha)
4414 {
4415 if (ha->hw.mdump_buffer != NULL) {
4416 free(ha->hw.mdump_buffer, M_QLA83XXBUF);
4417 ha->hw.mdump_buffer = NULL;
4418 ha->hw.mdump_buffer_size = 0;
4419 }
4420 return;
4421 }
4422
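/*
 * Allocate the minidump capture buffer, sized via ql_minidump_size().
 */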
4423 static int
4424 ql_alloc_minidump_buffer(qla_host_t *ha)
4425 {
4426 ha->hw.mdump_buffer_size = ql_minidump_size(ha);
4427
4428 if (!ha->hw.mdump_buffer_size)
4429 return (-1);
4430
4431 ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
4432 M_NOWAIT);
4433
4434 if (ha->hw.mdump_buffer == NULL)
4435 return (-1);
4436
4437 return (0);
4438 }
4439
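/*
 * Free the in-memory working copy of the minidump template, if allocated.
 */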
4440 static void
4441 ql_free_minidump_template_buffer(qla_host_t *ha)
4442 {
4443 if (ha->hw.mdump_template != NULL) {
4444 free(ha->hw.mdump_template, M_QLA83XXBUF);
4445 ha->hw.mdump_template = NULL;
4446 ha->hw.mdump_template_size = 0;
4447 }
4448 return;
4449 }
4450
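/*
 * Allocate a buffer to hold a working copy of the minidump template.
 */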
4451 static int
4452 ql_alloc_minidump_template_buffer(qla_host_t *ha)
4453 {
4454 ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
4455
4456 ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
4457 M_QLA83XXBUF, M_NOWAIT);
4458
4459 if (ha->hw.mdump_template == NULL)
4460 return (-1);
4461
4462 return (0);
4463 }
4464
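/*
 * Allocate both the template copy and the capture buffer; if the capture
 * buffer allocation fails the template buffer is released again.
 */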
4465 static int
4466 ql_alloc_minidump_buffers(qla_host_t *ha)
4467 {
4468 int ret;
4469
4470 ret = ql_alloc_minidump_template_buffer(ha);
4471
4472 if (ret)
4473 return (ret);
4474
4475 ret = ql_alloc_minidump_buffer(ha);
4476
4477 if (ret)
4478 ql_free_minidump_template_buffer(ha);
4479
4480 return (ret);
4481 }
4482
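/*
 * Sum the template as 32-bit words, fold the carries back in and return
 * the one's complement; the caller treats a zero return as a valid
 * template.
 */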
4483 static uint32_t
4484 ql_validate_minidump_checksum(qla_host_t *ha)
4485 {
4486 uint64_t sum = 0;
4487 int count;
4488 uint32_t *template_buff;
4489
4490 count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
4491 template_buff = ha->hw.dma_buf.minidump.dma_b;
4492
4493 while (count-- > 0) {
4494 sum += *template_buff++;
4495 }
4496
4497 while (sum >> 32) {
4498 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
4499 }
4500
4501 return (~sum);
4502 }
4503
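/*
 * Fetch the minidump template (from the firmware when QL_LDFLASH_FW is
 * defined, otherwise from the built-in image), validate its checksum and
 * allocate the capture buffers.  Sets hw.mdump_init on success and tears
 * everything down on failure.
 */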
4504 int
4505 ql_minidump_init(qla_host_t *ha)
4506 {
4507 int ret = 0;
4508 uint32_t template_size = 0;
4509 device_t dev = ha->pci_dev;
4510
4511 /*
4512 * Get Minidump Template Size
4513 */
4514 ret = qla_get_minidump_tmplt_size(ha, &template_size);
4515
4516 if (ret || (template_size == 0)) {
4517 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
4518 template_size);
4519 return (-1);
4520 }
4521
4522 /*
4523 * Allocate Memory for Minidump Template
4524 */
4525
4526 ha->hw.dma_buf.minidump.alignment = 8;
4527 ha->hw.dma_buf.minidump.size = template_size;
4528
4529 #ifdef QL_LDFLASH_FW
4530 if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
4531 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
4532
4533 return (-1);
4534 }
4535 ha->hw.dma_buf.flags.minidump = 1;
4536
4537 /*
4538 * Retrieve Minidump Template
4539 */
4540 ret = ql_get_minidump_template(ha);
4541 #else
4542 ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
4543
4544 #endif /* #ifdef QL_LDFLASH_FW */
4545
4546 if (ret == 0) {
4547 ret = ql_validate_minidump_checksum(ha);
4548
4549 if (ret == 0) {
4550 ret = ql_alloc_minidump_buffers(ha);
4551
4552 if (ret == 0)
4553 ha->hw.mdump_init = 1;
4554 else
4555 device_printf(dev,
4556 "%s: ql_alloc_minidump_buffers"
4557 " failed\n", __func__);
4558 } else {
4559 device_printf(dev, "%s: ql_validate_minidump_checksum"
4560 " failed\n", __func__);
4561 }
4562 } else {
4563 device_printf(dev, "%s: ql_get_minidump_template failed\n",
4564 __func__);
4565 }
4566
4567 if (ret)
4568 ql_minidump_free(ha);
4569
4570 return (ret);
4571 }
4572
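/*
 * Release the template DMA buffer and the capture buffers and clear
 * hw.mdump_init.
 */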
4573 static void
4574 ql_minidump_free(qla_host_t *ha)
4575 {
4576 ha->hw.mdump_init = 0;
4577 if (ha->hw.dma_buf.flags.minidump) {
4578 ha->hw.dma_buf.flags.minidump = 0;
4579 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
4580 }
4581
4582 ql_free_minidump_template_buffer(ha);
4583 ql_free_minidump_buffer(ha);
4584
4585 return;
4586 }
4587
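/*
 * Capture a minidump: stop the firmware sequence, copy the template into
 * the working buffer, walk its entries via ql_parse_template() and
 * restart the sequence.  Only one capture is taken until hw.mdump_done
 * is cleared.
 */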
4588 void
4589 ql_minidump(qla_host_t *ha)
4590 {
4591 if (!ha->hw.mdump_init)
4592 return;
4593
4594 if (ha->hw.mdump_done)
4595 return;
4596 ha->hw.mdump_usec_ts = qla_get_usec_timestamp();
4597 ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
4598
4599 bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
4600 bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
4601
4602 bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
4603 ha->hw.mdump_template_size);
4604
4605 ql_parse_template(ha);
4606
4607 ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
4608
4609 ha->hw.mdump_done = 1;
4610
4611 return;
4612 }
4613
4614 /*
4615 * helper routines
4616 */
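/*
 * Record the actual captured size in the entry header and flag a size
 * error when it differs from the expected capture size.
 */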
4617 static void
4618 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
4619 {
4620 if (esize != entry->hdr.entry_capture_size) {
4621 entry->hdr.entry_capture_size = esize;
4622 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
4623 }
4624 return;
4625 }
4626
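/*
 * Walk the minidump template entries, dispatch each entry type to its
 * capture handler and accumulate the captured data in the minidump
 * buffer.  Entries that do not match the capture mask, or that would
 * overflow the buffer, are marked as skipped.
 */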
4627 static int
4628 ql_parse_template(qla_host_t *ha)
4629 {
4630 uint32_t num_of_entries, buff_level, e_cnt, esize;
4631 uint32_t rv = 0;
4632 char *dump_buff, *dbuff;
4633 int sane_start = 0, sane_end = 0;
4634 ql_minidump_template_hdr_t *template_hdr;
4635 ql_minidump_entry_t *entry;
4636 uint32_t capture_mask;
4637 uint32_t dump_size;
4638
4639 /* Setup parameters */
4640 template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4641
4642 if (template_hdr->entry_type == TLHDR)
4643 sane_start = 1;
4644
4645 dump_buff = (char *) ha->hw.mdump_buffer;
4646
4647 num_of_entries = template_hdr->num_of_entries;
4648
4649 entry = (ql_minidump_entry_t *) ((char *)template_hdr
4650 + template_hdr->first_entry_offset );
4651
4652 template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4653 template_hdr->ocm_window_array[ha->pci_func];
4654 template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4655
4656 capture_mask = ha->hw.mdump_capture_mask;
4657 dump_size = ha->hw.mdump_buffer_size;
4658
4659 template_hdr->driver_capture_mask = capture_mask;
4660
4661 QL_DPRINT80(ha, (ha->pci_dev,
4662 "%s: sane_start = %d num_of_entries = %d "
4663 "capture_mask = 0x%x dump_size = %d \n",
4664 __func__, sane_start, num_of_entries, capture_mask, dump_size));
4665
4666 for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4667 /*
4668 * If the entry's capture_mask does not match the requested capture
4669 * mask, mark it skipped in driver_flags and move on to the next entry.
4670 */
4671
4672 if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4673 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4674 entry = (ql_minidump_entry_t *) ((char *) entry
4675 + entry->hdr.entry_size);
4676 continue;
4677 }
4678
4679 /*
4680 * This is ONLY needed in implementations where
4681 * the capture buffer allocated is too small to capture
4682 * all of the required entries for a given capture mask.
4683 * We need to empty the buffer contents to a file,
4684 * if possible, before processing the next entry.
4685 * If the buff_full_flag is set, no further capture will happen
4686 * and all remaining non-control entries will be skipped.
4687 */
4688 if (entry->hdr.entry_capture_size != 0) {
4689 if ((buff_level + entry->hdr.entry_capture_size) >
4690 dump_size) {
4691 /* Try to recover by emptying buffer to file */
4692 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4693 entry = (ql_minidump_entry_t *) ((char *) entry
4694 + entry->hdr.entry_size);
4695 continue;
4696 }
4697 }
4698
4699 /*
4700 * Decode the entry type and process it accordingly
4701 */
4702
4703 switch (entry->hdr.entry_type) {
4704 case RDNOP:
4705 break;
4706
4707 case RDEND:
4708 sane_end++;
4709 break;
4710
4711 case RDCRB:
4712 dbuff = dump_buff + buff_level;
4713 esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
4714 ql_entry_err_chk(entry, esize);
4715 buff_level += esize;
4716 break;
4717
4718 case POLLRD:
4719 dbuff = dump_buff + buff_level;
4720 esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
4721 ql_entry_err_chk(entry, esize);
4722 buff_level += esize;
4723 break;
4724
4725 case POLLRDMWR:
4726 dbuff = dump_buff + buff_level;
4727 esize = ql_pollrd_modify_write(ha, (void *)entry,
4728 (void *)dbuff);
4729 ql_entry_err_chk(entry, esize);
4730 buff_level += esize;
4731 break;
4732
4733 case L2ITG:
4734 case L2DTG:
4735 case L2DAT:
4736 case L2INS:
4737 dbuff = dump_buff + buff_level;
4738 esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
4739 if (esize == -1) {
4740 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4741 } else {
4742 ql_entry_err_chk(entry, esize);
4743 buff_level += esize;
4744 }
4745 break;
4746
4747 case L1DAT:
4748 case L1INS:
4749 dbuff = dump_buff + buff_level;
4750 esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
4751 ql_entry_err_chk(entry, esize);
4752 buff_level += esize;
4753 break;
4754
4755 case RDOCM:
4756 dbuff = dump_buff + buff_level;
4757 esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
4758 ql_entry_err_chk(entry, esize);
4759 buff_level += esize;
4760 break;
4761
4762 case RDMEM:
4763 dbuff = dump_buff + buff_level;
4764 esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
4765 ql_entry_err_chk(entry, esize);
4766 buff_level += esize;
4767 break;
4768
4769 case BOARD:
4770 case RDROM:
4771 dbuff = dump_buff + buff_level;
4772 esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
4773 ql_entry_err_chk(entry, esize);
4774 buff_level += esize;
4775 break;
4776
4777 case RDMUX:
4778 dbuff = dump_buff + buff_level;
4779 esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
4780 ql_entry_err_chk(entry, esize);
4781 buff_level += esize;
4782 break;
4783
4784 case RDMUX2:
4785 dbuff = dump_buff + buff_level;
4786 esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4787 ql_entry_err_chk(entry, esize);
4788 buff_level += esize;
4789 break;
4790
4791 case QUEUE:
4792 dbuff = dump_buff + buff_level;
4793 esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4794 ql_entry_err_chk(entry, esize);
4795 buff_level += esize;
4796 break;
4797
4798 case CNTRL:
4799 if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4800 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4801 }
4802 break;
4803 default:
4804 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4805 break;
4806 }
4807 /* next entry in the template */
4808 entry = (ql_minidump_entry_t *) ((char *) entry
4809 + entry->hdr.entry_size);
4810 }
4811
4812 if (!sane_start || (sane_end > 1)) {
4813 device_printf(ha->pci_dev,
4814 "\n%s: Template configuration error. Check Template\n",
4815 __func__);
4816 }
4817
4818 QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4819 __func__, template_hdr->num_of_entries));
4820
4821 return 0;
4822 }
4823
4824 /*
4825 * Read CRB operation.
4826 */
4827 static uint32_t
4828 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t *crb_entry,
4829 uint32_t * data_buff)
4830 {
4831 int loop_cnt;
4832 int ret;
4833 uint32_t op_count, addr, stride, value = 0;
4834
4835 addr = crb_entry->addr;
4836 op_count = crb_entry->op_count;
4837 stride = crb_entry->addr_stride;
4838
4839 for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4840 ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4841
4842 if (ret)
4843 return (0);
4844
4845 *data_buff++ = addr;
4846 *data_buff++ = value;
4847 addr = addr + stride;
4848 }
4849
4850 /*
4851 * Return the amount of data (in bytes) written to the capture buffer.
4852 */
4853 return (op_count * (2 * sizeof(uint32_t)));
4854 }
4855
4856 /*
4857 * Handle L2 Cache.
4858 */
4859
4860 static uint32_t
4861 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4862 uint32_t * data_buff)
4863 {
4864 int i, k;
4865 int loop_cnt;
4866 int ret;
4867
4868 uint32_t read_value;
4869 uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4870 uint32_t tag_value, read_cnt;
4871 volatile uint8_t cntl_value_r;
4872 long timeout;
4873 uint32_t data;
4874
4875 loop_cnt = cacheEntry->op_count;
4876
4877 read_addr = cacheEntry->read_addr;
4878 cntrl_addr = cacheEntry->control_addr;
4879 cntl_value_w = (uint32_t) cacheEntry->write_value;
4880
4881 tag_reg_addr = cacheEntry->tag_reg_addr;
4882
4883 tag_value = cacheEntry->init_tag_value;
4884 read_cnt = cacheEntry->read_addr_cnt;
4885
4886 for (i = 0; i < loop_cnt; i++) {
4887 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4888 if (ret)
4889 return (0);
4890
4891 if (cacheEntry->write_value != 0) {
4892
4893 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4894 &cntl_value_w, 0);
4895 if (ret)
4896 return (0);
4897 }
4898
4899 if (cacheEntry->poll_mask != 0) {
4900
4901 timeout = cacheEntry->poll_wait;
4902
4903 ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4904 if (ret)
4905 return (0);
4906
4907 cntl_value_r = (uint8_t)data;
4908
4909 while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
4910 if (timeout) {
4911 qla_mdelay(__func__, 1);
4912 timeout--;
4913 } else
4914 break;
4915
4916 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4917 &data, 1);
4918 if (ret)
4919 return (0);
4920
4921 cntl_value_r = (uint8_t)data;
4922 }
4923 if (!timeout) {
4924 /*
4925 * Report a timeout error:
4926 * core dump capture failed.
4927 * Skip remaining entries.
4928 * Write the buffer out to a file and use driver specific
4929 * fields in the template header to report this error.
4930 */
4931 return (-1);
4932 }
4933 }
4934
4935 addr = read_addr;
4936 for (k = 0; k < read_cnt; k++) {
4937 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4938 if (ret)
4939 return (0);
4940
4941 *data_buff++ = read_value;
4942 addr += cacheEntry->read_addr_stride;
4943 }
4944
4945 tag_value += cacheEntry->tag_value_stride;
4946 }
4947
4948 return (read_cnt * loop_cnt * sizeof(uint32_t));
4949 }
4950
4951 /*
4952 * Handle L1 Cache.
4953 */
4954
4955 static uint32_t
4956 ql_L1Cache(qla_host_t *ha,
4957 ql_minidump_entry_cache_t *cacheEntry,
4958 uint32_t *data_buff)
4959 {
4960 int ret;
4961 int i, k;
4962 int loop_cnt;
4963
4964 uint32_t read_value;
4965 uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
4966 uint32_t tag_value, read_cnt;
4967 uint32_t cntl_value_w;
4968
4969 loop_cnt = cacheEntry->op_count;
4970
4971 read_addr = cacheEntry->read_addr;
4972 cntrl_addr = cacheEntry->control_addr;
4973 cntl_value_w = (uint32_t) cacheEntry->write_value;
4974
4975 tag_reg_addr = cacheEntry->tag_reg_addr;
4976
4977 tag_value = cacheEntry->init_tag_value;
4978 read_cnt = cacheEntry->read_addr_cnt;
4979
4980 for (i = 0; i < loop_cnt; i++) {
4981 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4982 if (ret)
4983 return (0);
4984
4985 ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
4986 if (ret)
4987 return (0);
4988
4989 addr = read_addr;
4990 for (k = 0; k < read_cnt; k++) {
4991 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4992 if (ret)
4993 return (0);
4994
4995 *data_buff++ = read_value;
4996 addr += cacheEntry->read_addr_stride;
4997 }
4998
4999 tag_value += cacheEntry->tag_value_stride;
5000 }
5001
5002 return (read_cnt * loop_cnt * sizeof(uint32_t));
5003 }
5004
5005 /*
5006 * Reading OCM memory
5007 */
5008
5009 static uint32_t
5010 ql_rdocm(qla_host_t *ha,
5011 ql_minidump_entry_rdocm_t *ocmEntry,
5012 uint32_t *data_buff)
5013 {
5014 int i, loop_cnt;
5015 volatile uint32_t addr;
5016 volatile uint32_t value;
5017
5018 addr = ocmEntry->read_addr;
5019 loop_cnt = ocmEntry->op_count;
5020
5021 for (i = 0; i < loop_cnt; i++) {
5022 value = READ_REG32(ha, addr);
5023 *data_buff++ = value;
5024 addr += ocmEntry->read_addr_stride;
5025 }
5026 return (loop_cnt * sizeof(value));
5027 }
5028
5029 /*
5030 * Read memory
5031 */
5032
5033 static uint32_t
5034 ql_rdmem(qla_host_t *ha,
5035 ql_minidump_entry_rdmem_t *mem_entry,
5036 uint32_t *data_buff)
5037 {
5038 int ret;
5039 int i, loop_cnt;
5040 volatile uint32_t addr;
5041 q80_offchip_mem_val_t val;
5042
5043 addr = mem_entry->read_addr;
5044
5045 /* size in bytes / 16 */
5046 loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
5047
5048 for (i = 0; i < loop_cnt; i++) {
5049 ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
5050 if (ret)
5051 return (0);
5052
5053 *data_buff++ = val.data_lo;
5054 *data_buff++ = val.data_hi;
5055 *data_buff++ = val.data_ulo;
5056 *data_buff++ = val.data_uhi;
5057
5058 addr += (sizeof(uint32_t) * 4);
5059 }
5060
5061 return (loop_cnt * (sizeof(uint32_t) * 4));
5062 }
5063
5064 /*
5065 * Read Rom
5066 */
5067
5068 static uint32_t
5069 ql_rdrom(qla_host_t *ha,
5070 ql_minidump_entry_rdrom_t *romEntry,
5071 uint32_t *data_buff)
5072 {
5073 int ret;
5074 int i, loop_cnt;
5075 uint32_t addr;
5076 uint32_t value;
5077
5078 addr = romEntry->read_addr;
5079 loop_cnt = romEntry->read_data_size; /* This is size in bytes */
5080 loop_cnt /= sizeof(value);
5081
5082 for (i = 0; i < loop_cnt; i++) {
5083 ret = ql_rd_flash32(ha, addr, &value);
5084 if (ret)
5085 return (0);
5086
5087 *data_buff++ = value;
5088 addr += sizeof(value);
5089 }
5090
5091 return (loop_cnt * sizeof(value));
5092 }
5093
5094 /*
5095 * Read MUX data
5096 */
5097
5098 static uint32_t
5099 ql_rdmux(qla_host_t *ha,
5100 ql_minidump_entry_mux_t *muxEntry,
5101 uint32_t *data_buff)
5102 {
5103 int ret;
5104 int loop_cnt;
5105 uint32_t read_value, sel_value;
5106 uint32_t read_addr, select_addr;
5107
5108 select_addr = muxEntry->select_addr;
5109 sel_value = muxEntry->select_value;
5110 read_addr = muxEntry->read_addr;
5111
5112 for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
5113 ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
5114 if (ret)
5115 return (0);
5116
5117 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5118 if (ret)
5119 return (0);
5120
5121 *data_buff++ = sel_value;
5122 *data_buff++ = read_value;
5123
5124 sel_value += muxEntry->select_value_stride;
5125 }
5126
5127 return (loop_cnt * (2 * sizeof(uint32_t)));
5128 }
5129
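/*
 * Read MUX data using a two-stage select: the masked select value is
 * written to a second select register before each read.
 */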
5130 static uint32_t
5131 ql_rdmux2(qla_host_t *ha,
5132 ql_minidump_entry_mux2_t *muxEntry,
5133 uint32_t *data_buff)
5134 {
5135 int ret;
5136 int loop_cnt;
5137
5138 uint32_t select_addr_1, select_addr_2;
5139 uint32_t select_value_1, select_value_2;
5140 uint32_t select_value_count, select_value_mask;
5141 uint32_t read_addr, read_value;
5142
5143 select_addr_1 = muxEntry->select_addr_1;
5144 select_addr_2 = muxEntry->select_addr_2;
5145 select_value_1 = muxEntry->select_value_1;
5146 select_value_2 = muxEntry->select_value_2;
5147 select_value_count = muxEntry->select_value_count;
5148 select_value_mask = muxEntry->select_value_mask;
5149
5150 read_addr = muxEntry->read_addr;
5151
5152 for (loop_cnt = 0; loop_cnt < select_value_count; loop_cnt++) {
5153 uint32_t temp_sel_val;
5154
5155 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
5156 if (ret)
5157 return (0);
5158
5159 temp_sel_val = select_value_1 & select_value_mask;
5160
5161 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5162 if (ret)
5163 return (0);
5164
5165 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5166 if (ret)
5167 return (0);
5168
5169 *data_buff++ = temp_sel_val;
5170 *data_buff++ = read_value;
5171
5172 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
5173 if (ret)
5174 return (0);
5175
5176 temp_sel_val = select_value_2 & select_value_mask;
5177
5178 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5179 if (ret)
5180 return (0);
5181
5182 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5183 if (ret)
5184 return (0);
5185
5186 *data_buff++ = temp_sel_val;
5187 *data_buff++ = read_value;
5188
5189 select_value_1 += muxEntry->select_value_stride;
5190 select_value_2 += muxEntry->select_value_stride;
5191 }
5192
5193 return (loop_cnt * (4 * sizeof(uint32_t)));
5194 }
5195
5196 /*
5197 * Handling Queue State Reads.
5198 */
5199
5200 static uint32_t
5201 ql_rdqueue(qla_host_t *ha,
5202 ql_minidump_entry_queue_t *queueEntry,
5203 uint32_t *data_buff)
5204 {
5205 int ret;
5206 int loop_cnt, k;
5207 uint32_t read_value;
5208 uint32_t read_addr, read_stride, select_addr;
5209 uint32_t queue_id, read_cnt;
5210
5211 read_cnt = queueEntry->read_addr_cnt;
5212 read_stride = queueEntry->read_addr_stride;
5213 select_addr = queueEntry->select_addr;
5214
5215 for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
5216 loop_cnt++) {
5217 ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
5218 if (ret)
5219 return (0);
5220
5221 read_addr = queueEntry->read_addr;
5222
5223 for (k = 0; k < read_cnt; k++) {
5224 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5225 if (ret)
5226 return (0);
5227
5228 *data_buff++ = read_value;
5229 read_addr += read_stride;
5230 }
5231
5232 queue_id += queueEntry->queue_id_stride;
5233 }
5234
5235 return (loop_cnt * (read_cnt * sizeof(uint32_t)));
5236 }
5237
5238 /*
5239 * Handling control entries.
5240 */
5241
5242 static uint32_t
5243 ql_cntrl(qla_host_t *ha,
5244 ql_minidump_template_hdr_t *template_hdr,
5245 ql_minidump_entry_cntrl_t *crbEntry)
5246 {
5247 int ret;
5248 int count;
5249 uint32_t opcode, read_value, addr, entry_addr;
5250 long timeout;
5251
5252 entry_addr = crbEntry->addr;
5253
5254 for (count = 0; count < crbEntry->op_count; count++) {
5255 opcode = crbEntry->opcode;
5256
5257 if (opcode & QL_DBG_OPCODE_WR) {
5258 ret = ql_rdwr_indreg32(ha, entry_addr,
5259 &crbEntry->value_1, 0);
5260 if (ret)
5261 return (0);
5262
5263 opcode &= ~QL_DBG_OPCODE_WR;
5264 }
5265
5266 if (opcode & QL_DBG_OPCODE_RW) {
5267 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5268 if (ret)
5269 return (0);
5270
5271 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5272 if (ret)
5273 return (0);
5274
5275 opcode &= ~QL_DBG_OPCODE_RW;
5276 }
5277
5278 if (opcode & QL_DBG_OPCODE_AND) {
5279 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5280 if (ret)
5281 return (0);
5282
5283 read_value &= crbEntry->value_2;
5284 opcode &= ~QL_DBG_OPCODE_AND;
5285
5286 if (opcode & QL_DBG_OPCODE_OR) {
5287 read_value |= crbEntry->value_3;
5288 opcode &= ~QL_DBG_OPCODE_OR;
5289 }
5290
5291 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5292 if (ret)
5293 return (0);
5294 }
5295
5296 if (opcode & QL_DBG_OPCODE_OR) {
5297 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5298 if (ret)
5299 return (0);
5300
5301 read_value |= crbEntry->value_3;
5302
5303 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5304 if (ret)
5305 return (0);
5306
5307 opcode &= ~QL_DBG_OPCODE_OR;
5308 }
5309
5310 if (opcode & QL_DBG_OPCODE_POLL) {
5311 opcode &= ~QL_DBG_OPCODE_POLL;
5312 timeout = crbEntry->poll_timeout;
5313 addr = entry_addr;
5314
5315 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5316 if (ret)
5317 return (0);
5318
5319 while ((read_value & crbEntry->value_2)
5320 != crbEntry->value_1) {
5321 if (timeout) {
5322 qla_mdelay(__func__, 1);
5323 timeout--;
5324 } else
5325 break;
5326
5327 ret = ql_rdwr_indreg32(ha, addr,
5328 &read_value, 1);
5329 if (ret)
5330 return (0);
5331 }
5332
5333 if (!timeout) {
5334 /*
5335 * Report a timeout error:
5336 * core dump capture failed.
5337 * Skip remaining entries.
5338 * Write the buffer out to a file and use
5339 * driver specific fields in the template
5340 * header to report this error.
5341 */
5342 return (-1);
5343 }
5344 }
5345
5346 if (opcode & QL_DBG_OPCODE_RDSTATE) {
5347 /*
5348 * decide which address to use.
5349 */
5350 if (crbEntry->state_index_a) {
5351 addr = template_hdr->saved_state_array[
5352 crbEntry-> state_index_a];
5353 } else {
5354 addr = entry_addr;
5355 }
5356
5357 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5358 if (ret)
5359 return (0);
5360
5361 template_hdr->saved_state_array[crbEntry->state_index_v]
5362 = read_value;
5363 opcode &= ~QL_DBG_OPCODE_RDSTATE;
5364 }
5365
5366 if (opcode & QL_DBG_OPCODE_WRSTATE) {
5367 /*
5368 * decide which value to use.
5369 */
5370 if (crbEntry->state_index_v) {
5371 read_value = template_hdr->saved_state_array[
5372 crbEntry->state_index_v];
5373 } else {
5374 read_value = crbEntry->value_1;
5375 }
5376 /*
5377 * decide which address to use.
5378 */
5379 if (crbEntry->state_index_a) {
5380 addr = template_hdr->saved_state_array[
5381 crbEntry-> state_index_a];
5382 } else {
5383 addr = entry_addr;
5384 }
5385
5386 ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
5387 if (ret)
5388 return (0);
5389
5390 opcode &= ~QL_DBG_OPCODE_WRSTATE;
5391 }
5392
5393 if (opcode & QL_DBG_OPCODE_MDSTATE) {
5394 /* Read value from saved state using index */
5395 read_value = template_hdr->saved_state_array[
5396 crbEntry->state_index_v];
5397
5398 read_value <<= crbEntry->shl; /* Shift left operation */
5399 read_value >>= crbEntry->shr; /* Shift right operation */
5400
5401 if (crbEntry->value_2) {
5402 /* check if AND mask is provided */
5403 read_value &= crbEntry->value_2;
5404 }
5405
5406 read_value |= crbEntry->value_3; /* OR operation */
5407 read_value += crbEntry->value_1; /* increment op */
5408
5409 /* Write value back to state area. */
5410
5411 template_hdr->saved_state_array[crbEntry->state_index_v]
5412 = read_value;
5413 opcode &= ~QL_DBG_OPCODE_MDSTATE;
5414 }
5415
5416 entry_addr += crbEntry->addr_stride;
5417 }
5418
5419 return (0);
5420 }
5421
5422 /*
5423 * Handling rd poll entry.
5424 */
5425
5426 static uint32_t
5427 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
5428 uint32_t *data_buff)
5429 {
5430 int ret;
5431 int loop_cnt;
5432 uint32_t op_count, select_addr, select_value_stride, select_value;
5433 uint32_t read_addr, poll, mask, data;
5434 uint32_t wait_count = 0;
5435
5436 select_addr = entry->select_addr;
5437 read_addr = entry->read_addr;
5438 select_value = entry->select_value;
5439 select_value_stride = entry->select_value_stride;
5440 op_count = entry->op_count;
5441 poll = entry->poll;
5442 mask = entry->mask;
5443
5444 for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
5445 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
5446 if (ret)
5447 return (0);
5448
5449 wait_count = 0;
5450
5451 while (wait_count < poll) {
5452 uint32_t temp;
5453
5454 ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
5455 if (ret)
5456 return (0);
5457
5458 if ( (temp & mask) != 0 ) {
5459 break;
5460 }
5461 wait_count++;
5462 }
5463
5464 if (wait_count == poll) {
5465 device_printf(ha->pci_dev,
5466 "%s: Error in processing entry\n", __func__);
5467 device_printf(ha->pci_dev,
5468 "%s: wait_count <0x%x> poll <0x%x>\n",
5469 __func__, wait_count, poll);
5470 return 0;
5471 }
5472
5473 ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
5474 if (ret)
5475 return (0);
5476
5477 *data_buff++ = select_value;
5478 *data_buff++ = data;
5479 select_value = select_value + select_value_stride;
5480 }
5481
5482 /*
5483 * Return the amount of data (in bytes) written to the capture buffer.
5484 */
5485 return (loop_cnt * (2 * sizeof(uint32_t)));
5486 }
5487
5488 /*
5489 * Handling rd modify write poll entry.
5490 */
5491
5492 static uint32_t
5493 ql_pollrd_modify_write(qla_host_t *ha,
5494 ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
5495 uint32_t *data_buff)
5496 {
5497 int ret;
5498 uint32_t addr_1, addr_2, value_1, value_2, data;
5499 uint32_t poll, mask, modify_mask;
5500 uint32_t wait_count = 0;
5501
5502 addr_1 = entry->addr_1;
5503 addr_2 = entry->addr_2;
5504 value_1 = entry->value_1;
5505 value_2 = entry->value_2;
5506
5507 poll = entry->poll;
5508 mask = entry->mask;
5509 modify_mask = entry->modify_mask;
5510
5511 ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
5512 if (ret)
5513 return (0);
5514
5515 wait_count = 0;
5516 while (wait_count < poll) {
5517 uint32_t temp;
5518
5519 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5520 if (ret)
5521 return (0);
5522
5523 if ( (temp & mask) != 0 ) {
5524 break;
5525 }
5526 wait_count++;
5527 }
5528
5529 if (wait_count == poll) {
5530 device_printf(ha->pci_dev, "%s: Error in processing entry\n",
5531 __func__);
5532 } else {
5533 ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
5534 if (ret)
5535 return (0);
5536
5537 data = (data & modify_mask);
5538
5539 ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
5540 if (ret)
5541 return (0);
5542
5543 ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
5544 if (ret)
5545 return (0);
5546
5547 /* Poll again */
5548 wait_count = 0;
5549 while (wait_count < poll) {
5550 uint32_t temp;
5551
5552 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5553 if (ret)
5554 return (0);
5555
5556 if ( (temp & mask) != 0 ) {
5557 break;
5558 }
5559 wait_count++;
5560 }
5561 *data_buff++ = addr_2;
5562 *data_buff++ = data;
5563 }
5564
5565 /*
5566 * Return the amount of data (in bytes) written to the capture buffer.
5567 */
5568 return (2 * sizeof(uint32_t));
5569 }
5570