xref: /freebsd/sys/dev/qlxgbe/ql_hw.c (revision cfd6422a5217410fbd66f7a7a8a64d9d85e61229)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2013-2016 Qlogic Corporation
5  * All rights reserved.
6  *
7  *  Redistribution and use in source and binary forms, with or without
8  *  modification, are permitted provided that the following conditions
9  *  are met:
10  *
11  *  1. Redistributions of source code must retain the above copyright
12  *     notice, this list of conditions and the following disclaimer.
13  *  2. Redistributions in binary form must reproduce the above copyright
14  *     notice, this list of conditions and the following disclaimer in the
15  *     documentation and/or other materials provided with the distribution.
16  *
17  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27  *  POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*
31  * File: ql_hw.c
32  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33  * Content: Contains hardware-dependent functions
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include "ql_os.h"
40 #include "ql_hw.h"
41 #include "ql_def.h"
42 #include "ql_inline.h"
43 #include "ql_ver.h"
44 #include "ql_glbl.h"
45 #include "ql_dbg.h"
46 #include "ql_minidump.h"
47 
48 /*
49  * Static Functions
50  */
51 
52 static void qla_del_rcv_cntxt(qla_host_t *ha);
53 static int qla_init_rcv_cntxt(qla_host_t *ha);
54 static int qla_del_xmt_cntxt(qla_host_t *ha);
55 static int qla_init_xmt_cntxt(qla_host_t *ha);
56 static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
57 	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
58 static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
59 	uint32_t num_intrs, uint32_t create);
60 static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
61 static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
62 	int tenable, int rcv);
63 static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
64 static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);
65 
66 static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
67 		uint8_t *hdr);
68 static int qla_hw_add_all_mcast(qla_host_t *ha);
69 static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);
70 
71 static int qla_init_nic_func(qla_host_t *ha);
72 static int qla_stop_nic_func(qla_host_t *ha);
73 static int qla_query_fw_dcbx_caps(qla_host_t *ha);
74 static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
75 static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
76 static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
77 static int qla_get_cam_search_mode(qla_host_t *ha);
78 
79 static void ql_minidump_free(qla_host_t *ha);
80 
81 #ifdef QL_DBG
82 
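/*
 * Debug-only helper: writing 1 to each CRB PEG control register halts
 * all five protocol-engine (PEG) cores. Exposed via the "peg_stop"
 * sysctl below, presumably to force a firmware failure so the error
 * recovery path can be exercised (an inference; this is QL_DBG-only code).
 */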
83 static void
84 qla_stop_pegs(qla_host_t *ha)
85 {
86         uint32_t val = 1;
87 
88         ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
89         ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
90         ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
91         ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
92         ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
93         device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
94 }
95 
96 static int
97 qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
98 {
99 	int err, ret = 0;
100 	qla_host_t *ha;
101 
102 	err = sysctl_handle_int(oidp, &ret, 0, req);
103 
104 	if (err || !req->newptr)
105 		return (err);
106 
107 	if (ret == 1) {
108 		ha = (qla_host_t *)arg1;
109 		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
110 			qla_stop_pegs(ha);
111 			QLA_UNLOCK(ha, __func__);
112 		}
113 	}
114 
115 	return (err);
116 }
117 #endif /* #ifdef QL_DBG */
118 
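/*
 * The value written through the "port_cfg" sysctl packs three 4-bit
 * fields (see the help text in ql_hw_add_sysctls()):
 *   bits 0-3 : DCBX enable (0 or 1)
 *   bits 4-7 : pause type (0 = none, 1 = standard, 2 = ppm)
 *   bits 8-11: standard-pause direction (0 = xmt+rcv, 1 = xmt, 2 = rcv)
 * qla_validate_set_port_cfg_bit() simply range-checks each field.
 */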
119 static int
120 qla_validate_set_port_cfg_bit(uint32_t bits)
121 {
122         if ((bits & 0xF) > 1)
123                 return (-1);
124 
125         if (((bits >> 4) & 0xF) > 2)
126                 return (-1);
127 
128         if (((bits >> 8) & 0xF) > 2)
129                 return (-1);
130 
131         return (0);
132 }
133 
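/*
 * "port_cfg" sysctl handler: writing a value that passes validation is
 * translated into Q8_PORT_CFG_BITS_* flags and programmed into the port;
 * any other write just refreshes the configuration via
 * qla_get_port_config(). For example (device name "ql0" assumed here):
 *
 *   sysctl dev.ql.0.port_cfg=273
 *
 * writes 0x111, i.e. DCBX enabled, standard pause, transmit-only direction.
 */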
134 static int
135 qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
136 {
137         int err, ret = 0;
138         qla_host_t *ha;
139         uint32_t cfg_bits;
140 
141         err = sysctl_handle_int(oidp, &ret, 0, req);
142 
143         if (err || !req->newptr)
144                 return (err);
145 
146 	ha = (qla_host_t *)arg1;
147 
148         if (qla_validate_set_port_cfg_bit((uint32_t)ret) == 0) {
149                 err = qla_get_port_config(ha, &cfg_bits);
150 
151                 if (err)
152                         goto qla_sysctl_set_port_cfg_exit;
153 
154                 if (ret & 0x1) {
155                         cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
156                 } else {
157                         cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
158                 }
159 
160                 ret = ret >> 4;
161                 cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;
162 
163                 if ((ret & 0xF) == 0) {
164                         cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
165                 } else if ((ret & 0xF) == 1){
166                         cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
167                 } else {
168                         cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
169                 }
170 
171                 ret = ret >> 4;
172                 cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;
173 
174                 if (ret == 0) {
175                         cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
176                 } else if (ret == 1){
177                         cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
178                 } else {
179                         cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
180                 }
181 
182 		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
183                 	err = qla_set_port_config(ha, cfg_bits);
184 			QLA_UNLOCK(ha, __func__);
185 		} else {
186 			device_printf(ha->pci_dev, "%s: failed\n", __func__);
187 		}
188         } else {
189 		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
190                 	err = qla_get_port_config(ha, &cfg_bits);
191 			QLA_UNLOCK(ha, __func__);
192 		} else {
193 			device_printf(ha->pci_dev, "%s: failed\n", __func__);
194 		}
195         }
196 
197 qla_sysctl_set_port_cfg_exit:
198         return (err);
199 }
200 
201 static int
202 qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
203 {
204 	int err, ret = 0;
205 	qla_host_t *ha;
206 
207 	err = sysctl_handle_int(oidp, &ret, 0, req);
208 
209 	if (err || !req->newptr)
210 		return (err);
211 
212 	ha = (qla_host_t *)arg1;
213 
214 	if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
215 		(ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {
216 		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
217 			err = qla_set_cam_search_mode(ha, (uint32_t)ret);
218 			QLA_UNLOCK(ha, __func__);
219 		} else {
220 			device_printf(ha->pci_dev, "%s: failed\n", __func__);
221 		}
222 
223 	} else {
224 		device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
225 	}
226 
227 	return (err);
228 }
229 
230 static int
231 qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
232 {
233 	int err, ret = 0;
234 	qla_host_t *ha;
235 
236 	err = sysctl_handle_int(oidp, &ret, 0, req);
237 
238 	if (err || !req->newptr)
239 		return (err);
240 
241 	ha = (qla_host_t *)arg1;
242 	if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
243 		err = qla_get_cam_search_mode(ha);
244 		QLA_UNLOCK(ha, __func__);
245 	} else {
246 		device_printf(ha->pci_dev, "%s: failed\n", __func__);
247 	}
248 
249 	return (err);
250 }
251 
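/*
 * The helpers below hang read-only statistics nodes off the device's
 * sysctl tree (e.g. dev.ql.<unit>.stats_hw_mac.*, assuming the driver
 * attaches with device name "ql"), one leaf per hardware counter.
 */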
252 static void
253 qlnx_add_hw_mac_stats_sysctls(qla_host_t *ha)
254 {
255         struct sysctl_ctx_list  *ctx;
256         struct sysctl_oid_list  *children;
257         struct sysctl_oid       *ctx_oid;
258 
259         ctx = device_get_sysctl_ctx(ha->pci_dev);
260         children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
261 
262         ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_mac",
263 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_mac");
264         children = SYSCTL_CHILDREN(ctx_oid);
265 
266         SYSCTL_ADD_QUAD(ctx, children,
267                 OID_AUTO, "xmt_frames",
268                 CTLFLAG_RD, &ha->hw.mac.xmt_frames,
269                 "xmt_frames");
270 
271         SYSCTL_ADD_QUAD(ctx, children,
272                 OID_AUTO, "xmt_bytes",
273                 CTLFLAG_RD, &ha->hw.mac.xmt_bytes,
274                 "xmt_frames");
275 
276         SYSCTL_ADD_QUAD(ctx, children,
277                 OID_AUTO, "xmt_mcast_pkts",
278                 CTLFLAG_RD, &ha->hw.mac.xmt_mcast_pkts,
279                 "xmt_mcast_pkts");
280 
281         SYSCTL_ADD_QUAD(ctx, children,
282                 OID_AUTO, "xmt_bcast_pkts",
283                 CTLFLAG_RD, &ha->hw.mac.xmt_bcast_pkts,
284                 "xmt_bcast_pkts");
285 
286         SYSCTL_ADD_QUAD(ctx, children,
287                 OID_AUTO, "xmt_pause_frames",
288                 CTLFLAG_RD, &ha->hw.mac.xmt_pause_frames,
289                 "xmt_pause_frames");
290 
291         SYSCTL_ADD_QUAD(ctx, children,
292                 OID_AUTO, "xmt_cntrl_pkts",
293                 CTLFLAG_RD, &ha->hw.mac.xmt_cntrl_pkts,
294                 "xmt_cntrl_pkts");
295 
296         SYSCTL_ADD_QUAD(ctx, children,
297                 OID_AUTO, "xmt_pkt_lt_64bytes",
298                 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_64bytes,
299                 "xmt_pkt_lt_64bytes");
300 
301         SYSCTL_ADD_QUAD(ctx, children,
302                 OID_AUTO, "xmt_pkt_lt_127bytes",
303                 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_127bytes,
304                 "xmt_pkt_lt_127bytes");
305 
306         SYSCTL_ADD_QUAD(ctx, children,
307                 OID_AUTO, "xmt_pkt_lt_255bytes",
308                 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_255bytes,
309                 "xmt_pkt_lt_255bytes");
310 
311         SYSCTL_ADD_QUAD(ctx, children,
312                 OID_AUTO, "xmt_pkt_lt_511bytes",
313                 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_511bytes,
314                 "xmt_pkt_lt_511bytes");
315 
316         SYSCTL_ADD_QUAD(ctx, children,
317                 OID_AUTO, "xmt_pkt_lt_1023bytes",
318                 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1023bytes,
319                 "xmt_pkt_lt_1023bytes");
320 
321         SYSCTL_ADD_QUAD(ctx, children,
322                 OID_AUTO, "xmt_pkt_lt_1518bytes",
323                 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1518bytes,
324                 "xmt_pkt_lt_1518bytes");
325 
326         SYSCTL_ADD_QUAD(ctx, children,
327                 OID_AUTO, "xmt_pkt_gt_1518bytes",
328                 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_gt_1518bytes,
329                 "xmt_pkt_gt_1518bytes");
330 
331         SYSCTL_ADD_QUAD(ctx, children,
332                 OID_AUTO, "rcv_frames",
333                 CTLFLAG_RD, &ha->hw.mac.rcv_frames,
334                 "rcv_frames");
335 
336         SYSCTL_ADD_QUAD(ctx, children,
337                 OID_AUTO, "rcv_bytes",
338                 CTLFLAG_RD, &ha->hw.mac.rcv_bytes,
339                 "rcv_bytes");
340 
341         SYSCTL_ADD_QUAD(ctx, children,
342                 OID_AUTO, "rcv_mcast_pkts",
343                 CTLFLAG_RD, &ha->hw.mac.rcv_mcast_pkts,
344                 "rcv_mcast_pkts");
345 
346         SYSCTL_ADD_QUAD(ctx, children,
347                 OID_AUTO, "rcv_bcast_pkts",
348                 CTLFLAG_RD, &ha->hw.mac.rcv_bcast_pkts,
349                 "rcv_bcast_pkts");
350 
351         SYSCTL_ADD_QUAD(ctx, children,
352                 OID_AUTO, "rcv_pause_frames",
353                 CTLFLAG_RD, &ha->hw.mac.rcv_pause_frames,
354                 "rcv_pause_frames");
355 
356         SYSCTL_ADD_QUAD(ctx, children,
357                 OID_AUTO, "rcv_cntrl_pkts",
358                 CTLFLAG_RD, &ha->hw.mac.rcv_cntrl_pkts,
359                 "rcv_cntrl_pkts");
360 
361         SYSCTL_ADD_QUAD(ctx, children,
362                 OID_AUTO, "rcv_pkt_lt_64bytes",
363                 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_64bytes,
364                 "rcv_pkt_lt_64bytes");
365 
366         SYSCTL_ADD_QUAD(ctx, children,
367                 OID_AUTO, "rcv_pkt_lt_127bytes",
368                 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_127bytes,
369                 "rcv_pkt_lt_127bytes");
370 
371         SYSCTL_ADD_QUAD(ctx, children,
372                 OID_AUTO, "rcv_pkt_lt_255bytes",
373                 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_255bytes,
374                 "rcv_pkt_lt_255bytes");
375 
376         SYSCTL_ADD_QUAD(ctx, children,
377                 OID_AUTO, "rcv_pkt_lt_511bytes",
378                 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_511bytes,
379                 "rcv_pkt_lt_511bytes");
380 
381         SYSCTL_ADD_QUAD(ctx, children,
382                 OID_AUTO, "rcv_pkt_lt_1023bytes",
383                 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1023bytes,
384                 "rcv_pkt_lt_1023bytes");
385 
386         SYSCTL_ADD_QUAD(ctx, children,
387                 OID_AUTO, "rcv_pkt_lt_1518bytes",
388                 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1518bytes,
389                 "rcv_pkt_lt_1518bytes");
390 
391         SYSCTL_ADD_QUAD(ctx, children,
392                 OID_AUTO, "rcv_pkt_gt_1518bytes",
393                 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_gt_1518bytes,
394                 "rcv_pkt_gt_1518bytes");
395 
396         SYSCTL_ADD_QUAD(ctx, children,
397                 OID_AUTO, "rcv_len_error",
398                 CTLFLAG_RD, &ha->hw.mac.rcv_len_error,
399                 "rcv_len_error");
400 
401         SYSCTL_ADD_QUAD(ctx, children,
402                 OID_AUTO, "rcv_len_small",
403                 CTLFLAG_RD, &ha->hw.mac.rcv_len_small,
404                 "rcv_len_small");
405 
406         SYSCTL_ADD_QUAD(ctx, children,
407                 OID_AUTO, "rcv_len_large",
408                 CTLFLAG_RD, &ha->hw.mac.rcv_len_large,
409                 "rcv_len_large");
410 
411         SYSCTL_ADD_QUAD(ctx, children,
412                 OID_AUTO, "rcv_jabber",
413                 CTLFLAG_RD, &ha->hw.mac.rcv_jabber,
414                 "rcv_jabber");
415 
416         SYSCTL_ADD_QUAD(ctx, children,
417                 OID_AUTO, "rcv_dropped",
418                 CTLFLAG_RD, &ha->hw.mac.rcv_dropped,
419                 "rcv_dropped");
420 
421         SYSCTL_ADD_QUAD(ctx, children,
422                 OID_AUTO, "fcs_error",
423                 CTLFLAG_RD, &ha->hw.mac.fcs_error,
424                 "fcs_error");
425 
426         SYSCTL_ADD_QUAD(ctx, children,
427                 OID_AUTO, "align_error",
428                 CTLFLAG_RD, &ha->hw.mac.align_error,
429                 "align_error");
430 
431         SYSCTL_ADD_QUAD(ctx, children,
432                 OID_AUTO, "eswitched_frames",
433                 CTLFLAG_RD, &ha->hw.mac.eswitched_frames,
434                 "eswitched_frames");
435 
436         SYSCTL_ADD_QUAD(ctx, children,
437                 OID_AUTO, "eswitched_bytes",
438                 CTLFLAG_RD, &ha->hw.mac.eswitched_bytes,
439                 "eswitched_bytes");
440 
441         SYSCTL_ADD_QUAD(ctx, children,
442                 OID_AUTO, "eswitched_mcast_frames",
443                 CTLFLAG_RD, &ha->hw.mac.eswitched_mcast_frames,
444                 "eswitched_mcast_frames");
445 
446         SYSCTL_ADD_QUAD(ctx, children,
447                 OID_AUTO, "eswitched_bcast_frames",
448                 CTLFLAG_RD, &ha->hw.mac.eswitched_bcast_frames,
449                 "eswitched_bcast_frames");
450 
451         SYSCTL_ADD_QUAD(ctx, children,
452                 OID_AUTO, "eswitched_ucast_frames",
453                 CTLFLAG_RD, &ha->hw.mac.eswitched_ucast_frames,
454                 "eswitched_ucast_frames");
455 
456         SYSCTL_ADD_QUAD(ctx, children,
457                 OID_AUTO, "eswitched_err_free_frames",
458                 CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_frames,
459                 "eswitched_err_free_frames");
460 
461         SYSCTL_ADD_QUAD(ctx, children,
462                 OID_AUTO, "eswitched_err_free_bytes",
463                 CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_bytes,
464                 "eswitched_err_free_bytes");
465 
466 	return;
467 }
468 
469 static void
470 qlnx_add_hw_rcv_stats_sysctls(qla_host_t *ha)
471 {
472         struct sysctl_ctx_list  *ctx;
473         struct sysctl_oid_list  *children;
474         struct sysctl_oid       *ctx_oid;
475 
476         ctx = device_get_sysctl_ctx(ha->pci_dev);
477         children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
478 
479         ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_rcv",
480 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_rcv");
481         children = SYSCTL_CHILDREN(ctx_oid);
482 
483         SYSCTL_ADD_QUAD(ctx, children,
484                 OID_AUTO, "total_bytes",
485                 CTLFLAG_RD, &ha->hw.rcv.total_bytes,
486                 "total_bytes");
487 
488         SYSCTL_ADD_QUAD(ctx, children,
489                 OID_AUTO, "total_pkts",
490                 CTLFLAG_RD, &ha->hw.rcv.total_pkts,
491                 "total_pkts");
492 
493         SYSCTL_ADD_QUAD(ctx, children,
494                 OID_AUTO, "lro_pkt_count",
495                 CTLFLAG_RD, &ha->hw.rcv.lro_pkt_count,
496                 "lro_pkt_count");
497 
498         SYSCTL_ADD_QUAD(ctx, children,
499                 OID_AUTO, "sw_pkt_count",
500                 CTLFLAG_RD, &ha->hw.rcv.sw_pkt_count,
501                 "sw_pkt_count");
502 
503         SYSCTL_ADD_QUAD(ctx, children,
504                 OID_AUTO, "ip_chksum_err",
505                 CTLFLAG_RD, &ha->hw.rcv.ip_chksum_err,
506                 "ip_chksum_err");
507 
508         SYSCTL_ADD_QUAD(ctx, children,
509                 OID_AUTO, "pkts_wo_acntxts",
510                 CTLFLAG_RD, &ha->hw.rcv.pkts_wo_acntxts,
511                 "pkts_wo_acntxts");
512 
513         SYSCTL_ADD_QUAD(ctx, children,
514                 OID_AUTO, "pkts_dropped_no_sds_card",
515                 CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_card,
516                 "pkts_dropped_no_sds_card");
517 
518         SYSCTL_ADD_QUAD(ctx, children,
519                 OID_AUTO, "pkts_dropped_no_sds_host",
520                 CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_host,
521                 "pkts_dropped_no_sds_host");
522 
523         SYSCTL_ADD_QUAD(ctx, children,
524                 OID_AUTO, "oversized_pkts",
525                 CTLFLAG_RD, &ha->hw.rcv.oversized_pkts,
526                 "oversized_pkts");
527 
528         SYSCTL_ADD_QUAD(ctx, children,
529                 OID_AUTO, "pkts_dropped_no_rds",
530                 CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_rds,
531                 "pkts_dropped_no_rds");
532 
533         SYSCTL_ADD_QUAD(ctx, children,
534                 OID_AUTO, "unxpctd_mcast_pkts",
535                 CTLFLAG_RD, &ha->hw.rcv.unxpctd_mcast_pkts,
536                 "unxpctd_mcast_pkts");
537 
538         SYSCTL_ADD_QUAD(ctx, children,
539                 OID_AUTO, "re1_fbq_error",
540                 CTLFLAG_RD, &ha->hw.rcv.re1_fbq_error,
541                 "re1_fbq_error");
542 
543         SYSCTL_ADD_QUAD(ctx, children,
544                 OID_AUTO, "invalid_mac_addr",
545                 CTLFLAG_RD, &ha->hw.rcv.invalid_mac_addr,
546                 "invalid_mac_addr");
547 
548         SYSCTL_ADD_QUAD(ctx, children,
549                 OID_AUTO, "rds_prime_trys",
550                 CTLFLAG_RD, &ha->hw.rcv.rds_prime_trys,
551                 "rds_prime_trys");
552 
553         SYSCTL_ADD_QUAD(ctx, children,
554                 OID_AUTO, "rds_prime_success",
555                 CTLFLAG_RD, &ha->hw.rcv.rds_prime_success,
556                 "rds_prime_success");
557 
558         SYSCTL_ADD_QUAD(ctx, children,
559                 OID_AUTO, "lro_flows_added",
560                 CTLFLAG_RD, &ha->hw.rcv.lro_flows_added,
561                 "lro_flows_added");
562 
563         SYSCTL_ADD_QUAD(ctx, children,
564                 OID_AUTO, "lro_flows_deleted",
565                 CTLFLAG_RD, &ha->hw.rcv.lro_flows_deleted,
566                 "lro_flows_deleted");
567 
568         SYSCTL_ADD_QUAD(ctx, children,
569                 OID_AUTO, "lro_flows_active",
570                 CTLFLAG_RD, &ha->hw.rcv.lro_flows_active,
571                 "lro_flows_active");
572 
573         SYSCTL_ADD_QUAD(ctx, children,
574                 OID_AUTO, "pkts_droped_unknown",
575                 CTLFLAG_RD, &ha->hw.rcv.pkts_droped_unknown,
576                 "pkts_droped_unknown");
577 
578         SYSCTL_ADD_QUAD(ctx, children,
579                 OID_AUTO, "pkts_cnt_oversized",
580                 CTLFLAG_RD, &ha->hw.rcv.pkts_cnt_oversized,
581                 "pkts_cnt_oversized");
582 
583 	return;
584 }
585 
586 static void
587 qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha)
588 {
589         struct sysctl_ctx_list  *ctx;
590         struct sysctl_oid_list  *children;
591         struct sysctl_oid_list  *node_children;
592         struct sysctl_oid       *ctx_oid;
593         int                     i;
594         uint8_t                 name_str[16];
595 
596         ctx = device_get_sysctl_ctx(ha->pci_dev);
597         children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
598 
599         ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_xmt",
600 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_xmt");
601         children = SYSCTL_CHILDREN(ctx_oid);
602 
603         for (i = 0; i < ha->hw.num_tx_rings; i++) {
604                 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
605                 snprintf(name_str, sizeof(name_str), "%d", i);
606 
607                 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
608                     CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
609                 node_children = SYSCTL_CHILDREN(ctx_oid);
610 
611                 /* Tx Related */
612 
613                 SYSCTL_ADD_QUAD(ctx, node_children,
614 			OID_AUTO, "total_bytes",
615                         CTLFLAG_RD, &ha->hw.xmt[i].total_bytes,
616                         "total_bytes");
617 
618                 SYSCTL_ADD_QUAD(ctx, node_children,
619 			OID_AUTO, "total_pkts",
620                         CTLFLAG_RD, &ha->hw.xmt[i].total_pkts,
621                         "total_pkts");
622 
623                 SYSCTL_ADD_QUAD(ctx, node_children,
624 			OID_AUTO, "errors",
625                         CTLFLAG_RD, &ha->hw.xmt[i].errors,
626                         "errors");
627 
628                 SYSCTL_ADD_QUAD(ctx, node_children,
629 			OID_AUTO, "pkts_dropped",
630                         CTLFLAG_RD, &ha->hw.xmt[i].pkts_dropped,
631                         "pkts_dropped");
632 
633                 SYSCTL_ADD_QUAD(ctx, node_children,
634 			OID_AUTO, "switch_pkts",
635                         CTLFLAG_RD, &ha->hw.xmt[i].switch_pkts,
636                         "switch_pkts");
637 
638                 SYSCTL_ADD_QUAD(ctx, node_children,
639 			OID_AUTO, "num_buffers",
640                         CTLFLAG_RD, &ha->hw.xmt[i].num_buffers,
641                         "num_buffers");
642 	}
643 
644 	return;
645 }
646 
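/*
 * Mailbox-completion latency histogram: qla_mbx_cmd() increments one of
 * these counters per command, in 200ms buckets (the 4000ms-5000ms bucket
 * also absorbs anything slower). The last two entries count host-side
 * and firmware-side mailbox handshake timeouts.
 */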
647 static void
648 qlnx_add_hw_mbx_cmpl_stats_sysctls(qla_host_t *ha)
649 {
650         struct sysctl_ctx_list  *ctx;
651         struct sysctl_oid_list  *node_children;
652 
653         ctx = device_get_sysctl_ctx(ha->pci_dev);
654         node_children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
655 
656 	SYSCTL_ADD_QUAD(ctx, node_children,
657 		OID_AUTO, "mbx_completion_time_lt_200ms",
658 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[0],
659 		"mbx_completion_time_lt_200ms");
660 
661 	SYSCTL_ADD_QUAD(ctx, node_children,
662 		OID_AUTO, "mbx_completion_time_200ms_400ms",
663 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[1],
664 		"mbx_completion_time_200ms_400ms");
665 
666 	SYSCTL_ADD_QUAD(ctx, node_children,
667 		OID_AUTO, "mbx_completion_time_400ms_600ms",
668 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[2],
669 		"mbx_completion_time_400ms_600ms");
670 
671 	SYSCTL_ADD_QUAD(ctx, node_children,
672 		OID_AUTO, "mbx_completion_time_600ms_800ms",
673 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[3],
674 		"mbx_completion_time_600ms_800ms");
675 
676 	SYSCTL_ADD_QUAD(ctx, node_children,
677 		OID_AUTO, "mbx_completion_time_800ms_1000ms",
678 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[4],
679 		"mbx_completion_time_800ms_1000ms");
680 
681 	SYSCTL_ADD_QUAD(ctx, node_children,
682 		OID_AUTO, "mbx_completion_time_1000ms_1200ms",
683 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[5],
684 		"mbx_completion_time_1000ms_1200ms");
685 
686 	SYSCTL_ADD_QUAD(ctx, node_children,
687 		OID_AUTO, "mbx_completion_time_1200ms_1400ms",
688 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[6],
689 		"mbx_completion_time_1200ms_1400ms");
690 
691 	SYSCTL_ADD_QUAD(ctx, node_children,
692 		OID_AUTO, "mbx_completion_time_1400ms_1600ms",
693 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[7],
694 		"mbx_completion_time_1400ms_1600ms");
695 
696 	SYSCTL_ADD_QUAD(ctx, node_children,
697 		OID_AUTO, "mbx_completion_time_1600ms_1800ms",
698 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[8],
699 		"mbx_completion_time_1600ms_1800ms");
700 
701 	SYSCTL_ADD_QUAD(ctx, node_children,
702 		OID_AUTO, "mbx_completion_time_1800ms_2000ms",
703 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[9],
704 		"mbx_completion_time_1800ms_2000ms");
705 
706 	SYSCTL_ADD_QUAD(ctx, node_children,
707 		OID_AUTO, "mbx_completion_time_2000ms_2200ms",
708 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[10],
709 		"mbx_completion_time_2000ms_2200ms");
710 
711 	SYSCTL_ADD_QUAD(ctx, node_children,
712 		OID_AUTO, "mbx_completion_time_2200ms_2400ms",
713 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[11],
714 		"mbx_completion_time_2200ms_2400ms");
715 
716 	SYSCTL_ADD_QUAD(ctx, node_children,
717 		OID_AUTO, "mbx_completion_time_2400ms_2600ms",
718 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[12],
719 		"mbx_completion_time_2400ms_2600ms");
720 
721 	SYSCTL_ADD_QUAD(ctx, node_children,
722 		OID_AUTO, "mbx_completion_time_2600ms_2800ms",
723 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[13],
724 		"mbx_completion_time_2600ms_2800ms");
725 
726 	SYSCTL_ADD_QUAD(ctx, node_children,
727 		OID_AUTO, "mbx_completion_time_2800ms_3000ms",
728 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[14],
729 		"mbx_completion_time_2800ms_3000ms");
730 
731 	SYSCTL_ADD_QUAD(ctx, node_children,
732 		OID_AUTO, "mbx_completion_time_3000ms_4000ms",
733 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[15],
734 		"mbx_completion_time_3000ms_4000ms");
735 
736 	SYSCTL_ADD_QUAD(ctx, node_children,
737 		OID_AUTO, "mbx_completion_time_4000ms_5000ms",
738 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[16],
739 		"mbx_completion_time_4000ms_5000ms");
740 
741 	SYSCTL_ADD_QUAD(ctx, node_children,
742 		OID_AUTO, "mbx_completion_host_mbx_cntrl_timeout",
743 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[17],
744 		"mbx_completion_host_mbx_cntrl_timeout");
745 
746 	SYSCTL_ADD_QUAD(ctx, node_children,
747 		OID_AUTO, "mbx_completion_fw_mbx_cntrl_timeout",
748 		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[18],
749 		"mbx_completion_fw_mbx_cntrl_timeout");
750 	return;
751 }
752 
753 static void
754 qlnx_add_hw_stats_sysctls(qla_host_t *ha)
755 {
756 	qlnx_add_hw_mac_stats_sysctls(ha);
757 	qlnx_add_hw_rcv_stats_sysctls(ha);
758 	qlnx_add_hw_xmt_stats_sysctls(ha);
759 	qlnx_add_hw_mbx_cmpl_stats_sysctls(ha);
760 
761 	return;
762 }
763 
764 static void
765 qlnx_add_drvr_sds_stats(qla_host_t *ha)
766 {
767         struct sysctl_ctx_list  *ctx;
768         struct sysctl_oid_list  *children;
769         struct sysctl_oid_list  *node_children;
770         struct sysctl_oid       *ctx_oid;
771         int                     i;
772         uint8_t                 name_str[16];
773 
774         ctx = device_get_sysctl_ctx(ha->pci_dev);
775         children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
776 
777         ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_sds",
778 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_drvr_sds");
779         children = SYSCTL_CHILDREN(ctx_oid);
780 
781         for (i = 0; i < ha->hw.num_sds_rings; i++) {
782                 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
783                 snprintf(name_str, sizeof(name_str), "%d", i);
784 
785                 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
786 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
787                 node_children = SYSCTL_CHILDREN(ctx_oid);
788 
789                 SYSCTL_ADD_QUAD(ctx, node_children,
790 			OID_AUTO, "intr_count",
791                         CTLFLAG_RD, &ha->hw.sds[i].intr_count,
792                         "intr_count");
793 
794                 SYSCTL_ADD_UINT(ctx, node_children,
795 			OID_AUTO, "rx_free",
796                         CTLFLAG_RD, &ha->hw.sds[i].rx_free,
797 			ha->hw.sds[i].rx_free, "rx_free");
798 	}
799 
800 	return;
801 }

802 static void
803 qlnx_add_drvr_rds_stats(qla_host_t *ha)
804 {
805         struct sysctl_ctx_list  *ctx;
806         struct sysctl_oid_list  *children;
807         struct sysctl_oid_list  *node_children;
808         struct sysctl_oid       *ctx_oid;
809         int                     i;
810         uint8_t                 name_str[16];
811 
812         ctx = device_get_sysctl_ctx(ha->pci_dev);
813         children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
814 
815         ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_rds",
816             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_drvr_rds");
817         children = SYSCTL_CHILDREN(ctx_oid);
818 
819         for (i = 0; i < ha->hw.num_rds_rings; i++) {
820                 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
821                 snprintf(name_str, sizeof(name_str), "%d", i);
822 
823                 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
824                     CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
825                 node_children = SYSCTL_CHILDREN(ctx_oid);
826 
827                 SYSCTL_ADD_QUAD(ctx, node_children,
828 			OID_AUTO, "count",
829                         CTLFLAG_RD, &ha->hw.rds[i].count,
830                         "count");
831 
832                 SYSCTL_ADD_QUAD(ctx, node_children,
833 			OID_AUTO, "lro_pkt_count",
834                         CTLFLAG_RD, &ha->hw.rds[i].lro_pkt_count,
835                         "lro_pkt_count");
836 
837                 SYSCTL_ADD_QUAD(ctx, node_children,
838 			OID_AUTO, "lro_bytes",
839                         CTLFLAG_RD, &ha->hw.rds[i].lro_bytes,
840                         "lro_bytes");
841 	}
842 
843 	return;
844 }
845 
846 static void
847 qlnx_add_drvr_tx_stats(qla_host_t *ha)
848 {
849         struct sysctl_ctx_list  *ctx;
850         struct sysctl_oid_list  *children;
851         struct sysctl_oid_list  *node_children;
852         struct sysctl_oid       *ctx_oid;
853         int                     i;
854         uint8_t                 name_str[16];
855 
856         ctx = device_get_sysctl_ctx(ha->pci_dev);
857         children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
858 
859         ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_xmt",
860             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_drvr_xmt");
861         children = SYSCTL_CHILDREN(ctx_oid);
862 
863         for (i = 0; i < ha->hw.num_tx_rings; i++) {
864                 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
865                 snprintf(name_str, sizeof(name_str), "%d", i);
866 
867                 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
868                     CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
869                 node_children = SYSCTL_CHILDREN(ctx_oid);
870 
871                 SYSCTL_ADD_QUAD(ctx, node_children,
872 			OID_AUTO, "count",
873                         CTLFLAG_RD, &ha->tx_ring[i].count,
874                         "count");
875 
876 #ifdef QL_ENABLE_ISCSI_TLV
877                 SYSCTL_ADD_QUAD(ctx, node_children,
878 			OID_AUTO, "iscsi_pkt_count",
879                         CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count,
880                         "iscsi_pkt_count");
881 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
882 	}
883 
884 	return;
885 }
886 
887 static void
888 qlnx_add_drvr_stats_sysctls(qla_host_t *ha)
889 {
890 	qlnx_add_drvr_sds_stats(ha);
891 	qlnx_add_drvr_rds_stats(ha);
892 	qlnx_add_drvr_tx_stats(ha);
893 	return;
894 }
895 
896 /*
897  * Name: ql_hw_add_sysctls
898  * Function: Add P3Plus-specific sysctls
899  */
900 void
901 ql_hw_add_sysctls(qla_host_t *ha)
902 {
903         device_t	dev;
904 
905         dev = ha->pci_dev;
906 
907 	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
908 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
909 		OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
910 		ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");
911 
912         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
913                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
914                 OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
915 		ha->hw.num_sds_rings, "Number of Status Descriptor Rings");
916 
917         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
918                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
919                 OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
920 		ha->hw.num_tx_rings, "Number of Transmit Rings");
921 
922         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
923                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
924                 OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
925 		ha->txr_idx, "Tx Ring Used");
926 
927         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
928                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
929                 OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
930 		ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");
931 
932 	ha->hw.sds_cidx_thres = 32;
933         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
934                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
935                 OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
936 		ha->hw.sds_cidx_thres,
937 		"Number of SDS entries to process before updating"
938 		" SDS Ring Consumer Index");
939 
940 	ha->hw.rds_pidx_thres = 32;
941         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
942                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
943                 OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
944 		ha->hw.rds_pidx_thres,
945 		"Number of Rcv Rings Entries to post before updating"
946 		" RDS Ring Producer Index");
947 
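        /*
         * Coalescing words pack "max packets" into bits 15:0 and the max
         * wait in microseconds into bits 31:16 (see the help text below).
         * So (3 << 16) | 256 means interrupt after 256 receive packets or
         * 3us, and (64 << 16) | 64 means 64 packets or 64us on transmit.
         */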
948         ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
949         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
950                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
951                 OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
952                 &ha->hw.rcv_intr_coalesce,
953                 ha->hw.rcv_intr_coalesce,
954                 "Rcv Intr Coalescing Parameters\n"
955                 "\tbits 15:0 max packets\n"
956                 "\tbits 31:16 max micro-seconds to wait\n"
957                 "\tplease run\n"
958                 "\tifconfig <if> down && ifconfig <if> up\n"
959                 "\tto take effect \n");
960 
961         ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
962         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
963                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
964                 OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
965                 &ha->hw.xmt_intr_coalesce,
966                 ha->hw.xmt_intr_coalesce,
967                 "Xmt Intr Coalescing Parameters\n"
968                 "\tbits 15:0 max packets\n"
969                 "\tbits 31:16 max micro-seconds to wait\n"
970                 "\tplease run\n"
971                 "\tifconfig <if> down && ifconfig <if> up\n"
972                 "\tto take effect \n");
973 
974         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
975             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
976 	    "port_cfg", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
977 	    (void *)ha, 0, qla_sysctl_port_cfg, "I",
978 	    "Set Port Configuration if values below "
979 	    "otherwise Get Port Configuration\n"
980 	    "\tBits 0-3 ; 1 = DCBX Enable; 0 = DCBX Disable\n"
981 	    "\tBits 4-7 : 0 = no pause; 1 = std ; 2 = ppm \n"
982 	    "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
983 	    " 1 = xmt only; 2 = rcv only;\n");
984 
985 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
986 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
987 	    "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
988 	    (void *)ha, 0, qla_sysctl_set_cam_search_mode, "I",
989 	    "Set CAM Search Mode"
990 	    "\t 1 = search mode internal\n"
991 	    "\t 2 = search mode auto\n");
992 
993 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
994 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
995 		"get_cam_search_mode",
996 		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
997 		qla_sysctl_get_cam_search_mode, "I",
998 		"Get CAM Search Mode"
999 		"\t 1 = search mode internal\n"
1000 		"\t 2 = search mode auto\n");
1001 
1002         ha->hw.enable_9kb = 1;
1003 
1004         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1005                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1006                 OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
1007                 ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");
1008 
1009         ha->hw.enable_hw_lro = 1;
1010 
1011         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1012                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1013                 OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro,
1014                 ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true\n"
1015 		"\t 1 : Hardware LRO if LRO is enabled\n"
1016 		"\t 0 : Software LRO if LRO is enabled\n"
1017 		"\t Any change requires ifconfig down/up to take effect\n"
1018 		"\t Note that LRO may be turned off/on via ifconfig\n");
1019 
1020         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1021                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1022                 OID_AUTO, "sp_log_index", CTLFLAG_RW, &ha->hw.sp_log_index,
1023                 ha->hw.sp_log_index, "sp_log_index");
1024 
1025         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1026                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1027                 OID_AUTO, "sp_log_stop", CTLFLAG_RW, &ha->hw.sp_log_stop,
1028                 ha->hw.sp_log_stop, "sp_log_stop");
1029 
1030         ha->hw.sp_log_stop_events = 0;
1031 
1032         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1033                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1034                 OID_AUTO, "sp_log_stop_events", CTLFLAG_RW,
1035 		&ha->hw.sp_log_stop_events,
1036                 ha->hw.sp_log_stop_events, "Slow path event log is stopped"
1037 		" when OR of the following events occur \n"
1038 		"\t 0x01 : Heart beat Failure\n"
1039 		"\t 0x02 : Temperature Failure\n"
1040 		"\t 0x04 : HW Initialization Failure\n"
1041 		"\t 0x08 : Interface Initialization Failure\n"
1042 		"\t 0x10 : Error Recovery Failure\n");
1043 
1044 	ha->hw.mdump_active = 0;
1045         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1046                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1047                 OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
1048 		ha->hw.mdump_active,
1049 		"Minidump retrieval is Active");
1050 
1051 	ha->hw.mdump_done = 0;
1052         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1053                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1054                 OID_AUTO, "mdump_done", CTLFLAG_RW,
1055 		&ha->hw.mdump_done, ha->hw.mdump_done,
1056 		"Minidump has been done and available for retrieval");
1057 
1058 	ha->hw.mdump_capture_mask = 0xF;
1059         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1060                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1061                 OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
1062 		&ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
1063 		"Minidump capture mask");
1064 #ifdef QL_DBG
1065 
1066 	ha->err_inject = 0;
1067         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1068                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1069                 OID_AUTO, "err_inject",
1070                 CTLFLAG_RW, &ha->err_inject, ha->err_inject,
1071                 "Error to be injected\n"
1072                 "\t\t\t 0: No Errors\n"
1073                 "\t\t\t 1: rcv: rxb struct invalid\n"
1074                 "\t\t\t 2: rcv: mp == NULL\n"
1075                 "\t\t\t 3: lro: rxb struct invalid\n"
1076                 "\t\t\t 4: lro: mp == NULL\n"
1077                 "\t\t\t 5: rcv: num handles invalid\n"
1078                 "\t\t\t 6: reg: indirect reg rd_wr failure\n"
1079                 "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
1080                 "\t\t\t 8: mbx: mailbox command failure\n"
1081                 "\t\t\t 9: heartbeat failure\n"
1082                 "\t\t\t A: temperature failure\n"
1083 		"\t\t\t 11: m_getcl or m_getjcl failure\n"
1084 		"\t\t\t 13: Invalid Descriptor Count in SGL Receive\n"
1085 		"\t\t\t 14: Invalid Descriptor Count in LRO Receive\n"
1086 		"\t\t\t 15: peer port error recovery failure\n"
1087 		"\t\t\t 16: tx_buf[next_prod_index].mbuf != NULL\n" );
1088 
1089 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1090             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
1091 	    "peg_stop", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1092 	    (void *)ha, 0, qla_sysctl_stop_pegs, "I", "Peg Stop");
1093 
1094 #endif /* #ifdef QL_DBG */
1095 
1096         ha->hw.user_pri_nic = 0;
1097         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1098                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1099                 OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
1100                 ha->hw.user_pri_nic,
1101                 "VLAN Tag User Priority for Normal Ethernet Packets");
1102 
1103         ha->hw.user_pri_iscsi = 4;
1104         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
1105                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1106                 OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
1107                 ha->hw.user_pri_iscsi,
1108                 "VLAN Tag User Priority for iSCSI Packets");
1109 
1110 	qlnx_add_hw_stats_sysctls(ha);
1111 	qlnx_add_drvr_stats_sysctls(ha);
1112 
1113 	return;
1114 }
1115 
1116 void
1117 ql_hw_link_status(qla_host_t *ha)
1118 {
1119 	device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);
1120 
1121 	if (ha->hw.link_up) {
1122 		device_printf(ha->pci_dev, "link Up\n");
1123 	} else {
1124 		device_printf(ha->pci_dev, "link Down\n");
1125 	}
1126 
1127 	if (ha->hw.fduplex) {
1128 		device_printf(ha->pci_dev, "Full Duplex\n");
1129 	} else {
1130 		device_printf(ha->pci_dev, "Half Duplex\n");
1131 	}
1132 
1133 	if (ha->hw.autoneg) {
1134 		device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
1135 	} else {
1136 		device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
1137 	}
1138 
1139 	switch (ha->hw.link_speed) {
1140 	case 0x710:
1141 		device_printf(ha->pci_dev, "link speed\t\t 10Gps\n");
1142 		break;
1143 
1144 	case 0x3E8:
1145 		device_printf(ha->pci_dev, "link speed\t\t 1Gps\n");
1146 		break;
1147 
1148 	case 0x64:
1149 		device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
1150 		break;
1151 
1152 	default:
1153 		device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
1154 		break;
1155 	}
1156 
1157 	switch (ha->hw.module_type) {
1158 	case 0x01:
1159 		device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
1160 		break;
1161 
1162 	case 0x02:
1163 		device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
1164 		break;
1165 
1166 	case 0x03:
1167 		device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
1168 		break;
1169 
1170 	case 0x04:
1171 		device_printf(ha->pci_dev,
1172 			"Module Type 10GE Passive Copper(Compliant)[%d m]\n",
1173 			ha->hw.cable_length);
1174 		break;
1175 
1176 	case 0x05:
1177 		device_printf(ha->pci_dev, "Module Type 10GE Active"
1178 			" Limiting Copper(Compliant)[%d m]\n",
1179 			ha->hw.cable_length);
1180 		break;
1181 
1182 	case 0x06:
1183 		device_printf(ha->pci_dev,
1184 			"Module Type 10GE Passive Copper"
1185 			" (Legacy, Best Effort)[%d m]\n",
1186 			ha->hw.cable_length);
1187 		break;
1188 
1189 	case 0x07:
1190 		device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
1191 		break;
1192 
1193 	case 0x08:
1194 		device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
1195 		break;
1196 
1197 	case 0x09:
1198 		device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
1199 		break;
1200 
1201 	case 0x0A:
1202 		device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
1203 		break;
1204 
1205 	case 0x0B:
1206 		device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
1207 			"(Legacy, Best Effort)\n");
1208 		break;
1209 
1210 	default:
1211 		device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
1212 			ha->hw.module_type);
1213 		break;
1214 	}
1215 
1216 	if (ha->hw.link_faults == 1)
1217 		device_printf(ha->pci_dev, "SFP Power Fault\n");
1218 }
1219 
1220 /*
1221  * Name: ql_free_dma
1222  * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
1223  */
1224 void
1225 ql_free_dma(qla_host_t *ha)
1226 {
1227 	uint32_t i;
1228 
1229         if (ha->hw.dma_buf.flags.sds_ring) {
1230 		for (i = 0; i < ha->hw.num_sds_rings; i++) {
1231 			ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
1232 		}
1233         	ha->hw.dma_buf.flags.sds_ring = 0;
1234 	}
1235 
1236         if (ha->hw.dma_buf.flags.rds_ring) {
1237 		for (i = 0; i < ha->hw.num_rds_rings; i++) {
1238 			ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
1239 		}
1240         	ha->hw.dma_buf.flags.rds_ring = 0;
1241 	}
1242 
1243         if (ha->hw.dma_buf.flags.tx_ring) {
1244 		ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
1245         	ha->hw.dma_buf.flags.tx_ring = 0;
1246 	}
1247 	ql_minidump_free(ha);
1248 }
1249 
1250 /*
1251  * Name: ql_alloc_dma
1252  * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
1253  */
1254 int
1255 ql_alloc_dma(qla_host_t *ha)
1256 {
1257         device_t                dev;
1258 	uint32_t		i, j, size, tx_ring_size;
1259 	qla_hw_t		*hw;
1260 	qla_hw_tx_cntxt_t	*tx_cntxt;
1261 	uint8_t			*vaddr;
1262 	bus_addr_t		paddr;
1263 
1264         dev = ha->pci_dev;
1265 
1266         QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
1267 
1268 	hw = &ha->hw;
1269 	/*
1270 	 * Allocate Transmit Ring
1271 	 */
1272 	tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
1273 	size = (tx_ring_size * ha->hw.num_tx_rings);
1274 
1275 	hw->dma_buf.tx_ring.alignment = 8;
1276 	hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
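	/*
	 * One contiguous DMA buffer holds all Tx rings back to back; the
	 * extra PAGE_SIZE added here carries one 32-bit consumer-index
	 * word per ring, carved out just past the rings by the second
	 * loop below and updated by the firmware.
	 */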
1277 
1278         if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
1279                 device_printf(dev, "%s: tx ring alloc failed\n", __func__);
1280                 goto ql_alloc_dma_exit;
1281         }
1282 
1283 	vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
1284 	paddr = hw->dma_buf.tx_ring.dma_addr;
1285 
1286 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
1287 		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
1288 
1289 		tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
1290 		tx_cntxt->tx_ring_paddr = paddr;
1291 
1292 		vaddr += tx_ring_size;
1293 		paddr += tx_ring_size;
1294 	}
1295 
1296 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
1297 		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
1298 
1299 		tx_cntxt->tx_cons = (uint32_t *)vaddr;
1300 		tx_cntxt->tx_cons_paddr = paddr;
1301 
1302 		vaddr += sizeof (uint32_t);
1303 		paddr += sizeof (uint32_t);
1304 	}
1305 
1306         ha->hw.dma_buf.flags.tx_ring = 1;
1307 
1308 	QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
1309 		__func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
1310 		hw->dma_buf.tx_ring.dma_b));
1311 	/*
1312 	 * Allocate Receive Descriptor Rings
1313 	 */
1314 
1315 	for (i = 0; i < hw->num_rds_rings; i++) {
1316 		hw->dma_buf.rds_ring[i].alignment = 8;
1317 		hw->dma_buf.rds_ring[i].size =
1318 			(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
1319 
1320 		if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
1321 			device_printf(dev, "%s: rds ring[%d] alloc failed\n",
1322 				__func__, i);
1323 
1324 			for (j = 0; j < i; j++)
1325 				ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);
1326 
1327 			goto ql_alloc_dma_exit;
1328 		}
1329 		QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
1330 			__func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
1331 			hw->dma_buf.rds_ring[i].dma_b));
1332 	}
1333 
1334 	hw->dma_buf.flags.rds_ring = 1;
1335 
1336 	/*
1337 	 * Allocate Status Descriptor Rings
1338 	 */
1339 
1340 	for (i = 0; i < hw->num_sds_rings; i++) {
1341 		hw->dma_buf.sds_ring[i].alignment = 8;
1342 		hw->dma_buf.sds_ring[i].size =
1343 			(sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;
1344 
1345 		if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
1346 			device_printf(dev, "%s: sds ring alloc failed\n",
1347 				__func__);
1348 
1349 			for (j = 0; j < i; j++)
1350 				ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);
1351 
1352 			goto ql_alloc_dma_exit;
1353 		}
1354 		QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
1355 			__func__, i,
1356 			(void *)(hw->dma_buf.sds_ring[i].dma_addr),
1357 			hw->dma_buf.sds_ring[i].dma_b));
1358 	}
1359 	for (i = 0; i < hw->num_sds_rings; i++) {
1360 		hw->sds[i].sds_ring_base =
1361 			(q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
1362 	}
1363 
1364 	hw->dma_buf.flags.sds_ring = 1;
1365 
1366 	return 0;
1367 
1368 ql_alloc_dma_exit:
1369 	ql_free_dma(ha);
1370 	return -1;
1371 }
1372 
1373 #define Q8_MBX_MSEC_DELAY	5000
1374 
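/*
 * Host <-> firmware mailbox handshake implemented below:
 *  1. wait for Q8_HOST_MBOX_CNTRL to read 0 (previous command consumed);
 *  2. write the n_hmbox request words to Q8_HOST_MBOX0.. and set
 *     Q8_HOST_MBOX_CNTRL to 1 to signal the firmware;
 *  3. poll Q8_FW_MBOX_CNTRL until a completion is posted, skipping
 *     async events ((mbox0 & 0xF000) == 0x8000);
 *  4. read the n_fwmbox response words, then ack by clearing
 *     Q8_FW_MBOX_CNTRL and the mailbox interrupt mask.
 * Completion time is binned into ha->hw.mbx_comp_msecs[] in 200ms
 * buckets; see qlnx_add_hw_mbx_cmpl_stats_sysctls().
 */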
1375 static int
1376 qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
1377 	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
1378 {
1379 	uint32_t i;
1380 	uint32_t data;
1381 	int ret = 0;
1382 	uint64_t start_usecs;
1383 	uint64_t end_usecs;
1384 	uint64_t msecs_200;
1385 
1386 	ql_sp_log(ha, 0, 5, no_pause, h_mbox[0], h_mbox[1], h_mbox[2], h_mbox[3]);
1387 
1388 	if (ha->offline || ha->qla_initiate_recovery) {
1389 		ql_sp_log(ha, 1, 2, ha->offline, ha->qla_initiate_recovery, 0, 0, 0);
1390 		goto exit_qla_mbx_cmd;
1391 	}
1392 
1393 	if (((ha->err_inject & 0xFFFF) == INJCT_MBX_CMD_FAILURE) &&
1394 		(((ha->err_inject & ~0xFFFF) == ((h_mbox[0] & 0xFFFF) << 16))||
1395 		!(ha->err_inject & ~0xFFFF))) {
1396 		ret = -3;
1397 		QL_INITIATE_RECOVERY(ha);
1398 		goto exit_qla_mbx_cmd;
1399 	}
1400 
1401 	start_usecs = qla_get_usec_timestamp();
1402 
1403 	if (no_pause)
1404 		i = 1000;
1405 	else
1406 		i = Q8_MBX_MSEC_DELAY;
1407 
1408 	while (i) {
1409 		if (ha->qla_initiate_recovery) {
1410 			ql_sp_log(ha, 2, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
1411 			return (-1);
1412 		}
1413 
1414 		data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
1415 		if (data == 0)
1416 			break;
1417 		if (no_pause) {
1418 			DELAY(1000);
1419 		} else {
1420 			qla_mdelay(__func__, 1);
1421 		}
1422 		i--;
1423 	}
1424 
1425 	if (i == 0) {
1426 		device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
1427 			__func__, data);
1428 		ql_sp_log(ha, 3, 1, data, 0, 0, 0, 0);
1429 		ret = -1;
1430 		ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 2)]++;
1431 		QL_INITIATE_RECOVERY(ha);
1432 		goto exit_qla_mbx_cmd;
1433 	}
1434 
1435 	for (i = 0; i < n_hmbox; i++) {
1436 		WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
1437 		h_mbox++;
1438 	}
1439 
1440 	WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);
1441 
1442 	i = Q8_MBX_MSEC_DELAY;
1443 	while (i) {
1444 		if (ha->qla_initiate_recovery) {
1445 			ql_sp_log(ha, 4, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
1446 			return (-1);
1447 		}
1448 
1449 		data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
1450 
1451 		if ((data & 0x3) == 1) {
1452 			data = READ_REG32(ha, Q8_FW_MBOX0);
1453 			if ((data & 0xF000) != 0x8000)
1454 				break;
1455 		}
1456 		if (no_pause) {
1457 			DELAY(1000);
1458 		} else {
1459 			qla_mdelay(__func__, 1);
1460 		}
1461 		i--;
1462 	}
1463 	if (i == 0) {
1464 		device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
1465 			__func__, data);
1466 		ql_sp_log(ha, 5, 1, data, 0, 0, 0, 0);
1467 		ret = -2;
1468 		ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 1)]++;
1469 		QL_INITIATE_RECOVERY(ha);
1470 		goto exit_qla_mbx_cmd;
1471 	}
1472 
1473 	for (i = 0; i < n_fwmbox; i++) {
1474 		if (ha->qla_initiate_recovery) {
1475 			ql_sp_log(ha, 6, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
1476 			return (-1);
1477 		}
1478 
		/* index rather than advance fw_mbox, so the ql_sp_log() below
		 * logs the start of the response instead of one past its end */
1479 		fw_mbox[i] = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
1480 	}
1481 
1482 	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
1483 	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
1484 
1485 	end_usecs = qla_get_usec_timestamp();
1486 
1487 	if (end_usecs > start_usecs) {
1488 		msecs_200 = (end_usecs - start_usecs)/(1000 * 200);
1489 
1490 		if (msecs_200 < 15)
1491 			ha->hw.mbx_comp_msecs[msecs_200]++;
1492 		else if (msecs_200 < 20)
1493 			ha->hw.mbx_comp_msecs[15]++;
1494 		else {
1495 			device_printf(ha->pci_dev, "%s: [%ld, %ld] %ld\n", __func__,
1496 				start_usecs, end_usecs, msecs_200);
1497 			ha->hw.mbx_comp_msecs[16]++;
1498 		}
1499 	}
1500 	ql_sp_log(ha, 7, 5, fw_mbox[0], fw_mbox[1], fw_mbox[2], fw_mbox[3], fw_mbox[4]);
1501 
1502 exit_qla_mbx_cmd:
1503 	return (ret);
1504 }
1505 
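/*
 * Query NIC partition info from firmware: bit 7 of response mbox[16]
 * advertises 9KB receive-buffer support, and mbox[6] bits 31:16 give the
 * number of receive queues; a status of either 0 or 1 (mbox[0] >> 25) is
 * treated as success here.
 */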
1506 int
1507 qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
1508 	uint32_t *num_rcvq)
1509 {
1510 	uint32_t *mbox, err;
1511 	device_t dev = ha->pci_dev;
1512 
1513 	bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));
1514 
1515 	mbox = ha->hw.mbox;
1516 
1517 	mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);
1518 
1519 	if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
1520 		device_printf(dev, "%s: failed0\n", __func__);
1521 		return (-1);
1522 	}
1523 	err = mbox[0] >> 25;
1524 
1525 	if (supports_9kb != NULL) {
1526 		if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
1527 			*supports_9kb = 1;
1528 		else
1529 			*supports_9kb = 0;
1530 	}
1531 
1532 	if (num_rcvq != NULL)
1533 		*num_rcvq = ((mbox[6] >> 16) & 0xFFFF);
1534 
1535 	if ((err != 1) && (err != 0)) {
1536 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1537 		return (-1);
1538 	}
1539 	return 0;
1540 }
1541 
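/*
 * Create or delete num_intrs MSI-X interrupt contexts in firmware. On
 * create, msix_index = start_idx + 1 + i skips vector 0 (presumably
 * reserved for the mailbox interrupt; an inference), and the
 * firmware-assigned id/source pairs are saved in ha->hw.intr_id[] and
 * ha->hw.intr_src[]; on delete, those saved ids are handed back.
 */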
1542 static int
1543 qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
1544 	uint32_t create)
1545 {
1546 	uint32_t i, err;
1547 	device_t dev = ha->pci_dev;
1548 	q80_config_intr_t *c_intr;
1549 	q80_config_intr_rsp_t *c_intr_rsp;
1550 
1551 	c_intr = (q80_config_intr_t *)ha->hw.mbox;
1552 	bzero(c_intr, (sizeof (q80_config_intr_t)));
1553 
1554 	c_intr->opcode = Q8_MBX_CONFIG_INTR;
1555 
1556 	c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
1557 	c_intr->count_version |= Q8_MBX_CMD_VERSION;
1558 
1559 	c_intr->nentries = num_intrs;
1560 
1561 	for (i = 0; i < num_intrs; i++) {
1562 		if (create) {
1563 			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
1564 			c_intr->intr[i].msix_index = start_idx + 1 + i;
1565 		} else {
1566 			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
1567 			c_intr->intr[i].msix_index =
1568 				ha->hw.intr_id[(start_idx + i)];
1569 		}
1570 
1571 		c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
1572 	}
1573 
1574 	if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
1575 		(sizeof (q80_config_intr_t) >> 2),
1576 		ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
1577 		device_printf(dev, "%s: %s failed0\n", __func__,
1578 			(create ? "create" : "delete"));
1579 		return (-1);
1580 	}
1581 
1582 	c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;
1583 
1584 	err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);
1585 
1586 	if (err) {
1587 		device_printf(dev, "%s: %s failed1 [0x%08x, %d]\n", __func__,
1588 			(create ? "create" : "delete"), err, c_intr_rsp->nentries);
1589 
1590 		for (i = 0; i < c_intr_rsp->nentries; i++) {
1591 			device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
1592 				__func__, i,
1593 				c_intr_rsp->intr[i].status,
1594 				c_intr_rsp->intr[i].intr_id,
1595 				c_intr_rsp->intr[i].intr_src);
1596 		}
1597 
1598 		return (-1);
1599 	}
1600 
1601 	for (i = 0; ((i < num_intrs) && create); i++) {
1602 		if (!c_intr_rsp->intr[i].status) {
1603 			ha->hw.intr_id[(start_idx + i)] =
1604 				c_intr_rsp->intr[i].intr_id;
1605 			ha->hw.intr_src[(start_idx + i)] =
1606 				c_intr_rsp->intr[i].intr_src;
1607 		}
1608 	}
1609 
1610 	return (0);
1611 }
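
/*
 * Interrupt contexts are created and torn down in windows of at most
 * Q8_MAX_INTR_VECTORS entries per mailbox command; ql_init_hw_if() and
 * ql_del_hw_if() later in this file drive that chunking loop.
 */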
1612 
1613 /*
1614  * Name: qla_config_rss
1615  * Function: Configure RSS for the context/interface.
1616  */
1617 static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
1618 			0x8030f20c77cb2da3ULL,
1619 			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
1620 			0x255b0ec26d5a56daULL };
1621 
1622 static int
1623 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
1624 {
1625 	q80_config_rss_t	*c_rss;
1626 	q80_config_rss_rsp_t	*c_rss_rsp;
1627 	uint32_t		err, i;
1628 	device_t		dev = ha->pci_dev;
1629 
1630 	c_rss = (q80_config_rss_t *)ha->hw.mbox;
1631 	bzero(c_rss, (sizeof (q80_config_rss_t)));
1632 
1633 	c_rss->opcode = Q8_MBX_CONFIG_RSS;
1634 
1635 	c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
1636 	c_rss->count_version |= Q8_MBX_CMD_VERSION;
1637 
1638 	c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
1639 				Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
1640 	//c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
1641 	//			Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
1642 
1643 	c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
1644 	c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
1645 
1646 	c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
1647 
1648 	c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
1649 	c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
1650 
1651 	c_rss->cntxt_id = cntxt_id;
1652 
1653 	for (i = 0; i < 5; i++) {
1654 		c_rss->rss_key[i] = rss_key[i];
1655 	}
1656 
1657 	if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
1658 		(sizeof (q80_config_rss_t) >> 2),
1659 		ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
1660 		device_printf(dev, "%s: failed0\n", __func__);
1661 		return (-1);
1662 	}
1663 	c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
1664 
1665 	err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
1666 
1667 	if (err) {
1668 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1669 		return (-1);
1670 	}
1671 	return 0;
1672 }
1673 
1674 static int
1675 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
1676         uint16_t cntxt_id, uint8_t *ind_table)
1677 {
1678         q80_config_rss_ind_table_t      *c_rss_ind;
1679         q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
1680         uint32_t                        err;
1681         device_t                        dev = ha->pci_dev;
1682 
1683 	if ((count > Q8_RSS_IND_TBL_SIZE) ||
1684 		((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
1685 		device_printf(dev, "%s: illegal start_idx/count [%d, %d]\n",
1686 			__func__, start_idx, count);
1687 		return (-1);
1688 	}
1689 
1690         c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
1691         bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
1692 
1693         c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
1694         c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
1695         c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
1696 
1697 	c_rss_ind->start_idx = start_idx;
1698 	c_rss_ind->end_idx = start_idx + count - 1;
1699 	c_rss_ind->cntxt_id = cntxt_id;
1700 	bcopy(ind_table, c_rss_ind->ind_table, count);
1701 
1702 	if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
1703 		(sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
1704 		(sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
1705 		device_printf(dev, "%s: failed0\n", __func__);
1706 		return (-1);
1707 	}
1708 
1709 	c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
1710 	err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
1711 
1712 	if (err) {
1713 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1714 		return (-1);
1715 	}
1716 	return 0;
1717 }
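
/*
 * The firmware accepts at most Q8_RSS_IND_TBL_SIZE indirection entries per
 * request, so a full table is programmed in windows of
 * [start_idx, start_idx + count - 1]. qla_config_rss_ind_table() later in
 * this file walks the whole 0..Q8_RSS_IND_TBL_MAX_IDX range in such windows.
 */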
1718 
1719 /*
1720  * Name: qla_config_intr_coalesce
1721  * Function: Configure Interrupt Coalescing.
1722  */
1723 static int
1724 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
1725 	int rcv)
1726 {
1727 	q80_config_intr_coalesc_t	*intrc;
1728 	q80_config_intr_coalesc_rsp_t	*intrc_rsp;
1729 	uint32_t			err, i;
1730 	device_t			dev = ha->pci_dev;
1731 
1732 	intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
1733 	bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
1734 
1735 	intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
1736 	intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
1737 	intrc->count_version |= Q8_MBX_CMD_VERSION;
1738 
1739 	if (rcv) {
1740 		intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
1741 		intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
1742 		intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
1743 	} else {
1744 		intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
1745 		intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
1746 		intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
1747 	}
1748 
1749 	intrc->cntxt_id = cntxt_id;
1750 
1751 	if (tenable) {
1752 		intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
1753 		intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
1754 
1755 		for (i = 0; i < ha->hw.num_sds_rings; i++) {
1756 			intrc->sds_ring_mask |= (1 << i);
1757 		}
1758 		intrc->ms_timeout = 1000;
1759 	}
1760 
1761 	if (qla_mbx_cmd(ha, (uint32_t *)intrc,
1762 		(sizeof (q80_config_intr_coalesc_t) >> 2),
1763 		ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
1764 		device_printf(dev, "%s: failed0\n", __func__);
1765 		return (-1);
1766 	}
1767 	intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
1768 
1769 	err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
1770 
1771 	if (err) {
1772 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1773 		return (-1);
1774 	}
1775 
1776 	return 0;
1777 }
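
/*
 * The rcv_intr_coalesce / xmt_intr_coalesce fields pack both coalescing
 * parameters into one 32-bit word: the low 16 bits hold the packet
 * threshold (max_pkts), the high 16 bits the maximum wait (max_mswait).
 * A compiled-out sketch with illustrative values (not driver defaults):
 */
#if 0
	/* interrupt after 256 packets or 3 wait units, whichever comes first */
	ha->hw.rcv_intr_coalesce = (3 << 16) | 256;

	if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
		device_printf(ha->pci_dev, "rcv coalesce config failed\n");
#endif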
1778 
1779 /*
1780  * Name: qla_config_mac_addr
1781  * Function: binds a MAC address to the context/interface.
1782  *	Can be unicast, multicast or broadcast.
1783  */
1784 static int
1785 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
1786 	uint32_t num_mac)
1787 {
1788 	q80_config_mac_addr_t		*cmac;
1789 	q80_config_mac_addr_rsp_t	*cmac_rsp;
1790 	uint32_t			err;
1791 	device_t			dev = ha->pci_dev;
1792 	int				i;
1793 	uint8_t				*mac_cpy = mac_addr;
1794 
1795 	if (num_mac > Q8_MAX_MAC_ADDRS) {
1796 		device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
1797 			__func__, (add_mac ? "Add" : "Del"), num_mac);
1798 		return (-1);
1799 	}
1800 
1801 	cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
1802 	bzero(cmac, (sizeof (q80_config_mac_addr_t)));
1803 
1804 	cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
1805 	cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
1806 	cmac->count_version |= Q8_MBX_CMD_VERSION;
1807 
1808 	if (add_mac)
1809 		cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
1810 	else
1811 		cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
1812 
1813 	cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
1814 
1815 	cmac->nmac_entries = num_mac;
1816 	cmac->cntxt_id = ha->hw.rcv_cntxt_id;
1817 
1818 	for (i = 0; i < num_mac; i++) {
1819 		bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN);
1820 		mac_addr = mac_addr + ETHER_ADDR_LEN;
1821 	}
1822 
1823 	if (qla_mbx_cmd(ha, (uint32_t *)cmac,
1824 		(sizeof (q80_config_mac_addr_t) >> 2),
1825 		ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
1826 		device_printf(dev, "%s: %s failed0\n", __func__,
1827 			(add_mac ? "Add" : "Del"));
1828 		return (-1);
1829 	}
1830 	cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
1831 
1832 	err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
1833 
1834 	if (err) {
1835 		device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
1836 			(add_mac ? "Add" : "Del"), err);
1837 		for (i = 0; i < num_mac; i++) {
1838 			device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
1839 				__func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
1840 				mac_cpy[3], mac_cpy[4], mac_cpy[5]);
1841 			mac_cpy += ETHER_ADDR_LEN;
1842 		}
1843 		return (-1);
1844 	}
1845 
1846 	return 0;
1847 }
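
/*
 * mac_addr may point at an array of consecutive 6-byte addresses, allowing
 * up to Q8_MAX_MAC_ADDRS filters to be programmed with a single mailbox
 * command. A compiled-out sketch adding two unicast filters (the addresses
 * are made up for illustration):
 */
#if 0
	uint8_t macs[2 * ETHER_ADDR_LEN] = {
		0x00, 0x0e, 0x1e, 0x00, 0x00, 0x01,	/* filter 0 */
		0x00, 0x0e, 0x1e, 0x00, 0x00, 0x02	/* filter 1 */
	};

	if (qla_config_mac_addr(ha, macs, 1 /* add */, 2))
		device_printf(ha->pci_dev, "MAC filter add failed\n");
#endif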
1848 
1849 /*
1850  * Name: qla_set_mac_rcv_mode
1851  * Function: Enable/Disable AllMulticast and Promiscuous Modes.
1852  */
1853 static int
1854 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
1855 {
1856 	q80_config_mac_rcv_mode_t	*rcv_mode;
1857 	uint32_t			err;
1858 	q80_config_mac_rcv_mode_rsp_t	*rcv_mode_rsp;
1859 	device_t			dev = ha->pci_dev;
1860 
1861 	rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
1862 	bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
1863 
1864 	rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
1865 	rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
1866 	rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
1867 
1868 	rcv_mode->mode = mode;
1869 
1870 	rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
1871 
1872 	if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
1873 		(sizeof (q80_config_mac_rcv_mode_t) >> 2),
1874 		ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
1875 		device_printf(dev, "%s: failed0\n", __func__);
1876 		return (-1);
1877 	}
1878 	rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
1879 
1880 	err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
1881 
1882 	if (err) {
1883 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1884 		return (-1);
1885 	}
1886 
1887 	return 0;
1888 }
1889 
1890 int
1891 ql_set_promisc(qla_host_t *ha)
1892 {
1893 	int ret;
1894 
1895 	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1896 	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1897 	return (ret);
1898 }
1899 
1900 void
1901 qla_reset_promisc(qla_host_t *ha)
1902 {
1903 	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1904 	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1905 }
1906 
1907 int
1908 ql_set_allmulti(qla_host_t *ha)
1909 {
1910 	int ret;
1911 
1912 	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
1913 	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1914 	return (ret);
1915 }
1916 
1917 void
1918 qla_reset_allmulti(qla_host_t *ha)
1919 {
1920 	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
1921 	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1922 }
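
/*
 * The four wrappers above simply toggle bits in the cached mac_rcv_mode
 * and push the whole word back to firmware. A hypothetical ioctl-path
 * caller (compiled out; not taken from this driver):
 */
#if 0
	struct ifnet *ifp = ha->ifp;

	if (ifp->if_flags & IFF_PROMISC)
		(void)ql_set_promisc(ha);
	else
		qla_reset_promisc(ha);
#endif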
1923 
1924 /*
1925  * Name: ql_set_max_mtu
1926  * Function:
1927  *	Sets the maximum transfer unit size for the specified rcv context.
1928  */
1929 int
1930 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1931 {
1932 	device_t		dev;
1933 	q80_set_max_mtu_t	*max_mtu;
1934 	q80_set_max_mtu_rsp_t	*max_mtu_rsp;
1935 	uint32_t		err;
1936 
1937 	dev = ha->pci_dev;
1938 
1939 	max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
1940 	bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
1941 
1942 	max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
1943 	max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
1944 	max_mtu->count_version |= Q8_MBX_CMD_VERSION;
1945 
1946 	max_mtu->cntxt_id = cntxt_id;
1947 	max_mtu->mtu = mtu;
1948 
1949         if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1950 		(sizeof (q80_set_max_mtu_t) >> 2),
1951                 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1952                 device_printf(dev, "%s: failed\n", __func__);
1953                 return -1;
1954         }
1955 
1956 	max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1957 
1958         err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1959 
1960         if (err) {
1961                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1962         }
1963 
1964 	return 0;
1965 }
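
/*
 * ql_init_hw_if() below invokes ql_set_max_mtu() with ha->max_frame_size
 * for the receive context. A hypothetical direct call sizing the context
 * for 9000-byte jumbo payloads (the framing overhead shown is an
 * assumption, not taken from this driver):
 */
#if 0
	if (ql_set_max_mtu(ha, 9000 + ETHER_HDR_LEN + ETHER_CRC_LEN,
		ha->hw.rcv_cntxt_id))
		device_printf(ha->pci_dev, "max MTU update failed\n");
#endif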
1966 
1967 static int
1968 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1969 {
1970 	device_t		dev;
1971 	q80_link_event_t	*lnk;
1972 	q80_link_event_rsp_t	*lnk_rsp;
1973 	uint32_t		err;
1974 
1975 	dev = ha->pci_dev;
1976 
1977 	lnk = (q80_link_event_t *)ha->hw.mbox;
1978 	bzero(lnk, (sizeof (q80_link_event_t)));
1979 
1980 	lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
1981 	lnk->count_version = (sizeof (q80_link_event_t) >> 2);
1982 	lnk->count_version |= Q8_MBX_CMD_VERSION;
1983 
1984 	lnk->cntxt_id = cntxt_id;
1985 	lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
1986 
1987         if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
1988                 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
1989                 device_printf(dev, "%s: failed\n", __func__);
1990                 return -1;
1991         }
1992 
1993 	lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
1994 
1995         err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
1996 
1997         if (err) {
1998                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1999         }
2000 
2001 	return 0;
2002 }
2003 
2004 static int
2005 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
2006 {
2007 	device_t		dev;
2008 	q80_config_fw_lro_t	*fw_lro;
2009 	q80_config_fw_lro_rsp_t	*fw_lro_rsp;
2010 	uint32_t		err;
2011 
2012 	dev = ha->pci_dev;
2013 
2014 	fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
2015 	bzero(fw_lro, sizeof(q80_config_fw_lro_t));
2016 
2017 	fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
2018 	fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
2019 	fw_lro->count_version |= Q8_MBX_CMD_VERSION;
2020 
2021 	fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
2022 	fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
2023 
2024 	fw_lro->cntxt_id = cntxt_id;
2025 
2026 	if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
2027 		(sizeof (q80_config_fw_lro_t) >> 2),
2028 		ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
2029 		device_printf(dev, "%s: failed\n", __func__);
2030 		return -1;
2031 	}
2032 
2033 	fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
2034 
2035 	err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
2036 
2037 	if (err) {
2038 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2039 	}
2040 
2041 	return 0;
2042 }
2043 
2044 static int
2045 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
2046 {
2047 	device_t                dev;
2048 	q80_hw_config_t         *hw_config;
2049 	q80_hw_config_rsp_t     *hw_config_rsp;
2050 	uint32_t                err;
2051 
2052 	dev = ha->pci_dev;
2053 
2054 	hw_config = (q80_hw_config_t *)ha->hw.mbox;
2055 	bzero(hw_config, sizeof (q80_hw_config_t));
2056 
2057 	hw_config->opcode = Q8_MBX_HW_CONFIG;
2058 	hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
2059 	hw_config->count_version |= Q8_MBX_CMD_VERSION;
2060 
2061 	hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;
2062 
2063 	hw_config->u.set_cam_search_mode.mode = search_mode;
2064 
2065 	if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
2066 		(sizeof (q80_hw_config_t) >> 2),
2067 		ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
2068 		device_printf(dev, "%s: failed\n", __func__);
2069 		return -1;
2070 	}
2071 	hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
2072 
2073 	err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
2074 
2075 	if (err) {
2076 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2077 	}
2078 
2079 	return 0;
2080 }
2081 
2082 static int
2083 qla_get_cam_search_mode(qla_host_t *ha)
2084 {
2085 	device_t                dev;
2086 	q80_hw_config_t         *hw_config;
2087 	q80_hw_config_rsp_t     *hw_config_rsp;
2088 	uint32_t                err;
2089 
2090 	dev = ha->pci_dev;
2091 
2092 	hw_config = (q80_hw_config_t *)ha->hw.mbox;
2093 	bzero(hw_config, sizeof (q80_hw_config_t));
2094 
2095 	hw_config->opcode = Q8_MBX_HW_CONFIG;
2096 	hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
2097 	hw_config->count_version |= Q8_MBX_CMD_VERSION;
2098 
2099 	hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;
2100 
2101 	if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
2102 		(sizeof (q80_hw_config_t) >> 2),
2103 		ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
2104 		device_printf(dev, "%s: failed\n", __func__);
2105 		return -1;
2106 	}
2107 	hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
2108 
2109 	err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
2110 
2111 	if (err) {
2112 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2113 	} else {
2114 		device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
2115 			hw_config_rsp->u.get_cam_search_mode.mode);
2116 	}
2117 
2118 	return 0;
2119 }
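
/*
 * Q8_MBX_HW_CONFIG multiplexes several sub-commands through hw_config->cmd;
 * the set/get CAM search mode pair above differs only in the cmd value, the
 * word count encoded in count_version, and whether a mode is supplied.
 */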
2120 
2121 static int
2122 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
2123 {
2124 	device_t		dev;
2125 	q80_get_stats_t		*stat;
2126 	q80_get_stats_rsp_t	*stat_rsp;
2127 	uint32_t		err;
2128 
2129 	dev = ha->pci_dev;
2130 
2131 	stat = (q80_get_stats_t *)ha->hw.mbox;
2132 	bzero(stat, (sizeof (q80_get_stats_t)));
2133 
2134 	stat->opcode = Q8_MBX_GET_STATS;
2135 	stat->count_version = 2;
2136 	stat->count_version |= Q8_MBX_CMD_VERSION;
2137 
2138 	stat->cmd = cmd;
2139 
2140         if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
2141                 ha->hw.mbox, (rsp_size >> 2), 0)) {
2142                 device_printf(dev, "%s: failed\n", __func__);
2143                 return -1;
2144         }
2145 
2146 	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
2147 
2148         err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
2149 
2150         if (err) {
2151                 return -1;
2152         }
2153 
2154 	return 0;
2155 }
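
/*
 * The cmd word passed to qla_get_hw_stats() selects the statistics block
 * and carries a qualifier in its upper half: bits 16+ hold the PCI function
 * for MAC statistics and the context id for per-context receive/transmit
 * statistics, as composed in ql_get_stats() below.
 */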
2156 
2157 void
2158 ql_get_stats(qla_host_t *ha)
2159 {
2160 	q80_get_stats_rsp_t	*stat_rsp;
2161 	q80_mac_stats_t		*mstat;
2162 	q80_xmt_stats_t		*xstat;
2163 	q80_rcv_stats_t		*rstat;
2164 	uint32_t		cmd;
2165 	int			i;
2166 	struct ifnet *ifp = ha->ifp;
2167 
2168 	if (ifp == NULL)
2169 		return;
2170 
2171 	if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) != 0) {
2172 		device_printf(ha->pci_dev, "%s: failed\n", __func__);
2173 		return;
2174 	}
2175 
2176 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2177 		QLA_UNLOCK(ha, __func__);
2178 		return;
2179 	}
2180 
2181 	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
2182 	/*
2183 	 * Get MAC Statistics
2184 	 */
2185 	cmd = Q8_GET_STATS_CMD_TYPE_MAC;
2186 //	cmd |= Q8_GET_STATS_CMD_CLEAR;
2187 
2188 	cmd |= ((ha->pci_func & 0x1) << 16);
2189 
2190 	if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2191 		ha->offline)
2192 		goto ql_get_stats_exit;
2193 
2194 	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2195 		mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
2196 		bcopy(mstat, &ha->hw.mac, sizeof(q80_mac_stats_t));
2197 	} else {
2198                 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
2199 			__func__, ha->hw.mbox[0]);
2200 	}
2201 	/*
2202 	 * Get RCV Statistics
2203 	 */
2204 	cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
2205 //	cmd |= Q8_GET_STATS_CMD_CLEAR;
2206 	cmd |= (ha->hw.rcv_cntxt_id << 16);
2207 
2208 	if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2209 		ha->offline)
2210 		goto ql_get_stats_exit;
2211 
2212 	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2213 		rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
2214 		bcopy(rstat, &ha->hw.rcv, sizeof(q80_rcv_stats_t));
2215 	} else {
2216                 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
2217 			__func__, ha->hw.mbox[0]);
2218 	}
2219 
2220 	if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2221 		ha->offline)
2222 		goto ql_get_stats_exit;
2223 	/*
2224 	 * Get XMT Statistics
2225 	 */
2226 	for (i = 0 ; (i < ha->hw.num_tx_rings); i++) {
2227 		if (ha->qla_watchdog_pause ||
2228 			(!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2229 			ha->offline)
2230 			goto ql_get_stats_exit;
2231 
2232 		cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
2233 //		cmd |= Q8_GET_STATS_CMD_CLEAR;
2234 		cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
2235 
2236 		if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
2237 			== 0) {
2238 			xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
2239 			bcopy(xstat, &ha->hw.xmt[i], sizeof(q80_xmt_stats_t));
2240 		} else {
2241 			device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
2242 				__func__, ha->hw.mbox[0]);
2243 		}
2244 	}
2245 
2246 ql_get_stats_exit:
2247 	QLA_UNLOCK(ha, __func__);
2248 
2249 	return;
2250 }
2251 
2252 /*
2253  * Name: qla_tx_tso
2254  * Function: Checks if the packet to be transmitted is a candidate for
2255  *	Large TCP Segment Offload. If yes, the appropriate fields in the Tx
2256  *	Ring Structure are plugged in.
2257  */
2258 static int
2259 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
2260 {
2261 	struct ether_vlan_header *eh;
2262 	struct ip *ip = NULL;
2263 	struct ip6_hdr *ip6 = NULL;
2264 	struct tcphdr *th = NULL;
2265 	uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
2266 	uint16_t etype, opcode, offload = 1;
2267 	device_t dev;
2268 
2269 	dev = ha->pci_dev;
2270 
2271 	eh = mtod(mp, struct ether_vlan_header *);
2272 
2273 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2274 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2275 		etype = ntohs(eh->evl_proto);
2276 	} else {
2277 		ehdrlen = ETHER_HDR_LEN;
2278 		etype = ntohs(eh->evl_encap_proto);
2279 	}
2280 
2281 	hdrlen = 0;
2282 
2283 	switch (etype) {
2284 		case ETHERTYPE_IP:
2285 
2286 			tcp_opt_off = ehdrlen + sizeof(struct ip) +
2287 					sizeof(struct tcphdr);
2288 
2289 			if (mp->m_len < tcp_opt_off) {
2290 				m_copydata(mp, 0, tcp_opt_off, hdr);
2291 				ip = (struct ip *)(hdr + ehdrlen);
2292 			} else {
2293 				ip = (struct ip *)(mp->m_data + ehdrlen);
2294 			}
2295 
2296 			ip_hlen = ip->ip_hl << 2;
2297 			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
2298 
2300 			if ((ip->ip_p != IPPROTO_TCP) ||
2301 				(ip_hlen != sizeof (struct ip))){
2302 				/* IP Options are not supported */
2303 
2304 				offload = 0;
2305 			} else
2306 				th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2307 
2308 		break;
2309 
2310 		case ETHERTYPE_IPV6:
2311 
2312 			tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
2313 					sizeof (struct tcphdr);
2314 
2315 			if (mp->m_len < tcp_opt_off) {
2316 				m_copydata(mp, 0, tcp_opt_off, hdr);
2317 				ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
2318 			} else {
2319 				ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2320 			}
2321 
2322 			ip_hlen = sizeof(struct ip6_hdr);
2323 			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
2324 
2325 			if (ip6->ip6_nxt != IPPROTO_TCP) {
2326 				//device_printf(dev, "%s: ipv6\n", __func__);
2327 				offload = 0;
2328 			} else
2329 				th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
2330 		break;
2331 
2332 		default:
2333 			QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
2334 			offload = 0;
2335 		break;
2336 	}
2337 
2338 	if (!offload)
2339 		return (-1);
2340 
2341 	tcp_hlen = th->th_off << 2;
2342 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2343 
2344         if (mp->m_len < hdrlen) {
2345                 if (mp->m_len < tcp_opt_off) {
2346                         if (tcp_hlen > sizeof(struct tcphdr)) {
2347                                 m_copydata(mp, tcp_opt_off,
2348                                         (tcp_hlen - sizeof(struct tcphdr)),
2349                                         &hdr[tcp_opt_off]);
2350                         }
2351                 } else {
2352                         m_copydata(mp, 0, hdrlen, hdr);
2353                 }
2354         }
2355 
2356 	tx_cmd->mss = mp->m_pkthdr.tso_segsz;
2357 
2358 	tx_cmd->flags_opcode = opcode;
2359 	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
2360 	tx_cmd->total_hdr_len = hdrlen;
2361 
2362 	/* Check for multicast: least significant bit of the most significant address byte == 1 */
2363 	if (eh->evl_dhost[0] & 0x01) {
2364 		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
2365 	}
2366 
2367 	if (mp->m_len < hdrlen) {
2368 		device_printf(dev, "%s: hdrlen %d exceeds mp->m_len\n", __func__, hdrlen);
2369 		return (1);
2370 	}
2371 
2372 	return (0);
2373 }
2374 
2375 /*
2376  * Name: qla_tx_chksum
2377  * Function: Checks if the packet to be transmitted is a candidate for
2378  *	TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
2379  *	Ring Structure are plugged in.
2380  */
2381 static int
2382 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
2383 	uint32_t *tcp_hdr_off)
2384 {
2385 	struct ether_vlan_header *eh;
2386 	struct ip *ip;
2387 	struct ip6_hdr *ip6;
2388 	uint32_t ehdrlen, ip_hlen;
2389 	uint16_t etype, opcode, offload = 1;
2390 	device_t dev;
2391 	uint8_t buf[sizeof(struct ip6_hdr)];
2392 
2393 	dev = ha->pci_dev;
2394 
2395 	*op_code = 0;
2396 
2397 	if ((mp->m_pkthdr.csum_flags &
2398 		(CSUM_TCP|CSUM_UDP|CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0)
2399 		return (-1);
2400 
2401 	eh = mtod(mp, struct ether_vlan_header *);
2402 
2403 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2404 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2405 		etype = ntohs(eh->evl_proto);
2406 	} else {
2407 		ehdrlen = ETHER_HDR_LEN;
2408 		etype = ntohs(eh->evl_encap_proto);
2409 	}
2410 
2411 
2412 	switch (etype) {
2413 		case ETHERTYPE_IP:
2414 			ip = (struct ip *)(mp->m_data + ehdrlen);
2415 
2416 			ip_hlen = sizeof (struct ip);
2417 
2418 			if (mp->m_len < (ehdrlen + ip_hlen)) {
2419 				m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2420 				ip = (struct ip *)buf;
2421 			}
2422 
2423 			if (ip->ip_p == IPPROTO_TCP)
2424 				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
2425 			else if (ip->ip_p == IPPROTO_UDP)
2426 				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
2427 			else {
2428 				//device_printf(dev, "%s: ipv4\n", __func__);
2429 				offload = 0;
2430 			}
2431 		break;
2432 
2433 		case ETHERTYPE_IPV6:
2434 			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2435 
2436 			ip_hlen = sizeof(struct ip6_hdr);
2437 
2438 			if (mp->m_len < (ehdrlen + ip_hlen)) {
2439 				m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2440 					buf);
2441 				ip6 = (struct ip6_hdr *)buf;
2442 			}
2443 
2444 			if (ip6->ip6_nxt == IPPROTO_TCP)
2445 				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
2446 			else if (ip6->ip6_nxt == IPPROTO_UDP)
2447 				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
2448 			else {
2449 				//device_printf(dev, "%s: ipv6\n", __func__);
2450 				offload = 0;
2451 			}
2452 		break;
2453 
2454 		default:
2455 			offload = 0;
2456 		break;
2457 	}
2458 	if (!offload)
2459 		return (-1);
2460 
2461 	*op_code = opcode;
2462 	*tcp_hdr_off = (ip_hlen + ehdrlen);
2463 
2464 	return (0);
2465 }
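
/*
 * Worked example of the offset math above: for an untagged IPv4/TCP frame,
 * ehdrlen = ETHER_HDR_LEN (14) and ip_hlen = sizeof(struct ip) (20), so the
 * TCP header offset handed to the firmware is 34. A VLAN tag adds
 * ETHER_VLAN_ENCAP_LEN (4), giving 38.
 */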
2466 
2467 #define QLA_TX_MIN_FREE 2
2468 /*
2469  * Name: ql_hw_send
2470  * Function: Transmits a packet. It first checks if the packet is a
2471  *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
2472  *	offload. If neither criterion is met, the packet is transmitted
2473  *	as a regular Ethernet frame.
2474  */
2475 int
2476 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
2477 	uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
2478 {
2479 	struct ether_vlan_header *eh;
2480 	qla_hw_t *hw = &ha->hw;
2481 	q80_tx_cmd_t *tx_cmd, tso_cmd;
2482 	bus_dma_segment_t *c_seg;
2483 	uint32_t num_tx_cmds, hdr_len = 0;
2484 	uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
2485 	device_t dev;
2486 	int i, ret;
2487 	uint8_t *src = NULL, *dst = NULL;
2488 	uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
2489 	uint32_t op_code = 0;
2490 	uint32_t tcp_hdr_off = 0;
2491 
2492 	dev = ha->pci_dev;
2493 
2494 	/*
2495 	 * Always make sure there is at least one empty slot in the tx_ring;
2496 	 * the tx_ring is considered full when only one entry is available.
2497 	 */
2498         num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
2499 
2500 	total_length = mp->m_pkthdr.len;
2501 	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
2502 		device_printf(dev, "%s: total length %d exceeds QLA_MAX_TSO_FRAME_SIZE\n",
2503 			__func__, total_length);
2504 		return (EINVAL);
2505 	}
2506 	eh = mtod(mp, struct ether_vlan_header *);
2507 
2508 	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2509 		bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
2510 
2511 		src = frame_hdr;
2512 		ret = qla_tx_tso(ha, mp, &tso_cmd, src);
2513 
2514 		if (!(ret & ~1)) {
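		/*
		 * ret == 0: the header is resident in the mbuf; ret == 1:
		 * the header was copied into frame_hdr. Any other value
		 * means the frame is not a TSO candidate, so
		 * (ret & ~1) == 0 accepts exactly those two cases.
		 */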
2515 			/* find the additional tx_cmd descriptors required */
2516 
2517 			if (mp->m_flags & M_VLANTAG)
2518 				tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
2519 
2520 			hdr_len = tso_cmd.total_hdr_len;
2521 
2522 			bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2523 			bytes = QL_MIN(bytes, hdr_len);
2524 
2525 			num_tx_cmds++;
2526 			hdr_len -= bytes;
2527 
2528 			while (hdr_len) {
2529 				bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2530 				hdr_len -= bytes;
2531 				num_tx_cmds++;
2532 			}
2533 			hdr_len = tso_cmd.total_hdr_len;
2534 
2535 			if (ret == 0)
2536 				src = (uint8_t *)eh;
2537 		} else
2538 			return (EINVAL);
2539 	} else {
2540 		(void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
2541 	}
2542 
2543 	if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
2544 		ql_hw_tx_done_locked(ha, txr_idx);
2545 		if (hw->tx_cntxt[txr_idx].txr_free <=
2546 				(num_tx_cmds + QLA_TX_MIN_FREE)) {
2547         		QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
2548 				"(num_tx_cmds + QLA_TX_MIN_FREE))\n",
2549 				__func__));
2550 			return (-1);
2551 		}
2552 	}
2553 
2554 	for (i = 0; i < num_tx_cmds; i++) {
2555 		int j;
2556 
2557 		j = (tx_idx+i) & (NUM_TX_DESCRIPTORS - 1);
2558 
2559 		if (NULL != ha->tx_ring[txr_idx].tx_buf[j].m_head) {
2560 			QL_ASSERT(ha, 0, \
2561 				("%s [%d]: txr_idx = %d tx_idx = %d mbuf = %p\n",\
2562 				__func__, __LINE__, txr_idx, j,\
2563 				ha->tx_ring[txr_idx].tx_buf[j].m_head));
2564 			return (EINVAL);
2565 		}
2566 	}
2567 
2568 	tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
2569 
2570         if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
2571                 if (nsegs > ha->hw.max_tx_segs)
2572                         ha->hw.max_tx_segs = nsegs;
2573 
2574                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2575 
2576                 if (op_code) {
2577                         tx_cmd->flags_opcode = op_code;
2578                         tx_cmd->tcp_hdr_off = tcp_hdr_off;
2579 
2580                 } else {
2581                         tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
2582                 }
2583 	} else {
2584 		bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
2585 		ha->tx_tso_frames++;
2586 	}
2587 
2588 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2589         	tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
2590 
2591 		if (iscsi_pdu)
2592 			eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
2593 
2594 	} else if (mp->m_flags & M_VLANTAG) {
2595 		if (hdr_len) { /* TSO */
2596 			tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
2597 						Q8_TX_CMD_FLAGS_HW_VLAN_ID);
2598 			tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
2599 		} else
2600 			tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
2601 
2602 		ha->hw_vlan_tx_frames++;
2603 		tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
2604 
2605 		if (iscsi_pdu) {
2606 			tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
2607 			mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
2608 		}
2609 	}
2610 
2611         tx_cmd->n_bufs = (uint8_t)nsegs;
2612         tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
2613         tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
2614 	tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
2615 
2616 	c_seg = segs;
2617 
2618 	while (1) {
2619 		for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
2620 			switch (i) {
2621 			case 0:
2622 				tx_cmd->buf1_addr = c_seg->ds_addr;
2623 				tx_cmd->buf1_len = c_seg->ds_len;
2624 				break;
2625 
2626 			case 1:
2627 				tx_cmd->buf2_addr = c_seg->ds_addr;
2628 				tx_cmd->buf2_len = c_seg->ds_len;
2629 				break;
2630 
2631 			case 2:
2632 				tx_cmd->buf3_addr = c_seg->ds_addr;
2633 				tx_cmd->buf3_len = c_seg->ds_len;
2634 				break;
2635 
2636 			case 3:
2637 				tx_cmd->buf4_addr = c_seg->ds_addr;
2638 				tx_cmd->buf4_len = c_seg->ds_len;
2639 				break;
2640 			}
2641 
2642 			c_seg++;
2643 			nsegs--;
2644 		}
2645 
2646 		txr_next = hw->tx_cntxt[txr_idx].txr_next =
2647 			(hw->tx_cntxt[txr_idx].txr_next + 1) &
2648 				(NUM_TX_DESCRIPTORS - 1);
2649 		tx_cmd_count++;
2650 
2651 		if (!nsegs)
2652 			break;
2653 
2654 		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2655 		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2656 	}
2657 
2658 	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2659 		/* TSO : Copy the header in the following tx cmd descriptors */
2660 
2661 		txr_next = hw->tx_cntxt[txr_idx].txr_next;
2662 
2663 		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2664 		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2665 
2666 		bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2667 		bytes = QL_MIN(bytes, hdr_len);
2668 
2669 		dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
2670 
2671 		if (mp->m_flags & M_VLANTAG) {
2672 			/* first copy the src/dst MAC addresses */
2673 			bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2674 			dst += (ETHER_ADDR_LEN * 2);
2675 			src += (ETHER_ADDR_LEN * 2);
2676 
2677 			*((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2678 			dst += 2;
2679 			*((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2680 			dst += 2;
2681 
2682 			/* bytes left in src header */
2683 			hdr_len -= ((ETHER_ADDR_LEN * 2) +
2684 					ETHER_VLAN_ENCAP_LEN);
2685 
2686 			/* bytes left in TxCmd Entry */
2687 			bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2688 
2689 			bcopy(src, dst, bytes);
2690 			src += bytes;
2691 			hdr_len -= bytes;
2692 		} else {
2693 			bcopy(src, dst, bytes);
2694 			src += bytes;
2695 			hdr_len -= bytes;
2696 		}
2697 
2698 		txr_next = hw->tx_cntxt[txr_idx].txr_next =
2699 				(hw->tx_cntxt[txr_idx].txr_next + 1) &
2700 					(NUM_TX_DESCRIPTORS - 1);
2701 		tx_cmd_count++;
2702 
2703 		while (hdr_len) {
2704 			tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2705 			bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2706 
2707 			bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2708 
2709 			bcopy(src, tx_cmd, bytes);
2710 			src += bytes;
2711 			hdr_len -= bytes;
2712 
2713 			txr_next = hw->tx_cntxt[txr_idx].txr_next =
2714 				(hw->tx_cntxt[txr_idx].txr_next + 1) &
2715 					(NUM_TX_DESCRIPTORS - 1);
2716 			tx_cmd_count++;
2717 		}
2718 	}
2719 
2720 	hw->tx_cntxt[txr_idx].txr_free =
2721 		hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2722 
2723 	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2724 		txr_idx);
2725 	QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2726 
2727 	return (0);
2728 }
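
/*
 * Descriptor accounting in ql_hw_send() above: each tx cmd descriptor holds
 * up to Q8_TX_CMD_MAX_SEGMENTS (4) buffer pointers, hence num_tx_cmds =
 * (nsegs + 3) >> 2. A 9-segment mbuf chain, for example, consumes 3 data
 * descriptors, plus however many additional descriptors the TSO header copy
 * requires.
 */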
2729 
2730 #define Q8_CONFIG_IND_TBL_SIZE	32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
2731 static int
2732 qla_config_rss_ind_table(qla_host_t *ha)
2733 {
2734 	uint32_t i, count;
2735 	uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2736 
2737 	for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2738 		rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2739 	}
2740 
2741 	for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
2742 		i = i + Q8_CONFIG_IND_TBL_SIZE) {
2743 		if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2744 			count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2745 		} else {
2746 			count = Q8_CONFIG_IND_TBL_SIZE;
2747 		}
2748 
2749 		if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2750 			rss_ind_tbl))
2751 			return (-1);
2752 	}
2753 
2754 	return (0);
2755 }
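
/*
 * Example of the round-robin fill above: with num_sds_rings == 4 the
 * 32-entry window becomes 0,1,2,3,0,1,2,3,... so hashed receive flows are
 * spread evenly across the four status rings.
 */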
2756 
2757 static int
2758 qla_config_soft_lro(qla_host_t *ha)
2759 {
2760         int i;
2761         qla_hw_t *hw = &ha->hw;
2762         struct lro_ctrl *lro;
2763 
2764         for (i = 0; i < hw->num_sds_rings; i++) {
2765                 lro = &hw->sds[i].lro;
2766 
2767 		bzero(lro, sizeof(struct lro_ctrl));
2768 
2769 #if (__FreeBSD_version >= 1100101)
2770                 if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
2771                         device_printf(ha->pci_dev,
2772 				"%s: tcp_lro_init_args [%d] failed\n",
2773                                 __func__, i);
2774                         return (-1);
2775                 }
2776 #else
2777                 if (tcp_lro_init(lro)) {
2778                         device_printf(ha->pci_dev,
2779 				"%s: tcp_lro_init [%d] failed\n",
2780                                 __func__, i);
2781                         return (-1);
2782                 }
2783 #endif /* #if (__FreeBSD_version >= 1100101) */
2784 
2785                 lro->ifp = ha->ifp;
2786         }
2787 
2788         QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
2789         return (0);
2790 }
2791 
2792 static void
2793 qla_drain_soft_lro(qla_host_t *ha)
2794 {
2795         int i;
2796         qla_hw_t *hw = &ha->hw;
2797         struct lro_ctrl *lro;
2798 
2799        	for (i = 0; i < hw->num_sds_rings; i++) {
2800                	lro = &hw->sds[i].lro;
2801 
2802 #if (__FreeBSD_version >= 1100101)
2803 		tcp_lro_flush_all(lro);
2804 #else
2805                 struct lro_entry *queued;
2806 
2807 		while ((!SLIST_EMPTY(&lro->lro_active))) {
2808 			queued = SLIST_FIRST(&lro->lro_active);
2809 			SLIST_REMOVE_HEAD(&lro->lro_active, next);
2810 			tcp_lro_flush(lro, queued);
2811 		}
2812 #endif /* #if (__FreeBSD_version >= 1100101) */
2813 	}
2814 
2815 	return;
2816 }
2817 
2818 static void
2819 qla_free_soft_lro(qla_host_t *ha)
2820 {
2821         int i;
2822         qla_hw_t *hw = &ha->hw;
2823         struct lro_ctrl *lro;
2824 
2825         for (i = 0; i < hw->num_sds_rings; i++) {
2826                	lro = &hw->sds[i].lro;
2827 		tcp_lro_free(lro);
2828 	}
2829 
2830 	return;
2831 }
2832 
2833 /*
2834  * Name: ql_del_hw_if
2835  * Function: Destroys the hardware specific entities corresponding to an
2836  *	Ethernet Interface
2837  */
2838 void
2839 ql_del_hw_if(qla_host_t *ha)
2840 {
2841 	uint32_t i;
2842 	uint32_t num_msix;
2843 
2844 	(void)qla_stop_nic_func(ha);
2845 
2846 	qla_del_rcv_cntxt(ha);
2847 
2848 	if(qla_del_xmt_cntxt(ha))
2849 		goto ql_del_hw_if_exit;
2850 
2851 	if (ha->hw.flags.init_intr_cnxt) {
2852 		for (i = 0; i < ha->hw.num_sds_rings; ) {
2853 			if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2854 				num_msix = Q8_MAX_INTR_VECTORS;
2855 			else
2856 				num_msix = ha->hw.num_sds_rings - i;
2857 
2858 			if (qla_config_intr_cntxt(ha, i, num_msix, 0))
2859 				break;
2860 
2861 			i += num_msix;
2862 		}
2863 
2864 		ha->hw.flags.init_intr_cnxt = 0;
2865 	}
2866 
2867 ql_del_hw_if_exit:
2868 	if (ha->hw.enable_soft_lro) {
2869 		qla_drain_soft_lro(ha);
2870 		qla_free_soft_lro(ha);
2871 	}
2872 
2873 	return;
2874 }
2875 
2876 void
2877 qla_confirm_9kb_enable(qla_host_t *ha)
2878 {
2879 //	uint32_t supports_9kb = 0;
2880 
2881 	ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2882 
2883 	/* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2884 	WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2885 	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2886 
2887 #if 0
2888 	qla_get_nic_partition(ha, &supports_9kb, NULL);
2889 
2890 	if (!supports_9kb)
2891 #endif
2892 	ha->hw.enable_9kb = 0;
2893 
2894 	return;
2895 }
2896 
2897 /*
2898  * Name: ql_init_hw_if
2899  * Function: Creates the hardware specific entities corresponding to an
2900  *	Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2901  *	corresponding to the interface. Enables LRO if allowed.
2902  */
2903 int
2904 ql_init_hw_if(qla_host_t *ha)
2905 {
2906 	device_t	dev;
2907 	uint32_t	i;
2908 	uint8_t		bcast_mac[6];
2909 	qla_rdesc_t	*rdesc;
2910 	uint32_t	num_msix;
2911 
2912 	dev = ha->pci_dev;
2913 
2914 	for (i = 0; i < ha->hw.num_sds_rings; i++) {
2915 		bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2916 			ha->hw.dma_buf.sds_ring[i].size);
2917 	}
2918 
2919 	for (i = 0; i < ha->hw.num_sds_rings; ) {
2920 		if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2921 			num_msix = Q8_MAX_INTR_VECTORS;
2922 		else
2923 			num_msix = ha->hw.num_sds_rings - i;
2924 
2925 		if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2926 			if (i > 0) {
2927 				num_msix = i;
2928 
2929 				for (i = 0; i < num_msix; ) {
2930 					qla_config_intr_cntxt(ha, i,
2931 						Q8_MAX_INTR_VECTORS, 0);
2932 					i += Q8_MAX_INTR_VECTORS;
2933 				}
2934 			}
2935 			return (-1);
2936 		}
2937 
2938 		i = i + num_msix;
2939 	}
2940 
2941         ha->hw.flags.init_intr_cnxt = 1;
2942 
2943 	/*
2944 	 * Create Receive Context
2945 	 */
2946 	if (qla_init_rcv_cntxt(ha)) {
2947 		return (-1);
2948 	}
2949 
2950 	for (i = 0; i < ha->hw.num_rds_rings; i++) {
2951 		rdesc = &ha->hw.rds[i];
2952 		rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2953 		rdesc->rx_in = 0;
2954 		/* Update the RDS Producer Indices */
2955 		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2956 			rdesc->rx_next);
2957 	}
2958 
2959 	/*
2960 	 * Create Transmit Context
2961 	 */
2962 	if (qla_init_xmt_cntxt(ha)) {
2963 		qla_del_rcv_cntxt(ha);
2964 		return (-1);
2965 	}
2966 	ha->hw.max_tx_segs = 0;
2967 
2968 	if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
2969 		return(-1);
2970 
2971 	ha->hw.flags.unicast_mac = 1;
2972 
2973 	bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2974 	bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2975 
2976 	if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
2977 		return (-1);
2978 
2979 	ha->hw.flags.bcast_mac = 1;
2980 
2981 	/*
2982 	 * program any cached multicast addresses
2983 	 */
2984 	if (qla_hw_add_all_mcast(ha))
2985 		return (-1);
2986 
2987 	if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id))
2988 		return (-1);
2989 
2990 	if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2991 		return (-1);
2992 
2993 	if (qla_config_rss_ind_table(ha))
2994 		return (-1);
2995 
2996 	if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2997 		return (-1);
2998 
2999 	if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
3000 		return (-1);
3001 
3002 	if (ha->ifp->if_capenable & IFCAP_LRO) {
3003 		if (ha->hw.enable_hw_lro) {
3004 			ha->hw.enable_soft_lro = 0;
3005 
3006 			if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
3007 				return (-1);
3008 		} else {
3009 			ha->hw.enable_soft_lro = 1;
3010 
3011 			if (qla_config_soft_lro(ha))
3012 				return (-1);
3013 		}
3014 	}
3015 
3016         if (qla_init_nic_func(ha))
3017                 return (-1);
3018 
3019         if (qla_query_fw_dcbx_caps(ha))
3020                 return (-1);
3021 
3022 	for (i = 0; i < ha->hw.num_sds_rings; i++)
3023 		QL_ENABLE_INTERRUPTS(ha, i);
3024 
3025 	return (0);
3026 }
3027 
3028 static int
3029 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
3030 {
3031         device_t                dev = ha->pci_dev;
3032         q80_rq_map_sds_to_rds_t *map_rings;
3033 	q80_rsp_map_sds_to_rds_t *map_rings_rsp;
3034         uint32_t                i, err;
3035         qla_hw_t                *hw = &ha->hw;
3036 
3037         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
3038         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
3039 
3040         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
3041         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
3042         map_rings->count_version |= Q8_MBX_CMD_VERSION;
3043 
3044         map_rings->cntxt_id = hw->rcv_cntxt_id;
3045         map_rings->num_rings = num_idx;
3046 
3047 	for (i = 0; i < num_idx; i++) {
3048 		map_rings->sds_rds[i].sds_ring = i + start_idx;
3049 		map_rings->sds_rds[i].rds_ring = i + start_idx;
3050 	}
3051 
3052         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
3053                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
3054                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
3055                 device_printf(dev, "%s: failed0\n", __func__);
3056                 return (-1);
3057         }
3058 
3059         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
3060 
3061         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
3062 
3063         if (err) {
3064                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3065                 return (-1);
3066         }
3067 
3068         return (0);
3069 }
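
/*
 * The mapping above is strictly 1:1 (sds ring i pairs with rds ring i) and
 * is issued in windows of at most MAX_SDS_TO_RDS_MAP rings per mailbox
 * command; see the chunking loop at the end of qla_init_rcv_cntxt() below.
 */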
3070 
3071 /*
3072  * Name: qla_init_rcv_cntxt
3073  * Function: Creates the Receive Context.
3074  */
3075 static int
3076 qla_init_rcv_cntxt(qla_host_t *ha)
3077 {
3078 	q80_rq_rcv_cntxt_t	*rcntxt;
3079 	q80_rsp_rcv_cntxt_t	*rcntxt_rsp;
3080 	q80_stat_desc_t		*sdesc;
3081 	int			i, j;
3082         qla_hw_t		*hw = &ha->hw;
3083 	device_t		dev;
3084 	uint32_t		err;
3085 	uint32_t		rcntxt_sds_rings;
3086 	uint32_t		rcntxt_rds_rings;
3087 	uint32_t		max_idx;
3088 
3089 	dev = ha->pci_dev;
3090 
3091 	/*
3092 	 * Create Receive Context
3093 	 */
3094 
3095 	for (i = 0; i < hw->num_sds_rings; i++) {
3096 		for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
3097 			sdesc = (q80_stat_desc_t *)
3098 				&hw->sds[i].sds_ring_base[j];
3099 			sdesc->data[0] = 1ULL;
3100 			sdesc->data[1] = 1ULL;
3101 		}
3102 	}
3103 
3104 	rcntxt_sds_rings = hw->num_sds_rings;
3105 	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
3106 		rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
3107 
3108 	rcntxt_rds_rings = hw->num_rds_rings;
3109 
3110 	if (hw->num_rds_rings > MAX_RDS_RING_SETS)
3111 		rcntxt_rds_rings = MAX_RDS_RING_SETS;
3112 
3113 	rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
3114 	bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
3115 
3116 	rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
3117 	rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
3118 	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
3119 
3120 	rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
3121 			Q8_RCV_CNTXT_CAP0_LRO |
3122 			Q8_RCV_CNTXT_CAP0_HW_LRO |
3123 			Q8_RCV_CNTXT_CAP0_RSS |
3124 			Q8_RCV_CNTXT_CAP0_SGL_LRO;
3125 
3126 	if (ha->hw.enable_9kb)
3127 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
3128 	else
3129 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
3130 
3131 	if (ha->hw.num_rds_rings > 1) {
3132 		rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
3133 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
3134 	} else
3135 		rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
3136 
3137 	rcntxt->nsds_rings = rcntxt_sds_rings;
3138 
3139 	rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
3140 
3141 	rcntxt->rcv_vpid = 0;
3142 
3143 	for (i = 0; i <  rcntxt_sds_rings; i++) {
3144 		rcntxt->sds[i].paddr =
3145 			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
3146 		rcntxt->sds[i].size =
3147 			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
3148 		rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
3149 		rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
3150 	}
3151 
3152 	for (i = 0; i <  rcntxt_rds_rings; i++) {
3153 		rcntxt->rds[i].paddr_std =
3154 			qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
3155 
3156 		if (ha->hw.enable_9kb)
3157 			rcntxt->rds[i].std_bsize =
3158 				qla_host_to_le64(MJUM9BYTES);
3159 		else
3160 			rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
3161 
3162 		rcntxt->rds[i].std_nentries =
3163 			qla_host_to_le32(NUM_RX_DESCRIPTORS);
3164 	}
3165 
3166         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3167 		(sizeof (q80_rq_rcv_cntxt_t) >> 2),
3168                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
3169                 device_printf(dev, "%s: failed0\n", __func__);
3170                 return (-1);
3171         }
3172 
3173         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
3174 
3175         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3176 
3177         if (err) {
3178                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3179                 return (-1);
3180         }
3181 
3182 	for (i = 0; i <  rcntxt_sds_rings; i++) {
3183 		hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
3184 	}
3185 
3186 	for (i = 0; i <  rcntxt_rds_rings; i++) {
3187 		hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
3188 	}
3189 
3190 	hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
3191 
3192 	ha->hw.flags.init_rx_cnxt = 1;
3193 
3194 	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
3195 		for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
3196 			if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
3197 				max_idx = MAX_RCNTXT_SDS_RINGS;
3198 			else
3199 				max_idx = hw->num_sds_rings - i;
3200 
3201 			err = qla_add_rcv_rings(ha, i, max_idx);
3202 			if (err)
3203 				return -1;
3204 
3205 			i += max_idx;
3206 		}
3207 	}
3208 
3209 	if (hw->num_rds_rings > 1) {
3210 		for (i = 0; i < hw->num_rds_rings; ) {
3211 			if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
3212 				max_idx = MAX_SDS_TO_RDS_MAP;
3213 			else
3214 				max_idx = hw->num_rds_rings - i;
3215 
3216 			err = qla_map_sds_to_rds(ha, i, max_idx);
3217 			if (err)
3218 				return -1;
3219 
3220 			i += max_idx;
3221 		}
3222 	}
3223 
3224 	return (0);
3225 }
3226 
3227 static int
3228 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
3229 {
3230 	device_t		dev = ha->pci_dev;
3231 	q80_rq_add_rcv_rings_t	*add_rcv;
3232 	q80_rsp_add_rcv_rings_t	*add_rcv_rsp;
3233 	uint32_t		i,j, err;
3234         qla_hw_t		*hw = &ha->hw;
3235 
3236 	add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
3237 	bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
3238 
3239 	add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
3240 	add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
3241 	add_rcv->count_version |= Q8_MBX_CMD_VERSION;
3242 
3243 	add_rcv->nrds_sets_rings = nsds | (1 << 5);
3244 	add_rcv->nsds_rings = nsds;
3245 	add_rcv->cntxt_id = hw->rcv_cntxt_id;
3246 
3247         for (i = 0; i <  nsds; i++) {
3248 		j = i + sds_idx;
3249 
3250                 add_rcv->sds[i].paddr =
3251                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
3252 
3253                 add_rcv->sds[i].size =
3254                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
3255 
3256                 add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
3257                 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
3258         }
3259 
3260         for (i = 0; (i <  nsds); i++) {
3261                 j = i + sds_idx;
3262 
3263                 add_rcv->rds[i].paddr_std =
3264                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
3265 
3266 		if (ha->hw.enable_9kb)
3267 			add_rcv->rds[i].std_bsize =
3268 				qla_host_to_le64(MJUM9BYTES);
3269 		else
3270                 	add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
3271 
3272                 add_rcv->rds[i].std_nentries =
3273                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
3274         }
3275 
3276         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
3277 		(sizeof (q80_rq_add_rcv_rings_t) >> 2),
3278                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
3279                 device_printf(dev, "%s: failed0\n", __func__);
3280                 return (-1);
3281         }
3282 
3283         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
3284 
3285         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
3286 
3287         if (err) {
3288                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3289                 return (-1);
3290         }
3291 
3292 	for (i = 0; i < nsds; i++) {
3293 		hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
3294 	}
3295 
3296 	for (i = 0; i < nsds; i++) {
3297 		hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
3298 	}
3299 
3300 	return (0);
3301 }
3302 
3303 /*
3304  * Name: qla_del_rcv_cntxt
3305  * Function: Destroys the Receive Context.
3306  */
3307 static void
3308 qla_del_rcv_cntxt(qla_host_t *ha)
3309 {
3310 	device_t			dev = ha->pci_dev;
3311 	q80_rcv_cntxt_destroy_t		*rcntxt;
3312 	q80_rcv_cntxt_destroy_rsp_t	*rcntxt_rsp;
3313 	uint32_t			err;
3314 	uint8_t				bcast_mac[6];
3315 
3316 	if (!ha->hw.flags.init_rx_cnxt)
3317 		return;
3318 
3319 	if (qla_hw_del_all_mcast(ha))
3320 		return;
3321 
3322 	if (ha->hw.flags.bcast_mac) {
3323 		bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
3324 		bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
3325 
3326 		if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
3327 			return;
3328 		ha->hw.flags.bcast_mac = 0;
3329 	}
3330 
3331 	if (ha->hw.flags.unicast_mac) {
3332 		if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
3333 			return;
3334 		ha->hw.flags.unicast_mac = 0;
3335 	}
3336 
3337 	rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
3338 	bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
3339 
3340 	rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
3341 	rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
3342 	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
3343 
3344 	rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
3345 
3346         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3347 		(sizeof (q80_rcv_cntxt_destroy_t) >> 2),
3348                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
3349                 device_printf(dev, "%s: failed0\n", __func__);
3350                 return;
3351         }
3352         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
3353 
3354         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3355 
3356         if (err) {
3357                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3358         }
3359 
3360 	ha->hw.flags.init_rx_cnxt = 0;
3361 	return;
3362 }
3363 
3364 /*
3365  * Name: qla_init_xmt_cntxt
3366  * Function: Creates the Transmit Context.
3367  */
3368 static int
3369 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3370 {
3371 	device_t		dev;
3372         qla_hw_t		*hw = &ha->hw;
3373 	q80_rq_tx_cntxt_t	*tcntxt;
3374 	q80_rsp_tx_cntxt_t	*tcntxt_rsp;
3375 	uint32_t		err;
3376 	qla_hw_tx_cntxt_t       *hw_tx_cntxt;
3377 	uint32_t		intr_idx;
3378 
3379 	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3380 
3381 	dev = ha->pci_dev;
3382 
3383 	/*
3384 	 * Create Transmit Context
3385 	 */
3386 	tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
3387 	bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
3388 
3389 	tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
3390 	tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
3391 	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3392 
3393 	intr_idx = txr_idx;
3394 
3395 #ifdef QL_ENABLE_ISCSI_TLV
3396 
3397 	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
3398 				Q8_TX_CNTXT_CAP0_TC;
3399 
3400 	if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
3401 		tcntxt->traffic_class = 1;
3402 	}
3403 
3404 	intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
3405 
3406 #else
3407 	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
3408 
3409 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
3410 
3411 	tcntxt->ntx_rings = 1;
3412 
3413 	tcntxt->tx_ring[0].paddr =
3414 		qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
3415 	tcntxt->tx_ring[0].tx_consumer =
3416 		qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
3417 	tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
3418 
3419 	tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
3420 	tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
3421 
3422 	hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
3423 	hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
3424 	*(hw_tx_cntxt->tx_cons) = 0;
3425 
3426         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3427 		(sizeof (q80_rq_tx_cntxt_t) >> 2),
3428                 ha->hw.mbox,
3429 		(sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
3430                 device_printf(dev, "%s: failed0\n", __func__);
3431                 return (-1);
3432         }
3433         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
3434 
3435         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3436 
3437         if (err) {
3438                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3439 		return -1;
3440         }
3441 
3442 	hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
3443 	hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
3444 
3445 	if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
3446 		return (-1);
3447 
3448 	return (0);
3449 }
3450 
3451 /*
3452  * Name: qla_del_xmt_cntxt
3453  * Function: Destroys the Transmit Context.
3454  */
3455 static int
3456 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3457 {
3458 	device_t			dev = ha->pci_dev;
3459 	q80_tx_cntxt_destroy_t		*tcntxt;
3460 	q80_tx_cntxt_destroy_rsp_t	*tcntxt_rsp;
3461 	uint32_t			err;
3462 
3463 	tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
3464 	bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
3465 
3466 	tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
3467 	tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
3468 	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3469 
3470 	tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
3471 
3472         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3473 		(sizeof (q80_tx_cntxt_destroy_t) >> 2),
3474                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
3475                 device_printf(dev, "%s: failed0\n", __func__);
3476                 return (-1);
3477         }
3478         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
3479 
3480         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3481 
3482         if (err) {
3483                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3484 		return (-1);
3485         }
3486 
3487 	return (0);
}

static int
3490 qla_del_xmt_cntxt(qla_host_t *ha)
3491 {
3492 	uint32_t i;
3493 	int ret = 0;
3494 
3495 	if (!ha->hw.flags.init_tx_cnxt)
3496 		return (ret);
3497 
3498 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
3499 		if ((ret = qla_del_xmt_cntxt_i(ha, i)) != 0)
3500 			break;
3501 	}
3502 	ha->hw.flags.init_tx_cnxt = 0;
3503 
3504 	return (ret);
3505 }
3506 
3507 static int
3508 qla_init_xmt_cntxt(qla_host_t *ha)
3509 {
3510 	uint32_t i, j;
3511 
3512 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
3513 		if (qla_init_xmt_cntxt_i(ha, i) != 0) {
3514 			for (j = 0; j < i; j++) {
3515 				if (qla_del_xmt_cntxt_i(ha, j))
3516 					break;
3517 			}
3518 			return (-1);
3519 		}
3520 	}
3521 	ha->hw.flags.init_tx_cnxt = 1;
3522 	return (0);
3523 }
3524 
3525 static int
3526 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
3527 {
3528 	int i, nmcast;
3529 	uint32_t count = 0;
3530 	uint8_t *mcast;
3531 
3532 	nmcast = ha->hw.nmcast;
3533 
3534 	QL_DPRINT2(ha, (ha->pci_dev,
3535 		"%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
3536 
3537 	mcast = ha->hw.mac_addr_arr;
3538 	memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3539 
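	/*
	 * Walk the multicast table, packing the non-zero entries into
	 * mac_addr_arr and flushing them to the firmware in batches of
	 * Q8_MAX_MAC_ADDRS addresses per mailbox command.
	 */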
3540 	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
3541 		if ((ha->hw.mcast[i].addr[0] != 0) ||
3542 			(ha->hw.mcast[i].addr[1] != 0) ||
3543 			(ha->hw.mcast[i].addr[2] != 0) ||
3544 			(ha->hw.mcast[i].addr[3] != 0) ||
3545 			(ha->hw.mcast[i].addr[4] != 0) ||
3546 			(ha->hw.mcast[i].addr[5] != 0)) {
3547 			bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
3548 			mcast = mcast + ETHER_ADDR_LEN;
3549 			count++;
3550 
3551 			device_printf(ha->pci_dev,
3552 				"%s: %x:%x:%x:%x:%x:%x \n",
3553 				__func__, ha->hw.mcast[i].addr[0],
3554 				ha->hw.mcast[i].addr[1], ha->hw.mcast[i].addr[2],
3555 				ha->hw.mcast[i].addr[3], ha->hw.mcast[i].addr[4],
3556 				ha->hw.mcast[i].addr[5]);
3557 
3558 			if (count == Q8_MAX_MAC_ADDRS) {
3559 				if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3560 					add_mcast, count)) {
3561                 			device_printf(ha->pci_dev,
3562 						"%s: failed\n", __func__);
3563 					return (-1);
3564 				}
3565 
3566 				count = 0;
3567 				mcast = ha->hw.mac_addr_arr;
3568 				memset(mcast, 0,
3569 					(Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3570 			}
3571 
3572 			nmcast--;
3573 		}
3574 	}
3575 
3576 	if (count) {
3577 		if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
3578 			count)) {
3579                 	device_printf(ha->pci_dev, "%s: failed\n", __func__);
3580 			return (-1);
3581 		}
3582 	}
3583 	QL_DPRINT2(ha, (ha->pci_dev,
3584 		"%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
3585 
3586 	return 0;
3587 }
3588 
3589 static int
3590 qla_hw_add_all_mcast(qla_host_t *ha)
3591 {
3592 	int ret;
3593 
3594 	ret = qla_hw_all_mcast(ha, 1);
3595 
3596 	return (ret);
3597 }
3598 
3599 int
3600 qla_hw_del_all_mcast(qla_host_t *ha)
3601 {
3602 	int ret;
3603 
3604 	ret = qla_hw_all_mcast(ha, 0);
3605 
3606 	bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
3607 	ha->hw.nmcast = 0;
3608 
3609 	return (ret);
3610 }
3611 
3612 static int
3613 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
3614 {
3615 	int i;
3616 
3617 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3618 		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
			return (0); /* it's already been added */
3620 	}
3621 	return (-1);
3622 }
3623 
3624 static int
3625 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3626 {
3627 	int i;
3628 
3629 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3630 		if ((ha->hw.mcast[i].addr[0] == 0) &&
3631 			(ha->hw.mcast[i].addr[1] == 0) &&
3632 			(ha->hw.mcast[i].addr[2] == 0) &&
3633 			(ha->hw.mcast[i].addr[3] == 0) &&
3634 			(ha->hw.mcast[i].addr[4] == 0) &&
3635 			(ha->hw.mcast[i].addr[5] == 0)) {
3636 			bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
3637 			ha->hw.nmcast++;
3638 
3639 			mta = mta + ETHER_ADDR_LEN;
3640 			nmcast--;
3641 
3642 			if (nmcast == 0)
3643 				break;
3644 		}
3645 	}
3646 	return 0;
3647 }
3648 
3649 static int
3650 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3651 {
3652 	int i;
3653 
3654 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3655 		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
3656 			ha->hw.mcast[i].addr[0] = 0;
3657 			ha->hw.mcast[i].addr[1] = 0;
3658 			ha->hw.mcast[i].addr[2] = 0;
3659 			ha->hw.mcast[i].addr[3] = 0;
3660 			ha->hw.mcast[i].addr[4] = 0;
3661 			ha->hw.mcast[i].addr[5] = 0;
3662 
3663 			ha->hw.nmcast--;
3664 
3665 			mta = mta + ETHER_ADDR_LEN;
3666 			nmcast--;
3667 
3668 			if (nmcast == 0)
3669 				break;
3670 		}
3671 	}
3672 	return 0;
3673 }
3674 
3675 /*
3676  * Name: ql_hw_set_multi
3677  * Function: Sets the Multicast Addresses provided by the host O.S into the
3678  *	hardware (for the given interface)
3679  */
3680 int
3681 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
3682 	uint32_t add_mac)
3683 {
3684 	uint8_t *mta = mcast_addr;
3685 	int i;
3686 	int ret = 0;
3687 	uint32_t count = 0;
3688 	uint8_t *mcast;
3689 
3690 	mcast = ha->hw.mac_addr_arr;
3691 	memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3692 
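	/*
	 * Batch the addresses into mac_addr_arr, skipping those already
	 * present (for adds) or not present (for deletes), and issue a
	 * mailbox command whenever Q8_MAX_MAC_ADDRS entries accumulate.
	 */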
3693 	for (i = 0; i < mcnt; i++) {
3694 		if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
3695 			if (add_mac) {
3696 				if (qla_hw_mac_addr_present(ha, mta) != 0) {
3697 					bcopy(mta, mcast, ETHER_ADDR_LEN);
3698 					mcast = mcast + ETHER_ADDR_LEN;
3699 					count++;
3700 				}
3701 			} else {
3702 				if (qla_hw_mac_addr_present(ha, mta) == 0) {
3703 					bcopy(mta, mcast, ETHER_ADDR_LEN);
3704 					mcast = mcast + ETHER_ADDR_LEN;
3705 					count++;
3706 				}
3707 			}
3708 		}
3709 		if (count == Q8_MAX_MAC_ADDRS) {
3710 			if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3711 				add_mac, count)) {
3712                 		device_printf(ha->pci_dev, "%s: failed\n",
3713 					__func__);
3714 				return (-1);
3715 			}
3716 
3717 			if (add_mac) {
3718 				qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
3719 					count);
3720 			} else {
3721 				qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
3722 					count);
3723 			}
3724 
3725 			count = 0;
3726 			mcast = ha->hw.mac_addr_arr;
3727 			memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3728 		}
3729 
3730 		mta += Q8_MAC_ADDR_LEN;
3731 	}
3732 
3733 	if (count) {
3734 		if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
3735 			count)) {
3736                 	device_printf(ha->pci_dev, "%s: failed\n", __func__);
3737 			return (-1);
3738 		}
3739 		if (add_mac) {
3740 			qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
3741 		} else {
3742 			qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
3743 		}
3744 	}
3745 
3746 	return (ret);
3747 }
3748 
3749 /*
3750  * Name: ql_hw_tx_done_locked
3751  * Function: Handle Transmit Completions
3752  */
3753 void
3754 ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
3755 {
3756 	qla_tx_buf_t *txb;
3757         qla_hw_t *hw = &ha->hw;
3758 	uint32_t comp_idx, comp_count = 0;
3759 	qla_hw_tx_cntxt_t *hw_tx_cntxt;
3760 
3761 	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3762 
3763 	/* retrieve index of last entry in tx ring completed */
3764 	comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
3765 
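	/*
	 * tx_cons points to a DMA-able completion index that the firmware
	 * updates; reclaim every descriptor between our last completion
	 * point (txr_comp) and that index, wrapping at the ring size, and
	 * free the associated mbufs.
	 */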
3766 	while (comp_idx != hw_tx_cntxt->txr_comp) {
3767 		txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
3768 
3769 		hw_tx_cntxt->txr_comp++;
3770 		if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
3771 			hw_tx_cntxt->txr_comp = 0;
3772 
3773 		comp_count++;
3774 
3775 		if (txb->m_head) {
3776 			if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
3777 
3778 			bus_dmamap_sync(ha->tx_tag, txb->map,
3779 				BUS_DMASYNC_POSTWRITE);
3780 			bus_dmamap_unload(ha->tx_tag, txb->map);
3781 			m_freem(txb->m_head);
3782 
3783 			txb->m_head = NULL;
3784 		}
3785 	}
3786 
3787 	hw_tx_cntxt->txr_free += comp_count;
3788 
3789 	if (hw_tx_cntxt->txr_free > NUM_TX_DESCRIPTORS)
		device_printf(ha->pci_dev, "%s [%d]: txr_idx = %d txr_free = %d "
3791 			"txr_next = %d txr_comp = %d\n", __func__, __LINE__,
3792 			txr_idx, hw_tx_cntxt->txr_free,
3793 			hw_tx_cntxt->txr_next, hw_tx_cntxt->txr_comp);
3794 
3795 	QL_ASSERT(ha, (hw_tx_cntxt->txr_free <= NUM_TX_DESCRIPTORS), \
3796 		("%s [%d]: txr_idx = %d txr_free = %d txr_next = %d txr_comp = %d\n",\
3797 		__func__, __LINE__, txr_idx, hw_tx_cntxt->txr_free, \
3798 		hw_tx_cntxt->txr_next, hw_tx_cntxt->txr_comp));
3799 
3800 	return;
3801 }
3802 
3803 void
3804 ql_update_link_state(qla_host_t *ha)
3805 {
3806 	uint32_t link_state = 0;
3807 	uint32_t prev_link_state;
3808 
3809 	prev_link_state =  ha->hw.link_up;
3810 
3811 	if (ha->ifp->if_drv_flags & IFF_DRV_RUNNING) {
3812 		link_state = READ_REG32(ha, Q8_LINK_STATE);
3813 
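		/*
		 * Q8_LINK_STATE packs the per-PCI-function link state into
		 * 4-bit nibbles: function 0 in bits 3:0, function 1 in bits
		 * 7:4. A nibble value of 1 indicates link up.
		 */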
3814 		if (ha->pci_func == 0) {
			link_state = (((link_state & 0xF) == 1) ? 1 : 0);
		} else {
			link_state = ((((link_state >> 4) & 0xF) == 1) ? 1 : 0);
3818 		}
3819 	}
3820 
3821 	atomic_store_rel_8(&ha->hw.link_up, (uint8_t)link_state);
3822 
3823 	if (prev_link_state !=  ha->hw.link_up) {
3824 		if (ha->hw.link_up) {
3825 			if_link_state_change(ha->ifp, LINK_STATE_UP);
3826 		} else {
3827 			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3828 		}
3829 	}
3830 	return;
3831 }
3832 
3833 int
3834 ql_hw_check_health(qla_host_t *ha)
3835 {
3836 	uint32_t val;
3837 
3838 	ha->hw.health_count++;
3839 
3840 	if (ha->hw.health_count < 500)
3841 		return 0;
3842 
3843 	ha->hw.health_count = 0;
3844 
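	/*
	 * The low 16 bits of Q8_ASIC_TEMPERATURE carry the temperature
	 * state; values 2 and 3 (presumably warning/failure levels) are
	 * treated as an alert and trigger recovery.
	 */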
3845 	val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3846 
3847 	if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3848 		(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3849 		device_printf(ha->pci_dev, "%s: Temperature Alert"
3850 			" at ts_usecs %ld ts_reg = 0x%08x\n",
3851 			__func__, qla_get_usec_timestamp(), val);
3852 
3853 		if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_TEMP_FAILURE)
3854 			ha->hw.sp_log_stop = -1;
3855 
3856 		QL_INITIATE_RECOVERY(ha);
3857 		return -1;
3858 	}
3859 
3860 	val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3861 
3862 	if ((val != ha->hw.hbeat_value) &&
3863 		(!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3864 		ha->hw.hbeat_value = val;
3865 		ha->hw.hbeat_failure = 0;
3866 		return 0;
3867 	}
3868 
3869 	ha->hw.hbeat_failure++;
3870 
3871 	if ((ha->dbg_level & 0x8000) && (ha->hw.hbeat_failure == 1))
		device_printf(ha->pci_dev, "%s: Heartbeat Failure 1 [0x%08x]\n",
			__func__, val);
3874 	if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */
3875 		return 0;
3876 	else {
3877 		uint32_t peg_halt_status1;
3878 		uint32_t peg_halt_status2;
3879 
3880 		peg_halt_status1 = READ_REG32(ha, Q8_PEG_HALT_STATUS1);
3881 		peg_halt_status2 = READ_REG32(ha, Q8_PEG_HALT_STATUS2);
3882 
3883 		device_printf(ha->pci_dev,
3884 			"%s: Heartbeat Failue at ts_usecs = %ld "
3885 			"fw_heart_beat = 0x%08x "
3886 			"peg_halt_status1 = 0x%08x "
3887 			"peg_halt_status2 = 0x%08x\n",
3888 			__func__, qla_get_usec_timestamp(), val,
3889 			peg_halt_status1, peg_halt_status2);
3890 
3891 		if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_HBEAT_FAILURE)
3892 			ha->hw.sp_log_stop = -1;
3893 	}
3894 	QL_INITIATE_RECOVERY(ha);
3895 
3896 	return -1;
3897 }
3898 
3899 static int
3900 qla_init_nic_func(qla_host_t *ha)
3901 {
3902         device_t                dev;
3903         q80_init_nic_func_t     *init_nic;
3904         q80_init_nic_func_rsp_t *init_nic_rsp;
3905         uint32_t                err;
3906 
3907         dev = ha->pci_dev;
3908 
3909         init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3910         bzero(init_nic, sizeof(q80_init_nic_func_t));
3911 
3912         init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3913         init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3914         init_nic->count_version |= Q8_MBX_CMD_VERSION;
3915 
3916         init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3917         init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3918         init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3919 
3920 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3921         if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3922                 (sizeof (q80_init_nic_func_t) >> 2),
3923                 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3924                 device_printf(dev, "%s: failed\n", __func__);
3925                 return -1;
3926         }
3927 
3928         init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3929 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3930 
3931         err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3932 
3933         if (err) {
3934                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3935         } else {
3936                 device_printf(dev, "%s: successful\n", __func__);
        }
3938 
3939         return 0;
3940 }
3941 
3942 static int
3943 qla_stop_nic_func(qla_host_t *ha)
3944 {
3945         device_t                dev;
3946         q80_stop_nic_func_t     *stop_nic;
3947         q80_stop_nic_func_rsp_t *stop_nic_rsp;
3948         uint32_t                err;
3949 
3950         dev = ha->pci_dev;
3951 
3952         stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3953         bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3954 
3955         stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3956         stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3957         stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3958 
3959         stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3960         stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3961 
3962 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3963         if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3964                 (sizeof (q80_stop_nic_func_t) >> 2),
3965                 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3966                 device_printf(dev, "%s: failed\n", __func__);
3967                 return -1;
3968         }
3969 
3970         stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
//qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3972 
3973         err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3974 
3975         if (err) {
3976                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3977         }
3978 
3979         return 0;
3980 }
3981 
3982 static int
3983 qla_query_fw_dcbx_caps(qla_host_t *ha)
3984 {
3985         device_t                        dev;
3986         q80_query_fw_dcbx_caps_t        *fw_dcbx;
3987         q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
3988         uint32_t                        err;
3989 
3990         dev = ha->pci_dev;
3991 
3992         fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3993         bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3994 
3995         fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3996         fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3997         fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3998 
3999         ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
4000         if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
4001                 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
4002                 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
4003                 device_printf(dev, "%s: failed\n", __func__);
4004                 return -1;
4005         }
4006 
4007         fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
4008         ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
4009                 sizeof (q80_query_fw_dcbx_caps_rsp_t));
4010 
4011         err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
4012 
4013         if (err) {
4014                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4015         }
4016 
4017         return 0;
4018 }
4019 
4020 static int
4021 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
4022         uint32_t aen_mb3, uint32_t aen_mb4)
4023 {
4024         device_t                dev;
4025         q80_idc_ack_t           *idc_ack;
4026         q80_idc_ack_rsp_t       *idc_ack_rsp;
4027         uint32_t                err;
4028         int                     count = 300;
4029 
4030         dev = ha->pci_dev;
4031 
4032         idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
4033         bzero(idc_ack, sizeof(q80_idc_ack_t));
4034 
4035         idc_ack->opcode = Q8_MBX_IDC_ACK;
4036         idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
4037         idc_ack->count_version |= Q8_MBX_CMD_VERSION;
4038 
4039         idc_ack->aen_mb1 = aen_mb1;
4040         idc_ack->aen_mb2 = aen_mb2;
4041         idc_ack->aen_mb3 = aen_mb3;
4042         idc_ack->aen_mb4 = aen_mb4;
4043 
        ha->hw.imd_compl = 0;
4045 
4046         if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
4047                 (sizeof (q80_idc_ack_t) >> 2),
4048                 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
4049                 device_printf(dev, "%s: failed\n", __func__);
4050                 return -1;
4051         }
4052 
4053         idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
4054 
4055         err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
4056 
4057         if (err) {
4058                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4059                 return(-1);
4060         }
4061 
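        /*
         * Poll for up to 30 seconds (300 x 100ms) for imd_compl, which is
         * expected to be set from the async-event path once the IDC
         * intermediate completion arrives.
         */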
4062         while (count && !ha->hw.imd_compl) {
4063                 qla_mdelay(__func__, 100);
4064                 count--;
4065         }
4066 
4067         if (!count)
4068                 return -1;
4069         else
4070                 device_printf(dev, "%s: count %d\n", __func__, count);
4071 
4072         return (0);
4073 }
4074 
4075 static int
4076 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
4077 {
4078         device_t                dev;
4079         q80_set_port_cfg_t      *pcfg;
4080         q80_set_port_cfg_rsp_t  *pfg_rsp;
4081         uint32_t                err;
4082         int                     count = 300;
4083 
4084         dev = ha->pci_dev;
4085 
4086         pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
4087         bzero(pcfg, sizeof(q80_set_port_cfg_t));
4088 
4089         pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
4090         pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
4091         pcfg->count_version |= Q8_MBX_CMD_VERSION;
4092 
4093         pcfg->cfg_bits = cfg_bits;
4094 
4095         device_printf(dev, "%s: cfg_bits"
4096                 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4097                 " [0x%x, 0x%x, 0x%x]\n", __func__,
4098                 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
4099                 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4100                 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
4101 
        ha->hw.imd_compl = 0;
4103 
4104         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
4105                 (sizeof (q80_set_port_cfg_t) >> 2),
4106                 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
4107                 device_printf(dev, "%s: failed\n", __func__);
4108                 return -1;
4109         }
4110 
4111         pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
4112 
4113         err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
4114 
4115         if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
4116                 while (count && !ha->hw.imd_compl) {
4117                         qla_mdelay(__func__, 100);
4118                         count--;
4119                 }
4120                 if (count) {
4121                         device_printf(dev, "%s: count %d\n", __func__, count);
4122 
4123                         err = 0;
4124                 }
4125         }
4126 
4127         if (err) {
4128                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4129                 return(-1);
4130         }
4131 
4132         return (0);
4133 }
4134 
4135 static int
4136 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
4137 {
4138 	uint32_t			err;
4139 	device_t			dev = ha->pci_dev;
4140 	q80_config_md_templ_size_t	*md_size;
4141 	q80_config_md_templ_size_rsp_t	*md_size_rsp;
4142 
4143 #ifndef QL_LDFLASH_FW
4144 
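	/*
	 * When the firmware is not loaded from flash, the minidump template
	 * is compiled into the driver (ql83xx_minidump), so the size comes
	 * straight from the template header instead of a mailbox query.
	 */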
4145 	ql_minidump_template_hdr_t *hdr;
4146 
4147 	hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
4148 	*size = hdr->size_of_template;
4149 	return (0);
4150 
#endif /* #ifndef QL_LDFLASH_FW */
4152 
4153 	md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
4154 	bzero(md_size, sizeof(q80_config_md_templ_size_t));
4155 
4156 	md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
4157 	md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
4158 	md_size->count_version |= Q8_MBX_CMD_VERSION;
4159 
4160 	if (qla_mbx_cmd(ha, (uint32_t *) md_size,
4161 		(sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
4162 		(sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
4163 		device_printf(dev, "%s: failed\n", __func__);
4164 
4165 		return (-1);
4166 	}
4167 
4168 	md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
4169 
4170 	err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
4171 
4172         if (err) {
4173 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4174 		return(-1);
4175         }
4176 
4177 	*size = md_size_rsp->templ_size;
4178 
4179 	return (0);
4180 }
4181 
4182 static int
4183 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
4184 {
4185         device_t                dev;
4186         q80_get_port_cfg_t      *pcfg;
4187         q80_get_port_cfg_rsp_t  *pcfg_rsp;
4188         uint32_t                err;
4189 
4190         dev = ha->pci_dev;
4191 
4192         pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
4193         bzero(pcfg, sizeof(q80_get_port_cfg_t));
4194 
4195         pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
4196         pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
4197         pcfg->count_version |= Q8_MBX_CMD_VERSION;
4198 
4199         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
4200                 (sizeof (q80_get_port_cfg_t) >> 2),
4201                 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
4202                 device_printf(dev, "%s: failed\n", __func__);
4203                 return -1;
4204         }
4205 
4206         pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
4207 
4208         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
4209 
4210         if (err) {
4211                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4212                 return(-1);
4213         }
4214 
4215         device_printf(dev, "%s: [cfg_bits, port type]"
4216                 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4217                 " [0x%x, 0x%x, 0x%x]\n", __func__,
4218                 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
4219                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
4220                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4221                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
4222                 );
4223 
4224         *cfg_bits = pcfg_rsp->cfg_bits;
4225 
4226         return (0);
4227 }
4228 
4229 int
4230 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
4231 {
4232         struct ether_vlan_header        *eh;
4233         uint16_t                        etype;
4234         struct ip                       *ip = NULL;
4235         struct ip6_hdr                  *ip6 = NULL;
4236         struct tcphdr                   *th = NULL;
4237         uint32_t                        hdrlen;
4238         uint32_t                        offset;
4239         uint8_t                         buf[sizeof(struct ip6_hdr)];
4240 
4241         eh = mtod(mp, struct ether_vlan_header *);
4242 
4243         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4244                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4245                 etype = ntohs(eh->evl_proto);
4246         } else {
4247                 hdrlen = ETHER_HDR_LEN;
4248                 etype = ntohs(eh->evl_encap_proto);
4249         }
4250 
4251 	if (etype == ETHERTYPE_IP) {
4252 		offset = (hdrlen + sizeof (struct ip));
4253 
4254 		if (mp->m_len >= offset) {
4255                         ip = (struct ip *)(mp->m_data + hdrlen);
4256 		} else {
4257 			m_copydata(mp, hdrlen, sizeof (struct ip), buf);
4258                         ip = (struct ip *)buf;
4259 		}
4260 
4261                 if (ip->ip_p == IPPROTO_TCP) {
4262 			hdrlen += ip->ip_hl << 2;
4263 			offset = hdrlen + 4;
4264 
4265 			if (mp->m_len >= offset) {
4266 				th = (struct tcphdr *)(mp->m_data + hdrlen);
4267 			} else {
4268                                 m_copydata(mp, hdrlen, 4, buf);
4269 				th = (struct tcphdr *)buf;
4270 			}
4271                 }
4272 
4273 	} else if (etype == ETHERTYPE_IPV6) {
4274 		offset = (hdrlen + sizeof (struct ip6_hdr));
4275 
4276 		if (mp->m_len >= offset) {
4277                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
4278 		} else {
4279                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
4280                         ip6 = (struct ip6_hdr *)buf;
4281 		}
4282 
4283                 if (ip6->ip6_nxt == IPPROTO_TCP) {
4284 			hdrlen += sizeof(struct ip6_hdr);
4285 			offset = hdrlen + 4;
4286 
4287 			if (mp->m_len >= offset) {
4288 				th = (struct tcphdr *)(mp->m_data + hdrlen);
4289 			} else {
4290 				m_copydata(mp, hdrlen, 4, buf);
4291 				th = (struct tcphdr *)buf;
4292 			}
4293                 }
4294 	}
4295 
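        /*
         * 3260 is the IANA-registered iSCSI port; a TCP segment with that
         * source or destination port is treated as an iSCSI PDU.
         */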
4296         if (th != NULL) {
4297                 if ((th->th_sport == htons(3260)) ||
4298                         (th->th_dport == htons(3260)))
4299                         return 0;
4300         }
4301         return (-1);
4302 }
4303 
4304 void
4305 qla_hw_async_event(qla_host_t *ha)
4306 {
4307         switch (ha->hw.aen_mb0) {
4308         case 0x8101:
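                /*
                 * AEN 0x8101 is an Inter-Driver Communication (IDC)
                 * request; the firmware expects an explicit ack carrying
                 * back the AEN mailbox contents.
                 */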
4309                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
4310                         ha->hw.aen_mb3, ha->hw.aen_mb4);
4311 
4312                 break;
4313 
4314         default:
4315                 break;
4316         }
4317 
4318         return;
4319 }
4320 
4321 #ifdef QL_LDFLASH_FW
4322 static int
4323 ql_get_minidump_template(qla_host_t *ha)
4324 {
4325 	uint32_t			err;
4326 	device_t			dev = ha->pci_dev;
4327 	q80_config_md_templ_cmd_t	*md_templ;
4328 	q80_config_md_templ_cmd_rsp_t	*md_templ_rsp;
4329 
4330 	md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
4331 	bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
4332 
4333 	md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
4334 	md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
4335 	md_templ->count_version |= Q8_MBX_CMD_VERSION;
4336 
4337 	md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
4338 	md_templ->buff_size = ha->hw.dma_buf.minidump.size;
4339 
4340 	if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
4341 		(sizeof(q80_config_md_templ_cmd_t) >> 2),
4342 		 ha->hw.mbox,
4343 		(sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
4344 		device_printf(dev, "%s: failed\n", __func__);
4345 
4346 		return (-1);
4347 	}
4348 
4349 	md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
4350 
4351 	err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
4352 
4353 	if (err) {
4354 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4355 		return (-1);
4356 	}
4357 
4358 	return (0);
4359 
4360 }
4361 #endif /* #ifdef QL_LDFLASH_FW */
4362 
4363 /*
4364  * Minidump related functionality
4365  */
4366 
4367 static int ql_parse_template(qla_host_t *ha);
4368 
4369 static uint32_t ql_rdcrb(qla_host_t *ha,
4370 			ql_minidump_entry_rdcrb_t *crb_entry,
4371 			uint32_t * data_buff);
4372 
4373 static uint32_t ql_pollrd(qla_host_t *ha,
4374 			ql_minidump_entry_pollrd_t *entry,
4375 			uint32_t * data_buff);
4376 
4377 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
4378 			ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
4379 			uint32_t *data_buff);
4380 
4381 static uint32_t ql_L2Cache(qla_host_t *ha,
4382 			ql_minidump_entry_cache_t *cacheEntry,
4383 			uint32_t * data_buff);
4384 
4385 static uint32_t ql_L1Cache(qla_host_t *ha,
4386 			ql_minidump_entry_cache_t *cacheEntry,
4387 			uint32_t *data_buff);
4388 
4389 static uint32_t ql_rdocm(qla_host_t *ha,
4390 			ql_minidump_entry_rdocm_t *ocmEntry,
4391 			uint32_t *data_buff);
4392 
4393 static uint32_t ql_rdmem(qla_host_t *ha,
4394 			ql_minidump_entry_rdmem_t *mem_entry,
4395 			uint32_t *data_buff);
4396 
4397 static uint32_t ql_rdrom(qla_host_t *ha,
4398 			ql_minidump_entry_rdrom_t *romEntry,
4399 			uint32_t *data_buff);
4400 
4401 static uint32_t ql_rdmux(qla_host_t *ha,
4402 			ql_minidump_entry_mux_t *muxEntry,
4403 			uint32_t *data_buff);
4404 
4405 static uint32_t ql_rdmux2(qla_host_t *ha,
4406 			ql_minidump_entry_mux2_t *muxEntry,
4407 			uint32_t *data_buff);
4408 
4409 static uint32_t ql_rdqueue(qla_host_t *ha,
4410 			ql_minidump_entry_queue_t *queueEntry,
4411 			uint32_t *data_buff);
4412 
4413 static uint32_t ql_cntrl(qla_host_t *ha,
4414 			ql_minidump_template_hdr_t *template_hdr,
4415 			ql_minidump_entry_cntrl_t *crbEntry);
4416 
4417 static uint32_t
4418 ql_minidump_size(qla_host_t *ha)
4419 {
4420 	uint32_t i, k;
4421 	uint32_t size = 0;
4422 	ql_minidump_template_hdr_t *hdr;
4423 
4424 	hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
4425 
4426 	i = 0x2;
4427 
4428 	for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
4429 		if (i & ha->hw.mdump_capture_mask)
4430 			size += hdr->capture_size_array[k];
4431 		i = i << 1;
4432 	}
4433 	return (size);
4434 }
4435 
4436 static void
4437 ql_free_minidump_buffer(qla_host_t *ha)
4438 {
4439 	if (ha->hw.mdump_buffer != NULL) {
4440 		free(ha->hw.mdump_buffer, M_QLA83XXBUF);
4441 		ha->hw.mdump_buffer = NULL;
4442 		ha->hw.mdump_buffer_size = 0;
4443 	}
4444 	return;
4445 }
4446 
4447 static int
4448 ql_alloc_minidump_buffer(qla_host_t *ha)
4449 {
4450 	ha->hw.mdump_buffer_size = ql_minidump_size(ha);
4451 
4452 	if (!ha->hw.mdump_buffer_size)
4453 		return (-1);
4454 
4455 	ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
4456 					M_NOWAIT);
4457 
4458 	if (ha->hw.mdump_buffer == NULL)
4459 		return (-1);
4460 
4461 	return (0);
4462 }
4463 
4464 static void
4465 ql_free_minidump_template_buffer(qla_host_t *ha)
4466 {
4467 	if (ha->hw.mdump_template != NULL) {
4468 		free(ha->hw.mdump_template, M_QLA83XXBUF);
4469 		ha->hw.mdump_template = NULL;
4470 		ha->hw.mdump_template_size = 0;
4471 	}
4472 	return;
4473 }
4474 
4475 static int
4476 ql_alloc_minidump_template_buffer(qla_host_t *ha)
4477 {
4478 	ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
4479 
4480 	ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
4481 					M_QLA83XXBUF, M_NOWAIT);
4482 
4483 	if (ha->hw.mdump_template == NULL)
4484 		return (-1);
4485 
4486 	return (0);
4487 }
4488 
4489 static int
4490 ql_alloc_minidump_buffers(qla_host_t *ha)
4491 {
4492 	int ret;
4493 
4494 	ret = ql_alloc_minidump_template_buffer(ha);
4495 
4496 	if (ret)
4497 		return (ret);
4498 
4499 	ret = ql_alloc_minidump_buffer(ha);
4500 
4501 	if (ret)
4502 		ql_free_minidump_template_buffer(ha);
4503 
4504 	return (ret);
4505 }
4506 
4507 static uint32_t
4508 ql_validate_minidump_checksum(qla_host_t *ha)
4509 {
4510         uint64_t sum = 0;
4511 	int count;
4512 	uint32_t *template_buff;
4513 
4514 	count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
4515 	template_buff = ha->hw.dma_buf.minidump.dma_b;
4516 
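	/*
	 * Sum all 32-bit words of the template into a 64-bit accumulator,
	 * then fold the carries back in (end-around carry, as in the
	 * Internet checksum). A valid template sums to 0xFFFFFFFF, so the
	 * one's complement of the folded sum is 0.
	 */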
4517 	while (count-- > 0) {
4518 		sum += *template_buff++;
4519 	}
4520 
4521 	while (sum >> 32) {
4522 		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
4523 	}
4524 
4525 	return (~sum);
4526 }
4527 
4528 int
4529 ql_minidump_init(qla_host_t *ha)
4530 {
4531 	int		ret = 0;
4532 	uint32_t	template_size = 0;
4533 	device_t	dev = ha->pci_dev;
4534 
4535 	/*
4536 	 * Get Minidump Template Size
	 */
4538 	ret = qla_get_minidump_tmplt_size(ha, &template_size);
4539 
4540 	if (ret || (template_size == 0)) {
4541 		device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
4542 			template_size);
4543 		return (-1);
4544 	}
4545 
4546 	/*
4547 	 * Allocate Memory for Minidump Template
4548 	 */
4549 
4550 	ha->hw.dma_buf.minidump.alignment = 8;
4551 	ha->hw.dma_buf.minidump.size = template_size;
4552 
4553 #ifdef QL_LDFLASH_FW
4554 	if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
4555 		device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
4556 
4557 		return (-1);
4558 	}
4559 	ha->hw.dma_buf.flags.minidump = 1;
4560 
4561 	/*
4562 	 * Retrieve Minidump Template
4563 	 */
4564 	ret = ql_get_minidump_template(ha);
4565 #else
4566 	ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
4567 
4568 #endif /* #ifdef QL_LDFLASH_FW */
4569 
4570 	if (ret == 0) {
4571 		ret = ql_validate_minidump_checksum(ha);
4572 
4573 		if (ret == 0) {
4574 			ret = ql_alloc_minidump_buffers(ha);
4575 
4576 			if (ret == 0)
				ha->hw.mdump_init = 1;
4578 			else
4579 				device_printf(dev,
4580 					"%s: ql_alloc_minidump_buffers"
4581 					" failed\n", __func__);
4582 		} else {
4583 			device_printf(dev, "%s: ql_validate_minidump_checksum"
4584 				" failed\n", __func__);
4585 		}
4586 	} else {
4587 		device_printf(dev, "%s: ql_get_minidump_template failed\n",
4588 			 __func__);
4589 	}
4590 
4591 	if (ret)
4592 		ql_minidump_free(ha);
4593 
4594 	return (ret);
4595 }
4596 
4597 static void
4598 ql_minidump_free(qla_host_t *ha)
4599 {
4600 	ha->hw.mdump_init = 0;
4601 	if (ha->hw.dma_buf.flags.minidump) {
4602 		ha->hw.dma_buf.flags.minidump = 0;
4603 		ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
4604 	}
4605 
4606 	ql_free_minidump_template_buffer(ha);
4607 	ql_free_minidump_buffer(ha);
4608 
4609 	return;
4610 }
4611 
4612 void
4613 ql_minidump(qla_host_t *ha)
4614 {
4615 	if (!ha->hw.mdump_init)
4616 		return;
4617 
4618 	if (ha->hw.mdump_done)
4619 		return;
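
	/*
	 * Quiesce the chip while the dump runs: ql_stop_sequence() stops the
	 * device and returns the sequence index from which ql_start_sequence()
	 * resumes it after the capture completes.
	 */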
4620 	ha->hw.mdump_usec_ts = qla_get_usec_timestamp();
4621 	ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
4622 
4623 	bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
4624 	bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
4625 
4626 	bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
4627 		ha->hw.mdump_template_size);
4628 
4629 	ql_parse_template(ha);
4630 
4631 	ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
4632 
4633 	ha->hw.mdump_done = 1;
4634 
4635 	return;
4636 }
4637 
4638 /*
4639  * helper routines
4640  */
4641 static void
4642 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
4643 {
4644 	if (esize != entry->hdr.entry_capture_size) {
4645 		entry->hdr.entry_capture_size = esize;
4646 		entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
4647 	}
4648 	return;
4649 }
4650 
4651 static int
4652 ql_parse_template(qla_host_t *ha)
4653 {
4654 	uint32_t num_of_entries, buff_level, e_cnt, esize;
4655 	uint32_t end_cnt, rv = 0;
4656 	char *dump_buff, *dbuff;
4657 	int sane_start = 0, sane_end = 0;
4658 	ql_minidump_template_hdr_t *template_hdr;
4659 	ql_minidump_entry_t *entry;
4660 	uint32_t capture_mask;
4661 	uint32_t dump_size;
4662 
4663 	/* Setup parameters */
4664 	template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4665 
4666 	if (template_hdr->entry_type == TLHDR)
4667 		sane_start = 1;
4668 
4669 	dump_buff = (char *) ha->hw.mdump_buffer;
4670 
4671 	num_of_entries = template_hdr->num_of_entries;
4672 
4673 	entry = (ql_minidump_entry_t *) ((char *)template_hdr
4674 			+ template_hdr->first_entry_offset );
4675 
4676 	template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4677 		template_hdr->ocm_window_array[ha->pci_func];
4678 	template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4679 
4680 	capture_mask = ha->hw.mdump_capture_mask;
4681 	dump_size = ha->hw.mdump_buffer_size;
4682 
4683 	template_hdr->driver_capture_mask = capture_mask;
4684 
4685 	QL_DPRINT80(ha, (ha->pci_dev,
4686 		"%s: sane_start = %d num_of_entries = %d "
4687 		"capture_mask = 0x%x dump_size = %d \n",
4688 		__func__, sane_start, num_of_entries, capture_mask, dump_size));
4689 
4690 	for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4691 		/*
		 * If the entry's capture mask does not intersect the requested
		 * capture mask, mark the entry as skipped in driver_flags and
		 * move on to the next entry.
4694 		 */
4695 
4696 		if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4697 			entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4698 			entry = (ql_minidump_entry_t *) ((char *) entry
4699 					+ entry->hdr.entry_size);
4700 			continue;
4701 		}
4702 
4703 		/*
		 * This is ONLY needed in implementations where the capture
		 * buffer allocated is too small to capture all of the
		 * required entries for a given capture mask. We would need
		 * to empty the buffer contents to a file, if possible,
		 * before processing the next entry. If the buff_full_flag is
		 * set, no further capture will happen and all remaining
		 * non-control entries will be skipped.
4711 		 */
4712 		if (entry->hdr.entry_capture_size != 0) {
4713 			if ((buff_level + entry->hdr.entry_capture_size) >
4714 				dump_size) {
4715 				/*  Try to recover by emptying buffer to file */
4716 				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4717 				entry = (ql_minidump_entry_t *) ((char *) entry
4718 						+ entry->hdr.entry_size);
4719 				continue;
4720 			}
4721 		}
4722 
4723 		/*
4724 		 * Decode the entry type and process it accordingly
4725 		 */
4726 
4727 		switch (entry->hdr.entry_type) {
4728 		case RDNOP:
4729 			break;
4730 
4731 		case RDEND:
4732 			if (sane_end == 0) {
4733 				end_cnt = e_cnt;
4734 			}
4735 			sane_end++;
4736 			break;
4737 
4738 		case RDCRB:
4739 			dbuff = dump_buff + buff_level;
4740 			esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
4741 			ql_entry_err_chk(entry, esize);
4742 			buff_level += esize;
4743 			break;
4744 
4745                 case POLLRD:
4746                         dbuff = dump_buff + buff_level;
4747                         esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
4748                         ql_entry_err_chk(entry, esize);
4749                         buff_level += esize;
4750                         break;
4751 
4752                 case POLLRDMWR:
4753                         dbuff = dump_buff + buff_level;
4754                         esize = ql_pollrd_modify_write(ha, (void *)entry,
4755 					(void *)dbuff);
4756                         ql_entry_err_chk(entry, esize);
4757                         buff_level += esize;
4758                         break;
4759 
4760 		case L2ITG:
4761 		case L2DTG:
4762 		case L2DAT:
4763 		case L2INS:
4764 			dbuff = dump_buff + buff_level;
4765 			esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
4766 			if (esize == -1) {
4767 				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4768 			} else {
4769 				ql_entry_err_chk(entry, esize);
4770 				buff_level += esize;
4771 			}
4772 			break;
4773 
4774 		case L1DAT:
4775 		case L1INS:
4776 			dbuff = dump_buff + buff_level;
4777 			esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
4778 			ql_entry_err_chk(entry, esize);
4779 			buff_level += esize;
4780 			break;
4781 
4782 		case RDOCM:
4783 			dbuff = dump_buff + buff_level;
4784 			esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
4785 			ql_entry_err_chk(entry, esize);
4786 			buff_level += esize;
4787 			break;
4788 
4789 		case RDMEM:
4790 			dbuff = dump_buff + buff_level;
4791 			esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
4792 			ql_entry_err_chk(entry, esize);
4793 			buff_level += esize;
4794 			break;
4795 
4796 		case BOARD:
4797 		case RDROM:
4798 			dbuff = dump_buff + buff_level;
4799 			esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
4800 			ql_entry_err_chk(entry, esize);
4801 			buff_level += esize;
4802 			break;
4803 
4804 		case RDMUX:
4805 			dbuff = dump_buff + buff_level;
4806 			esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
4807 			ql_entry_err_chk(entry, esize);
4808 			buff_level += esize;
4809 			break;
4810 
4811                 case RDMUX2:
4812                         dbuff = dump_buff + buff_level;
4813                         esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4814                         ql_entry_err_chk(entry, esize);
4815                         buff_level += esize;
4816                         break;
4817 
4818 		case QUEUE:
4819 			dbuff = dump_buff + buff_level;
4820 			esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4821 			ql_entry_err_chk(entry, esize);
4822 			buff_level += esize;
4823 			break;
4824 
4825 		case CNTRL:
4826 			if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4827 				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4828 			}
4829 			break;
4830 		default:
4831 			entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4832 			break;
4833 		}
4834 		/*  next entry in the template */
4835 		entry = (ql_minidump_entry_t *) ((char *) entry
4836 						+ entry->hdr.entry_size);
4837 	}
4838 
4839 	if (!sane_start || (sane_end > 1)) {
4840 		device_printf(ha->pci_dev,
4841 			"\n%s: Template configuration error. Check Template\n",
4842 			__func__);
4843 	}
4844 
4845 	QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4846 		__func__, template_hdr->num_of_entries));
4847 
4848 	return 0;
4849 }
4850 
4851 /*
4852  * Read CRB operation.
4853  */
4854 static uint32_t
4855 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
4856 	uint32_t * data_buff)
4857 {
4858 	int loop_cnt;
4859 	int ret;
4860 	uint32_t op_count, addr, stride, value = 0;
4861 
4862 	addr = crb_entry->addr;
4863 	op_count = crb_entry->op_count;
4864 	stride = crb_entry->addr_stride;
4865 
4866 	for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4867 		ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4868 
4869 		if (ret)
4870 			return (0);
4871 
4872 		*data_buff++ = addr;
4873 		*data_buff++ = value;
4874 		addr = addr + stride;
4875 	}
4876 
4877 	/*
	 * return the amount of data written to the capture buffer
4879 	 */
4880 	return (op_count * (2 * sizeof(uint32_t)));
4881 }
4882 
4883 /*
4884  * Handle L2 Cache.
4885  */
4886 
4887 static uint32_t
4888 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4889 	uint32_t * data_buff)
4890 {
4891 	int i, k;
4892 	int loop_cnt;
4893 	int ret;
4894 
4895 	uint32_t read_value;
4896 	uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4897 	uint32_t tag_value, read_cnt;
4898 	volatile uint8_t cntl_value_r;
4899 	long timeout;
4900 	uint32_t data;
4901 
4902 	loop_cnt = cacheEntry->op_count;
4903 
4904 	read_addr = cacheEntry->read_addr;
4905 	cntrl_addr = cacheEntry->control_addr;
4906 	cntl_value_w = (uint32_t) cacheEntry->write_value;
4907 
4908 	tag_reg_addr = cacheEntry->tag_reg_addr;
4909 
4910 	tag_value = cacheEntry->init_tag_value;
4911 	read_cnt = cacheEntry->read_addr_cnt;
4912 
4913 	for (i = 0; i < loop_cnt; i++) {
4914 		ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4915 		if (ret)
4916 			return (0);
4917 
4918 		if (cacheEntry->write_value != 0) {
4919 
4920 			ret = ql_rdwr_indreg32(ha, cntrl_addr,
4921 					&cntl_value_w, 0);
4922 			if (ret)
4923 				return (0);
4924 		}
4925 
4926 		if (cacheEntry->poll_mask != 0) {
4927 
4928 			timeout = cacheEntry->poll_wait;
4929 
4930 			ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4931 			if (ret)
4932 				return (0);
4933 
4934 			cntl_value_r = (uint8_t)data;
4935 
4936 			while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
4937 				if (timeout) {
4938 					qla_mdelay(__func__, 1);
4939 					timeout--;
4940 				} else
4941 					break;
4942 
4943 				ret = ql_rdwr_indreg32(ha, cntrl_addr,
4944 						&data, 1);
4945 				if (ret)
4946 					return (0);
4947 
4948 				cntl_value_r = (uint8_t)data;
4949 			}
4950 			if (!timeout) {
				/*
				 * Report a timeout error: the core dump
				 * capture failed. Skip the remaining entries,
				 * write the buffer out to a file if possible,
				 * and use the driver-specific fields in the
				 * template header to report this error.
				 */
4958 				return (-1);
4959 			}
4960 		}
4961 
4962 		addr = read_addr;
4963 		for (k = 0; k < read_cnt; k++) {
4964 			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4965 			if (ret)
4966 				return (0);
4967 
4968 			*data_buff++ = read_value;
4969 			addr += cacheEntry->read_addr_stride;
4970 		}
4971 
4972 		tag_value += cacheEntry->tag_value_stride;
4973 	}
4974 
4975 	return (read_cnt * loop_cnt * sizeof(uint32_t));
4976 }
4977 
4978 /*
4979  * Handle L1 Cache.
4980  */
4981 
4982 static uint32_t
4983 ql_L1Cache(qla_host_t *ha,
4984 	ql_minidump_entry_cache_t *cacheEntry,
4985 	uint32_t *data_buff)
4986 {
4987 	int ret;
4988 	int i, k;
4989 	int loop_cnt;
4990 
4991 	uint32_t read_value;
4992 	uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
4993 	uint32_t tag_value, read_cnt;
4994 	uint32_t cntl_value_w;
4995 
4996 	loop_cnt = cacheEntry->op_count;
4997 
4998 	read_addr = cacheEntry->read_addr;
4999 	cntrl_addr = cacheEntry->control_addr;
5000 	cntl_value_w = (uint32_t) cacheEntry->write_value;
5001 
5002 	tag_reg_addr = cacheEntry->tag_reg_addr;
5003 
5004 	tag_value = cacheEntry->init_tag_value;
5005 	read_cnt = cacheEntry->read_addr_cnt;
5006 
5007 	for (i = 0; i < loop_cnt; i++) {
5008 		ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
5009 		if (ret)
5010 			return (0);
5011 
5012 		ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
5013 		if (ret)
5014 			return (0);
5015 
5016 		addr = read_addr;
5017 		for (k = 0; k < read_cnt; k++) {
5018 			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5019 			if (ret)
5020 				return (0);
5021 
5022 			*data_buff++ = read_value;
5023 			addr += cacheEntry->read_addr_stride;
5024 		}
5025 
5026 		tag_value += cacheEntry->tag_value_stride;
5027 	}
5028 
5029 	return (read_cnt * loop_cnt * sizeof(uint32_t));
5030 }
5031 
5032 /*
5033  * Reading OCM memory
5034  */
5035 
5036 static uint32_t
5037 ql_rdocm(qla_host_t *ha,
5038 	ql_minidump_entry_rdocm_t *ocmEntry,
5039 	uint32_t *data_buff)
5040 {
5041 	int i, loop_cnt;
5042 	volatile uint32_t addr;
5043 	volatile uint32_t value;
5044 
5045 	addr = ocmEntry->read_addr;
5046 	loop_cnt = ocmEntry->op_count;
5047 
5048 	for (i = 0; i < loop_cnt; i++) {
5049 		value = READ_REG32(ha, addr);
5050 		*data_buff++ = value;
5051 		addr += ocmEntry->read_addr_stride;
5052 	}
5053 	return (loop_cnt * sizeof(value));
5054 }
5055 
5056 /*
5057  * Read memory
5058  */
5059 
5060 static uint32_t
5061 ql_rdmem(qla_host_t *ha,
5062 	ql_minidump_entry_rdmem_t *mem_entry,
5063 	uint32_t *data_buff)
5064 {
5065 	int ret;
5066         int i, loop_cnt;
5067         volatile uint32_t addr;
5068 	q80_offchip_mem_val_t val;
5069 
5070         addr = mem_entry->read_addr;
5071 
5072 	/* size in bytes / 16 */
5073         loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
5074 
5075         for (i = 0; i < loop_cnt; i++) {
5076 		ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
5077 		if (ret)
5078 			return (0);
5079 
5080                 *data_buff++ = val.data_lo;
5081                 *data_buff++ = val.data_hi;
5082                 *data_buff++ = val.data_ulo;
5083                 *data_buff++ = val.data_uhi;
5084 
5085                 addr += (sizeof(uint32_t) * 4);
5086         }
5087 
5088         return (loop_cnt * (sizeof(uint32_t) * 4));
5089 }
5090 
5091 /*
5092  * Read Rom
5093  */
5094 
5095 static uint32_t
5096 ql_rdrom(qla_host_t *ha,
5097 	ql_minidump_entry_rdrom_t *romEntry,
5098 	uint32_t *data_buff)
5099 {
5100 	int ret;
5101 	int i, loop_cnt;
5102 	uint32_t addr;
5103 	uint32_t value;
5104 
5105 	addr = romEntry->read_addr;
5106 	loop_cnt = romEntry->read_data_size; /* This is size in bytes */
5107 	loop_cnt /= sizeof(value);
5108 
5109 	for (i = 0; i < loop_cnt; i++) {
5110 		ret = ql_rd_flash32(ha, addr, &value);
5111 		if (ret)
5112 			return (0);
5113 
5114 		*data_buff++ = value;
5115 		addr += sizeof(value);
5116 	}
5117 
5118 	return (loop_cnt * sizeof(value));
5119 }
5120 
5121 /*
5122  * Read MUX data
5123  */
5124 
5125 static uint32_t
5126 ql_rdmux(qla_host_t *ha,
5127 	ql_minidump_entry_mux_t *muxEntry,
5128 	uint32_t *data_buff)
5129 {
5130 	int ret;
5131 	int loop_cnt;
5132 	uint32_t read_value, sel_value;
5133 	uint32_t read_addr, select_addr;
5134 
5135 	select_addr = muxEntry->select_addr;
5136 	sel_value = muxEntry->select_value;
5137 	read_addr = muxEntry->read_addr;
5138 
5139 	for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
5140 		ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
5141 		if (ret)
5142 			return (0);
5143 
5144 		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5145 		if (ret)
5146 			return (0);
5147 
5148 		*data_buff++ = sel_value;
5149 		*data_buff++ = read_value;
5150 
5151 		sel_value += muxEntry->select_value_stride;
5152 	}
5153 
5154 	return (loop_cnt * (2 * sizeof(uint32_t)));
5155 }
5156 
5157 static uint32_t
5158 ql_rdmux2(qla_host_t *ha,
5159 	ql_minidump_entry_mux2_t *muxEntry,
5160 	uint32_t *data_buff)
5161 {
5162 	int ret;
5163         int loop_cnt;
5164 
5165         uint32_t select_addr_1, select_addr_2;
5166         uint32_t select_value_1, select_value_2;
5167         uint32_t select_value_count, select_value_mask;
5168         uint32_t read_addr, read_value;
5169 
5170         select_addr_1 = muxEntry->select_addr_1;
5171         select_addr_2 = muxEntry->select_addr_2;
5172         select_value_1 = muxEntry->select_value_1;
5173         select_value_2 = muxEntry->select_value_2;
5174         select_value_count = muxEntry->select_value_count;
5175         select_value_mask  = muxEntry->select_value_mask;
5176 
5177         read_addr = muxEntry->read_addr;
5178 
5179         for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count;
5180 		loop_cnt++) {
5181                 uint32_t temp_sel_val;
5182 
5183 		ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
5184 		if (ret)
5185 			return (0);
5186 
5187                 temp_sel_val = select_value_1 & select_value_mask;
5188 
5189 		ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5190 		if (ret)
5191 			return (0);
5192 
5193 		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5194 		if (ret)
5195 			return (0);
5196 
5197                 *data_buff++ = temp_sel_val;
5198                 *data_buff++ = read_value;
5199 
5200 		ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
5201 		if (ret)
5202 			return (0);
5203 
5204                 temp_sel_val = select_value_2 & select_value_mask;
5205 
5206 		ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5207 		if (ret)
5208 			return (0);
5209 
5210 		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5211 		if (ret)
5212 			return (0);
5213 
5214                 *data_buff++ = temp_sel_val;
5215                 *data_buff++ = read_value;
5216 
5217                 select_value_1 += muxEntry->select_value_stride;
5218                 select_value_2 += muxEntry->select_value_stride;
5219         }
5220 
5221         return (loop_cnt * (4 * sizeof(uint32_t)));
5222 }
5223 
5224 /*
5225  * Handling Queue State Reads.
5226  */
5227 
5228 static uint32_t
5229 ql_rdqueue(qla_host_t *ha,
5230 	ql_minidump_entry_queue_t *queueEntry,
5231 	uint32_t *data_buff)
5232 {
5233 	int ret;
5234 	int loop_cnt, k;
5235 	uint32_t read_value;
5236 	uint32_t read_addr, read_stride, select_addr;
5237 	uint32_t queue_id, read_cnt;
5238 
5239 	read_cnt = queueEntry->read_addr_cnt;
5240 	read_stride = queueEntry->read_addr_stride;
5241 	select_addr = queueEntry->select_addr;
5242 
5243 	for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
5244 		loop_cnt++) {
5245 		ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
5246 		if (ret)
5247 			return (0);
5248 
5249 		read_addr = queueEntry->read_addr;
5250 
5251 		for (k = 0; k < read_cnt; k++) {
5252 			ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5253 			if (ret)
5254 				return (0);
5255 
5256 			*data_buff++ = read_value;
5257 			read_addr += read_stride;
5258 		}
5259 
5260 		queue_id += queueEntry->queue_id_stride;
5261 	}
5262 
5263 	return (loop_cnt * (read_cnt * sizeof(uint32_t)));
5264 }
5265 
5266 /*
5267  * Handling control entries.
5268  */
5269 
5270 static uint32_t
5271 ql_cntrl(qla_host_t *ha,
5272 	ql_minidump_template_hdr_t *template_hdr,
5273 	ql_minidump_entry_cntrl_t *crbEntry)
5274 {
5275 	int ret;
5276 	int count;
5277 	uint32_t opcode, read_value, addr, entry_addr;
5278 	long timeout;
5279 
5280 	entry_addr = crbEntry->addr;
5281 
5282 	for (count = 0; count < crbEntry->op_count; count++) {
5283 		opcode = crbEntry->opcode;
5284 
5285 		if (opcode & QL_DBG_OPCODE_WR) {
5286                 	ret = ql_rdwr_indreg32(ha, entry_addr,
5287 					&crbEntry->value_1, 0);
5288 			if (ret)
5289 				return (0);
5290 
5291 			opcode &= ~QL_DBG_OPCODE_WR;
5292 		}
5293 
5294 		if (opcode & QL_DBG_OPCODE_RW) {
5295                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5296 			if (ret)
5297 				return (0);
5298 
5299                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5300 			if (ret)
5301 				return (0);
5302 
5303 			opcode &= ~QL_DBG_OPCODE_RW;
5304 		}
5305 
5306 		if (opcode & QL_DBG_OPCODE_AND) {
5307                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5308 			if (ret)
5309 				return (0);
5310 
5311 			read_value &= crbEntry->value_2;
5312 			opcode &= ~QL_DBG_OPCODE_AND;
5313 
5314 			if (opcode & QL_DBG_OPCODE_OR) {
5315 				read_value |= crbEntry->value_3;
5316 				opcode &= ~QL_DBG_OPCODE_OR;
5317 			}
5318 
5319                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5320 			if (ret)
5321 				return (0);
5322 		}
5323 
5324 		if (opcode & QL_DBG_OPCODE_OR) {
5325                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5326 			if (ret)
5327 				return (0);
5328 
5329 			read_value |= crbEntry->value_3;
5330 
5331                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5332 			if (ret)
5333 				return (0);
5334 
5335 			opcode &= ~QL_DBG_OPCODE_OR;
5336 		}
5337 
5338 		if (opcode & QL_DBG_OPCODE_POLL) {
5339 			opcode &= ~QL_DBG_OPCODE_POLL;
5340 			timeout = crbEntry->poll_timeout;
5341 			addr = entry_addr;
5342 
5343                 	ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5344 			if (ret)
5345 				return (0);
5346 
5347 			while ((read_value & crbEntry->value_2)
5348 				!= crbEntry->value_1) {
5349 				if (timeout) {
5350 					qla_mdelay(__func__, 1);
5351 					timeout--;
5352 				} else
5353 					break;
5354 
5355                 		ret = ql_rdwr_indreg32(ha, addr,
5356 						&read_value, 1);
5357 				if (ret)
5358 					return (0);
5359 			}
5360 
5361 			if (!timeout) {
5362 				/*
				 * Report a timeout error: the core dump
				 * capture failed. Skip the remaining entries,
				 * write the buffer out to a file if possible,
				 * and use the driver-specific fields in the
				 * template header to report this error.
5369 				 */
5370 				return (-1);
5371 			}
5372 		}
5373 
5374 		if (opcode & QL_DBG_OPCODE_RDSTATE) {
5375 			/*
5376 			 * decide which address to use.
5377 			 */
5378 			if (crbEntry->state_index_a) {
5379 				addr = template_hdr->saved_state_array[
5380 						crbEntry-> state_index_a];
5381 			} else {
5382 				addr = entry_addr;
5383 			}
5384 
5385                 	ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5386 			if (ret)
5387 				return (0);
5388 
5389 			template_hdr->saved_state_array[crbEntry->state_index_v]
5390 					= read_value;
5391 			opcode &= ~QL_DBG_OPCODE_RDSTATE;
5392 		}
5393 
5394 		if (opcode & QL_DBG_OPCODE_WRSTATE) {
5395 			/*
5396 			 * decide which value to use.
5397 			 */
5398 			if (crbEntry->state_index_v) {
5399 				read_value = template_hdr->saved_state_array[
5400 						crbEntry->state_index_v];
5401 			} else {
5402 				read_value = crbEntry->value_1;
5403 			}
5404 			/*
5405 			 * decide which address to use.
5406 			 */
5407 			if (crbEntry->state_index_a) {
5408 				addr = template_hdr->saved_state_array[
5409 						crbEntry->state_index_a];
5410 			} else {
5411 				addr = entry_addr;
5412 			}
5413 
5414 			ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
5415 			if (ret)
5416 				return (0);
5417 
5418 			opcode &= ~QL_DBG_OPCODE_WRSTATE;
5419 		}
5420 
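		/*
		 * MDSTATE modifies a saved state word in place:
		 * v = ((v << shl) >> shr), then v &= value_2 (when nonzero),
		 * v |= value_3, and finally v += value_1.
		 */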
5421 		if (opcode & QL_DBG_OPCODE_MDSTATE) {
5422 			/* Read the value from the saved state using state_index_v. */
5423 			read_value = template_hdr->saved_state_array[
5424 						crbEntry->state_index_v];
5425 
5426 			read_value <<= crbEntry->shl; /* shift left operation */
5427 			read_value >>= crbEntry->shr; /* shift right operation */
5428 
5429 			if (crbEntry->value_2) {
5430 				/* check if AND mask is provided */
5431 				read_value &= crbEntry->value_2;
5432 			}
5433 
5434 			read_value |= crbEntry->value_3; /* OR operation */
5435 			read_value += crbEntry->value_1; /* increment op */
5436 
5437 			/* Write value back to state area. */
5438 
5439 			template_hdr->saved_state_array[crbEntry->state_index_v]
5440 					= read_value;
5441 			opcode &= ~QL_DBG_OPCODE_MDSTATE;
5442 		}
5443 
5444 		entry_addr += crbEntry->addr_stride;
5445 	}
5446 
5447 	return (0);
5448 }
5449 
5450 /*
5451  * Handle a read-poll entry.
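 *
 * For each of op_count iterations: write select_value to select_addr,
 * poll select_addr until (value & mask) is nonzero, read read_addr, and
 * store the (select_value, data) pair in the capture buffer. select_value
 * advances by select_value_stride on every iteration.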
5452  */
5453 
5454 static uint32_t
5455 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
5456 	uint32_t *data_buff)
5457 {
5458 	int ret;
5459 	int loop_cnt;
5460 	uint32_t op_count, select_addr, select_value_stride, select_value;
5461 	uint32_t read_addr, poll, mask, data_size, data;
5462 	uint32_t wait_count = 0;
5463 
5464 	select_addr		= entry->select_addr;
5465 	read_addr		= entry->read_addr;
5466 	select_value		= entry->select_value;
5467 	select_value_stride	= entry->select_value_stride;
5468 	op_count		= entry->op_count;
5469 	poll			= entry->poll;
5470 	mask			= entry->mask;
5471 	data_size		= entry->data_size;
5472 
5473 	for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
5474 		ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
5475 		if (ret)
5476 			return (0);
5477 
5478 		wait_count = 0;
5479 
5480 		while (wait_count < poll) {
5481 			uint32_t temp;
5482 
5483 			ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
5484 			if (ret)
5485 				return (0);
5486 
5487 			if ((temp & mask) != 0) {
5488 				break;
5489 			}
5490 			wait_count++;
5491 		}
5492 
5493 		if (wait_count == poll) {
5494 			device_printf(ha->pci_dev,
5495 				"%s: Error in processing entry\n", __func__);
5496 			device_printf(ha->pci_dev,
5497 				"%s: wait_count <0x%x> poll <0x%x>\n",
5498 				__func__, wait_count, poll);
5499 			return (0);
5500 		}
5501 
5502 		ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
5503 		if (ret)
5504 			return (0);
5505 
5506 		*data_buff++ = select_value;
5507 		*data_buff++ = data;
5508 		select_value += select_value_stride;
5509 	}
5510 
5511 	/*
5512 	 * For testing purposes, return the amount of data written.
5513 	 */
5514 	return (loop_cnt * (2 * sizeof(uint32_t)));
5515 }
5516 
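/*
 * A minimal usage sketch (hypothetical caller, for illustration only;
 * "entry" and "data_buff" are assumed to point at a parsed minidump
 * template entry and a large-enough capture buffer):
 *
 *	uint32_t len;
 *
 *	len = ql_pollrd(ha, entry, data_buff);
 *	if (len == 0)
 *		return (-1);
 *	data_buff += len / sizeof(uint32_t);
 *
 * A return of 0 means a register access failed or a poll timed out.
 */
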
5517 /*
5518  * Handle a read-modify-write-with-poll entry.
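 *
 * Write value_1 to addr_1 and poll addr_1 until (value & mask) is
 * nonzero; then read addr_2, AND it with modify_mask, write it back,
 * write value_2 to addr_1, poll addr_1 once more, and store the
 * (addr_2, data) pair in the capture buffer.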
5519  */
5520 
5521 static uint32_t
5522 ql_pollrd_modify_write(qla_host_t *ha,
5523 	ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
5524 	uint32_t *data_buff)
5525 {
5526 	int ret;
5527 	uint32_t addr_1, addr_2, value_1, value_2, data;
5528 	uint32_t poll, mask, data_size, modify_mask;
5529 	uint32_t wait_count = 0;
5530 
5531 	addr_1		= entry->addr_1;
5532 	addr_2		= entry->addr_2;
5533 	value_1		= entry->value_1;
5534 	value_2		= entry->value_2;
5535 
5536 	poll		= entry->poll;
5537 	mask		= entry->mask;
5538 	modify_mask	= entry->modify_mask;
5539 	data_size	= entry->data_size;
5540 
5541 	ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
5542 	if (ret)
5543 		return (0);
5544 
5545 	wait_count = 0;
5546 	while (wait_count < poll) {
5547 		uint32_t temp;
5548 
5549 		ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5550 		if (ret)
5551 			return (0);
5552 
5553 		if ((temp & mask) != 0) {
5554 			break;
5555 		}
5556 		wait_count++;
5557 	}
5558 
5559 	if (wait_count == poll) {
5560 		device_printf(ha->pci_dev, "%s: Error in processing entry\n",
5561 			__func__);
5562 	} else {
5563 		ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
5564 		if (ret)
5565 			return (0);
5566 
5567 		data &= modify_mask;
5568 
5569 		ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
5570 		if (ret)
5571 			return (0);
5572 
5573 		ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
5574 		if (ret)
5575 			return (0);
5576 
5577 		/* Poll addr_1 again for the ready bits. */
5578 		wait_count = 0;
5579 		while (wait_count < poll) {
5580 			uint32_t temp;
5581 
5582 			ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5583 			if (ret)
5584 				return (0);
5585 
5586 			if ((temp & mask) != 0) {
5587 				break;
5588 			}
5589 			wait_count++;
5590 		}
5591 		*data_buff++ = addr_2;
5592 		*data_buff++ = data;
5593 	}
5594 
5595 	/*
5596 	 * For testing purposes, return the amount of data written
5597 	 * (one address/data pair).
5598 	 */
5599 	return (2 * sizeof(uint32_t));
5599 }
5600