/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains Hardware dependent functions
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include "ql_minidump.h"

/*
 * Static Functions
 */

static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static int qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
	uint32_t num_intrs, uint32_t create);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
	int tenable, int rcv);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
		uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);

static int qla_init_nic_func(qla_host_t *ha);
static int qla_stop_nic_func(qla_host_t *ha);
static int qla_query_fw_dcbx_caps(qla_host_t *ha);
static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
static int qla_get_cam_search_mode(qla_host_t *ha);

static void ql_minidump_free(qla_host_t *ha);

#ifdef QL_DBG

static void
qla_stop_pegs(qla_host_t *ha)
{
        uint32_t val = 1;

        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
}

static int
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
			qla_stop_pegs(ha);
			QLA_UNLOCK(ha, __func__);
		}
	}

	return err;
}
#endif /* #ifdef QL_DBG */

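/*
 * Validate the value written to the "port_cfg" sysctl (see
 * ql_hw_add_sysctls() below): bits 0-3 select DCBX enable/disable
 * (0 or 1), bits 4-7 the pause configuration (0 = none, 1 = std,
 * 2 = ppm) and bits 8-11 the standard-pause direction (0 = xmt and
 * rcv, 1 = xmt only, 2 = rcv only).
 */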
static int
qla_validate_set_port_cfg_bit(uint32_t bits)
{
        if ((bits & 0xF) > 1)
                return (-1);

        if (((bits >> 4) & 0xF) > 2)
                return (-1);

        if (((bits >> 8) & 0xF) > 2)
                return (-1);

        return (0);
}

static int
qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;
        uint32_t cfg_bits;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

	ha = (qla_host_t *)arg1;

        if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) {

                err = qla_get_port_config(ha, &cfg_bits);

                if (err)
                        goto qla_sysctl_set_port_cfg_exit;

                if (ret & 0x1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
                } else {
                        cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;

                if ((ret & 0xF) == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
                } else if ((ret & 0xF) == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;

                if (ret == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
                } else if (ret == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
                }

		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                	err = qla_set_port_config(ha, cfg_bits);
			QLA_UNLOCK(ha, __func__);
		} else {
			device_printf(ha->pci_dev, "%s: failed\n", __func__);
		}
        } else {
		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                	err = qla_get_port_config(ha, &cfg_bits);
			QLA_UNLOCK(ha, __func__);
		} else {
			device_printf(ha->pci_dev, "%s: failed\n", __func__);
		}
        }

qla_sysctl_set_port_cfg_exit:
        return err;
}

static int
qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	ha = (qla_host_t *)arg1;

	if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
		(ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {

		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
			err = qla_set_cam_search_mode(ha, (uint32_t)ret);
			QLA_UNLOCK(ha, __func__);
		} else {
			device_printf(ha->pci_dev, "%s: failed\n", __func__);
		}

	} else {
		device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
	}

	return (err);
}

static int
qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	ha = (qla_host_t *)arg1;
	if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
		err = qla_get_cam_search_mode(ha);
		QLA_UNLOCK(ha, __func__);
	} else {
		device_printf(ha->pci_dev, "%s: failed\n", __func__);
	}

	return (err);
}

static void
qlnx_add_hw_mac_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_mac",
                        CTLFLAG_RD, NULL, "stats_hw_mac");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_frames",
                CTLFLAG_RD, &ha->hw.mac.xmt_frames,
                "xmt_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_bytes,
                "xmt_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_mcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_mcast_pkts,
                "xmt_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_bcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_bcast_pkts,
                "xmt_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pause_frames",
                CTLFLAG_RD, &ha->hw.mac.xmt_pause_frames,
                "xmt_pause_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_cntrl_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_cntrl_pkts,
                "xmt_cntrl_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_64bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_64bytes,
                "xmt_pkt_lt_64bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_127bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_127bytes,
                "xmt_pkt_lt_127bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_255bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_255bytes,
                "xmt_pkt_lt_255bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_511bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_511bytes,
                "xmt_pkt_lt_511bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_1023bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1023bytes,
                "xmt_pkt_lt_1023bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1518bytes,
                "xmt_pkt_lt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_gt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_gt_1518bytes,
                "xmt_pkt_gt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_frames",
                CTLFLAG_RD, &ha->hw.mac.rcv_frames,
                "rcv_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_bytes,
                "rcv_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_mcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_mcast_pkts,
                "rcv_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_bcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_bcast_pkts,
                "rcv_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pause_frames",
                CTLFLAG_RD, &ha->hw.mac.rcv_pause_frames,
                "rcv_pause_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_cntrl_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_cntrl_pkts,
                "rcv_cntrl_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_64bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_64bytes,
                "rcv_pkt_lt_64bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_127bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_127bytes,
                "rcv_pkt_lt_127bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_255bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_255bytes,
                "rcv_pkt_lt_255bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_511bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_511bytes,
                "rcv_pkt_lt_511bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_1023bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1023bytes,
                "rcv_pkt_lt_1023bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1518bytes,
                "rcv_pkt_lt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_gt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_gt_1518bytes,
                "rcv_pkt_gt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_error",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_error,
                "rcv_len_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_small",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_small,
                "rcv_len_small");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_large",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_large,
                "rcv_len_large");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_jabber",
                CTLFLAG_RD, &ha->hw.mac.rcv_jabber,
                "rcv_jabber");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_dropped",
                CTLFLAG_RD, &ha->hw.mac.rcv_dropped,
                "rcv_dropped");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "fcs_error",
                CTLFLAG_RD, &ha->hw.mac.fcs_error,
                "fcs_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "align_error",
                CTLFLAG_RD, &ha->hw.mac.align_error,
                "align_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_frames,
                "eswitched_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_bytes",
                CTLFLAG_RD, &ha->hw.mac.eswitched_bytes,
                "eswitched_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_mcast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_mcast_frames,
                "eswitched_mcast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_bcast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_bcast_frames,
                "eswitched_bcast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_ucast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_ucast_frames,
                "eswitched_ucast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_err_free_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_frames,
                "eswitched_err_free_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_err_free_bytes",
                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_bytes,
                "eswitched_err_free_bytes");

	return;
}

static void
qlnx_add_hw_rcv_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_rcv",
                        CTLFLAG_RD, NULL, "stats_hw_rcv");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_bytes",
                CTLFLAG_RD, &ha->hw.rcv.total_bytes,
                "total_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_pkts",
                CTLFLAG_RD, &ha->hw.rcv.total_pkts,
                "total_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.lro_pkt_count,
                "lro_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "sw_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.sw_pkt_count,
                "sw_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "ip_chksum_err",
                CTLFLAG_RD, &ha->hw.rcv.ip_chksum_err,
                "ip_chksum_err");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_wo_acntxts",
                CTLFLAG_RD, &ha->hw.rcv.pkts_wo_acntxts,
                "pkts_wo_acntxts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_card",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_card,
                "pkts_dropped_no_sds_card");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_host",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_host,
                "pkts_dropped_no_sds_host");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "oversized_pkts",
                CTLFLAG_RD, &ha->hw.rcv.oversized_pkts,
                "oversized_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_rds",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_rds,
                "pkts_dropped_no_rds");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "unxpctd_mcast_pkts",
                CTLFLAG_RD, &ha->hw.rcv.unxpctd_mcast_pkts,
                "unxpctd_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "re1_fbq_error",
                CTLFLAG_RD, &ha->hw.rcv.re1_fbq_error,
                "re1_fbq_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "invalid_mac_addr",
                CTLFLAG_RD, &ha->hw.rcv.invalid_mac_addr,
                "invalid_mac_addr");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_trys",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_trys,
                "rds_prime_trys");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_success",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_success,
                "rds_prime_success");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_added",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_added,
                "lro_flows_added");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_deleted",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_deleted,
                "lro_flows_deleted");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_active",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_active,
                "lro_flows_active");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_droped_unknown",
                CTLFLAG_RD, &ha->hw.rcv.pkts_droped_unknown,
                "pkts_droped_unknown");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_cnt_oversized",
                CTLFLAG_RD, &ha->hw.rcv.pkts_cnt_oversized,
                "pkts_cnt_oversized");

	return;
}

static void
qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_xmt",
                        CTLFLAG_RD, NULL, "stats_hw_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {

                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                /* Tx Related */

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "total_bytes",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_bytes,
                        "total_bytes");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "total_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_pkts,
                        "total_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "errors",
                        CTLFLAG_RD, &ha->hw.xmt[i].errors,
                        "errors");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "pkts_dropped",
                        CTLFLAG_RD, &ha->hw.xmt[i].pkts_dropped,
                        "pkts_dropped");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "switch_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].switch_pkts,
                        "switch_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "num_buffers",
                        CTLFLAG_RD, &ha->hw.xmt[i].num_buffers,
                        "num_buffers");
	}

	return;
}

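/*
 * Export the mailbox command completion-time histogram kept in
 * ha->hw.mbx_comp_msecs[] (see qla_mbx_cmd() below): entries 0-14
 * count completions in 200ms buckets up to 3 seconds, entry 15 covers
 * 3-4 seconds, entry 16 anything longer, and entries 17 and 18 count
 * host and firmware mailbox control-register timeouts respectively.
 */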
static void
qlnx_add_hw_mbx_cmpl_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *node_children;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        node_children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_lt_200ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[0],
		"mbx_completion_time_lt_200ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_200ms_400ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[1],
		"mbx_completion_time_200ms_400ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_400ms_600ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[2],
		"mbx_completion_time_400ms_600ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_600ms_800ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[3],
		"mbx_completion_time_600ms_800ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_800ms_1000ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[4],
		"mbx_completion_time_800ms_1000ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_1000ms_1200ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[5],
		"mbx_completion_time_1000ms_1200ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_1200ms_1400ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[6],
		"mbx_completion_time_1200ms_1400ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_1400ms_1600ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[7],
		"mbx_completion_time_1400ms_1600ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_1600ms_1800ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[8],
		"mbx_completion_time_1600ms_1800ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_1800ms_2000ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[9],
		"mbx_completion_time_1800ms_2000ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_2000ms_2200ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[10],
		"mbx_completion_time_2000ms_2200ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_2200ms_2400ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[11],
		"mbx_completion_time_2200ms_2400ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_2400ms_2600ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[12],
		"mbx_completion_time_2400ms_2600ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_2600ms_2800ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[13],
		"mbx_completion_time_2600ms_2800ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_2800ms_3000ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[14],
		"mbx_completion_time_2800ms_3000ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_3000ms_4000ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[15],
		"mbx_completion_time_3000ms_4000ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_time_4000ms_5000ms",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[16],
		"mbx_completion_time_4000ms_5000ms");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_host_mbx_cntrl_timeout",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[17],
		"mbx_completion_host_mbx_cntrl_timeout");

	SYSCTL_ADD_QUAD(ctx, node_children,
		OID_AUTO, "mbx_completion_fw_mbx_cntrl_timeout",
		CTLFLAG_RD, &ha->hw.mbx_comp_msecs[18],
		"mbx_completion_fw_mbx_cntrl_timeout");
	return;
}

static void
qlnx_add_hw_stats_sysctls(qla_host_t *ha)
{
	qlnx_add_hw_mac_stats_sysctls(ha);
	qlnx_add_hw_rcv_stats_sysctls(ha);
	qlnx_add_hw_xmt_stats_sysctls(ha);
	qlnx_add_hw_mbx_cmpl_stats_sysctls(ha);

	return;
}

static void
qlnx_add_drvr_sds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_sds",
                        CTLFLAG_RD, NULL, "stats_drvr_sds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_sds_rings; i++) {

                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "intr_count",
                        CTLFLAG_RD, &ha->hw.sds[i].intr_count,
                        "intr_count");

                SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "rx_free",
                        CTLFLAG_RD, &ha->hw.sds[i].rx_free,
			ha->hw.sds[i].rx_free, "rx_free");
	}

	return;
}

static void
qlnx_add_drvr_rds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_rds",
                        CTLFLAG_RD, NULL, "stats_drvr_rds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_rds_rings; i++) {

                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "count",
                        CTLFLAG_RD, &ha->hw.rds[i].count,
                        "count");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_pkt_count",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_pkt_count,
                        "lro_pkt_count");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_bytes",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_bytes,
                        "lro_bytes");
	}

	return;
}

static void
qlnx_add_drvr_tx_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_xmt",
                        CTLFLAG_RD, NULL, "stats_drvr_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {

                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "count",
                        CTLFLAG_RD, &ha->tx_ring[i].count,
                        "count");

#ifdef QL_ENABLE_ISCSI_TLV
                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "iscsi_pkt_count",
                        CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count,
                        "iscsi_pkt_count");
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
	}

	return;
}

static void
qlnx_add_drvr_stats_sysctls(qla_host_t *ha)
{
	qlnx_add_drvr_sds_stats(ha);
	qlnx_add_drvr_rds_stats(ha);
	qlnx_add_drvr_tx_stats(ha);
	return;
}

/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
        device_t	dev;

        dev = ha->pci_dev;

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
		ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
		ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
		ha->hw.num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
		ha->txr_idx, "Tx Ring Used");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
		ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

	ha->hw.sds_cidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
		ha->hw.sds_cidx_thres,
		"Number of SDS entries to process before updating"
		" SDS Ring Consumer Index");

	ha->hw.rds_pidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
		ha->hw.rds_pidx_thres,
		"Number of Rcv Rings Entries to post before updating"
		" RDS Ring Producer Index");

        ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
                &ha->hw.rcv_intr_coalesce,
                ha->hw.rcv_intr_coalesce,
                "Rcv Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
                &ha->hw.xmt_intr_coalesce,
                ha->hw.xmt_intr_coalesce,
                "Xmt Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_port_cfg, "I",
                        "Set Port Configuration if a value is written, "
                        "otherwise Get Port Configuration\n"
                        "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
                        "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
                        "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
                        " 1 = xmt only; 2 = rcv only;\n"
                );
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
		(void *)ha, 0,
		qla_sysctl_set_cam_search_mode, "I",
			"Set CAM Search Mode\n"
			"\t 1 = search mode internal\n"
			"\t 2 = search mode auto\n");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
		(void *)ha, 0,
		qla_sysctl_get_cam_search_mode, "I",
			"Get CAM Search Mode\n"
			"\t 1 = search mode internal\n"
			"\t 2 = search mode auto\n");

        ha->hw.enable_9kb = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
                ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");

        ha->hw.enable_hw_lro = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro,
                ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true\n"
		"\t 1 : Hardware LRO if LRO is enabled\n"
		"\t 0 : Software LRO if LRO is enabled\n"
		"\t Any change requires ifconfig down/up to take effect\n"
		"\t Note that LRO may be turned off/on via ifconfig\n");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sp_log_index", CTLFLAG_RW, &ha->hw.sp_log_index,
                ha->hw.sp_log_index, "sp_log_index");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sp_log_stop", CTLFLAG_RW, &ha->hw.sp_log_stop,
                ha->hw.sp_log_stop, "sp_log_stop");

        ha->hw.sp_log_stop_events = 0;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sp_log_stop_events", CTLFLAG_RW,
		&ha->hw.sp_log_stop_events,
                ha->hw.sp_log_stop_events, "Slow path event log is stopped"
		" when OR of the following events occur\n"
		"\t 0x01 : Heart beat Failure\n"
		"\t 0x02 : Temperature Failure\n"
		"\t 0x04 : HW Initialization Failure\n"
		"\t 0x08 : Interface Initialization Failure\n"
		"\t 0x10 : Error Recovery Failure\n");

	ha->hw.mdump_active = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
		ha->hw.mdump_active,
		"Minidump retrieval is Active");

	ha->hw.mdump_done = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "mdump_done", CTLFLAG_RW,
		&ha->hw.mdump_done, ha->hw.mdump_done,
		"Minidump has been done and available for retrieval");

	ha->hw.mdump_capture_mask = 0xF;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
		&ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
		"Minidump capture mask");
#ifdef QL_DBG

	ha->err_inject = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "err_inject",
                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                "Error to be injected\n"
                "\t\t\t 0: No Errors\n"
                "\t\t\t 1: rcv: rxb struct invalid\n"
                "\t\t\t 2: rcv: mp == NULL\n"
                "\t\t\t 3: lro: rxb struct invalid\n"
                "\t\t\t 4: lro: mp == NULL\n"
                "\t\t\t 5: rcv: num handles invalid\n"
                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                "\t\t\t 8: mbx: mailbox command failure\n"
                "\t\t\t 9: heartbeat failure\n"
                "\t\t\t A: temperature failure\n"
		"\t\t\t 11: m_getcl or m_getjcl failure\n"
		"\t\t\t 13: Invalid Descriptor Count in SGL Receive\n"
		"\t\t\t 14: Invalid Descriptor Count in LRO Receive\n"
		"\t\t\t 15: peer port error recovery failure\n"
		"\t\t\t 16: tx_buf[next_prod_index].mbuf != NULL\n");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

        ha->hw.user_pri_nic = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
                ha->hw.user_pri_nic,
                "VLAN Tag User Priority for Normal Ethernet Packets");

        ha->hw.user_pri_iscsi = 4;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
                ha->hw.user_pri_iscsi,
                "VLAN Tag User Priority for iSCSI Packets");

	qlnx_add_hw_stats_sysctls(ha);
	qlnx_add_drvr_stats_sysctls(ha);

	return;
}

void
ql_hw_link_status(qla_host_t *ha)
{
	device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

	if (ha->hw.link_up) {
		device_printf(ha->pci_dev, "link Up\n");
	} else {
		device_printf(ha->pci_dev, "link Down\n");
	}

	if (ha->hw.fduplex) {
		device_printf(ha->pci_dev, "Full Duplex\n");
	} else {
		device_printf(ha->pci_dev, "Half Duplex\n");
	}

	if (ha->hw.autoneg) {
		device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
	} else {
		device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
	}

	switch (ha->hw.link_speed) {
	case 0x710:
		device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
		break;

	case 0x3E8:
		device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
		break;

	case 0x64:
		device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
		break;

	default:
		device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
		break;
	}

	switch (ha->hw.module_type) {

	case 0x01:
		device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
		break;

	case 0x02:
		device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
		break;

	case 0x03:
		device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
		break;

	case 0x04:
		device_printf(ha->pci_dev,
			"Module Type 10GE Passive Copper(Compliant)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x05:
		device_printf(ha->pci_dev, "Module Type 10GE Active"
			" Limiting Copper(Compliant)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x06:
		device_printf(ha->pci_dev,
			"Module Type 10GE Passive Copper"
			" (Legacy, Best Effort)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x07:
		device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
		break;

	case 0x08:
		device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
		break;

	case 0x09:
		device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
		break;

	case 0x0A:
		device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
		break;

	case 0x0B:
		device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
			" (Legacy, Best Effort)\n");
		break;

	default:
		device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
			ha->hw.module_type);
		break;
	}

	if (ha->hw.link_faults == 1)
		device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
void
ql_free_dma(qla_host_t *ha)
{
	uint32_t i;

        if (ha->hw.dma_buf.flags.sds_ring) {
		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
		}
        	ha->hw.dma_buf.flags.sds_ring = 0;
	}

        if (ha->hw.dma_buf.flags.rds_ring) {
		for (i = 0; i < ha->hw.num_rds_rings; i++) {
			ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
		}
        	ha->hw.dma_buf.flags.rds_ring = 0;
	}

        if (ha->hw.dma_buf.flags.tx_ring) {
		ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
        	ha->hw.dma_buf.flags.tx_ring = 0;
	}
	ql_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
	uint32_t		i, j, size, tx_ring_size;
	qla_hw_t		*hw;
	qla_hw_tx_cntxt_t	*tx_cntxt;
	uint8_t			*vaddr;
	bus_addr_t		paddr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	hw = &ha->hw;
	/*
	 * Allocate Transmit Ring
	 */
	tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
	size = (tx_ring_size * ha->hw.num_tx_rings);

	hw->dma_buf.tx_ring.alignment = 8;
	hw->dma_buf.tx_ring.size = size + PAGE_SIZE;

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto ql_alloc_dma_exit;
        }

	vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
	paddr = hw->dma_buf.tx_ring.dma_addr;

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
		tx_cntxt->tx_ring_paddr = paddr;

		vaddr += tx_ring_size;
		paddr += tx_ring_size;
	}

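	/*
	 * The extra PAGE_SIZE allocated beyond the rings provides the
	 * space, immediately following them, used below for the
	 * per-ring transmit consumer indices.
	 */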
	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_cons = (uint32_t *)vaddr;
		tx_cntxt->tx_cons_paddr = paddr;

		vaddr += sizeof (uint32_t);
		paddr += sizeof (uint32_t);
	}

        ha->hw.dma_buf.flags.tx_ring = 1;

	QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
		__func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
		hw->dma_buf.tx_ring.dma_b));
	/*
	 * Allocate Receive Descriptor Rings
	 */

	for (i = 0; i < hw->num_rds_rings; i++) {

		hw->dma_buf.rds_ring[i].alignment = 8;
		hw->dma_buf.rds_ring[i].size =
			(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

		if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
			device_printf(dev, "%s: rds ring[%d] alloc failed\n",
				__func__, i);

			for (j = 0; j < i; j++)
				ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

			goto ql_alloc_dma_exit;
		}
		QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
			__func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
			hw->dma_buf.rds_ring[i].dma_b));
	}

	hw->dma_buf.flags.rds_ring = 1;

	/*
	 * Allocate Status Descriptor Rings
	 */

	for (i = 0; i < hw->num_sds_rings; i++) {
		hw->dma_buf.sds_ring[i].alignment = 8;
		hw->dma_buf.sds_ring[i].size =
			(sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

		if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
			device_printf(dev, "%s: sds ring alloc failed\n",
				__func__);

			for (j = 0; j < i; j++)
				ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

			goto ql_alloc_dma_exit;
		}
		QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
			__func__, i,
			(void *)(hw->dma_buf.sds_ring[i].dma_addr),
			hw->dma_buf.sds_ring[i].dma_b));
	}
	for (i = 0; i < hw->num_sds_rings; i++) {
		hw->sds[i].sds_ring_base =
			(q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
	}

	hw->dma_buf.flags.sds_ring = 1;

	return 0;

ql_alloc_dma_exit:
	ql_free_dma(ha);
	return -1;
}

#define Q8_MBX_MSEC_DELAY	5000

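/*
 * Name: qla_mbx_cmd
 * Function: Issues a mailbox command to the firmware: waits for the
 * host mailbox control register to go idle, writes the n_hmbox command
 * words into the host mailbox registers, rings the firmware through
 * Q8_HOST_MBOX_CNTRL, then polls Q8_FW_MBOX_CNTRL and copies n_fwmbox
 * response words back. Completion times are histogrammed in
 * ha->hw.mbx_comp_msecs[]; control-register timeouts initiate error
 * recovery. When no_pause is set, DELAY() is used instead of sleeping.
 */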
static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
	uint32_t i;
	uint32_t data;
	int ret = 0;
	uint64_t start_usecs;
	uint64_t end_usecs;
	uint64_t msecs_200;

	ql_sp_log(ha, 0, 5, no_pause, h_mbox[0], h_mbox[1], h_mbox[2], h_mbox[3]);

	if (ha->offline || ha->qla_initiate_recovery) {
		ql_sp_log(ha, 1, 2, ha->offline, ha->qla_initiate_recovery, 0, 0, 0);
		goto exit_qla_mbx_cmd;
	}

	if (((ha->err_inject & 0xFFFF) == INJCT_MBX_CMD_FAILURE) &&
		(((ha->err_inject & ~0xFFFF) == ((h_mbox[0] & 0xFFFF) << 16)) ||
		!(ha->err_inject & ~0xFFFF))) {
		ret = -3;
		QL_INITIATE_RECOVERY(ha);
		goto exit_qla_mbx_cmd;
	}

	start_usecs = qla_get_usec_timestamp();

	if (no_pause)
		i = 1000;
	else
		i = Q8_MBX_MSEC_DELAY;

	while (i) {

		if (ha->qla_initiate_recovery) {
			ql_sp_log(ha, 2, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
			return (-1);
		}

		data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
		if (data == 0)
			break;
		if (no_pause) {
			DELAY(1000);
		} else {
			qla_mdelay(__func__, 1);
		}
		i--;
	}

	if (i == 0) {
		device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
			__func__, data);
		ql_sp_log(ha, 3, 1, data, 0, 0, 0, 0);
		ret = -1;
		ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 2)]++;
		QL_INITIATE_RECOVERY(ha);
		goto exit_qla_mbx_cmd;
	}

	for (i = 0; i < n_hmbox; i++) {
		WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
		h_mbox++;
	}

	WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);

	i = Q8_MBX_MSEC_DELAY;
	while (i) {

		if (ha->qla_initiate_recovery) {
			ql_sp_log(ha, 4, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
			return (-1);
		}

		data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

		if ((data & 0x3) == 1) {
			data = READ_REG32(ha, Q8_FW_MBOX0);
			if ((data & 0xF000) != 0x8000)
				break;
		}
		if (no_pause) {
			DELAY(1000);
		} else {
			qla_mdelay(__func__, 1);
		}
		i--;
	}
	if (i == 0) {
		device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
			__func__, data);
		ql_sp_log(ha, 5, 1, data, 0, 0, 0, 0);
		ret = -2;
		ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 1)]++;
		QL_INITIATE_RECOVERY(ha);
		goto exit_qla_mbx_cmd;
	}

	for (i = 0; i < n_fwmbox; i++) {

		if (ha->qla_initiate_recovery) {
			ql_sp_log(ha, 6, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
			return (-1);
		}

		/* index rather than advance fw_mbox; it is logged below */
		fw_mbox[i] = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
	}

	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

	end_usecs = qla_get_usec_timestamp();

	if (end_usecs > start_usecs) {
		msecs_200 = (end_usecs - start_usecs)/(1000 * 200);

		if (msecs_200 < 15)
			ha->hw.mbx_comp_msecs[msecs_200]++;
		else if (msecs_200 < 20)
			ha->hw.mbx_comp_msecs[15]++;
		else {
			device_printf(ha->pci_dev, "%s: [%ld, %ld] %ld\n", __func__,
				start_usecs, end_usecs, msecs_200);
			ha->hw.mbx_comp_msecs[16]++;
		}
	}
	ql_sp_log(ha, 7, 5, fw_mbox[0], fw_mbox[1], fw_mbox[2], fw_mbox[3], fw_mbox[4]);

exit_qla_mbx_cmd:
	return (ret);
}

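/*
 * Name: qla_get_nic_partition
 * Function: Issues the Q8_MBX_GET_NIC_PARTITION mailbox command and
 * optionally returns whether 9KB receive buffers are supported (bit 7
 * of response word 16) and the number of receive queues (bits 31:16
 * of response word 6).
 */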
int
qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
	uint32_t *num_rcvq)
{
	uint32_t *mbox, err;
	device_t dev = ha->pci_dev;

	bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

	mbox = ha->hw.mbox;

	mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

	if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	err = mbox[0] >> 25;

	if (supports_9kb != NULL) {
		if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
			*supports_9kb = 1;
		else
			*supports_9kb = 0;
	}

	if (num_rcvq != NULL)
		*num_rcvq = ((mbox[6] >> 16) & 0xFFFF);

	if ((err != 1) && (err != 0)) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return 0;
}

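/*
 * Name: qla_config_intr_cntxt
 * Function: Creates (create != 0) or deletes the MSI-X interrupt
 * contexts [start_idx .. start_idx + num_intrs - 1] in the firmware;
 * on create, the interrupt id and source returned for each entry are
 * recorded in ha->hw.intr_id[] and ha->hw.intr_src[].
 */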
1560 static int
1561 qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
1562 	uint32_t create)
1563 {
1564 	uint32_t i, err;
1565 	device_t dev = ha->pci_dev;
1566 	q80_config_intr_t *c_intr;
1567 	q80_config_intr_rsp_t *c_intr_rsp;
1568 
1569 	c_intr = (q80_config_intr_t *)ha->hw.mbox;
1570 	bzero(c_intr, (sizeof (q80_config_intr_t)));
1571 
1572 	c_intr->opcode = Q8_MBX_CONFIG_INTR;
1573 
1574 	c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
1575 	c_intr->count_version |= Q8_MBX_CMD_VERSION;
1576 
1577 	c_intr->nentries = num_intrs;
1578 
1579 	for (i = 0; i < num_intrs; i++) {
1580 		if (create) {
1581 			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
1582 			c_intr->intr[i].msix_index = start_idx + 1 + i;
1583 		} else {
1584 			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
1585 			c_intr->intr[i].msix_index =
1586 				ha->hw.intr_id[(start_idx + i)];
1587 		}
1588 
1589 		c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
1590 	}
1591 
1592 	if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
1593 		(sizeof (q80_config_intr_t) >> 2),
1594 		ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
1595 		device_printf(dev, "%s: %s failed0\n", __func__,
1596 			(create ? "create" : "delete"));
1597 		return (-1);
1598 	}
1599 
1600 	c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;
1601 
1602 	err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);
1603 
1604 	if (err) {
1605 		device_printf(dev, "%s: %s failed1 [0x%08x, %d]\n", __func__,
1606 			(create ? "create" : "delete"), err, c_intr_rsp->nentries);
1607 
1608 		for (i = 0; i < c_intr_rsp->nentries; i++) {
1609 			device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
1610 				__func__, i,
1611 				c_intr_rsp->intr[i].status,
1612 				c_intr_rsp->intr[i].intr_id,
1613 				c_intr_rsp->intr[i].intr_src);
1614 		}
1615 
1616 		return (-1);
1617 	}
1618 
1619 	for (i = 0; ((i < num_intrs) && create); i++) {
1620 		if (!c_intr_rsp->intr[i].status) {
1621 			ha->hw.intr_id[(start_idx + i)] =
1622 				c_intr_rsp->intr[i].intr_id;
1623 			ha->hw.intr_src[(start_idx + i)] =
1624 				c_intr_rsp->intr[i].intr_src;
1625 		}
1626 	}
1627 
1628 	return (0);
1629 }
1630 
1631 /*
1632  * Name: qla_config_rss
1633  * Function: Configure RSS for the context/interface.
1634  */
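/*
 * 40-byte RSS hash key (5 x 64-bit words), presumably consumed by the
 * firmware's Toeplitz hash; the driver treats it as opaque data.
 */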
1635 static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
1636 			0x8030f20c77cb2da3ULL,
1637 			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
1638 			0x255b0ec26d5a56daULL };
1639 
1640 static int
1641 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
1642 {
1643 	q80_config_rss_t	*c_rss;
1644 	q80_config_rss_rsp_t	*c_rss_rsp;
1645 	uint32_t		err, i;
1646 	device_t		dev = ha->pci_dev;
1647 
1648 	c_rss = (q80_config_rss_t *)ha->hw.mbox;
1649 	bzero(c_rss, (sizeof (q80_config_rss_t)));
1650 
1651 	c_rss->opcode = Q8_MBX_CONFIG_RSS;
1652 
1653 	c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
1654 	c_rss->count_version |= Q8_MBX_CMD_VERSION;
1655 
1656 	c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
1657 				Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
1658 	//c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
1659 	//			Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
1660 
1661 	c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
1662 	c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
1663 
1664 	c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
1665 
1666 	c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
1667 	c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
1668 
1669 	c_rss->cntxt_id = cntxt_id;
1670 
1671 	for (i = 0; i < 5; i++) {
1672 		c_rss->rss_key[i] = rss_key[i];
1673 	}
1674 
1675 	if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
1676 		(sizeof (q80_config_rss_t) >> 2),
1677 		ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
1678 		device_printf(dev, "%s: failed0\n", __func__);
1679 		return (-1);
1680 	}
1681 	c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
1682 
1683 	err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
1684 
1685 	if (err) {
1686 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1687 		return (-1);
1688 	}
1689 	return 0;
1690 }
1691 
1692 static int
1693 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
1694         uint16_t cntxt_id, uint8_t *ind_table)
1695 {
1696         q80_config_rss_ind_table_t      *c_rss_ind;
1697         q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
1698         uint32_t                        err;
1699         device_t                        dev = ha->pci_dev;
1700 
1701 	if ((count > Q8_RSS_IND_TBL_SIZE) ||
1702 		((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
1703 		device_printf(dev, "%s: illegal start_idx/count [%d, %d]\n",
1704 			__func__, start_idx, count);
1705 		return (-1);
1706 	}
1707 
1708         c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
1709         bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
1710 
1711         c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
1712         c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
1713         c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
1714 
1715 	c_rss_ind->start_idx = start_idx;
1716 	c_rss_ind->end_idx = start_idx + count - 1;
1717 	c_rss_ind->cntxt_id = cntxt_id;
1718 	bcopy(ind_table, c_rss_ind->ind_table, count);
1719 
1720 	if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
1721 		(sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
1722 		(sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
1723 		device_printf(dev, "%s: failed0\n", __func__);
1724 		return (-1);
1725 	}
1726 
1727 	c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
1728 	err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
1729 
1730 	if (err) {
1731 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1732 		return (-1);
1733 	}
1734 	return 0;
1735 }
1736 
1737 /*
1738  * Name: qla_config_intr_coalesce
1739  * Function: Configure Interrupt Coalescing.
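 *	The {rcv,xmt}_intr_coalesce values pack two 16-bit fields:
 *	max_pkts in bits 15:0 and max_mswait in bits 31:16 (see the
 *	unpacking below). Illustrative example: 0x00030100 requests
 *	coalescing of up to 0x100 packets or 3 wait units, whichever
 *	occurs first (unit interpretation assumed from the field names).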
1740  */
1741 static int
1742 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
1743 	int rcv)
1744 {
1745 	q80_config_intr_coalesc_t	*intrc;
1746 	q80_config_intr_coalesc_rsp_t	*intrc_rsp;
1747 	uint32_t			err, i;
1748 	device_t			dev = ha->pci_dev;
1749 
1750 	intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
1751 	bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
1752 
1753 	intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
1754 	intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
1755 	intrc->count_version |= Q8_MBX_CMD_VERSION;
1756 
1757 	if (rcv) {
1758 		intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
1759 		intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
1760 		intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
1761 	} else {
1762 		intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
1763 		intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
1764 		intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
1765 	}
1766 
1767 	intrc->cntxt_id = cntxt_id;
1768 
1769 	if (tenable) {
1770 		intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
1771 		intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
1772 
1773 		for (i = 0; i < ha->hw.num_sds_rings; i++) {
1774 			intrc->sds_ring_mask |= (1 << i);
1775 		}
1776 		intrc->ms_timeout = 1000;
1777 	}
1778 
1779 	if (qla_mbx_cmd(ha, (uint32_t *)intrc,
1780 		(sizeof (q80_config_intr_coalesc_t) >> 2),
1781 		ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
1782 		device_printf(dev, "%s: failed0\n", __func__);
1783 		return (-1);
1784 	}
1785 	intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
1786 
1787 	err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
1788 
1789 	if (err) {
1790 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1791 		return (-1);
1792 	}
1793 
1794 	return 0;
1795 }
1796 
1798 /*
1799  * Name: qla_config_mac_addr
1800  * Function: binds a MAC address to the context/interface.
1801  *	Can be unicast, multicast or broadcast.
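 *	Up to Q8_MAX_MAC_ADDRS consecutive 6-byte addresses can be
 *	programmed in a single mailbox call; see qla_hw_all_mcast()
 *	below for a caller that batches multicast addresses this way.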
1802  */
1803 static int
1804 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
1805 	uint32_t num_mac)
1806 {
1807 	q80_config_mac_addr_t		*cmac;
1808 	q80_config_mac_addr_rsp_t	*cmac_rsp;
1809 	uint32_t			err;
1810 	device_t			dev = ha->pci_dev;
1811 	int				i;
1812 	uint8_t				*mac_cpy = mac_addr;
1813 
1814 	if (num_mac > Q8_MAX_MAC_ADDRS) {
1815 		device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
1816 			__func__, (add_mac ? "Add" : "Del"), num_mac);
1817 		return (-1);
1818 	}
1819 
1820 	cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
1821 	bzero(cmac, (sizeof (q80_config_mac_addr_t)));
1822 
1823 	cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
1824 	cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
1825 	cmac->count_version |= Q8_MBX_CMD_VERSION;
1826 
1827 	if (add_mac)
1828 		cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
1829 	else
1830 		cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
1831 
1832 	cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
1833 
1834 	cmac->nmac_entries = num_mac;
1835 	cmac->cntxt_id = ha->hw.rcv_cntxt_id;
1836 
1837 	for (i = 0; i < num_mac; i++) {
1838 		bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN);
1839 		mac_addr = mac_addr + ETHER_ADDR_LEN;
1840 	}
1841 
1842 	if (qla_mbx_cmd(ha, (uint32_t *)cmac,
1843 		(sizeof (q80_config_mac_addr_t) >> 2),
1844 		ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
1845 		device_printf(dev, "%s: %s failed0\n", __func__,
1846 			(add_mac ? "Add" : "Del"));
1847 		return (-1);
1848 	}
1849 	cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
1850 
1851 	err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
1852 
1853 	if (err) {
1854 		device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
1855 			(add_mac ? "Add" : "Del"), err);
1856 		for (i = 0; i < num_mac; i++) {
1857 			device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
1858 				__func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
1859 				mac_cpy[3], mac_cpy[4], mac_cpy[5]);
1860 			mac_cpy += ETHER_ADDR_LEN;
1861 		}
1862 		return (-1);
1863 	}
1864 
1865 	return 0;
1866 }
1867 
1869 /*
1870  * Name: qla_set_mac_rcv_mode
1871  * Function: Enable/Disable AllMulticast and Promiscuous Modes.
1872  */
1873 static int
1874 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
1875 {
1876 	q80_config_mac_rcv_mode_t	*rcv_mode;
1877 	uint32_t			err;
1878 	q80_config_mac_rcv_mode_rsp_t	*rcv_mode_rsp;
1879 	device_t			dev = ha->pci_dev;
1880 
1881 	rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
1882 	bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
1883 
1884 	rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
1885 	rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
1886 	rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
1887 
1888 	rcv_mode->mode = mode;
1889 
1890 	rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
1891 
1892 	if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
1893 		(sizeof (q80_config_mac_rcv_mode_t) >> 2),
1894 		ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
1895 		device_printf(dev, "%s: failed0\n", __func__);
1896 		return (-1);
1897 	}
1898 	rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
1899 
1900 	err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
1901 
1902 	if (err) {
1903 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1904 		return (-1);
1905 	}
1906 
1907 	return 0;
1908 }
1909 
1910 int
1911 ql_set_promisc(qla_host_t *ha)
1912 {
1913 	int ret;
1914 
1915 	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1916 	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1917 	return (ret);
1918 }
1919 
1920 void
1921 qla_reset_promisc(qla_host_t *ha)
1922 {
1923 	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1924 	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1925 }
1926 
1927 int
1928 ql_set_allmulti(qla_host_t *ha)
1929 {
1930 	int ret;
1931 
1932 	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
1933 	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1934 	return (ret);
1935 }
1936 
1937 void
1938 qla_reset_allmulti(qla_host_t *ha)
1939 {
1940 	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
1941 	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1942 }
1943 
1944 /*
1945  * Name: ql_set_max_mtu
1946  * Function:
1947  *	Sets the maximum transmission unit (MTU) for the specified rcv context.
1948  */
1949 int
1950 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1951 {
1952 	device_t		dev;
1953 	q80_set_max_mtu_t	*max_mtu;
1954 	q80_set_max_mtu_rsp_t	*max_mtu_rsp;
1955 	uint32_t		err;
1956 
1957 	dev = ha->pci_dev;
1958 
1959 	max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
1960 	bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
1961 
1962 	max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
1963 	max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
1964 	max_mtu->count_version |= Q8_MBX_CMD_VERSION;
1965 
1966 	max_mtu->cntxt_id = cntxt_id;
1967 	max_mtu->mtu = mtu;
1968 
1969         if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1970 		(sizeof (q80_set_max_mtu_t) >> 2),
1971                 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1972                 device_printf(dev, "%s: failed\n", __func__);
1973                 return -1;
1974         }
1975 
1976 	max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1977 
1978         err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1979 
1980         if (err) {
1981                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1982         }
1983 
1984 	return 0;
1985 }
1986 
1987 static int
1988 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1989 {
1990 	device_t		dev;
1991 	q80_link_event_t	*lnk;
1992 	q80_link_event_rsp_t	*lnk_rsp;
1993 	uint32_t		err;
1994 
1995 	dev = ha->pci_dev;
1996 
1997 	lnk = (q80_link_event_t *)ha->hw.mbox;
1998 	bzero(lnk, (sizeof (q80_link_event_t)));
1999 
2000 	lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
2001 	lnk->count_version = (sizeof (q80_link_event_t) >> 2);
2002 	lnk->count_version |= Q8_MBX_CMD_VERSION;
2003 
2004 	lnk->cntxt_id = cntxt_id;
2005 	lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
2006 
2007         if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
2008                 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
2009                 device_printf(dev, "%s: failed\n", __func__);
2010                 return -1;
2011         }
2012 
2013 	lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
2014 
2015         err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
2016 
2017         if (err) {
2018                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2019         }
2020 
2021 	return 0;
2022 }
2023 
2024 static int
2025 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
2026 {
2027 	device_t		dev;
2028 	q80_config_fw_lro_t	*fw_lro;
2029 	q80_config_fw_lro_rsp_t	*fw_lro_rsp;
2030 	uint32_t		err;
2031 
2032 	dev = ha->pci_dev;
2033 
2034 	fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
2035 	bzero(fw_lro, sizeof(q80_config_fw_lro_t));
2036 
2037 	fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
2038 	fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
2039 	fw_lro->count_version |= Q8_MBX_CMD_VERSION;
2040 
2041 	fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
2042 	fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
2043 
2044 	fw_lro->cntxt_id = cntxt_id;
2045 
2046 	if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
2047 		(sizeof (q80_config_fw_lro_t) >> 2),
2048 		ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
2049 		device_printf(dev, "%s: failed\n", __func__);
2050 		return -1;
2051 	}
2052 
2053 	fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
2054 
2055 	err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
2056 
2057 	if (err) {
2058 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2059 	}
2060 
2061 	return 0;
2062 }
2063 
2064 static int
2065 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
2066 {
2067 	device_t                dev;
2068 	q80_hw_config_t         *hw_config;
2069 	q80_hw_config_rsp_t     *hw_config_rsp;
2070 	uint32_t                err;
2071 
2072 	dev = ha->pci_dev;
2073 
2074 	hw_config = (q80_hw_config_t *)ha->hw.mbox;
2075 	bzero(hw_config, sizeof (q80_hw_config_t));
2076 
2077 	hw_config->opcode = Q8_MBX_HW_CONFIG;
2078 	hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
2079 	hw_config->count_version |= Q8_MBX_CMD_VERSION;
2080 
2081 	hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;
2082 
2083 	hw_config->u.set_cam_search_mode.mode = search_mode;
2084 
2085 	if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
2086 		(sizeof (q80_hw_config_t) >> 2),
2087 		ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
2088 		device_printf(dev, "%s: failed\n", __func__);
2089 		return -1;
2090 	}
2091 	hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
2092 
2093 	err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
2094 
2095 	if (err) {
2096 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2097 	}
2098 
2099 	return 0;
2100 }
2101 
2102 static int
2103 qla_get_cam_search_mode(qla_host_t *ha)
2104 {
2105 	device_t                dev;
2106 	q80_hw_config_t         *hw_config;
2107 	q80_hw_config_rsp_t     *hw_config_rsp;
2108 	uint32_t                err;
2109 
2110 	dev = ha->pci_dev;
2111 
2112 	hw_config = (q80_hw_config_t *)ha->hw.mbox;
2113 	bzero(hw_config, sizeof (q80_hw_config_t));
2114 
2115 	hw_config->opcode = Q8_MBX_HW_CONFIG;
2116 	hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
2117 	hw_config->count_version |= Q8_MBX_CMD_VERSION;
2118 
2119 	hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;
2120 
2121 	if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
2122 		(sizeof (q80_hw_config_t) >> 2),
2123 		ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
2124 		device_printf(dev, "%s: failed\n", __func__);
2125 		return -1;
2126 	}
2127 	hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
2128 
2129 	err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
2130 
2131 	if (err) {
2132 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2133 	} else {
2134 		device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
2135 			hw_config_rsp->u.get_cam_search_mode.mode);
2136 	}
2137 
2138 	return 0;
2139 }
2140 
2141 static int
2142 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
2143 {
2144 	device_t		dev;
2145 	q80_get_stats_t		*stat;
2146 	q80_get_stats_rsp_t	*stat_rsp;
2147 	uint32_t		err;
2148 
2149 	dev = ha->pci_dev;
2150 
2151 	stat = (q80_get_stats_t *)ha->hw.mbox;
2152 	bzero(stat, (sizeof (q80_get_stats_t)));
2153 
2154 	stat->opcode = Q8_MBX_GET_STATS;
2155 	stat->count_version = 2;
2156 	stat->count_version |= Q8_MBX_CMD_VERSION;
2157 
2158 	stat->cmd = cmd;
2159 
2160         if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
2161                 ha->hw.mbox, (rsp_size >> 2), 0)) {
2162                 device_printf(dev, "%s: failed\n", __func__);
2163                 return -1;
2164         }
2165 
2166 	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
2167 
2168         err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
2169 
2170         if (err) {
2171                 return -1;
2172         }
2173 
2174 	return 0;
2175 }
2176 
2177 void
2178 ql_get_stats(qla_host_t *ha)
2179 {
2180 	q80_get_stats_rsp_t	*stat_rsp;
2181 	q80_mac_stats_t		*mstat;
2182 	q80_xmt_stats_t		*xstat;
2183 	q80_rcv_stats_t		*rstat;
2184 	uint32_t		cmd;
2185 	int			i;
2186 	struct ifnet *ifp = ha->ifp;
2187 
2188 	if (ifp == NULL)
2189 		return;
2190 
2191 	if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) != 0) {
2192 		device_printf(ha->pci_dev, "%s: failed\n", __func__);
2193 		return;
2194 	}
2195 
2196 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2197 		QLA_UNLOCK(ha, __func__);
2198 		return;
2199 	}
2200 
2201 	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
2202 	/*
2203 	 * Get MAC Statistics
2204 	 */
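	/*
	 * Bits 31:16 of the stats command word select the query target:
	 * the PCI function here for MAC statistics, and the receive or
	 * transmit context id for the per-context queries further below.
	 */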
2205 	cmd = Q8_GET_STATS_CMD_TYPE_MAC;
2206 //	cmd |= Q8_GET_STATS_CMD_CLEAR;
2207 
2208 	cmd |= ((ha->pci_func & 0x1) << 16);
2209 
2210 	if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2211 		ha->offline)
2212 		goto ql_get_stats_exit;
2213 
2214 	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2215 		mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
2216 		bcopy(mstat, &ha->hw.mac, sizeof(q80_mac_stats_t));
2217 	} else {
2218                 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
2219 			__func__, ha->hw.mbox[0]);
2220 	}
2221 	/*
2222 	 * Get RCV Statistics
2223 	 */
2224 	cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
2225 //	cmd |= Q8_GET_STATS_CMD_CLEAR;
2226 	cmd |= (ha->hw.rcv_cntxt_id << 16);
2227 
2228 	if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2229 		ha->offline)
2230 		goto ql_get_stats_exit;
2231 
2232 	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2233 		rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
2234 		bcopy(rstat, &ha->hw.rcv, sizeof(q80_rcv_stats_t));
2235 	} else {
2236                 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
2237 			__func__, ha->hw.mbox[0]);
2238 	}
2239 
2240 	if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2241 		ha->offline)
2242 		goto ql_get_stats_exit;
2243 	/*
2244 	 * Get XMT Statistics
2245 	 */
2246 	for (i = 0 ; (i < ha->hw.num_tx_rings); i++) {
2247 		if (ha->qla_watchdog_pause ||
2248 			(!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2249 			ha->offline)
2250 			goto ql_get_stats_exit;
2251 
2252 		cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
2253 //		cmd |= Q8_GET_STATS_CMD_CLEAR;
2254 		cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
2255 
2256 		if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
2257 			== 0) {
2258 			xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
2259 			bcopy(xstat, &ha->hw.xmt[i], sizeof(q80_xmt_stats_t));
2260 		} else {
2261 			device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
2262 				__func__, ha->hw.mbox[0]);
2263 		}
2264 	}
2265 
2266 ql_get_stats_exit:
2267 	QLA_UNLOCK(ha, __func__);
2268 
2269 	return;
2270 }
2271 
2272 /*
2273  * Name: qla_tx_tso
2274  * Function: Checks if the packet to be transmitted is a candidate for
2275  *	Large TCP Segment Offload. If yes, the appropriate fields in the Tx
2276  *	Ring Structure are plugged in.
2277  */
2278 static int
2279 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
2280 {
2281 	struct ether_vlan_header *eh;
2282 	struct ip *ip = NULL;
2283 	struct ip6_hdr *ip6 = NULL;
2284 	struct tcphdr *th = NULL;
2285 	uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
2286 	uint16_t etype, opcode, offload = 1;
2287 	device_t dev;
2288 
2289 	dev = ha->pci_dev;
2290 
2292 	eh = mtod(mp, struct ether_vlan_header *);
2293 
2294 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2295 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2296 		etype = ntohs(eh->evl_proto);
2297 	} else {
2298 		ehdrlen = ETHER_HDR_LEN;
2299 		etype = ntohs(eh->evl_encap_proto);
2300 	}
2301 
2302 	hdrlen = 0;
2303 
2304 	switch (etype) {
2305 		case ETHERTYPE_IP:
2306 
2307 			tcp_opt_off = ehdrlen + sizeof(struct ip) +
2308 					sizeof(struct tcphdr);
2309 
2310 			if (mp->m_len < tcp_opt_off) {
2311 				m_copydata(mp, 0, tcp_opt_off, hdr);
2312 				ip = (struct ip *)(hdr + ehdrlen);
2313 			} else {
2314 				ip = (struct ip *)(mp->m_data + ehdrlen);
2315 			}
2316 
2317 			ip_hlen = ip->ip_hl << 2;
2318 			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
2319 
2321 			if ((ip->ip_p != IPPROTO_TCP) ||
2322 				(ip_hlen != sizeof (struct ip))){
2323 				/* IP Options are not supported */
2324 
2325 				offload = 0;
2326 			} else
2327 				th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2328 
2329 		break;
2330 
2331 		case ETHERTYPE_IPV6:
2332 
2333 			tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
2334 					sizeof (struct tcphdr);
2335 
2336 			if (mp->m_len < tcp_opt_off) {
2337 				m_copydata(mp, 0, tcp_opt_off, hdr);
2338 				ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
2339 			} else {
2340 				ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2341 			}
2342 
2343 			ip_hlen = sizeof(struct ip6_hdr);
2344 			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
2345 
2346 			if (ip6->ip6_nxt != IPPROTO_TCP) {
2347 				//device_printf(dev, "%s: ipv6\n", __func__);
2348 				offload = 0;
2349 			} else
2350 				th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
2351 		break;
2352 
2353 		default:
2354 			QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
2355 			offload = 0;
2356 		break;
2357 	}
2358 
2359 	if (!offload)
2360 		return (-1);
2361 
2362 	tcp_hlen = th->th_off << 2;
2363 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2364 
2365         if (mp->m_len < hdrlen) {
2366                 if (mp->m_len < tcp_opt_off) {
2367                         if (tcp_hlen > sizeof(struct tcphdr)) {
2368                                 m_copydata(mp, tcp_opt_off,
2369                                         (tcp_hlen - sizeof(struct tcphdr)),
2370                                         &hdr[tcp_opt_off]);
2371                         }
2372                 } else {
2373                         m_copydata(mp, 0, hdrlen, hdr);
2374                 }
2375         }
2376 
2377 	tx_cmd->mss = mp->m_pkthdr.tso_segsz;
2378 
2379 	tx_cmd->flags_opcode = opcode;
2380 	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
2381 	tx_cmd->total_hdr_len = hdrlen;
2382 
2383 	/* Multicast check: least significant bit of the first address byte is set */
2384 	if (eh->evl_dhost[0] & 0x01) {
2385 		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
2386 	}
2387 
2388 	if (mp->m_len < hdrlen) {
2389 		QL_DPRINT8(ha, (dev, "%s: hdrlen = %d\n", __func__, hdrlen));
2390 		return (1);
2391 	}
2392 
2393 	return (0);
2394 }
2395 
2396 /*
2397  * Name: qla_tx_chksum
2398  * Function: Checks if the packet to be transmitted is a candidate for
2399  *	TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
2400  *	Ring Structure are plugged in.
2401  */
2402 static int
2403 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
2404 	uint32_t *tcp_hdr_off)
2405 {
2406 	struct ether_vlan_header *eh;
2407 	struct ip *ip;
2408 	struct ip6_hdr *ip6;
2409 	uint32_t ehdrlen, ip_hlen;
2410 	uint16_t etype, opcode, offload = 1;
2411 	device_t dev;
2412 	uint8_t buf[sizeof(struct ip6_hdr)];
2413 
2414 	dev = ha->pci_dev;
2415 
2416 	*op_code = 0;
2417 
2418 	if ((mp->m_pkthdr.csum_flags &
2419 		(CSUM_TCP|CSUM_UDP|CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0)
2420 		return (-1);
2421 
2422 	eh = mtod(mp, struct ether_vlan_header *);
2423 
2424 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2425 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2426 		etype = ntohs(eh->evl_proto);
2427 	} else {
2428 		ehdrlen = ETHER_HDR_LEN;
2429 		etype = ntohs(eh->evl_encap_proto);
2430 	}
2431 
2433 	switch (etype) {
2434 		case ETHERTYPE_IP:
2435 			ip = (struct ip *)(mp->m_data + ehdrlen);
2436 
2437 			ip_hlen = sizeof (struct ip);
2438 
2439 			if (mp->m_len < (ehdrlen + ip_hlen)) {
2440 				m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2441 				ip = (struct ip *)buf;
2442 			}
2443 
2444 			if (ip->ip_p == IPPROTO_TCP)
2445 				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
2446 			else if (ip->ip_p == IPPROTO_UDP)
2447 				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
2448 			else {
2449 				//device_printf(dev, "%s: ipv4\n", __func__);
2450 				offload = 0;
2451 			}
2452 		break;
2453 
2454 		case ETHERTYPE_IPV6:
2455 			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2456 
2457 			ip_hlen = sizeof(struct ip6_hdr);
2458 
2459 			if (mp->m_len < (ehdrlen + ip_hlen)) {
2460 				m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2461 					buf);
2462 				ip6 = (struct ip6_hdr *)buf;
2463 			}
2464 
2465 			if (ip6->ip6_nxt == IPPROTO_TCP)
2466 				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
2467 			else if (ip6->ip6_nxt == IPPROTO_UDP)
2468 				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
2469 			else {
2470 				//device_printf(dev, "%s: ipv6\n", __func__);
2471 				offload = 0;
2472 			}
2473 		break;
2474 
2475 		default:
2476 			offload = 0;
2477 		break;
2478 	}
2479 	if (!offload)
2480 		return (-1);
2481 
2482 	*op_code = opcode;
2483 	*tcp_hdr_off = (ip_hlen + ehdrlen);
2484 
2485 	return (0);
2486 }
2487 
2488 #define QLA_TX_MIN_FREE 2
2489 /*
2490  * Name: ql_hw_send
2491  * Function: Transmits a packet. It first checks if the packet is a
2492  *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
2493  *	offload. If neither criterion is met, it is transmitted as a
2494  *	regular Ethernet frame.
2495  */
2496 int
2497 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
2498 	uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
2499 {
2500 	struct ether_vlan_header *eh;
2501 	qla_hw_t *hw = &ha->hw;
2502 	q80_tx_cmd_t *tx_cmd, tso_cmd;
2503 	bus_dma_segment_t *c_seg;
2504 	uint32_t num_tx_cmds, hdr_len = 0;
2505 	uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
2506 	device_t dev;
2507 	int i, ret;
2508 	uint8_t *src = NULL, *dst = NULL;
2509 	uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
2510 	uint32_t op_code = 0;
2511 	uint32_t tcp_hdr_off = 0;
2512 
2513 	dev = ha->pci_dev;
2514 
2515 	/*
2516 	 * Always make sure there is at least one empty slot in the tx_ring;
2517 	 * the tx_ring is considered full when only one entry is available.
2518 	 */
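	/*
	 * Each tx command descriptor carries up to four DMA segments
	 * (buf1..buf4 below), so the ">> 2" assumes
	 * Q8_TX_CMD_MAX_SEGMENTS == 4. Illustrative example: nsegs = 7
	 * yields (7 + 3) >> 2 = 2 descriptors.
	 */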
2519         num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
2520 
2521 	total_length = mp->m_pkthdr.len;
2522 	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
2523 		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
2524 			__func__, total_length);
2525 		return (EINVAL);
2526 	}
2527 	eh = mtod(mp, struct ether_vlan_header *);
2528 
2529 	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2530 
2531 		bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
2532 
2533 		src = frame_hdr;
2534 		ret = qla_tx_tso(ha, mp, &tso_cmd, src);
2535 
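		/*
		 * qla_tx_tso() returns -1 when the frame cannot use LSO,
		 * 0 when the complete header lies in the first mbuf, and
		 * 1 when the header was copied into frame_hdr above; the
		 * check below accepts 0 and 1 only.
		 */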
2536 		if (!(ret & ~1)) {
2537 			/* find the additional tx_cmd descriptors required */
2538 
2539 			if (mp->m_flags & M_VLANTAG)
2540 				tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
2541 
2542 			hdr_len = tso_cmd.total_hdr_len;
2543 
2544 			bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2545 			bytes = QL_MIN(bytes, hdr_len);
2546 
2547 			num_tx_cmds++;
2548 			hdr_len -= bytes;
2549 
2550 			while (hdr_len) {
2551 				bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2552 				hdr_len -= bytes;
2553 				num_tx_cmds++;
2554 			}
2555 			hdr_len = tso_cmd.total_hdr_len;
2556 
2557 			if (ret == 0)
2558 				src = (uint8_t *)eh;
2559 		} else
2560 			return (EINVAL);
2561 	} else {
2562 		(void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
2563 	}
2564 
2565 	if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
2566 		ql_hw_tx_done_locked(ha, txr_idx);
2567 		if (hw->tx_cntxt[txr_idx].txr_free <=
2568 				(num_tx_cmds + QLA_TX_MIN_FREE)) {
2569         		QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
2570 				"(num_tx_cmds + QLA_TX_MIN_FREE))\n",
2571 				__func__));
2572 			return (-1);
2573 		}
2574 	}
2575 
2576 	for (i = 0; i < num_tx_cmds; i++) {
2577 		int j;
2578 
2579 		j = (tx_idx+i) & (NUM_TX_DESCRIPTORS - 1);
2580 
2581 		if (NULL != ha->tx_ring[txr_idx].tx_buf[j].m_head) {
2582 			QL_ASSERT(ha, 0, \
2583 				("%s [%d]: txr_idx = %d tx_idx = %d mbuf = %p\n",\
2584 				__func__, __LINE__, txr_idx, j,\
2585 				ha->tx_ring[txr_idx].tx_buf[j].m_head));
2586 			return (EINVAL);
2587 		}
2588 	}
2589 
2590 	tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
2591 
2592         if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
2593 
2594                 if (nsegs > ha->hw.max_tx_segs)
2595                         ha->hw.max_tx_segs = nsegs;
2596 
2597                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2598 
2599                 if (op_code) {
2600                         tx_cmd->flags_opcode = op_code;
2601                         tx_cmd->tcp_hdr_off = tcp_hdr_off;
2602 
2603                 } else {
2604                         tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
2605                 }
2606 	} else {
2607 		bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
2608 		ha->tx_tso_frames++;
2609 	}
2610 
2611 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2612         	tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
2613 
2614 		if (iscsi_pdu)
2615 			eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
2616 
2617 	} else if (mp->m_flags & M_VLANTAG) {
2618 
2619 		if (hdr_len) { /* TSO */
2620 			tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
2621 						Q8_TX_CMD_FLAGS_HW_VLAN_ID);
2622 			tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
2623 		} else
2624 			tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
2625 
2626 		ha->hw_vlan_tx_frames++;
2627 		tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
2628 
2629 		if (iscsi_pdu) {
2630 			tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
2631 			mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
2632 		}
2633 	}
2634 
2636         tx_cmd->n_bufs = (uint8_t)nsegs;
2637         tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
2638         tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
2639 	tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
2640 
2641 	c_seg = segs;
2642 
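	/*
	 * Fill up to four DMA segments into each tx command descriptor,
	 * chaining additional (zeroed) descriptors until every segment
	 * has been consumed.
	 */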
2643 	while (1) {
2644 		for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
2645 
2646 			switch (i) {
2647 			case 0:
2648 				tx_cmd->buf1_addr = c_seg->ds_addr;
2649 				tx_cmd->buf1_len = c_seg->ds_len;
2650 				break;
2651 
2652 			case 1:
2653 				tx_cmd->buf2_addr = c_seg->ds_addr;
2654 				tx_cmd->buf2_len = c_seg->ds_len;
2655 				break;
2656 
2657 			case 2:
2658 				tx_cmd->buf3_addr = c_seg->ds_addr;
2659 				tx_cmd->buf3_len = c_seg->ds_len;
2660 				break;
2661 
2662 			case 3:
2663 				tx_cmd->buf4_addr = c_seg->ds_addr;
2664 				tx_cmd->buf4_len = c_seg->ds_len;
2665 				break;
2666 			}
2667 
2668 			c_seg++;
2669 			nsegs--;
2670 		}
2671 
2672 		txr_next = hw->tx_cntxt[txr_idx].txr_next =
2673 			(hw->tx_cntxt[txr_idx].txr_next + 1) &
2674 				(NUM_TX_DESCRIPTORS - 1);
2675 		tx_cmd_count++;
2676 
2677 		if (!nsegs)
2678 			break;
2679 
2680 		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2681 		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2682 	}
2683 
2684 	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2685 
2686 		/* TSO : Copy the header in the following tx cmd descriptors */
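		/*
		 * Layout: the first header descriptor leaves
		 * Q8_TX_CMD_TSO_ALIGN bytes at its start and the rest of
		 * the header fills whole descriptors. If the VLAN tag is
		 * present only in the mbuf packet header, it is inserted
		 * into the copied Ethernet header here.
		 */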
2687 
2688 		txr_next = hw->tx_cntxt[txr_idx].txr_next;
2689 
2690 		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2691 		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2692 
2693 		bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2694 		bytes = QL_MIN(bytes, hdr_len);
2695 
2696 		dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
2697 
2698 		if (mp->m_flags & M_VLANTAG) {
2699 			/* first copy the src/dst MAC addresses */
2700 			bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2701 			dst += (ETHER_ADDR_LEN * 2);
2702 			src += (ETHER_ADDR_LEN * 2);
2703 
2704 			*((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2705 			dst += 2;
2706 			*((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2707 			dst += 2;
2708 
2709 			/* bytes left in src header */
2710 			hdr_len -= ((ETHER_ADDR_LEN * 2) +
2711 					ETHER_VLAN_ENCAP_LEN);
2712 
2713 			/* bytes left in TxCmd Entry */
2714 			bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2715 
2717 			bcopy(src, dst, bytes);
2718 			src += bytes;
2719 			hdr_len -= bytes;
2720 		} else {
2721 			bcopy(src, dst, bytes);
2722 			src += bytes;
2723 			hdr_len -= bytes;
2724 		}
2725 
2726 		txr_next = hw->tx_cntxt[txr_idx].txr_next =
2727 				(hw->tx_cntxt[txr_idx].txr_next + 1) &
2728 					(NUM_TX_DESCRIPTORS - 1);
2729 		tx_cmd_count++;
2730 
2731 		while (hdr_len) {
2732 			tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2733 			bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2734 
2735 			bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2736 
2737 			bcopy(src, tx_cmd, bytes);
2738 			src += bytes;
2739 			hdr_len -= bytes;
2740 
2741 			txr_next = hw->tx_cntxt[txr_idx].txr_next =
2742 				(hw->tx_cntxt[txr_idx].txr_next + 1) &
2743 					(NUM_TX_DESCRIPTORS - 1);
2744 			tx_cmd_count++;
2745 		}
2746 	}
2747 
2748 	hw->tx_cntxt[txr_idx].txr_free -= tx_cmd_count;
2750 
2751 	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2752 		txr_idx);
2753        	QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2754 
2755 	return (0);
2756 }
2757 
2760 #define Q8_CONFIG_IND_TBL_SIZE	32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
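/*
 * Illustrative example: with num_sds_rings == 4 the table below is
 * filled with the repeating pattern 0,1,2,3,0,1,2,3,... so that RSS
 * hash buckets are spread evenly across the status rings; it is then
 * programmed in Q8_CONFIG_IND_TBL_SIZE-entry chunks.
 */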
2761 static int
2762 qla_config_rss_ind_table(qla_host_t *ha)
2763 {
2764 	uint32_t i, count;
2765 	uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2766 
2768 	for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2769 		rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2770 	}
2771 
2772 	for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX;
2773 		i += Q8_CONFIG_IND_TBL_SIZE) {
2774 
2775 		if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2776 			count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2777 		} else {
2778 			count = Q8_CONFIG_IND_TBL_SIZE;
2779 		}
2780 
2781 		if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2782 			rss_ind_tbl))
2783 			return (-1);
2784 	}
2785 
2786 	return (0);
2787 }
2788 
2789 static int
2790 qla_config_soft_lro(qla_host_t *ha)
2791 {
2792         int i;
2793         qla_hw_t *hw = &ha->hw;
2794         struct lro_ctrl *lro;
2795 
2796         for (i = 0; i < hw->num_sds_rings; i++) {
2797                 lro = &hw->sds[i].lro;
2798 
2799 		bzero(lro, sizeof(struct lro_ctrl));
2800 
2801 #if (__FreeBSD_version >= 1100101)
2802                 if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
2803                         device_printf(ha->pci_dev,
2804 				"%s: tcp_lro_init_args [%d] failed\n",
2805                                 __func__, i);
2806                         return (-1);
2807                 }
2808 #else
2809                 if (tcp_lro_init(lro)) {
2810                         device_printf(ha->pci_dev,
2811 				"%s: tcp_lro_init [%d] failed\n",
2812                                 __func__, i);
2813                         return (-1);
2814                 }
2815 #endif /* #if (__FreeBSD_version >= 1100101) */
2816 
2817                 lro->ifp = ha->ifp;
2818         }
2819 
2820         QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
2821         return (0);
2822 }
2823 
2824 static void
2825 qla_drain_soft_lro(qla_host_t *ha)
2826 {
2827         int i;
2828         qla_hw_t *hw = &ha->hw;
2829         struct lro_ctrl *lro;
2830 
2831        	for (i = 0; i < hw->num_sds_rings; i++) {
2832                	lro = &hw->sds[i].lro;
2833 
2834 #if (__FreeBSD_version >= 1100101)
2835 		tcp_lro_flush_all(lro);
2836 #else
2837                 struct lro_entry *queued;
2838 
2839 		while ((!SLIST_EMPTY(&lro->lro_active))) {
2840 			queued = SLIST_FIRST(&lro->lro_active);
2841 			SLIST_REMOVE_HEAD(&lro->lro_active, next);
2842 			tcp_lro_flush(lro, queued);
2843 		}
2844 #endif /* #if (__FreeBSD_version >= 1100101) */
2845 	}
2846 
2847 	return;
2848 }
2849 
2850 static void
2851 qla_free_soft_lro(qla_host_t *ha)
2852 {
2853         int i;
2854         qla_hw_t *hw = &ha->hw;
2855         struct lro_ctrl *lro;
2856 
2857         for (i = 0; i < hw->num_sds_rings; i++) {
2858                	lro = &hw->sds[i].lro;
2859 		tcp_lro_free(lro);
2860 	}
2861 
2862 	return;
2863 }
2864 
2866 /*
2867  * Name: ql_del_hw_if
2868  * Function: Destroys the hardware-specific entities corresponding to an
2869  *	Ethernet Interface
2870  */
2871 void
2872 ql_del_hw_if(qla_host_t *ha)
2873 {
2874 	uint32_t i;
2875 	uint32_t num_msix;
2876 
2877 	(void)qla_stop_nic_func(ha);
2878 
2879 	qla_del_rcv_cntxt(ha);
2880 
2881 	if (qla_del_xmt_cntxt(ha))
2882 		goto ql_del_hw_if_exit;
2883 
2884 	if (ha->hw.flags.init_intr_cnxt) {
2885 		for (i = 0; i < ha->hw.num_sds_rings; ) {
2886 
2887 			if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2888 				num_msix = Q8_MAX_INTR_VECTORS;
2889 			else
2890 				num_msix = ha->hw.num_sds_rings - i;
2891 
2892 			if (qla_config_intr_cntxt(ha, i, num_msix, 0))
2893 				break;
2894 
2895 			i += num_msix;
2896 		}
2897 
2898 		ha->hw.flags.init_intr_cnxt = 0;
2899 	}
2900 
2901 ql_del_hw_if_exit:
2902 	if (ha->hw.enable_soft_lro) {
2903 		qla_drain_soft_lro(ha);
2904 		qla_free_soft_lro(ha);
2905 	}
2906 
2907 	return;
2908 }
2909 
2910 void
2911 qla_confirm_9kb_enable(qla_host_t *ha)
2912 {
2913 //	uint32_t supports_9kb = 0;
2914 
2915 	ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2916 
2917 	/* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2918 	WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2919 	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2920 
2921 #if 0
2922 	qla_get_nic_partition(ha, &supports_9kb, NULL);
2923 
2924 	if (!supports_9kb)
2925 #endif
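	/*
	 * With the partition query compiled out above, 9KB receive
	 * buffers are unconditionally disabled.
	 */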
2926 	ha->hw.enable_9kb = 0;
2927 
2928 	return;
2929 }
2930 
2931 /*
2932  * Name: ql_init_hw_if
2933  * Function: Creates the hardware-specific entities corresponding to an
2934  *	Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2935  *	corresponding to the interface. Enables LRO if allowed.
2936  */
2937 int
2938 ql_init_hw_if(qla_host_t *ha)
2939 {
2940 	device_t	dev;
2941 	uint32_t	i;
2942 	uint8_t		bcast_mac[6];
2943 	qla_rdesc_t	*rdesc;
2944 	uint32_t	num_msix;
2945 
2946 	dev = ha->pci_dev;
2947 
2948 	for (i = 0; i < ha->hw.num_sds_rings; i++) {
2949 		bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2950 			ha->hw.dma_buf.sds_ring[i].size);
2951 	}
2952 
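	/*
	 * Interrupt contexts are created at most Q8_MAX_INTR_VECTORS at
	 * a time, so walk the SDS rings in chunks; on failure, tear
	 * down the chunks created so far.
	 */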
2953 	for (i = 0; i < ha->hw.num_sds_rings; ) {
2954 
2955 		if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2956 			num_msix = Q8_MAX_INTR_VECTORS;
2957 		else
2958 			num_msix = ha->hw.num_sds_rings - i;
2959 
2960 		if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2961 
2962 			if (i > 0) {
2963 
2964 				num_msix = i;
2965 
2966 				for (i = 0; i < num_msix; ) {
2967 					qla_config_intr_cntxt(ha, i,
2968 						Q8_MAX_INTR_VECTORS, 0);
2969 					i += Q8_MAX_INTR_VECTORS;
2970 				}
2971 			}
2972 			return (-1);
2973 		}
2974 
2975 		i += num_msix;
2976 	}
2977 
2978         ha->hw.flags.init_intr_cnxt = 1;
2979 
2980 	/*
2981 	 * Create Receive Context
2982 	 */
2983 	if (qla_init_rcv_cntxt(ha)) {
2984 		return (-1);
2985 	}
2986 
2987 	for (i = 0; i < ha->hw.num_rds_rings; i++) {
2988 		rdesc = &ha->hw.rds[i];
2989 		rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2990 		rdesc->rx_in = 0;
2991 		/* Update the RDS Producer Indices */
2992 		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2993 			rdesc->rx_next);
2994 	}
2995 
2996 	/*
2997 	 * Create Transmit Context
2998 	 */
2999 	if (qla_init_xmt_cntxt(ha)) {
3000 		qla_del_rcv_cntxt(ha);
3001 		return (-1);
3002 	}
3003 	ha->hw.max_tx_segs = 0;
3004 
3005 	if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
3006 		return(-1);
3007 
3008 	ha->hw.flags.unicast_mac = 1;
3009 
3010 	bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
3011 	bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
3012 
3013 	if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
3014 		return (-1);
3015 
3016 	ha->hw.flags.bcast_mac = 1;
3017 
3018 	/*
3019 	 * program any cached multicast addresses
3020 	 */
3021 	if (qla_hw_add_all_mcast(ha))
3022 		return (-1);
3023 
3024 	if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id))
3025 		return (-1);
3026 
3027 	if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
3028 		return (-1);
3029 
3030 	if (qla_config_rss_ind_table(ha))
3031 		return (-1);
3032 
3033 	if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
3034 		return (-1);
3035 
3036 	if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
3037 		return (-1);
3038 
3039 	if (ha->ifp->if_capenable & IFCAP_LRO) {
3040 		if (ha->hw.enable_hw_lro) {
3041 			ha->hw.enable_soft_lro = 0;
3042 
3043 			if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
3044 				return (-1);
3045 		} else {
3046 			ha->hw.enable_soft_lro = 1;
3047 
3048 			if (qla_config_soft_lro(ha))
3049 				return (-1);
3050 		}
3051 	}
3052 
3053         if (qla_init_nic_func(ha))
3054                 return (-1);
3055 
3056         if (qla_query_fw_dcbx_caps(ha))
3057                 return (-1);
3058 
3059 	for (i = 0; i < ha->hw.num_sds_rings; i++)
3060 		QL_ENABLE_INTERRUPTS(ha, i);
3061 
3062 	return (0);
3063 }
3064 
3065 static int
3066 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
3067 {
3068         device_t                dev = ha->pci_dev;
3069         q80_rq_map_sds_to_rds_t *map_rings;
3070 	q80_rsp_map_sds_to_rds_t *map_rings_rsp;
3071         uint32_t                i, err;
3072         qla_hw_t                *hw = &ha->hw;
3073 
3074         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
3075         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
3076 
3077         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
3078         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
3079         map_rings->count_version |= Q8_MBX_CMD_VERSION;
3080 
3081         map_rings->cntxt_id = hw->rcv_cntxt_id;
3082         map_rings->num_rings = num_idx;
3083 
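	/*
	 * Each status (SDS) ring is mapped 1:1 onto the receive (RDS)
	 * ring with the same index.
	 */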
3084 	for (i = 0; i < num_idx; i++) {
3085 		map_rings->sds_rds[i].sds_ring = i + start_idx;
3086 		map_rings->sds_rds[i].rds_ring = i + start_idx;
3087 	}
3088 
3089         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
3090                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
3091                 ha->hw.mbox, (sizeof(q80_rsp_map_sds_to_rds_t) >> 2), 0)) {
3092                 device_printf(dev, "%s: failed0\n", __func__);
3093                 return (-1);
3094         }
3095 
3096         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
3097 
3098         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
3099 
3100         if (err) {
3101                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3102                 return (-1);
3103         }
3104 
3105         return (0);
3106 }
3107 
3108 /*
3109  * Name: qla_init_rcv_cntxt
3110  * Function: Creates the Receive Context.
3111  */
3112 static int
3113 qla_init_rcv_cntxt(qla_host_t *ha)
3114 {
3115 	q80_rq_rcv_cntxt_t	*rcntxt;
3116 	q80_rsp_rcv_cntxt_t	*rcntxt_rsp;
3117 	q80_stat_desc_t		*sdesc;
3118 	int			i, j;
3119         qla_hw_t		*hw = &ha->hw;
3120 	device_t		dev;
3121 	uint32_t		err;
3122 	uint32_t		rcntxt_sds_rings;
3123 	uint32_t		rcntxt_rds_rings;
3124 	uint32_t		max_idx;
3125 
3126 	dev = ha->pci_dev;
3127 
3128 	/*
3129 	 * Create Receive Context
3130 	 */
3131 
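	/*
	 * Seed every status descriptor with a nonzero pattern;
	 * assumption: this marks the entries as not yet written back by
	 * the firmware.
	 */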
3132 	for (i = 0; i < hw->num_sds_rings; i++) {
3133 		sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
3134 
3135 		for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
3136 			sdesc->data[0] = 1ULL;
3137 			sdesc->data[1] = 1ULL;
			sdesc++;
3138 		}
3139 	}
3140 
3141 	rcntxt_sds_rings = hw->num_sds_rings;
3142 	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
3143 		rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
3144 
3145 	rcntxt_rds_rings = hw->num_rds_rings;
3146 
3147 	if (hw->num_rds_rings > MAX_RDS_RING_SETS)
3148 		rcntxt_rds_rings = MAX_RDS_RING_SETS;
3149 
3150 	rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
3151 	bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
3152 
3153 	rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
3154 	rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
3155 	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
3156 
3157 	rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
3158 			Q8_RCV_CNTXT_CAP0_LRO |
3159 			Q8_RCV_CNTXT_CAP0_HW_LRO |
3160 			Q8_RCV_CNTXT_CAP0_RSS |
3161 			Q8_RCV_CNTXT_CAP0_SGL_LRO;
3162 
3163 	if (ha->hw.enable_9kb)
3164 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
3165 	else
3166 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
3167 
3168 	if (ha->hw.num_rds_rings > 1) {
3169 		rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
3170 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
3171 	} else
3172 		rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
3173 
3174 	rcntxt->nsds_rings = rcntxt_sds_rings;
3175 
3176 	rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
3177 
3178 	rcntxt->rcv_vpid = 0;
3179 
3180 	for (i = 0; i <  rcntxt_sds_rings; i++) {
3181 		rcntxt->sds[i].paddr =
3182 			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
3183 		rcntxt->sds[i].size =
3184 			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
3185 		rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
3186 		rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
3187 	}
3188 
3189 	for (i = 0; i <  rcntxt_rds_rings; i++) {
3190 		rcntxt->rds[i].paddr_std =
3191 			qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
3192 
3193 		if (ha->hw.enable_9kb)
3194 			rcntxt->rds[i].std_bsize =
3195 				qla_host_to_le64(MJUM9BYTES);
3196 		else
3197 			rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
3198 
3199 		rcntxt->rds[i].std_nentries =
3200 			qla_host_to_le32(NUM_RX_DESCRIPTORS);
3201 	}
3202 
3203         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3204 		(sizeof (q80_rq_rcv_cntxt_t) >> 2),
3205                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
3206                 device_printf(dev, "%s: failed0\n", __func__);
3207                 return (-1);
3208         }
3209 
3210         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
3211 
3212         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3213 
3214         if (err) {
3215                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3216                 return (-1);
3217         }
3218 
3219 	for (i = 0; i <  rcntxt_sds_rings; i++) {
3220 		hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
3221 	}
3222 
3223 	for (i = 0; i <  rcntxt_rds_rings; i++) {
3224 		hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
3225 	}
3226 
3227 	hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
3228 
3229 	ha->hw.flags.init_rx_cnxt = 1;
3230 
3231 	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
3232 
3233 		for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
3234 
3235 			if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
3236 				max_idx = MAX_RCNTXT_SDS_RINGS;
3237 			else
3238 				max_idx = hw->num_sds_rings - i;
3239 
3240 			err = qla_add_rcv_rings(ha, i, max_idx);
3241 			if (err)
3242 				return -1;
3243 
3244 			i += max_idx;
3245 		}
3246 	}
3247 
3248 	if (hw->num_rds_rings > 1) {
3249 
3250 		for (i = 0; i < hw->num_rds_rings; ) {
3251 
3252 			if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
3253 				max_idx = MAX_SDS_TO_RDS_MAP;
3254 			else
3255 				max_idx = hw->num_rds_rings - i;
3256 
3257 			err = qla_map_sds_to_rds(ha, i, max_idx);
3258 			if (err)
3259 				return -1;
3260 
3261 			i += max_idx;
3262 		}
3263 	}
3264 
3265 	return (0);
3266 }
3267 
3268 static int
3269 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
3270 {
3271 	device_t		dev = ha->pci_dev;
3272 	q80_rq_add_rcv_rings_t	*add_rcv;
3273 	q80_rsp_add_rcv_rings_t	*add_rcv_rsp;
3274 	uint32_t		i,j, err;
3275         qla_hw_t		*hw = &ha->hw;
3276 
3277 	add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
3278 	bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
3279 
3280 	add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
3281 	add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
3282 	add_rcv->count_version |= Q8_MBX_CMD_VERSION;
3283 
3284 	add_rcv->nrds_sets_rings = nsds | (1 << 5);
3285 	add_rcv->nsds_rings = nsds;
3286 	add_rcv->cntxt_id = hw->rcv_cntxt_id;
3287 
3288         for (i = 0; i <  nsds; i++) {
3289 
3290 		j = i + sds_idx;
3291 
3292                 add_rcv->sds[i].paddr =
3293                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
3294 
3295                 add_rcv->sds[i].size =
3296                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
3297 
3298                 add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
3299                 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
3300 
3301         }
3302 
3303         for (i = 0; (i <  nsds); i++) {
3304                 j = i + sds_idx;
3305 
3306                 add_rcv->rds[i].paddr_std =
3307                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
3308 
3309 		if (ha->hw.enable_9kb)
3310 			add_rcv->rds[i].std_bsize =
3311 				qla_host_to_le64(MJUM9BYTES);
3312 		else
3313                 	add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
3314 
3315                 add_rcv->rds[i].std_nentries =
3316                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
3317         }
3318 
3320         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
3321 		(sizeof (q80_rq_add_rcv_rings_t) >> 2),
3322                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
3323                 device_printf(dev, "%s: failed0\n", __func__);
3324                 return (-1);
3325         }
3326 
3327         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
3328 
3329         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
3330 
3331         if (err) {
3332                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3333                 return (-1);
3334         }
3335 
3336 	for (i = 0; i < nsds; i++) {
3337 		hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
3338 	}
3339 
3340 	for (i = 0; i < nsds; i++) {
3341 		hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
3342 	}
3343 
3344 	return (0);
3345 }
3346 
3347 /*
3348  * Name: qla_del_rcv_cntxt
3349  * Function: Destroys the Receive Context.
3350  */
3351 static void
3352 qla_del_rcv_cntxt(qla_host_t *ha)
3353 {
3354 	device_t			dev = ha->pci_dev;
3355 	q80_rcv_cntxt_destroy_t		*rcntxt;
3356 	q80_rcv_cntxt_destroy_rsp_t	*rcntxt_rsp;
3357 	uint32_t			err;
3358 	uint8_t				bcast_mac[6];
3359 
3360 	if (!ha->hw.flags.init_rx_cnxt)
3361 		return;
3362 
3363 	if (qla_hw_del_all_mcast(ha))
3364 		return;
3365 
3366 	if (ha->hw.flags.bcast_mac) {
3367 
3368 		bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
3369 		bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
3370 
3371 		if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
3372 			return;
3373 		ha->hw.flags.bcast_mac = 0;
3374 
3375 	}
3376 
3377 	if (ha->hw.flags.unicast_mac) {
3378 		if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
3379 			return;
3380 		ha->hw.flags.unicast_mac = 0;
3381 	}
3382 
3383 	rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
3384 	bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
3385 
3386 	rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
3387 	rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
3388 	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
3389 
3390 	rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
3391 
3392         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3393 		(sizeof (q80_rcv_cntxt_destroy_t) >> 2),
3394                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
3395                 device_printf(dev, "%s: failed0\n", __func__);
3396                 return;
3397         }
3398         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
3399 
3400         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3401 
3402         if (err) {
3403                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3404         }
3405 
3406 	ha->hw.flags.init_rx_cnxt = 0;
3407 	return;
3408 }
3409 
3410 /*
3411  * Name: qla_init_xmt_cntxt_i
3412  * Function: Creates the Transmit Context.
3413  */
3414 static int
3415 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3416 {
3417 	device_t		dev;
3418         qla_hw_t		*hw = &ha->hw;
3419 	q80_rq_tx_cntxt_t	*tcntxt;
3420 	q80_rsp_tx_cntxt_t	*tcntxt_rsp;
3421 	uint32_t		err;
3422 	qla_hw_tx_cntxt_t       *hw_tx_cntxt;
3423 	uint32_t		intr_idx;
3424 
3425 	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3426 
3427 	dev = ha->pci_dev;
3428 
3429 	/*
3430 	 * Create Transmit Context
3431 	 */
3432 	tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
3433 	bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
3434 
3435 	tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
3436 	tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
3437 	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3438 
3439 	intr_idx = txr_idx;
3440 
3441 #ifdef QL_ENABLE_ISCSI_TLV
3442 
3443 	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
3444 				Q8_TX_CNTXT_CAP0_TC;
3445 
3446 	if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
3447 		tcntxt->traffic_class = 1;
3448 	}
3449 
3450 	intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
3451 
3452 #else
3453 	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
3454 
3455 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
3456 
3457 	tcntxt->ntx_rings = 1;
3458 
3459 	tcntxt->tx_ring[0].paddr =
3460 		qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
3461 	tcntxt->tx_ring[0].tx_consumer =
3462 		qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
3463 	tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
3464 
3465 	tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
3466 	tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
3467 
3468 	hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
3469 	hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
3470 	*(hw_tx_cntxt->tx_cons) = 0;
3471 
3472         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3473 		(sizeof (q80_rq_tx_cntxt_t) >> 2),
3474                 ha->hw.mbox,
3475 		(sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
3476                 device_printf(dev, "%s: failed0\n", __func__);
3477                 return (-1);
3478         }
3479         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
3480 
3481         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3482 
3483         if (err) {
3484                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3485 		return -1;
3486         }
3487 
3488 	hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
3489 	hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
3490 
3491 	if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
3492 		return (-1);
3493 
3494 	return (0);
3495 }
3496 
3498 /*
3499  * Name: qla_del_xmt_cntxt_i
3500  * Function: Destroys the Transmit Context.
3501  */
3502 static int
3503 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3504 {
3505 	device_t			dev = ha->pci_dev;
3506 	q80_tx_cntxt_destroy_t		*tcntxt;
3507 	q80_tx_cntxt_destroy_rsp_t	*tcntxt_rsp;
3508 	uint32_t			err;
3509 
3510 	tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
3511 	bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
3512 
3513 	tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
3514 	tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
3515 	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3516 
3517 	tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
3518 
3519         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3520 		(sizeof (q80_tx_cntxt_destroy_t) >> 2),
3521                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
3522                 device_printf(dev, "%s: failed0\n", __func__);
3523                 return (-1);
3524         }
3525         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
3526 
3527         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3528 
3529         if (err) {
3530                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3531 		return (-1);
3532         }
3533 
3534 	return (0);
3535 }
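
/*
 * Name: qla_del_xmt_cntxt
 * Function: Destroys the Transmit Contexts for all transmit rings and
 *	clears the init_tx_cnxt flag.
 */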
3536 static int
3537 qla_del_xmt_cntxt(qla_host_t *ha)
3538 {
3539 	uint32_t i;
3540 	int ret = 0;
3541 
3542 	if (!ha->hw.flags.init_tx_cnxt)
3543 		return (ret);
3544 
3545 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
3546 		if ((ret = qla_del_xmt_cntxt_i(ha, i)) != 0)
3547 			break;
3548 	}
3549 	ha->hw.flags.init_tx_cnxt = 0;
3550 
3551 	return (ret);
3552 }
3553 
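/*
 * Name: qla_init_xmt_cntxt
 * Function: Creates a Transmit Context for every transmit ring; if one
 *	fails, the contexts already created are torn down again.
 */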
3554 static int
3555 qla_init_xmt_cntxt(qla_host_t *ha)
3556 {
3557 	uint32_t i, j;
3558 
3559 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
3560 		if (qla_init_xmt_cntxt_i(ha, i) != 0) {
3561 			for (j = 0; j < i; j++) {
3562 				if (qla_del_xmt_cntxt_i(ha, j))
3563 					break;
3564 			}
3565 			return (-1);
3566 		}
3567 	}
3568 	ha->hw.flags.init_tx_cnxt = 1;
3569 	return (0);
3570 }
3571 
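/*
 * Name: qla_hw_all_mcast
 * Function: Programs (add_mcast = 1) or removes (add_mcast = 0) every
 *	multicast address cached in ha->hw.mcast[] in the hardware,
 *	packing the addresses into ha->hw.mac_addr_arr and issuing one
 *	qla_config_mac_addr() mailbox command per batch of
 *	Q8_MAX_MAC_ADDRS addresses.
 */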
3572 static int
3573 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
3574 {
3575 	int i, nmcast;
3576 	uint32_t count = 0;
3577 	uint8_t *mcast;
3578 
3579 	nmcast = ha->hw.nmcast;
3580 
3581 	QL_DPRINT2(ha, (ha->pci_dev,
3582 		"%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
3583 
3584 	mcast = ha->hw.mac_addr_arr;
3585 	memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3586 
3587 	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
3588 		if ((ha->hw.mcast[i].addr[0] != 0) ||
3589 			(ha->hw.mcast[i].addr[1] != 0) ||
3590 			(ha->hw.mcast[i].addr[2] != 0) ||
3591 			(ha->hw.mcast[i].addr[3] != 0) ||
3592 			(ha->hw.mcast[i].addr[4] != 0) ||
3593 			(ha->hw.mcast[i].addr[5] != 0)) {
3594 
3595 			bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
3596 			mcast = mcast + ETHER_ADDR_LEN;
3597 			count++;
3598 
3599 			device_printf(ha->pci_dev,
3600 				"%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
3601 				__func__, ha->hw.mcast[i].addr[0],
3602 				ha->hw.mcast[i].addr[1], ha->hw.mcast[i].addr[2],
3603 				ha->hw.mcast[i].addr[3], ha->hw.mcast[i].addr[4],
3604 				ha->hw.mcast[i].addr[5]);
3605 
3606 			if (count == Q8_MAX_MAC_ADDRS) {
3607 				if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3608 					add_mcast, count)) {
3609                 			device_printf(ha->pci_dev,
3610 						"%s: failed\n", __func__);
3611 					return (-1);
3612 				}
3613 
3614 				count = 0;
3615 				mcast = ha->hw.mac_addr_arr;
3616 				memset(mcast, 0,
3617 					(Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3618 			}
3619 
3620 			nmcast--;
3621 		}
3622 	}
3623 
3624 	if (count) {
3625 		if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
3626 			count)) {
3627                 	device_printf(ha->pci_dev, "%s: failed\n", __func__);
3628 			return (-1);
3629 		}
3630 	}
3631 	QL_DPRINT2(ha, (ha->pci_dev,
3632 		"%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
3633 
3634 	return 0;
3635 }
3636 
3637 static int
3638 qla_hw_add_all_mcast(qla_host_t *ha)
3639 {
3640 	int ret;
3641 
3642 	ret = qla_hw_all_mcast(ha, 1);
3643 
3644 	return (ret);
3645 }
3646 
3647 int
3648 qla_hw_del_all_mcast(qla_host_t *ha)
3649 {
3650 	int ret;
3651 
3652 	ret = qla_hw_all_mcast(ha, 0);
3653 
3654 	bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
3655 	ha->hw.nmcast = 0;
3656 
3657 	return (ret);
3658 }
3659 
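/*
 * Name: qla_hw_mac_addr_present
 * Function: Returns 0 if the given MAC address is already cached in
 *	ha->hw.mcast[], -1 otherwise.
 */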
3660 static int
3661 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
3662 {
3663 	int i;
3664 
3665 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3666 		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
3667 			return (0); /* it has already been added */
3668 	}
3669 	return (-1);
3670 }
3671 
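/*
 * Name: qla_hw_add_mcast
 * Function: Caches the nmcast addresses at mta in the first free
 *	(all-zero) slots of ha->hw.mcast[].
 */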
3672 static int
3673 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3674 {
3675 	int i;
3676 
3677 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3678 
3679 		if ((ha->hw.mcast[i].addr[0] == 0) &&
3680 			(ha->hw.mcast[i].addr[1] == 0) &&
3681 			(ha->hw.mcast[i].addr[2] == 0) &&
3682 			(ha->hw.mcast[i].addr[3] == 0) &&
3683 			(ha->hw.mcast[i].addr[4] == 0) &&
3684 			(ha->hw.mcast[i].addr[5] == 0)) {
3685 
3686 			bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
3687 			ha->hw.nmcast++;
3688 
3689 			mta = mta + ETHER_ADDR_LEN;
3690 			nmcast--;
3691 
3692 			if (nmcast == 0)
3693 				break;
3694 		}
3695 
3696 	}
3697 	return 0;
3698 }
3699 
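/*
 * Name: qla_hw_del_mcast
 * Function: Clears the ha->hw.mcast[] slots matching the nmcast
 *	addresses at mta.
 */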
3700 static int
3701 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3702 {
3703 	int i;
3704 
3705 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3706 		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
3707 
3708 			ha->hw.mcast[i].addr[0] = 0;
3709 			ha->hw.mcast[i].addr[1] = 0;
3710 			ha->hw.mcast[i].addr[2] = 0;
3711 			ha->hw.mcast[i].addr[3] = 0;
3712 			ha->hw.mcast[i].addr[4] = 0;
3713 			ha->hw.mcast[i].addr[5] = 0;
3714 
3715 			ha->hw.nmcast--;
3716 
3717 			mta = mta + ETHER_ADDR_LEN;
3718 			nmcast--;
3719 
3720 			if (nmcast == 0)
3721 				break;
3722 		}
3723 	}
3724 	return 0;
3725 }
3726 
3727 /*
3728  * Name: ql_hw_set_multi
3729  * Function: Sets the Multicast Addresses provided by the host O.S into the
3730  *	hardware (for the given interface)
3731  */
3732 int
3733 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
3734 	uint32_t add_mac)
3735 {
3736 	uint8_t *mta = mcast_addr;
3737 	int i;
3738 	int ret = 0;
3739 	uint32_t count = 0;
3740 	uint8_t *mcast;
3741 
3742 	mcast = ha->hw.mac_addr_arr;
3743 	memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3744 
3745 	for (i = 0; i < mcnt; i++) {
3746 		if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
3747 			if (add_mac) {
3748 				if (qla_hw_mac_addr_present(ha, mta) != 0) {
3749 					bcopy(mta, mcast, ETHER_ADDR_LEN);
3750 					mcast = mcast + ETHER_ADDR_LEN;
3751 					count++;
3752 				}
3753 			} else {
3754 				if (qla_hw_mac_addr_present(ha, mta) == 0) {
3755 					bcopy(mta, mcast, ETHER_ADDR_LEN);
3756 					mcast = mcast + ETHER_ADDR_LEN;
3757 					count++;
3758 				}
3759 			}
3760 		}
3761 		if (count == Q8_MAX_MAC_ADDRS) {
3762 			if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3763 				add_mac, count)) {
3764                 		device_printf(ha->pci_dev, "%s: failed\n",
3765 					__func__);
3766 				return (-1);
3767 			}
3768 
3769 			if (add_mac) {
3770 				qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
3771 					count);
3772 			} else {
3773 				qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
3774 					count);
3775 			}
3776 
3777 			count = 0;
3778 			mcast = ha->hw.mac_addr_arr;
3779 			memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3780 		}
3781 
3782 		mta += Q8_MAC_ADDR_LEN;
3783 	}
3784 
3785 	if (count) {
3786 		if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
3787 			count)) {
3788                 	device_printf(ha->pci_dev, "%s: failed\n", __func__);
3789 			return (-1);
3790 		}
3791 		if (add_mac) {
3792 			qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
3793 		} else {
3794 			qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
3795 		}
3796 	}
3797 
3798 	return (ret);
3799 }
3800 
3801 /*
3802  * Name: ql_hw_tx_done_locked
3803  * Function: Handle Transmit Completions
3804  */
3805 void
3806 ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
3807 {
3808 	qla_tx_buf_t *txb;
3809         qla_hw_t *hw = &ha->hw;
3810 	uint32_t comp_idx, comp_count = 0;
3811 	qla_hw_tx_cntxt_t *hw_tx_cntxt;
3812 
3813 	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3814 
3815 	/* retrieve index of last entry in tx ring completed */
3816 	comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
3817 
3818 	while (comp_idx != hw_tx_cntxt->txr_comp) {
3819 
3820 		txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
3821 
3822 		hw_tx_cntxt->txr_comp++;
3823 		if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
3824 			hw_tx_cntxt->txr_comp = 0;
3825 
3826 		comp_count++;
3827 
3828 		if (txb->m_head) {
3829 			if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
3830 
3831 			bus_dmamap_sync(ha->tx_tag, txb->map,
3832 				BUS_DMASYNC_POSTWRITE);
3833 			bus_dmamap_unload(ha->tx_tag, txb->map);
3834 			m_freem(txb->m_head);
3835 
3836 			txb->m_head = NULL;
3837 		}
3838 	}
3839 
3840 	hw_tx_cntxt->txr_free += comp_count;
3841 
3842 	if (hw_tx_cntxt->txr_free > NUM_TX_DESCRIPTORS)
3843 		device_printf(ha->pci_dev, "%s [%d]: txr_idx = %d txr_free = %d"
3844 			" txr_next = %d txr_comp = %d\n", __func__, __LINE__,
3845 			txr_idx, hw_tx_cntxt->txr_free,
3846 			hw_tx_cntxt->txr_next, hw_tx_cntxt->txr_comp);
3847 
3848 	QL_ASSERT(ha, (hw_tx_cntxt->txr_free <= NUM_TX_DESCRIPTORS), \
3849 		("%s [%d]: txr_idx = %d txr_free = %d txr_next = %d txr_comp = %d\n",\
3850 		__func__, __LINE__, txr_idx, hw_tx_cntxt->txr_free, \
3851 		hw_tx_cntxt->txr_next, hw_tx_cntxt->txr_comp));
3852 
3853 	return;
3854 }
3855 
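/*
 * Name: ql_update_link_state
 * Function: Reads Q8_LINK_STATE and derives the per-port link status:
 *	PCI function 0 uses bits 3:0, the other functions use bits 7:4;
 *	a nibble value of 1 means link up. Transitions are reported to
 *	the stack via if_link_state_change().
 */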
3856 void
3857 ql_update_link_state(qla_host_t *ha)
3858 {
3859 	uint32_t link_state = 0;
3860 	uint32_t prev_link_state;
3861 
3862 	prev_link_state = ha->hw.link_up;
3863 
3864 	if (ha->ifp->if_drv_flags & IFF_DRV_RUNNING) {
3865 		link_state = READ_REG32(ha, Q8_LINK_STATE);
3866 
3867 		if (ha->pci_func == 0) {
3868 			link_state = (((link_state & 0xF) == 1) ? 1 : 0);
3869 		} else {
3870 			link_state = ((((link_state >> 4) & 0xF) == 1) ? 1 : 0);
3871 		}
3872 	}
3873 
3874 	atomic_store_rel_8(&ha->hw.link_up, (uint8_t)link_state);
3875 
3876 	if (prev_link_state != ha->hw.link_up) {
3877 		if (ha->hw.link_up) {
3878 			if_link_state_change(ha->ifp, LINK_STATE_UP);
3879 		} else {
3880 			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3881 		}
3882 	}
3883 	return;
3884 }
3885 
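/*
 * Name: ql_hw_check_health
 * Function: Invoked periodically; on every 500th call it checks the
 *	ASIC temperature and the firmware heartbeat. A temperature alert
 *	or two consecutive unchanged heartbeat reads (the first miss is
 *	ignored) initiates recovery and returns -1.
 */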
3886 int
3887 ql_hw_check_health(qla_host_t *ha)
3888 {
3889 	uint32_t val;
3890 
3891 	ha->hw.health_count++;
3892 
3893 	if (ha->hw.health_count < 500)
3894 		return 0;
3895 
3896 	ha->hw.health_count = 0;
3897 
3898 	val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3899 
3900 	if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3901 		(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3902 		device_printf(ha->pci_dev, "%s: Temperature Alert"
3903 			" at ts_usecs %ld ts_reg = 0x%08x\n",
3904 			__func__, qla_get_usec_timestamp(), val);
3905 
3906 		if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_TEMP_FAILURE)
3907 			ha->hw.sp_log_stop = -1;
3908 
3909 		QL_INITIATE_RECOVERY(ha);
3910 		return -1;
3911 	}
3912 
3913 	val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3914 
3915 	if ((val != ha->hw.hbeat_value) &&
3916 		(!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3917 		ha->hw.hbeat_value = val;
3918 		ha->hw.hbeat_failure = 0;
3919 		return 0;
3920 	}
3921 
3922 	ha->hw.hbeat_failure++;
3923 
3925 	if ((ha->dbg_level & 0x8000) && (ha->hw.hbeat_failure == 1))
3926 		device_printf(ha->pci_dev, "%s: Heartbeat Failure 1 [0x%08x]\n",
3927 			__func__, val);
3928 	if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */
3929 		return 0;
3930 	else {
3931 		uint32_t peg_halt_status1;
3932 		uint32_t peg_halt_status2;
3933 
3934 		peg_halt_status1 = READ_REG32(ha, Q8_PEG_HALT_STATUS1);
3935 		peg_halt_status2 = READ_REG32(ha, Q8_PEG_HALT_STATUS2);
3936 
3937 		device_printf(ha->pci_dev,
3938 			"%s: Heartbeat Failure at ts_usecs = %ld "
3939 			"fw_heart_beat = 0x%08x "
3940 			"peg_halt_status1 = 0x%08x "
3941 			"peg_halt_status2 = 0x%08x\n",
3942 			__func__, qla_get_usec_timestamp(), val,
3943 			peg_halt_status1, peg_halt_status2);
3944 
3945 		if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_HBEAT_FAILURE)
3946 			ha->hw.sp_log_stop = -1;
3947 	}
3948 	QL_INITIATE_RECOVERY(ha);
3949 
3950 	return -1;
3951 }
3952 
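/*
 * Name: qla_init_nic_func
 * Function: Issues Q8_MBX_INIT_NIC_FUNC, registering the function for
 *	DCBX change, SFP change and IDC asynchronous events.
 */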
3953 static int
3954 qla_init_nic_func(qla_host_t *ha)
3955 {
3956         device_t                dev;
3957         q80_init_nic_func_t     *init_nic;
3958         q80_init_nic_func_rsp_t *init_nic_rsp;
3959         uint32_t                err;
3960 
3961         dev = ha->pci_dev;
3962 
3963         init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3964         bzero(init_nic, sizeof(q80_init_nic_func_t));
3965 
3966         init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3967         init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3968         init_nic->count_version |= Q8_MBX_CMD_VERSION;
3969 
3970         init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3971         init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3972         init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3973 
3974 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3975         if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3976                 (sizeof (q80_init_nic_func_t) >> 2),
3977                 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3978                 device_printf(dev, "%s: failed\n", __func__);
3979                 return -1;
3980         }
3981 
3982         init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3983 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3984 
3985         err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3986 
3987         if (err) {
3988                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3989         } else {
3990                 device_printf(dev, "%s: successful\n", __func__);
3991 	}
3992 
3993         return 0;
3994 }
3995 
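/*
 * Name: qla_stop_nic_func
 * Function: Issues Q8_MBX_STOP_NIC_FUNC, deregistering the DCBX and
 *	SFP change asynchronous events.
 */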
3996 static int
3997 qla_stop_nic_func(qla_host_t *ha)
3998 {
3999         device_t                dev;
4000         q80_stop_nic_func_t     *stop_nic;
4001         q80_stop_nic_func_rsp_t *stop_nic_rsp;
4002         uint32_t                err;
4003 
4004         dev = ha->pci_dev;
4005 
4006         stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
4007         bzero(stop_nic, sizeof(q80_stop_nic_func_t));
4008 
4009         stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
4010         stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
4011         stop_nic->count_version |= Q8_MBX_CMD_VERSION;
4012 
4013         stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
4014         stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
4015 
4016 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
4017         if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
4018                 (sizeof (q80_stop_nic_func_t) >> 2),
4019                 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
4020                 device_printf(dev, "%s: failed\n", __func__);
4021                 return -1;
4022         }
4023 
4024         stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
4025 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
4026 
4027         err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
4028 
4029         if (err) {
4030                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4031         }
4032 
4033         return 0;
4034 }
4035 
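/*
 * Name: qla_query_fw_dcbx_caps
 * Function: Queries the firmware DCBX capabilities via
 *	Q8_MBX_GET_FW_DCBX_CAPS and dump-logs the request and response.
 */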
4036 static int
4037 qla_query_fw_dcbx_caps(qla_host_t *ha)
4038 {
4039         device_t                        dev;
4040         q80_query_fw_dcbx_caps_t        *fw_dcbx;
4041         q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
4042         uint32_t                        err;
4043 
4044         dev = ha->pci_dev;
4045 
4046         fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
4047         bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
4048 
4049         fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
4050         fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
4051         fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
4052 
4053         ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
4054         if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
4055                 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
4056                 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
4057                 device_printf(dev, "%s: failed\n", __func__);
4058                 return -1;
4059         }
4060 
4061         fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
4062         ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
4063                 sizeof (q80_query_fw_dcbx_caps_rsp_t));
4064 
4065         err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
4066 
4067         if (err) {
4068                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4069         }
4070 
4071         return 0;
4072 }
4073 
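/*
 * Name: qla_idc_ack
 * Function: Sends an IDC (Inter-Driver Communication) ACK mailbox
 *	command echoing aen_mb1..aen_mb4, then waits up to 30 seconds
 *	(300 x 100ms) for the intermediate completion AEN to set
 *	ha->hw.imd_compl.
 */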
4074 static int
4075 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
4076         uint32_t aen_mb3, uint32_t aen_mb4)
4077 {
4078         device_t                dev;
4079         q80_idc_ack_t           *idc_ack;
4080         q80_idc_ack_rsp_t       *idc_ack_rsp;
4081         uint32_t                err;
4082         int                     count = 300;
4083 
4084         dev = ha->pci_dev;
4085 
4086         idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
4087         bzero(idc_ack, sizeof(q80_idc_ack_t));
4088 
4089         idc_ack->opcode = Q8_MBX_IDC_ACK;
4090         idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
4091         idc_ack->count_version |= Q8_MBX_CMD_VERSION;
4092 
4093         idc_ack->aen_mb1 = aen_mb1;
4094         idc_ack->aen_mb2 = aen_mb2;
4095         idc_ack->aen_mb3 = aen_mb3;
4096         idc_ack->aen_mb4 = aen_mb4;
4097 
4098         ha->hw.imd_compl = 0;
4099 
4100         if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
4101                 (sizeof (q80_idc_ack_t) >> 2),
4102                 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
4103                 device_printf(dev, "%s: failed\n", __func__);
4104                 return -1;
4105         }
4106 
4107         idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
4108 
4109         err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
4110 
4111         if (err) {
4112                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4113                 return(-1);
4114         }
4115 
4116         while (count && !ha->hw.imd_compl) {
4117                 qla_mdelay(__func__, 100);
4118                 count--;
4119         }
4120 
4121         if (!count)
4122                 return (-1);
4123 
4124         device_printf(dev, "%s: count %d\n", __func__, count);
4125 
4126         return (0);
4127 }
4128 
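/*
 * Name: qla_set_port_config
 * Function: Issues Q8_MBX_SET_PORT_CONFIG with the given cfg_bits. If
 *	the firmware returns an intermediate IDC response, waits up to
 *	30 seconds for ha->hw.imd_compl before declaring success.
 */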
4129 static int
4130 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
4131 {
4132         device_t                dev;
4133         q80_set_port_cfg_t      *pcfg;
4134         q80_set_port_cfg_rsp_t  *pcfg_rsp;
4135         uint32_t                err;
4136         int                     count = 300;
4137 
4138         dev = ha->pci_dev;
4139 
4140         pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
4141         bzero(pcfg, sizeof(q80_set_port_cfg_t));
4142 
4143         pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
4144         pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
4145         pcfg->count_version |= Q8_MBX_CMD_VERSION;
4146 
4147         pcfg->cfg_bits = cfg_bits;
4148 
4149         device_printf(dev, "%s: cfg_bits"
4150                 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4151                 " [0x%x, 0x%x, 0x%x]\n", __func__,
4152                 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK) >> 20),
4153                 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4154                 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1 : 0));
4155 
4156         ha->hw.imd_compl = 0;
4157 
4158         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
4159                 (sizeof (q80_set_port_cfg_t) >> 2),
4160                 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
4161                 device_printf(dev, "%s: failed\n", __func__);
4162                 return -1;
4163         }
4164 
4165         pcfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
4166 
4167         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
4168 
4169         if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
4170                 while (count && !ha->hw.imd_compl) {
4171                         qla_mdelay(__func__, 100);
4172                         count--;
4173                 }
4174                 if (count) {
4175                         device_printf(dev, "%s: count %d\n", __func__, count);
4176 
4177                         err = 0;
4178                 }
4179         }
4180 
4181         if (err) {
4182                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4183                 return(-1);
4184         }
4185 
4186         return (0);
4187 }
4188 
4189 
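/*
 * Name: qla_get_minidump_tmplt_size
 * Function: Returns the minidump template size in bytes. When the
 *	driver is built without QL_LDFLASH_FW the size is taken from the
 *	header of the compiled-in ql83xx_minidump template and the
 *	mailbox query below is never reached; otherwise the size is
 *	obtained from the firmware via Q8_MBX_GET_MINIDUMP_TMPLT_SIZE.
 */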
4190 static int
4191 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
4192 {
4193 	uint32_t			err;
4194 	device_t			dev = ha->pci_dev;
4195 	q80_config_md_templ_size_t	*md_size;
4196 	q80_config_md_templ_size_rsp_t	*md_size_rsp;
4197 
4198 #ifndef QL_LDFLASH_FW
4199 
4200 	ql_minidump_template_hdr_t *hdr;
4201 
4202 	hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
4203 	*size = hdr->size_of_template;
4204 	return (0);
4205 
4206 #endif /* #ifndef QL_LDFLASH_FW */
4207 
4208 	md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
4209 	bzero(md_size, sizeof(q80_config_md_templ_size_t));
4210 
4211 	md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
4212 	md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
4213 	md_size->count_version |= Q8_MBX_CMD_VERSION;
4214 
4215 	if (qla_mbx_cmd(ha, (uint32_t *) md_size,
4216 		(sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
4217 		(sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
4218 
4219 		device_printf(dev, "%s: failed\n", __func__);
4220 
4221 		return (-1);
4222 	}
4223 
4224 	md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
4225 
4226 	err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
4227 
4228         if (err) {
4229 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4230 		return(-1);
4231         }
4232 
4233 	*size = md_size_rsp->templ_size;
4234 
4235 	return (0);
4236 }
4237 
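/*
 * Name: qla_get_port_config
 * Function: Retrieves the current port configuration bits via
 *	Q8_MBX_GET_PORT_CONFIG and returns them in *cfg_bits.
 */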
4238 static int
4239 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
4240 {
4241         device_t                dev;
4242         q80_get_port_cfg_t      *pcfg;
4243         q80_get_port_cfg_rsp_t  *pcfg_rsp;
4244         uint32_t                err;
4245 
4246         dev = ha->pci_dev;
4247 
4248         pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
4249         bzero(pcfg, sizeof(q80_get_port_cfg_t));
4250 
4251         pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
4252         pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
4253         pcfg->count_version |= Q8_MBX_CMD_VERSION;
4254 
4255         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
4256                 (sizeof (q80_get_port_cfg_t) >> 2),
4257                 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
4258                 device_printf(dev, "%s: failed\n", __func__);
4259                 return -1;
4260         }
4261 
4262         pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
4263 
4264         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
4265 
4266         if (err) {
4267                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4268                 return(-1);
4269         }
4270 
4271         device_printf(dev, "%s: [cfg_bits, port type]"
4272                 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4273                 " [0x%x, 0x%x, 0x%x]\n", __func__,
4274                 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
4275                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK) >> 20),
4276                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4277                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1 : 0)
4278                 );
4279 
4280         *cfg_bits = pcfg_rsp->cfg_bits;
4281 
4282         return (0);
4283 }
4284 
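/*
 * Name: ql_iscsi_pdu
 * Function: Returns 0 if the mbuf chain holds a TCP segment whose
 *	source or destination port is the iSCSI well-known port (3260),
 *	-1 otherwise. Handles plain and VLAN-encapsulated IPv4 and IPv6
 *	frames.
 */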
4285 int
4286 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
4287 {
4288         struct ether_vlan_header        *eh;
4289         uint16_t                        etype;
4290         struct ip                       *ip = NULL;
4291         struct ip6_hdr                  *ip6 = NULL;
4292         struct tcphdr                   *th = NULL;
4293         uint32_t                        hdrlen;
4294         uint32_t                        offset;
4295         uint8_t                         buf[sizeof(struct ip6_hdr)];
4296 
4297         eh = mtod(mp, struct ether_vlan_header *);
4298 
4299         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4300                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4301                 etype = ntohs(eh->evl_proto);
4302         } else {
4303                 hdrlen = ETHER_HDR_LEN;
4304                 etype = ntohs(eh->evl_encap_proto);
4305         }
4306 
4307 	if (etype == ETHERTYPE_IP) {
4308 
4309 		offset = (hdrlen + sizeof (struct ip));
4310 
4311 		if (mp->m_len >= offset) {
4312                         ip = (struct ip *)(mp->m_data + hdrlen);
4313 		} else {
4314 			m_copydata(mp, hdrlen, sizeof (struct ip), buf);
4315                         ip = (struct ip *)buf;
4316 		}
4317 
4318                 if (ip->ip_p == IPPROTO_TCP) {
4319 
4320 			hdrlen += ip->ip_hl << 2;
4321 			offset = hdrlen + 4;
4322 
4323 			if (mp->m_len >= offset) {
4324 				th = (struct tcphdr *)(mp->m_data + hdrlen);
4325 			} else {
4326                                 m_copydata(mp, hdrlen, 4, buf);
4327 				th = (struct tcphdr *)buf;
4328 			}
4329                 }
4330 
4331 	} else if (etype == ETHERTYPE_IPV6) {
4332 
4333 		offset = (hdrlen + sizeof (struct ip6_hdr));
4334 
4335 		if (mp->m_len >= offset) {
4336                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
4337 		} else {
4338                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
4339                         ip6 = (struct ip6_hdr *)buf;
4340 		}
4341 
4342                 if (ip6->ip6_nxt == IPPROTO_TCP) {
4343 
4344 			hdrlen += sizeof(struct ip6_hdr);
4345 			offset = hdrlen + 4;
4346 
4347 			if (mp->m_len >= offset) {
4348 				th = (struct tcphdr *)(mp->m_data + hdrlen);
4349 			} else {
4350 				m_copydata(mp, hdrlen, 4, buf);
4351 				th = (struct tcphdr *)buf;
4352 			}
4353                 }
4354 	}
4355 
4356         if (th != NULL) {
4357                 if ((th->th_sport == htons(3260)) ||
4358                         (th->th_dport == htons(3260)))
4359                         return 0;
4360         }
4361         return (-1);
4362 }
4363 
4364 void
4365 qla_hw_async_event(qla_host_t *ha)
4366 {
4367         switch (ha->hw.aen_mb0) {
4368         case 0x8101:
4369                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
4370                         ha->hw.aen_mb3, ha->hw.aen_mb4);
4371 
4372                 break;
4373 
4374         default:
4375                 break;
4376         }
4377 
4378         return;
4379 }
4380 
4381 #ifdef QL_LDFLASH_FW
4382 static int
4383 ql_get_minidump_template(qla_host_t *ha)
4384 {
4385 	uint32_t			err;
4386 	device_t			dev = ha->pci_dev;
4387 	q80_config_md_templ_cmd_t	*md_templ;
4388 	q80_config_md_templ_cmd_rsp_t	*md_templ_rsp;
4389 
4390 	md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
4391 	bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
4392 
4393 	md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
4394 	md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
4395 	md_templ->count_version |= Q8_MBX_CMD_VERSION;
4396 
4397 	md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
4398 	md_templ->buff_size = ha->hw.dma_buf.minidump.size;
4399 
4400 	if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
4401 		(sizeof(q80_config_md_templ_cmd_t) >> 2),
4402 		 ha->hw.mbox,
4403 		(sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
4404 
4405 		device_printf(dev, "%s: failed\n", __func__);
4406 
4407 		return (-1);
4408 	}
4409 
4410 	md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
4411 
4412 	err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
4413 
4414 	if (err) {
4415 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4416 		return (-1);
4417 	}
4418 
4419 	return (0);
4420 
4421 }
4422 #endif /* #ifdef QL_LDFLASH_FW */
4423 
4424 /*
4425  * Minidump related functionality
4426  */
4427 
4428 static int ql_parse_template(qla_host_t *ha);
4429 
4430 static uint32_t ql_rdcrb(qla_host_t *ha,
4431 			ql_minidump_entry_rdcrb_t *crb_entry,
4432 			uint32_t * data_buff);
4433 
4434 static uint32_t ql_pollrd(qla_host_t *ha,
4435 			ql_minidump_entry_pollrd_t *entry,
4436 			uint32_t * data_buff);
4437 
4438 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
4439 			ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
4440 			uint32_t *data_buff);
4441 
4442 static uint32_t ql_L2Cache(qla_host_t *ha,
4443 			ql_minidump_entry_cache_t *cacheEntry,
4444 			uint32_t * data_buff);
4445 
4446 static uint32_t ql_L1Cache(qla_host_t *ha,
4447 			ql_minidump_entry_cache_t *cacheEntry,
4448 			uint32_t *data_buff);
4449 
4450 static uint32_t ql_rdocm(qla_host_t *ha,
4451 			ql_minidump_entry_rdocm_t *ocmEntry,
4452 			uint32_t *data_buff);
4453 
4454 static uint32_t ql_rdmem(qla_host_t *ha,
4455 			ql_minidump_entry_rdmem_t *mem_entry,
4456 			uint32_t *data_buff);
4457 
4458 static uint32_t ql_rdrom(qla_host_t *ha,
4459 			ql_minidump_entry_rdrom_t *romEntry,
4460 			uint32_t *data_buff);
4461 
4462 static uint32_t ql_rdmux(qla_host_t *ha,
4463 			ql_minidump_entry_mux_t *muxEntry,
4464 			uint32_t *data_buff);
4465 
4466 static uint32_t ql_rdmux2(qla_host_t *ha,
4467 			ql_minidump_entry_mux2_t *muxEntry,
4468 			uint32_t *data_buff);
4469 
4470 static uint32_t ql_rdqueue(qla_host_t *ha,
4471 			ql_minidump_entry_queue_t *queueEntry,
4472 			uint32_t *data_buff);
4473 
4474 static uint32_t ql_cntrl(qla_host_t *ha,
4475 			ql_minidump_template_hdr_t *template_hdr,
4476 			ql_minidump_entry_cntrl_t *crbEntry);
4477 
4478 
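/*
 * Name: ql_minidump_size
 * Function: Computes the capture buffer size for the configured
 *	capture mask: bit k (k >= 1) of mdump_capture_mask selects
 *	capture_size_array[k] from the template header and the selected
 *	sizes are summed. For example, a capture mask of 0x6 yields
 *	capture_size_array[1] + capture_size_array[2].
 */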
4479 static uint32_t
4480 ql_minidump_size(qla_host_t *ha)
4481 {
4482 	uint32_t i, k;
4483 	uint32_t size = 0;
4484 	ql_minidump_template_hdr_t *hdr;
4485 
4486 	hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
4487 
4488 	i = 0x2;
4489 
4490 	for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
4491 		if (i & ha->hw.mdump_capture_mask)
4492 			size += hdr->capture_size_array[k];
4493 		i = i << 1;
4494 	}
4495 	return (size);
4496 }
4497 
4498 static void
4499 ql_free_minidump_buffer(qla_host_t *ha)
4500 {
4501 	if (ha->hw.mdump_buffer != NULL) {
4502 		free(ha->hw.mdump_buffer, M_QLA83XXBUF);
4503 		ha->hw.mdump_buffer = NULL;
4504 		ha->hw.mdump_buffer_size = 0;
4505 	}
4506 	return;
4507 }
4508 
4509 static int
4510 ql_alloc_minidump_buffer(qla_host_t *ha)
4511 {
4512 	ha->hw.mdump_buffer_size = ql_minidump_size(ha);
4513 
4514 	if (!ha->hw.mdump_buffer_size)
4515 		return (-1);
4516 
4517 	ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
4518 					M_NOWAIT);
4519 
4520 	if (ha->hw.mdump_buffer == NULL)
4521 		return (-1);
4522 
4523 	return (0);
4524 }
4525 
4526 static void
4527 ql_free_minidump_template_buffer(qla_host_t *ha)
4528 {
4529 	if (ha->hw.mdump_template != NULL) {
4530 		free(ha->hw.mdump_template, M_QLA83XXBUF);
4531 		ha->hw.mdump_template = NULL;
4532 		ha->hw.mdump_template_size = 0;
4533 	}
4534 	return;
4535 }
4536 
4537 static int
4538 ql_alloc_minidump_template_buffer(qla_host_t *ha)
4539 {
4540 	ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
4541 
4542 	ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
4543 					M_QLA83XXBUF, M_NOWAIT);
4544 
4545 	if (ha->hw.mdump_template == NULL)
4546 		return (-1);
4547 
4548 	return (0);
4549 }
4550 
4551 static int
4552 ql_alloc_minidump_buffers(qla_host_t *ha)
4553 {
4554 	int ret;
4555 
4556 	ret = ql_alloc_minidump_template_buffer(ha);
4557 
4558 	if (ret)
4559 		return (ret);
4560 
4561 	ret = ql_alloc_minidump_buffer(ha);
4562 
4563 	if (ret)
4564 		ql_free_minidump_template_buffer(ha);
4565 
4566 	return (ret);
4567 }
4568 
4569 
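/*
 * Name: ql_validate_minidump_checksum
 * Function: Sums the template as 32 bit words into a 64 bit
 *	accumulator, folds the carries back in (end-around carry, as in
 *	the IP checksum), and returns the ones' complement of the
 *	result. The template embeds its own checksum word, so a valid
 *	template folds to 0xFFFFFFFF and the function returns 0.
 *	A minimal sketch with made-up values: a two word template
 *	{ 0xFFFFFFFE, 0x00000001 } sums to 0xFFFFFFFF, whose ones'
 *	complement is 0, i.e. the checksum passes.
 */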
4570 static uint32_t
4571 ql_validate_minidump_checksum(qla_host_t *ha)
4572 {
4573         uint64_t sum = 0;
4574 	int count;
4575 	uint32_t *template_buff;
4576 
4577 	count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
4578 	template_buff = ha->hw.dma_buf.minidump.dma_b;
4579 
4580 	while (count-- > 0) {
4581 		sum += *template_buff++;
4582 	}
4583 
4584 	while (sum >> 32) {
4585 		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
4586 	}
4587 
4588 	return (~sum);
4589 }
4590 
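/*
 * Name: ql_minidump_init
 * Function: Queries the template size, retrieves the minidump template
 *	(from the firmware when QL_LDFLASH_FW is defined, otherwise the
 *	compiled-in ql83xx_minidump), validates its checksum and
 *	allocates the template and capture buffers.
 */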
4591 int
4592 ql_minidump_init(qla_host_t *ha)
4593 {
4594 	int		ret = 0;
4595 	uint32_t	template_size = 0;
4596 	device_t	dev = ha->pci_dev;
4597 
4598 	/*
4599 	 * Get Minidump Template Size
4600 	 */
4601 	ret = qla_get_minidump_tmplt_size(ha, &template_size);
4602 
4603 	if (ret || (template_size == 0)) {
4604 		device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
4605 			template_size);
4606 		return (-1);
4607 	}
4608 
4609 	/*
4610 	 * Allocate Memory for Minidump Template
4611 	 */
4612 
4613 	ha->hw.dma_buf.minidump.alignment = 8;
4614 	ha->hw.dma_buf.minidump.size = template_size;
4615 
4616 #ifdef QL_LDFLASH_FW
4617 	if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
4618 
4619 		device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
4620 
4621 		return (-1);
4622 	}
4623 	ha->hw.dma_buf.flags.minidump = 1;
4624 
4625 	/*
4626 	 * Retrieve Minidump Template
4627 	 */
4628 	ret = ql_get_minidump_template(ha);
4629 #else
4630 	ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
4631 
4632 #endif /* #ifdef QL_LDFLASH_FW */
4633 
4634 	if (ret == 0) {
4635 
4636 		ret = ql_validate_minidump_checksum(ha);
4637 
4638 		if (ret == 0) {
4639 
4640 			ret = ql_alloc_minidump_buffers(ha);
4641 
4642 			if (ret == 0)
4643 				ha->hw.mdump_init = 1;
4644 			else
4645 				device_printf(dev,
4646 					"%s: ql_alloc_minidump_buffers"
4647 					" failed\n", __func__);
4648 		} else {
4649 			device_printf(dev, "%s: ql_validate_minidump_checksum"
4650 				" failed\n", __func__);
4651 		}
4652 	} else {
4653 		device_printf(dev, "%s: ql_get_minidump_template failed\n",
4654 			 __func__);
4655 	}
4656 
4657 	if (ret)
4658 		ql_minidump_free(ha);
4659 
4660 	return (ret);
4661 }
4662 
4663 static void
4664 ql_minidump_free(qla_host_t *ha)
4665 {
4666 	ha->hw.mdump_init = 0;
4667 	if (ha->hw.dma_buf.flags.minidump) {
4668 		ha->hw.dma_buf.flags.minidump = 0;
4669 		ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
4670 	}
4671 
4672 	ql_free_minidump_template_buffer(ha);
4673 	ql_free_minidump_buffer(ha);
4674 
4675 	return;
4676 }
4677 
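/*
 * Name: ql_minidump
 * Function: Captures the minidump at most once (gated by mdump_done):
 *	the capture is bracketed by ql_stop_sequence() and
 *	ql_start_sequence(), the template is copied out of the DMA
 *	buffer, and ql_parse_template() fills the capture buffer.
 */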
4678 void
4679 ql_minidump(qla_host_t *ha)
4680 {
4681 	if (!ha->hw.mdump_init)
4682 		return;
4683 
4684 	if (ha->hw.mdump_done)
4685 		return;
4686 	ha->hw.mdump_usec_ts = qla_get_usec_timestamp();
4687 	ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
4688 
4689 	bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
4690 	bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
4691 
4692 	bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
4693 		ha->hw.mdump_template_size);
4694 
4695 	ql_parse_template(ha);
4696 
4697 	ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
4698 
4699 	ha->hw.mdump_done = 1;
4700 
4701 	return;
4702 }
4703 
4704 
4705 /*
4706  * helper routines
4707  */
4708 static void
4709 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
4710 {
4711 	if (esize != entry->hdr.entry_capture_size) {
4712 		entry->hdr.entry_capture_size = esize;
4713 		entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
4714 	}
4715 	return;
4716 }
4717 
4718 
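/*
 * Name: ql_parse_template
 * Function: Walks the minidump template entry by entry, dispatching on
 *	entry_type and appending each entry's capture data to
 *	ha->hw.mdump_buffer. Entries whose capture mask does not match,
 *	or whose data would overflow the capture buffer, are marked
 *	QL_DBG_SKIPPED_FLAG and skipped.
 */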
4719 static int
4720 ql_parse_template(qla_host_t *ha)
4721 {
4722 	uint32_t num_of_entries, buff_level, e_cnt, esize;
4723 	uint32_t end_cnt, rv = 0;
4724 	char *dump_buff, *dbuff;
4725 	int sane_start = 0, sane_end = 0;
4726 	ql_minidump_template_hdr_t *template_hdr;
4727 	ql_minidump_entry_t *entry;
4728 	uint32_t capture_mask;
4729 	uint32_t dump_size;
4730 
4731 	/* Setup parameters */
4732 	template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4733 
4734 	if (template_hdr->entry_type == TLHDR)
4735 		sane_start = 1;
4736 
4737 	dump_buff = (char *) ha->hw.mdump_buffer;
4738 
4739 	num_of_entries = template_hdr->num_of_entries;
4740 
4741 	entry = (ql_minidump_entry_t *) ((char *)template_hdr
4742 			+ template_hdr->first_entry_offset );
4743 
4744 	template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4745 		template_hdr->ocm_window_array[ha->pci_func];
4746 	template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4747 
4748 	capture_mask = ha->hw.mdump_capture_mask;
4749 	dump_size = ha->hw.mdump_buffer_size;
4750 
4751 	template_hdr->driver_capture_mask = capture_mask;
4752 
4753 	QL_DPRINT80(ha, (ha->pci_dev,
4754 		"%s: sane_start = %d num_of_entries = %d "
4755 		"capture_mask = 0x%x dump_size = %d \n",
4756 		__func__, sane_start, num_of_entries, capture_mask, dump_size));
4757 
4758 	for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4759 
4760 		/*
4761 		 * If the entry's capture mask does not intersect the driver's
4762 		 * capture mask, mark it skipped in driver_flags and move on.
4763 		 */
4764 
4765 		if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4766 
4767 			entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4768 			entry = (ql_minidump_entry_t *) ((char *) entry
4769 					+ entry->hdr.entry_size);
4770 			continue;
4771 		}
4772 
4773 		/*
4774 		 * Guard against a capture buffer that is too small to hold
4775 		 * all of the entries selected by the capture mask. Ideally
4776 		 * the buffer contents would be emptied to a file before
4777 		 * processing the next entry; since that is not done here,
4778 		 * any entry whose capture data would overflow the buffer is
4779 		 * simply marked skipped in its driver_flags and its data is
4780 		 * not captured.
4781 		 */
4782 		if (entry->hdr.entry_capture_size != 0) {
4783 			if ((buff_level + entry->hdr.entry_capture_size) >
4784 				dump_size) {
4785 				/*  Try to recover by emptying buffer to file */
4786 				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4787 				entry = (ql_minidump_entry_t *) ((char *) entry
4788 						+ entry->hdr.entry_size);
4789 				continue;
4790 			}
4791 		}
4792 
4793 		/*
4794 		 * Decode the entry type and process it accordingly
4795 		 */
4796 
4797 		switch (entry->hdr.entry_type) {
4798 		case RDNOP:
4799 			break;
4800 
4801 		case RDEND:
4802 			if (sane_end == 0) {
4803 				end_cnt = e_cnt;
4804 			}
4805 			sane_end++;
4806 			break;
4807 
4808 		case RDCRB:
4809 			dbuff = dump_buff + buff_level;
4810 			esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
4811 			ql_entry_err_chk(entry, esize);
4812 			buff_level += esize;
4813 			break;
4814 
4815                 case POLLRD:
4816                         dbuff = dump_buff + buff_level;
4817                         esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
4818                         ql_entry_err_chk(entry, esize);
4819                         buff_level += esize;
4820                         break;
4821 
4822                 case POLLRDMWR:
4823                         dbuff = dump_buff + buff_level;
4824                         esize = ql_pollrd_modify_write(ha, (void *)entry,
4825 					(void *)dbuff);
4826                         ql_entry_err_chk(entry, esize);
4827                         buff_level += esize;
4828                         break;
4829 
4830 		case L2ITG:
4831 		case L2DTG:
4832 		case L2DAT:
4833 		case L2INS:
4834 			dbuff = dump_buff + buff_level;
4835 			esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
4836 			if (esize == -1) {
4837 				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4838 			} else {
4839 				ql_entry_err_chk(entry, esize);
4840 				buff_level += esize;
4841 			}
4842 			break;
4843 
4844 		case L1DAT:
4845 		case L1INS:
4846 			dbuff = dump_buff + buff_level;
4847 			esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
4848 			ql_entry_err_chk(entry, esize);
4849 			buff_level += esize;
4850 			break;
4851 
4852 		case RDOCM:
4853 			dbuff = dump_buff + buff_level;
4854 			esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
4855 			ql_entry_err_chk(entry, esize);
4856 			buff_level += esize;
4857 			break;
4858 
4859 		case RDMEM:
4860 			dbuff = dump_buff + buff_level;
4861 			esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
4862 			ql_entry_err_chk(entry, esize);
4863 			buff_level += esize;
4864 			break;
4865 
4866 		case BOARD:
4867 		case RDROM:
4868 			dbuff = dump_buff + buff_level;
4869 			esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
4870 			ql_entry_err_chk(entry, esize);
4871 			buff_level += esize;
4872 			break;
4873 
4874 		case RDMUX:
4875 			dbuff = dump_buff + buff_level;
4876 			esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
4877 			ql_entry_err_chk(entry, esize);
4878 			buff_level += esize;
4879 			break;
4880 
4881                 case RDMUX2:
4882                         dbuff = dump_buff + buff_level;
4883                         esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4884                         ql_entry_err_chk(entry, esize);
4885                         buff_level += esize;
4886                         break;
4887 
4888 		case QUEUE:
4889 			dbuff = dump_buff + buff_level;
4890 			esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4891 			ql_entry_err_chk(entry, esize);
4892 			buff_level += esize;
4893 			break;
4894 
4895 		case CNTRL:
4896 			if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4897 				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4898 			}
4899 			break;
4900 		default:
4901 			entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4902 			break;
4903 		}
4904 		/*  next entry in the template */
4905 		entry = (ql_minidump_entry_t *) ((char *) entry
4906 						+ entry->hdr.entry_size);
4907 	}
4908 
4909 	if (!sane_start || (sane_end > 1)) {
4910 		device_printf(ha->pci_dev,
4911 			"\n%s: Template configuration error. Check Template\n",
4912 			__func__);
4913 	}
4914 
4915 	QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4916 		__func__, template_hdr->num_of_entries));
4917 
4918 	return 0;
4919 }
4920 
4921 /*
4922  * Read CRB operation.
4923  */
4924 static uint32_t
4925 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
4926 	uint32_t * data_buff)
4927 {
4928 	int loop_cnt;
4929 	int ret;
4930 	uint32_t op_count, addr, stride, value = 0;
4931 
4932 	addr = crb_entry->addr;
4933 	op_count = crb_entry->op_count;
4934 	stride = crb_entry->addr_stride;
4935 
4936 	for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4937 
4938 		ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4939 
4940 		if (ret)
4941 			return (0);
4942 
4943 		*data_buff++ = addr;
4944 		*data_buff++ = value;
4945 		addr = addr + stride;
4946 	}
4947 
4948 	/*
4949 	 * Return the number of bytes written: an (address, value) pair per op.
4950 	 */
4951 	return (op_count * (2 * sizeof(uint32_t)));
4952 }
4953 
4954 /*
4955  * Handle L2 Cache.
4956  */
4957 
4958 static uint32_t
4959 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4960 	uint32_t * data_buff)
4961 {
4962 	int i, k;
4963 	int loop_cnt;
4964 	int ret;
4965 
4966 	uint32_t read_value;
4967 	uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4968 	uint32_t tag_value, read_cnt;
4969 	volatile uint8_t cntl_value_r;
4970 	long timeout;
4971 	uint32_t data;
4972 
4973 	loop_cnt = cacheEntry->op_count;
4974 
4975 	read_addr = cacheEntry->read_addr;
4976 	cntrl_addr = cacheEntry->control_addr;
4977 	cntl_value_w = (uint32_t) cacheEntry->write_value;
4978 
4979 	tag_reg_addr = cacheEntry->tag_reg_addr;
4980 
4981 	tag_value = cacheEntry->init_tag_value;
4982 	read_cnt = cacheEntry->read_addr_cnt;
4983 
4984 	for (i = 0; i < loop_cnt; i++) {
4985 
4986 		ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4987 		if (ret)
4988 			return (0);
4989 
4990 		if (cacheEntry->write_value != 0) {
4991 
4992 			ret = ql_rdwr_indreg32(ha, cntrl_addr,
4993 					&cntl_value_w, 0);
4994 			if (ret)
4995 				return (0);
4996 		}
4997 
4998 		if (cacheEntry->poll_mask != 0) {
4999 
5000 			timeout = cacheEntry->poll_wait;
5001 
5002 			ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
5003 			if (ret)
5004 				return (0);
5005 
5006 			cntl_value_r = (uint8_t)data;
5007 
5008 			while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
5009 
5010 				if (timeout) {
5011 					qla_mdelay(__func__, 1);
5012 					timeout--;
5013 				} else
5014 					break;
5015 
5016 				ret = ql_rdwr_indreg32(ha, cntrl_addr,
5017 						&data, 1);
5018 				if (ret)
5019 					return (0);
5020 
5021 				cntl_value_r = (uint8_t)data;
5022 			}
5023 			if (!timeout) {
5024 				/*
5025 				 * Poll timed out: the cache dump for this
5026 				 * entry failed. Return -1 so the caller
5027 				 * can mark the entry skipped and report
5028 				 * the error through the driver specific
5029 				 * fields in the template header.
5030 				 */
5031 				return (-1);
5032 			}
5033 		}
5034 
5035 		addr = read_addr;
5036 		for (k = 0; k < read_cnt; k++) {
5037 
5038 			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5039 			if (ret)
5040 				return (0);
5041 
5042 			*data_buff++ = read_value;
5043 			addr += cacheEntry->read_addr_stride;
5044 		}
5045 
5046 		tag_value += cacheEntry->tag_value_stride;
5047 	}
5048 
5049 	return (read_cnt * loop_cnt * sizeof(uint32_t));
5050 }
5051 
5052 /*
5053  * Handle L1 Cache.
5054  */
5055 
5056 static uint32_t
5057 ql_L1Cache(qla_host_t *ha,
5058 	ql_minidump_entry_cache_t *cacheEntry,
5059 	uint32_t *data_buff)
5060 {
5061 	int ret;
5062 	int i, k;
5063 	int loop_cnt;
5064 
5065 	uint32_t read_value;
5066 	uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
5067 	uint32_t tag_value, read_cnt;
5068 	uint32_t cntl_value_w;
5069 
5070 	loop_cnt = cacheEntry->op_count;
5071 
5072 	read_addr = cacheEntry->read_addr;
5073 	cntrl_addr = cacheEntry->control_addr;
5074 	cntl_value_w = (uint32_t) cacheEntry->write_value;
5075 
5076 	tag_reg_addr = cacheEntry->tag_reg_addr;
5077 
5078 	tag_value = cacheEntry->init_tag_value;
5079 	read_cnt = cacheEntry->read_addr_cnt;
5080 
5081 	for (i = 0; i < loop_cnt; i++) {
5082 
5083 		ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
5084 		if (ret)
5085 			return (0);
5086 
5087 		ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
5088 		if (ret)
5089 			return (0);
5090 
5091 		addr = read_addr;
5092 		for (k = 0; k < read_cnt; k++) {
5093 
5094 			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5095 			if (ret)
5096 				return (0);
5097 
5098 			*data_buff++ = read_value;
5099 			addr += cacheEntry->read_addr_stride;
5100 		}
5101 
5102 		tag_value += cacheEntry->tag_value_stride;
5103 	}
5104 
5105 	return (read_cnt * loop_cnt * sizeof(uint32_t));
5106 }
5107 
5108 /*
5109  * Reading OCM memory
5110  */
5111 
5112 static uint32_t
5113 ql_rdocm(qla_host_t *ha,
5114 	ql_minidump_entry_rdocm_t *ocmEntry,
5115 	uint32_t *data_buff)
5116 {
5117 	int i, loop_cnt;
5118 	volatile uint32_t addr;
5119 	volatile uint32_t value;
5120 
5121 	addr = ocmEntry->read_addr;
5122 	loop_cnt = ocmEntry->op_count;
5123 
5124 	for (i = 0; i < loop_cnt; i++) {
5125 		value = READ_REG32(ha, addr);
5126 		*data_buff++ = value;
5127 		addr += ocmEntry->read_addr_stride;
5128 	}
5129 	return (loop_cnt * sizeof(value));
5130 }
5131 
5132 /*
5133  * Read memory
5134  */
5135 
5136 static uint32_t
5137 ql_rdmem(qla_host_t *ha,
5138 	ql_minidump_entry_rdmem_t *mem_entry,
5139 	uint32_t *data_buff)
5140 {
5141 	int ret;
5142         int i, loop_cnt;
5143         volatile uint32_t addr;
5144 	q80_offchip_mem_val_t val;
5145 
5146         addr = mem_entry->read_addr;
5147 
5148 	/* read_data_size is in bytes; each off-chip read yields 16 bytes */
5149         loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
5150 
5151         for (i = 0; i < loop_cnt; i++) {
5152 
5153 		ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
5154 		if (ret)
5155 			return (0);
5156 
5157                 *data_buff++ = val.data_lo;
5158                 *data_buff++ = val.data_hi;
5159                 *data_buff++ = val.data_ulo;
5160                 *data_buff++ = val.data_uhi;
5161 
5162                 addr += (sizeof(uint32_t) * 4);
5163         }
5164 
5165         return (loop_cnt * (sizeof(uint32_t) * 4));
5166 }
5167 
5168 /*
5169  * Read Rom
5170  */
5171 
5172 static uint32_t
5173 ql_rdrom(qla_host_t *ha,
5174 	ql_minidump_entry_rdrom_t *romEntry,
5175 	uint32_t *data_buff)
5176 {
5177 	int ret;
5178 	int i, loop_cnt;
5179 	uint32_t addr;
5180 	uint32_t value;
5181 
5182 	addr = romEntry->read_addr;
5183 	loop_cnt = romEntry->read_data_size; /* This is size in bytes */
5184 	loop_cnt /= sizeof(value);
5185 
5186 	for (i = 0; i < loop_cnt; i++) {
5187 
5188 		ret = ql_rd_flash32(ha, addr, &value);
5189 		if (ret)
5190 			return (0);
5191 
5192 		*data_buff++ = value;
5193 		addr += sizeof(value);
5194 	}
5195 
5196 	return (loop_cnt * sizeof(value));
5197 }
5198 
5199 /*
5200  * Read MUX data
5201  */
5202 
5203 static uint32_t
5204 ql_rdmux(qla_host_t *ha,
5205 	ql_minidump_entry_mux_t *muxEntry,
5206 	uint32_t *data_buff)
5207 {
5208 	int ret;
5209 	int loop_cnt;
5210 	uint32_t read_value, sel_value;
5211 	uint32_t read_addr, select_addr;
5212 
5213 	select_addr = muxEntry->select_addr;
5214 	sel_value = muxEntry->select_value;
5215 	read_addr = muxEntry->read_addr;
5216 
5217 	for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
5218 
5219 		ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
5220 		if (ret)
5221 			return (0);
5222 
5223 		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5224 		if (ret)
5225 			return (0);
5226 
5227 		*data_buff++ = sel_value;
5228 		*data_buff++ = read_value;
5229 
5230 		sel_value += muxEntry->select_value_stride;
5231 	}
5232 
5233 	return (loop_cnt * (2 * sizeof(uint32_t)));
5234 }
5235 
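/*
 * Read MUX2 data: each iteration writes select_value_1 to
 * select_addr_1, then its masked value to select_addr_2, and reads
 * read_addr; the sequence is repeated with select_value_2. A
 * (select, data) pair is recorded for each read, i.e. four words per
 * iteration.
 */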
5236 static uint32_t
5237 ql_rdmux2(qla_host_t *ha,
5238 	ql_minidump_entry_mux2_t *muxEntry,
5239 	uint32_t *data_buff)
5240 {
5241 	int ret;
5242         int loop_cnt;
5243 
5244         uint32_t select_addr_1, select_addr_2;
5245         uint32_t select_value_1, select_value_2;
5246         uint32_t select_value_count, select_value_mask;
5247         uint32_t read_addr, read_value;
5248 
5249         select_addr_1 = muxEntry->select_addr_1;
5250         select_addr_2 = muxEntry->select_addr_2;
5251         select_value_1 = muxEntry->select_value_1;
5252         select_value_2 = muxEntry->select_value_2;
5253         select_value_count = muxEntry->select_value_count;
5254         select_value_mask  = muxEntry->select_value_mask;
5255 
5256         read_addr = muxEntry->read_addr;
5257 
5258         for (loop_cnt = 0; loop_cnt < select_value_count;
5259 		loop_cnt++) {
5260 
5261                 uint32_t temp_sel_val;
5262 
5263 		ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
5264 		if (ret)
5265 			return (0);
5266 
5267                 temp_sel_val = select_value_1 & select_value_mask;
5268 
5269 		ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5270 		if (ret)
5271 			return (0);
5272 
5273 		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5274 		if (ret)
5275 			return (0);
5276 
5277                 *data_buff++ = temp_sel_val;
5278                 *data_buff++ = read_value;
5279 
5280 		ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
5281 		if (ret)
5282 			return (0);
5283 
5284                 temp_sel_val = select_value_2 & select_value_mask;
5285 
5286 		ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5287 		if (ret)
5288 			return (0);
5289 
5290 		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5291 		if (ret)
5292 			return (0);
5293 
5294                 *data_buff++ = temp_sel_val;
5295                 *data_buff++ = read_value;
5296 
5297                 select_value_1 += muxEntry->select_value_stride;
5298                 select_value_2 += muxEntry->select_value_stride;
5299         }
5300 
5301         return (loop_cnt * (4 * sizeof(uint32_t)));
5302 }
5303 
5304 /*
5305  * Handling Queue State Reads.
5306  */
5307 
5308 static uint32_t
5309 ql_rdqueue(qla_host_t *ha,
5310 	ql_minidump_entry_queue_t *queueEntry,
5311 	uint32_t *data_buff)
5312 {
5313 	int ret;
5314 	int loop_cnt, k;
5315 	uint32_t read_value;
5316 	uint32_t read_addr, read_stride, select_addr;
5317 	uint32_t queue_id, read_cnt;
5318 
5319 	read_cnt = queueEntry->read_addr_cnt;
5320 	read_stride = queueEntry->read_addr_stride;
5321 	select_addr = queueEntry->select_addr;
5322 
5323 	for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
5324 		loop_cnt++) {
5325 
5326 		ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
5327 		if (ret)
5328 			return (0);
5329 
5330 		read_addr = queueEntry->read_addr;
5331 
5332 		for (k = 0; k < read_cnt; k++) {
5333 
5334 			ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5335 			if (ret)
5336 				return (0);
5337 
5338 			*data_buff++ = read_value;
5339 			read_addr += read_stride;
5340 		}
5341 
5342 		queue_id += queueEntry->queue_id_stride;
5343 	}
5344 
5345 	return (loop_cnt * (read_cnt * sizeof(uint32_t)));
5346 }
5347 
5348 /*
5349  * Handling control entries.
5350  */
5351 
5352 static uint32_t
5353 ql_cntrl(qla_host_t *ha,
5354 	ql_minidump_template_hdr_t *template_hdr,
5355 	ql_minidump_entry_cntrl_t *crbEntry)
5356 {
5357 	int ret;
5358 	int count;
5359 	uint32_t opcode, read_value, addr, entry_addr;
5360 	long timeout;
5361 
5362 	entry_addr = crbEntry->addr;
5363 
5364 	for (count = 0; count < crbEntry->op_count; count++) {
5365 		opcode = crbEntry->opcode;
5366 
5367 		if (opcode & QL_DBG_OPCODE_WR) {
5368 
5369                 	ret = ql_rdwr_indreg32(ha, entry_addr,
5370 					&crbEntry->value_1, 0);
5371 			if (ret)
5372 				return (0);
5373 
5374 			opcode &= ~QL_DBG_OPCODE_WR;
5375 		}
5376 
5377 		if (opcode & QL_DBG_OPCODE_RW) {
5378 
5379                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5380 			if (ret)
5381 				return (0);
5382 
5383                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5384 			if (ret)
5385 				return (0);
5386 
5387 			opcode &= ~QL_DBG_OPCODE_RW;
5388 		}
5389 
5390 		if (opcode & QL_DBG_OPCODE_AND) {
5391 
5392                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5393 			if (ret)
5394 				return (0);
5395 
5396 			read_value &= crbEntry->value_2;
5397 			opcode &= ~QL_DBG_OPCODE_AND;
5398 
5399 			if (opcode & QL_DBG_OPCODE_OR) {
5400 				read_value |= crbEntry->value_3;
5401 				opcode &= ~QL_DBG_OPCODE_OR;
5402 			}
5403 
5404                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5405 			if (ret)
5406 				return (0);
5407 		}
5408 
5409 		if (opcode & QL_DBG_OPCODE_OR) {
5410 
5411                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5412 			if (ret)
5413 				return (0);
5414 
5415 			read_value |= crbEntry->value_3;
5416 
5417                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5418 			if (ret)
5419 				return (0);
5420 
5421 			opcode &= ~QL_DBG_OPCODE_OR;
5422 		}
5423 
5424 		if (opcode & QL_DBG_OPCODE_POLL) {
5425 
5426 			opcode &= ~QL_DBG_OPCODE_POLL;
5427 			timeout = crbEntry->poll_timeout;
5428 			addr = entry_addr;
5429 
5430                 	ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5431 			if (ret)
5432 				return (0);
5433 
5434 			while ((read_value & crbEntry->value_2)
5435 				!= crbEntry->value_1) {
5436 
5437 				if (timeout) {
5438 					qla_mdelay(__func__, 1);
5439 					timeout--;
5440 				} else
5441 					break;
5442 
5443                 		ret = ql_rdwr_indreg32(ha, addr,
5444 						&read_value, 1);
5445 				if (ret)
5446 					return (0);
5447 			}
5448 
5449 			if (!timeout) {
5450 				/*
5451 				 * Poll timed out: the control operation
5452 				 * failed. Return -1 so that the caller
5453 				 * can mark this entry as skipped and
5454 				 * report the error through the driver
5455 				 * specific fields in the template
5456 				 * header.
5457 				 */
5458 				return (-1);
5459 			}
5460 		}
5461 
5462 		if (opcode & QL_DBG_OPCODE_RDSTATE) {
5463 			/*
5464 			 * decide which address to use.
5465 			 */
5466 			if (crbEntry->state_index_a) {
5467 				addr = template_hdr->saved_state_array[
5468 						crbEntry->state_index_a];
5469 			} else {
5470 				addr = entry_addr;
5471 			}
5472 
5473                 	ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5474 			if (ret)
5475 				return (0);
5476 
5477 			template_hdr->saved_state_array[crbEntry->state_index_v]
5478 					= read_value;
5479 			opcode &= ~QL_DBG_OPCODE_RDSTATE;
5480 		}
5481 
5482 		if (opcode & QL_DBG_OPCODE_WRSTATE) {
5483 			/*
5484 			 * decide which value to use.
5485 			 */
5486 			if (crbEntry->state_index_v) {
5487 				read_value = template_hdr->saved_state_array[
5488 						crbEntry->state_index_v];
5489 			} else {
5490 				read_value = crbEntry->value_1;
5491 			}
5492 			/*
5493 			 * decide which address to use.
5494 			 */
5495 			if (crbEntry->state_index_a) {
5496 				addr = template_hdr->saved_state_array[
5497 						crbEntry-> state_index_a];
5498 			} else {
5499 				addr = entry_addr;
5500 			}
5501 
5502                 	ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
5503 			if (ret)
5504 				return (0);
5505 
5506 			opcode &= ~QL_DBG_OPCODE_WRSTATE;
5507 		}
5508 
5509 		if (opcode & QL_DBG_OPCODE_MDSTATE) {
5510 			/*  Read value from saved state using index */
5511 			read_value = template_hdr->saved_state_array[
5512 						crbEntry->state_index_v];
5513 
5514 			read_value <<= crbEntry->shl; /*Shift left operation */
5515 			read_value >>= crbEntry->shr; /*Shift right operation */
5516 
5517 			if (crbEntry->value_2) {
5518 				/* check if AND mask is provided */
5519 				read_value &= crbEntry->value_2;
5520 			}
5521 
5522 			read_value |= crbEntry->value_3; /* OR operation */
5523 			read_value += crbEntry->value_1; /* increment op */
5524 
5525 			/* Write value back to state area. */
5526 
5527 			template_hdr->saved_state_array[crbEntry->state_index_v]
5528 					= read_value;
5529 			opcode &= ~QL_DBG_OPCODE_MDSTATE;
5530 		}
5531 
5532 		entry_addr += crbEntry->addr_stride;
5533 	}
5534 
5535 	return (0);
5536 }
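
#if 0
/*
 * Illustrative only, never compiled: a hypothetical control entry that
 * polls a register until bit 0 reads back as set, giving up after
 * roughly 100ms.  The field names follow ql_minidump_entry_cntrl_t as
 * used above; the address and values are invented for the example.
 */
static const ql_minidump_entry_cntrl_t example_poll_entry = {
	.addr		= 0x3500,	/* hypothetical CRB address */
	.addr_stride	= 0,
	.op_count	= 1,
	.opcode		= QL_DBG_OPCODE_POLL,
	.value_1	= 0x1,		/* value to wait for */
	.value_2	= 0x1,		/* poll mask */
	.poll_timeout	= 100,		/* up to 100 x 1ms delays */
};
#endif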

/*
 * Handling rd poll entry.
 */

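/*
 * Sketch of each of the op_count iterations below: write select_value
 * to select_addr, poll select_addr until (value & mask) != 0 or 'poll'
 * reads have gone by, then read one datum from read_addr.  The
 * (select_value, datum) pair is appended to the capture buffer and
 * select_value is advanced by select_value_stride.
 */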
static uint32_t
ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
	uint32_t *data_buff)
{
	int ret;
	int loop_cnt;
	uint32_t op_count, select_addr, select_value_stride, select_value;
	uint32_t read_addr, poll, mask, data_size, data;
	uint32_t wait_count = 0;

	select_addr		= entry->select_addr;
	read_addr		= entry->read_addr;
	select_value		= entry->select_value;
	select_value_stride	= entry->select_value_stride;
	op_count		= entry->op_count;
	poll			= entry->poll;
	mask			= entry->mask;
	data_size		= entry->data_size;

	for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {

		ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
		if (ret)
			return (0);

		wait_count = 0;

		while (wait_count < poll) {
			uint32_t temp;

			ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
			if (ret)
				return (0);

			if ((temp & mask) != 0)
				break;

			wait_count++;
		}

		if (wait_count == poll) {
			device_printf(ha->pci_dev,
				"%s: Error in processing entry\n", __func__);
			device_printf(ha->pci_dev,
				"%s: wait_count <0x%x> poll <0x%x>\n",
				__func__, wait_count, poll);
			return (0);
		}

		ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
		if (ret)
			return (0);

		*data_buff++ = select_value;
		*data_buff++ = data;
		select_value += select_value_stride;
	}

	/* Return the number of bytes written to the capture buffer. */
	return (loop_cnt * (2 * sizeof(uint32_t)));
}

/*
 * Handling rd modify write poll entry.
 */

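/*
 * Sketch of the sequence below: write value_1 to addr_1 and poll addr_1
 * until (value & mask) != 0; on success, read addr_2, AND the value
 * with modify_mask and write it back, then write value_2 to addr_1 and
 * poll addr_1 once more.  The (addr_2, modified data) pair is appended
 * to the capture buffer.
 */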
static uint32_t
ql_pollrd_modify_write(qla_host_t *ha,
	ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
	uint32_t *data_buff)
{
	int ret;
	uint32_t addr_1, addr_2, value_1, value_2, data;
	uint32_t poll, mask, data_size, modify_mask;
	uint32_t wait_count = 0;

	addr_1		= entry->addr_1;
	addr_2		= entry->addr_2;
	value_1		= entry->value_1;
	value_2		= entry->value_2;

	poll		= entry->poll;
	mask		= entry->mask;
	modify_mask	= entry->modify_mask;
	data_size	= entry->data_size;

	ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
	if (ret)
		return (0);

	wait_count = 0;
	while (wait_count < poll) {
		uint32_t temp;

		ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
		if (ret)
			return (0);

		if ((temp & mask) != 0)
			break;

		wait_count++;
	}

	if (wait_count == poll) {
		device_printf(ha->pci_dev, "%s: Error in processing entry\n",
			__func__);
		/*
		 * Nothing was captured, so report zero bytes written
		 * rather than advancing the caller past stale buffer
		 * contents.
		 */
		return (0);
	}

	ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
	if (ret)
		return (0);

	data = (data & modify_mask);

	ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
	if (ret)
		return (0);

	ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
	if (ret)
		return (0);

	/* Poll again. */
	wait_count = 0;
	while (wait_count < poll) {
		uint32_t temp;

		ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
		if (ret)
			return (0);

		if ((temp & mask) != 0)
			break;

		wait_count++;
	}

	*data_buff++ = addr_2;
	*data_buff++ = data;

	/* Return the number of bytes written to the capture buffer. */
	return (2 * sizeof(uint32_t));
}
