xref: /freebsd/sys/dev/qlxgbe/ql_hw.c (revision 99429157e8615dc3b7f11afbe3ed92de7476a5db)
1 /*
2  * Copyright (c) 2013-2016 Qlogic Corporation
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  * File: ql_hw.c
30  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31  * Content: Contains hardware-dependent functions
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include "ql_os.h"
38 #include "ql_hw.h"
39 #include "ql_def.h"
40 #include "ql_inline.h"
41 #include "ql_ver.h"
42 #include "ql_glbl.h"
43 #include "ql_dbg.h"
44 #include "ql_minidump.h"
45 
46 /*
47  * Static Functions
48  */
49 
50 static void qla_del_rcv_cntxt(qla_host_t *ha);
51 static int qla_init_rcv_cntxt(qla_host_t *ha);
52 static void qla_del_xmt_cntxt(qla_host_t *ha);
53 static int qla_init_xmt_cntxt(qla_host_t *ha);
54 static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
55 	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
56 static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
57 	uint32_t num_intrs, uint32_t create);
58 static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
59 static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
60 	int tenable, int rcv);
61 static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
62 static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);
63 
64 static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
65 		uint8_t *hdr);
66 static int qla_hw_add_all_mcast(qla_host_t *ha);
67 static int qla_hw_del_all_mcast(qla_host_t *ha);
68 static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);
69 
70 static int qla_init_nic_func(qla_host_t *ha);
71 static int qla_stop_nic_func(qla_host_t *ha);
72 static int qla_query_fw_dcbx_caps(qla_host_t *ha);
73 static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
74 static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
75 static void qla_get_quick_stats(qla_host_t *ha);
76 static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
77 static int qla_get_cam_search_mode(qla_host_t *ha);
78 
79 static void ql_minidump_free(qla_host_t *ha);
80 
81 
82 static int
83 qla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
84 {
85         int err = 0, ret;
86         qla_host_t *ha;
87 	uint32_t i;
88 
89         err = sysctl_handle_int(oidp, &ret, 0, req);
90 
91         if (err || !req->newptr)
92                 return (err);
93 
94         if (ret == 1) {
95 
96                 ha = (qla_host_t *)arg1;
97 
98 		for (i = 0; i < ha->hw.num_sds_rings; i++) {
99 
100 			device_printf(ha->pci_dev,
101 				"%s: sds_ring[%d] = %p\n", __func__,i,
102 				(void *)ha->hw.sds[i].intr_count);
103 
104 			device_printf(ha->pci_dev,
105 				"%s: sds_ring[%d].spurious_intr_count = %p\n",
106 				__func__,
107 				i, (void *)ha->hw.sds[i].spurious_intr_count);
108 
109 			device_printf(ha->pci_dev,
110 				"%s: sds_ring[%d].rx_free = %d\n", __func__,i,
111 				ha->hw.sds[i].rx_free);
112 		}
113 
114 		for (i = 0; i < ha->hw.num_tx_rings; i++)
115 			device_printf(ha->pci_dev,
116 				"%s: tx[%d] = %p\n", __func__,i,
117 				(void *)ha->tx_ring[i].count);
118 
119 		for (i = 0; i < ha->hw.num_rds_rings; i++)
120 			device_printf(ha->pci_dev,
121 				"%s: rds_ring[%d] = %p\n", __func__,i,
122 				(void *)ha->hw.rds[i].count);
123 
124 		device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__,
125 			(void *)ha->lro_pkt_count);
126 
127 		device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__,
128 			(void *)ha->lro_bytes);
129 
130 #ifdef QL_ENABLE_ISCSI_TLV
131 		device_printf(ha->pci_dev, "%s: iscsi_pkts = %p\n", __func__,
132 			(void *)ha->hw.iscsi_pkt_count);
133 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
134 
135 	}
136 	return (err);
137 }
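/*
 * Illustrative usage of the handler above (the sysctl node name is an
 * assumption for the example; the actual prefix depends on how the device
 * attaches):
 *
 *	sysctl dev.ql.0.drvr_stats=1
 *
 * Writing 1 dumps the driver-maintained per-SDS/TX/RDS ring counters to the
 * console via device_printf().
 */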
138 
139 static int
140 qla_sysctl_get_quick_stats(SYSCTL_HANDLER_ARGS)
141 {
142 	int err, ret = 0;
143 	qla_host_t *ha;
144 
145 	err = sysctl_handle_int(oidp, &ret, 0, req);
146 
147 	if (err || !req->newptr)
148 		return (err);
149 
150 	if (ret == 1) {
151 		ha = (qla_host_t *)arg1;
152 		qla_get_quick_stats(ha);
153 	}
154 	return (err);
155 }
156 
157 #ifdef QL_DBG
158 
159 static void
160 qla_stop_pegs(qla_host_t *ha)
161 {
162         uint32_t val = 1;
163 
164         ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
165         ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
166         ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
167         ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
168         ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
169         device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
170 }
171 
172 static int
173 qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
174 {
175 	int err, ret = 0;
176 	qla_host_t *ha;
177 
178 	err = sysctl_handle_int(oidp, &ret, 0, req);
179 
180 
181 	if (err || !req->newptr)
182 		return (err);
183 
184 	if (ret == 1) {
185 		ha = (qla_host_t *)arg1;
186 		QLA_LOCK(ha);
187 		qla_stop_pegs(ha);
188 		QLA_UNLOCK(ha);
189 	}
190 
191 	return err;
192 }
193 #endif /* #ifdef QL_DBG */
194 
195 static int
196 qla_validate_set_port_cfg_bit(uint32_t bits)
197 {
198         if ((bits & 0xF) > 1)
199                 return (-1);
200 
201         if (((bits >> 4) & 0xF) > 2)
202                 return (-1);
203 
204         if (((bits >> 8) & 0xF) > 2)
205                 return (-1);
206 
207         return (0);
208 }
209 
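/*
 * The value written to the "port_cfg" sysctl (handler below) is a set of
 * nibbles checked by qla_validate_set_port_cfg_bit() above:
 *	bits 0-3 : DCBX                      (0 = disable, 1 = enable)
 *	bits 4-7 : pause type                (0 = none, 1 = std, 2 = ppm)
 *	bits 8-11: standard pause direction  (0 = xmt+rcv, 1 = xmt, 2 = rcv)
 * Illustrative example (sysctl node name assumed):
 *
 *	sysctl dev.ql.0.port_cfg=0x111
 *
 * requests DCBX enabled, standard pause, transmit-only pause direction.
 * A value outside these ranges makes the handler fall back to
 * qla_get_port_config() instead of setting anything.
 */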
210 static int
211 qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
212 {
213         int err, ret = 0;
214         qla_host_t *ha;
215         uint32_t cfg_bits;
216 
217         err = sysctl_handle_int(oidp, &ret, 0, req);
218 
219         if (err || !req->newptr)
220                 return (err);
221 
222         if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) {
223 
224                 ha = (qla_host_t *)arg1;
225 
226                 err = qla_get_port_config(ha, &cfg_bits);
227 
228                 if (err)
229                         goto qla_sysctl_set_port_cfg_exit;
230 
231                 if (ret & 0x1) {
232                         cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
233                 } else {
234                         cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
235                 }
236 
237                 ret = ret >> 4;
238                 cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;
239 
240                 if ((ret & 0xF) == 0) {
241                         cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
242                 } else if ((ret & 0xF) == 1){
243                         cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
244                 } else {
245                         cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
246                 }
247 
248                 ret = ret >> 4;
249                 cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;
250 
251                 if (ret == 0) {
252                         cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
253                 } else if (ret == 1){
254                         cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
255                 } else {
256                         cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
257                 }
258 
259                 err = qla_set_port_config(ha, cfg_bits);
260         } else {
261                 ha = (qla_host_t *)arg1;
262 
263                 err = qla_get_port_config(ha, &cfg_bits);
264         }
265 
266 qla_sysctl_set_port_cfg_exit:
267         return err;
268 }
269 
270 static int
271 qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
272 {
273 	int err, ret = 0;
274 	qla_host_t *ha;
275 
276 	err = sysctl_handle_int(oidp, &ret, 0, req);
277 
278 	if (err || !req->newptr)
279 		return (err);
280 
281 	ha = (qla_host_t *)arg1;
282 
283 	if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
284 		(ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {
285 		err = qla_set_cam_search_mode(ha, (uint32_t)ret);
286 	} else {
287 		device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
288 	}
289 
290 	return (err);
291 }
292 
293 static int
294 qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
295 {
296 	int err, ret = 0;
297 	qla_host_t *ha;
298 
299 	err = sysctl_handle_int(oidp, &ret, 0, req);
300 
301 	if (err || !req->newptr)
302 		return (err);
303 
304 	ha = (qla_host_t *)arg1;
305 	err = qla_get_cam_search_mode(ha);
306 
307 	return (err);
308 }
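/*
 * Illustrative usage (sysctl node name assumed): writing 1 (internal) or
 * 2 (auto), per the descriptions registered in ql_hw_add_sysctls(), to
 * dev.ql.0.set_cam_search_mode selects the CAM search mode via
 * qla_set_cam_search_mode(); writing any value to
 * dev.ql.0.get_cam_search_mode queries and prints the current mode.
 */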
309 
310 
311 /*
312  * Name: ql_hw_add_sysctls
313  * Function: Add P3Plus-specific sysctls
314  */
315 void
316 ql_hw_add_sysctls(qla_host_t *ha)
317 {
318         device_t	dev;
319 
320         dev = ha->pci_dev;
321 
322 	ha->hw.num_sds_rings = MAX_SDS_RINGS;
323 	ha->hw.num_rds_rings = MAX_RDS_RINGS;
324 	ha->hw.num_tx_rings = NUM_TX_RINGS;
325 
326 	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
327 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
328 		OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
329 		ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");
330 
331         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
332                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
333                 OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
334 		ha->hw.num_sds_rings, "Number of Status Descriptor Rings");
335 
336         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
337                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
338                 OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
339 		ha->hw.num_tx_rings, "Number of Transmit Rings");
340 
341         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
342                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
343                 OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
344 		ha->txr_idx, "Tx Ring Used");
345 
346 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
347 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
348 		OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
349 		(void *)ha, 0,
350 		qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");
351 
352         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
353                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
354                 OID_AUTO, "quick_stats", CTLTYPE_INT | CTLFLAG_RW,
355                 (void *)ha, 0,
356                 qla_sysctl_get_quick_stats, "I", "Quick Statistics");
357 
358         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
359                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
360                 OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
361 		ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");
362 
363 	ha->hw.sds_cidx_thres = 32;
364         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
365                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
366                 OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
367 		ha->hw.sds_cidx_thres,
368 		"Number of SDS entries to process before updating"
369 		" SDS Ring Consumer Index");
370 
371 	ha->hw.rds_pidx_thres = 32;
372         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
373                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
374                 OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
375 		ha->hw.rds_pidx_thres,
376 		"Number of Rcv Ring Entries to post before updating"
377 		" RDS Ring Producer Index");
378 
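        /*
         * Interrupt coalescing values encode the max packet count in bits
         * 15:0 and the max wait in micro-seconds in bits 31:16 (see the
         * sysctl descriptions below).  The defaults are therefore
         * 256 packets / 3us for receive and 64 packets / 64us for transmit.
         */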
379         ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
380         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
381                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
382                 OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
383                 &ha->hw.rcv_intr_coalesce,
384                 ha->hw.rcv_intr_coalesce,
385                 "Rcv Intr Coalescing Parameters\n"
386                 "\tbits 15:0 max packets\n"
387                 "\tbits 31:16 max micro-seconds to wait\n"
388                 "\tplease run\n"
389                 "\tifconfig <if> down && ifconfig <if> up\n"
390                 "\tto take effect \n");
391 
392         ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
393         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
394                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
395                 OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
396                 &ha->hw.xmt_intr_coalesce,
397                 ha->hw.xmt_intr_coalesce,
398                 "Xmt Intr Coalescing Parameters\n"
399                 "\tbits 15:0 max packets\n"
400                 "\tbits 31:16 max micro-seconds to wait\n"
401                 "\tplease run\n"
402                 "\tifconfig <if> down && ifconfig <if> up\n"
403                 "\tto take effect \n");
404 
405         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
406                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
407                 OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
408                 (void *)ha, 0,
409                 qla_sysctl_port_cfg, "I",
410                         "Set Port Configuration if values below "
411                         "Set Port Configuration if the value written is valid "
412                         "(see bits below); otherwise Get Port Configuration\n"
413                         "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
414                         "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
415                         " 1 = xmt only; 2 = rcv only;\n"
416                 );
417 
418 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
419 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
420 		OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
421 		(void *)ha, 0,
422 		qla_sysctl_set_cam_search_mode, "I",
423 			"Set CAM Search Mode\n"
424 			"\t 1 = search mode internal\n"
425 			"\t 2 = search mode auto\n");
426 
427 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
428 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
429 		OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
430 		(void *)ha, 0,
431 		qla_sysctl_get_cam_search_mode, "I",
432 			"Get CAM Search Mode\n"
433 			"\t 1 = search mode internal\n"
434 			"\t 2 = search mode auto\n");
435 
436         ha->hw.enable_9kb = 1;
437 
438         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
439                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
440                 OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
441                 ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");
442 
443         ha->hw.enable_hw_lro = 1;
444 
445         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
446                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
447                 OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro,
448                 ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true \n"
449 		"\t 1 : Hardware LRO if LRO is enabled\n"
450 		"\t 0 : Software LRO if LRO is enabled\n"
451 		"\t Any change requires ifconfig down/up to take effect\n"
452 		"\t Note that LRO may be turned off/on via ifconfig\n");
453 
454 	ha->hw.mdump_active = 0;
455         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
456                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
457                 OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
458 		ha->hw.mdump_active,
459 		"Minidump retrieval is Active");
460 
461 	ha->hw.mdump_done = 0;
462         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
463                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
464                 OID_AUTO, "mdump_done", CTLFLAG_RW,
465 		&ha->hw.mdump_done, ha->hw.mdump_done,
466 		"Minidump has been completed and is available for retrieval");
467 
468 	ha->hw.mdump_capture_mask = 0xF;
469         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
470                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
471                 OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
472 		&ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
473 		"Minidump capture mask");
474 #ifdef QL_DBG
475 
476 	ha->err_inject = 0;
477         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
478                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
479                 OID_AUTO, "err_inject",
480                 CTLFLAG_RW, &ha->err_inject, ha->err_inject,
481                 "Error to be injected\n"
482                 "\t\t\t 0: No Errors\n"
483                 "\t\t\t 1: rcv: rxb struct invalid\n"
484                 "\t\t\t 2: rcv: mp == NULL\n"
485                 "\t\t\t 3: lro: rxb struct invalid\n"
486                 "\t\t\t 4: lro: mp == NULL\n"
487                 "\t\t\t 5: rcv: num handles invalid\n"
488                 "\t\t\t 6: reg: indirect reg rd_wr failure\n"
489                 "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
490                 "\t\t\t 8: mbx: mailbox command failure\n"
491                 "\t\t\t 9: heartbeat failure\n"
492                 "\t\t\t A: temperature failure\n"
493 		"\t\t\t 11: m_getcl or m_getjcl failure\n" );
494 
495 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
496                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
497                 OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
498                 (void *)ha, 0,
499                 qla_sysctl_stop_pegs, "I", "Peg Stop");
500 
501 #endif /* #ifdef QL_DBG */
502 
503         ha->hw.user_pri_nic = 0;
504         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
505                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
506                 OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
507                 ha->hw.user_pri_nic,
508                 "VLAN Tag User Priority for Normal Ethernet Packets");
509 
510         ha->hw.user_pri_iscsi = 4;
511         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
512                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
513                 OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
514                 ha->hw.user_pri_iscsi,
515                 "VLAN Tag User Priority for iSCSI Packets");
516 
517 }
518 
519 void
520 ql_hw_link_status(qla_host_t *ha)
521 {
522 	device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);
523 
524 	if (ha->hw.link_up) {
525 		device_printf(ha->pci_dev, "link Up\n");
526 	} else {
527 		device_printf(ha->pci_dev, "link Down\n");
528 	}
529 
530 	if (ha->hw.flags.fduplex) {
531 		device_printf(ha->pci_dev, "Full Duplex\n");
532 	} else {
533 		device_printf(ha->pci_dev, "Half Duplex\n");
534 	}
535 
536 	if (ha->hw.flags.autoneg) {
537 		device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
538 	} else {
539 		device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
540 	}
541 
542 	switch (ha->hw.link_speed) {
543 	case 0x710:
544 		device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
545 		break;
546 
547 	case 0x3E8:
548 		device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
549 		break;
550 
551 	case 0x64:
552 		device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
553 		break;
554 
555 	default:
556 		device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
557 		break;
558 	}
559 
560 	switch (ha->hw.module_type) {
561 
562 	case 0x01:
563 		device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
564 		break;
565 
566 	case 0x02:
567 		device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
568 		break;
569 
570 	case 0x03:
571 		device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
572 		break;
573 
574 	case 0x04:
575 		device_printf(ha->pci_dev,
576 			"Module Type 10GE Passive Copper(Compliant)[%d m]\n",
577 			ha->hw.cable_length);
578 		break;
579 
580 	case 0x05:
581 		device_printf(ha->pci_dev, "Module Type 10GE Active"
582 			" Limiting Copper(Compliant)[%d m]\n",
583 			ha->hw.cable_length);
584 		break;
585 
586 	case 0x06:
587 		device_printf(ha->pci_dev,
588 			"Module Type 10GE Passive Copper"
589 			" (Legacy, Best Effort)[%d m]\n",
590 			ha->hw.cable_length);
591 		break;
592 
593 	case 0x07:
594 		device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
595 		break;
596 
597 	case 0x08:
598 		device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
599 		break;
600 
601 	case 0x09:
602 		device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
603 		break;
604 
605 	case 0x0A:
606 		device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
607 		break;
608 
609 	case 0x0B:
610 		device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
611 			"(Legacy, Best Effort)\n");
612 		break;
613 
614 	default:
615 		device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
616 			ha->hw.module_type);
617 		break;
618 	}
619 
620 	if (ha->hw.link_faults == 1)
621 		device_printf(ha->pci_dev, "SFP Power Fault\n");
622 }
623 
624 /*
625  * Name: ql_free_dma
626  * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
627  */
628 void
629 ql_free_dma(qla_host_t *ha)
630 {
631 	uint32_t i;
632 
633         if (ha->hw.dma_buf.flags.sds_ring) {
634 		for (i = 0; i < ha->hw.num_sds_rings; i++) {
635 			ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
636 		}
637         	ha->hw.dma_buf.flags.sds_ring = 0;
638 	}
639 
640         if (ha->hw.dma_buf.flags.rds_ring) {
641 		for (i = 0; i < ha->hw.num_rds_rings; i++) {
642 			ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
643 		}
644         	ha->hw.dma_buf.flags.rds_ring = 0;
645 	}
646 
647         if (ha->hw.dma_buf.flags.tx_ring) {
648 		ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
649         	ha->hw.dma_buf.flags.tx_ring = 0;
650 	}
651 	ql_minidump_free(ha);
652 }
653 
654 /*
655  * Name: ql_alloc_dma
656  * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
657  */
658 int
659 ql_alloc_dma(qla_host_t *ha)
660 {
661         device_t                dev;
662 	uint32_t		i, j, size, tx_ring_size;
663 	qla_hw_t		*hw;
664 	qla_hw_tx_cntxt_t	*tx_cntxt;
665 	uint8_t			*vaddr;
666 	bus_addr_t		paddr;
667 
668         dev = ha->pci_dev;
669 
670         QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
671 
672 	hw = &ha->hw;
673 	/*
674 	 * Allocate Transmit Ring
675 	 */
676 	tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
677 	size = (tx_ring_size * ha->hw.num_tx_rings);
678 
679 	hw->dma_buf.tx_ring.alignment = 8;
680 	hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
681 
682         if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
683                 device_printf(dev, "%s: tx ring alloc failed\n", __func__);
684                 goto ql_alloc_dma_exit;
685         }
686 
687 	vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
688 	paddr = hw->dma_buf.tx_ring.dma_addr;
689 
690 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
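	/*
	 * Carve the single tx_ring allocation into hw->num_tx_rings command
	 * rings of NUM_TX_DESCRIPTORS entries each, followed by one 32-bit
	 * hardware Tx consumer index per ring.
	 */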
691 		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
692 
693 		tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
694 		tx_cntxt->tx_ring_paddr = paddr;
695 
696 		vaddr += tx_ring_size;
697 		paddr += tx_ring_size;
698 	}
699 
700 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
701 		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
702 
703 		tx_cntxt->tx_cons = (uint32_t *)vaddr;
704 		tx_cntxt->tx_cons_paddr = paddr;
705 
706 		vaddr += sizeof (uint32_t);
707 		paddr += sizeof (uint32_t);
708 	}
709 
710         ha->hw.dma_buf.flags.tx_ring = 1;
711 
712 	QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
713 		__func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
714 		hw->dma_buf.tx_ring.dma_b));
715 	/*
716 	 * Allocate Receive Descriptor Rings
717 	 */
718 
719 	for (i = 0; i < hw->num_rds_rings; i++) {
720 
721 		hw->dma_buf.rds_ring[i].alignment = 8;
722 		hw->dma_buf.rds_ring[i].size =
723 			(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
724 
725 		if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
726 			device_printf(dev, "%s: rds ring[%d] alloc failed\n",
727 				__func__, i);
728 
729 			for (j = 0; j < i; j++)
730 				ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);
731 
732 			goto ql_alloc_dma_exit;
733 		}
734 		QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
735 			__func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
736 			hw->dma_buf.rds_ring[i].dma_b));
737 	}
738 
739 	hw->dma_buf.flags.rds_ring = 1;
740 
741 	/*
742 	 * Allocate Status Descriptor Rings
743 	 */
744 
745 	for (i = 0; i < hw->num_sds_rings; i++) {
746 		hw->dma_buf.sds_ring[i].alignment = 8;
747 		hw->dma_buf.sds_ring[i].size =
748 			(sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;
749 
750 		if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
751 			device_printf(dev, "%s: sds ring alloc failed\n",
752 				__func__);
753 
754 			for (j = 0; j < i; j++)
755 				ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);
756 
757 			goto ql_alloc_dma_exit;
758 		}
759 		QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
760 			__func__, i,
761 			(void *)(hw->dma_buf.sds_ring[i].dma_addr),
762 			hw->dma_buf.sds_ring[i].dma_b));
763 	}
764 	for (i = 0; i < hw->num_sds_rings; i++) {
765 		hw->sds[i].sds_ring_base =
766 			(q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
767 	}
768 
769 	hw->dma_buf.flags.sds_ring = 1;
770 
771 	return 0;
772 
773 ql_alloc_dma_exit:
774 	ql_free_dma(ha);
775 	return -1;
776 }
777 
778 #define Q8_MBX_MSEC_DELAY	5000
779 
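/*
 * Mailbox handshake used by the firmware commands in this file:
 *	1. wait for Q8_HOST_MBOX_CNTRL to read 0 (previous command consumed);
 *	2. write the n_hmbox request words into the host mailbox registers
 *	   and set Q8_HOST_MBOX_CNTRL to 1 to notify the firmware;
 *	3. poll Q8_FW_MBOX_CNTRL until the firmware posts a completion;
 *	4. copy the n_fwmbox response words out of the firmware mailbox
 *	   registers, then clear Q8_FW_MBOX_CNTRL and the mailbox interrupt
 *	   mask to re-arm.
 * A timeout at step 1 or 3 sets qla_initiate_recovery.
 */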
780 static int
781 qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
782 	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
783 {
784 	uint32_t i;
785 	uint32_t data;
786 	int ret = 0;
787 
788 	if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
789 		ret = -3;
790 		ha->qla_initiate_recovery = 1;
791 		goto exit_qla_mbx_cmd;
792 	}
793 
794 	if (no_pause)
795 		i = 1000;
796 	else
797 		i = Q8_MBX_MSEC_DELAY;
798 
799 	while (i) {
800 		data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
801 		if (data == 0)
802 			break;
803 		if (no_pause) {
804 			DELAY(1000);
805 		} else {
806 			qla_mdelay(__func__, 1);
807 		}
808 		i--;
809 	}
810 
811 	if (i == 0) {
812 		device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
813 			__func__, data);
814 		ret = -1;
815 		ha->qla_initiate_recovery = 1;
816 		goto exit_qla_mbx_cmd;
817 	}
818 
819 	for (i = 0; i < n_hmbox; i++) {
820 		WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
821 		h_mbox++;
822 	}
823 
824 	WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);
825 
826 
827 	i = Q8_MBX_MSEC_DELAY;
828 	while (i) {
829 		data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
830 
831 		if ((data & 0x3) == 1) {
832 			data = READ_REG32(ha, Q8_FW_MBOX0);
833 			if ((data & 0xF000) != 0x8000)
834 				break;
835 		}
836 		if (no_pause) {
837 			DELAY(1000);
838 		} else {
839 			qla_mdelay(__func__, 1);
840 		}
841 		i--;
842 	}
843 	if (i == 0) {
844 		device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
845 			__func__, data);
846 		ret = -2;
847 		ha->qla_initiate_recovery = 1;
848 		goto exit_qla_mbx_cmd;
849 	}
850 
851 	for (i = 0; i < n_fwmbox; i++) {
852 		*fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
853 	}
854 
855 	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
856 	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
857 
858 exit_qla_mbx_cmd:
859 	return (ret);
860 }
861 
862 int
863 qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
864 	uint32_t *num_rcvq)
865 {
866 	uint32_t *mbox, err;
867 	device_t dev = ha->pci_dev;
868 
869 	bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));
870 
871 	mbox = ha->hw.mbox;
872 
873 	mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);
874 
875 	if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
876 		device_printf(dev, "%s: failed0\n", __func__);
877 		return (-1);
878 	}
879 	err = mbox[0] >> 25;
880 
881 	if (supports_9kb != NULL) {
882 		if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
883 			*supports_9kb = 1;
884 		else
885 			*supports_9kb = 0;
886 	}
887 
888 	if (num_rcvq != NULL)
889 		*num_rcvq =  ((mbox[6] >> 16) & 0xFFFF);
890 
891 	if ((err != 1) && (err != 0)) {
892 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
893 		return (-1);
894 	}
895 	return 0;
896 }
897 
898 static int
899 qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
900 	uint32_t create)
901 {
902 	uint32_t i, err;
903 	device_t dev = ha->pci_dev;
904 	q80_config_intr_t *c_intr;
905 	q80_config_intr_rsp_t *c_intr_rsp;
906 
907 	c_intr = (q80_config_intr_t *)ha->hw.mbox;
908 	bzero(c_intr, (sizeof (q80_config_intr_t)));
909 
910 	c_intr->opcode = Q8_MBX_CONFIG_INTR;
911 
912 	c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
913 	c_intr->count_version |= Q8_MBX_CMD_VERSION;
914 
915 	c_intr->nentries = num_intrs;
916 
917 	for (i = 0; i < num_intrs; i++) {
918 		if (create) {
919 			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
920 			c_intr->intr[i].msix_index = start_idx + 1 + i;
921 		} else {
922 			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
923 			c_intr->intr[i].msix_index =
924 				ha->hw.intr_id[(start_idx + i)];
925 		}
926 
927 		c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
928 	}
929 
930 	if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
931 		(sizeof (q80_config_intr_t) >> 2),
932 		ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
933 		device_printf(dev, "%s: failed0\n", __func__);
934 		return (-1);
935 	}
936 
937 	c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;
938 
939 	err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);
940 
941 	if (err) {
942 		device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
943 			c_intr_rsp->nentries);
944 
945 		for (i = 0; i < c_intr_rsp->nentries; i++) {
946 			device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
947 				__func__, i,
948 				c_intr_rsp->intr[i].status,
949 				c_intr_rsp->intr[i].intr_id,
950 				c_intr_rsp->intr[i].intr_src);
951 		}
952 
953 		return (-1);
954 	}
955 
956 	for (i = 0; ((i < num_intrs) && create); i++) {
957 		if (!c_intr_rsp->intr[i].status) {
958 			ha->hw.intr_id[(start_idx + i)] =
959 				c_intr_rsp->intr[i].intr_id;
960 			ha->hw.intr_src[(start_idx + i)] =
961 				c_intr_rsp->intr[i].intr_src;
962 		}
963 	}
964 
965 	return (0);
966 }
967 
968 /*
969  * Name: qla_config_rss
970  * Function: Configure RSS for the context/interface.
971  */
972 static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
973 			0x8030f20c77cb2da3ULL,
974 			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
975 			0x255b0ec26d5a56daULL };
976 
977 static int
978 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
979 {
980 	q80_config_rss_t	*c_rss;
981 	q80_config_rss_rsp_t	*c_rss_rsp;
982 	uint32_t		err, i;
983 	device_t		dev = ha->pci_dev;
984 
985 	c_rss = (q80_config_rss_t *)ha->hw.mbox;
986 	bzero(c_rss, (sizeof (q80_config_rss_t)));
987 
988 	c_rss->opcode = Q8_MBX_CONFIG_RSS;
989 
990 	c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
991 	c_rss->count_version |= Q8_MBX_CMD_VERSION;
992 
993 	c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
994 				Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
995 	//c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
996 	//			Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
997 
998 	c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
999 	c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
1000 
1001 	c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
1002 
1003 	c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
1004 	c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
1005 
1006 	c_rss->cntxt_id = cntxt_id;
1007 
1008 	for (i = 0; i < 5; i++) {
1009 		c_rss->rss_key[i] = rss_key[i];
1010 	}
1011 
1012 	if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
1013 		(sizeof (q80_config_rss_t) >> 2),
1014 		ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
1015 		device_printf(dev, "%s: failed0\n", __func__);
1016 		return (-1);
1017 	}
1018 	c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
1019 
1020 	err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
1021 
1022 	if (err) {
1023 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1024 		return (-1);
1025 	}
1026 	return 0;
1027 }
1028 
1029 static int
1030 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
1031         uint16_t cntxt_id, uint8_t *ind_table)
1032 {
1033         q80_config_rss_ind_table_t      *c_rss_ind;
1034         q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
1035         uint32_t                        err;
1036         device_t                        dev = ha->pci_dev;
1037 
1038 	if ((count > Q8_RSS_IND_TBL_SIZE) ||
1039 		((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
1040 		device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
1041 			start_idx, count);
1042 		return (-1);
1043 	}
1044 
1045         c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
1046         bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
1047 
1048         c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
1049         c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
1050         c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
1051 
1052 	c_rss_ind->start_idx = start_idx;
1053 	c_rss_ind->end_idx = start_idx + count - 1;
1054 	c_rss_ind->cntxt_id = cntxt_id;
1055 	bcopy(ind_table, c_rss_ind->ind_table, count);
1056 
1057 	if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
1058 		(sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
1059 		(sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
1060 		device_printf(dev, "%s: failed0\n", __func__);
1061 		return (-1);
1062 	}
1063 
1064 	c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
1065 	err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
1066 
1067 	if (err) {
1068 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1069 		return (-1);
1070 	}
1071 	return 0;
1072 }
1073 
1074 /*
1075  * Name: qla_config_intr_coalesce
1076  * Function: Configure Interrupt Coalescing.
1077  */
1078 static int
1079 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
1080 	int rcv)
1081 {
1082 	q80_config_intr_coalesc_t	*intrc;
1083 	q80_config_intr_coalesc_rsp_t	*intrc_rsp;
1084 	uint32_t			err, i;
1085 	device_t			dev = ha->pci_dev;
1086 
1087 	intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
1088 	bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
1089 
1090 	intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
1091 	intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
1092 	intrc->count_version |= Q8_MBX_CMD_VERSION;
1093 
1094 	if (rcv) {
1095 		intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
1096 		intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
1097 		intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
1098 	} else {
1099 		intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
1100 		intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
1101 		intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
1102 	}
1103 
1104 	intrc->cntxt_id = cntxt_id;
1105 
1106 	if (tenable) {
1107 		intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
1108 		intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
1109 
1110 		for (i = 0; i < ha->hw.num_sds_rings; i++) {
1111 			intrc->sds_ring_mask |= (1 << i);
1112 		}
1113 		intrc->ms_timeout = 1000;
1114 	}
1115 
1116 	if (qla_mbx_cmd(ha, (uint32_t *)intrc,
1117 		(sizeof (q80_config_intr_coalesc_t) >> 2),
1118 		ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
1119 		device_printf(dev, "%s: failed0\n", __func__);
1120 		return (-1);
1121 	}
1122 	intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
1123 
1124 	err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
1125 
1126 	if (err) {
1127 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1128 		return (-1);
1129 	}
1130 
1131 	return 0;
1132 }
1133 
1134 
1135 /*
1136  * Name: qla_config_mac_addr
1137  * Function: binds a MAC address to the context/interface.
1138  *	Can be unicast, multicast or broadcast.
1139  */
1140 static int
1141 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
1142 	uint32_t num_mac)
1143 {
1144 	q80_config_mac_addr_t		*cmac;
1145 	q80_config_mac_addr_rsp_t	*cmac_rsp;
1146 	uint32_t			err;
1147 	device_t			dev = ha->pci_dev;
1148 	int				i;
1149 	uint8_t				*mac_cpy = mac_addr;
1150 
1151 	if (num_mac > Q8_MAX_MAC_ADDRS) {
1152 		device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
1153 			__func__, (add_mac ? "Add" : "Del"), num_mac);
1154 		return (-1);
1155 	}
1156 
1157 	cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
1158 	bzero(cmac, (sizeof (q80_config_mac_addr_t)));
1159 
1160 	cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
1161 	cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
1162 	cmac->count_version |= Q8_MBX_CMD_VERSION;
1163 
1164 	if (add_mac)
1165 		cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
1166 	else
1167 		cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
1168 
1169 	cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
1170 
1171 	cmac->nmac_entries = num_mac;
1172 	cmac->cntxt_id = ha->hw.rcv_cntxt_id;
1173 
1174 	for (i = 0; i < num_mac; i++) {
1175 		bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN);
1176 		mac_addr = mac_addr + ETHER_ADDR_LEN;
1177 	}
1178 
1179 	if (qla_mbx_cmd(ha, (uint32_t *)cmac,
1180 		(sizeof (q80_config_mac_addr_t) >> 2),
1181 		ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
1182 		device_printf(dev, "%s: %s failed0\n", __func__,
1183 			(add_mac ? "Add" : "Del"));
1184 		return (-1);
1185 	}
1186 	cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
1187 
1188 	err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
1189 
1190 	if (err) {
1191 		device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
1192 			(add_mac ? "Add" : "Del"), err);
1193 		for (i = 0; i < num_mac; i++) {
1194 			device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
1195 				__func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
1196 				mac_cpy[3], mac_cpy[4], mac_cpy[5]);
1197 			mac_cpy += ETHER_ADDR_LEN;
1198 		}
1199 		return (-1);
1200 	}
1201 
1202 	return 0;
1203 }
1204 
1205 
1206 /*
1207  * Name: qla_set_mac_rcv_mode
1208  * Function: Enable/Disable AllMulticast and Promiscuous Modes.
1209  */
1210 static int
1211 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
1212 {
1213 	q80_config_mac_rcv_mode_t	*rcv_mode;
1214 	uint32_t			err;
1215 	q80_config_mac_rcv_mode_rsp_t	*rcv_mode_rsp;
1216 	device_t			dev = ha->pci_dev;
1217 
1218 	rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
1219 	bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
1220 
1221 	rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
1222 	rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
1223 	rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
1224 
1225 	rcv_mode->mode = mode;
1226 
1227 	rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
1228 
1229 	if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
1230 		(sizeof (q80_config_mac_rcv_mode_t) >> 2),
1231 		ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
1232 		device_printf(dev, "%s: failed0\n", __func__);
1233 		return (-1);
1234 	}
1235 	rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
1236 
1237 	err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
1238 
1239 	if (err) {
1240 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1241 		return (-1);
1242 	}
1243 
1244 	return 0;
1245 }
1246 
1247 int
1248 ql_set_promisc(qla_host_t *ha)
1249 {
1250 	int ret;
1251 
1252 	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1253 	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1254 	return (ret);
1255 }
1256 
1257 void
1258 qla_reset_promisc(qla_host_t *ha)
1259 {
1260 	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1261 	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1262 }
1263 
1264 int
1265 ql_set_allmulti(qla_host_t *ha)
1266 {
1267 	int ret;
1268 
1269 	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
1270 	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1271 	return (ret);
1272 }
1273 
1274 void
1275 qla_reset_allmulti(qla_host_t *ha)
1276 {
1277 	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
1278 	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1279 }
1280 
1281 /*
1282  * Name: ql_set_max_mtu
1283  * Function:
1284  *	Sets the maximum transfer unit size for the specified rcv context.
1285  */
1286 int
1287 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1288 {
1289 	device_t		dev;
1290 	q80_set_max_mtu_t	*max_mtu;
1291 	q80_set_max_mtu_rsp_t	*max_mtu_rsp;
1292 	uint32_t		err;
1293 
1294 	dev = ha->pci_dev;
1295 
1296 	max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
1297 	bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
1298 
1299 	max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
1300 	max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
1301 	max_mtu->count_version |= Q8_MBX_CMD_VERSION;
1302 
1303 	max_mtu->cntxt_id = cntxt_id;
1304 	max_mtu->mtu = mtu;
1305 
1306         if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1307 		(sizeof (q80_set_max_mtu_t) >> 2),
1308                 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1309                 device_printf(dev, "%s: failed\n", __func__);
1310                 return -1;
1311         }
1312 
1313 	max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1314 
1315         err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1316 
1317         if (err) {
1318                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1319         }
1320 
1321 	return 0;
1322 }
1323 
1324 static int
1325 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1326 {
1327 	device_t		dev;
1328 	q80_link_event_t	*lnk;
1329 	q80_link_event_rsp_t	*lnk_rsp;
1330 	uint32_t		err;
1331 
1332 	dev = ha->pci_dev;
1333 
1334 	lnk = (q80_link_event_t *)ha->hw.mbox;
1335 	bzero(lnk, (sizeof (q80_link_event_t)));
1336 
1337 	lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
1338 	lnk->count_version = (sizeof (q80_link_event_t) >> 2);
1339 	lnk->count_version |= Q8_MBX_CMD_VERSION;
1340 
1341 	lnk->cntxt_id = cntxt_id;
1342 	lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
1343 
1344         if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
1345                 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
1346                 device_printf(dev, "%s: failed\n", __func__);
1347                 return -1;
1348         }
1349 
1350 	lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
1351 
1352         err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
1353 
1354         if (err) {
1355                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1356         }
1357 
1358 	return 0;
1359 }
1360 
1361 static int
1362 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
1363 {
1364 	device_t		dev;
1365 	q80_config_fw_lro_t	*fw_lro;
1366 	q80_config_fw_lro_rsp_t	*fw_lro_rsp;
1367 	uint32_t		err;
1368 
1369 	dev = ha->pci_dev;
1370 
1371 	fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
1372 	bzero(fw_lro, sizeof(q80_config_fw_lro_t));
1373 
1374 	fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
1375 	fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
1376 	fw_lro->count_version |= Q8_MBX_CMD_VERSION;
1377 
1378 	fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
1379 	fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
1380 
1381 	fw_lro->cntxt_id = cntxt_id;
1382 
1383 	if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
1384 		(sizeof (q80_config_fw_lro_t) >> 2),
1385 		ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
1386 		device_printf(dev, "%s: failed\n", __func__);
1387 		return -1;
1388 	}
1389 
1390 	fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
1391 
1392 	err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
1393 
1394 	if (err) {
1395 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1396 	}
1397 
1398 	return 0;
1399 }
1400 
1401 static int
1402 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
1403 {
1404 	device_t                dev;
1405 	q80_hw_config_t         *hw_config;
1406 	q80_hw_config_rsp_t     *hw_config_rsp;
1407 	uint32_t                err;
1408 
1409 	dev = ha->pci_dev;
1410 
1411 	hw_config = (q80_hw_config_t *)ha->hw.mbox;
1412 	bzero(hw_config, sizeof (q80_hw_config_t));
1413 
1414 	hw_config->opcode = Q8_MBX_HW_CONFIG;
1415 	hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
1416 	hw_config->count_version |= Q8_MBX_CMD_VERSION;
1417 
1418 	hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;
1419 
1420 	hw_config->u.set_cam_search_mode.mode = search_mode;
1421 
1422 	if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
1423 		(sizeof (q80_hw_config_t) >> 2),
1424 		ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
1425 		device_printf(dev, "%s: failed\n", __func__);
1426 		return -1;
1427 	}
1428 	hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
1429 
1430 	err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
1431 
1432 	if (err) {
1433 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1434 	}
1435 
1436 	return 0;
1437 }
1438 
1439 static int
1440 qla_get_cam_search_mode(qla_host_t *ha)
1441 {
1442 	device_t                dev;
1443 	q80_hw_config_t         *hw_config;
1444 	q80_hw_config_rsp_t     *hw_config_rsp;
1445 	uint32_t                err;
1446 
1447 	dev = ha->pci_dev;
1448 
1449 	hw_config = (q80_hw_config_t *)ha->hw.mbox;
1450 	bzero(hw_config, sizeof (q80_hw_config_t));
1451 
1452 	hw_config->opcode = Q8_MBX_HW_CONFIG;
1453 	hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
1454 	hw_config->count_version |= Q8_MBX_CMD_VERSION;
1455 
1456 	hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;
1457 
1458 	if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
1459 		(sizeof (q80_hw_config_t) >> 2),
1460 		ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
1461 		device_printf(dev, "%s: failed\n", __func__);
1462 		return -1;
1463 	}
1464 	hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
1465 
1466 	err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
1467 
1468 	if (err) {
1469 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1470 	} else {
1471 		device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
1472 			hw_config_rsp->u.get_cam_search_mode.mode);
1473 	}
1474 
1475 	return 0;
1476 }
1477 
1478 
1479 
1480 static void
1481 qla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat, int i)
1482 {
1483 	device_t dev = ha->pci_dev;
1484 
1485 	if (i < ha->hw.num_tx_rings) {
1486 		device_printf(dev, "%s[%d]: total_bytes\t\t%" PRIu64 "\n",
1487 			__func__, i, xstat->total_bytes);
1488 		device_printf(dev, "%s[%d]: total_pkts\t\t%" PRIu64 "\n",
1489 			__func__, i, xstat->total_pkts);
1490 		device_printf(dev, "%s[%d]: errors\t\t%" PRIu64 "\n",
1491 			__func__, i, xstat->errors);
1492 		device_printf(dev, "%s[%d]: pkts_dropped\t%" PRIu64 "\n",
1493 			__func__, i, xstat->pkts_dropped);
1494 		device_printf(dev, "%s[%d]: switch_pkts\t\t%" PRIu64 "\n",
1495 			__func__, i, xstat->switch_pkts);
1496 		device_printf(dev, "%s[%d]: num_buffers\t\t%" PRIu64 "\n",
1497 			__func__, i, xstat->num_buffers);
1498 	} else {
1499 		device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n",
1500 			__func__, xstat->total_bytes);
1501 		device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n",
1502 			__func__, xstat->total_pkts);
1503 		device_printf(dev, "%s: errors\t\t\t%" PRIu64 "\n",
1504 			__func__, xstat->errors);
1505 		device_printf(dev, "%s: pkts_dropped\t\t\t%" PRIu64 "\n",
1506 			__func__, xstat->pkts_dropped);
1507 		device_printf(dev, "%s: switch_pkts\t\t\t%" PRIu64 "\n",
1508 			__func__, xstat->switch_pkts);
1509 		device_printf(dev, "%s: num_buffers\t\t\t%" PRIu64 "\n",
1510 			__func__, xstat->num_buffers);
1511 	}
1512 }
1513 
1514 static void
1515 qla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)
1516 {
1517 	device_t dev = ha->pci_dev;
1518 
1519 	device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
1520 		rstat->total_bytes);
1521 	device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
1522 		rstat->total_pkts);
1523 	device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
1524 		rstat->lro_pkt_count);
1525 	device_printf(dev, "%s: sw_pkt_count\t\t\t%" PRIu64 "\n", __func__,
1526 		rstat->sw_pkt_count);
1527 	device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
1528 		rstat->ip_chksum_err);
1529 	device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
1530 		rstat->pkts_wo_acntxts);
1531 	device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
1532 		__func__, rstat->pkts_dropped_no_sds_card);
1533 	device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
1534 		__func__, rstat->pkts_dropped_no_sds_host);
1535 	device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
1536 		rstat->oversized_pkts);
1537 	device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
1538 		__func__, rstat->pkts_dropped_no_rds);
1539 	device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
1540 		__func__, rstat->unxpctd_mcast_pkts);
1541 	device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
1542 		rstat->re1_fbq_error);
1543 	device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
1544 		rstat->invalid_mac_addr);
1545 	device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
1546 		rstat->rds_prime_trys);
1547 	device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
1548 		rstat->rds_prime_success);
1549 	device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
1550 		rstat->lro_flows_added);
1551 	device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
1552 		rstat->lro_flows_deleted);
1553 	device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
1554 		rstat->lro_flows_active);
1555 	device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
1556 		__func__, rstat->pkts_droped_unknown);
1557 }
1558 
1559 static void
1560 qla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)
1561 {
1562 	device_t dev = ha->pci_dev;
1563 
1564 	device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
1565 		mstat->xmt_frames);
1566 	device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
1567 		mstat->xmt_bytes);
1568 	device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1569 		mstat->xmt_mcast_pkts);
1570 	device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1571 		mstat->xmt_bcast_pkts);
1572 	device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
1573 		mstat->xmt_pause_frames);
1574 	device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1575 		mstat->xmt_cntrl_pkts);
1576 	device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1577 		__func__, mstat->xmt_pkt_lt_64bytes);
1578 	device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1579 		__func__, mstat->xmt_pkt_lt_127bytes);
1580 	device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1581 		__func__, mstat->xmt_pkt_lt_255bytes);
1582 	device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1583 		__func__, mstat->xmt_pkt_lt_511bytes);
1584 	device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
1585 		__func__, mstat->xmt_pkt_lt_1023bytes);
1586 	device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
1587 		__func__, mstat->xmt_pkt_lt_1518bytes);
1588 	device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
1589 		__func__, mstat->xmt_pkt_gt_1518bytes);
1590 
1591 	device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__,
1592 		mstat->rcv_frames);
1593 	device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__,
1594 		mstat->rcv_bytes);
1595 	device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1596 		mstat->rcv_mcast_pkts);
1597 	device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1598 		mstat->rcv_bcast_pkts);
1599 	device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__,
1600 		mstat->rcv_pause_frames);
1601 	device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1602 		mstat->rcv_cntrl_pkts);
1603 	device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1604 		__func__, mstat->rcv_pkt_lt_64bytes);
1605 	device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1606 		__func__, mstat->rcv_pkt_lt_127bytes);
1607 	device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1608 		__func__, mstat->rcv_pkt_lt_255bytes);
1609 	device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1610 		__func__, mstat->rcv_pkt_lt_511bytes);
1611 	device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
1612 		__func__, mstat->rcv_pkt_lt_1023bytes);
1613 	device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
1614 		__func__, mstat->rcv_pkt_lt_1518bytes);
1615 	device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
1616 		__func__, mstat->rcv_pkt_gt_1518bytes);
1617 
1618 	device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__,
1619 		mstat->rcv_len_error);
1620 	device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__,
1621 		mstat->rcv_len_small);
1622 	device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__,
1623 		mstat->rcv_len_large);
1624 	device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__,
1625 		mstat->rcv_jabber);
1626 	device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__,
1627 		mstat->rcv_dropped);
1628 	device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__,
1629 		mstat->fcs_error);
1630 	device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__,
1631 		mstat->align_error);
1632 }
1633 
1634 
1635 static int
1636 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
1637 {
1638 	device_t		dev;
1639 	q80_get_stats_t		*stat;
1640 	q80_get_stats_rsp_t	*stat_rsp;
1641 	uint32_t		err;
1642 
1643 	dev = ha->pci_dev;
1644 
1645 	stat = (q80_get_stats_t *)ha->hw.mbox;
1646 	bzero(stat, (sizeof (q80_get_stats_t)));
1647 
1648 	stat->opcode = Q8_MBX_GET_STATS;
1649 	stat->count_version = 2;
1650 	stat->count_version |= Q8_MBX_CMD_VERSION;
1651 
1652 	stat->cmd = cmd;
1653 
1654         if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
1655                 ha->hw.mbox, (rsp_size >> 2), 0)) {
1656                 device_printf(dev, "%s: failed\n", __func__);
1657                 return -1;
1658         }
1659 
1660 	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1661 
1662         err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
1663 
1664         if (err) {
1665                 return -1;
1666         }
1667 
1668 	return 0;
1669 }
1670 
1671 void
1672 ql_get_stats(qla_host_t *ha)
1673 {
1674 	q80_get_stats_rsp_t	*stat_rsp;
1675 	q80_mac_stats_t		*mstat;
1676 	q80_xmt_stats_t		*xstat;
1677 	q80_rcv_stats_t		*rstat;
1678 	uint32_t		cmd;
1679 	int			i;
1680 
1681 	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1682 	/*
1683 	 * Get MAC Statistics
1684 	 */
1685 	cmd = Q8_GET_STATS_CMD_TYPE_MAC;
1686 //	cmd |= Q8_GET_STATS_CMD_CLEAR;
1687 
1688 	cmd |= ((ha->pci_func & 0x1) << 16);
1689 
1690 	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1691 		mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
1692 		qla_mac_stats(ha, mstat);
1693 	} else {
1694                 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
1695 			__func__, ha->hw.mbox[0]);
1696 	}
1697 	/*
1698 	 * Get RCV Statistics
1699 	 */
1700 	cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
1701 //	cmd |= Q8_GET_STATS_CMD_CLEAR;
1702 	cmd |= (ha->hw.rcv_cntxt_id << 16);
1703 
1704 	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1705 		rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
1706 		qla_rcv_stats(ha, rstat);
1707 	} else {
1708                 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
1709 			__func__, ha->hw.mbox[0]);
1710 	}
1711 	/*
1712 	 * Get XMT Statistics
1713 	 */
1714 	for (i = 0 ; i < ha->hw.num_tx_rings; i++) {
1715 		cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
1716 //		cmd |= Q8_GET_STATS_CMD_CLEAR;
1717 		cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
1718 
1719 		if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
1720 			== 0) {
1721 			xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
1722 			qla_xmt_stats(ha, xstat, i);
1723 		} else {
1724 			device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
1725 				__func__, ha->hw.mbox[0]);
1726 		}
1727 	}
1728 	return;
1729 }
1730 
1731 static void
1732 qla_get_quick_stats(qla_host_t *ha)
1733 {
1734 	q80_get_mac_rcv_xmt_stats_rsp_t *stat_rsp;
1735 	q80_mac_stats_t         *mstat;
1736 	q80_xmt_stats_t         *xstat;
1737 	q80_rcv_stats_t         *rstat;
1738 	uint32_t                cmd;
1739 
1740 	stat_rsp = (q80_get_mac_rcv_xmt_stats_rsp_t *)ha->hw.mbox;
1741 
1742 	cmd = Q8_GET_STATS_CMD_TYPE_ALL;
1743 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
1744 
1745 //      cmd |= ((ha->pci_func & 0x3) << 16);
1746 	cmd |= (0xFFFF << 16);
1747 
1748 	if (qla_get_hw_stats(ha, cmd,
1749 			sizeof (q80_get_mac_rcv_xmt_stats_rsp_t)) == 0) {
1750 
1751 		mstat = (q80_mac_stats_t *)&stat_rsp->mac;
1752 		rstat = (q80_rcv_stats_t *)&stat_rsp->rcv;
1753 		xstat = (q80_xmt_stats_t *)&stat_rsp->xmt;
1754 		qla_mac_stats(ha, mstat);
1755 		qla_rcv_stats(ha, rstat);
1756 		qla_xmt_stats(ha, xstat, ha->hw.num_tx_rings);
1757 	} else {
1758 		device_printf(ha->pci_dev, "%s: failed [0x%08x]\n",
1759 			__func__, ha->hw.mbox[0]);
1760 	}
1761 	return;
1762 }
1763 
1764 /*
1765  * Name: qla_tx_tso
1766  * Function: Checks if the packet to be transmitted is a candidate for
1767  *	Large TCP Segment Offload. If yes, the appropriate fields in the Tx
1768  *	Ring Structure are plugged in.
1769  */
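/*
 * Note: 'hdr' must be large enough for the complete L2/L3/L4 header
 * (QL_FRAME_HDR_SIZE bytes in ql_hw_send). When the headers do not fit in
 * the first mbuf they are copied into 'hdr' and the function returns 1,
 * telling the caller to use the copy; 0 means the headers are in the mbuf
 * and -1 means the frame is not a TSO candidate.
 */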
1770 static int
1771 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
1772 {
1773 	struct ether_vlan_header *eh;
1774 	struct ip *ip = NULL;
1775 	struct ip6_hdr *ip6 = NULL;
1776 	struct tcphdr *th = NULL;
1777 	uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
1778 	uint16_t etype, opcode, offload = 1;
1779 	device_t dev;
1780 
1781 	dev = ha->pci_dev;
1782 
1783 
1784 	eh = mtod(mp, struct ether_vlan_header *);
1785 
1786 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1787 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1788 		etype = ntohs(eh->evl_proto);
1789 	} else {
1790 		ehdrlen = ETHER_HDR_LEN;
1791 		etype = ntohs(eh->evl_encap_proto);
1792 	}
1793 
1794 	hdrlen = 0;
1795 
1796 	switch (etype) {
1797 		case ETHERTYPE_IP:
1798 
1799 			tcp_opt_off = ehdrlen + sizeof(struct ip) +
1800 					sizeof(struct tcphdr);
1801 
1802 			if (mp->m_len < tcp_opt_off) {
1803 				m_copydata(mp, 0, tcp_opt_off, hdr);
1804 				ip = (struct ip *)(hdr + ehdrlen);
1805 			} else {
1806 				ip = (struct ip *)(mp->m_data + ehdrlen);
1807 			}
1808 
1809 			ip_hlen = ip->ip_hl << 2;
1810 			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
1811 
1812 
1813 			if ((ip->ip_p != IPPROTO_TCP) ||
1814 				(ip_hlen != sizeof (struct ip))){
1815 				/* IP Options are not supported */
1816 
1817 				offload = 0;
1818 			} else
1819 				th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1820 
1821 		break;
1822 
1823 		case ETHERTYPE_IPV6:
1824 
1825 			tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
1826 					sizeof (struct tcphdr);
1827 
1828 			if (mp->m_len < tcp_opt_off) {
1829 				m_copydata(mp, 0, tcp_opt_off, hdr);
1830 				ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
1831 			} else {
1832 				ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1833 			}
1834 
1835 			ip_hlen = sizeof(struct ip6_hdr);
1836 			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
1837 
1838 			if (ip6->ip6_nxt != IPPROTO_TCP) {
1839 				//device_printf(dev, "%s: ipv6\n", __func__);
1840 				offload = 0;
1841 			} else
1842 				th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
1843 		break;
1844 
1845 		default:
1846 			QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
1847 			offload = 0;
1848 		break;
1849 	}
1850 
1851 	if (!offload)
1852 		return (-1);
1853 
1854 	tcp_hlen = th->th_off << 2;
1855 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
1856 
1857         if (mp->m_len < hdrlen) {
1858                 if (mp->m_len < tcp_opt_off) {
1859                         if (tcp_hlen > sizeof(struct tcphdr)) {
1860                                 m_copydata(mp, tcp_opt_off,
1861                                         (tcp_hlen - sizeof(struct tcphdr)),
1862                                         &hdr[tcp_opt_off]);
1863                         }
1864                 } else {
1865                         m_copydata(mp, 0, hdrlen, hdr);
1866                 }
1867         }
1868 
1869 	tx_cmd->mss = mp->m_pkthdr.tso_segsz;
1870 
1871 	tx_cmd->flags_opcode = opcode ;
1872 	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
1873 	tx_cmd->total_hdr_len = hdrlen;
1874 
1875 	/* Multicast: least significant bit of the first dest MAC byte is set */
1876 	if (eh->evl_dhost[0] & 0x01) {
1877 		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
1878 	}
1879 
1880 	if (mp->m_len < hdrlen) {
1881 		QL_DPRINT8(ha, (dev, "%s: hdrlen = %d\n", __func__, hdrlen));
1882 		return (1);
1883 	}
1884 
1885 	return (0);
1886 }
1887 
1888 /*
1889  * Name: qla_tx_chksum
1890  * Function: Checks if the packet to be transmitted is a candidate for
1891  *	TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
1892  *	Ring Structure are plugged in.
1893  */
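/*
 * Note: only the Tx opcode and the TCP/UDP header offset are returned; the
 * caller plugs them into the Tx command descriptor. A return of -1 means
 * no checksum offload was requested or the frame is neither TCP nor UDP.
 */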
1894 static int
1895 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
1896 	uint32_t *tcp_hdr_off)
1897 {
1898 	struct ether_vlan_header *eh;
1899 	struct ip *ip;
1900 	struct ip6_hdr *ip6;
1901 	uint32_t ehdrlen, ip_hlen;
1902 	uint16_t etype, opcode, offload = 1;
1903 	device_t dev;
1904 	uint8_t buf[sizeof(struct ip6_hdr)];
1905 
1906 	dev = ha->pci_dev;
1907 
1908 	*op_code = 0;
1909 
1910 	if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
1911 		return (-1);
1912 
1913 	eh = mtod(mp, struct ether_vlan_header *);
1914 
1915 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1916 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1917 		etype = ntohs(eh->evl_proto);
1918 	} else {
1919 		ehdrlen = ETHER_HDR_LEN;
1920 		etype = ntohs(eh->evl_encap_proto);
1921 	}
1922 
1923 
1924 	switch (etype) {
1925 		case ETHERTYPE_IP:
1926 			ip = (struct ip *)(mp->m_data + ehdrlen);
1927 
1928 			ip_hlen = sizeof (struct ip);
1929 
1930 			if (mp->m_len < (ehdrlen + ip_hlen)) {
1931 				m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
1932 				ip = (struct ip *)buf;
1933 			}
1934 
1935 			if (ip->ip_p == IPPROTO_TCP)
1936 				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
1937 			else if (ip->ip_p == IPPROTO_UDP)
1938 				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
1939 			else {
1940 				//device_printf(dev, "%s: ipv4\n", __func__);
1941 				offload = 0;
1942 			}
1943 		break;
1944 
1945 		case ETHERTYPE_IPV6:
1946 			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1947 
1948 			ip_hlen = sizeof(struct ip6_hdr);
1949 
1950 			if (mp->m_len < (ehdrlen + ip_hlen)) {
1951 				m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
1952 					buf);
1953 				ip6 = (struct ip6_hdr *)buf;
1954 			}
1955 
1956 			if (ip6->ip6_nxt == IPPROTO_TCP)
1957 				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
1958 			else if (ip6->ip6_nxt == IPPROTO_UDP)
1959 				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
1960 			else {
1961 				//device_printf(dev, "%s: ipv6\n", __func__);
1962 				offload = 0;
1963 			}
1964 		break;
1965 
1966 		default:
1967 			offload = 0;
1968 		break;
1969 	}
1970 	if (!offload)
1971 		return (-1);
1972 
1973 	*op_code = opcode;
1974 	*tcp_hdr_off = (ip_hlen + ehdrlen);
1975 
1976 	return (0);
1977 }
1978 
1979 #define QLA_TX_MIN_FREE 2
1980 /*
1981  * Name: ql_hw_send
1982  * Function: Transmits a packet. It first checks if the packet is a
1983  *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
1984  *	offload. If neither of these criteria is met, it is transmitted
1985  *	as a regular Ethernet frame.
1986  */
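/*
 * The number of Tx command descriptors consumed is
 *	ceil(nsegs / Q8_TX_CMD_MAX_SEGMENTS)
 * plus, for TSO, enough extra descriptors to carry a copy of the frame
 * headers: the first extra descriptor holds
 * (sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN) header bytes and each
 * subsequent one holds up to sizeof(q80_tx_cmd_t) bytes.
 */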
1987 int
1988 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
1989 	uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
1990 {
1991 	struct ether_vlan_header *eh;
1992 	qla_hw_t *hw = &ha->hw;
1993 	q80_tx_cmd_t *tx_cmd, tso_cmd;
1994 	bus_dma_segment_t *c_seg;
1995 	uint32_t num_tx_cmds, hdr_len = 0;
1996 	uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
1997 	device_t dev;
1998 	int i, ret;
1999 	uint8_t *src = NULL, *dst = NULL;
2000 	uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
2001 	uint32_t op_code = 0;
2002 	uint32_t tcp_hdr_off = 0;
2003 
2004 	dev = ha->pci_dev;
2005 
2006 	/*
2007 	 * Always make sure there is at least one empty slot in the tx_ring;
2008 	 * the tx_ring is considered full when only one entry is available.
2009 	 */
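	/*
	 * Each tx cmd descriptor carries up to Q8_TX_CMD_MAX_SEGMENTS (4)
	 * buffer pointers, hence the round-up divide by 4 below; e.g.
	 * 5 segments -> (5 + 3) >> 2 = 2 descriptors.
	 */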
2010         num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
2011 
2012 	total_length = mp->m_pkthdr.len;
2013 	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
2014 		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
2015 			__func__, total_length);
2016 		return (-1);
2017 	}
2018 	eh = mtod(mp, struct ether_vlan_header *);
2019 
2020 	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2021 
2022 		bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
2023 
2024 		src = frame_hdr;
2025 		ret = qla_tx_tso(ha, mp, &tso_cmd, src);
2026 
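		/*
		 * ret 0: headers are in the mbuf;
		 * ret 1: headers were copied into frame_hdr
		 */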
2027 		if (!(ret & ~1)) {
2028 			/* find the additional tx_cmd descriptors required */
2029 
2030 			if (mp->m_flags & M_VLANTAG)
2031 				tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
2032 
2033 			hdr_len = tso_cmd.total_hdr_len;
2034 
2035 			bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2036 			bytes = QL_MIN(bytes, hdr_len);
2037 
2038 			num_tx_cmds++;
2039 			hdr_len -= bytes;
2040 
2041 			while (hdr_len) {
2042 				bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2043 				hdr_len -= bytes;
2044 				num_tx_cmds++;
2045 			}
2046 			hdr_len = tso_cmd.total_hdr_len;
2047 
2048 			if (ret == 0)
2049 				src = (uint8_t *)eh;
2050 		} else
2051 			return (EINVAL);
2052 	} else {
2053 		(void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
2054 	}
2055 
2056 	if (iscsi_pdu)
2057 		ha->hw.iscsi_pkt_count++;
2058 
2059 	if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
2060 		ql_hw_tx_done_locked(ha, txr_idx);
2061 		if (hw->tx_cntxt[txr_idx].txr_free <=
2062 				(num_tx_cmds + QLA_TX_MIN_FREE)) {
2063         		QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
2064 				"(num_tx_cmds + QLA_TX_MIN_FREE))\n",
2065 				__func__));
2066 			return (-1);
2067 		}
2068 	}
2069 
2070 	tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
2071 
2072         if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
2073 
2074                 if (nsegs > ha->hw.max_tx_segs)
2075                         ha->hw.max_tx_segs = nsegs;
2076 
2077                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2078 
2079                 if (op_code) {
2080                         tx_cmd->flags_opcode = op_code;
2081                         tx_cmd->tcp_hdr_off = tcp_hdr_off;
2082 
2083                 } else {
2084                         tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
2085                 }
2086 	} else {
2087 		bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
2088 		ha->tx_tso_frames++;
2089 	}
2090 
2091 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2092         	tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
2093 
2094 		if (iscsi_pdu)
2095 			eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
2096 
2097 	} else if (mp->m_flags & M_VLANTAG) {
2098 
2099 		if (hdr_len) { /* TSO */
2100 			tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
2101 						Q8_TX_CMD_FLAGS_HW_VLAN_ID);
2102 			tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
2103 		} else
2104 			tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
2105 
2106 		ha->hw_vlan_tx_frames++;
2107 		tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
2108 
2109 		if (iscsi_pdu) {
2110 			tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
2111 			mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
2112 		}
2113 	}
2114 
2115 
2116         tx_cmd->n_bufs = (uint8_t)nsegs;
2117         tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
2118         tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
2119 	tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
2120 
2121 	c_seg = segs;
2122 
2123 	while (1) {
2124 		for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
2125 
2126 			switch (i) {
2127 			case 0:
2128 				tx_cmd->buf1_addr = c_seg->ds_addr;
2129 				tx_cmd->buf1_len = c_seg->ds_len;
2130 				break;
2131 
2132 			case 1:
2133 				tx_cmd->buf2_addr = c_seg->ds_addr;
2134 				tx_cmd->buf2_len = c_seg->ds_len;
2135 				break;
2136 
2137 			case 2:
2138 				tx_cmd->buf3_addr = c_seg->ds_addr;
2139 				tx_cmd->buf3_len = c_seg->ds_len;
2140 				break;
2141 
2142 			case 3:
2143 				tx_cmd->buf4_addr = c_seg->ds_addr;
2144 				tx_cmd->buf4_len = c_seg->ds_len;
2145 				break;
2146 			}
2147 
2148 			c_seg++;
2149 			nsegs--;
2150 		}
2151 
2152 		txr_next = hw->tx_cntxt[txr_idx].txr_next =
2153 			(hw->tx_cntxt[txr_idx].txr_next + 1) &
2154 				(NUM_TX_DESCRIPTORS - 1);
2155 		tx_cmd_count++;
2156 
2157 		if (!nsegs)
2158 			break;
2159 
2160 		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2161 		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2162 	}
2163 
2164 	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2165 
2166 		/* TSO: copy the header into the following tx cmd descriptors */
2167 
2168 		txr_next = hw->tx_cntxt[txr_idx].txr_next;
2169 
2170 		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2171 		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2172 
2173 		bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2174 		bytes = QL_MIN(bytes, hdr_len);
2175 
2176 		dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
2177 
2178 		if (mp->m_flags & M_VLANTAG) {
2179 			/* first copy the src/dst MAC addresses */
2180 			bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2181 			dst += (ETHER_ADDR_LEN * 2);
2182 			src += (ETHER_ADDR_LEN * 2);
2183 
2184 			*((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2185 			dst += 2;
2186 			*((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2187 			dst += 2;
2188 
2189 			/* bytes left in src header */
2190 			hdr_len -= ((ETHER_ADDR_LEN * 2) +
2191 					ETHER_VLAN_ENCAP_LEN);
2192 
2193 			/* bytes left in TxCmd Entry */
2194 			bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2195 
2196 
2197 			bcopy(src, dst, bytes);
2198 			src += bytes;
2199 			hdr_len -= bytes;
2200 		} else {
2201 			bcopy(src, dst, bytes);
2202 			src += bytes;
2203 			hdr_len -= bytes;
2204 		}
2205 
2206 		txr_next = hw->tx_cntxt[txr_idx].txr_next =
2207 				(hw->tx_cntxt[txr_idx].txr_next + 1) &
2208 					(NUM_TX_DESCRIPTORS - 1);
2209 		tx_cmd_count++;
2210 
2211 		while (hdr_len) {
2212 			tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2213 			bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2214 
2215 			bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2216 
2217 			bcopy(src, tx_cmd, bytes);
2218 			src += bytes;
2219 			hdr_len -= bytes;
2220 
2221 			txr_next = hw->tx_cntxt[txr_idx].txr_next =
2222 				(hw->tx_cntxt[txr_idx].txr_next + 1) &
2223 					(NUM_TX_DESCRIPTORS - 1);
2224 			tx_cmd_count++;
2225 		}
2226 	}
2227 
2228 	hw->tx_cntxt[txr_idx].txr_free =
2229 		hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2230 
2231 	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2232 		txr_idx);
2233        	QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2234 
2235 	return (0);
2236 }
2237 
2238 
2239 
2240 #define Q8_CONFIG_IND_TBL_SIZE	32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
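/*
 * The indirection table is filled round-robin over the SDS rings:
 *	rss_ind_tbl[i] = i % num_sds_rings
 * e.g. with 4 SDS rings the first eight entries are 0,1,2,3,0,1,2,3.
 * The table is then pushed to the firmware in chunks of
 * Q8_CONFIG_IND_TBL_SIZE entries.
 */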
2241 static int
2242 qla_config_rss_ind_table(qla_host_t *ha)
2243 {
2244 	uint32_t i, count;
2245 	uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2246 
2247 
2248 	for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2249 		rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2250 	}
2251 
2252 	for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
2253 		i = i + Q8_CONFIG_IND_TBL_SIZE) {
2254 
2255 		if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2256 			count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2257 		} else {
2258 			count = Q8_CONFIG_IND_TBL_SIZE;
2259 		}
2260 
2261 		if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2262 			rss_ind_tbl))
2263 			return (-1);
2264 	}
2265 
2266 	return (0);
2267 }
2268 
2269 static int
2270 qla_config_soft_lro(qla_host_t *ha)
2271 {
2272         int i;
2273         qla_hw_t *hw = &ha->hw;
2274         struct lro_ctrl *lro;
2275 
2276         for (i = 0; i < hw->num_sds_rings; i++) {
2277                 lro = &hw->sds[i].lro;
2278 
2279 		bzero(lro, sizeof(struct lro_ctrl));
2280 
2281 #if (__FreeBSD_version >= 1100101)
2282                 if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
2283                         device_printf(ha->pci_dev,
2284 				"%s: tcp_lro_init_args [%d] failed\n",
2285                                 __func__, i);
2286                         return (-1);
2287                 }
2288 #else
2289                 if (tcp_lro_init(lro)) {
2290                         device_printf(ha->pci_dev,
2291 				"%s: tcp_lro_init [%d] failed\n",
2292                                 __func__, i);
2293                         return (-1);
2294                 }
2295 #endif /* #if (__FreeBSD_version >= 1100101) */
2296 
2297                 lro->ifp = ha->ifp;
2298         }
2299 
2300         QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
2301         return (0);
2302 }
2303 
2304 static void
2305 qla_drain_soft_lro(qla_host_t *ha)
2306 {
2307         int i;
2308         qla_hw_t *hw = &ha->hw;
2309         struct lro_ctrl *lro;
2310 
2311        	for (i = 0; i < hw->num_sds_rings; i++) {
2312                	lro = &hw->sds[i].lro;
2313 
2314 #if (__FreeBSD_version >= 1100101)
2315 		tcp_lro_flush_all(lro);
2316 #else
2317                 struct lro_entry *queued;
2318 
2319 		while ((!SLIST_EMPTY(&lro->lro_active))) {
2320 			queued = SLIST_FIRST(&lro->lro_active);
2321 			SLIST_REMOVE_HEAD(&lro->lro_active, next);
2322 			tcp_lro_flush(lro, queued);
2323 		}
2324 #endif /* #if (__FreeBSD_version >= 1100101) */
2325 	}
2326 
2327 	return;
2328 }
2329 
2330 static void
2331 qla_free_soft_lro(qla_host_t *ha)
2332 {
2333         int i;
2334         qla_hw_t *hw = &ha->hw;
2335         struct lro_ctrl *lro;
2336 
2337         for (i = 0; i < hw->num_sds_rings; i++) {
2338                	lro = &hw->sds[i].lro;
2339 		tcp_lro_free(lro);
2340 	}
2341 
2342 	return;
2343 }
2344 
2345 
2346 /*
2347  * Name: ql_del_hw_if
2348  * Function: Destroys the hardware-specific entities corresponding to an
2349  *	Ethernet Interface
2350  */
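/*
 * Teardown order: stop the NIC function, destroy the Receive and Transmit
 * Contexts, delete the interrupt contexts in groups of at most
 * Q8_MAX_INTR_VECTORS, and finally drain and free any soft LRO state.
 */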
2351 void
2352 ql_del_hw_if(qla_host_t *ha)
2353 {
2354 	uint32_t i;
2355 	uint32_t num_msix;
2356 
2357 	(void)qla_stop_nic_func(ha);
2358 
2359 	qla_del_rcv_cntxt(ha);
2360 
2361 	qla_del_xmt_cntxt(ha);
2362 
2363 	if (ha->hw.flags.init_intr_cnxt) {
2364 		for (i = 0; i < ha->hw.num_sds_rings; ) {
2365 
2366 			if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2367 				num_msix = Q8_MAX_INTR_VECTORS;
2368 			else
2369 				num_msix = ha->hw.num_sds_rings - i;
2370 			qla_config_intr_cntxt(ha, i, num_msix, 0);
2371 
2372 			i += num_msix;
2373 		}
2374 
2375 		ha->hw.flags.init_intr_cnxt = 0;
2376 	}
2377 
2378 	if (ha->hw.enable_soft_lro) {
2379 		qla_drain_soft_lro(ha);
2380 		qla_free_soft_lro(ha);
2381 	}
2382 
2383 	return;
2384 }
2385 
2386 void
2387 qla_confirm_9kb_enable(qla_host_t *ha)
2388 {
2389 	uint32_t supports_9kb = 0;
2390 
2391 	ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2392 
2393 	/* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2394 	WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2395 	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2396 
2397 	qla_get_nic_partition(ha, &supports_9kb, NULL);
2398 
2399 	if (!supports_9kb)
2400 		ha->hw.enable_9kb = 0;
2401 
2402 	return;
2403 }
2404 
2405 /*
2406  * Name: ql_init_hw_if
2407  * Function: Creates the hardware-specific entities corresponding to an
2408  *	Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2409  *	corresponding to the interface. Enables LRO if allowed.
2410  */
2411 int
2412 ql_init_hw_if(qla_host_t *ha)
2413 {
2414 	device_t	dev;
2415 	uint32_t	i;
2416 	uint8_t		bcast_mac[6];
2417 	qla_rdesc_t	*rdesc;
2418 	uint32_t	num_msix;
2419 
2420 	dev = ha->pci_dev;
2421 
2422 	for (i = 0; i < ha->hw.num_sds_rings; i++) {
2423 		bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2424 			ha->hw.dma_buf.sds_ring[i].size);
2425 	}
2426 
2427 	for (i = 0; i < ha->hw.num_sds_rings; ) {
2428 
2429 		if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2430 			num_msix = Q8_MAX_INTR_VECTORS;
2431 		else
2432 			num_msix = ha->hw.num_sds_rings - i;
2433 
2434 		if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2435 
2436 			if (i > 0) {
2437 
2438 				num_msix = i;
2439 
2440 				for (i = 0; i < num_msix; ) {
2441 					qla_config_intr_cntxt(ha, i,
2442 						Q8_MAX_INTR_VECTORS, 0);
2443 					i += Q8_MAX_INTR_VECTORS;
2444 				}
2445 			}
2446 			return (-1);
2447 		}
2448 
2449 		i = i + num_msix;
2450 	}
2451 
2452         ha->hw.flags.init_intr_cnxt = 1;
2453 
2454 	/*
2455 	 * Create Receive Context
2456 	 */
2457 	if (qla_init_rcv_cntxt(ha)) {
2458 		return (-1);
2459 	}
2460 
2461 	for (i = 0; i < ha->hw.num_rds_rings; i++) {
2462 		rdesc = &ha->hw.rds[i];
2463 		rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2464 		rdesc->rx_in = 0;
2465 		/* Update the RDS Producer Indices */
2466 		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2467 			rdesc->rx_next);
2468 	}
2469 
2470 
2471 	/*
2472 	 * Create Transmit Context
2473 	 */
2474 	if (qla_init_xmt_cntxt(ha)) {
2475 		qla_del_rcv_cntxt(ha);
2476 		return (-1);
2477 	}
2478 	ha->hw.max_tx_segs = 0;
2479 
2480 	if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
2481 		return(-1);
2482 
2483 	ha->hw.flags.unicast_mac = 1;
2484 
2485 	bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2486 	bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2487 
2488 	if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
2489 		return (-1);
2490 
2491 	ha->hw.flags.bcast_mac = 1;
2492 
2493 	/*
2494 	 * program any cached multicast addresses
2495 	 */
2496 	if (qla_hw_add_all_mcast(ha))
2497 		return (-1);
2498 
2499 	if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2500 		return (-1);
2501 
2502 	if (qla_config_rss_ind_table(ha))
2503 		return (-1);
2504 
2505 	if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2506 		return (-1);
2507 
2508 	if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2509 		return (-1);
2510 
2511 	if (ha->ifp->if_capenable & IFCAP_LRO) {
2512 		if (ha->hw.enable_hw_lro) {
2513 			ha->hw.enable_soft_lro = 0;
2514 
2515 			if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2516 				return (-1);
2517 		} else {
2518 			ha->hw.enable_soft_lro = 1;
2519 
2520 			if (qla_config_soft_lro(ha))
2521 				return (-1);
2522 		}
2523 	}
2524 
2525         if (qla_init_nic_func(ha))
2526                 return (-1);
2527 
2528         if (qla_query_fw_dcbx_caps(ha))
2529                 return (-1);
2530 
2531 	for (i = 0; i < ha->hw.num_sds_rings; i++)
2532 		QL_ENABLE_INTERRUPTS(ha, i);
2533 
2534 	return (0);
2535 }
2536 
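/*
 * Name: qla_map_sds_to_rds
 * Function: Issues a Q8_MBX_MAP_SDS_TO_RDS mailbox command establishing a
 *	1:1 mapping between the SDS and RDS rings in the range
 *	[start_idx, start_idx + num_idx).
 */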
2537 static int
2538 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
2539 {
2540         device_t                dev = ha->pci_dev;
2541         q80_rq_map_sds_to_rds_t *map_rings;
2542 	q80_rsp_map_sds_to_rds_t *map_rings_rsp;
2543         uint32_t                i, err;
2544         qla_hw_t                *hw = &ha->hw;
2545 
2546         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
2547         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
2548 
2549         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
2550         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
2551         map_rings->count_version |= Q8_MBX_CMD_VERSION;
2552 
2553         map_rings->cntxt_id = hw->rcv_cntxt_id;
2554         map_rings->num_rings = num_idx;
2555 
2556 	for (i = 0; i < num_idx; i++) {
2557 		map_rings->sds_rds[i].sds_ring = i + start_idx;
2558 		map_rings->sds_rds[i].rds_ring = i + start_idx;
2559 	}
2560 
2561         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
2562                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
2563                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2564                 device_printf(dev, "%s: failed0\n", __func__);
2565                 return (-1);
2566         }
2567 
2568         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
2569 
2570         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
2571 
2572         if (err) {
2573                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2574                 return (-1);
2575         }
2576 
2577         return (0);
2578 }
2579 
2580 /*
2581  * Name: qla_init_rcv_cntxt
2582  * Function: Creates the Receive Context.
2583  */
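/*
 * At most MAX_RCNTXT_SDS_RINGS SDS rings (and MAX_RDS_RING_SETS RDS ring
 * sets) can be specified in the create request itself; any remaining rings
 * are added afterwards with qla_add_rcv_rings() and, when more than one
 * RDS ring is in use, mapped with qla_map_sds_to_rds().
 */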
2584 static int
2585 qla_init_rcv_cntxt(qla_host_t *ha)
2586 {
2587 	q80_rq_rcv_cntxt_t	*rcntxt;
2588 	q80_rsp_rcv_cntxt_t	*rcntxt_rsp;
2589 	q80_stat_desc_t		*sdesc;
2590 	int			i, j;
2591         qla_hw_t		*hw = &ha->hw;
2592 	device_t		dev;
2593 	uint32_t		err;
2594 	uint32_t		rcntxt_sds_rings;
2595 	uint32_t		rcntxt_rds_rings;
2596 	uint32_t		max_idx;
2597 
2598 	dev = ha->pci_dev;
2599 
2600 	/*
2601 	 * Create Receive Context
2602 	 */
2603 
2604 	for (i = 0; i < hw->num_sds_rings; i++) {
2605 		for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
2606 			sdesc = (q80_stat_desc_t *)
2607 				&hw->sds[i].sds_ring_base[j];
2608 			sdesc->data[0] = 1ULL;
2609 			sdesc->data[1] = 1ULL;
2610 		}
2611 	}
2612 
2613 	rcntxt_sds_rings = hw->num_sds_rings;
2614 	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2615 		rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2616 
2617 	rcntxt_rds_rings = hw->num_rds_rings;
2618 
2619 	if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2620 		rcntxt_rds_rings = MAX_RDS_RING_SETS;
2621 
2622 	rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2623 	bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2624 
2625 	rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2626 	rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2627 	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2628 
2629 	rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2630 			Q8_RCV_CNTXT_CAP0_LRO |
2631 			Q8_RCV_CNTXT_CAP0_HW_LRO |
2632 			Q8_RCV_CNTXT_CAP0_RSS |
2633 			Q8_RCV_CNTXT_CAP0_SGL_LRO;
2634 
2635 	if (ha->hw.enable_9kb)
2636 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
2637 	else
2638 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
2639 
2640 	if (ha->hw.num_rds_rings > 1) {
2641 		rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2642 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2643 	} else
2644 		rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2645 
2646 	rcntxt->nsds_rings = rcntxt_sds_rings;
2647 
2648 	rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2649 
2650 	rcntxt->rcv_vpid = 0;
2651 
2652 	for (i = 0; i <  rcntxt_sds_rings; i++) {
2653 		rcntxt->sds[i].paddr =
2654 			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2655 		rcntxt->sds[i].size =
2656 			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2657 		rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
2658 		rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2659 	}
2660 
2661 	for (i = 0; i <  rcntxt_rds_rings; i++) {
2662 		rcntxt->rds[i].paddr_std =
2663 			qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2664 
2665 		if (ha->hw.enable_9kb)
2666 			rcntxt->rds[i].std_bsize =
2667 				qla_host_to_le64(MJUM9BYTES);
2668 		else
2669 			rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2670 
2671 		rcntxt->rds[i].std_nentries =
2672 			qla_host_to_le32(NUM_RX_DESCRIPTORS);
2673 	}
2674 
2675         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2676 		(sizeof (q80_rq_rcv_cntxt_t) >> 2),
2677                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2678                 device_printf(dev, "%s: failed0\n", __func__);
2679                 return (-1);
2680         }
2681 
2682         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2683 
2684         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2685 
2686         if (err) {
2687                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2688                 return (-1);
2689         }
2690 
2691 	for (i = 0; i <  rcntxt_sds_rings; i++) {
2692 		hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
2693 	}
2694 
2695 	for (i = 0; i <  rcntxt_rds_rings; i++) {
2696 		hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
2697 	}
2698 
2699 	hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
2700 
2701 	ha->hw.flags.init_rx_cnxt = 1;
2702 
2703 	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
2704 
2705 		for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
2706 
2707 			if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
2708 				max_idx = MAX_RCNTXT_SDS_RINGS;
2709 			else
2710 				max_idx = hw->num_sds_rings - i;
2711 
2712 			err = qla_add_rcv_rings(ha, i, max_idx);
2713 			if (err)
2714 				return -1;
2715 
2716 			i += max_idx;
2717 		}
2718 	}
2719 
2720 	if (hw->num_rds_rings > 1) {
2721 
2722 		for (i = 0; i < hw->num_rds_rings; ) {
2723 
2724 			if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
2725 				max_idx = MAX_SDS_TO_RDS_MAP;
2726 			else
2727 				max_idx = hw->num_rds_rings - i;
2728 
2729 			err = qla_map_sds_to_rds(ha, i, max_idx);
2730 			if (err)
2731 				return -1;
2732 
2733 			i += max_idx;
2734 		}
2735 	}
2736 
2737 	return (0);
2738 }
2739 
2740 static int
2741 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
2742 {
2743 	device_t		dev = ha->pci_dev;
2744 	q80_rq_add_rcv_rings_t	*add_rcv;
2745 	q80_rsp_add_rcv_rings_t	*add_rcv_rsp;
2746 	uint32_t		i,j, err;
2747         qla_hw_t		*hw = &ha->hw;
2748 
2749 	add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
2750 	bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
2751 
2752 	add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
2753 	add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
2754 	add_rcv->count_version |= Q8_MBX_CMD_VERSION;
2755 
2756 	add_rcv->nrds_sets_rings = nsds | (1 << 5);
2757 	add_rcv->nsds_rings = nsds;
2758 	add_rcv->cntxt_id = hw->rcv_cntxt_id;
2759 
2760         for (i = 0; i <  nsds; i++) {
2761 
2762 		j = i + sds_idx;
2763 
2764                 add_rcv->sds[i].paddr =
2765                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
2766 
2767                 add_rcv->sds[i].size =
2768                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2769 
2770                 add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
2771                 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
2772 
2773         }
2774 
2775         for (i = 0; (i <  nsds); i++) {
2776                 j = i + sds_idx;
2777 
2778                 add_rcv->rds[i].paddr_std =
2779                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
2780 
2781 		if (ha->hw.enable_9kb)
2782 			add_rcv->rds[i].std_bsize =
2783 				qla_host_to_le64(MJUM9BYTES);
2784 		else
2785                 	add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2786 
2787                 add_rcv->rds[i].std_nentries =
2788                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2789         }
2790 
2791 
2792         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
2793 		(sizeof (q80_rq_add_rcv_rings_t) >> 2),
2794                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2795                 device_printf(dev, "%s: failed0\n", __func__);
2796                 return (-1);
2797         }
2798 
2799         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
2800 
2801         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
2802 
2803         if (err) {
2804                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2805                 return (-1);
2806         }
2807 
2808 	for (i = 0; i < nsds; i++) {
2809 		hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
2810 	}
2811 
2812 	for (i = 0; i < nsds; i++) {
2813 		hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
2814 	}
2815 
2816 	return (0);
2817 }
2818 
2819 /*
2820  * Name: qla_del_rcv_cntxt
2821  * Function: Destroys the Receive Context.
2822  */
2823 static void
2824 qla_del_rcv_cntxt(qla_host_t *ha)
2825 {
2826 	device_t			dev = ha->pci_dev;
2827 	q80_rcv_cntxt_destroy_t		*rcntxt;
2828 	q80_rcv_cntxt_destroy_rsp_t	*rcntxt_rsp;
2829 	uint32_t			err;
2830 	uint8_t				bcast_mac[6];
2831 
2832 	if (!ha->hw.flags.init_rx_cnxt)
2833 		return;
2834 
2835 	if (qla_hw_del_all_mcast(ha))
2836 		return;
2837 
2838 	if (ha->hw.flags.bcast_mac) {
2839 
2840 		bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2841 		bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2842 
2843 		if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
2844 			return;
2845 		ha->hw.flags.bcast_mac = 0;
2846 
2847 	}
2848 
2849 	if (ha->hw.flags.unicast_mac) {
2850 		if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
2851 			return;
2852 		ha->hw.flags.unicast_mac = 0;
2853 	}
2854 
2855 	rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
2856 	bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
2857 
2858 	rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
2859 	rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
2860 	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2861 
2862 	rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
2863 
2864         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2865 		(sizeof (q80_rcv_cntxt_destroy_t) >> 2),
2866                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
2867                 device_printf(dev, "%s: failed0\n", __func__);
2868                 return;
2869         }
2870         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
2871 
2872         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2873 
2874         if (err) {
2875                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2876         }
2877 
2878 	ha->hw.flags.init_rx_cnxt = 0;
2879 	return;
2880 }
2881 
2882 /*
2883  * Name: qla_init_xmt_cntxt
2884  * Function: Creates the Transmit Context.
2885  */
2886 static int
2887 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2888 {
2889 	device_t		dev;
2890         qla_hw_t		*hw = &ha->hw;
2891 	q80_rq_tx_cntxt_t	*tcntxt;
2892 	q80_rsp_tx_cntxt_t	*tcntxt_rsp;
2893 	uint32_t		err;
2894 	qla_hw_tx_cntxt_t       *hw_tx_cntxt;
2895 	uint32_t		intr_idx;
2896 
2897 	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2898 
2899 	dev = ha->pci_dev;
2900 
2901 	/*
2902 	 * Create Transmit Context
2903 	 */
2904 	tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
2905 	bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
2906 
2907 	tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
2908 	tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
2909 	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2910 
2911 	intr_idx = txr_idx;
2912 
2913 #ifdef QL_ENABLE_ISCSI_TLV
2914 
2915 	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
2916 				Q8_TX_CNTXT_CAP0_TC;
2917 
2918 	if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
2919 		tcntxt->traffic_class = 1;
2920 	}
2921 
2922 	intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
2923 
2924 #else
2925 	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
2926 
2927 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
2928 
2929 	tcntxt->ntx_rings = 1;
2930 
2931 	tcntxt->tx_ring[0].paddr =
2932 		qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
2933 	tcntxt->tx_ring[0].tx_consumer =
2934 		qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
2935 	tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
2936 
2937 	tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
2938 	tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
2939 
2940 	hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
2941 	hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
2942 
2943         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2944 		(sizeof (q80_rq_tx_cntxt_t) >> 2),
2945                 ha->hw.mbox,
2946 		(sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
2947                 device_printf(dev, "%s: failed0\n", __func__);
2948                 return (-1);
2949         }
2950         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
2951 
2952         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2953 
2954         if (err) {
2955                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2956 		return -1;
2957         }
2958 
2959 	hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
2960 	hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
2961 
2962 	if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
2963 		return (-1);
2964 
2965 	return (0);
2966 }
2967 
2968 
2969 /*
2970  * Name: qla_del_xmt_cntxt
2971  * Function: Destroys the Transmit Context.
2972  */
2973 static int
2974 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2975 {
2976 	device_t			dev = ha->pci_dev;
2977 	q80_tx_cntxt_destroy_t		*tcntxt;
2978 	q80_tx_cntxt_destroy_rsp_t	*tcntxt_rsp;
2979 	uint32_t			err;
2980 
2981 	tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
2982 	bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
2983 
2984 	tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
2985 	tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
2986 	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2987 
2988 	tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
2989 
2990         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2991 		(sizeof (q80_tx_cntxt_destroy_t) >> 2),
2992                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
2993                 device_printf(dev, "%s: failed0\n", __func__);
2994                 return (-1);
2995         }
2996         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
2997 
2998         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2999 
3000         if (err) {
3001                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3002 		return (-1);
3003         }
3004 
3005 	return (0);
3006 }
3007 static void
3008 qla_del_xmt_cntxt(qla_host_t *ha)
3009 {
3010 	uint32_t i;
3011 
3012 	if (!ha->hw.flags.init_tx_cnxt)
3013 		return;
3014 
3015 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
3016 		if (qla_del_xmt_cntxt_i(ha, i))
3017 			break;
3018 	}
3019 	ha->hw.flags.init_tx_cnxt = 0;
3020 }
3021 
3022 static int
3023 qla_init_xmt_cntxt(qla_host_t *ha)
3024 {
3025 	uint32_t i, j;
3026 
3027 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
3028 		if (qla_init_xmt_cntxt_i(ha, i) != 0) {
3029 			for (j = 0; j < i; j++)
3030 				qla_del_xmt_cntxt_i(ha, j);
3031 			return (-1);
3032 		}
3033 	}
3034 	ha->hw.flags.init_tx_cnxt = 1;
3035 	return (0);
3036 }
3037 
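/*
 * Name: qla_hw_all_mcast
 * Function: Adds (add_mcast = 1) or deletes (add_mcast = 0) every cached
 *	multicast address in the hardware, batching them through
 *	ha->hw.mac_addr_arr in groups of at most Q8_MAX_MAC_ADDRS.
 */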
3038 static int
3039 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
3040 {
3041 	int i, nmcast;
3042 	uint32_t count = 0;
3043 	uint8_t *mcast;
3044 
3045 	nmcast = ha->hw.nmcast;
3046 
3047 	QL_DPRINT2(ha, (ha->pci_dev,
3048 		"%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
3049 
3050 	mcast = ha->hw.mac_addr_arr;
3051 	memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3052 
3053 	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
3054 		if ((ha->hw.mcast[i].addr[0] != 0) ||
3055 			(ha->hw.mcast[i].addr[1] != 0) ||
3056 			(ha->hw.mcast[i].addr[2] != 0) ||
3057 			(ha->hw.mcast[i].addr[3] != 0) ||
3058 			(ha->hw.mcast[i].addr[4] != 0) ||
3059 			(ha->hw.mcast[i].addr[5] != 0)) {
3060 
3061 			bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
3062 			mcast = mcast + ETHER_ADDR_LEN;
3063 			count++;
3064 
3065 			if (count == Q8_MAX_MAC_ADDRS) {
3066 				if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3067 					add_mcast, count)) {
3068                 			device_printf(ha->pci_dev,
3069 						"%s: failed\n", __func__);
3070 					return (-1);
3071 				}
3072 
3073 				count = 0;
3074 				mcast = ha->hw.mac_addr_arr;
3075 				memset(mcast, 0,
3076 					(Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3077 			}
3078 
3079 			nmcast--;
3080 		}
3081 	}
3082 
3083 	if (count) {
3084 		if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
3085 			count)) {
3086                 	device_printf(ha->pci_dev, "%s: failed\n", __func__);
3087 			return (-1);
3088 		}
3089 	}
3090 	QL_DPRINT2(ha, (ha->pci_dev,
3091 		"%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
3092 
3093 	return 0;
3094 }
3095 
3096 static int
3097 qla_hw_add_all_mcast(qla_host_t *ha)
3098 {
3099 	int ret;
3100 
3101 	ret = qla_hw_all_mcast(ha, 1);
3102 
3103 	return (ret);
3104 }
3105 
3106 static int
3107 qla_hw_del_all_mcast(qla_host_t *ha)
3108 {
3109 	int ret;
3110 
3111 	ret = qla_hw_all_mcast(ha, 0);
3112 
3113 	bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
3114 	ha->hw.nmcast = 0;
3115 
3116 	return (ret);
3117 }
3118 
3119 static int
3120 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
3121 {
3122 	int i;
3123 
3124 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3125 		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
3126 			return (0); /* it has already been added */
3127 	}
3128 	return (-1);
3129 }
3130 
3131 static int
3132 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3133 {
3134 	int i;
3135 
3136 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3137 
3138 		if ((ha->hw.mcast[i].addr[0] == 0) &&
3139 			(ha->hw.mcast[i].addr[1] == 0) &&
3140 			(ha->hw.mcast[i].addr[2] == 0) &&
3141 			(ha->hw.mcast[i].addr[3] == 0) &&
3142 			(ha->hw.mcast[i].addr[4] == 0) &&
3143 			(ha->hw.mcast[i].addr[5] == 0)) {
3144 
3145 			bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
3146 			ha->hw.nmcast++;
3147 
3148 			mta = mta + ETHER_ADDR_LEN;
3149 			nmcast--;
3150 
3151 			if (nmcast == 0)
3152 				break;
3153 		}
3154 
3155 	}
3156 	return 0;
3157 }
3158 
3159 static int
3160 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3161 {
3162 	int i;
3163 
3164 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3165 		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
3166 
3167 			ha->hw.mcast[i].addr[0] = 0;
3168 			ha->hw.mcast[i].addr[1] = 0;
3169 			ha->hw.mcast[i].addr[2] = 0;
3170 			ha->hw.mcast[i].addr[3] = 0;
3171 			ha->hw.mcast[i].addr[4] = 0;
3172 			ha->hw.mcast[i].addr[5] = 0;
3173 
3174 			ha->hw.nmcast--;
3175 
3176 			mta = mta + ETHER_ADDR_LEN;
3177 			nmcast--;
3178 
3179 			if (nmcast == 0)
3180 				break;
3181 		}
3182 	}
3183 	return 0;
3184 }
3185 
3186 /*
3187  * Name: ql_hw_set_multi
3188  * Function: Sets the Multicast Addresses provided by the host O.S. into the
3189  *	hardware (for the given interface)
3190  */
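/*
 * The addresses are programmed in batches of Q8_MAX_MAC_ADDRS; the driver's
 * multicast cache (ha->hw.mcast[]) is updated via qla_hw_add_mcast()/
 * qla_hw_del_mcast() only after the corresponding batch has been accepted
 * by the firmware.
 */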
3191 int
3192 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
3193 	uint32_t add_mac)
3194 {
3195 	uint8_t *mta = mcast_addr;
3196 	int i;
3197 	int ret = 0;
3198 	uint32_t count = 0;
3199 	uint8_t *mcast;
3200 
3201 	mcast = ha->hw.mac_addr_arr;
3202 	memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3203 
3204 	for (i = 0; i < mcnt; i++) {
3205 		if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
3206 			if (add_mac) {
3207 				if (qla_hw_mac_addr_present(ha, mta) != 0) {
3208 					bcopy(mta, mcast, ETHER_ADDR_LEN);
3209 					mcast = mcast + ETHER_ADDR_LEN;
3210 					count++;
3211 				}
3212 			} else {
3213 				if (qla_hw_mac_addr_present(ha, mta) == 0) {
3214 					bcopy(mta, mcast, ETHER_ADDR_LEN);
3215 					mcast = mcast + ETHER_ADDR_LEN;
3216 					count++;
3217 				}
3218 			}
3219 		}
3220 		if (count == Q8_MAX_MAC_ADDRS) {
3221 			if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3222 				add_mac, count)) {
3223                 		device_printf(ha->pci_dev, "%s: failed\n",
3224 					__func__);
3225 				return (-1);
3226 			}
3227 
3228 			if (add_mac) {
3229 				qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
3230 					count);
3231 			} else {
3232 				qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
3233 					count);
3234 			}
3235 
3236 			count = 0;
3237 			mcast = ha->hw.mac_addr_arr;
3238 			memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3239 		}
3240 
3241 		mta += Q8_MAC_ADDR_LEN;
3242 	}
3243 
3244 	if (count) {
3245 		if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
3246 			count)) {
3247                 	device_printf(ha->pci_dev, "%s: failed\n", __func__);
3248 			return (-1);
3249 		}
3250 		if (add_mac) {
3251 			qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
3252 		} else {
3253 			qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
3254 		}
3255 	}
3256 
3257 	return (ret);
3258 }
3259 
3260 /*
3261  * Name: ql_hw_tx_done_locked
3262  * Function: Handle Transmit Completions
3263  */
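/*
 * Walks from txr_comp up to the completion index written back by the
 * firmware (*tx_cons), unmapping and freeing the mbuf of each completed
 * descriptor and returning the descriptors to txr_free. As the _locked
 * suffix suggests, the caller is expected to hold the corresponding
 * transmit lock.
 */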
3264 void
3265 ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
3266 {
3267 	qla_tx_buf_t *txb;
3268         qla_hw_t *hw = &ha->hw;
3269 	uint32_t comp_idx, comp_count = 0;
3270 	qla_hw_tx_cntxt_t *hw_tx_cntxt;
3271 
3272 	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3273 
3274 	/* retrieve index of last entry in tx ring completed */
3275 	/* retrieve the index of the last completed entry in the tx ring */
3276 
3277 	while (comp_idx != hw_tx_cntxt->txr_comp) {
3278 
3279 		txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
3280 
3281 		hw_tx_cntxt->txr_comp++;
3282 		if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
3283 			hw_tx_cntxt->txr_comp = 0;
3284 
3285 		comp_count++;
3286 
3287 		if (txb->m_head) {
3288 			if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
3289 
3290 			bus_dmamap_sync(ha->tx_tag, txb->map,
3291 				BUS_DMASYNC_POSTWRITE);
3292 			bus_dmamap_unload(ha->tx_tag, txb->map);
3293 			m_freem(txb->m_head);
3294 
3295 			txb->m_head = NULL;
3296 		}
3297 	}
3298 
3299 	hw_tx_cntxt->txr_free += comp_count;
3300 	return;
3301 }
3302 
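/*
 * Name: ql_update_link_state
 * Function: Reads Q8_LINK_STATE and updates ha->hw.link_up; pci function 0
 *	uses bits [3:0], other functions bits [7:4], with a value of 1
 *	meaning link up. The network stack is notified only when the state
 *	actually changes.
 */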
3303 void
3304 ql_update_link_state(qla_host_t *ha)
3305 {
3306 	uint32_t link_state;
3307 	uint32_t prev_link_state;
3308 
3309 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3310 		ha->hw.link_up = 0;
3311 		return;
3312 	}
3313 	link_state = READ_REG32(ha, Q8_LINK_STATE);
3314 
3315 	prev_link_state =  ha->hw.link_up;
3316 
3317 	if (ha->pci_func == 0)
3318 		ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0);
3319 	else
3320 		ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
3321 
3322 	if (prev_link_state !=  ha->hw.link_up) {
3323 		if (ha->hw.link_up) {
3324 			if_link_state_change(ha->ifp, LINK_STATE_UP);
3325 		} else {
3326 			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3327 		}
3328 	}
3329 	return;
3330 }
3331 
3332 void
3333 ql_hw_stop_rcv(qla_host_t *ha)
3334 {
3335 	int i, done, count = 100;
3336 
3337 	ha->flags.stop_rcv = 1;
3338 
3339 	while (count) {
3340 		done = 1;
3341 		for (i = 0; i < ha->hw.num_sds_rings; i++) {
3342 			if (ha->hw.sds[i].rcv_active)
3343 				done = 0;
3344 		}
3345 		if (done)
3346 			break;
3347 		else
3348 			qla_mdelay(__func__, 10);
3349 		count--;
3350 	}
3351 	if (!count)
3352 		device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__);
3353 
3354 	return;
3355 }
3356 
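/*
 * Name: ql_hw_check_health
 * Function: Periodic health check; the register reads are rate limited to
 *	one in every 1000 invocations. Returns -1 on a temperature alert
 *	(low 16 bits of Q8_ASIC_TEMPERATURE equal to 2 or 3) or when the
 *	firmware heartbeat register has stopped changing.
 */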
3357 int
3358 ql_hw_check_health(qla_host_t *ha)
3359 {
3360 	uint32_t val;
3361 
3362 	ha->hw.health_count++;
3363 
3364 	if (ha->hw.health_count < 1000)
3365 		return 0;
3366 
3367 	ha->hw.health_count = 0;
3368 
3369 	val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3370 
3371 	if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3372 		(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3373 		device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3374 			__func__, val);
3375 		return -1;
3376 	}
3377 
3378 	val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3379 
3380 	if ((val != ha->hw.hbeat_value) &&
3381 		(!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3382 		ha->hw.hbeat_value = val;
3383 		return 0;
3384 	}
3385 	device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3386 		__func__, val);
3387 
3388 	return -1;
3389 }
3390 
3391 static int
3392 qla_init_nic_func(qla_host_t *ha)
3393 {
3394         device_t                dev;
3395         q80_init_nic_func_t     *init_nic;
3396         q80_init_nic_func_rsp_t *init_nic_rsp;
3397         uint32_t                err;
3398 
3399         dev = ha->pci_dev;
3400 
3401         init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3402         bzero(init_nic, sizeof(q80_init_nic_func_t));
3403 
3404         init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3405         init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3406         init_nic->count_version |= Q8_MBX_CMD_VERSION;
3407 
3408         init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3409         init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3410         init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3411 
3412 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3413         if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3414                 (sizeof (q80_init_nic_func_t) >> 2),
3415                 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3416                 device_printf(dev, "%s: failed\n", __func__);
3417                 return -1;
3418         }
3419 
3420         init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3421 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3422 
3423         err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3424 
3425         if (err) {
3426                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3427         }
3428 
3429         return 0;
3430 }
3431 
3432 static int
3433 qla_stop_nic_func(qla_host_t *ha)
3434 {
3435         device_t                dev;
3436         q80_stop_nic_func_t     *stop_nic;
3437         q80_stop_nic_func_rsp_t *stop_nic_rsp;
3438         uint32_t                err;
3439 
3440         dev = ha->pci_dev;
3441 
3442         stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3443         bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3444 
3445         stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3446         stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3447         stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3448 
3449         stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3450         stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3451 
3452 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3453         if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3454                 (sizeof (q80_stop_nic_func_t) >> 2),
3455                 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3456                 device_printf(dev, "%s: failed\n", __func__);
3457                 return -1;
3458         }
3459 
3460         stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3461 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3462 
3463         err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3464 
3465         if (err) {
3466                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3467         }
3468 
3469         return 0;
3470 }
3471 
3472 static int
3473 qla_query_fw_dcbx_caps(qla_host_t *ha)
3474 {
3475         device_t                        dev;
3476         q80_query_fw_dcbx_caps_t        *fw_dcbx;
3477         q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
3478         uint32_t                        err;
3479 
3480         dev = ha->pci_dev;
3481 
3482         fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3483         bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3484 
3485         fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3486         fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3487         fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3488 
3489         ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3490         if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3491                 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3492                 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3493                 device_printf(dev, "%s: failed\n", __func__);
3494                 return -1;
3495         }
3496 
3497         fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3498         ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3499                 sizeof (q80_query_fw_dcbx_caps_rsp_t));
3500 
3501         err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3502 
3503         if (err) {
3504                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3505         }
3506 
3507         return 0;
3508 }
3509 
3510 static int
3511 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3512         uint32_t aen_mb3, uint32_t aen_mb4)
3513 {
3514         device_t                dev;
3515         q80_idc_ack_t           *idc_ack;
3516         q80_idc_ack_rsp_t       *idc_ack_rsp;
3517         uint32_t                err;
3518         int                     count = 300;
3519 
3520         dev = ha->pci_dev;
3521 
3522         idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3523         bzero(idc_ack, sizeof(q80_idc_ack_t));
3524 
3525         idc_ack->opcode = Q8_MBX_IDC_ACK;
3526         idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3527         idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3528 
3529         idc_ack->aen_mb1 = aen_mb1;
3530         idc_ack->aen_mb2 = aen_mb2;
3531         idc_ack->aen_mb3 = aen_mb3;
3532         idc_ack->aen_mb4 = aen_mb4;
3533 
3534         ha->hw.imd_compl= 0;
3535 
3536         if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3537                 (sizeof (q80_idc_ack_t) >> 2),
3538                 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3539                 device_printf(dev, "%s: failed\n", __func__);
3540                 return -1;
3541         }
3542 
3543         idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3544 
3545         err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3546 
3547         if (err) {
3548                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3549                 return(-1);
3550         }
3551 
3552         while (count && !ha->hw.imd_compl) {
3553                 qla_mdelay(__func__, 100);
3554                 count--;
3555         }
3556 
3557         if (!count)
3558                 return -1;
3559         else
3560                 device_printf(dev, "%s: count %d\n", __func__, count);
3561 
3562         return (0);
3563 }
3564 
3565 static int
3566 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3567 {
3568         device_t                dev;
3569         q80_set_port_cfg_t      *pcfg;
3570         q80_set_port_cfg_rsp_t  *pfg_rsp;
3571         uint32_t                err;
3572         int                     count = 300;
3573 
3574         dev = ha->pci_dev;
3575 
3576         pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3577         bzero(pcfg, sizeof(q80_set_port_cfg_t));
3578 
3579         pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3580         pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3581         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3582 
3583         pcfg->cfg_bits = cfg_bits;
3584 
3585         device_printf(dev, "%s: cfg_bits"
3586                 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3587                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3588                 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3589                 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3590                 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
3591 
3592         ha->hw.imd_compl= 0;
3593 
3594         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3595                 (sizeof (q80_set_port_cfg_t) >> 2),
3596                 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3597                 device_printf(dev, "%s: failed\n", __func__);
3598                 return -1;
3599         }
3600 
3601         pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
3602 
3603         err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
3604 
3605         if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3606                 while (count && !ha->hw.imd_compl) {
3607                         qla_mdelay(__func__, 100);
3608                         count--;
3609                 }
3610                 if (count) {
3611                         device_printf(dev, "%s: count %d\n", __func__, count);
3612 
3613                         err = 0;
3614                 }
3615         }
3616 
3617         if (err) {
3618                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3619                 return(-1);
3620         }
3621 
3622         return (0);
3623 }
3624 
3625 
3626 static int
3627 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3628 {
3629 	uint32_t			err;
3630 	device_t			dev = ha->pci_dev;
3631 	q80_config_md_templ_size_t	*md_size;
3632 	q80_config_md_templ_size_rsp_t	*md_size_rsp;
3633 
3634 #ifndef QL_LDFLASH_FW
3635 
3636 	ql_minidump_template_hdr_t *hdr;
3637 
3638 	hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
3639 	*size = hdr->size_of_template;
3640 	return (0);
3641 
3642 #endif /* #ifdef QL_LDFLASH_FW */
3643 
3644 	md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3645 	bzero(md_size, sizeof(q80_config_md_templ_size_t));
3646 
3647 	md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3648 	md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3649 	md_size->count_version |= Q8_MBX_CMD_VERSION;
3650 
3651 	if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3652 		(sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3653 		(sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3654 
3655 		device_printf(dev, "%s: failed\n", __func__);
3656 
3657 		return (-1);
3658 	}
3659 
3660 	md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3661 
3662 	err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3663 
3664         if (err) {
3665 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3666 		return(-1);
3667         }
3668 
3669 	*size = md_size_rsp->templ_size;
3670 
3671 	return (0);
3672 }
3673 
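/*
 * Read back the current port configuration bits via the
 * Q8_MBX_GET_PORT_CONFIG mailbox command and log the decoded
 * pause/DCBX settings.
 */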
3674 static int
3675 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3676 {
3677         device_t                dev;
3678         q80_get_port_cfg_t      *pcfg;
3679         q80_get_port_cfg_rsp_t  *pcfg_rsp;
3680         uint32_t                err;
3681 
3682         dev = ha->pci_dev;
3683 
3684         pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3685         bzero(pcfg, sizeof(q80_get_port_cfg_t));
3686 
3687         pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
3688         pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
3689         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3690 
3691         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3692                 (sizeof (q80_get_port_cfg_t) >> 2),
3693                 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
3694                 device_printf(dev, "%s: failed\n", __func__);
3695                 return -1;
3696         }
3697 
3698         pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
3699 
3700         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3701 
3702         if (err) {
3703                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3704                 return(-1);
3705         }
3706 
3707         device_printf(dev, "%s: [cfg_bits, port type]"
3708                 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3709                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3710                 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
3711                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3712                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3713                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
3714                 );
3715 
3716         *cfg_bits = pcfg_rsp->cfg_bits;
3717 
3718         return (0);
3719 }
3720 
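/*
 * Check whether the given frame is an iSCSI PDU, i.e. a TCP segment with
 * source or destination port 3260 (IPv4 or IPv6, with or without a VLAN
 * tag).  Headers that are not contiguous in the first mbuf are copied out
 * with m_copydata().  Returns 0 for iSCSI, -1 otherwise.
 */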
3721 int
3722 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
3723 {
3724         struct ether_vlan_header        *eh;
3725         uint16_t                        etype;
3726         struct ip                       *ip = NULL;
3727         struct ip6_hdr                  *ip6 = NULL;
3728         struct tcphdr                   *th = NULL;
3729         uint32_t                        hdrlen;
3730         uint32_t                        offset;
3731         uint8_t                         buf[sizeof(struct ip6_hdr)];
3732 
3733         eh = mtod(mp, struct ether_vlan_header *);
3734 
3735         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3736                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3737                 etype = ntohs(eh->evl_proto);
3738         } else {
3739                 hdrlen = ETHER_HDR_LEN;
3740                 etype = ntohs(eh->evl_encap_proto);
3741         }
3742 
3743 	if (etype == ETHERTYPE_IP) {
3744 
3745 		offset = (hdrlen + sizeof (struct ip));
3746 
3747 		if (mp->m_len >= offset) {
3748                         ip = (struct ip *)(mp->m_data + hdrlen);
3749 		} else {
3750 			m_copydata(mp, hdrlen, sizeof (struct ip), buf);
3751                         ip = (struct ip *)buf;
3752 		}
3753 
3754                 if (ip->ip_p == IPPROTO_TCP) {
3755 
3756 			hdrlen += ip->ip_hl << 2;
3757 			offset = hdrlen + 4;
3758 
3759 			if (mp->m_len >= offset) {
3760 				th = (struct tcphdr *)(mp->m_data + hdrlen);
3761 			} else {
3762                                 m_copydata(mp, hdrlen, 4, buf);
3763 				th = (struct tcphdr *)buf;
3764 			}
3765                 }
3766 
3767 	} else if (etype == ETHERTYPE_IPV6) {
3768 
3769 		offset = (hdrlen + sizeof (struct ip6_hdr));
3770 
3771 		if (mp->m_len >= offset) {
3772                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
3773 		} else {
3774                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
3775                         ip6 = (struct ip6_hdr *)buf;
3776 		}
3777 
3778                 if (ip6->ip6_nxt == IPPROTO_TCP) {
3779 
3780 			hdrlen += sizeof(struct ip6_hdr);
3781 			offset = hdrlen + 4;
3782 
3783 			if (mp->m_len >= offset) {
3784 				th = (struct tcphdr *)(mp->m_data + hdrlen);
3785 			} else {
3786 				m_copydata(mp, hdrlen, 4, buf);
3787 				th = (struct tcphdr *)buf;
3788 			}
3789                 }
3790 	}
3791 
3792         if (th != NULL) {
3793                 if ((th->th_sport == htons(3260)) ||
3794                         (th->th_dport == htons(3260)))
3795                         return 0;
3796         }
3797         return (-1);
3798 }
3799 
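/*
 * Handle firmware asynchronous events.  Event 0x8101 is acknowledged with
 * qla_idc_ack() using the remaining AEN mailbox values; all other events
 * are ignored here.
 */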
3800 void
3801 qla_hw_async_event(qla_host_t *ha)
3802 {
3803         switch (ha->hw.aen_mb0) {
3804         case 0x8101:
3805                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
3806                         ha->hw.aen_mb3, ha->hw.aen_mb4);
3807 
3808                 break;
3809 
3810         default:
3811                 break;
3812         }
3813 
3814         return;
3815 }
3816 
3817 #ifdef QL_LDFLASH_FW
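/*
 * Retrieve the minidump template from the adapter into the pre-allocated
 * minidump DMA buffer (Q8_MBX_GET_MINIDUMP_TMPLT).
 */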
3818 static int
3819 ql_get_minidump_template(qla_host_t *ha)
3820 {
3821 	uint32_t			err;
3822 	device_t			dev = ha->pci_dev;
3823 	q80_config_md_templ_cmd_t	*md_templ;
3824 	q80_config_md_templ_cmd_rsp_t	*md_templ_rsp;
3825 
3826 	md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
3827 	bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
3828 
3829 	md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
3830 	md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
3831 	md_templ->count_version |= Q8_MBX_CMD_VERSION;
3832 
3833 	md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
3834 	md_templ->buff_size = ha->hw.dma_buf.minidump.size;
3835 
3836 	if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
3837 		(sizeof(q80_config_md_templ_cmd_t) >> 2),
3838 		 ha->hw.mbox,
3839 		(sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
3840 
3841 		device_printf(dev, "%s: failed\n", __func__);
3842 
3843 		return (-1);
3844 	}
3845 
3846 	md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
3847 
3848 	err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
3849 
3850 	if (err) {
3851 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3852 		return (-1);
3853 	}
3854 
3855 	return (0);
3856 
3857 }
3858 #endif /* #ifdef QL_LDFLASH_FW */
3859 
3860 /*
3861  * Minidump related functionality
3862  */
3863 
3864 static int ql_parse_template(qla_host_t *ha);
3865 
3866 static uint32_t ql_rdcrb(qla_host_t *ha,
3867 			ql_minidump_entry_rdcrb_t *crb_entry,
3868 			uint32_t * data_buff);
3869 
3870 static uint32_t ql_pollrd(qla_host_t *ha,
3871 			ql_minidump_entry_pollrd_t *entry,
3872 			uint32_t * data_buff);
3873 
3874 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
3875 			ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
3876 			uint32_t *data_buff);
3877 
3878 static uint32_t ql_L2Cache(qla_host_t *ha,
3879 			ql_minidump_entry_cache_t *cacheEntry,
3880 			uint32_t * data_buff);
3881 
3882 static uint32_t ql_L1Cache(qla_host_t *ha,
3883 			ql_minidump_entry_cache_t *cacheEntry,
3884 			uint32_t *data_buff);
3885 
3886 static uint32_t ql_rdocm(qla_host_t *ha,
3887 			ql_minidump_entry_rdocm_t *ocmEntry,
3888 			uint32_t *data_buff);
3889 
3890 static uint32_t ql_rdmem(qla_host_t *ha,
3891 			ql_minidump_entry_rdmem_t *mem_entry,
3892 			uint32_t *data_buff);
3893 
3894 static uint32_t ql_rdrom(qla_host_t *ha,
3895 			ql_minidump_entry_rdrom_t *romEntry,
3896 			uint32_t *data_buff);
3897 
3898 static uint32_t ql_rdmux(qla_host_t *ha,
3899 			ql_minidump_entry_mux_t *muxEntry,
3900 			uint32_t *data_buff);
3901 
3902 static uint32_t ql_rdmux2(qla_host_t *ha,
3903 			ql_minidump_entry_mux2_t *muxEntry,
3904 			uint32_t *data_buff);
3905 
3906 static uint32_t ql_rdqueue(qla_host_t *ha,
3907 			ql_minidump_entry_queue_t *queueEntry,
3908 			uint32_t *data_buff);
3909 
3910 static uint32_t ql_cntrl(qla_host_t *ha,
3911 			ql_minidump_template_hdr_t *template_hdr,
3912 			ql_minidump_entry_cntrl_t *crbEntry);
3913 
3914 
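/*
 * Compute the capture buffer size needed for the current capture mask by
 * summing the template's capture_size_array entries for every capture
 * level enabled in mdump_capture_mask (levels start at bit 1).
 */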
3915 static uint32_t
3916 ql_minidump_size(qla_host_t *ha)
3917 {
3918 	uint32_t i, k;
3919 	uint32_t size = 0;
3920 	ql_minidump_template_hdr_t *hdr;
3921 
3922 	hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
3923 
3924 	i = 0x2;
3925 
3926 	for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
3927 		if (i & ha->hw.mdump_capture_mask)
3928 			size += hdr->capture_size_array[k];
3929 		i = i << 1;
3930 	}
3931 	return (size);
3932 }
3933 
3934 static void
3935 ql_free_minidump_buffer(qla_host_t *ha)
3936 {
3937 	if (ha->hw.mdump_buffer != NULL) {
3938 		free(ha->hw.mdump_buffer, M_QLA83XXBUF);
3939 		ha->hw.mdump_buffer = NULL;
3940 		ha->hw.mdump_buffer_size = 0;
3941 	}
3942 	return;
3943 }
3944 
3945 static int
3946 ql_alloc_minidump_buffer(qla_host_t *ha)
3947 {
3948 	ha->hw.mdump_buffer_size = ql_minidump_size(ha);
3949 
3950 	if (!ha->hw.mdump_buffer_size)
3951 		return (-1);
3952 
3953 	ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
3954 					M_NOWAIT);
3955 
3956 	if (ha->hw.mdump_buffer == NULL)
3957 		return (-1);
3958 
3959 	return (0);
3960 }
3961 
3962 static void
3963 ql_free_minidump_template_buffer(qla_host_t *ha)
3964 {
3965 	if (ha->hw.mdump_template != NULL) {
3966 		free(ha->hw.mdump_template, M_QLA83XXBUF);
3967 		ha->hw.mdump_template = NULL;
3968 		ha->hw.mdump_template_size = 0;
3969 	}
3970 	return;
3971 }
3972 
3973 static int
3974 ql_alloc_minidump_template_buffer(qla_host_t *ha)
3975 {
3976 	ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
3977 
3978 	ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
3979 					M_QLA83XXBUF, M_NOWAIT);
3980 
3981 	if (ha->hw.mdump_template == NULL)
3982 		return (-1);
3983 
3984 	return (0);
3985 }
3986 
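/*
 * Allocate both the template scratch copy and the capture buffer; release
 * the template buffer again if the capture buffer allocation fails.
 */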
3987 static int
3988 ql_alloc_minidump_buffers(qla_host_t *ha)
3989 {
3990 	int ret;
3991 
3992 	ret = ql_alloc_minidump_template_buffer(ha);
3993 
3994 	if (ret)
3995 		return (ret);
3996 
3997 	ret = ql_alloc_minidump_buffer(ha);
3998 
3999 	if (ret)
4000 		ql_free_minidump_template_buffer(ha);
4001 
4002 	return (ret);
4003 }
4004 
4005 
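/*
 * Sum the template as 32-bit words, fold the 64-bit sum back into 32 bits
 * and return its one's complement; the caller treats a return value of
 * zero as a valid template.
 */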
4006 static uint32_t
4007 ql_validate_minidump_checksum(qla_host_t *ha)
4008 {
4009         uint64_t sum = 0;
4010 	int count;
4011 	uint32_t *template_buff;
4012 
4013 	count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
4014 	template_buff = ha->hw.dma_buf.minidump.dma_b;
4015 
4016 	while (count-- > 0) {
4017 		sum += *template_buff++;
4018 	}
4019 
4020 	while (sum >> 32) {
4021 		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
4022 	}
4023 
4024 	return (~sum);
4025 }
4026 
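/*
 * One-time minidump setup: query the template size, allocate the DMA
 * buffer, fetch the template from the adapter (or point at the built-in
 * ql83xx_minidump image), validate its checksum and allocate the capture
 * buffers.  Everything is torn down again on any failure.
 */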
4027 int
4028 ql_minidump_init(qla_host_t *ha)
4029 {
4030 	int		ret = 0;
4031 	uint32_t	template_size = 0;
4032 	device_t	dev = ha->pci_dev;
4033 
4034 	/*
4035 	 * Get Minidump Template Size
4036 	 */
4037 	ret = qla_get_minidump_tmplt_size(ha, &template_size);
4038 
4039 	if (ret || (template_size == 0)) {
4040 		device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
4041 			template_size);
4042 		return (-1);
4043 	}
4044 
4045 	/*
4046 	 * Allocate Memory for Minidump Template
4047 	 */
4048 
4049 	ha->hw.dma_buf.minidump.alignment = 8;
4050 	ha->hw.dma_buf.minidump.size = template_size;
4051 
4052 #ifdef QL_LDFLASH_FW
4053 	if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
4054 
4055 		device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
4056 
4057 		return (-1);
4058 	}
4059 	ha->hw.dma_buf.flags.minidump = 1;
4060 
4061 	/*
4062 	 * Retrieve Minidump Template
4063 	 */
4064 	ret = ql_get_minidump_template(ha);
4065 #else
4066 	ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
4067 
4068 #endif /* #ifdef QL_LDFLASH_FW */
4069 
4070 	if (ret == 0) {
4071 
4072 		ret = ql_validate_minidump_checksum(ha);
4073 
4074 		if (ret == 0) {
4075 
4076 			ret = ql_alloc_minidump_buffers(ha);
4077 
4078 			if (ret == 0)
4079 				ha->hw.mdump_init = 1;
4080 			else
4081 				device_printf(dev,
4082 					"%s: ql_alloc_minidump_buffers"
4083 					" failed\n", __func__);
4084 		} else {
4085 			device_printf(dev, "%s: ql_validate_minidump_checksum"
4086 				" failed\n", __func__);
4087 		}
4088 	} else {
4089 		device_printf(dev, "%s: ql_get_minidump_template failed\n",
4090 			 __func__);
4091 	}
4092 
4093 	if (ret)
4094 		ql_minidump_free(ha);
4095 
4096 	return (ret);
4097 }
4098 
4099 static void
4100 ql_minidump_free(qla_host_t *ha)
4101 {
4102 	ha->hw.mdump_init = 0;
4103 	if (ha->hw.dma_buf.flags.minidump) {
4104 		ha->hw.dma_buf.flags.minidump = 0;
4105 		ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
4106 	}
4107 
4108 	ql_free_minidump_template_buffer(ha);
4109 	ql_free_minidump_buffer(ha);
4110 
4111 	return;
4112 }
4113 
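/*
 * Capture a minidump (at most once per mdump_done cycle): stop the
 * firmware sequence, take a scratch copy of the template, walk it with
 * ql_parse_template() to fill mdump_buffer, then restart the sequence.
 */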
4114 void
4115 ql_minidump(qla_host_t *ha)
4116 {
4117 	if (!ha->hw.mdump_init)
4118 		return;
4119 
4120 	if (ha->hw.mdump_done)
4121 		return;
4122 
4123 	ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
4124 
4125 	bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
4126 	bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
4127 
4128 	bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
4129 		ha->hw.mdump_template_size);
4130 
4131 	ql_parse_template(ha);
4132 
4133 	ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
4134 
4135 	ha->hw.mdump_done = 1;
4136 
4137 	return;
4138 }
4139 
4140 
4141 /*
4142  * helper routines
4143  */
4144 static void
4145 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
4146 {
4147 	if (esize != entry->hdr.entry_capture_size) {
4148 		entry->hdr.entry_capture_size = esize;
4149 		entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
4150 	}
4151 	return;
4152 }
4153 
4154 
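/*
 * Walk the minidump template and process each entry according to its
 * entry_type, appending the captured data to the capture buffer.  Entries
 * that do not match the capture mask, or that would overflow the buffer,
 * are marked skipped.  The template is expected to start with a TLHDR
 * entry and to contain at most one RDEND entry.
 */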
4155 static int
4156 ql_parse_template(qla_host_t *ha)
4157 {
4158 	uint32_t num_of_entries, buff_level, e_cnt, esize;
4159 	uint32_t end_cnt, rv = 0;
4160 	char *dump_buff, *dbuff;
4161 	int sane_start = 0, sane_end = 0;
4162 	ql_minidump_template_hdr_t *template_hdr;
4163 	ql_minidump_entry_t *entry;
4164 	uint32_t capture_mask;
4165 	uint32_t dump_size;
4166 
4167 	/* Setup parameters */
4168 	template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4169 
4170 	if (template_hdr->entry_type == TLHDR)
4171 		sane_start = 1;
4172 
4173 	dump_buff = (char *) ha->hw.mdump_buffer;
4174 
4175 	num_of_entries = template_hdr->num_of_entries;
4176 
4177 	entry = (ql_minidump_entry_t *) ((char *)template_hdr
4178 			+ template_hdr->first_entry_offset );
4179 
4180 	template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4181 		template_hdr->ocm_window_array[ha->pci_func];
4182 	template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4183 
4184 	capture_mask = ha->hw.mdump_capture_mask;
4185 	dump_size = ha->hw.mdump_buffer_size;
4186 
4187 	template_hdr->driver_capture_mask = capture_mask;
4188 
4189 	QL_DPRINT80(ha, (ha->pci_dev,
4190 		"%s: sane_start = %d num_of_entries = %d "
4191 		"capture_mask = 0x%x dump_size = %d \n",
4192 		__func__, sane_start, num_of_entries, capture_mask, dump_size));
4193 
4194 	for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4195 
4196 		/*
4197 		 * If the entry's capture_mask does not intersect the requested
4198 		 * capture mask, mark it skipped in driver_flags and continue.
4199 		 */
4200 
4201 		if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4202 
4203 			entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4204 			entry = (ql_minidump_entry_t *) ((char *) entry
4205 					+ entry->hdr.entry_size);
4206 			continue;
4207 		}
4208 
4209 		/*
4210 		 * This is only needed in implementations where the allocated
4211 		 * capture buffer is too small to capture all of the required
4212 		 * entries for a given capture mask.  We need to empty the
4213 		 * buffer contents to a file, if possible, before processing
4214 		 * the next entry.  If the buff_full_flag is set, no further
4215 		 * capture will happen and all remaining non-control entries
4216 		 * will be skipped.
4217 		 */
4218 		if (entry->hdr.entry_capture_size != 0) {
4219 			if ((buff_level + entry->hdr.entry_capture_size) >
4220 				dump_size) {
4221 				/*  Try to recover by emptying buffer to file */
4222 				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4223 				entry = (ql_minidump_entry_t *) ((char *) entry
4224 						+ entry->hdr.entry_size);
4225 				continue;
4226 			}
4227 		}
4228 
4229 		/*
4230 		 * Decode the entry type and process it accordingly
4231 		 */
4232 
4233 		switch (entry->hdr.entry_type) {
4234 		case RDNOP:
4235 			break;
4236 
4237 		case RDEND:
4238 			if (sane_end == 0) {
4239 				end_cnt = e_cnt;
4240 			}
4241 			sane_end++;
4242 			break;
4243 
4244 		case RDCRB:
4245 			dbuff = dump_buff + buff_level;
4246 			esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
4247 			ql_entry_err_chk(entry, esize);
4248 			buff_level += esize;
4249 			break;
4250 
4251                 case POLLRD:
4252                         dbuff = dump_buff + buff_level;
4253                         esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
4254                         ql_entry_err_chk(entry, esize);
4255                         buff_level += esize;
4256                         break;
4257 
4258                 case POLLRDMWR:
4259                         dbuff = dump_buff + buff_level;
4260                         esize = ql_pollrd_modify_write(ha, (void *)entry,
4261 					(void *)dbuff);
4262                         ql_entry_err_chk(entry, esize);
4263                         buff_level += esize;
4264                         break;
4265 
4266 		case L2ITG:
4267 		case L2DTG:
4268 		case L2DAT:
4269 		case L2INS:
4270 			dbuff = dump_buff + buff_level;
4271 			esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
4272 			if (esize == -1) {
4273 				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4274 			} else {
4275 				ql_entry_err_chk(entry, esize);
4276 				buff_level += esize;
4277 			}
4278 			break;
4279 
4280 		case L1DAT:
4281 		case L1INS:
4282 			dbuff = dump_buff + buff_level;
4283 			esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
4284 			ql_entry_err_chk(entry, esize);
4285 			buff_level += esize;
4286 			break;
4287 
4288 		case RDOCM:
4289 			dbuff = dump_buff + buff_level;
4290 			esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
4291 			ql_entry_err_chk(entry, esize);
4292 			buff_level += esize;
4293 			break;
4294 
4295 		case RDMEM:
4296 			dbuff = dump_buff + buff_level;
4297 			esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
4298 			ql_entry_err_chk(entry, esize);
4299 			buff_level += esize;
4300 			break;
4301 
4302 		case BOARD:
4303 		case RDROM:
4304 			dbuff = dump_buff + buff_level;
4305 			esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
4306 			ql_entry_err_chk(entry, esize);
4307 			buff_level += esize;
4308 			break;
4309 
4310 		case RDMUX:
4311 			dbuff = dump_buff + buff_level;
4312 			esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
4313 			ql_entry_err_chk(entry, esize);
4314 			buff_level += esize;
4315 			break;
4316 
4317                 case RDMUX2:
4318                         dbuff = dump_buff + buff_level;
4319                         esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4320                         ql_entry_err_chk(entry, esize);
4321                         buff_level += esize;
4322                         break;
4323 
4324 		case QUEUE:
4325 			dbuff = dump_buff + buff_level;
4326 			esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4327 			ql_entry_err_chk(entry, esize);
4328 			buff_level += esize;
4329 			break;
4330 
4331 		case CNTRL:
4332 			if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4333 				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4334 			}
4335 			break;
4336 		default:
4337 			entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4338 			break;
4339 		}
4340 		/*  next entry in the template */
4341 		entry = (ql_minidump_entry_t *) ((char *) entry
4342 						+ entry->hdr.entry_size);
4343 	}
4344 
4345 	if (!sane_start || (sane_end > 1)) {
4346 		device_printf(ha->pci_dev,
4347 			"\n%s: Template configuration error. Check Template\n",
4348 			__func__);
4349 	}
4350 
4351 	QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4352 		__func__, template_hdr->num_of_entries));
4353 
4354 	return 0;
4355 }
4356 
4357 /*
4358  * Read CRB operation.
4359  */
4360 static uint32_t
4361 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
4362 	uint32_t * data_buff)
4363 {
4364 	int loop_cnt;
4365 	int ret;
4366 	uint32_t op_count, addr, stride, value = 0;
4367 
4368 	addr = crb_entry->addr;
4369 	op_count = crb_entry->op_count;
4370 	stride = crb_entry->addr_stride;
4371 
4372 	for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4373 
4374 		ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4375 
4376 		if (ret)
4377 			return (0);
4378 
4379 		*data_buff++ = addr;
4380 		*data_buff++ = value;
4381 		addr = addr + stride;
4382 	}
4383 
4384 	/*
4385 	 * Return the amount of data (in bytes) written to the capture buffer.
4386 	 */
4387 	return (op_count * (2 * sizeof(uint32_t)));
4388 }
4389 
4390 /*
4391  * Handle L2 Cache.
4392  */
4393 
4394 static uint32_t
4395 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4396 	uint32_t * data_buff)
4397 {
4398 	int i, k;
4399 	int loop_cnt;
4400 	int ret;
4401 
4402 	uint32_t read_value;
4403 	uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4404 	uint32_t tag_value, read_cnt;
4405 	volatile uint8_t cntl_value_r;
4406 	long timeout;
4407 	uint32_t data;
4408 
4409 	loop_cnt = cacheEntry->op_count;
4410 
4411 	read_addr = cacheEntry->read_addr;
4412 	cntrl_addr = cacheEntry->control_addr;
4413 	cntl_value_w = (uint32_t) cacheEntry->write_value;
4414 
4415 	tag_reg_addr = cacheEntry->tag_reg_addr;
4416 
4417 	tag_value = cacheEntry->init_tag_value;
4418 	read_cnt = cacheEntry->read_addr_cnt;
4419 
4420 	for (i = 0; i < loop_cnt; i++) {
4421 
4422 		ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4423 		if (ret)
4424 			return (0);
4425 
4426 		if (cacheEntry->write_value != 0) {
4427 
4428 			ret = ql_rdwr_indreg32(ha, cntrl_addr,
4429 					&cntl_value_w, 0);
4430 			if (ret)
4431 				return (0);
4432 		}
4433 
4434 		if (cacheEntry->poll_mask != 0) {
4435 
4436 			timeout = cacheEntry->poll_wait;
4437 
4438 			ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4439 			if (ret)
4440 				return (0);
4441 
4442 			cntl_value_r = (uint8_t)data;
4443 
4444 			while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
4445 
4446 				if (timeout) {
4447 					qla_mdelay(__func__, 1);
4448 					timeout--;
4449 				} else
4450 					break;
4451 
4452 				ret = ql_rdwr_indreg32(ha, cntrl_addr,
4453 						&data, 1);
4454 				if (ret)
4455 					return (0);
4456 
4457 				cntl_value_r = (uint8_t)data;
4458 			}
4459 			if (!timeout) {
4460 				/*
4461 				 * Report a timeout error: the core dump
4462 				 * capture failed.  Skip the remaining
4463 				 * entries, write the buffer out to a file
4464 				 * and use the driver specific fields in
4465 				 * the template header to report this error.
4466 				 */
4467 				return (-1);
4468 			}
4469 		}
4470 
4471 		addr = read_addr;
4472 		for (k = 0; k < read_cnt; k++) {
4473 
4474 			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4475 			if (ret)
4476 				return (0);
4477 
4478 			*data_buff++ = read_value;
4479 			addr += cacheEntry->read_addr_stride;
4480 		}
4481 
4482 		tag_value += cacheEntry->tag_value_stride;
4483 	}
4484 
4485 	return (read_cnt * loop_cnt * sizeof(uint32_t));
4486 }
4487 
4488 /*
4489  * Handle L1 Cache.
4490  */
4491 
4492 static uint32_t
4493 ql_L1Cache(qla_host_t *ha,
4494 	ql_minidump_entry_cache_t *cacheEntry,
4495 	uint32_t *data_buff)
4496 {
4497 	int ret;
4498 	int i, k;
4499 	int loop_cnt;
4500 
4501 	uint32_t read_value;
4502 	uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
4503 	uint32_t tag_value, read_cnt;
4504 	uint32_t cntl_value_w;
4505 
4506 	loop_cnt = cacheEntry->op_count;
4507 
4508 	read_addr = cacheEntry->read_addr;
4509 	cntrl_addr = cacheEntry->control_addr;
4510 	cntl_value_w = (uint32_t) cacheEntry->write_value;
4511 
4512 	tag_reg_addr = cacheEntry->tag_reg_addr;
4513 
4514 	tag_value = cacheEntry->init_tag_value;
4515 	read_cnt = cacheEntry->read_addr_cnt;
4516 
4517 	for (i = 0; i < loop_cnt; i++) {
4518 
4519 		ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4520 		if (ret)
4521 			return (0);
4522 
4523 		ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
4524 		if (ret)
4525 			return (0);
4526 
4527 		addr = read_addr;
4528 		for (k = 0; k < read_cnt; k++) {
4529 
4530 			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4531 			if (ret)
4532 				return (0);
4533 
4534 			*data_buff++ = read_value;
4535 			addr += cacheEntry->read_addr_stride;
4536 		}
4537 
4538 		tag_value += cacheEntry->tag_value_stride;
4539 	}
4540 
4541 	return (read_cnt * loop_cnt * sizeof(uint32_t));
4542 }
4543 
4544 /*
4545  * Reading OCM memory
4546  */
4547 
4548 static uint32_t
4549 ql_rdocm(qla_host_t *ha,
4550 	ql_minidump_entry_rdocm_t *ocmEntry,
4551 	uint32_t *data_buff)
4552 {
4553 	int i, loop_cnt;
4554 	volatile uint32_t addr;
4555 	volatile uint32_t value;
4556 
4557 	addr = ocmEntry->read_addr;
4558 	loop_cnt = ocmEntry->op_count;
4559 
4560 	for (i = 0; i < loop_cnt; i++) {
4561 		value = READ_REG32(ha, addr);
4562 		*data_buff++ = value;
4563 		addr += ocmEntry->read_addr_stride;
4564 	}
4565 	return (loop_cnt * sizeof(value));
4566 }
4567 
4568 /*
4569  * Read memory
4570  */
4571 
4572 static uint32_t
4573 ql_rdmem(qla_host_t *ha,
4574 	ql_minidump_entry_rdmem_t *mem_entry,
4575 	uint32_t *data_buff)
4576 {
4577 	int ret;
4578         int i, loop_cnt;
4579         volatile uint32_t addr;
4580 	q80_offchip_mem_val_t val;
4581 
4582         addr = mem_entry->read_addr;
4583 
4584 	/* size in bytes / 16 */
4585         loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
4586 
4587         for (i = 0; i < loop_cnt; i++) {
4588 
4589 		ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
4590 		if (ret)
4591 			return (0);
4592 
4593                 *data_buff++ = val.data_lo;
4594                 *data_buff++ = val.data_hi;
4595                 *data_buff++ = val.data_ulo;
4596                 *data_buff++ = val.data_uhi;
4597 
4598                 addr += (sizeof(uint32_t) * 4);
4599         }
4600 
4601         return (loop_cnt * (sizeof(uint32_t) * 4));
4602 }
4603 
4604 /*
4605  * Read Rom
4606  */
4607 
4608 static uint32_t
4609 ql_rdrom(qla_host_t *ha,
4610 	ql_minidump_entry_rdrom_t *romEntry,
4611 	uint32_t *data_buff)
4612 {
4613 	int ret;
4614 	int i, loop_cnt;
4615 	uint32_t addr;
4616 	uint32_t value;
4617 
4618 	addr = romEntry->read_addr;
4619 	loop_cnt = romEntry->read_data_size; /* This is size in bytes */
4620 	loop_cnt /= sizeof(value);
4621 
4622 	for (i = 0; i < loop_cnt; i++) {
4623 
4624 		ret = ql_rd_flash32(ha, addr, &value);
4625 		if (ret)
4626 			return (0);
4627 
4628 		*data_buff++ = value;
4629 		addr += sizeof(value);
4630 	}
4631 
4632 	return (loop_cnt * sizeof(value));
4633 }
4634 
4635 /*
4636  * Read MUX data
4637  */
4638 
4639 static uint32_t
4640 ql_rdmux(qla_host_t *ha,
4641 	ql_minidump_entry_mux_t *muxEntry,
4642 	uint32_t *data_buff)
4643 {
4644 	int ret;
4645 	int loop_cnt;
4646 	uint32_t read_value, sel_value;
4647 	uint32_t read_addr, select_addr;
4648 
4649 	select_addr = muxEntry->select_addr;
4650 	sel_value = muxEntry->select_value;
4651 	read_addr = muxEntry->read_addr;
4652 
4653 	for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
4654 
4655 		ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
4656 		if (ret)
4657 			return (0);
4658 
4659 		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4660 		if (ret)
4661 			return (0);
4662 
4663 		*data_buff++ = sel_value;
4664 		*data_buff++ = read_value;
4665 
4666 		sel_value += muxEntry->select_value_stride;
4667 	}
4668 
4669 	return (loop_cnt * (2 * sizeof(uint32_t)));
4670 }
4671 
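/*
 * Two-select-register variant of the MUX read: each iteration programs
 * select_addr_1/select_addr_2 twice (once per select value) and stores
 * two (select, data) pairs, i.e. 16 bytes per iteration.
 */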
4672 static uint32_t
4673 ql_rdmux2(qla_host_t *ha,
4674 	ql_minidump_entry_mux2_t *muxEntry,
4675 	uint32_t *data_buff)
4676 {
4677 	int ret;
4678         int loop_cnt;
4679 
4680         uint32_t select_addr_1, select_addr_2;
4681         uint32_t select_value_1, select_value_2;
4682         uint32_t select_value_count, select_value_mask;
4683         uint32_t read_addr, read_value;
4684 
4685         select_addr_1 = muxEntry->select_addr_1;
4686         select_addr_2 = muxEntry->select_addr_2;
4687         select_value_1 = muxEntry->select_value_1;
4688         select_value_2 = muxEntry->select_value_2;
4689         select_value_count = muxEntry->select_value_count;
4690         select_value_mask  = muxEntry->select_value_mask;
4691 
4692         read_addr = muxEntry->read_addr;
4693 
4694         for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count;
4695 		loop_cnt++) {
4696 
4697                 uint32_t temp_sel_val;
4698 
4699 		ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
4700 		if (ret)
4701 			return (0);
4702 
4703                 temp_sel_val = select_value_1 & select_value_mask;
4704 
4705 		ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4706 		if (ret)
4707 			return (0);
4708 
4709 		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4710 		if (ret)
4711 			return (0);
4712 
4713                 *data_buff++ = temp_sel_val;
4714                 *data_buff++ = read_value;
4715 
4716 		ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
4717 		if (ret)
4718 			return (0);
4719 
4720                 temp_sel_val = select_value_2 & select_value_mask;
4721 
4722 		ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4723 		if (ret)
4724 			return (0);
4725 
4726 		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4727 		if (ret)
4728 			return (0);
4729 
4730                 *data_buff++ = temp_sel_val;
4731                 *data_buff++ = read_value;
4732 
4733                 select_value_1 += muxEntry->select_value_stride;
4734                 select_value_2 += muxEntry->select_value_stride;
4735         }
4736 
4737         return (loop_cnt * (4 * sizeof(uint32_t)));
4738 }
4739 
4740 /*
4741  * Handling Queue State Reads.
4742  */
4743 
4744 static uint32_t
4745 ql_rdqueue(qla_host_t *ha,
4746 	ql_minidump_entry_queue_t *queueEntry,
4747 	uint32_t *data_buff)
4748 {
4749 	int ret;
4750 	int loop_cnt, k;
4751 	uint32_t read_value;
4752 	uint32_t read_addr, read_stride, select_addr;
4753 	uint32_t queue_id, read_cnt;
4754 
4755 	read_cnt = queueEntry->read_addr_cnt;
4756 	read_stride = queueEntry->read_addr_stride;
4757 	select_addr = queueEntry->select_addr;
4758 
4759 	for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
4760 		loop_cnt++) {
4761 
4762 		ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
4763 		if (ret)
4764 			return (0);
4765 
4766 		read_addr = queueEntry->read_addr;
4767 
4768 		for (k = 0; k < read_cnt; k++) {
4769 
4770 			ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4771 			if (ret)
4772 				return (0);
4773 
4774 			*data_buff++ = read_value;
4775 			read_addr += read_stride;
4776 		}
4777 
4778 		queue_id += queueEntry->queue_id_stride;
4779 	}
4780 
4781 	return (loop_cnt * (read_cnt * sizeof(uint32_t)));
4782 }
4783 
4784 /*
4785  * Handling control entries.
4786  */
4787 
4788 static uint32_t
4789 ql_cntrl(qla_host_t *ha,
4790 	ql_minidump_template_hdr_t *template_hdr,
4791 	ql_minidump_entry_cntrl_t *crbEntry)
4792 {
4793 	int ret;
4794 	int count;
4795 	uint32_t opcode, read_value, addr, entry_addr;
4796 	long timeout;
4797 
4798 	entry_addr = crbEntry->addr;
4799 
4800 	for (count = 0; count < crbEntry->op_count; count++) {
4801 		opcode = crbEntry->opcode;
4802 
4803 		if (opcode & QL_DBG_OPCODE_WR) {
4804 
4805                 	ret = ql_rdwr_indreg32(ha, entry_addr,
4806 					&crbEntry->value_1, 0);
4807 			if (ret)
4808 				return (0);
4809 
4810 			opcode &= ~QL_DBG_OPCODE_WR;
4811 		}
4812 
4813 		if (opcode & QL_DBG_OPCODE_RW) {
4814 
4815                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4816 			if (ret)
4817 				return (0);
4818 
4819                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4820 			if (ret)
4821 				return (0);
4822 
4823 			opcode &= ~QL_DBG_OPCODE_RW;
4824 		}
4825 
4826 		if (opcode & QL_DBG_OPCODE_AND) {
4827 
4828                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4829 			if (ret)
4830 				return (0);
4831 
4832 			read_value &= crbEntry->value_2;
4833 			opcode &= ~QL_DBG_OPCODE_AND;
4834 
4835 			if (opcode & QL_DBG_OPCODE_OR) {
4836 				read_value |= crbEntry->value_3;
4837 				opcode &= ~QL_DBG_OPCODE_OR;
4838 			}
4839 
4840                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4841 			if (ret)
4842 				return (0);
4843 		}
4844 
4845 		if (opcode & QL_DBG_OPCODE_OR) {
4846 
4847                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4848 			if (ret)
4849 				return (0);
4850 
4851 			read_value |= crbEntry->value_3;
4852 
4853                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4854 			if (ret)
4855 				return (0);
4856 
4857 			opcode &= ~QL_DBG_OPCODE_OR;
4858 		}
4859 
4860 		if (opcode & QL_DBG_OPCODE_POLL) {
4861 
4862 			opcode &= ~QL_DBG_OPCODE_POLL;
4863 			timeout = crbEntry->poll_timeout;
4864 			addr = entry_addr;
4865 
4866                 	ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4867 			if (ret)
4868 				return (0);
4869 
4870 			while ((read_value & crbEntry->value_2)
4871 				!= crbEntry->value_1) {
4872 
4873 				if (timeout) {
4874 					qla_mdelay(__func__, 1);
4875 					timeout--;
4876 				} else
4877 					break;
4878 
4879                 		ret = ql_rdwr_indreg32(ha, addr,
4880 						&read_value, 1);
4881 				if (ret)
4882 					return (0);
4883 			}
4884 
4885 			if (!timeout) {
4886 				/*
4887 				 * Report a timeout error:
4888 				 * the core dump capture failed.
4889 				 * Skip the remaining entries, write the
4890 				 * buffer out to a file and use the driver
4891 				 * specific fields in the template header
4892 				 * to report this error.
4893 				 */
4894 				return (-1);
4895 			}
4896 		}
4897 
4898 		if (opcode & QL_DBG_OPCODE_RDSTATE) {
4899 			/*
4900 			 * decide which address to use.
4901 			 */
4902 			if (crbEntry->state_index_a) {
4903 				addr = template_hdr->saved_state_array[
4904 						crbEntry-> state_index_a];
4905 			} else {
4906 				addr = entry_addr;
4907 			}
4908 
4909                 	ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4910 			if (ret)
4911 				return (0);
4912 
4913 			template_hdr->saved_state_array[crbEntry->state_index_v]
4914 					= read_value;
4915 			opcode &= ~QL_DBG_OPCODE_RDSTATE;
4916 		}
4917 
4918 		if (opcode & QL_DBG_OPCODE_WRSTATE) {
4919 			/*
4920 			 * decide which value to use.
4921 			 */
4922 			if (crbEntry->state_index_v) {
4923 				read_value = template_hdr->saved_state_array[
4924 						crbEntry->state_index_v];
4925 			} else {
4926 				read_value = crbEntry->value_1;
4927 			}
4928 			/*
4929 			 * decide which address to use.
4930 			 */
4931 			if (crbEntry->state_index_a) {
4932 				addr = template_hdr->saved_state_array[
4933 						crbEntry-> state_index_a];
4934 			} else {
4935 				addr = entry_addr;
4936 			}
4937 
4938                 	ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
4939 			if (ret)
4940 				return (0);
4941 
4942 			opcode &= ~QL_DBG_OPCODE_WRSTATE;
4943 		}
4944 
4945 		if (opcode & QL_DBG_OPCODE_MDSTATE) {
4946 			/*  Read value from saved state using index */
4947 			read_value = template_hdr->saved_state_array[
4948 						crbEntry->state_index_v];
4949 
4950 			read_value <<= crbEntry->shl; /*Shift left operation */
4951 			read_value >>= crbEntry->shr; /*Shift right operation */
4952 
4953 			if (crbEntry->value_2) {
4954 				/* check if AND mask is provided */
4955 				read_value &= crbEntry->value_2;
4956 			}
4957 
4958 			read_value |= crbEntry->value_3; /* OR operation */
4959 			read_value += crbEntry->value_1; /* increment op */
4960 
4961 			/* Write value back to state area. */
4962 
4963 			template_hdr->saved_state_array[crbEntry->state_index_v]
4964 					= read_value;
4965 			opcode &= ~QL_DBG_OPCODE_MDSTATE;
4966 		}
4967 
4968 		entry_addr += crbEntry->addr_stride;
4969 	}
4970 
4971 	return (0);
4972 }
4973 
4974 /*
4975  * Handling rd poll entry.
4976  */
4977 
4978 static uint32_t
4979 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
4980 	uint32_t *data_buff)
4981 {
4982         int ret;
4983         int loop_cnt;
4984         uint32_t op_count, select_addr, select_value_stride, select_value;
4985         uint32_t read_addr, poll, mask, data_size, data;
4986         uint32_t wait_count = 0;
4987 
4988         select_addr            = entry->select_addr;
4989         read_addr              = entry->read_addr;
4990         select_value           = entry->select_value;
4991         select_value_stride    = entry->select_value_stride;
4992         op_count               = entry->op_count;
4993         poll                   = entry->poll;
4994         mask                   = entry->mask;
4995         data_size              = entry->data_size;
4996 
4997         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4998 
4999                 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
5000 		if (ret)
5001 			return (0);
5002 
5003                 wait_count = 0;
5004 
5005                 while (wait_count < poll) {
5006 
5007                         uint32_t temp;
5008 
5009 			ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
5010 			if (ret)
5011 				return (0);
5012 
5013                         if ( (temp & mask) != 0 ) {
5014                                 break;
5015                         }
5016                         wait_count++;
5017                 }
5018 
5019                 if (wait_count == poll) {
5020                         device_printf(ha->pci_dev,
5021 				"%s: Error in processing entry\n", __func__);
5022                         device_printf(ha->pci_dev,
5023 				"%s: wait_count <0x%x> poll <0x%x>\n",
5024 				__func__, wait_count, poll);
5025                         return 0;
5026                 }
5027 
5028 		ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
5029 		if (ret)
5030 			return (0);
5031 
5032                 *data_buff++ = select_value;
5033                 *data_buff++ = data;
5034                 select_value = select_value + select_value_stride;
5035         }
5036 
5037         /*
5038          * Return the amount of data (in bytes) written to the capture buffer.
5039          */
5040         return (loop_cnt * (2 * sizeof(uint32_t)));
5041 }
5042 
5043 
5044 /*
5045  * Handling rd modify write poll entry.
5046  */
5047 
5048 static uint32_t
5049 ql_pollrd_modify_write(qla_host_t *ha,
5050 	ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
5051 	uint32_t *data_buff)
5052 {
5053 	int ret;
5054         uint32_t addr_1, addr_2, value_1, value_2, data;
5055         uint32_t poll, mask, data_size, modify_mask;
5056         uint32_t wait_count = 0;
5057 
5058         addr_1		= entry->addr_1;
5059         addr_2		= entry->addr_2;
5060         value_1		= entry->value_1;
5061         value_2		= entry->value_2;
5062 
5063         poll		= entry->poll;
5064         mask		= entry->mask;
5065         modify_mask	= entry->modify_mask;
5066         data_size	= entry->data_size;
5067 
5068 
5069 	ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
5070 	if (ret)
5071 		return (0);
5072 
5073         wait_count = 0;
5074         while (wait_count < poll) {
5075 
5076 		uint32_t temp;
5077 
5078 		ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5079 		if (ret)
5080 			return (0);
5081 
5082                 if ( (temp & mask) != 0 ) {
5083                         break;
5084                 }
5085                 wait_count++;
5086         }
5087 
5088         if (wait_count == poll) {
5089                 device_printf(ha->pci_dev, "%s: Error in processing entry\n",
5090 			__func__);
5091         } else {
5092 
5093 		ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
5094 		if (ret)
5095 			return (0);
5096 
5097                 data = (data & modify_mask);
5098 
5099 		ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
5100 		if (ret)
5101 			return (0);
5102 
5103 		ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
5104 		if (ret)
5105 			return (0);
5106 
5107                 /* Poll again */
5108                 wait_count = 0;
5109                 while (wait_count < poll) {
5110 
5111                         uint32_t temp;
5112 
5113 			ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5114 			if (ret)
5115 				return (0);
5116 
5117                         if ( (temp & mask) != 0 ) {
5118                                 break;
5119                         }
5120                         wait_count++;
5121                 }
5122                 *data_buff++ = addr_2;
5123                 *data_buff++ = data;
5124         }
5125 
5126         /*
5127          * Return the size in bytes of the data captured for this entry.
5128          */
5129         return (2 * sizeof(uint32_t));
5130 }
5131 
5132 
5133