/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains hardware-dependent functions.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"

/*
 * Static Functions
 */

static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
	uint32_t num_intrs, uint32_t create);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
	int tenable, int rcv);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
		uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_hw_del_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);

static int qla_init_nic_func(qla_host_t *ha);
static int qla_stop_nic_func(qla_host_t *ha);
static int qla_query_fw_dcbx_caps(qla_host_t *ha);
static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
static void qla_get_quick_stats(qla_host_t *ha);

static int qla_minidump_init(qla_host_t *ha);
static void qla_minidump_free(qla_host_t *ha);

static int
qla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
{
        int err = 0, ret;
        qla_host_t *ha;
	uint32_t i;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {

                ha = (qla_host_t *)arg1;

		for (i = 0; i < ha->hw.num_sds_rings; i++)
			device_printf(ha->pci_dev,
				"%s: sds_ring[%d] = %p\n", __func__, i,
				(void *)ha->hw.sds[i].intr_count);

		for (i = 0; i < ha->hw.num_tx_rings; i++)
			device_printf(ha->pci_dev,
				"%s: tx[%d] = %p\n", __func__, i,
				(void *)ha->tx_ring[i].count);

		for (i = 0; i < ha->hw.num_rds_rings; i++)
			device_printf(ha->pci_dev,
				"%s: rds_ring[%d] = %p\n", __func__, i,
				(void *)ha->hw.rds[i].count);

		device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__,
			(void *)ha->lro_pkt_count);

		device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__,
			(void *)ha->lro_bytes);

#ifdef QL_ENABLE_ISCSI_TLV
		device_printf(ha->pci_dev, "%s: iscsi_pkts = %p\n", __func__,
			(void *)ha->hw.iscsi_pkt_count);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

	}
	return (err);
}

static int
qla_sysctl_get_quick_stats(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		qla_get_quick_stats(ha);
	}
	return (err);
}

#ifdef QL_DBG

static void
qla_stop_pegs(qla_host_t *ha)
{
        uint32_t val = 1;

        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
}

static int
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		(void)QLA_LOCK(ha, __func__, 0);
		qla_stop_pegs(ha);
		QLA_UNLOCK(ha, __func__);
	}

	return (err);
}
#endif /* #ifdef QL_DBG */

static int
qla_validate_set_port_cfg_bit(uint32_t bits)
{
        if ((bits & 0xF) > 1)
                return (-1);

        if (((bits >> 4) & 0xF) > 2)
                return (-1);

        if (((bits >> 8) & 0xF) > 2)
                return (-1);

        return (0);
}
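
/*
 * Example (a sketch derived from the "port_cfg" sysctl help text below,
 * not an additional interface): writing 0x021 requests DCBX enabled
 * (bits 0-3 = 1), PPM pause (bits 4-7 = 2) and standard pause on both
 * transmit and receive (bits 8-11 = 0). A value whose low nibble exceeds
 * 1, or whose next two nibbles exceed 2, fails the checks above and the
 * handler below falls back to reading the current configuration.
 */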

static int
qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;
        uint32_t cfg_bits;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) {

                ha = (qla_host_t *)arg1;

                err = qla_get_port_config(ha, &cfg_bits);

                if (err)
                        goto qla_sysctl_set_port_cfg_exit;

                if (ret & 0x1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
                } else {
                        cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;

                if ((ret & 0xF) == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
                } else if ((ret & 0xF) == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;

                if (ret == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
                } else if (ret == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
                }

                err = qla_set_port_config(ha, cfg_bits);
        } else {
                ha = (qla_host_t *)arg1;

                err = qla_get_port_config(ha, &cfg_bits);
        }

qla_sysctl_set_port_cfg_exit:
        return (err);
}

/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
        device_t	dev;

        dev = ha->pci_dev;

	ha->hw.num_sds_rings = MAX_SDS_RINGS;
	ha->hw.num_rds_rings = MAX_RDS_RINGS;
	ha->hw.num_tx_rings = NUM_TX_RINGS;

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
		ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
		ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
		ha->hw.num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
		ha->txr_idx, "Tx Ring Used");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
		(void *)ha, 0,
		qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "quick_stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_quick_stats, "I", "Quick Statistics");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
		ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

	ha->hw.sds_cidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
		ha->hw.sds_cidx_thres,
		"Number of SDS entries to process before updating"
		" SDS Ring Consumer Index");

	ha->hw.rds_pidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
		ha->hw.rds_pidx_thres,
		"Number of Rcv Rings Entries to post before updating"
		" RDS Ring Producer Index");

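        /*
         * Default coalescing parameters, decoded per the help strings
         * below (a reading of the encoding, not a new tunable): the low
         * 16 bits are the packet threshold and the high 16 bits the
         * micro-second wait, i.e. rcv defaults to 256 packets / 3 us and
         * xmt to 64 packets / 64 us.
         */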
        ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
                &ha->hw.rcv_intr_coalesce,
                ha->hw.rcv_intr_coalesce,
                "Rcv Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
                &ha->hw.xmt_intr_coalesce,
                ha->hw.xmt_intr_coalesce,
                "Xmt Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_port_cfg, "I",
                        "Set Port Configuration if the value written is "
                        "valid per the bits below; "
                        "otherwise Get Port Configuration\n"
                        "\tBits 0-3: 1 = DCBX Enable; 0 = DCBX Disable\n"
                        "\tBits 4-7: 0 = no pause; 1 = std; 2 = ppm\n"
                        "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
                        " 1 = xmt only; 2 = rcv only\n"
                );

        ha->hw.enable_9kb = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
                ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");

	ha->hw.mdump_active = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
		ha->hw.mdump_active,
		"Minidump Utility is Active\n"
		"\t 0 = Minidump Utility is not active\n"
		"\t 1 = Minidump Utility is retrieved on this port\n"
		"\t 2 = Minidump Utility is retrieved on the other port\n");

	ha->hw.mdump_start = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_start", CTLFLAG_RW,
		&ha->hw.mdump_start, ha->hw.mdump_start,
		"Minidump Utility can start minidump process");
#ifdef QL_DBG

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "err_inject",
                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                "Error to be injected\n"
                "\t\t\t 0: No Errors\n"
                "\t\t\t 1: rcv: rxb struct invalid\n"
                "\t\t\t 2: rcv: mp == NULL\n"
                "\t\t\t 3: lro: rxb struct invalid\n"
                "\t\t\t 4: lro: mp == NULL\n"
                "\t\t\t 5: rcv: num handles invalid\n"
                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                "\t\t\t 8: mbx: mailbox command failure\n"
                "\t\t\t 9: heartbeat failure\n"
                "\t\t\t A: temperature failure\n");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

        ha->hw.user_pri_nic = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
                ha->hw.user_pri_nic,
                "VLAN Tag User Priority for Normal Ethernet Packets");

        ha->hw.user_pri_iscsi = 4;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
                ha->hw.user_pri_iscsi,
                "VLAN Tag User Priority for iSCSI Packets");
}

void
ql_hw_link_status(qla_host_t *ha)
{
	device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

	if (ha->hw.link_up) {
		device_printf(ha->pci_dev, "link Up\n");
	} else {
		device_printf(ha->pci_dev, "link Down\n");
	}

	if (ha->hw.flags.fduplex) {
		device_printf(ha->pci_dev, "Full Duplex\n");
	} else {
		device_printf(ha->pci_dev, "Half Duplex\n");
	}

	if (ha->hw.flags.autoneg) {
		device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
	} else {
		device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
	}

	switch (ha->hw.link_speed) {
	case 0x710:
		device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
		break;

	case 0x3E8:
		device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
		break;

	case 0x64:
		device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
		break;

	default:
		device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
		break;
	}

	switch (ha->hw.module_type) {

	case 0x01:
		device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
		break;

	case 0x02:
		device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
		break;

	case 0x03:
		device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
		break;

	case 0x04:
		device_printf(ha->pci_dev,
			"Module Type 10GE Passive Copper (Compliant)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x05:
		device_printf(ha->pci_dev, "Module Type 10GE Active"
			" Limiting Copper (Compliant)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x06:
		device_printf(ha->pci_dev,
			"Module Type 10GE Passive Copper"
			" (Legacy, Best Effort)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x07:
		device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
		break;

	case 0x08:
		device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
		break;

	case 0x09:
		device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
		break;

	case 0x0A:
		device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
		break;

	case 0x0B:
		device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
			" (Legacy, Best Effort)\n");
		break;

	default:
		device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
			ha->hw.module_type);
		break;
	}

	if (ha->hw.link_faults == 1)
		device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
void
ql_free_dma(qla_host_t *ha)
{
	uint32_t i;

        if (ha->hw.dma_buf.flags.sds_ring) {
		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
		}
		ha->hw.dma_buf.flags.sds_ring = 0;
	}

        if (ha->hw.dma_buf.flags.rds_ring) {
		for (i = 0; i < ha->hw.num_rds_rings; i++) {
			ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
		}
		ha->hw.dma_buf.flags.rds_ring = 0;
	}

        if (ha->hw.dma_buf.flags.tx_ring) {
		ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
		ha->hw.dma_buf.flags.tx_ring = 0;
	}
	qla_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
	uint32_t		i, j, size, tx_ring_size;
	qla_hw_t		*hw;
	qla_hw_tx_cntxt_t	*tx_cntxt;
	uint8_t			*vaddr;
	bus_addr_t		paddr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	hw = &ha->hw;
	/*
	 * Allocate Transmit Ring
	 */
	tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
	size = (tx_ring_size * ha->hw.num_tx_rings);

	hw->dma_buf.tx_ring.alignment = 8;
	hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
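	/*
	 * The extra PAGE_SIZE added above, beyond the rings themselves,
	 * appears to provide the room from which the per-ring tx consumer
	 * index words are carved in the second loop below.
	 */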

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto ql_alloc_dma_exit;
        }

	vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
	paddr = hw->dma_buf.tx_ring.dma_addr;

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
		tx_cntxt->tx_ring_paddr = paddr;

		vaddr += tx_ring_size;
		paddr += tx_ring_size;
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_cons = (uint32_t *)vaddr;
		tx_cntxt->tx_cons_paddr = paddr;

		vaddr += sizeof (uint32_t);
		paddr += sizeof (uint32_t);
	}

        ha->hw.dma_buf.flags.tx_ring = 1;

	QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
		__func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
		hw->dma_buf.tx_ring.dma_b));
	/*
	 * Allocate Receive Descriptor Rings
	 */

	for (i = 0; i < hw->num_rds_rings; i++) {

		hw->dma_buf.rds_ring[i].alignment = 8;
		hw->dma_buf.rds_ring[i].size =
			(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

		if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
			device_printf(dev, "%s: rds ring[%d] alloc failed\n",
				__func__, i);

			for (j = 0; j < i; j++)
				ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

			goto ql_alloc_dma_exit;
		}
		QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
			__func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
			hw->dma_buf.rds_ring[i].dma_b));
	}

	hw->dma_buf.flags.rds_ring = 1;

	/*
	 * Allocate Status Descriptor Rings
	 */

	for (i = 0; i < hw->num_sds_rings; i++) {
		hw->dma_buf.sds_ring[i].alignment = 8;
		hw->dma_buf.sds_ring[i].size =
			(sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

		if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
			device_printf(dev, "%s: sds ring alloc failed\n",
				__func__);

			for (j = 0; j < i; j++)
				ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

			goto ql_alloc_dma_exit;
		}
		QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
			__func__, i,
			(void *)(hw->dma_buf.sds_ring[i].dma_addr),
			hw->dma_buf.sds_ring[i].dma_b));
	}
	for (i = 0; i < hw->num_sds_rings; i++) {
		hw->sds[i].sds_ring_base =
			(q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
	}

	hw->dma_buf.flags.sds_ring = 1;

	return 0;

ql_alloc_dma_exit:
	ql_free_dma(ha);
	return -1;
}

#define Q8_MBX_MSEC_DELAY	5000
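
/*
 * Name: qla_mbx_cmd
 * Function: Issues a mailbox command and polls for completion (a
 *	descriptive summary of the code below): wait for the firmware to
 *	release Q8_HOST_MBOX_CNTRL, write the n_hmbox request words into
 *	the host mailbox registers and ring Q8_HOST_MBOX_CNTRL, then poll
 *	Q8_FW_MBOX_CNTRL until a response that is not an 0x8000-range
 *	async event arrives, copy out n_fwmbox response words, and re-arm
 *	the mailbox interrupt. Timeouts and injected failures set
 *	qla_initiate_recovery.
 */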

static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
	uint32_t i;
	uint32_t data;
	int ret = 0;

	if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
		ret = -3;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	if (no_pause)
		i = 1000;
	else
		i = Q8_MBX_MSEC_DELAY;

	while (i) {
		data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
		if (data == 0)
			break;
		if (no_pause) {
			DELAY(1000);
		} else {
			qla_mdelay(__func__, 1);
		}
		i--;
	}

	if (i == 0) {
		device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
			__func__, data);
		ret = -1;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	for (i = 0; i < n_hmbox; i++) {
		WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
		h_mbox++;
	}

	WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);

	i = Q8_MBX_MSEC_DELAY;
	while (i) {
		data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

		if ((data & 0x3) == 1) {
			data = READ_REG32(ha, Q8_FW_MBOX0);
			if ((data & 0xF000) != 0x8000)
				break;
		}
		if (no_pause) {
			DELAY(1000);
		} else {
			qla_mdelay(__func__, 1);
		}
		i--;
	}
	if (i == 0) {
		device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
			__func__, data);
		ret = -2;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	for (i = 0; i < n_fwmbox; i++) {
		*fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
	}

	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

exit_qla_mbx_cmd:
	return (ret);
}

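/*
 * Name: qla_get_nic_partition
 * Function: Queries NIC partition attributes (a descriptive summary of
 *	the code below). The completion status is carried in the top bits
 *	of mbox[0] (mbox[0] >> 25); bit 7 of mbox[16] appears to advertise
 *	9KB receive-buffer support, and mbox[6] bits 31:16 the number of
 *	receive queues.
 */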
int
qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
	uint32_t *num_rcvq)
{
	uint32_t *mbox, err;
	device_t dev = ha->pci_dev;

	bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

	mbox = ha->hw.mbox;

	mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

	if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	err = mbox[0] >> 25;

	if (supports_9kb != NULL) {
		if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
			*supports_9kb = 1;
		else
			*supports_9kb = 0;
	}

	if (num_rcvq != NULL)
		*num_rcvq = ((mbox[6] >> 16) & 0xFFFF);

	if ((err != 1) && (err != 0)) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return 0;
}

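/*
 * Name: qla_config_intr_cntxt
 * Function: Creates or deletes the interrupt contexts for num_intrs
 *	MSI-X vectors starting at start_idx (a descriptive summary of the
 *	code below). On create, the firmware-assigned intr_id/intr_src
 *	pairs are recorded in ha->hw; on delete, the recorded intr_id
 *	values are handed back to the firmware.
 */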
static int
qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
	uint32_t create)
{
	uint32_t i, err;
	device_t dev = ha->pci_dev;
	q80_config_intr_t *c_intr;
	q80_config_intr_rsp_t *c_intr_rsp;

	c_intr = (q80_config_intr_t *)ha->hw.mbox;
	bzero(c_intr, (sizeof (q80_config_intr_t)));

	c_intr->opcode = Q8_MBX_CONFIG_INTR;

	c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
	c_intr->count_version |= Q8_MBX_CMD_VERSION;

	c_intr->nentries = num_intrs;

	for (i = 0; i < num_intrs; i++) {
		if (create) {
			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
			c_intr->intr[i].msix_index = start_idx + 1 + i;
		} else {
			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
			c_intr->intr[i].msix_index =
				ha->hw.intr_id[(start_idx + i)];
		}

		c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
	}

	if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
		(sizeof (q80_config_intr_t) >> 2),
		ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
			c_intr_rsp->nentries);

		for (i = 0; i < c_intr_rsp->nentries; i++) {
			device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
				__func__, i,
				c_intr_rsp->intr[i].status,
				c_intr_rsp->intr[i].intr_id,
				c_intr_rsp->intr[i].intr_src);
		}

		return (-1);
	}

	for (i = 0; ((i < num_intrs) && create); i++) {
		if (!c_intr_rsp->intr[i].status) {
			ha->hw.intr_id[(start_idx + i)] =
				c_intr_rsp->intr[i].intr_id;
			ha->hw.intr_src[(start_idx + i)] =
				c_intr_rsp->intr[i].intr_src;
		}
	}

	return (0);
}

/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
			0x8030f20c77cb2da3ULL,
			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
			0x255b0ec26d5a56daULL };
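
/*
 * rss_key[] above is the 40-byte Toeplitz hash key, expressed as five
 * 64-bit words, that is programmed into the receive context below. It
 * looks like a byte-order permutation of the widely published sample RSS
 * key, but that is an observation about the constants, not something the
 * code relies on.
 */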

static int
qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
{
	q80_config_rss_t	*c_rss;
	q80_config_rss_rsp_t	*c_rss_rsp;
	uint32_t		err, i;
	device_t		dev = ha->pci_dev;

	c_rss = (q80_config_rss_t *)ha->hw.mbox;
	bzero(c_rss, (sizeof (q80_config_rss_t)));

	c_rss->opcode = Q8_MBX_CONFIG_RSS;

	c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
	c_rss->count_version |= Q8_MBX_CMD_VERSION;

	c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
				Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
	//c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
	//			Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);

	c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
	c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;

	c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;

	c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
	c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;

	c_rss->cntxt_id = cntxt_id;

	for (i = 0; i < 5; i++) {
		c_rss->rss_key[i] = rss_key[i];
	}

	if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
		(sizeof (q80_config_rss_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return 0;
}

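/*
 * Name: qla_set_rss_ind_table
 * Function: Programs 'count' RSS indirection-table entries, starting at
 *	start_idx, for the given context. As a sketch of typical use (the
 *	actual callers live elsewhere in this driver), spreading flows
 *	evenly over the status rings amounts to filling ind_table[i] with
 *	(i % num_sds_rings) before calling this routine.
 */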
static int
qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
        uint16_t cntxt_id, uint8_t *ind_table)
{
        q80_config_rss_ind_table_t      *c_rss_ind;
        q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
        uint32_t                        err;
        device_t                        dev = ha->pci_dev;

	if ((count > Q8_RSS_IND_TBL_SIZE) ||
		((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
		device_printf(dev, "%s: illegal start_idx/count [%d, %d]\n",
			__func__, start_idx, count);
		return (-1);
	}

        c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
        bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));

        c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
        c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
        c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;

	c_rss_ind->start_idx = start_idx;
	c_rss_ind->end_idx = start_idx + count - 1;
	c_rss_ind->cntxt_id = cntxt_id;
	bcopy(ind_table, c_rss_ind->ind_table, count);

	if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
		(sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
		(sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
	err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return 0;
}

/*
 * Name: qla_config_intr_coalesce
 * Function: Configure Interrupt Coalescing.
 */
static int
qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
	int rcv)
{
	q80_config_intr_coalesc_t	*intrc;
	q80_config_intr_coalesc_rsp_t	*intrc_rsp;
	uint32_t			err, i;
	device_t			dev = ha->pci_dev;

	intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
	bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));

	intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
	intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
	intrc->count_version |= Q8_MBX_CMD_VERSION;

	if (rcv) {
		intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
		intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
		intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
	} else {
		intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
		intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
		intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
	}

	intrc->cntxt_id = cntxt_id;

	if (tenable) {
		intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
		intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;

		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			intrc->sds_ring_mask |= (1 << i);
		}
		intrc->ms_timeout = 1000;
	}

	if (qla_mbx_cmd(ha, (uint32_t *)intrc,
		(sizeof (q80_config_intr_coalesc_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	return 0;
}

/*
 * Name: qla_config_mac_addr
 * Function: binds a MAC address to the context/interface.
 *	Can be unicast, multicast or broadcast.
 */
static int
qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
{
	q80_config_mac_addr_t		*cmac;
	q80_config_mac_addr_rsp_t	*cmac_rsp;
	uint32_t			err;
	device_t			dev = ha->pci_dev;

	cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
	bzero(cmac, (sizeof (q80_config_mac_addr_t)));

	cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
	cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
	cmac->count_version |= Q8_MBX_CMD_VERSION;

	if (add_mac)
		cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
	else
		cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;

	cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;

	cmac->nmac_entries = 1;
	cmac->cntxt_id = ha->hw.rcv_cntxt_id;
	bcopy(mac_addr, cmac->mac_addr[0].addr, 6);

	if (qla_mbx_cmd(ha, (uint32_t *)cmac,
		(sizeof (q80_config_mac_addr_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
		device_printf(dev, "%s: %s failed0\n", __func__,
			(add_mac ? "Add" : "Del"));
		return (-1);
	}
	cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: %s "
			"%02x:%02x:%02x:%02x:%02x:%02x failed1 [0x%08x]\n",
			__func__, (add_mac ? "Add" : "Del"),
			mac_addr[0], mac_addr[1], mac_addr[2],
			mac_addr[3], mac_addr[4], mac_addr[5], err);
		return (-1);
	}

	return 0;
}

/*
 * Name: qla_set_mac_rcv_mode
 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
 */
static int
qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
{
	q80_config_mac_rcv_mode_t	*rcv_mode;
	uint32_t			err;
	q80_config_mac_rcv_mode_rsp_t	*rcv_mode_rsp;
	device_t			dev = ha->pci_dev;

	rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
	bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));

	rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
	rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
	rcv_mode->count_version |= Q8_MBX_CMD_VERSION;

	rcv_mode->mode = mode;

	rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;

	if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
		(sizeof (q80_config_mac_rcv_mode_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	return 0;
}

int
ql_set_promisc(qla_host_t *ha)
{
	int ret;

	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
	return (ret);
}

void
qla_reset_promisc(qla_host_t *ha)
{
	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

int
ql_set_allmulti(qla_host_t *ha)
{
	int ret;

	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
	return (ret);
}

void
qla_reset_allmulti(qla_host_t *ha)
{
	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

/*
 * Name: ql_set_max_mtu
 * Function:
 *	Sets the maximum transfer unit size for the specified rcv context.
 */
int
ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
{
	device_t		dev;
	q80_set_max_mtu_t	*max_mtu;
	q80_set_max_mtu_rsp_t	*max_mtu_rsp;
	uint32_t		err;

	dev = ha->pci_dev;

	max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
	bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));

	max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
	max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
	max_mtu->count_version |= Q8_MBX_CMD_VERSION;

	max_mtu->cntxt_id = cntxt_id;
	max_mtu->mtu = mtu;

        if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
		(sizeof (q80_set_max_mtu_t) >> 2),
                ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

	max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

	return 0;
}

static int
qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
{
	device_t		dev;
	q80_link_event_t	*lnk;
	q80_link_event_rsp_t	*lnk_rsp;
	uint32_t		err;

	dev = ha->pci_dev;

	lnk = (q80_link_event_t *)ha->hw.mbox;
	bzero(lnk, (sizeof (q80_link_event_t)));

	lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
	lnk->count_version = (sizeof (q80_link_event_t) >> 2);
	lnk->count_version |= Q8_MBX_CMD_VERSION;

	lnk->cntxt_id = cntxt_id;
	lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;

        if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
                ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

	lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

	return 0;
}

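/*
 * Name: qla_config_fw_lro
 * Function: Enables firmware-assisted LRO for IPv4 and IPv6 flows on the
 *	given receive context (a descriptive summary of the code below);
 *	the *_WO_DST_IP_CHK flags appear to relax the destination-IP match
 *	when coalescing.
 */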
static int
qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
{
	device_t		dev;
	q80_config_fw_lro_t	*fw_lro;
	q80_config_fw_lro_rsp_t	*fw_lro_rsp;
	uint32_t		err;

	dev = ha->pci_dev;

	fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
	bzero(fw_lro, sizeof(q80_config_fw_lro_t));

	fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
	fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
	fw_lro->count_version |= Q8_MBX_CMD_VERSION;

	fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
	fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;

	fw_lro->cntxt_id = cntxt_id;

	if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
		(sizeof (q80_config_fw_lro_t) >> 2),
		ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed\n", __func__);
		return -1;
	}

	fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
	}

	return 0;
}

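/*
 * qla_xmt_stats(), qla_rcv_stats() and qla_mac_stats() below pretty-print
 * the statistics blocks returned through the mailbox. For qla_xmt_stats(),
 * an index equal to num_tx_rings (as passed from qla_get_quick_stats())
 * selects the aggregate rather than the per-ring output format.
 */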
static void
qla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat, int i)
{
	device_t dev = ha->pci_dev;

	if (i < ha->hw.num_tx_rings) {
		device_printf(dev, "%s[%d]: total_bytes\t\t%" PRIu64 "\n",
			__func__, i, xstat->total_bytes);
		device_printf(dev, "%s[%d]: total_pkts\t\t%" PRIu64 "\n",
			__func__, i, xstat->total_pkts);
		device_printf(dev, "%s[%d]: errors\t\t%" PRIu64 "\n",
			__func__, i, xstat->errors);
		device_printf(dev, "%s[%d]: pkts_dropped\t%" PRIu64 "\n",
			__func__, i, xstat->pkts_dropped);
		device_printf(dev, "%s[%d]: switch_pkts\t\t%" PRIu64 "\n",
			__func__, i, xstat->switch_pkts);
		device_printf(dev, "%s[%d]: num_buffers\t\t%" PRIu64 "\n",
			__func__, i, xstat->num_buffers);
	} else {
		device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n",
			__func__, xstat->total_bytes);
		device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n",
			__func__, xstat->total_pkts);
		device_printf(dev, "%s: errors\t\t\t%" PRIu64 "\n",
			__func__, xstat->errors);
		device_printf(dev, "%s: pkts_dropped\t\t\t%" PRIu64 "\n",
			__func__, xstat->pkts_dropped);
		device_printf(dev, "%s: switch_pkts\t\t\t%" PRIu64 "\n",
			__func__, xstat->switch_pkts);
		device_printf(dev, "%s: num_buffers\t\t\t%" PRIu64 "\n",
			__func__, xstat->num_buffers);
	}
}

static void
qla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)
{
	device_t dev = ha->pci_dev;

	device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
		rstat->total_bytes);
	device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
		rstat->total_pkts);
	device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
		rstat->lro_pkt_count);
	device_printf(dev, "%s: sw_pkt_count\t\t\t%" PRIu64 "\n", __func__,
		rstat->sw_pkt_count);
	device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
		rstat->ip_chksum_err);
	device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
		rstat->pkts_wo_acntxts);
	device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
		__func__, rstat->pkts_dropped_no_sds_card);
	device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
		__func__, rstat->pkts_dropped_no_sds_host);
	device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
		rstat->oversized_pkts);
	device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
		__func__, rstat->pkts_dropped_no_rds);
	device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
		__func__, rstat->unxpctd_mcast_pkts);
	device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
		rstat->re1_fbq_error);
	device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
		rstat->invalid_mac_addr);
	device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
		rstat->rds_prime_trys);
	device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
		rstat->rds_prime_success);
	device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
		rstat->lro_flows_added);
	device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
		rstat->lro_flows_deleted);
	device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
		rstat->lro_flows_active);
	device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
		__func__, rstat->pkts_droped_unknown);
}

static void
qla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)
{
	device_t dev = ha->pci_dev;

	device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
		mstat->xmt_frames);
	device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
		mstat->xmt_bytes);
	device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
		mstat->xmt_mcast_pkts);
	device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
		mstat->xmt_bcast_pkts);
	device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
		mstat->xmt_pause_frames);
	device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
		mstat->xmt_cntrl_pkts);
	device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
		__func__, mstat->xmt_pkt_lt_64bytes);
	device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n",
		__func__, mstat->xmt_pkt_lt_127bytes);
	device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n",
		__func__, mstat->xmt_pkt_lt_255bytes);
	device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n",
		__func__, mstat->xmt_pkt_lt_511bytes);
	device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
		__func__, mstat->xmt_pkt_lt_1023bytes);
	device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
		__func__, mstat->xmt_pkt_lt_1518bytes);
	device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
		__func__, mstat->xmt_pkt_gt_1518bytes);

	device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_frames);
	device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_bytes);
	device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_mcast_pkts);
	device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_bcast_pkts);
	device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_pause_frames);
	device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_cntrl_pkts);
	device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n",
		__func__, mstat->rcv_pkt_lt_64bytes);
	device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n",
		__func__, mstat->rcv_pkt_lt_127bytes);
	device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n",
		__func__, mstat->rcv_pkt_lt_255bytes);
	device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n",
		__func__, mstat->rcv_pkt_lt_511bytes);
	device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
		__func__, mstat->rcv_pkt_lt_1023bytes);
	device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
		__func__, mstat->rcv_pkt_lt_1518bytes);
	device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
		__func__, mstat->rcv_pkt_gt_1518bytes);

	device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_len_error);
	device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_len_small);
	device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_len_large);
	device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_jabber);
	device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_dropped);
	device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__,
		mstat->fcs_error);
	device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__,
		mstat->align_error);
}

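/*
 * Name: qla_get_hw_stats
 * Function: Issues the Q8_MBX_GET_STATS mailbox command encoded in 'cmd'
 *	and leaves the raw 'rsp_size'-byte response in ha->hw.mbox for the
 *	caller to parse (see ql_get_stats() and qla_get_quick_stats()
 *	below).
 */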
static int
qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
{
	device_t		dev;
	q80_get_stats_t		*stat;
	q80_get_stats_rsp_t	*stat_rsp;
	uint32_t		err;

	dev = ha->pci_dev;

	stat = (q80_get_stats_t *)ha->hw.mbox;
	bzero(stat, (sizeof (q80_get_stats_t)));

	stat->opcode = Q8_MBX_GET_STATS;
	stat->count_version = 2;
	stat->count_version |= Q8_MBX_CMD_VERSION;

	stat->cmd = cmd;

        if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
                ha->hw.mbox, (rsp_size >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);

        if (err) {
                return -1;
        }

	return 0;
}

void
ql_get_stats(qla_host_t *ha)
{
	q80_get_stats_rsp_t	*stat_rsp;
	q80_mac_stats_t		*mstat;
	q80_xmt_stats_t		*xstat;
	q80_rcv_stats_t		*rstat;
	uint32_t		cmd;
	int			i;

	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
	/*
	 * Get MAC Statistics
	 */
	cmd = Q8_GET_STATS_CMD_TYPE_MAC;
//	cmd |= Q8_GET_STATS_CMD_CLEAR;

	cmd |= ((ha->pci_func & 0x1) << 16);

	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
		mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
		qla_mac_stats(ha, mstat);
	} else {
                device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
			__func__, ha->hw.mbox[0]);
	}
	/*
	 * Get RCV Statistics
	 */
	cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
//	cmd |= Q8_GET_STATS_CMD_CLEAR;
	cmd |= (ha->hw.rcv_cntxt_id << 16);

	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
		rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
		qla_rcv_stats(ha, rstat);
	} else {
                device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
			__func__, ha->hw.mbox[0]);
	}
	/*
	 * Get XMT Statistics
	 */
	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
//		cmd |= Q8_GET_STATS_CMD_CLEAR;
		cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);

		if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
			== 0) {
			xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
			qla_xmt_stats(ha, xstat, i);
		} else {
			device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
				__func__, ha->hw.mbox[0]);
		}
	}
	return;
}

static void
qla_get_quick_stats(qla_host_t *ha)
{
	q80_get_mac_rcv_xmt_stats_rsp_t *stat_rsp;
	q80_mac_stats_t         *mstat;
	q80_xmt_stats_t         *xstat;
	q80_rcv_stats_t         *rstat;
	uint32_t                cmd;

	stat_rsp = (q80_get_mac_rcv_xmt_stats_rsp_t *)ha->hw.mbox;

	cmd = Q8_GET_STATS_CMD_TYPE_ALL;
//      cmd |= Q8_GET_STATS_CMD_CLEAR;

//      cmd |= ((ha->pci_func & 0x3) << 16);
	cmd |= (0xFFFF << 16);

	if (qla_get_hw_stats(ha, cmd,
			sizeof (q80_get_mac_rcv_xmt_stats_rsp_t)) == 0) {

		mstat = (q80_mac_stats_t *)&stat_rsp->mac;
		rstat = (q80_rcv_stats_t *)&stat_rsp->rcv;
		xstat = (q80_xmt_stats_t *)&stat_rsp->xmt;
		qla_mac_stats(ha, mstat);
		qla_rcv_stats(ha, rstat);
		qla_xmt_stats(ha, xstat, ha->hw.num_tx_rings);
	} else {
		device_printf(ha->pci_dev, "%s: failed [0x%08x]\n",
			__func__, ha->hw.mbox[0]);
	}
	return;
}

/*
 * Name: qla_tx_tso
 * Function: Checks if the packet to be transmitted is a candidate for
 *	Large TCP Segment Offload. If yes, the appropriate fields in the Tx
 *	Ring Structure are plugged in.
 */
static int
qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
{
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6 = NULL;
	struct tcphdr *th = NULL;
	uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
	uint16_t etype, opcode, offload = 1;
	device_t dev;

	dev = ha->pci_dev;

	eh = mtod(mp, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

	hdrlen = 0;

	switch (etype) {
		case ETHERTYPE_IP:

			tcp_opt_off = ehdrlen + sizeof(struct ip) +
					sizeof(struct tcphdr);

			if (mp->m_len < tcp_opt_off) {
				m_copydata(mp, 0, tcp_opt_off, hdr);
				ip = (struct ip *)(hdr + ehdrlen);
			} else {
				ip = (struct ip *)(mp->m_data + ehdrlen);
			}

			ip_hlen = ip->ip_hl << 2;
			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;

			if ((ip->ip_p != IPPROTO_TCP) ||
				(ip_hlen != sizeof (struct ip))) {
				/* IP Options are not supported */

				offload = 0;
			} else
				th = (struct tcphdr *)((caddr_t)ip + ip_hlen);

		break;

		case ETHERTYPE_IPV6:

			tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
					sizeof (struct tcphdr);

			if (mp->m_len < tcp_opt_off) {
				m_copydata(mp, 0, tcp_opt_off, hdr);
				ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
			} else {
				ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
			}

			ip_hlen = sizeof(struct ip6_hdr);
			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;

			if (ip6->ip6_nxt != IPPROTO_TCP) {
				//device_printf(dev, "%s: ipv6\n", __func__);
				offload = 0;
			} else
				th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
		break;

		default:
			QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
			offload = 0;
		break;
	}

	if (!offload)
		return (-1);

	tcp_hlen = th->th_off << 2;
	hdrlen = ehdrlen + ip_hlen + tcp_hlen;

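	/*
	 * When the headers spill past the first mbuf, the fixed portion up
	 * to tcp_opt_off was already copied into 'hdr' in the switch above
	 * (a reading of the copy lengths below): in that case only the
	 * remaining TCP options are pulled in here, while the whole header
	 * is copied when just the options were missing.
	 */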
        if (mp->m_len < hdrlen) {
                if (mp->m_len < tcp_opt_off) {
                        if (tcp_hlen > sizeof(struct tcphdr)) {
                                m_copydata(mp, tcp_opt_off,
                                        (tcp_hlen - sizeof(struct tcphdr)),
                                        &hdr[tcp_opt_off]);
                        }
                } else {
                        m_copydata(mp, 0, hdrlen, hdr);
                }
        }

	tx_cmd->mss = mp->m_pkthdr.tso_segsz;

	tx_cmd->flags_opcode = opcode;
	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
	tx_cmd->total_hdr_len = hdrlen;

	/* Multicast: least significant bit of the first address octet is 1 */
	if (eh->evl_dhost[0] & 0x01) {
		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
	}

	if (mp->m_len < hdrlen) {
		printf("%d\n", hdrlen);
		return (1);
	}

	return (0);
}

/*
 * Name: qla_tx_chksum
 * Function: Checks if the packet to be transmitted is a candidate for
 *	TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
 *	Ring Structure are plugged in.
 */
static int
qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
	uint32_t *tcp_hdr_off)
{
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	uint32_t ehdrlen, ip_hlen;
	uint16_t etype, opcode, offload = 1;
	device_t dev;
	uint8_t buf[sizeof(struct ip6_hdr)];

	dev = ha->pci_dev;

	*op_code = 0;

	if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
		return (-1);

	eh = mtod(mp, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

	switch (etype) {
		case ETHERTYPE_IP:
			ip = (struct ip *)(mp->m_data + ehdrlen);

			ip_hlen = sizeof (struct ip);

			if (mp->m_len < (ehdrlen + ip_hlen)) {
				m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
				ip = (struct ip *)buf;
			}

			if (ip->ip_p == IPPROTO_TCP)
				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
			else if (ip->ip_p == IPPROTO_UDP)
				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
			else {
				//device_printf(dev, "%s: ipv4\n", __func__);
				offload = 0;
			}
		break;

		case ETHERTYPE_IPV6:
			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);

			ip_hlen = sizeof(struct ip6_hdr);

			if (mp->m_len < (ehdrlen + ip_hlen)) {
				m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
					buf);
				ip6 = (struct ip6_hdr *)buf;
			}

			if (ip6->ip6_nxt == IPPROTO_TCP)
				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
			else if (ip6->ip6_nxt == IPPROTO_UDP)
				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
			else {
				//device_printf(dev, "%s: ipv6\n", __func__);
				offload = 0;
			}
		break;

		default:
			offload = 0;
		break;
	}
	if (!offload)
		return (-1);

	*op_code = opcode;
	*tcp_hdr_off = (ip_hlen + ehdrlen);

	return (0);
}

#define QLA_TX_MIN_FREE 2
1797 /*
1798  * Name: ql_hw_send
1799  * Function: Transmits a packet. It first checks if the packet is a
1800  *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
1801  *	offload. If neither of these criteria is met, it is transmitted
1802  *	as a regular ethernet frame.
1803  */
1804 int
1805 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
1806 	uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
1807 {
1808 	struct ether_vlan_header *eh;
1809 	qla_hw_t *hw = &ha->hw;
1810 	q80_tx_cmd_t *tx_cmd, tso_cmd;
1811 	bus_dma_segment_t *c_seg;
1812 	uint32_t num_tx_cmds, hdr_len = 0;
1813 	uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
1814 	device_t dev;
1815 	int i, ret;
1816 	uint8_t *src = NULL, *dst = NULL;
1817 	uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
1818 	uint32_t op_code = 0;
1819 	uint32_t tcp_hdr_off = 0;
1820 
1821 	dev = ha->pci_dev;
1822 
1823 	/*
1824 	 * Always make sure there is at least one empty slot in the tx_ring;
1825 	 * the tx_ring is considered full when only one entry is available.
1826 	 */
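	/*
	 * Each tx_cmd descriptor carries up to Q8_TX_CMD_MAX_SEGMENTS (4)
	 * buffer pointers; round up to the number of descriptors needed.
	 */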
1827 	num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
1828 
1829 	total_length = mp->m_pkthdr.len;
1830 	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
1831 		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
1832 			__func__, total_length);
1833 		return (-1);
1834 	}
1835 	eh = mtod(mp, struct ether_vlan_header *);
1836 
1837 	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
1838 
1839 		bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
1840 
1841 		src = frame_hdr;
1842 		ret = qla_tx_tso(ha, mp, &tso_cmd, src);
1843 
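		/*
		 * qla_tx_tso() returns 0 (header resides in the mbuf) or 1
		 * (header was copied into frame_hdr); any other value means
		 * the frame is not a TSO candidate.
		 */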
1844 		if (!(ret & ~1)) {
1845 			/* find the additional tx_cmd descriptors required */
1846 
1847 			if (mp->m_flags & M_VLANTAG)
1848 				tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
1849 
1850 			hdr_len = tso_cmd.total_hdr_len;
1851 
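			/*
			 * The packet header must be replicated into the tx
			 * ring after the data descriptors; the first copy
			 * descriptor holds (sizeof(q80_tx_cmd_t) -
			 * Q8_TX_CMD_TSO_ALIGN) bytes, each subsequent one a
			 * full descriptor's worth.
			 */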
1852 			bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
1853 			bytes = QL_MIN(bytes, hdr_len);
1854 
1855 			num_tx_cmds++;
1856 			hdr_len -= bytes;
1857 
1858 			while (hdr_len) {
1859 				bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
1860 				hdr_len -= bytes;
1861 				num_tx_cmds++;
1862 			}
1863 			hdr_len = tso_cmd.total_hdr_len;
1864 
1865 			if (ret == 0)
1866 				src = (uint8_t *)eh;
1867 		} else
1868 			return (EINVAL);
1869 	} else {
1870 		(void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
1871 	}
1872 
1873 	if (iscsi_pdu)
1874 		ha->hw.iscsi_pkt_count++;
1875 
1876 	if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
1877 		qla_hw_tx_done_locked(ha, txr_idx);
1878 		if (hw->tx_cntxt[txr_idx].txr_free <=
1879 				(num_tx_cmds + QLA_TX_MIN_FREE)) {
1880         		QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
1881 				"(num_tx_cmds + QLA_TX_MIN_FREE))\n",
1882 				__func__));
1883 			return (-1);
1884 		}
1885 	}
1886 
1887 	tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
1888 
1889 	if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
1890 
1891 		if (nsegs > ha->hw.max_tx_segs)
1892 			ha->hw.max_tx_segs = nsegs;
1893 
1894 		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1895 
1896 		if (op_code) {
1897 			tx_cmd->flags_opcode = op_code;
1898 			tx_cmd->tcp_hdr_off = tcp_hdr_off;
1899 
1900 		} else {
1901 			tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
1902 		}
1903 	} else {
1904 		bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
1905 		ha->tx_tso_frames++;
1906 	}
1907 
1908 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1909         	tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
1910 
1911 		if (iscsi_pdu)
1912 			eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
1913 
1914 	} else if (mp->m_flags & M_VLANTAG) {
1915 
1916 		if (hdr_len) { /* TSO */
1917 			tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
1918 						Q8_TX_CMD_FLAGS_HW_VLAN_ID);
1919 			tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
1920 		} else
1921 			tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
1922 
1923 		ha->hw_vlan_tx_frames++;
1924 		tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
1925 
1926 		if (iscsi_pdu) {
1927 			tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
1928 			mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
1929 		}
1930 	}
1931 
1932 
1933 	tx_cmd->n_bufs = (uint8_t)nsegs;
1934 	tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
1935 	tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
1936 	tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
1937 
1938 	c_seg = segs;
1939 
1940 	while (1) {
1941 		for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
1942 
1943 			switch (i) {
1944 			case 0:
1945 				tx_cmd->buf1_addr = c_seg->ds_addr;
1946 				tx_cmd->buf1_len = c_seg->ds_len;
1947 				break;
1948 
1949 			case 1:
1950 				tx_cmd->buf2_addr = c_seg->ds_addr;
1951 				tx_cmd->buf2_len = c_seg->ds_len;
1952 				break;
1953 
1954 			case 2:
1955 				tx_cmd->buf3_addr = c_seg->ds_addr;
1956 				tx_cmd->buf3_len = c_seg->ds_len;
1957 				break;
1958 
1959 			case 3:
1960 				tx_cmd->buf4_addr = c_seg->ds_addr;
1961 				tx_cmd->buf4_len = c_seg->ds_len;
1962 				break;
1963 			}
1964 
1965 			c_seg++;
1966 			nsegs--;
1967 		}
1968 
1969 		txr_next = hw->tx_cntxt[txr_idx].txr_next =
1970 			(hw->tx_cntxt[txr_idx].txr_next + 1) &
1971 				(NUM_TX_DESCRIPTORS - 1);
1972 		tx_cmd_count++;
1973 
1974 		if (!nsegs)
1975 			break;
1976 
1977 		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1978 		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1979 	}
1980 
1981 	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
1982 
1983 		/* TSO : Copy the header in the following tx cmd descriptors */
1984 
1985 		txr_next = hw->tx_cntxt[txr_idx].txr_next;
1986 
1987 		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1988 		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1989 
1990 		bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
1991 		bytes = QL_MIN(bytes, hdr_len);
1992 
1993 		dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
1994 
1995 		if (mp->m_flags & M_VLANTAG) {
1996 			/* first copy the src/dst MAC addresses */
1997 			bcopy(src, dst, (ETHER_ADDR_LEN * 2));
1998 			dst += (ETHER_ADDR_LEN * 2);
1999 			src += (ETHER_ADDR_LEN * 2);
2000 
2001 			*((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2002 			dst += 2;
2003 			*((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2004 			dst += 2;
2005 
2006 			/* bytes left in src header */
2007 			hdr_len -= ((ETHER_ADDR_LEN * 2) +
2008 					ETHER_VLAN_ENCAP_LEN);
2009 
2010 			/* bytes left in TxCmd Entry */
2011 			bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2012 
2013 
2014 			bcopy(src, dst, bytes);
2015 			src += bytes;
2016 			hdr_len -= bytes;
2017 		} else {
2018 			bcopy(src, dst, bytes);
2019 			src += bytes;
2020 			hdr_len -= bytes;
2021 		}
2022 
2023 		txr_next = hw->tx_cntxt[txr_idx].txr_next =
2024 				(hw->tx_cntxt[txr_idx].txr_next + 1) &
2025 					(NUM_TX_DESCRIPTORS - 1);
2026 		tx_cmd_count++;
2027 
2028 		while (hdr_len) {
2029 			tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2030 			bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2031 
2032 			bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2033 
2034 			bcopy(src, tx_cmd, bytes);
2035 			src += bytes;
2036 			hdr_len -= bytes;
2037 
2038 			txr_next = hw->tx_cntxt[txr_idx].txr_next =
2039 				(hw->tx_cntxt[txr_idx].txr_next + 1) &
2040 					(NUM_TX_DESCRIPTORS - 1);
2041 			tx_cmd_count++;
2042 		}
2043 	}
2044 
2045 	hw->tx_cntxt[txr_idx].txr_free =
2046 		hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2047 
2048 	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,
2049 		txr_idx);
2050 	QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2051 
2052 	return (0);
2053 }
2054 
2055 
2056 
2057 #define Q8_CONFIG_IND_TBL_SIZE	32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
2058 static int
2059 qla_config_rss_ind_table(qla_host_t *ha)
2060 {
2061 	uint32_t i, count;
2062 	uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2063 
2064 
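	/*
	 * Distribute the indirection table entries round-robin across the
	 * SDS rings, then program the table to the firmware in chunks of
	 * Q8_CONFIG_IND_TBL_SIZE entries.
	 */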
2065 	for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2066 		rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2067 	}
2068 
2069 	for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX;
2070 		i += Q8_CONFIG_IND_TBL_SIZE) {
2071 
2072 		if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2073 			count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2074 		} else {
2075 			count = Q8_CONFIG_IND_TBL_SIZE;
2076 		}
2077 
2078 		if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2079 			rss_ind_tbl))
2080 			return (-1);
2081 	}
2082 
2083 	return (0);
2084 }
2085 
2086 /*
2087  * Name: ql_del_hw_if
2088  * Function: Destroys the hardware specific entities corresponding to an
2089  *	Ethernet Interface
2090  */
2091 void
2092 ql_del_hw_if(qla_host_t *ha)
2093 {
2094 	uint32_t i;
2095 	uint32_t num_msix;
2096 
2097 	(void)qla_stop_nic_func(ha);
2098 
2099 	qla_del_rcv_cntxt(ha);
2100 	qla_del_xmt_cntxt(ha);
2101 
2102 	if (ha->hw.flags.init_intr_cnxt) {
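		/*
		 * Tear down the interrupt contexts in groups of up to
		 * Q8_MAX_INTR_VECTORS vectors, mirroring how they were
		 * created in ql_init_hw_if().
		 */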
2103 		for (i = 0; i < ha->hw.num_sds_rings; ) {
2104 
2105 			if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2106 				num_msix = Q8_MAX_INTR_VECTORS;
2107 			else
2108 				num_msix = ha->hw.num_sds_rings - i;
2109 			qla_config_intr_cntxt(ha, i, num_msix, 0);
2110 
2111 			i += num_msix;
2112 		}
2113 
2114 		ha->hw.flags.init_intr_cnxt = 0;
2115 	}
2116 	return;
2117 }
2118 
2119 void
2120 qla_confirm_9kb_enable(qla_host_t *ha)
2121 {
2122 	uint32_t supports_9kb = 0;
2123 
2124 	ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2125 
2126 	/* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2127 	WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2128 	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2129 
2130 	qla_get_nic_partition(ha, &supports_9kb, NULL);
2131 
2132 	if (!supports_9kb)
2133 		ha->hw.enable_9kb = 0;
2134 
2135 	return;
2136 }
2137 
2138 
2139 /*
2140  * Name: ql_init_hw_if
2141  * Function: Creates the hardware specific entities corresponding to an
2142  *	Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2143  *	corresponding to the interface. Enables LRO if allowed.
2144  */
2145 int
2146 ql_init_hw_if(qla_host_t *ha)
2147 {
2148 	device_t	dev;
2149 	uint32_t	i;
2150 	uint8_t		bcast_mac[6];
2151 	qla_rdesc_t	*rdesc;
2152 	uint32_t	num_msix;
2153 
2154 	dev = ha->pci_dev;
2155 
2156 	for (i = 0; i < ha->hw.num_sds_rings; i++) {
2157 		bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2158 			ha->hw.dma_buf.sds_ring[i].size);
2159 	}
2160 
2161 	for (i = 0; i < ha->hw.num_sds_rings; ) {
2162 
2163 		if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2164 			num_msix = Q8_MAX_INTR_VECTORS;
2165 		else
2166 			num_msix = ha->hw.num_sds_rings - i;
2167 
2168 		if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2169 
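			/*
			 * Creation failed part way; destroy the interrupt
			 * contexts created so far before bailing out.
			 */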
2170 			if (i > 0) {
2171 
2172 				num_msix = i;
2173 
2174 				for (i = 0; i < num_msix; ) {
2175 					qla_config_intr_cntxt(ha, i,
2176 						Q8_MAX_INTR_VECTORS, 0);
2177 					i += Q8_MAX_INTR_VECTORS;
2178 				}
2179 			}
2180 			return (-1);
2181 		}
2182 
2183 		i = i + num_msix;
2184 	}
2185 
2186         ha->hw.flags.init_intr_cnxt = 1;
2187 
2188 	if (ha->hw.mdump_init == 0) {
2189 		qla_minidump_init(ha);
2190 	}
2191 
2192 	/*
2193 	 * Create Receive Context
2194 	 */
2195 	if (qla_init_rcv_cntxt(ha)) {
2196 		return (-1);
2197 	}
2198 
2199 	for (i = 0; i < ha->hw.num_rds_rings; i++) {
2200 		rdesc = &ha->hw.rds[i];
2201 		rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2202 		rdesc->rx_in = 0;
2203 		/* Update the RDS Producer Indices */
2204 		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
2205 			rdesc->rx_next);
2206 	}
2207 
2208 
2209 	/*
2210 	 * Create Transmit Context
2211 	 */
2212 	if (qla_init_xmt_cntxt(ha)) {
2213 		qla_del_rcv_cntxt(ha);
2214 		return (-1);
2215 	}
2216 	ha->hw.max_tx_segs = 0;
2217 
2218 	if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1))
2219 		return(-1);
2220 
2221 	ha->hw.flags.unicast_mac = 1;
2222 
2223 	bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2224 	bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2225 
2226 	if (qla_config_mac_addr(ha, bcast_mac, 1))
2227 		return (-1);
2228 
2229 	ha->hw.flags.bcast_mac = 1;
2230 
2231 	/*
2232 	 * program any cached multicast addresses
2233 	 */
2234 	if (qla_hw_add_all_mcast(ha))
2235 		return (-1);
2236 
2237 	if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2238 		return (-1);
2239 
2240 	if (qla_config_rss_ind_table(ha))
2241 		return (-1);
2242 
2243 	if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2244 		return (-1);
2245 
2246 	if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2247 		return (-1);
2248 
2249 	if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2250 		return (-1);
2251 
2252         if (qla_init_nic_func(ha))
2253                 return (-1);
2254 
2255         if (qla_query_fw_dcbx_caps(ha))
2256                 return (-1);
2257 
2258 	for (i = 0; i < ha->hw.num_sds_rings; i++)
2259 		QL_ENABLE_INTERRUPTS(ha, i);
2260 
2261 	return (0);
2262 }
2263 
2264 static int
2265 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
2266 {
2267         device_t                dev = ha->pci_dev;
2268         q80_rq_map_sds_to_rds_t *map_rings;
2269 	q80_rsp_map_sds_to_rds_t *map_rings_rsp;
2270         uint32_t                i, err;
2271         qla_hw_t                *hw = &ha->hw;
2272 
2273         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
2274         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
2275 
2276         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
2277         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
2278         map_rings->count_version |= Q8_MBX_CMD_VERSION;
2279 
2280         map_rings->cntxt_id = hw->rcv_cntxt_id;
2281         map_rings->num_rings = num_idx;
2282 
2283 	for (i = 0; i < num_idx; i++) {
2284 		map_rings->sds_rds[i].sds_ring = i + start_idx;
2285 		map_rings->sds_rds[i].rds_ring = i + start_idx;
2286 	}
2287 
2288         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
2289                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
2290                 ha->hw.mbox, (sizeof(q80_rsp_map_sds_to_rds_t) >> 2), 0)) {
2291                 device_printf(dev, "%s: failed0\n", __func__);
2292                 return (-1);
2293         }
2294 
2295         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
2296 
2297         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
2298 
2299         if (err) {
2300                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2301                 return (-1);
2302         }
2303 
2304         return (0);
2305 }
2306 
2307 /*
2308  * Name: qla_init_rcv_cntxt
2309  * Function: Creates the Receive Context.
2310  */
2311 static int
2312 qla_init_rcv_cntxt(qla_host_t *ha)
2313 {
2314 	q80_rq_rcv_cntxt_t	*rcntxt;
2315 	q80_rsp_rcv_cntxt_t	*rcntxt_rsp;
2316 	q80_stat_desc_t		*sdesc;
2317 	int			i, j;
2318         qla_hw_t		*hw = &ha->hw;
2319 	device_t		dev;
2320 	uint32_t		err;
2321 	uint32_t		rcntxt_sds_rings;
2322 	uint32_t		rcntxt_rds_rings;
2323 	uint32_t		max_idx;
2324 
2325 	dev = ha->pci_dev;
2326 
2327 	/*
2328 	 * Create Receive Context
2329 	 */
2330 
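	/*
	 * Seed every status descriptor with a non-zero value so that
	 * entries not yet written back by the firmware can be recognized.
	 */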
2331 	for (i = 0; i < hw->num_sds_rings; i++) {
2332 		for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
2333 			sdesc = (q80_stat_desc_t *)
2334 				&hw->sds[i].sds_ring_base[j];
2335 			sdesc->data[0] = 1ULL;
2336 			sdesc->data[1] = 1ULL;
2337 		}
2338 	}
2339 
2340 	rcntxt_sds_rings = hw->num_sds_rings;
2341 	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2342 		rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2343 
2344 	rcntxt_rds_rings = hw->num_rds_rings;
2345 
2346 	if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2347 		rcntxt_rds_rings = MAX_RDS_RING_SETS;
2348 
2349 	rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2350 	bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2351 
2352 	rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2353 	rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2354 	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2355 
2356 	rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2357 			Q8_RCV_CNTXT_CAP0_LRO |
2358 			Q8_RCV_CNTXT_CAP0_HW_LRO |
2359 			Q8_RCV_CNTXT_CAP0_RSS |
2360 			Q8_RCV_CNTXT_CAP0_SGL_LRO;
2361 
2362 	if (ha->hw.enable_9kb)
2363 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
2364 	else
2365 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
2366 
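	/*
	 * nrds_sets_rings appears to encode the number of RDS sets in
	 * bits 0..4 and the rings per set (one here) starting at bit 5.
	 */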
2367 	if (ha->hw.num_rds_rings > 1) {
2368 		rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2369 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2370 	} else
2371 		rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2372 
2373 	rcntxt->nsds_rings = rcntxt_sds_rings;
2374 
2375 	rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2376 
2377 	rcntxt->rcv_vpid = 0;
2378 
2379 	for (i = 0; i <  rcntxt_sds_rings; i++) {
2380 		rcntxt->sds[i].paddr =
2381 			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2382 		rcntxt->sds[i].size =
2383 			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2384 		if (ha->msix_count == 2) {
2385 			rcntxt->sds[i].intr_id =
2386 				qla_host_to_le16(hw->intr_id[0]);
2387 			rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i));
2388 		} else {
2389 			rcntxt->sds[i].intr_id =
2390 				qla_host_to_le16(hw->intr_id[i]);
2391 			rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2392 		}
2393 	}
2394 
2395 	for (i = 0; i <  rcntxt_rds_rings; i++) {
2396 		rcntxt->rds[i].paddr_std =
2397 			qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2398 
2399 		if (ha->hw.enable_9kb)
2400 			rcntxt->rds[i].std_bsize =
2401 				qla_host_to_le64(MJUM9BYTES);
2402 		else
2403 			rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2404 
2405 		rcntxt->rds[i].std_nentries =
2406 			qla_host_to_le32(NUM_RX_DESCRIPTORS);
2407 	}
2408 
2409         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2410 		(sizeof (q80_rq_rcv_cntxt_t) >> 2),
2411                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2412                 device_printf(dev, "%s: failed0\n", __func__);
2413                 return (-1);
2414         }
2415 
2416         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2417 
2418         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2419 
2420         if (err) {
2421                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2422                 return (-1);
2423         }
2424 
2425 	for (i = 0; i <  rcntxt_sds_rings; i++) {
2426 		hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
2427 	}
2428 
2429 	for (i = 0; i <  rcntxt_rds_rings; i++) {
2430 		hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
2431 	}
2432 
2433 	hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
2434 
2435 	ha->hw.flags.init_rx_cnxt = 1;
2436 
2437 	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
2438 
2439 		for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
2440 
2441 			if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
2442 				max_idx = MAX_RCNTXT_SDS_RINGS;
2443 			else
2444 				max_idx = hw->num_sds_rings - i;
2445 
2446 			err = qla_add_rcv_rings(ha, i, max_idx);
2447 			if (err)
2448 				return -1;
2449 
2450 			i += max_idx;
2451 		}
2452 	}
2453 
2454 	if (hw->num_rds_rings > 1) {
2455 
2456 		for (i = 0; i < hw->num_rds_rings; ) {
2457 
2458 			if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
2459 				max_idx = MAX_SDS_TO_RDS_MAP;
2460 			else
2461 				max_idx = hw->num_rds_rings - i;
2462 
2463 			err = qla_map_sds_to_rds(ha, i, max_idx);
2464 			if (err)
2465 				return -1;
2466 
2467 			i += max_idx;
2468 		}
2469 	}
2470 
2471 	return (0);
2472 }
2473 
2474 static int
2475 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
2476 {
2477 	device_t		dev = ha->pci_dev;
2478 	q80_rq_add_rcv_rings_t	*add_rcv;
2479 	q80_rsp_add_rcv_rings_t	*add_rcv_rsp;
2480 	uint32_t		i, j, err;
2481         qla_hw_t		*hw = &ha->hw;
2482 
2483 	add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
2484 	bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
2485 
2486 	add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
2487 	add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
2488 	add_rcv->count_version |= Q8_MBX_CMD_VERSION;
2489 
2490 	add_rcv->nrds_sets_rings = nsds | (1 << 5);
2491 	add_rcv->nsds_rings = nsds;
2492 	add_rcv->cntxt_id = hw->rcv_cntxt_id;
2493 
2494         for (i = 0; i <  nsds; i++) {
2495 
2496 		j = i + sds_idx;
2497 
2498                 add_rcv->sds[i].paddr =
2499                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
2500 
2501                 add_rcv->sds[i].size =
2502                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2503 
2504                 if (ha->msix_count == 2) {
2505                         add_rcv->sds[i].intr_id =
2506                                 qla_host_to_le16(hw->intr_id[0]);
2507                         add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j);
2508                 } else {
2509                         add_rcv->sds[i].intr_id =
2510                                 qla_host_to_le16(hw->intr_id[j]);
2511                         add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
2512                 }
2513 
2514         }
2515         for (i = 0; (i <  nsds); i++) {
2516                 j = i + sds_idx;
2517 
2518                 add_rcv->rds[i].paddr_std =
2519                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
2520 
2521 		if (ha->hw.enable_9kb)
2522 			add_rcv->rds[i].std_bsize =
2523 				qla_host_to_le64(MJUM9BYTES);
2524 		else
2525                 	add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2526 
2527                 add_rcv->rds[i].std_nentries =
2528                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2529         }
2530 
2531 
2532         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
2533 		(sizeof (q80_rq_add_rcv_rings_t) >> 2),
2534                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2535                 device_printf(dev, "%s: failed0\n", __func__);
2536                 return (-1);
2537         }
2538 
2539         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
2540 
2541         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
2542 
2543         if (err) {
2544                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2545                 return (-1);
2546         }
2547 
2548 	for (i = 0; i < nsds; i++) {
2549 		hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
2550 	}
2551 
2552 	for (i = 0; i < nsds; i++) {
2553 		hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
2554 	}
2555 
2556 	return (0);
2557 }
2558 
2559 /*
2560  * Name: qla_del_rcv_cntxt
2561  * Function: Destroys the Receive Context.
2562  */
2563 static void
2564 qla_del_rcv_cntxt(qla_host_t *ha)
2565 {
2566 	device_t			dev = ha->pci_dev;
2567 	q80_rcv_cntxt_destroy_t		*rcntxt;
2568 	q80_rcv_cntxt_destroy_rsp_t	*rcntxt_rsp;
2569 	uint32_t			err;
2570 	uint8_t				bcast_mac[6];
2571 
2572 	if (!ha->hw.flags.init_rx_cnxt)
2573 		return;
2574 
2575 	if (qla_hw_del_all_mcast(ha))
2576 		return;
2577 
2578 	if (ha->hw.flags.bcast_mac) {
2579 
2580 		bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2581 		bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2582 
2583 		if (qla_config_mac_addr(ha, bcast_mac, 0))
2584 			return;
2585 		ha->hw.flags.bcast_mac = 0;
2586 
2587 	}
2588 
2589 	if (ha->hw.flags.unicast_mac) {
2590 		if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0))
2591 			return;
2592 		ha->hw.flags.unicast_mac = 0;
2593 	}
2594 
2595 	rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
2596 	bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
2597 
2598 	rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
2599 	rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
2600 	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2601 
2602 	rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
2603 
2604         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2605 		(sizeof (q80_rcv_cntxt_destroy_t) >> 2),
2606                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
2607                 device_printf(dev, "%s: failed0\n", __func__);
2608                 return;
2609         }
2610         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
2611 
2612         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2613 
2614         if (err) {
2615                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2616         }
2617 
2618 	ha->hw.flags.init_rx_cnxt = 0;
2619 	return;
2620 }
2621 
2622 /*
2623  * Name: qla_init_xmt_cntxt
2624  * Function: Creates the Transmit Context.
2625  */
2626 static int
2627 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2628 {
2629 	device_t		dev;
2630         qla_hw_t		*hw = &ha->hw;
2631 	q80_rq_tx_cntxt_t	*tcntxt;
2632 	q80_rsp_tx_cntxt_t	*tcntxt_rsp;
2633 	uint32_t		err;
2634 	qla_hw_tx_cntxt_t       *hw_tx_cntxt;
2635 
2636 	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2637 
2638 	dev = ha->pci_dev;
2639 
2640 	/*
2641 	 * Create Transmit Context
2642 	 */
2643 	tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
2644 	bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
2645 
2646 	tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
2647 	tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
2648 	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2649 
2650 #ifdef QL_ENABLE_ISCSI_TLV
2651 
2652 	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
2653 				Q8_TX_CNTXT_CAP0_TC;
2654 
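	/*
	 * With iSCSI TLV enabled, map the upper half of the tx rings to
	 * traffic class 1 so iSCSI PDUs can be sent on their own class.
	 */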
2655 	if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
2656 		tcntxt->traffic_class = 1;
2657 	}
2658 
2659 #else
2660 
2661 	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
2662 
2663 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
2664 
2665 	tcntxt->ntx_rings = 1;
2666 
2667 	tcntxt->tx_ring[0].paddr =
2668 		qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
2669 	tcntxt->tx_ring[0].tx_consumer =
2670 		qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
2671 	tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
2672 
2673 	tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]);
2674 	tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
2675 
2676 
2677 	hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
2678 	hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
2679 
2680         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2681 		(sizeof (q80_rq_tx_cntxt_t) >> 2),
2682                 ha->hw.mbox,
2683 		(sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
2684                 device_printf(dev, "%s: failed0\n", __func__);
2685                 return (-1);
2686         }
2687         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
2688 
2689         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2690 
2691         if (err) {
2692                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2693 		return -1;
2694         }
2695 
2696 	hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
2697 	hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
2698 
2699 	if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
2700 		return (-1);
2701 
2702 	return (0);
2703 }
2704 
2705 
2706 /*
2707  * Name: qla_del_xmt_cntxt
2708  * Function: Destroys the Transmit Context.
2709  */
2710 static int
2711 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2712 {
2713 	device_t			dev = ha->pci_dev;
2714 	q80_tx_cntxt_destroy_t		*tcntxt;
2715 	q80_tx_cntxt_destroy_rsp_t	*tcntxt_rsp;
2716 	uint32_t			err;
2717 
2718 	tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
2719 	bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
2720 
2721 	tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
2722 	tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
2723 	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2724 
2725 	tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
2726 
2727         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2728 		(sizeof (q80_tx_cntxt_destroy_t) >> 2),
2729                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
2730                 device_printf(dev, "%s: failed0\n", __func__);
2731                 return (-1);
2732         }
2733         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
2734 
2735         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2736 
2737         if (err) {
2738                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2739 		return (-1);
2740         }
2741 
2742 	return (0);
2743 }
2744 static void
2745 qla_del_xmt_cntxt(qla_host_t *ha)
2746 {
2747 	uint32_t i;
2748 
2749 	if (!ha->hw.flags.init_tx_cnxt)
2750 		return;
2751 
2752 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
2753 		if (qla_del_xmt_cntxt_i(ha, i))
2754 			break;
2755 	}
2756 	ha->hw.flags.init_tx_cnxt = 0;
2757 }
2758 
2759 static int
2760 qla_init_xmt_cntxt(qla_host_t *ha)
2761 {
2762 	uint32_t i, j;
2763 
2764 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
2765 		if (qla_init_xmt_cntxt_i(ha, i) != 0) {
2766 			for (j = 0; j < i; j++)
2767 				qla_del_xmt_cntxt_i(ha, j);
2768 			return (-1);
2769 		}
2770 	}
2771 	ha->hw.flags.init_tx_cnxt = 1;
2772 	return (0);
2773 }
2774 
2775 static int
2776 qla_hw_add_all_mcast(qla_host_t *ha)
2777 {
2778 	int i, nmcast;
2779 
2780 	nmcast = ha->hw.nmcast;
2781 
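	/*
	 * Program every non-zero (in-use) entry of the cached multicast
	 * table into the hardware.
	 */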
2782 	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2783 		if ((ha->hw.mcast[i].addr[0] != 0) ||
2784 			(ha->hw.mcast[i].addr[1] != 0) ||
2785 			(ha->hw.mcast[i].addr[2] != 0) ||
2786 			(ha->hw.mcast[i].addr[3] != 0) ||
2787 			(ha->hw.mcast[i].addr[4] != 0) ||
2788 			(ha->hw.mcast[i].addr[5] != 0)) {
2789 
2790 			if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 1)) {
2791                 		device_printf(ha->pci_dev, "%s: failed\n",
2792 					__func__);
2793 				return (-1);
2794 			}
2795 
2796 			nmcast--;
2797 		}
2798 	}
2799 	return 0;
2800 }
2801 
2802 static int
2803 qla_hw_del_all_mcast(qla_host_t *ha)
2804 {
2805 	int i, nmcast;
2806 
2807 	nmcast = ha->hw.nmcast;
2808 
2809 	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2810 		if ((ha->hw.mcast[i].addr[0] != 0) ||
2811 			(ha->hw.mcast[i].addr[1] != 0) ||
2812 			(ha->hw.mcast[i].addr[2] != 0) ||
2813 			(ha->hw.mcast[i].addr[3] != 0) ||
2814 			(ha->hw.mcast[i].addr[4] != 0) ||
2815 			(ha->hw.mcast[i].addr[5] != 0)) {
2816 
2817 			if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 0))
2818 				return (-1);
2819 
2820 			nmcast--;
2821 		}
2822 	}
2823 	return 0;
2824 }
2825 
2826 static int
2827 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
2828 {
2829 	int i;
2830 
2831 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2832 
2833 		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
2834 			return 0; /* it has already been added */
2835 	}
2836 
2837 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2838 
2839 		if ((ha->hw.mcast[i].addr[0] == 0) &&
2840 			(ha->hw.mcast[i].addr[1] == 0) &&
2841 			(ha->hw.mcast[i].addr[2] == 0) &&
2842 			(ha->hw.mcast[i].addr[3] == 0) &&
2843 			(ha->hw.mcast[i].addr[4] == 0) &&
2844 			(ha->hw.mcast[i].addr[5] == 0)) {
2845 
2846 			if (qla_config_mac_addr(ha, mta, 1))
2847 				return (-1);
2848 
2849 			bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
2850 			ha->hw.nmcast++;
2851 
2852 			return 0;
2853 		}
2854 	}
2855 	return 0;
2856 }
2857 
2858 static int
2859 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
2860 {
2861 	int i;
2862 
2863 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2864 		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
2865 
2866 			if (qla_config_mac_addr(ha, mta, 0))
2867 				return (-1);
2868 
2869 			ha->hw.mcast[i].addr[0] = 0;
2870 			ha->hw.mcast[i].addr[1] = 0;
2871 			ha->hw.mcast[i].addr[2] = 0;
2872 			ha->hw.mcast[i].addr[3] = 0;
2873 			ha->hw.mcast[i].addr[4] = 0;
2874 			ha->hw.mcast[i].addr[5] = 0;
2875 
2876 			ha->hw.nmcast--;
2877 
2878 			return 0;
2879 		}
2880 	}
2881 	return 0;
2882 }
2883 
2884 /*
2885  * Name: ql_hw_set_multi
2886  * Function: Sets the Multicast Addresses provided by the host O.S. into
2887  *	the hardware (for the given interface)
2888  */
2889 int
2890 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast, uint32_t mcnt,
2891 	uint32_t add_mac)
2892 {
2893 	int i;
2894 	uint8_t *mta = mcast;
2895 	int ret = 0;
2896 
2897 	for (i = 0; i < mcnt; i++) {
2898 		if (add_mac) {
2899 			ret = qla_hw_add_mcast(ha, mta);
2900 			if (ret)
2901 				break;
2902 		} else {
2903 			ret = qla_hw_del_mcast(ha, mta);
2904 			if (ret)
2905 				break;
2906 		}
2907 
2908 		mta += Q8_MAC_ADDR_LEN;
2909 	}
2910 	return (ret);
2911 }
2912 
2913 /*
2914  * Name: qla_hw_tx_done_locked
2915  * Function: Handle Transmit Completions
2916  */
2917 static void
2918 qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
2919 {
2920 	qla_tx_buf_t *txb;
2921         qla_hw_t *hw = &ha->hw;
2922 	uint32_t comp_idx, comp_count = 0;
2923 	qla_hw_tx_cntxt_t *hw_tx_cntxt;
2924 
2925 	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2926 
2927 	/* retrieve index of last entry in tx ring completed */
2928 	comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
2929 
2930 	while (comp_idx != hw_tx_cntxt->txr_comp) {
2931 
2932 		txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
2933 
2934 		hw_tx_cntxt->txr_comp++;
2935 		if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
2936 			hw_tx_cntxt->txr_comp = 0;
2937 
2938 		comp_count++;
2939 
2940 		if (txb->m_head) {
2941 			if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
2942 
2943 			bus_dmamap_sync(ha->tx_tag, txb->map,
2944 				BUS_DMASYNC_POSTWRITE);
2945 			bus_dmamap_unload(ha->tx_tag, txb->map);
2946 			m_freem(txb->m_head);
2947 
2948 			txb->m_head = NULL;
2949 		}
2950 	}
2951 
2952 	hw_tx_cntxt->txr_free += comp_count;
2953 	return;
2954 }
2955 
2956 /*
2957  * Name: ql_hw_tx_done
2958  * Function: Handle Transmit Completions
2959  */
2960 void
2961 ql_hw_tx_done(qla_host_t *ha)
2962 {
2963 	int i;
2964 	uint32_t flag = 0;
2965 
2966 	if (!mtx_trylock(&ha->tx_lock)) {
2967        		QL_DPRINT8(ha, (ha->pci_dev,
2968 			"%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
2969 		return;
2970 	}
2971 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
2972 		qla_hw_tx_done_locked(ha, i);
2973 		if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1))
2974 			flag = 1;
2975 	}
2976 
2977 	if (!flag)
2978 		ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2979 
2980 	QLA_TX_UNLOCK(ha);
2981 	return;
2982 }
2983 
2984 void
2985 ql_update_link_state(qla_host_t *ha)
2986 {
2987 	uint32_t link_state;
2988 	uint32_t prev_link_state;
2989 
2990 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2991 		ha->hw.link_up = 0;
2992 		return;
2993 	}
2994 	link_state = READ_REG32(ha, Q8_LINK_STATE);
2995 
2996 	prev_link_state = ha->hw.link_up;
2997 
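	/*
	 * Each PCI function has a 4-bit link state field in Q8_LINK_STATE;
	 * a value of 1 indicates link up.
	 */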
2998 	if (ha->pci_func == 0)
2999 		ha->hw.link_up = (((link_state & 0xF) == 1) ? 1 : 0);
3000 	else
3001 		ha->hw.link_up = ((((link_state >> 4) & 0xF) == 1) ? 1 : 0);
3002 
3003 	if (prev_link_state != ha->hw.link_up) {
3004 		if (ha->hw.link_up) {
3005 			if_link_state_change(ha->ifp, LINK_STATE_UP);
3006 		} else {
3007 			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3008 		}
3009 	}
3010 	return;
3011 }
3012 
3013 void
3014 ql_hw_stop_rcv(qla_host_t *ha)
3015 {
3016 	int i, done, count = 100;
3017 
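	/* wait up to 1 second (100 * 10ms) for the sds rings to go idle */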
3018 	while (count) {
3019 		done = 1;
3020 		for (i = 0; i < ha->hw.num_sds_rings; i++) {
3021 			if (ha->hw.sds[i].rcv_active)
3022 				done = 0;
3023 		}
3024 		if (done)
3025 			break;
3026 		else
3027 			qla_mdelay(__func__, 10);
3028 		count--;
3029 	}
3030 	if (!count)
3031 		device_printf(ha->pci_dev, "%s: receive did not go idle\n",
3032 			__func__);
3032 
3033 	return;
3034 }
3035 
3036 int
3037 ql_hw_check_health(qla_host_t *ha)
3038 {
3039 	uint32_t val;
3040 
3041 	ha->hw.health_count++;
3042 
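	/* only run the health checks once every 1000 invocations */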
3043 	if (ha->hw.health_count < 1000)
3044 		return 0;
3045 
3046 	ha->hw.health_count = 0;
3047 
3048 	val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3049 
3050 	if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3051 		(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3052 		device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3053 			__func__, val);
3054 		return -1;
3055 	}
3056 
3057 	val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3058 
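	/*
	 * The firmware heartbeat register must keep changing between
	 * checks; a stuck value indicates a firmware hang.
	 */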
3059 	if ((val != ha->hw.hbeat_value) &&
3060 		(!(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE)))) {
3061 		ha->hw.hbeat_value = val;
3062 		return 0;
3063 	}
3064 	device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3065 		__func__, val);
3066 
3067 	return -1;
3068 }
3069 
3070 static int
3071 qla_init_nic_func(qla_host_t *ha)
3072 {
3073         device_t                dev;
3074         q80_init_nic_func_t     *init_nic;
3075         q80_init_nic_func_rsp_t *init_nic_rsp;
3076         uint32_t                err;
3077 
3078         dev = ha->pci_dev;
3079 
3080         init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3081         bzero(init_nic, sizeof(q80_init_nic_func_t));
3082 
3083         init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3084         init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3085         init_nic->count_version |= Q8_MBX_CMD_VERSION;
3086 
3087         init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3088         init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3089         init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3090 
3091 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3092         if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3093                 (sizeof (q80_init_nic_func_t) >> 2),
3094                 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3095                 device_printf(dev, "%s: failed\n", __func__);
3096                 return -1;
3097         }
3098 
3099         init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3100 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3101 
3102         err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3103 
3104         if (err) {
3105                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3106         }
3107 
3108         return 0;
3109 }
3110 
3111 static int
3112 qla_stop_nic_func(qla_host_t *ha)
3113 {
3114         device_t                dev;
3115         q80_stop_nic_func_t     *stop_nic;
3116         q80_stop_nic_func_rsp_t *stop_nic_rsp;
3117         uint32_t                err;
3118 
3119         dev = ha->pci_dev;
3120 
3121         stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3122         bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3123 
3124         stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3125         stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3126         stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3127 
3128         stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3129         stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3130 
3131 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3132         if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3133                 (sizeof (q80_stop_nic_func_t) >> 2),
3134                 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3135                 device_printf(dev, "%s: failed\n", __func__);
3136                 return -1;
3137         }
3138 
3139         stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3140 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3141 
3142         err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3143 
3144         if (err) {
3145                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3146         }
3147 
3148         return 0;
3149 }
3150 
3151 static int
3152 qla_query_fw_dcbx_caps(qla_host_t *ha)
3153 {
3154         device_t                        dev;
3155         q80_query_fw_dcbx_caps_t        *fw_dcbx;
3156         q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
3157         uint32_t                        err;
3158 
3159         dev = ha->pci_dev;
3160 
3161         fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3162         bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3163 
3164         fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3165         fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3166         fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3167 
3168         ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3169         if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3170                 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3171                 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3172                 device_printf(dev, "%s: failed\n", __func__);
3173                 return -1;
3174         }
3175 
3176         fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3177         ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3178                 sizeof (q80_query_fw_dcbx_caps_rsp_t));
3179 
3180         err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3181 
3182         if (err) {
3183                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3184         }
3185 
3186         return 0;
3187 }
3188 
3189 static int
3190 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3191         uint32_t aen_mb3, uint32_t aen_mb4)
3192 {
3193         device_t                dev;
3194         q80_idc_ack_t           *idc_ack;
3195         q80_idc_ack_rsp_t       *idc_ack_rsp;
3196         uint32_t                err;
3197         int                     count = 300;
3198 
3199         dev = ha->pci_dev;
3200 
3201         idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3202         bzero(idc_ack, sizeof(q80_idc_ack_t));
3203 
3204         idc_ack->opcode = Q8_MBX_IDC_ACK;
3205         idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3206         idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3207 
3208         idc_ack->aen_mb1 = aen_mb1;
3209         idc_ack->aen_mb2 = aen_mb2;
3210         idc_ack->aen_mb3 = aen_mb3;
3211         idc_ack->aen_mb4 = aen_mb4;
3212 
3213         ha->hw.imd_compl = 0;
3214 
3215         if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3216                 (sizeof (q80_idc_ack_t) >> 2),
3217                 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3218                 device_printf(dev, "%s: failed\n", __func__);
3219                 return -1;
3220         }
3221 
3222         idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3223 
3224         err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3225 
3226         if (err) {
3227                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3228                 return(-1);
3229         }
3230 
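        /* wait up to 30 seconds (300 * 100ms) for the IDC completion */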
3231         while (count && !ha->hw.imd_compl) {
3232                 qla_mdelay(__func__, 100);
3233                 count--;
3234         }
3235 
3236         if (!count)
3237                 return -1;
3238         else
3239                 device_printf(dev, "%s: count %d\n", __func__, count);
3240 
3241         return (0);
3242 }
3243 
3244 static int
3245 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3246 {
3247         device_t                dev;
3248         q80_set_port_cfg_t      *pcfg;
3249         q80_set_port_cfg_rsp_t  *pfg_rsp;
3250         uint32_t                err;
3251         int                     count = 300;
3252 
3253         dev = ha->pci_dev;
3254 
3255         pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3256         bzero(pcfg, sizeof(q80_set_port_cfg_t));
3257 
3258         pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3259         pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3260         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3261 
3262         pcfg->cfg_bits = cfg_bits;
3263 
3264         device_printf(dev, "%s: cfg_bits"
3265                 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3266                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3267                 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3268                 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3269                 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
3270 
3271         ha->hw.imd_compl = 0;
3272 
3273         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3274                 (sizeof (q80_set_port_cfg_t) >> 2),
3275                 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3276                 device_printf(dev, "%s: failed\n", __func__);
3277                 return -1;
3278         }
3279 
3280         pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
3281 
3282         err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
3283 
3284         if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3285                 while (count && !ha->hw.imd_compl) {
3286                         qla_mdelay(__func__, 100);
3287                         count--;
3288                 }
3289                 if (count) {
3290                         device_printf(dev, "%s: count %d\n", __func__, count);
3291 
3292                         err = 0;
3293                 }
3294         }
3295 
3296         if (err) {
3297                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3298                 return(-1);
3299         }
3300 
3301         return (0);
3302 }
3303 
3304 
3305 static int
3306 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3307 {
3308 	uint32_t			err;
3309 	device_t			dev = ha->pci_dev;
3310 	q80_config_md_templ_size_t	*md_size;
3311 	q80_config_md_templ_size_rsp_t	*md_size_rsp;
3312 
3313 #ifdef QL_LDFLASH_FW
3314 
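	/*
	 * QL_LDFLASH_FW builds carry the template length
	 * (ql83xx_minidump_len) in the driver, so no mailbox query is needed.
	 */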
3315 	*size = ql83xx_minidump_len;
3316 	return (0);
3317 
3318 #endif /* #ifdef QL_LDFLASH_FW */
3319 
3320 	md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3321 	bzero(md_size, sizeof(q80_config_md_templ_size_t));
3322 
3323 	md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3324 	md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3325 	md_size->count_version |= Q8_MBX_CMD_VERSION;
3326 
3327 	if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3328 		(sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3329 		(sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3330 
3331 		device_printf(dev, "%s: failed\n", __func__);
3332 
3333 		return (-1);
3334 	}
3335 
3336 	md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3337 
3338 	err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3339 
3340         if (err) {
3341 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3342 		return(-1);
3343         }
3344 
3345 	*size = md_size_rsp->templ_size;
3346 
3347 	return (0);
3348 }
3349 
3350 static int
3351 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3352 {
3353         device_t                dev;
3354         q80_get_port_cfg_t      *pcfg;
3355         q80_get_port_cfg_rsp_t  *pcfg_rsp;
3356         uint32_t                err;
3357 
3358         dev = ha->pci_dev;
3359 
3360         pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3361         bzero(pcfg, sizeof(q80_get_port_cfg_t));
3362 
3363         pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
3364         pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
3365         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3366 
3367         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3368                 (sizeof (q80_get_port_cfg_t) >> 2),
3369                 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
3370                 device_printf(dev, "%s: failed\n", __func__);
3371                 return -1;
3372         }
3373 
3374         pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
3375 
3376         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3377 
3378         if (err) {
3379                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3380                 return(-1);
3381         }
3382 
3383         device_printf(dev, "%s: [cfg_bits, port type]"
3384                 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3385                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3386                 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
3387                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3388                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3389                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
3390                 );
3391 
3392         *cfg_bits = pcfg_rsp->cfg_bits;
3393 
3394         return (0);
3395 }
3396 
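/*
 * Name: qla_iscsi_pdu
 * Function: Returns 0 if the mbuf is a TCP segment with the iSCSI
 *	well-known port (3260) as source or destination; -1 otherwise.
 */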
3397 int
3398 qla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
3399 {
3400         struct ether_vlan_header        *eh;
3401         uint16_t                        etype;
3402         struct ip                       *ip = NULL;
3403         struct ip6_hdr                  *ip6 = NULL;
3404         struct tcphdr                   *th = NULL;
3405         uint32_t                        hdrlen;
3406         uint32_t                        offset;
3407         uint8_t                         buf[sizeof(struct ip6_hdr)];
3408 
3409         eh = mtod(mp, struct ether_vlan_header *);
3410 
3411         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3412                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3413                 etype = ntohs(eh->evl_proto);
3414         } else {
3415                 hdrlen = ETHER_HDR_LEN;
3416                 etype = ntohs(eh->evl_encap_proto);
3417         }
3418 
3419 	if (etype == ETHERTYPE_IP) {
3420 
3421 		offset = (hdrlen + sizeof (struct ip));
3422 
3423 		if (mp->m_len >= offset) {
3424                         ip = (struct ip *)(mp->m_data + hdrlen);
3425 		} else {
3426 			m_copydata(mp, hdrlen, sizeof (struct ip), buf);
3427                         ip = (struct ip *)buf;
3428 		}
3429 
3430                 if (ip->ip_p == IPPROTO_TCP) {
3431 
3432 			hdrlen += ip->ip_hl << 2;
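			/*
			 * Only the TCP source/destination ports (the first 4
			 * bytes of the TCP header) are needed.
			 */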
3433 			offset = hdrlen + 4;
3434 
3435 			if (mp->m_len >= offset) {
3436 				th = (struct tcphdr *)(mp->m_data + hdrlen);
3437 			} else {
3438                                 m_copydata(mp, hdrlen, 4, buf);
3439 				th = (struct tcphdr *)buf;
3440 			}
3441                 }
3442 
3443 	} else if (etype == ETHERTYPE_IPV6) {
3444 
3445 		offset = (hdrlen + sizeof (struct ip6_hdr));
3446 
3447 		if (mp->m_len >= offset) {
3448                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
3449 		} else {
3450                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
3451                         ip6 = (struct ip6_hdr *)buf;
3452 		}
3453 
3454                 if (ip6->ip6_nxt == IPPROTO_TCP) {
3455 
3456 			hdrlen += sizeof(struct ip6_hdr);
3457 			offset = hdrlen + 4;
3458 
3459 			if (mp->m_len >= offset) {
3460 				th = (struct tcphdr *)(mp->m_data + hdrlen);
3461 			} else {
3462 				m_copydata(mp, hdrlen, 4, buf);
3463 				th = (struct tcphdr *)buf;
3464 			}
3465                 }
3466 	}
3467 
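        /* TCP port 3260 is the IANA-assigned iSCSI target port */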
3468         if (th != NULL) {
3469                 if ((th->th_sport == htons(3260)) ||
3470                         (th->th_dport == htons(3260)))
3471                         return 0;
3472         }
3473         return (-1);
3474 }
3475 
3476 void
3477 qla_hw_async_event(qla_host_t *ha)
3478 {
3479         switch (ha->hw.aen_mb0) {
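        /* 0x8101: Inter-Driver Communication (IDC) request notification */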
3480         case 0x8101:
3481                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
3482                         ha->hw.aen_mb3, ha->hw.aen_mb4);
3483 
3484                 break;
3485 
3486         default:
3487                 break;
3488         }
3489 
3490         return;
3491 }
3492 
3493 #ifdef QL_LDFLASH_FW
3494 static int
3495 qla_get_minidump_template(qla_host_t *ha)
3496 {
3497 	uint32_t			err;
3498 	device_t			dev = ha->pci_dev;
3499 	q80_config_md_templ_cmd_t	*md_templ;
3500 	q80_config_md_templ_cmd_rsp_t	*md_templ_rsp;
3501 
3502 	md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
3503 	bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
3504 
3505 	md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
3506 	md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
3507 	md_templ->count_version |= Q8_MBX_CMD_VERSION;
3508 
3509 	md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
3510 	md_templ->buff_size = ha->hw.dma_buf.minidump.size;
3511 
3512 	if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
3513 		(sizeof(q80_config_md_templ_cmd_t) >> 2),
3514 		 ha->hw.mbox,
3515 		(sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
3516 
3517 		device_printf(dev, "%s: failed\n", __func__);
3518 
3519 		return (-1);
3520 	}
3521 
3522 	md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
3523 
3524 	err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
3525 
3526 	if (err) {
3527 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3528 		return (-1);
3529 	}
3530 
3531 	return (0);
3532 
3533 }
3534 #endif /* #ifdef QL_LDFLASH_FW */
3535 
3536 static int
3537 qla_minidump_init(qla_host_t *ha)
3538 {
3539 	int		ret = 0;
3540 	uint32_t	template_size = 0;
3541 	device_t	dev = ha->pci_dev;
3542 
3543 	/*
3544 	 * Get Minidump Template Size
3545 	 */
3546 	ret = qla_get_minidump_tmplt_size(ha, &template_size);
3547 
3548 	if (ret || (template_size == 0)) {
3549 		device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
3550 			template_size);
3551 		return (-1);
3552 	}
3553 
3554 	/*
3555 	 * Allocate Memory for Minidump Template
3556 	 */
3557 
3558 	ha->hw.dma_buf.minidump.alignment = 8;
3559 	ha->hw.dma_buf.minidump.size = template_size;
3560 
3561 #ifdef QL_LDFLASH_FW
3562 	if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
3563 
3564 		device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
3565 
3566 		return (-1);
3567 	}
3568 	ha->hw.dma_buf.flags.minidump = 1;
3569 
3570 	/*
3571 	 * Retrieve Minidump Template
3572 	 */
3573 	ret = qla_get_minidump_template(ha);
3574 #else
3575 	ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
3576 #endif /* #ifdef QL_LDFLASH_FW */
3577 
3578 	if (ret) {
3579 		qla_minidump_free(ha);
3580 	} else {
3581 		ha->hw.mdump_init = 1;
3582 	}
3583 
3584 	return (ret);
3585 }
3586 
3587 
3588 static void
3589 qla_minidump_free(qla_host_t *ha)
3590 {
3591 	ha->hw.mdump_init = 0;
3592 	if (ha->hw.dma_buf.flags.minidump) {
3593 		ha->hw.dma_buf.flags.minidump = 0;
3594 		ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
3595 	}
3596 	return;
3597 }
3598 
3599 void
3600 ql_minidump(qla_host_t *ha)
3601 {
3602 	uint32_t delay = 6000;
3603 
3604 	if (!ha->hw.mdump_init)
3605 		return;
3606 
3607 	if (!ha->hw.mdump_active)
3608 		return;
3609 
3610 	if (ha->hw.mdump_active == 1) {
3611 		ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
3612 		ha->hw.mdump_start = 1;
3613 	}
3614 
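	/* wait (up to 6000 * 100ms) for the minidump capture to complete */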
3615 	while (delay-- && ha->hw.mdump_active) {
3616 		qla_mdelay(__func__, 100);
3617 	}
3618 	ha->hw.mdump_start = 0;
3619 	ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
3620 
3621 	return;
3622 }
3623