/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qls_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
34  */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qls_os.h"
#include "qls_hw.h"
#include "qls_def.h"
#include "qls_inline.h"
#include "qls_ver.h"
#include "qls_glbl.h"
#include "qls_dbg.h"

/*
 * Static Functions
 */
static int qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op);
static int qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac);
static int qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
		uint32_t add_mac, uint32_t index);

static int qls_init_rss(qla_host_t *ha);
static int qls_init_comp_queue(qla_host_t *ha, int cid);
static int qls_init_work_queue(qla_host_t *ha, int wid);
static int qls_init_fw_routing_table(qla_host_t *ha);
static int qls_hw_add_all_mcast(qla_host_t *ha);
static int qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta);
static int qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta);
static int qls_wait_for_flash_ready(qla_host_t *ha);

static int qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value);
static void qls_sem_unlock(qla_host_t *ha, uint32_t mask);

static void qls_free_tx_dma(qla_host_t *ha);
static int qls_alloc_tx_dma(qla_host_t *ha);
static void qls_free_rx_dma(qla_host_t *ha);
static int qls_alloc_rx_dma(qla_host_t *ha);
static void qls_free_mpi_dma(qla_host_t *ha);
static int qls_alloc_mpi_dma(qla_host_t *ha);
static void qls_free_rss_dma(qla_host_t *ha);
static int qls_alloc_rss_dma(qla_host_t *ha);

static int qls_flash_validate(qla_host_t *ha, const char *signature);

static int qls_wait_for_proc_addr_ready(qla_host_t *ha);
static int qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module,
		uint32_t reg, uint32_t *data);
static int qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module,
		uint32_t reg, uint32_t data);

static int qls_hw_reset(qla_host_t *ha);

/*
 * MPI Related Functions
 */
static int qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
		uint32_t *out_mbx, uint32_t o_count);
static int qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl);
static int qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status);
static void qls_mbx_get_link_status(qla_host_t *ha);
static void qls_mbx_about_fw(qla_host_t *ha);

int
qls_get_msix_count(qla_host_t *ha)
{
	return (ha->num_rx_rings);
}

static int
qls_syctl_mpi_dump(SYSCTL_HANDLER_ARGS)
{
	int err = 0, ret;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		qls_mpi_core_dump(ha);
	}
	return (err);
}

static int
qls_syctl_link_status(SYSCTL_HANDLER_ARGS)
{
	int err = 0, ret;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		qls_mbx_get_link_status(ha);
		qls_mbx_about_fw(ha);
	}
	return (err);
}

void
qls_hw_add_sysctls(qla_host_t *ha)
{
	device_t	dev;

	dev = ha->pci_dev;

	ha->num_rx_rings = MAX_RX_RINGS;
	ha->num_tx_rings = MAX_TX_RINGS;

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "num_rx_rings", CTLFLAG_RD, &ha->num_rx_rings,
		ha->num_rx_rings, "Number of Completion Queues");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->num_tx_rings,
		ha->num_tx_rings, "Number of Transmit Rings");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "mpi_dump",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
	    qls_syctl_mpi_dump, "I", "MPI Dump");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "link_status",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
	    qls_syctl_link_status, "I", "Link Status");
}

/*
 * Name: qls_free_dma
 * Function: Frees the DMA'able memory allocated in qls_alloc_dma()
 */
void
qls_free_dma(qla_host_t *ha)
{
	qls_free_rss_dma(ha);
	qls_free_mpi_dma(ha);
	qls_free_tx_dma(ha);
	qls_free_rx_dma(ha);
	return;
}

/*
 * Name: qls_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
qls_alloc_dma(qla_host_t *ha)
{
	if (qls_alloc_rx_dma(ha))
		return (-1);

	if (qls_alloc_tx_dma(ha)) {
		qls_free_rx_dma(ha);
		return (-1);
	}

	if (qls_alloc_mpi_dma(ha)) {
		qls_free_tx_dma(ha);
		qls_free_rx_dma(ha);
		return (-1);
	}

	if (qls_alloc_rss_dma(ha)) {
		qls_free_mpi_dma(ha);
		qls_free_tx_dma(ha);
		qls_free_rx_dma(ha);
		return (-1);
	}

	return (0);
}

static int
qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op)
{
	uint32_t data32;
	uint32_t count = 3;

	while (count--) {
		data32 = READ_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX);

		if (data32 & op)
			return (0);

		QLA_USEC_DELAY(100);
	}
	ha->qla_initiate_recovery = 1;
	return (-1);
}

/*
 * Name: qls_config_unicast_mac_addr
 * Function: binds/unbinds a unicast MAC address to the interface.
 */
static int
qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac)
{
	int ret = 0;
	uint32_t mac_upper = 0;
	uint32_t mac_lower = 0;
	uint32_t value = 0, index;

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
		Q81_CTL_SEM_SET_MAC_SERDES)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return(-1);
	}

	if (add_mac) {
		mac_upper = (ha->mac_addr[0] << 8) | ha->mac_addr[1];
		mac_lower = (ha->mac_addr[2] << 24) | (ha->mac_addr[3] << 16) |
				(ha->mac_addr[4] << 8) | ha->mac_addr[5];
	}
	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
	if (ret)
		goto qls_config_unicast_mac_addr_exit;

	index = 128 * (ha->pci_func & 0x1); /* index */

	value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
		Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC;

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);

	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
	if (ret)
		goto qls_config_unicast_mac_addr_exit;

	value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
		Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x1;

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);

	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
	if (ret)
		goto qls_config_unicast_mac_addr_exit;

	value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
		Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x2;

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);

	value = Q81_CAM_MAC_OFF2_ROUTE_NIC |
			((ha->pci_func & 0x1) << Q81_CAM_MAC_OFF2_FUNC_SHIFT) |
			(0 << Q81_CAM_MAC_OFF2_CQID_SHIFT);

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, value);

qls_config_unicast_mac_addr_exit:
	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
	return (ret);
}
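
/*
 * Worked example (illustrative only, using a made-up address): for
 * 00:0e:1e:aa:bb:cc the two words written above are
 *
 *	mac_upper = (0x00 << 8) | 0x0e             = 0x0000000e
 *	mac_lower = (0x1e << 24) | (0xaa << 16) |
 *	            (0xbb << 8) | 0xcc             = 0x1eaabbcc
 *
 * mac_lower is loaded at CAM offset 0, mac_upper at offset 1, and offset 2
 * carries the routing/function/CQ-id attributes for the entry.
 */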

/*
 * Name: qls_config_mcast_mac_addr
 * Function: binds/unbinds a multicast MAC address to the interface.
 */
static int
qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
	uint32_t index)
{
	int ret = 0;
	uint32_t mac_upper = 0;
	uint32_t mac_lower = 0;
	uint32_t value = 0;

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
		Q81_CTL_SEM_SET_MAC_SERDES)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return(-1);
	}

	if (add_mac) {
		mac_upper = (mac_addr[0] << 8) | mac_addr[1];
		mac_lower = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
				(mac_addr[4] << 8) | mac_addr[5];
	}
	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
	if (ret)
		goto qls_config_mcast_mac_addr_exit;

	value = Q81_CTL_MAC_PROTO_AI_E |
			(index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
			Q81_CTL_MAC_PROTO_AI_TYPE_MCAST;

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);

	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
	if (ret)
		goto qls_config_mcast_mac_addr_exit;

	value = Q81_CTL_MAC_PROTO_AI_E |
			(index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
			Q81_CTL_MAC_PROTO_AI_TYPE_MCAST | 0x1;

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);

qls_config_mcast_mac_addr_exit:
	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);

	return (ret);
}

/*
 * Name: qls_wait_for_route_idx_ready
 * Function: Waits until the Routing Index Register is ready for the
 *	requested access. Used by the AllMulticast/Promiscuous and routing
 *	table helpers below.
 */
static int
qls_wait_for_route_idx_ready(qla_host_t *ha, uint32_t op)
{
	uint32_t data32;
	uint32_t count = 3;

	while (count--) {
		data32 = READ_REG32(ha, Q81_CTL_ROUTING_INDEX);

		if (data32 & op)
			return (0);

		QLA_USEC_DELAY(100);
	}
	ha->qla_initiate_recovery = 1;
	return (-1);
}

static int
qls_load_route_idx_reg(qla_host_t *ha, uint32_t index, uint32_t data)
{
	int ret = 0;

	ret = qls_wait_for_route_idx_ready(ha, Q81_CTL_RI_MW);

	if (ret) {
		device_printf(ha->pci_dev, "%s: [0x%08x, 0x%08x] failed\n",
			__func__, index, data);
		goto qls_load_route_idx_reg_exit;
	}

	WRITE_REG32(ha, Q81_CTL_ROUTING_INDEX, index);
	WRITE_REG32(ha, Q81_CTL_ROUTING_DATA, data);

qls_load_route_idx_reg_exit:
	return (ret);
}

static int
qls_load_route_idx_reg_locked(qla_host_t *ha, uint32_t index, uint32_t data)
{
	int ret = 0;

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
		Q81_CTL_SEM_SET_RIDX_DATAREG)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return(-1);
	}

	ret = qls_load_route_idx_reg(ha, index, data);

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);

	return (ret);
}

static int
qls_clear_routing_table(qla_host_t *ha)
{
	int i, ret = 0;

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
		Q81_CTL_SEM_SET_RIDX_DATAREG)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return(-1);
	}

	for (i = 0; i < 16; i++) {
		ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_TYPE_NICQMASK |
			(i << 8) | Q81_CTL_RI_DST_DFLTQ), 0);
		if (ret)
			break;
	}

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);

	return (ret);
}

int
qls_set_promisc(qla_host_t *ha)
{
	int ret;

	ret = qls_load_route_idx_reg_locked(ha,
			(Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
			Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ),
			Q81_CTL_RD_VALID_PKT);
	return (ret);
}

void
qls_reset_promisc(qla_host_t *ha)
{
	qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
			Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ), 0);
	return;
}

int
qls_set_allmulti(qla_host_t *ha)
{
	int ret;

	ret = qls_load_route_idx_reg_locked(ha,
			(Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
			Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ),
			Q81_CTL_RD_MCAST);
	return (ret);
}

void
qls_reset_allmulti(qla_host_t *ha)
{
	qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
			Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ), 0);
	return;
}

static int
qls_init_fw_routing_table(qla_host_t *ha)
{
	int ret = 0;

	ret = qls_clear_routing_table(ha);
	if (ret)
		return (-1);

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
		Q81_CTL_SEM_SET_RIDX_DATAREG)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return(-1);
	}

	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DROP |
			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_ALL_ERROR),
			Q81_CTL_RD_ERROR_PKT);
	if (ret)
		goto qls_init_fw_routing_table_exit;

	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_BCAST),
			Q81_CTL_RD_BCAST);
	if (ret)
		goto qls_init_fw_routing_table_exit;

	if (ha->num_rx_rings > 1) {
		ret = qls_load_route_idx_reg(ha,
				(Q81_CTL_RI_E | Q81_CTL_RI_DST_RSS |
				Q81_CTL_RI_TYPE_NICQMASK |
				Q81_CTL_RI_IDX_RSS_MATCH),
				Q81_CTL_RD_RSS_MATCH);
		if (ret)
			goto qls_init_fw_routing_table_exit;
	}

	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_MCAST_MATCH),
			Q81_CTL_RD_MCAST_REG_MATCH);
	if (ret)
		goto qls_init_fw_routing_table_exit;

	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_CAM_HIT),
			Q81_CTL_RD_CAM_HIT);
	if (ret)
		goto qls_init_fw_routing_table_exit;

qls_init_fw_routing_table_exit:
	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
	return (ret);
}
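
/*
 * Summary of the default routes loaded above (a reading aid, not
 * additional hardware documentation):
 *
 *	route index slot	match			destination
 *	RI_IDX_ALL_ERROR	errored frames		drop
 *	RI_IDX_BCAST		broadcast frames	default CQ
 *	RI_IDX_RSS_MATCH	RSS hash match		RSS CQ (multi-ring only)
 *	RI_IDX_MCAST_MATCH	registered multicast	default CQ
 *	RI_IDX_CAM_HIT		unicast CAM hit		default CQ
 */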

static int
qls_tx_tso_chksum(qla_host_t *ha, struct mbuf *mp, q81_tx_tso_t *tx_mac)
{
        struct ether_vlan_header *eh;
        struct ip *ip;
        struct ip6_hdr *ip6;
	struct tcphdr *th;
        uint32_t ehdrlen, ip_hlen;
	int ret = 0;
        uint16_t etype;
        uint8_t buf[sizeof(struct ip6_hdr)];

        eh = mtod(mp, struct ether_vlan_header *);

        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
                etype = ntohs(eh->evl_proto);
        } else {
                ehdrlen = ETHER_HDR_LEN;
                etype = ntohs(eh->evl_encap_proto);
        }

        switch (etype) {
                case ETHERTYPE_IP:
                        ip = (struct ip *)(mp->m_data + ehdrlen);

                        ip_hlen = sizeof (struct ip);

                        if (mp->m_len < (ehdrlen + ip_hlen)) {
                                m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
                                ip = (struct ip *)buf;
                        }
			tx_mac->opcode = Q81_IOCB_TX_TSO;
			tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV4;

			tx_mac->phdr_offsets = ehdrlen;

			tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
							Q81_TX_TSO_PHDR_SHIFT);

			ip->ip_sum = 0;

			if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
				tx_mac->flags |= Q81_TX_TSO_FLAGS_LSO;

				th = (struct tcphdr *)(ip + 1);

				th->th_sum = in_pseudo(ip->ip_src.s_addr,
						ip->ip_dst.s_addr,
						htons(IPPROTO_TCP));
				tx_mac->mss = mp->m_pkthdr.tso_segsz;
				tx_mac->phdr_length = ip_hlen + ehdrlen +
							(th->th_off << 2);
				break;
			}
			tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC;

                        if (ip->ip_p == IPPROTO_TCP) {
				tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
                        } else if (ip->ip_p == IPPROTO_UDP) {
				tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
                        }
                break;

                case ETHERTYPE_IPV6:
                        ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);

                        ip_hlen = sizeof(struct ip6_hdr);

                        if (mp->m_len < (ehdrlen + ip_hlen)) {
                                m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
                                        buf);
                                ip6 = (struct ip6_hdr *)buf;
                        }

			tx_mac->opcode = Q81_IOCB_TX_TSO;
			tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV6;
			tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC;

			tx_mac->phdr_offsets = ehdrlen;
			tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
							Q81_TX_TSO_PHDR_SHIFT);

                        if (ip6->ip6_nxt == IPPROTO_TCP) {
				tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
                        } else if (ip6->ip6_nxt == IPPROTO_UDP) {
				tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
                        }
                break;

                default:
                        ret = -1;
                break;
        }

        return (ret);
}

#define QLA_TX_MIN_FREE 2
int
qls_hw_tx_done(qla_host_t *ha, uint32_t txr_idx)
{
	uint32_t txr_done, txr_next;

	txr_done = ha->tx_ring[txr_idx].txr_done;
	txr_next = ha->tx_ring[txr_idx].txr_next;

	if (txr_done == txr_next) {
		ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS;
	} else if (txr_done > txr_next) {
		ha->tx_ring[txr_idx].txr_free = txr_done - txr_next;
	} else {
		ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS +
			txr_done - txr_next;
	}

	if (ha->tx_ring[txr_idx].txr_free <= QLA_TX_MIN_FREE)
		return (-1);

	return (0);
}
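
/*
 * Worked example (illustrative; assumes NUM_TX_DESCRIPTORS is 256 for the
 * sake of the arithmetic): if txr_done = 10 and txr_next = 250, the
 * producer has wrapped past the consumer, so
 *
 *	txr_free = 256 + 10 - 250 = 16
 *
 * descriptors remain. qls_hw_send() stops queueing once txr_free falls to
 * QLA_TX_MIN_FREE or below.
 */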

/*
 * Name: qls_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 *	offload. If neither of these criteria is met, it is transmitted
 *	as a regular Ethernet frame.
 */
int
qls_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
	uint32_t txr_next, struct mbuf *mp, uint32_t txr_idx)
{
        q81_tx_mac_t *tx_mac;
	q81_txb_desc_t *tx_desc;
        uint32_t total_length = 0;
        uint32_t i;
        device_t dev;
	int ret = 0;

	dev = ha->pci_dev;

        total_length = mp->m_pkthdr.len;

        if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
                device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
                        __func__, total_length);
                return (-1);
        }

	if (ha->tx_ring[txr_idx].txr_free <= (NUM_TX_DESCRIPTORS >> 2)) {
		if (qls_hw_tx_done(ha, txr_idx)) {
			device_printf(dev, "%s: tx_free[%d] = %d\n",
				__func__, txr_idx,
				ha->tx_ring[txr_idx].txr_free);
			return (-1);
		}
	}

	tx_mac = (q81_tx_mac_t *)&ha->tx_ring[txr_idx].wq_vaddr[txr_next];

	bzero(tx_mac, sizeof(q81_tx_mac_t));

	if ((mp->m_pkthdr.csum_flags &
			(CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO)) != 0) {
		ret = qls_tx_tso_chksum(ha, mp, (q81_tx_tso_t *)tx_mac);
		if (ret)
			return (EINVAL);

		if (mp->m_pkthdr.csum_flags & CSUM_TSO)
			ha->tx_ring[txr_idx].tx_tso_frames++;
		else
			ha->tx_ring[txr_idx].tx_frames++;

	} else {
		tx_mac->opcode = Q81_IOCB_TX_MAC;
	}

	if (mp->m_flags & M_VLANTAG) {
		tx_mac->vlan_tci = mp->m_pkthdr.ether_vtag;
		tx_mac->vlan_off |= Q81_TX_MAC_VLAN_OFF_V;

		ha->tx_ring[txr_idx].tx_vlan_frames++;
	}

	tx_mac->frame_length = total_length;

	tx_mac->tid_lo = txr_next;

	if (nsegs <= MAX_TX_MAC_DESC) {
		QL_DPRINT2((dev, "%s: 1 [%d, %d]\n", __func__, total_length,
			tx_mac->tid_lo));

		for (i = 0; i < nsegs; i++) {
			tx_mac->txd[i].baddr = segs->ds_addr;
			tx_mac->txd[i].length = segs->ds_len;
			segs++;
		}
		tx_mac->txd[(nsegs - 1)].flags = Q81_RXB_DESC_FLAGS_E;

	} else {
		QL_DPRINT2((dev, "%s: 2 [%d, %d]\n", __func__, total_length,
			tx_mac->tid_lo));

		tx_mac->txd[0].baddr =
			ha->tx_ring[txr_idx].tx_buf[txr_next].oal_paddr;
		tx_mac->txd[0].length =
			nsegs * (sizeof(q81_txb_desc_t));
		tx_mac->txd[0].flags = Q81_RXB_DESC_FLAGS_C;

		tx_desc = ha->tx_ring[txr_idx].tx_buf[txr_next].oal_vaddr;

		for (i = 0; i < nsegs; i++) {
			tx_desc->baddr = segs->ds_addr;
			tx_desc->length = segs->ds_len;

			if (i == (nsegs - 1))
				tx_desc->flags = Q81_RXB_DESC_FLAGS_E;
			else
				tx_desc->flags = 0;

			segs++;
			tx_desc++;
		}
	}
	txr_next = (txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
	ha->tx_ring[txr_idx].txr_next = txr_next;

	ha->tx_ring[txr_idx].txr_free--;

	Q81_WR_WQ_PROD_IDX(txr_idx, txr_next);

	return (0);
}
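
/*
 * Descriptor layout sketch for qls_hw_send() (a reading aid): when the
 * packet fits in the inline descriptors (nsegs <= MAX_TX_MAC_DESC), each
 * DMA segment goes directly into tx_mac->txd[] and the last entry is
 * flagged E (end of list). Otherwise txd[0] is flagged C (continuation)
 * and points at the per-descriptor OAL block, which holds one
 * q81_txb_desc_t per segment:
 *
 *	inline:  txd[0..n-1] = segments;           txd[n-1].flags = E
 *	chained: txd[0] -> oal_paddr (flags = C);  oal[n-1].flags = E
 */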

/*
 * Name: qls_del_hw_if
 * Function: Destroys the hardware specific entities corresponding to an
 *	Ethernet Interface
 */
void
qls_del_hw_if(qla_host_t *ha)
{
	uint32_t value;
	int i;

	if (ha->hw_init == 0) {
		qls_hw_reset(ha);
		return;
	}

	for (i = 0; i < ha->num_tx_rings; i++) {
		Q81_SET_WQ_INVALID(i);
	}
	for (i = 0; i < ha->num_rx_rings; i++) {
		Q81_SET_CQ_INVALID(i);
	}

	for (i = 0; i < ha->num_rx_rings; i++) {
		Q81_DISABLE_INTR(ha, i); /* MSI-x i */
	}

	value = (Q81_CTL_INTRE_IHD << Q81_CTL_INTRE_MASK_SHIFT);
	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

	value = (Q81_CTL_INTRE_EI << Q81_CTL_INTRE_MASK_SHIFT);
	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
	ha->flags.intr_enable = 0;

	qls_hw_reset(ha);

	return;
}
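
/*
 * Register write convention used above and throughout this file (an
 * explanatory sketch): control registers pair each functional bit with a
 * write-enable mask in the upper half-word, so no read-modify-write is
 * needed. For the interrupt enable register:
 *
 *	(Q81_CTL_INTRE_EI << Q81_CTL_INTRE_MASK_SHIFT) | Q81_CTL_INTRE_EI
 *		-> sets EI
 *	(Q81_CTL_INTRE_EI << Q81_CTL_INTRE_MASK_SHIFT)
 *		-> clears EI, as done in qls_del_hw_if() above
 *
 * Only bits whose mask half is set are modified by the write.
 */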

/*
 * Name: qls_init_hw_if
 * Function: Creates the hardware specific entities corresponding to an
 *	Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
 *	corresponding to the interface. Enables LRO if allowed.
 */
int
qls_init_hw_if(qla_host_t *ha)
{
	device_t	dev;
	uint32_t	value;
	int		ret = 0;
	int		i;

	QL_DPRINT2((ha->pci_dev, "%s:enter\n", __func__));

	dev = ha->pci_dev;

	ret = qls_hw_reset(ha);
	if (ret)
		goto qls_init_hw_if_exit;

	ha->vm_pgsize = 4096;

	/* Enable FAE and EFE bits in System Register */
	value = Q81_CTL_SYSTEM_ENABLE_FAE | Q81_CTL_SYSTEM_ENABLE_EFE;
	value = (value << Q81_CTL_SYSTEM_MASK_SHIFT) | value;

	WRITE_REG32(ha, Q81_CTL_SYSTEM, value);

	/* Set Default Completion Queue_ID in NIC Rcv Configuration Register */
	value = (Q81_CTL_NIC_RCVC_DCQ_MASK << Q81_CTL_NIC_RCVC_MASK_SHIFT);
	WRITE_REG32(ha, Q81_CTL_NIC_RCV_CONFIG, value);

	/* Function Specific Control Register - Set Page Size and Enable NIC */
	value = Q81_CTL_FUNC_SPECIFIC_FE |
		Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_MASK |
		Q81_CTL_FUNC_SPECIFIC_EPC_O |
		Q81_CTL_FUNC_SPECIFIC_EPC_I |
		Q81_CTL_FUNC_SPECIFIC_EC;
	value = (value << Q81_CTL_FUNC_SPECIFIC_MASK_SHIFT) |
			Q81_CTL_FUNC_SPECIFIC_FE |
			Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_4K |
			Q81_CTL_FUNC_SPECIFIC_EPC_O |
			Q81_CTL_FUNC_SPECIFIC_EPC_I |
			Q81_CTL_FUNC_SPECIFIC_EC;

	WRITE_REG32(ha, Q81_CTL_FUNC_SPECIFIC, value);

	/* Interrupt Mask Register */
	value = Q81_CTL_INTRM_PI;
	value = (value << Q81_CTL_INTRM_MASK_SHIFT) | value;

	WRITE_REG32(ha, Q81_CTL_INTR_MASK, value);

	/* Initialize Completion Queues */
	for (i = 0; i < ha->num_rx_rings; i++) {
		ret = qls_init_comp_queue(ha, i);
		if (ret)
			goto qls_init_hw_if_exit;
	}

	if (ha->num_rx_rings > 1) {
		ret = qls_init_rss(ha);
		if (ret)
			goto qls_init_hw_if_exit;
	}

	/* Initialize Work Queue */

	for (i = 0; i < ha->num_tx_rings; i++) {
		ret = qls_init_work_queue(ha, i);
		if (ret)
			goto qls_init_hw_if_exit;
	}

	if (ret)
		goto qls_init_hw_if_exit;

	/* Set up CAM RAM with MAC Address */
	ret = qls_config_unicast_mac_addr(ha, 1);
	if (ret)
		goto qls_init_hw_if_exit;

	ret = qls_hw_add_all_mcast(ha);
	if (ret)
		goto qls_init_hw_if_exit;

	/* Initialize Firmware Routing Table */
	ret = qls_init_fw_routing_table(ha);
	if (ret)
		goto qls_init_hw_if_exit;

	/* Get Chip Revision ID */
	ha->rev_id = READ_REG32(ha, Q81_CTL_REV_ID);

	/* Enable Global Interrupt */
	value = Q81_CTL_INTRE_EI;
	value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;

	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

	/* Enable Interrupt Handshake Disable */
	value = Q81_CTL_INTRE_IHD;
	value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;

	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

	/* Enable Completion Interrupt */

	ha->flags.intr_enable = 1;

	for (i = 0; i < ha->num_rx_rings; i++) {
		Q81_ENABLE_INTR(ha, i); /* MSI-x i */
	}

	ha->hw_init = 1;

	qls_mbx_get_link_status(ha);

	QL_DPRINT2((ha->pci_dev, "%s:rxr [0x%08x]\n", __func__,
		ha->rx_ring[0].cq_db_offset));
	QL_DPRINT2((ha->pci_dev, "%s:txr [0x%08x]\n", __func__,
		ha->tx_ring[0].wq_db_offset));

	for (i = 0; i < ha->num_rx_rings; i++) {
		Q81_WR_CQ_CONS_IDX(i, 0);
		Q81_WR_LBQ_PROD_IDX(i, ha->rx_ring[i].lbq_in);
		Q81_WR_SBQ_PROD_IDX(i, ha->rx_ring[i].sbq_in);

		QL_DPRINT2((dev, "%s: [wq_idx, cq_idx, lbq_idx, sbq_idx]"
			"[0x%08x, 0x%08x, 0x%08x, 0x%08x]\n", __func__,
			Q81_RD_WQ_IDX(i), Q81_RD_CQ_IDX(i), Q81_RD_LBQ_IDX(i),
			Q81_RD_SBQ_IDX(i)));
	}

	for (i = 0; i < ha->num_rx_rings; i++) {
		Q81_SET_CQ_VALID(i);
	}

qls_init_hw_if_exit:
	QL_DPRINT2((ha->pci_dev, "%s:exit\n", __func__));
	return (ret);
}

static int
qls_wait_for_config_reg_bits(qla_host_t *ha, uint32_t bits, uint32_t value)
{
	uint32_t data32;
	uint32_t count = 3;

	while (count--) {
		data32 = READ_REG32(ha, Q81_CTL_CONFIG);

		if ((data32 & bits) == value)
			return (0);

		QLA_USEC_DELAY(100);
	}
	ha->qla_initiate_recovery = 1;
	device_printf(ha->pci_dev, "%s: failed\n", __func__);
	return (-1);
}

static uint8_t q81_hash_key[] = {
			0xda, 0x56, 0x5a, 0x6d,
			0xc2, 0x0e, 0x5b, 0x25,
			0x3d, 0x25, 0x67, 0x41,
			0xb0, 0x8f, 0xa3, 0x43,
			0xcb, 0x2b, 0xca, 0xd0,
			0xb4, 0x30, 0x7b, 0xae,
			0xa3, 0x2d, 0xcb, 0x77,
			0x0c, 0xf2, 0x30, 0x80,
			0x3b, 0xb7, 0x42, 0x6a,
			0xfa, 0x01, 0xac, 0xbe };

static int
qls_init_rss(qla_host_t *ha)
{
	q81_rss_icb_t	*rss_icb;
	int		ret = 0;
	int		i;
	uint32_t	value;

	rss_icb = ha->rss_dma.dma_b;

	bzero(rss_icb, sizeof (q81_rss_icb_t));

	rss_icb->flags_base_cq_num = Q81_RSS_ICB_FLAGS_L4K |
				Q81_RSS_ICB_FLAGS_L6K | Q81_RSS_ICB_FLAGS_LI |
				Q81_RSS_ICB_FLAGS_LB | Q81_RSS_ICB_FLAGS_LM |
				Q81_RSS_ICB_FLAGS_RT4 | Q81_RSS_ICB_FLAGS_RT6;

	rss_icb->mask = 0x3FF;

	for (i = 0; i < Q81_RSS_ICB_NUM_INDTBL_ENTRIES; i++) {
		rss_icb->cq_id[i] = (i & (ha->num_rx_rings - 1));
	}

	memcpy(rss_icb->ipv6_rss_hash_key, q81_hash_key, 40);
	memcpy(rss_icb->ipv4_rss_hash_key, q81_hash_key, 16);

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);

	if (ret)
		goto qls_init_rss_exit;

	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);

	if (ret) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		goto qls_init_rss_exit;
	}

	value = (uint32_t)ha->rss_dma.dma_addr;
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);

	value = (uint32_t)(ha->rss_dma.dma_addr >> 32);
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);

	value = (Q81_CTL_CONFIG_LR << Q81_CTL_CONFIG_MASK_SHIFT) |
			Q81_CTL_CONFIG_LR;

	WRITE_REG32(ha, Q81_CTL_CONFIG, value);

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);

qls_init_rss_exit:
	return (ret);
}
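
/*
 * Indirection example (illustrative; assumes num_rx_rings = 4, a power of
 * two): the indirection table entries cycle 0, 1, 2, 3, 0, 1, ... because
 *
 *	cq_id[i] = i & (num_rx_rings - 1) = i & 3 = i % 4
 *
 * so the low bits of the RSS hash (masked by rss_icb->mask = 0x3FF)
 * spread inbound flows evenly across the completion queues.
 */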

static int
qls_init_comp_queue(qla_host_t *ha, int cid)
{
	q81_cq_icb_t	*cq_icb;
	qla_rx_ring_t	*rxr;
	int		ret = 0;
	uint32_t	value;

	rxr = &ha->rx_ring[cid];

	rxr->cq_db_offset = ha->vm_pgsize * (128 + cid);

	cq_icb = rxr->cq_icb_vaddr;

	bzero(cq_icb, sizeof (q81_cq_icb_t));

	cq_icb->msix_vector = cid;
	cq_icb->flags = Q81_CQ_ICB_FLAGS_LC |
			Q81_CQ_ICB_FLAGS_LI |
			Q81_CQ_ICB_FLAGS_LL |
			Q81_CQ_ICB_FLAGS_LS |
			Q81_CQ_ICB_FLAGS_LV;

	cq_icb->length_v = NUM_CQ_ENTRIES;

	cq_icb->cq_baddr_lo = (rxr->cq_base_paddr & 0xFFFFFFFF);
	cq_icb->cq_baddr_hi = (rxr->cq_base_paddr >> 32) & 0xFFFFFFFF;

	cq_icb->cqi_addr_lo = (rxr->cqi_paddr & 0xFFFFFFFF);
	cq_icb->cqi_addr_hi = (rxr->cqi_paddr >> 32) & 0xFFFFFFFF;

	cq_icb->pkt_idelay = 10;
	cq_icb->idelay = 100;

	cq_icb->lbq_baddr_lo = (rxr->lbq_addr_tbl_paddr & 0xFFFFFFFF);
	cq_icb->lbq_baddr_hi = (rxr->lbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;

	cq_icb->lbq_bsize = QLA_LGB_SIZE;
	cq_icb->lbq_length = QLA_NUM_LGB_ENTRIES;

	cq_icb->sbq_baddr_lo = (rxr->sbq_addr_tbl_paddr & 0xFFFFFFFF);
	cq_icb->sbq_baddr_hi = (rxr->sbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;

	cq_icb->sbq_bsize = (uint16_t)ha->msize;
	cq_icb->sbq_length = QLA_NUM_SMB_ENTRIES;

	QL_DUMP_CQ(ha);

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);

	if (ret)
		goto qls_init_comp_queue_exit;

	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);

	if (ret) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		goto qls_init_comp_queue_exit;
	}

	value = (uint32_t)rxr->cq_icb_paddr;
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);

	value = (uint32_t)(rxr->cq_icb_paddr >> 32);
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);

	value = Q81_CTL_CONFIG_LCQ | Q81_CTL_CONFIG_Q_NUM_MASK;
	value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LCQ;
	value |= (cid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
	WRITE_REG32(ha, Q81_CTL_CONFIG, value);

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);

	rxr->cq_next = 0;
	rxr->lbq_next = rxr->lbq_free = 0;
	rxr->sbq_next = rxr->sbq_free = 0;
	rxr->rx_free = rxr->rx_next = 0;
	rxr->lbq_in = (QLA_NUM_LGB_ENTRIES - 1) & ~0xF;
	rxr->sbq_in = (QLA_NUM_SMB_ENTRIES - 1) & ~0xF;

qls_init_comp_queue_exit:
	return (ret);
}
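
/*
 * ICB load sequence shared by the queue-init routines (a summary of the
 * code above, not new behavior): wait for any previous load to finish,
 * point the ICB access window at the DMA'd control block while holding
 * the ICB semaphore, kick the load through the CONFIG register with the
 * queue number, then wait for the load bit to clear:
 *
 *	qls_wait_for_config_reg_bits(ha, LOAD_BIT, 0);
 *	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO/HI, icb_paddr);
 *	WRITE_REG32(ha, Q81_CTL_CONFIG, masked LOAD_BIT | queue number);
 *	qls_wait_for_config_reg_bits(ha, LOAD_BIT, 0);
 *
 * LOAD_BIT is Q81_CTL_CONFIG_LCQ for completion queues (above) and
 * Q81_CTL_CONFIG_LRQ for work queues (below).
 */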

static int
qls_init_work_queue(qla_host_t *ha, int wid)
{
	q81_wq_icb_t	*wq_icb;
	qla_tx_ring_t	*txr;
	int		ret = 0;
	uint32_t	value;

	txr = &ha->tx_ring[wid];

	txr->wq_db_addr = (struct resource *)((uint8_t *)ha->pci_reg1
						+ (ha->vm_pgsize * wid));

	txr->wq_db_offset = (ha->vm_pgsize * wid);

	wq_icb = txr->wq_icb_vaddr;
	bzero(wq_icb, sizeof (q81_wq_icb_t));

	wq_icb->length_v = NUM_TX_DESCRIPTORS |
				Q81_WQ_ICB_VALID;

	wq_icb->flags = Q81_WQ_ICB_FLAGS_LO | Q81_WQ_ICB_FLAGS_LI |
			Q81_WQ_ICB_FLAGS_LB | Q81_WQ_ICB_FLAGS_LC;

	wq_icb->wqcqid_rss = wid;

	wq_icb->baddr_lo = txr->wq_paddr & 0xFFFFFFFF;
	wq_icb->baddr_hi = (txr->wq_paddr >> 32) & 0xFFFFFFFF;

	wq_icb->ci_addr_lo = txr->txr_cons_paddr & 0xFFFFFFFF;
	wq_icb->ci_addr_hi = (txr->txr_cons_paddr >> 32) & 0xFFFFFFFF;

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);

	if (ret)
		goto qls_init_wq_exit;

	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);

	if (ret) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		goto qls_init_wq_exit;
	}

	value = (uint32_t)txr->wq_icb_paddr;
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);

	value = (uint32_t)(txr->wq_icb_paddr >> 32);
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);

	value = Q81_CTL_CONFIG_LRQ | Q81_CTL_CONFIG_Q_NUM_MASK;
	value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LRQ;
	value |= (wid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
	WRITE_REG32(ha, Q81_CTL_CONFIG, value);

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);

	txr->txr_free = NUM_TX_DESCRIPTORS;
	txr->txr_next = 0;
	txr->txr_done = 0;

qls_init_wq_exit:
	return (ret);
}

static int
qls_hw_add_all_mcast(qla_host_t *ha)
{
	int i, nmcast;

	nmcast = ha->nmcast;

	for (i = 0; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
		if ((ha->mcast[i].addr[0] != 0) ||
			(ha->mcast[i].addr[1] != 0) ||
			(ha->mcast[i].addr[2] != 0) ||
			(ha->mcast[i].addr[3] != 0) ||
			(ha->mcast[i].addr[4] != 0) ||
			(ha->mcast[i].addr[5] != 0)) {
			if (qls_config_mcast_mac_addr(ha, ha->mcast[i].addr,
				1, i)) {
				device_printf(ha->pci_dev, "%s: failed\n",
					__func__);
				return (-1);
			}

			nmcast--;
		}
	}
	return 0;
}

static int
qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
{
	int i;

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
		if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
			return 0; /* it has already been added */
	}

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
		if ((ha->mcast[i].addr[0] == 0) &&
			(ha->mcast[i].addr[1] == 0) &&
			(ha->mcast[i].addr[2] == 0) &&
			(ha->mcast[i].addr[3] == 0) &&
			(ha->mcast[i].addr[4] == 0) &&
			(ha->mcast[i].addr[5] == 0)) {
			if (qls_config_mcast_mac_addr(ha, mta, 1, i))
				return (-1);

			bcopy(mta, ha->mcast[i].addr, Q8_MAC_ADDR_LEN);
			ha->nmcast++;

			return 0;
		}
	}
	return 0;
}

static int
qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
{
	int i;

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
		if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
			if (qls_config_mcast_mac_addr(ha, mta, 0, i))
				return (-1);

			ha->mcast[i].addr[0] = 0;
			ha->mcast[i].addr[1] = 0;
			ha->mcast[i].addr[2] = 0;
			ha->mcast[i].addr[3] = 0;
			ha->mcast[i].addr[4] = 0;
			ha->mcast[i].addr[5] = 0;

			ha->nmcast--;

			return 0;
		}
	}
	return 0;
}

/*
 * Name: qls_hw_set_multi
1272  *	hardware (for the given interface)
1273  */
1274 void
1275 qls_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
1276 	uint32_t add_mac)
1277 {
1278 	int i;
1279 
1280 	for (i = 0; i < mcnt; i++) {
1281 		if (add_mac) {
1282 			if (qls_hw_add_mcast(ha, mta))
1283 				break;
1284 		} else {
1285 			if (qls_hw_del_mcast(ha, mta))
1286 				break;
1287 		}
1288 
1289 		mta += Q8_MAC_ADDR_LEN;
1290 	}
1291 	return;
1292 }
1293 
1294 void
1295 qls_update_link_state(qla_host_t *ha)
1296 {
1297 	uint32_t link_state;
1298 	uint32_t prev_link_state;
1299 
1300 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1301 		ha->link_up = 0;
1302 		return;
1303 	}
1304 	link_state = READ_REG32(ha, Q81_CTL_STATUS);
1305 
1306 	prev_link_state =  ha->link_up;
1307 
1308 	if ((ha->pci_func & 0x1) == 0)
1309 		ha->link_up = ((link_state & Q81_CTL_STATUS_PL0)? 1 : 0);
1310 	else
1311 		ha->link_up = ((link_state & Q81_CTL_STATUS_PL1)? 1 : 0);
1312 
1313 	if (prev_link_state !=  ha->link_up) {
1314 		if (ha->link_up) {
1315 			if_link_state_change(ha->ifp, LINK_STATE_UP);
1316 		} else {
1317 			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
1318 		}
1319 	}
1320 	return;
1321 }
1322 
1323 static void
1324 qls_free_tx_ring_dma(qla_host_t *ha, int r_idx)
1325 {
1326 	if (ha->tx_ring[r_idx].flags.wq_dma) {
1327 		qls_free_dmabuf(ha, &ha->tx_ring[r_idx].wq_dma);
1328 		ha->tx_ring[r_idx].flags.wq_dma = 0;
1329 	}
1330 
1331 	if (ha->tx_ring[r_idx].flags.privb_dma) {
1332 		qls_free_dmabuf(ha, &ha->tx_ring[r_idx].privb_dma);
1333 		ha->tx_ring[r_idx].flags.privb_dma = 0;
1334 	}
1335 	return;
1336 }
1337 
1338 static void
1339 qls_free_tx_dma(qla_host_t *ha)
1340 {
1341 	int i, j;
1342 	qla_tx_buf_t *txb;
1343 
1344 	for (i = 0; i < ha->num_tx_rings; i++) {
1345 		qls_free_tx_ring_dma(ha, i);
1346 
1347 		for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
1348 			txb = &ha->tx_ring[i].tx_buf[j];
1349 
1350 			if (txb->map) {
1351 				bus_dmamap_destroy(ha->tx_tag, txb->map);
1352 			}
1353 		}
1354 	}
1355 
1356         if (ha->tx_tag != NULL) {
1357                 bus_dma_tag_destroy(ha->tx_tag);
1358                 ha->tx_tag = NULL;
1359         }
1360 
1361 	return;
1362 }
1363 
1364 static int
1365 qls_alloc_tx_ring_dma(qla_host_t *ha, int ridx)
1366 {
1367 	int		ret = 0, i;
1368 	uint8_t		*v_addr;
1369 	bus_addr_t	p_addr;
1370 	qla_tx_buf_t	*txb;
1371 	device_t	dev = ha->pci_dev;
1372 
1373 	ha->tx_ring[ridx].wq_dma.alignment = 8;
1374 	ha->tx_ring[ridx].wq_dma.size =
1375 		NUM_TX_DESCRIPTORS * (sizeof (q81_tx_cmd_t));
1376 
1377 	ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].wq_dma);
1378 
1379 	if (ret) {
1380 		device_printf(dev, "%s: [%d] txr failed\n", __func__, ridx);
1381 		goto qls_alloc_tx_ring_dma_exit;
1382 	}
1383 	ha->tx_ring[ridx].flags.wq_dma = 1;
1384 
1385 	ha->tx_ring[ridx].privb_dma.alignment = 8;
1386 	ha->tx_ring[ridx].privb_dma.size = QLA_TX_PRIVATE_BSIZE;
1387 
1388 	ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].privb_dma);
1389 
1390 	if (ret) {
1391 		device_printf(dev, "%s: [%d] oalb failed\n", __func__, ridx);
1392 		goto qls_alloc_tx_ring_dma_exit;
1393 	}
1394 
1395 	ha->tx_ring[ridx].flags.privb_dma = 1;
1396 
1397 	ha->tx_ring[ridx].wq_vaddr = ha->tx_ring[ridx].wq_dma.dma_b;
1398 	ha->tx_ring[ridx].wq_paddr = ha->tx_ring[ridx].wq_dma.dma_addr;
1399 
1400 	v_addr = ha->tx_ring[ridx].privb_dma.dma_b;
1401 	p_addr = ha->tx_ring[ridx].privb_dma.dma_addr;
1402 
1403 	ha->tx_ring[ridx].wq_icb_vaddr = v_addr;
1404 	ha->tx_ring[ridx].wq_icb_paddr = p_addr;
1405 
1406 	ha->tx_ring[ridx].txr_cons_vaddr =
1407 		(uint32_t *)(v_addr + (PAGE_SIZE >> 1));
1408 	ha->tx_ring[ridx].txr_cons_paddr = p_addr + (PAGE_SIZE >> 1);
1409 
1410 	v_addr = v_addr + (PAGE_SIZE >> 1);
1411 	p_addr = p_addr + (PAGE_SIZE >> 1);
1412 
1413 	txb = ha->tx_ring[ridx].tx_buf;
1414 
1415 	for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
1416 		txb[i].oal_vaddr = v_addr;
1417 		txb[i].oal_paddr = p_addr;
1418 
1419 		v_addr = v_addr + QLA_OAL_BLK_SIZE;
1420 		p_addr = p_addr + QLA_OAL_BLK_SIZE;
1421 	}
1422 
1423 qls_alloc_tx_ring_dma_exit:
1424 	return (ret);
1425 }
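
/*
 * Sketch of how the private DMA buffer is carved up above (a reading aid;
 * the offsets follow directly from the code): the work-queue ICB sits at
 * the start, the consumer-index word half a page in, and the remainder
 * provides NUM_TX_DESCRIPTORS OAL blocks of QLA_OAL_BLK_SIZE bytes each,
 * one per tx buffer, used by qls_hw_send() when a packet has more
 * segments than fit inline in the IOCB.
 */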

static int
qls_alloc_tx_dma(qla_host_t *ha)
{
	int	i, j;
	int	ret = 0;
	qla_tx_buf_t *txb;

	if (bus_dma_tag_create(NULL,	/* parent */
		1, 0,	/* alignment, bounds */
		BUS_SPACE_MAXADDR,	/* lowaddr */
		BUS_SPACE_MAXADDR,	/* highaddr */
		NULL, NULL,	/* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,	/* maxsize */
		QLA_MAX_SEGMENTS,	/* nsegments */
		PAGE_SIZE,	/* maxsegsize */
		BUS_DMA_ALLOCNOW,	/* flags */
		NULL,	/* lockfunc */
		NULL,	/* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}

	for (i = 0; i < ha->num_tx_rings; i++) {
		ret = qls_alloc_tx_ring_dma(ha, i);

		if (ret) {
			qls_free_tx_dma(ha);
			break;
		}

		for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
			txb = &ha->tx_ring[i].tx_buf[j];

			ret = bus_dmamap_create(ha->tx_tag,
				BUS_DMA_NOWAIT, &txb->map);
			if (ret) {
				ha->err_tx_dmamap_create++;
				device_printf(ha->pci_dev,
				"%s: bus_dmamap_create failed[%d, %d, %d]\n",
				__func__, ret, i, j);

				qls_free_tx_dma(ha);

				return (ret);
			}
		}
	}

	return (ret);
}

static void
qls_free_rss_dma(qla_host_t *ha)
{
	qls_free_dmabuf(ha, &ha->rss_dma);
	ha->flags.rss_dma = 0;
}

static int
qls_alloc_rss_dma(qla_host_t *ha)
{
	int ret = 0;

	ha->rss_dma.alignment = 4;
	ha->rss_dma.size = PAGE_SIZE;

	ret = qls_alloc_dmabuf(ha, &ha->rss_dma);

	if (ret)
		device_printf(ha->pci_dev, "%s: failed\n", __func__);
	else
		ha->flags.rss_dma = 1;

	return (ret);
}

static void
qls_free_mpi_dma(qla_host_t *ha)
{
	qls_free_dmabuf(ha, &ha->mpi_dma);
	ha->flags.mpi_dma = 0;
}

static int
qls_alloc_mpi_dma(qla_host_t *ha)
{
	int ret = 0;

	ha->mpi_dma.alignment = 4;
	ha->mpi_dma.size = (0x4000 * 4);

	ret = qls_alloc_dmabuf(ha, &ha->mpi_dma);
	if (ret)
		device_printf(ha->pci_dev, "%s: failed\n", __func__);
	else
		ha->flags.mpi_dma = 1;

	return (ret);
}

static void
qls_free_rx_ring_dma(qla_host_t *ha, int ridx)
{
	if (ha->rx_ring[ridx].flags.cq_dma) {
		qls_free_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
		ha->rx_ring[ridx].flags.cq_dma = 0;
	}

	if (ha->rx_ring[ridx].flags.lbq_dma) {
		qls_free_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
		ha->rx_ring[ridx].flags.lbq_dma = 0;
	}

	if (ha->rx_ring[ridx].flags.sbq_dma) {
		qls_free_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
		ha->rx_ring[ridx].flags.sbq_dma = 0;
	}

	if (ha->rx_ring[ridx].flags.lb_dma) {
		qls_free_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
		ha->rx_ring[ridx].flags.lb_dma = 0;
	}
	return;
}

static void
qls_free_rx_dma(qla_host_t *ha)
{
	int i;

	for (i = 0; i < ha->num_rx_rings; i++) {
		qls_free_rx_ring_dma(ha, i);
	}

	if (ha->rx_tag != NULL) {
		bus_dma_tag_destroy(ha->rx_tag);
		ha->rx_tag = NULL;
	}

	return;
}

static int
qls_alloc_rx_ring_dma(qla_host_t *ha, int ridx)
{
	int				i, ret = 0;
	uint8_t				*v_addr;
	bus_addr_t			p_addr;
	volatile q81_bq_addr_e_t	*bq_e;
	device_t			dev = ha->pci_dev;

	ha->rx_ring[ridx].cq_dma.alignment = 128;
	ha->rx_ring[ridx].cq_dma.size =
		(NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t))) + PAGE_SIZE;

	ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);

	if (ret) {
		device_printf(dev, "%s: [%d] cq failed\n", __func__, ridx);
		goto qls_alloc_rx_ring_dma_exit;
	}
	ha->rx_ring[ridx].flags.cq_dma = 1;

	ha->rx_ring[ridx].lbq_dma.alignment = 8;
	ha->rx_ring[ridx].lbq_dma.size = QLA_LGBQ_AND_TABLE_SIZE;

	ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);

	if (ret) {
		device_printf(dev, "%s: [%d] lbq failed\n", __func__, ridx);
		goto qls_alloc_rx_ring_dma_exit;
	}
	ha->rx_ring[ridx].flags.lbq_dma = 1;

	ha->rx_ring[ridx].sbq_dma.alignment = 8;
	ha->rx_ring[ridx].sbq_dma.size = QLA_SMBQ_AND_TABLE_SIZE;

	ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);

	if (ret) {
		device_printf(dev, "%s: [%d] sbq failed\n", __func__, ridx);
		goto qls_alloc_rx_ring_dma_exit;
	}
	ha->rx_ring[ridx].flags.sbq_dma = 1;

	ha->rx_ring[ridx].lb_dma.alignment = 8;
	ha->rx_ring[ridx].lb_dma.size = (QLA_LGB_SIZE * QLA_NUM_LGB_ENTRIES);

	ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
	if (ret) {
		device_printf(dev, "%s: [%d] lb failed\n", __func__, ridx);
		goto qls_alloc_rx_ring_dma_exit;
	}
	ha->rx_ring[ridx].flags.lb_dma = 1;

	bzero(ha->rx_ring[ridx].cq_dma.dma_b, ha->rx_ring[ridx].cq_dma.size);
	bzero(ha->rx_ring[ridx].lbq_dma.dma_b, ha->rx_ring[ridx].lbq_dma.size);
	bzero(ha->rx_ring[ridx].sbq_dma.dma_b, ha->rx_ring[ridx].sbq_dma.size);
	bzero(ha->rx_ring[ridx].lb_dma.dma_b, ha->rx_ring[ridx].lb_dma.size);

	/* completion queue */
	ha->rx_ring[ridx].cq_base_vaddr = ha->rx_ring[ridx].cq_dma.dma_b;
	ha->rx_ring[ridx].cq_base_paddr = ha->rx_ring[ridx].cq_dma.dma_addr;

	v_addr = ha->rx_ring[ridx].cq_dma.dma_b;
	p_addr = ha->rx_ring[ridx].cq_dma.dma_addr;

	v_addr = v_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
	p_addr = p_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));

	/* completion queue icb */
	ha->rx_ring[ridx].cq_icb_vaddr = v_addr;
	ha->rx_ring[ridx].cq_icb_paddr = p_addr;

	v_addr = v_addr + (PAGE_SIZE >> 2);
	p_addr = p_addr + (PAGE_SIZE >> 2);

	/* completion queue index register */
	ha->rx_ring[ridx].cqi_vaddr = (uint32_t *)v_addr;
	ha->rx_ring[ridx].cqi_paddr = p_addr;

	v_addr = ha->rx_ring[ridx].lbq_dma.dma_b;
	p_addr = ha->rx_ring[ridx].lbq_dma.dma_addr;

	/* large buffer queue address table */
	ha->rx_ring[ridx].lbq_addr_tbl_vaddr = v_addr;
	ha->rx_ring[ridx].lbq_addr_tbl_paddr = p_addr;

	/* large buffer queue */
	ha->rx_ring[ridx].lbq_vaddr = v_addr + PAGE_SIZE;
	ha->rx_ring[ridx].lbq_paddr = p_addr + PAGE_SIZE;

	v_addr = ha->rx_ring[ridx].sbq_dma.dma_b;
	p_addr = ha->rx_ring[ridx].sbq_dma.dma_addr;

	/* small buffer queue address table */
	ha->rx_ring[ridx].sbq_addr_tbl_vaddr = v_addr;
	ha->rx_ring[ridx].sbq_addr_tbl_paddr = p_addr;

	/* small buffer queue */
	ha->rx_ring[ridx].sbq_vaddr = v_addr + PAGE_SIZE;
	ha->rx_ring[ridx].sbq_paddr = p_addr + PAGE_SIZE;

	ha->rx_ring[ridx].lb_vaddr = ha->rx_ring[ridx].lb_dma.dma_b;
	ha->rx_ring[ridx].lb_paddr = ha->rx_ring[ridx].lb_dma.dma_addr;

	/* Initialize Large Buffer Queue Table */

	p_addr = ha->rx_ring[ridx].lbq_paddr;
	bq_e = ha->rx_ring[ridx].lbq_addr_tbl_vaddr;

	bq_e->addr_lo = p_addr & 0xFFFFFFFF;
	bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;

	p_addr = ha->rx_ring[ridx].lb_paddr;
	bq_e = ha->rx_ring[ridx].lbq_vaddr;

	for (i = 0; i < QLA_NUM_LGB_ENTRIES; i++) {
		bq_e->addr_lo = p_addr & 0xFFFFFFFF;
		bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;

		p_addr = p_addr + QLA_LGB_SIZE;
		bq_e++;
	}

	/* Initialize Small Buffer Queue Table */

	p_addr = ha->rx_ring[ridx].sbq_paddr;
	bq_e = ha->rx_ring[ridx].sbq_addr_tbl_vaddr;

	for (i = 0; i < (QLA_SBQ_SIZE / QLA_PAGE_SIZE); i++) {
		bq_e->addr_lo = p_addr & 0xFFFFFFFF;
		bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;

		p_addr = p_addr + QLA_PAGE_SIZE;
		bq_e++;
	}

qls_alloc_rx_ring_dma_exit:
	return (ret);
}

static int
qls_alloc_rx_dma(qla_host_t *ha)
{
	int	i;
	int	ret = 0;

	if (bus_dma_tag_create(NULL,	/* parent */
			1, 0,	/* alignment, bounds */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,	/* filter, filterarg */
			MJUM9BYTES,	/* maxsize */
			1,	/* nsegments */
			MJUM9BYTES,	/* maxsegsize */
			BUS_DMA_ALLOCNOW,	/* flags */
			NULL,	/* lockfunc */
			NULL,	/* lockfuncarg */
			&ha->rx_tag)) {
		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
			__func__);

		return (ENOMEM);
	}

	for (i = 0; i < ha->num_rx_rings; i++) {
		ret = qls_alloc_rx_ring_dma(ha, i);

		if (ret) {
			qls_free_rx_dma(ha);
			break;
		}
	}

	return (ret);
}

static int
qls_wait_for_flash_ready(qla_host_t *ha)
{
	uint32_t data32;
	uint32_t count = 3;

	while (count--) {
		data32 = READ_REG32(ha, Q81_CTL_FLASH_ADDR);

		if (data32 & Q81_CTL_FLASH_ADDR_ERR)
			goto qls_wait_for_flash_ready_exit;

		if (data32 & Q81_CTL_FLASH_ADDR_RDY)
			return (0);

		QLA_USEC_DELAY(100);
	}

qls_wait_for_flash_ready_exit:
	QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));

	return (-1);
}

/*
 * Name: qls_rd_flash32
 * Function: Read Flash Memory
 */
int
qls_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data)
{
	int ret;

	ret = qls_wait_for_flash_ready(ha);

	if (ret)
		return (ret);

	WRITE_REG32(ha, Q81_CTL_FLASH_ADDR, (addr | Q81_CTL_FLASH_ADDR_R));

	ret = qls_wait_for_flash_ready(ha);

	if (ret)
		return (ret);

	*data = READ_REG32(ha, Q81_CTL_FLASH_DATA);

	return 0;
}
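
/*
 * Usage sketch (an assumption mirroring qls_rd_nic_params() below, not a
 * new code path): flash is addressed in 32-bit words, so byte offsets are
 * shifted right by 2, and the flash semaphore must be held around the
 * accesses:
 *
 *	uint32_t word;
 *	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_FLASH,
 *	    Q81_CTL_SEM_SET_FLASH) == 0) {
 *		if (qls_rd_flash32(ha, byte_off >> 2, &word) == 0)
 *			... word now holds the flash contents ...
 *		qls_sem_unlock(ha, Q81_CTL_SEM_MASK_FLASH);
 *	}
 */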

static int
qls_flash_validate(qla_host_t *ha, const char *signature)
{
	uint16_t csum16 = 0;
	uint16_t *data16;
	int i;

	if (bcmp(ha->flash.id, signature, 4)) {
		QL_DPRINT1((ha->pci_dev, "%s: invalid signature "
			"%x:%x:%x:%x %s\n", __func__, ha->flash.id[0],
			ha->flash.id[1], ha->flash.id[2], ha->flash.id[3],
			signature));
		return(-1);
	}

	data16 = (uint16_t *)&ha->flash;

	for (i = 0; i < (sizeof (q81_flash_t) >> 1); i++) {
		csum16 += *data16++;
	}

	if (csum16) {
		QL_DPRINT1((ha->pci_dev, "%s: invalid checksum\n", __func__));
		return(-1);
	}
	return(0);
}
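
/*
 * Checksum convention (explanatory note): the flash image is stored so
 * that its 16-bit words sum to zero modulo 2^16. For a toy two-word
 * image { 0x1234, 0xedcc }:
 *
 *	0x1234 + 0xedcc = 0x10000 == 0 (mod 2^16)
 *
 * so any corrupted word leaves csum16 nonzero and the image is rejected.
 */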

int
qls_rd_nic_params(qla_host_t *ha)
{
	int		i, ret = 0;
	uint32_t	faddr;
	uint32_t	*qflash;

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_FLASH, Q81_CTL_SEM_SET_FLASH)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return(-1);
	}

	if ((ha->pci_func & 0x1) == 0)
		faddr = Q81_F0_FLASH_OFFSET >> 2;
	else
		faddr = Q81_F1_FLASH_OFFSET >> 2;

	qflash = (uint32_t *)&ha->flash;

	for (i = 0; i < (sizeof(q81_flash_t) >> 2); i++) {
		ret = qls_rd_flash32(ha, faddr, qflash);

		if (ret)
			goto qls_rd_flash_data_exit;

		faddr++;
		qflash++;
	}

	QL_DUMP_BUFFER8(ha, __func__, (&ha->flash), (sizeof (q81_flash_t)));

	ret = qls_flash_validate(ha, Q81_FLASH_ID);

	if (ret)
		goto qls_rd_flash_data_exit;

	bcopy(ha->flash.mac_addr0, ha->mac_addr, ETHER_ADDR_LEN);

	QL_DPRINT1((ha->pci_dev, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
		__func__, ha->mac_addr[0], ha->mac_addr[1], ha->mac_addr[2],
		ha->mac_addr[3], ha->mac_addr[4], ha->mac_addr[5]));

qls_rd_flash_data_exit:

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_FLASH);

	return(ret);
}

static int
qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value)
{
	uint32_t count = 30;
	uint32_t data;

	while (count--) {
		WRITE_REG32(ha, Q81_CTL_SEMAPHORE, (mask | value));

		data = READ_REG32(ha, Q81_CTL_SEMAPHORE);

		if (data & value) {
			return (0);
		} else {
			QLA_USEC_DELAY(100);
		}
	}
	ha->qla_initiate_recovery = 1;
	return (-1);
}
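
/*
 * Lock/unlock pattern (a summary of the callers in this file): each
 * hardware resource has a mask/set constant pair, and the set value reads
 * back only once this function owns the semaphore:
 *
 *	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB))
 *		return (-1);	-- about 3ms of retries exhausted
 *	... access the ICB window ...
 *	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
 */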
1894 
1895 static void
1896 qls_sem_unlock(qla_host_t *ha, uint32_t mask)
1897 {
1898 	WRITE_REG32(ha, Q81_CTL_SEMAPHORE, mask);
1899 }
1900 
1901 static int
1902 qls_wait_for_proc_addr_ready(qla_host_t *ha)
1903 {
1904 	uint32_t data32;
1905 	uint32_t count = 3;
1906 
1907 	while (count--) {
1908 		data32 = READ_REG32(ha, Q81_CTL_PROC_ADDR);
1909 
1910 		if (data32 & Q81_CTL_PROC_ADDR_ERR)
1911 			goto qls_wait_for_proc_addr_ready_exit;
1912 
1913 		if (data32 & Q81_CTL_PROC_ADDR_RDY)
1914 			return (0);
1915 
1916 		QLA_USEC_DELAY(100);
1917 	}
1918 
1919 qls_wait_for_proc_addr_ready_exit:
1920 	QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
1921 
1922 	ha->qla_initiate_recovery = 1;
1923 	return (-1);
1924 }
1925 
static int
qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
	uint32_t *data)
{
	int ret;
	uint32_t value;

	ret = qls_wait_for_proc_addr_ready(ha);

	if (ret)
		goto qls_proc_addr_rd_reg_exit;

	value = addr_module | reg | Q81_CTL_PROC_ADDR_READ;

	WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);

	ret = qls_wait_for_proc_addr_ready(ha);

	if (ret)
		goto qls_proc_addr_rd_reg_exit;

	*data = READ_REG32(ha, Q81_CTL_PROC_DATA);

qls_proc_addr_rd_reg_exit:
	return (ret);
}

static int
qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
	uint32_t data)
{
	int ret;
	uint32_t value;

	ret = qls_wait_for_proc_addr_ready(ha);

	if (ret)
		goto qls_proc_addr_wr_reg_exit;

	WRITE_REG32(ha, Q81_CTL_PROC_DATA, data);

	value = addr_module | reg;

	WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);

	ret = qls_wait_for_proc_addr_ready(ha);

qls_proc_addr_wr_reg_exit:
	return (ret);
}

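/*
 * The reset register appears to use a mask-enable layout: the upper
 * halfword selects which bits of the lower halfword a write may
 * change.  The function reset below therefore writes
 *
 *	(Q81_CTL_RESET_FUNC << Q81_CTL_RESET_MASK_SHIFT) | Q81_CTL_RESET_FUNC
 *
 * ("this bit may change" plus "set this bit") and then polls for the
 * hardware to clear the bit when the reset completes.
 */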
static int
qls_hw_nic_reset(qla_host_t *ha)
{
	int		count;
	uint32_t	data;
	device_t	dev = ha->pci_dev;

	ha->hw_init = 0;

	data = (Q81_CTL_RESET_FUNC << Q81_CTL_RESET_MASK_SHIFT) |
			Q81_CTL_RESET_FUNC;
	WRITE_REG32(ha, Q81_CTL_RESET, data);

	count = 10;
	while (count--) {
		data = READ_REG32(ha, Q81_CTL_RESET);
		if ((data & Q81_CTL_RESET_FUNC) == 0)
			break;
		QLA_USEC_DELAY(10);
	}
	/* count is -1, not 0, when the loop above times out */
	if (count < 0) {
		device_printf(dev, "%s: Bit 15 not cleared after Reset\n",
			__func__);
		return (-1);
	}
	return (0);
}

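/*
 * Full reset sequence: on first initialization (hw_init == 0) a bare
 * NIC function reset suffices.  Otherwise the routing table is
 * cleared, the management processor is asked to pause, the outbound
 * FIFO is allowed to drain (NFE status plus the FIFO_EMPTY handshake
 * over the mailbox), the NIC function is reset, and the management
 * processor is finally resumed.
 */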
static int
qls_hw_reset(qla_host_t *ha)
{
	device_t	dev = ha->pci_dev;
	int		ret;
	int		count;
	uint32_t	data;

	QL_DPRINT2((ha->pci_dev, "%s: enter[%d]\n", __func__, ha->hw_init));

	if (ha->hw_init == 0) {
		ret = qls_hw_nic_reset(ha);
		goto qls_hw_reset_exit;
	}

	ret = qls_clear_routing_table(ha);
	if (ret)
		goto qls_hw_reset_exit;

	ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_STOP);
	if (ret)
		goto qls_hw_reset_exit;

	/*
	 * Wait for the NIC FIFO to empty
	 */
	count = 5;
	while (count--) {
		data = READ_REG32(ha, Q81_CTL_STATUS);
		if (data & Q81_CTL_STATUS_NFE)
			break;
		qls_mdelay(__func__, 100);
	}
	/* count is -1, not 0, when the loop above times out */
	if (count < 0) {
		device_printf(dev, "%s: NFE bit not set\n", __func__);
		ret = -1;
		goto qls_hw_reset_exit;
	}

	count = 5;
	while (count--) {
		(void)qls_mbx_get_mgmt_ctrl(ha, &data);

		if ((data & Q81_MBX_GET_MGMT_CTL_FIFO_EMPTY) &&
			(data & Q81_MBX_GET_MGMT_CTL_SET_MGMT))
			break;
		qls_mdelay(__func__, 100);
	}
	if (count < 0) {
		ret = -1;
		goto qls_hw_reset_exit;
	}

	/*
	 * Reset the NIC function
	 */
	ret = qls_hw_nic_reset(ha);
	if (ret)
		goto qls_hw_reset_exit;

	ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_RESUME);

qls_hw_reset_exit:
	if (ret)
		device_printf(dev, "%s: failed\n", __func__);

	return (ret);
}

/*
 * MPI Related Functions
 */
int
qls_mpi_risc_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
{
	int ret;

	ret = qls_proc_addr_rd_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
			reg, data);
	return (ret);
}

int
qls_mpi_risc_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
{
	int ret;

	ret = qls_proc_addr_wr_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
			reg, data);
	return (ret);
}

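/*
 * Mailbox registers are banked per PCI function: even-numbered
 * functions use the FUNC0 windows and odd-numbered functions the
 * FUNC1 windows.  Firmware responses are read from the "out" bank,
 * command parameters are written to the "in" bank, and both banks
 * sit behind the MPI RISC indirect interface above.
 */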
int
qls_mbx_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
{
	int ret;

	if ((ha->pci_func & 0x1) == 0)
		reg += Q81_FUNC0_MBX_OUT_REG0;
	else
		reg += Q81_FUNC1_MBX_OUT_REG0;

	ret = qls_mpi_risc_rd_reg(ha, reg, data);

	return (ret);
}

int
qls_mbx_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
{
	int ret;

	if ((ha->pci_func & 0x1) == 0)
		reg += Q81_FUNC0_MBX_IN_REG0;
	else
		reg += Q81_FUNC1_MBX_IN_REG0;

	ret = qls_mpi_risc_wr_reg(ha, reg, data);

	return (ret);
}

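/*
 * Mailbox command protocol: the inputs are written to the MBX_IN
 * registers, the host-to-RISC (HTR) interrupt is raised, and the
 * routine waits for a completion status in MBX_OUT0.  Status words of
 * the form 0x4xxx terminate the command (0x4000 is success); 0x8xxx
 * words appear to be asynchronous events and simply re-arm the wait.
 * When interrupts are enabled, the ISR fills ha->mbox and sets
 * ha->mbx_done instead of this routine polling.  Callers follow the
 * pattern used by qls_mbx_about_fw() below:
 *
 *	mbox = ha->mbox;
 *	bzero(mbox, sizeof(uint32_t) * Q81_NUM_MBX_REGISTERS);
 *	mbox[0] = Q81_MBX_ABOUT_FW;
 *	if (qls_mbx_cmd(ha, mbox, 1, mbox, 6))
 *		...handle the failure...
 */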
static int
qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
	uint32_t *out_mbx, uint32_t o_count)
{
	int i, ret = -1;
	uint32_t data32;
	uint32_t count = 50;

	QL_DPRINT2((ha->pci_dev, "%s: enter[0x%08x 0x%08x 0x%08x]\n",
		__func__, *in_mbx, *(in_mbx + 1), *(in_mbx + 2)));

	data32 = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);

	if (data32 & Q81_CTL_HCS_HTR_INTR) {
		device_printf(ha->pci_dev, "%s: cmd_status[0x%08x]\n",
			__func__, data32);
		goto qls_mbx_cmd_exit;
	}

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV,
		Q81_CTL_SEM_SET_PROC_ADDR_NIC_RCV)) {
		device_printf(ha->pci_dev, "%s: semlock failed\n", __func__);
		goto qls_mbx_cmd_exit;
	}

	ha->mbx_done = 0;

	for (i = 0; i < i_count; i++) {
		ret = qls_mbx_wr_reg(ha, i, *in_mbx);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: mbx_wr[%d, 0x%08x] failed\n", __func__,
				i, *in_mbx);
			qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
			goto qls_mbx_cmd_exit;
		}

		in_mbx++;
	}
	WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_SET_HTR_INTR);

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);

	ret = -1;
	ha->mbx_done = 0;

	while (count--) {
		if (ha->flags.intr_enable == 0) {
			data32 = READ_REG32(ha, Q81_CTL_STATUS);

			if (!(data32 & Q81_CTL_STATUS_PI)) {
				qls_mdelay(__func__, 100);
				continue;
			}

			ret = qls_mbx_rd_reg(ha, 0, &data32);

			if (ret == 0) {
				if ((data32 & 0xF000) == 0x4000) {
					out_mbx[0] = data32;

					for (i = 1; i < o_count; i++) {
						ret = qls_mbx_rd_reg(ha, i,
								&data32);
						if (ret) {
							device_printf(
								ha->pci_dev,
								"%s: mbx_rd[%d]"
								" failed\n",
								__func__, i);
							break;
						}
						out_mbx[i] = data32;
					}
					break;
				} else if ((data32 & 0xF000) == 0x8000) {
					/* asynchronous event; re-arm wait */
					count = 50;
					WRITE_REG32(ha,
						Q81_CTL_HOST_CMD_STATUS,
						Q81_CTL_HCS_CMD_CLR_RTH_INTR);
				}
			}
		} else {
			if (ha->mbx_done) {
				/* ISR filled ha->mbox; copy mbox[0] too */
				for (i = 0; i < o_count; i++) {
					out_mbx[i] = ha->mbox[i];
				}
				ret = 0;
				break;
			}
		}
		qls_mdelay(__func__, 1000);
	}

qls_mbx_cmd_exit:

	if (ha->flags.intr_enable == 0) {
		WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,
			Q81_CTL_HCS_CMD_CLR_RTH_INTR);
	}

	if (ret) {
		ha->qla_initiate_recovery = 1;
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit[%d]\n", __func__, ret));
	return (ret);
}

static int
qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl)
{
	uint32_t *mbox;
	device_t dev = ha->pci_dev;

	mbox = ha->mbox;
	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));

	mbox[0] = Q81_MBX_SET_MGMT_CTL;
	mbox[1] = t_ctrl;

	if (qls_mbx_cmd(ha, mbox, 2, mbox, 1)) {
		device_printf(dev, "%s failed\n", __func__);
		return (-1);
	}

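	/*
	 * A STOP request that completes with Q81_MBX_CMD_ERROR is still
	 * treated as success below, presumably because the management
	 * processor may already be stopped when the STOP is issued.
	 */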
	if ((mbox[0] == Q81_MBX_CMD_COMPLETE) ||
		((t_ctrl == Q81_MBX_SET_MGMT_CTL_STOP) &&
			(mbox[0] == Q81_MBX_CMD_ERROR))) {
		return (0);
	}
	device_printf(dev, "%s failed [0x%08x]\n", __func__, mbox[0]);
	return (-1);
}

static int
qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status)
{
	uint32_t *mbox;
	device_t dev = ha->pci_dev;

	*t_status = 0;

	mbox = ha->mbox;
	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));

	mbox[0] = Q81_MBX_GET_MGMT_CTL;

	if (qls_mbx_cmd(ha, mbox, 1, mbox, 2)) {
		device_printf(dev, "%s failed\n", __func__);
		return (-1);
	}

	*t_status = mbox[1];

	return (0);
}

static void
qls_mbx_get_link_status(qla_host_t *ha)
{
	uint32_t *mbox;
	device_t dev = ha->pci_dev;

	mbox = ha->mbox;
	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));

	mbox[0] = Q81_MBX_GET_LNK_STATUS;

	if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
		device_printf(dev, "%s failed\n", __func__);
		return;
	}

	ha->link_status			= mbox[1];
	ha->link_down_info		= mbox[2];
	ha->link_hw_info		= mbox[3];
	ha->link_dcbx_counters		= mbox[4];
	ha->link_change_counters	= mbox[5];

	device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
		__func__, mbox[0], mbox[1], mbox[2], mbox[3], mbox[4],
		mbox[5]);

	return;
}

static void
qls_mbx_about_fw(qla_host_t *ha)
{
	uint32_t *mbox;
	device_t dev = ha->pci_dev;

	mbox = ha->mbox;
	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));

	mbox[0] = Q81_MBX_ABOUT_FW;

	if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
		device_printf(dev, "%s failed\n", __func__);
		return;
	}

	device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
		__func__, mbox[0], mbox[1], mbox[2], mbox[3], mbox[4],
		mbox[5]);
}

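/*
 * DUMP_RISC_RAM passes its parameters as 16-bit mailbox words: the
 * 32-bit RISC RAM address goes in mbox[1] (low) and mbox[8] (high),
 * the word count in mbox[5] (low) and mbox[4] (high), and the 64-bit
 * DMA address in mbox[3]/mbox[2]/mbox[7]/mbox[6], least to most
 * significant.  For example, a buffer at bus address
 * 0x0000001234567890 is encoded as mbox[3] = 0x7890, mbox[2] = 0x3456,
 * mbox[7] = 0x0012 and mbox[6] = 0x0000.
 */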
int
qls_mbx_dump_risc_ram(qla_host_t *ha, void *buf, uint32_t r_addr,
	uint32_t r_size)
{
	bus_addr_t b_paddr;
	uint32_t *mbox;
	device_t dev = ha->pci_dev;

	mbox = ha->mbox;
	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));

	bzero(ha->mpi_dma.dma_b, (r_size << 2));
	b_paddr = ha->mpi_dma.dma_addr;

	mbox[0] = Q81_MBX_DUMP_RISC_RAM;
	mbox[1] = r_addr & 0xFFFF;
	mbox[2] = ((uint32_t)(b_paddr >> 16)) & 0xFFFF;
	mbox[3] = ((uint32_t)b_paddr) & 0xFFFF;
	mbox[4] = (r_size >> 16) & 0xFFFF;
	mbox[5] = r_size & 0xFFFF;
	mbox[6] = ((uint32_t)(b_paddr >> 48)) & 0xFFFF;
	mbox[7] = ((uint32_t)(b_paddr >> 32)) & 0xFFFF;
	mbox[8] = (r_addr >> 16) & 0xFFFF;

	bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
		BUS_DMASYNC_PREREAD);

	if (qls_mbx_cmd(ha, mbox, 9, mbox, 1)) {
		device_printf(dev, "%s failed\n", __func__);
		return (-1);
	}

	if (mbox[0] != 0x4000) {
		device_printf(dev, "%s: failed!\n", __func__);
		return (-1);
	}

	bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
		BUS_DMASYNC_POSTREAD);
	bcopy(ha->mpi_dma.dma_b, buf, (r_size << 2));

	return (0);
}

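/*
 * MPI reset handshake: request the RISC reset through the host
 * command/status register, poll until the hardware acknowledges by
 * latching the RISC_RESET status bit, then clear the request.  No
 * acknowledgment within the polling window is treated as fatal.
 */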
int
qls_mpi_reset(qla_host_t *ha)
{
	int		count;
	uint32_t	data;
	device_t	dev = ha->pci_dev;

	WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,
		Q81_CTL_HCS_CMD_SET_RISC_RESET);

	count = 10;
	while (count--) {
		data = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
		if (data & Q81_CTL_HCS_RISC_RESET) {
			WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,
				Q81_CTL_HCS_CMD_CLR_RISC_RESET);
			break;
		}
		qls_mdelay(__func__, 10);
	}
	/* count is -1, not 0, when the loop above times out */
	if (count < 0) {
		device_printf(dev, "%s: failed\n", __func__);
		return (-1);
	}
	return (0);
}