/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qls_hw.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains Hardware dependent functions
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qls_os.h"
#include "qls_hw.h"
#include "qls_def.h"
#include "qls_inline.h"
#include "qls_ver.h"
#include "qls_glbl.h"
#include "qls_dbg.h"

/*
 * Static Functions
 */
static int qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op);
static int qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac);
static int qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
		uint32_t add_mac, uint32_t index);

static int qls_init_rss(qla_host_t *ha);
static int qls_init_comp_queue(qla_host_t *ha, int cid);
static int qls_init_work_queue(qla_host_t *ha, int wid);
static int qls_init_fw_routing_table(qla_host_t *ha);
static int qls_hw_add_all_mcast(qla_host_t *ha);
static int qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta);
static int qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta);
static int qls_wait_for_flash_ready(qla_host_t *ha);

static int qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value);
static void qls_sem_unlock(qla_host_t *ha, uint32_t mask);

static void qls_free_tx_dma(qla_host_t *ha);
static int qls_alloc_tx_dma(qla_host_t *ha);
static void qls_free_rx_dma(qla_host_t *ha);
static int qls_alloc_rx_dma(qla_host_t *ha);
static void qls_free_mpi_dma(qla_host_t *ha);
static int qls_alloc_mpi_dma(qla_host_t *ha);
static void qls_free_rss_dma(qla_host_t *ha);
static int qls_alloc_rss_dma(qla_host_t *ha);

static int qls_flash_validate(qla_host_t *ha, const char *signature);

static int qls_wait_for_proc_addr_ready(qla_host_t *ha);
static int qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module,
		uint32_t reg, uint32_t *data);
static int qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module,
		uint32_t reg, uint32_t data);

static int qls_hw_reset(qla_host_t *ha);

/*
 * MPI Related Functions
 */
static int qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
		uint32_t *out_mbx, uint32_t o_count);
static int qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl);
static int qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status);
static void qls_mbx_get_link_status(qla_host_t *ha);
static void qls_mbx_about_fw(qla_host_t *ha);

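/*
 * Name: qls_get_msix_count
 * Function: Returns the number of MSI-X vectors used; one per Rx ring.
 */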
int
qls_get_msix_count(qla_host_t *ha)
{
	return (ha->num_rx_rings);
}

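/*
 * Name: qls_sysctl_mpi_dump
 * Function: Sysctl handler; writing 1 triggers an MPI core dump.
 */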
static int
qls_sysctl_mpi_dump(SYSCTL_HANDLER_ARGS)
{
	int err = 0, ret;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		qls_mpi_core_dump(ha);
	}
	return (err);
}

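/*
 * Name: qls_sysctl_link_status
 * Function: Sysctl handler; writing 1 queries the link status and
 *	firmware information via the mailbox interface.
 */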
static int
qls_sysctl_link_status(SYSCTL_HANDLER_ARGS)
{
	int err = 0, ret;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		qls_mbx_get_link_status(ha);
		qls_mbx_about_fw(ha);
	}
	return (err);
}

void
qls_hw_add_sysctls(qla_host_t *ha)
{
	device_t	dev;

	dev = ha->pci_dev;

	ha->num_rx_rings = MAX_RX_RINGS;
	ha->num_tx_rings = MAX_TX_RINGS;

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "num_rx_rings", CTLFLAG_RD, &ha->num_rx_rings,
		ha->num_rx_rings, "Number of Completion Queues");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->num_tx_rings,
		ha->num_tx_rings, "Number of Transmit Rings");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "mpi_dump",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
	    qls_sysctl_mpi_dump, "I", "MPI Dump");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "link_status",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
	    qls_sysctl_link_status, "I", "Link Status");
}
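
/*
 * The handlers above are driven from userland; for example (assuming the
 * driver attaches as "qlxge" and the device is unit 0):
 *
 *	sysctl dev.qlxge.0.link_status=1
 */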

/*
 * Name: qls_free_dma
 * Function: Frees the DMA'able memory allocated in qls_alloc_dma()
 */
void
qls_free_dma(qla_host_t *ha)
{
	qls_free_rss_dma(ha);
	qls_free_mpi_dma(ha);
	qls_free_tx_dma(ha);
	qls_free_rx_dma(ha);
	return;
}

/*
 * Name: qls_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
qls_alloc_dma(qla_host_t *ha)
{
	if (qls_alloc_rx_dma(ha))
		return (-1);

	if (qls_alloc_tx_dma(ha)) {
		qls_free_rx_dma(ha);
		return (-1);
	}

	if (qls_alloc_mpi_dma(ha)) {
		qls_free_tx_dma(ha);
		qls_free_rx_dma(ha);
		return (-1);
	}

	if (qls_alloc_rss_dma(ha)) {
		qls_free_mpi_dma(ha);
		qls_free_tx_dma(ha);
		qls_free_rx_dma(ha);
		return (-1);
	}

	return (0);
}

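/*
 * Name: qls_wait_for_mac_proto_idx_ready
 * Function: Polls the MAC Protocol Address Index register until the
 *	requested ready bit (op) is set; triggers recovery on timeout.
 */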
static int
qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op)
{
	uint32_t data32;
	uint32_t count = 3;

	while (count--) {
		data32 = READ_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX);

		if (data32 & op)
			return (0);

		QLA_USEC_DELAY(100);
	}
	ha->qla_initiate_recovery = 1;
	return (-1);
}

/*
 * Name: qls_config_unicast_mac_addr
 * Function: binds/unbinds a unicast MAC address to the interface.
 */
static int
qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac)
{
	int ret = 0;
	uint32_t mac_upper = 0;
	uint32_t mac_lower = 0;
	uint32_t value = 0, index;

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
		Q81_CTL_SEM_SET_MAC_SERDES)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return (-1);
	}

	if (add_mac) {
		mac_upper = (ha->mac_addr[0] << 8) | ha->mac_addr[1];
		mac_lower = (ha->mac_addr[2] << 24) | (ha->mac_addr[3] << 16) |
				(ha->mac_addr[4] << 8) | ha->mac_addr[5];
	}
	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
	if (ret)
		goto qls_config_unicast_mac_addr_exit;

	index = 128 * (ha->pci_func & 0x1); /* index */

	value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
		Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC;

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);

	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
	if (ret)
		goto qls_config_unicast_mac_addr_exit;

	value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
		Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x1;

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);

	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
	if (ret)
		goto qls_config_unicast_mac_addr_exit;

	value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
		Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x2;

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);

	value = Q81_CAM_MAC_OFF2_ROUTE_NIC |
			((ha->pci_func & 0x1) << Q81_CAM_MAC_OFF2_FUNC_SHIFT) |
			(0 << Q81_CAM_MAC_OFF2_CQID_SHIFT);

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, value);

qls_config_unicast_mac_addr_exit:
	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
	return (ret);
}

/*
 * Name: qls_config_mcast_mac_addr
 * Function: binds/unbinds a multicast MAC address to the interface.
 */
static int
qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
	uint32_t index)
{
	int ret = 0;
	uint32_t mac_upper = 0;
	uint32_t mac_lower = 0;
	uint32_t value = 0;

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
		Q81_CTL_SEM_SET_MAC_SERDES)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return (-1);
	}

	if (add_mac) {
		mac_upper = (mac_addr[0] << 8) | mac_addr[1];
		mac_lower = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
				(mac_addr[4] << 8) | mac_addr[5];
	}
	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
	if (ret)
		goto qls_config_mcast_mac_addr_exit;

	value = Q81_CTL_MAC_PROTO_AI_E |
			(index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
			Q81_CTL_MAC_PROTO_AI_TYPE_MCAST;

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);

	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
	if (ret)
		goto qls_config_mcast_mac_addr_exit;

	value = Q81_CTL_MAC_PROTO_AI_E |
			(index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
			Q81_CTL_MAC_PROTO_AI_TYPE_MCAST | 0x1;

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);

qls_config_mcast_mac_addr_exit:
	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);

	return (ret);
}

/*
 * Name: qls_wait_for_route_idx_ready
 * Function: Polls the Routing Index register until the requested ready
 *	bit (op) is set; triggers recovery on timeout.
 */
static int
qls_wait_for_route_idx_ready(qla_host_t *ha, uint32_t op)
{
	uint32_t data32;
	uint32_t count = 3;

	while (count--) {
		data32 = READ_REG32(ha, Q81_CTL_ROUTING_INDEX);

		if (data32 & op)
			return (0);

		QLA_USEC_DELAY(100);
	}
	ha->qla_initiate_recovery = 1;
	return (-1);
}

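/*
 * Name: qls_load_route_idx_reg
 * Function: Programs one entry of the firmware routing table; the caller
 *	must hold the routing index/data register semaphore.
 */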
static int
qls_load_route_idx_reg(qla_host_t *ha, uint32_t index, uint32_t data)
{
	int ret = 0;

	ret = qls_wait_for_route_idx_ready(ha, Q81_CTL_RI_MW);

	if (ret) {
		device_printf(ha->pci_dev, "%s: [0x%08x, 0x%08x] failed\n",
			__func__, index, data);
		goto qls_load_route_idx_reg_exit;
	}

	WRITE_REG32(ha, Q81_CTL_ROUTING_INDEX, index);
	WRITE_REG32(ha, Q81_CTL_ROUTING_DATA, data);

qls_load_route_idx_reg_exit:
	return (ret);
}

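/*
 * Name: qls_load_route_idx_reg_locked
 * Function: Acquires the routing register semaphore and programs one
 *	routing table entry.
 */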
static int
qls_load_route_idx_reg_locked(qla_host_t *ha, uint32_t index, uint32_t data)
{
	int ret = 0;

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
		Q81_CTL_SEM_SET_RIDX_DATAREG)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return (-1);
	}

	ret = qls_load_route_idx_reg(ha, index, data);

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);

	return (ret);
}

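/*
 * Name: qls_clear_routing_table
 * Function: Clears all 16 entries of the firmware routing table.
 */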
static int
qls_clear_routing_table(qla_host_t *ha)
{
	int i, ret = 0;

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
		Q81_CTL_SEM_SET_RIDX_DATAREG)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return (-1);
	}

	for (i = 0; i < 16; i++) {
		ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_TYPE_NICQMASK |
			(i << 8) | Q81_CTL_RI_DST_DFLTQ), 0);
		if (ret)
			break;
	}

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);

	return (ret);
}

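/*
 * Name: qls_set_promisc
 * Function: Adds the routing table entry that accepts all valid packets
 *	(promiscuous mode); qls_reset_promisc() removes it.
 */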
int
qls_set_promisc(qla_host_t *ha)
{
	int ret;

	ret = qls_load_route_idx_reg_locked(ha,
			(Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
			Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ),
			Q81_CTL_RD_VALID_PKT);
	return (ret);
}

void
qls_reset_promisc(qla_host_t *ha)
{
	int ret;

	ret = qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
			Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ), 0);
	return;
}

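/*
 * Name: qls_set_allmulti
 * Function: Adds the routing table entry that accepts all multicast
 *	packets; qls_reset_allmulti() removes it.
 */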
int
qls_set_allmulti(qla_host_t *ha)
{
	int ret;

	ret = qls_load_route_idx_reg_locked(ha,
			(Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
			Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ),
			Q81_CTL_RD_MCAST);
	return (ret);
}

void
qls_reset_allmulti(qla_host_t *ha)
{
	int ret;

	ret = qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
			Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ), 0);
	return;
}

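/*
 * Name: qls_init_fw_routing_table
 * Function: Clears and then programs the firmware routing table: errored
 *	frames are dropped, broadcasts and CAM/multicast hits go to the
 *	default queue, and RSS matches (when multiple Rx rings exist) are
 *	spread across the completion queues.
 */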
static int
qls_init_fw_routing_table(qla_host_t *ha)
{
	int ret = 0;

	ret = qls_clear_routing_table(ha);
	if (ret)
		return (-1);

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
		Q81_CTL_SEM_SET_RIDX_DATAREG)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return (-1);
	}

	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DROP |
			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_ALL_ERROR),
			Q81_CTL_RD_ERROR_PKT);
	if (ret)
		goto qls_init_fw_routing_table_exit;

	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_BCAST),
			Q81_CTL_RD_BCAST);
	if (ret)
		goto qls_init_fw_routing_table_exit;

	if (ha->num_rx_rings > 1) {
		ret = qls_load_route_idx_reg(ha,
				(Q81_CTL_RI_E | Q81_CTL_RI_DST_RSS |
				Q81_CTL_RI_TYPE_NICQMASK |
				Q81_CTL_RI_IDX_RSS_MATCH),
				Q81_CTL_RD_RSS_MATCH);
		if (ret)
			goto qls_init_fw_routing_table_exit;
	}

	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_MCAST_MATCH),
			Q81_CTL_RD_MCAST_REG_MATCH);
	if (ret)
		goto qls_init_fw_routing_table_exit;

	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_CAM_HIT),
			Q81_CTL_RD_CAM_HIT);
	if (ret)
		goto qls_init_fw_routing_table_exit;

qls_init_fw_routing_table_exit:
	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
	return (ret);
}

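/*
 * Name: qls_tx_tso_chksum
 * Function: Parses the Ethernet/IP/IPv6 headers of an outbound frame and
 *	fills in the TSO/checksum-offload fields of the Tx IOCB. Returns -1
 *	for unsupported ethertypes. Note that the TSO path assumes the IP
 *	and TCP headers are resident in the first mbuf of the chain.
 */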
static int
qls_tx_tso_chksum(qla_host_t *ha, struct mbuf *mp, q81_tx_tso_t *tx_mac)
{
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *th;
	uint32_t ehdrlen, ip_hlen;
	int ret = 0;
	uint16_t etype;
	device_t dev;
	uint8_t buf[sizeof(struct ip6_hdr)];

	dev = ha->pci_dev;

	eh = mtod(mp, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

	switch (etype) {
		case ETHERTYPE_IP:
			ip = (struct ip *)(mp->m_data + ehdrlen);

			ip_hlen = sizeof (struct ip);

			if (mp->m_len < (ehdrlen + ip_hlen)) {
				m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
				ip = (struct ip *)buf;
			}
			tx_mac->opcode = Q81_IOCB_TX_TSO;
			tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV4;

			tx_mac->phdr_offsets = ehdrlen;

			tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
							Q81_TX_TSO_PHDR_SHIFT);

			ip->ip_sum = 0;

			if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
				tx_mac->flags |= Q81_TX_TSO_FLAGS_LSO;

				th = (struct tcphdr *)(ip + 1);

				th->th_sum = in_pseudo(ip->ip_src.s_addr,
						ip->ip_dst.s_addr,
						htons(IPPROTO_TCP));
				tx_mac->mss = mp->m_pkthdr.tso_segsz;
				tx_mac->phdr_length = ip_hlen + ehdrlen +
							(th->th_off << 2);
				break;
			}
			tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC;

			if (ip->ip_p == IPPROTO_TCP) {
				tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
			} else if (ip->ip_p == IPPROTO_UDP) {
				tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
			}
		break;

		case ETHERTYPE_IPV6:
			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);

			ip_hlen = sizeof(struct ip6_hdr);

			if (mp->m_len < (ehdrlen + ip_hlen)) {
				m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
					buf);
				ip6 = (struct ip6_hdr *)buf;
			}

			tx_mac->opcode = Q81_IOCB_TX_TSO;
			tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV6;
			tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC;

			tx_mac->phdr_offsets = ehdrlen;
			tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
							Q81_TX_TSO_PHDR_SHIFT);

			if (ip6->ip6_nxt == IPPROTO_TCP) {
				tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
			} else if (ip6->ip6_nxt == IPPROTO_UDP) {
				tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
			}
		break;

		default:
			ret = -1;
		break;
	}

	return (ret);
}

#define QLA_TX_MIN_FREE 2
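/*
 * Name: qls_hw_tx_done
 * Function: Recomputes the free-descriptor count for a Tx ring from the
 *	hardware consumer index; returns -1 if QLA_TX_MIN_FREE or fewer
 *	descriptors remain.
 */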
int
qls_hw_tx_done(qla_host_t *ha, uint32_t txr_idx)
{
	uint32_t txr_done, txr_next;

	txr_done = ha->tx_ring[txr_idx].txr_done;
	txr_next = ha->tx_ring[txr_idx].txr_next;

	if (txr_done == txr_next) {
		ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS;
	} else if (txr_done > txr_next) {
		ha->tx_ring[txr_idx].txr_free = txr_done - txr_next;
	} else {
		ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS +
			txr_done - txr_next;
	}

	if (ha->tx_ring[txr_idx].txr_free <= QLA_TX_MIN_FREE)
		return (-1);

	return (0);
}

/*
 * Name: qls_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 *	offload. If neither of these criteria is met, it is transmitted
 *	as a regular ethernet frame.
 */
int
qls_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
	uint32_t txr_next, struct mbuf *mp, uint32_t txr_idx)
{
	q81_tx_mac_t *tx_mac;
	q81_txb_desc_t *tx_desc;
	uint32_t total_length = 0;
	uint32_t i;
	device_t dev;
	int ret = 0;

	dev = ha->pci_dev;

	total_length = mp->m_pkthdr.len;

	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
			__func__, total_length);
		return (-1);
	}

	if (ha->tx_ring[txr_idx].txr_free <= (NUM_TX_DESCRIPTORS >> 2)) {
		if (qls_hw_tx_done(ha, txr_idx)) {
			device_printf(dev, "%s: tx_free[%d] = %d\n",
				__func__, txr_idx,
				ha->tx_ring[txr_idx].txr_free);
			return (-1);
		}
	}

	tx_mac = (q81_tx_mac_t *)&ha->tx_ring[txr_idx].wq_vaddr[txr_next];

	bzero(tx_mac, sizeof(q81_tx_mac_t));

	if ((mp->m_pkthdr.csum_flags &
			(CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO)) != 0) {
		ret = qls_tx_tso_chksum(ha, mp, (q81_tx_tso_t *)tx_mac);
		if (ret)
			return (EINVAL);

		if (mp->m_pkthdr.csum_flags & CSUM_TSO)
			ha->tx_ring[txr_idx].tx_tso_frames++;
		else
			ha->tx_ring[txr_idx].tx_frames++;
	} else {
		tx_mac->opcode = Q81_IOCB_TX_MAC;
	}

	if (mp->m_flags & M_VLANTAG) {
		tx_mac->vlan_tci = mp->m_pkthdr.ether_vtag;
		tx_mac->vlan_off |= Q81_TX_MAC_VLAN_OFF_V;

		ha->tx_ring[txr_idx].tx_vlan_frames++;
	}

	tx_mac->frame_length = total_length;

	tx_mac->tid_lo = txr_next;

	if (nsegs <= MAX_TX_MAC_DESC) {
		QL_DPRINT2((dev, "%s: 1 [%d, %d]\n", __func__, total_length,
			tx_mac->tid_lo));

		for (i = 0; i < nsegs; i++) {
			tx_mac->txd[i].baddr = segs->ds_addr;
			tx_mac->txd[i].length = segs->ds_len;
			segs++;
		}
		tx_mac->txd[(nsegs - 1)].flags = Q81_RXB_DESC_FLAGS_E;
	} else {
		QL_DPRINT2((dev, "%s: 2 [%d, %d]\n", __func__, total_length,
			tx_mac->tid_lo));

		tx_mac->txd[0].baddr =
			ha->tx_ring[txr_idx].tx_buf[txr_next].oal_paddr;
		tx_mac->txd[0].length =
			nsegs * (sizeof(q81_txb_desc_t));
		tx_mac->txd[0].flags = Q81_RXB_DESC_FLAGS_C;

		tx_desc = ha->tx_ring[txr_idx].tx_buf[txr_next].oal_vaddr;

		for (i = 0; i < nsegs; i++) {
			tx_desc->baddr = segs->ds_addr;
			tx_desc->length = segs->ds_len;

			if (i == (nsegs - 1))
				tx_desc->flags = Q81_RXB_DESC_FLAGS_E;
			else
				tx_desc->flags = 0;

			segs++;
			tx_desc++;
		}
	}
	txr_next = (txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
	ha->tx_ring[txr_idx].txr_next = txr_next;

	ha->tx_ring[txr_idx].txr_free--;

	Q81_WR_WQ_PROD_IDX(txr_idx, txr_next);

	return (0);
}

/*
 * Name: qls_del_hw_if
 * Function: Destroys the hardware specific entities corresponding to an
 *	Ethernet Interface
 */
void
qls_del_hw_if(qla_host_t *ha)
{
	uint32_t value;
	int i;

	if (ha->hw_init == 0) {
		qls_hw_reset(ha);
		return;
	}

	for (i = 0; i < ha->num_tx_rings; i++) {
		Q81_SET_WQ_INVALID(i);
	}
	for (i = 0; i < ha->num_rx_rings; i++) {
		Q81_SET_CQ_INVALID(i);
	}

	for (i = 0; i < ha->num_rx_rings; i++) {
		Q81_DISABLE_INTR(ha, i); /* MSI-x i */
	}

	value = (Q81_CTL_INTRE_IHD << Q81_CTL_INTRE_MASK_SHIFT);
	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

	value = (Q81_CTL_INTRE_EI << Q81_CTL_INTRE_MASK_SHIFT);
	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
	ha->flags.intr_enable = 0;

	qls_hw_reset(ha);

	return;
}

/*
 * Name: qls_init_hw_if
 * Function: Creates the hardware specific entities corresponding to an
 *	Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
 *	corresponding to the interface. Enables LRO if allowed.
 */
int
qls_init_hw_if(qla_host_t *ha)
{
	device_t	dev;
	uint32_t	value;
	int		ret = 0;
	int		i;

	QL_DPRINT2((ha->pci_dev, "%s:enter\n", __func__));

	dev = ha->pci_dev;

	ret = qls_hw_reset(ha);
	if (ret)
		goto qls_init_hw_if_exit;

	ha->vm_pgsize = 4096;

	/* Enable FAE and EFE bits in System Register */
	value = Q81_CTL_SYSTEM_ENABLE_FAE | Q81_CTL_SYSTEM_ENABLE_EFE;
	value = (value << Q81_CTL_SYSTEM_MASK_SHIFT) | value;

	WRITE_REG32(ha, Q81_CTL_SYSTEM, value);

	/* Set Default Completion Queue_ID in NIC Rcv Configuration Register */
	value = (Q81_CTL_NIC_RCVC_DCQ_MASK << Q81_CTL_NIC_RCVC_MASK_SHIFT);
	WRITE_REG32(ha, Q81_CTL_NIC_RCV_CONFIG, value);

	/* Function Specific Control Register - Set Page Size and Enable NIC */
	value = Q81_CTL_FUNC_SPECIFIC_FE |
		Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_MASK |
		Q81_CTL_FUNC_SPECIFIC_EPC_O |
		Q81_CTL_FUNC_SPECIFIC_EPC_I |
		Q81_CTL_FUNC_SPECIFIC_EC;
	value = (value << Q81_CTL_FUNC_SPECIFIC_MASK_SHIFT) |
			Q81_CTL_FUNC_SPECIFIC_FE |
			Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_4K |
			Q81_CTL_FUNC_SPECIFIC_EPC_O |
			Q81_CTL_FUNC_SPECIFIC_EPC_I |
			Q81_CTL_FUNC_SPECIFIC_EC;

	WRITE_REG32(ha, Q81_CTL_FUNC_SPECIFIC, value);

	/* Interrupt Mask Register */
	value = Q81_CTL_INTRM_PI;
	value = (value << Q81_CTL_INTRM_MASK_SHIFT) | value;

	WRITE_REG32(ha, Q81_CTL_INTR_MASK, value);

	/* Initialize Completion Queues */
	for (i = 0; i < ha->num_rx_rings; i++) {
		ret = qls_init_comp_queue(ha, i);
		if (ret)
			goto qls_init_hw_if_exit;
	}

	if (ha->num_rx_rings > 1) {
		ret = qls_init_rss(ha);
		if (ret)
			goto qls_init_hw_if_exit;
	}

	/* Initialize Work Queues */
	for (i = 0; i < ha->num_tx_rings; i++) {
		ret = qls_init_work_queue(ha, i);
		if (ret)
			goto qls_init_hw_if_exit;
	}

	/* Set up CAM RAM with MAC Address */
	ret = qls_config_unicast_mac_addr(ha, 1);
	if (ret)
		goto qls_init_hw_if_exit;

	ret = qls_hw_add_all_mcast(ha);
	if (ret)
		goto qls_init_hw_if_exit;

	/* Initialize Firmware Routing Table */
	ret = qls_init_fw_routing_table(ha);
	if (ret)
		goto qls_init_hw_if_exit;

	/* Get Chip Revision ID */
	ha->rev_id = READ_REG32(ha, Q81_CTL_REV_ID);

	/* Enable Global Interrupt */
	value = Q81_CTL_INTRE_EI;
	value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;

	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

	/* Enable Interrupt Handshake Disable */
	value = Q81_CTL_INTRE_IHD;
	value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;

	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

	/* Enable Completion Interrupts */
	ha->flags.intr_enable = 1;

	for (i = 0; i < ha->num_rx_rings; i++) {
		Q81_ENABLE_INTR(ha, i); /* MSI-x i */
	}

	ha->hw_init = 1;

	qls_mbx_get_link_status(ha);

	QL_DPRINT2((ha->pci_dev, "%s:rxr [0x%08x]\n", __func__,
		ha->rx_ring[0].cq_db_offset));
	QL_DPRINT2((ha->pci_dev, "%s:txr [0x%08x]\n", __func__,
		ha->tx_ring[0].wq_db_offset));

	for (i = 0; i < ha->num_rx_rings; i++) {
		Q81_WR_CQ_CONS_IDX(i, 0);
		Q81_WR_LBQ_PROD_IDX(i, ha->rx_ring[i].lbq_in);
		Q81_WR_SBQ_PROD_IDX(i, ha->rx_ring[i].sbq_in);

		QL_DPRINT2((dev, "%s: [wq_idx, cq_idx, lbq_idx, sbq_idx]"
			"[0x%08x, 0x%08x, 0x%08x, 0x%08x]\n", __func__,
			Q81_RD_WQ_IDX(i), Q81_RD_CQ_IDX(i), Q81_RD_LBQ_IDX(i),
			Q81_RD_SBQ_IDX(i)));
	}

	for (i = 0; i < ha->num_rx_rings; i++) {
		Q81_SET_CQ_VALID(i);
	}

qls_init_hw_if_exit:
	QL_DPRINT2((ha->pci_dev, "%s:exit\n", __func__));
	return (ret);
}

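/*
 * Name: qls_wait_for_config_reg_bits
 * Function: Polls the Configuration register until the masked bits reach
 *	the expected value; triggers recovery on timeout.
 */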
static int
qls_wait_for_config_reg_bits(qla_host_t *ha, uint32_t bits, uint32_t value)
{
	uint32_t data32;
	uint32_t count = 3;

	while (count--) {
		data32 = READ_REG32(ha, Q81_CTL_CONFIG);

		if ((data32 & bits) == value)
			return (0);

		QLA_USEC_DELAY(100);
	}
	ha->qla_initiate_recovery = 1;
	device_printf(ha->pci_dev, "%s: failed\n", __func__);
	return (-1);
}

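/* 40-byte hash key loaded into the RSS Initialization Control Block */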
static uint8_t q81_hash_key[] = {
			0xda, 0x56, 0x5a, 0x6d,
			0xc2, 0x0e, 0x5b, 0x25,
			0x3d, 0x25, 0x67, 0x41,
			0xb0, 0x8f, 0xa3, 0x43,
			0xcb, 0x2b, 0xca, 0xd0,
			0xb4, 0x30, 0x7b, 0xae,
			0xa3, 0x2d, 0xcb, 0x77,
			0x0c, 0xf2, 0x30, 0x80,
			0x3b, 0xb7, 0x42, 0x6a,
			0xfa, 0x01, 0xac, 0xbe };

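/*
 * Name: qls_init_rss
 * Function: Builds the RSS Initialization Control Block (hash key and an
 *	indirection table spreading flows across the completion queues) and
 *	loads it into the chip via the ICB access registers.
 */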
static int
qls_init_rss(qla_host_t *ha)
{
	q81_rss_icb_t	*rss_icb;
	int		ret = 0;
	int		i;
	uint32_t	value;

	rss_icb = ha->rss_dma.dma_b;

	bzero(rss_icb, sizeof (q81_rss_icb_t));

	rss_icb->flags_base_cq_num = Q81_RSS_ICB_FLAGS_L4K |
				Q81_RSS_ICB_FLAGS_L6K | Q81_RSS_ICB_FLAGS_LI |
				Q81_RSS_ICB_FLAGS_LB | Q81_RSS_ICB_FLAGS_LM |
				Q81_RSS_ICB_FLAGS_RT4 | Q81_RSS_ICB_FLAGS_RT6;

	rss_icb->mask = 0x3FF;

	for (i = 0; i < Q81_RSS_ICB_NUM_INDTBL_ENTRIES; i++) {
		rss_icb->cq_id[i] = (i & (ha->num_rx_rings - 1));
	}

	memcpy(rss_icb->ipv6_rss_hash_key, q81_hash_key, 40);
	memcpy(rss_icb->ipv4_rss_hash_key, q81_hash_key, 16);

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);

	if (ret)
		goto qls_init_rss_exit;

	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);

	if (ret) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		goto qls_init_rss_exit;
	}

	value = (uint32_t)ha->rss_dma.dma_addr;
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);

	value = (uint32_t)(ha->rss_dma.dma_addr >> 32);
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);

	value = (Q81_CTL_CONFIG_LR << Q81_CTL_CONFIG_MASK_SHIFT) |
			Q81_CTL_CONFIG_LR;

	WRITE_REG32(ha, Q81_CTL_CONFIG, value);

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);

qls_init_rss_exit:
	return (ret);
}

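/*
 * Name: qls_init_comp_queue
 * Function: Builds the Completion Queue Initialization Control Block for
 *	ring cid (completion ring, interrupt coalescing parameters, large
 *	and small buffer queues) and loads it into the chip.
 */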
static int
qls_init_comp_queue(qla_host_t *ha, int cid)
{
	q81_cq_icb_t	*cq_icb;
	qla_rx_ring_t	*rxr;
	int		ret = 0;
	uint32_t	value;

	rxr = &ha->rx_ring[cid];

	rxr->cq_db_offset = ha->vm_pgsize * (128 + cid);

	cq_icb = rxr->cq_icb_vaddr;

	bzero(cq_icb, sizeof (q81_cq_icb_t));

	cq_icb->msix_vector = cid;
	cq_icb->flags = Q81_CQ_ICB_FLAGS_LC |
			Q81_CQ_ICB_FLAGS_LI |
			Q81_CQ_ICB_FLAGS_LL |
			Q81_CQ_ICB_FLAGS_LS |
			Q81_CQ_ICB_FLAGS_LV;

	cq_icb->length_v = NUM_CQ_ENTRIES;

	cq_icb->cq_baddr_lo = (rxr->cq_base_paddr & 0xFFFFFFFF);
	cq_icb->cq_baddr_hi = (rxr->cq_base_paddr >> 32) & 0xFFFFFFFF;

	cq_icb->cqi_addr_lo = (rxr->cqi_paddr & 0xFFFFFFFF);
	cq_icb->cqi_addr_hi = (rxr->cqi_paddr >> 32) & 0xFFFFFFFF;

	cq_icb->pkt_idelay = 10;
	cq_icb->idelay = 100;

	cq_icb->lbq_baddr_lo = (rxr->lbq_addr_tbl_paddr & 0xFFFFFFFF);
	cq_icb->lbq_baddr_hi = (rxr->lbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;

	cq_icb->lbq_bsize = QLA_LGB_SIZE;
	cq_icb->lbq_length = QLA_NUM_LGB_ENTRIES;

	cq_icb->sbq_baddr_lo = (rxr->sbq_addr_tbl_paddr & 0xFFFFFFFF);
	cq_icb->sbq_baddr_hi = (rxr->sbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;

	cq_icb->sbq_bsize = (uint16_t)ha->msize;
	cq_icb->sbq_length = QLA_NUM_SMB_ENTRIES;

	QL_DUMP_CQ(ha);

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);

	if (ret)
		goto qls_init_comp_queue_exit;

	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);

	if (ret) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		goto qls_init_comp_queue_exit;
	}

	value = (uint32_t)rxr->cq_icb_paddr;
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);

	value = (uint32_t)(rxr->cq_icb_paddr >> 32);
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);

	value = Q81_CTL_CONFIG_LCQ | Q81_CTL_CONFIG_Q_NUM_MASK;
	value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LCQ;
	value |= (cid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
	WRITE_REG32(ha, Q81_CTL_CONFIG, value);

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);

	rxr->cq_next = 0;
	rxr->lbq_next = rxr->lbq_free = 0;
	rxr->sbq_next = rxr->sbq_free = 0;
	rxr->rx_free = rxr->rx_next = 0;
	rxr->lbq_in = (QLA_NUM_LGB_ENTRIES - 1) & ~0xF;
	rxr->sbq_in = (QLA_NUM_SMB_ENTRIES - 1) & ~0xF;

qls_init_comp_queue_exit:
	return (ret);
}

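/*
 * Name: qls_init_work_queue
 * Function: Builds the Work Queue (Tx ring) Initialization Control Block
 *	for ring wid, loads it into the chip, and resets the ring's
 *	producer/consumer state.
 */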
static int
qls_init_work_queue(qla_host_t *ha, int wid)
{
	q81_wq_icb_t	*wq_icb;
	qla_tx_ring_t	*txr;
	int		ret = 0;
	uint32_t	value;

	txr = &ha->tx_ring[wid];

	txr->wq_db_addr = (struct resource *)((uint8_t *)ha->pci_reg1
						+ (ha->vm_pgsize * wid));

	txr->wq_db_offset = (ha->vm_pgsize * wid);

	wq_icb = txr->wq_icb_vaddr;
	bzero(wq_icb, sizeof (q81_wq_icb_t));

	wq_icb->length_v = NUM_TX_DESCRIPTORS |
				Q81_WQ_ICB_VALID;

	wq_icb->flags = Q81_WQ_ICB_FLAGS_LO | Q81_WQ_ICB_FLAGS_LI |
			Q81_WQ_ICB_FLAGS_LB | Q81_WQ_ICB_FLAGS_LC;

	wq_icb->wqcqid_rss = wid;

	wq_icb->baddr_lo = txr->wq_paddr & 0xFFFFFFFF;
	wq_icb->baddr_hi = (txr->wq_paddr >> 32) & 0xFFFFFFFF;

	wq_icb->ci_addr_lo = txr->txr_cons_paddr & 0xFFFFFFFF;
	wq_icb->ci_addr_hi = (txr->txr_cons_paddr >> 32) & 0xFFFFFFFF;

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);

	if (ret)
		goto qls_init_wq_exit;

	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);

	if (ret) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		goto qls_init_wq_exit;
	}

	value = (uint32_t)txr->wq_icb_paddr;
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);

	value = (uint32_t)(txr->wq_icb_paddr >> 32);
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);

	value = Q81_CTL_CONFIG_LRQ | Q81_CTL_CONFIG_Q_NUM_MASK;
	value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LRQ;
	value |= (wid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
	WRITE_REG32(ha, Q81_CTL_CONFIG, value);

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);

	txr->txr_free = NUM_TX_DESCRIPTORS;
	txr->txr_next = 0;
	txr->txr_done = 0;

qls_init_wq_exit:
	return (ret);
}

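/*
 * Name: qls_hw_add_all_mcast
 * Function: Programs every non-zero entry of the driver's multicast
 *	table into the hardware; used when the interface is (re)initialized.
 */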
static int
qls_hw_add_all_mcast(qla_host_t *ha)
{
	int i, nmcast;

	nmcast = ha->nmcast;

	for (i = 0; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
		if ((ha->mcast[i].addr[0] != 0) ||
			(ha->mcast[i].addr[1] != 0) ||
			(ha->mcast[i].addr[2] != 0) ||
			(ha->mcast[i].addr[3] != 0) ||
			(ha->mcast[i].addr[4] != 0) ||
			(ha->mcast[i].addr[5] != 0)) {
			if (qls_config_mcast_mac_addr(ha, ha->mcast[i].addr,
				1, i)) {
				device_printf(ha->pci_dev, "%s: failed\n",
					__func__);
				return (-1);
			}

			nmcast--;
		}
	}
	return (0);
}

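/*
 * Name: qls_hw_add_mcast
 * Function: Adds a multicast address to the first free slot of the
 *	driver's multicast table and programs it into the hardware; a no-op
 *	if the address is already present.
 */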
static int
qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
{
	int i;

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
		if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
			return (0); /* it has already been added */
	}

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
		if ((ha->mcast[i].addr[0] == 0) &&
			(ha->mcast[i].addr[1] == 0) &&
			(ha->mcast[i].addr[2] == 0) &&
			(ha->mcast[i].addr[3] == 0) &&
			(ha->mcast[i].addr[4] == 0) &&
			(ha->mcast[i].addr[5] == 0)) {
			if (qls_config_mcast_mac_addr(ha, mta, 1, i))
				return (-1);

			bcopy(mta, ha->mcast[i].addr, Q8_MAC_ADDR_LEN);
			ha->nmcast++;

			return (0);
		}
	}
	return (0);
}

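/*
 * Name: qls_hw_del_mcast
 * Function: Removes a multicast address from the hardware and clears its
 *	slot in the driver's multicast table.
 */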
static int
qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
{
	int i;

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
		if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
			if (qls_config_mcast_mac_addr(ha, mta, 0, i))
				return (-1);

			ha->mcast[i].addr[0] = 0;
			ha->mcast[i].addr[1] = 0;
			ha->mcast[i].addr[2] = 0;
			ha->mcast[i].addr[3] = 0;
			ha->mcast[i].addr[4] = 0;
			ha->mcast[i].addr[5] = 0;

			ha->nmcast--;

			return (0);
		}
	}
	return (0);
}

/*
 * Name: qls_hw_set_multi
 * Function: Sets the multicast addresses provided by the host O.S. into
 *	the hardware (for the given interface)
 */
void
qls_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
	uint32_t add_mac)
{
	int i;

	for (i = 0; i < mcnt; i++) {
		if (add_mac) {
			if (qls_hw_add_mcast(ha, mta))
				break;
		} else {
			if (qls_hw_del_mcast(ha, mta))
				break;
		}

		mta += Q8_MAC_ADDR_LEN;
	}
	return;
}

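/*
 * Name: qls_update_link_state
 * Function: Reads the per-function link state from the Status register
 *	and reports transitions to the network stack.
 */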
void
qls_update_link_state(qla_host_t *ha)
{
	uint32_t link_state;
	uint32_t prev_link_state;

	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		ha->link_up = 0;
		return;
	}
	link_state = READ_REG32(ha, Q81_CTL_STATUS);

	prev_link_state = ha->link_up;

	if ((ha->pci_func & 0x1) == 0)
		ha->link_up = ((link_state & Q81_CTL_STATUS_PL0) ? 1 : 0);
	else
		ha->link_up = ((link_state & Q81_CTL_STATUS_PL1) ? 1 : 0);

	if (prev_link_state != ha->link_up) {
		if (ha->link_up) {
			if_link_state_change(ha->ifp, LINK_STATE_UP);
		} else {
			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
		}
	}
	return;
}

static void
qls_free_tx_ring_dma(qla_host_t *ha, int r_idx)
{
	if (ha->tx_ring[r_idx].flags.wq_dma) {
		qls_free_dmabuf(ha, &ha->tx_ring[r_idx].wq_dma);
		ha->tx_ring[r_idx].flags.wq_dma = 0;
	}

	if (ha->tx_ring[r_idx].flags.privb_dma) {
		qls_free_dmabuf(ha, &ha->tx_ring[r_idx].privb_dma);
		ha->tx_ring[r_idx].flags.privb_dma = 0;
	}
	return;
}

static void
qls_free_tx_dma(qla_host_t *ha)
{
	int i, j;
	qla_tx_buf_t *txb;

	for (i = 0; i < ha->num_tx_rings; i++) {
		qls_free_tx_ring_dma(ha, i);

		for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
			txb = &ha->tx_ring[i].tx_buf[j];

			if (txb->map) {
				bus_dmamap_destroy(ha->tx_tag, txb->map);
			}
		}
	}

	if (ha->tx_tag != NULL) {
		bus_dma_tag_destroy(ha->tx_tag);
		ha->tx_tag = NULL;
	}

	return;
}

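/*
 * Name: qls_alloc_tx_ring_dma
 * Function: Allocates the work queue (descriptor ring) and the ring's
 *	private buffer, which holds the WQ ICB, the consumer index and the
 *	per-descriptor OAL (outbound address list) blocks.
 */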
static int
qls_alloc_tx_ring_dma(qla_host_t *ha, int ridx)
{
	int		ret = 0, i;
	uint8_t		*v_addr;
	bus_addr_t	p_addr;
	qla_tx_buf_t	*txb;
	device_t	dev = ha->pci_dev;

	ha->tx_ring[ridx].wq_dma.alignment = 8;
	ha->tx_ring[ridx].wq_dma.size =
		NUM_TX_DESCRIPTORS * (sizeof (q81_tx_cmd_t));

	ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].wq_dma);

	if (ret) {
		device_printf(dev, "%s: [%d] txr failed\n", __func__, ridx);
		goto qls_alloc_tx_ring_dma_exit;
	}
	ha->tx_ring[ridx].flags.wq_dma = 1;

	ha->tx_ring[ridx].privb_dma.alignment = 8;
	ha->tx_ring[ridx].privb_dma.size = QLA_TX_PRIVATE_BSIZE;

	ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].privb_dma);

	if (ret) {
		device_printf(dev, "%s: [%d] oalb failed\n", __func__, ridx);
		goto qls_alloc_tx_ring_dma_exit;
	}

	ha->tx_ring[ridx].flags.privb_dma = 1;

	ha->tx_ring[ridx].wq_vaddr = ha->tx_ring[ridx].wq_dma.dma_b;
	ha->tx_ring[ridx].wq_paddr = ha->tx_ring[ridx].wq_dma.dma_addr;

	v_addr = ha->tx_ring[ridx].privb_dma.dma_b;
	p_addr = ha->tx_ring[ridx].privb_dma.dma_addr;

	ha->tx_ring[ridx].wq_icb_vaddr = v_addr;
	ha->tx_ring[ridx].wq_icb_paddr = p_addr;

	ha->tx_ring[ridx].txr_cons_vaddr =
		(uint32_t *)(v_addr + (PAGE_SIZE >> 1));
	ha->tx_ring[ridx].txr_cons_paddr = p_addr + (PAGE_SIZE >> 1);

	v_addr = v_addr + (PAGE_SIZE >> 1);
	p_addr = p_addr + (PAGE_SIZE >> 1);

	txb = ha->tx_ring[ridx].tx_buf;

	for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
		txb[i].oal_vaddr = v_addr;
		txb[i].oal_paddr = p_addr;

		v_addr = v_addr + QLA_OAL_BLK_SIZE;
		p_addr = p_addr + QLA_OAL_BLK_SIZE;
	}

qls_alloc_tx_ring_dma_exit:
	return (ret);
}

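/*
 * Name: qls_alloc_tx_dma
 * Function: Creates the Tx DMA tag, then allocates each Tx ring's DMA
 *	memory and one DMA map per descriptor.
 */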
static int
qls_alloc_tx_dma(qla_host_t *ha)
{
	int	i, j;
	int	ret = 0;
	qla_tx_buf_t *txb;

	if (bus_dma_tag_create(NULL,	/* parent */
		1, 0,			/* alignment, bounds */
		BUS_SPACE_MAXADDR,	/* lowaddr */
		BUS_SPACE_MAXADDR,	/* highaddr */
		NULL, NULL,		/* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,	/* maxsize */
		QLA_MAX_SEGMENTS,	/* nsegments */
		PAGE_SIZE,		/* maxsegsize */
		BUS_DMA_ALLOCNOW,	/* flags */
		NULL,			/* lockfunc */
		NULL,			/* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}

	for (i = 0; i < ha->num_tx_rings; i++) {
		ret = qls_alloc_tx_ring_dma(ha, i);

		if (ret) {
			qls_free_tx_dma(ha);
			break;
		}

		for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
			txb = &ha->tx_ring[i].tx_buf[j];

			ret = bus_dmamap_create(ha->tx_tag,
				BUS_DMA_NOWAIT, &txb->map);
			if (ret) {
				ha->err_tx_dmamap_create++;
				device_printf(ha->pci_dev,
				"%s: bus_dmamap_create failed[%d, %d, %d]\n",
				__func__, ret, i, j);

				qls_free_tx_dma(ha);

				return (ret);
			}
		}
	}

	return (ret);
}

static void
qls_free_rss_dma(qla_host_t *ha)
{
	qls_free_dmabuf(ha, &ha->rss_dma);
	ha->flags.rss_dma = 0;
}

static int
qls_alloc_rss_dma(qla_host_t *ha)
{
	int ret = 0;

	ha->rss_dma.alignment = 4;
	ha->rss_dma.size = PAGE_SIZE;

	ret = qls_alloc_dmabuf(ha, &ha->rss_dma);

	if (ret)
		device_printf(ha->pci_dev, "%s: failed\n", __func__);
	else
		ha->flags.rss_dma = 1;

	return (ret);
}

static void
qls_free_mpi_dma(qla_host_t *ha)
{
	qls_free_dmabuf(ha, &ha->mpi_dma);
	ha->flags.mpi_dma = 0;
}

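/*
 * Name: qls_alloc_mpi_dma
 * Function: Allocates a 64KB (0x4000 * 4) DMA buffer used when dumping
 *	the MPI core.
 */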
static int
qls_alloc_mpi_dma(qla_host_t *ha)
{
	int ret = 0;

	ha->mpi_dma.alignment = 4;
	ha->mpi_dma.size = (0x4000 * 4);

	ret = qls_alloc_dmabuf(ha, &ha->mpi_dma);
	if (ret)
		device_printf(ha->pci_dev, "%s: failed\n", __func__);
	else
		ha->flags.mpi_dma = 1;

	return (ret);
}

static void
qls_free_rx_ring_dma(qla_host_t *ha, int ridx)
{
	if (ha->rx_ring[ridx].flags.cq_dma) {
		qls_free_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
		ha->rx_ring[ridx].flags.cq_dma = 0;
	}

	if (ha->rx_ring[ridx].flags.lbq_dma) {
		qls_free_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
		ha->rx_ring[ridx].flags.lbq_dma = 0;
	}

	if (ha->rx_ring[ridx].flags.sbq_dma) {
		qls_free_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
		ha->rx_ring[ridx].flags.sbq_dma = 0;
	}

	if (ha->rx_ring[ridx].flags.lb_dma) {
		qls_free_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
		ha->rx_ring[ridx].flags.lb_dma = 0;
	}
	return;
}

static void
qls_free_rx_dma(qla_host_t *ha)
{
	int i;

	for (i = 0; i < ha->num_rx_rings; i++) {
		qls_free_rx_ring_dma(ha, i);
	}

	if (ha->rx_tag != NULL) {
		bus_dma_tag_destroy(ha->rx_tag);
		ha->rx_tag = NULL;
	}

	return;
}

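/*
 * Name: qls_alloc_rx_ring_dma
 * Function: Allocates and lays out the per-ring Rx DMA areas: the
 *	completion queue (with its ICB and index register page appended),
 *	the large and small buffer queues with their address tables, and
 *	the large receive buffers themselves.
 */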
static int
qls_alloc_rx_ring_dma(qla_host_t *ha, int ridx)
{
	int				i, ret = 0;
	uint8_t				*v_addr;
	bus_addr_t			p_addr;
	volatile q81_bq_addr_e_t	*bq_e;
	device_t			dev = ha->pci_dev;

	ha->rx_ring[ridx].cq_dma.alignment = 128;
	ha->rx_ring[ridx].cq_dma.size =
		(NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t))) + PAGE_SIZE;

	ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);

	if (ret) {
		device_printf(dev, "%s: [%d] cq failed\n", __func__, ridx);
		goto qls_alloc_rx_ring_dma_exit;
	}
	ha->rx_ring[ridx].flags.cq_dma = 1;

	ha->rx_ring[ridx].lbq_dma.alignment = 8;
	ha->rx_ring[ridx].lbq_dma.size = QLA_LGBQ_AND_TABLE_SIZE;

	ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);

	if (ret) {
		device_printf(dev, "%s: [%d] lbq failed\n", __func__, ridx);
		goto qls_alloc_rx_ring_dma_exit;
	}
	ha->rx_ring[ridx].flags.lbq_dma = 1;

	ha->rx_ring[ridx].sbq_dma.alignment = 8;
	ha->rx_ring[ridx].sbq_dma.size = QLA_SMBQ_AND_TABLE_SIZE;

	ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);

	if (ret) {
		device_printf(dev, "%s: [%d] sbq failed\n", __func__, ridx);
		goto qls_alloc_rx_ring_dma_exit;
	}
	ha->rx_ring[ridx].flags.sbq_dma = 1;

	ha->rx_ring[ridx].lb_dma.alignment = 8;
	ha->rx_ring[ridx].lb_dma.size = (QLA_LGB_SIZE * QLA_NUM_LGB_ENTRIES);

	ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
	if (ret) {
		device_printf(dev, "%s: [%d] lb failed\n", __func__, ridx);
		goto qls_alloc_rx_ring_dma_exit;
	}
	ha->rx_ring[ridx].flags.lb_dma = 1;

	bzero(ha->rx_ring[ridx].cq_dma.dma_b, ha->rx_ring[ridx].cq_dma.size);
	bzero(ha->rx_ring[ridx].lbq_dma.dma_b, ha->rx_ring[ridx].lbq_dma.size);
	bzero(ha->rx_ring[ridx].sbq_dma.dma_b, ha->rx_ring[ridx].sbq_dma.size);
	bzero(ha->rx_ring[ridx].lb_dma.dma_b, ha->rx_ring[ridx].lb_dma.size);

	/* completion queue */
	ha->rx_ring[ridx].cq_base_vaddr = ha->rx_ring[ridx].cq_dma.dma_b;
	ha->rx_ring[ridx].cq_base_paddr = ha->rx_ring[ridx].cq_dma.dma_addr;

	v_addr = ha->rx_ring[ridx].cq_dma.dma_b;
	p_addr = ha->rx_ring[ridx].cq_dma.dma_addr;

	v_addr = v_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
	p_addr = p_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));

	/* completion queue icb */
	ha->rx_ring[ridx].cq_icb_vaddr = v_addr;
	ha->rx_ring[ridx].cq_icb_paddr = p_addr;

	v_addr = v_addr + (PAGE_SIZE >> 2);
	p_addr = p_addr + (PAGE_SIZE >> 2);

	/* completion queue index register */
	ha->rx_ring[ridx].cqi_vaddr = (uint32_t *)v_addr;
	ha->rx_ring[ridx].cqi_paddr = p_addr;

	v_addr = ha->rx_ring[ridx].lbq_dma.dma_b;
	p_addr = ha->rx_ring[ridx].lbq_dma.dma_addr;

	/* large buffer queue address table */
	ha->rx_ring[ridx].lbq_addr_tbl_vaddr = v_addr;
	ha->rx_ring[ridx].lbq_addr_tbl_paddr = p_addr;

	/* large buffer queue */
	ha->rx_ring[ridx].lbq_vaddr = v_addr + PAGE_SIZE;
	ha->rx_ring[ridx].lbq_paddr = p_addr + PAGE_SIZE;

	v_addr = ha->rx_ring[ridx].sbq_dma.dma_b;
	p_addr = ha->rx_ring[ridx].sbq_dma.dma_addr;

	/* small buffer queue address table */
	ha->rx_ring[ridx].sbq_addr_tbl_vaddr = v_addr;
	ha->rx_ring[ridx].sbq_addr_tbl_paddr = p_addr;

	/* small buffer queue */
	ha->rx_ring[ridx].sbq_vaddr = v_addr + PAGE_SIZE;
	ha->rx_ring[ridx].sbq_paddr = p_addr + PAGE_SIZE;

	ha->rx_ring[ridx].lb_vaddr = ha->rx_ring[ridx].lb_dma.dma_b;
	ha->rx_ring[ridx].lb_paddr = ha->rx_ring[ridx].lb_dma.dma_addr;

	/* Initialize Large Buffer Queue Table */

	p_addr = ha->rx_ring[ridx].lbq_paddr;
	bq_e = ha->rx_ring[ridx].lbq_addr_tbl_vaddr;

	bq_e->addr_lo = p_addr & 0xFFFFFFFF;
	bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;

	p_addr = ha->rx_ring[ridx].lb_paddr;
	bq_e = ha->rx_ring[ridx].lbq_vaddr;

	for (i = 0; i < QLA_NUM_LGB_ENTRIES; i++) {
		bq_e->addr_lo = p_addr & 0xFFFFFFFF;
		bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;

		p_addr = p_addr + QLA_LGB_SIZE;
		bq_e++;
	}

	/* Initialize Small Buffer Queue Table */

	p_addr = ha->rx_ring[ridx].sbq_paddr;
	bq_e = ha->rx_ring[ridx].sbq_addr_tbl_vaddr;

	for (i = 0; i < (QLA_SBQ_SIZE/QLA_PAGE_SIZE); i++) {
		bq_e->addr_lo = p_addr & 0xFFFFFFFF;
		bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;

		p_addr = p_addr + QLA_PAGE_SIZE;
		bq_e++;
	}

qls_alloc_rx_ring_dma_exit:
	return (ret);
}

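/*
 * Name: qls_alloc_rx_dma
 * Function: Creates the Rx DMA tag (a single 9K segment per receive
 *	buffer) and allocates the DMA areas for every Rx ring.
 */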
static int
qls_alloc_rx_dma(qla_host_t *ha)
{
	int	i;
	int	ret = 0;

	if (bus_dma_tag_create(NULL,	/* parent */
			1, 0,			/* alignment, bounds */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			MJUM9BYTES,		/* maxsize */
			1,			/* nsegments */
			MJUM9BYTES,		/* maxsegsize */
			BUS_DMA_ALLOCNOW,	/* flags */
			NULL,			/* lockfunc */
			NULL,			/* lockfuncarg */
			&ha->rx_tag)) {
		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
			__func__);

		return (ENOMEM);
	}

	for (i = 0; i < ha->num_rx_rings; i++) {
		ret = qls_alloc_rx_ring_dma(ha, i);

		if (ret) {
			qls_free_rx_dma(ha);
			break;
		}
	}

	return (ret);
}

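/*
 * Name: qls_wait_for_flash_ready
 * Function: Polls the Flash Address register until the ready bit is set;
 *	fails immediately on an error indication.
 */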
static int
qls_wait_for_flash_ready(qla_host_t *ha)
{
	uint32_t data32;
	uint32_t count = 3;

	while (count--) {
		data32 = READ_REG32(ha, Q81_CTL_FLASH_ADDR);

		if (data32 & Q81_CTL_FLASH_ADDR_ERR)
			goto qls_wait_for_flash_ready_exit;

		if (data32 & Q81_CTL_FLASH_ADDR_RDY)
			return (0);

		QLA_USEC_DELAY(100);
	}

qls_wait_for_flash_ready_exit:
	QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));

	return (-1);
}

/*
 * Name: qls_rd_flash32
 * Function: Read Flash Memory
 */
int
qls_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data)
{
	int ret;

	ret = qls_wait_for_flash_ready(ha);

	if (ret)
		return (ret);

	WRITE_REG32(ha, Q81_CTL_FLASH_ADDR, (addr | Q81_CTL_FLASH_ADDR_R));

	ret = qls_wait_for_flash_ready(ha);

	if (ret)
		return (ret);

	*data = READ_REG32(ha, Q81_CTL_FLASH_DATA);

	return (0);
}

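/*
 * Name: qls_flash_validate
 * Function: Verifies the 4-byte flash signature and the 16-bit checksum
 *	over the flash parameter block.
 */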
static int
qls_flash_validate(qla_host_t *ha, const char *signature)
{
	uint16_t csum16 = 0;
	uint16_t *data16;
	int i;

	if (bcmp(ha->flash.id, signature, 4)) {
		QL_DPRINT1((ha->pci_dev, "%s: invalid signature "
			"%x:%x:%x:%x %s\n", __func__, ha->flash.id[0],
			ha->flash.id[1], ha->flash.id[2], ha->flash.id[3],
			signature));
		return (-1);
	}

	data16 = (uint16_t *)&ha->flash;

	for (i = 0; i < (sizeof (q81_flash_t) >> 1); i++) {
		csum16 += *data16++;
	}

	if (csum16) {
		QL_DPRINT1((ha->pci_dev, "%s: invalid checksum\n", __func__));
		return (-1);
	}
	return (0);
}

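/*
 * Name: qls_rd_nic_params
 * Function: Reads this PCI function's parameter block from flash,
 *	validates it, and extracts the permanent MAC address.
 */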
int
qls_rd_nic_params(qla_host_t *ha)
{
	int		i, ret = 0;
	uint32_t	faddr;
	uint32_t	*qflash;

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_FLASH, Q81_CTL_SEM_SET_FLASH)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return (-1);
	}

	if ((ha->pci_func & 0x1) == 0)
		faddr = Q81_F0_FLASH_OFFSET >> 2;
	else
		faddr = Q81_F1_FLASH_OFFSET >> 2;

	qflash = (uint32_t *)&ha->flash;

	for (i = 0; i < (sizeof(q81_flash_t) >> 2); i++) {
		ret = qls_rd_flash32(ha, faddr, qflash);

		if (ret)
			goto qls_rd_flash_data_exit;

		faddr++;
		qflash++;
	}

	QL_DUMP_BUFFER8(ha, __func__, (&ha->flash), (sizeof (q81_flash_t)));

	ret = qls_flash_validate(ha, Q81_FLASH_ID);

	if (ret)
		goto qls_rd_flash_data_exit;

	bcopy(ha->flash.mac_addr0, ha->mac_addr, ETHER_ADDR_LEN);

	QL_DPRINT1((ha->pci_dev, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
		__func__, ha->mac_addr[0], ha->mac_addr[1], ha->mac_addr[2],
		ha->mac_addr[3], ha->mac_addr[4], ha->mac_addr[5]));

qls_rd_flash_data_exit:

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_FLASH);

	return (ret);
}

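/*
 * Name: qls_sem_lock
 * Function: Acquires the hardware semaphore selected by mask by writing
 *	(mask | value) and reading the value back; retries for up to 3ms
 *	before initiating recovery.
 */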
static int
qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value)
{
	uint32_t count = 30;
	uint32_t data;

	while (count--) {
		WRITE_REG32(ha, Q81_CTL_SEMAPHORE, (mask|value));

		data = READ_REG32(ha, Q81_CTL_SEMAPHORE);

		if (data & value) {
			return (0);
		} else {
			QLA_USEC_DELAY(100);
		}
	}
	ha->qla_initiate_recovery = 1;
	return (-1);
}

static void
qls_sem_unlock(qla_host_t *ha, uint32_t mask)
{
	WRITE_REG32(ha, Q81_CTL_SEMAPHORE, mask);
}

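/*
 * Name: qls_wait_for_proc_addr_ready
 * Function: Polls the Processor Address register until the ready bit is
 *	set; on error or timeout initiates recovery.
 */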
static int
qls_wait_for_proc_addr_ready(qla_host_t *ha)
{
	uint32_t data32;
	uint32_t count = 3;

	while (count--) {
		data32 = READ_REG32(ha, Q81_CTL_PROC_ADDR);

		if (data32 & Q81_CTL_PROC_ADDR_ERR)
			goto qls_wait_for_proc_addr_ready_exit;

		if (data32 & Q81_CTL_PROC_ADDR_RDY)
			return (0);

		QLA_USEC_DELAY(100);
	}

qls_wait_for_proc_addr_ready_exit:
	QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));

	ha->qla_initiate_recovery = 1;
	return (-1);
}

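/*
 * Name: qls_proc_addr_rd_reg
 * Function: Reads an indirect register of the given processor module via
 *	the Processor Address/Data register pair.
 */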
1964 static int
1965 qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1966 	uint32_t *data)
1967 {
1968 	int ret;
1969 	uint32_t value;
1970 
1971 	ret = qls_wait_for_proc_addr_ready(ha);
1972 
1973 	if (ret)
1974 		goto qls_proc_addr_rd_reg_exit;
1975 
1976 	value = addr_module | reg | Q81_CTL_PROC_ADDR_READ;
1977 
1978 	WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
1979 
1980 	ret = qls_wait_for_proc_addr_ready(ha);
1981 
1982 	if (ret)
1983 		goto qls_proc_addr_rd_reg_exit;
1984 
1985 	*data = READ_REG32(ha, Q81_CTL_PROC_DATA);
1986 
1987 qls_proc_addr_rd_reg_exit:
1988 	return (ret);
1989 }
1990 
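/*
 * qls_proc_addr_wr_reg
 *	Indirect register write; note the ordering is data first, then the
 *	module/register address, which triggers the write.
 */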
1991 static int
1992 qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1993 	uint32_t data)
1994 {
1995 	int ret;
1996 	uint32_t value;
1997 
1998 	ret = qls_wait_for_proc_addr_ready(ha);
1999 
2000 	if (ret)
2001 		goto qls_proc_addr_wr_reg_exit;
2002 
2003 	WRITE_REG32(ha, Q81_CTL_PROC_DATA, data);
2004 
2005 	value = addr_module | reg;
2006 
2007 	WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
2008 
2009 	ret = qls_wait_for_proc_addr_ready(ha);
2010 
2011 qls_proc_addr_wr_reg_exit:
2012 	return (ret);
2013 }
2014 
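/*
 * qls_hw_nic_reset
 *	Issues a function-level reset by setting the FUNC reset bit (with
 *	its mask bit) and polling until the hardware clears it again.
 */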
2015 static int
2016 qls_hw_nic_reset(qla_host_t *ha)
2017 {
2018 	int		count;
2019 	uint32_t	data;
2020 	device_t	dev = ha->pci_dev;
2021 
2022 	ha->hw_init = 0;
2023 
2024 	data = (Q81_CTL_RESET_FUNC << Q81_CTL_RESET_MASK_SHIFT) |
2025 			Q81_CTL_RESET_FUNC;
2026 	WRITE_REG32(ha, Q81_CTL_RESET, data);
2027 
2028 	count = 10;
2029 	while (count--) {
2030 		data = READ_REG32(ha, Q81_CTL_RESET);
2031 		if ((data & Q81_CTL_RESET_FUNC) == 0)
2032 			break;
2033 		QLA_USEC_DELAY(10);
2034 	}
2035 	if (count < 0) {	/* while (count--) leaves count at -1 on timeout */
2036 		device_printf(dev, "%s: Bit 15 not cleared after Reset\n",
2037 			__func__);
2038 		return (-1);
2039 	}
2040 	return (0);
2041 }
2042 
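/*
 * qls_hw_reset
 *	Orderly reset: clear the routing table, ask the management (MPI)
 *	firmware to pause, wait for its FIFO to drain, reset the NIC
 *	function, then let management traffic resume. On the first
 *	invocation (hw_init == 0) only the function reset is performed.
 */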
2043 static int
2044 qls_hw_reset(qla_host_t *ha)
2045 {
2046 	device_t	dev = ha->pci_dev;
2047 	int		ret;
2048 	int		count;
2049 	uint32_t	data;
2050 
2051 	QL_DPRINT2((ha->pci_dev, "%s: enter[%d]\n", __func__, ha->hw_init));
2052 
2053 	if (ha->hw_init == 0) {
2054 		ret = qls_hw_nic_reset(ha);
2055 		goto qls_hw_reset_exit;
2056 	}
2057 
2058 	ret = qls_clear_routing_table(ha);
2059 	if (ret)
2060 		goto qls_hw_reset_exit;
2061 
2062 	ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_STOP);
2063 	if (ret)
2064 		goto qls_hw_reset_exit;
2065 
2066 	/*
2067 	 * Wait for FIFO to empty
2068 	 */
2069 	count = 5;
2070 	while (count--) {
2071 		data = READ_REG32(ha, Q81_CTL_STATUS);
2072 		if (data & Q81_CTL_STATUS_NFE)
2073 			break;
2074 		qls_mdelay(__func__, 100);
2075 	}
2076 	if (count < 0) {	/* while (count--) leaves count at -1 on timeout */
2077 		device_printf(dev, "%s: NFE bit not set\n", __func__);
		ret = -1;
2078 		goto qls_hw_reset_exit;
2079 	}
2080 
2081 	count = 5;
2082 	while (count--) {
2083 		(void)qls_mbx_get_mgmt_ctrl(ha, &data);
2084 
2085 		if ((data & Q81_MBX_GET_MGMT_CTL_FIFO_EMPTY) &&
2086 			(data & Q81_MBX_GET_MGMT_CTL_SET_MGMT))
2087 			break;
2088 		qls_mdelay(__func__, 100);
2089 	}
2090 	if (count < 0) {	/* timed out waiting for the mgmt FIFO to drain */
		ret = -1;
2091 		goto qls_hw_reset_exit;
	}
2092 
2093 	/*
2094 	 * Reset the NIC function
2095 	 */
2096 	ret = qls_hw_nic_reset(ha);
2097 	if (ret)
2098 		goto qls_hw_reset_exit;
2099 
2100 	ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_RESUME);
2101 
2102 qls_hw_reset_exit:
2103 	if (ret)
2104 		device_printf(dev, "%s: failed\n", __func__);
2105 
2106 	return (ret);
2107 }
2108 
2109 /*
2110  * MPI Related Functions
2111  */
2112 int
2113 qls_mpi_risc_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2114 {
2115 	int ret;
2116 
2117 	ret = qls_proc_addr_rd_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
2118 			reg, data);
2119 	return (ret);
2120 }
2121 
2122 int
2123 qls_mpi_risc_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2124 {
2125 	int ret;
2126 
2127 	ret = qls_proc_addr_wr_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
2128 			reg, data);
2129 	return (ret);
2130 }
2131 
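/*
 * The mailbox registers live in the MPI RISC's address space; the
 * inbound and outbound register banks differ between PCI functions
 * 0 and 1, hence the per-function base offsets below.
 */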
2132 int
2133 qls_mbx_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2134 {
2135 	int ret;
2136 
2137 	if ((ha->pci_func & 0x1) == 0)
2138 		reg += Q81_FUNC0_MBX_OUT_REG0;
2139 	else
2140 		reg += Q81_FUNC1_MBX_OUT_REG0;
2141 
2142 	ret = qls_mpi_risc_rd_reg(ha, reg, data);
2143 
2144 	return (ret);
2145 }
2146 
2147 int
2148 qls_mbx_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2149 {
2150 	int ret;
2151 
2152 	if ((ha->pci_func & 0x1) == 0)
2153 		reg += Q81_FUNC0_MBX_IN_REG0;
2154 	else
2155 		reg += Q81_FUNC1_MBX_IN_REG0;
2156 
2157 	ret = qls_mpi_risc_wr_reg(ha, reg, data);
2158 
2159 	return (ret);
2160 }
2161 
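/*
 * qls_mbx_cmd
 *	Mailbox command protocol: take the NIC receive-side semaphore, load
 *	the inbound mailboxes, and set the host-to-RISC interrupt to kick
 *	the firmware. Completion is detected either by polling (an outbound
 *	mbox[0] of the form 0x4xxx signals completion; 0x8xxx appears to be
 *	an async event, which re-arms the wait) or, when interrupts are
 *	enabled, by the ISR setting ha->mbx_done and filling ha->mbox.
 */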
2162 
2163 static int
2164 qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
2165 	uint32_t *out_mbx, uint32_t o_count)
2166 {
2167 	int i, ret = -1;
2168 	uint32_t data32;
2169 	uint32_t count = 50;
2170 
2171 	QL_DPRINT2((ha->pci_dev, "%s: enter[0x%08x 0x%08x 0x%08x]\n",
2172 		__func__, *in_mbx, *(in_mbx + 1), *(in_mbx + 2)));
2173 
2174 	data32 = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2175 
2176 	if (data32 & Q81_CTL_HCS_HTR_INTR) {
2177 		device_printf(ha->pci_dev, "%s: cmd_status[0x%08x]\n",
2178 			__func__, data32);
2179 		goto qls_mbx_cmd_exit;
2180 	}
2181 
2182 	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV,
2183 		Q81_CTL_SEM_SET_PROC_ADDR_NIC_RCV)) {
2184 		device_printf(ha->pci_dev, "%s: semlock failed\n", __func__);
2185 		goto qls_mbx_cmd_exit;
2186 	}
2187 
2188 	ha->mbx_done = 0;
2189 
2192 	for (i = 0; i < i_count; i++) {
2193 
2194 		ret = qls_mbx_wr_reg(ha, i, *in_mbx);
2195 
2196 		if (ret) {
2197 			device_printf(ha->pci_dev,
2198 				"%s: mbx_wr[%d, 0x%08x] failed\n", __func__,
2199 				i, *in_mbx);
2200 			qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
2201 			goto qls_mbx_cmd_exit;
2202 		}
2203 
2204 		in_mbx++;
2205 	}
2206 	WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_SET_HTR_INTR);
2207 
2208 	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
2209 
2210 	ret = -1;
2211 	ha->mbx_done = 0;
2212 
2213 	while (count--) {
2214 
2215 		if (ha->flags.intr_enable == 0) {
2216 			data32 = READ_REG32(ha, Q81_CTL_STATUS);
2217 
2218 			if (!(data32 & Q81_CTL_STATUS_PI)) {
2219 				qls_mdelay(__func__, 100);
2220 				continue;
2221 			}
2222 
2223 			ret = qls_mbx_rd_reg(ha, 0, &data32);
2224 
2225 			if (ret == 0) {
2226 				if ((data32 & 0xF000) == 0x4000) {
2227 
2228 					out_mbx[0] = data32;
2229 
2230 					for (i = 1; i < o_count; i++) {
2231 						ret = qls_mbx_rd_reg(ha, i,
2232 								&data32);
2233 						if (ret) {
2234 							device_printf(
2235 								ha->pci_dev,
2236 								"%s: mbx_rd[%d]"
2237 								" failed\n",
2238 								__func__, i);
2239 							break;
2240 						}
2241 						out_mbx[i] = data32;
2242 					}
2243 					break;
2244 				} else if ((data32 & 0xF000) == 0x8000) {
2245 					count = 50;
2246 					WRITE_REG32(ha,
2247 						Q81_CTL_HOST_CMD_STATUS,
2248 						Q81_CTL_HCS_CMD_CLR_RTH_INTR);
2249 				}
2250 			}
2251 		} else {
2252 			if (ha->mbx_done) {
2253 				for (i = 0; i < o_count; i++) {	/* fill out_mbx[0] too */
2254 					out_mbx[i] = ha->mbox[i];
2255 				}
2256 				ret = 0;
2257 				break;
2258 			}
2259 		}
2260 		qls_mdelay(__func__, 1000);
2261 	}
2262 
2263 qls_mbx_cmd_exit:
2264 
2265 	if (ha->flags.intr_enable == 0) {
2266 		WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,
2267 			Q81_CTL_HCS_CMD_CLR_RTH_INTR);
2268 	}
2269 
2270 	if (ret) {
2271 		ha->qla_initiate_recovery = 1;
2272 	}
2273 
2274 	QL_DPRINT2((ha->pci_dev, "%s: exit[%d]\n", __func__, ret));
2275 	return (ret);
2276 }
2277 
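/*
 * qls_mbx_set_mgmt_ctrl
 *	Stops or resumes management (MPI) processing. For the STOP case a
 *	CMD_ERROR status is also treated as success, presumably because the
 *	firmware may already be stopped.
 */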
2278 static int
2279 qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl)
2280 {
2281 	uint32_t *mbox;
2282 	device_t dev = ha->pci_dev;
2283 
2284 	mbox = ha->mbox;
2285 	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2286 
2287 	mbox[0] = Q81_MBX_SET_MGMT_CTL;
2288 	mbox[1] = t_ctrl;
2289 
2290 	if (qls_mbx_cmd(ha, mbox, 2, mbox, 1)) {
2291 		device_printf(dev, "%s failed\n", __func__);
2292 		return (-1);
2293 	}
2294 
2295 	if ((mbox[0] == Q81_MBX_CMD_COMPLETE) ||
2296 		((t_ctrl == Q81_MBX_SET_MGMT_CTL_STOP) &&
2297 			(mbox[0] == Q81_MBX_CMD_ERROR))){
2298 		return (0);
2299 	}
2300 	device_printf(dev, "%s failed [0x%08x]\n", __func__, mbox[0]);
2301 	return (-1);
2303 }
2304 
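/*
 * qls_mbx_get_mgmt_ctrl
 *	Returns the management status word; qls_hw_reset() polls it for the
 *	FIFO_EMPTY and SET_MGMT bits.
 */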
2305 static int
2306 qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status)
2307 {
2308 	uint32_t *mbox;
2309 	device_t dev = ha->pci_dev;
2310 
2311 	*t_status = 0;
2312 
2313 	mbox = ha->mbox;
2314 	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2315 
2316 	mbox[0] = Q81_MBX_GET_MGMT_CTL;
2317 
2318 	if (qls_mbx_cmd(ha, mbox, 1, mbox, 2)) {
2319 		device_printf(dev, "%s failed\n", __func__);
2320 		return (-1);
2321 	}
2322 
2323 	*t_status = mbox[1];
2324 
2325 	return (0);
2326 }
2327 
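/*
 * qls_mbx_get_link_status
 *	Caches the five status words returned by the GET_LNK_STATUS mailbox
 *	command (link status, link-down info, hardware info, DCBX and
 *	link-change counters).
 */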
2328 static void
2329 qls_mbx_get_link_status(qla_host_t *ha)
2330 {
2331 	uint32_t *mbox;
2332 	device_t dev = ha->pci_dev;
2333 
2334 	mbox = ha->mbox;
2335 	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2336 
2337 	mbox[0] = Q81_MBX_GET_LNK_STATUS;
2338 
2339 	if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2340 		device_printf(dev, "%s failed\n", __func__);
2341 		return;
2342 	}
2343 
2344 	ha->link_status			= mbox[1];
2345 	ha->link_down_info		= mbox[2];
2346 	ha->link_hw_info		= mbox[3];
2347 	ha->link_dcbx_counters		= mbox[4];
2348 	ha->link_change_counters	= mbox[5];
2349 
2350 	device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2351 		__func__, mbox[0], mbox[1], mbox[2], mbox[3], mbox[4], mbox[5]);
2352 
2353 	return;
2354 }
2355 
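/*
 * qls_mbx_about_fw
 *	Issues the ABOUT_FW mailbox command and prints the six output
 *	words, presumably firmware version/identification data.
 */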
2356 static void
2357 qls_mbx_about_fw(qla_host_t *ha)
2358 {
2359 	uint32_t *mbox;
2360 	device_t dev = ha->pci_dev;
2361 
2362 	mbox = ha->mbox;
2363 	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2364 
2365 	mbox[0] = Q81_MBX_ABOUT_FW;
2366 
2367 	if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2368 		device_printf(dev, "%s failed\n", __func__);
2369 		return;
2370 	}
2371 
2372 	device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2373 		__func__, mbox[0], mbox[1], mbox[2], mbox[3], mbox[4], mbox[5]);
2374 }
2375 
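/*
 * qls_mbx_dump_risc_ram
 *	Dumps r_size 32-bit words of RISC RAM, starting at word address
 *	r_addr, into the caller's buffer via the pre-allocated MPI DMA
 *	area (hence r_size << 2 bytes). The 64-bit DMA bus address and the
 *	32-bit address/size values are split into 16-bit mailbox fields
 *	as laid out below.
 */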
2376 int
2377 qls_mbx_dump_risc_ram(qla_host_t *ha, void *buf, uint32_t r_addr,
2378 	uint32_t r_size)
2379 {
2380 	bus_addr_t b_paddr;
2381 	uint32_t *mbox;
2382 	device_t dev = ha->pci_dev;
2383 
2384 	mbox = ha->mbox;
2385 	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2386 
2387 	bzero(ha->mpi_dma.dma_b, (r_size << 2));
2388 	b_paddr = ha->mpi_dma.dma_addr;
2389 
2390 	mbox[0] = Q81_MBX_DUMP_RISC_RAM;
2391 	mbox[1] = r_addr & 0xFFFF;
2392 	mbox[2] = ((uint32_t)(b_paddr >> 16)) & 0xFFFF;
2393 	mbox[3] = ((uint32_t)b_paddr) & 0xFFFF;
2394 	mbox[4] = (r_size >> 16) & 0xFFFF;
2395 	mbox[5] = r_size & 0xFFFF;
2396 	mbox[6] = ((uint32_t)(b_paddr >> 48)) & 0xFFFF;
2397 	mbox[7] = ((uint32_t)(b_paddr >> 32)) & 0xFFFF;
2398 	mbox[8] = (r_addr >> 16) & 0xFFFF;
2399 
2400 	bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2401 		BUS_DMASYNC_PREREAD);
2402 
2403 	if (qls_mbx_cmd(ha, mbox, 9, mbox, 1)) {
2404 		device_printf(dev, "%s failed\n", __func__);
2405 		return (-1);
2406 	}
2407 	if (mbox[0] != 0x4000) {
2408 		device_printf(ha->pci_dev, "%s: failed!\n", __func__);
2409 		return (-1);
2410 	} else {
2411 		bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2412 			BUS_DMASYNC_POSTREAD);
2413 		bcopy(ha->mpi_dma.dma_b, buf, (r_size << 2));
2414 	}
2415 
2416 	return (0);
2417 }
2418 
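/*
 * qls_mpi_reset
 *	Resets the MPI RISC processor: request the reset through the host
 *	command/status register, wait for the RISC reset bit to latch, and
 *	then clear it, which appears to acknowledge the reset.
 */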
2419 int
2420 qls_mpi_reset(qla_host_t *ha)
2421 {
2422 	int		count;
2423 	uint32_t	data;
2424 	device_t	dev = ha->pci_dev;
2425 
2426 	WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,
2427 		Q81_CTL_HCS_CMD_SET_RISC_RESET);
2428 
2429 	count = 10;
2430 	while (count--) {
2431 		data = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2432 		if (data & Q81_CTL_HCS_RISC_RESET) {
2433 			WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,
2434 				Q81_CTL_HCS_CMD_CLR_RISC_RESET);
2435 			break;
2436 		}
2437 		qls_mdelay(__func__, 10);
2438 	}
2439 	if (count < 0) {	/* while (count--) leaves count at -1 on timeout */
2440 		device_printf(dev, "%s: failed\n", __func__);
2441 		return (-1);
2442 	}
2443 	return (0);
2444 }
2445 
2446