xref: /freebsd/sys/dev/qlxge/qls_hw.c (revision 5bb3134a8c21cb87b30e135ef168483f0333dabb)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qls_hw.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains hardware-dependent functions
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qls_os.h"
#include "qls_hw.h"
#include "qls_def.h"
#include "qls_inline.h"
#include "qls_ver.h"
#include "qls_glbl.h"
#include "qls_dbg.h"

/*
 * Static Functions
 */
static int qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op);
static int qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac);
static int qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
                uint32_t add_mac, uint32_t index);

static int qls_init_rss(qla_host_t *ha);
static int qls_init_comp_queue(qla_host_t *ha, int cid);
static int qls_init_work_queue(qla_host_t *ha, int wid);
static int qls_init_fw_routing_table(qla_host_t *ha);
static int qls_hw_add_all_mcast(qla_host_t *ha);
static int qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta);
static int qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta);
static int qls_wait_for_flash_ready(qla_host_t *ha);

static int qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value);
static void qls_sem_unlock(qla_host_t *ha, uint32_t mask);

static void qls_free_tx_dma(qla_host_t *ha);
static int qls_alloc_tx_dma(qla_host_t *ha);
static void qls_free_rx_dma(qla_host_t *ha);
static int qls_alloc_rx_dma(qla_host_t *ha);
static void qls_free_mpi_dma(qla_host_t *ha);
static int qls_alloc_mpi_dma(qla_host_t *ha);
static void qls_free_rss_dma(qla_host_t *ha);
static int qls_alloc_rss_dma(qla_host_t *ha);

static int qls_flash_validate(qla_host_t *ha, const char *signature);

static int qls_wait_for_proc_addr_ready(qla_host_t *ha);
static int qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module,
		uint32_t reg, uint32_t *data);
static int qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module,
		uint32_t reg, uint32_t data);

static int qls_hw_reset(qla_host_t *ha);

/*
 * MPI Related Functions
 */
static int qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
		uint32_t *out_mbx, uint32_t o_count);
static int qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl);
static int qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status);
static void qls_mbx_get_link_status(qla_host_t *ha);
static void qls_mbx_about_fw(qla_host_t *ha);

int
qls_get_msix_count(qla_host_t *ha)
{
	return (ha->num_rx_rings);
}

static int
qls_sysctl_mpi_dump(SYSCTL_HANDLER_ARGS)
{
        int err = 0, ret;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
		qls_mpi_core_dump(ha);
        }
        return (err);
}

static int
qls_sysctl_link_status(SYSCTL_HANDLER_ARGS)
{
        int err = 0, ret;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
		qls_mbx_get_link_status(ha);
		qls_mbx_about_fw(ha);
        }
        return (err);
}

void
qls_hw_add_sysctls(qla_host_t *ha)
{
        device_t	dev;

        dev = ha->pci_dev;

	ha->num_rx_rings = MAX_RX_RINGS;
	ha->num_tx_rings = MAX_TX_RINGS;

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "num_rx_rings", CTLFLAG_RD, &ha->num_rx_rings,
		ha->num_rx_rings, "Number of Completion Queues");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->num_tx_rings,
		ha->num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "mpi_dump",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
	    qls_sysctl_mpi_dump, "I", "MPI Dump");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "link_status",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
	    qls_sysctl_link_status, "I", "Link Status");
}

/*
 * Name: qls_free_dma
 * Function: Frees the DMA'able memory allocated in qls_alloc_dma()
 */
void
qls_free_dma(qla_host_t *ha)
{
	qls_free_rss_dma(ha);
	qls_free_mpi_dma(ha);
	qls_free_tx_dma(ha);
	qls_free_rx_dma(ha);
	return;
}

/*
 * Name: qls_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
qls_alloc_dma(qla_host_t *ha)
{
	if (qls_alloc_rx_dma(ha))
		return (-1);

	if (qls_alloc_tx_dma(ha)) {
		qls_free_rx_dma(ha);
		return (-1);
	}

	if (qls_alloc_mpi_dma(ha)) {
		qls_free_tx_dma(ha);
		qls_free_rx_dma(ha);
		return (-1);
	}

	if (qls_alloc_rss_dma(ha)) {
		qls_free_mpi_dma(ha);
		qls_free_tx_dma(ha);
		qls_free_rx_dma(ha);
		return (-1);
	}

	return (0);
}

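/*
 * Name: qls_wait_for_mac_proto_idx_ready
 * Function: Polls the MAC Protocol Address Index Register until the
 *	requested ready bit in `op' is set (up to ~300us total); on timeout
 *	the driver is flagged to initiate recovery.
 */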
static int
qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op)
{
	uint32_t data32;
	uint32_t count = 3;

	while (count--) {
		data32 = READ_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX);

		if (data32 & op)
			return (0);

		QLA_USEC_DELAY(100);
	}
	ha->qla_initiate_recovery = 1;
	return (-1);
}

/*
 * Name: qls_config_unicast_mac_addr
 * Function: binds/unbinds a unicast MAC address to the interface.
 */
static int
qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac)
{
	int ret = 0;
	uint32_t mac_upper = 0;
	uint32_t mac_lower = 0;
	uint32_t value = 0, index;

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
		Q81_CTL_SEM_SET_MAC_SERDES)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return (-1);
	}

	if (add_mac) {
		mac_upper = (ha->mac_addr[0] << 8) | ha->mac_addr[1];
		mac_lower = (ha->mac_addr[2] << 24) | (ha->mac_addr[3] << 16) |
				(ha->mac_addr[4] << 8) | ha->mac_addr[5];
	}
	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
	if (ret)
		goto qls_config_unicast_mac_addr_exit;

	index = 128 * (ha->pci_func & 0x1); /* each function has its own CAM bank */

	value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
		Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC;

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);

	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
	if (ret)
		goto qls_config_unicast_mac_addr_exit;

	value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
		Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x1;

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);

	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
	if (ret)
		goto qls_config_unicast_mac_addr_exit;

	value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
		Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x2;

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);

	value = Q81_CAM_MAC_OFF2_ROUTE_NIC |
			((ha->pci_func & 0x1) << Q81_CAM_MAC_OFF2_FUNC_SHIFT) |
			(0 << Q81_CAM_MAC_OFF2_CQID_SHIFT);

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, value);

qls_config_unicast_mac_addr_exit:
	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
	return (ret);
}

/*
 * Name: qls_config_mcast_mac_addr
 * Function: binds/unbinds a multicast MAC address to the interface.
 */
static int
qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
	uint32_t index)
{
	int ret = 0;
	uint32_t mac_upper = 0;
	uint32_t mac_lower = 0;
	uint32_t value = 0;

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
		Q81_CTL_SEM_SET_MAC_SERDES)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return (-1);
	}

	if (add_mac) {
		mac_upper = (mac_addr[0] << 8) | mac_addr[1];
		mac_lower = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
				(mac_addr[4] << 8) | mac_addr[5];
	}
	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
	if (ret)
		goto qls_config_mcast_mac_addr_exit;

	value = Q81_CTL_MAC_PROTO_AI_E |
			(index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
			Q81_CTL_MAC_PROTO_AI_TYPE_MCAST;

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);

	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
	if (ret)
		goto qls_config_mcast_mac_addr_exit;

	value = Q81_CTL_MAC_PROTO_AI_E |
			(index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
			Q81_CTL_MAC_PROTO_AI_TYPE_MCAST | 0x1;

	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);

qls_config_mcast_mac_addr_exit:
	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);

	return (ret);
}

/*
 * Name: qls_wait_for_route_idx_ready
 * Function: Waits for the Routing Index Register to signal readiness for
 *	the requested operation.
 */
static int
qls_wait_for_route_idx_ready(qla_host_t *ha, uint32_t op)
{
	uint32_t data32;
	uint32_t count = 3;

	while (count--) {
		data32 = READ_REG32(ha, Q81_CTL_ROUTING_INDEX);

		if (data32 & op)
			return (0);

		QLA_USEC_DELAY(100);
	}
	ha->qla_initiate_recovery = 1;
	return (-1);
}

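/*
 * Name: qls_load_route_idx_reg
 * Function: Loads one routing-table entry: waits for the index register to
 *	become ready, then writes the index and data registers. The caller
 *	must hold the routing index/data register semaphore.
 */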
static int
qls_load_route_idx_reg(qla_host_t *ha, uint32_t index, uint32_t data)
{
	int ret = 0;

	ret = qls_wait_for_route_idx_ready(ha, Q81_CTL_RI_MW);

	if (ret) {
		device_printf(ha->pci_dev, "%s: [0x%08x, 0x%08x] failed\n",
			__func__, index, data);
		goto qls_load_route_idx_reg_exit;
	}

	WRITE_REG32(ha, Q81_CTL_ROUTING_INDEX, index);
	WRITE_REG32(ha, Q81_CTL_ROUTING_DATA, data);

qls_load_route_idx_reg_exit:
	return (ret);
}

static int
qls_load_route_idx_reg_locked(qla_host_t *ha, uint32_t index, uint32_t data)
{
	int ret = 0;

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
		Q81_CTL_SEM_SET_RIDX_DATAREG)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return (-1);
	}

	ret = qls_load_route_idx_reg(ha, index, data);

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);

	return (ret);
}

static int
qls_clear_routing_table(qla_host_t *ha)
{
	int i, ret = 0;

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
		Q81_CTL_SEM_SET_RIDX_DATAREG)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return (-1);
	}

	for (i = 0; i < 16; i++) {
		ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_TYPE_NICQMASK|
			(i << 8) | Q81_CTL_RI_DST_DFLTQ), 0);
		if (ret)
			break;
	}

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);

	return (ret);
}

int
qls_set_promisc(qla_host_t *ha)
{
	int ret;

	ret = qls_load_route_idx_reg_locked(ha,
			(Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
			Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ),
			Q81_CTL_RD_VALID_PKT);
	return (ret);
}

void
qls_reset_promisc(qla_host_t *ha)
{
	(void)qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
			Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ), 0);
	return;
}

int
qls_set_allmulti(qla_host_t *ha)
{
	int ret;

	ret = qls_load_route_idx_reg_locked(ha,
			(Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
			Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ),
			Q81_CTL_RD_MCAST);
	return (ret);
}

void
qls_reset_allmulti(qla_host_t *ha)
{
	(void)qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
			Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ), 0);
	return;
}

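/*
 * Name: qls_init_fw_routing_table
 * Function: Programs the default frame routing rules: errored frames are
 *	dropped, broadcasts and registered multicasts go to the default
 *	completion queue, RSS matches are spread across the completion
 *	queues when more than one rx ring is enabled, and unicast CAM hits
 *	go to the default queue.
 */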
static int
qls_init_fw_routing_table(qla_host_t *ha)
{
	int ret = 0;

	ret = qls_clear_routing_table(ha);
	if (ret)
		return (-1);

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
		Q81_CTL_SEM_SET_RIDX_DATAREG)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return (-1);
	}

	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DROP |
			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_ALL_ERROR),
			Q81_CTL_RD_ERROR_PKT);
	if (ret)
		goto qls_init_fw_routing_table_exit;

	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_BCAST),
			Q81_CTL_RD_BCAST);
	if (ret)
		goto qls_init_fw_routing_table_exit;

	if (ha->num_rx_rings > 1) {
		ret = qls_load_route_idx_reg(ha,
				(Q81_CTL_RI_E | Q81_CTL_RI_DST_RSS |
				Q81_CTL_RI_TYPE_NICQMASK |
				Q81_CTL_RI_IDX_RSS_MATCH),
				Q81_CTL_RD_RSS_MATCH);
		if (ret)
			goto qls_init_fw_routing_table_exit;
	}

	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_MCAST_MATCH),
			Q81_CTL_RD_MCAST_REG_MATCH);
	if (ret)
		goto qls_init_fw_routing_table_exit;

	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_CAM_HIT),
			Q81_CTL_RD_CAM_HIT);
	if (ret)
		goto qls_init_fw_routing_table_exit;

qls_init_fw_routing_table_exit:
	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
	return (ret);
}

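/*
 * Name: qls_tx_tso_chksum
 * Function: Fills the TSO/checksum fields of the transmit IOCB from the
 *	frame's Ethernet and IP/IPv6 headers. For TSO the TCP checksum is
 *	seeded with the pseudo-header sum so the hardware can complete it.
 */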
static int
qls_tx_tso_chksum(qla_host_t *ha, struct mbuf *mp, q81_tx_tso_t *tx_mac)
{
        struct ether_vlan_header *eh;
        struct ip *ip;
        struct ip6_hdr *ip6;
	struct tcphdr *th;
        uint32_t ehdrlen, ip_hlen;
	int ret = 0;
        uint16_t etype;
        device_t dev;
        uint8_t buf[sizeof(struct ip6_hdr)];

        dev = ha->pci_dev;

        eh = mtod(mp, struct ether_vlan_header *);

        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
                etype = ntohs(eh->evl_proto);
        } else {
                ehdrlen = ETHER_HDR_LEN;
                etype = ntohs(eh->evl_encap_proto);
        }

        switch (etype) {
                case ETHERTYPE_IP:
                        ip = (struct ip *)(mp->m_data + ehdrlen);

                        ip_hlen = sizeof (struct ip);

                        if (mp->m_len < (ehdrlen + ip_hlen)) {
                                m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
                                ip = (struct ip *)buf;
                        }
			tx_mac->opcode = Q81_IOCB_TX_TSO;
			tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV4;

			tx_mac->phdr_offsets = ehdrlen;

			tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
							Q81_TX_TSO_PHDR_SHIFT);

			ip->ip_sum = 0;

			if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
				tx_mac->flags |= Q81_TX_TSO_FLAGS_LSO;

				th = (struct tcphdr *)(ip + 1);

				th->th_sum = in_pseudo(ip->ip_src.s_addr,
						ip->ip_dst.s_addr,
						htons(IPPROTO_TCP));
				tx_mac->mss = mp->m_pkthdr.tso_segsz;
				tx_mac->phdr_length = ip_hlen + ehdrlen +
							(th->th_off << 2);
				break;
			}
			tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC;

                        if (ip->ip_p == IPPROTO_TCP) {
				tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
                        } else if (ip->ip_p == IPPROTO_UDP) {
				tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
                        }
                break;

                case ETHERTYPE_IPV6:
                        ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);

                        ip_hlen = sizeof(struct ip6_hdr);

                        if (mp->m_len < (ehdrlen + ip_hlen)) {
                                m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
                                        buf);
                                ip6 = (struct ip6_hdr *)buf;
                        }

			tx_mac->opcode = Q81_IOCB_TX_TSO;
			tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV6;
			tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC;

			tx_mac->phdr_offsets = ehdrlen;
			tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
							Q81_TX_TSO_PHDR_SHIFT);

                        if (ip6->ip6_nxt == IPPROTO_TCP) {
				tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
                        } else if (ip6->ip6_nxt == IPPROTO_UDP) {
				tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
                        }
                break;

                default:
                        ret = -1;
                break;
        }

        return (ret);
}

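/*
 * Name: qls_hw_tx_done
 * Function: Recomputes a transmit ring's free-slot count from its consumer
 *	(txr_done) and producer (txr_next) indices, handling wrap-around.
 *	For example, with a 1024-entry ring, txr_done = 4 and txr_next = 1020
 *	leave (1024 + 4 - 1020) = 8 free slots. Returns -1 when
 *	QLA_TX_MIN_FREE or fewer slots remain.
 */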
#define QLA_TX_MIN_FREE 2
int
qls_hw_tx_done(qla_host_t *ha, uint32_t txr_idx)
{
	uint32_t txr_done, txr_next;

	txr_done = ha->tx_ring[txr_idx].txr_done;
	txr_next = ha->tx_ring[txr_idx].txr_next;

	if (txr_done == txr_next) {
		ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS;
	} else if (txr_done > txr_next) {
		ha->tx_ring[txr_idx].txr_free = txr_done - txr_next;
	} else {
		ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS +
			txr_done - txr_next;
	}

	if (ha->tx_ring[txr_idx].txr_free <= QLA_TX_MIN_FREE)
		return (-1);

	return (0);
}

/*
 * Name: qls_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 *	offload. If neither of these criteria is met, it is transmitted
 *	as a regular Ethernet frame.
 */
int
qls_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
	uint32_t txr_next, struct mbuf *mp, uint32_t txr_idx)
{
        q81_tx_mac_t *tx_mac;
	q81_txb_desc_t *tx_desc;
        uint32_t total_length = 0;
        uint32_t i;
        device_t dev;
	int ret = 0;

	dev = ha->pci_dev;

        total_length = mp->m_pkthdr.len;

        if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
                device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
                        __func__, total_length);
                return (-1);
        }

	if (ha->tx_ring[txr_idx].txr_free <= (NUM_TX_DESCRIPTORS >> 2)) {
		if (qls_hw_tx_done(ha, txr_idx)) {
			device_printf(dev, "%s: tx_free[%d] = %d\n",
				__func__, txr_idx,
				ha->tx_ring[txr_idx].txr_free);
			return (-1);
		}
	}

	tx_mac = (q81_tx_mac_t *)&ha->tx_ring[txr_idx].wq_vaddr[txr_next];

	bzero(tx_mac, sizeof(q81_tx_mac_t));

	if ((mp->m_pkthdr.csum_flags &
			(CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO)) != 0) {
		ret = qls_tx_tso_chksum(ha, mp, (q81_tx_tso_t *)tx_mac);
		if (ret)
			return (EINVAL);

		if (mp->m_pkthdr.csum_flags & CSUM_TSO)
			ha->tx_ring[txr_idx].tx_tso_frames++;
		else
			ha->tx_ring[txr_idx].tx_frames++;

	} else {
		tx_mac->opcode = Q81_IOCB_TX_MAC;
	}

	if (mp->m_flags & M_VLANTAG) {
		tx_mac->vlan_tci = mp->m_pkthdr.ether_vtag;
		tx_mac->vlan_off |= Q81_TX_MAC_VLAN_OFF_V;

		ha->tx_ring[txr_idx].tx_vlan_frames++;
	}

	tx_mac->frame_length = total_length;

	tx_mac->tid_lo = txr_next;

	if (nsegs <= MAX_TX_MAC_DESC) {
		QL_DPRINT2((dev, "%s: 1 [%d, %d]\n", __func__, total_length,
			tx_mac->tid_lo));

		for (i = 0; i < nsegs; i++) {
			tx_mac->txd[i].baddr = segs->ds_addr;
			tx_mac->txd[i].length = segs->ds_len;
			segs++;
		}
		tx_mac->txd[(nsegs - 1)].flags = Q81_RXB_DESC_FLAGS_E;

	} else {
		QL_DPRINT2((dev, "%s: 2 [%d, %d]\n", __func__, total_length,
			tx_mac->tid_lo));

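		/*
		 * Too many segments for the inline descriptors: point the
		 * first descriptor at this slot's outbound address list
		 * (OAL) buffer with Q81_RXB_DESC_FLAGS_C set so the
		 * remaining segments are taken from the OAL, and mark the
		 * last OAL element with Q81_RXB_DESC_FLAGS_E.
		 */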
		tx_mac->txd[0].baddr =
			ha->tx_ring[txr_idx].tx_buf[txr_next].oal_paddr;
		tx_mac->txd[0].length =
			nsegs * (sizeof(q81_txb_desc_t));
		tx_mac->txd[0].flags = Q81_RXB_DESC_FLAGS_C;

		tx_desc = ha->tx_ring[txr_idx].tx_buf[txr_next].oal_vaddr;

		for (i = 0; i < nsegs; i++) {
			tx_desc->baddr = segs->ds_addr;
			tx_desc->length = segs->ds_len;

			if (i == (nsegs - 1))
				tx_desc->flags = Q81_RXB_DESC_FLAGS_E;
			else
				tx_desc->flags = 0;

			segs++;
			tx_desc++;
		}
	}
	txr_next = (txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
	ha->tx_ring[txr_idx].txr_next = txr_next;

	ha->tx_ring[txr_idx].txr_free--;

	Q81_WR_WQ_PROD_IDX(txr_idx, txr_next);

	return (0);
}

/*
 * Name: qls_del_hw_if
 * Function: Destroys the hardware specific entities corresponding to an
 *	Ethernet Interface
 */
void
qls_del_hw_if(qla_host_t *ha)
{
	uint32_t value;
	int i;

	if (ha->hw_init == 0) {
		qls_hw_reset(ha);
		return;
	}

	for (i = 0; i < ha->num_tx_rings; i++) {
		Q81_SET_WQ_INVALID(i);
	}
	for (i = 0; i < ha->num_rx_rings; i++) {
		Q81_SET_CQ_INVALID(i);
	}

	for (i = 0; i < ha->num_rx_rings; i++) {
		Q81_DISABLE_INTR(ha, i); /* MSI-x i */
	}

	value = (Q81_CTL_INTRE_IHD << Q81_CTL_INTRE_MASK_SHIFT);
	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

	value = (Q81_CTL_INTRE_EI << Q81_CTL_INTRE_MASK_SHIFT);
	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
	ha->flags.intr_enable = 0;

	qls_hw_reset(ha);

	return;
}

/*
 * Name: qls_init_hw_if
 * Function: Creates the hardware specific entities corresponding to an
 *	Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
 *	corresponding to the interface. Enables LRO if allowed.
 */
int
qls_init_hw_if(qla_host_t *ha)
{
	device_t	dev;
	uint32_t	value;
	int		ret = 0;
	int		i;

	QL_DPRINT2((ha->pci_dev, "%s:enter\n", __func__));

	dev = ha->pci_dev;

	ret = qls_hw_reset(ha);
	if (ret)
		goto qls_init_hw_if_exit;

	ha->vm_pgsize = 4096;

	/* Enable FAE and EFE bits in System Register */
	value = Q81_CTL_SYSTEM_ENABLE_FAE | Q81_CTL_SYSTEM_ENABLE_EFE;
	value = (value << Q81_CTL_SYSTEM_MASK_SHIFT) | value;

	WRITE_REG32(ha, Q81_CTL_SYSTEM, value);

	/* Set Default Completion Queue_ID in NIC Rcv Configuration Register */
	value = (Q81_CTL_NIC_RCVC_DCQ_MASK << Q81_CTL_NIC_RCVC_MASK_SHIFT);
	WRITE_REG32(ha, Q81_CTL_NIC_RCV_CONFIG, value);

	/* Function Specific Control Register - Set Page Size and Enable NIC */
	value = Q81_CTL_FUNC_SPECIFIC_FE |
		Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_MASK |
		Q81_CTL_FUNC_SPECIFIC_EPC_O |
		Q81_CTL_FUNC_SPECIFIC_EPC_I |
		Q81_CTL_FUNC_SPECIFIC_EC;
	value = (value << Q81_CTL_FUNC_SPECIFIC_MASK_SHIFT) |
                        Q81_CTL_FUNC_SPECIFIC_FE |
			Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_4K |
			Q81_CTL_FUNC_SPECIFIC_EPC_O |
			Q81_CTL_FUNC_SPECIFIC_EPC_I |
			Q81_CTL_FUNC_SPECIFIC_EC;

	WRITE_REG32(ha, Q81_CTL_FUNC_SPECIFIC, value);

	/* Interrupt Mask Register */
	value = Q81_CTL_INTRM_PI;
	value = (value << Q81_CTL_INTRM_MASK_SHIFT) | value;

	WRITE_REG32(ha, Q81_CTL_INTR_MASK, value);

	/* Initialize Completion Queues */
	for (i = 0; i < ha->num_rx_rings; i++) {
		ret = qls_init_comp_queue(ha, i);
		if (ret)
			goto qls_init_hw_if_exit;
	}

	if (ha->num_rx_rings > 1) {
		ret = qls_init_rss(ha);
		if (ret)
			goto qls_init_hw_if_exit;
	}

	/* Initialize Work Queues */

	for (i = 0; i < ha->num_tx_rings; i++) {
		ret = qls_init_work_queue(ha, i);
		if (ret)
			goto qls_init_hw_if_exit;
	}

	if (ret)
		goto qls_init_hw_if_exit;

	/* Set up CAM RAM with MAC Address */
	ret = qls_config_unicast_mac_addr(ha, 1);
	if (ret)
		goto qls_init_hw_if_exit;

	ret = qls_hw_add_all_mcast(ha);
	if (ret)
		goto qls_init_hw_if_exit;

	/* Initialize Firmware Routing Table */
	ret = qls_init_fw_routing_table(ha);
	if (ret)
		goto qls_init_hw_if_exit;

	/* Get Chip Revision ID */
	ha->rev_id = READ_REG32(ha, Q81_CTL_REV_ID);

	/* Enable Global Interrupt */
	value = Q81_CTL_INTRE_EI;
	value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;

	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

	/* Enable Interrupt Handshake Disable */
	value = Q81_CTL_INTRE_IHD;
	value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;

	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

	/* Enable Completion Interrupts */

	ha->flags.intr_enable = 1;

	for (i = 0; i < ha->num_rx_rings; i++) {
		Q81_ENABLE_INTR(ha, i); /* MSI-x i */
	}

	ha->hw_init = 1;

	qls_mbx_get_link_status(ha);

	QL_DPRINT2((ha->pci_dev, "%s:rxr [0x%08x]\n", __func__,
		ha->rx_ring[0].cq_db_offset));
	QL_DPRINT2((ha->pci_dev, "%s:txr [0x%08x]\n", __func__,
		ha->tx_ring[0].wq_db_offset));

	for (i = 0; i < ha->num_rx_rings; i++) {
		Q81_WR_CQ_CONS_IDX(i, 0);
		Q81_WR_LBQ_PROD_IDX(i, ha->rx_ring[i].lbq_in);
		Q81_WR_SBQ_PROD_IDX(i, ha->rx_ring[i].sbq_in);

		QL_DPRINT2((dev, "%s: [wq_idx, cq_idx, lbq_idx, sbq_idx]"
			"[0x%08x, 0x%08x, 0x%08x, 0x%08x]\n", __func__,
			Q81_RD_WQ_IDX(i), Q81_RD_CQ_IDX(i), Q81_RD_LBQ_IDX(i),
			Q81_RD_SBQ_IDX(i)));
	}

	for (i = 0; i < ha->num_rx_rings; i++) {
		Q81_SET_CQ_VALID(i);
	}

qls_init_hw_if_exit:
	QL_DPRINT2((ha->pci_dev, "%s:exit\n", __func__));
	return (ret);
}

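/*
 * Name: qls_wait_for_config_reg_bits
 * Function: Polls the Configuration Register until the given bits reach the
 *	expected value (up to ~300us total); on timeout the driver is flagged
 *	to initiate recovery.
 */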
static int
qls_wait_for_config_reg_bits(qla_host_t *ha, uint32_t bits, uint32_t value)
{
	uint32_t data32;
	uint32_t count = 3;

	while (count--) {
		data32 = READ_REG32(ha, Q81_CTL_CONFIG);

		if ((data32 & bits) == value)
			return (0);

		QLA_USEC_DELAY(100);
	}
	ha->qla_initiate_recovery = 1;
	device_printf(ha->pci_dev, "%s: failed\n", __func__);
	return (-1);
}

static uint8_t q81_hash_key[] = {
			0xda, 0x56, 0x5a, 0x6d,
			0xc2, 0x0e, 0x5b, 0x25,
			0x3d, 0x25, 0x67, 0x41,
			0xb0, 0x8f, 0xa3, 0x43,
			0xcb, 0x2b, 0xca, 0xd0,
			0xb4, 0x30, 0x7b, 0xae,
			0xa3, 0x2d, 0xcb, 0x77,
			0x0c, 0xf2, 0x30, 0x80,
			0x3b, 0xb7, 0x42, 0x6a,
			0xfa, 0x01, 0xac, 0xbe };

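/*
 * Name: qls_init_rss
 * Function: Loads the RSS ICB: an indirection table (mask 0x3FF, i.e. 1024
 *	hash buckets) that spreads flows across the completion queues, plus
 *	the IPv4/IPv6 hash keys. Note the cq_id assignment assumes
 *	num_rx_rings is a power of two.
 */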
static int
qls_init_rss(qla_host_t *ha)
{
	q81_rss_icb_t	*rss_icb;
	int		ret = 0;
	int		i;
	uint32_t	value;

	rss_icb = ha->rss_dma.dma_b;

	bzero(rss_icb, sizeof (q81_rss_icb_t));

	rss_icb->flags_base_cq_num = Q81_RSS_ICB_FLAGS_L4K |
				Q81_RSS_ICB_FLAGS_L6K | Q81_RSS_ICB_FLAGS_LI |
				Q81_RSS_ICB_FLAGS_LB | Q81_RSS_ICB_FLAGS_LM |
				Q81_RSS_ICB_FLAGS_RT4 | Q81_RSS_ICB_FLAGS_RT6;

	rss_icb->mask = 0x3FF;

	for (i = 0; i < Q81_RSS_ICB_NUM_INDTBL_ENTRIES; i++) {
		rss_icb->cq_id[i] = (i & (ha->num_rx_rings - 1));
	}

	memcpy(rss_icb->ipv6_rss_hash_key, q81_hash_key, 40);
	memcpy(rss_icb->ipv4_rss_hash_key, q81_hash_key, 16);

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);

	if (ret)
		goto qls_init_rss_exit;

	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);

	if (ret) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		goto qls_init_rss_exit;
	}

	value = (uint32_t)ha->rss_dma.dma_addr;
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);

	value = (uint32_t)(ha->rss_dma.dma_addr >> 32);
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);

	value = (Q81_CTL_CONFIG_LR << Q81_CTL_CONFIG_MASK_SHIFT) |
			Q81_CTL_CONFIG_LR;

	WRITE_REG32(ha, Q81_CTL_CONFIG, value);

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);

qls_init_rss_exit:
	return (ret);
}

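/*
 * Name: qls_init_comp_queue
 * Function: Loads the completion-queue ICB for ring `cid': base and index
 *	register addresses, interrupt coalescing (pkt_idelay/idelay), and the
 *	large/small buffer-queue address tables. The doorbell for CQ `cid'
 *	lives at page (128 + cid) of the doorbell region.
 */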
static int
qls_init_comp_queue(qla_host_t *ha, int cid)
{
	q81_cq_icb_t	*cq_icb;
	qla_rx_ring_t	*rxr;
	int		ret = 0;
	uint32_t	value;

	rxr = &ha->rx_ring[cid];

	rxr->cq_db_offset = ha->vm_pgsize * (128 + cid);

	cq_icb = rxr->cq_icb_vaddr;

	bzero(cq_icb, sizeof (q81_cq_icb_t));

	cq_icb->msix_vector = cid;
	cq_icb->flags = Q81_CQ_ICB_FLAGS_LC |
			Q81_CQ_ICB_FLAGS_LI |
			Q81_CQ_ICB_FLAGS_LL |
			Q81_CQ_ICB_FLAGS_LS |
			Q81_CQ_ICB_FLAGS_LV;

	cq_icb->length_v = NUM_CQ_ENTRIES;

	cq_icb->cq_baddr_lo = (rxr->cq_base_paddr & 0xFFFFFFFF);
	cq_icb->cq_baddr_hi = (rxr->cq_base_paddr >> 32) & 0xFFFFFFFF;

	cq_icb->cqi_addr_lo = (rxr->cqi_paddr & 0xFFFFFFFF);
	cq_icb->cqi_addr_hi = (rxr->cqi_paddr >> 32) & 0xFFFFFFFF;

	cq_icb->pkt_idelay = 10;
	cq_icb->idelay = 100;

	cq_icb->lbq_baddr_lo = (rxr->lbq_addr_tbl_paddr & 0xFFFFFFFF);
	cq_icb->lbq_baddr_hi = (rxr->lbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;

	cq_icb->lbq_bsize = QLA_LGB_SIZE;
	cq_icb->lbq_length = QLA_NUM_LGB_ENTRIES;

	cq_icb->sbq_baddr_lo = (rxr->sbq_addr_tbl_paddr & 0xFFFFFFFF);
	cq_icb->sbq_baddr_hi = (rxr->sbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;

	cq_icb->sbq_bsize = (uint16_t)ha->msize;
	cq_icb->sbq_length = QLA_NUM_SMB_ENTRIES;

	QL_DUMP_CQ(ha);

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);

	if (ret)
		goto qls_init_comp_queue_exit;

	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);

	if (ret) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		goto qls_init_comp_queue_exit;
	}

	value = (uint32_t)rxr->cq_icb_paddr;
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);

	value = (uint32_t)(rxr->cq_icb_paddr >> 32);
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);

	value = Q81_CTL_CONFIG_LCQ | Q81_CTL_CONFIG_Q_NUM_MASK;
	value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LCQ;
	value |= (cid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
	WRITE_REG32(ha, Q81_CTL_CONFIG, value);

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);

	rxr->cq_next = 0;
	rxr->lbq_next = rxr->lbq_free = 0;
	rxr->sbq_next = rxr->sbq_free = 0;
	rxr->rx_free = rxr->rx_next = 0;
	rxr->lbq_in = (QLA_NUM_LGB_ENTRIES - 1) & ~0xF;
	rxr->sbq_in = (QLA_NUM_SMB_ENTRIES - 1) & ~0xF;

qls_init_comp_queue_exit:
	return (ret);
}

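/*
 * Name: qls_init_work_queue
 * Function: Loads the work-queue (transmit) ICB for ring `wid' and maps its
 *	doorbell at page `wid' of the doorbell region; the consumer index is
 *	written back by hardware to txr_cons_paddr.
 */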
static int
qls_init_work_queue(qla_host_t *ha, int wid)
{
	q81_wq_icb_t	*wq_icb;
	qla_tx_ring_t	*txr;
	int		ret = 0;
	uint32_t	value;

	txr = &ha->tx_ring[wid];

	txr->wq_db_addr = (struct resource *)((uint8_t *)ha->pci_reg1
						+ (ha->vm_pgsize * wid));

	txr->wq_db_offset = (ha->vm_pgsize * wid);

	wq_icb = txr->wq_icb_vaddr;
	bzero(wq_icb, sizeof (q81_wq_icb_t));

	wq_icb->length_v = NUM_TX_DESCRIPTORS |
				Q81_WQ_ICB_VALID;

	wq_icb->flags = Q81_WQ_ICB_FLAGS_LO | Q81_WQ_ICB_FLAGS_LI |
			Q81_WQ_ICB_FLAGS_LB | Q81_WQ_ICB_FLAGS_LC;

	wq_icb->wqcqid_rss = wid;

	wq_icb->baddr_lo = txr->wq_paddr & 0xFFFFFFFF;
	wq_icb->baddr_hi = (txr->wq_paddr >> 32) & 0xFFFFFFFF;

	wq_icb->ci_addr_lo = txr->txr_cons_paddr & 0xFFFFFFFF;
	wq_icb->ci_addr_hi = (txr->txr_cons_paddr >> 32) & 0xFFFFFFFF;

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);

	if (ret)
		goto qls_init_wq_exit;

	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);

	if (ret) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		goto qls_init_wq_exit;
	}

	value = (uint32_t)txr->wq_icb_paddr;
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);

	value = (uint32_t)(txr->wq_icb_paddr >> 32);
	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);

	value = Q81_CTL_CONFIG_LRQ | Q81_CTL_CONFIG_Q_NUM_MASK;
	value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LRQ;
	value |= (wid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
	WRITE_REG32(ha, Q81_CTL_CONFIG, value);

	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);

	txr->txr_free = NUM_TX_DESCRIPTORS;
	txr->txr_next = 0;
	txr->txr_done = 0;

qls_init_wq_exit:
	return (ret);
}

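/*
 * Name: qls_hw_add_all_mcast
 * Function: Re-programs every valid entry of the software multicast table
 *	into the hardware. An all-zeroes address marks a free slot, so a
 *	genuinely all-zero multicast address cannot be stored.
 */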
static int
qls_hw_add_all_mcast(qla_host_t *ha)
{
	int i, nmcast;

	nmcast = ha->nmcast;

	for (i = 0; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
		if ((ha->mcast[i].addr[0] != 0) ||
			(ha->mcast[i].addr[1] != 0) ||
			(ha->mcast[i].addr[2] != 0) ||
			(ha->mcast[i].addr[3] != 0) ||
			(ha->mcast[i].addr[4] != 0) ||
			(ha->mcast[i].addr[5] != 0)) {
			if (qls_config_mcast_mac_addr(ha, ha->mcast[i].addr,
				1, i)) {
                		device_printf(ha->pci_dev, "%s: failed\n",
					__func__);
				return (-1);
			}

			nmcast--;
		}
	}
	return (0);
}

static int
qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
{
	int i;

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
		if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
			return (0); /* it has already been added */
	}

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
		if ((ha->mcast[i].addr[0] == 0) &&
			(ha->mcast[i].addr[1] == 0) &&
			(ha->mcast[i].addr[2] == 0) &&
			(ha->mcast[i].addr[3] == 0) &&
			(ha->mcast[i].addr[4] == 0) &&
			(ha->mcast[i].addr[5] == 0)) {
			if (qls_config_mcast_mac_addr(ha, mta, 1, i))
				return (-1);

			bcopy(mta, ha->mcast[i].addr, Q8_MAC_ADDR_LEN);
			ha->nmcast++;

			return (0);
		}
	}
	return (0);
}

static int
qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
{
	int i;

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
		if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
			if (qls_config_mcast_mac_addr(ha, mta, 0, i))
				return (-1);

			ha->mcast[i].addr[0] = 0;
			ha->mcast[i].addr[1] = 0;
			ha->mcast[i].addr[2] = 0;
			ha->mcast[i].addr[3] = 0;
			ha->mcast[i].addr[4] = 0;
			ha->mcast[i].addr[5] = 0;

			ha->nmcast--;

			return (0);
		}
	}
	return (0);
}

/*
 * Name: qls_hw_set_multi
 * Function: Sets the Multicast Addresses provided by the host O.S. into the
 *	hardware (for the given interface)
 */
void
qls_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
	uint32_t add_mac)
{
	int i;

	for (i = 0; i < mcnt; i++) {
		if (add_mac) {
			if (qls_hw_add_mcast(ha, mta))
				break;
		} else {
			if (qls_hw_del_mcast(ha, mta))
				break;
		}

		mta += Q8_MAC_ADDR_LEN;
	}
	return;
}

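/*
 * Name: qls_update_link_state
 * Function: Derives link state from the Status Register (PL0 for even PCI
 *	functions, PL1 for odd ones) and reports transitions to the stack.
 */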
void
qls_update_link_state(qla_host_t *ha)
{
	uint32_t link_state;
	uint32_t prev_link_state;

	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		ha->link_up = 0;
		return;
	}
	link_state = READ_REG32(ha, Q81_CTL_STATUS);

	prev_link_state = ha->link_up;

	if ((ha->pci_func & 0x1) == 0)
		ha->link_up = ((link_state & Q81_CTL_STATUS_PL0) ? 1 : 0);
	else
		ha->link_up = ((link_state & Q81_CTL_STATUS_PL1) ? 1 : 0);

	if (prev_link_state != ha->link_up) {
		if (ha->link_up) {
			if_link_state_change(ha->ifp, LINK_STATE_UP);
		} else {
			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
		}
	}
	return;
}

static void
qls_free_tx_ring_dma(qla_host_t *ha, int r_idx)
{
	if (ha->tx_ring[r_idx].flags.wq_dma) {
		qls_free_dmabuf(ha, &ha->tx_ring[r_idx].wq_dma);
		ha->tx_ring[r_idx].flags.wq_dma = 0;
	}

	if (ha->tx_ring[r_idx].flags.privb_dma) {
		qls_free_dmabuf(ha, &ha->tx_ring[r_idx].privb_dma);
		ha->tx_ring[r_idx].flags.privb_dma = 0;
	}
	return;
}

static void
qls_free_tx_dma(qla_host_t *ha)
{
	int i, j;
	qla_tx_buf_t *txb;

	for (i = 0; i < ha->num_tx_rings; i++) {
		qls_free_tx_ring_dma(ha, i);

		for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
			txb = &ha->tx_ring[i].tx_buf[j];

			if (txb->map) {
				bus_dmamap_destroy(ha->tx_tag, txb->map);
			}
		}
	}

        if (ha->tx_tag != NULL) {
                bus_dma_tag_destroy(ha->tx_tag);
                ha->tx_tag = NULL;
        }

	return;
}

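/*
 * Name: qls_alloc_tx_ring_dma
 * Function: Allocates the work-queue ring plus a private buffer that
 *	carries the WQ ICB, the hardware-updated consumer index, and one
 *	OAL block per transmit descriptor.
 */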
static int
qls_alloc_tx_ring_dma(qla_host_t *ha, int ridx)
{
	int		ret = 0, i;
	uint8_t		*v_addr;
	bus_addr_t	p_addr;
	qla_tx_buf_t	*txb;
	device_t	dev = ha->pci_dev;

	ha->tx_ring[ridx].wq_dma.alignment = 8;
	ha->tx_ring[ridx].wq_dma.size =
		NUM_TX_DESCRIPTORS * (sizeof (q81_tx_cmd_t));

	ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].wq_dma);

	if (ret) {
		device_printf(dev, "%s: [%d] txr failed\n", __func__, ridx);
		goto qls_alloc_tx_ring_dma_exit;
	}
	ha->tx_ring[ridx].flags.wq_dma = 1;

	ha->tx_ring[ridx].privb_dma.alignment = 8;
	ha->tx_ring[ridx].privb_dma.size = QLA_TX_PRIVATE_BSIZE;

	ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].privb_dma);

	if (ret) {
		device_printf(dev, "%s: [%d] oalb failed\n", __func__, ridx);
		goto qls_alloc_tx_ring_dma_exit;
	}

	ha->tx_ring[ridx].flags.privb_dma = 1;

	ha->tx_ring[ridx].wq_vaddr = ha->tx_ring[ridx].wq_dma.dma_b;
	ha->tx_ring[ridx].wq_paddr = ha->tx_ring[ridx].wq_dma.dma_addr;

	v_addr = ha->tx_ring[ridx].privb_dma.dma_b;
	p_addr = ha->tx_ring[ridx].privb_dma.dma_addr;

	ha->tx_ring[ridx].wq_icb_vaddr = v_addr;
	ha->tx_ring[ridx].wq_icb_paddr = p_addr;

	ha->tx_ring[ridx].txr_cons_vaddr =
		(uint32_t *)(v_addr + (PAGE_SIZE >> 1));
	ha->tx_ring[ridx].txr_cons_paddr = p_addr + (PAGE_SIZE >> 1);

	v_addr = v_addr + (PAGE_SIZE >> 1);
	p_addr = p_addr + (PAGE_SIZE >> 1);

	txb = ha->tx_ring[ridx].tx_buf;

	for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
		txb[i].oal_vaddr = v_addr;
		txb[i].oal_paddr = p_addr;

		v_addr = v_addr + QLA_OAL_BLK_SIZE;
		p_addr = p_addr + QLA_OAL_BLK_SIZE;
	}

qls_alloc_tx_ring_dma_exit:
	return (ret);
}

static int
qls_alloc_tx_dma(qla_host_t *ha)
{
	int	i, j;
	int	ret = 0;
	qla_tx_buf_t *txb;

        if (bus_dma_tag_create(NULL,    /* parent */
                1, 0,    /* alignment, bounds */
                BUS_SPACE_MAXADDR,       /* lowaddr */
                BUS_SPACE_MAXADDR,       /* highaddr */
                NULL, NULL,      /* filter, filterarg */
                QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
                QLA_MAX_SEGMENTS,        /* nsegments */
                PAGE_SIZE,        /* maxsegsize */
                BUS_DMA_ALLOCNOW,        /* flags */
                NULL,    /* lockfunc */
                NULL,    /* lockfuncarg */
                &ha->tx_tag)) {
                device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
                        __func__);
                return (ENOMEM);
        }

	for (i = 0; i < ha->num_tx_rings; i++) {
		ret = qls_alloc_tx_ring_dma(ha, i);

		if (ret) {
			qls_free_tx_dma(ha);
			break;
		}

		for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
			txb = &ha->tx_ring[i].tx_buf[j];

			ret = bus_dmamap_create(ha->tx_tag,
				BUS_DMA_NOWAIT, &txb->map);
			if (ret) {
				ha->err_tx_dmamap_create++;
				device_printf(ha->pci_dev,
				"%s: bus_dmamap_create failed[%d, %d, %d]\n",
				__func__, ret, i, j);

				qls_free_tx_dma(ha);

                		return (ret);
       			}
		}
	}

	return (ret);
}

static void
qls_free_rss_dma(qla_host_t *ha)
{
	qls_free_dmabuf(ha, &ha->rss_dma);
	ha->flags.rss_dma = 0;
}

static int
qls_alloc_rss_dma(qla_host_t *ha)
{
	int ret = 0;

	ha->rss_dma.alignment = 4;
	ha->rss_dma.size = PAGE_SIZE;

	ret = qls_alloc_dmabuf(ha, &ha->rss_dma);

	if (ret)
		device_printf(ha->pci_dev, "%s: failed\n", __func__);
	else
		ha->flags.rss_dma = 1;

	return (ret);
}

static void
qls_free_mpi_dma(qla_host_t *ha)
{
	qls_free_dmabuf(ha, &ha->mpi_dma);
	ha->flags.mpi_dma = 0;
}

static int
qls_alloc_mpi_dma(qla_host_t *ha)
{
	int ret = 0;

	ha->mpi_dma.alignment = 4;
	ha->mpi_dma.size = (0x4000 * 4);

	ret = qls_alloc_dmabuf(ha, &ha->mpi_dma);
	if (ret)
		device_printf(ha->pci_dev, "%s: failed\n", __func__);
	else
		ha->flags.mpi_dma = 1;

	return (ret);
}

static void
qls_free_rx_ring_dma(qla_host_t *ha, int ridx)
{
	if (ha->rx_ring[ridx].flags.cq_dma) {
		qls_free_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
		ha->rx_ring[ridx].flags.cq_dma = 0;
	}

	if (ha->rx_ring[ridx].flags.lbq_dma) {
		qls_free_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
		ha->rx_ring[ridx].flags.lbq_dma = 0;
	}

	if (ha->rx_ring[ridx].flags.sbq_dma) {
		qls_free_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
		ha->rx_ring[ridx].flags.sbq_dma = 0;
	}

	if (ha->rx_ring[ridx].flags.lb_dma) {
		qls_free_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
		ha->rx_ring[ridx].flags.lb_dma = 0;
	}
	return;
}

static void
qls_free_rx_dma(qla_host_t *ha)
{
	int i;

	for (i = 0; i < ha->num_rx_rings; i++) {
		qls_free_rx_ring_dma(ha, i);
	}

        if (ha->rx_tag != NULL) {
                bus_dma_tag_destroy(ha->rx_tag);
                ha->rx_tag = NULL;
        }

	return;
}

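/*
 * Name: qls_alloc_rx_ring_dma
 * Function: Allocates the completion queue (with its ICB and index register
 *	in the trailing page), the large/small buffer queues with their
 *	address tables, and the large receive buffers, then seeds the buffer
 *	queue tables with the corresponding physical addresses.
 */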
static int
qls_alloc_rx_ring_dma(qla_host_t *ha, int ridx)
{
	int				i, ret = 0;
	uint8_t				*v_addr;
	bus_addr_t			p_addr;
	volatile q81_bq_addr_e_t	*bq_e;
	device_t			dev = ha->pci_dev;

	ha->rx_ring[ridx].cq_dma.alignment = 128;
	ha->rx_ring[ridx].cq_dma.size =
		(NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t))) + PAGE_SIZE;

	ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);

	if (ret) {
		device_printf(dev, "%s: [%d] cq failed\n", __func__, ridx);
		goto qls_alloc_rx_ring_dma_exit;
	}
	ha->rx_ring[ridx].flags.cq_dma = 1;

	ha->rx_ring[ridx].lbq_dma.alignment = 8;
	ha->rx_ring[ridx].lbq_dma.size = QLA_LGBQ_AND_TABLE_SIZE;

	ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);

	if (ret) {
		device_printf(dev, "%s: [%d] lbq failed\n", __func__, ridx);
		goto qls_alloc_rx_ring_dma_exit;
	}
	ha->rx_ring[ridx].flags.lbq_dma = 1;

	ha->rx_ring[ridx].sbq_dma.alignment = 8;
	ha->rx_ring[ridx].sbq_dma.size = QLA_SMBQ_AND_TABLE_SIZE;

	ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);

	if (ret) {
		device_printf(dev, "%s: [%d] sbq failed\n", __func__, ridx);
		goto qls_alloc_rx_ring_dma_exit;
	}
	ha->rx_ring[ridx].flags.sbq_dma = 1;

	ha->rx_ring[ridx].lb_dma.alignment = 8;
	ha->rx_ring[ridx].lb_dma.size = (QLA_LGB_SIZE * QLA_NUM_LGB_ENTRIES);

	ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
	if (ret) {
		device_printf(dev, "%s: [%d] lb failed\n", __func__, ridx);
		goto qls_alloc_rx_ring_dma_exit;
	}
	ha->rx_ring[ridx].flags.lb_dma = 1;

	bzero(ha->rx_ring[ridx].cq_dma.dma_b, ha->rx_ring[ridx].cq_dma.size);
	bzero(ha->rx_ring[ridx].lbq_dma.dma_b, ha->rx_ring[ridx].lbq_dma.size);
	bzero(ha->rx_ring[ridx].sbq_dma.dma_b, ha->rx_ring[ridx].sbq_dma.size);
	bzero(ha->rx_ring[ridx].lb_dma.dma_b, ha->rx_ring[ridx].lb_dma.size);

	/* completion queue */
	ha->rx_ring[ridx].cq_base_vaddr = ha->rx_ring[ridx].cq_dma.dma_b;
	ha->rx_ring[ridx].cq_base_paddr = ha->rx_ring[ridx].cq_dma.dma_addr;

	v_addr = ha->rx_ring[ridx].cq_dma.dma_b;
	p_addr = ha->rx_ring[ridx].cq_dma.dma_addr;

	v_addr = v_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
	p_addr = p_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));

	/* completion queue icb */
	ha->rx_ring[ridx].cq_icb_vaddr = v_addr;
	ha->rx_ring[ridx].cq_icb_paddr = p_addr;

	v_addr = v_addr + (PAGE_SIZE >> 2);
	p_addr = p_addr + (PAGE_SIZE >> 2);

	/* completion queue index register */
	ha->rx_ring[ridx].cqi_vaddr = (uint32_t *)v_addr;
	ha->rx_ring[ridx].cqi_paddr = p_addr;

	v_addr = ha->rx_ring[ridx].lbq_dma.dma_b;
	p_addr = ha->rx_ring[ridx].lbq_dma.dma_addr;

	/* large buffer queue address table */
	ha->rx_ring[ridx].lbq_addr_tbl_vaddr = v_addr;
	ha->rx_ring[ridx].lbq_addr_tbl_paddr = p_addr;

	/* large buffer queue */
	ha->rx_ring[ridx].lbq_vaddr = v_addr + PAGE_SIZE;
	ha->rx_ring[ridx].lbq_paddr = p_addr + PAGE_SIZE;

	v_addr = ha->rx_ring[ridx].sbq_dma.dma_b;
	p_addr = ha->rx_ring[ridx].sbq_dma.dma_addr;

	/* small buffer queue address table */
	ha->rx_ring[ridx].sbq_addr_tbl_vaddr = v_addr;
	ha->rx_ring[ridx].sbq_addr_tbl_paddr = p_addr;

	/* small buffer queue */
	ha->rx_ring[ridx].sbq_vaddr = v_addr + PAGE_SIZE;
	ha->rx_ring[ridx].sbq_paddr = p_addr + PAGE_SIZE;

	ha->rx_ring[ridx].lb_vaddr = ha->rx_ring[ridx].lb_dma.dma_b;
	ha->rx_ring[ridx].lb_paddr = ha->rx_ring[ridx].lb_dma.dma_addr;

	/* Initialize Large Buffer Queue Table */

	p_addr = ha->rx_ring[ridx].lbq_paddr;
	bq_e = ha->rx_ring[ridx].lbq_addr_tbl_vaddr;

	bq_e->addr_lo = p_addr & 0xFFFFFFFF;
	bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;

	p_addr = ha->rx_ring[ridx].lb_paddr;
	bq_e = ha->rx_ring[ridx].lbq_vaddr;

	for (i = 0; i < QLA_NUM_LGB_ENTRIES; i++) {
		bq_e->addr_lo = p_addr & 0xFFFFFFFF;
		bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;

		p_addr = p_addr + QLA_LGB_SIZE;
		bq_e++;
	}

	/* Initialize Small Buffer Queue Table */

	p_addr = ha->rx_ring[ridx].sbq_paddr;
	bq_e = ha->rx_ring[ridx].sbq_addr_tbl_vaddr;

	for (i = 0; i < (QLA_SBQ_SIZE / QLA_PAGE_SIZE); i++) {
		bq_e->addr_lo = p_addr & 0xFFFFFFFF;
		bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;

		p_addr = p_addr + QLA_PAGE_SIZE;
		bq_e++;
	}

qls_alloc_rx_ring_dma_exit:
	return (ret);
}

static int
qls_alloc_rx_dma(qla_host_t *ha)
{
	int	i;
	int	ret = 0;

        if (bus_dma_tag_create(NULL,    /* parent */
                        1, 0,    /* alignment, bounds */
                        BUS_SPACE_MAXADDR,       /* lowaddr */
                        BUS_SPACE_MAXADDR,       /* highaddr */
                        NULL, NULL,      /* filter, filterarg */
                        MJUM9BYTES,     /* maxsize */
                        1,        /* nsegments */
                        MJUM9BYTES,        /* maxsegsize */
                        BUS_DMA_ALLOCNOW,        /* flags */
                        NULL,    /* lockfunc */
                        NULL,    /* lockfuncarg */
                        &ha->rx_tag)) {
                device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
                        __func__);

                return (ENOMEM);
        }

	for (i = 0; i < ha->num_rx_rings; i++) {
		ret = qls_alloc_rx_ring_dma(ha, i);

		if (ret) {
			qls_free_rx_dma(ha);
			break;
		}
	}

	return (ret);
}

static int
qls_wait_for_flash_ready(qla_host_t *ha)
{
	uint32_t data32;
	uint32_t count = 3;

	while (count--) {
		data32 = READ_REG32(ha, Q81_CTL_FLASH_ADDR);

		if (data32 & Q81_CTL_FLASH_ADDR_ERR)
			goto qls_wait_for_flash_ready_exit;

		if (data32 & Q81_CTL_FLASH_ADDR_RDY)
			return (0);

		QLA_USEC_DELAY(100);
	}

qls_wait_for_flash_ready_exit:
	QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));

	return (-1);
}

/*
 * Name: qls_rd_flash32
 * Function: Read Flash Memory
 */
int
qls_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data)
{
	int ret;

	ret = qls_wait_for_flash_ready(ha);

	if (ret)
		return (ret);

	WRITE_REG32(ha, Q81_CTL_FLASH_ADDR, (addr | Q81_CTL_FLASH_ADDR_R));

	ret = qls_wait_for_flash_ready(ha);

	if (ret)
		return (ret);

	*data = READ_REG32(ha, Q81_CTL_FLASH_DATA);

	return (0);
}

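/*
 * Name: qls_flash_validate
 * Function: Checks the flash signature and verifies that the 16-bit words
 *	of the q81_flash_t image sum to zero.
 */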
static int
qls_flash_validate(qla_host_t *ha, const char *signature)
{
	uint16_t csum16 = 0;
	uint16_t *data16;
	int i;

	if (bcmp(ha->flash.id, signature, 4)) {
		QL_DPRINT1((ha->pci_dev, "%s: invalid signature "
			"%x:%x:%x:%x %s\n", __func__, ha->flash.id[0],
			ha->flash.id[1], ha->flash.id[2], ha->flash.id[3],
			signature));
		return (-1);
	}

	data16 = (uint16_t *)&ha->flash;

	for (i = 0; i < (sizeof (q81_flash_t) >> 1); i++) {
		csum16 += *data16++;
	}

	if (csum16) {
		QL_DPRINT1((ha->pci_dev, "%s: invalid checksum\n", __func__));
		return (-1);
	}
	return (0);
}

int
qls_rd_nic_params(qla_host_t *ha)
{
	int		i, ret = 0;
	uint32_t	faddr;
	uint32_t	*qflash;

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_FLASH, Q81_CTL_SEM_SET_FLASH)) {
		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
		return (-1);
	}

	if ((ha->pci_func & 0x1) == 0)
		faddr = Q81_F0_FLASH_OFFSET >> 2;
	else
		faddr = Q81_F1_FLASH_OFFSET >> 2;

	qflash = (uint32_t *)&ha->flash;

	for (i = 0; i < (sizeof(q81_flash_t) >> 2); i++) {
		ret = qls_rd_flash32(ha, faddr, qflash);

		if (ret)
			goto qls_rd_flash_data_exit;

		faddr++;
		qflash++;
	}

	QL_DUMP_BUFFER8(ha, __func__, (&ha->flash), (sizeof (q81_flash_t)));

	ret = qls_flash_validate(ha, Q81_FLASH_ID);

	if (ret)
		goto qls_rd_flash_data_exit;

	bcopy(ha->flash.mac_addr0, ha->mac_addr, ETHER_ADDR_LEN);

	QL_DPRINT1((ha->pci_dev, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
		__func__, ha->mac_addr[0], ha->mac_addr[1], ha->mac_addr[2],
		ha->mac_addr[3], ha->mac_addr[4], ha->mac_addr[5]));

qls_rd_flash_data_exit:

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_FLASH);

	return (ret);
}

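/*
 * Name: qls_sem_lock
 * Function: Acquires a hardware semaphore by writing mask|value and reading
 *	the register back; ownership is confirmed when the value bit sticks.
 *	Retries for up to ~3ms before flagging recovery.
 */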
static int
qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value)
{
	uint32_t count = 30;
	uint32_t data;

	while (count--) {
		WRITE_REG32(ha, Q81_CTL_SEMAPHORE, (mask|value));

		data = READ_REG32(ha, Q81_CTL_SEMAPHORE);

		if (data & value) {
			return (0);
		} else {
			QLA_USEC_DELAY(100);
		}
	}
	ha->qla_initiate_recovery = 1;
	return (-1);
}

static void
qls_sem_unlock(qla_host_t *ha, uint32_t mask)
{
	WRITE_REG32(ha, Q81_CTL_SEMAPHORE, mask);
}

static int
qls_wait_for_proc_addr_ready(qla_host_t *ha)
{
	uint32_t data32;
	uint32_t count = 3;

	while (count--) {
		data32 = READ_REG32(ha, Q81_CTL_PROC_ADDR);

		if (data32 & Q81_CTL_PROC_ADDR_ERR)
			goto qls_wait_for_proc_addr_ready_exit;

		if (data32 & Q81_CTL_PROC_ADDR_RDY)
			return (0);

		QLA_USEC_DELAY(100);
	}

qls_wait_for_proc_addr_ready_exit:
	QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));

	ha->qla_initiate_recovery = 1;
	return (-1);
}
1932 
1933 static int
1934 qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1935 	uint32_t *data)
1936 {
1937 	int ret;
1938 	uint32_t value;
1939 
1940 	ret = qls_wait_for_proc_addr_ready(ha);
1941 
1942 	if (ret)
1943 		goto qls_proc_addr_rd_reg_exit;
1944 
1945 	value = addr_module | reg | Q81_CTL_PROC_ADDR_READ;
1946 
1947 	WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
1948 
1949 	ret = qls_wait_for_proc_addr_ready(ha);
1950 
1951 	if (ret)
1952 		goto qls_proc_addr_rd_reg_exit;
1953 
1954 	*data = READ_REG32(ha, Q81_CTL_PROC_DATA);
1955 
1956 qls_proc_addr_rd_reg_exit:
1957 	return (ret);
1958 }
1959 
1960 static int
1961 qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1962 	uint32_t data)
1963 {
1964 	int ret;
1965 	uint32_t value;
1966 
1967 	ret = qls_wait_for_proc_addr_ready(ha);
1968 
1969 	if (ret)
1970 		goto qls_proc_addr_wr_reg_exit;
1971 
1972 	WRITE_REG32(ha, Q81_CTL_PROC_DATA, data);
1973 
1974 	value = addr_module | reg;
1975 
1976 	WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
1977 
1978 	ret = qls_wait_for_proc_addr_ready(ha);
1979 
1980 qls_proc_addr_wr_reg_exit:
1981 	return (ret);
1982 }
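
/*
 * Indirect (processor address) register access, implemented by the two
 * functions above: wait for Q81_CTL_PROC_ADDR_RDY, then for a read write
 * (module | reg | Q81_CTL_PROC_ADDR_READ) to Q81_CTL_PROC_ADDR, wait for
 * RDY again and fetch the result from Q81_CTL_PROC_DATA.  A write loads
 * Q81_CTL_PROC_DATA first and then writes (module | reg) without the
 * READ flag.  An ERR bit at any point aborts the wait and schedules
 * recovery.
 */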

static int
qls_hw_nic_reset(qla_host_t *ha)
{
	int		count;
	uint32_t	data;
	device_t	dev = ha->pci_dev;

	ha->hw_init = 0;

	data = (Q81_CTL_RESET_FUNC << Q81_CTL_RESET_MASK_SHIFT) |
			Q81_CTL_RESET_FUNC;
	WRITE_REG32(ha, Q81_CTL_RESET, data);

	count = 10;
	while (count--) {
		data = READ_REG32(ha, Q81_CTL_RESET);
		if ((data & Q81_CTL_RESET_FUNC) == 0)
			break;
		QLA_USEC_DELAY(10);
	}
	/* count is -1, not 0, when the poll loop above runs out */
	if (count < 0) {
		device_printf(dev, "%s: Bit 15 not cleared after Reset\n",
			__func__);
		return (-1);
	}
	return (0);
}
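
/*
 * Function reset (above): the upper 16 bits of Q81_CTL_RESET look to be
 * a write-enable mask (hence Q81_CTL_RESET_MASK_SHIFT), so the value
 * written both unmasks and sets the function-reset bit.  The hardware
 * clears the bit when the reset completes, which is polled for up to
 * 10 x 10us.
 */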

static int
qls_hw_reset(qla_host_t *ha)
{
	device_t	dev = ha->pci_dev;
	int		ret;
	int		count;
	uint32_t	data;

	QL_DPRINT2((ha->pci_dev, "%s: enter[%d]\n", __func__, ha->hw_init));

	if (ha->hw_init == 0) {
		ret = qls_hw_nic_reset(ha);
		goto qls_hw_reset_exit;
	}

	ret = qls_clear_routing_table(ha);
	if (ret)
		goto qls_hw_reset_exit;

	ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_STOP);
	if (ret)
		goto qls_hw_reset_exit;

	/*
	 * Wait for FIFO to empty
	 */
	count = 5;
	while (count--) {
		data = READ_REG32(ha, Q81_CTL_STATUS);
		if (data & Q81_CTL_STATUS_NFE)
			break;
		qls_mdelay(__func__, 100);
	}
	if (count < 0) {
		/* timed out; fail instead of silently returning success */
		device_printf(dev, "%s: NFE bit not set\n", __func__);
		ret = -1;
		goto qls_hw_reset_exit;
	}

	count = 5;
	while (count--) {
		(void)qls_mbx_get_mgmt_ctrl(ha, &data);

		if ((data & Q81_MBX_GET_MGMT_CTL_FIFO_EMPTY) &&
			(data & Q81_MBX_GET_MGMT_CTL_SET_MGMT))
			break;
		qls_mdelay(__func__, 100);
	}
	if (count < 0) {
		ret = -1;
		goto qls_hw_reset_exit;
	}

	/*
	 * Reset the NIC function
	 */
	ret = qls_hw_nic_reset(ha);
	if (ret)
		goto qls_hw_reset_exit;

	ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_RESUME);

qls_hw_reset_exit:
	if (ret)
		device_printf(dev, "%s: failed\n", __func__);

	return (ret);
}
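
/*
 * Reset ordering (above): once the hardware has been initialized, a full
 * reset first clears the routing table, asks the management firmware to
 * pause (Q81_MBX_SET_MGMT_CTL_STOP), waits for the FIFOs to drain,
 * resets the NIC function and finally resumes management
 * (Q81_MBX_SET_MGMT_CTL_RESUME).  Before the first initialization only
 * the bare function reset is performed.
 */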

/*
 * MPI Related Functions
 */
int
qls_mpi_risc_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
{
	int ret;

	ret = qls_proc_addr_rd_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
			reg, data);
	return (ret);
}

int
qls_mpi_risc_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
{
	int ret;

	ret = qls_proc_addr_wr_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
			reg, data);
	return (ret);
}

int
qls_mbx_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
{
	int ret;

	if ((ha->pci_func & 0x1) == 0)
		reg += Q81_FUNC0_MBX_OUT_REG0;
	else
		reg += Q81_FUNC1_MBX_OUT_REG0;

	ret = qls_mpi_risc_rd_reg(ha, reg, data);

	return (ret);
}

int
qls_mbx_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
{
	int ret;

	if ((ha->pci_func & 0x1) == 0)
		reg += Q81_FUNC0_MBX_IN_REG0;
	else
		reg += Q81_FUNC1_MBX_IN_REG0;

	ret = qls_mpi_risc_wr_reg(ha, reg, data);

	return (ret);
}
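
/*
 * The mailbox registers live in the MPI RISC's processor address space
 * and come in per-PCI-function banks: the host writes commands into the
 * MBX_IN registers and reads firmware responses from the MBX_OUT
 * registers, each addressed relative to the function's REG0 base as
 * shown above.
 */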

static int
qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
	uint32_t *out_mbx, uint32_t o_count)
{
	int i, ret = -1;
	uint32_t data32;
	uint32_t count = 50;

	QL_DPRINT2((ha->pci_dev, "%s: enter[0x%08x 0x%08x 0x%08x]\n",
		__func__, *in_mbx, *(in_mbx + 1), *(in_mbx + 2)));

	data32 = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);

	if (data32 & Q81_CTL_HCS_HTR_INTR) {
		device_printf(ha->pci_dev, "%s: cmd_status[0x%08x]\n",
			__func__, data32);
		goto qls_mbx_cmd_exit;
	}

	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV,
		Q81_CTL_SEM_SET_PROC_ADDR_NIC_RCV)) {
		device_printf(ha->pci_dev, "%s: semlock failed\n", __func__);
		goto qls_mbx_cmd_exit;
	}

	ha->mbx_done = 0;

	for (i = 0; i < i_count; i++) {
		ret = qls_mbx_wr_reg(ha, i, *in_mbx);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: mbx_wr[%d, 0x%08x] failed\n", __func__,
				i, *in_mbx);
			qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
			goto qls_mbx_cmd_exit;
		}

		in_mbx++;
	}
	WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_SET_HTR_INTR);

	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);

	/*
	 * mbx_done was already cleared before the command was issued;
	 * clearing it again here could race with the ISR and lose the
	 * completion, so don't.
	 */
	ret = -1;

	while (count--) {
		if (ha->flags.intr_enable == 0) {
			data32 = READ_REG32(ha, Q81_CTL_STATUS);

			if (!(data32 & Q81_CTL_STATUS_PI)) {
				qls_mdelay(__func__, 100);
				continue;
			}

			ret = qls_mbx_rd_reg(ha, 0, &data32);

			if (ret == 0) {
				if ((data32 & 0xF000) == 0x4000) {
					out_mbx[0] = data32;

					for (i = 1; i < o_count; i++) {
						ret = qls_mbx_rd_reg(ha, i,
								&data32);
						if (ret) {
							device_printf(
								ha->pci_dev,
								"%s: mbx_rd[%d]"
								" failed\n",
								__func__, i);
							break;
						}
						out_mbx[i] = data32;
					}
					break;
				} else if ((data32 & 0xF000) == 0x8000) {
					/* async event; dismiss and re-arm */
					count = 50;
					WRITE_REG32(ha,
						Q81_CTL_HOST_CMD_STATUS,
						Q81_CTL_HCS_CMD_CLR_RTH_INTR);
				}
			}
		} else {
			if (ha->mbx_done) {
				/* the ISR left the mailboxes in ha->mbox */
				for (i = 0; i < o_count; i++) {
					out_mbx[i] = ha->mbox[i];
				}
				ret = 0;
				break;
			}
		}
		qls_mdelay(__func__, 1000);
	}

qls_mbx_cmd_exit:

	if (ha->flags.intr_enable == 0) {
		WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,
			Q81_CTL_HCS_CMD_CLR_RTH_INTR);
	}

	if (ret) {
		ha->qla_initiate_recovery = 1;
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit[%d]\n", __func__, ret));
	return (ret);
}
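
/*
 * Mailbox command handshake (qls_mbx_cmd() above): the inbound mailboxes
 * are loaded under the NIC_RCV proc-address semaphore and the
 * host-to-RISC (HTR) interrupt is set to signal the firmware.
 * Completion is then collected either by polling -- wait for
 * Q81_CTL_STATUS_PI, read outbound mailbox 0, treat a 0x4xxx status as
 * the command completion and a 0x8xxx status as an async event that is
 * dismissed by clearing the RISC-to-host (RTH) interrupt and re-arming
 * the timeout -- or, with interrupts enabled, by waiting for the ISR to
 * copy the mailboxes into ha->mbox and set ha->mbx_done.  Any failure
 * flags qla_initiate_recovery.
 */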

static int
qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl)
{
	uint32_t *mbox;
	device_t dev = ha->pci_dev;

	mbox = ha->mbox;
	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));

	mbox[0] = Q81_MBX_SET_MGMT_CTL;
	mbox[1] = t_ctrl;

	if (qls_mbx_cmd(ha, mbox, 2, mbox, 1)) {
		device_printf(dev, "%s failed\n", __func__);
		return (-1);
	}

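	/*
	 * A STOP request that comes back with Q81_MBX_CMD_ERROR is
	 * treated as success below -- presumably the management
	 * firmware was already stopped.
	 */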
	if ((mbox[0] == Q81_MBX_CMD_COMPLETE) ||
		((t_ctrl == Q81_MBX_SET_MGMT_CTL_STOP) &&
			(mbox[0] == Q81_MBX_CMD_ERROR))) {
		return (0);
	}
	device_printf(dev, "%s failed [0x%08x]\n", __func__, mbox[0]);
	return (-1);
}

static int
qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status)
{
	uint32_t *mbox;
	device_t dev = ha->pci_dev;

	*t_status = 0;

	mbox = ha->mbox;
	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));

	mbox[0] = Q81_MBX_GET_MGMT_CTL;

	if (qls_mbx_cmd(ha, mbox, 1, mbox, 2)) {
		device_printf(dev, "%s failed\n", __func__);
		return (-1);
	}

	*t_status = mbox[1];

	return (0);
}

static void
qls_mbx_get_link_status(qla_host_t *ha)
{
	uint32_t *mbox;
	device_t dev = ha->pci_dev;

	mbox = ha->mbox;
	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));

	mbox[0] = Q81_MBX_GET_LNK_STATUS;

	if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
		device_printf(dev, "%s failed\n", __func__);
		return;
	}

	ha->link_status			= mbox[1];
	ha->link_down_info		= mbox[2];
	ha->link_hw_info		= mbox[3];
	ha->link_dcbx_counters		= mbox[4];
	ha->link_change_counters	= mbox[5];

	device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
		__func__, mbox[0], mbox[1], mbox[2], mbox[3], mbox[4],
		mbox[5]);

	return;
}

static void
qls_mbx_about_fw(qla_host_t *ha)
{
	uint32_t *mbox;
	device_t dev = ha->pci_dev;

	mbox = ha->mbox;
	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));

	mbox[0] = Q81_MBX_ABOUT_FW;

	if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
		device_printf(dev, "%s failed\n", __func__);
		return;
	}

	device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
		__func__, mbox[0], mbox[1], mbox[2], mbox[3], mbox[4],
		mbox[5]);
}

int
qls_mbx_dump_risc_ram(qla_host_t *ha, void *buf, uint32_t r_addr,
	uint32_t r_size)
{
	bus_addr_t b_paddr;
	uint32_t *mbox;
	device_t dev = ha->pci_dev;

	mbox = ha->mbox;
	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));

	bzero(ha->mpi_dma.dma_b, (r_size << 2));
	b_paddr = ha->mpi_dma.dma_addr;

	mbox[0] = Q81_MBX_DUMP_RISC_RAM;
	mbox[1] = r_addr & 0xFFFF;
	mbox[2] = ((uint32_t)(b_paddr >> 16)) & 0xFFFF;
	mbox[3] = ((uint32_t)b_paddr) & 0xFFFF;
	mbox[4] = (r_size >> 16) & 0xFFFF;
	mbox[5] = r_size & 0xFFFF;
	mbox[6] = ((uint32_t)(b_paddr >> 48)) & 0xFFFF;
	mbox[7] = ((uint32_t)(b_paddr >> 32)) & 0xFFFF;
	mbox[8] = (r_addr >> 16) & 0xFFFF;
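
	/*
	 * Layout as loaded above: the 64-bit DMA buffer address is split
	 * into 16-bit words -- mbox[3]/mbox[2] carry bits 15:0/31:16 and
	 * mbox[7]/mbox[6] carry bits 47:32/63:48; the RISC RAM address is
	 * split across mbox[1] (low) and mbox[8] (high) and the word
	 * count across mbox[5] (low) and mbox[4] (high).
	 */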

	bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
		BUS_DMASYNC_PREREAD);

	if (qls_mbx_cmd(ha, mbox, 9, mbox, 1)) {
		device_printf(dev, "%s failed\n", __func__);
		return (-1);
	}
	if (mbox[0] != 0x4000) {
		device_printf(ha->pci_dev, "%s: failed!\n", __func__);
		return (-1);
	} else {
		bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
			BUS_DMASYNC_POSTREAD);
		bcopy(ha->mpi_dma.dma_b, buf, (r_size << 2));
	}

	return (0);
}

int
qls_mpi_reset(qla_host_t *ha)
{
	int		count;
	uint32_t	data;
	device_t	dev = ha->pci_dev;

	WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,
		Q81_CTL_HCS_CMD_SET_RISC_RESET);

	count = 10;
	while (count--) {
		data = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
		if (data & Q81_CTL_HCS_RISC_RESET) {
			WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,
				Q81_CTL_HCS_CMD_CLR_RISC_RESET);
			break;
		}
		qls_mdelay(__func__, 10);
	}
	/* count is -1, not 0, when the poll loop above runs out */
	if (count < 0) {
		device_printf(dev, "%s: failed\n", __func__);
		return (-1);
	}
	return (0);
}
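
/*
 * MPI RISC reset (above): Q81_CTL_HCS_CMD_SET_RISC_RESET requests the
 * reset through the host command/status register; the host then polls
 * for the Q81_CTL_HCS_RISC_RESET status bit and acknowledges it with
 * Q81_CTL_HCS_CMD_CLR_RISC_RESET once it appears.
 */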