xref: /freebsd/sys/dev/qlxgbe/ql_os.c (revision ce6a89e27cd190313be39bb479880aeda4778436)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2013-2016 Qlogic Corporation
5  * All rights reserved.
6  *
7  *  Redistribution and use in source and binary forms, with or without
8  *  modification, are permitted provided that the following conditions
9  *  are met:
10  *
11  *  1. Redistributions of source code must retain the above copyright
12  *     notice, this list of conditions and the following disclaimer.
13  *  2. Redistributions in binary form must reproduce the above copyright
14  *     notice, this list of conditions and the following disclaimer in the
15  *     documentation and/or other materials provided with the distribution.
16  *
17  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  *  and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27  *  POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*
31  * File: ql_os.c
32  * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 
39 #include "ql_os.h"
40 #include "ql_hw.h"
41 #include "ql_def.h"
42 #include "ql_inline.h"
43 #include "ql_ver.h"
44 #include "ql_glbl.h"
45 #include "ql_dbg.h"
46 #include <sys/smp.h>
47 
48 /*
49  * Some PCI Configuration Space Related Defines
50  */
51 
52 #ifndef PCI_VENDOR_QLOGIC
53 #define PCI_VENDOR_QLOGIC	0x1077
54 #endif
55 
56 #ifndef PCI_PRODUCT_QLOGIC_ISP8030
57 #define PCI_PRODUCT_QLOGIC_ISP8030	0x8030
58 #endif
59 
60 #define PCI_QLOGIC_ISP8030 \
61 	((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)
62 
63 /*
64  * static functions
65  */
66 static int qla_alloc_parent_dma_tag(qla_host_t *ha);
67 static void qla_free_parent_dma_tag(qla_host_t *ha);
68 static int qla_alloc_xmt_bufs(qla_host_t *ha);
69 static void qla_free_xmt_bufs(qla_host_t *ha);
70 static int qla_alloc_rcv_bufs(qla_host_t *ha);
71 static void qla_free_rcv_bufs(qla_host_t *ha);
72 static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);
73 
74 static void qla_init_ifnet(device_t dev, qla_host_t *ha);
75 static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
76 static void qla_release(qla_host_t *ha);
77 static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
78 		int error);
79 static void qla_stop(qla_host_t *ha);
80 static void qla_get_peer(qla_host_t *ha);
81 static void qla_error_recovery(void *context, int pending);
82 static void qla_async_event(void *context, int pending);
83 static void qla_stats(void *context, int pending);
84 static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
85 		uint32_t iscsi_pdu);
86 
87 /*
88  * Hooks to the Operating Systems
89  */
90 static int qla_pci_probe (device_t);
91 static int qla_pci_attach (device_t);
92 static int qla_pci_detach (device_t);
93 
94 static void qla_init(void *arg);
95 static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
96 static int qla_media_change(struct ifnet *ifp);
97 static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
98 
99 static int qla_transmit(struct ifnet *ifp, struct mbuf  *mp);
100 static void qla_qflush(struct ifnet *ifp);
101 static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
102 static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
103 static int qla_create_fp_taskqueues(qla_host_t *ha);
104 static void qla_destroy_fp_taskqueues(qla_host_t *ha);
105 static void qla_drain_fp_taskqueues(qla_host_t *ha);
106 
107 static device_method_t qla_pci_methods[] = {
108 	/* Device interface */
109 	DEVMETHOD(device_probe, qla_pci_probe),
110 	DEVMETHOD(device_attach, qla_pci_attach),
111 	DEVMETHOD(device_detach, qla_pci_detach),
112 	{ 0, 0 }
113 };
114 
115 static driver_t qla_pci_driver = {
116 	"ql", qla_pci_methods, sizeof (qla_host_t),
117 };
118 
119 static devclass_t qla83xx_devclass;
120 
121 DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);
122 
123 MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
124 MODULE_DEPEND(qla83xx, ether, 1, 1, 1);
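/*
 * This driver is built as part of the qlxgbe module; on a stock tree it can
 * typically be loaded with "kldload if_qlxgbe" (the exact module/file name
 * may vary with the build configuration).
 */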
125 
126 MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");
127 
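/*
 * Receive buffer replenish thresholds: the standard threshold is used while
 * the MTU is at or below ETHERMTU and the jumbo threshold once the MTU is
 * raised above it (see the SIOCSIFMTU handling in qla_ioctl()).
 */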
128 #define QL_STD_REPLENISH_THRES		0
129 #define QL_JUMBO_REPLENISH_THRES	32
130 
131 
132 static char dev_str[64];
133 static char ver_str[64];
134 
135 /*
136  * Name:	qla_pci_probe
137  * Function:	Validate the PCI device to be a QLA83xx (ISP8030) device
138  */
139 static int
140 qla_pci_probe(device_t dev)
141 {
142         switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
143         case PCI_QLOGIC_ISP8030:
144 		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
145 			"Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
146 			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
147 			QLA_VERSION_BUILD);
148 		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
149 			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
150 			QLA_VERSION_BUILD);
151                 device_set_desc(dev, dev_str);
152                 break;
153         default:
154                 return (ENXIO);
155         }
156 
157         if (bootverbose)
158                 printf("%s: %s\n", __func__, dev_str);
159 
160         return (BUS_PROBE_DEFAULT);
161 }
162 
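/*
 * Name:	qla_add_sysctls
 * Function:	Registers the per-device sysctl nodes (driver/firmware
 *		version, debug level, error recovery knobs and statistics
 *		counters). The nodes appear under the device's sysctl tree,
 *		e.g. "sysctl dev.ql.0.debug=1" for unit 0 (unit number used
 *		here only for illustration).
 */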
163 static void
164 qla_add_sysctls(qla_host_t *ha)
165 {
166         device_t dev = ha->pci_dev;
167 
168 	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
169 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
170 		OID_AUTO, "version", CTLFLAG_RD,
171 		ver_str, 0, "Driver Version");
172 
173         SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
174                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
175                 OID_AUTO, "fw_version", CTLFLAG_RD,
176                 ha->fw_ver_str, 0, "firmware version");
177 
178         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
179             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
180 	    "link_status", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
181 	    (void *)ha, 0, qla_sysctl_get_link_status, "I", "Link Status");
182 
183 	ha->dbg_level = 0;
184         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
185                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
186                 OID_AUTO, "debug", CTLFLAG_RW,
187                 &ha->dbg_level, ha->dbg_level, "Debug Level");
188 
189 	ha->enable_minidump = 1;
190 	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
191 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
192 		OID_AUTO, "enable_minidump", CTLFLAG_RW,
193 		&ha->enable_minidump, ha->enable_minidump,
194 		"Minidump retrieval prior to error recovery "
195 		"is enabled only when this is set");
196 
197 	ha->enable_driverstate_dump = 1;
198 	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
199 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
200 		OID_AUTO, "enable_driverstate_dump", CTLFLAG_RW,
201 		&ha->enable_driverstate_dump, ha->enable_driverstate_dump,
202 		"Driver State retrieval prior to error recovery "
203 		"is enabled only when this is set");
204 
205 	ha->enable_error_recovery = 1;
206 	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
207 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
208 		OID_AUTO, "enable_error_recovery", CTLFLAG_RW,
209 		&ha->enable_error_recovery, ha->enable_error_recovery,
210 		"when set, error recovery is enabled on fatal errors; "
211 		"otherwise the port is taken offline");
212 
213 	ha->ms_delay_after_init = 1000;
214 	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
215 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
216 		OID_AUTO, "ms_delay_after_init", CTLFLAG_RW,
217 		&ha->ms_delay_after_init, ha->ms_delay_after_init,
218 		"millisecond delay after hw_init");
219 
220 	ha->std_replenish = QL_STD_REPLENISH_THRES;
221         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
222                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
223                 OID_AUTO, "std_replenish", CTLFLAG_RW,
224                 &ha->std_replenish, ha->std_replenish,
225                 "Threshold for Replenishing Standard Frames");
226 
227         SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
228                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
229                 OID_AUTO, "ipv4_lro",
230                 CTLFLAG_RD, &ha->ipv4_lro,
231                 "number of ipv4 lro completions");
232 
233         SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
234                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
235                 OID_AUTO, "ipv6_lro",
236                 CTLFLAG_RD, &ha->ipv6_lro,
237                 "number of ipv6 lro completions");
238 
239 	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
240 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
241 		OID_AUTO, "tx_tso_frames",
242 		CTLFLAG_RD, &ha->tx_tso_frames,
243 		"number of Tx TSO Frames");
244 
245 	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
246                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
247 		OID_AUTO, "hw_vlan_tx_frames",
248 		CTLFLAG_RD, &ha->hw_vlan_tx_frames,
249 		"number of Tx VLAN Frames");
250 
251 	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
252                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
253 		OID_AUTO, "hw_lock_failed",
254 		CTLFLAG_RD, &ha->hw_lock_failed,
255 		"number of hw_lock failures");
256 
257         return;
258 }
259 
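/*
 * Name:	qla_watchdog
 * Function:	Periodic callout that checks hardware health; on failure it
 *		either schedules the error recovery task or takes the port
 *		offline, otherwise it schedules the stats and async event
 *		tasks as needed and re-arms itself
 */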
260 static void
261 qla_watchdog(void *arg)
262 {
263 	qla_host_t *ha = arg;
264 	qla_hw_t *hw;
265 	struct ifnet *ifp;
266 
267 	hw = &ha->hw;
268 	ifp = ha->ifp;
269 
270         if (ha->qla_watchdog_exit) {
271 		ha->qla_watchdog_exited = 1;
272 		return;
273 	}
274 	ha->qla_watchdog_exited = 0;
275 
276 	if (!ha->qla_watchdog_pause) {
277                 if (!ha->offline &&
278                         (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
279                         (ha->msg_from_peer == QL_PEER_MSG_RESET))) {
280 
281 	        	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
282 			ql_update_link_state(ha);
283 
284 			if (ha->enable_error_recovery) {
285 				ha->qla_watchdog_paused = 1;
286 				ha->qla_watchdog_pause = 1;
287 				ha->err_inject = 0;
288 				device_printf(ha->pci_dev,
289 					"%s: taskqueue_enqueue(err_task)\n",
290 					__func__);
291 				taskqueue_enqueue(ha->err_tq, &ha->err_task);
292 			} else {
293 				if (ifp != NULL)
294 					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
295 				ha->offline = 1;
296 			}
297 			return;
298 
299 		} else {
300 			if (ha->qla_interface_up) {
301 
302 				ha->watchdog_ticks++;
303 
304 				if (ha->watchdog_ticks > 1000)
305 					ha->watchdog_ticks = 0;
306 
307 				if (!ha->watchdog_ticks && QL_RUNNING(ifp)) {
308 					taskqueue_enqueue(ha->stats_tq,
309 						&ha->stats_task);
310 				}
311 
312 				if (ha->async_event) {
313 					taskqueue_enqueue(ha->async_event_tq,
314 						&ha->async_event_task);
315 				}
316 
317 			}
318 			ha->qla_watchdog_paused = 0;
319 		}
320 	} else {
321 		ha->qla_watchdog_paused = 1;
322 	}
323 
324 	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
325 		qla_watchdog, ha);
326 }
327 
328 /*
329  * Name:	qla_pci_attach
330  * Function:	Attaches the device to the operating system
331  */
332 static int
333 qla_pci_attach(device_t dev)
334 {
335 	qla_host_t *ha = NULL;
336 	uint32_t rsrc_len;
337 	int i;
338 	uint32_t num_rcvq = 0;
339 
340         if ((ha = device_get_softc(dev)) == NULL) {
341                 device_printf(dev, "cannot get softc\n");
342                 return (ENOMEM);
343         }
344 
345         memset(ha, 0, sizeof (qla_host_t));
346 
347         if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
348                 device_printf(dev, "device is not ISP8030\n");
349                 return (ENXIO);
350 	}
351 
352         ha->pci_func = pci_get_function(dev) & 0x1;
353 
354         ha->pci_dev = dev;
355 
356 	pci_enable_busmaster(dev);
357 
358 	ha->reg_rid = PCIR_BAR(0);
359 	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
360 				RF_ACTIVE);
361 
362         if (ha->pci_reg == NULL) {
363                 device_printf(dev, "unable to map device registers\n");
364                 goto qla_pci_attach_err;
365         }
366 
367 	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
368 					ha->reg_rid);
369 
370 	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
371 	mtx_init(&ha->sp_log_lock, "qla83xx_sp_log_lock", MTX_NETWORK_LOCK, MTX_DEF);
372 	ha->flags.lock_init = 1;
373 
374 	qla_add_sysctls(ha);
375 
376 	ha->hw.num_sds_rings = MAX_SDS_RINGS;
377 	ha->hw.num_rds_rings = MAX_RDS_RINGS;
378 	ha->hw.num_tx_rings = NUM_TX_RINGS;
379 
380 	ha->reg_rid1 = PCIR_BAR(2);
381 	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
382 			&ha->reg_rid1, RF_ACTIVE);
383 
384 	ha->msix_count = pci_msix_count(dev);
385 
386 	if (ha->msix_count < 1 ) {
387 		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
388 			ha->msix_count);
389 		goto qla_pci_attach_err;
390 	}
391 
392 	if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
393 		ha->hw.num_sds_rings = ha->msix_count - 1;
394 	}
395 
396 	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
397 		" msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha,
398 		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg,
399 		ha->pci_reg1));
400 
401         /* initialize hardware */
402         if (ql_init_hw(ha)) {
403                 device_printf(dev, "%s: ql_init_hw failed\n", __func__);
404                 goto qla_pci_attach_err;
405         }
406 
407         device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
408                 ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
409                 ha->fw_ver_build);
410         snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
411                         ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
412                         ha->fw_ver_build);
413 
414         if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
415                 device_printf(dev, "%s: qla_get_nic_partition failed\n",
416                         __func__);
417                 goto qla_pci_attach_err;
418         }
419         device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
420                 " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
421 		__func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
422 		ha->pci_reg, ha->pci_reg1, num_rcvq);
423 
424         if ((ha->msix_count  < 64) || (num_rcvq != 32)) {
425 		if (ha->hw.num_sds_rings > 15) {
426                 	ha->hw.num_sds_rings = 15;
427 		}
428         }
429 
430 	ha->hw.num_rds_rings = ha->hw.num_sds_rings;
431 	ha->hw.num_tx_rings = ha->hw.num_sds_rings;
432 
433 #ifdef QL_ENABLE_ISCSI_TLV
434 	ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2;
435 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
436 
437 	ql_hw_add_sysctls(ha);
438 
439 	ha->msix_count = ha->hw.num_sds_rings + 1;
440 
441 	if (pci_alloc_msix(dev, &ha->msix_count)) {
442 		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
443 			ha->msix_count);
444 		ha->msix_count = 0;
445 		goto qla_pci_attach_err;
446 	}
447 
448 	ha->mbx_irq_rid = 1;
449 	ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
450 				&ha->mbx_irq_rid,
451 				(RF_ACTIVE | RF_SHAREABLE));
452 	if (ha->mbx_irq == NULL) {
453 		device_printf(dev, "could not allocate mbx interrupt\n");
454 		goto qla_pci_attach_err;
455 	}
456 	if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
457 		NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
458 		device_printf(dev, "could not setup mbx interrupt\n");
459 		goto qla_pci_attach_err;
460 	}
461 
462 	for (i = 0; i < ha->hw.num_sds_rings; i++) {
463 		ha->irq_vec[i].sds_idx = i;
464                 ha->irq_vec[i].ha = ha;
465                 ha->irq_vec[i].irq_rid = 2 + i;
466 
467 		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
468 				&ha->irq_vec[i].irq_rid,
469 				(RF_ACTIVE | RF_SHAREABLE));
470 
471 		if (ha->irq_vec[i].irq == NULL) {
472 			device_printf(dev, "could not allocate interrupt\n");
473 			goto qla_pci_attach_err;
474 		}
475 		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
476 			(INTR_TYPE_NET | INTR_MPSAFE),
477 			NULL, ql_isr, &ha->irq_vec[i],
478 			&ha->irq_vec[i].handle)) {
479 			device_printf(dev, "could not setup interrupt\n");
480 			goto qla_pci_attach_err;
481 		}
482 
483 		ha->tx_fp[i].ha = ha;
484 		ha->tx_fp[i].txr_idx = i;
485 
486 		if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) {
487 			device_printf(dev, "%s: could not allocate tx_br[%d]\n",
488 				__func__, i);
489 			goto qla_pci_attach_err;
490 		}
491 	}
492 
493 	if (qla_create_fp_taskqueues(ha) != 0)
494 		goto qla_pci_attach_err;
495 
496 	printf("%s: mp_ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
497 		ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);
498 
499 	ql_read_mac_addr(ha);
500 
501 	/* allocate parent dma tag */
502 	if (qla_alloc_parent_dma_tag(ha)) {
503 		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
504 			__func__);
505 		goto qla_pci_attach_err;
506 	}
507 
508 	/* alloc all dma buffers */
509 	if (ql_alloc_dma(ha)) {
510 		device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
511 		goto qla_pci_attach_err;
512 	}
513 	qla_get_peer(ha);
514 
515 	if (ql_minidump_init(ha) != 0) {
516 		device_printf(dev, "%s: ql_minidump_init failed\n", __func__);
517 		goto qla_pci_attach_err;
518 	}
519 	ql_alloc_drvr_state_buffer(ha);
520 	ql_alloc_sp_log_buffer(ha);
521 	/* create the O.S. ethernet interface */
522 	qla_init_ifnet(dev, ha);
523 
524 	ha->flags.qla_watchdog_active = 1;
525 	ha->qla_watchdog_pause = 0;
526 
527 	callout_init(&ha->tx_callout, TRUE);
528 	ha->flags.qla_callout_init = 1;
529 
530 	/* create ioctl device interface */
531 	if (ql_make_cdev(ha)) {
532 		device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
533 		goto qla_pci_attach_err;
534 	}
535 
536 	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
537 		qla_watchdog, ha);
538 
539 	TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
540 	ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT,
541 			taskqueue_thread_enqueue, &ha->err_tq);
542 	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
543 		device_get_nameunit(ha->pci_dev));
544 
545         TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
546         ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT,
547                         taskqueue_thread_enqueue, &ha->async_event_tq);
548         taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
549                 device_get_nameunit(ha->pci_dev));
550 
551         TASK_INIT(&ha->stats_task, 0, qla_stats, ha);
552         ha->stats_tq = taskqueue_create("qla_statsq", M_NOWAIT,
553                         taskqueue_thread_enqueue, &ha->stats_tq);
554         taskqueue_start_threads(&ha->stats_tq, 1, PI_NET, "%s statsq",
555                 device_get_nameunit(ha->pci_dev));
556 
557 	QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
558         return (0);
559 
560 qla_pci_attach_err:
561 
562 	qla_release(ha);
563 
564 	if (ha->flags.lock_init) {
565 		mtx_destroy(&ha->hw_lock);
566 		mtx_destroy(&ha->sp_log_lock);
567 	}
568 
569 	QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
570         return (ENXIO);
571 }
572 
573 /*
574  * Name:	qla_pci_detach
575  * Function:	Unhooks the device from the operating system
576  */
577 static int
578 qla_pci_detach(device_t dev)
579 {
580 	qla_host_t *ha = NULL;
581 	struct ifnet *ifp;
582 
583 
584         if ((ha = device_get_softc(dev)) == NULL) {
585                 device_printf(dev, "cannot get softc\n");
586                 return (ENOMEM);
587         }
588 
589 	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
590 
591 	ifp = ha->ifp;
592 
593 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
594 	QLA_LOCK(ha, __func__, -1, 0);
595 
596 	ha->qla_detach_active = 1;
597 	qla_stop(ha);
598 
599 	qla_release(ha);
600 
601 	QLA_UNLOCK(ha, __func__);
602 
603 	if (ha->flags.lock_init) {
604 		mtx_destroy(&ha->hw_lock);
605 		mtx_destroy(&ha->sp_log_lock);
606 	}
607 
608 	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
609 
610         return (0);
611 }
612 
613 /*
614  * SYSCTL Related Callbacks
615  */
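/*
 * Name:	qla_sysctl_get_link_status
 * Function:	Handler for the "link_status" sysctl node; writing 1 to it
 *		(e.g. "sysctl dev.ql.0.link_status=1", unit 0 assumed) causes
 *		the hardware link status to be queried
 */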
616 static int
617 qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
618 {
619 	int err, ret = 0;
620 	qla_host_t *ha;
621 
622 	err = sysctl_handle_int(oidp, &ret, 0, req);
623 
624 	if (err || !req->newptr)
625 		return (err);
626 
627 	if (ret == 1) {
628 		ha = (qla_host_t *)arg1;
629 		ql_hw_link_status(ha);
630 	}
631 	return (err);
632 }
633 
634 /*
635  * Name:	qla_release
636  * Function:	Releases the resources allocated for the device
637  */
638 static void
639 qla_release(qla_host_t *ha)
640 {
641 	device_t dev;
642 	int i;
643 
644 	dev = ha->pci_dev;
645 
646         if (ha->async_event_tq) {
647                 taskqueue_drain_all(ha->async_event_tq);
648                 taskqueue_free(ha->async_event_tq);
649         }
650 
651 	if (ha->err_tq) {
652 		taskqueue_drain_all(ha->err_tq);
653 		taskqueue_free(ha->err_tq);
654 	}
655 
656 	if (ha->stats_tq) {
657 		taskqueue_drain_all(ha->stats_tq);
658 		taskqueue_free(ha->stats_tq);
659 	}
660 
661 	ql_del_cdev(ha);
662 
663 	if (ha->flags.qla_watchdog_active) {
664 		ha->qla_watchdog_exit = 1;
665 
666 		while (ha->qla_watchdog_exited == 0)
667 			qla_mdelay(__func__, 1);
668 	}
669 
670 	if (ha->flags.qla_callout_init)
671 		callout_stop(&ha->tx_callout);
672 
673 	if (ha->ifp != NULL)
674 		ether_ifdetach(ha->ifp);
675 
676 	ql_free_drvr_state_buffer(ha);
677 	ql_free_sp_log_buffer(ha);
678 	ql_free_dma(ha);
679 	qla_free_parent_dma_tag(ha);
680 
681 	if (ha->mbx_handle)
682 		(void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);
683 
684 	if (ha->mbx_irq)
685 		(void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
686 				ha->mbx_irq);
687 
688 	for (i = 0; i < ha->hw.num_sds_rings; i++) {
689 
690 		if (ha->irq_vec[i].handle) {
691 			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
692 					ha->irq_vec[i].handle);
693 		}
694 
695 		if (ha->irq_vec[i].irq) {
696 			(void)bus_release_resource(dev, SYS_RES_IRQ,
697 				ha->irq_vec[i].irq_rid,
698 				ha->irq_vec[i].irq);
699 		}
700 
701 		qla_free_tx_br(ha, &ha->tx_fp[i]);
702 	}
703 	qla_destroy_fp_taskqueues(ha);
704 
705 	if (ha->msix_count)
706 		pci_release_msi(dev);
707 
708         if (ha->pci_reg)
709                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
710 				ha->pci_reg);
711 
712         if (ha->pci_reg1)
713                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
714 				ha->pci_reg1);
715 
716 	return;
717 }
718 
719 /*
720  * DMA Related Functions
721  */
722 
723 static void
724 qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
725 {
726         *((bus_addr_t *)arg) = 0;
727 
728         if (error) {
729                 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
730                 return;
731 	}
732 
733         *((bus_addr_t *)arg) = segs[0].ds_addr;
734 
735 	return;
736 }
737 
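/*
 * Name:	ql_alloc_dmabuf
 * Function:	Creates a DMA tag for the buffer, allocates coherent DMA
 *		memory and loads the map; the resulting bus address is
 *		returned in dma_buf->dma_addr
 */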
738 int
739 ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
740 {
741         int             ret = 0;
742         device_t        dev;
743         bus_addr_t      b_addr;
744 
745         dev = ha->pci_dev;
746 
747         QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
748 
749         ret = bus_dma_tag_create(
750                         ha->parent_tag,/* parent */
751                         dma_buf->alignment,
752                         ((bus_size_t)(1ULL << 32)),/* boundary */
753                         BUS_SPACE_MAXADDR,      /* lowaddr */
754                         BUS_SPACE_MAXADDR,      /* highaddr */
755                         NULL, NULL,             /* filter, filterarg */
756                         dma_buf->size,          /* maxsize */
757                         1,                      /* nsegments */
758                         dma_buf->size,          /* maxsegsize */
759                         0,                      /* flags */
760                         NULL, NULL,             /* lockfunc, lockarg */
761                         &dma_buf->dma_tag);
762 
763         if (ret) {
764                 device_printf(dev, "%s: could not create dma tag\n", __func__);
765                 goto ql_alloc_dmabuf_exit;
766         }
767         ret = bus_dmamem_alloc(dma_buf->dma_tag,
768                         (void **)&dma_buf->dma_b,
769                         (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
770                         &dma_buf->dma_map);
771         if (ret) {
772                 bus_dma_tag_destroy(dma_buf->dma_tag);
773                 device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
774                 goto ql_alloc_dmabuf_exit;
775         }
776 
777         ret = bus_dmamap_load(dma_buf->dma_tag,
778                         dma_buf->dma_map,
779                         dma_buf->dma_b,
780                         dma_buf->size,
781                         qla_dmamap_callback,
782                         &b_addr, BUS_DMA_NOWAIT);
783 
784         if (ret || !b_addr) {
785                 bus_dma_tag_destroy(dma_buf->dma_tag);
786                 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
787                         dma_buf->dma_map);
788                 ret = -1;
789                 goto ql_alloc_dmabuf_exit;
790         }
791 
792         dma_buf->dma_addr = b_addr;
793 
794 ql_alloc_dmabuf_exit:
795         QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
796                 __func__, ret, (void *)dma_buf->dma_tag,
797                 (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
798 		dma_buf->size));
799 
800         return ret;
801 }
802 
803 void
804 ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
805 {
806 	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
807         bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
808         bus_dma_tag_destroy(dma_buf->dma_tag);
809 }
810 
811 static int
812 qla_alloc_parent_dma_tag(qla_host_t *ha)
813 {
814 	int		ret;
815 	device_t	dev;
816 
817 	dev = ha->pci_dev;
818 
819         /*
820          * Allocate parent DMA Tag
821          */
822         ret = bus_dma_tag_create(
823                         bus_get_dma_tag(dev),   /* parent */
824                         1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
825                         BUS_SPACE_MAXADDR,      /* lowaddr */
826                         BUS_SPACE_MAXADDR,      /* highaddr */
827                         NULL, NULL,             /* filter, filterarg */
828                         BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
829                         0,                      /* nsegments */
830                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
831                         0,                      /* flags */
832                         NULL, NULL,             /* lockfunc, lockarg */
833                         &ha->parent_tag);
834 
835         if (ret) {
836                 device_printf(dev, "%s: could not create parent dma tag\n",
837                         __func__);
838 		return (-1);
839         }
840 
841         ha->flags.parent_tag = 1;
842 
843 	return (0);
844 }
845 
846 static void
847 qla_free_parent_dma_tag(qla_host_t *ha)
848 {
849         if (ha->flags.parent_tag) {
850                 bus_dma_tag_destroy(ha->parent_tag);
851                 ha->flags.parent_tag = 0;
852         }
853 }
854 
855 /*
856  * Name: qla_init_ifnet
857  * Function: Creates the Network Device Interface and registers it with the O.S.
858  */
859 
860 static void
861 qla_init_ifnet(device_t dev, qla_host_t *ha)
862 {
863 	struct ifnet *ifp;
864 
865 	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
866 
867 	ifp = ha->ifp = if_alloc(IFT_ETHER);
868 
869 	if (ifp == NULL)
870 		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
871 
872 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
873 
874 	ifp->if_baudrate = IF_Gbps(10);
875 	ifp->if_capabilities = IFCAP_LINKSTATE;
876 	ifp->if_mtu = ETHERMTU;
877 
878 	ifp->if_init = qla_init;
879 	ifp->if_softc = ha;
880 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
881 	ifp->if_ioctl = qla_ioctl;
882 
883 	ifp->if_transmit = qla_transmit;
884 	ifp->if_qflush = qla_qflush;
885 
886 	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
887 	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
888 	IFQ_SET_READY(&ifp->if_snd);
889 
890 	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
891 
892 	ether_ifattach(ifp, qla_get_mac_addr(ha));
893 
894 	ifp->if_capabilities |= IFCAP_HWCSUM |
895 				IFCAP_TSO4 |
896 				IFCAP_TSO6 |
897 				IFCAP_JUMBO_MTU |
898 				IFCAP_VLAN_HWTAGGING |
899 				IFCAP_VLAN_MTU |
900 				IFCAP_VLAN_HWTSO |
901 				IFCAP_LRO;
902 
903 	ifp->if_capenable = ifp->if_capabilities;
904 
905 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
906 
907 	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);
908 
909 	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
910 		NULL);
911 	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
912 
913 	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
914 
915 	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
916 
917 	return;
918 }
919 
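/*
 * Name:	qla_init_locked
 * Function:	(Re)initializes the interface with the QLA lock held: stops
 *		the port, reallocates transmit and receive buffers and brings
 *		the hardware interface back up
 */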
920 static void
921 qla_init_locked(qla_host_t *ha)
922 {
923 	struct ifnet *ifp = ha->ifp;
924 
925 	ql_sp_log(ha, 14, 0, 0, 0, 0, 0, 0);
926 
927 	qla_stop(ha);
928 
929 	if (qla_alloc_xmt_bufs(ha) != 0)
930 		return;
931 
932 	qla_confirm_9kb_enable(ha);
933 
934 	if (qla_alloc_rcv_bufs(ha) != 0)
935 		return;
936 
937 	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);
938 
939 	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
940 	ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
941 
942 	ha->stop_rcv = 0;
943  	if (ql_init_hw_if(ha) == 0) {
944 		ifp = ha->ifp;
945 		ifp->if_drv_flags |= IFF_DRV_RUNNING;
946 		ha->hw_vlan_tx_frames = 0;
947 		ha->tx_tso_frames = 0;
948 		ha->qla_interface_up = 1;
949 		ql_update_link_state(ha);
950 	} else {
951 		if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_IF_START_FAILURE)
952 			ha->hw.sp_log_stop = -1;
953 	}
954 
955 	ha->qla_watchdog_pause = 0;
956 
957 	return;
958 }
959 
960 static void
961 qla_init(void *arg)
962 {
963 	qla_host_t *ha;
964 
965 	ha = (qla_host_t *)arg;
966 
967 	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
968 
969 	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
970 		return;
971 
972 	qla_init_locked(ha);
973 
974 	QLA_UNLOCK(ha, __func__);
975 
976 	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
977 }
978 
979 static u_int
980 qla_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
981 {
982 	uint8_t *mta = arg;
983 
984 	if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
985 		return (0);
986 
987 	bcopy(LLADDR(sdl), &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);
988 
989 	return (1);
990 }
991 
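/*
 * Name:	qla_set_multi
 * Function:	Collects the interface multicast list and programs it into
 *		the hardware; when add_multi is zero the existing hardware
 *		multicast list is deleted first
 */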
992 static int
993 qla_set_multi(qla_host_t *ha, uint32_t add_multi)
994 {
995 	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
996 	int mcnt = 0;
997 	struct ifnet *ifp = ha->ifp;
998 	int ret = 0;
999 
1000 	mcnt = if_foreach_llmaddr(ifp, qla_copy_maddr, mta);
1001 
1002 	if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
1003 		QLA_LOCK_NO_SLEEP) != 0)
1004 		return (-1);
1005 
1006 	ql_sp_log(ha, 12, 4, ifp->if_drv_flags,
1007 		(ifp->if_drv_flags & IFF_DRV_RUNNING),
1008 		add_multi, (uint32_t)mcnt, 0);
1009 
1010 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1011 
1012 		if (!add_multi) {
1013 			ret = qla_hw_del_all_mcast(ha);
1014 
1015 			if (ret)
1016 				device_printf(ha->pci_dev,
1017 					"%s: qla_hw_del_all_mcast() failed\n",
1018 				__func__);
1019 		}
1020 
1021 		if (!ret)
1022 			ret = ql_hw_set_multi(ha, mta, mcnt, 1);
1023 
1024 	}
1025 
1026 	QLA_UNLOCK(ha, __func__);
1027 
1028 	return (ret);
1029 }
1030 
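/*
 * Name:	qla_ioctl
 * Function:	if_ioctl entry point; handles address, MTU, flags, multicast,
 *		media and capability requests and falls through to
 *		ether_ioctl() for everything else
 */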
1031 static int
1032 qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1033 {
1034 	int ret = 0;
1035 	struct ifreq *ifr = (struct ifreq *)data;
1036 	struct ifaddr *ifa = (struct ifaddr *)data;
1037 	qla_host_t *ha;
1038 
1039 	ha = (qla_host_t *)ifp->if_softc;
1040 	if (ha->offline || ha->qla_initiate_recovery)
1041 		return (ret);
1042 
1043 	switch (cmd) {
1044 	case SIOCSIFADDR:
1045 		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
1046 			__func__, cmd));
1047 
1048 		if (ifa->ifa_addr->sa_family == AF_INET) {
1049 
1050 			ret = QLA_LOCK(ha, __func__,
1051 					QLA_LOCK_DEFAULT_MS_TIMEOUT,
1052 					QLA_LOCK_NO_SLEEP);
1053 			if (ret)
1054 				break;
1055 
1056 			ifp->if_flags |= IFF_UP;
1057 
1058 			ql_sp_log(ha, 8, 3, ifp->if_drv_flags,
1059 				(ifp->if_drv_flags & IFF_DRV_RUNNING),
1060 				ntohl(IA_SIN(ifa)->sin_addr.s_addr), 0, 0);
1061 
1062 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1063 				qla_init_locked(ha);
1064 			}
1065 
1066 			QLA_UNLOCK(ha, __func__);
1067 			QL_DPRINT4(ha, (ha->pci_dev,
1068 				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
1069 				__func__, cmd,
1070 				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));
1071 
1072 			arp_ifinit(ifp, ifa);
1073 		} else {
1074 			ether_ioctl(ifp, cmd, data);
1075 		}
1076 		break;
1077 
1078 	case SIOCSIFMTU:
1079 		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
1080 			__func__, cmd));
1081 
1082 		if (ifr->ifr_mtu > QLA_MAX_MTU) {
1083 			ret = EINVAL;
1084 		} else {
1085 			ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
1086 					QLA_LOCK_NO_SLEEP);
1087 
1088 			if (ret)
1089 				break;
1090 
1091 			ifp->if_mtu = ifr->ifr_mtu;
1092 			ha->max_frame_size =
1093 				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1094 
1095 			ql_sp_log(ha, 9, 4, ifp->if_drv_flags,
1096 				(ifp->if_drv_flags & IFF_DRV_RUNNING),
1097 				ha->max_frame_size, ifp->if_mtu, 0);
1098 
1099 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1100 				qla_init_locked(ha);
1101 			}
1102 
1103 			if (ifp->if_mtu > ETHERMTU)
1104 				ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
1105 			else
1106 				ha->std_replenish = QL_STD_REPLENISH_THRES;
1107 
1108 
1109 			QLA_UNLOCK(ha, __func__);
1110 		}
1111 
1112 		break;
1113 
1114 	case SIOCSIFFLAGS:
1115 		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
1116 			__func__, cmd));
1117 
1118 		ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
1119 				QLA_LOCK_NO_SLEEP);
1120 
1121 		if (ret)
1122 			break;
1123 
1124 		ql_sp_log(ha, 10, 4, ifp->if_drv_flags,
1125 			(ifp->if_drv_flags & IFF_DRV_RUNNING),
1126 			ha->if_flags, ifp->if_flags, 0);
1127 
1128 		if (ifp->if_flags & IFF_UP) {
1129 
1130 			ha->max_frame_size = ifp->if_mtu +
1131 					ETHER_HDR_LEN + ETHER_CRC_LEN;
1132 			qla_init_locked(ha);
1133 
1134 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1135 				if ((ifp->if_flags ^ ha->if_flags) &
1136 					IFF_PROMISC) {
1137 					ret = ql_set_promisc(ha);
1138 				} else if ((ifp->if_flags ^ ha->if_flags) &
1139 					IFF_ALLMULTI) {
1140 					ret = ql_set_allmulti(ha);
1141 				}
1142 			}
1143 		} else {
1144 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1145 				qla_stop(ha);
1146 			ha->if_flags = ifp->if_flags;
1147 		}
1148 
1149 		QLA_UNLOCK(ha, __func__);
1150 		break;
1151 
1152 	case SIOCADDMULTI:
1153 		QL_DPRINT4(ha, (ha->pci_dev,
1154 			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));
1155 
1156 		if (qla_set_multi(ha, 1))
1157 			ret = EINVAL;
1158 		break;
1159 
1160 	case SIOCDELMULTI:
1161 		QL_DPRINT4(ha, (ha->pci_dev,
1162 			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));
1163 
1164 		if (qla_set_multi(ha, 0))
1165 			ret = EINVAL;
1166 		break;
1167 
1168 	case SIOCSIFMEDIA:
1169 	case SIOCGIFMEDIA:
1170 		QL_DPRINT4(ha, (ha->pci_dev,
1171 			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
1172 			__func__, cmd));
1173 		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
1174 		break;
1175 
1176 	case SIOCSIFCAP:
1177 	{
1178 		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1179 
1180 		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
1181 			__func__, cmd));
1182 
1183 		if (mask & IFCAP_HWCSUM)
1184 			ifp->if_capenable ^= IFCAP_HWCSUM;
1185 		if (mask & IFCAP_TSO4)
1186 			ifp->if_capenable ^= IFCAP_TSO4;
1187 		if (mask & IFCAP_TSO6)
1188 			ifp->if_capenable ^= IFCAP_TSO6;
1189 		if (mask & IFCAP_VLAN_HWTAGGING)
1190 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1191 		if (mask & IFCAP_VLAN_HWTSO)
1192 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1193 		if (mask & IFCAP_LRO)
1194 			ifp->if_capenable ^= IFCAP_LRO;
1195 
1196 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1197 			ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
1198 				QLA_LOCK_NO_SLEEP);
1199 
1200 			if (ret)
1201 				break;
1202 
1203 			ql_sp_log(ha, 11, 4, ifp->if_drv_flags,
1204 				(ifp->if_drv_flags & IFF_DRV_RUNNING),
1205 				mask, ifp->if_capenable, 0);
1206 
1207 			qla_init_locked(ha);
1208 
1209 			QLA_UNLOCK(ha, __func__);
1210 
1211 		}
1212 		VLAN_CAPABILITIES(ifp);
1213 		break;
1214 	}
1215 
1216 	default:
1217 		QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
1218 			__func__, cmd));
1219 		ret = ether_ioctl(ifp, cmd, data);
1220 		break;
1221 	}
1222 
1223 	return (ret);
1224 }
1225 
1226 static int
1227 qla_media_change(struct ifnet *ifp)
1228 {
1229 	qla_host_t *ha;
1230 	struct ifmedia *ifm;
1231 	int ret = 0;
1232 
1233 	ha = (qla_host_t *)ifp->if_softc;
1234 
1235 	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1236 
1237 	ifm = &ha->media;
1238 
1239 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1240 		ret = EINVAL;
1241 
1242 	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1243 
1244 	return (ret);
1245 }
1246 
1247 static void
1248 qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1249 {
1250 	qla_host_t *ha;
1251 
1252 	ha = (qla_host_t *)ifp->if_softc;
1253 
1254 	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1255 
1256 	ifmr->ifm_status = IFM_AVALID;
1257 	ifmr->ifm_active = IFM_ETHER;
1258 
1259 	ql_update_link_state(ha);
1260 	if (ha->hw.link_up) {
1261 		ifmr->ifm_status |= IFM_ACTIVE;
1262 		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
1263 	}
1264 
1265 	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,\
1266 		(ha->hw.link_up ? "link_up" : "link_down")));
1267 
1268 	return;
1269 }
1270 
1271 
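/*
 * Name:	qla_send
 * Function:	DMA-maps the mbuf chain (defragmenting once on EFBIG) and
 *		hands it to the hardware transmit ring; on success the mbuf
 *		is recorded against the transmit descriptor so it can be
 *		freed on completion
 */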
1272 static int
1273 qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
1274 	uint32_t iscsi_pdu)
1275 {
1276 	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
1277 	bus_dmamap_t		map;
1278 	int			nsegs;
1279 	int			ret = -1;
1280 	uint32_t		tx_idx;
1281 	struct mbuf		*m_head = *m_headp;
1282 
1283 	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
1284 
1285 	tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;
1286 
1287 	if ((NULL != ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head) ||
1288 		(QL_ERR_INJECT(ha, INJCT_TXBUF_MBUF_NON_NULL))){
1289 		QL_ASSERT(ha, 0, ("%s [%d]: txr_idx = %d tx_idx = %d "\
1290 			"mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,\
1291 			ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head));
1292 
1293 		device_printf(ha->pci_dev, "%s [%d]: txr_idx = %d tx_idx = %d "
1294 			"mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,
1295 			ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head);
1296 
1297 		if (m_head)
1298 			m_freem(m_head);
1299 		*m_headp = NULL;
1300 		QL_INITIATE_RECOVERY(ha);
1301 		return (ret);
1302 	}
1303 
1304 	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;
1305 
1306 	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
1307 			BUS_DMA_NOWAIT);
1308 
1309 	if (ret == EFBIG) {
1310 
1311 		struct mbuf *m;
1312 
1313 		QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
1314 			m_head->m_pkthdr.len));
1315 
1316 		m = m_defrag(m_head, M_NOWAIT);
1317 		if (m == NULL) {
1318 			ha->err_tx_defrag++;
1319 			m_freem(m_head);
1320 			*m_headp = NULL;
1321 			device_printf(ha->pci_dev,
1322 				"%s: m_defrag() = NULL [%d]\n",
1323 				__func__, ret);
1324 			return (ENOBUFS);
1325 		}
1326 		m_head = m;
1327 		*m_headp = m_head;
1328 
1329 		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
1330 					segs, &nsegs, BUS_DMA_NOWAIT))) {
1331 
1332 			ha->err_tx_dmamap_load++;
1333 
1334 			device_printf(ha->pci_dev,
1335 				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
1336 				__func__, ret, m_head->m_pkthdr.len);
1337 
1338 			if (ret != ENOMEM) {
1339 				m_freem(m_head);
1340 				*m_headp = NULL;
1341 			}
1342 			return (ret);
1343 		}
1344 
1345 	} else if (ret) {
1346 
1347 		ha->err_tx_dmamap_load++;
1348 
1349 		device_printf(ha->pci_dev,
1350 			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
1351 			__func__, ret, m_head->m_pkthdr.len);
1352 
1353 		if (ret != ENOMEM) {
1354 			m_freem(m_head);
1355 			*m_headp = NULL;
1356 		}
1357 		return (ret);
1358 	}
1359 
1360 	QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));
1361 
1362 	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
1363 
1364         if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx,
1365 				iscsi_pdu))) {
1366 		ha->tx_ring[txr_idx].count++;
1367 		if (iscsi_pdu)
1368 			ha->tx_ring[txr_idx].iscsi_pkt_count++;
1369 		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
1370 	} else {
1371 		bus_dmamap_unload(ha->tx_tag, map);
1372 		if (ret == EINVAL) {
1373 			if (m_head)
1374 				m_freem(m_head);
1375 			*m_headp = NULL;
1376 		}
1377 	}
1378 
1379 	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
1380 	return (ret);
1381 }
1382 
1383 static int
1384 qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
1385 {
1386         snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
1387                 "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx);
1388 
1389         mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
1390 
1391         fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF,
1392                                    M_NOWAIT, &fp->tx_mtx);
1393         if (fp->tx_br == NULL) {
1394             QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for "
1395                 "fp[%d, %d]\n", ha->pci_func, fp->txr_idx));
1396             return (-ENOMEM);
1397         }
1398         return 0;
1399 }
1400 
1401 static void
1402 qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
1403 {
1404         struct mbuf *mp;
1405         struct ifnet *ifp = ha->ifp;
1406 
1407         if (mtx_initialized(&fp->tx_mtx)) {
1408 
1409                 if (fp->tx_br != NULL) {
1410 
1411                         mtx_lock(&fp->tx_mtx);
1412 
1413                         while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
1414                                 m_freem(mp);
1415                         }
1416 
1417                         mtx_unlock(&fp->tx_mtx);
1418 
1419                         buf_ring_free(fp->tx_br, M_DEVBUF);
1420                         fp->tx_br = NULL;
1421                 }
1422                 mtx_destroy(&fp->tx_mtx);
1423         }
1424         return;
1425 }
1426 
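/*
 * Name:	qla_fp_taskqueue
 * Function:	Per fastpath task: processes receive completions, reaps
 *		completed transmits and drains the buf_ring by transmitting
 *		the queued mbufs; re-enqueues itself while work remains,
 *		otherwise re-enables the ring interrupt
 */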
1427 static void
1428 qla_fp_taskqueue(void *context, int pending)
1429 {
1430         qla_tx_fp_t *fp;
1431         qla_host_t *ha;
1432         struct ifnet *ifp;
1433         struct mbuf  *mp = NULL;
1434         int ret = 0;
1435 	uint32_t txr_idx;
1436 	uint32_t iscsi_pdu = 0;
1437 	uint32_t rx_pkts_left = -1;
1438 
1439         fp = context;
1440 
1441         if (fp == NULL)
1442                 return;
1443 
1444         ha = (qla_host_t *)fp->ha;
1445 
1446         ifp = ha->ifp;
1447 
1448 	txr_idx = fp->txr_idx;
1449 
1450         mtx_lock(&fp->tx_mtx);
1451 
1452         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || (!ha->hw.link_up)) {
1453                 mtx_unlock(&fp->tx_mtx);
1454                 goto qla_fp_taskqueue_exit;
1455         }
1456 
1457 	while (rx_pkts_left && !ha->stop_rcv &&
1458 		(ifp->if_drv_flags & IFF_DRV_RUNNING) && ha->hw.link_up) {
1459 		rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64);
1460 
1461 #ifdef QL_ENABLE_ISCSI_TLV
1462 		ql_hw_tx_done_locked(ha, fp->txr_idx);
1463 		ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1)));
1464 #else
1465 		ql_hw_tx_done_locked(ha, fp->txr_idx);
1466 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
1467 
1468 		mp = drbr_peek(ifp, fp->tx_br);
1469 
1470         	while (mp != NULL) {
1471 
1472 			if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) {
1473 #ifdef QL_ENABLE_ISCSI_TLV
1474 				if (ql_iscsi_pdu(ha, mp) == 0) {
1475 					txr_idx = txr_idx +
1476 						(ha->hw.num_tx_rings >> 1);
1477 					iscsi_pdu = 1;
1478 				} else {
1479 					iscsi_pdu = 0;
1480 					txr_idx = fp->txr_idx;
1481 				}
1482 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
1483 			}
1484 
1485 			ret = qla_send(ha, &mp, txr_idx, iscsi_pdu);
1486 
1487 			if (ret) {
1488 				if (mp != NULL)
1489 					drbr_putback(ifp, fp->tx_br, mp);
1490 				else {
1491 					drbr_advance(ifp, fp->tx_br);
1492 				}
1493 
1494 				mtx_unlock(&fp->tx_mtx);
1495 
1496 				goto qla_fp_taskqueue_exit0;
1497 			} else {
1498 				drbr_advance(ifp, fp->tx_br);
1499 			}
1500 
1501 			/* Send a copy of the frame to the BPF listener */
1502 			ETHER_BPF_MTAP(ifp, mp);
1503 
1504 			if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
1505 				(!ha->hw.link_up))
1506 				break;
1507 
1508 			mp = drbr_peek(ifp, fp->tx_br);
1509 		}
1510 	}
1511         mtx_unlock(&fp->tx_mtx);
1512 
1513 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1514 		goto qla_fp_taskqueue_exit;
1515 
1516 qla_fp_taskqueue_exit0:
1517 
1518 	if (rx_pkts_left || ((mp != NULL) && ret)) {
1519 		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
1520 	} else {
1521 		if (!ha->stop_rcv) {
1522 			QL_ENABLE_INTERRUPTS(ha, fp->txr_idx);
1523 		}
1524 	}
1525 
1526 qla_fp_taskqueue_exit:
1527 
1528         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
1529         return;
1530 }
1531 
1532 static int
1533 qla_create_fp_taskqueues(qla_host_t *ha)
1534 {
1535         int     i;
1536         uint8_t tq_name[32];
1537 
1538         for (i = 0; i < ha->hw.num_sds_rings; i++) {
1539 
1540                 qla_tx_fp_t *fp = &ha->tx_fp[i];
1541 
1542                 bzero(tq_name, sizeof (tq_name));
1543                 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
1544 
1545                 NET_TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);
1546 
1547                 fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
1548                                         taskqueue_thread_enqueue,
1549                                         &fp->fp_taskqueue);
1550 
1551                 if (fp->fp_taskqueue == NULL)
1552                         return (-1);
1553 
1554                 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
1555                         tq_name);
1556 
1557                 QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
1558                         fp->fp_taskqueue));
1559         }
1560 
1561         return (0);
1562 }
1563 
1564 static void
1565 qla_destroy_fp_taskqueues(qla_host_t *ha)
1566 {
1567         int     i;
1568 
1569         for (i = 0; i < ha->hw.num_sds_rings; i++) {
1570 
1571                 qla_tx_fp_t *fp = &ha->tx_fp[i];
1572 
1573                 if (fp->fp_taskqueue != NULL) {
1574                         taskqueue_drain_all(fp->fp_taskqueue);
1575                         taskqueue_free(fp->fp_taskqueue);
1576                         fp->fp_taskqueue = NULL;
1577                 }
1578         }
1579         return;
1580 }
1581 
1582 static void
1583 qla_drain_fp_taskqueues(qla_host_t *ha)
1584 {
1585         int     i;
1586 
1587         for (i = 0; i < ha->hw.num_sds_rings; i++) {
1588                 qla_tx_fp_t *fp = &ha->tx_fp[i];
1589 
1590                 if (fp->fp_taskqueue != NULL) {
1591                         taskqueue_drain_all(fp->fp_taskqueue);
1592                 }
1593         }
1594         return;
1595 }
1596 
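/*
 * Name:	qla_transmit
 * Function:	if_transmit entry point; selects a fastpath based on the
 *		mbuf's RSS hash, enqueues the mbuf on that fastpath's
 *		buf_ring and kicks the corresponding taskqueue
 */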
1597 static int
1598 qla_transmit(struct ifnet *ifp, struct mbuf  *mp)
1599 {
1600 	qla_host_t *ha = (qla_host_t *)ifp->if_softc;
1601         qla_tx_fp_t *fp;
1602         int rss_id = 0;
1603         int ret = 0;
1604 
1605         QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1606 
1607 #if __FreeBSD_version >= 1100000
1608         if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
1609 #else
1610         if (mp->m_flags & M_FLOWID)
1611 #endif
1612                 rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) %
1613                                         ha->hw.num_sds_rings;
1614         fp = &ha->tx_fp[rss_id];
1615 
1616         if (fp->tx_br == NULL) {
1617                 ret = EINVAL;
1618                 goto qla_transmit_exit;
1619         }
1620 
1621         if (mp != NULL) {
1622                 ret = drbr_enqueue(ifp, fp->tx_br, mp);
1623         }
1624 
1625         if (fp->fp_taskqueue != NULL)
1626                 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
1627 
1628         ret = 0;
1629 
1630 qla_transmit_exit:
1631 
1632         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
1633         return ret;
1634 }
1635 
1636 static void
1637 qla_qflush(struct ifnet *ifp)
1638 {
1639         int                     i;
1640         qla_tx_fp_t		*fp;
1641         struct mbuf             *mp;
1642         qla_host_t              *ha;
1643 
1644         ha = (qla_host_t *)ifp->if_softc;
1645 
1646         QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1647 
1648         for (i = 0; i < ha->hw.num_sds_rings; i++) {
1649 
1650                 fp = &ha->tx_fp[i];
1651 
1652                 if (fp == NULL)
1653                         continue;
1654 
1655                 if (fp->tx_br) {
1656                         mtx_lock(&fp->tx_mtx);
1657 
1658                         while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
1659                                 m_freem(mp);
1660                         }
1661                         mtx_unlock(&fp->tx_mtx);
1662                 }
1663         }
1664         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1665 
1666         return;
1667 }
1668 
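/*
 * Name:	qla_stop
 * Function:	Marks the interface down, pauses the watchdog, drains the
 *		fastpath taskqueues, tears down the hardware interface and
 *		frees the transmit and receive buffers
 */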
1669 static void
1670 qla_stop(qla_host_t *ha)
1671 {
1672 	struct ifnet *ifp = ha->ifp;
1673 	device_t	dev;
1674 	int i = 0;
1675 
1676 	ql_sp_log(ha, 13, 0, 0, 0, 0, 0, 0);
1677 
1678 	dev = ha->pci_dev;
1679 
1680 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1681 	ha->qla_watchdog_pause = 1;
1682 
1683         for (i = 0; i < ha->hw.num_sds_rings; i++) {
1684         	qla_tx_fp_t *fp;
1685 
1686 		fp = &ha->tx_fp[i];
1687 
1688                 if (fp == NULL)
1689                         continue;
1690 
1691 		if (fp->tx_br != NULL) {
1692                         mtx_lock(&fp->tx_mtx);
1693                         mtx_unlock(&fp->tx_mtx);
1694 		}
1695 	}
1696 
1697 	while (!ha->qla_watchdog_paused)
1698 		qla_mdelay(__func__, 1);
1699 
1700 	ha->qla_interface_up = 0;
1701 
1702 	qla_drain_fp_taskqueues(ha);
1703 
1704 	ql_del_hw_if(ha);
1705 
1706 	qla_free_xmt_bufs(ha);
1707 	qla_free_rcv_bufs(ha);
1708 
1709 	return;
1710 }
1711 
1712 /*
1713  * Buffer Management Functions for Transmit and Receive Rings
1714  */
1715 static int
1716 qla_alloc_xmt_bufs(qla_host_t *ha)
1717 {
1718 	int ret = 0;
1719 	uint32_t i, j;
1720 	qla_tx_buf_t *txb;
1721 
1722 	if (bus_dma_tag_create(NULL,    /* parent */
1723 		1, 0,    /* alignment, bounds */
1724 		BUS_SPACE_MAXADDR,       /* lowaddr */
1725 		BUS_SPACE_MAXADDR,       /* highaddr */
1726 		NULL, NULL,      /* filter, filterarg */
1727 		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
1728 		QLA_MAX_SEGMENTS,        /* nsegments */
1729 		PAGE_SIZE,        /* maxsegsize */
1730 		BUS_DMA_ALLOCNOW,        /* flags */
1731 		NULL,    /* lockfunc */
1732 		NULL,    /* lockfuncarg */
1733 		&ha->tx_tag)) {
1734 		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
1735 			__func__);
1736 		return (ENOMEM);
1737 	}
1738 
1739 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
1740 		bzero((void *)ha->tx_ring[i].tx_buf,
1741 			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1742 	}
1743 
1744 	for (j = 0; j < ha->hw.num_tx_rings; j++) {
1745 		for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
1746 
1747 			txb = &ha->tx_ring[j].tx_buf[i];
1748 
1749 			if ((ret = bus_dmamap_create(ha->tx_tag,
1750 					BUS_DMA_NOWAIT, &txb->map))) {
1751 
1752 				ha->err_tx_dmamap_create++;
1753 				device_printf(ha->pci_dev,
1754 					"%s: bus_dmamap_create failed[%d]\n",
1755 					__func__, ret);
1756 
1757 				qla_free_xmt_bufs(ha);
1758 
1759 				return (ret);
1760 			}
1761 		}
1762 	}
1763 
1764 	return 0;
1765 }
1766 
1767 /*
1768  * Release mbuf after it has been sent on the wire
1769  */
1770 static void
1771 qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
1772 {
1773 	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1774 
1775 	if (txb->m_head) {
1776 		bus_dmamap_sync(ha->tx_tag, txb->map,
1777 			BUS_DMASYNC_POSTWRITE);
1778 
1779 		bus_dmamap_unload(ha->tx_tag, txb->map);
1780 
1781 		m_freem(txb->m_head);
1782 		txb->m_head = NULL;
1783 
1784 		bus_dmamap_destroy(ha->tx_tag, txb->map);
1785 		txb->map = NULL;
1786 	}
1787 
1788 	if (txb->map) {
1789 		bus_dmamap_unload(ha->tx_tag, txb->map);
1790 		bus_dmamap_destroy(ha->tx_tag, txb->map);
1791 		txb->map = NULL;
1792 	}
1793 
1794 	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1795 }
1796 
1797 static void
1798 qla_free_xmt_bufs(qla_host_t *ha)
1799 {
1800 	int		i, j;
1801 
1802 	for (j = 0; j < ha->hw.num_tx_rings; j++) {
1803 		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
1804 			qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
1805 	}
1806 
1807 	if (ha->tx_tag != NULL) {
1808 		bus_dma_tag_destroy(ha->tx_tag);
1809 		ha->tx_tag = NULL;
1810 	}
1811 
1812 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
1813 		bzero((void *)ha->tx_ring[i].tx_buf,
1814 			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1815 	}
1816 	return;
1817 }
1818 
1819 
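/*
 * Name:	qla_alloc_rcv_std
 * Function:	Creates a DMA map for every receive descriptor, allocates an
 *		mbuf for each one and writes its bus address into the
 *		corresponding hardware receive descriptor
 */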
1820 static int
1821 qla_alloc_rcv_std(qla_host_t *ha)
1822 {
1823 	int		i, j, k, r, ret = 0;
1824 	qla_rx_buf_t	*rxb;
1825 	qla_rx_ring_t	*rx_ring;
1826 
1827 	for (r = 0; r < ha->hw.num_rds_rings; r++) {
1828 
1829 		rx_ring = &ha->rx_ring[r];
1830 
1831 		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1832 
1833 			rxb = &rx_ring->rx_buf[i];
1834 
1835 			ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
1836 					&rxb->map);
1837 
1838 			if (ret) {
1839 				device_printf(ha->pci_dev,
1840 					"%s: dmamap[%d, %d] failed\n",
1841 					__func__, r, i);
1842 
1843 				for (k = 0; k < r; k++) {
1844 					for (j = 0; j < NUM_RX_DESCRIPTORS;
1845 						j++) {
1846 						rxb = &ha->rx_ring[k].rx_buf[j];
1847 						bus_dmamap_destroy(ha->rx_tag,
1848 							rxb->map);
1849 					}
1850 				}
1851 
1852 				for (j = 0; j < i; j++) {
1853 					bus_dmamap_destroy(ha->rx_tag,
1854 						rx_ring->rx_buf[j].map);
1855 				}
1856 				goto qla_alloc_rcv_std_err;
1857 			}
1858 		}
1859 	}
1860 
1861 	qla_init_hw_rcv_descriptors(ha);
1862 
1863 
1864 	for (r = 0; r < ha->hw.num_rds_rings; r++) {
1865 
1866 		rx_ring = &ha->rx_ring[r];
1867 
1868 		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1869 			rxb = &rx_ring->rx_buf[i];
1870 			rxb->handle = i;
1871 			if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
1872 				/*
1873 				 * set the physical address in the
1874 				 * corresponding descriptor entry in the
1875 				 * receive ring/queue for the hba
1876 				 */
1877 				qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
1878 					rxb->paddr,
1879 					(rxb->m_head)->m_pkthdr.len);
1880 			} else {
1881 				device_printf(ha->pci_dev,
1882 					"%s: ql_get_mbuf [%d, %d] failed\n",
1883 					__func__, r, i);
1884 				bus_dmamap_destroy(ha->rx_tag, rxb->map);
1885 				goto qla_alloc_rcv_std_err;
1886 			}
1887 		}
1888 	}
1889 	return 0;
1890 
1891 qla_alloc_rcv_std_err:
1892 	return (-1);
1893 }
1894 
1895 static void
1896 qla_free_rcv_std(qla_host_t *ha)
1897 {
1898 	int		i, r;
1899 	qla_rx_buf_t	*rxb;
1900 
1901 	for (r = 0; r < ha->hw.num_rds_rings; r++) {
1902 		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1903 			rxb = &ha->rx_ring[r].rx_buf[i];
1904 			if (rxb->m_head != NULL) {
1905 				bus_dmamap_unload(ha->rx_tag, rxb->map);
1906 				bus_dmamap_destroy(ha->rx_tag, rxb->map);
1907 				m_freem(rxb->m_head);
1908 				rxb->m_head = NULL;
1909 			}
1910 		}
1911 	}
1912 	return;
1913 }
1914 
1915 static int
1916 qla_alloc_rcv_bufs(qla_host_t *ha)
1917 {
1918 	int		i, ret = 0;
1919 
1920 	if (bus_dma_tag_create(NULL,    /* parent */
1921 			1, 0,    /* alignment, bounds */
1922 			BUS_SPACE_MAXADDR,       /* lowaddr */
1923 			BUS_SPACE_MAXADDR,       /* highaddr */
1924 			NULL, NULL,      /* filter, filterarg */
1925 			MJUM9BYTES,     /* maxsize */
1926 			1,        /* nsegments */
1927 			MJUM9BYTES,        /* maxsegsize */
1928 			BUS_DMA_ALLOCNOW,        /* flags */
1929 			NULL,    /* lockfunc */
1930 			NULL,    /* lockfuncarg */
1931 			&ha->rx_tag)) {
1932 
1933 		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1934 			__func__);
1935 
1936 		return (ENOMEM);
1937 	}
1938 
1939 	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1940 
1941 	for (i = 0; i < ha->hw.num_sds_rings; i++) {
1942 		ha->hw.sds[i].sdsr_next = 0;
1943 		ha->hw.sds[i].rxb_free = NULL;
1944 		ha->hw.sds[i].rx_free = 0;
1945 	}
1946 
1947 	ret = qla_alloc_rcv_std(ha);
1948 
1949 	return (ret);
1950 }
1951 
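/*
 * qla_free_rcv_bufs
 *	Tear down what qla_alloc_rcv_bufs() set up: free the receive
 *	buffers, destroy the receive DMA tag, and clear the receive ring
 *	and status ring state.
 */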
1952 static void
1953 qla_free_rcv_bufs(qla_host_t *ha)
1954 {
1955 	int		i;
1956 
1957 	qla_free_rcv_std(ha);
1958 
1959 	if (ha->rx_tag != NULL) {
1960 		bus_dma_tag_destroy(ha->rx_tag);
1961 		ha->rx_tag = NULL;
1962 	}
1963 
1964 	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1965 
1966 	for (i = 0; i < ha->hw.num_sds_rings; i++) {
1967 		ha->hw.sds[i].sdsr_next = 0;
1968 		ha->hw.sds[i].rxb_free = NULL;
1969 		ha->hw.sds[i].rx_free = 0;
1970 	}
1971 
1972 	return;
1973 }
1974 
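/*
 * ql_get_mbuf
 *	Attach a receive cluster to 'rxb'.  If 'nmp' is NULL a new cluster
 *	is allocated (9KB jumbo or standard MCLBYTES depending on
 *	ha->hw.enable_9kb); otherwise the caller's mbuf is recycled.  The
 *	data pointer is advanced to the next 8-byte boundary, the cluster
 *	is loaded into the buffer's DMA map, and the resulting bus address
 *	is saved in rxb->paddr for use in the receive descriptor.
 */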
1975 int
1976 ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
1977 {
1978 	register struct mbuf *mp = nmp;
1979 	struct ifnet   		*ifp;
1980 	int            		ret = 0;
1981 	uint32_t		offset;
1982 	bus_dma_segment_t	segs[1];
1983 	int			nsegs, mbuf_size;
1984 
1985 	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1986 
1987 	ifp = ha->ifp;
1988 
1989 	if (ha->hw.enable_9kb)
1990 		mbuf_size = MJUM9BYTES;
1991 	else
1992 		mbuf_size = MCLBYTES;
1993 
1994 	if (mp == NULL) {
1995 
1996 		if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE))
1997 			return (-1);
1998 
1999 		if (ha->hw.enable_9kb)
2000 			mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size);
2001 		else
2002 			mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2003 
2004 		if (mp == NULL) {
2005 			ha->err_m_getcl++;
2006 			ret = ENOBUFS;
2007 			device_printf(ha->pci_dev,
2008 					"%s: m_getcl failed\n", __func__);
2009 			goto exit_ql_get_mbuf;
2010 		}
2011 		mp->m_len = mp->m_pkthdr.len = mbuf_size;
2012 	} else {
2013 		mp->m_len = mp->m_pkthdr.len = mbuf_size;
2014 		mp->m_data = mp->m_ext.ext_buf;
2015 		mp->m_next = NULL;
2016 	}
2017 
2018 	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
2019 	if (offset) {
2020 		offset = 8 - offset;
2021 		m_adj(mp, offset);
2022 	}
2023 
2024 	/*
2025 	 * Using memory from the mbuf cluster pool, invoke the bus_dma
2026 	 * machinery to arrange the memory mapping.
2027 	 */
2028 	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
2029 			mp, segs, &nsegs, BUS_DMA_NOWAIT);
2030 	rxb->paddr = segs[0].ds_addr;
2031 
2032 	if (ret || !rxb->paddr || (nsegs != 1)) {
2033 		m_free(mp);
2034 		rxb->m_head = NULL;
2035 		device_printf(ha->pci_dev,
2036 			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
2037 			__func__, ret, (long long unsigned int)rxb->paddr,
2038 			nsegs);
2039 		ret = -1;
2040 		goto exit_ql_get_mbuf;
2041 	}
2042 	rxb->m_head = mp;
2043 	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
2044 
2045 exit_ql_get_mbuf:
2046 	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
2047 	return (ret);
2048 }
2049 
2050 
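/*
 * qla_get_peer
 *	Scan the siblings on the same PCI bus for the other function of
 *	this adapter: a device in the same slot with the same device ID.
 *	If one is found it is remembered in ha->peer_dev for the
 *	error-recovery handshake.
 */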
2051 static void
2052 qla_get_peer(qla_host_t *ha)
2053 {
2054 	device_t *peers;
2055 	int count, i, slot;
2056 	int my_slot = pci_get_slot(ha->pci_dev);
2057 
2058 	if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
2059 		return;
2060 
2061 	for (i = 0; i < count; i++) {
2062 		slot = pci_get_slot(peers[i]);
2063 
2064 		if ((slot >= 0) && (slot == my_slot) &&
2065 			(pci_get_device(peers[i]) ==
2066 				pci_get_device(ha->pci_dev))) {
2067 			if (ha->pci_dev != peers[i])
2068 				ha->peer_dev = peers[i];
2069 		}
2070 	}
2071 }
2072 
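/*
 * qla_send_msg_to_peer
 *	Deliver a message to the peer PCI function by writing it directly
 *	into the peer's softc (ha_peer->msg_from_peer); the peer polls that
 *	field during error recovery.
 */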
2073 static void
2074 qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
2075 {
2076 	qla_host_t *ha_peer;
2077 
2078 	if (ha->peer_dev) {
2079 		if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {
2080 
2081 			ha_peer->msg_from_peer = msg_to_peer;
2082 		}
2083 	}
2084 }
2085 
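/*
 * qla_set_error_recovery
 *	Request error recovery: mark the interface as no longer running and
 *	set ha->qla_initiate_recovery (presumably picked up by the watchdog,
 *	which then schedules qla_error_recovery()).  During early boot, or
 *	when recovery is disabled, the port is simply marked offline instead.
 */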
2086 void
2087 qla_set_error_recovery(qla_host_t *ha)
2088 {
2089 	struct ifnet *ifp = ha->ifp;
2090 
2091 	if (!cold && ha->enable_error_recovery) {
2092 		if (ifp)
2093 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2094 		ha->qla_initiate_recovery = 1;
2095 	} else
2096 		ha->offline = 1;
2097 	return;
2098 }
2099 
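/*
 * qla_error_recovery
 *	Task handler that recovers the adapter after a fatal error.  It
 *	quiesces the transmit rings and drains their taskqueues, performs a
 *	reset handshake with the peer PCI function, re-runs ql_init_hw(),
 *	and, if the interface was up, re-allocates the transmit/receive
 *	buffers and restarts the interface and watchdog.  Any failure marks
 *	the port offline.
 *
 *	Handshake between the two PCI functions (each side polls its
 *	msg_from_peer field, for at most 400 x 100ms):
 *
 *	  even function: sends QL_PEER_MSG_RESET, waits for QL_PEER_MSG_ACK,
 *	                 re-initializes the hardware, then acks the peer.
 *	  odd function:  answers QL_PEER_MSG_RESET with QL_PEER_MSG_ACK,
 *	                 waits for the even function's QL_PEER_MSG_ACK, then
 *	                 re-initializes its own function.
 */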
2100 static void
2101 qla_error_recovery(void *context, int pending)
2102 {
2103 	qla_host_t *ha = context;
2104 	uint32_t msecs_100 = 400;
2105 	struct ifnet *ifp = ha->ifp;
2106 	int i = 0;
2107 
2108 	device_printf(ha->pci_dev, "%s: enter\n", __func__);
2109 	ha->hw.imd_compl = 1;
2110 
2111 	taskqueue_drain_all(ha->stats_tq);
2112 	taskqueue_drain_all(ha->async_event_tq);
2113 
2114 	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
2115 		return;
2116 
2117 	device_printf(ha->pci_dev, "%s: ts_usecs = %ld start\n",
2118 		__func__, qla_get_usec_timestamp());
2119 
2120 	if (ha->qla_interface_up) {
2121 
2122 		qla_mdelay(__func__, 300);
2123 
2124 	        //ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2125 
2126 		for (i = 0; i < ha->hw.num_sds_rings; i++) {
2127 			qla_tx_fp_t *fp;
2128 
2129 			fp = &ha->tx_fp[i];
2130 
2131 			if (fp == NULL)
2132 				continue;
2133 
2134 			if (fp->tx_br != NULL) {
2135 				mtx_lock(&fp->tx_mtx);
2136 				mtx_unlock(&fp->tx_mtx);
2137 			}
2138 		}
2139 	}
2140 
2141 	qla_drain_fp_taskqueues(ha);
2142 
2143 	if ((ha->pci_func & 0x1) == 0) {
2144 
2145 		if (!ha->msg_from_peer) {
2146 			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
2147 
2148 			while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
2149 				msecs_100--)
2150 				qla_mdelay(__func__, 100);
2151 		}
2152 
2153 		ha->msg_from_peer = 0;
2154 
2155 		if (ha->enable_minidump)
2156 			ql_minidump(ha);
2157 
2158 		if (ha->enable_driverstate_dump)
2159 			ql_capture_drvr_state(ha);
2160 
2161 		if (ql_init_hw(ha)) {
2162 			device_printf(ha->pci_dev,
2163 				"%s: ts_usecs = %ld exit: ql_init_hw failed\n",
2164 				__func__, qla_get_usec_timestamp());
2165 			ha->offline = 1;
2166 			goto qla_error_recovery_exit;
2167 		}
2168 
2169 		if (ha->qla_interface_up) {
2170 			qla_free_xmt_bufs(ha);
2171 			qla_free_rcv_bufs(ha);
2172 		}
2173 
2174 		if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY))
2175 			qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
2176 
2177 	} else {
2178 		if (ha->msg_from_peer == QL_PEER_MSG_RESET) {
2179 
2180 			ha->msg_from_peer = 0;
2181 
2182 			if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY))
2183 				qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
2184 		} else {
2185 			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
2186 		}
2187 
2188 		while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && --msecs_100)
2189 			qla_mdelay(__func__, 100);
2190 		ha->msg_from_peer = 0;
2191 
2192 		if (ha->enable_driverstate_dump)
2193 			ql_capture_drvr_state(ha);
2194 
2195 		if (msecs_100 == 0) {
2196 			device_printf(ha->pci_dev,
2197 				"%s: ts_usecs = %ld exit: QL_PEER_MSG_ACK not received\n",
2198 				__func__, qla_get_usec_timestamp());
2199 			ha->offline = 1;
2200 			goto qla_error_recovery_exit;
2201 		}
2202 
2203 		if (ql_init_hw(ha)) {
2204 			device_printf(ha->pci_dev,
2205 				"%s: ts_usecs = %ld exit: ql_init_hw failed\n",
2206 				__func__, qla_get_usec_timestamp());
2207 			ha->offline = 1;
2208 			goto qla_error_recovery_exit;
2209 		}
2210 
2211 		if (ha->qla_interface_up) {
2212 			qla_free_xmt_bufs(ha);
2213 			qla_free_rcv_bufs(ha);
2214 		}
2215 	}
2216 
2217 	qla_mdelay(__func__, ha->ms_delay_after_init);
2218 
2219 	*((uint32_t *)&ha->hw.flags) = 0;
2220 	ha->qla_initiate_recovery = 0;
2221 
2222 	if (ha->qla_interface_up) {
2223 
2224 		if (qla_alloc_xmt_bufs(ha) != 0) {
2225 			ha->offline = 1;
2226 			goto qla_error_recovery_exit;
2227 		}
2228 
2229 		qla_confirm_9kb_enable(ha);
2230 
2231 		if (qla_alloc_rcv_bufs(ha) != 0) {
2232 			ha->offline = 1;
2233 			goto qla_error_recovery_exit;
2234 		}
2235 
2236 		ha->stop_rcv = 0;
2237 
2238 		if (ql_init_hw_if(ha) == 0) {
2239 			ifp = ha->ifp;
2240 			ifp->if_drv_flags |= IFF_DRV_RUNNING;
2241 			ha->qla_watchdog_pause = 0;
2242 			ql_update_link_state(ha);
2243 		} else {
2244 			ha->offline = 1;
2245 
2246 			if (ha->hw.sp_log_stop_events &
2247 				Q8_SP_LOG_STOP_IF_START_FAILURE)
2248 				ha->hw.sp_log_stop = -1;
2249 		}
2250 	} else {
2251 		ha->qla_watchdog_pause = 0;
2252 	}
2253 
2254 qla_error_recovery_exit:
2255 
2256 	if (ha->offline) {
2257 		device_printf(ha->pci_dev, "%s: ts_usecs = %ld port offline\n",
2258 			__func__, qla_get_usec_timestamp());
2259 		if (ha->hw.sp_log_stop_events &
2260 			Q8_SP_LOG_STOP_ERR_RECOVERY_FAILURE)
2261 			ha->hw.sp_log_stop = -1;
2262 	}
2263 
2264 
2265 	QLA_UNLOCK(ha, __func__);
2266 
2267 	if (!ha->offline)
2268 		callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
2269 			qla_watchdog, ha);
2270 
2271 	device_printf(ha->pci_dev,
2272 		"%s: ts_usecs = %ld exit\n",
2273 		__func__, qla_get_usec_timestamp());
2274 	return;
2275 }
2276 
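/*
 * qla_async_event
 *	Task handler that, under QLA_LOCK, consumes a pending asynchronous
 *	event flag and forwards it to qla_hw_async_event().
 */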
2277 static void
2278 qla_async_event(void *context, int pending)
2279 {
2280 	qla_host_t *ha = context;
2281 
2282 	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
2283 		return;
2284 
2285 	if (ha->async_event) {
2286 		ha->async_event = 0;
2287 		qla_hw_async_event(ha);
2288 	}
2289 
2290 	QLA_UNLOCK(ha, __func__);
2291 
2292 	return;
2293 }
2294 
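/*
 * qla_stats
 *	Task handler that refreshes the adapter statistics via
 *	ql_get_stats().
 */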
2295 static void
2296 qla_stats(void *context, int pending)
2297 {
2298 	qla_host_t *ha;
2299 
2300 	ha = context;
2301 
2302 	ql_get_stats(ha);
2303 
2304 	return;
2305 }
2306 
2307