/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_os.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");


#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include <sys/smp.h>

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8030
#define PCI_PRODUCT_QLOGIC_ISP8030	0x8030
#endif

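/*
 * Composite 32-bit ID with the device ID in the upper 16 bits and the
 * vendor ID in the lower 16 bits, matching the value qla_pci_probe()
 * computes from pci_get_device() and pci_get_vendor(); for the ISP8030
 * this works out to 0x80301077.
 */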
#define PCI_QLOGIC_ISP8030 \
	((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)

/*
 * static functions
 */
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);
static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);

static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qla_stop(qla_host_t *ha);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
static void qla_tx_done(void *context, int pending);
static void qla_get_peer(qla_host_t *ha);
static void qla_error_recovery(void *context, int pending);
static void qla_async_event(void *context, int pending);

/*
 * Hooks to the Operating Systems
 */
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);

static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void qla_start(struct ifnet *ifp);

static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla83xx_devclass;

DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);

MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
MODULE_DEPEND(qla83xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");

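/*
 * Replenishment thresholds seeded into ha->std_replenish (see
 * qla_add_sysctls() and the SIOCSIFMTU handler below); the jumbo value
 * batches receive-buffer replenishment once the MTU exceeds ETHERMTU.
 * The consumer of ha->std_replenish lives in the receive path outside
 * this file.
 */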
#define QL_STD_REPLENISH_THRES		0
#define QL_JUMBO_REPLENISH_THRES	32


static char dev_str[64];
static char ver_str[64];

/*
 * Name:	qla_pci_probe
 * Function:	Validate the PCI device to be a QLA83xx device
 */
static int
qla_pci_probe(device_t dev)
{
        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
        case PCI_QLOGIC_ISP8030:
		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
			"Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
                device_set_desc(dev, dev_str);
                break;
        default:
                return (ENXIO);
        }

        if (bootverbose)
                printf("%s: %s\n", __func__, dev_str);

        return (BUS_PROBE_DEFAULT);
}

static void
qla_add_sysctls(qla_host_t *ha)
{
        device_t dev = ha->pci_dev;

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "version", CTLFLAG_RD,
		ver_str, 0, "Driver Version");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_stats, "I", "Statistics");

        SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "fw_version", CTLFLAG_RD,
                ha->fw_ver_str, 0, "firmware version");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_link_status, "I", "Link Status");

	ha->dbg_level = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "debug", CTLFLAG_RW,
                &ha->dbg_level, ha->dbg_level, "Debug Level");

	ha->std_replenish = QL_STD_REPLENISH_THRES;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "std_replenish", CTLFLAG_RW,
                &ha->std_replenish, ha->std_replenish,
                "Threshold for Replenishing Standard Frames");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv4_lro",
                CTLFLAG_RD, &ha->ipv4_lro,
                "number of ipv4 lro completions");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv6_lro",
                CTLFLAG_RD, &ha->ipv6_lro,
                "number of ipv6 lro completions");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "tx_tso_frames",
		CTLFLAG_RD, &ha->tx_tso_frames,
		"number of Tx TSO Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_vlan_tx_frames",
		CTLFLAG_RD, &ha->hw_vlan_tx_frames,
		"number of Tx VLAN Frames");

        return;
}
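
/*
 * With the driver named "ql" (see qla_pci_driver above), these nodes appear
 * under dev.ql.<unit>; for example, on unit 0:
 *
 *	sysctl dev.ql.0.version
 *	sysctl dev.ql.0.stats=1		(writing 1 invokes ql_get_stats())
 *	sysctl dev.ql.0.link_status=1	(writing 1 invokes ql_hw_link_status())
 */

/*
 * Name:	qla_watchdog
 * Function:	Periodic callout. Queues the error recovery task when the
 *		hardware health check fails or a peer requests a reset,
 *		kicks the transmit completion task while the interface is
 *		up, and re-arms itself every QLA_WATCHDOG_CALLOUT_TICKS.
 */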
static void
qla_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	qla_hw_t *hw;
	struct ifnet *ifp;
	uint32_t i;
	qla_hw_tx_cntxt_t *hw_tx_cntxt;

	hw = &ha->hw;
	ifp = ha->ifp;

        if (ha->flags.qla_watchdog_exit) {
		ha->qla_watchdog_exited = 1;
		return;
	}
	ha->qla_watchdog_exited = 0;

	if (!ha->flags.qla_watchdog_pause) {
		if (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
			(ha->msg_from_peer == QL_PEER_MSG_RESET)) {
			ha->qla_watchdog_paused = 1;
			ha->flags.qla_watchdog_pause = 1;
			ha->qla_initiate_recovery = 0;
			ha->err_inject = 0;
			taskqueue_enqueue(ha->err_tq, &ha->err_task);
		} else if (ha->flags.qla_interface_up) {

                        if (ha->async_event) {
                                ha->async_event = 0;
                                taskqueue_enqueue(ha->async_event_tq,
                                        &ha->async_event_task);
                        }

			for (i = 0; i < ha->hw.num_tx_rings; i++) {
				hw_tx_cntxt = &hw->tx_cntxt[i];
				if (qla_le32_to_host(*(hw_tx_cntxt->tx_cons)) !=
					hw_tx_cntxt->txr_comp) {
					taskqueue_enqueue(ha->tx_tq,
						&ha->tx_task);
					break;
				}
			}

			if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
				taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
			}
			ha->qla_watchdog_paused = 0;
		} else {
			ha->qla_watchdog_paused = 0;
		}
	} else {
		ha->qla_watchdog_paused = 1;
	}

	ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);
}

/*
 * Name:	qla_pci_attach
 * Function:	attaches the device to the operating system
 */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len;
	int i;
	uint32_t num_rcvq = 0;

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
                device_printf(dev, "device is not ISP8030\n");
                return (ENXIO);
	}

        ha->pci_func = pci_get_function(dev) & 0x1;

        ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qla_pci_attach_err;
        }

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_SPIN);

	mtx_init(&ha->tx_lock, "qla83xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);

	qla_add_sysctls(ha);
	ql_hw_add_sysctls(ha);

	ha->flags.lock_init = 1;

	ha->reg_rid1 = PCIR_BAR(2);
	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->reg_rid1, RF_ACTIVE);

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p\n", __func__, ha,
		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));

        /* initialize hardware */
        if (ql_init_hw(ha)) {
                device_printf(dev, "%s: ql_init_hw failed\n", __func__);
                goto qla_pci_attach_err;
        }

        device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
                ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                ha->fw_ver_build);
        snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
                        ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                        ha->fw_ver_build);

        if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
                device_printf(dev, "%s: qla_get_nic_partition failed\n",
                        __func__);
                goto qla_pci_attach_err;
        }
        device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
                " msix_count 0x%x pci_reg %p num_rcvq = %d\n", __func__, ha,
                ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg, num_rcvq);


#ifdef QL_ENABLE_ISCSI_TLV
        if ((ha->msix_count < 64) || (num_rcvq != 32)) {
                ha->hw.num_sds_rings = 15;
                ha->hw.num_tx_rings = 32;
        }
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
	ha->hw.num_rds_rings = ha->hw.num_sds_rings;

	ha->msix_count = ha->hw.num_sds_rings + 1;

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}

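	/*
	 * MSI-X vector layout: rid 1 carries mailbox/async events
	 * (ql_mbx_isr); rids 2 through (num_sds_rings + 1) carry one
	 * status/receive ring each (ql_isr), matching the msix_count of
	 * num_sds_rings + 1 computed above.
	 */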
	ha->mbx_irq_rid = 1;
	ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->mbx_irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));
	if (ha->mbx_irq == NULL) {
		device_printf(dev, "could not allocate mbx interrupt\n");
		goto qla_pci_attach_err;
	}
	if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
		NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
		device_printf(dev, "could not setup mbx interrupt\n");
		goto qla_pci_attach_err;
	}

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->irq_vec[i].sds_idx = i;
                ha->irq_vec[i].ha = ha;
                ha->irq_vec[i].irq_rid = 2 + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			goto qla_pci_attach_err;
		}
		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, ql_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle)) {
			device_printf(dev, "could not setup interrupt\n");
			goto qla_pci_attach_err;
		}
	}

	printf("%s: mp_ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
		ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);

	ql_read_mac_addr(ha);

	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (ql_alloc_dma(ha)) {
		device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}
	qla_get_peer(ha);

	if (ql_minidump_init(ha) != 0) {
		device_printf(dev, "%s: ql_minidump_init failed\n", __func__);
		goto qla_pci_attach_err;
	}
	/* create the o.s ethernet interface */
	qla_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->flags.qla_watchdog_pause = 0;


	TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->tx_tq);
	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
		device_get_nameunit(ha->pci_dev));

	callout_init(&ha->tx_callout, TRUE);
	ha->flags.qla_callout_init = 1;

	/* create ioctl device interface */
	if (ql_make_cdev(ha)) {
		device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
	ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->err_tq);
	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
		device_get_nameunit(ha->pci_dev));

        TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
        ha->async_event_tq = taskqueue_create_fast("qla_asyncq", M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->async_event_tq);
        taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
                device_get_nameunit(ha->pci_dev));

	QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
        return (0);

qla_pci_attach_err:

	qla_release(ha);

	QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
        return (ENXIO);
}

/*
 * Name:	qla_pci_detach
 * Function:	Unhooks the device from the operating system
 */
static int
qla_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	struct ifnet *ifp;

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

	(void)QLA_LOCK(ha, __func__, 0);
	qla_stop(ha);
	QLA_UNLOCK(ha, __func__);

	qla_release(ha);

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

        return (0);
}

/*
 * SYSCTL Related Callbacks
 */
static int
qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		ql_get_stats(ha);
	}
	return (err);
}

static int
qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		ql_hw_link_status(ha);
	}
	return (err);
}

/*
 * Name:	qla_release
 * Function:	Releases the resources allocated for the device
 */
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

        if (ha->async_event_tq) {
                taskqueue_drain(ha->async_event_tq, &ha->async_event_task);
                taskqueue_free(ha->async_event_tq);
        }

	if (ha->err_tq) {
		taskqueue_drain(ha->err_tq, &ha->err_task);
		taskqueue_free(ha->err_tq);
	}

	if (ha->tx_tq) {
		taskqueue_drain(ha->tx_tq, &ha->tx_task);
		taskqueue_free(ha->tx_tq);
	}

	ql_del_cdev(ha);

	if (ha->flags.qla_watchdog_active) {
		ha->flags.qla_watchdog_exit = 1;

		while (ha->qla_watchdog_exited == 0)
			qla_mdelay(__func__, 1);
	}

	if (ha->flags.qla_callout_init)
		callout_stop(&ha->tx_callout);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	ql_free_dma(ha);
	qla_free_parent_dma_tag(ha);

	if (ha->mbx_handle)
		(void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);

	if (ha->mbx_irq)
		(void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
				ha->mbx_irq);

	for (i = 0; i < ha->hw.num_sds_rings; i++) {

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}
	}

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->tx_lock);
		mtx_destroy(&ha->hw_lock);
	}

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

        if (ha->pci_reg1)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
				ha->pci_reg1);
}

/*
 * DMA Related Functions
 */

static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        *((bus_addr_t *)arg) = 0;

        if (error) {
                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
                return;
        }

        *((bus_addr_t *)arg) = segs[0].ds_addr;

	return;
}

int
ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        int             ret = 0;
        device_t        dev;
        bus_addr_t      b_addr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        ret = bus_dma_tag_create(
                        ha->parent_tag,/* parent */
                        dma_buf->alignment,
                        ((bus_size_t)(1ULL << 32)),/* boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        dma_buf->size,          /* maxsize */
                        1,                      /* nsegments */
                        dma_buf->size,          /* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &dma_buf->dma_tag);

        if (ret) {
                device_printf(dev, "%s: could not create dma tag\n", __func__);
                goto ql_alloc_dmabuf_exit;
        }
        ret = bus_dmamem_alloc(dma_buf->dma_tag,
                        (void **)&dma_buf->dma_b,
                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
                        &dma_buf->dma_map);
        if (ret) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
                goto ql_alloc_dmabuf_exit;
        }

        ret = bus_dmamap_load(dma_buf->dma_tag,
                        dma_buf->dma_map,
                        dma_buf->dma_b,
                        dma_buf->size,
                        qla_dmamap_callback,
                        &b_addr, BUS_DMA_NOWAIT);

        if (ret || !b_addr) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
                        dma_buf->dma_map);
                ret = -1;
                goto ql_alloc_dmabuf_exit;
        }

        dma_buf->dma_addr = b_addr;

ql_alloc_dmabuf_exit:
        QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
                __func__, ret, (void *)dma_buf->dma_tag,
                (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

        return (ret);
}

void
ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
        bus_dma_tag_destroy(dma_buf->dma_tag);
}
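
/*
 * Minimal usage sketch for the two routines above (the caller is assumed
 * to fill in alignment and size first; the field names are those used
 * elsewhere in this driver):
 *
 *	qla_dma_t dma;
 *
 *	dma.alignment = 8;
 *	dma.size = len;
 *	if (ql_alloc_dmabuf(ha, &dma) == 0) {
 *		... use dma.dma_b (kernel VA) and dma.dma_addr (bus addr) ...
 *		ql_free_dmabuf(ha, &dma);
 *	}
 */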

static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
	int		ret;
	device_t	dev;

	dev = ha->pci_dev;

        /*
         * Allocate parent DMA Tag
         */
        ret = bus_dma_tag_create(
                        bus_get_dma_tag(dev),   /* parent */
                        1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
                        0,                      /* nsegments */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &ha->parent_tag);

        if (ret) {
                device_printf(dev, "%s: could not create parent dma tag\n",
                        __func__);
		return (-1);
        }

        ha->flags.parent_tag = 1;

	return (0);
}

static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
        if (ha->flags.parent_tag) {
                bus_dma_tag_destroy(ha->parent_tag);
                ha->flags.parent_tag = 0;
        }
}

/*
 * Name: qla_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S
 */

static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
	struct ifnet *ifp;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_capabilities = IFCAP_LINKSTATE;
	ifp->if_mtu = ETHERMTU;

	ifp->if_init = qla_init;
	ifp->if_softc = ha;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = qla_ioctl;
	ifp->if_start = qla_start;

	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
	IFQ_SET_READY(&ifp->if_snd);

	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	ether_ifattach(ifp, qla_get_mac_addr(ha));

	ifp->if_capabilities |= IFCAP_HWCSUM |
				IFCAP_TSO4 |
				IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;

	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

	return;
}

static void
qla_init_locked(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;

	qla_stop(ha);

	if (qla_alloc_xmt_bufs(ha) != 0)
		return;

	qla_confirm_9kb_enable(ha);

	if (qla_alloc_rcv_bufs(ha) != 0)
		return;

	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;

	ha->flags.stop_rcv = 0;
	if (ql_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ha->flags.qla_watchdog_pause = 0;
		ha->hw_vlan_tx_frames = 0;
		ha->tx_tso_frames = 0;
		ha->flags.qla_interface_up = 1;
	}

	return;
}

static void
qla_init(void *arg)
{
	qla_host_t *ha;

	ha = (qla_host_t *)arg;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	(void)QLA_LOCK(ha, __func__, 0);
	qla_init_locked(ha);
	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

static int
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = ha->ifp;
	int ret = 0;

	if_maddr_rlock(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
			break;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

		mcnt++;
	}

	if_maddr_runlock(ifp);

	if (QLA_LOCK(ha, __func__, 1) == 0) {
		ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
		QLA_UNLOCK(ha, __func__);
	}

	return (ret);
}

static int
qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				(void)QLA_LOCK(ha, __func__, 0);
				qla_init_locked(ha);
				QLA_UNLOCK(ha, __func__);
			}
			QL_DPRINT4(ha, (ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
		} else {
			ether_ioctl(ifp, cmd, data);
		}
		break;

	case SIOCSIFMTU:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_MTU) {
			ret = EINVAL;
		} else {
			(void) QLA_LOCK(ha, __func__, 0);
			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ret = ql_set_max_mtu(ha, ha->max_frame_size,
					ha->hw.rcv_cntxt_id);
			}

			if (ifp->if_mtu > ETHERMTU)
				ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
			else
				ha->std_replenish = QL_STD_REPLENISH_THRES;

			QLA_UNLOCK(ha, __func__);

			if (ret)
				ret = EINVAL;
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		(void)QLA_LOCK(ha, __func__, 0);

		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = ql_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = ql_set_allmulti(ha);
				}
			} else {
				qla_init_locked(ha);
				ha->max_frame_size = ifp->if_mtu +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				ret = ql_set_max_mtu(ha, ha->max_frame_size,
					ha->hw.rcv_cntxt_id);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qla_stop(ha);
			ha->if_flags = ifp->if_flags;
		}

		QLA_UNLOCK(ha, __func__);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (qla_set_multi(ha, 1))
				ret = EINVAL;
		}
		break;

	case SIOCDELMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (qla_set_multi(ha, 0))
				ret = EINVAL;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			qla_init(ha);

		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}

static int
qla_media_change(struct ifnet *ifp)
{
	qla_host_t *ha;
	struct ifmedia *ifm;
	int ret = 0;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}

static void
qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ql_update_link_state(ha);
	if (ha->hw.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,
		(ha->hw.link_up ? "link_up" : "link_down")));

	return;
}

static void
qla_start(struct ifnet *ifp)
{
	struct mbuf    *m_head;
	qla_host_t *ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8(ha, (ha->pci_dev,
			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
		return;
	}

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		QL_DPRINT8(ha,
			(ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	if (!ha->hw.link_up || !ha->watchdog_ticks)
		ql_update_link_state(ha);

	if (!ha->hw.link_up) {
		QL_DPRINT8(ha, (ha->pci_dev, "%s: link down\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	while (ifp->if_snd.ifq_head != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL) {
			QL_DPRINT8(ha, (ha->pci_dev, "%s: m_head == NULL\n",
				__func__));
			break;
		}

		if (qla_send(ha, &m_head)) {
			if (m_head == NULL)
				break;
			QL_DPRINT8(ha, (ha->pci_dev, "%s: PREPEND\n", __func__));
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}
	QLA_TX_UNLOCK(ha);
	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return;
}

static int
qla_send(qla_host_t *ha, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t		map;
	int			nsegs;
	int			ret = -1;
	uint32_t		tx_idx;
	struct mbuf		*m_head = *m_headp;
	uint32_t		txr_idx = ha->txr_idx;
	uint32_t		iscsi_pdu = 0;

	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

	/* check if flowid is set */

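	/*
	 * The masks below assume ha->hw.num_tx_rings is a power of two:
	 * e.g. with four rings, "flowid & 3" spreads flows across rings
	 * 0-3, while the iSCSI TLV case confines iSCSI PDUs to the lower
	 * half of the ring indices via ((num_tx_rings >> 1) - 1).
	 */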
	if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE) {
#ifdef QL_ENABLE_ISCSI_TLV
		if (qla_iscsi_pdu(ha, m_head) == 0) {
			iscsi_pdu = 1;
			txr_idx = m_head->m_pkthdr.flowid &
					((ha->hw.num_tx_rings >> 1) - 1);
		} else {
			txr_idx = m_head->m_pkthdr.flowid &
					(ha->hw.num_tx_rings - 1);
		}
#else
		txr_idx = m_head->m_pkthdr.flowid & (ha->hw.num_tx_rings - 1);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
	}

	tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;
	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {

		struct mbuf *m;

		QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {

			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}

	} else if (ret) {

		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

        if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx,
				iscsi_pdu))) {
		ha->tx_ring[txr_idx].count++;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
	} else {
		if (ret == EINVAL) {
			if (m_head)
				m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}

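/*
 * Name:	qla_stop
 * Function:	Quiesces the interface: clears IFF_DRV_RUNNING, parks the
 *		watchdog, stops the receive path, deletes the hardware
 *		interface and releases the transmit and receive buffers.
 */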
static void
qla_stop(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;
	device_t	dev;

	dev = ha->pci_dev;

	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
	QLA_TX_LOCK(ha); QLA_TX_UNLOCK(ha);

	ha->flags.qla_watchdog_pause = 1;

	while (!ha->qla_watchdog_paused)
		qla_mdelay(__func__, 1);

	ha->flags.qla_interface_up = 0;

	ql_hw_stop_rcv(ha);

	ql_del_hw_if(ha);

	qla_free_xmt_bufs(ha);
	qla_free_rcv_bufs(ha);

	return;
}

/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
	int ret = 0;
	uint32_t i, j;
	qla_tx_buf_t *txb;

	if (bus_dma_tag_create(NULL,    /* parent */
		1, 0,    /* alignment, bounds */
		BUS_SPACE_MAXADDR,       /* lowaddr */
		BUS_SPACE_MAXADDR,       /* highaddr */
		NULL, NULL,      /* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
		QLA_MAX_SEGMENTS,        /* nsegments */
		PAGE_SIZE,        /* maxsegsize */
		BUS_DMA_ALLOCNOW,        /* flags */
		NULL,    /* lockfunc */
		NULL,    /* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {

			txb = &ha->tx_ring[j].tx_buf[i];

			if ((ret = bus_dmamap_create(ha->tx_tag,
					BUS_DMA_NOWAIT, &txb->map))) {

				ha->err_tx_dmamap_create++;
				device_printf(ha->pci_dev,
					"%s: bus_dmamap_create failed[%d]\n",
					__func__, ret);

				qla_free_xmt_bufs(ha);

				return (ret);
			}
		}
	}

	return (0);
}

/*
 * Release the mbuf after it has been sent on the wire
 */
static void
qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head && txb->map) {

		bus_dmamap_unload(ha->tx_tag, txb->map);

		m_freem(txb->m_head);
		txb->m_head = NULL;
	}

	if (txb->map)
		bus_dmamap_destroy(ha->tx_tag, txb->map);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

static void
qla_free_xmt_bufs(qla_host_t *ha)
{
	int		i, j;

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
			qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
	}

	if (ha->tx_tag != NULL) {
		bus_dma_tag_destroy(ha->tx_tag);
		ha->tx_tag = NULL;
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}
	return;
}


static int
qla_alloc_rcv_std(qla_host_t *ha)
{
	int		i, j, k, r, ret = 0;
	qla_rx_buf_t	*rxb;
	qla_rx_ring_t	*rx_ring;

	for (r = 0; r < ha->hw.num_rds_rings; r++) {

		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

			rxb = &rx_ring->rx_buf[i];

			ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
					&rxb->map);

			if (ret) {
				device_printf(ha->pci_dev,
					"%s: dmamap[%d, %d] failed\n",
					__func__, r, i);

				for (k = 0; k < r; k++) {
					for (j = 0; j < NUM_RX_DESCRIPTORS;
						j++) {
						rxb = &ha->rx_ring[k].rx_buf[j];
						bus_dmamap_destroy(ha->rx_tag,
							rxb->map);
					}
				}

				for (j = 0; j < i; j++) {
					bus_dmamap_destroy(ha->rx_tag,
						rx_ring->rx_buf[j].map);
				}
				goto qla_alloc_rcv_std_err;
			}
		}
	}

	qla_init_hw_rcv_descriptors(ha);


	for (r = 0; r < ha->hw.num_rds_rings; r++) {

		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &rx_ring->rx_buf[i];
			rxb->handle = i;
			if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
				/*
				 * set the physical address in the
				 * corresponding descriptor entry in the
				 * receive ring/queue for the hba
				 */
				qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
					rxb->paddr,
					(rxb->m_head)->m_pkthdr.len);
			} else {
				device_printf(ha->pci_dev,
					"%s: ql_get_mbuf [%d, %d] failed\n",
					__func__, r, i);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				goto qla_alloc_rcv_std_err;
			}
		}
	}
	return (0);

qla_alloc_rcv_std_err:
	return (-1);
}

static void
qla_free_rcv_std(qla_host_t *ha)
{
	int		i, r;
	qla_rx_buf_t	*rxb;

	for (r = 0; r < ha->hw.num_rds_rings; r++) {
		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &ha->rx_ring[r].rx_buf[i];
			if (rxb->m_head != NULL) {
				bus_dmamap_unload(ha->rx_tag, rxb->map);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				m_freem(rxb->m_head);
				rxb->m_head = NULL;
			}
		}
	}
	return;
}

static int
qla_alloc_rcv_bufs(qla_host_t *ha)
{
	int		i, ret = 0;

	if (bus_dma_tag_create(NULL,    /* parent */
			1, 0,    /* alignment, bounds */
			BUS_SPACE_MAXADDR,       /* lowaddr */
			BUS_SPACE_MAXADDR,       /* highaddr */
			NULL, NULL,      /* filter, filterarg */
			MJUM9BYTES,     /* maxsize */
			1,        /* nsegments */
			MJUM9BYTES,        /* maxsegsize */
			BUS_DMA_ALLOCNOW,        /* flags */
			NULL,    /* lockfunc */
			NULL,    /* lockfuncarg */
			&ha->rx_tag)) {

		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
			__func__);

		return (ENOMEM);
	}

	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
	}

	ret = qla_alloc_rcv_std(ha);

	return (ret);
}

static void
qla_free_rcv_bufs(qla_host_t *ha)
{
	int		i;

	qla_free_rcv_std(ha);

	if (ha->rx_tag != NULL) {
		bus_dma_tag_destroy(ha->rx_tag);
		ha->rx_tag = NULL;
	}

	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
	}

	return;
}

int
ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
{
	register struct mbuf *mp = nmp;
	struct ifnet   		*ifp;
	int            		ret = 0;
	uint32_t		offset;
	bus_dma_segment_t	segs[1];
	int			nsegs, mbuf_size;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

        if (ha->hw.enable_9kb)
                mbuf_size = MJUM9BYTES;
        else
                mbuf_size = MCLBYTES;

	if (mp == NULL) {

		if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE))
			return (-1);

                if (ha->hw.enable_9kb)
                        mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size);
                else
                        mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

		if (mp == NULL) {
			ha->err_m_getcl++;
			ret = ENOBUFS;
			device_printf(ha->pci_dev,
					"%s: m_getcl failed\n", __func__);
			goto exit_ql_get_mbuf;
		}
		mp->m_len = mp->m_pkthdr.len = mbuf_size;
	} else {
		mp->m_len = mp->m_pkthdr.len = mbuf_size;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

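	/*
	 * Align mp->m_data up to the next 8-byte boundary (presumably a
	 * receive-hardware requirement): e.g. an address ending in 0x3
	 * yields offset = 8 - 3 = 5, and m_adj() advances the data pointer
	 * by five bytes.
	 */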
	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
	if (offset) {
		offset = 8 - offset;
		m_adj(mp, offset);
	}

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
			mp, segs, &nsegs, BUS_DMA_NOWAIT);
	rxb->paddr = segs[0].ds_addr;

	if (ret || !rxb->paddr || (nsegs != 1)) {
		m_free(mp);
		rxb->m_head = NULL;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			__func__, ret, (long long unsigned int)rxb->paddr,
			nsegs);
		ret = -1;
		goto exit_ql_get_mbuf;
	}
	rxb->m_head = mp;
	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_ql_get_mbuf:
	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
	return (ret);
}

static void
qla_tx_done(void *context, int pending)
{
	qla_host_t *ha = context;
	struct ifnet   *ifp;

	ifp = ha->ifp;

	if (!ifp)
		return;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		QL_DPRINT8(ha, (ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		return;
	}
	ql_hw_tx_done(ha);

	qla_start(ha->ifp);
}

static void
qla_get_peer(qla_host_t *ha)
{
	device_t *peers;
	int count, i, slot;
	int my_slot = pci_get_slot(ha->pci_dev);

	if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
		return;

	for (i = 0; i < count; i++) {
		slot = pci_get_slot(peers[i]);

		if ((slot >= 0) && (slot == my_slot) &&
			(pci_get_device(peers[i]) ==
				pci_get_device(ha->pci_dev))) {
			if (ha->pci_dev != peers[i])
				ha->peer_dev = peers[i];
		}
	}
}

static void
qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
{
	qla_host_t *ha_peer;

	if (ha->peer_dev) {
		if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {

			ha_peer->msg_from_peer = msg_to_peer;
		}
	}
}

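/*
 * Name:	qla_error_recovery
 * Function:	Taskqueue handler that resets the chip shared by both PCI
 *		functions. The even function (pci_func & 0x1 == 0) drives
 *		the reset: it signals QL_PEER_MSG_RESET, waits up to 10
 *		seconds (100 x 100ms) for QL_PEER_MSG_ACK, captures a
 *		minidump and reinitializes the hardware; the odd function
 *		acknowledges the reset and reinitializes its own state.
 */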
static void
qla_error_recovery(void *context, int pending)
{
	qla_host_t *ha = context;
	uint32_t msecs_100 = 100;
	struct ifnet *ifp = ha->ifp;

	(void)QLA_LOCK(ha, __func__, 0);

	if (ha->flags.qla_interface_up) {

		ha->hw.imd_compl = 1;
		qla_mdelay(__func__, 300);

		ql_hw_stop_rcv(ha);

		ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
		QLA_TX_LOCK(ha); QLA_TX_UNLOCK(ha);
	}

	QLA_UNLOCK(ha, __func__);

	if ((ha->pci_func & 0x1) == 0) {

		if (!ha->msg_from_peer) {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);

			while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
				msecs_100--)
				qla_mdelay(__func__, 100);
		}

		ha->msg_from_peer = 0;

		(void)QLA_LOCK(ha, __func__, 0);
		ql_minidump(ha);
		QLA_UNLOCK(ha, __func__);

		(void) ql_init_hw(ha);

		(void)QLA_LOCK(ha, __func__, 0);
		if (ha->flags.qla_interface_up) {
			qla_free_xmt_bufs(ha);
			qla_free_rcv_bufs(ha);
		}
		QLA_UNLOCK(ha, __func__);

		qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);

	} else {
		if (ha->msg_from_peer == QL_PEER_MSG_RESET) {

			ha->msg_from_peer = 0;

			qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
		} else {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
		}

		while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--)
			qla_mdelay(__func__, 100);
		ha->msg_from_peer = 0;

		(void) ql_init_hw(ha);

		(void)QLA_LOCK(ha, __func__, 0);
		if (ha->flags.qla_interface_up) {
			qla_free_xmt_bufs(ha);
			qla_free_rcv_bufs(ha);
		}
		QLA_UNLOCK(ha, __func__);
	}

	(void)QLA_LOCK(ha, __func__, 0);

	if (ha->flags.qla_interface_up) {
		if (qla_alloc_xmt_bufs(ha) != 0) {
			QLA_UNLOCK(ha, __func__);
			return;
		}
		qla_confirm_9kb_enable(ha);

		if (qla_alloc_rcv_bufs(ha) != 0) {
			QLA_UNLOCK(ha, __func__);
			return;
		}

		ha->flags.stop_rcv = 0;
		if (ql_init_hw_if(ha) == 0) {
			ifp = ha->ifp;
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			ha->flags.qla_watchdog_pause = 0;
		}
	} else
		ha->flags.qla_watchdog_pause = 0;

	QLA_UNLOCK(ha, __func__);
}

static void
qla_async_event(void *context, int pending)
{
        qla_host_t *ha = context;

        (void)QLA_LOCK(ha, __func__, 0);
        qla_hw_async_event(ha);
        QLA_UNLOCK(ha, __func__);
}