/*
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_os.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");


#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include <sys/smp.h>

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8030
#define PCI_PRODUCT_QLOGIC_ISP8030	0x8030
#endif

#define PCI_QLOGIC_ISP8030 \
	((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)

/*
 * static functions
 */
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);
static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);

static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qla_stop(qla_host_t *ha);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
static void qla_tx_done(void *context, int pending);
static void qla_get_peer(qla_host_t *ha);
static void qla_error_recovery(void *context, int pending);

/*
 * Hooks to the Operating Systems
 */
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);

static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void qla_start(struct ifnet *ifp);

static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla83xx_devclass;

DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);

MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
MODULE_DEPEND(qla83xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");

#define QL_STD_REPLENISH_THRES		0
#define QL_JUMBO_REPLENISH_THRES	32


static char dev_str[64];

/*
 * Name:	qla_pci_probe
 * Function:	Validate the PCI device to be a QLA83XX (ISP8030) device
 */
static int
qla_pci_probe(device_t dev)
{
        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
        case PCI_QLOGIC_ISP8030:
		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
			"Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
                device_set_desc(dev, dev_str);
                break;
        default:
                return (ENXIO);
        }

        if (bootverbose)
                printf("%s: %s\n", __func__, dev_str);

        return (BUS_PROBE_DEFAULT);
}

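/*
 * Name:	qla_add_sysctls
 * Function:	Registers the per-device sysctl nodes (stats, fw_version,
 *		link_status, debug, std_replenish and the LRO/TSO/VLAN
 *		counters) under the dev.ql.<unit> tree.
 */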
static void
qla_add_sysctls(qla_host_t *ha)
{
        device_t dev = ha->pci_dev;

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_stats, "I", "Statistics");

        SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "fw_version", CTLFLAG_RD,
                ha->fw_ver_str, 0, "firmware version");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_link_status, "I", "Link Status");

	ha->dbg_level = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "debug", CTLFLAG_RW,
                &ha->dbg_level, ha->dbg_level, "Debug Level");

	ha->std_replenish = QL_STD_REPLENISH_THRES;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "std_replenish", CTLFLAG_RW,
                &ha->std_replenish, ha->std_replenish,
                "Threshold for Replenishing Standard Frames");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv4_lro",
                CTLFLAG_RD, &ha->ipv4_lro,
                "number of ipv4 lro completions");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv6_lro",
                CTLFLAG_RD, &ha->ipv6_lro,
                "number of ipv6 lro completions");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "tx_tso_frames",
		CTLFLAG_RD, &ha->tx_tso_frames,
		"number of Tx TSO Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_vlan_tx_frames",
		CTLFLAG_RD, &ha->hw_vlan_tx_frames,
		"number of Tx VLAN Frames");

        return;
}

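/*
 * Name:	qla_watchdog
 * Function:	Periodic callout; checks adapter health and, on a failure or
 *		a peer reset request, queues error recovery. Otherwise it
 *		kicks the transmit taskqueue whenever completions or queued
 *		frames are outstanding, then rearms itself.
 */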
static void
qla_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	qla_hw_t *hw;
	struct ifnet *ifp;
	uint32_t i;
	qla_hw_tx_cntxt_t *hw_tx_cntxt;

	hw = &ha->hw;
	ifp = ha->ifp;

        if (ha->flags.qla_watchdog_exit) {
		ha->qla_watchdog_exited = 1;
		return;
	}
	ha->qla_watchdog_exited = 0;

	if (!ha->flags.qla_watchdog_pause) {
		if (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
			(ha->msg_from_peer == QL_PEER_MSG_RESET)) {
			ha->qla_watchdog_paused = 1;
			ha->flags.qla_watchdog_pause = 1;
			ha->qla_initiate_recovery = 0;
			ha->err_inject = 0;
			taskqueue_enqueue(ha->err_tq, &ha->err_task);
		} else {
			for (i = 0; i < ha->hw.num_tx_rings; i++) {
				hw_tx_cntxt = &hw->tx_cntxt[i];
				if (qla_le32_to_host(*(hw_tx_cntxt->tx_cons)) !=
					hw_tx_cntxt->txr_comp) {
					taskqueue_enqueue(ha->tx_tq,
						&ha->tx_task);
					break;
				}
			}

			if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
				taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
			}
			ha->qla_watchdog_paused = 0;
		}

	} else {
		ha->qla_watchdog_paused = 1;
	}

	ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);
}

/*
 * Name:	qla_pci_attach
 * Function:	attaches the device to the operating system
 */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len;
	int i;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
                device_printf(dev, "device is not ISP8030\n");
                return (ENXIO);
	}

        ha->pci_func = pci_get_function(dev);

        ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qla_pci_attach_err;
        }

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);

	mtx_init(&ha->tx_lock, "qla83xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);

	qla_add_sysctls(ha);
	ql_hw_add_sysctls(ha);

	ha->flags.lock_init = 1;

	ha->reg_rid1 = PCIR_BAR(2);
	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->reg_rid1, RF_ACTIVE);

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p\n", __func__, ha,
		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));

	ha->msix_count = ha->hw.num_sds_rings + 1;

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}

	ha->mbx_irq_rid = 1;
	ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->mbx_irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));
	if (ha->mbx_irq == NULL) {
		device_printf(dev, "could not allocate mbx interrupt\n");
		goto qla_pci_attach_err;
	}
	if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
		NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
		device_printf(dev, "could not setup mbx interrupt\n");
		goto qla_pci_attach_err;
	}


	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->irq_vec[i].sds_idx = i;
                ha->irq_vec[i].ha = ha;
                ha->irq_vec[i].irq_rid = 2 + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			goto qla_pci_attach_err;
		}
		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, ql_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle)) {
			device_printf(dev, "could not setup interrupt\n");
			goto qla_pci_attach_err;
		}
	}

	printf("%s: mp_ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
		ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);

	/* initialize hardware */
	if (ql_init_hw(ha)) {
		device_printf(dev, "%s: ql_init_hw failed\n", __func__);
		goto qla_pci_attach_err;
	}

	device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
		ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
		ha->fw_ver_build);
        snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
                        ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                        ha->fw_ver_build);

	ql_read_mac_addr(ha);

	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (ql_alloc_dma(ha)) {
		device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}
	qla_get_peer(ha);

	/* create the O.S. ethernet interface */
	qla_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->flags.qla_watchdog_pause = 1;


	TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->tx_tq);
	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
		device_get_nameunit(ha->pci_dev));

	callout_init(&ha->tx_callout, TRUE);
	ha->flags.qla_callout_init = 1;

	/* create ioctl device interface */
	if (ql_make_cdev(ha)) {
		device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
	ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->err_tq);
	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
		device_get_nameunit(ha->pci_dev));

	QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
        return (0);

qla_pci_attach_err:

	qla_release(ha);

	QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
        return (ENXIO);
}

/*
 * Name:	qla_pci_detach
 * Function:	Unhooks the device from the operating system
 */
static int
qla_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	struct ifnet *ifp;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

	ifp = ha->ifp;

	(void)QLA_LOCK(ha, __func__, 0);
	qla_stop(ha);
	QLA_UNLOCK(ha, __func__);

	qla_release(ha);

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

        return (0);
}

/*
 * SYSCTL Related Callbacks
 */
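/*
 * Writing 1 to these nodes (e.g. "sysctl dev.ql.0.stats=1") triggers the
 * corresponding dump; reads simply return 0.
 */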
static int
qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		ql_get_stats(ha);
	}
	return (err);
}

static int
qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		ql_hw_link_status(ha);
	}
	return (err);
}

/*
 * Name:	qla_release
 * Function:	Releases the resources allocated for the device
 */
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	if (ha->err_tq) {
		taskqueue_drain(ha->err_tq, &ha->err_task);
		taskqueue_free(ha->err_tq);
	}

	if (ha->tx_tq) {
		taskqueue_drain(ha->tx_tq, &ha->tx_task);
		taskqueue_free(ha->tx_tq);
	}

	ql_del_cdev(ha);

	if (ha->flags.qla_watchdog_active) {
		ha->flags.qla_watchdog_exit = 1;

		while (ha->qla_watchdog_exited == 0)
			qla_mdelay(__func__, 1);
	}

	if (ha->flags.qla_callout_init)
		callout_stop(&ha->tx_callout);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	ql_free_dma(ha);
	qla_free_parent_dma_tag(ha);

	if (ha->mbx_handle)
		(void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);

	if (ha->mbx_irq)
		(void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
				ha->mbx_irq);

	for (i = 0; i < ha->hw.num_sds_rings; i++) {

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}
	}

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->tx_lock);
		mtx_destroy(&ha->hw_lock);
	}

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

        if (ha->pci_reg1)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
				ha->pci_reg1);
}

/*
 * DMA Related Functions
 */

static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        *((bus_addr_t *)arg) = 0;

        if (error) {
                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
                return;
	}

        *((bus_addr_t *)arg) = segs[0].ds_addr;

	return;
}

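/*
 * Typical usage (a sketch, not taken from a caller in this file): the
 * caller fills in size/alignment and pairs the allocation with
 * ql_free_dmabuf(), e.g.
 *
 *	qla_dma_t dma;
 *
 *	dma.alignment = 8;
 *	dma.size = bufsize;
 *	if (ql_alloc_dmabuf(ha, &dma) == 0) {
 *		... use dma.dma_b (KVA) and dma.dma_addr (bus address) ...
 *		ql_free_dmabuf(ha, &dma);
 *	}
 */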
617 ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
618 {
619         int             ret = 0;
620         device_t        dev;
621         bus_addr_t      b_addr;
622 
623         dev = ha->pci_dev;
624 
625         QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
626 
627         ret = bus_dma_tag_create(
628                         ha->parent_tag,/* parent */
629                         dma_buf->alignment,
630                         ((bus_size_t)(1ULL << 32)),/* boundary */
631                         BUS_SPACE_MAXADDR,      /* lowaddr */
632                         BUS_SPACE_MAXADDR,      /* highaddr */
633                         NULL, NULL,             /* filter, filterarg */
634                         dma_buf->size,          /* maxsize */
635                         1,                      /* nsegments */
636                         dma_buf->size,          /* maxsegsize */
637                         0,                      /* flags */
638                         NULL, NULL,             /* lockfunc, lockarg */
639                         &dma_buf->dma_tag);
640 
641         if (ret) {
642                 device_printf(dev, "%s: could not create dma tag\n", __func__);
643                 goto ql_alloc_dmabuf_exit;
644         }
645         ret = bus_dmamem_alloc(dma_buf->dma_tag,
646                         (void **)&dma_buf->dma_b,
647                         (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
648                         &dma_buf->dma_map);
649         if (ret) {
650                 bus_dma_tag_destroy(dma_buf->dma_tag);
651                 device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
652                 goto ql_alloc_dmabuf_exit;
653         }
654 
655         ret = bus_dmamap_load(dma_buf->dma_tag,
656                         dma_buf->dma_map,
657                         dma_buf->dma_b,
658                         dma_buf->size,
659                         qla_dmamap_callback,
660                         &b_addr, BUS_DMA_NOWAIT);
661 
662         if (ret || !b_addr) {
663                 bus_dma_tag_destroy(dma_buf->dma_tag);
664                 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
665                         dma_buf->dma_map);
666                 ret = -1;
667                 goto ql_alloc_dmabuf_exit;
668         }
669 
670         dma_buf->dma_addr = b_addr;
671 
672 ql_alloc_dmabuf_exit:
673         QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
674                 __func__, ret, (void *)dma_buf->dma_tag,
675                 (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
676 		dma_buf->size));
677 
678         return ret;
679 }
680 
681 void
682 ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
683 {
684         bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
685         bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
686         bus_dma_tag_destroy(dma_buf->dma_tag);
687 }
688 
static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
	int		ret;
	device_t	dev;

	dev = ha->pci_dev;

        /*
         * Allocate parent DMA Tag
         */
        ret = bus_dma_tag_create(
                        bus_get_dma_tag(dev),   /* parent */
                        1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
                        0,                      /* nsegments */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &ha->parent_tag);

        if (ret) {
                device_printf(dev, "%s: could not create parent dma tag\n",
                        __func__);
		return (-1);
        }

        ha->flags.parent_tag = 1;

	return (0);
}

static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
        if (ha->flags.parent_tag) {
                bus_dma_tag_destroy(ha->parent_tag);
                ha->flags.parent_tag = 0;
        }
}

/*
 * Name: qla_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S.
 */

static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
	struct ifnet *ifp;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_capabilities = IFCAP_LINKSTATE;

	ifp->if_init = qla_init;
	ifp->if_softc = ha;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = qla_ioctl;
	ifp->if_start = qla_start;

	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
	IFQ_SET_READY(&ifp->if_snd);

	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	ether_ifattach(ifp, qla_get_mac_addr(ha));

	ifp->if_capabilities = IFCAP_HWCSUM |
				IFCAP_TSO4 |
				IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;

	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

	return;
}

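/*
 * Name:	qla_init_locked
 * Function:	Stops the interface, (re)allocates the transmit and receive
 *		buffers, and brings up the hardware interface. Must be
 *		called with the QLA lock held.
 */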
static void
qla_init_locked(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;

	qla_stop(ha);

	if (qla_alloc_xmt_bufs(ha) != 0)
		return;

	if (qla_alloc_rcv_bufs(ha) != 0)
		return;

	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;

	ha->flags.stop_rcv = 0;
	if (ql_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ha->flags.qla_watchdog_pause = 0;
		ha->hw_vlan_tx_frames = 0;
		ha->tx_tso_frames = 0;
	}

	return;
}

static void
qla_init(void *arg)
{
	qla_host_t *ha;

	ha = (qla_host_t *)arg;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	(void)QLA_LOCK(ha, __func__, 0);
	qla_init_locked(ha);
	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

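/*
 * Name:	qla_set_multi
 * Function:	Walks the interface's multicast list into a flat MAC-address
 *		array (capped at Q8_MAX_NUM_MULTICAST_ADDRS) and programs it
 *		into the hardware via ql_hw_set_multi().
 */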
static int
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = ha->ifp;
	int ret = 0;

	if_maddr_rlock(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
			break;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

		mcnt++;
	}

	if_maddr_runlock(ifp);

	if (QLA_LOCK(ha, __func__, 1) == 0) {
		ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
		QLA_UNLOCK(ha, __func__);
	}

	return (ret);
}

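/*
 * Name:	qla_ioctl
 * Function:	ifnet ioctl entry point; handles address/MTU/flags/multicast/
 *		media/capability requests and defers the rest to
 *		ether_ioctl().
 */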
static int
qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				(void)QLA_LOCK(ha, __func__, 0);
				qla_init_locked(ha);
				QLA_UNLOCK(ha, __func__);
			}
			QL_DPRINT4(ha, (ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
		} else {
			ether_ioctl(ifp, cmd, data);
		}
		break;

	case SIOCSIFMTU:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_MTU) {
			ret = EINVAL;
		} else {
			(void) QLA_LOCK(ha, __func__, 0);
			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ret = ql_set_max_mtu(ha, ha->max_frame_size,
					ha->hw.rcv_cntxt_id);
			}

			if (ifp->if_mtu > ETHERMTU)
				ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
			else
				ha->std_replenish = QL_STD_REPLENISH_THRES;


			QLA_UNLOCK(ha, __func__);

			if (ret)
				ret = EINVAL;
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		(void)QLA_LOCK(ha, __func__, 0);

		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = ql_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = ql_set_allmulti(ha);
				}
			} else {
				qla_init_locked(ha);
				ha->max_frame_size = ifp->if_mtu +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				ret = ql_set_max_mtu(ha, ha->max_frame_size,
					ha->hw.rcv_cntxt_id);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qla_stop(ha);
			ha->if_flags = ifp->if_flags;
		}

		QLA_UNLOCK(ha, __func__);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (qla_set_multi(ha, 1))
				ret = EINVAL;
		}
		break;

	case SIOCDELMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (qla_set_multi(ha, 0))
				ret = EINVAL;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			qla_init(ha);

		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}

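/*
 * Name:	qla_media_change/qla_media_status
 * Function:	ifmedia callbacks; qla_media_change validates that the
 *		requested media is Ethernet, while qla_media_status reports
 *		the current link state via ql_update_link_state().
 */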
static int
qla_media_change(struct ifnet *ifp)
{
	qla_host_t *ha;
	struct ifmedia *ifm;
	int ret = 0;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}

static void
qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ql_update_link_state(ha);
	if (ha->hw.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,
		(ha->hw.link_up ? "link_up" : "link_down")));

	return;
}

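/*
 * Name:	qla_start
 * Function:	if_start entry point; drains the interface send queue and
 *		hands each frame to qla_send() while the transmit lock can
 *		be taken, the interface is running, and the link is up.
 */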
static void
qla_start(struct ifnet *ifp)
{
	struct mbuf    *m_head;
	qla_host_t *ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8(ha, (ha->pci_dev,
			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
		return;
	}

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		QL_DPRINT8(ha,
			(ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	if (!ha->watchdog_ticks)
		ql_update_link_state(ha);

	if (!ha->hw.link_up) {
		QL_DPRINT8(ha, (ha->pci_dev, "%s: link down\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	while (ifp->if_snd.ifq_head != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL) {
			QL_DPRINT8(ha, (ha->pci_dev, "%s: m_head == NULL\n",
				__func__));
			break;
		}

		if (qla_send(ha, &m_head)) {
			if (m_head == NULL)
				break;
			QL_DPRINT8(ha, (ha->pci_dev, "%s: PREPEND\n", __func__));
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}
	QLA_TX_UNLOCK(ha);
	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return;
}

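/*
 * Name:	qla_send
 * Function:	DMA-maps an outbound mbuf chain (defragmenting once on
 *		EFBIG), selects a transmit ring from the flowid when one is
 *		set, and queues the segments to the hardware via
 *		ql_hw_send().
 */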
static int
qla_send(qla_host_t *ha, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t		map;
	int			nsegs;
	int			ret = -1;
	uint32_t		tx_idx;
	struct mbuf		*m_head = *m_headp;
	uint32_t		txr_idx = ha->txr_idx;

	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

	/* check if flowid is set */
	if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE)
		txr_idx = m_head->m_pkthdr.flowid & (ha->hw.num_tx_rings - 1);

	tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;
	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {

		struct mbuf *m;

		QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {

			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}

	} else if (ret) {

		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

        if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx))) {

		ha->tx_ring[txr_idx].count++;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
	} else {
		if (ret == EINVAL) {
			if (m_head)
				m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}

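/*
 * Name:	qla_stop
 * Function:	Marks the interface down, waits for the watchdog to pause,
 *		stops the receive path, tears down the hardware interface,
 *		and frees the transmit and receive buffers.
 */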
static void
qla_stop(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;
	device_t	dev;

	dev = ha->pci_dev;

	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

	ha->flags.qla_watchdog_pause = 1;

	while (!ha->qla_watchdog_paused)
		qla_mdelay(__func__, 1);

	ha->flags.stop_rcv = 1;
	ql_hw_stop_rcv(ha);

	ql_del_hw_if(ha);

	qla_free_xmt_bufs(ha);
	qla_free_rcv_bufs(ha);

	return;
}

/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
	int ret = 0;
	uint32_t i, j;
	qla_tx_buf_t *txb;

	if (bus_dma_tag_create(NULL,    /* parent */
		1, 0,    /* alignment, bounds */
		BUS_SPACE_MAXADDR,       /* lowaddr */
		BUS_SPACE_MAXADDR,       /* highaddr */
		NULL, NULL,      /* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
		QLA_MAX_SEGMENTS,        /* nsegments */
		PAGE_SIZE,        /* maxsegsize */
		BUS_DMA_ALLOCNOW,        /* flags */
		NULL,    /* lockfunc */
		NULL,    /* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {

			txb = &ha->tx_ring[j].tx_buf[i];

			if ((ret = bus_dmamap_create(ha->tx_tag,
					BUS_DMA_NOWAIT, &txb->map))) {

				ha->err_tx_dmamap_create++;
				device_printf(ha->pci_dev,
					"%s: bus_dmamap_create failed[%d]\n",
					__func__, ret);

				qla_free_xmt_bufs(ha);

				return (ret);
			}
		}
	}

	return 0;
}

/*
 * Release the mbuf after it has been sent on the wire
 */
static void
qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head && txb->map) {

		bus_dmamap_unload(ha->tx_tag, txb->map);

		m_freem(txb->m_head);
		txb->m_head = NULL;
	}

	if (txb->map)
		bus_dmamap_destroy(ha->tx_tag, txb->map);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

static void
qla_free_xmt_bufs(qla_host_t *ha)
{
	int		i, j;

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
			qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
	}

	if (ha->tx_tag != NULL) {
		bus_dma_tag_destroy(ha->tx_tag);
		ha->tx_tag = NULL;
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}
	return;
}


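/*
 * Name:	qla_alloc_rcv_std
 * Function:	Creates a DMA map for every receive descriptor in every RDS
 *		ring, attaches an mbuf to each, and writes the buffer's bus
 *		address into the corresponding hardware receive descriptor.
 */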
static int
qla_alloc_rcv_std(qla_host_t *ha)
{
	int		i, j, k, r, ret = 0;
	qla_rx_buf_t	*rxb;
	qla_rx_ring_t	*rx_ring;

	for (r = 0; r < ha->hw.num_rds_rings; r++) {

		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

			rxb = &rx_ring->rx_buf[i];

			ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
					&rxb->map);

			if (ret) {
				device_printf(ha->pci_dev,
					"%s: dmamap[%d, %d] failed\n",
					__func__, r, i);

				for (k = 0; k < r; k++) {
					for (j = 0; j < NUM_RX_DESCRIPTORS;
						j++) {
						rxb = &ha->rx_ring[k].rx_buf[j];
						bus_dmamap_destroy(ha->rx_tag,
							rxb->map);
					}
				}

				for (j = 0; j < i; j++) {
					bus_dmamap_destroy(ha->rx_tag,
						rx_ring->rx_buf[j].map);
				}
				goto qla_alloc_rcv_std_err;
			}
		}
	}

	qla_init_hw_rcv_descriptors(ha);


	for (r = 0; r < ha->hw.num_rds_rings; r++) {

		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &rx_ring->rx_buf[i];
			rxb->handle = i;
			if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
				/*
				 * set the physical address in the
				 * corresponding descriptor entry in the
				 * receive ring/queue for the hba
				 */
				qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
					rxb->paddr,
					(rxb->m_head)->m_pkthdr.len);
			} else {
				device_printf(ha->pci_dev,
					"%s: ql_get_mbuf [%d, %d] failed\n",
					__func__, r, i);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				goto qla_alloc_rcv_std_err;
			}
		}
	}
	return 0;

qla_alloc_rcv_std_err:
	return (-1);
}

static void
qla_free_rcv_std(qla_host_t *ha)
{
	int		i, r;
	qla_rx_buf_t	*rxb;

	for (r = 0; r < ha->hw.num_rds_rings; r++) {
		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &ha->rx_ring[r].rx_buf[i];
			if (rxb->m_head != NULL) {
				bus_dmamap_unload(ha->rx_tag, rxb->map);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				m_freem(rxb->m_head);
				rxb->m_head = NULL;
			}
		}
	}
	return;
}

static int
qla_alloc_rcv_bufs(qla_host_t *ha)
{
	int		i, ret = 0;

	if (bus_dma_tag_create(NULL,    /* parent */
			1, 0,    /* alignment, bounds */
			BUS_SPACE_MAXADDR,       /* lowaddr */
			BUS_SPACE_MAXADDR,       /* highaddr */
			NULL, NULL,      /* filter, filterarg */
			MJUM9BYTES,     /* maxsize */
			1,        /* nsegments */
			MJUM9BYTES,        /* maxsegsize */
			BUS_DMA_ALLOCNOW,        /* flags */
			NULL,    /* lockfunc */
			NULL,    /* lockfuncarg */
			&ha->rx_tag)) {

		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
			__func__);

		return (ENOMEM);
	}

	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
	}

	ret = qla_alloc_rcv_std(ha);

	return (ret);
}

static void
qla_free_rcv_bufs(qla_host_t *ha)
{
	int		i;

	qla_free_rcv_std(ha);

	if (ha->rx_tag != NULL) {
		bus_dma_tag_destroy(ha->rx_tag);
		ha->rx_tag = NULL;
	}

	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
	}

	return;
}

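/*
 * Name:	ql_get_mbuf
 * Function:	Attaches a cluster mbuf (allocating one when nmp is NULL) to
 *		the given receive buffer, aligns its data to an 8-byte
 *		boundary, and DMA-maps it so rxb->paddr holds the bus
 *		address.
 */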
int
ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
{
	register struct mbuf *mp = nmp;
	struct ifnet   		*ifp;
	int            		ret = 0;
	uint32_t		offset;
	bus_dma_segment_t	segs[1];
	int			nsegs;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

	if (mp == NULL) {

		mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

		if (mp == NULL) {
			ha->err_m_getcl++;
			ret = ENOBUFS;
			device_printf(ha->pci_dev,
					"%s: m_getcl failed\n", __func__);
			goto exit_ql_get_mbuf;
		}
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
	} else {
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
	if (offset) {
		offset = 8 - offset;
		m_adj(mp, offset);
	}

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
			mp, segs, &nsegs, BUS_DMA_NOWAIT);
	rxb->paddr = segs[0].ds_addr;

	if (ret || !rxb->paddr || (nsegs != 1)) {
		m_free(mp);
		rxb->m_head = NULL;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			__func__, ret, (long long unsigned int)rxb->paddr,
			nsegs);
                ret = -1;
		goto exit_ql_get_mbuf;
	}
	rxb->m_head = mp;
	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_ql_get_mbuf:
	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
	return (ret);
}

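/*
 * Name:	qla_tx_done
 * Function:	Taskqueue handler; reaps completed transmit descriptors and
 *		calls qla_start() to drain any frames still queued.
 */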
static void
qla_tx_done(void *context, int pending)
{
	qla_host_t *ha = context;
	struct ifnet   *ifp;

	ifp = ha->ifp;

	if (!ifp)
		return;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		QL_DPRINT8(ha, (ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		return;
	}
	ql_hw_tx_done(ha);

	qla_start(ha->ifp);
}

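/*
 * Name:	qla_get_peer
 * Function:	Finds the sibling PCI function (same slot, same device id)
 *		so the two ports can coordinate resets through peer messages.
 */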
static void
qla_get_peer(qla_host_t *ha)
{
	device_t *peers;
	int count, i, slot;
	int my_slot = pci_get_slot(ha->pci_dev);

	if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
		return;

	for (i = 0; i < count; i++) {
		slot = pci_get_slot(peers[i]);

		if ((slot >= 0) && (slot == my_slot) &&
			(pci_get_device(peers[i]) ==
				pci_get_device(ha->pci_dev))) {
			if (ha->pci_dev != peers[i])
				ha->peer_dev = peers[i];
		}
	}

	/* device_get_children() allocates the array; the caller must free it */
	free(peers, M_TEMP);
}

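/*
 * Name:	qla_send_msg_to_peer
 * Function:	Posts a message (reset/ack) into the peer function's softc;
 *		the peer's watchdog picks it up on its next tick.
 */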
static void
qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
{
	qla_host_t *ha_peer;

	if (ha->peer_dev) {
		if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {

			ha_peer->msg_from_peer = msg_to_peer;
		}
	}
}

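/*
 * Name:	qla_error_recovery
 * Function:	Taskqueue handler for adapter resets. The even-numbered PCI
 *		function drives the reset (requesting, then acking, via peer
 *		messages) while the odd function waits for the ack; both
 *		sides then reinitialize the hardware and their buffer rings.
 */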
static void
qla_error_recovery(void *context, int pending)
{
	qla_host_t *ha = context;
	uint32_t msecs_100 = 100;
	struct ifnet *ifp = ha->ifp;

	(void)QLA_LOCK(ha, __func__, 0);

	ha->flags.stop_rcv = 1;

	ql_hw_stop_rcv(ha);

	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

	QLA_UNLOCK(ha, __func__);

	if ((ha->pci_func & 0x1) == 0) {

		if (!ha->msg_from_peer) {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);

			while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
				msecs_100--)
				qla_mdelay(__func__, 100);
		}

		ha->msg_from_peer = 0;

		ql_minidump(ha);

		(void) ql_init_hw(ha);
		qla_free_xmt_bufs(ha);
		qla_free_rcv_bufs(ha);

		qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);

	} else {
		if (ha->msg_from_peer == QL_PEER_MSG_RESET) {

			ha->msg_from_peer = 0;

			qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
		} else {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
		}

		while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--)
			qla_mdelay(__func__, 100);
		ha->msg_from_peer = 0;

		(void) ql_init_hw(ha);
		qla_free_xmt_bufs(ha);
		qla_free_rcv_bufs(ha);
	}
	(void)QLA_LOCK(ha, __func__, 0);

	if (qla_alloc_xmt_bufs(ha) != 0) {
		QLA_UNLOCK(ha, __func__);
		return;
	}

	if (qla_alloc_rcv_bufs(ha) != 0) {
		QLA_UNLOCK(ha, __func__);
		return;
	}

	ha->flags.stop_rcv = 0;
	if (ql_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ha->flags.qla_watchdog_pause = 0;
	}

	QLA_UNLOCK(ha, __func__);
}