/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_os.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include <sys/smp.h>

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8030
#define PCI_PRODUCT_QLOGIC_ISP8030	0x8030
#endif

#define PCI_QLOGIC_ISP8030 \
	((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)
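
/*
 * Composite PCI ID: device ID in the upper 16 bits, vendor ID in the
 * lower 16 bits, matching the key built from pci_get_device() and
 * pci_get_vendor() in qla_pci_probe() below.
 */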

/*
 * static functions
 */
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);
static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);

static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qla_stop(qla_host_t *ha);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
static void qla_tx_done(void *context, int pending);
static void qla_get_peer(qla_host_t *ha);
static void qla_error_recovery(void *context, int pending);
static void qla_async_event(void *context, int pending);

/*
 * Hooks to the Operating System
 */
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);

static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void qla_start(struct ifnet *ifp);

static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla83xx_devclass;

DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);

MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
MODULE_DEPEND(qla83xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");

#define QL_STD_REPLENISH_THRES		0
#define QL_JUMBO_REPLENISH_THRES	32
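
/*
 * Rx replenish thresholds loaded into ha->std_replenish: 0 for standard
 * MTU, 32 when running jumbo frames (switched in qla_ioctl() on
 * SIOCSIFMTU).  The exact batching semantics live in the hw layer.
 */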

static char dev_str[64];
static char ver_str[64];

/*
 * Name:	qla_pci_probe
 * Function:	Validate the PCI device to be an ISP 83xx device
 */
static int
qla_pci_probe(device_t dev)
{
        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
        case PCI_QLOGIC_ISP8030:
		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
			"Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
                device_set_desc(dev, dev_str);
                break;
        default:
                return (ENXIO);
        }

        if (bootverbose)
                printf("%s: %s\n", __func__, dev_str);

        return (BUS_PROBE_DEFAULT);
}

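/*
 * Name:	qla_add_sysctls
 * Function:	Registers the driver's sysctl nodes under dev.ql.<unit>
 */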
static void
qla_add_sysctls(qla_host_t *ha)
{
        device_t dev = ha->pci_dev;

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "version", CTLFLAG_RD,
		ver_str, 0, "Driver Version");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_stats, "I", "Statistics");

        SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "fw_version", CTLFLAG_RD,
                ha->fw_ver_str, 0, "firmware version");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_link_status, "I", "Link Status");

	ha->dbg_level = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "debug", CTLFLAG_RW,
                &ha->dbg_level, ha->dbg_level, "Debug Level");

	ha->std_replenish = QL_STD_REPLENISH_THRES;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "std_replenish", CTLFLAG_RW,
                &ha->std_replenish, ha->std_replenish,
                "Threshold for Replenishing Standard Frames");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv4_lro",
                CTLFLAG_RD, &ha->ipv4_lro,
                "number of ipv4 lro completions");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv6_lro",
                CTLFLAG_RD, &ha->ipv6_lro,
                "number of ipv6 lro completions");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "tx_tso_frames",
		CTLFLAG_RD, &ha->tx_tso_frames,
		"number of Tx TSO Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_vlan_tx_frames",
		CTLFLAG_RD, &ha->hw_vlan_tx_frames,
		"number of Tx VLAN Frames");

        return;
}

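/*
 * Name:	qla_watchdog
 * Function:	Periodic health check run from ha->tx_callout.  Kicks off
 *		error recovery when the hardware looks unhealthy, dispatches
 *		queued async events, and schedules the Tx completion task
 *		when any Tx ring has outstanding completions.
 */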
static void
qla_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	qla_hw_t *hw;
	struct ifnet *ifp;
	uint32_t i;
	qla_hw_tx_cntxt_t *hw_tx_cntxt;

	hw = &ha->hw;
	ifp = ha->ifp;

        if (ha->flags.qla_watchdog_exit) {
		ha->qla_watchdog_exited = 1;
		return;
	}
	ha->qla_watchdog_exited = 0;

	if (!ha->flags.qla_watchdog_pause) {
		if (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
			(ha->msg_from_peer == QL_PEER_MSG_RESET)) {
			ha->qla_watchdog_paused = 1;
			ha->flags.qla_watchdog_pause = 1;
			ha->qla_initiate_recovery = 0;
			ha->err_inject = 0;
			taskqueue_enqueue(ha->err_tq, &ha->err_task);
		} else {
                        if (ha->async_event) {
                                ha->async_event = 0;
                                taskqueue_enqueue(ha->async_event_tq,
                                        &ha->async_event_task);
                        }

			for (i = 0; i < ha->hw.num_tx_rings; i++) {
				hw_tx_cntxt = &hw->tx_cntxt[i];
				if (qla_le32_to_host(*(hw_tx_cntxt->tx_cons)) !=
					hw_tx_cntxt->txr_comp) {
					taskqueue_enqueue(ha->tx_tq,
						&ha->tx_task);
					break;
				}
			}

			if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
				taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
			}
			ha->qla_watchdog_paused = 0;
		}

	} else {
		ha->qla_watchdog_paused = 1;
	}

	ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);
}

/*
 * Name:	qla_pci_attach
 * Function:	attaches the device to the operating system
 */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len;
	int i;
	uint32_t num_rcvq = 0;

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
                device_printf(dev, "device is not ISP8030\n");
                return (ENXIO);
	}

        ha->pci_func = pci_get_function(dev);

        ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qla_pci_attach_err;
        }

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);

	mtx_init(&ha->tx_lock, "qla83xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);

	qla_add_sysctls(ha);
	ql_hw_add_sysctls(ha);

	ha->flags.lock_init = 1;

	ha->reg_rid1 = PCIR_BAR(2);
	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->reg_rid1, RF_ACTIVE);

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p\n", __func__, ha,
		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));

        /* initialize hardware */
        if (ql_init_hw(ha)) {
                device_printf(dev, "%s: ql_init_hw failed\n", __func__);
                goto qla_pci_attach_err;
        }

        device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
                ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                ha->fw_ver_build);
        snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
                        ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                        ha->fw_ver_build);

        if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
                device_printf(dev, "%s: qla_get_nic_partition failed\n",
                        __func__);
                goto qla_pci_attach_err;
        }
        device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
                " msix_count 0x%x pci_reg %p num_rcvq = %d\n", __func__, ha,
                ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg, num_rcvq);

#ifdef QL_ENABLE_ISCSI_TLV
        if ((ha->msix_count < 64) || (num_rcvq != 32)) {
                ha->hw.num_sds_rings = 15;
                ha->hw.num_tx_rings = 32;
        }
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
	ha->hw.num_rds_rings = ha->hw.num_sds_rings;

	ha->msix_count = ha->hw.num_sds_rings + 1;

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}

	ha->mbx_irq_rid = 1;
	ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->mbx_irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));
	if (ha->mbx_irq == NULL) {
		device_printf(dev, "could not allocate mbx interrupt\n");
		goto qla_pci_attach_err;
	}
	if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
		NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
		device_printf(dev, "could not setup mbx interrupt\n");
		goto qla_pci_attach_err;
	}

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->irq_vec[i].sds_idx = i;
                ha->irq_vec[i].ha = ha;
                ha->irq_vec[i].irq_rid = 2 + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			goto qla_pci_attach_err;
		}
		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, ql_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle)) {
			device_printf(dev, "could not setup interrupt\n");
			goto qla_pci_attach_err;
		}
	}

	printf("%s: mp_ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
		ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);

	ql_read_mac_addr(ha);

	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (ql_alloc_dma(ha)) {
		device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}
	qla_get_peer(ha);

	/* create the o.s ethernet interface */
	qla_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->flags.qla_watchdog_pause = 1;

	TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->tx_tq);
	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
		device_get_nameunit(ha->pci_dev));

	callout_init(&ha->tx_callout, 1);
	ha->flags.qla_callout_init = 1;

	/* create ioctl device interface */
	if (ql_make_cdev(ha)) {
		device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
	ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->err_tq);
	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
		device_get_nameunit(ha->pci_dev));

        TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
        ha->async_event_tq = taskqueue_create_fast("qla_asyncq", M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->async_event_tq);
        taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
                device_get_nameunit(ha->pci_dev));

	QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
        return (0);

qla_pci_attach_err:

	qla_release(ha);

	QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
        return (ENXIO);
}

/*
 * Name:	qla_pci_detach
 * Function:	Unhooks the device from the operating system
 */
static int
qla_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	struct ifnet *ifp;

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

	(void)QLA_LOCK(ha, __func__, 0);
	qla_stop(ha);
	QLA_UNLOCK(ha, __func__);

	qla_release(ha);

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

        return (0);
}

/*
 * SYSCTL Related Callbacks
 */
static int
qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		ql_get_stats(ha);
	}
	return (err);
}

static int
qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		ql_hw_link_status(ha);
	}
	return (err);
}

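/*
 * Both handlers above act only when a 1 is written, e.g. (unit 0 assumed):
 *	sysctl dev.ql.0.stats=1
 *	sysctl dev.ql.0.link_status=1
 */
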
/*
 * Name:	qla_release
 * Function:	Releases the resources allocated for the device
 */
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

        if (ha->async_event_tq) {
                taskqueue_drain(ha->async_event_tq, &ha->async_event_task);
                taskqueue_free(ha->async_event_tq);
        }

	if (ha->err_tq) {
		taskqueue_drain(ha->err_tq, &ha->err_task);
		taskqueue_free(ha->err_tq);
	}

	if (ha->tx_tq) {
		taskqueue_drain(ha->tx_tq, &ha->tx_task);
		taskqueue_free(ha->tx_tq);
	}

	ql_del_cdev(ha);

	if (ha->flags.qla_watchdog_active) {
		ha->flags.qla_watchdog_exit = 1;

		while (ha->qla_watchdog_exited == 0)
			qla_mdelay(__func__, 1);
	}

	if (ha->flags.qla_callout_init)
		callout_stop(&ha->tx_callout);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	ql_free_dma(ha);
	qla_free_parent_dma_tag(ha);

	if (ha->mbx_handle)
		(void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);

	if (ha->mbx_irq)
		(void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
				ha->mbx_irq);

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}
	}

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->tx_lock);
		mtx_destroy(&ha->hw_lock);
	}

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

        if (ha->pci_reg1)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
				ha->pci_reg1);
}

/*
 * DMA Related Functions
 */

static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        *((bus_addr_t *)arg) = 0;

        if (error) {
                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
                return;
	}

        *((bus_addr_t *)arg) = segs[0].ds_addr;

	return;
}

int
ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        int             ret = 0;
        device_t        dev;
        bus_addr_t      b_addr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        ret = bus_dma_tag_create(
                        ha->parent_tag,/* parent */
                        dma_buf->alignment,
                        ((bus_size_t)(1ULL << 32)),/* boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        dma_buf->size,          /* maxsize */
                        1,                      /* nsegments */
                        dma_buf->size,          /* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &dma_buf->dma_tag);

        if (ret) {
                device_printf(dev, "%s: could not create dma tag\n", __func__);
                goto ql_alloc_dmabuf_exit;
        }
        ret = bus_dmamem_alloc(dma_buf->dma_tag,
                        (void **)&dma_buf->dma_b,
                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
                        &dma_buf->dma_map);
        if (ret) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
                goto ql_alloc_dmabuf_exit;
        }

        ret = bus_dmamap_load(dma_buf->dma_tag,
                        dma_buf->dma_map,
                        dma_buf->dma_b,
                        dma_buf->size,
                        qla_dmamap_callback,
                        &b_addr, BUS_DMA_NOWAIT);

        if (ret || !b_addr) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
                        dma_buf->dma_map);
                ret = -1;
                goto ql_alloc_dmabuf_exit;
        }

        dma_buf->dma_addr = b_addr;

ql_alloc_dmabuf_exit:
        QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
                __func__, ret, (void *)dma_buf->dma_tag,
                (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

        return (ret);
}

void
ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
        bus_dma_tag_destroy(dma_buf->dma_tag);
}

static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
	int		ret;
	device_t	dev;

	dev = ha->pci_dev;

        /*
         * Allocate parent DMA Tag
         */
        ret = bus_dma_tag_create(
                        bus_get_dma_tag(dev),   /* parent */
                        1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
                        0,                      /* nsegments */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &ha->parent_tag);

        if (ret) {
                device_printf(dev, "%s: could not create parent dma tag\n",
                        __func__);
		return (-1);
        }

        ha->flags.parent_tag = 1;

	return (0);
}

static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
        if (ha->flags.parent_tag) {
                bus_dma_tag_destroy(ha->parent_tag);
                ha->flags.parent_tag = 0;
        }
}

/*
 * Name: qla_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S
 */

static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
	struct ifnet *ifp;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_capabilities = IFCAP_LINKSTATE;

	ifp->if_init = qla_init;
	ifp->if_softc = ha;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = qla_ioctl;
	ifp->if_start = qla_start;

	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
	IFQ_SET_READY(&ifp->if_snd);

	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	ether_ifattach(ifp, qla_get_mac_addr(ha));

	ifp->if_capabilities |= IFCAP_HWCSUM |
				IFCAP_TSO4 |
				IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;

	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

	return;
}

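/*
 * Name:	qla_init_locked
 * Function:	(Re)initializes the interface: allocates Tx/Rx buffers and
 *		brings the hardware interface up.  Called with QLA_LOCK held.
 */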
static void
qla_init_locked(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;

	qla_stop(ha);

	if (qla_alloc_xmt_bufs(ha) != 0)
		return;

	qla_confirm_9kb_enable(ha);

	if (qla_alloc_rcv_bufs(ha) != 0)
		return;

	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;

	ha->flags.stop_rcv = 0;
	if (ql_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ha->flags.qla_watchdog_pause = 0;
		ha->hw_vlan_tx_frames = 0;
		ha->tx_tso_frames = 0;
	}

	return;
}

static void
qla_init(void *arg)
{
	qla_host_t *ha;

	ha = (qla_host_t *)arg;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	(void)QLA_LOCK(ha, __func__, 0);
	qla_init_locked(ha);
	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

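/*
 * Name:	qla_set_multi
 * Function:	Collects up to Q8_MAX_NUM_MULTICAST_ADDRS AF_LINK addresses
 *		from the interface into a flat array and programs them into
 *		the hardware filter (add_multi selects add vs. delete).
 */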
static int
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = ha->ifp;
	int ret = 0;

	if_maddr_rlock(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
			break;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

		mcnt++;
	}

	if_maddr_runlock(ifp);

	if (QLA_LOCK(ha, __func__, 1) == 0) {
		ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
		QLA_UNLOCK(ha, __func__);
	}

	return (ret);
}

static int
qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				(void)QLA_LOCK(ha, __func__, 0);
				qla_init_locked(ha);
				QLA_UNLOCK(ha, __func__);
			}
			QL_DPRINT4(ha, (ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
		} else {
			ether_ioctl(ifp, cmd, data);
		}
		break;

	case SIOCSIFMTU:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_MTU) {
			ret = EINVAL;
		} else {
			(void) QLA_LOCK(ha, __func__, 0);
			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ret = ql_set_max_mtu(ha, ha->max_frame_size,
					ha->hw.rcv_cntxt_id);
			}

			if (ifp->if_mtu > ETHERMTU)
				ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
			else
				ha->std_replenish = QL_STD_REPLENISH_THRES;

			QLA_UNLOCK(ha, __func__);

			if (ret)
				ret = EINVAL;
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		(void)QLA_LOCK(ha, __func__, 0);

		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = ql_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = ql_set_allmulti(ha);
				}
			} else {
				qla_init_locked(ha);
				ha->max_frame_size = ifp->if_mtu +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				ret = ql_set_max_mtu(ha, ha->max_frame_size,
					ha->hw.rcv_cntxt_id);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qla_stop(ha);
			ha->if_flags = ifp->if_flags;
		}

		QLA_UNLOCK(ha, __func__);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (qla_set_multi(ha, 1))
				ret = EINVAL;
		}
		break;

	case SIOCDELMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (qla_set_multi(ha, 0))
				ret = EINVAL;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			qla_init(ha);

		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}

static int
qla_media_change(struct ifnet *ifp)
{
	qla_host_t *ha;
	struct ifmedia *ifm;
	int ret = 0;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}

static void
qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ql_update_link_state(ha);
	if (ha->hw.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,
		(ha->hw.link_up ? "link_up" : "link_down")));

	return;
}

static void
qla_start(struct ifnet *ifp)
{
	struct mbuf    *m_head;
	qla_host_t *ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8(ha, (ha->pci_dev,
			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
		return;
	}

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		QL_DPRINT8(ha,
			(ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	if (!ha->hw.link_up || !ha->watchdog_ticks)
		ql_update_link_state(ha);

	if (!ha->hw.link_up) {
		QL_DPRINT8(ha, (ha->pci_dev, "%s: link down\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	while (ifp->if_snd.ifq_head != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL) {
			QL_DPRINT8(ha, (ha->pci_dev, "%s: m_head == NULL\n",
				__func__));
			break;
		}

		if (qla_send(ha, &m_head)) {
			if (m_head == NULL)
				break;
			QL_DPRINT8(ha, (ha->pci_dev, "%s: PREPEND\n", __func__));
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}
	QLA_TX_UNLOCK(ha);
	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return;
}

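/*
 * Name:	qla_send
 * Function:	Maps an mbuf chain for DMA (defragmenting once on EFBIG)
 *		and hands it to the hardware Tx ring selected from the
 *		mbuf's flowid hash.
 */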
static int
qla_send(qla_host_t *ha, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t		map;
	int			nsegs;
	int			ret = -1;
	uint32_t		tx_idx;
	struct mbuf		*m_head = *m_headp;
	uint32_t		txr_idx = ha->txr_idx;
	uint32_t		iscsi_pdu = 0;

	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

	/* check if flowid is set */

	if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE) {
#ifdef QL_ENABLE_ISCSI_TLV
		if (qla_iscsi_pdu(ha, m_head) == 0) {
			iscsi_pdu = 1;
			txr_idx = m_head->m_pkthdr.flowid &
					((ha->hw.num_tx_rings >> 1) - 1);
		} else {
			txr_idx = m_head->m_pkthdr.flowid &
					(ha->hw.num_tx_rings - 1);
		}
#else
		txr_idx = m_head->m_pkthdr.flowid & (ha->hw.num_tx_rings - 1);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
	}

	tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;
	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {
		struct mbuf *m;

		QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {
			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}

	} else if (ret) {
		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

        if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx,
				iscsi_pdu))) {
		ha->tx_ring[txr_idx].count++;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
	} else {
		if (ret == EINVAL) {
			if (m_head)
				m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}

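/*
 * Name:	qla_stop
 * Function:	Pauses the watchdog, stops the receive side, tears down the
 *		hardware interface and frees the Tx/Rx buffers.
 */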
static void
qla_stop(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;
	device_t	dev;

	dev = ha->pci_dev;

	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

	ha->flags.qla_watchdog_pause = 1;

	while (!ha->qla_watchdog_paused)
		qla_mdelay(__func__, 1);

	ha->flags.stop_rcv = 1;
	ql_hw_stop_rcv(ha);

	ql_del_hw_if(ha);

	qla_free_xmt_bufs(ha);
	qla_free_rcv_bufs(ha);

	return;
}

/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
	int ret = 0;
	uint32_t i, j;
	qla_tx_buf_t *txb;

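	/*
	 * Tx DMA tag: up to QLA_MAX_SEGMENTS segments of at most PAGE_SIZE
	 * each, sized for the largest TSO frame.  Note the tag hangs off the
	 * NULL (root) parent rather than ha->parent_tag; presumably this is
	 * intentional, since the hardware needs no extra addressing
	 * restrictions here.
	 */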
	if (bus_dma_tag_create(NULL,    /* parent */
		1, 0,    /* alignment, bounds */
		BUS_SPACE_MAXADDR,       /* lowaddr */
		BUS_SPACE_MAXADDR,       /* highaddr */
		NULL, NULL,      /* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
		QLA_MAX_SEGMENTS,        /* nsegments */
		PAGE_SIZE,        /* maxsegsize */
		BUS_DMA_ALLOCNOW,        /* flags */
		NULL,    /* lockfunc */
		NULL,    /* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
			txb = &ha->tx_ring[j].tx_buf[i];

			if ((ret = bus_dmamap_create(ha->tx_tag,
					BUS_DMA_NOWAIT, &txb->map))) {
				ha->err_tx_dmamap_create++;
				device_printf(ha->pci_dev,
					"%s: bus_dmamap_create failed[%d]\n",
					__func__, ret);

				qla_free_xmt_bufs(ha);

				return (ret);
			}
		}
	}

	return (0);
}

/*
 * Release mbuf after it has been sent on the wire
 */
static void
qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head && txb->map) {
		bus_dmamap_unload(ha->tx_tag, txb->map);

		m_freem(txb->m_head);
		txb->m_head = NULL;
	}

	if (txb->map)
		bus_dmamap_destroy(ha->tx_tag, txb->map);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

static void
qla_free_xmt_bufs(qla_host_t *ha)
{
	int		i, j;

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
			qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
	}

	if (ha->tx_tag != NULL) {
		bus_dma_tag_destroy(ha->tx_tag);
		ha->tx_tag = NULL;
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}
	return;
}
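/*
 * Name:	qla_alloc_rcv_std
 * Function:	Creates a DMA map for every descriptor in every Rx ring,
 *		then attaches an mbuf cluster to each and writes its
 *		physical address into the hardware receive descriptor.
 */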
static int
qla_alloc_rcv_std(qla_host_t *ha)
{
	int		i, j, k, r, ret = 0;
	qla_rx_buf_t	*rxb;
	qla_rx_ring_t	*rx_ring;

	for (r = 0; r < ha->hw.num_rds_rings; r++) {
		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &rx_ring->rx_buf[i];

			ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
					&rxb->map);

			if (ret) {
				device_printf(ha->pci_dev,
					"%s: dmamap[%d, %d] failed\n",
					__func__, r, i);

				for (k = 0; k < r; k++) {
					for (j = 0; j < NUM_RX_DESCRIPTORS;
						j++) {
						rxb = &ha->rx_ring[k].rx_buf[j];
						bus_dmamap_destroy(ha->rx_tag,
							rxb->map);
					}
				}

				for (j = 0; j < i; j++) {
					bus_dmamap_destroy(ha->rx_tag,
						rx_ring->rx_buf[j].map);
				}
				goto qla_alloc_rcv_std_err;
			}
		}
	}

	qla_init_hw_rcv_descriptors(ha);

	for (r = 0; r < ha->hw.num_rds_rings; r++) {
		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &rx_ring->rx_buf[i];
			rxb->handle = i;
			if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
				/*
				 * set the physical address in the
				 * corresponding descriptor entry in the
				 * receive ring/queue for the hba
				 */
				qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
					rxb->paddr,
					(rxb->m_head)->m_pkthdr.len);
			} else {
				device_printf(ha->pci_dev,
					"%s: ql_get_mbuf [%d, %d] failed\n",
					__func__, r, i);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				goto qla_alloc_rcv_std_err;
			}
		}
	}
	return (0);

qla_alloc_rcv_std_err:
	return (-1);
}

static void
qla_free_rcv_std(qla_host_t *ha)
{
	int		i, r;
	qla_rx_buf_t	*rxb;

	for (r = 0; r < ha->hw.num_rds_rings; r++) {
		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &ha->rx_ring[r].rx_buf[i];
			if (rxb->m_head != NULL) {
				bus_dmamap_unload(ha->rx_tag, rxb->map);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				m_freem(rxb->m_head);
				rxb->m_head = NULL;
			}
		}
	}
	return;
}

static int
qla_alloc_rcv_bufs(qla_host_t *ha)
{
	int		i, ret = 0;

	if (bus_dma_tag_create(NULL,    /* parent */
			1, 0,    /* alignment, bounds */
			BUS_SPACE_MAXADDR,       /* lowaddr */
			BUS_SPACE_MAXADDR,       /* highaddr */
			NULL, NULL,      /* filter, filterarg */
			MJUM9BYTES,     /* maxsize */
			1,        /* nsegments */
			MJUM9BYTES,        /* maxsegsize */
			BUS_DMA_ALLOCNOW,        /* flags */
			NULL,    /* lockfunc */
			NULL,    /* lockfuncarg */
			&ha->rx_tag)) {
		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
			__func__);

		return (ENOMEM);
	}

	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
	}

	ret = qla_alloc_rcv_std(ha);

	return (ret);
}

static void
qla_free_rcv_bufs(qla_host_t *ha)
{
	int		i;

	qla_free_rcv_std(ha);

	if (ha->rx_tag != NULL) {
		bus_dma_tag_destroy(ha->rx_tag);
		ha->rx_tag = NULL;
	}

	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
	}

	return;
}

int
ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
{
	register struct mbuf	*mp = nmp;
	struct ifnet		*ifp;
	int			ret = 0;
	uint32_t		offset;
	bus_dma_segment_t	segs[1];
	int			nsegs, mbuf_size;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

        if (ha->hw.enable_9kb)
                mbuf_size = MJUM9BYTES;
        else
                mbuf_size = MCLBYTES;

	if (mp == NULL) {
                if (ha->hw.enable_9kb)
                        mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size);
                else
                        mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

		if (mp == NULL) {
			ha->err_m_getcl++;
			ret = ENOBUFS;
			device_printf(ha->pci_dev,
					"%s: m_getcl failed\n", __func__);
			goto exit_ql_get_mbuf;
		}
		mp->m_len = mp->m_pkthdr.len = mbuf_size;
	} else {
		mp->m_len = mp->m_pkthdr.len = mbuf_size;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}
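
	/*
	 * Advance m_data to the next 8-byte boundary; the receive DMA
	 * engine presumably requires 8-byte-aligned buffer addresses.
	 */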
	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
	if (offset) {
		offset = 8 - offset;
		m_adj(mp, offset);
	}

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
			mp, segs, &nsegs, BUS_DMA_NOWAIT);
	rxb->paddr = segs[0].ds_addr;

	if (ret || !rxb->paddr || (nsegs != 1)) {
		m_free(mp);
		rxb->m_head = NULL;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			__func__, ret, (long long unsigned int)rxb->paddr,
			nsegs);
		ret = -1;
		goto exit_ql_get_mbuf;
	}
	rxb->m_head = mp;
	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_ql_get_mbuf:
	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
	return (ret);
}

static void
qla_tx_done(void *context, int pending)
{
	qla_host_t *ha = context;
	struct ifnet   *ifp;

	ifp = ha->ifp;

	if (!ifp)
		return;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		QL_DPRINT8(ha, (ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		return;
	}
	ql_hw_tx_done(ha);

	qla_start(ha->ifp);
}

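/*
 * Name:	qla_get_peer
 * Function:	Locates the other PCI function of the same adapter (same
 *		slot and device ID) so the two ports can coordinate resets.
 */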
static void
qla_get_peer(qla_host_t *ha)
{
	device_t *peers;
	int count, i, slot;
	int my_slot = pci_get_slot(ha->pci_dev);

	if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
		return;

	for (i = 0; i < count; i++) {
		slot = pci_get_slot(peers[i]);

		if ((slot >= 0) && (slot == my_slot) &&
			(pci_get_device(peers[i]) ==
				pci_get_device(ha->pci_dev))) {
			if (ha->pci_dev != peers[i])
				ha->peer_dev = peers[i];
		}
	}
}

static void
qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
{
	qla_host_t *ha_peer;

	if (ha->peer_dev) {
		if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {
			ha_peer->msg_from_peer = msg_to_peer;
		}
	}
}

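/*
 * Name:	qla_error_recovery
 * Function:	Resets the adapter after a fatal error.  The even-numbered
 *		PCI function drives the reset: it sends QL_PEER_MSG_RESET to
 *		its peer, waits (up to ~10s) for QL_PEER_MSG_ACK, reinits the
 *		hardware, then acks back; the odd function mirrors the
 *		handshake.  Both sides then reallocate buffers and bring the
 *		interface back up.
 */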
static void
qla_error_recovery(void *context, int pending)
{
	qla_host_t *ha = context;
	uint32_t msecs_100 = 100;
	struct ifnet *ifp = ha->ifp;

	(void)QLA_LOCK(ha, __func__, 0);

	ha->hw.imd_compl = 1;
	qla_mdelay(__func__, 300);

	ha->flags.stop_rcv = 1;

	ql_hw_stop_rcv(ha);

	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

	QLA_UNLOCK(ha, __func__);

	if ((ha->pci_func & 0x1) == 0) {
		if (!ha->msg_from_peer) {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);

			while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
				msecs_100--)
				qla_mdelay(__func__, 100);
		}

		ha->msg_from_peer = 0;

		ql_minidump(ha);

		(void) ql_init_hw(ha);
		qla_free_xmt_bufs(ha);
		qla_free_rcv_bufs(ha);

		qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);

	} else {
		if (ha->msg_from_peer == QL_PEER_MSG_RESET) {
			ha->msg_from_peer = 0;

			qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
		} else {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
		}

		while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--)
			qla_mdelay(__func__, 100);
		ha->msg_from_peer = 0;

		(void) ql_init_hw(ha);
		qla_free_xmt_bufs(ha);
		qla_free_rcv_bufs(ha);
	}

	(void)QLA_LOCK(ha, __func__, 0);

	if (qla_alloc_xmt_bufs(ha) != 0) {
		QLA_UNLOCK(ha, __func__);
		return;
	}

	qla_confirm_9kb_enable(ha);

	if (qla_alloc_rcv_bufs(ha) != 0) {
		QLA_UNLOCK(ha, __func__);
		return;
	}

	ha->flags.stop_rcv = 0;
	if (ql_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ha->flags.qla_watchdog_pause = 0;
	}

	QLA_UNLOCK(ha, __func__);
}

static void
qla_async_event(void *context, int pending)
{
	qla_host_t *ha = context;

	(void)QLA_LOCK(ha, __func__, 0);
	qla_hw_async_event(ha);
	QLA_UNLOCK(ha, __func__);
}