xref: /freebsd/sys/dev/qlxge/qls_os.c (revision 7cc42f6d25ef2e19059d088fa7d4853fe9afefb5)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2013-2014 Qlogic Corporation
5  * All rights reserved.
6  *
7  *  Redistribution and use in source and binary forms, with or without
8  *  modification, are permitted provided that the following conditions
9  *  are met:
10  *
11  *  1. Redistributions of source code must retain the above copyright
12  *     notice, this list of conditions and the following disclaimer.
13  *  2. Redistributions in binary form must reproduce the above copyright
14  *     notice, this list of conditions and the following disclaimer in the
15  *     documentation and/or other materials provided with the distribution.
16  *
17  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  *  and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27  *  POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*
31  * File: qls_os.c
32  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include "qls_os.h"
39 #include "qls_hw.h"
40 #include "qls_def.h"
41 #include "qls_inline.h"
42 #include "qls_ver.h"
43 #include "qls_glbl.h"
44 #include "qls_dbg.h"
45 #include <sys/smp.h>
46 
47 /*
48  * Some PCI Configuration Space Related Defines
49  */
50 
51 #ifndef PCI_VENDOR_QLOGIC
52 #define PCI_VENDOR_QLOGIC	0x1077
53 #endif
54 
55 #ifndef PCI_DEVICE_QLOGIC_8000
56 #define PCI_DEVICE_QLOGIC_8000	0x8000
57 #endif
58 
59 #define PCI_QLOGIC_DEV8000 \
60 	((PCI_DEVICE_QLOGIC_8000 << 16) | PCI_VENDOR_QLOGIC)
61 
62 /*
63  * static functions
64  */
65 static int qls_alloc_parent_dma_tag(qla_host_t *ha);
66 static void qls_free_parent_dma_tag(qla_host_t *ha);
67 
68 static void qls_flush_xmt_bufs(qla_host_t *ha);
69 
70 static int qls_alloc_rcv_bufs(qla_host_t *ha);
71 static void qls_free_rcv_bufs(qla_host_t *ha);
72 
73 static void qls_init_ifnet(device_t dev, qla_host_t *ha);
74 static void qls_release(qla_host_t *ha);
75 static void qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
76 		int error);
77 static void qls_stop(qla_host_t *ha);
78 static int qls_send(qla_host_t *ha, struct mbuf **m_headp);
79 static void qls_tx_done(void *context, int pending);
80 
81 static int qls_config_lro(qla_host_t *ha);
82 static void qls_free_lro(qla_host_t *ha);
83 
84 static void qls_error_recovery(void *context, int pending);
85 
86 /*
87  * Hooks to the Operating Systems
88  */
89 static int qls_pci_probe (device_t);
90 static int qls_pci_attach (device_t);
91 static int qls_pci_detach (device_t);
92 
93 static void qls_start(struct ifnet *ifp);
94 static void qls_init(void *arg);
95 static int qls_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
96 static int qls_media_change(struct ifnet *ifp);
97 static void qls_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
98 
99 static device_method_t qla_pci_methods[] = {
100 	/* Device interface */
101 	DEVMETHOD(device_probe, qls_pci_probe),
102 	DEVMETHOD(device_attach, qls_pci_attach),
103 	DEVMETHOD(device_detach, qls_pci_detach),
104 	{ 0, 0 }
105 };
106 
107 static driver_t qla_pci_driver = {
108 	"ql", qla_pci_methods, sizeof (qla_host_t),
109 };
110 
111 static devclass_t qla8000_devclass;
112 
113 DRIVER_MODULE(qla8000, pci, qla_pci_driver, qla8000_devclass, 0, 0);
114 
115 MODULE_DEPEND(qla8000, pci, 1, 1, 1);
116 MODULE_DEPEND(qla8000, ether, 1, 1, 1);
117 
118 MALLOC_DEFINE(M_QLA8000BUF, "qla8000buf", "Buffers for qla8000 driver");
119 
120 static char dev_str[64];
121 static char ver_str[64];
122 
123 /*
124  * Name:	qls_pci_probe
125  * Function:	Validate the PCI device to be a QLA80XX device
126  */
127 static int
128 qls_pci_probe(device_t dev)
129 {
130         switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
131         case PCI_QLOGIC_DEV8000:
132 		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
133 			"Qlogic ISP 8000 PCI CNA Adapter-Ethernet Function",
134 			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
135 			QLA_VERSION_BUILD);
136 		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
137 			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
138 			QLA_VERSION_BUILD);
139                 device_set_desc(dev, dev_str);
140                 break;
141         default:
142                 return (ENXIO);
143         }
144 
145         if (bootverbose)
146                 printf("%s: %s\n ", __func__, dev_str);
147 
148         return (BUS_PROBE_DEFAULT);
149 }
150 
151 static int
152 qls_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
153 {
154         int err = 0, ret;
155         qla_host_t *ha;
156         uint32_t i;
157 
158         err = sysctl_handle_int(oidp, &ret, 0, req);
159 
160         if (err || !req->newptr)
161                 return (err);
162 
163         if (ret == 1) {
164                 ha = (qla_host_t *)arg1;
165 
166                 for (i = 0; i < ha->num_tx_rings; i++) {
167                         device_printf(ha->pci_dev,
168                                 "%s: tx_ring[%d].tx_frames= %p\n",
169 				__func__, i,
170                                 (void *)ha->tx_ring[i].tx_frames);
171 
172                         device_printf(ha->pci_dev,
173                                 "%s: tx_ring[%d].tx_tso_frames= %p\n",
174 				__func__, i,
175                                 (void *)ha->tx_ring[i].tx_tso_frames);
176 
177                         device_printf(ha->pci_dev,
178                                 "%s: tx_ring[%d].tx_vlan_frames= %p\n",
179 				__func__, i,
180                                 (void *)ha->tx_ring[i].tx_vlan_frames);
181 
182                         device_printf(ha->pci_dev,
183                                 "%s: tx_ring[%d].txr_free= 0x%08x\n",
184 				__func__, i,
185                                 ha->tx_ring[i].txr_free);
186 
187                         device_printf(ha->pci_dev,
188                                 "%s: tx_ring[%d].txr_next= 0x%08x\n",
189 				__func__, i,
190                                 ha->tx_ring[i].txr_next);
191 
192                         device_printf(ha->pci_dev,
193                                 "%s: tx_ring[%d].txr_done= 0x%08x\n",
194 				__func__, i,
195                                 ha->tx_ring[i].txr_done);
196 
197                         device_printf(ha->pci_dev,
198                                 "%s: tx_ring[%d].txr_cons_idx= 0x%08x\n",
199 				__func__, i,
200                                 *(ha->tx_ring[i].txr_cons_vaddr));
201 		}
202 
203                 for (i = 0; i < ha->num_rx_rings; i++) {
204                         device_printf(ha->pci_dev,
205                                 "%s: rx_ring[%d].rx_int= %p\n",
206 				__func__, i,
207                                 (void *)ha->rx_ring[i].rx_int);
208 
209                         device_printf(ha->pci_dev,
210                                 "%s: rx_ring[%d].rss_int= %p\n",
211 				__func__, i,
212                                 (void *)ha->rx_ring[i].rss_int);
213 
214                         device_printf(ha->pci_dev,
215                                 "%s: rx_ring[%d].lbq_next= 0x%08x\n",
216 				__func__, i,
217                                 ha->rx_ring[i].lbq_next);
218 
219                         device_printf(ha->pci_dev,
220                                 "%s: rx_ring[%d].lbq_free= 0x%08x\n",
221 				__func__, i,
222                                 ha->rx_ring[i].lbq_free);
223 
224                         device_printf(ha->pci_dev,
225                                 "%s: rx_ring[%d].lbq_in= 0x%08x\n",
226 				__func__, i,
227                                 ha->rx_ring[i].lbq_in);
228 
229                         device_printf(ha->pci_dev,
230                                 "%s: rx_ring[%d].sbq_next= 0x%08x\n",
231 				__func__, i,
232                                 ha->rx_ring[i].sbq_next);
233 
234                         device_printf(ha->pci_dev,
235                                 "%s: rx_ring[%d].sbq_free= 0x%08x\n",
236 				__func__, i,
237                                 ha->rx_ring[i].sbq_free);
238 
239                         device_printf(ha->pci_dev,
240                                 "%s: rx_ring[%d].sbq_in= 0x%08x\n",
241 				__func__, i,
242                                 ha->rx_ring[i].sbq_in);
243 		}
244 
245 		device_printf(ha->pci_dev, "%s: err_m_getcl = 0x%08x\n",
246 				__func__, ha->err_m_getcl);
247 		device_printf(ha->pci_dev, "%s: err_m_getjcl = 0x%08x\n",
248 				__func__, ha->err_m_getjcl);
249 		device_printf(ha->pci_dev,
250 				"%s: err_tx_dmamap_create = 0x%08x\n",
251 				__func__, ha->err_tx_dmamap_create);
252 		device_printf(ha->pci_dev,
253 				"%s: err_tx_dmamap_load = 0x%08x\n",
254 				__func__, ha->err_tx_dmamap_load);
255 		device_printf(ha->pci_dev,
256 				"%s: err_tx_defrag = 0x%08x\n",
257 				__func__, ha->err_tx_defrag);
258         }
259         return (err);
260 }
261 
262 static void
263 qls_add_sysctls(qla_host_t *ha)
264 {
265         device_t dev = ha->pci_dev;
266 
267 	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
268 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
269 		OID_AUTO, "version", CTLFLAG_RD,
270 		ver_str, 0, "Driver Version");
271 
272 	qls_dbg_level = 0;
273         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
274                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
275                 OID_AUTO, "debug", CTLFLAG_RW,
276                 &qls_dbg_level, qls_dbg_level, "Debug Level");
277 
278         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
279             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
280             OID_AUTO, "drvr_stats",
281 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
282 	    qls_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");
283 
284         return;
285 }
286 
287 static void
288 qls_watchdog(void *arg)
289 {
290 	qla_host_t *ha = arg;
291 	struct ifnet *ifp;
292 
293 	ifp = ha->ifp;
294 
295         if (ha->flags.qla_watchdog_exit) {
296 		ha->qla_watchdog_exited = 1;
297 		return;
298 	}
299 	ha->qla_watchdog_exited = 0;
300 
301 	if (!ha->flags.qla_watchdog_pause) {
302 		if (ha->qla_initiate_recovery) {
303 			ha->qla_watchdog_paused = 1;
304 			ha->qla_initiate_recovery = 0;
305 			ha->err_inject = 0;
306 			taskqueue_enqueue(ha->err_tq, &ha->err_task);
307 
308 		} else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
309 			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
310 		}
311 
312 		ha->qla_watchdog_paused = 0;
313 	} else {
314 		ha->qla_watchdog_paused = 1;
315 	}
316 
317 	ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
318 	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
319 		qls_watchdog, ha);
320 
321 	return;
322 }
323 
324 /*
325  * Name:	qls_pci_attach
326  * Function:	attaches the device to the operating system
327  */
328 static int
329 qls_pci_attach(device_t dev)
330 {
331 	qla_host_t *ha = NULL;
332 	int i;
333 
334 	QL_DPRINT2((dev, "%s: enter\n", __func__));
335 
336         if ((ha = device_get_softc(dev)) == NULL) {
337                 device_printf(dev, "cannot get softc\n");
338                 return (ENOMEM);
339         }
340 
341         memset(ha, 0, sizeof (qla_host_t));
342 
343         if (pci_get_device(dev) != PCI_DEVICE_QLOGIC_8000) {
344                 device_printf(dev, "device is not QLE8000\n");
345                 return (ENXIO);
346 	}
347 
348         ha->pci_func = pci_get_function(dev);
349 
350         ha->pci_dev = dev;
351 
352 	pci_enable_busmaster(dev);
353 
354 	ha->reg_rid = PCIR_BAR(1);
355 	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
356 				RF_ACTIVE);
357 
358         if (ha->pci_reg == NULL) {
359                 device_printf(dev, "unable to map any ports\n");
360                 goto qls_pci_attach_err;
361         }
362 
363 	ha->reg_rid1 = PCIR_BAR(3);
364 	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
365 			&ha->reg_rid1, RF_ACTIVE);
366 
367         if (ha->pci_reg1 == NULL) {
368                 device_printf(dev, "unable to map any ports\n");
369                 goto qls_pci_attach_err;
370         }
371 
372 	mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
373 	mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);
374 
375 	qls_add_sysctls(ha);
376 	qls_hw_add_sysctls(ha);
377 
378 	ha->flags.lock_init = 1;
379 
380 	ha->msix_count = pci_msix_count(dev);
381 
382 	if (ha->msix_count < qls_get_msix_count(ha)) {
383 		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
384 			ha->msix_count);
385 		goto qls_pci_attach_err;
386 	}
387 
388 	ha->msix_count = qls_get_msix_count(ha);
389 
390 	device_printf(dev, "\n%s: ha %p pci_func 0x%x  msix_count 0x%x"
391 		" pci_reg %p pci_reg1 %p\n", __func__, ha,
392 		ha->pci_func, ha->msix_count, ha->pci_reg, ha->pci_reg1);
393 
394 	if (pci_alloc_msix(dev, &ha->msix_count)) {
395 		device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
396 			ha->msix_count);
397 		ha->msix_count = 0;
398 		goto qls_pci_attach_err;
399 	}
400 
401         for (i = 0; i < ha->num_rx_rings; i++) {
402                 ha->irq_vec[i].cq_idx = i;
403                 ha->irq_vec[i].ha = ha;
404                 ha->irq_vec[i].irq_rid = 1 + i;
405 
406                 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
407                                 &ha->irq_vec[i].irq_rid,
408                                 (RF_ACTIVE | RF_SHAREABLE));
409 
410                 if (ha->irq_vec[i].irq == NULL) {
411                         device_printf(dev, "could not allocate interrupt\n");
412                         goto qls_pci_attach_err;
413                 }
414 
415 		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
416 			(INTR_TYPE_NET | INTR_MPSAFE), NULL, qls_isr,
417 			&ha->irq_vec[i], &ha->irq_vec[i].handle)) {
418 				device_printf(dev,
419 					"could not setup interrupt\n");
420 			goto qls_pci_attach_err;
421 		}
422         }
423 
424 	qls_rd_nic_params(ha);
425 
426 	/* allocate parent dma tag */
427 	if (qls_alloc_parent_dma_tag(ha)) {
428 		device_printf(dev, "%s: qls_alloc_parent_dma_tag failed\n",
429 			__func__);
430 		goto qls_pci_attach_err;
431 	}
432 
433 	/* alloc all dma buffers */
434 	if (qls_alloc_dma(ha)) {
435 		device_printf(dev, "%s: qls_alloc_dma failed\n", __func__);
436 		goto qls_pci_attach_err;
437 	}
438 
439 	/* create the o.s ethernet interface */
440 	qls_init_ifnet(dev, ha);
441 
442 	ha->flags.qla_watchdog_active = 1;
443 	ha->flags.qla_watchdog_pause = 1;
444 
445 	TASK_INIT(&ha->tx_task, 0, qls_tx_done, ha);
446 	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
447 			taskqueue_thread_enqueue, &ha->tx_tq);
448 	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
449 		device_get_nameunit(ha->pci_dev));
450 
451 	callout_init(&ha->tx_callout, 1);
452 	ha->flags.qla_callout_init = 1;
453 
454         /* create ioctl device interface */
455         if (qls_make_cdev(ha)) {
456                 device_printf(dev, "%s: qls_make_cdev failed\n", __func__);
457                 goto qls_pci_attach_err;
458         }
459 
460 	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
461 		qls_watchdog, ha);
462 
463         TASK_INIT(&ha->err_task, 0, qls_error_recovery, ha);
464         ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT,
465                         taskqueue_thread_enqueue, &ha->err_tq);
466         taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
467                 device_get_nameunit(ha->pci_dev));
468 
469 	QL_DPRINT2((dev, "%s: exit 0\n", __func__));
470         return (0);
471 
472 qls_pci_attach_err:
473 
474 	qls_release(ha);
475 
476 	QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));
477         return (ENXIO);
478 }
479 
480 /*
481  * Name:	qls_pci_detach
482  * Function:	Unhooks the device from the operating system
483  */
484 static int
485 qls_pci_detach(device_t dev)
486 {
487 	qla_host_t *ha = NULL;
488 	struct ifnet *ifp;
489 
490 	QL_DPRINT2((dev, "%s: enter\n", __func__));
491 
492         if ((ha = device_get_softc(dev)) == NULL) {
493                 device_printf(dev, "cannot get softc\n");
494                 return (ENOMEM);
495         }
496 
497 	ifp = ha->ifp;
498 
499 	(void)QLA_LOCK(ha, __func__, 0);
500 	qls_stop(ha);
501 	QLA_UNLOCK(ha, __func__);
502 
503 	qls_release(ha);
504 
505 	QL_DPRINT2((dev, "%s: exit\n", __func__));
506 
507         return (0);
508 }
509 
510 /*
511  * Name:	qls_release
512  * Function:	Releases the resources allocated for the device
513  */
514 static void
515 qls_release(qla_host_t *ha)
516 {
517 	device_t dev;
518 	int i;
519 
520 	dev = ha->pci_dev;
521 
522 	if (ha->err_tq) {
523 		taskqueue_drain(ha->err_tq, &ha->err_task);
524 		taskqueue_free(ha->err_tq);
525 	}
526 
527 	if (ha->tx_tq) {
528 		taskqueue_drain(ha->tx_tq, &ha->tx_task);
529 		taskqueue_free(ha->tx_tq);
530 	}
531 
532 	qls_del_cdev(ha);
533 
534 	if (ha->flags.qla_watchdog_active) {
535 		ha->flags.qla_watchdog_exit = 1;
536 
537 		while (ha->qla_watchdog_exited == 0)
538 			qls_mdelay(__func__, 1);
539 	}
540 
541 	if (ha->flags.qla_callout_init)
542 		callout_stop(&ha->tx_callout);
543 
544 	if (ha->ifp != NULL)
545 		ether_ifdetach(ha->ifp);
546 
547 	qls_free_dma(ha);
548 	qls_free_parent_dma_tag(ha);
549 
550         for (i = 0; i < ha->num_rx_rings; i++) {
551                 if (ha->irq_vec[i].handle) {
552                         (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
553                                         ha->irq_vec[i].handle);
554                 }
555 
556                 if (ha->irq_vec[i].irq) {
557                         (void)bus_release_resource(dev, SYS_RES_IRQ,
558                                 ha->irq_vec[i].irq_rid,
559                                 ha->irq_vec[i].irq);
560                 }
561         }
562 
563 	if (ha->msix_count)
564 		pci_release_msi(dev);
565 
566 	if (ha->flags.lock_init) {
567 		mtx_destroy(&ha->tx_lock);
568 		mtx_destroy(&ha->hw_lock);
569 	}
570 
571         if (ha->pci_reg)
572                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
573 				ha->pci_reg);
574 
575         if (ha->pci_reg1)
576                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
577 				ha->pci_reg1);
578 }
579 
580 /*
581  * DMA Related Functions
582  */
583 
584 static void
585 qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
586 {
587         *((bus_addr_t *)arg) = 0;
588 
589         if (error) {
590                 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
591                 return;
592 	}
593 
594         *((bus_addr_t *)arg) = segs[0].ds_addr;
595 
596 	return;
597 }
598 
599 int
600 qls_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
601 {
602         int             ret = 0;
603         device_t        dev;
604         bus_addr_t      b_addr;
605 
606         dev = ha->pci_dev;
607 
608         QL_DPRINT2((dev, "%s: enter\n", __func__));
609 
610         ret = bus_dma_tag_create(
611                         ha->parent_tag,/* parent */
612                         dma_buf->alignment,
613                         ((bus_size_t)(1ULL << 32)),/* boundary */
614                         BUS_SPACE_MAXADDR,      /* lowaddr */
615                         BUS_SPACE_MAXADDR,      /* highaddr */
616                         NULL, NULL,             /* filter, filterarg */
617                         dma_buf->size,          /* maxsize */
618                         1,                      /* nsegments */
619                         dma_buf->size,          /* maxsegsize */
620                         0,                      /* flags */
621                         NULL, NULL,             /* lockfunc, lockarg */
622                         &dma_buf->dma_tag);
623 
624         if (ret) {
625                 device_printf(dev, "%s: could not create dma tag\n", __func__);
626                 goto qls_alloc_dmabuf_exit;
627         }
628         ret = bus_dmamem_alloc(dma_buf->dma_tag,
629                         (void **)&dma_buf->dma_b,
630                         (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
631                         &dma_buf->dma_map);
632         if (ret) {
633                 bus_dma_tag_destroy(dma_buf->dma_tag);
634                 device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
635                 goto qls_alloc_dmabuf_exit;
636         }
637 
638         ret = bus_dmamap_load(dma_buf->dma_tag,
639                         dma_buf->dma_map,
640                         dma_buf->dma_b,
641                         dma_buf->size,
642                         qls_dmamap_callback,
643                         &b_addr, BUS_DMA_NOWAIT);
644 
645         if (ret || !b_addr) {
646                 bus_dma_tag_destroy(dma_buf->dma_tag);
647                 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
648                         dma_buf->dma_map);
649                 ret = -1;
650                 goto qls_alloc_dmabuf_exit;
651         }
652 
653         dma_buf->dma_addr = b_addr;
654 
655 qls_alloc_dmabuf_exit:
656         QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
657                 __func__, ret, (void *)dma_buf->dma_tag,
658                 (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
659 		dma_buf->size));
660 
661         return ret;
662 }
663 
664 void
665 qls_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
666 {
667         bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
668         bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
669         bus_dma_tag_destroy(dma_buf->dma_tag);
670 }
671 
672 static int
673 qls_alloc_parent_dma_tag(qla_host_t *ha)
674 {
675 	int		ret;
676 	device_t	dev;
677 
678 	dev = ha->pci_dev;
679 
680         /*
681          * Allocate parent DMA Tag
682          */
683         ret = bus_dma_tag_create(
684                         bus_get_dma_tag(dev),   /* parent */
685                         1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
686                         BUS_SPACE_MAXADDR,      /* lowaddr */
687                         BUS_SPACE_MAXADDR,      /* highaddr */
688                         NULL, NULL,             /* filter, filterarg */
689                         BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
690                         0,                      /* nsegments */
691                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
692                         0,                      /* flags */
693                         NULL, NULL,             /* lockfunc, lockarg */
694                         &ha->parent_tag);
695 
696         if (ret) {
697                 device_printf(dev, "%s: could not create parent dma tag\n",
698                         __func__);
699 		return (-1);
700         }
701 
702         ha->flags.parent_tag = 1;
703 
704 	return (0);
705 }
706 
707 static void
708 qls_free_parent_dma_tag(qla_host_t *ha)
709 {
710         if (ha->flags.parent_tag) {
711                 bus_dma_tag_destroy(ha->parent_tag);
712                 ha->flags.parent_tag = 0;
713         }
714 }
715 
716 /*
717  * Name: qls_init_ifnet
718  * Function: Creates the Network Device Interface and Registers it with the O.S
719  */
720 
721 static void
722 qls_init_ifnet(device_t dev, qla_host_t *ha)
723 {
724 	struct ifnet *ifp;
725 
726 	QL_DPRINT2((dev, "%s: enter\n", __func__));
727 
728 	ifp = ha->ifp = if_alloc(IFT_ETHER);
729 
730 	if (ifp == NULL)
731 		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
732 
733 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
734 	ifp->if_baudrate = IF_Gbps(10);
735 	ifp->if_init = qls_init;
736 	ifp->if_softc = ha;
737 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
738 	ifp->if_ioctl = qls_ioctl;
739 	ifp->if_start = qls_start;
740 
741 	IFQ_SET_MAXLEN(&ifp->if_snd, qls_get_ifq_snd_maxlen(ha));
742 	ifp->if_snd.ifq_drv_maxlen = qls_get_ifq_snd_maxlen(ha);
743 	IFQ_SET_READY(&ifp->if_snd);
744 
745 	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
746 	if (ha->max_frame_size <= MCLBYTES) {
747 		ha->msize = MCLBYTES;
748 	} else if (ha->max_frame_size <= MJUMPAGESIZE) {
749 		ha->msize = MJUMPAGESIZE;
750 	} else
751 		ha->msize = MJUM9BYTES;
752 
753 	ether_ifattach(ifp, qls_get_mac_addr(ha));
754 
755 	ifp->if_capabilities = IFCAP_JUMBO_MTU;
756 
757 	ifp->if_capabilities |= IFCAP_HWCSUM;
758 	ifp->if_capabilities |= IFCAP_VLAN_MTU;
759 
760 	ifp->if_capabilities |= IFCAP_TSO4;
761 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
762 	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
763 	ifp->if_capabilities |= IFCAP_LINKSTATE;
764 
765 	ifp->if_capenable = ifp->if_capabilities;
766 
767 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
768 
769 	ifmedia_init(&ha->media, IFM_IMASK, qls_media_change, qls_media_status);
770 
771 	ifmedia_add(&ha->media, (IFM_ETHER | qls_get_optics(ha) | IFM_FDX), 0,
772 		NULL);
773 	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
774 
775 	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
776 
777 	QL_DPRINT2((dev, "%s: exit\n", __func__));
778 
779 	return;
780 }
781 
782 static void
783 qls_init_locked(qla_host_t *ha)
784 {
785 	struct ifnet *ifp = ha->ifp;
786 
787 	qls_stop(ha);
788 
789 	qls_flush_xmt_bufs(ha);
790 
791 	if (qls_alloc_rcv_bufs(ha) != 0)
792 		return;
793 
794 	if (qls_config_lro(ha))
795 		return;
796 
797 	bcopy(IF_LLADDR(ha->ifp), ha->mac_addr, ETHER_ADDR_LEN);
798 
799 	ifp->if_hwassist = CSUM_IP;
800 	ifp->if_hwassist |= CSUM_TCP;
801 	ifp->if_hwassist |= CSUM_UDP;
802 	ifp->if_hwassist |= CSUM_TSO;
803 
804  	if (qls_init_hw_if(ha) == 0) {
805 		ifp = ha->ifp;
806 		ifp->if_drv_flags |= IFF_DRV_RUNNING;
807 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
808 		ha->flags.qla_watchdog_pause = 0;
809 	}
810 
811 	return;
812 }
813 
814 static void
815 qls_init(void *arg)
816 {
817 	qla_host_t *ha;
818 
819 	ha = (qla_host_t *)arg;
820 
821 	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
822 
823 	(void)QLA_LOCK(ha, __func__, 0);
824 	qls_init_locked(ha);
825 	QLA_UNLOCK(ha, __func__);
826 
827 	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
828 }
829 
830 static u_int
831 qls_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
832 {
833 	uint8_t *mta = arg;
834 
835 	if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
836 		return (0);
837 
838 	bcopy(LLADDR(sdl), &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);
839 
840 	return (1);
841 }
842 
843 static void
844 qls_set_multi(qla_host_t *ha, uint32_t add_multi)
845 {
846 	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
847 	struct ifnet *ifp = ha->ifp;
848 	int mcnt;
849 
850 	mcnt = if_foreach_llmaddr(ifp, qls_copy_maddr, mta);
851 
852 	if (QLA_LOCK(ha, __func__, 1) == 0) {
853 		qls_hw_set_multi(ha, mta, mcnt, add_multi);
854 		QLA_UNLOCK(ha, __func__);
855 	}
856 
857 	return;
858 }
859 
860 static int
861 qls_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
862 {
863 	int ret = 0;
864 	struct ifreq *ifr = (struct ifreq *)data;
865 	struct ifaddr *ifa = (struct ifaddr *)data;
866 	qla_host_t *ha;
867 
868 	ha = (qla_host_t *)ifp->if_softc;
869 
870 	switch (cmd) {
871 	case SIOCSIFADDR:
872 		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
873 			__func__, cmd));
874 
875 		if (ifa->ifa_addr->sa_family == AF_INET) {
876 			ifp->if_flags |= IFF_UP;
877 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
878 				(void)QLA_LOCK(ha, __func__, 0);
879 				qls_init_locked(ha);
880 				QLA_UNLOCK(ha, __func__);
881 			}
882 			QL_DPRINT4((ha->pci_dev,
883 				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
884 				__func__, cmd,
885 				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));
886 
887 			arp_ifinit(ifp, ifa);
888 		} else {
889 			ether_ioctl(ifp, cmd, data);
890 		}
891 		break;
892 
893 	case SIOCSIFMTU:
894 		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
895 			__func__, cmd));
896 
897 		if (ifr->ifr_mtu > QLA_MAX_MTU) {
898 			ret = EINVAL;
899 		} else {
900 			(void) QLA_LOCK(ha, __func__, 0);
901 
902 			ifp->if_mtu = ifr->ifr_mtu;
903 			ha->max_frame_size =
904 				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
905 
906 			QLA_UNLOCK(ha, __func__);
907 
908 			if (ret)
909 				ret = EINVAL;
910 		}
911 
912 		break;
913 
914 	case SIOCSIFFLAGS:
915 		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
916 			__func__, cmd));
917 
918 		(void)QLA_LOCK(ha, __func__, 0);
919 
920 		if (ifp->if_flags & IFF_UP) {
921 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
922 				if ((ifp->if_flags ^ ha->if_flags) &
923 					IFF_PROMISC) {
924 					ret = qls_set_promisc(ha);
925 				} else if ((ifp->if_flags ^ ha->if_flags) &
926 					IFF_ALLMULTI) {
927 					ret = qls_set_allmulti(ha);
928 				}
929 			} else {
930 				ha->max_frame_size = ifp->if_mtu +
931 					ETHER_HDR_LEN + ETHER_CRC_LEN;
932 				qls_init_locked(ha);
933 			}
934 		} else {
935 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
936 				qls_stop(ha);
937 			ha->if_flags = ifp->if_flags;
938 		}
939 
940 		QLA_UNLOCK(ha, __func__);
941 		break;
942 
943 	case SIOCADDMULTI:
944 		QL_DPRINT4((ha->pci_dev,
945 			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));
946 
947 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
948 			qls_set_multi(ha, 1);
949 		}
950 		break;
951 
952 	case SIOCDELMULTI:
953 		QL_DPRINT4((ha->pci_dev,
954 			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));
955 
956 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
957 			qls_set_multi(ha, 0);
958 		}
959 		break;
960 
961 	case SIOCSIFMEDIA:
962 	case SIOCGIFMEDIA:
963 		QL_DPRINT4((ha->pci_dev,
964 			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
965 			__func__, cmd));
966 		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
967 		break;
968 
969 	case SIOCSIFCAP:
970 	{
971 		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
972 
973 		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
974 			__func__, cmd));
975 
976 		if (mask & IFCAP_HWCSUM)
977 			ifp->if_capenable ^= IFCAP_HWCSUM;
978 		if (mask & IFCAP_TSO4)
979 			ifp->if_capenable ^= IFCAP_TSO4;
980 		if (mask & IFCAP_VLAN_HWTAGGING)
981 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
982 		if (mask & IFCAP_VLAN_HWTSO)
983 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
984 
985 		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
986 			qls_init(ha);
987 
988 		VLAN_CAPABILITIES(ifp);
989 		break;
990 	}
991 
992 	default:
993 		QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
994 			__func__, cmd));
995 		ret = ether_ioctl(ifp, cmd, data);
996 		break;
997 	}
998 
999 	return (ret);
1000 }
1001 
1002 static int
1003 qls_media_change(struct ifnet *ifp)
1004 {
1005 	qla_host_t *ha;
1006 	struct ifmedia *ifm;
1007 	int ret = 0;
1008 
1009 	ha = (qla_host_t *)ifp->if_softc;
1010 
1011 	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
1012 
1013 	ifm = &ha->media;
1014 
1015 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1016 		ret = EINVAL;
1017 
1018 	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
1019 
1020 	return (ret);
1021 }
1022 
1023 static void
1024 qls_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1025 {
1026 	qla_host_t *ha;
1027 
1028 	ha = (qla_host_t *)ifp->if_softc;
1029 
1030 	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
1031 
1032 	ifmr->ifm_status = IFM_AVALID;
1033 	ifmr->ifm_active = IFM_ETHER;
1034 
1035 	qls_update_link_state(ha);
1036 	if (ha->link_up) {
1037 		ifmr->ifm_status |= IFM_ACTIVE;
1038 		ifmr->ifm_active |= (IFM_FDX | qls_get_optics(ha));
1039 	}
1040 
1041 	QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,\
1042 		(ha->link_up ? "link_up" : "link_down")));
1043 
1044 	return;
1045 }
1046 
1047 static void
1048 qls_start(struct ifnet *ifp)
1049 {
1050 	int		i, ret = 0;
1051 	struct mbuf	*m_head;
1052 	qla_host_t	*ha = (qla_host_t *)ifp->if_softc;
1053 
1054 	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));
1055 
1056 	if (!mtx_trylock(&ha->tx_lock)) {
1057 		QL_DPRINT8((ha->pci_dev,
1058 			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
1059 		return;
1060 	}
1061 
1062 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) ==
1063 		IFF_DRV_RUNNING) {
1064 		for (i = 0; i < ha->num_tx_rings; i++) {
1065 			ret |= qls_hw_tx_done(ha, i);
1066 		}
1067 
1068 		if (ret == 0)
1069 			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1070 	}
1071 
1072 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1073 		IFF_DRV_RUNNING) {
1074 		QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
1075 		QLA_TX_UNLOCK(ha);
1076 		return;
1077 	}
1078 
1079 	if (!ha->link_up) {
1080 		qls_update_link_state(ha);
1081 		if (!ha->link_up) {
1082 			QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
1083 			QLA_TX_UNLOCK(ha);
1084 			return;
1085 		}
1086 	}
1087 
1088 	while (ifp->if_snd.ifq_head != NULL) {
1089 		IF_DEQUEUE(&ifp->if_snd, m_head);
1090 
1091 		if (m_head == NULL) {
1092 			QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
1093 				__func__));
1094 			break;
1095 		}
1096 
1097 		if (qls_send(ha, &m_head)) {
1098 			if (m_head == NULL)
1099 				break;
1100 			QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
1101 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1102 			IF_PREPEND(&ifp->if_snd, m_head);
1103 			break;
1104 		}
1105 		/* Send a copy of the frame to the BPF listener */
1106 		ETHER_BPF_MTAP(ifp, m_head);
1107 	}
1108 
1109 	QLA_TX_UNLOCK(ha);
1110 	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
1111 	return;
1112 }
1113 
1114 static int
1115 qls_send(qla_host_t *ha, struct mbuf **m_headp)
1116 {
1117 	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
1118 	bus_dmamap_t		map;
1119 	int			nsegs;
1120 	int			ret = -1;
1121 	uint32_t		tx_idx;
1122 	struct mbuf		*m_head = *m_headp;
1123 	uint32_t		txr_idx = 0;
1124 
1125 	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));
1126 
1127 	/* check if flowid is set */
1128 	if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE)
1129 		txr_idx = m_head->m_pkthdr.flowid & (ha->num_tx_rings - 1);
1130 
1131 	tx_idx = ha->tx_ring[txr_idx].txr_next;
1132 
1133 	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;
1134 
1135 	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
1136 			BUS_DMA_NOWAIT);
1137 
1138 	if (ret == EFBIG) {
1139 		struct mbuf *m;
1140 
1141 		QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
1142 			m_head->m_pkthdr.len));
1143 
1144 		m = m_defrag(m_head, M_NOWAIT);
1145 		if (m == NULL) {
1146 			ha->err_tx_defrag++;
1147 			m_freem(m_head);
1148 			*m_headp = NULL;
1149 			device_printf(ha->pci_dev,
1150 				"%s: m_defrag() = NULL [%d]\n",
1151 				__func__, ret);
1152 			return (ENOBUFS);
1153 		}
1154 		m_head = m;
1155 		*m_headp = m_head;
1156 
1157 		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
1158 					segs, &nsegs, BUS_DMA_NOWAIT))) {
1159 			ha->err_tx_dmamap_load++;
1160 
1161 			device_printf(ha->pci_dev,
1162 				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
1163 				__func__, ret, m_head->m_pkthdr.len);
1164 
1165 			if (ret != ENOMEM) {
1166 				m_freem(m_head);
1167 				*m_headp = NULL;
1168 			}
1169 			return (ret);
1170 		}
1171 
1172 	} else if (ret) {
1173 		ha->err_tx_dmamap_load++;
1174 
1175 		device_printf(ha->pci_dev,
1176 			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
1177 			__func__, ret, m_head->m_pkthdr.len);
1178 
1179 		if (ret != ENOMEM) {
1180 			m_freem(m_head);
1181 			*m_headp = NULL;
1182 		}
1183 		return (ret);
1184 	}
1185 
1186 	QL_ASSERT(ha, (nsegs != 0), ("qls_send: empty packet"));
1187 
1188 	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
1189 
1190         if (!(ret = qls_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx))) {
1191 		ha->tx_ring[txr_idx].count++;
1192 		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
1193 		ha->tx_ring[txr_idx].tx_buf[tx_idx].map = map;
1194 	} else {
1195 		if (ret == EINVAL) {
1196 			if (m_head)
1197 				m_freem(m_head);
1198 			*m_headp = NULL;
1199 		}
1200 	}
1201 
1202 	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
1203 	return (ret);
1204 }
1205 
1206 static void
1207 qls_stop(qla_host_t *ha)
1208 {
1209 	struct ifnet *ifp = ha->ifp;
1210 	device_t	dev;
1211 
1212 	dev = ha->pci_dev;
1213 
1214 	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
1215 
1216 	ha->flags.qla_watchdog_pause = 1;
1217 
1218 	while (!ha->qla_watchdog_paused)
1219 		qls_mdelay(__func__, 1);
1220 
1221 	qls_del_hw_if(ha);
1222 
1223 	qls_free_lro(ha);
1224 
1225 	qls_flush_xmt_bufs(ha);
1226 	qls_free_rcv_bufs(ha);
1227 
1228 	return;
1229 }
1230 
1231 /*
1232  * Buffer Management Functions for Transmit and Receive Rings
1233  */
1234 /*
1235  * Release mbuf after it sent on the wire
1236  */
1237 static void
1238 qls_flush_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
1239 {
1240 	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
1241 
1242 	if (txb->m_head) {
1243 		bus_dmamap_unload(ha->tx_tag, txb->map);
1244 
1245 		m_freem(txb->m_head);
1246 		txb->m_head = NULL;
1247 	}
1248 
1249 	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
1250 }
1251 
1252 static void
1253 qls_flush_xmt_bufs(qla_host_t *ha)
1254 {
1255 	int		i, j;
1256 
1257 	for (j = 0; j < ha->num_tx_rings; j++) {
1258 		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
1259 			qls_flush_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
1260 	}
1261 
1262 	return;
1263 }
1264 
1265 static int
1266 qls_alloc_rcv_mbufs(qla_host_t *ha, int r)
1267 {
1268 	int			i, j, ret = 0;
1269 	qla_rx_buf_t		*rxb;
1270 	qla_rx_ring_t		*rx_ring;
1271 	volatile q81_bq_addr_e_t *sbq_e;
1272 
1273 	rx_ring = &ha->rx_ring[r];
1274 
1275 	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1276 		rxb = &rx_ring->rx_buf[i];
1277 
1278 		ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);
1279 
1280 		if (ret) {
1281 			device_printf(ha->pci_dev,
1282 				"%s: dmamap[%d, %d] failed\n", __func__, r, i);
1283 
1284 			for (j = 0; j < i; j++) {
1285 				rxb = &rx_ring->rx_buf[j];
1286 				bus_dmamap_destroy(ha->rx_tag, rxb->map);
1287 			}
1288 			goto qls_alloc_rcv_mbufs_err;
1289 		}
1290 	}
1291 
1292 	rx_ring = &ha->rx_ring[r];
1293 
1294 	sbq_e = rx_ring->sbq_vaddr;
1295 
1296 	rxb = &rx_ring->rx_buf[0];
1297 
1298 	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1299 		if (!(ret = qls_get_mbuf(ha, rxb, NULL))) {
1300 			/*
1301 		 	 * set the physical address in the
1302 			 * corresponding descriptor entry in the
1303 			 * receive ring/queue for the hba
1304 			 */
1305 
1306 			sbq_e->addr_lo = rxb->paddr & 0xFFFFFFFF;
1307 			sbq_e->addr_hi = (rxb->paddr >> 32) & 0xFFFFFFFF;
1308 
1309 		} else {
1310 			device_printf(ha->pci_dev,
1311 				"%s: qls_get_mbuf [%d, %d] failed\n",
1312 					__func__, r, i);
1313 			bus_dmamap_destroy(ha->rx_tag, rxb->map);
1314 			goto qls_alloc_rcv_mbufs_err;
1315 		}
1316 
1317 		rxb++;
1318 		sbq_e++;
1319 	}
1320 	return 0;
1321 
1322 qls_alloc_rcv_mbufs_err:
1323 	return (-1);
1324 }
1325 
1326 static void
1327 qls_free_rcv_bufs(qla_host_t *ha)
1328 {
1329 	int		i, r;
1330 	qla_rx_buf_t	*rxb;
1331 	qla_rx_ring_t	*rxr;
1332 
1333 	for (r = 0; r < ha->num_rx_rings; r++) {
1334 		rxr = &ha->rx_ring[r];
1335 
1336 		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1337 			rxb = &rxr->rx_buf[i];
1338 
1339 			if (rxb->m_head != NULL) {
1340 				bus_dmamap_unload(ha->rx_tag, rxb->map);
1341 				bus_dmamap_destroy(ha->rx_tag, rxb->map);
1342 				m_freem(rxb->m_head);
1343 			}
1344 		}
1345 		bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
1346 	}
1347 	return;
1348 }
1349 
1350 static int
1351 qls_alloc_rcv_bufs(qla_host_t *ha)
1352 {
1353 	int		r, ret = 0;
1354 	qla_rx_ring_t	*rxr;
1355 
1356 	for (r = 0; r < ha->num_rx_rings; r++) {
1357 		rxr = &ha->rx_ring[r];
1358 		bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
1359 	}
1360 
1361 	for (r = 0; r < ha->num_rx_rings; r++) {
1362 		ret = qls_alloc_rcv_mbufs(ha, r);
1363 
1364 		if (ret)
1365 			qls_free_rcv_bufs(ha);
1366 	}
1367 
1368 	return (ret);
1369 }
1370 
1371 int
1372 qls_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
1373 {
1374 	struct mbuf *mp = nmp;
1375 	struct ifnet   		*ifp;
1376 	int            		ret = 0;
1377 	uint32_t		offset;
1378 	bus_dma_segment_t	segs[1];
1379 	int			nsegs;
1380 
1381 	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
1382 
1383 	ifp = ha->ifp;
1384 
1385 	if (mp == NULL) {
1386 		mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ha->msize);
1387 
1388 		if (mp == NULL) {
1389 			if (ha->msize == MCLBYTES)
1390 				ha->err_m_getcl++;
1391 			else
1392 				ha->err_m_getjcl++;
1393 
1394 			ret = ENOBUFS;
1395 			device_printf(ha->pci_dev,
1396 					"%s: m_getcl failed\n", __func__);
1397 			goto exit_qls_get_mbuf;
1398 		}
1399 		mp->m_len = mp->m_pkthdr.len = ha->msize;
1400 	} else {
1401 		mp->m_len = mp->m_pkthdr.len = ha->msize;
1402 		mp->m_data = mp->m_ext.ext_buf;
1403 		mp->m_next = NULL;
1404 	}
1405 
1406 	/* align the receive buffers to 8 byte boundary */
1407 	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
1408 	if (offset) {
1409 		offset = 8 - offset;
1410 		m_adj(mp, offset);
1411 	}
1412 
1413 	/*
1414 	 * Using memory from the mbuf cluster pool, invoke the bus_dma
1415 	 * machinery to arrange the memory mapping.
1416 	 */
1417 	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
1418 			mp, segs, &nsegs, BUS_DMA_NOWAIT);
1419 	rxb->paddr = segs[0].ds_addr;
1420 
1421 	if (ret || !rxb->paddr || (nsegs != 1)) {
1422 		m_freem(mp);
1423 		rxb->m_head = NULL;
1424 		device_printf(ha->pci_dev,
1425 			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
1426 			__func__, ret, (long long unsigned int)rxb->paddr,
1427 			nsegs);
1428                 ret = -1;
1429 		goto exit_qls_get_mbuf;
1430 	}
1431 	rxb->m_head = mp;
1432 	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
1433 
1434 exit_qls_get_mbuf:
1435 	QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
1436 	return (ret);
1437 }
1438 
1439 static void
1440 qls_tx_done(void *context, int pending)
1441 {
1442 	qla_host_t *ha = context;
1443 	struct ifnet   *ifp;
1444 
1445 	ifp = ha->ifp;
1446 
1447 	if (!ifp)
1448 		return;
1449 
1450 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1451 		QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
1452 		return;
1453 	}
1454 
1455 	qls_start(ha->ifp);
1456 	return;
1457 }
1458 
1459 static int
1460 qls_config_lro(qla_host_t *ha)
1461 {
1462         int i;
1463         struct lro_ctrl *lro;
1464 
1465         for (i = 0; i < ha->num_rx_rings; i++) {
1466                 lro = &ha->rx_ring[i].lro;
1467                 if (tcp_lro_init(lro)) {
1468                         device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n",
1469                                 __func__);
1470                         return (-1);
1471                 }
1472                 lro->ifp = ha->ifp;
1473         }
1474         ha->flags.lro_init = 1;
1475 
1476         QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__));
1477         return (0);
1478 }
1479 
1480 static void
1481 qls_free_lro(qla_host_t *ha)
1482 {
1483         int i;
1484         struct lro_ctrl *lro;
1485 
1486         if (!ha->flags.lro_init)
1487                 return;
1488 
1489         for (i = 0; i < ha->num_rx_rings; i++) {
1490                 lro = &ha->rx_ring[i].lro;
1491                 tcp_lro_free(lro);
1492         }
1493         ha->flags.lro_init = 0;
1494 }
1495 
1496 static void
1497 qls_error_recovery(void *context, int pending)
1498 {
1499         qla_host_t *ha = context;
1500 
1501 	qls_init(ha);
1502 
1503 	return;
1504 }
1505