xref: /freebsd/sys/dev/qlxgb/qla_os.c (revision 9ecd54f24fe9fa373e07c9fd7c052deb2188f545)
1 /*
2  * Copyright (c) 2011-2013 Qlogic Corporation
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  * File: qla_os.c
30  * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "qla_os.h"
37 #include "qla_reg.h"
38 #include "qla_hw.h"
39 #include "qla_def.h"
40 #include "qla_inline.h"
41 #include "qla_ver.h"
42 #include "qla_glbl.h"
43 #include "qla_dbg.h"
44 
45 /*
46  * Some PCI Configuration Space Related Defines
47  */
48 
49 #ifndef PCI_VENDOR_QLOGIC
50 #define PCI_VENDOR_QLOGIC	0x1077
51 #endif
52 
53 #ifndef PCI_PRODUCT_QLOGIC_ISP8020
54 #define PCI_PRODUCT_QLOGIC_ISP8020	0x8020
55 #endif
56 
57 #define PCI_QLOGIC_ISP8020 \
58 	((PCI_PRODUCT_QLOGIC_ISP8020 << 16) | PCI_VENDOR_QLOGIC)
59 
60 /*
61  * static functions
62  */
63 static int qla_alloc_parent_dma_tag(qla_host_t *ha);
64 static void qla_free_parent_dma_tag(qla_host_t *ha);
65 static int qla_alloc_xmt_bufs(qla_host_t *ha);
66 static void qla_free_xmt_bufs(qla_host_t *ha);
67 static int qla_alloc_rcv_bufs(qla_host_t *ha);
68 static void qla_free_rcv_bufs(qla_host_t *ha);
69 
70 static void qla_init_ifnet(device_t dev, qla_host_t *ha);
71 static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
72 static void qla_release(qla_host_t *ha);
73 static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
74 		int error);
75 static void qla_stop(qla_host_t *ha);
76 static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
77 static void qla_tx_done(void *context, int pending);
78 
79 /*
80  * Hooks to the Operating Systems
81  */
82 static int qla_pci_probe (device_t);
83 static int qla_pci_attach (device_t);
84 static int qla_pci_detach (device_t);
85 
86 static void qla_init(void *arg);
87 static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
88 static int qla_media_change(struct ifnet *ifp);
89 static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
90 
91 static device_method_t qla_pci_methods[] = {
92 	/* Device interface */
93 	DEVMETHOD(device_probe, qla_pci_probe),
94 	DEVMETHOD(device_attach, qla_pci_attach),
95 	DEVMETHOD(device_detach, qla_pci_detach),
96 	{ 0, 0 }
97 };
98 
99 static driver_t qla_pci_driver = {
100 	"ql", qla_pci_methods, sizeof (qla_host_t),
101 };
102 
103 static devclass_t qla80xx_devclass;
104 
105 DRIVER_MODULE(qla80xx, pci, qla_pci_driver, qla80xx_devclass, 0, 0);
106 
107 MODULE_DEPEND(qla80xx, pci, 1, 1, 1);
108 MODULE_DEPEND(qla80xx, ether, 1, 1, 1);
109 
110 MALLOC_DEFINE(M_QLA8XXXBUF, "qla80xxbuf", "Buffers for qla80xx driver");
111 
112 uint32_t std_replenish = 8;
113 uint32_t jumbo_replenish = 2;
114 uint32_t rcv_pkt_thres = 128;
115 uint32_t rcv_pkt_thres_d = 32;
116 uint32_t snd_pkt_thres = 16;
117 uint32_t free_pkt_thres = (NUM_TX_DESCRIPTORS / 2);
118 
119 static char dev_str[64];
120 
121 /*
122  * Name:	qla_pci_probe
123  * Function:	Validate that the PCI device is a QLA80XX device
124  */
125 static int
126 qla_pci_probe(device_t dev)
127 {
128         switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
129         case PCI_QLOGIC_ISP8020:
130 		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
131 			"Qlogic ISP 80xx PCI CNA Adapter-Ethernet Function",
132 			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
133 			QLA_VERSION_BUILD);
134                 device_set_desc(dev, dev_str);
135                 break;
136         default:
137                 return (ENXIO);
138         }
139 
140         if (bootverbose)
141                 printf("%s: %s\n", __func__, dev_str);
142 
143         return (BUS_PROBE_DEFAULT);
144 }
145 
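/*
 * Name:	qla_add_sysctls
 * Function:	Registers the driver sysctl nodes (statistics, firmware
 *		version, debug level and the packet threshold tunables)
 *		under the device sysctl tree.
 */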
146 static void
147 qla_add_sysctls(qla_host_t *ha)
148 {
149         device_t dev = ha->pci_dev;
150 
151         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
152                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
153                 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RD,
154                 (void *)ha, 0,
155                 qla_sysctl_get_stats, "I", "Statistics");
156 
157 	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
158 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
159 		OID_AUTO, "fw_version", CTLFLAG_RD,
160 		&ha->fw_ver_str, 0, "firmware version");
161 
162 	dbg_level = 0;
163         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
164                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
165                 OID_AUTO, "debug", CTLFLAG_RW,
166                 &dbg_level, dbg_level, "Debug Level");
167 
168         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
169                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
170                 OID_AUTO, "std_replenish", CTLFLAG_RW,
171                 &std_replenish, std_replenish,
172                 "Threshold for Replenishing Standard Frames");
173 
174         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
175                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
176                 OID_AUTO, "jumbo_replenish", CTLFLAG_RW,
177                 &jumbo_replenish, jumbo_replenish,
178                 "Threshold for Replenishing Jumbo Frames");
179 
180         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
181                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
182                 OID_AUTO, "rcv_pkt_thres",  CTLFLAG_RW,
183                 &rcv_pkt_thres, rcv_pkt_thres,
184                 "Threshold for # of rcv pkts to trigger indication isr");
185 
186         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
187                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
188                 OID_AUTO, "rcv_pkt_thres_d",  CTLFLAG_RW,
189                 &rcv_pkt_thres_d, rcv_pkt_thres_d,
190                 "Threshold for # of rcv pkts to trigger indication deferred");
191 
192         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
193                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
194                 OID_AUTO, "snd_pkt_thres",  CTLFLAG_RW,
195                 &snd_pkt_thres, snd_pkt_thres,
196                 "Threshold for # of snd packets");
197 
198         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
199                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
200                 OID_AUTO, "free_pkt_thres",  CTLFLAG_RW,
201                 &free_pkt_thres, free_pkt_thres,
202                 "Threshold for # of packets to free at a time");
203 
204         return;
205 }
206 
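/*
 * Name:	qla_watchdog
 * Function:	Periodic callout; enqueues the transmit task when transmit
 *		completions are pending or frames are queued for transmit,
 *		then reschedules itself.
 */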
207 static void
208 qla_watchdog(void *arg)
209 {
210 	qla_host_t *ha = arg;
211 	qla_hw_t *hw;
212 	struct ifnet *ifp;
213 
214 	hw = &ha->hw;
215 	ifp = ha->ifp;
216 
217         if (ha->flags.qla_watchdog_exit)
218 		return;
219 
220 	if (!ha->flags.qla_watchdog_pause) {
221 		if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
222 			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
223 		} else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
224 			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
225 		}
226 	}
227 	ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
228 	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
229 		qla_watchdog, ha);
230 }
231 
232 /*
233  * Name:	qla_pci_attach
234  * Function:	Attaches the device to the operating system
235  */
236 static int
237 qla_pci_attach(device_t dev)
238 {
239 	qla_host_t *ha = NULL;
240 	uint32_t rsrc_len, i;
241 
242 	QL_DPRINT2((dev, "%s: enter\n", __func__));
243 
244         if ((ha = device_get_softc(dev)) == NULL) {
245                 device_printf(dev, "cannot get softc\n");
246                 return (ENOMEM);
247         }
248 
249         memset(ha, 0, sizeof (qla_host_t));
250 
251         if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8020) {
252                 device_printf(dev, "device is not ISP8020\n");
253                 return (ENXIO);
254 	}
255 
256         ha->pci_func = pci_get_function(dev);
257 
258         ha->pci_dev = dev;
259 
260 	pci_enable_busmaster(dev);
261 
262 	ha->reg_rid = PCIR_BAR(0);
263 	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
264 				RF_ACTIVE);
265 
266         if (ha->pci_reg == NULL) {
267                 device_printf(dev, "unable to map any ports\n");
268                 goto qla_pci_attach_err;
269         }
270 
271 	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
272 					ha->reg_rid);
273 
274 	mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
275 	mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);
276 	mtx_init(&ha->rx_lock, "qla80xx_rx_lock", MTX_NETWORK_LOCK, MTX_DEF);
277 	mtx_init(&ha->rxj_lock, "qla80xx_rxj_lock", MTX_NETWORK_LOCK, MTX_DEF);
278 	ha->flags.lock_init = 1;
279 
280 	ha->msix_count = pci_msix_count(dev);
281 
282 	if (ha->msix_count < qla_get_msix_count(ha)) {
283 		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
284 			ha->msix_count);
285 		goto qla_pci_attach_err;
286 	}
287 
288 	QL_DPRINT2((dev, "%s: ha %p irq %p pci_func 0x%x rsrc_count 0x%08x"
289 		" msix_count 0x%x pci_reg %p\n", __func__, ha,
290 		ha->irq, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));
291 
292 	ha->msix_count = qla_get_msix_count(ha);
293 
294 	if (pci_alloc_msix(dev, &ha->msix_count)) {
295 		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
296 			ha->msix_count);
297 		ha->msix_count = 0;
298 		goto qla_pci_attach_err;
299 	}
300 
301 	TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
302 	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
303 			taskqueue_thread_enqueue, &ha->tx_tq);
304 	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
305 		device_get_nameunit(ha->pci_dev));
306 
307         for (i = 0; i < ha->msix_count; i++) {
308                 ha->irq_vec[i].irq_rid = i+1;
309                 ha->irq_vec[i].ha = ha;
310 
311                 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
312                                         &ha->irq_vec[i].irq_rid,
313                                         (RF_ACTIVE | RF_SHAREABLE));
314 
315                 if (ha->irq_vec[i].irq == NULL) {
316                         device_printf(dev, "could not allocate interrupt\n");
317                         goto qla_pci_attach_err;
318                 }
319 
320                 if (bus_setup_intr(dev, ha->irq_vec[i].irq,
321                         (INTR_TYPE_NET | INTR_MPSAFE),
322                         NULL, qla_isr, &ha->irq_vec[i],
323                         &ha->irq_vec[i].handle)) {
324                         device_printf(dev, "could not setup interrupt\n");
325                         goto qla_pci_attach_err;
326                 }
327 
328 		TASK_INIT(&ha->irq_vec[i].rcv_task, 0, qla_rcv,
329 			&ha->irq_vec[i]);
330 
331 		ha->irq_vec[i].rcv_tq = taskqueue_create_fast("qla_rcvq",
332 			M_NOWAIT, taskqueue_thread_enqueue,
333 			&ha->irq_vec[i].rcv_tq);
334 
335 		taskqueue_start_threads(&ha->irq_vec[i].rcv_tq, 1, PI_NET,
336 			"%s rcvq",
337 			device_get_nameunit(ha->pci_dev));
338         }
339 
340 	qla_add_sysctls(ha);
341 
342 	/* add hardware specific sysctls */
343 	qla_hw_add_sysctls(ha);
344 
345 	/* initialize hardware */
346 	if (qla_init_hw(ha)) {
347 		device_printf(dev, "%s: qla_init_hw failed\n", __func__);
348 		goto qla_pci_attach_err;
349 	}
350 
351 	device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
352 		ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
353 		ha->fw_ver_build);
354 
355 	snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
356 			ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
357 			ha->fw_ver_build);
358 
359 	//qla_get_hw_caps(ha);
360 	qla_read_mac_addr(ha);
361 
362 	/* allocate parent dma tag */
363 	if (qla_alloc_parent_dma_tag(ha)) {
364 		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
365 			__func__);
366 		goto qla_pci_attach_err;
367 	}
368 
369 	/* alloc all dma buffers */
370 	if (qla_alloc_dma(ha)) {
371 		device_printf(dev, "%s: qla_alloc_dma failed\n", __func__);
372 		goto qla_pci_attach_err;
373 	}
374 
375 	/* create the O.S. Ethernet interface */
376 	qla_init_ifnet(dev, ha);
377 
378 	ha->flags.qla_watchdog_active = 1;
379 	ha->flags.qla_watchdog_pause = 1;
380 
381 	callout_init(&ha->tx_callout, TRUE);
382 
383 	/* create ioctl device interface */
384 	if (qla_make_cdev(ha)) {
385 		device_printf(dev, "%s: qla_make_cdev failed\n", __func__);
386 		goto qla_pci_attach_err;
387 	}
388 
389 	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
390 		qla_watchdog, ha);
391 
392 	QL_DPRINT2((dev, "%s: exit 0\n", __func__));
393         return (0);
394 
395 qla_pci_attach_err:
396 
397 	qla_release(ha);
398 
399 	QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));
400         return (ENXIO);
401 }
402 
403 /*
404  * Name:	qla_pci_detach
405  * Function:	Unhooks the device from the operating system
406  */
407 static int
408 qla_pci_detach(device_t dev)
409 {
410 	qla_host_t *ha = NULL;
411 	struct ifnet *ifp;
412 	int i;
413 
414 	QL_DPRINT2((dev, "%s: enter\n", __func__));
415 
416         if ((ha = device_get_softc(dev)) == NULL) {
417                 device_printf(dev, "cannot get softc\n");
418                 return (ENOMEM);
419         }
420 
421 	ifp = ha->ifp;
422 
423 	QLA_LOCK(ha, __func__);
424 	qla_stop(ha);
425 	QLA_UNLOCK(ha, __func__);
426 
427 	if (ha->tx_tq) {
428 		taskqueue_drain(ha->tx_tq, &ha->tx_task);
429 		taskqueue_free(ha->tx_tq);
430 	}
431 
432         for (i = 0; i < ha->msix_count; i++) {
433 		taskqueue_drain(ha->irq_vec[i].rcv_tq,
434 			&ha->irq_vec[i].rcv_task);
435 		taskqueue_free(ha->irq_vec[i].rcv_tq);
436 	}
437 
438 	qla_release(ha);
439 
440 	QL_DPRINT2((dev, "%s: exit\n", __func__));
441 
442         return (0);
443 }
444 
445 /*
446  * SYSCTL Related Callbacks
447  */
448 static int
449 qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
450 {
451 	int err, ret = 0;
452 	qla_host_t *ha;
453 
454 	err = sysctl_handle_int(oidp, &ret, 0, req);
455 
456 	if (err)
457 		return (err);
458 
459 	ha = (qla_host_t *)arg1;
460 	//qla_get_stats(ha);
461 	QL_DPRINT2((ha->pci_dev, "%s: called ret %d\n", __func__, ret));
462 	return (err);
463 }
464 
465 
466 /*
467  * Name:	qla_release
468  * Function:	Releases the resources allocated for the device
469  */
470 static void
471 qla_release(qla_host_t *ha)
472 {
473 	device_t dev;
474 	int i;
475 
476 	dev = ha->pci_dev;
477 
478 	qla_del_cdev(ha);
479 
480 	if (ha->flags.qla_watchdog_active)
481 		ha->flags.qla_watchdog_exit = 1;
482 
483 	callout_stop(&ha->tx_callout);
484 	qla_mdelay(__func__, 100);
485 
486 	if (ha->ifp != NULL)
487 		ether_ifdetach(ha->ifp);
488 
489 	qla_free_dma(ha);
490 	qla_free_parent_dma_tag(ha);
491 
492 	for (i = 0; i < ha->msix_count; i++) {
493 		if (ha->irq_vec[i].handle)
494 			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
495 				ha->irq_vec[i].handle);
496 		if (ha->irq_vec[i].irq)
497 			(void) bus_release_resource(dev, SYS_RES_IRQ,
498 				ha->irq_vec[i].irq_rid,
499 				ha->irq_vec[i].irq);
500 	}
501 	if (ha->msix_count)
502 		pci_release_msi(dev);
503 
504 	if (ha->flags.lock_init) {
505 		mtx_destroy(&ha->tx_lock);
506 		mtx_destroy(&ha->rx_lock);
507 		mtx_destroy(&ha->rxj_lock);
508 		mtx_destroy(&ha->hw_lock);
509 	}
510 
511         if (ha->pci_reg)
512                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
513 				ha->pci_reg);
514 }
515 
516 /*
517  * DMA Related Functions
518  */
519 
520 static void
521 qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
522 {
523         *((bus_addr_t *)arg) = 0;
524 
525         if (error) {
526                 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
527                 return;
528 	}
529 
530         QL_ASSERT((nsegs == 1), ("%s: %d segments returned!", __func__, nsegs));
531 
532         *((bus_addr_t *)arg) = segs[0].ds_addr;
533 
534 	return;
535 }
536 
537 int
538 qla_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
539 {
540         int             ret = 0;
541         device_t        dev;
542         bus_addr_t      b_addr;
543 
544         dev = ha->pci_dev;
545 
546         QL_DPRINT2((dev, "%s: enter\n", __func__));
547 
548         ret = bus_dma_tag_create(
549                         ha->parent_tag,/* parent */
550                         dma_buf->alignment,
551                         ((bus_size_t)(1ULL << 32)),/* boundary */
552                         BUS_SPACE_MAXADDR,      /* lowaddr */
553                         BUS_SPACE_MAXADDR,      /* highaddr */
554                         NULL, NULL,             /* filter, filterarg */
555                         dma_buf->size,          /* maxsize */
556                         1,                      /* nsegments */
557                         dma_buf->size,          /* maxsegsize */
558                         0,                      /* flags */
559                         NULL, NULL,             /* lockfunc, lockarg */
560                         &dma_buf->dma_tag);
561 
562         if (ret) {
563                 device_printf(dev, "%s: could not create dma tag\n", __func__);
564                 goto qla_alloc_dmabuf_exit;
565         }
566         ret = bus_dmamem_alloc(dma_buf->dma_tag,
567                         (void **)&dma_buf->dma_b,
568                         (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
569                         &dma_buf->dma_map);
570         if (ret) {
571                 bus_dma_tag_destroy(dma_buf->dma_tag);
572                 device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
573                 goto qla_alloc_dmabuf_exit;
574         }
575 
576         ret = bus_dmamap_load(dma_buf->dma_tag,
577                         dma_buf->dma_map,
578                         dma_buf->dma_b,
579                         dma_buf->size,
580                         qla_dmamap_callback,
581                         &b_addr, BUS_DMA_NOWAIT);
582 
583         if (ret || !b_addr) {
584                 bus_dma_tag_destroy(dma_buf->dma_tag);
585                 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
586                         dma_buf->dma_map);
587                 ret = -1;
588                 goto qla_alloc_dmabuf_exit;
589         }
590 
591         dma_buf->dma_addr = b_addr;
592 
593 qla_alloc_dmabuf_exit:
594         QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
595                 __func__, ret, (void *)dma_buf->dma_tag,
596                 (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
597 		dma_buf->size));
598 
599         return ret;
600 }
601 
602 void
603 qla_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
604 {
605         bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
606         bus_dma_tag_destroy(dma_buf->dma_tag);
607 }
608 
609 static int
610 qla_alloc_parent_dma_tag(qla_host_t *ha)
611 {
612 	int		ret;
613 	device_t	dev;
614 
615 	dev = ha->pci_dev;
616 
617         /*
618          * Allocate parent DMA Tag
619          */
620         ret = bus_dma_tag_create(
621                         bus_get_dma_tag(dev),   /* parent */
622                         1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
623                         BUS_SPACE_MAXADDR,      /* lowaddr */
624                         BUS_SPACE_MAXADDR,      /* highaddr */
625                         NULL, NULL,             /* filter, filterarg */
626                         BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
627                         0,                      /* nsegments */
628                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
629                         0,                      /* flags */
630                         NULL, NULL,             /* lockfunc, lockarg */
631                         &ha->parent_tag);
632 
633         if (ret) {
634                 device_printf(dev, "%s: could not create parent dma tag\n",
635                         __func__);
636 		return (-1);
637         }
638 
639         ha->flags.parent_tag = 1;
640 
641 	return (0);
642 }
643 
644 static void
645 qla_free_parent_dma_tag(qla_host_t *ha)
646 {
647         if (ha->flags.parent_tag) {
648                 bus_dma_tag_destroy(ha->parent_tag);
649                 ha->flags.parent_tag = 0;
650         }
651 }
652 
653 /*
654  * Name: qla_init_ifnet
655  * Function: Creates the network device interface and registers it with the O.S.
656  */
657 
658 static void
659 qla_init_ifnet(device_t dev, qla_host_t *ha)
660 {
661 	struct ifnet *ifp;
662 
663 	QL_DPRINT2((dev, "%s: enter\n", __func__));
664 
665 	ifp = ha->ifp = if_alloc(IFT_ETHER);
666 
667 	if (ifp == NULL)
668 		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
669 
670 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
671 
672 	ifp->if_baudrate = IF_Gbps(10);
673 	ifp->if_init = qla_init;
674 	ifp->if_softc = ha;
675 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
676 	ifp->if_ioctl = qla_ioctl;
677 	ifp->if_start = qla_start;
678 
679 	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
680 	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
681 	IFQ_SET_READY(&ifp->if_snd);
682 
683 	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
684 
685 	ether_ifattach(ifp, qla_get_mac_addr(ha));
686 
687 	ifp->if_capabilities = IFCAP_HWCSUM |
688 				IFCAP_TSO4 |
689 				IFCAP_JUMBO_MTU;
690 
691 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
692 	ifp->if_capabilities |= IFCAP_LINKSTATE;
693 
694 #if defined(__FreeBSD_version) && (__FreeBSD_version < 900002)
695 	ifp->if_timer = 0;
696 	ifp->if_watchdog = NULL;
697 #endif /* #if defined(__FreeBSD_version) && (__FreeBSD_version < 900002) */
698 
699 	ifp->if_capenable = ifp->if_capabilities;
700 
701 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
702 
703 	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);
704 
705 	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
706 		NULL);
707 	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
708 
709 	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
710 
711 	QL_DPRINT2((dev, "%s: exit\n", __func__));
712 
713 	return;
714 }
715 
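/*
 * Name:	qla_init_locked
 * Function:	Brings the interface up: allocates transmit/receive buffers,
 *		configures LRO and initializes the hardware interface.
 *		Callers hold the QLA lock (see qla_init()/qla_ioctl()).
 */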
716 static void
717 qla_init_locked(qla_host_t *ha)
718 {
719 	struct ifnet *ifp = ha->ifp;
720 
721 	qla_stop(ha);
722 
723 	if (qla_alloc_xmt_bufs(ha) != 0)
724 		return;
725 
726 	if (qla_alloc_rcv_bufs(ha) != 0)
727 		return;
728 
729 	if (qla_config_lro(ha))
730 		return;
731 
732 	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);
733 
734 	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
735 
736 	ha->flags.stop_rcv = 0;
737 	if (qla_init_hw_if(ha) == 0) {
738 		ifp = ha->ifp;
739 		ifp->if_drv_flags |= IFF_DRV_RUNNING;
740 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
741 		ha->flags.qla_watchdog_pause = 0;
742 	}
743 
744 	return;
745 }
746 
747 static void
748 qla_init(void *arg)
749 {
750 	qla_host_t *ha;
751 
752 	ha = (qla_host_t *)arg;
753 
754 	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
755 
756 	QLA_LOCK(ha, __func__);
757 	qla_init_locked(ha);
758 	QLA_UNLOCK(ha, __func__);
759 
760 	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
761 }
762 
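/*
 * Name:	qla_set_multi
 * Function:	Walks the interface multicast list and programs up to
 *		Q8_MAX_NUM_MULTICAST_ADDRS addresses into the hardware.
 */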
763 static void
764 qla_set_multi(qla_host_t *ha, uint32_t add_multi)
765 {
766 	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
767 	struct ifmultiaddr *ifma;
768 	int mcnt = 0;
769 	struct ifnet *ifp = ha->ifp;
770 
771 	if_maddr_rlock(ifp);
772 
773 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
774 
775 		if (ifma->ifma_addr->sa_family != AF_LINK)
776 			continue;
777 
778 		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
779 			break;
780 
781 		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
782 			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);
783 
784 		mcnt++;
785 	}
786 
787 	if_maddr_runlock(ifp);
788 
789 	qla_hw_set_multi(ha, mta, mcnt, add_multi);
790 
791 	return;
792 }
793 
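/*
 * Name:	qla_ioctl
 * Function:	Network interface ioctl handler (address, MTU, flags,
 *		multicast, media and capabilities requests).
 */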
794 static int
795 qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
796 {
797 	int ret = 0;
798 	struct ifreq *ifr = (struct ifreq *)data;
799 	struct ifaddr *ifa = (struct ifaddr *)data;
800 	qla_host_t *ha;
801 
802 	ha = (qla_host_t *)ifp->if_softc;
803 
804 	switch (cmd) {
805 	case SIOCSIFADDR:
806 		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
807 			__func__, cmd));
808 
809 		if (ifa->ifa_addr->sa_family == AF_INET) {
810 			ifp->if_flags |= IFF_UP;
811 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
812 				QLA_LOCK(ha, __func__);
813 				qla_init_locked(ha);
814 				QLA_UNLOCK(ha, __func__);
815 			}
816 			QL_DPRINT4((ha->pci_dev,
817 				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
818 				__func__, cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr)));
819 
820 			arp_ifinit(ifp, ifa);
821 			if (ntohl(IA_SIN(ifa)->sin_addr.s_addr) != INADDR_ANY) {
822 				qla_config_ipv4_addr(ha,
823 					(IA_SIN(ifa)->sin_addr.s_addr));
824 			}
825 		} else {
826 			ether_ioctl(ifp, cmd, data);
827 		}
828 		break;
829 
830 	case SIOCSIFMTU:
831 		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
832 			__func__, cmd));
833 
834 		if (ifr->ifr_mtu > QLA_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
835 			ret = EINVAL;
836 		} else {
837 			QLA_LOCK(ha, __func__);
838 			ifp->if_mtu = ifr->ifr_mtu;
839 			ha->max_frame_size =
840 				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
841 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
842 				ret = qla_set_max_mtu(ha, ha->max_frame_size,
843 					(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
844 			}
845 			QLA_UNLOCK(ha, __func__);
846 
847 			if (ret)
848 				ret = EINVAL;
849 		}
850 
851 		break;
852 
853 	case SIOCSIFFLAGS:
854 		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
855 			__func__, cmd));
856 
857 		if (ifp->if_flags & IFF_UP) {
858 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
859 				if ((ifp->if_flags ^ ha->if_flags) &
860 					IFF_PROMISC) {
861 					qla_set_promisc(ha);
862 				} else if ((ifp->if_flags ^ ha->if_flags) &
863 					IFF_ALLMULTI) {
864 					qla_set_allmulti(ha);
865 				}
866 			} else {
867 				QLA_LOCK(ha, __func__);
868 				qla_init_locked(ha);
869 				ha->max_frame_size = ifp->if_mtu +
870 					ETHER_HDR_LEN + ETHER_CRC_LEN;
871 				ret = qla_set_max_mtu(ha, ha->max_frame_size,
872 					(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
873 				QLA_UNLOCK(ha, __func__);
874 			}
875 		} else {
876 			QLA_LOCK(ha, __func__);
877 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
878 				qla_stop(ha);
879 			ha->if_flags = ifp->if_flags;
880 			QLA_UNLOCK(ha, __func__);
881 		}
882 		break;
883 
884 	case SIOCADDMULTI:
885 		QL_DPRINT4((ha->pci_dev,
886 			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));
887 
888 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
889 			qla_set_multi(ha, 1);
890 		}
891 		break;
892 
893 	case SIOCDELMULTI:
894 		QL_DPRINT4((ha->pci_dev,
895 			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));
896 
897 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
898 			qla_set_multi(ha, 0);
899 		}
900 		break;
901 
902 	case SIOCSIFMEDIA:
903 	case SIOCGIFMEDIA:
904 		QL_DPRINT4((ha->pci_dev,
905 			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
906 			__func__, cmd));
907 		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
908 		break;
909 
910 	case SIOCSIFCAP:
911 	{
912 		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
913 
914 		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
915 			__func__, cmd));
916 
917 		if (mask & IFCAP_HWCSUM)
918 			ifp->if_capenable ^= IFCAP_HWCSUM;
919 		if (mask & IFCAP_TSO4)
920 			ifp->if_capenable ^= IFCAP_TSO4;
921 		if (mask & IFCAP_TSO6)
922 			ifp->if_capenable ^= IFCAP_TSO6;
923 		if (mask & IFCAP_VLAN_HWTAGGING)
924 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
925 
926 		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
927 			qla_init(ha);
928 
929 		VLAN_CAPABILITIES(ifp);
930 		break;
931 	}
932 
933 	default:
934 		QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
935 			__func__, cmd));
936 		ret = ether_ioctl(ifp, cmd, data);
937 		break;
938 	}
939 
940 	return (ret);
941 }
942 
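/*
 * Name:	qla_media_change
 * Function:	ifmedia change callback; only IFM_ETHER media is accepted.
 */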
943 static int
944 qla_media_change(struct ifnet *ifp)
945 {
946 	qla_host_t *ha;
947 	struct ifmedia *ifm;
948 	int ret = 0;
949 
950 	ha = (qla_host_t *)ifp->if_softc;
951 
952 	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
953 
954 	ifm = &ha->media;
955 
956 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
957 		ret = EINVAL;
958 
959 	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
960 
961 	return (ret);
962 }
963 
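/*
 * Name:	qla_media_status
 * Function:	ifmedia status callback; reports link state, duplex and
 *		optics type.
 */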
964 static void
965 qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
966 {
967 	qla_host_t *ha;
968 
969 	ha = (qla_host_t *)ifp->if_softc;
970 
971 	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
972 
973 	ifmr->ifm_status = IFM_AVALID;
974 	ifmr->ifm_active = IFM_ETHER;
975 
976 	qla_update_link_state(ha);
977 	if (ha->hw.flags.link_up) {
978 		ifmr->ifm_status |= IFM_ACTIVE;
979 		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
980 	}
981 
982 	QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,
983 		(ha->hw.flags.link_up ? "link_up" : "link_down")));
984 
985 	return;
986 }
987 
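/*
 * Name:	qla_start
 * Function:	if_start handler; drains the interface send queue and hands
 *		each frame to qla_send() while the link is up.
 */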
988 void
989 qla_start(struct ifnet *ifp)
990 {
991 	struct mbuf    *m_head;
992 	qla_host_t *ha = (qla_host_t *)ifp->if_softc;
993 
994 	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));
995 
996 	if (!mtx_trylock(&ha->tx_lock)) {
997 		QL_DPRINT8((ha->pci_dev,
998 			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
999 		return;
1000 	}
1001 
1002 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1003 		IFF_DRV_RUNNING) {
1004 		QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
1005 		QLA_TX_UNLOCK(ha);
1006 		return;
1007 	}
1008 
1009 	if (!ha->watchdog_ticks)
1010 		qla_update_link_state(ha);
1011 
1012 	if (!ha->hw.flags.link_up) {
1013 		QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
1014 		QLA_TX_UNLOCK(ha);
1015 		return;
1016 	}
1017 
1018 	while (ifp->if_snd.ifq_head != NULL) {
1019 		IF_DEQUEUE(&ifp->if_snd, m_head);
1020 
1021 		if (m_head == NULL) {
1022 			QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
1023 				__func__));
1024 			break;
1025 		}
1026 
1027 		if (qla_send(ha, &m_head)) {
1028 			if (m_head == NULL)
1029 				break;
1030 			QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
1031 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1032 			IF_PREPEND(&ifp->if_snd, m_head);
1033 			break;
1034 		}
1035 		/* Send a copy of the frame to the BPF listener */
1036 		ETHER_BPF_MTAP(ifp, m_head);
1037 	}
1038 	QLA_TX_UNLOCK(ha);
1039 	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
1040 	return;
1041 }
1042 
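/*
 * Name:	qla_send
 * Function:	DMA maps a frame (defragmenting it once on EFBIG) and hands
 *		the segment list to the hardware transmit routine.
 */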
1043 static int
1044 qla_send(qla_host_t *ha, struct mbuf **m_headp)
1045 {
1046 	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
1047 	bus_dmamap_t		map;
1048 	int			nsegs;
1049 	int			ret = -1;
1050 	uint32_t		tx_idx;
1051 	struct mbuf *m_head = *m_headp;
1052 
1053 	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));
1054 
1055 	if ((ret = bus_dmamap_create(ha->tx_tag, BUS_DMA_NOWAIT, &map))) {
1056 		ha->err_tx_dmamap_create++;
1057 		device_printf(ha->pci_dev,
1058 			"%s: bus_dmamap_create failed[%d, %d]\n",
1059 			__func__, ret, m_head->m_pkthdr.len);
1060 		return (ret);
1061 	}
1062 
1063 	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
1064 			BUS_DMA_NOWAIT);
1065 
1066 	if (ret == EFBIG) {
1067 
1068 		struct mbuf *m;
1069 
1070 		QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
1071 			m_head->m_pkthdr.len));
1072 
1073 		m = m_defrag(m_head, M_NOWAIT);
1074 		if (m == NULL) {
1075 			ha->err_tx_defrag++;
1076 			m_freem(m_head);
1077 			*m_headp = NULL;
1078 			device_printf(ha->pci_dev,
1079 				"%s: m_defrag() = NULL [%d]\n",
1080 				__func__, ret);
1081 			return (ENOBUFS);
1082 		}
1083 		m_head = m;
1084 
1085 		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
1086 					segs, &nsegs, BUS_DMA_NOWAIT))) {
1087 
1088 			ha->err_tx_dmamap_load++;
1089 
1090 			device_printf(ha->pci_dev,
1091 				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
1092 				__func__, ret, m_head->m_pkthdr.len);
1093 
1094 			bus_dmamap_destroy(ha->tx_tag, map);
1095 			if (ret != ENOMEM) {
1096 				m_freem(m_head);
1097 				*m_headp = NULL;
1098 			}
1099 			return (ret);
1100 		}
1101 	} else if (ret) {
1102 		ha->err_tx_dmamap_load++;
1103 
1104 		device_printf(ha->pci_dev,
1105 			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
1106 			__func__, ret, m_head->m_pkthdr.len);
1107 
1108 		bus_dmamap_destroy(ha->tx_tag, map);
1109 
1110 		if (ret != ENOMEM) {
1111 			m_freem(m_head);
1112 			*m_headp = NULL;
1113 		}
1114 		return (ret);
1115 	}
1116 
1117 	QL_ASSERT((nsegs != 0), ("qla_send: empty packet"));
1118 
1119 	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
1120 
1121 	if (!(ret = qla_hw_send(ha, segs, nsegs, &tx_idx, m_head))) {
1122 		ha->tx_buf[tx_idx].m_head = m_head;
1123 		ha->tx_buf[tx_idx].map = map;
1124 	} else {
1125 		if (ret == EINVAL) {
1126 			m_freem(m_head);
1127 			*m_headp = NULL;
1128 		}
1129 	}
1130 
1131 	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
1132 	return (ret);
1133 }
1134 
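/*
 * Name:	qla_stop
 * Function:	Pauses the watchdog, stops receive processing, tears down
 *		the hardware interface and frees transmit/receive buffers.
 */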
1135 static void
1136 qla_stop(qla_host_t *ha)
1137 {
1138 	struct ifnet *ifp = ha->ifp;
1139 	device_t	dev;
1140 
1141 	dev = ha->pci_dev;
1142 
1143 	ha->flags.qla_watchdog_pause = 1;
1144 	qla_mdelay(__func__, 100);
1145 
1146 	ha->flags.stop_rcv = 1;
1147 	qla_hw_stop_rcv(ha);
1148 
1149 	qla_del_hw_if(ha);
1150 
1151 	qla_free_lro(ha);
1152 
1153 	qla_free_xmt_bufs(ha);
1154 	qla_free_rcv_bufs(ha);
1155 
1156 	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
1157 
1158 	return;
1159 }
1160 
1161 /*
1162  * Buffer Management Functions for Transmit and Receive Rings
1163  */
1164 static int
1165 qla_alloc_xmt_bufs(qla_host_t *ha)
1166 {
1167 	if (bus_dma_tag_create(NULL,    /* parent */
1168 		1, 0,    /* alignment, bounds */
1169 		BUS_SPACE_MAXADDR,       /* lowaddr */
1170 		BUS_SPACE_MAXADDR,       /* highaddr */
1171 		NULL, NULL,      /* filter, filterarg */
1172 		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
1173 		QLA_MAX_SEGMENTS,        /* nsegments */
1174 		PAGE_SIZE,        /* maxsegsize */
1175 		BUS_DMA_ALLOCNOW,        /* flags */
1176 		NULL,    /* lockfunc */
1177 		NULL,    /* lockfuncarg */
1178 		&ha->tx_tag)) {
1179 		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
1180 			__func__);
1181 		return (ENOMEM);
1182 	}
1183 	bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1184 
1185 	return 0;
1186 }
1187 
1188 /*
1189  * Release the mbuf after it has been sent on the wire
1190  */
1191 static void
1192 qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
1193 {
1194 	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
1195 
1196 	if (txb->m_head) {
1197 
1198 		bus_dmamap_unload(ha->tx_tag, txb->map);
1199 		bus_dmamap_destroy(ha->tx_tag, txb->map);
1200 
1201 		m_freem(txb->m_head);
1202 		txb->m_head = NULL;
1203 	}
1204 
1205 	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
1206 }
1207 
1208 static void
1209 qla_free_xmt_bufs(qla_host_t *ha)
1210 {
1211 	int		i;
1212 
1213 	for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
1214 		qla_clear_tx_buf(ha, &ha->tx_buf[i]);
1215 
1216 	if (ha->tx_tag != NULL) {
1217 		bus_dma_tag_destroy(ha->tx_tag);
1218 		ha->tx_tag = NULL;
1219 	}
1220 	bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1221 
1222 	return;
1223 }
1224 
1225 
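/*
 * Name:	qla_alloc_rcv_bufs
 * Function:	Creates the receive DMA tag and fills the standard and jumbo
 *		receive rings with newly allocated mbufs.
 */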
1226 static int
1227 qla_alloc_rcv_bufs(qla_host_t *ha)
1228 {
1229 	int		i, j, ret = 0;
1230 	qla_rx_buf_t	*rxb;
1231 
1232 	if (bus_dma_tag_create(NULL,    /* parent */
1233 			1, 0,    /* alignment, bounds */
1234 			BUS_SPACE_MAXADDR,       /* lowaddr */
1235 			BUS_SPACE_MAXADDR,       /* highaddr */
1236 			NULL, NULL,      /* filter, filterarg */
1237 			MJUM9BYTES,     /* maxsize */
1238 			1,        /* nsegments */
1239 			MJUM9BYTES,        /* maxsegsize */
1240 			BUS_DMA_ALLOCNOW,        /* flags */
1241 			NULL,    /* lockfunc */
1242 			NULL,    /* lockfuncarg */
1243 			&ha->rx_tag)) {
1244 
1245 		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1246 			__func__);
1247 
1248 		return (ENOMEM);
1249 	}
1250 
1251 	bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
1252 	bzero((void *)ha->rx_jbuf,
1253 		(sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));
1254 
1255 	for (i = 0; i < MAX_SDS_RINGS; i++) {
1256 		ha->hw.sds[i].sdsr_next = 0;
1257 		ha->hw.sds[i].rxb_free = NULL;
1258 		ha->hw.sds[i].rx_free = 0;
1259 		ha->hw.sds[i].rxjb_free = NULL;
1260 		ha->hw.sds[i].rxj_free = 0;
1261 	}
1262 
1263 	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1264 
1265 		rxb = &ha->rx_buf[i];
1266 
1267 		ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);
1268 
1269 		if (ret) {
1270 			device_printf(ha->pci_dev,
1271 				"%s: dmamap[%d] failed\n", __func__, i);
1272 
1273 			for (j = 0; j < i; j++) {
1274 				bus_dmamap_destroy(ha->rx_tag,
1275 					ha->rx_buf[j].map);
1276 			}
1277 			goto qla_alloc_rcv_bufs_failed;
1278 		}
1279 	}
1280 
1281 	qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_NORMAL);
1282 
1283 	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1284 		rxb = &ha->rx_buf[i];
1285 		rxb->handle = i;
1286 		if (!(ret = qla_get_mbuf(ha, rxb, NULL, 0))) {
1287 			/*
1288 		 	 * set the physical address in the corresponding
1289 			 * descriptor entry in the receive ring/queue for the
1290 			 * hba
1291 			 */
1292 			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL, i,
1293 				rxb->handle, rxb->paddr,
1294 				(rxb->m_head)->m_pkthdr.len);
1295 		} else {
1296 			device_printf(ha->pci_dev,
1297 				"%s: qla_get_mbuf [standard(%d)] failed\n",
1298 				__func__, i);
1299 			bus_dmamap_destroy(ha->rx_tag, rxb->map);
1300 			goto qla_alloc_rcv_bufs_failed;
1301 		}
1302 	}
1303 
1304 
1305 	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
1306 
1307 		rxb = &ha->rx_jbuf[i];
1308 
1309 		ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);
1310 
1311 		if (ret) {
1312 			device_printf(ha->pci_dev,
1313 				"%s: dmamap[%d] failed\n", __func__, i);
1314 
1315 			for (j = 0; j < i; j++) {
1316 				bus_dmamap_destroy(ha->rx_tag,
1317 					ha->rx_jbuf[j].map);
1318 			}
1319 			goto qla_alloc_rcv_bufs_failed;
1320 		}
1321 	}
1322 
1323 	qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_JUMBO);
1324 
1325 	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
1326 		rxb = &ha->rx_jbuf[i];
1327 		rxb->handle = i;
1328 		if (!(ret = qla_get_mbuf(ha, rxb, NULL, 1))) {
1329 			/*
1330 		 	 * set the physical address in the corresponding
1331 			 * descriptor entry in the receive ring/queue for the
1332 			 * hba
1333 			 */
1334 			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO, i,
1335 				rxb->handle, rxb->paddr,
1336 				(rxb->m_head)->m_pkthdr.len);
1337 		} else {
1338 			device_printf(ha->pci_dev,
1339 				"%s: qla_get_mbuf [jumbo(%d)] failed\n",
1340 				__func__, i);
1341 			bus_dmamap_destroy(ha->rx_tag, rxb->map);
1342 			goto qla_alloc_rcv_bufs_failed;
1343 		}
1344 	}
1345 
1346 	return (0);
1347 
1348 qla_alloc_rcv_bufs_failed:
1349 	qla_free_rcv_bufs(ha);
1350 	return (ret);
1351 }
1352 
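/*
 * Name:	qla_free_rcv_bufs
 * Function:	Unloads and destroys the receive DMA maps, frees the receive
 *		mbufs and resets the status descriptor ring bookkeeping.
 */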
1353 static void
1354 qla_free_rcv_bufs(qla_host_t *ha)
1355 {
1356 	int		i;
1357 	qla_rx_buf_t	*rxb;
1358 
1359 	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1360 		rxb = &ha->rx_buf[i];
1361 		if (rxb->m_head != NULL) {
1362 			bus_dmamap_unload(ha->rx_tag, rxb->map);
1363 			bus_dmamap_destroy(ha->rx_tag, rxb->map);
1364 			m_freem(rxb->m_head);
1365 			rxb->m_head = NULL;
1366 		}
1367 	}
1368 
1369 	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
1370 		rxb = &ha->rx_jbuf[i];
1371 		if (rxb->m_head != NULL) {
1372 			bus_dmamap_unload(ha->rx_tag, rxb->map);
1373 			bus_dmamap_destroy(ha->rx_tag, rxb->map);
1374 			m_freem(rxb->m_head);
1375 			rxb->m_head = NULL;
1376 		}
1377 	}
1378 
1379 	if (ha->rx_tag != NULL) {
1380 		bus_dma_tag_destroy(ha->rx_tag);
1381 		ha->rx_tag = NULL;
1382 	}
1383 
1384 	bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
1385 	bzero((void *)ha->rx_jbuf,
1386 		(sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));
1387 
1388 	for (i = 0; i < MAX_SDS_RINGS; i++) {
1389 		ha->hw.sds[i].sdsr_next = 0;
1390 		ha->hw.sds[i].rxb_free = NULL;
1391 		ha->hw.sds[i].rx_free = 0;
1392 		ha->hw.sds[i].rxjb_free = NULL;
1393 		ha->hw.sds[i].rxj_free = 0;
1394 	}
1395 
1396 	return;
1397 }
1398 
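/*
 * Name:	qla_get_mbuf
 * Function:	Allocates (or reuses) a standard or jumbo mbuf cluster,
 *		aligns its data to an 8 byte boundary and DMA maps it for
 *		the receive ring.
 */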
1399 int
1400 qla_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp,
1401 	uint32_t jumbo)
1402 {
1403 	register struct mbuf *mp = nmp;
1404 	struct ifnet   *ifp;
1405 	int             ret = 0;
1406 	uint32_t	offset;
1407 
1408 	QL_DPRINT2((ha->pci_dev, "%s: jumbo(0x%x) enter\n", __func__, jumbo));
1409 
1410 	ifp = ha->ifp;
1411 
1412 	if (mp == NULL) {
1413 
1414 		if (!jumbo) {
1415 			mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1416 
1417 			if (mp == NULL) {
1418 				ha->err_m_getcl++;
1419 				ret = ENOBUFS;
1420 				device_printf(ha->pci_dev,
1421 					"%s: m_getcl failed\n", __func__);
1422 				goto exit_qla_get_mbuf;
1423 			}
1424 			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1425 		} else {
1426 			mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1427 				MJUM9BYTES);
1428 			if (mp == NULL) {
1429 				ha->err_m_getjcl++;
1430 				ret = ENOBUFS;
1431 				device_printf(ha->pci_dev,
1432 					"%s: m_getjcl failed\n", __func__);
1433 				goto exit_qla_get_mbuf;
1434 			}
1435 			mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;
1436 		}
1437 	} else {
1438 		if (!jumbo)
1439 			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1440 		else
1441 			mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;
1442 
1443 		mp->m_data = mp->m_ext.ext_buf;
1444 		mp->m_next = NULL;
1445 	}
1446 
1447 
1448 	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
1449 	if (offset) {
1450 		offset = 8 - offset;
1451 		m_adj(mp, offset);
1452 	}
1453 
1454 	/*
1455 	 * Using memory from the mbuf cluster pool, invoke the bus_dma
1456 	 * machinery to arrange the memory mapping.
1457 	 */
1458 	ret = bus_dmamap_load(ha->rx_tag, rxb->map,
1459 				mtod(mp, void *), mp->m_len,
1460 				qla_dmamap_callback, &rxb->paddr,
1461 				BUS_DMA_NOWAIT);
1462 	if (ret || !rxb->paddr) {
1463 		m_free(mp);
1464 		rxb->m_head = NULL;
1465 		device_printf(ha->pci_dev,
1466 			"%s: bus_dmamap_load failed\n", __func__);
1467                 ret = -1;
1468 		goto exit_qla_get_mbuf;
1469 	}
1470 	rxb->m_head = mp;
1471 	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
1472 
1473 exit_qla_get_mbuf:
1474 	QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
1475 	return (ret);
1476 }
1477 
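/*
 * Name:	qla_tx_done
 * Function:	Transmit taskqueue handler; reaps completed transmits and
 *		restarts the send queue.
 */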
1478 static void
1479 qla_tx_done(void *context, int pending)
1480 {
1481 	qla_host_t *ha = context;
1482 
1483 	qla_hw_tx_done(ha);
1484 	qla_start(ha->ifp);
1485 }
1486 
1487