/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qls_os.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
#include "qls_os.h"
#include "qls_hw.h"
#include "qls_def.h"
#include "qls_inline.h"
#include "qls_ver.h"
#include "qls_glbl.h"
#include "qls_dbg.h"
#include <sys/smp.h>

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_DEVICE_QLOGIC_8000
#define PCI_DEVICE_QLOGIC_8000	0x8000
#endif

#define PCI_QLOGIC_DEV8000 \
	((PCI_DEVICE_QLOGIC_8000 << 16) | PCI_VENDOR_QLOGIC)

/*
 * static functions
 */
static int qls_alloc_parent_dma_tag(qla_host_t *ha);
static void qls_free_parent_dma_tag(qla_host_t *ha);

static void qls_flush_xmt_bufs(qla_host_t *ha);

static int qls_alloc_rcv_bufs(qla_host_t *ha);
static void qls_free_rcv_bufs(qla_host_t *ha);

static void qls_init_ifnet(device_t dev, qla_host_t *ha);
static void qls_release(qla_host_t *ha);
static void qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
	int error);
static void qls_stop(qla_host_t *ha);
static int qls_send(qla_host_t *ha, struct mbuf **m_headp);
static void qls_tx_done(void *context, int pending);

static int qls_config_lro(qla_host_t *ha);
static void qls_free_lro(qla_host_t *ha);

static void qls_error_recovery(void *context, int pending);

/*
 * Hooks to the Operating System
 */
static int qls_pci_probe (device_t);
static int qls_pci_attach (device_t);
static int qls_pci_detach (device_t);

static void qls_start(if_t ifp);
static void qls_init(void *arg);
static int qls_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int qls_media_change(if_t ifp);
static void qls_media_status(if_t ifp, struct ifmediareq *ifmr);

static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qls_pci_probe),
	DEVMETHOD(device_attach, qls_pci_attach),
	DEVMETHOD(device_detach, qls_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

DRIVER_MODULE(qla8000, pci, qla_pci_driver, 0, 0);

MODULE_DEPEND(qla8000, pci, 1, 1, 1);
MODULE_DEPEND(qla8000, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA8000BUF, "qla8000buf", "Buffers for qla8000 driver");

static char dev_str[64];
static char ver_str[64];

/*
 * Name: qls_pci_probe
 * Function: Validates that the PCI device is a QLA8000 adapter
 */
static int
qls_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_DEV8000:
		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
			"Qlogic ISP 8000 PCI CNA Adapter-Ethernet Function",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		device_set_desc(dev, dev_str);
		break;
	default:
		return (ENXIO);
	}

	if (bootverbose)
		printf("%s: %s\n", __func__, dev_str);

	return (BUS_PROBE_DEFAULT);
}

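/*
 * Name: qls_sysctl_get_drvr_stats
 * Function: sysctl handler; writing a value of 1 dumps the driver
 *	maintained ring indices and error counters to the console
 */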
static int
qls_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
{
	int err = 0, ret;
	qla_host_t *ha;
	uint32_t i;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;

		for (i = 0; i < ha->num_tx_rings; i++) {
			QL_DPRINT2((ha->pci_dev,
				"%s: tx_ring[%d].tx_frames= %p\n",
				__func__, i,
				(void *)ha->tx_ring[i].tx_frames));

			QL_DPRINT2((ha->pci_dev,
				"%s: tx_ring[%d].tx_tso_frames= %p\n",
				__func__, i,
				(void *)ha->tx_ring[i].tx_tso_frames));

			QL_DPRINT2((ha->pci_dev,
				"%s: tx_ring[%d].tx_vlan_frames= %p\n",
				__func__, i,
				(void *)ha->tx_ring[i].tx_vlan_frames));

			device_printf(ha->pci_dev,
				"%s: tx_ring[%d].txr_free= 0x%08x\n",
				__func__, i,
				ha->tx_ring[i].txr_free);

			device_printf(ha->pci_dev,
				"%s: tx_ring[%d].txr_next= 0x%08x\n",
				__func__, i,
				ha->tx_ring[i].txr_next);

			device_printf(ha->pci_dev,
				"%s: tx_ring[%d].txr_done= 0x%08x\n",
				__func__, i,
				ha->tx_ring[i].txr_done);

			device_printf(ha->pci_dev,
				"%s: tx_ring[%d].txr_cons_idx= 0x%08x\n",
				__func__, i,
				*(ha->tx_ring[i].txr_cons_vaddr));
		}

		for (i = 0; i < ha->num_rx_rings; i++) {
			QL_DPRINT2((ha->pci_dev,
				"%s: rx_ring[%d].rx_int= %p\n",
				__func__, i,
				(void *)ha->rx_ring[i].rx_int));

			QL_DPRINT2((ha->pci_dev,
				"%s: rx_ring[%d].rss_int= %p\n",
				__func__, i,
				(void *)ha->rx_ring[i].rss_int));

			device_printf(ha->pci_dev,
				"%s: rx_ring[%d].lbq_next= 0x%08x\n",
				__func__, i,
				ha->rx_ring[i].lbq_next);

			device_printf(ha->pci_dev,
				"%s: rx_ring[%d].lbq_free= 0x%08x\n",
				__func__, i,
				ha->rx_ring[i].lbq_free);

			device_printf(ha->pci_dev,
				"%s: rx_ring[%d].lbq_in= 0x%08x\n",
				__func__, i,
				ha->rx_ring[i].lbq_in);

			device_printf(ha->pci_dev,
				"%s: rx_ring[%d].sbq_next= 0x%08x\n",
				__func__, i,
				ha->rx_ring[i].sbq_next);

			device_printf(ha->pci_dev,
				"%s: rx_ring[%d].sbq_free= 0x%08x\n",
				__func__, i,
				ha->rx_ring[i].sbq_free);

			device_printf(ha->pci_dev,
				"%s: rx_ring[%d].sbq_in= 0x%08x\n",
				__func__, i,
				ha->rx_ring[i].sbq_in);
		}

		device_printf(ha->pci_dev, "%s: err_m_getcl = 0x%08x\n",
			__func__, ha->err_m_getcl);
		device_printf(ha->pci_dev, "%s: err_m_getjcl = 0x%08x\n",
			__func__, ha->err_m_getjcl);
		device_printf(ha->pci_dev,
			"%s: err_tx_dmamap_create = 0x%08x\n",
			__func__, ha->err_tx_dmamap_create);
		device_printf(ha->pci_dev,
			"%s: err_tx_dmamap_load = 0x%08x\n",
			__func__, ha->err_tx_dmamap_load);
		device_printf(ha->pci_dev,
			"%s: err_tx_defrag = 0x%08x\n",
			__func__, ha->err_tx_defrag);
	}
	return (err);
}

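/*
 * Name: qls_add_sysctls
 * Function: Registers the driver version, debug level and driver
 *	statistics sysctl nodes under the device's sysctl tree
 */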
static void
qls_add_sysctls(qla_host_t *ha)
{
	device_t dev = ha->pci_dev;

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "version", CTLFLAG_RD,
		ver_str, 0, "Driver Version");

	qls_dbg_level = 0;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "debug", CTLFLAG_RW,
		&qls_dbg_level, qls_dbg_level, "Debug Level");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "drvr_stats",
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
		qls_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");

	return;
}

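/*
 * Name: qls_watchdog
 * Function: Periodic callout; kicks off error recovery when requested,
 *	restarts transmits when work is pending, and re-arms itself
 */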
static void
qls_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	if_t ifp;

	ifp = ha->ifp;

	if (ha->flags.qla_watchdog_exit) {
		ha->qla_watchdog_exited = 1;
		return;
	}
	ha->qla_watchdog_exited = 0;

	if (!ha->flags.qla_watchdog_pause) {
		if (ha->qla_initiate_recovery) {
			ha->qla_watchdog_paused = 1;
			ha->qla_initiate_recovery = 0;
			ha->err_inject = 0;
			taskqueue_enqueue(ha->err_tq, &ha->err_task);

		} else if (!if_sendq_empty(ifp) && QL_RUNNING(ifp)) {
			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
		}

		ha->qla_watchdog_paused = 0;
	} else {
		ha->qla_watchdog_paused = 1;
	}

	ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qls_watchdog, ha);

	return;
}

/*
 * Name: qls_pci_attach
 * Function: Attaches the device to the operating system
 */
static int
qls_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	int i;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	memset(ha, 0, sizeof (qla_host_t));

	if (pci_get_device(dev) != PCI_DEVICE_QLOGIC_8000) {
		device_printf(dev, "device is not QLE8000\n");
		return (ENXIO);
	}

	ha->pci_func = pci_get_function(dev);

	ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(1);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

	if (ha->pci_reg == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto qls_pci_attach_err;
	}

	ha->reg_rid1 = PCIR_BAR(3);
	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->reg_rid1, RF_ACTIVE);

	if (ha->pci_reg1 == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto qls_pci_attach_err;
	}

	mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);

	qls_add_sysctls(ha);
	qls_hw_add_sysctls(ha);

	ha->flags.lock_init = 1;

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < qls_get_msix_count(ha)) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qls_pci_attach_err;
	}

	ha->msix_count = qls_get_msix_count(ha);

	QL_DPRINT2((dev, "\n%s: ha %p pci_func 0x%x msix_count 0x%x"
		" pci_reg %p pci_reg1 %p\n", __func__, ha,
		ha->pci_func, ha->msix_count, ha->pci_reg, ha->pci_reg1));

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qls_pci_attach_err;
	}

	for (i = 0; i < ha->num_rx_rings; i++) {
		ha->irq_vec[i].cq_idx = i;
		ha->irq_vec[i].ha = ha;
		ha->irq_vec[i].irq_rid = 1 + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			goto qls_pci_attach_err;
		}

		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE), NULL, qls_isr,
			&ha->irq_vec[i], &ha->irq_vec[i].handle)) {
			device_printf(dev,
				"could not setup interrupt\n");
			goto qls_pci_attach_err;
		}
	}

	qls_rd_nic_params(ha);

	/* allocate parent dma tag */
	if (qls_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qls_alloc_parent_dma_tag failed\n",
			__func__);
		goto qls_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (qls_alloc_dma(ha)) {
		device_printf(dev, "%s: qls_alloc_dma failed\n", __func__);
		goto qls_pci_attach_err;
	}

	/* create the O.S. ethernet interface */
	qls_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->flags.qla_watchdog_pause = 1;

	TASK_INIT(&ha->tx_task, 0, qls_tx_done, ha);
	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->tx_tq);
	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
		device_get_nameunit(ha->pci_dev));

	callout_init(&ha->tx_callout, 1);
	ha->flags.qla_callout_init = 1;

	/* create ioctl device interface */
	if (qls_make_cdev(ha)) {
		device_printf(dev, "%s: qls_make_cdev failed\n", __func__);
		goto qls_pci_attach_err;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qls_watchdog, ha);

	TASK_INIT(&ha->err_task, 0, qls_error_recovery, ha);
	ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->err_tq);
	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
		device_get_nameunit(ha->pci_dev));

	QL_DPRINT2((dev, "%s: exit 0\n", __func__));
	return (0);

qls_pci_attach_err:

	qls_release(ha);

	QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));
	return (ENXIO);
}

/*
 * Name: qls_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qls_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	(void)QLA_LOCK(ha, __func__, 0);
	qls_stop(ha);
	QLA_UNLOCK(ha, __func__);

	qls_release(ha);

	QL_DPRINT2((dev, "%s: exit\n", __func__));

	return (0);
}

/*
 * Name: qls_release
 * Function: Releases the resources allocated for the device
 */
static void
qls_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	if (ha->err_tq) {
		taskqueue_drain(ha->err_tq, &ha->err_task);
		taskqueue_free(ha->err_tq);
	}

	if (ha->tx_tq) {
		taskqueue_drain(ha->tx_tq, &ha->tx_task);
		taskqueue_free(ha->tx_tq);
	}

	qls_del_cdev(ha);

	if (ha->flags.qla_watchdog_active) {
		ha->flags.qla_watchdog_exit = 1;

		while (ha->qla_watchdog_exited == 0)
			qls_mdelay(__func__, 1);
	}

	if (ha->flags.qla_callout_init)
		callout_stop(&ha->tx_callout);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qls_free_dma(ha);
	qls_free_parent_dma_tag(ha);

	for (i = 0; i < ha->num_rx_rings; i++) {
		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
				ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}
	}

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->tx_lock);
		mtx_destroy(&ha->hw_lock);
	}

	if (ha->pci_reg)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

	if (ha->pci_reg1)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
				ha->pci_reg1);
}

/*
 * DMA Related Functions
 */

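/*
 * Name: qls_dmamap_callback
 * Function: Stores the physical address of the first dma segment into
 *	the caller supplied bus_addr_t (zero on error)
 */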
static void
qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	*((bus_addr_t *)arg) = 0;

	if (error) {
		printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
		return;
	}

	*((bus_addr_t *)arg) = segs[0].ds_addr;

	return;
}

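/*
 * Name: qls_alloc_dmabuf
 * Function: Creates a dma tag, then allocates and loads a single
 *	physically contiguous dma buffer described by dma_buf
 */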
int
qls_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	int ret = 0;
	device_t dev;
	bus_addr_t b_addr;

	dev = ha->pci_dev;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	ret = bus_dma_tag_create(
			ha->parent_tag,		/* parent */
			dma_buf->alignment,
			((bus_size_t)(1ULL << 32)),/* boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			dma_buf->size,		/* maxsize */
			1,			/* nsegments */
			dma_buf->size,		/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&dma_buf->dma_tag);

	if (ret) {
		device_printf(dev, "%s: could not create dma tag\n", __func__);
		goto qls_alloc_dmabuf_exit;
	}
	ret = bus_dmamem_alloc(dma_buf->dma_tag,
			(void **)&dma_buf->dma_b,
			(BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
			&dma_buf->dma_map);
	if (ret) {
		bus_dma_tag_destroy(dma_buf->dma_tag);
		device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
		goto qls_alloc_dmabuf_exit;
	}

	ret = bus_dmamap_load(dma_buf->dma_tag,
			dma_buf->dma_map,
			dma_buf->dma_b,
			dma_buf->size,
			qls_dmamap_callback,
			&b_addr, BUS_DMA_NOWAIT);

	if (ret || !b_addr) {
		bus_dma_tag_destroy(dma_buf->dma_tag);
		bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
			dma_buf->dma_map);
		ret = -1;
		goto qls_alloc_dmabuf_exit;
	}

	dma_buf->dma_addr = b_addr;

qls_alloc_dmabuf_exit:
	QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
		__func__, ret, (void *)dma_buf->dma_tag,
		(void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

	return ret;
}

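/*
 * Name: qls_free_dmabuf
 * Function: Unloads, frees and destroys the dma buffer allocated by
 *	qls_alloc_dmabuf()
 */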
void
qls_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
	bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
	bus_dma_tag_destroy(dma_buf->dma_tag);
}

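/*
 * Name: qls_alloc_parent_dma_tag
 * Function: Allocates the parent dma tag from which all other dma tags
 *	used by the driver are derived
 */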
static int
qls_alloc_parent_dma_tag(qla_host_t *ha)
{
	int ret;
	device_t dev;

	dev = ha->pci_dev;

	/*
	 * Allocate parent DMA Tag
	 */
	ret = bus_dma_tag_create(
			bus_get_dma_tag(dev),	/* parent */
			1, ((bus_size_t)(1ULL << 32)),/* alignment, boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
			0,			/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&ha->parent_tag);

	if (ret) {
		device_printf(dev, "%s: could not create parent dma tag\n",
			__func__);
		return (-1);
	}

	ha->flags.parent_tag = 1;

	return (0);
}

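/*
 * Name: qls_free_parent_dma_tag
 * Function: Destroys the parent dma tag if it was allocated
 */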
static void
qls_free_parent_dma_tag(qla_host_t *ha)
{
	if (ha->flags.parent_tag) {
		bus_dma_tag_destroy(ha->parent_tag);
		ha->flags.parent_tag = 0;
	}
}

/*
 * Name: qls_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S.
 */

static void
qls_init_ifnet(device_t dev, qla_host_t *ha)
{
	if_t ifp;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setbaudrate(ifp, IF_Gbps(10));
	if_setinitfn(ifp, qls_init);
	if_setsoftc(ifp, ha);
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, qls_ioctl);
	if_setstartfn(ifp, qls_start);

	if_setsendqlen(ifp, qls_get_ifq_snd_maxlen(ha));
	if_setsendqready(ifp);

	ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
	if (ha->max_frame_size <= MCLBYTES) {
		ha->msize = MCLBYTES;
	} else if (ha->max_frame_size <= MJUMPAGESIZE) {
		ha->msize = MJUMPAGESIZE;
	} else
		ha->msize = MJUM9BYTES;

	ether_ifattach(ifp, qls_get_mac_addr(ha));

	if_setcapabilities(ifp, IFCAP_JUMBO_MTU);

	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);

	if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0);
	if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0);

	if_setcapenable(ifp, if_getcapabilities(ifp));

	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	ifmedia_init(&ha->media, IFM_IMASK, qls_media_change, qls_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qls_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2((dev, "%s: exit\n", __func__));

	return;
}

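/*
 * Name: qls_init_locked
 * Function: Stops the interface, (re)allocates the receive buffers,
 *	configures LRO and brings the hardware interface up; called with
 *	the driver lock held
 */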
static void
qls_init_locked(qla_host_t *ha)
{
	if_t ifp = ha->ifp;

	qls_stop(ha);

	qls_flush_xmt_bufs(ha);

	if (qls_alloc_rcv_bufs(ha) != 0)
		return;

	if (qls_config_lro(ha))
		return;

	bcopy(if_getlladdr(ha->ifp), ha->mac_addr, ETHER_ADDR_LEN);

	if_sethwassist(ifp, CSUM_IP);
	if_sethwassistbits(ifp, CSUM_TCP, 0);
	if_sethwassistbits(ifp, CSUM_UDP, 0);
	if_sethwassistbits(ifp, CSUM_TSO, 0);

	if (qls_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		ha->flags.qla_watchdog_pause = 0;
	}

	return;
}

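/*
 * Name: qls_init
 * Function: if_init handler; acquires the driver lock and
 *	(re)initializes the interface
 */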
static void
qls_init(void *arg)
{
	qla_host_t *ha;

	ha = (qla_host_t *)arg;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	(void)QLA_LOCK(ha, __func__, 0);
	qls_init_locked(ha);
	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}

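/*
 * Name: qls_copy_maddr
 * Function: if_foreach_llmaddr() callback; copies one link level
 *	multicast address into the table passed in via arg
 */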
static u_int
qls_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
	uint8_t *mta = arg;

	if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
		return (0);

	bcopy(LLADDR(sdl), &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

	return (1);
}

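/*
 * Name: qls_set_multi
 * Function: Collects the multicast address list from the interface and
 *	programs it into the hardware
 */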
static void
qls_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	if_t ifp = ha->ifp;
	int mcnt;

	mcnt = if_foreach_llmaddr(ifp, qls_copy_maddr, mta);

	if (QLA_LOCK(ha, __func__, 1) == 0) {
		qls_hw_set_multi(ha, mta, mcnt, add_multi);
		QLA_UNLOCK(ha, __func__);
	}

	return;
}

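/*
 * Name: qls_ioctl
 * Function: Handles the ioctl requests issued on the network interface
 */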
static int
qls_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	qla_host_t *ha;

	ha = (qla_host_t *)if_getsoftc(ifp);

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			if_setflagbits(ifp, IFF_UP, 0);
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
				(void)QLA_LOCK(ha, __func__, 0);
				qls_init_locked(ha);
				QLA_UNLOCK(ha, __func__);
			}
			QL_DPRINT4((ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
			break;
		}
#endif
		ether_ioctl(ifp, cmd, data);
		break;

	case SIOCSIFMTU:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_MTU) {
			ret = EINVAL;
		} else {
			(void) QLA_LOCK(ha, __func__, 0);

			if_setmtu(ifp, ifr->ifr_mtu);
			ha->max_frame_size =
				if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

			QLA_UNLOCK(ha, __func__);

			if (ret)
				ret = EINVAL;
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		(void)QLA_LOCK(ha, __func__, 0);

		if (if_getflags(ifp) & IFF_UP) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
				if ((if_getflags(ifp) ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = qls_set_promisc(ha);
				} else if ((if_getflags(ifp) ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = qls_set_allmulti(ha);
				}
			} else {
				ha->max_frame_size = if_getmtu(ifp) +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				qls_init_locked(ha);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				qls_stop(ha);
			ha->if_flags = if_getflags(ifp);
		}

		QLA_UNLOCK(ha, __func__);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			qls_set_multi(ha, 1);
		}
		break;

	case SIOCDELMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			qls_set_multi(ha, 0);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4((ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);

		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			if_togglecapenable(ifp, IFCAP_HWCSUM);
		if (mask & IFCAP_TSO4)
			if_togglecapenable(ifp, IFCAP_TSO4);
		if (mask & IFCAP_VLAN_HWTAGGING)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
		if (mask & IFCAP_VLAN_HWTSO)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);

		if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
			qls_init(ha);

		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}

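/*
 * Name: qls_media_change
 * Function: ifmedia change handler; only IFM_ETHER media are accepted
 */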
static int
qls_media_change(if_t ifp)
{
	qla_host_t *ha;
	struct ifmedia *ifm;
	int ret = 0;

	ha = (qla_host_t *)if_getsoftc(ifp);

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}

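/*
 * Name: qls_media_status
 * Function: ifmedia status handler; reports the current link state
 */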
static void
qls_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha;

	ha = (qla_host_t *)if_getsoftc(ifp);

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	qls_update_link_state(ha);
	if (ha->link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qls_get_optics(ha));
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,
		(ha->link_up ? "link_up" : "link_down")));

	return;
}

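/*
 * Name: qls_start
 * Function: if_start handler; reaps completed transmits and drains the
 *	interface send queue onto the hardware transmit rings
 */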
static void
qls_start(if_t ifp)
{
	int i, ret = 0;
	struct mbuf *m_head;
	qla_host_t *ha = (qla_host_t *)if_getsoftc(ifp);

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8((ha->pci_dev,
			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
		return;
	}

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) ==
		IFF_DRV_RUNNING) {
		for (i = 0; i < ha->num_tx_rings; i++) {
			ret |= qls_hw_tx_done(ha, i);
		}

		if (ret == 0)
			if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	if (!ha->link_up) {
		qls_update_link_state(ha);
		if (!ha->link_up) {
			QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
			QLA_TX_UNLOCK(ha);
			return;
		}
	}

	while (!if_sendq_empty(ifp)) {
		m_head = if_dequeue(ifp);

		if (m_head == NULL) {
			QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
				__func__));
			break;
		}

		if (qls_send(ha, &m_head)) {
			if (m_head == NULL)
				break;
			QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			if_sendq_prepend(ifp, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	QLA_TX_UNLOCK(ha);
	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return;
}

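/*
 * Name: qls_send
 * Function: dma maps the mbuf chain (defragmenting it if needed) and
 *	hands it to the hardware for transmission
 */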
static int
qls_send(qla_host_t *ha, struct mbuf **m_headp)
{
	bus_dma_segment_t segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t map;
	int nsegs;
	int ret = -1;
	uint32_t tx_idx;
	struct mbuf *m_head = *m_headp;
	uint32_t txr_idx = 0;

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	/* check if flowid is set */
	if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE)
		txr_idx = m_head->m_pkthdr.flowid & (ha->num_tx_rings - 1);

	tx_idx = ha->tx_ring[txr_idx].txr_next;

	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {
		struct mbuf *m;

		QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
			segs, &nsegs, BUS_DMA_NOWAIT))) {
			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}

	} else if (ret) {
		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT(ha, (nsegs != 0), ("qls_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

	if (!(ret = qls_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx))) {
		ha->tx_ring[txr_idx].count++;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].map = map;
	} else {
		if (ret == EINVAL) {
			if (m_head)
				m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}

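/*
 * Name: qls_stop
 * Function: Marks the interface down, pauses the watchdog, tears down
 *	the hardware interface and releases the transmit/receive buffers
 */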
static void
qls_stop(qla_host_t *ha)
{
	if_t ifp = ha->ifp;

	if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));

	ha->flags.qla_watchdog_pause = 1;

	while (!ha->qla_watchdog_paused)
		qls_mdelay(__func__, 1);

	qls_del_hw_if(ha);

	qls_free_lro(ha);

	qls_flush_xmt_bufs(ha);
	qls_free_rcv_bufs(ha);

	return;
}

/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
/*
 * Release mbuf after it has been sent on the wire
 */
static void
qls_flush_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head) {
		bus_dmamap_unload(ha->tx_tag, txb->map);

		m_freem(txb->m_head);
		txb->m_head = NULL;
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}

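/*
 * Name: qls_flush_xmt_bufs
 * Function: Releases all pending transmit buffers on all transmit rings
 */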
static void
qls_flush_xmt_bufs(qla_host_t *ha)
{
	int i, j;

	for (j = 0; j < ha->num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
			qls_flush_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
	}

	return;
}

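/*
 * Name: qls_alloc_rcv_mbufs
 * Function: Creates the dma maps for receive ring 'r', allocates an
 *	mbuf for each descriptor and posts its physical address to the
 *	ring's small buffer queue
 */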
static int
qls_alloc_rcv_mbufs(qla_host_t *ha, int r)
{
	int i, j, ret = 0;
	qla_rx_buf_t *rxb;
	qla_rx_ring_t *rx_ring;
	volatile q81_bq_addr_e_t *sbq_e;

	rx_ring = &ha->rx_ring[r];

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
		rxb = &rx_ring->rx_buf[i];

		ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: dmamap[%d, %d] failed\n", __func__, r, i);

			for (j = 0; j < i; j++) {
				rxb = &rx_ring->rx_buf[j];
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
			}
			goto qls_alloc_rcv_mbufs_err;
		}
	}

	rx_ring = &ha->rx_ring[r];

	sbq_e = rx_ring->sbq_vaddr;

	rxb = &rx_ring->rx_buf[0];

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
		if (!(ret = qls_get_mbuf(ha, rxb, NULL))) {
			/*
			 * set the physical address in the
			 * corresponding descriptor entry in the
			 * receive ring/queue for the hba
			 */

			sbq_e->addr_lo = rxb->paddr & 0xFFFFFFFF;
			sbq_e->addr_hi = (rxb->paddr >> 32) & 0xFFFFFFFF;

		} else {
			device_printf(ha->pci_dev,
				"%s: qls_get_mbuf [%d, %d] failed\n",
				__func__, r, i);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			goto qls_alloc_rcv_mbufs_err;
		}

		rxb++;
		sbq_e++;
	}
	return (0);

qls_alloc_rcv_mbufs_err:
	return (-1);
}

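/*
 * Name: qls_free_rcv_bufs
 * Function: Releases the mbufs and dma maps of all receive rings
 */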
static void
qls_free_rcv_bufs(qla_host_t *ha)
{
	int i, r;
	qla_rx_buf_t *rxb;
	qla_rx_ring_t *rxr;

	for (r = 0; r < ha->num_rx_rings; r++) {
		rxr = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &rxr->rx_buf[i];

			if (rxb->m_head != NULL) {
				bus_dmamap_unload(ha->rx_tag, rxb->map);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				m_freem(rxb->m_head);
			}
		}
		bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	}
	return;
}

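/*
 * Name: qls_alloc_rcv_bufs
 * Function: Allocates the receive buffers for all receive rings
 */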
static int
qls_alloc_rcv_bufs(qla_host_t *ha)
{
	int r, ret = 0;
	qla_rx_ring_t *rxr;

	for (r = 0; r < ha->num_rx_rings; r++) {
		rxr = &ha->rx_ring[r];
		bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	}

	for (r = 0; r < ha->num_rx_rings; r++) {
		ret = qls_alloc_rcv_mbufs(ha, r);

		if (ret)
			qls_free_rcv_bufs(ha);
	}

	return (ret);
}

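/*
 * Name: qls_get_mbuf
 * Function: Allocates (or recycles) a receive mbuf, aligns its data to
 *	an 8 byte boundary and dma maps it for the hardware
 */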
int
qls_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
{
	struct mbuf *mp = nmp;
	int ret = 0;
	uint32_t offset;
	bus_dma_segment_t segs[1];
	int nsegs;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	if (mp == NULL) {
		mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ha->msize);

		if (mp == NULL) {
			if (ha->msize == MCLBYTES)
				ha->err_m_getcl++;
			else
				ha->err_m_getjcl++;

			ret = ENOBUFS;
			device_printf(ha->pci_dev,
				"%s: m_getcl failed\n", __func__);
			goto exit_qls_get_mbuf;
		}
		mp->m_len = mp->m_pkthdr.len = ha->msize;
	} else {
		mp->m_len = mp->m_pkthdr.len = ha->msize;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/* align the receive buffers to 8 byte boundary */
	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
	if (offset) {
		offset = 8 - offset;
		m_adj(mp, offset);
	}

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
			mp, segs, &nsegs, BUS_DMA_NOWAIT);
	rxb->paddr = segs[0].ds_addr;

	if (ret || !rxb->paddr || (nsegs != 1)) {
		m_freem(mp);
		rxb->m_head = NULL;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			__func__, ret, (long long unsigned int)rxb->paddr,
			nsegs);
		ret = -1;
		goto exit_qls_get_mbuf;
	}
	rxb->m_head = mp;
	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_qls_get_mbuf:
	QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
	return (ret);
}

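/*
 * Name: qls_tx_done
 * Function: Transmit task handler; restarts transmission if the
 *	interface is still running
 */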
static void
qls_tx_done(void *context, int pending)
{
	qla_host_t *ha = context;
	if_t ifp;

	ifp = ha->ifp;

	if (!ifp)
		return;

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		return;
	}

	qls_start(ha->ifp);
	return;
}

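/*
 * Name: qls_config_lro
 * Function: Initializes an LRO context for each receive ring
 */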
static int
qls_config_lro(qla_host_t *ha)
{
#if defined(INET) || defined(INET6)
	int i;
	struct lro_ctrl *lro;

	for (i = 0; i < ha->num_rx_rings; i++) {
		lro = &ha->rx_ring[i].lro;
		if (tcp_lro_init(lro)) {
			device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n",
				__func__);
			return (-1);
		}
		lro->ifp = ha->ifp;
	}
	ha->flags.lro_init = 1;

	QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__));
#endif
	return (0);
}

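/*
 * Name: qls_free_lro
 * Function: Frees the LRO context of each receive ring
 */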
static void
qls_free_lro(qla_host_t *ha)
{
#if defined(INET) || defined(INET6)
	int i;
	struct lro_ctrl *lro;

	if (!ha->flags.lro_init)
		return;

	for (i = 0; i < ha->num_rx_rings; i++) {
		lro = &ha->rx_ring[i].lro;
		tcp_lro_free(lro);
	}
	ha->flags.lro_init = 0;
#endif
}

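/*
 * Name: qls_error_recovery
 * Function: Error recovery task handler; reinitializes the adapter
 */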
static void
qls_error_recovery(void *context, int pending)
{
	qla_host_t *ha = context;

	qls_init(ha);

	return;
}