1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2013-2014 Qlogic Corporation 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 18 * and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 21 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 * POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 /* 31 * File: qls_os.c 32 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 33 */ 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include "qls_os.h" 39 #include "qls_hw.h" 40 #include "qls_def.h" 41 #include "qls_inline.h" 42 #include "qls_ver.h" 43 #include "qls_glbl.h" 44 #include "qls_dbg.h" 45 #include <sys/smp.h> 46 47 /* 48 * Some PCI Configuration Space Related Defines 49 */ 50 51 #ifndef PCI_VENDOR_QLOGIC 52 #define PCI_VENDOR_QLOGIC 0x1077 53 #endif 54 55 #ifndef PCI_DEVICE_QLOGIC_8000 56 #define PCI_DEVICE_QLOGIC_8000 0x8000 57 #endif 58 59 #define PCI_QLOGIC_DEV8000 \ 60 ((PCI_DEVICE_QLOGIC_8000 << 16) | PCI_VENDOR_QLOGIC) 61 62 /* 63 * static functions 64 */ 65 static int qls_alloc_parent_dma_tag(qla_host_t *ha); 66 static void qls_free_parent_dma_tag(qla_host_t *ha); 67 68 static void qls_flush_xmt_bufs(qla_host_t *ha); 69 70 static int qls_alloc_rcv_bufs(qla_host_t *ha); 71 static void qls_free_rcv_bufs(qla_host_t *ha); 72 73 static void qls_init_ifnet(device_t dev, qla_host_t *ha); 74 static void qls_release(qla_host_t *ha); 75 static void qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, 76 int error); 77 static void qls_stop(qla_host_t *ha); 78 static int qls_send(qla_host_t *ha, struct mbuf **m_headp); 79 static void qls_tx_done(void *context, int pending); 80 81 static int qls_config_lro(qla_host_t *ha); 82 static void qls_free_lro(qla_host_t *ha); 83 84 static void qls_error_recovery(void *context, int pending); 85 86 /* 87 * Hooks to the Operating Systems 88 */ 89 static int qls_pci_probe (device_t); 90 static int qls_pci_attach (device_t); 91 static int qls_pci_detach (device_t); 92 93 static void qls_start(if_t ifp); 94 static void qls_init(void *arg); 95 static int qls_ioctl(if_t ifp, u_long cmd, caddr_t data); 96 static int qls_media_change(if_t ifp); 97 static void qls_media_status(if_t ifp, struct ifmediareq *ifmr); 98 99 static device_method_t qla_pci_methods[] = { 100 /* Device interface */ 101 DEVMETHOD(device_probe, qls_pci_probe), 102 DEVMETHOD(device_attach, qls_pci_attach), 103 DEVMETHOD(device_detach, qls_pci_detach), 104 { 0, 0 } 105 }; 106 107 static driver_t qla_pci_driver = { 108 "ql", qla_pci_methods, sizeof (qla_host_t), 109 }; 110 111 DRIVER_MODULE(qla8000, pci, qla_pci_driver, 0, 0); 112 113 MODULE_DEPEND(qla8000, pci, 1, 1, 1); 114 MODULE_DEPEND(qla8000, ether, 1, 1, 1); 115 116 MALLOC_DEFINE(M_QLA8000BUF, "qla8000buf", "Buffers for qla8000 driver"); 117 118 static char dev_str[64]; 119 static char ver_str[64]; 120 121 /* 122 * Name: qls_pci_probe 123 * Function: Validate the PCI device to be a QLA80XX device 124 */ 125 static int 126 qls_pci_probe(device_t dev) 127 { 128 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 129 case PCI_QLOGIC_DEV8000: 130 snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d", 131 "Qlogic ISP 8000 PCI CNA Adapter-Ethernet Function", 132 QLA_VERSION_MAJOR, QLA_VERSION_MINOR, 133 QLA_VERSION_BUILD); 134 snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d", 135 QLA_VERSION_MAJOR, QLA_VERSION_MINOR, 136 QLA_VERSION_BUILD); 137 device_set_desc(dev, dev_str); 138 break; 139 default: 140 return (ENXIO); 141 } 142 143 if (bootverbose) 144 printf("%s: %s\n ", __func__, dev_str); 145 146 return (BUS_PROBE_DEFAULT); 147 } 148 149 static int 150 qls_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS) 151 { 152 int err = 0, ret; 153 qla_host_t *ha; 154 uint32_t i; 155 156 err = sysctl_handle_int(oidp, &ret, 0, req); 157 158 if (err || !req->newptr) 159 return (err); 160 161 if (ret == 1) { 162 ha = (qla_host_t *)arg1; 163 164 for (i = 0; i < ha->num_tx_rings; i++) { 165 device_printf(ha->pci_dev, 166 "%s: tx_ring[%d].tx_frames= %p\n", 167 __func__, i, 168 (void *)ha->tx_ring[i].tx_frames); 169 170 device_printf(ha->pci_dev, 171 "%s: tx_ring[%d].tx_tso_frames= %p\n", 172 __func__, i, 173 (void *)ha->tx_ring[i].tx_tso_frames); 174 175 device_printf(ha->pci_dev, 176 "%s: tx_ring[%d].tx_vlan_frames= %p\n", 177 __func__, i, 178 (void *)ha->tx_ring[i].tx_vlan_frames); 179 180 device_printf(ha->pci_dev, 181 "%s: tx_ring[%d].txr_free= 0x%08x\n", 182 __func__, i, 183 ha->tx_ring[i].txr_free); 184 185 device_printf(ha->pci_dev, 186 "%s: tx_ring[%d].txr_next= 0x%08x\n", 187 __func__, i, 188 ha->tx_ring[i].txr_next); 189 190 device_printf(ha->pci_dev, 191 "%s: tx_ring[%d].txr_done= 0x%08x\n", 192 __func__, i, 193 ha->tx_ring[i].txr_done); 194 195 device_printf(ha->pci_dev, 196 "%s: tx_ring[%d].txr_cons_idx= 0x%08x\n", 197 __func__, i, 198 *(ha->tx_ring[i].txr_cons_vaddr)); 199 } 200 201 for (i = 0; i < ha->num_rx_rings; i++) { 202 device_printf(ha->pci_dev, 203 "%s: rx_ring[%d].rx_int= %p\n", 204 __func__, i, 205 (void *)ha->rx_ring[i].rx_int); 206 207 device_printf(ha->pci_dev, 208 "%s: rx_ring[%d].rss_int= %p\n", 209 __func__, i, 210 (void *)ha->rx_ring[i].rss_int); 211 212 device_printf(ha->pci_dev, 213 "%s: rx_ring[%d].lbq_next= 0x%08x\n", 214 __func__, i, 215 ha->rx_ring[i].lbq_next); 216 217 device_printf(ha->pci_dev, 218 "%s: rx_ring[%d].lbq_free= 0x%08x\n", 219 __func__, i, 220 ha->rx_ring[i].lbq_free); 221 222 device_printf(ha->pci_dev, 223 "%s: rx_ring[%d].lbq_in= 0x%08x\n", 224 __func__, i, 225 ha->rx_ring[i].lbq_in); 226 227 device_printf(ha->pci_dev, 228 "%s: rx_ring[%d].sbq_next= 0x%08x\n", 229 __func__, i, 230 ha->rx_ring[i].sbq_next); 231 232 device_printf(ha->pci_dev, 233 "%s: rx_ring[%d].sbq_free= 0x%08x\n", 234 __func__, i, 235 ha->rx_ring[i].sbq_free); 236 237 device_printf(ha->pci_dev, 238 "%s: rx_ring[%d].sbq_in= 0x%08x\n", 239 __func__, i, 240 ha->rx_ring[i].sbq_in); 241 } 242 243 device_printf(ha->pci_dev, "%s: err_m_getcl = 0x%08x\n", 244 __func__, ha->err_m_getcl); 245 device_printf(ha->pci_dev, "%s: err_m_getjcl = 0x%08x\n", 246 __func__, ha->err_m_getjcl); 247 device_printf(ha->pci_dev, 248 "%s: err_tx_dmamap_create = 0x%08x\n", 249 __func__, ha->err_tx_dmamap_create); 250 device_printf(ha->pci_dev, 251 "%s: err_tx_dmamap_load = 0x%08x\n", 252 __func__, ha->err_tx_dmamap_load); 253 device_printf(ha->pci_dev, 254 "%s: err_tx_defrag = 0x%08x\n", 255 __func__, ha->err_tx_defrag); 256 } 257 return (err); 258 } 259 260 static void 261 qls_add_sysctls(qla_host_t *ha) 262 { 263 device_t dev = ha->pci_dev; 264 265 SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), 266 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 267 OID_AUTO, "version", CTLFLAG_RD, 268 ver_str, 0, "Driver Version"); 269 270 qls_dbg_level = 0; 271 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 272 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 273 OID_AUTO, "debug", CTLFLAG_RW, 274 &qls_dbg_level, qls_dbg_level, "Debug Level"); 275 276 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 277 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 278 OID_AUTO, "drvr_stats", 279 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0, 280 qls_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics"); 281 282 return; 283 } 284 285 static void 286 qls_watchdog(void *arg) 287 { 288 qla_host_t *ha = arg; 289 if_t ifp; 290 291 ifp = ha->ifp; 292 293 if (ha->flags.qla_watchdog_exit) { 294 ha->qla_watchdog_exited = 1; 295 return; 296 } 297 ha->qla_watchdog_exited = 0; 298 299 if (!ha->flags.qla_watchdog_pause) { 300 if (ha->qla_initiate_recovery) { 301 ha->qla_watchdog_paused = 1; 302 ha->qla_initiate_recovery = 0; 303 ha->err_inject = 0; 304 taskqueue_enqueue(ha->err_tq, &ha->err_task); 305 306 } else if (!if_sendq_empty(ifp) && QL_RUNNING(ifp)) { 307 taskqueue_enqueue(ha->tx_tq, &ha->tx_task); 308 } 309 310 ha->qla_watchdog_paused = 0; 311 } else { 312 ha->qla_watchdog_paused = 1; 313 } 314 315 ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000; 316 callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, 317 qls_watchdog, ha); 318 319 return; 320 } 321 322 /* 323 * Name: qls_pci_attach 324 * Function: attaches the device to the operating system 325 */ 326 static int 327 qls_pci_attach(device_t dev) 328 { 329 qla_host_t *ha = NULL; 330 int i; 331 332 QL_DPRINT2((dev, "%s: enter\n", __func__)); 333 334 if ((ha = device_get_softc(dev)) == NULL) { 335 device_printf(dev, "cannot get softc\n"); 336 return (ENOMEM); 337 } 338 339 memset(ha, 0, sizeof (qla_host_t)); 340 341 if (pci_get_device(dev) != PCI_DEVICE_QLOGIC_8000) { 342 device_printf(dev, "device is not QLE8000\n"); 343 return (ENXIO); 344 } 345 346 ha->pci_func = pci_get_function(dev); 347 348 ha->pci_dev = dev; 349 350 pci_enable_busmaster(dev); 351 352 ha->reg_rid = PCIR_BAR(1); 353 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, 354 RF_ACTIVE); 355 356 if (ha->pci_reg == NULL) { 357 device_printf(dev, "unable to map any ports\n"); 358 goto qls_pci_attach_err; 359 } 360 361 ha->reg_rid1 = PCIR_BAR(3); 362 ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 363 &ha->reg_rid1, RF_ACTIVE); 364 365 if (ha->pci_reg1 == NULL) { 366 device_printf(dev, "unable to map any ports\n"); 367 goto qls_pci_attach_err; 368 } 369 370 mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); 371 mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF); 372 373 qls_add_sysctls(ha); 374 qls_hw_add_sysctls(ha); 375 376 ha->flags.lock_init = 1; 377 378 ha->msix_count = pci_msix_count(dev); 379 380 if (ha->msix_count < qls_get_msix_count(ha)) { 381 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, 382 ha->msix_count); 383 goto qls_pci_attach_err; 384 } 385 386 ha->msix_count = qls_get_msix_count(ha); 387 388 device_printf(dev, "\n%s: ha %p pci_func 0x%x msix_count 0x%x" 389 " pci_reg %p pci_reg1 %p\n", __func__, ha, 390 ha->pci_func, ha->msix_count, ha->pci_reg, ha->pci_reg1); 391 392 if (pci_alloc_msix(dev, &ha->msix_count)) { 393 device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__, 394 ha->msix_count); 395 ha->msix_count = 0; 396 goto qls_pci_attach_err; 397 } 398 399 for (i = 0; i < ha->num_rx_rings; i++) { 400 ha->irq_vec[i].cq_idx = i; 401 ha->irq_vec[i].ha = ha; 402 ha->irq_vec[i].irq_rid = 1 + i; 403 404 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 405 &ha->irq_vec[i].irq_rid, 406 (RF_ACTIVE | RF_SHAREABLE)); 407 408 if (ha->irq_vec[i].irq == NULL) { 409 device_printf(dev, "could not allocate interrupt\n"); 410 goto qls_pci_attach_err; 411 } 412 413 if (bus_setup_intr(dev, ha->irq_vec[i].irq, 414 (INTR_TYPE_NET | INTR_MPSAFE), NULL, qls_isr, 415 &ha->irq_vec[i], &ha->irq_vec[i].handle)) { 416 device_printf(dev, 417 "could not setup interrupt\n"); 418 goto qls_pci_attach_err; 419 } 420 } 421 422 qls_rd_nic_params(ha); 423 424 /* allocate parent dma tag */ 425 if (qls_alloc_parent_dma_tag(ha)) { 426 device_printf(dev, "%s: qls_alloc_parent_dma_tag failed\n", 427 __func__); 428 goto qls_pci_attach_err; 429 } 430 431 /* alloc all dma buffers */ 432 if (qls_alloc_dma(ha)) { 433 device_printf(dev, "%s: qls_alloc_dma failed\n", __func__); 434 goto qls_pci_attach_err; 435 } 436 437 /* create the o.s ethernet interface */ 438 qls_init_ifnet(dev, ha); 439 440 ha->flags.qla_watchdog_active = 1; 441 ha->flags.qla_watchdog_pause = 1; 442 443 TASK_INIT(&ha->tx_task, 0, qls_tx_done, ha); 444 ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT, 445 taskqueue_thread_enqueue, &ha->tx_tq); 446 taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq", 447 device_get_nameunit(ha->pci_dev)); 448 449 callout_init(&ha->tx_callout, 1); 450 ha->flags.qla_callout_init = 1; 451 452 /* create ioctl device interface */ 453 if (qls_make_cdev(ha)) { 454 device_printf(dev, "%s: qls_make_cdev failed\n", __func__); 455 goto qls_pci_attach_err; 456 } 457 458 callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, 459 qls_watchdog, ha); 460 461 TASK_INIT(&ha->err_task, 0, qls_error_recovery, ha); 462 ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT, 463 taskqueue_thread_enqueue, &ha->err_tq); 464 taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq", 465 device_get_nameunit(ha->pci_dev)); 466 467 QL_DPRINT2((dev, "%s: exit 0\n", __func__)); 468 return (0); 469 470 qls_pci_attach_err: 471 472 qls_release(ha); 473 474 QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__)); 475 return (ENXIO); 476 } 477 478 /* 479 * Name: qls_pci_detach 480 * Function: Unhooks the device from the operating system 481 */ 482 static int 483 qls_pci_detach(device_t dev) 484 { 485 qla_host_t *ha = NULL; 486 487 QL_DPRINT2((dev, "%s: enter\n", __func__)); 488 489 if ((ha = device_get_softc(dev)) == NULL) { 490 device_printf(dev, "cannot get softc\n"); 491 return (ENOMEM); 492 } 493 494 (void)QLA_LOCK(ha, __func__, 0); 495 qls_stop(ha); 496 QLA_UNLOCK(ha, __func__); 497 498 qls_release(ha); 499 500 QL_DPRINT2((dev, "%s: exit\n", __func__)); 501 502 return (0); 503 } 504 505 /* 506 * Name: qls_release 507 * Function: Releases the resources allocated for the device 508 */ 509 static void 510 qls_release(qla_host_t *ha) 511 { 512 device_t dev; 513 int i; 514 515 dev = ha->pci_dev; 516 517 if (ha->err_tq) { 518 taskqueue_drain(ha->err_tq, &ha->err_task); 519 taskqueue_free(ha->err_tq); 520 } 521 522 if (ha->tx_tq) { 523 taskqueue_drain(ha->tx_tq, &ha->tx_task); 524 taskqueue_free(ha->tx_tq); 525 } 526 527 qls_del_cdev(ha); 528 529 if (ha->flags.qla_watchdog_active) { 530 ha->flags.qla_watchdog_exit = 1; 531 532 while (ha->qla_watchdog_exited == 0) 533 qls_mdelay(__func__, 1); 534 } 535 536 if (ha->flags.qla_callout_init) 537 callout_stop(&ha->tx_callout); 538 539 if (ha->ifp != NULL) 540 ether_ifdetach(ha->ifp); 541 542 qls_free_dma(ha); 543 qls_free_parent_dma_tag(ha); 544 545 for (i = 0; i < ha->num_rx_rings; i++) { 546 if (ha->irq_vec[i].handle) { 547 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, 548 ha->irq_vec[i].handle); 549 } 550 551 if (ha->irq_vec[i].irq) { 552 (void)bus_release_resource(dev, SYS_RES_IRQ, 553 ha->irq_vec[i].irq_rid, 554 ha->irq_vec[i].irq); 555 } 556 } 557 558 if (ha->msix_count) 559 pci_release_msi(dev); 560 561 if (ha->flags.lock_init) { 562 mtx_destroy(&ha->tx_lock); 563 mtx_destroy(&ha->hw_lock); 564 } 565 566 if (ha->pci_reg) 567 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, 568 ha->pci_reg); 569 570 if (ha->pci_reg1) 571 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1, 572 ha->pci_reg1); 573 } 574 575 /* 576 * DMA Related Functions 577 */ 578 579 static void 580 qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 581 { 582 *((bus_addr_t *)arg) = 0; 583 584 if (error) { 585 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 586 return; 587 } 588 589 *((bus_addr_t *)arg) = segs[0].ds_addr; 590 591 return; 592 } 593 594 int 595 qls_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) 596 { 597 int ret = 0; 598 device_t dev; 599 bus_addr_t b_addr; 600 601 dev = ha->pci_dev; 602 603 QL_DPRINT2((dev, "%s: enter\n", __func__)); 604 605 ret = bus_dma_tag_create( 606 ha->parent_tag,/* parent */ 607 dma_buf->alignment, 608 ((bus_size_t)(1ULL << 32)),/* boundary */ 609 BUS_SPACE_MAXADDR, /* lowaddr */ 610 BUS_SPACE_MAXADDR, /* highaddr */ 611 NULL, NULL, /* filter, filterarg */ 612 dma_buf->size, /* maxsize */ 613 1, /* nsegments */ 614 dma_buf->size, /* maxsegsize */ 615 0, /* flags */ 616 NULL, NULL, /* lockfunc, lockarg */ 617 &dma_buf->dma_tag); 618 619 if (ret) { 620 device_printf(dev, "%s: could not create dma tag\n", __func__); 621 goto qls_alloc_dmabuf_exit; 622 } 623 ret = bus_dmamem_alloc(dma_buf->dma_tag, 624 (void **)&dma_buf->dma_b, 625 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 626 &dma_buf->dma_map); 627 if (ret) { 628 bus_dma_tag_destroy(dma_buf->dma_tag); 629 device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__); 630 goto qls_alloc_dmabuf_exit; 631 } 632 633 ret = bus_dmamap_load(dma_buf->dma_tag, 634 dma_buf->dma_map, 635 dma_buf->dma_b, 636 dma_buf->size, 637 qls_dmamap_callback, 638 &b_addr, BUS_DMA_NOWAIT); 639 640 if (ret || !b_addr) { 641 bus_dma_tag_destroy(dma_buf->dma_tag); 642 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 643 dma_buf->dma_map); 644 ret = -1; 645 goto qls_alloc_dmabuf_exit; 646 } 647 648 dma_buf->dma_addr = b_addr; 649 650 qls_alloc_dmabuf_exit: 651 QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n", 652 __func__, ret, (void *)dma_buf->dma_tag, 653 (void *)dma_buf->dma_map, (void *)dma_buf->dma_b, 654 dma_buf->size)); 655 656 return ret; 657 } 658 659 void 660 qls_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) 661 { 662 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 663 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 664 bus_dma_tag_destroy(dma_buf->dma_tag); 665 } 666 667 static int 668 qls_alloc_parent_dma_tag(qla_host_t *ha) 669 { 670 int ret; 671 device_t dev; 672 673 dev = ha->pci_dev; 674 675 /* 676 * Allocate parent DMA Tag 677 */ 678 ret = bus_dma_tag_create( 679 bus_get_dma_tag(dev), /* parent */ 680 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 681 BUS_SPACE_MAXADDR, /* lowaddr */ 682 BUS_SPACE_MAXADDR, /* highaddr */ 683 NULL, NULL, /* filter, filterarg */ 684 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 685 0, /* nsegments */ 686 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 687 0, /* flags */ 688 NULL, NULL, /* lockfunc, lockarg */ 689 &ha->parent_tag); 690 691 if (ret) { 692 device_printf(dev, "%s: could not create parent dma tag\n", 693 __func__); 694 return (-1); 695 } 696 697 ha->flags.parent_tag = 1; 698 699 return (0); 700 } 701 702 static void 703 qls_free_parent_dma_tag(qla_host_t *ha) 704 { 705 if (ha->flags.parent_tag) { 706 bus_dma_tag_destroy(ha->parent_tag); 707 ha->flags.parent_tag = 0; 708 } 709 } 710 711 /* 712 * Name: qls_init_ifnet 713 * Function: Creates the Network Device Interface and Registers it with the O.S 714 */ 715 716 static void 717 qls_init_ifnet(device_t dev, qla_host_t *ha) 718 { 719 if_t ifp; 720 721 QL_DPRINT2((dev, "%s: enter\n", __func__)); 722 723 ifp = ha->ifp = if_alloc(IFT_ETHER); 724 725 if (ifp == NULL) 726 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 727 728 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 729 if_setbaudrate(ifp, IF_Gbps(10)); 730 if_setinitfn(ifp, qls_init); 731 if_setsoftc(ifp, ha); 732 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 733 if_setioctlfn(ifp, qls_ioctl); 734 if_setstartfn(ifp, qls_start); 735 736 if_setsendqlen(ifp, qls_get_ifq_snd_maxlen(ha)); 737 if_setsendqready(ifp); 738 739 ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN; 740 if (ha->max_frame_size <= MCLBYTES) { 741 ha->msize = MCLBYTES; 742 } else if (ha->max_frame_size <= MJUMPAGESIZE) { 743 ha->msize = MJUMPAGESIZE; 744 } else 745 ha->msize = MJUM9BYTES; 746 747 ether_ifattach(ifp, qls_get_mac_addr(ha)); 748 749 if_setcapabilities(ifp, IFCAP_JUMBO_MTU); 750 751 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0); 752 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0); 753 754 if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0); 755 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0); 756 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0); 757 if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0); 758 759 if_setcapenable(ifp, if_getcapabilities(ifp)); 760 761 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 762 763 ifmedia_init(&ha->media, IFM_IMASK, qls_media_change, qls_media_status); 764 765 ifmedia_add(&ha->media, (IFM_ETHER | qls_get_optics(ha) | IFM_FDX), 0, 766 NULL); 767 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 768 769 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 770 771 QL_DPRINT2((dev, "%s: exit\n", __func__)); 772 773 return; 774 } 775 776 static void 777 qls_init_locked(qla_host_t *ha) 778 { 779 if_t ifp = ha->ifp; 780 781 qls_stop(ha); 782 783 qls_flush_xmt_bufs(ha); 784 785 if (qls_alloc_rcv_bufs(ha) != 0) 786 return; 787 788 if (qls_config_lro(ha)) 789 return; 790 791 bcopy(if_getlladdr(ha->ifp), ha->mac_addr, ETHER_ADDR_LEN); 792 793 if_sethwassist(ifp, CSUM_IP); 794 if_sethwassistbits(ifp, CSUM_TCP, 0); 795 if_sethwassistbits(ifp, CSUM_UDP, 0); 796 if_sethwassistbits(ifp, CSUM_TSO, 0); 797 798 if (qls_init_hw_if(ha) == 0) { 799 ifp = ha->ifp; 800 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 801 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 802 ha->flags.qla_watchdog_pause = 0; 803 } 804 805 return; 806 } 807 808 static void 809 qls_init(void *arg) 810 { 811 qla_host_t *ha; 812 813 ha = (qla_host_t *)arg; 814 815 QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); 816 817 (void)QLA_LOCK(ha, __func__, 0); 818 qls_init_locked(ha); 819 QLA_UNLOCK(ha, __func__); 820 821 QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__)); 822 } 823 824 static u_int 825 qls_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt) 826 { 827 uint8_t *mta = arg; 828 829 if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS) 830 return (0); 831 832 bcopy(LLADDR(sdl), &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN); 833 834 return (1); 835 } 836 837 static void 838 qls_set_multi(qla_host_t *ha, uint32_t add_multi) 839 { 840 uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN]; 841 if_t ifp = ha->ifp; 842 int mcnt; 843 844 mcnt = if_foreach_llmaddr(ifp, qls_copy_maddr, mta); 845 846 if (QLA_LOCK(ha, __func__, 1) == 0) { 847 qls_hw_set_multi(ha, mta, mcnt, add_multi); 848 QLA_UNLOCK(ha, __func__); 849 } 850 851 return; 852 } 853 854 static int 855 qls_ioctl(if_t ifp, u_long cmd, caddr_t data) 856 { 857 int ret = 0; 858 struct ifreq *ifr = (struct ifreq *)data; 859 #ifdef INET 860 struct ifaddr *ifa = (struct ifaddr *)data; 861 #endif 862 qla_host_t *ha; 863 864 ha = (qla_host_t *)if_getsoftc(ifp); 865 866 switch (cmd) { 867 case SIOCSIFADDR: 868 QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n", 869 __func__, cmd)); 870 871 #ifdef INET 872 if (ifa->ifa_addr->sa_family == AF_INET) { 873 if_setflagbits(ifp, IFF_UP, 0); 874 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 875 (void)QLA_LOCK(ha, __func__, 0); 876 qls_init_locked(ha); 877 QLA_UNLOCK(ha, __func__); 878 } 879 QL_DPRINT4((ha->pci_dev, 880 "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 881 __func__, cmd, 882 ntohl(IA_SIN(ifa)->sin_addr.s_addr))); 883 884 arp_ifinit(ifp, ifa); 885 break; 886 } 887 #endif 888 ether_ioctl(ifp, cmd, data); 889 break; 890 891 case SIOCSIFMTU: 892 QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n", 893 __func__, cmd)); 894 895 if (ifr->ifr_mtu > QLA_MAX_MTU) { 896 ret = EINVAL; 897 } else { 898 (void) QLA_LOCK(ha, __func__, 0); 899 900 if_setmtu(ifp, ifr->ifr_mtu); 901 ha->max_frame_size = 902 if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN; 903 904 QLA_UNLOCK(ha, __func__); 905 906 if (ret) 907 ret = EINVAL; 908 } 909 910 break; 911 912 case SIOCSIFFLAGS: 913 QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n", 914 __func__, cmd)); 915 916 (void)QLA_LOCK(ha, __func__, 0); 917 918 if (if_getflags(ifp) & IFF_UP) { 919 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 920 if ((if_getflags(ifp) ^ ha->if_flags) & 921 IFF_PROMISC) { 922 ret = qls_set_promisc(ha); 923 } else if ((if_getflags(ifp) ^ ha->if_flags) & 924 IFF_ALLMULTI) { 925 ret = qls_set_allmulti(ha); 926 } 927 } else { 928 ha->max_frame_size = if_getmtu(ifp) + 929 ETHER_HDR_LEN + ETHER_CRC_LEN; 930 qls_init_locked(ha); 931 } 932 } else { 933 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 934 qls_stop(ha); 935 ha->if_flags = if_getflags(ifp); 936 } 937 938 QLA_UNLOCK(ha, __func__); 939 break; 940 941 case SIOCADDMULTI: 942 QL_DPRINT4((ha->pci_dev, 943 "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd)); 944 945 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 946 qls_set_multi(ha, 1); 947 } 948 break; 949 950 case SIOCDELMULTI: 951 QL_DPRINT4((ha->pci_dev, 952 "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd)); 953 954 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 955 qls_set_multi(ha, 0); 956 } 957 break; 958 959 case SIOCSIFMEDIA: 960 case SIOCGIFMEDIA: 961 QL_DPRINT4((ha->pci_dev, 962 "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", 963 __func__, cmd)); 964 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); 965 break; 966 967 case SIOCSIFCAP: 968 { 969 int mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 970 971 QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n", 972 __func__, cmd)); 973 974 if (mask & IFCAP_HWCSUM) 975 if_togglecapenable(ifp, IFCAP_HWCSUM); 976 if (mask & IFCAP_TSO4) 977 if_togglecapenable(ifp, IFCAP_TSO4); 978 if (mask & IFCAP_VLAN_HWTAGGING) 979 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); 980 if (mask & IFCAP_VLAN_HWTSO) 981 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); 982 983 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) 984 qls_init(ha); 985 986 VLAN_CAPABILITIES(ifp); 987 break; 988 } 989 990 default: 991 QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n", 992 __func__, cmd)); 993 ret = ether_ioctl(ifp, cmd, data); 994 break; 995 } 996 997 return (ret); 998 } 999 1000 static int 1001 qls_media_change(if_t ifp) 1002 { 1003 qla_host_t *ha; 1004 struct ifmedia *ifm; 1005 int ret = 0; 1006 1007 ha = (qla_host_t *)if_getsoftc(ifp); 1008 1009 QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); 1010 1011 ifm = &ha->media; 1012 1013 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1014 ret = EINVAL; 1015 1016 QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__)); 1017 1018 return (ret); 1019 } 1020 1021 static void 1022 qls_media_status(if_t ifp, struct ifmediareq *ifmr) 1023 { 1024 qla_host_t *ha; 1025 1026 ha = (qla_host_t *)if_getsoftc(ifp); 1027 1028 QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); 1029 1030 ifmr->ifm_status = IFM_AVALID; 1031 ifmr->ifm_active = IFM_ETHER; 1032 1033 qls_update_link_state(ha); 1034 if (ha->link_up) { 1035 ifmr->ifm_status |= IFM_ACTIVE; 1036 ifmr->ifm_active |= (IFM_FDX | qls_get_optics(ha)); 1037 } 1038 1039 QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,\ 1040 (ha->link_up ? "link_up" : "link_down"))); 1041 1042 return; 1043 } 1044 1045 static void 1046 qls_start(if_t ifp) 1047 { 1048 int i, ret = 0; 1049 struct mbuf *m_head; 1050 qla_host_t *ha = (qla_host_t *)if_getsoftc(ifp); 1051 1052 QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__)); 1053 1054 if (!mtx_trylock(&ha->tx_lock)) { 1055 QL_DPRINT8((ha->pci_dev, 1056 "%s: mtx_trylock(&ha->tx_lock) failed\n", __func__)); 1057 return; 1058 } 1059 1060 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 1061 IFF_DRV_RUNNING) { 1062 for (i = 0; i < ha->num_tx_rings; i++) { 1063 ret |= qls_hw_tx_done(ha, i); 1064 } 1065 1066 if (ret == 0) 1067 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1068 } 1069 1070 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1071 IFF_DRV_RUNNING) { 1072 QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__)); 1073 QLA_TX_UNLOCK(ha); 1074 return; 1075 } 1076 1077 if (!ha->link_up) { 1078 qls_update_link_state(ha); 1079 if (!ha->link_up) { 1080 QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__)); 1081 QLA_TX_UNLOCK(ha); 1082 return; 1083 } 1084 } 1085 1086 while (!if_sendq_empty(ifp)) { 1087 m_head = if_dequeue(ifp); 1088 1089 if (m_head == NULL) { 1090 QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n", 1091 __func__)); 1092 break; 1093 } 1094 1095 if (qls_send(ha, &m_head)) { 1096 if (m_head == NULL) 1097 break; 1098 QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__)); 1099 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1100 if_sendq_prepend(ifp, m_head); 1101 break; 1102 } 1103 /* Send a copy of the frame to the BPF listener */ 1104 ETHER_BPF_MTAP(ifp, m_head); 1105 } 1106 1107 QLA_TX_UNLOCK(ha); 1108 QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__)); 1109 return; 1110 } 1111 1112 static int 1113 qls_send(qla_host_t *ha, struct mbuf **m_headp) 1114 { 1115 bus_dma_segment_t segs[QLA_MAX_SEGMENTS]; 1116 bus_dmamap_t map; 1117 int nsegs; 1118 int ret = -1; 1119 uint32_t tx_idx; 1120 struct mbuf *m_head = *m_headp; 1121 uint32_t txr_idx = 0; 1122 1123 QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__)); 1124 1125 /* check if flowid is set */ 1126 if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE) 1127 txr_idx = m_head->m_pkthdr.flowid & (ha->num_tx_rings - 1); 1128 1129 tx_idx = ha->tx_ring[txr_idx].txr_next; 1130 1131 map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map; 1132 1133 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 1134 BUS_DMA_NOWAIT); 1135 1136 if (ret == EFBIG) { 1137 struct mbuf *m; 1138 1139 QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__, 1140 m_head->m_pkthdr.len)); 1141 1142 m = m_defrag(m_head, M_NOWAIT); 1143 if (m == NULL) { 1144 ha->err_tx_defrag++; 1145 m_freem(m_head); 1146 *m_headp = NULL; 1147 device_printf(ha->pci_dev, 1148 "%s: m_defrag() = NULL [%d]\n", 1149 __func__, ret); 1150 return (ENOBUFS); 1151 } 1152 m_head = m; 1153 *m_headp = m_head; 1154 1155 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 1156 segs, &nsegs, BUS_DMA_NOWAIT))) { 1157 ha->err_tx_dmamap_load++; 1158 1159 device_printf(ha->pci_dev, 1160 "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n", 1161 __func__, ret, m_head->m_pkthdr.len); 1162 1163 if (ret != ENOMEM) { 1164 m_freem(m_head); 1165 *m_headp = NULL; 1166 } 1167 return (ret); 1168 } 1169 1170 } else if (ret) { 1171 ha->err_tx_dmamap_load++; 1172 1173 device_printf(ha->pci_dev, 1174 "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n", 1175 __func__, ret, m_head->m_pkthdr.len); 1176 1177 if (ret != ENOMEM) { 1178 m_freem(m_head); 1179 *m_headp = NULL; 1180 } 1181 return (ret); 1182 } 1183 1184 QL_ASSERT(ha, (nsegs != 0), ("qls_send: empty packet")); 1185 1186 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 1187 1188 if (!(ret = qls_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx))) { 1189 ha->tx_ring[txr_idx].count++; 1190 ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head; 1191 ha->tx_ring[txr_idx].tx_buf[tx_idx].map = map; 1192 } else { 1193 if (ret == EINVAL) { 1194 if (m_head) 1195 m_freem(m_head); 1196 *m_headp = NULL; 1197 } 1198 } 1199 1200 QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__)); 1201 return (ret); 1202 } 1203 1204 static void 1205 qls_stop(qla_host_t *ha) 1206 { 1207 if_t ifp = ha->ifp; 1208 1209 if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING)); 1210 1211 ha->flags.qla_watchdog_pause = 1; 1212 1213 while (!ha->qla_watchdog_paused) 1214 qls_mdelay(__func__, 1); 1215 1216 qls_del_hw_if(ha); 1217 1218 qls_free_lro(ha); 1219 1220 qls_flush_xmt_bufs(ha); 1221 qls_free_rcv_bufs(ha); 1222 1223 return; 1224 } 1225 1226 /* 1227 * Buffer Management Functions for Transmit and Receive Rings 1228 */ 1229 /* 1230 * Release mbuf after it sent on the wire 1231 */ 1232 static void 1233 qls_flush_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb) 1234 { 1235 QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); 1236 1237 if (txb->m_head) { 1238 bus_dmamap_unload(ha->tx_tag, txb->map); 1239 1240 m_freem(txb->m_head); 1241 txb->m_head = NULL; 1242 } 1243 1244 QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__)); 1245 } 1246 1247 static void 1248 qls_flush_xmt_bufs(qla_host_t *ha) 1249 { 1250 int i, j; 1251 1252 for (j = 0; j < ha->num_tx_rings; j++) { 1253 for (i = 0; i < NUM_TX_DESCRIPTORS; i++) 1254 qls_flush_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]); 1255 } 1256 1257 return; 1258 } 1259 1260 static int 1261 qls_alloc_rcv_mbufs(qla_host_t *ha, int r) 1262 { 1263 int i, j, ret = 0; 1264 qla_rx_buf_t *rxb; 1265 qla_rx_ring_t *rx_ring; 1266 volatile q81_bq_addr_e_t *sbq_e; 1267 1268 rx_ring = &ha->rx_ring[r]; 1269 1270 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1271 rxb = &rx_ring->rx_buf[i]; 1272 1273 ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map); 1274 1275 if (ret) { 1276 device_printf(ha->pci_dev, 1277 "%s: dmamap[%d, %d] failed\n", __func__, r, i); 1278 1279 for (j = 0; j < i; j++) { 1280 rxb = &rx_ring->rx_buf[j]; 1281 bus_dmamap_destroy(ha->rx_tag, rxb->map); 1282 } 1283 goto qls_alloc_rcv_mbufs_err; 1284 } 1285 } 1286 1287 rx_ring = &ha->rx_ring[r]; 1288 1289 sbq_e = rx_ring->sbq_vaddr; 1290 1291 rxb = &rx_ring->rx_buf[0]; 1292 1293 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1294 if (!(ret = qls_get_mbuf(ha, rxb, NULL))) { 1295 /* 1296 * set the physical address in the 1297 * corresponding descriptor entry in the 1298 * receive ring/queue for the hba 1299 */ 1300 1301 sbq_e->addr_lo = rxb->paddr & 0xFFFFFFFF; 1302 sbq_e->addr_hi = (rxb->paddr >> 32) & 0xFFFFFFFF; 1303 1304 } else { 1305 device_printf(ha->pci_dev, 1306 "%s: qls_get_mbuf [%d, %d] failed\n", 1307 __func__, r, i); 1308 bus_dmamap_destroy(ha->rx_tag, rxb->map); 1309 goto qls_alloc_rcv_mbufs_err; 1310 } 1311 1312 rxb++; 1313 sbq_e++; 1314 } 1315 return 0; 1316 1317 qls_alloc_rcv_mbufs_err: 1318 return (-1); 1319 } 1320 1321 static void 1322 qls_free_rcv_bufs(qla_host_t *ha) 1323 { 1324 int i, r; 1325 qla_rx_buf_t *rxb; 1326 qla_rx_ring_t *rxr; 1327 1328 for (r = 0; r < ha->num_rx_rings; r++) { 1329 rxr = &ha->rx_ring[r]; 1330 1331 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1332 rxb = &rxr->rx_buf[i]; 1333 1334 if (rxb->m_head != NULL) { 1335 bus_dmamap_unload(ha->rx_tag, rxb->map); 1336 bus_dmamap_destroy(ha->rx_tag, rxb->map); 1337 m_freem(rxb->m_head); 1338 } 1339 } 1340 bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS)); 1341 } 1342 return; 1343 } 1344 1345 static int 1346 qls_alloc_rcv_bufs(qla_host_t *ha) 1347 { 1348 int r, ret = 0; 1349 qla_rx_ring_t *rxr; 1350 1351 for (r = 0; r < ha->num_rx_rings; r++) { 1352 rxr = &ha->rx_ring[r]; 1353 bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS)); 1354 } 1355 1356 for (r = 0; r < ha->num_rx_rings; r++) { 1357 ret = qls_alloc_rcv_mbufs(ha, r); 1358 1359 if (ret) 1360 qls_free_rcv_bufs(ha); 1361 } 1362 1363 return (ret); 1364 } 1365 1366 int 1367 qls_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp) 1368 { 1369 struct mbuf *mp = nmp; 1370 int ret = 0; 1371 uint32_t offset; 1372 bus_dma_segment_t segs[1]; 1373 int nsegs; 1374 1375 QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); 1376 1377 if (mp == NULL) { 1378 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ha->msize); 1379 1380 if (mp == NULL) { 1381 if (ha->msize == MCLBYTES) 1382 ha->err_m_getcl++; 1383 else 1384 ha->err_m_getjcl++; 1385 1386 ret = ENOBUFS; 1387 device_printf(ha->pci_dev, 1388 "%s: m_getcl failed\n", __func__); 1389 goto exit_qls_get_mbuf; 1390 } 1391 mp->m_len = mp->m_pkthdr.len = ha->msize; 1392 } else { 1393 mp->m_len = mp->m_pkthdr.len = ha->msize; 1394 mp->m_data = mp->m_ext.ext_buf; 1395 mp->m_next = NULL; 1396 } 1397 1398 /* align the receive buffers to 8 byte boundary */ 1399 offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL); 1400 if (offset) { 1401 offset = 8 - offset; 1402 m_adj(mp, offset); 1403 } 1404 1405 /* 1406 * Using memory from the mbuf cluster pool, invoke the bus_dma 1407 * machinery to arrange the memory mapping. 1408 */ 1409 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map, 1410 mp, segs, &nsegs, BUS_DMA_NOWAIT); 1411 rxb->paddr = segs[0].ds_addr; 1412 1413 if (ret || !rxb->paddr || (nsegs != 1)) { 1414 m_freem(mp); 1415 rxb->m_head = NULL; 1416 device_printf(ha->pci_dev, 1417 "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 1418 __func__, ret, (long long unsigned int)rxb->paddr, 1419 nsegs); 1420 ret = -1; 1421 goto exit_qls_get_mbuf; 1422 } 1423 rxb->m_head = mp; 1424 bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD); 1425 1426 exit_qls_get_mbuf: 1427 QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret)); 1428 return (ret); 1429 } 1430 1431 static void 1432 qls_tx_done(void *context, int pending) 1433 { 1434 qla_host_t *ha = context; 1435 if_t ifp; 1436 1437 ifp = ha->ifp; 1438 1439 if (!ifp) 1440 return; 1441 1442 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 1443 QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__)); 1444 return; 1445 } 1446 1447 qls_start(ha->ifp); 1448 return; 1449 } 1450 1451 static int 1452 qls_config_lro(qla_host_t *ha) 1453 { 1454 #if defined(INET) || defined(INET6) 1455 int i; 1456 struct lro_ctrl *lro; 1457 1458 for (i = 0; i < ha->num_rx_rings; i++) { 1459 lro = &ha->rx_ring[i].lro; 1460 if (tcp_lro_init(lro)) { 1461 device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n", 1462 __func__); 1463 return (-1); 1464 } 1465 lro->ifp = ha->ifp; 1466 } 1467 ha->flags.lro_init = 1; 1468 1469 QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__)); 1470 #endif 1471 return (0); 1472 } 1473 1474 static void 1475 qls_free_lro(qla_host_t *ha) 1476 { 1477 #if defined(INET) || defined(INET6) 1478 int i; 1479 struct lro_ctrl *lro; 1480 1481 if (!ha->flags.lro_init) 1482 return; 1483 1484 for (i = 0; i < ha->num_rx_rings; i++) { 1485 lro = &ha->rx_ring[i].lro; 1486 tcp_lro_free(lro); 1487 } 1488 ha->flags.lro_init = 0; 1489 #endif 1490 } 1491 1492 static void 1493 qls_error_recovery(void *context, int pending) 1494 { 1495 qla_host_t *ha = context; 1496 1497 qls_init(ha); 1498 1499 return; 1500 } 1501