1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2013-2014 Qlogic Corporation 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 18 * and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 21 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 * POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 /* 31 * File: qls_os.c 32 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 33 */ 34 35 #include <sys/cdefs.h> 36 #include "qls_os.h" 37 #include "qls_hw.h" 38 #include "qls_def.h" 39 #include "qls_inline.h" 40 #include "qls_ver.h" 41 #include "qls_glbl.h" 42 #include "qls_dbg.h" 43 #include <sys/smp.h> 44 45 /* 46 * Some PCI Configuration Space Related Defines 47 */ 48 49 #ifndef PCI_VENDOR_QLOGIC 50 #define PCI_VENDOR_QLOGIC 0x1077 51 #endif 52 53 #ifndef PCI_DEVICE_QLOGIC_8000 54 #define PCI_DEVICE_QLOGIC_8000 0x8000 55 #endif 56 57 #define PCI_QLOGIC_DEV8000 \ 58 ((PCI_DEVICE_QLOGIC_8000 << 16) | PCI_VENDOR_QLOGIC) 59 60 /* 61 * static functions 62 */ 63 static int qls_alloc_parent_dma_tag(qla_host_t *ha); 64 static void qls_free_parent_dma_tag(qla_host_t *ha); 65 66 static void qls_flush_xmt_bufs(qla_host_t *ha); 67 68 static int qls_alloc_rcv_bufs(qla_host_t *ha); 69 static void qls_free_rcv_bufs(qla_host_t *ha); 70 71 static void qls_init_ifnet(device_t dev, qla_host_t *ha); 72 static void qls_release(qla_host_t *ha); 73 static void qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, 74 int error); 75 static void qls_stop(qla_host_t *ha); 76 static int qls_send(qla_host_t *ha, struct mbuf **m_headp); 77 static void qls_tx_done(void *context, int pending); 78 79 static int qls_config_lro(qla_host_t *ha); 80 static void qls_free_lro(qla_host_t *ha); 81 82 static void qls_error_recovery(void *context, int pending); 83 84 /* 85 * Hooks to the Operating Systems 86 */ 87 static int qls_pci_probe (device_t); 88 static int qls_pci_attach (device_t); 89 static int qls_pci_detach (device_t); 90 91 static void qls_start(if_t ifp); 92 static void qls_init(void *arg); 93 static int qls_ioctl(if_t ifp, u_long cmd, caddr_t data); 94 static int qls_media_change(if_t ifp); 95 static void qls_media_status(if_t ifp, struct ifmediareq *ifmr); 96 97 static device_method_t qla_pci_methods[] = { 98 /* Device interface */ 99 DEVMETHOD(device_probe, qls_pci_probe), 100 DEVMETHOD(device_attach, qls_pci_attach), 101 DEVMETHOD(device_detach, qls_pci_detach), 102 { 0, 0 } 103 }; 104 105 static driver_t qla_pci_driver = { 106 "ql", qla_pci_methods, sizeof (qla_host_t), 107 }; 108 109 DRIVER_MODULE(qla8000, pci, qla_pci_driver, 0, 0); 110 111 MODULE_DEPEND(qla8000, pci, 1, 1, 1); 112 MODULE_DEPEND(qla8000, ether, 1, 1, 1); 113 114 MALLOC_DEFINE(M_QLA8000BUF, "qla8000buf", "Buffers for qla8000 driver"); 115 116 static char dev_str[64]; 117 static char ver_str[64]; 118 119 /* 120 * Name: qls_pci_probe 121 * Function: Validate the PCI device to be a QLA80XX device 122 */ 123 static int 124 qls_pci_probe(device_t dev) 125 { 126 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 127 case PCI_QLOGIC_DEV8000: 128 snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d", 129 "Qlogic ISP 8000 PCI CNA Adapter-Ethernet Function", 130 QLA_VERSION_MAJOR, QLA_VERSION_MINOR, 131 QLA_VERSION_BUILD); 132 snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d", 133 QLA_VERSION_MAJOR, QLA_VERSION_MINOR, 134 QLA_VERSION_BUILD); 135 device_set_desc(dev, dev_str); 136 break; 137 default: 138 return (ENXIO); 139 } 140 141 if (bootverbose) 142 printf("%s: %s\n ", __func__, dev_str); 143 144 return (BUS_PROBE_DEFAULT); 145 } 146 147 static int 148 qls_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS) 149 { 150 int err = 0, ret; 151 qla_host_t *ha; 152 uint32_t i; 153 154 err = sysctl_handle_int(oidp, &ret, 0, req); 155 156 if (err || !req->newptr) 157 return (err); 158 159 if (ret == 1) { 160 ha = (qla_host_t *)arg1; 161 162 for (i = 0; i < ha->num_tx_rings; i++) { 163 device_printf(ha->pci_dev, 164 "%s: tx_ring[%d].tx_frames= %p\n", 165 __func__, i, 166 (void *)ha->tx_ring[i].tx_frames); 167 168 device_printf(ha->pci_dev, 169 "%s: tx_ring[%d].tx_tso_frames= %p\n", 170 __func__, i, 171 (void *)ha->tx_ring[i].tx_tso_frames); 172 173 device_printf(ha->pci_dev, 174 "%s: tx_ring[%d].tx_vlan_frames= %p\n", 175 __func__, i, 176 (void *)ha->tx_ring[i].tx_vlan_frames); 177 178 device_printf(ha->pci_dev, 179 "%s: tx_ring[%d].txr_free= 0x%08x\n", 180 __func__, i, 181 ha->tx_ring[i].txr_free); 182 183 device_printf(ha->pci_dev, 184 "%s: tx_ring[%d].txr_next= 0x%08x\n", 185 __func__, i, 186 ha->tx_ring[i].txr_next); 187 188 device_printf(ha->pci_dev, 189 "%s: tx_ring[%d].txr_done= 0x%08x\n", 190 __func__, i, 191 ha->tx_ring[i].txr_done); 192 193 device_printf(ha->pci_dev, 194 "%s: tx_ring[%d].txr_cons_idx= 0x%08x\n", 195 __func__, i, 196 *(ha->tx_ring[i].txr_cons_vaddr)); 197 } 198 199 for (i = 0; i < ha->num_rx_rings; i++) { 200 device_printf(ha->pci_dev, 201 "%s: rx_ring[%d].rx_int= %p\n", 202 __func__, i, 203 (void *)ha->rx_ring[i].rx_int); 204 205 device_printf(ha->pci_dev, 206 "%s: rx_ring[%d].rss_int= %p\n", 207 __func__, i, 208 (void *)ha->rx_ring[i].rss_int); 209 210 device_printf(ha->pci_dev, 211 "%s: rx_ring[%d].lbq_next= 0x%08x\n", 212 __func__, i, 213 ha->rx_ring[i].lbq_next); 214 215 device_printf(ha->pci_dev, 216 "%s: rx_ring[%d].lbq_free= 0x%08x\n", 217 __func__, i, 218 ha->rx_ring[i].lbq_free); 219 220 device_printf(ha->pci_dev, 221 "%s: rx_ring[%d].lbq_in= 0x%08x\n", 222 __func__, i, 223 ha->rx_ring[i].lbq_in); 224 225 device_printf(ha->pci_dev, 226 "%s: rx_ring[%d].sbq_next= 0x%08x\n", 227 __func__, i, 228 ha->rx_ring[i].sbq_next); 229 230 device_printf(ha->pci_dev, 231 "%s: rx_ring[%d].sbq_free= 0x%08x\n", 232 __func__, i, 233 ha->rx_ring[i].sbq_free); 234 235 device_printf(ha->pci_dev, 236 "%s: rx_ring[%d].sbq_in= 0x%08x\n", 237 __func__, i, 238 ha->rx_ring[i].sbq_in); 239 } 240 241 device_printf(ha->pci_dev, "%s: err_m_getcl = 0x%08x\n", 242 __func__, ha->err_m_getcl); 243 device_printf(ha->pci_dev, "%s: err_m_getjcl = 0x%08x\n", 244 __func__, ha->err_m_getjcl); 245 device_printf(ha->pci_dev, 246 "%s: err_tx_dmamap_create = 0x%08x\n", 247 __func__, ha->err_tx_dmamap_create); 248 device_printf(ha->pci_dev, 249 "%s: err_tx_dmamap_load = 0x%08x\n", 250 __func__, ha->err_tx_dmamap_load); 251 device_printf(ha->pci_dev, 252 "%s: err_tx_defrag = 0x%08x\n", 253 __func__, ha->err_tx_defrag); 254 } 255 return (err); 256 } 257 258 static void 259 qls_add_sysctls(qla_host_t *ha) 260 { 261 device_t dev = ha->pci_dev; 262 263 SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), 264 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 265 OID_AUTO, "version", CTLFLAG_RD, 266 ver_str, 0, "Driver Version"); 267 268 qls_dbg_level = 0; 269 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 270 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 271 OID_AUTO, "debug", CTLFLAG_RW, 272 &qls_dbg_level, qls_dbg_level, "Debug Level"); 273 274 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 275 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 276 OID_AUTO, "drvr_stats", 277 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0, 278 qls_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics"); 279 280 return; 281 } 282 283 static void 284 qls_watchdog(void *arg) 285 { 286 qla_host_t *ha = arg; 287 if_t ifp; 288 289 ifp = ha->ifp; 290 291 if (ha->flags.qla_watchdog_exit) { 292 ha->qla_watchdog_exited = 1; 293 return; 294 } 295 ha->qla_watchdog_exited = 0; 296 297 if (!ha->flags.qla_watchdog_pause) { 298 if (ha->qla_initiate_recovery) { 299 ha->qla_watchdog_paused = 1; 300 ha->qla_initiate_recovery = 0; 301 ha->err_inject = 0; 302 taskqueue_enqueue(ha->err_tq, &ha->err_task); 303 304 } else if (!if_sendq_empty(ifp) && QL_RUNNING(ifp)) { 305 taskqueue_enqueue(ha->tx_tq, &ha->tx_task); 306 } 307 308 ha->qla_watchdog_paused = 0; 309 } else { 310 ha->qla_watchdog_paused = 1; 311 } 312 313 ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000; 314 callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, 315 qls_watchdog, ha); 316 317 return; 318 } 319 320 /* 321 * Name: qls_pci_attach 322 * Function: attaches the device to the operating system 323 */ 324 static int 325 qls_pci_attach(device_t dev) 326 { 327 qla_host_t *ha = NULL; 328 int i; 329 330 QL_DPRINT2((dev, "%s: enter\n", __func__)); 331 332 if ((ha = device_get_softc(dev)) == NULL) { 333 device_printf(dev, "cannot get softc\n"); 334 return (ENOMEM); 335 } 336 337 memset(ha, 0, sizeof (qla_host_t)); 338 339 if (pci_get_device(dev) != PCI_DEVICE_QLOGIC_8000) { 340 device_printf(dev, "device is not QLE8000\n"); 341 return (ENXIO); 342 } 343 344 ha->pci_func = pci_get_function(dev); 345 346 ha->pci_dev = dev; 347 348 pci_enable_busmaster(dev); 349 350 ha->reg_rid = PCIR_BAR(1); 351 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, 352 RF_ACTIVE); 353 354 if (ha->pci_reg == NULL) { 355 device_printf(dev, "unable to map any ports\n"); 356 goto qls_pci_attach_err; 357 } 358 359 ha->reg_rid1 = PCIR_BAR(3); 360 ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 361 &ha->reg_rid1, RF_ACTIVE); 362 363 if (ha->pci_reg1 == NULL) { 364 device_printf(dev, "unable to map any ports\n"); 365 goto qls_pci_attach_err; 366 } 367 368 mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); 369 mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF); 370 371 qls_add_sysctls(ha); 372 qls_hw_add_sysctls(ha); 373 374 ha->flags.lock_init = 1; 375 376 ha->msix_count = pci_msix_count(dev); 377 378 if (ha->msix_count < qls_get_msix_count(ha)) { 379 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, 380 ha->msix_count); 381 goto qls_pci_attach_err; 382 } 383 384 ha->msix_count = qls_get_msix_count(ha); 385 386 device_printf(dev, "\n%s: ha %p pci_func 0x%x msix_count 0x%x" 387 " pci_reg %p pci_reg1 %p\n", __func__, ha, 388 ha->pci_func, ha->msix_count, ha->pci_reg, ha->pci_reg1); 389 390 if (pci_alloc_msix(dev, &ha->msix_count)) { 391 device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__, 392 ha->msix_count); 393 ha->msix_count = 0; 394 goto qls_pci_attach_err; 395 } 396 397 for (i = 0; i < ha->num_rx_rings; i++) { 398 ha->irq_vec[i].cq_idx = i; 399 ha->irq_vec[i].ha = ha; 400 ha->irq_vec[i].irq_rid = 1 + i; 401 402 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 403 &ha->irq_vec[i].irq_rid, 404 (RF_ACTIVE | RF_SHAREABLE)); 405 406 if (ha->irq_vec[i].irq == NULL) { 407 device_printf(dev, "could not allocate interrupt\n"); 408 goto qls_pci_attach_err; 409 } 410 411 if (bus_setup_intr(dev, ha->irq_vec[i].irq, 412 (INTR_TYPE_NET | INTR_MPSAFE), NULL, qls_isr, 413 &ha->irq_vec[i], &ha->irq_vec[i].handle)) { 414 device_printf(dev, 415 "could not setup interrupt\n"); 416 goto qls_pci_attach_err; 417 } 418 } 419 420 qls_rd_nic_params(ha); 421 422 /* allocate parent dma tag */ 423 if (qls_alloc_parent_dma_tag(ha)) { 424 device_printf(dev, "%s: qls_alloc_parent_dma_tag failed\n", 425 __func__); 426 goto qls_pci_attach_err; 427 } 428 429 /* alloc all dma buffers */ 430 if (qls_alloc_dma(ha)) { 431 device_printf(dev, "%s: qls_alloc_dma failed\n", __func__); 432 goto qls_pci_attach_err; 433 } 434 435 /* create the o.s ethernet interface */ 436 qls_init_ifnet(dev, ha); 437 438 ha->flags.qla_watchdog_active = 1; 439 ha->flags.qla_watchdog_pause = 1; 440 441 TASK_INIT(&ha->tx_task, 0, qls_tx_done, ha); 442 ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT, 443 taskqueue_thread_enqueue, &ha->tx_tq); 444 taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq", 445 device_get_nameunit(ha->pci_dev)); 446 447 callout_init(&ha->tx_callout, 1); 448 ha->flags.qla_callout_init = 1; 449 450 /* create ioctl device interface */ 451 if (qls_make_cdev(ha)) { 452 device_printf(dev, "%s: qls_make_cdev failed\n", __func__); 453 goto qls_pci_attach_err; 454 } 455 456 callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, 457 qls_watchdog, ha); 458 459 TASK_INIT(&ha->err_task, 0, qls_error_recovery, ha); 460 ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT, 461 taskqueue_thread_enqueue, &ha->err_tq); 462 taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq", 463 device_get_nameunit(ha->pci_dev)); 464 465 QL_DPRINT2((dev, "%s: exit 0\n", __func__)); 466 return (0); 467 468 qls_pci_attach_err: 469 470 qls_release(ha); 471 472 QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__)); 473 return (ENXIO); 474 } 475 476 /* 477 * Name: qls_pci_detach 478 * Function: Unhooks the device from the operating system 479 */ 480 static int 481 qls_pci_detach(device_t dev) 482 { 483 qla_host_t *ha = NULL; 484 485 QL_DPRINT2((dev, "%s: enter\n", __func__)); 486 487 if ((ha = device_get_softc(dev)) == NULL) { 488 device_printf(dev, "cannot get softc\n"); 489 return (ENOMEM); 490 } 491 492 (void)QLA_LOCK(ha, __func__, 0); 493 qls_stop(ha); 494 QLA_UNLOCK(ha, __func__); 495 496 qls_release(ha); 497 498 QL_DPRINT2((dev, "%s: exit\n", __func__)); 499 500 return (0); 501 } 502 503 /* 504 * Name: qls_release 505 * Function: Releases the resources allocated for the device 506 */ 507 static void 508 qls_release(qla_host_t *ha) 509 { 510 device_t dev; 511 int i; 512 513 dev = ha->pci_dev; 514 515 if (ha->err_tq) { 516 taskqueue_drain(ha->err_tq, &ha->err_task); 517 taskqueue_free(ha->err_tq); 518 } 519 520 if (ha->tx_tq) { 521 taskqueue_drain(ha->tx_tq, &ha->tx_task); 522 taskqueue_free(ha->tx_tq); 523 } 524 525 qls_del_cdev(ha); 526 527 if (ha->flags.qla_watchdog_active) { 528 ha->flags.qla_watchdog_exit = 1; 529 530 while (ha->qla_watchdog_exited == 0) 531 qls_mdelay(__func__, 1); 532 } 533 534 if (ha->flags.qla_callout_init) 535 callout_stop(&ha->tx_callout); 536 537 if (ha->ifp != NULL) 538 ether_ifdetach(ha->ifp); 539 540 qls_free_dma(ha); 541 qls_free_parent_dma_tag(ha); 542 543 for (i = 0; i < ha->num_rx_rings; i++) { 544 if (ha->irq_vec[i].handle) { 545 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, 546 ha->irq_vec[i].handle); 547 } 548 549 if (ha->irq_vec[i].irq) { 550 (void)bus_release_resource(dev, SYS_RES_IRQ, 551 ha->irq_vec[i].irq_rid, 552 ha->irq_vec[i].irq); 553 } 554 } 555 556 if (ha->msix_count) 557 pci_release_msi(dev); 558 559 if (ha->flags.lock_init) { 560 mtx_destroy(&ha->tx_lock); 561 mtx_destroy(&ha->hw_lock); 562 } 563 564 if (ha->pci_reg) 565 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, 566 ha->pci_reg); 567 568 if (ha->pci_reg1) 569 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1, 570 ha->pci_reg1); 571 } 572 573 /* 574 * DMA Related Functions 575 */ 576 577 static void 578 qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 579 { 580 *((bus_addr_t *)arg) = 0; 581 582 if (error) { 583 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 584 return; 585 } 586 587 *((bus_addr_t *)arg) = segs[0].ds_addr; 588 589 return; 590 } 591 592 int 593 qls_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) 594 { 595 int ret = 0; 596 device_t dev; 597 bus_addr_t b_addr; 598 599 dev = ha->pci_dev; 600 601 QL_DPRINT2((dev, "%s: enter\n", __func__)); 602 603 ret = bus_dma_tag_create( 604 ha->parent_tag,/* parent */ 605 dma_buf->alignment, 606 ((bus_size_t)(1ULL << 32)),/* boundary */ 607 BUS_SPACE_MAXADDR, /* lowaddr */ 608 BUS_SPACE_MAXADDR, /* highaddr */ 609 NULL, NULL, /* filter, filterarg */ 610 dma_buf->size, /* maxsize */ 611 1, /* nsegments */ 612 dma_buf->size, /* maxsegsize */ 613 0, /* flags */ 614 NULL, NULL, /* lockfunc, lockarg */ 615 &dma_buf->dma_tag); 616 617 if (ret) { 618 device_printf(dev, "%s: could not create dma tag\n", __func__); 619 goto qls_alloc_dmabuf_exit; 620 } 621 ret = bus_dmamem_alloc(dma_buf->dma_tag, 622 (void **)&dma_buf->dma_b, 623 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 624 &dma_buf->dma_map); 625 if (ret) { 626 bus_dma_tag_destroy(dma_buf->dma_tag); 627 device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__); 628 goto qls_alloc_dmabuf_exit; 629 } 630 631 ret = bus_dmamap_load(dma_buf->dma_tag, 632 dma_buf->dma_map, 633 dma_buf->dma_b, 634 dma_buf->size, 635 qls_dmamap_callback, 636 &b_addr, BUS_DMA_NOWAIT); 637 638 if (ret || !b_addr) { 639 bus_dma_tag_destroy(dma_buf->dma_tag); 640 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 641 dma_buf->dma_map); 642 ret = -1; 643 goto qls_alloc_dmabuf_exit; 644 } 645 646 dma_buf->dma_addr = b_addr; 647 648 qls_alloc_dmabuf_exit: 649 QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n", 650 __func__, ret, (void *)dma_buf->dma_tag, 651 (void *)dma_buf->dma_map, (void *)dma_buf->dma_b, 652 dma_buf->size)); 653 654 return ret; 655 } 656 657 void 658 qls_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) 659 { 660 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 661 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 662 bus_dma_tag_destroy(dma_buf->dma_tag); 663 } 664 665 static int 666 qls_alloc_parent_dma_tag(qla_host_t *ha) 667 { 668 int ret; 669 device_t dev; 670 671 dev = ha->pci_dev; 672 673 /* 674 * Allocate parent DMA Tag 675 */ 676 ret = bus_dma_tag_create( 677 bus_get_dma_tag(dev), /* parent */ 678 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 679 BUS_SPACE_MAXADDR, /* lowaddr */ 680 BUS_SPACE_MAXADDR, /* highaddr */ 681 NULL, NULL, /* filter, filterarg */ 682 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 683 0, /* nsegments */ 684 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 685 0, /* flags */ 686 NULL, NULL, /* lockfunc, lockarg */ 687 &ha->parent_tag); 688 689 if (ret) { 690 device_printf(dev, "%s: could not create parent dma tag\n", 691 __func__); 692 return (-1); 693 } 694 695 ha->flags.parent_tag = 1; 696 697 return (0); 698 } 699 700 static void 701 qls_free_parent_dma_tag(qla_host_t *ha) 702 { 703 if (ha->flags.parent_tag) { 704 bus_dma_tag_destroy(ha->parent_tag); 705 ha->flags.parent_tag = 0; 706 } 707 } 708 709 /* 710 * Name: qls_init_ifnet 711 * Function: Creates the Network Device Interface and Registers it with the O.S 712 */ 713 714 static void 715 qls_init_ifnet(device_t dev, qla_host_t *ha) 716 { 717 if_t ifp; 718 719 QL_DPRINT2((dev, "%s: enter\n", __func__)); 720 721 ifp = ha->ifp = if_alloc(IFT_ETHER); 722 723 if (ifp == NULL) 724 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 725 726 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 727 if_setbaudrate(ifp, IF_Gbps(10)); 728 if_setinitfn(ifp, qls_init); 729 if_setsoftc(ifp, ha); 730 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 731 if_setioctlfn(ifp, qls_ioctl); 732 if_setstartfn(ifp, qls_start); 733 734 if_setsendqlen(ifp, qls_get_ifq_snd_maxlen(ha)); 735 if_setsendqready(ifp); 736 737 ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN; 738 if (ha->max_frame_size <= MCLBYTES) { 739 ha->msize = MCLBYTES; 740 } else if (ha->max_frame_size <= MJUMPAGESIZE) { 741 ha->msize = MJUMPAGESIZE; 742 } else 743 ha->msize = MJUM9BYTES; 744 745 ether_ifattach(ifp, qls_get_mac_addr(ha)); 746 747 if_setcapabilities(ifp, IFCAP_JUMBO_MTU); 748 749 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0); 750 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0); 751 752 if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0); 753 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0); 754 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0); 755 if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0); 756 757 if_setcapenable(ifp, if_getcapabilities(ifp)); 758 759 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 760 761 ifmedia_init(&ha->media, IFM_IMASK, qls_media_change, qls_media_status); 762 763 ifmedia_add(&ha->media, (IFM_ETHER | qls_get_optics(ha) | IFM_FDX), 0, 764 NULL); 765 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 766 767 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 768 769 QL_DPRINT2((dev, "%s: exit\n", __func__)); 770 771 return; 772 } 773 774 static void 775 qls_init_locked(qla_host_t *ha) 776 { 777 if_t ifp = ha->ifp; 778 779 qls_stop(ha); 780 781 qls_flush_xmt_bufs(ha); 782 783 if (qls_alloc_rcv_bufs(ha) != 0) 784 return; 785 786 if (qls_config_lro(ha)) 787 return; 788 789 bcopy(if_getlladdr(ha->ifp), ha->mac_addr, ETHER_ADDR_LEN); 790 791 if_sethwassist(ifp, CSUM_IP); 792 if_sethwassistbits(ifp, CSUM_TCP, 0); 793 if_sethwassistbits(ifp, CSUM_UDP, 0); 794 if_sethwassistbits(ifp, CSUM_TSO, 0); 795 796 if (qls_init_hw_if(ha) == 0) { 797 ifp = ha->ifp; 798 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 799 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 800 ha->flags.qla_watchdog_pause = 0; 801 } 802 803 return; 804 } 805 806 static void 807 qls_init(void *arg) 808 { 809 qla_host_t *ha; 810 811 ha = (qla_host_t *)arg; 812 813 QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); 814 815 (void)QLA_LOCK(ha, __func__, 0); 816 qls_init_locked(ha); 817 QLA_UNLOCK(ha, __func__); 818 819 QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__)); 820 } 821 822 static u_int 823 qls_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt) 824 { 825 uint8_t *mta = arg; 826 827 if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS) 828 return (0); 829 830 bcopy(LLADDR(sdl), &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN); 831 832 return (1); 833 } 834 835 static void 836 qls_set_multi(qla_host_t *ha, uint32_t add_multi) 837 { 838 uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN]; 839 if_t ifp = ha->ifp; 840 int mcnt; 841 842 mcnt = if_foreach_llmaddr(ifp, qls_copy_maddr, mta); 843 844 if (QLA_LOCK(ha, __func__, 1) == 0) { 845 qls_hw_set_multi(ha, mta, mcnt, add_multi); 846 QLA_UNLOCK(ha, __func__); 847 } 848 849 return; 850 } 851 852 static int 853 qls_ioctl(if_t ifp, u_long cmd, caddr_t data) 854 { 855 int ret = 0; 856 struct ifreq *ifr = (struct ifreq *)data; 857 #ifdef INET 858 struct ifaddr *ifa = (struct ifaddr *)data; 859 #endif 860 qla_host_t *ha; 861 862 ha = (qla_host_t *)if_getsoftc(ifp); 863 864 switch (cmd) { 865 case SIOCSIFADDR: 866 QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n", 867 __func__, cmd)); 868 869 #ifdef INET 870 if (ifa->ifa_addr->sa_family == AF_INET) { 871 if_setflagbits(ifp, IFF_UP, 0); 872 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 873 (void)QLA_LOCK(ha, __func__, 0); 874 qls_init_locked(ha); 875 QLA_UNLOCK(ha, __func__); 876 } 877 QL_DPRINT4((ha->pci_dev, 878 "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 879 __func__, cmd, 880 ntohl(IA_SIN(ifa)->sin_addr.s_addr))); 881 882 arp_ifinit(ifp, ifa); 883 break; 884 } 885 #endif 886 ether_ioctl(ifp, cmd, data); 887 break; 888 889 case SIOCSIFMTU: 890 QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n", 891 __func__, cmd)); 892 893 if (ifr->ifr_mtu > QLA_MAX_MTU) { 894 ret = EINVAL; 895 } else { 896 (void) QLA_LOCK(ha, __func__, 0); 897 898 if_setmtu(ifp, ifr->ifr_mtu); 899 ha->max_frame_size = 900 if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN; 901 902 QLA_UNLOCK(ha, __func__); 903 904 if (ret) 905 ret = EINVAL; 906 } 907 908 break; 909 910 case SIOCSIFFLAGS: 911 QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n", 912 __func__, cmd)); 913 914 (void)QLA_LOCK(ha, __func__, 0); 915 916 if (if_getflags(ifp) & IFF_UP) { 917 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 918 if ((if_getflags(ifp) ^ ha->if_flags) & 919 IFF_PROMISC) { 920 ret = qls_set_promisc(ha); 921 } else if ((if_getflags(ifp) ^ ha->if_flags) & 922 IFF_ALLMULTI) { 923 ret = qls_set_allmulti(ha); 924 } 925 } else { 926 ha->max_frame_size = if_getmtu(ifp) + 927 ETHER_HDR_LEN + ETHER_CRC_LEN; 928 qls_init_locked(ha); 929 } 930 } else { 931 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 932 qls_stop(ha); 933 ha->if_flags = if_getflags(ifp); 934 } 935 936 QLA_UNLOCK(ha, __func__); 937 break; 938 939 case SIOCADDMULTI: 940 QL_DPRINT4((ha->pci_dev, 941 "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd)); 942 943 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 944 qls_set_multi(ha, 1); 945 } 946 break; 947 948 case SIOCDELMULTI: 949 QL_DPRINT4((ha->pci_dev, 950 "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd)); 951 952 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 953 qls_set_multi(ha, 0); 954 } 955 break; 956 957 case SIOCSIFMEDIA: 958 case SIOCGIFMEDIA: 959 QL_DPRINT4((ha->pci_dev, 960 "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", 961 __func__, cmd)); 962 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); 963 break; 964 965 case SIOCSIFCAP: 966 { 967 int mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 968 969 QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n", 970 __func__, cmd)); 971 972 if (mask & IFCAP_HWCSUM) 973 if_togglecapenable(ifp, IFCAP_HWCSUM); 974 if (mask & IFCAP_TSO4) 975 if_togglecapenable(ifp, IFCAP_TSO4); 976 if (mask & IFCAP_VLAN_HWTAGGING) 977 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); 978 if (mask & IFCAP_VLAN_HWTSO) 979 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); 980 981 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) 982 qls_init(ha); 983 984 VLAN_CAPABILITIES(ifp); 985 break; 986 } 987 988 default: 989 QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n", 990 __func__, cmd)); 991 ret = ether_ioctl(ifp, cmd, data); 992 break; 993 } 994 995 return (ret); 996 } 997 998 static int 999 qls_media_change(if_t ifp) 1000 { 1001 qla_host_t *ha; 1002 struct ifmedia *ifm; 1003 int ret = 0; 1004 1005 ha = (qla_host_t *)if_getsoftc(ifp); 1006 1007 QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); 1008 1009 ifm = &ha->media; 1010 1011 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1012 ret = EINVAL; 1013 1014 QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__)); 1015 1016 return (ret); 1017 } 1018 1019 static void 1020 qls_media_status(if_t ifp, struct ifmediareq *ifmr) 1021 { 1022 qla_host_t *ha; 1023 1024 ha = (qla_host_t *)if_getsoftc(ifp); 1025 1026 QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); 1027 1028 ifmr->ifm_status = IFM_AVALID; 1029 ifmr->ifm_active = IFM_ETHER; 1030 1031 qls_update_link_state(ha); 1032 if (ha->link_up) { 1033 ifmr->ifm_status |= IFM_ACTIVE; 1034 ifmr->ifm_active |= (IFM_FDX | qls_get_optics(ha)); 1035 } 1036 1037 QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,\ 1038 (ha->link_up ? "link_up" : "link_down"))); 1039 1040 return; 1041 } 1042 1043 static void 1044 qls_start(if_t ifp) 1045 { 1046 int i, ret = 0; 1047 struct mbuf *m_head; 1048 qla_host_t *ha = (qla_host_t *)if_getsoftc(ifp); 1049 1050 QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__)); 1051 1052 if (!mtx_trylock(&ha->tx_lock)) { 1053 QL_DPRINT8((ha->pci_dev, 1054 "%s: mtx_trylock(&ha->tx_lock) failed\n", __func__)); 1055 return; 1056 } 1057 1058 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 1059 IFF_DRV_RUNNING) { 1060 for (i = 0; i < ha->num_tx_rings; i++) { 1061 ret |= qls_hw_tx_done(ha, i); 1062 } 1063 1064 if (ret == 0) 1065 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1066 } 1067 1068 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1069 IFF_DRV_RUNNING) { 1070 QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__)); 1071 QLA_TX_UNLOCK(ha); 1072 return; 1073 } 1074 1075 if (!ha->link_up) { 1076 qls_update_link_state(ha); 1077 if (!ha->link_up) { 1078 QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__)); 1079 QLA_TX_UNLOCK(ha); 1080 return; 1081 } 1082 } 1083 1084 while (!if_sendq_empty(ifp)) { 1085 m_head = if_dequeue(ifp); 1086 1087 if (m_head == NULL) { 1088 QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n", 1089 __func__)); 1090 break; 1091 } 1092 1093 if (qls_send(ha, &m_head)) { 1094 if (m_head == NULL) 1095 break; 1096 QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__)); 1097 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1098 if_sendq_prepend(ifp, m_head); 1099 break; 1100 } 1101 /* Send a copy of the frame to the BPF listener */ 1102 ETHER_BPF_MTAP(ifp, m_head); 1103 } 1104 1105 QLA_TX_UNLOCK(ha); 1106 QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__)); 1107 return; 1108 } 1109 1110 static int 1111 qls_send(qla_host_t *ha, struct mbuf **m_headp) 1112 { 1113 bus_dma_segment_t segs[QLA_MAX_SEGMENTS]; 1114 bus_dmamap_t map; 1115 int nsegs; 1116 int ret = -1; 1117 uint32_t tx_idx; 1118 struct mbuf *m_head = *m_headp; 1119 uint32_t txr_idx = 0; 1120 1121 QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__)); 1122 1123 /* check if flowid is set */ 1124 if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE) 1125 txr_idx = m_head->m_pkthdr.flowid & (ha->num_tx_rings - 1); 1126 1127 tx_idx = ha->tx_ring[txr_idx].txr_next; 1128 1129 map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map; 1130 1131 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 1132 BUS_DMA_NOWAIT); 1133 1134 if (ret == EFBIG) { 1135 struct mbuf *m; 1136 1137 QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__, 1138 m_head->m_pkthdr.len)); 1139 1140 m = m_defrag(m_head, M_NOWAIT); 1141 if (m == NULL) { 1142 ha->err_tx_defrag++; 1143 m_freem(m_head); 1144 *m_headp = NULL; 1145 device_printf(ha->pci_dev, 1146 "%s: m_defrag() = NULL [%d]\n", 1147 __func__, ret); 1148 return (ENOBUFS); 1149 } 1150 m_head = m; 1151 *m_headp = m_head; 1152 1153 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 1154 segs, &nsegs, BUS_DMA_NOWAIT))) { 1155 ha->err_tx_dmamap_load++; 1156 1157 device_printf(ha->pci_dev, 1158 "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n", 1159 __func__, ret, m_head->m_pkthdr.len); 1160 1161 if (ret != ENOMEM) { 1162 m_freem(m_head); 1163 *m_headp = NULL; 1164 } 1165 return (ret); 1166 } 1167 1168 } else if (ret) { 1169 ha->err_tx_dmamap_load++; 1170 1171 device_printf(ha->pci_dev, 1172 "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n", 1173 __func__, ret, m_head->m_pkthdr.len); 1174 1175 if (ret != ENOMEM) { 1176 m_freem(m_head); 1177 *m_headp = NULL; 1178 } 1179 return (ret); 1180 } 1181 1182 QL_ASSERT(ha, (nsegs != 0), ("qls_send: empty packet")); 1183 1184 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 1185 1186 if (!(ret = qls_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx))) { 1187 ha->tx_ring[txr_idx].count++; 1188 ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head; 1189 ha->tx_ring[txr_idx].tx_buf[tx_idx].map = map; 1190 } else { 1191 if (ret == EINVAL) { 1192 if (m_head) 1193 m_freem(m_head); 1194 *m_headp = NULL; 1195 } 1196 } 1197 1198 QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__)); 1199 return (ret); 1200 } 1201 1202 static void 1203 qls_stop(qla_host_t *ha) 1204 { 1205 if_t ifp = ha->ifp; 1206 1207 if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING)); 1208 1209 ha->flags.qla_watchdog_pause = 1; 1210 1211 while (!ha->qla_watchdog_paused) 1212 qls_mdelay(__func__, 1); 1213 1214 qls_del_hw_if(ha); 1215 1216 qls_free_lro(ha); 1217 1218 qls_flush_xmt_bufs(ha); 1219 qls_free_rcv_bufs(ha); 1220 1221 return; 1222 } 1223 1224 /* 1225 * Buffer Management Functions for Transmit and Receive Rings 1226 */ 1227 /* 1228 * Release mbuf after it sent on the wire 1229 */ 1230 static void 1231 qls_flush_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb) 1232 { 1233 QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); 1234 1235 if (txb->m_head) { 1236 bus_dmamap_unload(ha->tx_tag, txb->map); 1237 1238 m_freem(txb->m_head); 1239 txb->m_head = NULL; 1240 } 1241 1242 QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__)); 1243 } 1244 1245 static void 1246 qls_flush_xmt_bufs(qla_host_t *ha) 1247 { 1248 int i, j; 1249 1250 for (j = 0; j < ha->num_tx_rings; j++) { 1251 for (i = 0; i < NUM_TX_DESCRIPTORS; i++) 1252 qls_flush_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]); 1253 } 1254 1255 return; 1256 } 1257 1258 static int 1259 qls_alloc_rcv_mbufs(qla_host_t *ha, int r) 1260 { 1261 int i, j, ret = 0; 1262 qla_rx_buf_t *rxb; 1263 qla_rx_ring_t *rx_ring; 1264 volatile q81_bq_addr_e_t *sbq_e; 1265 1266 rx_ring = &ha->rx_ring[r]; 1267 1268 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1269 rxb = &rx_ring->rx_buf[i]; 1270 1271 ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map); 1272 1273 if (ret) { 1274 device_printf(ha->pci_dev, 1275 "%s: dmamap[%d, %d] failed\n", __func__, r, i); 1276 1277 for (j = 0; j < i; j++) { 1278 rxb = &rx_ring->rx_buf[j]; 1279 bus_dmamap_destroy(ha->rx_tag, rxb->map); 1280 } 1281 goto qls_alloc_rcv_mbufs_err; 1282 } 1283 } 1284 1285 rx_ring = &ha->rx_ring[r]; 1286 1287 sbq_e = rx_ring->sbq_vaddr; 1288 1289 rxb = &rx_ring->rx_buf[0]; 1290 1291 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1292 if (!(ret = qls_get_mbuf(ha, rxb, NULL))) { 1293 /* 1294 * set the physical address in the 1295 * corresponding descriptor entry in the 1296 * receive ring/queue for the hba 1297 */ 1298 1299 sbq_e->addr_lo = rxb->paddr & 0xFFFFFFFF; 1300 sbq_e->addr_hi = (rxb->paddr >> 32) & 0xFFFFFFFF; 1301 1302 } else { 1303 device_printf(ha->pci_dev, 1304 "%s: qls_get_mbuf [%d, %d] failed\n", 1305 __func__, r, i); 1306 bus_dmamap_destroy(ha->rx_tag, rxb->map); 1307 goto qls_alloc_rcv_mbufs_err; 1308 } 1309 1310 rxb++; 1311 sbq_e++; 1312 } 1313 return 0; 1314 1315 qls_alloc_rcv_mbufs_err: 1316 return (-1); 1317 } 1318 1319 static void 1320 qls_free_rcv_bufs(qla_host_t *ha) 1321 { 1322 int i, r; 1323 qla_rx_buf_t *rxb; 1324 qla_rx_ring_t *rxr; 1325 1326 for (r = 0; r < ha->num_rx_rings; r++) { 1327 rxr = &ha->rx_ring[r]; 1328 1329 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1330 rxb = &rxr->rx_buf[i]; 1331 1332 if (rxb->m_head != NULL) { 1333 bus_dmamap_unload(ha->rx_tag, rxb->map); 1334 bus_dmamap_destroy(ha->rx_tag, rxb->map); 1335 m_freem(rxb->m_head); 1336 } 1337 } 1338 bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS)); 1339 } 1340 return; 1341 } 1342 1343 static int 1344 qls_alloc_rcv_bufs(qla_host_t *ha) 1345 { 1346 int r, ret = 0; 1347 qla_rx_ring_t *rxr; 1348 1349 for (r = 0; r < ha->num_rx_rings; r++) { 1350 rxr = &ha->rx_ring[r]; 1351 bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS)); 1352 } 1353 1354 for (r = 0; r < ha->num_rx_rings; r++) { 1355 ret = qls_alloc_rcv_mbufs(ha, r); 1356 1357 if (ret) 1358 qls_free_rcv_bufs(ha); 1359 } 1360 1361 return (ret); 1362 } 1363 1364 int 1365 qls_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp) 1366 { 1367 struct mbuf *mp = nmp; 1368 int ret = 0; 1369 uint32_t offset; 1370 bus_dma_segment_t segs[1]; 1371 int nsegs; 1372 1373 QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); 1374 1375 if (mp == NULL) { 1376 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ha->msize); 1377 1378 if (mp == NULL) { 1379 if (ha->msize == MCLBYTES) 1380 ha->err_m_getcl++; 1381 else 1382 ha->err_m_getjcl++; 1383 1384 ret = ENOBUFS; 1385 device_printf(ha->pci_dev, 1386 "%s: m_getcl failed\n", __func__); 1387 goto exit_qls_get_mbuf; 1388 } 1389 mp->m_len = mp->m_pkthdr.len = ha->msize; 1390 } else { 1391 mp->m_len = mp->m_pkthdr.len = ha->msize; 1392 mp->m_data = mp->m_ext.ext_buf; 1393 mp->m_next = NULL; 1394 } 1395 1396 /* align the receive buffers to 8 byte boundary */ 1397 offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL); 1398 if (offset) { 1399 offset = 8 - offset; 1400 m_adj(mp, offset); 1401 } 1402 1403 /* 1404 * Using memory from the mbuf cluster pool, invoke the bus_dma 1405 * machinery to arrange the memory mapping. 1406 */ 1407 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map, 1408 mp, segs, &nsegs, BUS_DMA_NOWAIT); 1409 rxb->paddr = segs[0].ds_addr; 1410 1411 if (ret || !rxb->paddr || (nsegs != 1)) { 1412 m_freem(mp); 1413 rxb->m_head = NULL; 1414 device_printf(ha->pci_dev, 1415 "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 1416 __func__, ret, (long long unsigned int)rxb->paddr, 1417 nsegs); 1418 ret = -1; 1419 goto exit_qls_get_mbuf; 1420 } 1421 rxb->m_head = mp; 1422 bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD); 1423 1424 exit_qls_get_mbuf: 1425 QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret)); 1426 return (ret); 1427 } 1428 1429 static void 1430 qls_tx_done(void *context, int pending) 1431 { 1432 qla_host_t *ha = context; 1433 if_t ifp; 1434 1435 ifp = ha->ifp; 1436 1437 if (!ifp) 1438 return; 1439 1440 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 1441 QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__)); 1442 return; 1443 } 1444 1445 qls_start(ha->ifp); 1446 return; 1447 } 1448 1449 static int 1450 qls_config_lro(qla_host_t *ha) 1451 { 1452 #if defined(INET) || defined(INET6) 1453 int i; 1454 struct lro_ctrl *lro; 1455 1456 for (i = 0; i < ha->num_rx_rings; i++) { 1457 lro = &ha->rx_ring[i].lro; 1458 if (tcp_lro_init(lro)) { 1459 device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n", 1460 __func__); 1461 return (-1); 1462 } 1463 lro->ifp = ha->ifp; 1464 } 1465 ha->flags.lro_init = 1; 1466 1467 QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__)); 1468 #endif 1469 return (0); 1470 } 1471 1472 static void 1473 qls_free_lro(qla_host_t *ha) 1474 { 1475 #if defined(INET) || defined(INET6) 1476 int i; 1477 struct lro_ctrl *lro; 1478 1479 if (!ha->flags.lro_init) 1480 return; 1481 1482 for (i = 0; i < ha->num_rx_rings; i++) { 1483 lro = &ha->rx_ring[i].lro; 1484 tcp_lro_free(lro); 1485 } 1486 ha->flags.lro_init = 0; 1487 #endif 1488 } 1489 1490 static void 1491 qls_error_recovery(void *context, int pending) 1492 { 1493 qla_host_t *ha = context; 1494 1495 qls_init(ha); 1496 1497 return; 1498 } 1499