/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_os.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include <sys/smp.h>

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8030
#define PCI_PRODUCT_QLOGIC_ISP8030	0x8030
#endif

#define PCI_QLOGIC_ISP8030 \
	((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)

/*
 * static functions
 */
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);
static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);

static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qla_stop(qla_host_t *ha);
static void qla_get_peer(qla_host_t *ha);
static void qla_error_recovery(void *context, int pending);
static void qla_async_event(void *context, int pending);
static void qla_stats(void *context, int pending);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
		uint32_t iscsi_pdu);

/*
 * Hooks to the Operating Systems
 */
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);

static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);

static int qla_transmit(struct ifnet *ifp, struct mbuf *mp);
static void qla_qflush(struct ifnet *ifp);
static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static int qla_create_fp_taskqueues(qla_host_t *ha);
static void qla_destroy_fp_taskqueues(qla_host_t *ha);
static void qla_drain_fp_taskqueues(qla_host_t *ha);
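/*
 * Newbus/PCI glue: device methods, driver declaration and module
 * dependencies for the "ql" (qla83xx) driver.
 */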
static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla83xx_devclass;

DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);

MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
MODULE_DEPEND(qla83xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");

#define QL_STD_REPLENISH_THRES		0
#define QL_JUMBO_REPLENISH_THRES	32

static char dev_str[64];
static char ver_str[64];

/*
 * Name: qla_pci_probe
 * Function: Validate the PCI device to be a QLA83xx device
 */
static int
qla_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP8030:
		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
			"Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		device_set_desc(dev, dev_str);
		break;
	default:
		return (ENXIO);
	}

	if (bootverbose)
		printf("%s: %s\n", __func__, dev_str);

	return (BUS_PROBE_DEFAULT);
}

static void
qla_add_sysctls(qla_host_t *ha)
{
	device_t dev = ha->pci_dev;

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "version", CTLFLAG_RD,
		ver_str, 0, "Driver Version");

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "fw_version", CTLFLAG_RD,
		ha->fw_ver_str, 0, "firmware version");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
		(void *)ha, 0,
		qla_sysctl_get_link_status, "I", "Link Status");

	ha->dbg_level = 0;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "debug", CTLFLAG_RW,
		&ha->dbg_level, ha->dbg_level, "Debug Level");

	ha->std_replenish = QL_STD_REPLENISH_THRES;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "std_replenish", CTLFLAG_RW,
		&ha->std_replenish, ha->std_replenish,
		"Threshold for Replenishing Standard Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "ipv4_lro",
		CTLFLAG_RD, &ha->ipv4_lro,
		"number of ipv4 lro completions");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "ipv6_lro",
		CTLFLAG_RD, &ha->ipv6_lro,
		"number of ipv6 lro completions");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "tx_tso_frames",
		CTLFLAG_RD, &ha->tx_tso_frames,
		"number of Tx TSO Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_vlan_tx_frames",
		CTLFLAG_RD, &ha->hw_vlan_tx_frames,
		"number of Tx VLAN Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_lock_failed",
		CTLFLAG_RD, &ha->hw_lock_failed,
		"number of hw_lock failures");

	return;
}
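/*
 * Name: qla_watchdog
 * Function: Periodic callout; on a health-check failure or a peer reset
 *           request it schedules the error recovery task, otherwise it
 *           periodically kicks the statistics and async event tasks and
 *           re-arms itself every QLA_WATCHDOG_CALLOUT_TICKS.
 */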
static void
qla_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	qla_hw_t *hw;
	struct ifnet *ifp;

	hw = &ha->hw;
	ifp = ha->ifp;

	if (ha->qla_watchdog_exit) {
		ha->qla_watchdog_exited = 1;
		return;
	}
	ha->qla_watchdog_exited = 0;

	if (!ha->qla_watchdog_pause) {
		if (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
			(ha->msg_from_peer == QL_PEER_MSG_RESET)) {

			if (!(ha->dbg_level & 0x8000)) {
				ha->qla_watchdog_paused = 1;
				ha->qla_watchdog_pause = 1;
				ha->qla_initiate_recovery = 0;
				ha->err_inject = 0;
				device_printf(ha->pci_dev,
					"%s: taskqueue_enqueue(err_task) \n",
					__func__);
				taskqueue_enqueue(ha->err_tq, &ha->err_task);
				return;
			}

		} else if (ha->qla_interface_up) {

			ha->watchdog_ticks++;

			if (ha->watchdog_ticks > 1000)
				ha->watchdog_ticks = 0;

			if (!ha->watchdog_ticks && QL_RUNNING(ifp)) {
				taskqueue_enqueue(ha->stats_tq, &ha->stats_task);
			}

			if (ha->async_event) {
				taskqueue_enqueue(ha->async_event_tq,
					&ha->async_event_task);
			}

#if 0
			for (i = 0; ((i < ha->hw.num_sds_rings) &&
					!ha->watchdog_ticks); i++) {
				qla_tx_fp_t *fp = &ha->tx_fp[i];

				if (fp->fp_taskqueue != NULL)
					taskqueue_enqueue(fp->fp_taskqueue,
						&fp->fp_task);
			}
#endif
			ha->qla_watchdog_paused = 0;
		} else {
			ha->qla_watchdog_paused = 0;
		}
	} else {
		ha->qla_watchdog_paused = 1;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);
}

/*
 * Name: qla_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len;
	int i;
	uint32_t num_rcvq = 0;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	memset(ha, 0, sizeof (qla_host_t));

	if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
		device_printf(dev, "device is not ISP8030\n");
		return (ENXIO);
	}

	ha->pci_func = pci_get_function(dev) & 0x1;

	ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

	if (ha->pci_reg == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto qla_pci_attach_err;
	}

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
	ha->flags.lock_init = 1;

	qla_add_sysctls(ha);

	ha->hw.num_sds_rings = MAX_SDS_RINGS;
	ha->hw.num_rds_rings = MAX_RDS_RINGS;
	ha->hw.num_tx_rings = NUM_TX_RINGS;

	ha->reg_rid1 = PCIR_BAR(2);
	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->reg_rid1, RF_ACTIVE);
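	/*
	 * MSI-X vectors: one is used for mailbox interrupts and one per
	 * status (SDS) ring; the ring count is trimmed below if the device
	 * exposes fewer vectors.
	 */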
	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < 1) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
		ha->hw.num_sds_rings = ha->msix_count - 1;
	}

	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha,
		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg,
		ha->pci_reg1));

	/* initialize hardware */
	if (ql_init_hw(ha)) {
		device_printf(dev, "%s: ql_init_hw failed\n", __func__);
		goto qla_pci_attach_err;
	}

	device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
		ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
		ha->fw_ver_build);
	snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
		ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
		ha->fw_ver_build);

	if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
		device_printf(dev, "%s: qla_get_nic_partition failed\n",
			__func__);
		goto qla_pci_attach_err;
	}
	device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
		__func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
		ha->pci_reg, ha->pci_reg1, num_rcvq);

	if ((ha->msix_count < 64) || (num_rcvq != 32)) {
		if (ha->hw.num_sds_rings > 15) {
			ha->hw.num_sds_rings = 15;
		}
	}

	ha->hw.num_rds_rings = ha->hw.num_sds_rings;
	ha->hw.num_tx_rings = ha->hw.num_sds_rings;

#ifdef QL_ENABLE_ISCSI_TLV
	ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2;
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

	ql_hw_add_sysctls(ha);

	ha->msix_count = ha->hw.num_sds_rings + 1;

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}

	ha->mbx_irq_rid = 1;
	ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->mbx_irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));
	if (ha->mbx_irq == NULL) {
		device_printf(dev, "could not allocate mbx interrupt\n");
		goto qla_pci_attach_err;
	}
	if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
		NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
		device_printf(dev, "could not setup mbx interrupt\n");
		goto qla_pci_attach_err;
	}

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->irq_vec[i].sds_idx = i;
		ha->irq_vec[i].ha = ha;
		ha->irq_vec[i].irq_rid = 2 + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			goto qla_pci_attach_err;
		}
		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, ql_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle)) {
			device_printf(dev, "could not setup interrupt\n");
			goto qla_pci_attach_err;
		}

		ha->tx_fp[i].ha = ha;
		ha->tx_fp[i].txr_idx = i;

		if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) {
			device_printf(dev, "%s: could not allocate tx_br[%d]\n",
				__func__, i);
			goto qla_pci_attach_err;
		}
	}

	if (qla_create_fp_taskqueues(ha) != 0)
		goto qla_pci_attach_err;
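	/*
	 * Remaining attach steps: report the configuration, read the MAC
	 * address, set up DMA resources, create the network interface and
	 * start the watchdog and the helper taskqueues.
	 */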
	printf("%s: mp_ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
		ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);

	ql_read_mac_addr(ha);

	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (ql_alloc_dma(ha)) {
		device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}
	qla_get_peer(ha);

	if (ql_minidump_init(ha) != 0) {
		device_printf(dev, "%s: ql_minidump_init failed\n", __func__);
		goto qla_pci_attach_err;
	}
	/* create the o.s ethernet interface */
	qla_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->qla_watchdog_pause = 0;

	callout_init(&ha->tx_callout, TRUE);
	ha->flags.qla_callout_init = 1;

	/* create ioctl device interface */
	if (ql_make_cdev(ha)) {
		device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
	ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->err_tq);
	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
		device_get_nameunit(ha->pci_dev));

	TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
	ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->async_event_tq);
	taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
		device_get_nameunit(ha->pci_dev));

	TASK_INIT(&ha->stats_task, 0, qla_stats, ha);
	ha->stats_tq = taskqueue_create("qla_statsq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->stats_tq);
	taskqueue_start_threads(&ha->stats_tq, 1, PI_NET, "%s taskq",
		device_get_nameunit(ha->pci_dev));

	QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
	return (0);

qla_pci_attach_err:

	qla_release(ha);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
	}

	QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
	return (ENXIO);
}

/*
 * Name: qla_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qla_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	struct ifnet *ifp;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	QLA_LOCK(ha, __func__, -1, 0);

	ha->qla_detach_active = 1;
	qla_stop(ha);

	qla_release(ha);

	QLA_UNLOCK(ha, __func__);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
	}

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

	return (0);
}

/*
 * SYSCTL Related Callbacks
 */
static int
qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		ql_hw_link_status(ha);
	}
	return (err);
}

/*
 * Name: qla_release
 * Function: Releases the resources allocated for the device
 */
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	if (ha->async_event_tq) {
		taskqueue_drain(ha->async_event_tq, &ha->async_event_task);
		taskqueue_free(ha->async_event_tq);
	}

	if (ha->err_tq) {
		taskqueue_drain(ha->err_tq, &ha->err_task);
		taskqueue_free(ha->err_tq);
	}

	if (ha->stats_tq) {
		taskqueue_drain(ha->stats_tq, &ha->stats_task);
		taskqueue_free(ha->stats_tq);
	}

	ql_del_cdev(ha);

	if (ha->flags.qla_watchdog_active) {
		ha->qla_watchdog_exit = 1;

		while (ha->qla_watchdog_exited == 0)
			qla_mdelay(__func__, 1);
	}

	if (ha->flags.qla_callout_init)
		callout_stop(&ha->tx_callout);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	ql_free_dma(ha);
	qla_free_parent_dma_tag(ha);

	if (ha->mbx_handle)
		(void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);

	if (ha->mbx_irq)
		(void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
				ha->mbx_irq);

	for (i = 0; i < ha->hw.num_sds_rings; i++) {

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
				ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}

		qla_free_tx_br(ha, &ha->tx_fp[i]);
	}
	qla_destroy_fp_taskqueues(ha);

	if (ha->msix_count)
		pci_release_msi(dev);

//	if (ha->flags.lock_init) {
//		mtx_destroy(&ha->hw_lock);
//	}

	if (ha->pci_reg)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

	if (ha->pci_reg1)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
				ha->pci_reg1);

	return;
}

/*
 * DMA Related Functions
 */

static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	*((bus_addr_t *)arg) = 0;

	if (error) {
		printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
		return;
	}

	*((bus_addr_t *)arg) = segs[0].ds_addr;

	return;
}
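/*
 * Name: ql_alloc_dmabuf
 * Function: Creates a DMA tag for the requested buffer, allocates and maps
 *           the memory and records its bus address in dma_buf->dma_addr.
 */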
int
ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	int ret = 0;
	device_t dev;
	bus_addr_t b_addr;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ret = bus_dma_tag_create(
			ha->parent_tag,/* parent */
			dma_buf->alignment,
			((bus_size_t)(1ULL << 32)),/* boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			dma_buf->size,		/* maxsize */
			1,			/* nsegments */
			dma_buf->size,		/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&dma_buf->dma_tag);

	if (ret) {
		device_printf(dev, "%s: could not create dma tag\n", __func__);
		goto ql_alloc_dmabuf_exit;
	}
	ret = bus_dmamem_alloc(dma_buf->dma_tag,
			(void **)&dma_buf->dma_b,
			(BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
			&dma_buf->dma_map);
	if (ret) {
		bus_dma_tag_destroy(dma_buf->dma_tag);
		device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
		goto ql_alloc_dmabuf_exit;
	}

	ret = bus_dmamap_load(dma_buf->dma_tag,
			dma_buf->dma_map,
			dma_buf->dma_b,
			dma_buf->size,
			qla_dmamap_callback,
			&b_addr, BUS_DMA_NOWAIT);

	if (ret || !b_addr) {
		bus_dma_tag_destroy(dma_buf->dma_tag);
		bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
			dma_buf->dma_map);
		ret = -1;
		goto ql_alloc_dmabuf_exit;
	}

	dma_buf->dma_addr = b_addr;

ql_alloc_dmabuf_exit:
	QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
		__func__, ret, (void *)dma_buf->dma_tag,
		(void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

	return ret;
}

void
ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
	bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
	bus_dma_tag_destroy(dma_buf->dma_tag);
}

static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
	int ret;
	device_t dev;

	dev = ha->pci_dev;

	/*
	 * Allocate parent DMA Tag
	 */
	ret = bus_dma_tag_create(
			bus_get_dma_tag(dev),	/* parent */
			1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
			0,			/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&ha->parent_tag);

	if (ret) {
		device_printf(dev, "%s: could not create parent dma tag\n",
			__func__);
		return (-1);
	}

	ha->flags.parent_tag = 1;

	return (0);
}

static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
	if (ha->flags.parent_tag) {
		bus_dma_tag_destroy(ha->parent_tag);
		ha->flags.parent_tag = 0;
	}
}

/*
 * Name: qla_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S
 */

static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
	struct ifnet *ifp;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_capabilities = IFCAP_LINKSTATE;
	ifp->if_mtu = ETHERMTU;

	ifp->if_init = qla_init;
	ifp->if_softc = ha;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = qla_ioctl;

	ifp->if_transmit = qla_transmit;
	ifp->if_qflush = qla_qflush;

	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
	IFQ_SET_READY(&ifp->if_snd);

	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	ether_ifattach(ifp, qla_get_mac_addr(ha));

	ifp->if_capabilities |= IFCAP_HWCSUM |
				IFCAP_TSO4 |
				IFCAP_JUMBO_MTU |
				IFCAP_VLAN_HWTAGGING |
				IFCAP_VLAN_MTU |
				IFCAP_VLAN_HWTSO |
				IFCAP_LRO;

	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

	return;
}
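/*
 * Name: qla_init_locked
 * Function: (Re)initializes the interface with the QLA lock held: stops the
 *           adapter, allocates transmit and receive buffers, programs the
 *           MAC address and brings the hardware interface up.
 */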
static void
qla_init_locked(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;

	qla_stop(ha);

	if (qla_alloc_xmt_bufs(ha) != 0)
		return;

	qla_confirm_9kb_enable(ha);

	if (qla_alloc_rcv_bufs(ha) != 0)
		return;

	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
	ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;

	ha->stop_rcv = 0;
	if (ql_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ha->qla_watchdog_pause = 0;
		ha->hw_vlan_tx_frames = 0;
		ha->tx_tso_frames = 0;
		ha->qla_interface_up = 1;
		ql_update_link_state(ha);
	}

	return;
}

static void
qla_init(void *arg)
{
	qla_host_t *ha;

	ha = (qla_host_t *)arg;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
		return;

	qla_init_locked(ha);

	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

static int
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = ha->ifp;
	int ret = 0;

	if_maddr_rlock(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
			break;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

		mcnt++;
	}

	if_maddr_runlock(ifp);

	if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
		QLA_LOCK_NO_SLEEP) != 0)
		return (-1);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
	}

	QLA_UNLOCK(ha, __func__);

	return (ret);
}

static int
qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		if (ifa->ifa_addr->sa_family == AF_INET) {

			ret = QLA_LOCK(ha, __func__,
					QLA_LOCK_DEFAULT_MS_TIMEOUT,
					QLA_LOCK_NO_SLEEP);
			if (ret)
				break;

			ifp->if_flags |= IFF_UP;

			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				qla_init_locked(ha);
			}

			QLA_UNLOCK(ha, __func__);
			QL_DPRINT4(ha, (ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
		} else {
			ether_ioctl(ifp, cmd, data);
		}
		break;

	case SIOCSIFMTU:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_MTU) {
			ret = EINVAL;
		} else {
			ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
					QLA_LOCK_NO_SLEEP);

			if (ret)
				break;

			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				qla_init_locked(ha);
			}

			if (ifp->if_mtu > ETHERMTU)
				ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
			else
				ha->std_replenish = QL_STD_REPLENISH_THRES;

			QLA_UNLOCK(ha, __func__);
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
				QLA_LOCK_NO_SLEEP);

		if (ret)
			break;

		if (ifp->if_flags & IFF_UP) {

			ha->max_frame_size = ifp->if_mtu +
				ETHER_HDR_LEN + ETHER_CRC_LEN;
			qla_init_locked(ha);

			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = ql_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = ql_set_allmulti(ha);
				}
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qla_stop(ha);
			ha->if_flags = ifp->if_flags;
		}

		QLA_UNLOCK(ha, __func__);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (qla_set_multi(ha, 1))
			ret = EINVAL;
		break;

	case SIOCDELMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (qla_set_multi(ha, 0))
			ret = EINVAL;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
				QLA_LOCK_NO_SLEEP);

			if (ret)
				break;

			qla_init_locked(ha);

			QLA_UNLOCK(ha, __func__);

		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}

static int
qla_media_change(struct ifnet *ifp)
{
	qla_host_t *ha;
	struct ifmedia *ifm;
	int ret = 0;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}

static void
qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ql_update_link_state(ha);
	if (ha->hw.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,
		(ha->hw.link_up ? "link_up" : "link_down")));

	return;
}
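/*
 * Name: qla_send
 * Function: Maps an mbuf chain for DMA (defragmenting once on EFBIG) and
 *           hands it to the hardware transmit ring via ql_hw_send().
 */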
"link_up" : "link_down"))); 1197 1198 return; 1199 } 1200 1201 1202 static int 1203 qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx, 1204 uint32_t iscsi_pdu) 1205 { 1206 bus_dma_segment_t segs[QLA_MAX_SEGMENTS]; 1207 bus_dmamap_t map; 1208 int nsegs; 1209 int ret = -1; 1210 uint32_t tx_idx; 1211 struct mbuf *m_head = *m_headp; 1212 1213 QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1214 1215 tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next; 1216 map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map; 1217 1218 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 1219 BUS_DMA_NOWAIT); 1220 1221 if (ret == EFBIG) { 1222 1223 struct mbuf *m; 1224 1225 QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__, 1226 m_head->m_pkthdr.len)); 1227 1228 m = m_defrag(m_head, M_NOWAIT); 1229 if (m == NULL) { 1230 ha->err_tx_defrag++; 1231 m_freem(m_head); 1232 *m_headp = NULL; 1233 device_printf(ha->pci_dev, 1234 "%s: m_defrag() = NULL [%d]\n", 1235 __func__, ret); 1236 return (ENOBUFS); 1237 } 1238 m_head = m; 1239 *m_headp = m_head; 1240 1241 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 1242 segs, &nsegs, BUS_DMA_NOWAIT))) { 1243 1244 ha->err_tx_dmamap_load++; 1245 1246 device_printf(ha->pci_dev, 1247 "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n", 1248 __func__, ret, m_head->m_pkthdr.len); 1249 1250 if (ret != ENOMEM) { 1251 m_freem(m_head); 1252 *m_headp = NULL; 1253 } 1254 return (ret); 1255 } 1256 1257 } else if (ret) { 1258 1259 ha->err_tx_dmamap_load++; 1260 1261 device_printf(ha->pci_dev, 1262 "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n", 1263 __func__, ret, m_head->m_pkthdr.len); 1264 1265 if (ret != ENOMEM) { 1266 m_freem(m_head); 1267 *m_headp = NULL; 1268 } 1269 return (ret); 1270 } 1271 1272 QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet")); 1273 1274 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 1275 1276 if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx, 1277 iscsi_pdu))) { 1278 ha->tx_ring[txr_idx].count++; 1279 if (iscsi_pdu) 1280 ha->tx_ring[txr_idx].iscsi_pkt_count++; 1281 ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head; 1282 } else { 1283 if (ret == EINVAL) { 1284 if (m_head) 1285 m_freem(m_head); 1286 *m_headp = NULL; 1287 } 1288 } 1289 1290 QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1291 return (ret); 1292 } 1293 1294 static int 1295 qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp) 1296 { 1297 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 1298 "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx); 1299 1300 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 1301 1302 fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF, 1303 M_NOWAIT, &fp->tx_mtx); 1304 if (fp->tx_br == NULL) { 1305 QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for " 1306 " fp[%d, %d]\n", ha->pci_func, fp->txr_idx)); 1307 return (-ENOMEM); 1308 } 1309 return 0; 1310 } 1311 1312 static void 1313 qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp) 1314 { 1315 struct mbuf *mp; 1316 struct ifnet *ifp = ha->ifp; 1317 1318 if (mtx_initialized(&fp->tx_mtx)) { 1319 1320 if (fp->tx_br != NULL) { 1321 1322 mtx_lock(&fp->tx_mtx); 1323 1324 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 1325 m_freem(mp); 1326 } 1327 1328 mtx_unlock(&fp->tx_mtx); 1329 1330 buf_ring_free(fp->tx_br, M_DEVBUF); 1331 fp->tx_br = NULL; 1332 } 1333 mtx_destroy(&fp->tx_mtx); 1334 } 1335 return; 1336 } 1337 1338 static void 1339 qla_fp_taskqueue(void *context, int pending) 1340 { 1341 qla_tx_fp_t *fp; 1342 qla_host_t *ha; 1343 struct 
static void
qla_fp_taskqueue(void *context, int pending)
{
	qla_tx_fp_t *fp;
	qla_host_t *ha;
	struct ifnet *ifp;
	struct mbuf *mp;
	int ret;
	uint32_t txr_idx;
	uint32_t iscsi_pdu = 0;
	uint32_t rx_pkts_left = -1;

	fp = context;

	if (fp == NULL)
		return;

	ha = (qla_host_t *)fp->ha;

	ifp = ha->ifp;

	txr_idx = fp->txr_idx;

	mtx_lock(&fp->tx_mtx);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || (!ha->hw.link_up)) {
		mtx_unlock(&fp->tx_mtx);
		goto qla_fp_taskqueue_exit;
	}

	while (rx_pkts_left && !ha->stop_rcv) {
		rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64);

#ifdef QL_ENABLE_ISCSI_TLV
		ql_hw_tx_done_locked(ha, fp->txr_idx);
		ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1)));
#else
		ql_hw_tx_done_locked(ha, fp->txr_idx);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

		mp = drbr_peek(ifp, fp->tx_br);

		while (mp != NULL) {

			if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) {
#ifdef QL_ENABLE_ISCSI_TLV
				if (ql_iscsi_pdu(ha, mp) == 0) {
					txr_idx = txr_idx +
						(ha->hw.num_tx_rings >> 1);
					iscsi_pdu = 1;
				} else {
					iscsi_pdu = 0;
					txr_idx = fp->txr_idx;
				}
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
			}

			ret = qla_send(ha, &mp, txr_idx, iscsi_pdu);

			if (ret) {
				if (mp != NULL)
					drbr_putback(ifp, fp->tx_br, mp);
				else {
					drbr_advance(ifp, fp->tx_br);
				}

				mtx_unlock(&fp->tx_mtx);

				goto qla_fp_taskqueue_exit0;
			} else {
				drbr_advance(ifp, fp->tx_br);
			}

			mp = drbr_peek(ifp, fp->tx_br);
		}
	}
	mtx_unlock(&fp->tx_mtx);

qla_fp_taskqueue_exit0:

	if (rx_pkts_left || ((mp != NULL) && ret)) {
		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
	} else {
		if (!ha->stop_rcv) {
			QL_ENABLE_INTERRUPTS(ha, fp->txr_idx);
		}
	}

qla_fp_taskqueue_exit:

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
	return;
}

static int
qla_create_fp_taskqueues(qla_host_t *ha)
{
	int i;
	uint8_t tq_name[32];

	for (i = 0; i < ha->hw.num_sds_rings; i++) {

		qla_tx_fp_t *fp = &ha->tx_fp[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

		TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);

		fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
					taskqueue_thread_enqueue,
					&fp->fp_taskqueue);

		if (fp->fp_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
			tq_name);

		QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
			fp->fp_taskqueue));
	}

	return (0);
}

static void
qla_destroy_fp_taskqueues(qla_host_t *ha)
{
	int i;

	for (i = 0; i < ha->hw.num_sds_rings; i++) {

		qla_tx_fp_t *fp = &ha->tx_fp[i];

		if (fp->fp_taskqueue != NULL) {
			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
			taskqueue_free(fp->fp_taskqueue);
			fp->fp_taskqueue = NULL;
		}
	}
	return;
}

static void
qla_drain_fp_taskqueues(qla_host_t *ha)
{
	int i;

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		qla_tx_fp_t *fp = &ha->tx_fp[i];

		if (fp->fp_taskqueue != NULL) {
			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
		}
	}
	return;
}
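/*
 * Name: qla_transmit
 * Function: if_transmit entry point; selects a fastpath ring from the mbuf's
 *           flow id, enqueues the packet on that ring's buf_ring and kicks
 *           the ring's taskqueue.
 */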
static int
qla_transmit(struct ifnet *ifp, struct mbuf *mp)
{
	qla_host_t *ha = (qla_host_t *)ifp->if_softc;
	qla_tx_fp_t *fp;
	int rss_id = 0;
	int ret = 0;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

#if __FreeBSD_version >= 1100000
	if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
#else
	if (mp->m_flags & M_FLOWID)
#endif
		rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) %
			ha->hw.num_sds_rings;
	fp = &ha->tx_fp[rss_id];

	if (fp->tx_br == NULL) {
		ret = EINVAL;
		goto qla_transmit_exit;
	}

	if (mp != NULL) {
		ret = drbr_enqueue(ifp, fp->tx_br, mp);
	}

	if (fp->fp_taskqueue != NULL)
		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);

	ret = 0;

qla_transmit_exit:

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
	return ret;
}

static void
qla_qflush(struct ifnet *ifp)
{
	int i;
	qla_tx_fp_t *fp;
	struct mbuf *mp;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {

		fp = &ha->tx_fp[i];

		if (fp == NULL)
			continue;

		if (fp->tx_br) {
			mtx_lock(&fp->tx_mtx);

			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
				m_freem(mp);
			}
			mtx_unlock(&fp->tx_mtx);
		}
	}
	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

	return;
}

static void
qla_stop(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;
	device_t dev;
	int i = 0;

	dev = ha->pci_dev;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ha->qla_watchdog_pause = 1;

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		qla_tx_fp_t *fp;

		fp = &ha->tx_fp[i];

		if (fp == NULL)
			continue;

		if (fp->tx_br != NULL) {
			mtx_lock(&fp->tx_mtx);
			mtx_unlock(&fp->tx_mtx);
		}
	}

	while (!ha->qla_watchdog_paused)
		qla_mdelay(__func__, 1);

	ha->qla_interface_up = 0;

	qla_drain_fp_taskqueues(ha);

	ql_del_hw_if(ha);

	qla_free_xmt_bufs(ha);
	qla_free_rcv_bufs(ha);

	return;
}

/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
	int ret = 0;
	uint32_t i, j;
	qla_tx_buf_t *txb;

	if (bus_dma_tag_create(NULL,		/* parent */
		1, 0,				/* alignment, bounds */
		BUS_SPACE_MAXADDR,		/* lowaddr */
		BUS_SPACE_MAXADDR,		/* highaddr */
		NULL, NULL,			/* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,		/* maxsize */
		QLA_MAX_SEGMENTS,		/* nsegments */
		PAGE_SIZE,			/* maxsegsize */
		BUS_DMA_ALLOCNOW,		/* flags */
		NULL,				/* lockfunc */
		NULL,				/* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {

			txb = &ha->tx_ring[j].tx_buf[i];

			if ((ret = bus_dmamap_create(ha->tx_tag,
					BUS_DMA_NOWAIT, &txb->map))) {

				ha->err_tx_dmamap_create++;
				device_printf(ha->pci_dev,
					"%s: bus_dmamap_create failed[%d]\n",
					__func__, ret);
				qla_free_xmt_bufs(ha);

				return (ret);
			}
		}
	}

	return 0;
}

/*
 * Release mbuf after it is sent on the wire
 */
static void
qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head && txb->map) {

		bus_dmamap_unload(ha->tx_tag, txb->map);

		m_freem(txb->m_head);
		txb->m_head = NULL;
	}

	if (txb->map)
		bus_dmamap_destroy(ha->tx_tag, txb->map);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

static void
qla_free_xmt_bufs(qla_host_t *ha)
{
	int i, j;

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
			qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
	}

	if (ha->tx_tag != NULL) {
		bus_dma_tag_destroy(ha->tx_tag);
		ha->tx_tag = NULL;
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}
	return;
}

static int
qla_alloc_rcv_std(qla_host_t *ha)
{
	int i, j, k, r, ret = 0;
	qla_rx_buf_t *rxb;
	qla_rx_ring_t *rx_ring;

	for (r = 0; r < ha->hw.num_rds_rings; r++) {

		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

			rxb = &rx_ring->rx_buf[i];

			ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
					&rxb->map);

			if (ret) {
				device_printf(ha->pci_dev,
					"%s: dmamap[%d, %d] failed\n",
					__func__, r, i);

				for (k = 0; k < r; k++) {
					for (j = 0; j < NUM_RX_DESCRIPTORS;
						j++) {
						rxb = &ha->rx_ring[k].rx_buf[j];
						bus_dmamap_destroy(ha->rx_tag,
							rxb->map);
					}
				}

				for (j = 0; j < i; j++) {
					bus_dmamap_destroy(ha->rx_tag,
						rx_ring->rx_buf[j].map);
				}
				goto qla_alloc_rcv_std_err;
			}
		}
	}

	qla_init_hw_rcv_descriptors(ha);

	for (r = 0; r < ha->hw.num_rds_rings; r++) {

		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &rx_ring->rx_buf[i];
			rxb->handle = i;
			if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
				/*
				 * set the physical address in the
				 * corresponding descriptor entry in the
				 * receive ring/queue for the hba
				 */
				qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
					rxb->paddr,
					(rxb->m_head)->m_pkthdr.len);
			} else {
				device_printf(ha->pci_dev,
					"%s: ql_get_mbuf [%d, %d] failed\n",
					__func__, r, i);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				goto qla_alloc_rcv_std_err;
			}
		}
	}
	return 0;

qla_alloc_rcv_std_err:
	return (-1);
}

static void
qla_free_rcv_std(qla_host_t *ha)
{
	int i, r;
	qla_rx_buf_t *rxb;

	for (r = 0; r < ha->hw.num_rds_rings; r++) {
		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &ha->rx_ring[r].rx_buf[i];
			if (rxb->m_head != NULL) {
				bus_dmamap_unload(ha->rx_tag, rxb->map);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				m_freem(rxb->m_head);
				rxb->m_head = NULL;
			}
		}
	}
	return;
}
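/*
 * Name: qla_alloc_rcv_bufs
 * Function: Creates the receive DMA tag and populates the standard receive
 *           rings with mapped mbuf clusters.
 */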
static int
qla_alloc_rcv_bufs(qla_host_t *ha)
{
	int i, ret = 0;

	if (bus_dma_tag_create(NULL,		/* parent */
			1, 0,			/* alignment, bounds */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			MJUM9BYTES,		/* maxsize */
			1,			/* nsegments */
			MJUM9BYTES,		/* maxsegsize */
			BUS_DMA_ALLOCNOW,	/* flags */
			NULL,			/* lockfunc */
			NULL,			/* lockfuncarg */
			&ha->rx_tag)) {

		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
			__func__);

		return (ENOMEM);
	}

	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
	}

	ret = qla_alloc_rcv_std(ha);

	return (ret);
}

static void
qla_free_rcv_bufs(qla_host_t *ha)
{
	int i;

	qla_free_rcv_std(ha);

	if (ha->rx_tag != NULL) {
		bus_dma_tag_destroy(ha->rx_tag);
		ha->rx_tag = NULL;
	}

	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
	}

	return;
}

int
ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
{
	register struct mbuf *mp = nmp;
	struct ifnet *ifp;
	int ret = 0;
	uint32_t offset;
	bus_dma_segment_t segs[1];
	int nsegs, mbuf_size;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

	if (ha->hw.enable_9kb)
		mbuf_size = MJUM9BYTES;
	else
		mbuf_size = MCLBYTES;

	if (mp == NULL) {

		if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE))
			return(-1);

		if (ha->hw.enable_9kb)
			mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size);
		else
			mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

		if (mp == NULL) {
			ha->err_m_getcl++;
			ret = ENOBUFS;
			device_printf(ha->pci_dev,
					"%s: m_getcl failed\n", __func__);
			goto exit_ql_get_mbuf;
		}
		mp->m_len = mp->m_pkthdr.len = mbuf_size;
	} else {
		mp->m_len = mp->m_pkthdr.len = mbuf_size;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
	if (offset) {
		offset = 8 - offset;
		m_adj(mp, offset);
	}

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
				mp, segs, &nsegs, BUS_DMA_NOWAIT);
	rxb->paddr = segs[0].ds_addr;

	if (ret || !rxb->paddr || (nsegs != 1)) {
		m_free(mp);
		rxb->m_head = NULL;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			__func__, ret, (long long unsigned int)rxb->paddr,
			nsegs);
		ret = -1;
		goto exit_ql_get_mbuf;
	}
	rxb->m_head = mp;
	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_ql_get_mbuf:
	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
	return (ret);
}

static void
qla_get_peer(qla_host_t *ha)
{
	device_t *peers;
	int count, i, slot;
	int my_slot = pci_get_slot(ha->pci_dev);

	if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
		return;

	for (i = 0; i < count; i++) {
		slot = pci_get_slot(peers[i]);

		if ((slot >= 0) && (slot == my_slot) &&
			(pci_get_device(peers[i]) ==
				pci_get_device(ha->pci_dev))) {
			if (ha->pci_dev != peers[i])
				ha->peer_dev = peers[i];
		}
	}
}

static void
qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
{
	qla_host_t *ha_peer;

	if (ha->peer_dev) {
		if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {

			ha_peer->msg_from_peer = msg_to_peer;
		}
	}
}
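/*
 * Name: qla_error_recovery
 * Function: Error recovery task; coordinates a reset with the peer PCI
 *           function, reinitializes the hardware and, if the interface was
 *           up, reallocates buffers and brings it back online.
 */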
static void
qla_error_recovery(void *context, int pending)
{
	qla_host_t *ha = context;
	uint32_t msecs_100 = 100;
	struct ifnet *ifp = ha->ifp;
	int i = 0;

	device_printf(ha->pci_dev, "%s: \n", __func__);
	ha->hw.imd_compl = 1;

	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
		return;

	device_printf(ha->pci_dev, "%s: enter\n", __func__);

	if (ha->qla_interface_up) {

		qla_mdelay(__func__, 300);

		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			qla_tx_fp_t *fp;

			fp = &ha->tx_fp[i];

			if (fp == NULL)
				continue;

			if (fp->tx_br != NULL) {
				mtx_lock(&fp->tx_mtx);
				mtx_unlock(&fp->tx_mtx);
			}
		}
	}

	qla_drain_fp_taskqueues(ha);

	if ((ha->pci_func & 0x1) == 0) {

		if (!ha->msg_from_peer) {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);

			while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
				msecs_100--)
				qla_mdelay(__func__, 100);
		}

		ha->msg_from_peer = 0;

		ql_minidump(ha);

		(void) ql_init_hw(ha);

		if (ha->qla_interface_up) {
			qla_free_xmt_bufs(ha);
			qla_free_rcv_bufs(ha);
		}

		qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);

	} else {
		if (ha->msg_from_peer == QL_PEER_MSG_RESET) {

			ha->msg_from_peer = 0;

			qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
		} else {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
		}

		while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--)
			qla_mdelay(__func__, 100);
		ha->msg_from_peer = 0;

		(void) ql_init_hw(ha);

		qla_mdelay(__func__, 1000);

		if (ha->qla_interface_up) {
			qla_free_xmt_bufs(ha);
			qla_free_rcv_bufs(ha);
		}
	}

	if (ha->qla_interface_up) {

		if (qla_alloc_xmt_bufs(ha) != 0) {
			goto qla_error_recovery_exit;
		}
		qla_confirm_9kb_enable(ha);

		if (qla_alloc_rcv_bufs(ha) != 0) {
			goto qla_error_recovery_exit;
		}

		ha->stop_rcv = 0;

		if (ql_init_hw_if(ha) == 0) {
			ifp = ha->ifp;
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
			ha->qla_watchdog_pause = 0;
		}
	} else
		ha->qla_watchdog_pause = 0;

qla_error_recovery_exit:

	device_printf(ha->pci_dev, "%s: exit\n", __func__);

	QLA_UNLOCK(ha, __func__);

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);
	return;
}

static void
qla_async_event(void *context, int pending)
{
	qla_host_t *ha = context;

	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
		return;

	if (ha->async_event) {
		ha->async_event = 0;
		qla_hw_async_event(ha);
	}

	QLA_UNLOCK(ha, __func__);

	return;
}

static void
qla_stats(void *context, int pending)
{
	qla_host_t *ha;

	ha = context;

	ql_get_stats(ha);
	return;
}