1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2013-2016 Qlogic Corporation 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 18 * and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 21 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 * POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 /* 31 * File: ql_os.c 32 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 33 */ 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 39 #include "ql_os.h" 40 #include "ql_hw.h" 41 #include "ql_def.h" 42 #include "ql_inline.h" 43 #include "ql_ver.h" 44 #include "ql_glbl.h" 45 #include "ql_dbg.h" 46 #include <sys/smp.h> 47 48 /* 49 * Some PCI Configuration Space Related Defines 50 */ 51 52 #ifndef PCI_VENDOR_QLOGIC 53 #define PCI_VENDOR_QLOGIC 0x1077 54 #endif 55 56 #ifndef PCI_PRODUCT_QLOGIC_ISP8030 57 #define PCI_PRODUCT_QLOGIC_ISP8030 0x8030 58 #endif 59 60 #define PCI_QLOGIC_ISP8030 \ 61 ((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC) 62 63 /* 64 * static functions 65 */ 66 static int qla_alloc_parent_dma_tag(qla_host_t *ha); 67 static void qla_free_parent_dma_tag(qla_host_t *ha); 68 static int qla_alloc_xmt_bufs(qla_host_t *ha); 69 static void qla_free_xmt_bufs(qla_host_t *ha); 70 static int qla_alloc_rcv_bufs(qla_host_t *ha); 71 static void qla_free_rcv_bufs(qla_host_t *ha); 72 static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb); 73 74 static void qla_init_ifnet(device_t dev, qla_host_t *ha); 75 static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS); 76 static void qla_release(qla_host_t *ha); 77 static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, 78 int error); 79 static void qla_stop(qla_host_t *ha); 80 static void qla_get_peer(qla_host_t *ha); 81 static void qla_error_recovery(void *context, int pending); 82 static void qla_async_event(void *context, int pending); 83 static void qla_stats(void *context, int pending); 84 static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx, 85 uint32_t iscsi_pdu); 86 87 /* 88 * Hooks to the Operating Systems 89 */ 90 static int qla_pci_probe (device_t); 91 static int qla_pci_attach (device_t); 92 static int qla_pci_detach (device_t); 93 94 static void qla_init(void *arg); 95 static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); 96 static int qla_media_change(struct ifnet *ifp); 97 static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr); 98 99 static int qla_transmit(struct ifnet *ifp, struct mbuf *mp); 100 static void qla_qflush(struct ifnet *ifp); 101 static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp); 102 static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp); 103 static int qla_create_fp_taskqueues(qla_host_t *ha); 104 static void qla_destroy_fp_taskqueues(qla_host_t *ha); 105 static void qla_drain_fp_taskqueues(qla_host_t *ha); 106 107 static device_method_t qla_pci_methods[] = { 108 /* Device interface */ 109 DEVMETHOD(device_probe, qla_pci_probe), 110 DEVMETHOD(device_attach, qla_pci_attach), 111 DEVMETHOD(device_detach, qla_pci_detach), 112 { 0, 0 } 113 }; 114 115 static driver_t qla_pci_driver = { 116 "ql", qla_pci_methods, sizeof (qla_host_t), 117 }; 118 119 static devclass_t qla83xx_devclass; 120 121 DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0); 122 123 MODULE_DEPEND(qla83xx, pci, 1, 1, 1); 124 MODULE_DEPEND(qla83xx, ether, 1, 1, 1); 125 126 MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver"); 127 128 #define QL_STD_REPLENISH_THRES 0 129 #define QL_JUMBO_REPLENISH_THRES 32 130 131 132 static char dev_str[64]; 133 static char ver_str[64]; 134 135 /* 136 * Name: qla_pci_probe 137 * Function: Validate the PCI device to be a QLA80XX device 138 */ 139 static int 140 qla_pci_probe(device_t dev) 141 { 142 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 143 case PCI_QLOGIC_ISP8030: 144 snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d", 145 "Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function", 146 QLA_VERSION_MAJOR, QLA_VERSION_MINOR, 147 QLA_VERSION_BUILD); 148 snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d", 149 QLA_VERSION_MAJOR, QLA_VERSION_MINOR, 150 QLA_VERSION_BUILD); 151 device_set_desc(dev, dev_str); 152 break; 153 default: 154 return (ENXIO); 155 } 156 157 if (bootverbose) 158 printf("%s: %s\n ", __func__, dev_str); 159 160 return (BUS_PROBE_DEFAULT); 161 } 162 163 static void 164 qla_add_sysctls(qla_host_t *ha) 165 { 166 device_t dev = ha->pci_dev; 167 168 SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), 169 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 170 OID_AUTO, "version", CTLFLAG_RD, 171 ver_str, 0, "Driver Version"); 172 173 SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), 174 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 175 OID_AUTO, "fw_version", CTLFLAG_RD, 176 ha->fw_ver_str, 0, "firmware version"); 177 178 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 179 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 180 OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW, 181 (void *)ha, 0, 182 qla_sysctl_get_link_status, "I", "Link Status"); 183 184 ha->dbg_level = 0; 185 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 186 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 187 OID_AUTO, "debug", CTLFLAG_RW, 188 &ha->dbg_level, ha->dbg_level, "Debug Level"); 189 190 ha->enable_minidump = 1; 191 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 192 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 193 OID_AUTO, "enable_minidump", CTLFLAG_RW, 194 &ha->enable_minidump, ha->enable_minidump, 195 "Minidump retrival is enabled only when this is set"); 196 197 ha->std_replenish = QL_STD_REPLENISH_THRES; 198 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 199 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 200 OID_AUTO, "std_replenish", CTLFLAG_RW, 201 &ha->std_replenish, ha->std_replenish, 202 "Threshold for Replenishing Standard Frames"); 203 204 SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), 205 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 206 OID_AUTO, "ipv4_lro", 207 CTLFLAG_RD, &ha->ipv4_lro, 208 "number of ipv4 lro completions"); 209 210 SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), 211 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 212 OID_AUTO, "ipv6_lro", 213 CTLFLAG_RD, &ha->ipv6_lro, 214 "number of ipv6 lro completions"); 215 216 SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), 217 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 218 OID_AUTO, "tx_tso_frames", 219 CTLFLAG_RD, &ha->tx_tso_frames, 220 "number of Tx TSO Frames"); 221 222 SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), 223 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 224 OID_AUTO, "hw_vlan_tx_frames", 225 CTLFLAG_RD, &ha->hw_vlan_tx_frames, 226 "number of Tx VLAN Frames"); 227 228 SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), 229 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 230 OID_AUTO, "hw_lock_failed", 231 CTLFLAG_RD, &ha->hw_lock_failed, 232 "number of hw_lock failures"); 233 234 return; 235 } 236 237 static void 238 qla_watchdog(void *arg) 239 { 240 qla_host_t *ha = arg; 241 qla_hw_t *hw; 242 struct ifnet *ifp; 243 244 hw = &ha->hw; 245 ifp = ha->ifp; 246 247 if (ha->qla_watchdog_exit) { 248 ha->qla_watchdog_exited = 1; 249 return; 250 } 251 ha->qla_watchdog_exited = 0; 252 253 if (!ha->qla_watchdog_pause) { 254 if (ql_hw_check_health(ha) || ha->qla_initiate_recovery || 255 (ha->msg_from_peer == QL_PEER_MSG_RESET)) { 256 257 if (!(ha->dbg_level & 0x8000)) { 258 ha->qla_watchdog_paused = 1; 259 ha->qla_watchdog_pause = 1; 260 ha->qla_initiate_recovery = 0; 261 ha->err_inject = 0; 262 device_printf(ha->pci_dev, 263 "%s: taskqueue_enqueue(err_task) \n", 264 __func__); 265 taskqueue_enqueue(ha->err_tq, &ha->err_task); 266 return; 267 } 268 269 } else if (ha->qla_interface_up) { 270 271 ha->watchdog_ticks++; 272 273 if (ha->watchdog_ticks > 1000) 274 ha->watchdog_ticks = 0; 275 276 if (!ha->watchdog_ticks && QL_RUNNING(ifp)) { 277 taskqueue_enqueue(ha->stats_tq, &ha->stats_task); 278 } 279 280 if (ha->async_event) { 281 taskqueue_enqueue(ha->async_event_tq, 282 &ha->async_event_task); 283 } 284 285 #if 0 286 for (i = 0; ((i < ha->hw.num_sds_rings) && 287 !ha->watchdog_ticks); i++) { 288 qla_tx_fp_t *fp = &ha->tx_fp[i]; 289 290 if (fp->fp_taskqueue != NULL) 291 taskqueue_enqueue(fp->fp_taskqueue, 292 &fp->fp_task); 293 } 294 #endif 295 ha->qla_watchdog_paused = 0; 296 } else { 297 ha->qla_watchdog_paused = 0; 298 } 299 } else { 300 ha->qla_watchdog_paused = 1; 301 } 302 303 callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, 304 qla_watchdog, ha); 305 } 306 307 /* 308 * Name: qla_pci_attach 309 * Function: attaches the device to the operating system 310 */ 311 static int 312 qla_pci_attach(device_t dev) 313 { 314 qla_host_t *ha = NULL; 315 uint32_t rsrc_len; 316 int i; 317 uint32_t num_rcvq = 0; 318 319 if ((ha = device_get_softc(dev)) == NULL) { 320 device_printf(dev, "cannot get softc\n"); 321 return (ENOMEM); 322 } 323 324 memset(ha, 0, sizeof (qla_host_t)); 325 326 if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) { 327 device_printf(dev, "device is not ISP8030\n"); 328 return (ENXIO); 329 } 330 331 ha->pci_func = pci_get_function(dev) & 0x1; 332 333 ha->pci_dev = dev; 334 335 pci_enable_busmaster(dev); 336 337 ha->reg_rid = PCIR_BAR(0); 338 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, 339 RF_ACTIVE); 340 341 if (ha->pci_reg == NULL) { 342 device_printf(dev, "unable to map any ports\n"); 343 goto qla_pci_attach_err; 344 } 345 346 rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 347 ha->reg_rid); 348 349 mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); 350 ha->flags.lock_init = 1; 351 352 qla_add_sysctls(ha); 353 354 ha->hw.num_sds_rings = MAX_SDS_RINGS; 355 ha->hw.num_rds_rings = MAX_RDS_RINGS; 356 ha->hw.num_tx_rings = NUM_TX_RINGS; 357 358 ha->reg_rid1 = PCIR_BAR(2); 359 ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 360 &ha->reg_rid1, RF_ACTIVE); 361 362 ha->msix_count = pci_msix_count(dev); 363 364 if (ha->msix_count < 1 ) { 365 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, 366 ha->msix_count); 367 goto qla_pci_attach_err; 368 } 369 370 if (ha->msix_count < (ha->hw.num_sds_rings + 1)) { 371 ha->hw.num_sds_rings = ha->msix_count - 1; 372 } 373 374 QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x" 375 " msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha, 376 ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg, 377 ha->pci_reg1)); 378 379 /* initialize hardware */ 380 if (ql_init_hw(ha)) { 381 device_printf(dev, "%s: ql_init_hw failed\n", __func__); 382 goto qla_pci_attach_err; 383 } 384 385 device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__, 386 ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub, 387 ha->fw_ver_build); 388 snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d", 389 ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub, 390 ha->fw_ver_build); 391 392 if (qla_get_nic_partition(ha, NULL, &num_rcvq)) { 393 device_printf(dev, "%s: qla_get_nic_partition failed\n", 394 __func__); 395 goto qla_pci_attach_err; 396 } 397 device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x" 398 " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n", 399 __func__, ha, ha->pci_func, rsrc_len, ha->msix_count, 400 ha->pci_reg, ha->pci_reg1, num_rcvq); 401 402 if ((ha->msix_count < 64) || (num_rcvq != 32)) { 403 if (ha->hw.num_sds_rings > 15) { 404 ha->hw.num_sds_rings = 15; 405 } 406 } 407 408 ha->hw.num_rds_rings = ha->hw.num_sds_rings; 409 ha->hw.num_tx_rings = ha->hw.num_sds_rings; 410 411 #ifdef QL_ENABLE_ISCSI_TLV 412 ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2; 413 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ 414 415 ql_hw_add_sysctls(ha); 416 417 ha->msix_count = ha->hw.num_sds_rings + 1; 418 419 if (pci_alloc_msix(dev, &ha->msix_count)) { 420 device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__, 421 ha->msix_count); 422 ha->msix_count = 0; 423 goto qla_pci_attach_err; 424 } 425 426 ha->mbx_irq_rid = 1; 427 ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 428 &ha->mbx_irq_rid, 429 (RF_ACTIVE | RF_SHAREABLE)); 430 if (ha->mbx_irq == NULL) { 431 device_printf(dev, "could not allocate mbx interrupt\n"); 432 goto qla_pci_attach_err; 433 } 434 if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE), 435 NULL, ql_mbx_isr, ha, &ha->mbx_handle)) { 436 device_printf(dev, "could not setup mbx interrupt\n"); 437 goto qla_pci_attach_err; 438 } 439 440 for (i = 0; i < ha->hw.num_sds_rings; i++) { 441 ha->irq_vec[i].sds_idx = i; 442 ha->irq_vec[i].ha = ha; 443 ha->irq_vec[i].irq_rid = 2 + i; 444 445 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 446 &ha->irq_vec[i].irq_rid, 447 (RF_ACTIVE | RF_SHAREABLE)); 448 449 if (ha->irq_vec[i].irq == NULL) { 450 device_printf(dev, "could not allocate interrupt\n"); 451 goto qla_pci_attach_err; 452 } 453 if (bus_setup_intr(dev, ha->irq_vec[i].irq, 454 (INTR_TYPE_NET | INTR_MPSAFE), 455 NULL, ql_isr, &ha->irq_vec[i], 456 &ha->irq_vec[i].handle)) { 457 device_printf(dev, "could not setup interrupt\n"); 458 goto qla_pci_attach_err; 459 } 460 461 ha->tx_fp[i].ha = ha; 462 ha->tx_fp[i].txr_idx = i; 463 464 if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) { 465 device_printf(dev, "%s: could not allocate tx_br[%d]\n", 466 __func__, i); 467 goto qla_pci_attach_err; 468 } 469 } 470 471 if (qla_create_fp_taskqueues(ha) != 0) 472 goto qla_pci_attach_err; 473 474 printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus, 475 ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count); 476 477 ql_read_mac_addr(ha); 478 479 /* allocate parent dma tag */ 480 if (qla_alloc_parent_dma_tag(ha)) { 481 device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n", 482 __func__); 483 goto qla_pci_attach_err; 484 } 485 486 /* alloc all dma buffers */ 487 if (ql_alloc_dma(ha)) { 488 device_printf(dev, "%s: ql_alloc_dma failed\n", __func__); 489 goto qla_pci_attach_err; 490 } 491 qla_get_peer(ha); 492 493 if (ql_minidump_init(ha) != 0) { 494 device_printf(dev, "%s: ql_minidump_init failed\n", __func__); 495 goto qla_pci_attach_err; 496 } 497 ql_alloc_drvr_state_buffer(ha); 498 /* create the o.s ethernet interface */ 499 qla_init_ifnet(dev, ha); 500 501 ha->flags.qla_watchdog_active = 1; 502 ha->qla_watchdog_pause = 0; 503 504 callout_init(&ha->tx_callout, TRUE); 505 ha->flags.qla_callout_init = 1; 506 507 /* create ioctl device interface */ 508 if (ql_make_cdev(ha)) { 509 device_printf(dev, "%s: ql_make_cdev failed\n", __func__); 510 goto qla_pci_attach_err; 511 } 512 513 callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, 514 qla_watchdog, ha); 515 516 TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha); 517 ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT, 518 taskqueue_thread_enqueue, &ha->err_tq); 519 taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq", 520 device_get_nameunit(ha->pci_dev)); 521 522 TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha); 523 ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT, 524 taskqueue_thread_enqueue, &ha->async_event_tq); 525 taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq", 526 device_get_nameunit(ha->pci_dev)); 527 528 TASK_INIT(&ha->stats_task, 0, qla_stats, ha); 529 ha->stats_tq = taskqueue_create("qla_statsq", M_NOWAIT, 530 taskqueue_thread_enqueue, &ha->stats_tq); 531 taskqueue_start_threads(&ha->stats_tq, 1, PI_NET, "%s taskq", 532 device_get_nameunit(ha->pci_dev)); 533 534 QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__)); 535 return (0); 536 537 qla_pci_attach_err: 538 539 qla_release(ha); 540 541 if (ha->flags.lock_init) { 542 mtx_destroy(&ha->hw_lock); 543 } 544 545 QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__)); 546 return (ENXIO); 547 } 548 549 /* 550 * Name: qla_pci_detach 551 * Function: Unhooks the device from the operating system 552 */ 553 static int 554 qla_pci_detach(device_t dev) 555 { 556 qla_host_t *ha = NULL; 557 struct ifnet *ifp; 558 559 560 if ((ha = device_get_softc(dev)) == NULL) { 561 device_printf(dev, "cannot get softc\n"); 562 return (ENOMEM); 563 } 564 565 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 566 567 ifp = ha->ifp; 568 569 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 570 QLA_LOCK(ha, __func__, -1, 0); 571 572 ha->qla_detach_active = 1; 573 qla_stop(ha); 574 575 qla_release(ha); 576 577 QLA_UNLOCK(ha, __func__); 578 579 if (ha->flags.lock_init) { 580 mtx_destroy(&ha->hw_lock); 581 } 582 583 QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); 584 585 return (0); 586 } 587 588 /* 589 * SYSCTL Related Callbacks 590 */ 591 static int 592 qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS) 593 { 594 int err, ret = 0; 595 qla_host_t *ha; 596 597 err = sysctl_handle_int(oidp, &ret, 0, req); 598 599 if (err || !req->newptr) 600 return (err); 601 602 if (ret == 1) { 603 ha = (qla_host_t *)arg1; 604 ql_hw_link_status(ha); 605 } 606 return (err); 607 } 608 609 /* 610 * Name: qla_release 611 * Function: Releases the resources allocated for the device 612 */ 613 static void 614 qla_release(qla_host_t *ha) 615 { 616 device_t dev; 617 int i; 618 619 dev = ha->pci_dev; 620 621 if (ha->async_event_tq) { 622 taskqueue_drain(ha->async_event_tq, &ha->async_event_task); 623 taskqueue_free(ha->async_event_tq); 624 } 625 626 if (ha->err_tq) { 627 taskqueue_drain(ha->err_tq, &ha->err_task); 628 taskqueue_free(ha->err_tq); 629 } 630 631 if (ha->stats_tq) { 632 taskqueue_drain(ha->stats_tq, &ha->stats_task); 633 taskqueue_free(ha->stats_tq); 634 } 635 636 ql_del_cdev(ha); 637 638 if (ha->flags.qla_watchdog_active) { 639 ha->qla_watchdog_exit = 1; 640 641 while (ha->qla_watchdog_exited == 0) 642 qla_mdelay(__func__, 1); 643 } 644 645 if (ha->flags.qla_callout_init) 646 callout_stop(&ha->tx_callout); 647 648 if (ha->ifp != NULL) 649 ether_ifdetach(ha->ifp); 650 651 ql_free_drvr_state_buffer(ha); 652 ql_free_dma(ha); 653 qla_free_parent_dma_tag(ha); 654 655 if (ha->mbx_handle) 656 (void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle); 657 658 if (ha->mbx_irq) 659 (void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid, 660 ha->mbx_irq); 661 662 for (i = 0; i < ha->hw.num_sds_rings; i++) { 663 664 if (ha->irq_vec[i].handle) { 665 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, 666 ha->irq_vec[i].handle); 667 } 668 669 if (ha->irq_vec[i].irq) { 670 (void)bus_release_resource(dev, SYS_RES_IRQ, 671 ha->irq_vec[i].irq_rid, 672 ha->irq_vec[i].irq); 673 } 674 675 qla_free_tx_br(ha, &ha->tx_fp[i]); 676 } 677 qla_destroy_fp_taskqueues(ha); 678 679 if (ha->msix_count) 680 pci_release_msi(dev); 681 682 // if (ha->flags.lock_init) { 683 // mtx_destroy(&ha->hw_lock); 684 // } 685 686 if (ha->pci_reg) 687 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, 688 ha->pci_reg); 689 690 if (ha->pci_reg1) 691 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1, 692 ha->pci_reg1); 693 694 return; 695 } 696 697 /* 698 * DMA Related Functions 699 */ 700 701 static void 702 qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 703 { 704 *((bus_addr_t *)arg) = 0; 705 706 if (error) { 707 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 708 return; 709 } 710 711 *((bus_addr_t *)arg) = segs[0].ds_addr; 712 713 return; 714 } 715 716 int 717 ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) 718 { 719 int ret = 0; 720 device_t dev; 721 bus_addr_t b_addr; 722 723 dev = ha->pci_dev; 724 725 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 726 727 ret = bus_dma_tag_create( 728 ha->parent_tag,/* parent */ 729 dma_buf->alignment, 730 ((bus_size_t)(1ULL << 32)),/* boundary */ 731 BUS_SPACE_MAXADDR, /* lowaddr */ 732 BUS_SPACE_MAXADDR, /* highaddr */ 733 NULL, NULL, /* filter, filterarg */ 734 dma_buf->size, /* maxsize */ 735 1, /* nsegments */ 736 dma_buf->size, /* maxsegsize */ 737 0, /* flags */ 738 NULL, NULL, /* lockfunc, lockarg */ 739 &dma_buf->dma_tag); 740 741 if (ret) { 742 device_printf(dev, "%s: could not create dma tag\n", __func__); 743 goto ql_alloc_dmabuf_exit; 744 } 745 ret = bus_dmamem_alloc(dma_buf->dma_tag, 746 (void **)&dma_buf->dma_b, 747 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 748 &dma_buf->dma_map); 749 if (ret) { 750 bus_dma_tag_destroy(dma_buf->dma_tag); 751 device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__); 752 goto ql_alloc_dmabuf_exit; 753 } 754 755 ret = bus_dmamap_load(dma_buf->dma_tag, 756 dma_buf->dma_map, 757 dma_buf->dma_b, 758 dma_buf->size, 759 qla_dmamap_callback, 760 &b_addr, BUS_DMA_NOWAIT); 761 762 if (ret || !b_addr) { 763 bus_dma_tag_destroy(dma_buf->dma_tag); 764 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 765 dma_buf->dma_map); 766 ret = -1; 767 goto ql_alloc_dmabuf_exit; 768 } 769 770 dma_buf->dma_addr = b_addr; 771 772 ql_alloc_dmabuf_exit: 773 QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n", 774 __func__, ret, (void *)dma_buf->dma_tag, 775 (void *)dma_buf->dma_map, (void *)dma_buf->dma_b, 776 dma_buf->size)); 777 778 return ret; 779 } 780 781 void 782 ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) 783 { 784 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 785 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 786 bus_dma_tag_destroy(dma_buf->dma_tag); 787 } 788 789 static int 790 qla_alloc_parent_dma_tag(qla_host_t *ha) 791 { 792 int ret; 793 device_t dev; 794 795 dev = ha->pci_dev; 796 797 /* 798 * Allocate parent DMA Tag 799 */ 800 ret = bus_dma_tag_create( 801 bus_get_dma_tag(dev), /* parent */ 802 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 803 BUS_SPACE_MAXADDR, /* lowaddr */ 804 BUS_SPACE_MAXADDR, /* highaddr */ 805 NULL, NULL, /* filter, filterarg */ 806 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 807 0, /* nsegments */ 808 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 809 0, /* flags */ 810 NULL, NULL, /* lockfunc, lockarg */ 811 &ha->parent_tag); 812 813 if (ret) { 814 device_printf(dev, "%s: could not create parent dma tag\n", 815 __func__); 816 return (-1); 817 } 818 819 ha->flags.parent_tag = 1; 820 821 return (0); 822 } 823 824 static void 825 qla_free_parent_dma_tag(qla_host_t *ha) 826 { 827 if (ha->flags.parent_tag) { 828 bus_dma_tag_destroy(ha->parent_tag); 829 ha->flags.parent_tag = 0; 830 } 831 } 832 833 /* 834 * Name: qla_init_ifnet 835 * Function: Creates the Network Device Interface and Registers it with the O.S 836 */ 837 838 static void 839 qla_init_ifnet(device_t dev, qla_host_t *ha) 840 { 841 struct ifnet *ifp; 842 843 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 844 845 ifp = ha->ifp = if_alloc(IFT_ETHER); 846 847 if (ifp == NULL) 848 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 849 850 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 851 852 ifp->if_baudrate = IF_Gbps(10); 853 ifp->if_capabilities = IFCAP_LINKSTATE; 854 ifp->if_mtu = ETHERMTU; 855 856 ifp->if_init = qla_init; 857 ifp->if_softc = ha; 858 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 859 ifp->if_ioctl = qla_ioctl; 860 861 ifp->if_transmit = qla_transmit; 862 ifp->if_qflush = qla_qflush; 863 864 IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha)); 865 ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha); 866 IFQ_SET_READY(&ifp->if_snd); 867 868 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 869 870 ether_ifattach(ifp, qla_get_mac_addr(ha)); 871 872 ifp->if_capabilities |= IFCAP_HWCSUM | 873 IFCAP_TSO4 | 874 IFCAP_JUMBO_MTU | 875 IFCAP_VLAN_HWTAGGING | 876 IFCAP_VLAN_MTU | 877 IFCAP_VLAN_HWTSO | 878 IFCAP_LRO; 879 880 ifp->if_capenable = ifp->if_capabilities; 881 882 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 883 884 ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status); 885 886 ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0, 887 NULL); 888 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 889 890 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 891 892 QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); 893 894 return; 895 } 896 897 static void 898 qla_init_locked(qla_host_t *ha) 899 { 900 struct ifnet *ifp = ha->ifp; 901 902 qla_stop(ha); 903 904 if (qla_alloc_xmt_bufs(ha) != 0) 905 return; 906 907 qla_confirm_9kb_enable(ha); 908 909 if (qla_alloc_rcv_bufs(ha) != 0) 910 return; 911 912 bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN); 913 914 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO; 915 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6; 916 917 ha->stop_rcv = 0; 918 if (ql_init_hw_if(ha) == 0) { 919 ifp = ha->ifp; 920 ifp->if_drv_flags |= IFF_DRV_RUNNING; 921 ha->qla_watchdog_pause = 0; 922 ha->hw_vlan_tx_frames = 0; 923 ha->tx_tso_frames = 0; 924 ha->qla_interface_up = 1; 925 ql_update_link_state(ha); 926 } 927 928 return; 929 } 930 931 static void 932 qla_init(void *arg) 933 { 934 qla_host_t *ha; 935 936 ha = (qla_host_t *)arg; 937 938 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 939 940 if (QLA_LOCK(ha, __func__, -1, 0) != 0) 941 return; 942 943 qla_init_locked(ha); 944 945 QLA_UNLOCK(ha, __func__); 946 947 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 948 } 949 950 static int 951 qla_set_multi(qla_host_t *ha, uint32_t add_multi) 952 { 953 uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN]; 954 struct ifmultiaddr *ifma; 955 int mcnt = 0; 956 struct ifnet *ifp = ha->ifp; 957 int ret = 0; 958 959 if_maddr_rlock(ifp); 960 961 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 962 963 if (ifma->ifma_addr->sa_family != AF_LINK) 964 continue; 965 966 if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS) 967 break; 968 969 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), 970 &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN); 971 972 mcnt++; 973 } 974 975 if_maddr_runlock(ifp); 976 977 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 978 QLA_LOCK_NO_SLEEP) != 0) 979 return (-1); 980 981 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 982 983 if (!add_multi) { 984 ret = qla_hw_del_all_mcast(ha); 985 986 if (ret) 987 device_printf(ha->pci_dev, 988 "%s: qla_hw_del_all_mcast() failed\n", 989 __func__); 990 } 991 992 if (!ret) 993 ret = ql_hw_set_multi(ha, mta, mcnt, 1); 994 995 } 996 997 QLA_UNLOCK(ha, __func__); 998 999 return (ret); 1000 } 1001 1002 static int 1003 qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1004 { 1005 int ret = 0; 1006 struct ifreq *ifr = (struct ifreq *)data; 1007 struct ifaddr *ifa = (struct ifaddr *)data; 1008 qla_host_t *ha; 1009 1010 ha = (qla_host_t *)ifp->if_softc; 1011 1012 switch (cmd) { 1013 case SIOCSIFADDR: 1014 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n", 1015 __func__, cmd)); 1016 1017 if (ifa->ifa_addr->sa_family == AF_INET) { 1018 1019 ret = QLA_LOCK(ha, __func__, 1020 QLA_LOCK_DEFAULT_MS_TIMEOUT, 1021 QLA_LOCK_NO_SLEEP); 1022 if (ret) 1023 break; 1024 1025 ifp->if_flags |= IFF_UP; 1026 1027 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 1028 qla_init_locked(ha); 1029 } 1030 1031 QLA_UNLOCK(ha, __func__); 1032 QL_DPRINT4(ha, (ha->pci_dev, 1033 "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 1034 __func__, cmd, 1035 ntohl(IA_SIN(ifa)->sin_addr.s_addr))); 1036 1037 arp_ifinit(ifp, ifa); 1038 } else { 1039 ether_ioctl(ifp, cmd, data); 1040 } 1041 break; 1042 1043 case SIOCSIFMTU: 1044 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n", 1045 __func__, cmd)); 1046 1047 if (ifr->ifr_mtu > QLA_MAX_MTU) { 1048 ret = EINVAL; 1049 } else { 1050 ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 1051 QLA_LOCK_NO_SLEEP); 1052 1053 if (ret) 1054 break; 1055 1056 ifp->if_mtu = ifr->ifr_mtu; 1057 ha->max_frame_size = 1058 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 1059 1060 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1061 qla_init_locked(ha); 1062 } 1063 1064 if (ifp->if_mtu > ETHERMTU) 1065 ha->std_replenish = QL_JUMBO_REPLENISH_THRES; 1066 else 1067 ha->std_replenish = QL_STD_REPLENISH_THRES; 1068 1069 1070 QLA_UNLOCK(ha, __func__); 1071 } 1072 1073 break; 1074 1075 case SIOCSIFFLAGS: 1076 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n", 1077 __func__, cmd)); 1078 1079 ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 1080 QLA_LOCK_NO_SLEEP); 1081 1082 if (ret) 1083 break; 1084 1085 if (ifp->if_flags & IFF_UP) { 1086 1087 ha->max_frame_size = ifp->if_mtu + 1088 ETHER_HDR_LEN + ETHER_CRC_LEN; 1089 qla_init_locked(ha); 1090 1091 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1092 if ((ifp->if_flags ^ ha->if_flags) & 1093 IFF_PROMISC) { 1094 ret = ql_set_promisc(ha); 1095 } else if ((ifp->if_flags ^ ha->if_flags) & 1096 IFF_ALLMULTI) { 1097 ret = ql_set_allmulti(ha); 1098 } 1099 } 1100 } else { 1101 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1102 qla_stop(ha); 1103 ha->if_flags = ifp->if_flags; 1104 } 1105 1106 QLA_UNLOCK(ha, __func__); 1107 break; 1108 1109 case SIOCADDMULTI: 1110 QL_DPRINT4(ha, (ha->pci_dev, 1111 "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd)); 1112 1113 if (qla_set_multi(ha, 1)) 1114 ret = EINVAL; 1115 break; 1116 1117 case SIOCDELMULTI: 1118 QL_DPRINT4(ha, (ha->pci_dev, 1119 "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd)); 1120 1121 if (qla_set_multi(ha, 0)) 1122 ret = EINVAL; 1123 break; 1124 1125 case SIOCSIFMEDIA: 1126 case SIOCGIFMEDIA: 1127 QL_DPRINT4(ha, (ha->pci_dev, 1128 "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", 1129 __func__, cmd)); 1130 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); 1131 break; 1132 1133 case SIOCSIFCAP: 1134 { 1135 int mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1136 1137 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n", 1138 __func__, cmd)); 1139 1140 if (mask & IFCAP_HWCSUM) 1141 ifp->if_capenable ^= IFCAP_HWCSUM; 1142 if (mask & IFCAP_TSO4) 1143 ifp->if_capenable ^= IFCAP_TSO4; 1144 if (mask & IFCAP_TSO6) 1145 ifp->if_capenable ^= IFCAP_TSO6; 1146 if (mask & IFCAP_VLAN_HWTAGGING) 1147 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1148 if (mask & IFCAP_VLAN_HWTSO) 1149 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1150 if (mask & IFCAP_LRO) 1151 ifp->if_capenable ^= IFCAP_LRO; 1152 1153 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1154 ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 1155 QLA_LOCK_NO_SLEEP); 1156 1157 if (ret) 1158 break; 1159 1160 qla_init_locked(ha); 1161 1162 QLA_UNLOCK(ha, __func__); 1163 1164 } 1165 VLAN_CAPABILITIES(ifp); 1166 break; 1167 } 1168 1169 default: 1170 QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n", 1171 __func__, cmd)); 1172 ret = ether_ioctl(ifp, cmd, data); 1173 break; 1174 } 1175 1176 return (ret); 1177 } 1178 1179 static int 1180 qla_media_change(struct ifnet *ifp) 1181 { 1182 qla_host_t *ha; 1183 struct ifmedia *ifm; 1184 int ret = 0; 1185 1186 ha = (qla_host_t *)ifp->if_softc; 1187 1188 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1189 1190 ifm = &ha->media; 1191 1192 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1193 ret = EINVAL; 1194 1195 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1196 1197 return (ret); 1198 } 1199 1200 static void 1201 qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1202 { 1203 qla_host_t *ha; 1204 1205 ha = (qla_host_t *)ifp->if_softc; 1206 1207 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1208 1209 ifmr->ifm_status = IFM_AVALID; 1210 ifmr->ifm_active = IFM_ETHER; 1211 1212 ql_update_link_state(ha); 1213 if (ha->hw.link_up) { 1214 ifmr->ifm_status |= IFM_ACTIVE; 1215 ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha)); 1216 } 1217 1218 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,\ 1219 (ha->hw.link_up ? "link_up" : "link_down"))); 1220 1221 return; 1222 } 1223 1224 1225 static int 1226 qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx, 1227 uint32_t iscsi_pdu) 1228 { 1229 bus_dma_segment_t segs[QLA_MAX_SEGMENTS]; 1230 bus_dmamap_t map; 1231 int nsegs; 1232 int ret = -1; 1233 uint32_t tx_idx; 1234 struct mbuf *m_head = *m_headp; 1235 1236 QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1237 1238 tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next; 1239 1240 if (NULL != ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head) { 1241 QL_ASSERT(ha, 0, ("%s [%d]: txr_idx = %d tx_idx = %d "\ 1242 "mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,\ 1243 ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head)); 1244 if (m_head) 1245 m_freem(m_head); 1246 *m_headp = NULL; 1247 return (ret); 1248 } 1249 1250 map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map; 1251 1252 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 1253 BUS_DMA_NOWAIT); 1254 1255 if (ret == EFBIG) { 1256 1257 struct mbuf *m; 1258 1259 QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__, 1260 m_head->m_pkthdr.len)); 1261 1262 m = m_defrag(m_head, M_NOWAIT); 1263 if (m == NULL) { 1264 ha->err_tx_defrag++; 1265 m_freem(m_head); 1266 *m_headp = NULL; 1267 device_printf(ha->pci_dev, 1268 "%s: m_defrag() = NULL [%d]\n", 1269 __func__, ret); 1270 return (ENOBUFS); 1271 } 1272 m_head = m; 1273 *m_headp = m_head; 1274 1275 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 1276 segs, &nsegs, BUS_DMA_NOWAIT))) { 1277 1278 ha->err_tx_dmamap_load++; 1279 1280 device_printf(ha->pci_dev, 1281 "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n", 1282 __func__, ret, m_head->m_pkthdr.len); 1283 1284 if (ret != ENOMEM) { 1285 m_freem(m_head); 1286 *m_headp = NULL; 1287 } 1288 return (ret); 1289 } 1290 1291 } else if (ret) { 1292 1293 ha->err_tx_dmamap_load++; 1294 1295 device_printf(ha->pci_dev, 1296 "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n", 1297 __func__, ret, m_head->m_pkthdr.len); 1298 1299 if (ret != ENOMEM) { 1300 m_freem(m_head); 1301 *m_headp = NULL; 1302 } 1303 return (ret); 1304 } 1305 1306 QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet")); 1307 1308 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 1309 1310 if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx, 1311 iscsi_pdu))) { 1312 ha->tx_ring[txr_idx].count++; 1313 if (iscsi_pdu) 1314 ha->tx_ring[txr_idx].iscsi_pkt_count++; 1315 ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head; 1316 } else { 1317 bus_dmamap_unload(ha->tx_tag, map); 1318 if (ret == EINVAL) { 1319 if (m_head) 1320 m_freem(m_head); 1321 *m_headp = NULL; 1322 } 1323 } 1324 1325 QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1326 return (ret); 1327 } 1328 1329 static int 1330 qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp) 1331 { 1332 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 1333 "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx); 1334 1335 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 1336 1337 fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF, 1338 M_NOWAIT, &fp->tx_mtx); 1339 if (fp->tx_br == NULL) { 1340 QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for " 1341 " fp[%d, %d]\n", ha->pci_func, fp->txr_idx)); 1342 return (-ENOMEM); 1343 } 1344 return 0; 1345 } 1346 1347 static void 1348 qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp) 1349 { 1350 struct mbuf *mp; 1351 struct ifnet *ifp = ha->ifp; 1352 1353 if (mtx_initialized(&fp->tx_mtx)) { 1354 1355 if (fp->tx_br != NULL) { 1356 1357 mtx_lock(&fp->tx_mtx); 1358 1359 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 1360 m_freem(mp); 1361 } 1362 1363 mtx_unlock(&fp->tx_mtx); 1364 1365 buf_ring_free(fp->tx_br, M_DEVBUF); 1366 fp->tx_br = NULL; 1367 } 1368 mtx_destroy(&fp->tx_mtx); 1369 } 1370 return; 1371 } 1372 1373 static void 1374 qla_fp_taskqueue(void *context, int pending) 1375 { 1376 qla_tx_fp_t *fp; 1377 qla_host_t *ha; 1378 struct ifnet *ifp; 1379 struct mbuf *mp; 1380 int ret; 1381 uint32_t txr_idx; 1382 uint32_t iscsi_pdu = 0; 1383 uint32_t rx_pkts_left = -1; 1384 1385 fp = context; 1386 1387 if (fp == NULL) 1388 return; 1389 1390 ha = (qla_host_t *)fp->ha; 1391 1392 ifp = ha->ifp; 1393 1394 txr_idx = fp->txr_idx; 1395 1396 mtx_lock(&fp->tx_mtx); 1397 1398 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || (!ha->hw.link_up)) { 1399 mtx_unlock(&fp->tx_mtx); 1400 goto qla_fp_taskqueue_exit; 1401 } 1402 1403 while (rx_pkts_left && !ha->stop_rcv && 1404 (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 1405 rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64); 1406 1407 #ifdef QL_ENABLE_ISCSI_TLV 1408 ql_hw_tx_done_locked(ha, fp->txr_idx); 1409 ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1))); 1410 #else 1411 ql_hw_tx_done_locked(ha, fp->txr_idx); 1412 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ 1413 1414 mp = drbr_peek(ifp, fp->tx_br); 1415 1416 while (mp != NULL) { 1417 1418 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) { 1419 #ifdef QL_ENABLE_ISCSI_TLV 1420 if (ql_iscsi_pdu(ha, mp) == 0) { 1421 txr_idx = txr_idx + 1422 (ha->hw.num_tx_rings >> 1); 1423 iscsi_pdu = 1; 1424 } else { 1425 iscsi_pdu = 0; 1426 txr_idx = fp->txr_idx; 1427 } 1428 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ 1429 } 1430 1431 ret = qla_send(ha, &mp, txr_idx, iscsi_pdu); 1432 1433 if (ret) { 1434 if (mp != NULL) 1435 drbr_putback(ifp, fp->tx_br, mp); 1436 else { 1437 drbr_advance(ifp, fp->tx_br); 1438 } 1439 1440 mtx_unlock(&fp->tx_mtx); 1441 1442 goto qla_fp_taskqueue_exit0; 1443 } else { 1444 drbr_advance(ifp, fp->tx_br); 1445 } 1446 1447 /* Send a copy of the frame to the BPF listener */ 1448 ETHER_BPF_MTAP(ifp, mp); 1449 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1450 break; 1451 1452 mp = drbr_peek(ifp, fp->tx_br); 1453 } 1454 } 1455 mtx_unlock(&fp->tx_mtx); 1456 1457 qla_fp_taskqueue_exit0: 1458 1459 if (rx_pkts_left || ((mp != NULL) && ret)) { 1460 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 1461 } else { 1462 if (!ha->stop_rcv) { 1463 QL_ENABLE_INTERRUPTS(ha, fp->txr_idx); 1464 } 1465 } 1466 1467 qla_fp_taskqueue_exit: 1468 1469 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); 1470 return; 1471 } 1472 1473 static int 1474 qla_create_fp_taskqueues(qla_host_t *ha) 1475 { 1476 int i; 1477 uint8_t tq_name[32]; 1478 1479 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1480 1481 qla_tx_fp_t *fp = &ha->tx_fp[i]; 1482 1483 bzero(tq_name, sizeof (tq_name)); 1484 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i); 1485 1486 TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp); 1487 1488 fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT, 1489 taskqueue_thread_enqueue, 1490 &fp->fp_taskqueue); 1491 1492 if (fp->fp_taskqueue == NULL) 1493 return (-1); 1494 1495 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s", 1496 tq_name); 1497 1498 QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__, 1499 fp->fp_taskqueue)); 1500 } 1501 1502 return (0); 1503 } 1504 1505 static void 1506 qla_destroy_fp_taskqueues(qla_host_t *ha) 1507 { 1508 int i; 1509 1510 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1511 1512 qla_tx_fp_t *fp = &ha->tx_fp[i]; 1513 1514 if (fp->fp_taskqueue != NULL) { 1515 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 1516 taskqueue_free(fp->fp_taskqueue); 1517 fp->fp_taskqueue = NULL; 1518 } 1519 } 1520 return; 1521 } 1522 1523 static void 1524 qla_drain_fp_taskqueues(qla_host_t *ha) 1525 { 1526 int i; 1527 1528 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1529 qla_tx_fp_t *fp = &ha->tx_fp[i]; 1530 1531 if (fp->fp_taskqueue != NULL) { 1532 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 1533 } 1534 } 1535 return; 1536 } 1537 1538 static int 1539 qla_transmit(struct ifnet *ifp, struct mbuf *mp) 1540 { 1541 qla_host_t *ha = (qla_host_t *)ifp->if_softc; 1542 qla_tx_fp_t *fp; 1543 int rss_id = 0; 1544 int ret = 0; 1545 1546 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1547 1548 #if __FreeBSD_version >= 1100000 1549 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) 1550 #else 1551 if (mp->m_flags & M_FLOWID) 1552 #endif 1553 rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) % 1554 ha->hw.num_sds_rings; 1555 fp = &ha->tx_fp[rss_id]; 1556 1557 if (fp->tx_br == NULL) { 1558 ret = EINVAL; 1559 goto qla_transmit_exit; 1560 } 1561 1562 if (mp != NULL) { 1563 ret = drbr_enqueue(ifp, fp->tx_br, mp); 1564 } 1565 1566 if (fp->fp_taskqueue != NULL) 1567 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 1568 1569 ret = 0; 1570 1571 qla_transmit_exit: 1572 1573 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); 1574 return ret; 1575 } 1576 1577 static void 1578 qla_qflush(struct ifnet *ifp) 1579 { 1580 int i; 1581 qla_tx_fp_t *fp; 1582 struct mbuf *mp; 1583 qla_host_t *ha; 1584 1585 ha = (qla_host_t *)ifp->if_softc; 1586 1587 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1588 1589 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1590 1591 fp = &ha->tx_fp[i]; 1592 1593 if (fp == NULL) 1594 continue; 1595 1596 if (fp->tx_br) { 1597 mtx_lock(&fp->tx_mtx); 1598 1599 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 1600 m_freem(mp); 1601 } 1602 mtx_unlock(&fp->tx_mtx); 1603 } 1604 } 1605 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1606 1607 return; 1608 } 1609 1610 static void 1611 qla_stop(qla_host_t *ha) 1612 { 1613 struct ifnet *ifp = ha->ifp; 1614 device_t dev; 1615 int i = 0; 1616 1617 dev = ha->pci_dev; 1618 1619 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1620 ha->qla_watchdog_pause = 1; 1621 1622 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1623 qla_tx_fp_t *fp; 1624 1625 fp = &ha->tx_fp[i]; 1626 1627 if (fp == NULL) 1628 continue; 1629 1630 if (fp->tx_br != NULL) { 1631 mtx_lock(&fp->tx_mtx); 1632 mtx_unlock(&fp->tx_mtx); 1633 } 1634 } 1635 1636 while (!ha->qla_watchdog_paused) 1637 qla_mdelay(__func__, 1); 1638 1639 ha->qla_interface_up = 0; 1640 1641 qla_drain_fp_taskqueues(ha); 1642 1643 ql_del_hw_if(ha); 1644 1645 qla_free_xmt_bufs(ha); 1646 qla_free_rcv_bufs(ha); 1647 1648 return; 1649 } 1650 1651 /* 1652 * Buffer Management Functions for Transmit and Receive Rings 1653 */ 1654 static int 1655 qla_alloc_xmt_bufs(qla_host_t *ha) 1656 { 1657 int ret = 0; 1658 uint32_t i, j; 1659 qla_tx_buf_t *txb; 1660 1661 if (bus_dma_tag_create(NULL, /* parent */ 1662 1, 0, /* alignment, bounds */ 1663 BUS_SPACE_MAXADDR, /* lowaddr */ 1664 BUS_SPACE_MAXADDR, /* highaddr */ 1665 NULL, NULL, /* filter, filterarg */ 1666 QLA_MAX_TSO_FRAME_SIZE, /* maxsize */ 1667 QLA_MAX_SEGMENTS, /* nsegments */ 1668 PAGE_SIZE, /* maxsegsize */ 1669 BUS_DMA_ALLOCNOW, /* flags */ 1670 NULL, /* lockfunc */ 1671 NULL, /* lockfuncarg */ 1672 &ha->tx_tag)) { 1673 device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n", 1674 __func__); 1675 return (ENOMEM); 1676 } 1677 1678 for (i = 0; i < ha->hw.num_tx_rings; i++) { 1679 bzero((void *)ha->tx_ring[i].tx_buf, 1680 (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); 1681 } 1682 1683 for (j = 0; j < ha->hw.num_tx_rings; j++) { 1684 for (i = 0; i < NUM_TX_DESCRIPTORS; i++) { 1685 1686 txb = &ha->tx_ring[j].tx_buf[i]; 1687 1688 if ((ret = bus_dmamap_create(ha->tx_tag, 1689 BUS_DMA_NOWAIT, &txb->map))) { 1690 1691 ha->err_tx_dmamap_create++; 1692 device_printf(ha->pci_dev, 1693 "%s: bus_dmamap_create failed[%d]\n", 1694 __func__, ret); 1695 1696 qla_free_xmt_bufs(ha); 1697 1698 return (ret); 1699 } 1700 } 1701 } 1702 1703 return 0; 1704 } 1705 1706 /* 1707 * Release mbuf after it sent on the wire 1708 */ 1709 static void 1710 qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb) 1711 { 1712 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1713 1714 if (txb->m_head) { 1715 bus_dmamap_sync(ha->tx_tag, txb->map, 1716 BUS_DMASYNC_POSTWRITE); 1717 1718 bus_dmamap_unload(ha->tx_tag, txb->map); 1719 1720 m_freem(txb->m_head); 1721 txb->m_head = NULL; 1722 1723 bus_dmamap_destroy(ha->tx_tag, txb->map); 1724 txb->map = NULL; 1725 } 1726 1727 if (txb->map) { 1728 bus_dmamap_unload(ha->tx_tag, txb->map); 1729 bus_dmamap_destroy(ha->tx_tag, txb->map); 1730 txb->map = NULL; 1731 } 1732 1733 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1734 } 1735 1736 static void 1737 qla_free_xmt_bufs(qla_host_t *ha) 1738 { 1739 int i, j; 1740 1741 for (j = 0; j < ha->hw.num_tx_rings; j++) { 1742 for (i = 0; i < NUM_TX_DESCRIPTORS; i++) 1743 qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]); 1744 } 1745 1746 if (ha->tx_tag != NULL) { 1747 bus_dma_tag_destroy(ha->tx_tag); 1748 ha->tx_tag = NULL; 1749 } 1750 1751 for (i = 0; i < ha->hw.num_tx_rings; i++) { 1752 bzero((void *)ha->tx_ring[i].tx_buf, 1753 (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); 1754 } 1755 return; 1756 } 1757 1758 1759 static int 1760 qla_alloc_rcv_std(qla_host_t *ha) 1761 { 1762 int i, j, k, r, ret = 0; 1763 qla_rx_buf_t *rxb; 1764 qla_rx_ring_t *rx_ring; 1765 1766 for (r = 0; r < ha->hw.num_rds_rings; r++) { 1767 1768 rx_ring = &ha->rx_ring[r]; 1769 1770 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1771 1772 rxb = &rx_ring->rx_buf[i]; 1773 1774 ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, 1775 &rxb->map); 1776 1777 if (ret) { 1778 device_printf(ha->pci_dev, 1779 "%s: dmamap[%d, %d] failed\n", 1780 __func__, r, i); 1781 1782 for (k = 0; k < r; k++) { 1783 for (j = 0; j < NUM_RX_DESCRIPTORS; 1784 j++) { 1785 rxb = &ha->rx_ring[k].rx_buf[j]; 1786 bus_dmamap_destroy(ha->rx_tag, 1787 rxb->map); 1788 } 1789 } 1790 1791 for (j = 0; j < i; j++) { 1792 bus_dmamap_destroy(ha->rx_tag, 1793 rx_ring->rx_buf[j].map); 1794 } 1795 goto qla_alloc_rcv_std_err; 1796 } 1797 } 1798 } 1799 1800 qla_init_hw_rcv_descriptors(ha); 1801 1802 1803 for (r = 0; r < ha->hw.num_rds_rings; r++) { 1804 1805 rx_ring = &ha->rx_ring[r]; 1806 1807 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1808 rxb = &rx_ring->rx_buf[i]; 1809 rxb->handle = i; 1810 if (!(ret = ql_get_mbuf(ha, rxb, NULL))) { 1811 /* 1812 * set the physical address in the 1813 * corresponding descriptor entry in the 1814 * receive ring/queue for the hba 1815 */ 1816 qla_set_hw_rcv_desc(ha, r, i, rxb->handle, 1817 rxb->paddr, 1818 (rxb->m_head)->m_pkthdr.len); 1819 } else { 1820 device_printf(ha->pci_dev, 1821 "%s: ql_get_mbuf [%d, %d] failed\n", 1822 __func__, r, i); 1823 bus_dmamap_destroy(ha->rx_tag, rxb->map); 1824 goto qla_alloc_rcv_std_err; 1825 } 1826 } 1827 } 1828 return 0; 1829 1830 qla_alloc_rcv_std_err: 1831 return (-1); 1832 } 1833 1834 static void 1835 qla_free_rcv_std(qla_host_t *ha) 1836 { 1837 int i, r; 1838 qla_rx_buf_t *rxb; 1839 1840 for (r = 0; r < ha->hw.num_rds_rings; r++) { 1841 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1842 rxb = &ha->rx_ring[r].rx_buf[i]; 1843 if (rxb->m_head != NULL) { 1844 bus_dmamap_unload(ha->rx_tag, rxb->map); 1845 bus_dmamap_destroy(ha->rx_tag, rxb->map); 1846 m_freem(rxb->m_head); 1847 rxb->m_head = NULL; 1848 } 1849 } 1850 } 1851 return; 1852 } 1853 1854 static int 1855 qla_alloc_rcv_bufs(qla_host_t *ha) 1856 { 1857 int i, ret = 0; 1858 1859 if (bus_dma_tag_create(NULL, /* parent */ 1860 1, 0, /* alignment, bounds */ 1861 BUS_SPACE_MAXADDR, /* lowaddr */ 1862 BUS_SPACE_MAXADDR, /* highaddr */ 1863 NULL, NULL, /* filter, filterarg */ 1864 MJUM9BYTES, /* maxsize */ 1865 1, /* nsegments */ 1866 MJUM9BYTES, /* maxsegsize */ 1867 BUS_DMA_ALLOCNOW, /* flags */ 1868 NULL, /* lockfunc */ 1869 NULL, /* lockfuncarg */ 1870 &ha->rx_tag)) { 1871 1872 device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n", 1873 __func__); 1874 1875 return (ENOMEM); 1876 } 1877 1878 bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS)); 1879 1880 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1881 ha->hw.sds[i].sdsr_next = 0; 1882 ha->hw.sds[i].rxb_free = NULL; 1883 ha->hw.sds[i].rx_free = 0; 1884 } 1885 1886 ret = qla_alloc_rcv_std(ha); 1887 1888 return (ret); 1889 } 1890 1891 static void 1892 qla_free_rcv_bufs(qla_host_t *ha) 1893 { 1894 int i; 1895 1896 qla_free_rcv_std(ha); 1897 1898 if (ha->rx_tag != NULL) { 1899 bus_dma_tag_destroy(ha->rx_tag); 1900 ha->rx_tag = NULL; 1901 } 1902 1903 bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS)); 1904 1905 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1906 ha->hw.sds[i].sdsr_next = 0; 1907 ha->hw.sds[i].rxb_free = NULL; 1908 ha->hw.sds[i].rx_free = 0; 1909 } 1910 1911 return; 1912 } 1913 1914 int 1915 ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp) 1916 { 1917 register struct mbuf *mp = nmp; 1918 struct ifnet *ifp; 1919 int ret = 0; 1920 uint32_t offset; 1921 bus_dma_segment_t segs[1]; 1922 int nsegs, mbuf_size; 1923 1924 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1925 1926 ifp = ha->ifp; 1927 1928 if (ha->hw.enable_9kb) 1929 mbuf_size = MJUM9BYTES; 1930 else 1931 mbuf_size = MCLBYTES; 1932 1933 if (mp == NULL) { 1934 1935 if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE)) 1936 return(-1); 1937 1938 if (ha->hw.enable_9kb) 1939 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size); 1940 else 1941 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1942 1943 if (mp == NULL) { 1944 ha->err_m_getcl++; 1945 ret = ENOBUFS; 1946 device_printf(ha->pci_dev, 1947 "%s: m_getcl failed\n", __func__); 1948 goto exit_ql_get_mbuf; 1949 } 1950 mp->m_len = mp->m_pkthdr.len = mbuf_size; 1951 } else { 1952 mp->m_len = mp->m_pkthdr.len = mbuf_size; 1953 mp->m_data = mp->m_ext.ext_buf; 1954 mp->m_next = NULL; 1955 } 1956 1957 offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL); 1958 if (offset) { 1959 offset = 8 - offset; 1960 m_adj(mp, offset); 1961 } 1962 1963 /* 1964 * Using memory from the mbuf cluster pool, invoke the bus_dma 1965 * machinery to arrange the memory mapping. 1966 */ 1967 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map, 1968 mp, segs, &nsegs, BUS_DMA_NOWAIT); 1969 rxb->paddr = segs[0].ds_addr; 1970 1971 if (ret || !rxb->paddr || (nsegs != 1)) { 1972 m_free(mp); 1973 rxb->m_head = NULL; 1974 device_printf(ha->pci_dev, 1975 "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 1976 __func__, ret, (long long unsigned int)rxb->paddr, 1977 nsegs); 1978 ret = -1; 1979 goto exit_ql_get_mbuf; 1980 } 1981 rxb->m_head = mp; 1982 bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD); 1983 1984 exit_ql_get_mbuf: 1985 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret)); 1986 return (ret); 1987 } 1988 1989 1990 static void 1991 qla_get_peer(qla_host_t *ha) 1992 { 1993 device_t *peers; 1994 int count, i, slot; 1995 int my_slot = pci_get_slot(ha->pci_dev); 1996 1997 if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count)) 1998 return; 1999 2000 for (i = 0; i < count; i++) { 2001 slot = pci_get_slot(peers[i]); 2002 2003 if ((slot >= 0) && (slot == my_slot) && 2004 (pci_get_device(peers[i]) == 2005 pci_get_device(ha->pci_dev))) { 2006 if (ha->pci_dev != peers[i]) 2007 ha->peer_dev = peers[i]; 2008 } 2009 } 2010 } 2011 2012 static void 2013 qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer) 2014 { 2015 qla_host_t *ha_peer; 2016 2017 if (ha->peer_dev) { 2018 if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) { 2019 2020 ha_peer->msg_from_peer = msg_to_peer; 2021 } 2022 } 2023 } 2024 2025 static void 2026 qla_error_recovery(void *context, int pending) 2027 { 2028 qla_host_t *ha = context; 2029 uint32_t msecs_100 = 100; 2030 struct ifnet *ifp = ha->ifp; 2031 int i = 0; 2032 2033 device_printf(ha->pci_dev, "%s: \n", __func__); 2034 ha->hw.imd_compl = 1; 2035 2036 if (QLA_LOCK(ha, __func__, -1, 0) != 0) 2037 return; 2038 2039 device_printf(ha->pci_dev, "%s: enter\n", __func__); 2040 2041 if (ha->qla_interface_up) { 2042 2043 qla_mdelay(__func__, 300); 2044 2045 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2046 2047 for (i = 0; i < ha->hw.num_sds_rings; i++) { 2048 qla_tx_fp_t *fp; 2049 2050 fp = &ha->tx_fp[i]; 2051 2052 if (fp == NULL) 2053 continue; 2054 2055 if (fp->tx_br != NULL) { 2056 mtx_lock(&fp->tx_mtx); 2057 mtx_unlock(&fp->tx_mtx); 2058 } 2059 } 2060 } 2061 2062 2063 qla_drain_fp_taskqueues(ha); 2064 2065 if ((ha->pci_func & 0x1) == 0) { 2066 2067 if (!ha->msg_from_peer) { 2068 qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET); 2069 2070 while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && 2071 msecs_100--) 2072 qla_mdelay(__func__, 100); 2073 } 2074 2075 ha->msg_from_peer = 0; 2076 2077 if (ha->enable_minidump) 2078 ql_minidump(ha); 2079 2080 (void) ql_init_hw(ha); 2081 2082 if (ha->qla_interface_up) { 2083 qla_free_xmt_bufs(ha); 2084 qla_free_rcv_bufs(ha); 2085 } 2086 2087 qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK); 2088 2089 } else { 2090 if (ha->msg_from_peer == QL_PEER_MSG_RESET) { 2091 2092 ha->msg_from_peer = 0; 2093 2094 qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK); 2095 } else { 2096 qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET); 2097 } 2098 2099 while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--) 2100 qla_mdelay(__func__, 100); 2101 ha->msg_from_peer = 0; 2102 2103 (void) ql_init_hw(ha); 2104 2105 qla_mdelay(__func__, 1000); 2106 2107 if (ha->qla_interface_up) { 2108 qla_free_xmt_bufs(ha); 2109 qla_free_rcv_bufs(ha); 2110 } 2111 } 2112 2113 if (ha->qla_interface_up) { 2114 2115 if (qla_alloc_xmt_bufs(ha) != 0) { 2116 goto qla_error_recovery_exit; 2117 } 2118 qla_confirm_9kb_enable(ha); 2119 2120 if (qla_alloc_rcv_bufs(ha) != 0) { 2121 goto qla_error_recovery_exit; 2122 } 2123 2124 ha->stop_rcv = 0; 2125 2126 if (ql_init_hw_if(ha) == 0) { 2127 ifp = ha->ifp; 2128 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2129 ha->qla_watchdog_pause = 0; 2130 } 2131 } else 2132 ha->qla_watchdog_pause = 0; 2133 2134 qla_error_recovery_exit: 2135 2136 device_printf(ha->pci_dev, "%s: exit\n", __func__); 2137 2138 QLA_UNLOCK(ha, __func__); 2139 2140 callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, 2141 qla_watchdog, ha); 2142 return; 2143 } 2144 2145 static void 2146 qla_async_event(void *context, int pending) 2147 { 2148 qla_host_t *ha = context; 2149 2150 if (QLA_LOCK(ha, __func__, -1, 0) != 0) 2151 return; 2152 2153 if (ha->async_event) { 2154 ha->async_event = 0; 2155 qla_hw_async_event(ha); 2156 } 2157 2158 QLA_UNLOCK(ha, __func__); 2159 2160 return; 2161 } 2162 2163 static void 2164 qla_stats(void *context, int pending) 2165 { 2166 qla_host_t *ha; 2167 2168 ha = context; 2169 2170 ql_get_stats(ha); 2171 return; 2172 } 2173 2174