/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_os.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include <sys/smp.h>

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC       0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8030
#define PCI_PRODUCT_QLOGIC_ISP8030      0x8030
#endif

#define PCI_QLOGIC_ISP8030 \
    ((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)

/*
 * static functions
 */
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);
static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);

static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error);
static void qla_stop(qla_host_t *ha);
static void qla_get_peer(qla_host_t *ha);
static void qla_error_recovery(void *context, int pending);
static void qla_async_event(void *context, int pending);
static void qla_stats(void *context, int pending);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
    uint32_t iscsi_pdu);

/*
 * Hooks to the Operating Systems
 */
static int qla_pci_probe(device_t);
static int qla_pci_attach(device_t);
static int qla_pci_detach(device_t);

static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);

static int qla_transmit(struct ifnet *ifp, struct mbuf *mp);
static void qla_qflush(struct ifnet *ifp);
static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static int qla_create_fp_taskqueues(qla_host_t *ha);
static void qla_destroy_fp_taskqueues(qla_host_t *ha);
static void qla_drain_fp_taskqueues(qla_host_t *ha);

static device_method_t qla_pci_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, qla_pci_probe),
    DEVMETHOD(device_attach, qla_pci_attach),
    DEVMETHOD(device_detach, qla_pci_detach),
    { 0, 0 }
};

static driver_t qla_pci_driver = {
    "ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla83xx_devclass;

DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);

MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
MODULE_DEPEND(qla83xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");

#define QL_STD_REPLENISH_THRES      0
#define QL_JUMBO_REPLENISH_THRES    32

static char dev_str[64];
static char ver_str[64];

/*
 * Name: qla_pci_probe
 * Function: Validate the PCI device to be a QLA80XX device
 */
static int
qla_pci_probe(device_t dev)
{
    switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
    case PCI_QLOGIC_ISP8030:
        snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
            "Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
            QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
            QLA_VERSION_BUILD);
        snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
            QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
            QLA_VERSION_BUILD);
        device_set_desc(dev, dev_str);
        break;
    default:
        return (ENXIO);
    }

    if (bootverbose)
        printf("%s: %s\n", __func__, dev_str);

    return (BUS_PROBE_DEFAULT);
}

static void
qla_add_sysctls(qla_host_t *ha)
{
    device_t dev = ha->pci_dev;

    SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "version", CTLFLAG_RD,
        ver_str, 0, "Driver Version");

    SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "fw_version", CTLFLAG_RD,
        ha->fw_ver_str, 0, "firmware version");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
        (void *)ha, 0,
        qla_sysctl_get_link_status, "I", "Link Status");

    ha->dbg_level = 0;
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "debug", CTLFLAG_RW,
        &ha->dbg_level, ha->dbg_level, "Debug Level");

    ha->enable_minidump = 1;
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "enable_minidump", CTLFLAG_RW,
        &ha->enable_minidump, ha->enable_minidump,
        "Minidump retrieval prior to error recovery "
        "is enabled only when this is set");

    ha->enable_driverstate_dump = 1;
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "enable_driverstate_dump", CTLFLAG_RW,
        &ha->enable_driverstate_dump, ha->enable_driverstate_dump,
        "Driver State retrieval prior to error recovery "
        "is enabled only when this is set");
    ha->enable_error_recovery = 1;
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "enable_error_recovery", CTLFLAG_RW,
        &ha->enable_error_recovery, ha->enable_error_recovery,
        "when set, error recovery is enabled on fatal errors; "
        "otherwise the port is taken offline");

    ha->ms_delay_after_init = 1000;
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "ms_delay_after_init", CTLFLAG_RW,
        &ha->ms_delay_after_init, ha->ms_delay_after_init,
        "millisecond delay after hw_init");

    ha->std_replenish = QL_STD_REPLENISH_THRES;
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "std_replenish", CTLFLAG_RW,
        &ha->std_replenish, ha->std_replenish,
        "Threshold for Replenishing Standard Frames");

    SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "ipv4_lro",
        CTLFLAG_RD, &ha->ipv4_lro,
        "number of ipv4 lro completions");

    SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "ipv6_lro",
        CTLFLAG_RD, &ha->ipv6_lro,
        "number of ipv6 lro completions");

    SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "tx_tso_frames",
        CTLFLAG_RD, &ha->tx_tso_frames,
        "number of Tx TSO Frames");

    SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "hw_vlan_tx_frames",
        CTLFLAG_RD, &ha->hw_vlan_tx_frames,
        "number of Tx VLAN Frames");

    SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "hw_lock_failed",
        CTLFLAG_RD, &ha->hw_lock_failed,
        "number of hw_lock failures");

    return;
}

static void
qla_watchdog(void *arg)
{
    qla_host_t *ha = arg;
    qla_hw_t *hw;
    struct ifnet *ifp;

    hw = &ha->hw;
    ifp = ha->ifp;

    if (ha->qla_watchdog_exit) {
        ha->qla_watchdog_exited = 1;
        return;
    }
    ha->qla_watchdog_exited = 0;

    if (!ha->qla_watchdog_pause) {
        if (!ha->offline &&
            (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
            (ha->msg_from_peer == QL_PEER_MSG_RESET))) {

            ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
            ql_update_link_state(ha);

            if (ha->enable_error_recovery) {
                ha->qla_watchdog_paused = 1;
                ha->qla_watchdog_pause = 1;
                ha->err_inject = 0;
                device_printf(ha->pci_dev,
                    "%s: taskqueue_enqueue(err_task)\n",
                    __func__);
                taskqueue_enqueue(ha->err_tq, &ha->err_task);
            } else {
                if (ifp != NULL)
                    ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                ha->offline = 1;
            }
            return;

        } else {
            if (ha->qla_interface_up) {

                ha->watchdog_ticks++;

                if (ha->watchdog_ticks > 1000)
                    ha->watchdog_ticks = 0;

                if (!ha->watchdog_ticks && QL_RUNNING(ifp)) {
                    taskqueue_enqueue(ha->stats_tq,
                        &ha->stats_task);
                }

                if (ha->async_event) {
                    taskqueue_enqueue(ha->async_event_tq,
                        &ha->async_event_task);
                }

            }
            ha->qla_watchdog_paused = 0;
        }
    } else {
        ha->qla_watchdog_paused = 1;
    }

    callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
        qla_watchdog, ha);
}
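
/*
 * Note on the watchdog above (a summary of the callout logic as written,
 * not new behavior): qla_watchdog() re-arms itself every
 * QLA_WATCHDOG_CALLOUT_TICKS via callout_reset() and never sleeps. Heavy
 * work is deferred to taskqueues: a failed ql_hw_check_health(), a requested
 * recovery, or a peer RESET message enqueues err_task; statistics are
 * gathered roughly once every 1000 callouts via stats_task; pending
 * asynchronous events are pushed to async_event_task. When error recovery
 * is disabled the port is simply marked offline.
 */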
/*
 * Name: qla_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qla_pci_attach(device_t dev)
{
    qla_host_t *ha = NULL;
    uint32_t rsrc_len;
    int i;
    uint32_t num_rcvq = 0;

    if ((ha = device_get_softc(dev)) == NULL) {
        device_printf(dev, "cannot get softc\n");
        return (ENOMEM);
    }

    memset(ha, 0, sizeof (qla_host_t));

    if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
        device_printf(dev, "device is not ISP8030\n");
        return (ENXIO);
    }

    ha->pci_func = pci_get_function(dev) & 0x1;

    ha->pci_dev = dev;

    pci_enable_busmaster(dev);

    ha->reg_rid = PCIR_BAR(0);
    ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
        RF_ACTIVE);

    if (ha->pci_reg == NULL) {
        device_printf(dev, "unable to map any ports\n");
        goto qla_pci_attach_err;
    }

    rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
        ha->reg_rid);

    mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
    mtx_init(&ha->sp_log_lock, "qla83xx_sp_log_lock", MTX_NETWORK_LOCK,
        MTX_DEF);
    ha->flags.lock_init = 1;

    qla_add_sysctls(ha);

    ha->hw.num_sds_rings = MAX_SDS_RINGS;
    ha->hw.num_rds_rings = MAX_RDS_RINGS;
    ha->hw.num_tx_rings = NUM_TX_RINGS;

    ha->reg_rid1 = PCIR_BAR(2);
    ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &ha->reg_rid1, RF_ACTIVE);

    ha->msix_count = pci_msix_count(dev);

    if (ha->msix_count < 1) {
        device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
            ha->msix_count);
        goto qla_pci_attach_err;
    }

    if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
        ha->hw.num_sds_rings = ha->msix_count - 1;
    }

    QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
        " msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha,
        ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg,
        ha->pci_reg1));

    /* initialize hardware */
    if (ql_init_hw(ha)) {
        device_printf(dev, "%s: ql_init_hw failed\n", __func__);
        goto qla_pci_attach_err;
    }

    device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
        ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
        ha->fw_ver_build);
    snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
        ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
        ha->fw_ver_build);

    if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
        device_printf(dev, "%s: qla_get_nic_partition failed\n",
            __func__);
        goto qla_pci_attach_err;
    }
    device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
        " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
        __func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
        ha->pci_reg, ha->pci_reg1, num_rcvq);

    if ((ha->msix_count < 64) || (num_rcvq != 32)) {
        if (ha->hw.num_sds_rings > 15) {
            ha->hw.num_sds_rings = 15;
        }
    }

    ha->hw.num_rds_rings = ha->hw.num_sds_rings;
    ha->hw.num_tx_rings = ha->hw.num_sds_rings;

#ifdef QL_ENABLE_ISCSI_TLV
    ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2;
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

    ql_hw_add_sysctls(ha);

    ha->msix_count = ha->hw.num_sds_rings + 1;

    if (pci_alloc_msix(dev, &ha->msix_count)) {
        device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
            ha->msix_count);
        ha->msix_count = 0;
        goto qla_pci_attach_err;
    }

    ha->mbx_irq_rid = 1;
    ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
        &ha->mbx_irq_rid,
        (RF_ACTIVE | RF_SHAREABLE));
    if (ha->mbx_irq == NULL) {
        device_printf(dev, "could not allocate mbx interrupt\n");
        goto qla_pci_attach_err;
    }
    if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
        NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
        device_printf(dev, "could not setup mbx interrupt\n");
        goto qla_pci_attach_err;
    }

    for (i = 0; i < ha->hw.num_sds_rings; i++) {
        ha->irq_vec[i].sds_idx = i;
        ha->irq_vec[i].ha = ha;
        ha->irq_vec[i].irq_rid = 2 + i;

        ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &ha->irq_vec[i].irq_rid,
            (RF_ACTIVE | RF_SHAREABLE));

        if (ha->irq_vec[i].irq == NULL) {
            device_printf(dev, "could not allocate interrupt\n");
            goto qla_pci_attach_err;
        }
        if (bus_setup_intr(dev, ha->irq_vec[i].irq,
            (INTR_TYPE_NET | INTR_MPSAFE),
            NULL, ql_isr, &ha->irq_vec[i],
            &ha->irq_vec[i].handle)) {
            device_printf(dev, "could not setup interrupt\n");
            goto qla_pci_attach_err;
        }

        ha->tx_fp[i].ha = ha;
        ha->tx_fp[i].txr_idx = i;

        if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) {
            device_printf(dev, "%s: could not allocate tx_br[%d]\n",
                __func__, i);
            goto qla_pci_attach_err;
        }
    }

    if (qla_create_fp_taskqueues(ha) != 0)
        goto qla_pci_attach_err;

    printf("%s: mp_ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
        ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);

    ql_read_mac_addr(ha);

    /* allocate parent dma tag */
    if (qla_alloc_parent_dma_tag(ha)) {
        device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
            __func__);
        goto qla_pci_attach_err;
    }

    /* alloc all dma buffers */
    if (ql_alloc_dma(ha)) {
        device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
        goto qla_pci_attach_err;
    }
    qla_get_peer(ha);

    if (ql_minidump_init(ha) != 0) {
        device_printf(dev, "%s: ql_minidump_init failed\n", __func__);
        goto qla_pci_attach_err;
    }
    ql_alloc_drvr_state_buffer(ha);
    ql_alloc_sp_log_buffer(ha);
    /* create the o.s ethernet interface */
    qla_init_ifnet(dev, ha);

    ha->flags.qla_watchdog_active = 1;
    ha->qla_watchdog_pause = 0;

    callout_init(&ha->tx_callout, TRUE);
    ha->flags.qla_callout_init = 1;

    /* create ioctl device interface */
    if (ql_make_cdev(ha)) {
        device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
        goto qla_pci_attach_err;
    }

    callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
        qla_watchdog, ha);

    TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
    ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT,
        taskqueue_thread_enqueue, &ha->err_tq);
    taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
        device_get_nameunit(ha->pci_dev));

    TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
    ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT,
        taskqueue_thread_enqueue, &ha->async_event_tq);
    taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
        device_get_nameunit(ha->pci_dev));

    TASK_INIT(&ha->stats_task, 0, qla_stats, ha);
    ha->stats_tq = taskqueue_create("qla_statsq", M_NOWAIT,
        taskqueue_thread_enqueue, &ha->stats_tq);
    taskqueue_start_threads(&ha->stats_tq, 1, PI_NET, "%s taskq",
        device_get_nameunit(ha->pci_dev));

    QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
    return (0);

qla_pci_attach_err:

    qla_release(ha);

    if (ha->flags.lock_init) {
        mtx_destroy(&ha->hw_lock);
        mtx_destroy(&ha->sp_log_lock);
    }

    QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
    return (ENXIO);
}
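
/*
 * Note on the attach path above: every failure branch jumps to
 * qla_pci_attach_err, which calls qla_release(); qla_release() tests each
 * resource (taskqueues, cdev, callout, ifnet, DMA, interrupts, MSI-X,
 * register BARs) before freeing it, so it is safe to call with a partially
 * initialized softc. The MSI-X vector layout is: rid 1 for the mailbox
 * interrupt (ql_mbx_isr) and rids 2 .. (num_sds_rings + 1) for the per-ring
 * fast path interrupts (ql_isr).
 */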
/*
 * Name: qla_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qla_pci_detach(device_t dev)
{
    qla_host_t *ha = NULL;
    struct ifnet *ifp;

    if ((ha = device_get_softc(dev)) == NULL) {
        device_printf(dev, "cannot get softc\n");
        return (ENOMEM);
    }

    QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

    ifp = ha->ifp;

    ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
    QLA_LOCK(ha, __func__, -1, 0);

    ha->qla_detach_active = 1;
    qla_stop(ha);

    qla_release(ha);

    QLA_UNLOCK(ha, __func__);

    if (ha->flags.lock_init) {
        mtx_destroy(&ha->hw_lock);
        mtx_destroy(&ha->sp_log_lock);
    }

    QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

    return (0);
}

/*
 * SYSCTL Related Callbacks
 */
static int
qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
{
    int err, ret = 0;
    qla_host_t *ha;

    err = sysctl_handle_int(oidp, &ret, 0, req);

    if (err || !req->newptr)
        return (err);

    if (ret == 1) {
        ha = (qla_host_t *)arg1;
        ql_hw_link_status(ha);
    }
    return (err);
}

/*
 * Name: qla_release
 * Function: Releases the resources allocated for the device
 */
static void
qla_release(qla_host_t *ha)
{
    device_t dev;
    int i;

    dev = ha->pci_dev;

    if (ha->async_event_tq) {
        taskqueue_drain_all(ha->async_event_tq);
        taskqueue_free(ha->async_event_tq);
    }

    if (ha->err_tq) {
        taskqueue_drain_all(ha->err_tq);
        taskqueue_free(ha->err_tq);
    }

    if (ha->stats_tq) {
        taskqueue_drain_all(ha->stats_tq);
        taskqueue_free(ha->stats_tq);
    }

    ql_del_cdev(ha);

    if (ha->flags.qla_watchdog_active) {
        ha->qla_watchdog_exit = 1;

        while (ha->qla_watchdog_exited == 0)
            qla_mdelay(__func__, 1);
    }

    if (ha->flags.qla_callout_init)
        callout_stop(&ha->tx_callout);

    if (ha->ifp != NULL)
        ether_ifdetach(ha->ifp);

    ql_free_drvr_state_buffer(ha);
    ql_free_sp_log_buffer(ha);
    ql_free_dma(ha);
    qla_free_parent_dma_tag(ha);

    if (ha->mbx_handle)
        (void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);

    if (ha->mbx_irq)
        (void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
            ha->mbx_irq);

    for (i = 0; i < ha->hw.num_sds_rings; i++) {

        if (ha->irq_vec[i].handle) {
            (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
                ha->irq_vec[i].handle);
        }

        if (ha->irq_vec[i].irq) {
            (void)bus_release_resource(dev, SYS_RES_IRQ,
                ha->irq_vec[i].irq_rid,
                ha->irq_vec[i].irq);
        }

        qla_free_tx_br(ha, &ha->tx_fp[i]);
    }
    qla_destroy_fp_taskqueues(ha);

    if (ha->msix_count)
        pci_release_msi(dev);

    if (ha->pci_reg)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
            ha->pci_reg);

    if (ha->pci_reg1)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
            ha->pci_reg1);

    return;
}

/*
 * DMA Related Functions
 */

static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
    *((bus_addr_t *)arg) = 0;

    if (error) {
        printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
        return;
    }

    *((bus_addr_t *)arg) = segs[0].ds_addr;

    return;
}
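
/*
 * qla_dmamap_callback() publishes the bus address of the single DMA segment
 * through the opaque 'arg' pointer. ql_alloc_dmabuf() below passes &b_addr
 * and loads the map with BUS_DMA_NOWAIT, so the callback runs synchronously
 * within bus_dmamap_load(); a load error (or a zero address) leaves b_addr
 * at 0 and the tag, memory and map are torn down before returning -1.
 */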
int
ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
    int ret = 0;
    device_t dev;
    bus_addr_t b_addr;

    dev = ha->pci_dev;

    QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

    ret = bus_dma_tag_create(
            ha->parent_tag,             /* parent */
            dma_buf->alignment,
            ((bus_size_t)(1ULL << 32)), /* boundary */
            BUS_SPACE_MAXADDR,          /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            dma_buf->size,              /* maxsize */
            1,                          /* nsegments */
            dma_buf->size,              /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            &dma_buf->dma_tag);

    if (ret) {
        device_printf(dev, "%s: could not create dma tag\n", __func__);
        goto ql_alloc_dmabuf_exit;
    }
    ret = bus_dmamem_alloc(dma_buf->dma_tag,
        (void **)&dma_buf->dma_b,
        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
        &dma_buf->dma_map);
    if (ret) {
        bus_dma_tag_destroy(dma_buf->dma_tag);
        device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
        goto ql_alloc_dmabuf_exit;
    }

    ret = bus_dmamap_load(dma_buf->dma_tag,
        dma_buf->dma_map,
        dma_buf->dma_b,
        dma_buf->size,
        qla_dmamap_callback,
        &b_addr, BUS_DMA_NOWAIT);

    if (ret || !b_addr) {
        bus_dma_tag_destroy(dma_buf->dma_tag);
        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
            dma_buf->dma_map);
        ret = -1;
        goto ql_alloc_dmabuf_exit;
    }

    dma_buf->dma_addr = b_addr;

ql_alloc_dmabuf_exit:
    QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
        __func__, ret, (void *)dma_buf->dma_tag,
        (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
        dma_buf->size));

    return ret;
}

void
ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
    bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
    bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
    bus_dma_tag_destroy(dma_buf->dma_tag);
}

static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
    int ret;
    device_t dev;

    dev = ha->pci_dev;

    /*
     * Allocate parent DMA Tag
     */
    ret = bus_dma_tag_create(
            bus_get_dma_tag(dev),       /* parent */
            1, ((bus_size_t)(1ULL << 32)), /* alignment, boundary */
            BUS_SPACE_MAXADDR,          /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
            0,                          /* nsegments */
            BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            &ha->parent_tag);

    if (ret) {
        device_printf(dev, "%s: could not create parent dma tag\n",
            __func__);
        return (-1);
    }

    ha->flags.parent_tag = 1;

    return (0);
}

static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
    if (ha->flags.parent_tag) {
        bus_dma_tag_destroy(ha->parent_tag);
        ha->flags.parent_tag = 0;
    }
}

/*
 * Name: qla_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S
 */

static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
    struct ifnet *ifp;

    QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

    ifp = ha->ifp = if_alloc(IFT_ETHER);

    if (ifp == NULL)
        panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

    if_initname(ifp, device_get_name(dev), device_get_unit(dev));

    ifp->if_baudrate = IF_Gbps(10);
    ifp->if_capabilities = IFCAP_LINKSTATE;
    ifp->if_mtu = ETHERMTU;

    ifp->if_init = qla_init;
    ifp->if_softc = ha;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = qla_ioctl;

    ifp->if_transmit = qla_transmit;
    ifp->if_qflush = qla_qflush;

    IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
    ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
    IFQ_SET_READY(&ifp->if_snd);

    ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

    ether_ifattach(ifp, qla_get_mac_addr(ha));

    ifp->if_capabilities |= IFCAP_HWCSUM |
                IFCAP_TSO4 |
                IFCAP_TSO6 |
                IFCAP_JUMBO_MTU |
                IFCAP_VLAN_HWTAGGING |
                IFCAP_VLAN_MTU |
                IFCAP_VLAN_HWTSO |
                IFCAP_LRO;

    ifp->if_capenable = ifp->if_capabilities;

    ifp->if_hdrlen = sizeof(struct ether_vlan_header);

    ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

    ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
        NULL);
    ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

    ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

    QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

    return;
}

static void
qla_init_locked(qla_host_t *ha)
{
    struct ifnet *ifp = ha->ifp;

    ql_sp_log(ha, 14, 0, 0, 0, 0, 0, 0);

    qla_stop(ha);

    if (qla_alloc_xmt_bufs(ha) != 0)
        return;

    qla_confirm_9kb_enable(ha);

    if (qla_alloc_rcv_bufs(ha) != 0)
        return;

    bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

    ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
    ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;

    ha->stop_rcv = 0;
    if (ql_init_hw_if(ha) == 0) {
        ifp = ha->ifp;
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ha->hw_vlan_tx_frames = 0;
        ha->tx_tso_frames = 0;
        ha->qla_interface_up = 1;
        ql_update_link_state(ha);
    } else {
        if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_IF_START_FAILURE)
            ha->hw.sp_log_stop = -1;
    }

    ha->qla_watchdog_pause = 0;

    return;
}

static void
qla_init(void *arg)
{
    qla_host_t *ha;

    ha = (qla_host_t *)arg;

    QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

    if (QLA_LOCK(ha, __func__, -1, 0) != 0)
        return;

    qla_init_locked(ha);

    QLA_UNLOCK(ha, __func__);

    QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

static u_int
qla_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
    uint8_t *mta = arg;

    if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
        return (0);

    bcopy(LLADDR(sdl), &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

    return (1);
}

static int
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
    uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
    int mcnt = 0;
    struct ifnet *ifp = ha->ifp;
    int ret = 0;

    mcnt = if_foreach_llmaddr(ifp, qla_copy_maddr, mta);

    if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
        QLA_LOCK_NO_SLEEP) != 0)
        return (-1);

    ql_sp_log(ha, 12, 4, ifp->if_drv_flags,
        (ifp->if_drv_flags & IFF_DRV_RUNNING),
        add_multi, (uint32_t)mcnt, 0);

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {

        if (!add_multi) {
            ret = qla_hw_del_all_mcast(ha);

            if (ret)
                device_printf(ha->pci_dev,
                    "%s: qla_hw_del_all_mcast() failed\n",
                    __func__);
        }

        if (!ret)
            ret = ql_hw_set_multi(ha, mta, mcnt, 1);

    }

    QLA_UNLOCK(ha, __func__);

    return (ret);
}
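
/*
 * qla_set_multi() flattens the interface's link-level multicast list (at
 * most Q8_MAX_NUM_MULTICAST_ADDRS entries of Q8_MAC_ADDR_LEN bytes each)
 * into mta[] via the qla_copy_maddr() callback and programs the whole list
 * in one ql_hw_set_multi() call; on SIOCDELMULTI the hardware list is first
 * cleared with qla_hw_del_all_mcast() and then re-populated from scratch.
 */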
static int
qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
    int ret = 0;
    struct ifreq *ifr = (struct ifreq *)data;
    struct ifaddr *ifa = (struct ifaddr *)data;
    qla_host_t *ha;

    ha = (qla_host_t *)ifp->if_softc;
    if (ha->offline || ha->qla_initiate_recovery)
        return (ret);

    switch (cmd) {
    case SIOCSIFADDR:
        QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
            __func__, cmd));

        if (ifa->ifa_addr->sa_family == AF_INET) {

            ret = QLA_LOCK(ha, __func__,
                QLA_LOCK_DEFAULT_MS_TIMEOUT,
                QLA_LOCK_NO_SLEEP);
            if (ret)
                break;

            ifp->if_flags |= IFF_UP;

            ql_sp_log(ha, 8, 3, ifp->if_drv_flags,
                (ifp->if_drv_flags & IFF_DRV_RUNNING),
                ntohl(IA_SIN(ifa)->sin_addr.s_addr), 0, 0);

            if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                qla_init_locked(ha);
            }

            QLA_UNLOCK(ha, __func__);
            QL_DPRINT4(ha, (ha->pci_dev,
                "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
                __func__, cmd,
                ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

            arp_ifinit(ifp, ifa);
        } else {
            ether_ioctl(ifp, cmd, data);
        }
        break;

    case SIOCSIFMTU:
        QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
            __func__, cmd));

        if (ifr->ifr_mtu > QLA_MAX_MTU) {
            ret = EINVAL;
        } else {
            ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
                QLA_LOCK_NO_SLEEP);

            if (ret)
                break;

            ifp->if_mtu = ifr->ifr_mtu;
            ha->max_frame_size =
                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

            ql_sp_log(ha, 9, 4, ifp->if_drv_flags,
                (ifp->if_drv_flags & IFF_DRV_RUNNING),
                ha->max_frame_size, ifp->if_mtu, 0);

            if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                qla_init_locked(ha);
            }

            if (ifp->if_mtu > ETHERMTU)
                ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
            else
                ha->std_replenish = QL_STD_REPLENISH_THRES;

            QLA_UNLOCK(ha, __func__);
        }

        break;

    case SIOCSIFFLAGS:
        QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
            __func__, cmd));

        ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
            QLA_LOCK_NO_SLEEP);

        if (ret)
            break;

        ql_sp_log(ha, 10, 4, ifp->if_drv_flags,
            (ifp->if_drv_flags & IFF_DRV_RUNNING),
            ha->if_flags, ifp->if_flags, 0);

        if (ifp->if_flags & IFF_UP) {

            ha->max_frame_size = ifp->if_mtu +
                ETHER_HDR_LEN + ETHER_CRC_LEN;
            qla_init_locked(ha);

            if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                if ((ifp->if_flags ^ ha->if_flags) &
                    IFF_PROMISC) {
                    ret = ql_set_promisc(ha);
                } else if ((ifp->if_flags ^ ha->if_flags) &
                    IFF_ALLMULTI) {
                    ret = ql_set_allmulti(ha);
                }
            }
        } else {
            if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                qla_stop(ha);
            ha->if_flags = ifp->if_flags;
        }

        QLA_UNLOCK(ha, __func__);
        break;

    case SIOCADDMULTI:
        QL_DPRINT4(ha, (ha->pci_dev,
            "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

        if (qla_set_multi(ha, 1))
            ret = EINVAL;
        break;

    case SIOCDELMULTI:
        QL_DPRINT4(ha, (ha->pci_dev,
            "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

        if (qla_set_multi(ha, 0))
            ret = EINVAL;
        break;

    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        QL_DPRINT4(ha, (ha->pci_dev,
            "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
            __func__, cmd));
        ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
        break;

    case SIOCSIFCAP:
    {
        int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

        QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
            __func__, cmd));

        if (mask & IFCAP_HWCSUM)
            ifp->if_capenable ^= IFCAP_HWCSUM;
        if (mask & IFCAP_TSO4)
            ifp->if_capenable ^= IFCAP_TSO4;
        if (mask & IFCAP_TSO6)
            ifp->if_capenable ^= IFCAP_TSO6;
        if (mask & IFCAP_VLAN_HWTAGGING)
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
        if (mask & IFCAP_VLAN_HWTSO)
            ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
        if (mask & IFCAP_LRO)
            ifp->if_capenable ^= IFCAP_LRO;

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
                QLA_LOCK_NO_SLEEP);

            if (ret)
                break;

            ql_sp_log(ha, 11, 4, ifp->if_drv_flags,
                (ifp->if_drv_flags & IFF_DRV_RUNNING),
                mask, ifp->if_capenable, 0);

            qla_init_locked(ha);

            QLA_UNLOCK(ha, __func__);

        }
        VLAN_CAPABILITIES(ifp);
        break;
    }

    default:
        QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
            __func__, cmd));
        ret = ether_ioctl(ifp, cmd, data);
        break;
    }

    return (ret);
}

static int
qla_media_change(struct ifnet *ifp)
{
    qla_host_t *ha;
    struct ifmedia *ifm;
    int ret = 0;

    ha = (qla_host_t *)ifp->if_softc;

    QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

    ifm = &ha->media;

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        ret = EINVAL;

    QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

    return (ret);
}

static void
qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    qla_host_t *ha;

    ha = (qla_host_t *)ifp->if_softc;

    QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    ql_update_link_state(ha);
    if (ha->hw.link_up) {
        ifmr->ifm_status |= IFM_ACTIVE;
        ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
    }

    QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,
        (ha->hw.link_up ? "link_up" : "link_down")));

    return;
}
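
/*
 * Transmit path note: qla_send() below maps the mbuf chain with
 * bus_dmamap_load_mbuf_sg(); an EFBIG result triggers a single m_defrag()
 * followed by one retry. On success the mbuf is parked in
 * tx_ring[txr_idx].tx_buf[tx_idx].m_head and is expected to be released
 * later by the transmit-completion path (see qla_clear_tx_buf() below and
 * ql_hw_tx_done_locked(), the latter implemented outside this file).
 */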
"link_up" : "link_down"))); 1268 1269 return; 1270 } 1271 1272 1273 static int 1274 qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx, 1275 uint32_t iscsi_pdu) 1276 { 1277 bus_dma_segment_t segs[QLA_MAX_SEGMENTS]; 1278 bus_dmamap_t map; 1279 int nsegs; 1280 int ret = -1; 1281 uint32_t tx_idx; 1282 struct mbuf *m_head = *m_headp; 1283 1284 QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1285 1286 tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next; 1287 1288 if ((NULL != ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head) || 1289 (QL_ERR_INJECT(ha, INJCT_TXBUF_MBUF_NON_NULL))){ 1290 QL_ASSERT(ha, 0, ("%s [%d]: txr_idx = %d tx_idx = %d "\ 1291 "mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,\ 1292 ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head)); 1293 1294 device_printf(ha->pci_dev, "%s [%d]: txr_idx = %d tx_idx = %d " 1295 "mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx, 1296 ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head); 1297 1298 if (m_head) 1299 m_freem(m_head); 1300 *m_headp = NULL; 1301 QL_INITIATE_RECOVERY(ha); 1302 return (ret); 1303 } 1304 1305 map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map; 1306 1307 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 1308 BUS_DMA_NOWAIT); 1309 1310 if (ret == EFBIG) { 1311 1312 struct mbuf *m; 1313 1314 QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__, 1315 m_head->m_pkthdr.len)); 1316 1317 m = m_defrag(m_head, M_NOWAIT); 1318 if (m == NULL) { 1319 ha->err_tx_defrag++; 1320 m_freem(m_head); 1321 *m_headp = NULL; 1322 device_printf(ha->pci_dev, 1323 "%s: m_defrag() = NULL [%d]\n", 1324 __func__, ret); 1325 return (ENOBUFS); 1326 } 1327 m_head = m; 1328 *m_headp = m_head; 1329 1330 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 1331 segs, &nsegs, BUS_DMA_NOWAIT))) { 1332 1333 ha->err_tx_dmamap_load++; 1334 1335 device_printf(ha->pci_dev, 1336 "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n", 1337 __func__, ret, m_head->m_pkthdr.len); 1338 1339 if (ret != ENOMEM) { 1340 m_freem(m_head); 1341 *m_headp = NULL; 1342 } 1343 return (ret); 1344 } 1345 1346 } else if (ret) { 1347 1348 ha->err_tx_dmamap_load++; 1349 1350 device_printf(ha->pci_dev, 1351 "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n", 1352 __func__, ret, m_head->m_pkthdr.len); 1353 1354 if (ret != ENOMEM) { 1355 m_freem(m_head); 1356 *m_headp = NULL; 1357 } 1358 return (ret); 1359 } 1360 1361 QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet")); 1362 1363 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 1364 1365 if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx, 1366 iscsi_pdu))) { 1367 ha->tx_ring[txr_idx].count++; 1368 if (iscsi_pdu) 1369 ha->tx_ring[txr_idx].iscsi_pkt_count++; 1370 ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head; 1371 } else { 1372 bus_dmamap_unload(ha->tx_tag, map); 1373 if (ret == EINVAL) { 1374 if (m_head) 1375 m_freem(m_head); 1376 *m_headp = NULL; 1377 } 1378 } 1379 1380 QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1381 return (ret); 1382 } 1383 1384 static int 1385 qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp) 1386 { 1387 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 1388 "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx); 1389 1390 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 1391 1392 fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF, 1393 M_NOWAIT, &fp->tx_mtx); 1394 if (fp->tx_br == NULL) { 1395 QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for " 1396 " fp[%d, %d]\n", ha->pci_func, fp->txr_idx)); 1397 return (-ENOMEM); 1398 } 1399 return 0; 1400 } 1401 
static void
qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
{
    struct mbuf *mp;
    struct ifnet *ifp = ha->ifp;

    if (mtx_initialized(&fp->tx_mtx)) {

        if (fp->tx_br != NULL) {

            mtx_lock(&fp->tx_mtx);

            while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
                m_freem(mp);
            }

            mtx_unlock(&fp->tx_mtx);

            buf_ring_free(fp->tx_br, M_DEVBUF);
            fp->tx_br = NULL;
        }
        mtx_destroy(&fp->tx_mtx);
    }
    return;
}

static void
qla_fp_taskqueue(void *context, int pending)
{
    qla_tx_fp_t *fp;
    qla_host_t *ha;
    struct ifnet *ifp;
    struct mbuf *mp = NULL;
    int ret = 0;
    uint32_t txr_idx;
    uint32_t iscsi_pdu = 0;
    uint32_t rx_pkts_left = -1;

    fp = context;

    if (fp == NULL)
        return;

    ha = (qla_host_t *)fp->ha;

    ifp = ha->ifp;

    txr_idx = fp->txr_idx;

    mtx_lock(&fp->tx_mtx);

    if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || (!ha->hw.link_up)) {
        mtx_unlock(&fp->tx_mtx);
        goto qla_fp_taskqueue_exit;
    }

    while (rx_pkts_left && !ha->stop_rcv &&
        (ifp->if_drv_flags & IFF_DRV_RUNNING) && ha->hw.link_up) {
        rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64);

#ifdef QL_ENABLE_ISCSI_TLV
        ql_hw_tx_done_locked(ha, fp->txr_idx);
        ql_hw_tx_done_locked(ha, (fp->txr_idx +
            (ha->hw.num_tx_rings >> 1)));
#else
        ql_hw_tx_done_locked(ha, fp->txr_idx);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

        mp = drbr_peek(ifp, fp->tx_br);

        while (mp != NULL) {

            if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) {
#ifdef QL_ENABLE_ISCSI_TLV
                if (ql_iscsi_pdu(ha, mp) == 0) {
                    txr_idx = txr_idx +
                        (ha->hw.num_tx_rings >> 1);
                    iscsi_pdu = 1;
                } else {
                    iscsi_pdu = 0;
                    txr_idx = fp->txr_idx;
                }
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
            }

            ret = qla_send(ha, &mp, txr_idx, iscsi_pdu);

            if (ret) {
                if (mp != NULL)
                    drbr_putback(ifp, fp->tx_br, mp);
                else {
                    drbr_advance(ifp, fp->tx_br);
                }

                mtx_unlock(&fp->tx_mtx);

                goto qla_fp_taskqueue_exit0;
            } else {
                drbr_advance(ifp, fp->tx_br);
            }

            /* Send a copy of the frame to the BPF listener */
            ETHER_BPF_MTAP(ifp, mp);

            if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
                (!ha->hw.link_up))
                break;

            mp = drbr_peek(ifp, fp->tx_br);
        }
    }
    mtx_unlock(&fp->tx_mtx);

    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
        goto qla_fp_taskqueue_exit;

qla_fp_taskqueue_exit0:

    if (rx_pkts_left || ((mp != NULL) && ret)) {
        taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
    } else {
        if (!ha->stop_rcv) {
            QL_ENABLE_INTERRUPTS(ha, fp->txr_idx);
        }
    }

qla_fp_taskqueue_exit:

    QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
    return;
}

static int
qla_create_fp_taskqueues(qla_host_t *ha)
{
    int i;
    uint8_t tq_name[32];

    for (i = 0; i < ha->hw.num_sds_rings; i++) {

        qla_tx_fp_t *fp = &ha->tx_fp[i];

        bzero(tq_name, sizeof (tq_name));
        snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

        NET_TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);

        fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
            taskqueue_thread_enqueue,
            &fp->fp_taskqueue);

        if (fp->fp_taskqueue == NULL)
            return (-1);

        taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
            tq_name);

        QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
            fp->fp_taskqueue));
    }

    return (0);
}
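
/*
 * One fast taskqueue with a single PI_NET thread is created per SDS ring.
 * qla_fp_taskqueue() re-enqueues its own task while receive or transmit
 * work remains and only re-enables the ring's interrupt
 * (QL_ENABLE_INTERRUPTS) once it has drained; the interrupt handler
 * (ql_isr, implemented outside this file) is expected to schedule fp_task
 * when new completions arrive.
 */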
static void
qla_destroy_fp_taskqueues(qla_host_t *ha)
{
    int i;

    for (i = 0; i < ha->hw.num_sds_rings; i++) {

        qla_tx_fp_t *fp = &ha->tx_fp[i];

        if (fp->fp_taskqueue != NULL) {
            taskqueue_drain_all(fp->fp_taskqueue);
            taskqueue_free(fp->fp_taskqueue);
            fp->fp_taskqueue = NULL;
        }
    }
    return;
}

static void
qla_drain_fp_taskqueues(qla_host_t *ha)
{
    int i;

    for (i = 0; i < ha->hw.num_sds_rings; i++) {
        qla_tx_fp_t *fp = &ha->tx_fp[i];

        if (fp->fp_taskqueue != NULL) {
            taskqueue_drain_all(fp->fp_taskqueue);
        }
    }
    return;
}

static int
qla_transmit(struct ifnet *ifp, struct mbuf *mp)
{
    qla_host_t *ha = (qla_host_t *)ifp->if_softc;
    qla_tx_fp_t *fp;
    int rss_id = 0;
    int ret = 0;

    QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

#if __FreeBSD_version >= 1100000
    if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
#else
    if (mp->m_flags & M_FLOWID)
#endif
        rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) %
            ha->hw.num_sds_rings;
    fp = &ha->tx_fp[rss_id];

    if (fp->tx_br == NULL) {
        ret = EINVAL;
        goto qla_transmit_exit;
    }

    if (mp != NULL) {
        ret = drbr_enqueue(ifp, fp->tx_br, mp);
    }

    if (fp->fp_taskqueue != NULL)
        taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);

    ret = 0;

qla_transmit_exit:

    QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
    return ret;
}

static void
qla_qflush(struct ifnet *ifp)
{
    int i;
    qla_tx_fp_t *fp;
    struct mbuf *mp;
    qla_host_t *ha;

    ha = (qla_host_t *)ifp->if_softc;

    QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

    for (i = 0; i < ha->hw.num_sds_rings; i++) {

        fp = &ha->tx_fp[i];

        if (fp == NULL)
            continue;

        if (fp->tx_br) {
            mtx_lock(&fp->tx_mtx);

            while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
                m_freem(mp);
            }
            mtx_unlock(&fp->tx_mtx);
        }
    }
    QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

    return;
}

static void
qla_stop(qla_host_t *ha)
{
    struct ifnet *ifp = ha->ifp;
    device_t dev;
    int i = 0;

    ql_sp_log(ha, 13, 0, 0, 0, 0, 0, 0);

    dev = ha->pci_dev;

    ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
    ha->qla_watchdog_pause = 1;

    for (i = 0; i < ha->hw.num_sds_rings; i++) {
        qla_tx_fp_t *fp;

        fp = &ha->tx_fp[i];

        if (fp == NULL)
            continue;

        if (fp->tx_br != NULL) {
            mtx_lock(&fp->tx_mtx);
            mtx_unlock(&fp->tx_mtx);
        }
    }

    while (!ha->qla_watchdog_paused)
        qla_mdelay(__func__, 1);

    ha->qla_interface_up = 0;

    qla_drain_fp_taskqueues(ha);

    ql_del_hw_if(ha);

    qla_free_xmt_bufs(ha);
    qla_free_rcv_bufs(ha);

    return;
}

/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
    int ret = 0;
    uint32_t i, j;
    qla_tx_buf_t *txb;

    if (bus_dma_tag_create(NULL,    /* parent */
        1, 0,                   /* alignment, bounds */
        BUS_SPACE_MAXADDR,      /* lowaddr */
        BUS_SPACE_MAXADDR,      /* highaddr */
        NULL, NULL,             /* filter, filterarg */
        QLA_MAX_TSO_FRAME_SIZE, /* maxsize */
        QLA_MAX_SEGMENTS,       /* nsegments */
        PAGE_SIZE,              /* maxsegsize */
        BUS_DMA_ALLOCNOW,       /* flags */
        NULL,                   /* lockfunc */
        NULL,                   /* lockfuncarg */
        &ha->tx_tag)) {
        device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
            __func__);
        return (ENOMEM);
    }

    for (i = 0; i < ha->hw.num_tx_rings; i++) {
        bzero((void *)ha->tx_ring[i].tx_buf,
            (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
    }

    for (j = 0; j < ha->hw.num_tx_rings; j++) {
        for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {

            txb = &ha->tx_ring[j].tx_buf[i];

            if ((ret = bus_dmamap_create(ha->tx_tag,
                BUS_DMA_NOWAIT, &txb->map))) {

                ha->err_tx_dmamap_create++;
                device_printf(ha->pci_dev,
                    "%s: bus_dmamap_create failed[%d]\n",
                    __func__, ret);

                qla_free_xmt_bufs(ha);

                return (ret);
            }
        }
    }

    return 0;
}

/*
 * Release mbuf after it is sent on the wire
 */
static void
qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
    QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

    if (txb->m_head) {
        bus_dmamap_sync(ha->tx_tag, txb->map,
            BUS_DMASYNC_POSTWRITE);

        bus_dmamap_unload(ha->tx_tag, txb->map);

        m_freem(txb->m_head);
        txb->m_head = NULL;

        bus_dmamap_destroy(ha->tx_tag, txb->map);
        txb->map = NULL;
    }

    if (txb->map) {
        bus_dmamap_unload(ha->tx_tag, txb->map);
        bus_dmamap_destroy(ha->tx_tag, txb->map);
        txb->map = NULL;
    }

    QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

static void
qla_free_xmt_bufs(qla_host_t *ha)
{
    int i, j;

    for (j = 0; j < ha->hw.num_tx_rings; j++) {
        for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
            qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
    }

    if (ha->tx_tag != NULL) {
        bus_dma_tag_destroy(ha->tx_tag);
        ha->tx_tag = NULL;
    }

    for (i = 0; i < ha->hw.num_tx_rings; i++) {
        bzero((void *)ha->tx_ring[i].tx_buf,
            (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
    }
    return;
}

static int
qla_alloc_rcv_std(qla_host_t *ha)
{
    int i, j, k, r, ret = 0;
    qla_rx_buf_t *rxb;
    qla_rx_ring_t *rx_ring;

    for (r = 0; r < ha->hw.num_rds_rings; r++) {

        rx_ring = &ha->rx_ring[r];

        for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

            rxb = &rx_ring->rx_buf[i];

            ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
                &rxb->map);

            if (ret) {
                device_printf(ha->pci_dev,
                    "%s: dmamap[%d, %d] failed\n",
                    __func__, r, i);

                for (k = 0; k < r; k++) {
                    for (j = 0; j < NUM_RX_DESCRIPTORS;
                        j++) {
                        rxb = &ha->rx_ring[k].rx_buf[j];
                        bus_dmamap_destroy(ha->rx_tag,
                            rxb->map);
                    }
                }

                for (j = 0; j < i; j++) {
                    bus_dmamap_destroy(ha->rx_tag,
                        rx_ring->rx_buf[j].map);
                }
                goto qla_alloc_rcv_std_err;
            }
        }
    }

    qla_init_hw_rcv_descriptors(ha);

    for (r = 0; r < ha->hw.num_rds_rings; r++) {

        rx_ring = &ha->rx_ring[r];

        for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
            rxb = &rx_ring->rx_buf[i];
            rxb->handle = i;
            if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
                /*
                 * set the physical address in the
                 * corresponding descriptor entry in the
                 * receive ring/queue for the hba
                 */
                qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
                    rxb->paddr,
                    (rxb->m_head)->m_pkthdr.len);
            } else {
                device_printf(ha->pci_dev,
                    "%s: ql_get_mbuf [%d, %d] failed\n",
                    __func__, r, i);
                bus_dmamap_destroy(ha->rx_tag, rxb->map);
                goto qla_alloc_rcv_std_err;
            }
        }
    }
    return 0;

qla_alloc_rcv_std_err:
    return (-1);
}

static void
qla_free_rcv_std(qla_host_t *ha)
{
    int i, r;
    qla_rx_buf_t *rxb;

    for (r = 0; r < ha->hw.num_rds_rings; r++) {
        for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
            rxb = &ha->rx_ring[r].rx_buf[i];
            if (rxb->m_head != NULL) {
                bus_dmamap_unload(ha->rx_tag, rxb->map);
                bus_dmamap_destroy(ha->rx_tag, rxb->map);
                m_freem(rxb->m_head);
                rxb->m_head = NULL;
            }
        }
    }
    return;
}

static int
qla_alloc_rcv_bufs(qla_host_t *ha)
{
    int i, ret = 0;

    if (bus_dma_tag_create(NULL,    /* parent */
        1, 0,                   /* alignment, bounds */
        BUS_SPACE_MAXADDR,      /* lowaddr */
        BUS_SPACE_MAXADDR,      /* highaddr */
        NULL, NULL,             /* filter, filterarg */
        MJUM9BYTES,             /* maxsize */
        1,                      /* nsegments */
        MJUM9BYTES,             /* maxsegsize */
        BUS_DMA_ALLOCNOW,       /* flags */
        NULL,                   /* lockfunc */
        NULL,                   /* lockfuncarg */
        &ha->rx_tag)) {

        device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
            __func__);

        return (ENOMEM);
    }

    bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

    for (i = 0; i < ha->hw.num_sds_rings; i++) {
        ha->hw.sds[i].sdsr_next = 0;
        ha->hw.sds[i].rxb_free = NULL;
        ha->hw.sds[i].rx_free = 0;
    }

    ret = qla_alloc_rcv_std(ha);

    return (ret);
}

static void
qla_free_rcv_bufs(qla_host_t *ha)
{
    int i;

    qla_free_rcv_std(ha);

    if (ha->rx_tag != NULL) {
        bus_dma_tag_destroy(ha->rx_tag);
        ha->rx_tag = NULL;
    }

    bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

    for (i = 0; i < ha->hw.num_sds_rings; i++) {
        ha->hw.sds[i].sdsr_next = 0;
        ha->hw.sds[i].rxb_free = NULL;
        ha->hw.sds[i].rx_free = 0;
    }

    return;
}

int
ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
{
    register struct mbuf *mp = nmp;
    struct ifnet *ifp;
    int ret = 0;
    uint32_t offset;
    bus_dma_segment_t segs[1];
    int nsegs, mbuf_size;

    QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

    ifp = ha->ifp;

    if (ha->hw.enable_9kb)
        mbuf_size = MJUM9BYTES;
    else
        mbuf_size = MCLBYTES;

    if (mp == NULL) {

        if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE))
            return (-1);

        if (ha->hw.enable_9kb)
            mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size);
        else
            mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

        if (mp == NULL) {
            ha->err_m_getcl++;
            ret = ENOBUFS;
            device_printf(ha->pci_dev,
                "%s: m_getcl failed\n", __func__);
            goto exit_ql_get_mbuf;
        }
        mp->m_len = mp->m_pkthdr.len = mbuf_size;
    } else {
        mp->m_len = mp->m_pkthdr.len = mbuf_size;
        mp->m_data = mp->m_ext.ext_buf;
        mp->m_next = NULL;
    }

    offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
    if (offset) {
        offset = 8 - offset;
        m_adj(mp, offset);
    }

    /*
     * Using memory from the mbuf cluster pool, invoke the bus_dma
     * machinery to arrange the memory mapping.
     */
    ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
        mp, segs, &nsegs, BUS_DMA_NOWAIT);
    rxb->paddr = segs[0].ds_addr;

    if (ret || !rxb->paddr || (nsegs != 1)) {
        m_free(mp);
        rxb->m_head = NULL;
        device_printf(ha->pci_dev,
            "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
            __func__, ret, (long long unsigned int)rxb->paddr,
            nsegs);
        ret = -1;
        goto exit_ql_get_mbuf;
    }
    rxb->m_head = mp;
    bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_ql_get_mbuf:
    QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
    return (ret);
}

static void
qla_get_peer(qla_host_t *ha)
{
    device_t *peers;
    int count, i, slot;
    int my_slot = pci_get_slot(ha->pci_dev);

    if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
        return;

    for (i = 0; i < count; i++) {
        slot = pci_get_slot(peers[i]);

        if ((slot >= 0) && (slot == my_slot) &&
            (pci_get_device(peers[i]) ==
                pci_get_device(ha->pci_dev))) {
            if (ha->pci_dev != peers[i])
                ha->peer_dev = peers[i];
        }
    }
}

static void
qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
{
    qla_host_t *ha_peer;

    if (ha->peer_dev) {
        if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {

            ha_peer->msg_from_peer = msg_to_peer;
        }
    }
}

void
qla_set_error_recovery(qla_host_t *ha)
{
    struct ifnet *ifp = ha->ifp;

    if (!cold && ha->enable_error_recovery) {
        if (ifp)
            ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        ha->qla_initiate_recovery = 1;
    } else
        ha->offline = 1;
    return;
}

static void
qla_error_recovery(void *context, int pending)
{
    qla_host_t *ha = context;
    uint32_t msecs_100 = 400;
    struct ifnet *ifp = ha->ifp;
    int i = 0;

    device_printf(ha->pci_dev, "%s: enter\n", __func__);
    ha->hw.imd_compl = 1;

    taskqueue_drain_all(ha->stats_tq);
    taskqueue_drain_all(ha->async_event_tq);

    if (QLA_LOCK(ha, __func__, -1, 0) != 0)
        return;

    device_printf(ha->pci_dev, "%s: ts_usecs = %ld start\n",
        __func__, qla_get_usec_timestamp());

    if (ha->qla_interface_up) {

        qla_mdelay(__func__, 300);

        //ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
            qla_tx_fp_t *fp;

            fp = &ha->tx_fp[i];

            if (fp == NULL)
                continue;

            if (fp->tx_br != NULL) {
                mtx_lock(&fp->tx_mtx);
                mtx_unlock(&fp->tx_mtx);
            }
        }
    }

    qla_drain_fp_taskqueues(ha);

    if ((ha->pci_func & 0x1) == 0) {

        if (!ha->msg_from_peer) {
            qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);

            while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
                msecs_100--)
                qla_mdelay(__func__, 100);
        }

        ha->msg_from_peer = 0;

        if (ha->enable_minidump)
            ql_minidump(ha);

        if (ha->enable_driverstate_dump)
            ql_capture_drvr_state(ha);

        if (ql_init_hw(ha)) {
            device_printf(ha->pci_dev,
                "%s: ts_usecs = %ld exit: ql_init_hw failed\n",
                __func__, qla_get_usec_timestamp());
            ha->offline = 1;
            goto qla_error_recovery_exit;
        }

        if (ha->qla_interface_up) {
            qla_free_xmt_bufs(ha);
            qla_free_rcv_bufs(ha);
        }

        if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY))
            qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);

    } else {
        if (ha->msg_from_peer == QL_PEER_MSG_RESET) {

            ha->msg_from_peer = 0;

            if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY))
                qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
        } else {
            qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
        }

        while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--)
            qla_mdelay(__func__, 100);
        ha->msg_from_peer = 0;

        if (ha->enable_driverstate_dump)
            ql_capture_drvr_state(ha);

        if (msecs_100 == 0) {
            device_printf(ha->pci_dev,
                "%s: ts_usecs = %ld exit: QL_PEER_MSG_ACK not received\n",
                __func__, qla_get_usec_timestamp());
            ha->offline = 1;
            goto qla_error_recovery_exit;
        }

        if (ql_init_hw(ha)) {
            device_printf(ha->pci_dev,
                "%s: ts_usecs = %ld exit: ql_init_hw failed\n",
                __func__, qla_get_usec_timestamp());
            ha->offline = 1;
            goto qla_error_recovery_exit;
        }

        if (ha->qla_interface_up) {
            qla_free_xmt_bufs(ha);
            qla_free_rcv_bufs(ha);
        }
    }

    qla_mdelay(__func__, ha->ms_delay_after_init);

    *((uint32_t *)&ha->hw.flags) = 0;
    ha->qla_initiate_recovery = 0;

    if (ha->qla_interface_up) {

        if (qla_alloc_xmt_bufs(ha) != 0) {
            ha->offline = 1;
            goto qla_error_recovery_exit;
        }

        qla_confirm_9kb_enable(ha);

        if (qla_alloc_rcv_bufs(ha) != 0) {
            ha->offline = 1;
            goto qla_error_recovery_exit;
        }

        ha->stop_rcv = 0;

        if (ql_init_hw_if(ha) == 0) {
            ifp = ha->ifp;
            ifp->if_drv_flags |= IFF_DRV_RUNNING;
            ha->qla_watchdog_pause = 0;
            ql_update_link_state(ha);
        } else {
            ha->offline = 1;

            if (ha->hw.sp_log_stop_events &
                Q8_SP_LOG_STOP_IF_START_FAILURE)
                ha->hw.sp_log_stop = -1;
        }
    } else {
        ha->qla_watchdog_pause = 0;
    }

qla_error_recovery_exit:

    if (ha->offline) {
        device_printf(ha->pci_dev, "%s: ts_usecs = %ld port offline\n",
            __func__, qla_get_usec_timestamp());
        if (ha->hw.sp_log_stop_events &
            Q8_SP_LOG_STOP_ERR_RECOVERY_FAILURE)
            ha->hw.sp_log_stop = -1;
    }

    QLA_UNLOCK(ha, __func__);

    if (!ha->offline)
        callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
            qla_watchdog, ha);

    device_printf(ha->pci_dev,
        "%s: ts_usecs = %ld exit\n",
        __func__, qla_get_usec_timestamp());
    return;
}

static void
qla_async_event(void *context, int pending)
{
    qla_host_t *ha = context;

    if (QLA_LOCK(ha, __func__, -1, 0) != 0)
        return;

    if (ha->async_event) {
        ha->async_event = 0;
        qla_hw_async_event(ha);
    }

    QLA_UNLOCK(ha, __func__);

    return;
}

static void
qla_stats(void *context, int pending)
{
    qla_host_t *ha;

    ha = context;

    ql_get_stats(ha);

    return;
}