/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_os.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include <sys/smp.h>

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8030
#define PCI_PRODUCT_QLOGIC_ISP8030	0x8030
#endif

#define PCI_QLOGIC_ISP8030 \
	((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)

/*
 * static functions
 */
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);
static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);

static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qla_stop(qla_host_t *ha);
static void qla_get_peer(qla_host_t *ha);
static void qla_error_recovery(void *context, int pending);
static void qla_async_event(void *context, int pending);
static void qla_stats(void *context, int pending);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
		uint32_t iscsi_pdu);

/*
 * Hooks to the Operating Systems
 */
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);

static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);

static int qla_transmit(struct ifnet *ifp, struct mbuf *mp);
static void qla_qflush(struct ifnet *ifp);
static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static int qla_create_fp_taskqueues(qla_host_t *ha);
static void qla_destroy_fp_taskqueues(qla_host_t *ha);
static void qla_drain_fp_taskqueues(qla_host_t *ha);
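/*
 * newbus/device(9) glue: the probe/attach/detach methods below are
 * collected in qla_pci_methods and registered with the PCI bus through
 * DRIVER_MODULE(qla83xx, pci, ...).
 */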
static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla83xx_devclass;

DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);

MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
MODULE_DEPEND(qla83xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");

#define QL_STD_REPLENISH_THRES		0
#define QL_JUMBO_REPLENISH_THRES	32

static char dev_str[64];
static char ver_str[64];

/*
 * Name: qla_pci_probe
 * Function: Validate the PCI device to be a QLA83xx device
 */
static int
qla_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP8030:
		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
			"Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		device_set_desc(dev, dev_str);
		break;
	default:
		return (ENXIO);
	}

	if (bootverbose)
		printf("%s: %s\n", __func__, dev_str);

	return (BUS_PROBE_DEFAULT);
}

static void
qla_add_sysctls(qla_host_t *ha)
{
	device_t dev = ha->pci_dev;

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "version", CTLFLAG_RD,
		ver_str, 0, "Driver Version");

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "fw_version", CTLFLAG_RD,
		ha->fw_ver_str, 0, "firmware version");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
		(void *)ha, 0,
		qla_sysctl_get_link_status, "I", "Link Status");

	ha->dbg_level = 0;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "debug", CTLFLAG_RW,
		&ha->dbg_level, ha->dbg_level, "Debug Level");

	ha->enable_minidump = 1;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "enable_minidump", CTLFLAG_RW,
		&ha->enable_minidump, ha->enable_minidump,
		"Minidump retrieval prior to error recovery "
		"is enabled only when this is set");

	ha->enable_driverstate_dump = 1;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "enable_driverstate_dump", CTLFLAG_RW,
		&ha->enable_driverstate_dump, ha->enable_driverstate_dump,
		"Driver State retrieval prior to error recovery "
		"is enabled only when this is set");
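	/*
	 * The knobs above and below live under the per-device sysctl tree
	 * and can be tuned at runtime with sysctl(8), e.g. (unit number 0
	 * assumed here):
	 *
	 *	sysctl dev.ql.0.debug=1
	 *	sysctl dev.ql.0.enable_error_recovery=1
	 */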
	ha->enable_error_recovery = 1;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "enable_error_recovery", CTLFLAG_RW,
		&ha->enable_error_recovery, ha->enable_error_recovery,
		"When set, error recovery is enabled on fatal errors; "
		"otherwise the port is taken offline");

	ha->ms_delay_after_init = 1000;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "ms_delay_after_init", CTLFLAG_RW,
		&ha->ms_delay_after_init, ha->ms_delay_after_init,
		"millisecond delay after hw_init");

	ha->std_replenish = QL_STD_REPLENISH_THRES;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "std_replenish", CTLFLAG_RW,
		&ha->std_replenish, ha->std_replenish,
		"Threshold for Replenishing Standard Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "ipv4_lro",
		CTLFLAG_RD, &ha->ipv4_lro,
		"number of IPv4 LRO completions");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "ipv6_lro",
		CTLFLAG_RD, &ha->ipv6_lro,
		"number of IPv6 LRO completions");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "tx_tso_frames",
		CTLFLAG_RD, &ha->tx_tso_frames,
		"number of Tx TSO Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_vlan_tx_frames",
		CTLFLAG_RD, &ha->hw_vlan_tx_frames,
		"number of Tx VLAN Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_lock_failed",
		CTLFLAG_RD, &ha->hw_lock_failed,
		"number of hw_lock failures");

	return;
}

static void
qla_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	qla_hw_t *hw;
	struct ifnet *ifp;

	hw = &ha->hw;
	ifp = ha->ifp;

	if (ha->qla_watchdog_exit) {
		ha->qla_watchdog_exited = 1;
		return;
	}
	ha->qla_watchdog_exited = 0;

	if (!ha->qla_watchdog_pause) {
		if (!ha->offline &&
			(ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
			(ha->msg_from_peer == QL_PEER_MSG_RESET))) {

			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			ql_update_link_state(ha);

			if (ha->enable_error_recovery) {
				ha->qla_watchdog_paused = 1;
				ha->qla_watchdog_pause = 1;
				ha->err_inject = 0;
				device_printf(ha->pci_dev,
					"%s: taskqueue_enqueue(err_task)\n",
					__func__);
				taskqueue_enqueue(ha->err_tq, &ha->err_task);
			} else {
				if (ifp != NULL)
					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				ha->offline = 1;
			}
			return;

		} else {
			if (ha->qla_interface_up) {

				ha->watchdog_ticks++;

				if (ha->watchdog_ticks > 1000)
					ha->watchdog_ticks = 0;

				if (!ha->watchdog_ticks && QL_RUNNING(ifp)) {
					taskqueue_enqueue(ha->stats_tq,
						&ha->stats_task);
				}

				if (ha->async_event) {
					taskqueue_enqueue(ha->async_event_tq,
						&ha->async_event_task);
				}

			}
			ha->qla_watchdog_paused = 0;
		}
	} else {
		ha->qla_watchdog_paused = 1;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);
}

/*
 * Name: qla_pci_attach
 * Function: attaches the device to the
operating system 332 */ 333 static int 334 qla_pci_attach(device_t dev) 335 { 336 qla_host_t *ha = NULL; 337 uint32_t rsrc_len; 338 int i; 339 uint32_t num_rcvq = 0; 340 341 if ((ha = device_get_softc(dev)) == NULL) { 342 device_printf(dev, "cannot get softc\n"); 343 return (ENOMEM); 344 } 345 346 memset(ha, 0, sizeof (qla_host_t)); 347 348 if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) { 349 device_printf(dev, "device is not ISP8030\n"); 350 return (ENXIO); 351 } 352 353 ha->pci_func = pci_get_function(dev) & 0x1; 354 355 ha->pci_dev = dev; 356 357 pci_enable_busmaster(dev); 358 359 ha->reg_rid = PCIR_BAR(0); 360 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, 361 RF_ACTIVE); 362 363 if (ha->pci_reg == NULL) { 364 device_printf(dev, "unable to map any ports\n"); 365 goto qla_pci_attach_err; 366 } 367 368 rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 369 ha->reg_rid); 370 371 mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); 372 mtx_init(&ha->sp_log_lock, "qla83xx_sp_log_lock", MTX_NETWORK_LOCK, MTX_DEF); 373 ha->flags.lock_init = 1; 374 375 qla_add_sysctls(ha); 376 377 ha->hw.num_sds_rings = MAX_SDS_RINGS; 378 ha->hw.num_rds_rings = MAX_RDS_RINGS; 379 ha->hw.num_tx_rings = NUM_TX_RINGS; 380 381 ha->reg_rid1 = PCIR_BAR(2); 382 ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 383 &ha->reg_rid1, RF_ACTIVE); 384 385 ha->msix_count = pci_msix_count(dev); 386 387 if (ha->msix_count < 1 ) { 388 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, 389 ha->msix_count); 390 goto qla_pci_attach_err; 391 } 392 393 if (ha->msix_count < (ha->hw.num_sds_rings + 1)) { 394 ha->hw.num_sds_rings = ha->msix_count - 1; 395 } 396 397 QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x" 398 " msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha, 399 ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg, 400 ha->pci_reg1)); 401 402 /* initialize hardware */ 403 if (ql_init_hw(ha)) { 404 device_printf(dev, "%s: ql_init_hw failed\n", __func__); 405 goto qla_pci_attach_err; 406 } 407 408 device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__, 409 ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub, 410 ha->fw_ver_build); 411 snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d", 412 ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub, 413 ha->fw_ver_build); 414 415 if (qla_get_nic_partition(ha, NULL, &num_rcvq)) { 416 device_printf(dev, "%s: qla_get_nic_partition failed\n", 417 __func__); 418 goto qla_pci_attach_err; 419 } 420 device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x" 421 " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n", 422 __func__, ha, ha->pci_func, rsrc_len, ha->msix_count, 423 ha->pci_reg, ha->pci_reg1, num_rcvq); 424 425 if ((ha->msix_count < 64) || (num_rcvq != 32)) { 426 if (ha->hw.num_sds_rings > 15) { 427 ha->hw.num_sds_rings = 15; 428 } 429 } 430 431 ha->hw.num_rds_rings = ha->hw.num_sds_rings; 432 ha->hw.num_tx_rings = ha->hw.num_sds_rings; 433 434 #ifdef QL_ENABLE_ISCSI_TLV 435 ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2; 436 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ 437 438 ql_hw_add_sysctls(ha); 439 440 ha->msix_count = ha->hw.num_sds_rings + 1; 441 442 if (pci_alloc_msix(dev, &ha->msix_count)) { 443 device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__, 444 ha->msix_count); 445 ha->msix_count = 0; 446 goto qla_pci_attach_err; 447 } 448 449 ha->mbx_irq_rid = 1; 450 ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 451 &ha->mbx_irq_rid, 452 
(RF_ACTIVE | RF_SHAREABLE)); 453 if (ha->mbx_irq == NULL) { 454 device_printf(dev, "could not allocate mbx interrupt\n"); 455 goto qla_pci_attach_err; 456 } 457 if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE), 458 NULL, ql_mbx_isr, ha, &ha->mbx_handle)) { 459 device_printf(dev, "could not setup mbx interrupt\n"); 460 goto qla_pci_attach_err; 461 } 462 463 for (i = 0; i < ha->hw.num_sds_rings; i++) { 464 ha->irq_vec[i].sds_idx = i; 465 ha->irq_vec[i].ha = ha; 466 ha->irq_vec[i].irq_rid = 2 + i; 467 468 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 469 &ha->irq_vec[i].irq_rid, 470 (RF_ACTIVE | RF_SHAREABLE)); 471 472 if (ha->irq_vec[i].irq == NULL) { 473 device_printf(dev, "could not allocate interrupt\n"); 474 goto qla_pci_attach_err; 475 } 476 if (bus_setup_intr(dev, ha->irq_vec[i].irq, 477 (INTR_TYPE_NET | INTR_MPSAFE), 478 NULL, ql_isr, &ha->irq_vec[i], 479 &ha->irq_vec[i].handle)) { 480 device_printf(dev, "could not setup interrupt\n"); 481 goto qla_pci_attach_err; 482 } 483 484 ha->tx_fp[i].ha = ha; 485 ha->tx_fp[i].txr_idx = i; 486 487 if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) { 488 device_printf(dev, "%s: could not allocate tx_br[%d]\n", 489 __func__, i); 490 goto qla_pci_attach_err; 491 } 492 } 493 494 if (qla_create_fp_taskqueues(ha) != 0) 495 goto qla_pci_attach_err; 496 497 printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus, 498 ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count); 499 500 ql_read_mac_addr(ha); 501 502 /* allocate parent dma tag */ 503 if (qla_alloc_parent_dma_tag(ha)) { 504 device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n", 505 __func__); 506 goto qla_pci_attach_err; 507 } 508 509 /* alloc all dma buffers */ 510 if (ql_alloc_dma(ha)) { 511 device_printf(dev, "%s: ql_alloc_dma failed\n", __func__); 512 goto qla_pci_attach_err; 513 } 514 qla_get_peer(ha); 515 516 if (ql_minidump_init(ha) != 0) { 517 device_printf(dev, "%s: ql_minidump_init failed\n", __func__); 518 goto qla_pci_attach_err; 519 } 520 ql_alloc_drvr_state_buffer(ha); 521 ql_alloc_sp_log_buffer(ha); 522 /* create the o.s ethernet interface */ 523 qla_init_ifnet(dev, ha); 524 525 ha->flags.qla_watchdog_active = 1; 526 ha->qla_watchdog_pause = 0; 527 528 callout_init(&ha->tx_callout, TRUE); 529 ha->flags.qla_callout_init = 1; 530 531 /* create ioctl device interface */ 532 if (ql_make_cdev(ha)) { 533 device_printf(dev, "%s: ql_make_cdev failed\n", __func__); 534 goto qla_pci_attach_err; 535 } 536 537 callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, 538 qla_watchdog, ha); 539 540 TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha); 541 ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT, 542 taskqueue_thread_enqueue, &ha->err_tq); 543 taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq", 544 device_get_nameunit(ha->pci_dev)); 545 546 TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha); 547 ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT, 548 taskqueue_thread_enqueue, &ha->async_event_tq); 549 taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq", 550 device_get_nameunit(ha->pci_dev)); 551 552 TASK_INIT(&ha->stats_task, 0, qla_stats, ha); 553 ha->stats_tq = taskqueue_create("qla_statsq", M_NOWAIT, 554 taskqueue_thread_enqueue, &ha->stats_tq); 555 taskqueue_start_threads(&ha->stats_tq, 1, PI_NET, "%s taskq", 556 device_get_nameunit(ha->pci_dev)); 557 558 QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__)); 559 return (0); 560 561 qla_pci_attach_err: 562 563 qla_release(ha); 564 565 if 
(ha->flags.lock_init) { 566 mtx_destroy(&ha->hw_lock); 567 mtx_destroy(&ha->sp_log_lock); 568 } 569 570 QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__)); 571 return (ENXIO); 572 } 573 574 /* 575 * Name: qla_pci_detach 576 * Function: Unhooks the device from the operating system 577 */ 578 static int 579 qla_pci_detach(device_t dev) 580 { 581 qla_host_t *ha = NULL; 582 struct ifnet *ifp; 583 584 585 if ((ha = device_get_softc(dev)) == NULL) { 586 device_printf(dev, "cannot get softc\n"); 587 return (ENOMEM); 588 } 589 590 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 591 592 ifp = ha->ifp; 593 594 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 595 QLA_LOCK(ha, __func__, -1, 0); 596 597 ha->qla_detach_active = 1; 598 qla_stop(ha); 599 600 qla_release(ha); 601 602 QLA_UNLOCK(ha, __func__); 603 604 if (ha->flags.lock_init) { 605 mtx_destroy(&ha->hw_lock); 606 mtx_destroy(&ha->sp_log_lock); 607 } 608 609 QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); 610 611 return (0); 612 } 613 614 /* 615 * SYSCTL Related Callbacks 616 */ 617 static int 618 qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS) 619 { 620 int err, ret = 0; 621 qla_host_t *ha; 622 623 err = sysctl_handle_int(oidp, &ret, 0, req); 624 625 if (err || !req->newptr) 626 return (err); 627 628 if (ret == 1) { 629 ha = (qla_host_t *)arg1; 630 ql_hw_link_status(ha); 631 } 632 return (err); 633 } 634 635 /* 636 * Name: qla_release 637 * Function: Releases the resources allocated for the device 638 */ 639 static void 640 qla_release(qla_host_t *ha) 641 { 642 device_t dev; 643 int i; 644 645 dev = ha->pci_dev; 646 647 if (ha->async_event_tq) { 648 taskqueue_drain_all(ha->async_event_tq); 649 taskqueue_free(ha->async_event_tq); 650 } 651 652 if (ha->err_tq) { 653 taskqueue_drain_all(ha->err_tq); 654 taskqueue_free(ha->err_tq); 655 } 656 657 if (ha->stats_tq) { 658 taskqueue_drain_all(ha->stats_tq); 659 taskqueue_free(ha->stats_tq); 660 } 661 662 ql_del_cdev(ha); 663 664 if (ha->flags.qla_watchdog_active) { 665 ha->qla_watchdog_exit = 1; 666 667 while (ha->qla_watchdog_exited == 0) 668 qla_mdelay(__func__, 1); 669 } 670 671 if (ha->flags.qla_callout_init) 672 callout_stop(&ha->tx_callout); 673 674 if (ha->ifp != NULL) 675 ether_ifdetach(ha->ifp); 676 677 ql_free_drvr_state_buffer(ha); 678 ql_free_sp_log_buffer(ha); 679 ql_free_dma(ha); 680 qla_free_parent_dma_tag(ha); 681 682 if (ha->mbx_handle) 683 (void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle); 684 685 if (ha->mbx_irq) 686 (void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid, 687 ha->mbx_irq); 688 689 for (i = 0; i < ha->hw.num_sds_rings; i++) { 690 691 if (ha->irq_vec[i].handle) { 692 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, 693 ha->irq_vec[i].handle); 694 } 695 696 if (ha->irq_vec[i].irq) { 697 (void)bus_release_resource(dev, SYS_RES_IRQ, 698 ha->irq_vec[i].irq_rid, 699 ha->irq_vec[i].irq); 700 } 701 702 qla_free_tx_br(ha, &ha->tx_fp[i]); 703 } 704 qla_destroy_fp_taskqueues(ha); 705 706 if (ha->msix_count) 707 pci_release_msi(dev); 708 709 if (ha->pci_reg) 710 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, 711 ha->pci_reg); 712 713 if (ha->pci_reg1) 714 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1, 715 ha->pci_reg1); 716 717 return; 718 } 719 720 /* 721 * DMA Related Functions 722 */ 723 724 static void 725 qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 726 { 727 *((bus_addr_t *)arg) = 0; 728 729 if (error) { 730 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 731 return; 732 } 733 734 
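/* single-segment mapping (the tag is created with nsegments == 1): return the bus address through the caller's pointer */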
*((bus_addr_t *)arg) = segs[0].ds_addr; 735 736 return; 737 } 738 739 int 740 ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) 741 { 742 int ret = 0; 743 device_t dev; 744 bus_addr_t b_addr; 745 746 dev = ha->pci_dev; 747 748 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 749 750 ret = bus_dma_tag_create( 751 ha->parent_tag,/* parent */ 752 dma_buf->alignment, 753 ((bus_size_t)(1ULL << 32)),/* boundary */ 754 BUS_SPACE_MAXADDR, /* lowaddr */ 755 BUS_SPACE_MAXADDR, /* highaddr */ 756 NULL, NULL, /* filter, filterarg */ 757 dma_buf->size, /* maxsize */ 758 1, /* nsegments */ 759 dma_buf->size, /* maxsegsize */ 760 0, /* flags */ 761 NULL, NULL, /* lockfunc, lockarg */ 762 &dma_buf->dma_tag); 763 764 if (ret) { 765 device_printf(dev, "%s: could not create dma tag\n", __func__); 766 goto ql_alloc_dmabuf_exit; 767 } 768 ret = bus_dmamem_alloc(dma_buf->dma_tag, 769 (void **)&dma_buf->dma_b, 770 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 771 &dma_buf->dma_map); 772 if (ret) { 773 bus_dma_tag_destroy(dma_buf->dma_tag); 774 device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__); 775 goto ql_alloc_dmabuf_exit; 776 } 777 778 ret = bus_dmamap_load(dma_buf->dma_tag, 779 dma_buf->dma_map, 780 dma_buf->dma_b, 781 dma_buf->size, 782 qla_dmamap_callback, 783 &b_addr, BUS_DMA_NOWAIT); 784 785 if (ret || !b_addr) { 786 bus_dma_tag_destroy(dma_buf->dma_tag); 787 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 788 dma_buf->dma_map); 789 ret = -1; 790 goto ql_alloc_dmabuf_exit; 791 } 792 793 dma_buf->dma_addr = b_addr; 794 795 ql_alloc_dmabuf_exit: 796 QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n", 797 __func__, ret, (void *)dma_buf->dma_tag, 798 (void *)dma_buf->dma_map, (void *)dma_buf->dma_b, 799 dma_buf->size)); 800 801 return ret; 802 } 803 804 void 805 ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) 806 { 807 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 808 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 809 bus_dma_tag_destroy(dma_buf->dma_tag); 810 } 811 812 static int 813 qla_alloc_parent_dma_tag(qla_host_t *ha) 814 { 815 int ret; 816 device_t dev; 817 818 dev = ha->pci_dev; 819 820 /* 821 * Allocate parent DMA Tag 822 */ 823 ret = bus_dma_tag_create( 824 bus_get_dma_tag(dev), /* parent */ 825 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 826 BUS_SPACE_MAXADDR, /* lowaddr */ 827 BUS_SPACE_MAXADDR, /* highaddr */ 828 NULL, NULL, /* filter, filterarg */ 829 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 830 0, /* nsegments */ 831 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 832 0, /* flags */ 833 NULL, NULL, /* lockfunc, lockarg */ 834 &ha->parent_tag); 835 836 if (ret) { 837 device_printf(dev, "%s: could not create parent dma tag\n", 838 __func__); 839 return (-1); 840 } 841 842 ha->flags.parent_tag = 1; 843 844 return (0); 845 } 846 847 static void 848 qla_free_parent_dma_tag(qla_host_t *ha) 849 { 850 if (ha->flags.parent_tag) { 851 bus_dma_tag_destroy(ha->parent_tag); 852 ha->flags.parent_tag = 0; 853 } 854 } 855 856 /* 857 * Name: qla_init_ifnet 858 * Function: Creates the Network Device Interface and Registers it with the O.S 859 */ 860 861 static void 862 qla_init_ifnet(device_t dev, qla_host_t *ha) 863 { 864 struct ifnet *ifp; 865 866 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 867 868 ifp = ha->ifp = if_alloc(IFT_ETHER); 869 870 if (ifp == NULL) 871 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 872 873 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 874 875 ifp->if_baudrate = IF_Gbps(10); 
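/* start with link-state reporting only; checksum/TSO/VLAN/LRO capabilities are enabled further below, after ether_ifattach() */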
876 ifp->if_capabilities = IFCAP_LINKSTATE; 877 ifp->if_mtu = ETHERMTU; 878 879 ifp->if_init = qla_init; 880 ifp->if_softc = ha; 881 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 882 ifp->if_ioctl = qla_ioctl; 883 884 ifp->if_transmit = qla_transmit; 885 ifp->if_qflush = qla_qflush; 886 887 IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha)); 888 ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha); 889 IFQ_SET_READY(&ifp->if_snd); 890 891 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 892 893 ether_ifattach(ifp, qla_get_mac_addr(ha)); 894 895 ifp->if_capabilities |= IFCAP_HWCSUM | 896 IFCAP_TSO4 | 897 IFCAP_TSO6 | 898 IFCAP_JUMBO_MTU | 899 IFCAP_VLAN_HWTAGGING | 900 IFCAP_VLAN_MTU | 901 IFCAP_VLAN_HWTSO | 902 IFCAP_LRO; 903 904 ifp->if_capenable = ifp->if_capabilities; 905 906 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 907 908 ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status); 909 910 ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0, 911 NULL); 912 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 913 914 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 915 916 QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); 917 918 return; 919 } 920 921 static void 922 qla_init_locked(qla_host_t *ha) 923 { 924 struct ifnet *ifp = ha->ifp; 925 926 ql_sp_log(ha, 14, 0, 0, 0, 0, 0, 0); 927 928 qla_stop(ha); 929 930 if (qla_alloc_xmt_bufs(ha) != 0) 931 return; 932 933 qla_confirm_9kb_enable(ha); 934 935 if (qla_alloc_rcv_bufs(ha) != 0) 936 return; 937 938 bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN); 939 940 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO; 941 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6; 942 943 ha->stop_rcv = 0; 944 if (ql_init_hw_if(ha) == 0) { 945 ifp = ha->ifp; 946 ifp->if_drv_flags |= IFF_DRV_RUNNING; 947 ha->hw_vlan_tx_frames = 0; 948 ha->tx_tso_frames = 0; 949 ha->qla_interface_up = 1; 950 ql_update_link_state(ha); 951 } else { 952 if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_IF_START_FAILURE) 953 ha->hw.sp_log_stop = -1; 954 } 955 956 ha->qla_watchdog_pause = 0; 957 958 return; 959 } 960 961 static void 962 qla_init(void *arg) 963 { 964 qla_host_t *ha; 965 966 ha = (qla_host_t *)arg; 967 968 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 969 970 if (QLA_LOCK(ha, __func__, -1, 0) != 0) 971 return; 972 973 qla_init_locked(ha); 974 975 QLA_UNLOCK(ha, __func__); 976 977 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 978 } 979 980 static int 981 qla_set_multi(qla_host_t *ha, uint32_t add_multi) 982 { 983 uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN]; 984 struct ifmultiaddr *ifma; 985 int mcnt = 0; 986 struct ifnet *ifp = ha->ifp; 987 int ret = 0; 988 989 if_maddr_rlock(ifp); 990 991 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 992 993 if (ifma->ifma_addr->sa_family != AF_LINK) 994 continue; 995 996 if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS) 997 break; 998 999 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), 1000 &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN); 1001 1002 mcnt++; 1003 } 1004 1005 if_maddr_runlock(ifp); 1006 1007 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 1008 QLA_LOCK_NO_SLEEP) != 0) 1009 return (-1); 1010 1011 ql_sp_log(ha, 12, 4, ifp->if_drv_flags, 1012 (ifp->if_drv_flags & IFF_DRV_RUNNING), 1013 add_multi, (uint32_t)mcnt, 0); 1014 1015 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1016 1017 if (!add_multi) { 1018 ret = qla_hw_del_all_mcast(ha); 1019 1020 if (ret) 1021 device_printf(ha->pci_dev, 1022 
"%s: qla_hw_del_all_mcast() failed\n", 1023 __func__); 1024 } 1025 1026 if (!ret) 1027 ret = ql_hw_set_multi(ha, mta, mcnt, 1); 1028 1029 } 1030 1031 QLA_UNLOCK(ha, __func__); 1032 1033 return (ret); 1034 } 1035 1036 static int 1037 qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1038 { 1039 int ret = 0; 1040 struct ifreq *ifr = (struct ifreq *)data; 1041 struct ifaddr *ifa = (struct ifaddr *)data; 1042 qla_host_t *ha; 1043 1044 ha = (qla_host_t *)ifp->if_softc; 1045 if (ha->offline || ha->qla_initiate_recovery) 1046 return (ret); 1047 1048 switch (cmd) { 1049 case SIOCSIFADDR: 1050 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n", 1051 __func__, cmd)); 1052 1053 if (ifa->ifa_addr->sa_family == AF_INET) { 1054 1055 ret = QLA_LOCK(ha, __func__, 1056 QLA_LOCK_DEFAULT_MS_TIMEOUT, 1057 QLA_LOCK_NO_SLEEP); 1058 if (ret) 1059 break; 1060 1061 ifp->if_flags |= IFF_UP; 1062 1063 ql_sp_log(ha, 8, 3, ifp->if_drv_flags, 1064 (ifp->if_drv_flags & IFF_DRV_RUNNING), 1065 ntohl(IA_SIN(ifa)->sin_addr.s_addr), 0, 0); 1066 1067 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 1068 qla_init_locked(ha); 1069 } 1070 1071 QLA_UNLOCK(ha, __func__); 1072 QL_DPRINT4(ha, (ha->pci_dev, 1073 "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 1074 __func__, cmd, 1075 ntohl(IA_SIN(ifa)->sin_addr.s_addr))); 1076 1077 arp_ifinit(ifp, ifa); 1078 } else { 1079 ether_ioctl(ifp, cmd, data); 1080 } 1081 break; 1082 1083 case SIOCSIFMTU: 1084 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n", 1085 __func__, cmd)); 1086 1087 if (ifr->ifr_mtu > QLA_MAX_MTU) { 1088 ret = EINVAL; 1089 } else { 1090 ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 1091 QLA_LOCK_NO_SLEEP); 1092 1093 if (ret) 1094 break; 1095 1096 ifp->if_mtu = ifr->ifr_mtu; 1097 ha->max_frame_size = 1098 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 1099 1100 ql_sp_log(ha, 9, 4, ifp->if_drv_flags, 1101 (ifp->if_drv_flags & IFF_DRV_RUNNING), 1102 ha->max_frame_size, ifp->if_mtu, 0); 1103 1104 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1105 qla_init_locked(ha); 1106 } 1107 1108 if (ifp->if_mtu > ETHERMTU) 1109 ha->std_replenish = QL_JUMBO_REPLENISH_THRES; 1110 else 1111 ha->std_replenish = QL_STD_REPLENISH_THRES; 1112 1113 1114 QLA_UNLOCK(ha, __func__); 1115 } 1116 1117 break; 1118 1119 case SIOCSIFFLAGS: 1120 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n", 1121 __func__, cmd)); 1122 1123 ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 1124 QLA_LOCK_NO_SLEEP); 1125 1126 if (ret) 1127 break; 1128 1129 ql_sp_log(ha, 10, 4, ifp->if_drv_flags, 1130 (ifp->if_drv_flags & IFF_DRV_RUNNING), 1131 ha->if_flags, ifp->if_flags, 0); 1132 1133 if (ifp->if_flags & IFF_UP) { 1134 1135 ha->max_frame_size = ifp->if_mtu + 1136 ETHER_HDR_LEN + ETHER_CRC_LEN; 1137 qla_init_locked(ha); 1138 1139 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1140 if ((ifp->if_flags ^ ha->if_flags) & 1141 IFF_PROMISC) { 1142 ret = ql_set_promisc(ha); 1143 } else if ((ifp->if_flags ^ ha->if_flags) & 1144 IFF_ALLMULTI) { 1145 ret = ql_set_allmulti(ha); 1146 } 1147 } 1148 } else { 1149 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1150 qla_stop(ha); 1151 ha->if_flags = ifp->if_flags; 1152 } 1153 1154 QLA_UNLOCK(ha, __func__); 1155 break; 1156 1157 case SIOCADDMULTI: 1158 QL_DPRINT4(ha, (ha->pci_dev, 1159 "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd)); 1160 1161 if (qla_set_multi(ha, 1)) 1162 ret = EINVAL; 1163 break; 1164 1165 case SIOCDELMULTI: 1166 QL_DPRINT4(ha, (ha->pci_dev, 1167 "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd)); 1168 1169 if (qla_set_multi(ha, 0)) 1170 
ret = EINVAL; 1171 break; 1172 1173 case SIOCSIFMEDIA: 1174 case SIOCGIFMEDIA: 1175 QL_DPRINT4(ha, (ha->pci_dev, 1176 "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", 1177 __func__, cmd)); 1178 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); 1179 break; 1180 1181 case SIOCSIFCAP: 1182 { 1183 int mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1184 1185 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n", 1186 __func__, cmd)); 1187 1188 if (mask & IFCAP_HWCSUM) 1189 ifp->if_capenable ^= IFCAP_HWCSUM; 1190 if (mask & IFCAP_TSO4) 1191 ifp->if_capenable ^= IFCAP_TSO4; 1192 if (mask & IFCAP_TSO6) 1193 ifp->if_capenable ^= IFCAP_TSO6; 1194 if (mask & IFCAP_VLAN_HWTAGGING) 1195 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1196 if (mask & IFCAP_VLAN_HWTSO) 1197 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1198 if (mask & IFCAP_LRO) 1199 ifp->if_capenable ^= IFCAP_LRO; 1200 1201 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1202 ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 1203 QLA_LOCK_NO_SLEEP); 1204 1205 if (ret) 1206 break; 1207 1208 ql_sp_log(ha, 11, 4, ifp->if_drv_flags, 1209 (ifp->if_drv_flags & IFF_DRV_RUNNING), 1210 mask, ifp->if_capenable, 0); 1211 1212 qla_init_locked(ha); 1213 1214 QLA_UNLOCK(ha, __func__); 1215 1216 } 1217 VLAN_CAPABILITIES(ifp); 1218 break; 1219 } 1220 1221 default: 1222 QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n", 1223 __func__, cmd)); 1224 ret = ether_ioctl(ifp, cmd, data); 1225 break; 1226 } 1227 1228 return (ret); 1229 } 1230 1231 static int 1232 qla_media_change(struct ifnet *ifp) 1233 { 1234 qla_host_t *ha; 1235 struct ifmedia *ifm; 1236 int ret = 0; 1237 1238 ha = (qla_host_t *)ifp->if_softc; 1239 1240 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1241 1242 ifm = &ha->media; 1243 1244 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1245 ret = EINVAL; 1246 1247 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1248 1249 return (ret); 1250 } 1251 1252 static void 1253 qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1254 { 1255 qla_host_t *ha; 1256 1257 ha = (qla_host_t *)ifp->if_softc; 1258 1259 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1260 1261 ifmr->ifm_status = IFM_AVALID; 1262 ifmr->ifm_active = IFM_ETHER; 1263 1264 ql_update_link_state(ha); 1265 if (ha->hw.link_up) { 1266 ifmr->ifm_status |= IFM_ACTIVE; 1267 ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha)); 1268 } 1269 1270 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,\ 1271 (ha->hw.link_up ? 
"link_up" : "link_down"))); 1272 1273 return; 1274 } 1275 1276 1277 static int 1278 qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx, 1279 uint32_t iscsi_pdu) 1280 { 1281 bus_dma_segment_t segs[QLA_MAX_SEGMENTS]; 1282 bus_dmamap_t map; 1283 int nsegs; 1284 int ret = -1; 1285 uint32_t tx_idx; 1286 struct mbuf *m_head = *m_headp; 1287 1288 QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1289 1290 tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next; 1291 1292 if (NULL != ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head) { 1293 QL_ASSERT(ha, 0, ("%s [%d]: txr_idx = %d tx_idx = %d "\ 1294 "mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,\ 1295 ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head)); 1296 if (m_head) 1297 m_freem(m_head); 1298 *m_headp = NULL; 1299 return (ret); 1300 } 1301 1302 map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map; 1303 1304 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 1305 BUS_DMA_NOWAIT); 1306 1307 if (ret == EFBIG) { 1308 1309 struct mbuf *m; 1310 1311 QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__, 1312 m_head->m_pkthdr.len)); 1313 1314 m = m_defrag(m_head, M_NOWAIT); 1315 if (m == NULL) { 1316 ha->err_tx_defrag++; 1317 m_freem(m_head); 1318 *m_headp = NULL; 1319 device_printf(ha->pci_dev, 1320 "%s: m_defrag() = NULL [%d]\n", 1321 __func__, ret); 1322 return (ENOBUFS); 1323 } 1324 m_head = m; 1325 *m_headp = m_head; 1326 1327 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 1328 segs, &nsegs, BUS_DMA_NOWAIT))) { 1329 1330 ha->err_tx_dmamap_load++; 1331 1332 device_printf(ha->pci_dev, 1333 "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n", 1334 __func__, ret, m_head->m_pkthdr.len); 1335 1336 if (ret != ENOMEM) { 1337 m_freem(m_head); 1338 *m_headp = NULL; 1339 } 1340 return (ret); 1341 } 1342 1343 } else if (ret) { 1344 1345 ha->err_tx_dmamap_load++; 1346 1347 device_printf(ha->pci_dev, 1348 "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n", 1349 __func__, ret, m_head->m_pkthdr.len); 1350 1351 if (ret != ENOMEM) { 1352 m_freem(m_head); 1353 *m_headp = NULL; 1354 } 1355 return (ret); 1356 } 1357 1358 QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet")); 1359 1360 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 1361 1362 if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx, 1363 iscsi_pdu))) { 1364 ha->tx_ring[txr_idx].count++; 1365 if (iscsi_pdu) 1366 ha->tx_ring[txr_idx].iscsi_pkt_count++; 1367 ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head; 1368 } else { 1369 bus_dmamap_unload(ha->tx_tag, map); 1370 if (ret == EINVAL) { 1371 if (m_head) 1372 m_freem(m_head); 1373 *m_headp = NULL; 1374 } 1375 } 1376 1377 QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1378 return (ret); 1379 } 1380 1381 static int 1382 qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp) 1383 { 1384 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 1385 "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx); 1386 1387 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 1388 1389 fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF, 1390 M_NOWAIT, &fp->tx_mtx); 1391 if (fp->tx_br == NULL) { 1392 QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for " 1393 " fp[%d, %d]\n", ha->pci_func, fp->txr_idx)); 1394 return (-ENOMEM); 1395 } 1396 return 0; 1397 } 1398 1399 static void 1400 qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp) 1401 { 1402 struct mbuf *mp; 1403 struct ifnet *ifp = ha->ifp; 1404 1405 if (mtx_initialized(&fp->tx_mtx)) { 1406 1407 if (fp->tx_br != NULL) { 1408 1409 mtx_lock(&fp->tx_mtx); 1410 1411 while ((mp = 
drbr_dequeue(ifp, fp->tx_br)) != NULL) { 1412 m_freem(mp); 1413 } 1414 1415 mtx_unlock(&fp->tx_mtx); 1416 1417 buf_ring_free(fp->tx_br, M_DEVBUF); 1418 fp->tx_br = NULL; 1419 } 1420 mtx_destroy(&fp->tx_mtx); 1421 } 1422 return; 1423 } 1424 1425 static void 1426 qla_fp_taskqueue(void *context, int pending) 1427 { 1428 qla_tx_fp_t *fp; 1429 qla_host_t *ha; 1430 struct ifnet *ifp; 1431 struct mbuf *mp; 1432 int ret; 1433 uint32_t txr_idx; 1434 uint32_t iscsi_pdu = 0; 1435 uint32_t rx_pkts_left = -1; 1436 1437 fp = context; 1438 1439 if (fp == NULL) 1440 return; 1441 1442 ha = (qla_host_t *)fp->ha; 1443 1444 ifp = ha->ifp; 1445 1446 txr_idx = fp->txr_idx; 1447 1448 mtx_lock(&fp->tx_mtx); 1449 1450 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || (!ha->hw.link_up)) { 1451 mtx_unlock(&fp->tx_mtx); 1452 goto qla_fp_taskqueue_exit; 1453 } 1454 1455 while (rx_pkts_left && !ha->stop_rcv && 1456 (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 1457 rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64); 1458 1459 #ifdef QL_ENABLE_ISCSI_TLV 1460 ql_hw_tx_done_locked(ha, fp->txr_idx); 1461 ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1))); 1462 #else 1463 ql_hw_tx_done_locked(ha, fp->txr_idx); 1464 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ 1465 1466 mp = drbr_peek(ifp, fp->tx_br); 1467 1468 while (mp != NULL) { 1469 1470 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) { 1471 #ifdef QL_ENABLE_ISCSI_TLV 1472 if (ql_iscsi_pdu(ha, mp) == 0) { 1473 txr_idx = txr_idx + 1474 (ha->hw.num_tx_rings >> 1); 1475 iscsi_pdu = 1; 1476 } else { 1477 iscsi_pdu = 0; 1478 txr_idx = fp->txr_idx; 1479 } 1480 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ 1481 } 1482 1483 ret = qla_send(ha, &mp, txr_idx, iscsi_pdu); 1484 1485 if (ret) { 1486 if (mp != NULL) 1487 drbr_putback(ifp, fp->tx_br, mp); 1488 else { 1489 drbr_advance(ifp, fp->tx_br); 1490 } 1491 1492 mtx_unlock(&fp->tx_mtx); 1493 1494 goto qla_fp_taskqueue_exit0; 1495 } else { 1496 drbr_advance(ifp, fp->tx_br); 1497 } 1498 1499 /* Send a copy of the frame to the BPF listener */ 1500 ETHER_BPF_MTAP(ifp, mp); 1501 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1502 break; 1503 1504 mp = drbr_peek(ifp, fp->tx_br); 1505 } 1506 } 1507 mtx_unlock(&fp->tx_mtx); 1508 1509 qla_fp_taskqueue_exit0: 1510 1511 if (rx_pkts_left || ((mp != NULL) && ret)) { 1512 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 1513 } else { 1514 if (!ha->stop_rcv) { 1515 QL_ENABLE_INTERRUPTS(ha, fp->txr_idx); 1516 } 1517 } 1518 1519 qla_fp_taskqueue_exit: 1520 1521 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); 1522 return; 1523 } 1524 1525 static int 1526 qla_create_fp_taskqueues(qla_host_t *ha) 1527 { 1528 int i; 1529 uint8_t tq_name[32]; 1530 1531 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1532 1533 qla_tx_fp_t *fp = &ha->tx_fp[i]; 1534 1535 bzero(tq_name, sizeof (tq_name)); 1536 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i); 1537 1538 TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp); 1539 1540 fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT, 1541 taskqueue_thread_enqueue, 1542 &fp->fp_taskqueue); 1543 1544 if (fp->fp_taskqueue == NULL) 1545 return (-1); 1546 1547 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s", 1548 tq_name); 1549 1550 QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__, 1551 fp->fp_taskqueue)); 1552 } 1553 1554 return (0); 1555 } 1556 1557 static void 1558 qla_destroy_fp_taskqueues(qla_host_t *ha) 1559 { 1560 int i; 1561 1562 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1563 1564 qla_tx_fp_t *fp = &ha->tx_fp[i]; 1565 1566 if 
(fp->fp_taskqueue != NULL) { 1567 taskqueue_drain_all(fp->fp_taskqueue); 1568 taskqueue_free(fp->fp_taskqueue); 1569 fp->fp_taskqueue = NULL; 1570 } 1571 } 1572 return; 1573 } 1574 1575 static void 1576 qla_drain_fp_taskqueues(qla_host_t *ha) 1577 { 1578 int i; 1579 1580 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1581 qla_tx_fp_t *fp = &ha->tx_fp[i]; 1582 1583 if (fp->fp_taskqueue != NULL) { 1584 taskqueue_drain_all(fp->fp_taskqueue); 1585 } 1586 } 1587 return; 1588 } 1589 1590 static int 1591 qla_transmit(struct ifnet *ifp, struct mbuf *mp) 1592 { 1593 qla_host_t *ha = (qla_host_t *)ifp->if_softc; 1594 qla_tx_fp_t *fp; 1595 int rss_id = 0; 1596 int ret = 0; 1597 1598 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1599 1600 #if __FreeBSD_version >= 1100000 1601 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) 1602 #else 1603 if (mp->m_flags & M_FLOWID) 1604 #endif 1605 rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) % 1606 ha->hw.num_sds_rings; 1607 fp = &ha->tx_fp[rss_id]; 1608 1609 if (fp->tx_br == NULL) { 1610 ret = EINVAL; 1611 goto qla_transmit_exit; 1612 } 1613 1614 if (mp != NULL) { 1615 ret = drbr_enqueue(ifp, fp->tx_br, mp); 1616 } 1617 1618 if (fp->fp_taskqueue != NULL) 1619 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 1620 1621 ret = 0; 1622 1623 qla_transmit_exit: 1624 1625 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); 1626 return ret; 1627 } 1628 1629 static void 1630 qla_qflush(struct ifnet *ifp) 1631 { 1632 int i; 1633 qla_tx_fp_t *fp; 1634 struct mbuf *mp; 1635 qla_host_t *ha; 1636 1637 ha = (qla_host_t *)ifp->if_softc; 1638 1639 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1640 1641 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1642 1643 fp = &ha->tx_fp[i]; 1644 1645 if (fp == NULL) 1646 continue; 1647 1648 if (fp->tx_br) { 1649 mtx_lock(&fp->tx_mtx); 1650 1651 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 1652 m_freem(mp); 1653 } 1654 mtx_unlock(&fp->tx_mtx); 1655 } 1656 } 1657 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1658 1659 return; 1660 } 1661 1662 static void 1663 qla_stop(qla_host_t *ha) 1664 { 1665 struct ifnet *ifp = ha->ifp; 1666 device_t dev; 1667 int i = 0; 1668 1669 ql_sp_log(ha, 13, 0, 0, 0, 0, 0, 0); 1670 1671 dev = ha->pci_dev; 1672 1673 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1674 ha->qla_watchdog_pause = 1; 1675 1676 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1677 qla_tx_fp_t *fp; 1678 1679 fp = &ha->tx_fp[i]; 1680 1681 if (fp == NULL) 1682 continue; 1683 1684 if (fp->tx_br != NULL) { 1685 mtx_lock(&fp->tx_mtx); 1686 mtx_unlock(&fp->tx_mtx); 1687 } 1688 } 1689 1690 while (!ha->qla_watchdog_paused) 1691 qla_mdelay(__func__, 1); 1692 1693 ha->qla_interface_up = 0; 1694 1695 qla_drain_fp_taskqueues(ha); 1696 1697 ql_del_hw_if(ha); 1698 1699 qla_free_xmt_bufs(ha); 1700 qla_free_rcv_bufs(ha); 1701 1702 return; 1703 } 1704 1705 /* 1706 * Buffer Management Functions for Transmit and Receive Rings 1707 */ 1708 static int 1709 qla_alloc_xmt_bufs(qla_host_t *ha) 1710 { 1711 int ret = 0; 1712 uint32_t i, j; 1713 qla_tx_buf_t *txb; 1714 1715 if (bus_dma_tag_create(NULL, /* parent */ 1716 1, 0, /* alignment, bounds */ 1717 BUS_SPACE_MAXADDR, /* lowaddr */ 1718 BUS_SPACE_MAXADDR, /* highaddr */ 1719 NULL, NULL, /* filter, filterarg */ 1720 QLA_MAX_TSO_FRAME_SIZE, /* maxsize */ 1721 QLA_MAX_SEGMENTS, /* nsegments */ 1722 PAGE_SIZE, /* maxsegsize */ 1723 BUS_DMA_ALLOCNOW, /* flags */ 1724 NULL, /* lockfunc */ 1725 NULL, /* lockfuncarg */ 1726 &ha->tx_tag)) { 1727 device_printf(ha->pci_dev, "%s: 
tx_tag alloc failed\n", 1728 __func__); 1729 return (ENOMEM); 1730 } 1731 1732 for (i = 0; i < ha->hw.num_tx_rings; i++) { 1733 bzero((void *)ha->tx_ring[i].tx_buf, 1734 (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); 1735 } 1736 1737 for (j = 0; j < ha->hw.num_tx_rings; j++) { 1738 for (i = 0; i < NUM_TX_DESCRIPTORS; i++) { 1739 1740 txb = &ha->tx_ring[j].tx_buf[i]; 1741 1742 if ((ret = bus_dmamap_create(ha->tx_tag, 1743 BUS_DMA_NOWAIT, &txb->map))) { 1744 1745 ha->err_tx_dmamap_create++; 1746 device_printf(ha->pci_dev, 1747 "%s: bus_dmamap_create failed[%d]\n", 1748 __func__, ret); 1749 1750 qla_free_xmt_bufs(ha); 1751 1752 return (ret); 1753 } 1754 } 1755 } 1756 1757 return 0; 1758 } 1759 1760 /* 1761 * Release mbuf after it sent on the wire 1762 */ 1763 static void 1764 qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb) 1765 { 1766 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1767 1768 if (txb->m_head) { 1769 bus_dmamap_sync(ha->tx_tag, txb->map, 1770 BUS_DMASYNC_POSTWRITE); 1771 1772 bus_dmamap_unload(ha->tx_tag, txb->map); 1773 1774 m_freem(txb->m_head); 1775 txb->m_head = NULL; 1776 1777 bus_dmamap_destroy(ha->tx_tag, txb->map); 1778 txb->map = NULL; 1779 } 1780 1781 if (txb->map) { 1782 bus_dmamap_unload(ha->tx_tag, txb->map); 1783 bus_dmamap_destroy(ha->tx_tag, txb->map); 1784 txb->map = NULL; 1785 } 1786 1787 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1788 } 1789 1790 static void 1791 qla_free_xmt_bufs(qla_host_t *ha) 1792 { 1793 int i, j; 1794 1795 for (j = 0; j < ha->hw.num_tx_rings; j++) { 1796 for (i = 0; i < NUM_TX_DESCRIPTORS; i++) 1797 qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]); 1798 } 1799 1800 if (ha->tx_tag != NULL) { 1801 bus_dma_tag_destroy(ha->tx_tag); 1802 ha->tx_tag = NULL; 1803 } 1804 1805 for (i = 0; i < ha->hw.num_tx_rings; i++) { 1806 bzero((void *)ha->tx_ring[i].tx_buf, 1807 (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); 1808 } 1809 return; 1810 } 1811 1812 1813 static int 1814 qla_alloc_rcv_std(qla_host_t *ha) 1815 { 1816 int i, j, k, r, ret = 0; 1817 qla_rx_buf_t *rxb; 1818 qla_rx_ring_t *rx_ring; 1819 1820 for (r = 0; r < ha->hw.num_rds_rings; r++) { 1821 1822 rx_ring = &ha->rx_ring[r]; 1823 1824 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1825 1826 rxb = &rx_ring->rx_buf[i]; 1827 1828 ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, 1829 &rxb->map); 1830 1831 if (ret) { 1832 device_printf(ha->pci_dev, 1833 "%s: dmamap[%d, %d] failed\n", 1834 __func__, r, i); 1835 1836 for (k = 0; k < r; k++) { 1837 for (j = 0; j < NUM_RX_DESCRIPTORS; 1838 j++) { 1839 rxb = &ha->rx_ring[k].rx_buf[j]; 1840 bus_dmamap_destroy(ha->rx_tag, 1841 rxb->map); 1842 } 1843 } 1844 1845 for (j = 0; j < i; j++) { 1846 bus_dmamap_destroy(ha->rx_tag, 1847 rx_ring->rx_buf[j].map); 1848 } 1849 goto qla_alloc_rcv_std_err; 1850 } 1851 } 1852 } 1853 1854 qla_init_hw_rcv_descriptors(ha); 1855 1856 1857 for (r = 0; r < ha->hw.num_rds_rings; r++) { 1858 1859 rx_ring = &ha->rx_ring[r]; 1860 1861 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1862 rxb = &rx_ring->rx_buf[i]; 1863 rxb->handle = i; 1864 if (!(ret = ql_get_mbuf(ha, rxb, NULL))) { 1865 /* 1866 * set the physical address in the 1867 * corresponding descriptor entry in the 1868 * receive ring/queue for the hba 1869 */ 1870 qla_set_hw_rcv_desc(ha, r, i, rxb->handle, 1871 rxb->paddr, 1872 (rxb->m_head)->m_pkthdr.len); 1873 } else { 1874 device_printf(ha->pci_dev, 1875 "%s: ql_get_mbuf [%d, %d] failed\n", 1876 __func__, r, i); 1877 bus_dmamap_destroy(ha->rx_tag, rxb->map); 1878 goto qla_alloc_rcv_std_err; 1879 } 1880 
} 1881 } 1882 return 0; 1883 1884 qla_alloc_rcv_std_err: 1885 return (-1); 1886 } 1887 1888 static void 1889 qla_free_rcv_std(qla_host_t *ha) 1890 { 1891 int i, r; 1892 qla_rx_buf_t *rxb; 1893 1894 for (r = 0; r < ha->hw.num_rds_rings; r++) { 1895 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1896 rxb = &ha->rx_ring[r].rx_buf[i]; 1897 if (rxb->m_head != NULL) { 1898 bus_dmamap_unload(ha->rx_tag, rxb->map); 1899 bus_dmamap_destroy(ha->rx_tag, rxb->map); 1900 m_freem(rxb->m_head); 1901 rxb->m_head = NULL; 1902 } 1903 } 1904 } 1905 return; 1906 } 1907 1908 static int 1909 qla_alloc_rcv_bufs(qla_host_t *ha) 1910 { 1911 int i, ret = 0; 1912 1913 if (bus_dma_tag_create(NULL, /* parent */ 1914 1, 0, /* alignment, bounds */ 1915 BUS_SPACE_MAXADDR, /* lowaddr */ 1916 BUS_SPACE_MAXADDR, /* highaddr */ 1917 NULL, NULL, /* filter, filterarg */ 1918 MJUM9BYTES, /* maxsize */ 1919 1, /* nsegments */ 1920 MJUM9BYTES, /* maxsegsize */ 1921 BUS_DMA_ALLOCNOW, /* flags */ 1922 NULL, /* lockfunc */ 1923 NULL, /* lockfuncarg */ 1924 &ha->rx_tag)) { 1925 1926 device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n", 1927 __func__); 1928 1929 return (ENOMEM); 1930 } 1931 1932 bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS)); 1933 1934 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1935 ha->hw.sds[i].sdsr_next = 0; 1936 ha->hw.sds[i].rxb_free = NULL; 1937 ha->hw.sds[i].rx_free = 0; 1938 } 1939 1940 ret = qla_alloc_rcv_std(ha); 1941 1942 return (ret); 1943 } 1944 1945 static void 1946 qla_free_rcv_bufs(qla_host_t *ha) 1947 { 1948 int i; 1949 1950 qla_free_rcv_std(ha); 1951 1952 if (ha->rx_tag != NULL) { 1953 bus_dma_tag_destroy(ha->rx_tag); 1954 ha->rx_tag = NULL; 1955 } 1956 1957 bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS)); 1958 1959 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1960 ha->hw.sds[i].sdsr_next = 0; 1961 ha->hw.sds[i].rxb_free = NULL; 1962 ha->hw.sds[i].rx_free = 0; 1963 } 1964 1965 return; 1966 } 1967 1968 int 1969 ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp) 1970 { 1971 register struct mbuf *mp = nmp; 1972 struct ifnet *ifp; 1973 int ret = 0; 1974 uint32_t offset; 1975 bus_dma_segment_t segs[1]; 1976 int nsegs, mbuf_size; 1977 1978 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1979 1980 ifp = ha->ifp; 1981 1982 if (ha->hw.enable_9kb) 1983 mbuf_size = MJUM9BYTES; 1984 else 1985 mbuf_size = MCLBYTES; 1986 1987 if (mp == NULL) { 1988 1989 if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE)) 1990 return(-1); 1991 1992 if (ha->hw.enable_9kb) 1993 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size); 1994 else 1995 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1996 1997 if (mp == NULL) { 1998 ha->err_m_getcl++; 1999 ret = ENOBUFS; 2000 device_printf(ha->pci_dev, 2001 "%s: m_getcl failed\n", __func__); 2002 goto exit_ql_get_mbuf; 2003 } 2004 mp->m_len = mp->m_pkthdr.len = mbuf_size; 2005 } else { 2006 mp->m_len = mp->m_pkthdr.len = mbuf_size; 2007 mp->m_data = mp->m_ext.ext_buf; 2008 mp->m_next = NULL; 2009 } 2010 2011 offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL); 2012 if (offset) { 2013 offset = 8 - offset; 2014 m_adj(mp, offset); 2015 } 2016 2017 /* 2018 * Using memory from the mbuf cluster pool, invoke the bus_dma 2019 * machinery to arrange the memory mapping. 
2020 */ 2021 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map, 2022 mp, segs, &nsegs, BUS_DMA_NOWAIT); 2023 rxb->paddr = segs[0].ds_addr; 2024 2025 if (ret || !rxb->paddr || (nsegs != 1)) { 2026 m_free(mp); 2027 rxb->m_head = NULL; 2028 device_printf(ha->pci_dev, 2029 "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 2030 __func__, ret, (long long unsigned int)rxb->paddr, 2031 nsegs); 2032 ret = -1; 2033 goto exit_ql_get_mbuf; 2034 } 2035 rxb->m_head = mp; 2036 bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD); 2037 2038 exit_ql_get_mbuf: 2039 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret)); 2040 return (ret); 2041 } 2042 2043 2044 static void 2045 qla_get_peer(qla_host_t *ha) 2046 { 2047 device_t *peers; 2048 int count, i, slot; 2049 int my_slot = pci_get_slot(ha->pci_dev); 2050 2051 if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count)) 2052 return; 2053 2054 for (i = 0; i < count; i++) { 2055 slot = pci_get_slot(peers[i]); 2056 2057 if ((slot >= 0) && (slot == my_slot) && 2058 (pci_get_device(peers[i]) == 2059 pci_get_device(ha->pci_dev))) { 2060 if (ha->pci_dev != peers[i]) 2061 ha->peer_dev = peers[i]; 2062 } 2063 } 2064 } 2065 2066 static void 2067 qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer) 2068 { 2069 qla_host_t *ha_peer; 2070 2071 if (ha->peer_dev) { 2072 if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) { 2073 2074 ha_peer->msg_from_peer = msg_to_peer; 2075 } 2076 } 2077 } 2078 2079 void 2080 qla_set_error_recovery(qla_host_t *ha) 2081 { 2082 struct ifnet *ifp = ha->ifp; 2083 2084 if (!cold && ha->enable_error_recovery) { 2085 if (ifp) 2086 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2087 ha->qla_initiate_recovery = 1; 2088 } else 2089 ha->offline = 1; 2090 return; 2091 } 2092 2093 static void 2094 qla_error_recovery(void *context, int pending) 2095 { 2096 qla_host_t *ha = context; 2097 uint32_t msecs_100 = 400; 2098 struct ifnet *ifp = ha->ifp; 2099 int i = 0; 2100 2101 device_printf(ha->pci_dev, "%s: enter\n", __func__); 2102 ha->hw.imd_compl = 1; 2103 2104 taskqueue_drain_all(ha->stats_tq); 2105 taskqueue_drain_all(ha->async_event_tq); 2106 2107 if (QLA_LOCK(ha, __func__, -1, 0) != 0) 2108 return; 2109 2110 device_printf(ha->pci_dev, "%s: ts_usecs = %ld start\n", 2111 __func__, qla_get_usec_timestamp()); 2112 2113 if (ha->qla_interface_up) { 2114 2115 qla_mdelay(__func__, 300); 2116 2117 //ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2118 2119 for (i = 0; i < ha->hw.num_sds_rings; i++) { 2120 qla_tx_fp_t *fp; 2121 2122 fp = &ha->tx_fp[i]; 2123 2124 if (fp == NULL) 2125 continue; 2126 2127 if (fp->tx_br != NULL) { 2128 mtx_lock(&fp->tx_mtx); 2129 mtx_unlock(&fp->tx_mtx); 2130 } 2131 } 2132 } 2133 2134 qla_drain_fp_taskqueues(ha); 2135 2136 if ((ha->pci_func & 0x1) == 0) { 2137 2138 if (!ha->msg_from_peer) { 2139 qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET); 2140 2141 while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && 2142 msecs_100--) 2143 qla_mdelay(__func__, 100); 2144 } 2145 2146 ha->msg_from_peer = 0; 2147 2148 if (ha->enable_minidump) 2149 ql_minidump(ha); 2150 2151 if (ha->enable_driverstate_dump) 2152 ql_capture_drvr_state(ha); 2153 2154 if (ql_init_hw(ha)) { 2155 device_printf(ha->pci_dev, 2156 "%s: ts_usecs = %ld exit: ql_init_hw failed\n", 2157 __func__, qla_get_usec_timestamp()); 2158 ha->offline = 1; 2159 goto qla_error_recovery_exit; 2160 } 2161 2162 if (ha->qla_interface_up) { 2163 qla_free_xmt_bufs(ha); 2164 qla_free_rcv_bufs(ha); 2165 } 2166 2167 if (!QL_ERR_INJECT(ha, 
INJCT_PEER_PORT_FAILURE_ERR_RECOVERY)) 2168 qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK); 2169 2170 } else { 2171 if (ha->msg_from_peer == QL_PEER_MSG_RESET) { 2172 2173 ha->msg_from_peer = 0; 2174 2175 if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY)) 2176 qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK); 2177 } else { 2178 qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET); 2179 } 2180 2181 while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--) 2182 qla_mdelay(__func__, 100); 2183 ha->msg_from_peer = 0; 2184 2185 if (ha->enable_driverstate_dump) 2186 ql_capture_drvr_state(ha); 2187 2188 if (msecs_100 == 0) { 2189 device_printf(ha->pci_dev, 2190 "%s: ts_usecs = %ld exit: QL_PEER_MSG_ACK not received\n", 2191 __func__, qla_get_usec_timestamp()); 2192 ha->offline = 1; 2193 goto qla_error_recovery_exit; 2194 } 2195 2196 if (ql_init_hw(ha)) { 2197 device_printf(ha->pci_dev, 2198 "%s: ts_usecs = %ld exit: ql_init_hw failed\n", 2199 __func__, qla_get_usec_timestamp()); 2200 ha->offline = 1; 2201 goto qla_error_recovery_exit; 2202 } 2203 2204 if (ha->qla_interface_up) { 2205 qla_free_xmt_bufs(ha); 2206 qla_free_rcv_bufs(ha); 2207 } 2208 } 2209 2210 qla_mdelay(__func__, ha->ms_delay_after_init); 2211 2212 *((uint32_t *)&ha->hw.flags) = 0; 2213 ha->qla_initiate_recovery = 0; 2214 2215 if (ha->qla_interface_up) { 2216 2217 if (qla_alloc_xmt_bufs(ha) != 0) { 2218 ha->offline = 1; 2219 goto qla_error_recovery_exit; 2220 } 2221 2222 qla_confirm_9kb_enable(ha); 2223 2224 if (qla_alloc_rcv_bufs(ha) != 0) { 2225 ha->offline = 1; 2226 goto qla_error_recovery_exit; 2227 } 2228 2229 ha->stop_rcv = 0; 2230 2231 if (ql_init_hw_if(ha) == 0) { 2232 ifp = ha->ifp; 2233 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2234 ha->qla_watchdog_pause = 0; 2235 ql_update_link_state(ha); 2236 } else { 2237 ha->offline = 1; 2238 2239 if (ha->hw.sp_log_stop_events & 2240 Q8_SP_LOG_STOP_IF_START_FAILURE) 2241 ha->hw.sp_log_stop = -1; 2242 } 2243 } else { 2244 ha->qla_watchdog_pause = 0; 2245 } 2246 2247 qla_error_recovery_exit: 2248 2249 if (ha->offline ) { 2250 device_printf(ha->pci_dev, "%s: ts_usecs = %ld port offline\n", 2251 __func__, qla_get_usec_timestamp()); 2252 if (ha->hw.sp_log_stop_events & 2253 Q8_SP_LOG_STOP_ERR_RECOVERY_FAILURE) 2254 ha->hw.sp_log_stop = -1; 2255 } 2256 2257 2258 QLA_UNLOCK(ha, __func__); 2259 2260 if (!ha->offline) 2261 callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, 2262 qla_watchdog, ha); 2263 2264 device_printf(ha->pci_dev, 2265 "%s: ts_usecs = %ld exit\n", 2266 __func__, qla_get_usec_timestamp()); 2267 return; 2268 } 2269 2270 static void 2271 qla_async_event(void *context, int pending) 2272 { 2273 qla_host_t *ha = context; 2274 2275 if (QLA_LOCK(ha, __func__, -1, 0) != 0) 2276 return; 2277 2278 if (ha->async_event) { 2279 ha->async_event = 0; 2280 qla_hw_async_event(ha); 2281 } 2282 2283 QLA_UNLOCK(ha, __func__); 2284 2285 return; 2286 } 2287 2288 static void 2289 qla_stats(void *context, int pending) 2290 { 2291 qla_host_t *ha; 2292 2293 ha = context; 2294 2295 ql_get_stats(ha); 2296 2297 return; 2298 } 2299 2300