/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011-2013 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qla_os.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
#include "qla_os.h"
#include "qla_reg.h"
#include "qla_hw.h"
#include "qla_def.h"
#include "qla_inline.h"
#include "qla_ver.h"
#include "qla_glbl.h"
#include "qla_dbg.h"

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8020
#define PCI_PRODUCT_QLOGIC_ISP8020	0x8020
#endif

#define PCI_QLOGIC_ISP8020 \
	((PCI_PRODUCT_QLOGIC_ISP8020 << 16) | PCI_VENDOR_QLOGIC)

/*
 * static functions
 */
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);

static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qla_stop(qla_host_t *ha);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
static void qla_tx_done(void *context, int pending);

/*
 * Hooks to the Operating Systems
 */
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);

static void qla_init(void *arg);
static int qla_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int qla_media_change(if_t ifp);
static void qla_media_status(if_t ifp, struct ifmediareq *ifmr);

static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};
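/*
 * Register the driver with the PCI bus and declare its module dependencies.
 */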
static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

DRIVER_MODULE(qla80xx, pci, qla_pci_driver, 0, 0);

MODULE_DEPEND(qla80xx, pci, 1, 1, 1);
MODULE_DEPEND(qla80xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA8XXXBUF, "qla80xxbuf", "Buffers for qla80xx driver");

uint32_t std_replenish = 8;
uint32_t jumbo_replenish = 2;
uint32_t rcv_pkt_thres = 128;
uint32_t rcv_pkt_thres_d = 32;
uint32_t snd_pkt_thres = 16;
uint32_t free_pkt_thres = (NUM_TX_DESCRIPTORS / 2);

static char dev_str[64];

/*
 * Name: qla_pci_probe
 * Function: Validate the PCI device to be a QLA80XX device
 */
static int
qla_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP8020:
		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
			"Qlogic ISP 80xx PCI CNA Adapter-Ethernet Function",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		device_set_desc(dev, dev_str);
		break;
	default:
		return (ENXIO);
	}

	if (bootverbose)
		printf("%s: %s\n", __func__, dev_str);

	return (BUS_PROBE_DEFAULT);
}

static void
qla_add_sysctls(qla_host_t *ha)
{
	device_t dev = ha->pci_dev;

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
		(void *)ha, 0, qla_sysctl_get_stats, "I", "Statistics");

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "fw_version", CTLFLAG_RD,
		ha->fw_ver_str, 0, "firmware version");

	dbg_level = 0;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "debug", CTLFLAG_RW,
		&dbg_level, dbg_level, "Debug Level");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "std_replenish", CTLFLAG_RW,
		&std_replenish, std_replenish,
		"Threshold for Replenishing Standard Frames");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "jumbo_replenish", CTLFLAG_RW,
		&jumbo_replenish, jumbo_replenish,
		"Threshold for Replenishing Jumbo Frames");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "rcv_pkt_thres", CTLFLAG_RW,
		&rcv_pkt_thres, rcv_pkt_thres,
		"Threshold for # of rcv pkts to trigger indication isr");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "rcv_pkt_thres_d", CTLFLAG_RW,
		&rcv_pkt_thres_d, rcv_pkt_thres_d,
		"Threshold for # of rcv pkts to trigger indication deferred");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "snd_pkt_thres", CTLFLAG_RW,
		&snd_pkt_thres, snd_pkt_thres,
		"Threshold for # of snd packets");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "free_pkt_thres", CTLFLAG_RW,
		&free_pkt_thres, free_pkt_thres,
		"Threshold for # of packets to free at a time");

	return;
}

static void
qla_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	qla_hw_t *hw;
	if_t ifp;

	hw = &ha->hw;
	ifp = ha->ifp;

	if (ha->flags.qla_watchdog_exit)
		return;

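	/*
	 * Kick the transmit completion task whenever the firmware has
	 * consumed more transmit descriptors, or when frames are still
	 * queued on a running interface.
	 */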
	if (!ha->flags.qla_watchdog_pause) {
		if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
		} else if (!if_sendq_empty(ifp) && QL_RUNNING(ifp)) {
			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
		}
	}
	ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);
}

/*
 * Name: qla_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len, i;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	memset(ha, 0, sizeof (qla_host_t));

	if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8020) {
		device_printf(dev, "device is not ISP8020\n");
		return (ENXIO);
	}

	ha->pci_func = pci_get_function(dev);

	ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

	if (ha->pci_reg == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto qla_pci_attach_err;
	}

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->rx_lock, "qla80xx_rx_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->rxj_lock, "qla80xx_rxj_lock", MTX_NETWORK_LOCK, MTX_DEF);
	ha->flags.lock_init = 1;

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < qla_get_msix_count(ha)) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	QL_DPRINT2((dev, "%s: ha %p irq %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p\n", __func__, ha,
		ha->irq, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));

	ha->msix_count = qla_get_msix_count(ha);

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}

	TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->tx_tq);
	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
		device_get_nameunit(ha->pci_dev));

	for (i = 0; i < ha->msix_count; i++) {
		ha->irq_vec[i].irq_rid = i+1;
		ha->irq_vec[i].ha = ha;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
					&ha->irq_vec[i].irq_rid,
					(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			goto qla_pci_attach_err;
		}

		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, qla_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle)) {
			device_printf(dev, "could not setup interrupt\n");
			goto qla_pci_attach_err;
		}

		TASK_INIT(&ha->irq_vec[i].rcv_task, 0, qla_rcv,
			&ha->irq_vec[i]);

		ha->irq_vec[i].rcv_tq = taskqueue_create_fast("qla_rcvq",
			M_NOWAIT, taskqueue_thread_enqueue,
			&ha->irq_vec[i].rcv_tq);

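		/* one receive taskqueue thread services each MSI-X vector */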
		taskqueue_start_threads(&ha->irq_vec[i].rcv_tq, 1, PI_NET,
			"%s rcvq",
			device_get_nameunit(ha->pci_dev));
	}

	qla_add_sysctls(ha);

	/* add hardware specific sysctls */
	qla_hw_add_sysctls(ha);

	/* initialize hardware */
	if (qla_init_hw(ha)) {
		device_printf(dev, "%s: qla_init_hw failed\n", __func__);
		goto qla_pci_attach_err;
	}

	device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
		ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
		ha->fw_ver_build);

	snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
			ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
			ha->fw_ver_build);

	//qla_get_hw_caps(ha);
	qla_read_mac_addr(ha);

	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (qla_alloc_dma(ha)) {
		device_printf(dev, "%s: qla_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}

	/* create the O.S. Ethernet interface */
	qla_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->flags.qla_watchdog_pause = 1;

	callout_init(&ha->tx_callout, 1);

	/* create ioctl device interface */
	if (qla_make_cdev(ha)) {
		device_printf(dev, "%s: qla_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	QL_DPRINT2((dev, "%s: exit 0\n", __func__));
	return (0);

qla_pci_attach_err:

	qla_release(ha);

	QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));
	return (ENXIO);
}

/*
 * Name: qla_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qla_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	int i;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	QLA_LOCK(ha, __func__);
	qla_stop(ha);
	QLA_UNLOCK(ha, __func__);

	if (ha->tx_tq) {
		taskqueue_drain(ha->tx_tq, &ha->tx_task);
		taskqueue_free(ha->tx_tq);
	}

	for (i = 0; i < ha->msix_count; i++) {
		taskqueue_drain(ha->irq_vec[i].rcv_tq,
			&ha->irq_vec[i].rcv_task);
		taskqueue_free(ha->irq_vec[i].rcv_tq);
	}

	qla_release(ha);

	QL_DPRINT2((dev, "%s: exit\n", __func__));

	return (0);
}

/*
 * SYSCTL Related Callbacks
 */
static int
qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err)
		return (err);

	ha = (qla_host_t *)arg1;
	//qla_get_stats(ha);
	QL_DPRINT2((ha->pci_dev, "%s: called ret %d\n", __func__, ret));
	return (err);
}

/*
 * Name: qla_release
 * Function: Releases the resources allocated for the device
 */
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	qla_del_cdev(ha);

	if (ha->flags.qla_watchdog_active)
		ha->flags.qla_watchdog_exit = 1;

	callout_stop(&ha->tx_callout);
	qla_mdelay(__func__, 100);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qla_free_dma(ha);
	qla_free_parent_dma_tag(ha);

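	/* tear down the interrupt handlers and release the MSI-X vectors */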
	for (i = 0; i < ha->msix_count; i++) {
		if (ha->irq_vec[i].handle)
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
				ha->irq_vec[i].handle);
		if (ha->irq_vec[i].irq)
			(void) bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
	}
	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->tx_lock);
		mtx_destroy(&ha->rx_lock);
		mtx_destroy(&ha->rxj_lock);
		mtx_destroy(&ha->hw_lock);
	}

	if (ha->pci_reg)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);
}

/*
 * DMA Related Functions
 */

static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	*((bus_addr_t *)arg) = 0;

	if (error) {
		printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
		return;
	}

	QL_ASSERT((nsegs == 1), ("%s: %d segments returned!", __func__, nsegs));

	*((bus_addr_t *)arg) = segs[0].ds_addr;

	return;
}

int
qla_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	int		ret = 0;
	device_t	dev;
	bus_addr_t	b_addr;

	dev = ha->pci_dev;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	ret = bus_dma_tag_create(
			ha->parent_tag,/* parent */
			dma_buf->alignment,
			((bus_size_t)(1ULL << 32)),/* boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			dma_buf->size,		/* maxsize */
			1,			/* nsegments */
			dma_buf->size,		/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&dma_buf->dma_tag);

	if (ret) {
		device_printf(dev, "%s: could not create dma tag\n", __func__);
		goto qla_alloc_dmabuf_exit;
	}
	ret = bus_dmamem_alloc(dma_buf->dma_tag,
			(void **)&dma_buf->dma_b,
			(BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
			&dma_buf->dma_map);
	if (ret) {
		bus_dma_tag_destroy(dma_buf->dma_tag);
		device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
		goto qla_alloc_dmabuf_exit;
	}

	ret = bus_dmamap_load(dma_buf->dma_tag,
			dma_buf->dma_map,
			dma_buf->dma_b,
			dma_buf->size,
			qla_dmamap_callback,
			&b_addr, BUS_DMA_NOWAIT);

	if (ret || !b_addr) {
		bus_dma_tag_destroy(dma_buf->dma_tag);
		bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
			dma_buf->dma_map);
		ret = -1;
		goto qla_alloc_dmabuf_exit;
	}

	dma_buf->dma_addr = b_addr;

qla_alloc_dmabuf_exit:
	QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
		__func__, ret, (void *)dma_buf->dma_tag,
		(void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

	return ret;
}

void
qla_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
	bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
	bus_dma_tag_destroy(dma_buf->dma_tag);
}

static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
	int		ret;
	device_t	dev;

	dev = ha->pci_dev;

	/*
	 * Allocate parent DMA Tag
	 */
	ret = bus_dma_tag_create(
			bus_get_dma_tag(dev),	/* parent */
			1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
			0,			/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&ha->parent_tag);

	if (ret) {
		device_printf(dev, "%s: could not create parent dma tag\n",
			__func__);
		return (-1);
	}

	ha->flags.parent_tag = 1;

	return (0);
}

static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
	if (ha->flags.parent_tag) {
		bus_dma_tag_destroy(ha->parent_tag);
		ha->flags.parent_tag = 0;
	}
}

/*
 * Name: qla_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S
 */

static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
	if_t ifp;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	if_setmtu(ifp, ETHERMTU);
	if_setbaudrate(ifp, IF_Gbps(10));
	if_setinitfn(ifp, qla_init);
	if_setsoftc(ifp, ha);
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, qla_ioctl);
	if_setstartfn(ifp, qla_start);

	if_setsendqlen(ifp, qla_get_ifq_snd_maxlen(ha));
	if_setsendqready(ifp);

	ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

	ether_ifattach(ifp, qla_get_mac_addr(ha));

	if_setcapabilities(ifp, IFCAP_HWCSUM |
				IFCAP_TSO4 |
				IFCAP_JUMBO_MTU);

	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU, 0);
	if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0);

	if_setcapenable(ifp, if_getcapabilities(ifp));

	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2((dev, "%s: exit\n", __func__));

	return;
}

static void
qla_init_locked(qla_host_t *ha)
{
	if_t ifp = ha->ifp;

	qla_stop(ha);

	if (qla_alloc_xmt_bufs(ha) != 0)
		return;

	if (qla_alloc_rcv_bufs(ha) != 0)
		return;

	if (qla_config_lro(ha))
		return;

	bcopy(if_getlladdr(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

	if_sethwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_TSO);

	ha->flags.stop_rcv = 0;
	if (qla_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		ha->flags.qla_watchdog_pause = 0;
	}

	return;
}

static void
qla_init(void *arg)
{
	qla_host_t *ha;

	ha = (qla_host_t *)arg;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	QLA_LOCK(ha, __func__);
	qla_init_locked(ha);
	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}

static u_int
qla_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
	uint8_t *mta = arg;

	if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
		return (0);
	bcopy(LLADDR(sdl), &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

	return (1);
}

static void
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	if_t ifp = ha->ifp;
	int mcnt;

	mcnt = if_foreach_llmaddr(ifp, qla_copy_maddr, mta);
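	/* hand the harvested multicast list to the hardware */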
	qla_hw_set_multi(ha, mta, mcnt, add_multi);

	return;
}

static int
qla_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	qla_host_t *ha;

	ha = (qla_host_t *)if_getsoftc(ifp);

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			if_setflagbits(ifp, IFF_UP, 0);
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
				QLA_LOCK(ha, __func__);
				qla_init_locked(ha);
				QLA_UNLOCK(ha, __func__);
			}
			QL_DPRINT4((ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
			if (ntohl(IA_SIN(ifa)->sin_addr.s_addr) != INADDR_ANY) {
				qla_config_ipv4_addr(ha,
					(IA_SIN(ifa)->sin_addr.s_addr));
			}
			break;
		}
#endif
		ether_ioctl(ifp, cmd, data);
		break;

	case SIOCSIFMTU:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
			ret = EINVAL;
		} else {
			QLA_LOCK(ha, __func__);
			if_setmtu(ifp, ifr->ifr_mtu);
			ha->max_frame_size =
				if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
				ret = qla_set_max_mtu(ha, ha->max_frame_size,
					(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
			}
			QLA_UNLOCK(ha, __func__);

			if (ret)
				ret = EINVAL;
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		if (if_getflags(ifp) & IFF_UP) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
				if ((if_getflags(ifp) ^ ha->if_flags) &
					IFF_PROMISC) {
					qla_set_promisc(ha);
				} else if ((if_getflags(ifp) ^ ha->if_flags) &
					IFF_ALLMULTI) {
					qla_set_allmulti(ha);
				}
			} else {
				QLA_LOCK(ha, __func__);
				qla_init_locked(ha);
				ha->max_frame_size = if_getmtu(ifp) +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				ret = qla_set_max_mtu(ha, ha->max_frame_size,
					(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
				QLA_UNLOCK(ha, __func__);
			}
		} else {
			QLA_LOCK(ha, __func__);
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				qla_stop(ha);
			ha->if_flags = if_getflags(ifp);
			QLA_UNLOCK(ha, __func__);
		}
		break;

	case SIOCADDMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			qla_set_multi(ha, 1);
		}
		break;

	case SIOCDELMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			qla_set_multi(ha, 0);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4((ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);

		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			if_togglecapenable(ifp, IFCAP_HWCSUM);
		if (mask & IFCAP_TSO4)
			if_togglecapenable(ifp, IFCAP_TSO4);
		if (mask & IFCAP_TSO6)
			if_togglecapenable(ifp, IFCAP_TSO6);
		if (mask & IFCAP_VLAN_HWTAGGING)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);

		if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
			qla_init(ha);

		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}

static int
qla_media_change(if_t ifp)
{
	qla_host_t *ha;
	struct ifmedia *ifm;
	int ret = 0;

	ha = (qla_host_t *)if_getsoftc(ifp);

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}

static void
qla_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha;

	ha = (qla_host_t *)if_getsoftc(ifp);

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	qla_update_link_state(ha);
	if (ha->hw.flags.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,
		(ha->hw.flags.link_up ? "link_up" : "link_down")));

	return;
}

void
qla_start(if_t ifp)
{
	struct mbuf    *m_head;
	qla_host_t *ha = (qla_host_t *)if_getsoftc(ifp);

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8((ha->pci_dev,
			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
		return;
	}

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	if (!ha->watchdog_ticks)
		qla_update_link_state(ha);

	if (!ha->hw.flags.link_up) {
		QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	while (!if_sendq_empty(ifp)) {
		m_head = if_dequeue(ifp);

		if (m_head == NULL) {
			QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
				__func__));
			break;
		}

		if (qla_send(ha, &m_head)) {
			if (m_head == NULL)
				break;
			QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			if_sendq_prepend(ifp, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}
	QLA_TX_UNLOCK(ha);
	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return;
}

static int
qla_send(qla_host_t *ha, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t		map;
	int			nsegs;
	int			ret = -1;
	uint32_t		tx_idx;
	struct mbuf		*m_head = *m_headp;

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	if ((ret = bus_dmamap_create(ha->tx_tag, BUS_DMA_NOWAIT, &map))) {
		ha->err_tx_dmamap_create++;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_create failed[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);
		return (ret);
	}

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {
		struct mbuf *m;

		QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

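		/*
		 * The chain has more scatter/gather segments than the tx
		 * dma tag allows; collapse it into as few mbufs as possible
		 * and retry the mapping.
		 */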
		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {
			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			bus_dmamap_destroy(ha->tx_tag, map);
			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}
	} else if (ret) {
		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		bus_dmamap_destroy(ha->tx_tag, map);

		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT((nsegs != 0), ("qla_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

	if (!(ret = qla_hw_send(ha, segs, nsegs, &tx_idx, m_head))) {
		ha->tx_buf[tx_idx].m_head = m_head;
		ha->tx_buf[tx_idx].map = map;
	} else {
		if (ret == EINVAL) {
			m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}

static void
qla_stop(qla_host_t *ha)
{
	if_t ifp = ha->ifp;

	ha->flags.qla_watchdog_pause = 1;
	qla_mdelay(__func__, 100);

	ha->flags.stop_rcv = 1;
	qla_hw_stop_rcv(ha);

	qla_del_hw_if(ha);

	qla_free_lro(ha);

	qla_free_xmt_bufs(ha);
	qla_free_rcv_bufs(ha);

	if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));

	return;
}

/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
	if (bus_dma_tag_create(NULL,	/* parent */
		1, 0,			/* alignment, bounds */
		BUS_SPACE_MAXADDR,	/* lowaddr */
		BUS_SPACE_MAXADDR,	/* highaddr */
		NULL, NULL,		/* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,	/* maxsize */
		QLA_MAX_SEGMENTS,	/* nsegments */
		PAGE_SIZE,		/* maxsegsize */
		BUS_DMA_ALLOCNOW,	/* flags */
		NULL,			/* lockfunc */
		NULL,			/* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}
	bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));

	return 0;
}

/*
 * Release mbuf after it is sent on the wire
 */
static void
qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head) {
		bus_dmamap_unload(ha->tx_tag, txb->map);
		bus_dmamap_destroy(ha->tx_tag, txb->map);

		m_freem(txb->m_head);
		txb->m_head = NULL;
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}

static void
qla_free_xmt_bufs(qla_host_t *ha)
{
	int		i;

	for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
		qla_clear_tx_buf(ha, &ha->tx_buf[i]);

	if (ha->tx_tag != NULL) {
		bus_dma_tag_destroy(ha->tx_tag);
		ha->tx_tag = NULL;
	}
	bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));

	return;
}

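/*
 * Name: qla_alloc_rcv_bufs
 * Function: Allocates the DMA maps and mbufs for the Standard and Jumbo
 *	Receive Rings.
 */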
static int
qla_alloc_rcv_bufs(qla_host_t *ha)
{
	int		i, j, ret = 0;
	qla_rx_buf_t	*rxb;

	if (bus_dma_tag_create(NULL,	/* parent */
			1, 0,			/* alignment, bounds */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			MJUM9BYTES,		/* maxsize */
			1,			/* nsegments */
			MJUM9BYTES,		/* maxsegsize */
			BUS_DMA_ALLOCNOW,	/* flags */
			NULL,			/* lockfunc */
			NULL,			/* lockfuncarg */
			&ha->rx_tag)) {
		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
			__func__);

		return (ENOMEM);
	}

	bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	bzero((void *)ha->rx_jbuf,
		(sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));

	for (i = 0; i < MAX_SDS_RINGS; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
		ha->hw.sds[i].rxjb_free = NULL;
		ha->hw.sds[i].rxj_free = 0;
	}

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
		rxb = &ha->rx_buf[i];

		ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: dmamap[%d] failed\n", __func__, i);

			for (j = 0; j < i; j++) {
				bus_dmamap_destroy(ha->rx_tag,
					ha->rx_buf[j].map);
			}
			goto qla_alloc_rcv_bufs_failed;
		}
	}

	qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_NORMAL);

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
		rxb = &ha->rx_buf[i];
		rxb->handle = i;
		if (!(ret = qla_get_mbuf(ha, rxb, NULL, 0))) {
			/*
			 * set the physical address in the corresponding
			 * descriptor entry in the receive ring/queue for the
			 * hba
			 */
			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL, i,
				rxb->handle, rxb->paddr,
				(rxb->m_head)->m_pkthdr.len);
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [standard(%d)] failed\n",
				__func__, i);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			goto qla_alloc_rcv_bufs_failed;
		}
	}

	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
		rxb = &ha->rx_jbuf[i];

		ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: dmamap[%d] failed\n", __func__, i);

			for (j = 0; j < i; j++) {
				bus_dmamap_destroy(ha->rx_tag,
					ha->rx_jbuf[j].map);
			}
			goto qla_alloc_rcv_bufs_failed;
		}
	}

	qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_JUMBO);

	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
		rxb = &ha->rx_jbuf[i];
		rxb->handle = i;
		if (!(ret = qla_get_mbuf(ha, rxb, NULL, 1))) {
			/*
			 * set the physical address in the corresponding
			 * descriptor entry in the receive ring/queue for the
			 * hba
			 */
			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO, i,
				rxb->handle, rxb->paddr,
				(rxb->m_head)->m_pkthdr.len);
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [jumbo(%d)] failed\n",
				__func__, i);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			goto qla_alloc_rcv_bufs_failed;
		}
	}

	return (0);

qla_alloc_rcv_bufs_failed:
	qla_free_rcv_bufs(ha);
	return (ret);
}

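/*
 * Name: qla_free_rcv_bufs
 * Function: Frees the mbufs and DMA maps of the Standard and Jumbo
 *	Receive Rings.
 */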
static void
qla_free_rcv_bufs(qla_host_t *ha)
{
	int		i;
	qla_rx_buf_t	*rxb;

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
		rxb = &ha->rx_buf[i];
		if (rxb->m_head != NULL) {
			bus_dmamap_unload(ha->rx_tag, rxb->map);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			m_freem(rxb->m_head);
			rxb->m_head = NULL;
		}
	}

	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
		rxb = &ha->rx_jbuf[i];
		if (rxb->m_head != NULL) {
			bus_dmamap_unload(ha->rx_tag, rxb->map);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			m_freem(rxb->m_head);
			rxb->m_head = NULL;
		}
	}

	if (ha->rx_tag != NULL) {
		bus_dma_tag_destroy(ha->rx_tag);
		ha->rx_tag = NULL;
	}

	bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	bzero((void *)ha->rx_jbuf,
		(sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));

	for (i = 0; i < MAX_SDS_RINGS; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
		ha->hw.sds[i].rxjb_free = NULL;
		ha->hw.sds[i].rxj_free = 0;
	}

	return;
}

int
qla_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp,
	uint32_t jumbo)
{
	struct mbuf	*mp = nmp;
	int		ret = 0;
	uint32_t	offset;

	QL_DPRINT2((ha->pci_dev, "%s: jumbo(0x%x) enter\n", __func__, jumbo));

	if (mp == NULL) {
		if (!jumbo) {
			mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

			if (mp == NULL) {
				ha->err_m_getcl++;
				ret = ENOBUFS;
				device_printf(ha->pci_dev,
					"%s: m_getcl failed\n", __func__);
				goto exit_qla_get_mbuf;
			}
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		} else {
			mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
				MJUM9BYTES);
			if (mp == NULL) {
				ha->err_m_getjcl++;
				ret = ENOBUFS;
				device_printf(ha->pci_dev,
					"%s: m_getjcl failed\n", __func__);
				goto exit_qla_get_mbuf;
			}
			mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;
		}
	} else {
		if (!jumbo)
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		else
			mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;

		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
	if (offset) {
		offset = 8 - offset;
		m_adj(mp, offset);
	}

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	ret = bus_dmamap_load(ha->rx_tag, rxb->map,
				mtod(mp, void *), mp->m_len,
				qla_dmamap_callback, &rxb->paddr,
				BUS_DMA_NOWAIT);
	if (ret || !rxb->paddr) {
		m_free(mp);
		rxb->m_head = NULL;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load failed\n", __func__);
		ret = -1;
		goto exit_qla_get_mbuf;
	}
	rxb->m_head = mp;
	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_qla_get_mbuf:
	QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
	return (ret);
}

static void
qla_tx_done(void *context, int pending)
{
	qla_host_t *ha = context;

	qla_hw_tx_done(ha);
	qla_start(ha->ifp);
}