/*-
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#define OCS_COPYRIGHT "Copyright (C) 2017 Broadcom. All rights reserved."

/**
 * @file
 * Implementation of required FreeBSD PCI interface functions
 */

#include "ocs.h"
#include "version.h"
#include <sys/sysctl.h>
#include <sys/malloc.h>

static MALLOC_DEFINE(M_OCS, "OCS", "OneCore Storage data");

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

/**
 * Tunable parameters for transport
 */
int logmask = 0;
int ctrlmask = 2;
int logdest = 1;
int loglevel = LOG_INFO;
int ramlog_size = 1*1024*1024;
int ddump_saved_size = 0;
static const char *queue_topology = "eq cq rq cq mq $nulp($nwq(cq wq:ulp=$rpt1)) cq wq:len=256:class=1";

static void ocs_release_bus(struct ocs_softc *);
static int32_t ocs_intr_alloc(struct ocs_softc *);
static int32_t ocs_intr_setup(struct ocs_softc *);
static int32_t ocs_intr_teardown(struct ocs_softc *);
static int ocs_pci_intx_filter(void *);
static void ocs_pci_intr(void *);
static int32_t ocs_init_dma_tag(struct ocs_softc *ocs);

static int32_t ocs_setup_fcports(ocs_t *ocs);

ocs_t *ocs_devices[MAX_OCS_DEVICES];

/**
 * @brief Check support for the given device
 *
 * Determine support for a given device by examining the PCI vendor and
 * device IDs.
 *
 * @param dev device abstraction
 *
 * @return 0 if the device is supported, ENXIO otherwise
 */
static int
ocs_pci_probe(device_t dev)
{
	char *desc = NULL;

	if (pci_get_vendor(dev) != PCI_VENDOR_EMULEX) {
		return ENXIO;
	}

	switch (pci_get_device(dev)) {
	case PCI_PRODUCT_EMULEX_OCE16001:
		desc = "Emulex LightPulse FC Adapter";
		break;
	case PCI_PRODUCT_EMULEX_LPE31004:
		desc = "Emulex LightPulse FC Adapter";
		break;
	case PCI_PRODUCT_EMULEX_OCE50102:
		desc = "Emulex LightPulse 10GbE FCoE/NIC Adapter";
		break;
	default:
		return ENXIO;
	}

	device_set_desc(dev, desc);

	return BUS_PROBE_DEFAULT;
}
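
/*
 * Note: the per-adapter settings consumed by ocs_setup_params() below are
 * read from the kernel environment via resource_int_value(9) and
 * resource_string_value(9), i.e. from device hints.  As an illustration
 * (hint names assumed from the resource names used in ocs_setup_params(),
 * not an exhaustive list), unit 0 could be run as a pure target with
 * something like the following in /boot/device.hints or loader.conf:
 *
 *   hint.ocs_fc.0.initiator="0"
 *   hint.ocs_fc.0.target="1"
 *   hint.ocs_fc.0.logmask="0x10"
 */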

static int
ocs_map_bars(device_t dev, struct ocs_softc *ocs)
{

	/*
	 * Map PCI BAR0 register into the CPU's space.
	 */

	ocs->reg[0].rid = PCIR_BAR(PCI_64BIT_BAR0);
	ocs->reg[0].res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &ocs->reg[0].rid, RF_ACTIVE);

	if (ocs->reg[0].res == NULL) {
		device_printf(dev, "bus_alloc_resource failed rid=%#x\n",
		    ocs->reg[0].rid);
		return ENXIO;
	}

	ocs->reg[0].btag = rman_get_bustag(ocs->reg[0].res);
	ocs->reg[0].bhandle = rman_get_bushandle(ocs->reg[0].res);
	return 0;
}

static int
ocs_setup_params(struct ocs_softc *ocs)
{
	int32_t i = 0;
	const char *hw_war_version;

	/* Setup tunable parameters */
	ocs->ctrlmask = ctrlmask;
	ocs->speed = 0;
	ocs->topology = 0;
	ocs->ethernet_license = 0;
	ocs->num_scsi_ios = 8192;
	ocs->enable_hlm = 0;
	ocs->hlm_group_size = 8;
	ocs->logmask = logmask;

	ocs->config_tgt = FALSE;
	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "target", &i)) {
		if (1 == i) {
			ocs->config_tgt = TRUE;
			device_printf(ocs->dev, "Enabling target\n");
		}
	}

	ocs->config_ini = TRUE;
	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "initiator", &i)) {
		if (0 == i) {
			ocs->config_ini = FALSE;
			device_printf(ocs->dev, "Disabling initiator\n");
		}
	}
	ocs->enable_ini = ocs->config_ini;

	if (!ocs->config_ini && !ocs->config_tgt) {
		device_printf(ocs->dev, "Unsupported, both initiator and target mode disabled.\n");
		return 1;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "logmask", &logmask)) {
		device_printf(ocs->dev, "logmask = %#x\n", logmask);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "logdest", &logdest)) {
		device_printf(ocs->dev, "logdest = %#x\n", logdest);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "loglevel", &loglevel)) {
		device_printf(ocs->dev, "loglevel = %#x\n", loglevel);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "ramlog_size", &ramlog_size)) {
		device_printf(ocs->dev, "ramlog_size = %#x\n", ramlog_size);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "ddump_saved_size", &ddump_saved_size)) {
		device_printf(ocs->dev, "ddump_saved_size = %#x\n", ddump_saved_size);
	}

	/* If enabled, initialize a RAM logging buffer */
	if (logdest & 2) {
		ocs->ramlog = ocs_ramlog_init(ocs, ramlog_size/OCS_RAMLOG_DEFAULT_BUFFERS,
		    OCS_RAMLOG_DEFAULT_BUFFERS);
		/*
		 * If NULL was returned, then we'll simply skip using the ramlog, but
		 * set logdest to 1 to ensure that we at least get default logging.
		 */
		if (ocs->ramlog == NULL) {
			logdest = 1;
		}
	}

	/* Initialize a saved ddump */
	if (ddump_saved_size) {
		if (ocs_textbuf_alloc(ocs, &ocs->ddump_saved, ddump_saved_size)) {
			ocs_log_err(ocs, "failed to allocate memory for saved ddump\n");
		}
	}

	if (0 == resource_string_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "hw_war_version", &hw_war_version)) {
		device_printf(ocs->dev, "hw_war_version = %s\n", hw_war_version);
		ocs->hw_war_version = strdup(hw_war_version, M_OCS);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "explicit_buffer_list", &i)) {
		ocs->explicit_buffer_list = i;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "ethernet_license", &i)) {
		ocs->ethernet_license = i;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "speed", &i)) {
		device_printf(ocs->dev, "speed = %d Mbps\n", i);
		ocs->speed = i;
	}

	ocs->desc = device_get_desc(ocs->dev);

	ocs_device_lock_init(ocs);
	ocs->driver_version = STR_BE_MAJOR "." STR_BE_MINOR "." STR_BE_BUILD "." STR_BE_BRANCH;
	ocs->model = ocs_pci_model(ocs->pci_vendor, ocs->pci_device);

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "enable_hlm", &i)) {
		device_printf(ocs->dev, "enable_hlm = %d\n", i);
		ocs->enable_hlm = i;
		if (ocs->enable_hlm) {
			ocs->hlm_group_size = 8;

			if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
			    "hlm_group_size", &i)) {
				ocs->hlm_group_size = i;
			}
			device_printf(ocs->dev, "hlm_group_size = %d\n", ocs->hlm_group_size);
		}
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "num_scsi_ios", &i)) {
		ocs->num_scsi_ios = i;
		device_printf(ocs->dev, "num_scsi_ios = %d\n", ocs->num_scsi_ios);
	} else {
		ocs->num_scsi_ios = 8192;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "topology", &i)) {
		ocs->topology = i;
		device_printf(ocs->dev, "Setting topology=%#x\n", i);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "num_vports", &i)) {
		if (i >= 0 && i <= 254) {
			device_printf(ocs->dev, "num_vports = %d\n", i);
			ocs->num_vports = i;
		} else {
			device_printf(ocs->dev, "num_vports: %d not supported\n", i);
		}
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "external_loopback", &i)) {
		device_printf(ocs->dev, "external_loopback = %d\n", i);
		ocs->external_loopback = i;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "tgt_rscn_delay", &i)) {
		device_printf(ocs->dev, "tgt_rscn_delay = %d\n", i);
		ocs->tgt_rscn_delay_msec = i * 1000;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "tgt_rscn_period", &i)) {
		device_printf(ocs->dev, "tgt_rscn_period = %d\n", i);
		ocs->tgt_rscn_period_msec = i * 1000;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "target_io_timer", &i)) {
		device_printf(ocs->dev, "target_io_timer = %d\n", i);
		ocs->target_io_timer_sec = i;
	}

	hw_global.queue_topology_string = queue_topology;
	ocs->rq_selection_policy = 0;
	ocs->rr_quanta = 1;
	ocs->filter_def = "0,0,0,0";

	return 0;
}

static int32_t
ocs_setup_fcports(ocs_t *ocs)
{
	uint32_t i = 0, role = 0;
	uint64_t sli_wwpn, sli_wwnn;
	size_t size;
	ocs_xport_t *xport = ocs->xport;
	ocs_vport_spec_t *vport;
	ocs_fcport *fcp = NULL;

	size = sizeof(ocs_fcport) * (ocs->num_vports + 1);

	ocs->fcports = ocs_malloc(ocs, size, M_ZERO|M_NOWAIT);
	if (ocs->fcports == NULL) {
		device_printf(ocs->dev, "Can't allocate fcport\n");
		return 1;
	}

	role = ((ocs->enable_ini) ? KNOB_ROLE_INITIATOR : 0) |
	    ((ocs->enable_tgt) ? KNOB_ROLE_TARGET : 0);

	/* FCPORT(ocs, 0) is the physical port; vports follow from index 1. */
	fcp = FCPORT(ocs, i);
	fcp->role = role;
	i++;

	ocs_list_foreach(&xport->vport_list, vport) {
		fcp = FCPORT(ocs, i);
		vport->tgt_data = fcp;
		fcp->vport = vport;
		fcp->role = role;

		if (ocs_hw_get_def_wwn(ocs, i, &sli_wwpn, &sli_wwnn)) {
			ocs_log_err(ocs, "Get default wwn failed\n");
			i++;
			continue;
		}

		vport->wwpn = ocs_be64toh(sli_wwpn);
		vport->wwnn = ocs_be64toh(sli_wwnn);
		i++;
		ocs_log_debug(ocs, "VPort wwpn: %lx wwnn: %lx\n", vport->wwpn, vport->wwnn);
	}

	return 0;
}

int32_t
ocs_device_attach(ocs_t *ocs)
{
	int32_t i;
	ocs_io_t *io = NULL;

	if (ocs->attached) {
		ocs_log_warn(ocs, "%s: Device is already attached\n", __func__);
		return -1;
	}

	/* Allocate transport object and bring online */
	ocs->xport = ocs_xport_alloc(ocs);
	if (ocs->xport == NULL) {
		device_printf(ocs->dev, "failed to allocate transport object\n");
		return ENOMEM;
	} else if (ocs_xport_attach(ocs->xport) != 0) {
		device_printf(ocs->dev, "%s: failed to attach transport object\n", __func__);
		goto fail_xport_attach;
	} else if (ocs_xport_initialize(ocs->xport) != 0) {
		device_printf(ocs->dev, "%s: failed to initialize transport object\n", __func__);
		goto fail_xport_init;
	}

	if (ocs_init_dma_tag(ocs)) {
		goto fail_intr_setup;
	}

	for (i = 0; (io = ocs_io_get_instance(ocs, i)); i++) {
		if (bus_dmamap_create(ocs->buf_dmat, 0, &io->tgt_io.dmap)) {
			device_printf(ocs->dev, "%s: bad dma map create\n", __func__);
		}

		io->tgt_io.state = OCS_CAM_IO_FREE;
	}

	if (ocs_setup_fcports(ocs)) {
		device_printf(ocs->dev, "FCports creation failed\n");
		goto fail_intr_setup;
	}

	if (ocs_cam_attach(ocs)) {
		device_printf(ocs->dev, "cam attach failed\n");
		goto fail_intr_setup;
	}

	if (ocs_intr_setup(ocs)) {
		device_printf(ocs->dev, "Interrupt setup failed\n");
		goto fail_intr_setup;
	}

	if (ocs->enable_ini || ocs->enable_tgt) {
		if (ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE)) {
			device_printf(ocs->dev, "Can't init port\n");
			goto fail_xport_online;
		}
	}

	ocs->attached = true;

	return 0;

fail_xport_online:
	if (ocs_xport_control(ocs->xport, OCS_XPORT_SHUTDOWN)) {
		device_printf(ocs->dev, "Transport Shutdown timed out\n");
	}
	ocs_intr_teardown(ocs);
fail_intr_setup:
fail_xport_init:
	ocs_xport_detach(ocs->xport);
	if (ocs->config_tgt)
		ocs_scsi_tgt_del_device(ocs);

	ocs_xport_free(ocs->xport);
	ocs->xport = NULL;
fail_xport_attach:
	if (ocs->xport)
		ocs_free(ocs, ocs->xport, sizeof(*(ocs->xport)));
	ocs->xport = NULL;
	return ENXIO;
}

/**
 * @brief Connect the driver to the given device
 *
 * If the probe routine is successful, the OS will give the driver
 * the opportunity to connect itself to the device. This routine
 * maps PCI resources (memory BARs and interrupts) and initializes a
 * hardware object.
 *
 * @param dev device abstraction
 *
 * @return 0 if the driver attaches to the device, ENXIO otherwise
 */
static int
ocs_pci_attach(device_t dev)
{
	struct ocs_softc *ocs;
	int instance;

	instance = device_get_unit(dev);

	ocs = (struct ocs_softc *)device_get_softc(dev);
	if (NULL == ocs) {
		device_printf(dev, "cannot allocate softc\n");
		return ENOMEM;
	}
	memset(ocs, 0, sizeof(struct ocs_softc));

	if (instance < ARRAY_SIZE(ocs_devices)) {
		ocs_devices[instance] = ocs;
	} else {
		device_printf(dev, "got unexpected ocs instance number %d\n", instance);
	}

	ocs->instance_index = instance;

	ocs->dev = dev;

	pci_enable_io(dev, SYS_RES_MEMORY);
	pci_enable_busmaster(dev);

	ocs->pci_vendor = pci_get_vendor(dev);
	ocs->pci_device = pci_get_device(dev);
	snprintf(ocs->businfo, sizeof(ocs->businfo), "%02X:%02X:%02X",
	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));

	/* Map all memory BARs */
	if (ocs_map_bars(dev, ocs)) {
		device_printf(dev, "Failed to map pci bars\n");
		goto release_bus;
	}

	/* create a root DMA tag for the device */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),
	    1,				/* byte alignment */
	    0,				/* no boundary restrictions */
	    BUS_SPACE_MAXADDR,		/* no minimum low address */
	    BUS_SPACE_MAXADDR,		/* no maximum high address */
	    NULL,			/* no filter function */
	    NULL,			/* or arguments */
	    BUS_SPACE_MAXSIZE,		/* max size covered by tag */
	    BUS_SPACE_UNRESTRICTED,	/* no segment count restrictions */
	    BUS_SPACE_MAXSIZE,		/* no segment length restrictions */
	    0,				/* flags */
	    NULL,			/* no lock manipulation function */
	    NULL,			/* or arguments */
	    &ocs->dmat)) {
		device_printf(dev, "parent DMA tag allocation failed\n");
		goto release_bus;
	}

	if (ocs_intr_alloc(ocs)) {
		device_printf(dev, "Interrupt allocation failed\n");
		goto release_bus;
	}

	if (PCIC_SERIALBUS == pci_get_class(dev) &&
	    PCIS_SERIALBUS_FC == pci_get_subclass(dev))
		ocs->ocs_xport = OCS_XPORT_FC;
	else {
		device_printf(dev, "unsupported class (%#x : %#x)\n",
		    pci_get_class(dev),
		    pci_get_subclass(dev));
		goto release_bus;
	}

	/* Setup tunable parameters */
	if (ocs_setup_params(ocs)) {
		device_printf(ocs->dev, "failed to setup params\n");
		goto release_bus;
	}

	if (ocs_device_attach(ocs)) {
		device_printf(ocs->dev, "failed to attach device\n");
		goto release_params;
	}

	ocs->fc_type = FC_TYPE_FCP;

	ocs_debug_attach(ocs);

	return 0;

release_params:
	ocs_ramlog_free(ocs, ocs->ramlog);
	ocs_device_lock_free(ocs);
	free(ocs->hw_war_version, M_OCS);
release_bus:
	ocs_release_bus(ocs);
	return ENXIO;
}
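
/*
 * Teardown path.  ocs_device_detach() and ocs_pci_detach() below release the
 * resources acquired by ocs_device_attach()/ocs_pci_attach() above, in
 * roughly the reverse order: port shutdown, interrupt teardown, transport and
 * CAM detach, per-IO DMA maps and tags, then debug/ramlog state and the bus
 * resources themselves.
 */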

/**
 * @brief Free resources when the PCI device detaches
 *
 * @param ocs pointer to ocs structure
 *
 * @return 0 for success, a negative error code value for failure.
 */

int32_t
ocs_device_detach(ocs_t *ocs)
{
	int32_t rc = 0, i;
	ocs_io_t *io = NULL;

	if (ocs != NULL) {
		if (!ocs->attached) {
			ocs_log_warn(ocs, "%s: Device is not attached\n", __func__);
			return -1;
		}

		rc = ocs_xport_control(ocs->xport, OCS_XPORT_SHUTDOWN);
		if (rc) {
			ocs_log_err(ocs, "%s: Transport Shutdown timed out\n", __func__);
		}

		ocs_intr_teardown(ocs);

		if (ocs_xport_detach(ocs->xport) != 0) {
			ocs_log_err(ocs, "%s: Transport detach failed\n", __func__);
		}

		ocs_cam_detach(ocs);
		ocs_free(ocs, ocs->fcports, sizeof(ocs->fcports));

		for (i = 0; (io = ocs_io_get_instance(ocs, i)); i++) {
			if (bus_dmamap_destroy(ocs->buf_dmat, io->tgt_io.dmap)) {
				device_printf(ocs->dev, "%s: bad dma map destroy\n", __func__);
			}
		}
		bus_dma_tag_destroy(ocs->dmat);
		ocs_xport_free(ocs->xport);
		ocs->xport = NULL;

		ocs->attached = FALSE;
	}

	return 0;
}

/**
 * @brief Detach the driver from the given device
 *
 * If the driver is a loadable module, this routine gets called at unload
 * time. This routine will stop the device and free any allocated resources.
 *
 * @param dev device abstraction
 *
 * @return 0 if the driver detaches from the device, ENXIO otherwise
 */
static int
ocs_pci_detach(device_t dev)
{
	struct ocs_softc *ocs;

	ocs = (struct ocs_softc *)device_get_softc(dev);
	if (!ocs) {
		device_printf(dev, "no driver context?!?\n");
		return -1;
	}

	if (ocs->config_tgt && ocs->enable_tgt) {
		device_printf(dev, "can't detach with target mode enabled\n");
		return EBUSY;
	}

	ocs_device_detach(ocs);

	/*
	 * Workaround for OCS SCSI Transport quirk.
	 *
	 * CTL requires that target mode is disabled prior to unloading the
	 * driver (i.e. ocs->enable_tgt = FALSE), but once the target is disabled,
	 * the transport will not call ocs_scsi_tgt_del_device(), which deallocates
	 * CAM resources. The workaround is to explicitly make the call here.
	 */
	if (ocs->config_tgt)
		ocs_scsi_tgt_del_device(ocs);

	/* Free the strdup()-created buffer. */
	free(ocs->hw_war_version, M_OCS);

	ocs_device_lock_free(ocs);

	ocs_debug_detach(ocs);

	ocs_ramlog_free(ocs, ocs->ramlog);

	ocs_release_bus(ocs);

	return 0;
}

/**
 * @brief Notify driver of system shutdown
 *
 * @param dev device abstraction
 *
 * @return 0 on success, non-zero otherwise
 */
static int
ocs_pci_shutdown(device_t dev)
{
	device_printf(dev, "%s\n", __func__);
	return 0;
}

/**
 * @brief Release bus resources allocated within the soft context
 *
 * @param ocs Pointer to the driver's context
 *
 * @return none
 */
static void
ocs_release_bus(struct ocs_softc *ocs)
{

	if (NULL != ocs) {
		uint32_t i;

		ocs_intr_teardown(ocs);

		if (ocs->irq) {
			bus_release_resource(ocs->dev, SYS_RES_IRQ,
			    rman_get_rid(ocs->irq), ocs->irq);

			if (ocs->n_vec) {
				pci_release_msi(ocs->dev);
				ocs->n_vec = 0;
			}

			ocs->irq = NULL;
		}

		bus_dma_tag_destroy(ocs->dmat);

		for (i = 0; i < PCI_MAX_BAR; i++) {
			if (ocs->reg[i].res) {
				bus_release_resource(ocs->dev, SYS_RES_MEMORY,
				    ocs->reg[i].rid,
				    ocs->reg[i].res);
			}
		}
	}
}

/**
 * @brief Allocate and initialize interrupts
 *
 * @param ocs Pointer to the driver's context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_intr_alloc(struct ocs_softc *ocs)
{

	ocs->n_vec = 1;
	if (pci_alloc_msix(ocs->dev, &ocs->n_vec)) {
		device_printf(ocs->dev, "MSI-X allocation failed\n");
		if (pci_alloc_msi(ocs->dev, &ocs->n_vec)) {
			device_printf(ocs->dev, "MSI allocation failed\n");
			ocs->irqid = 0;
			ocs->n_vec = 0;
		} else
			ocs->irqid = 1;
	} else {
		ocs->irqid = 1;
	}

	ocs->irq = bus_alloc_resource_any(ocs->dev, SYS_RES_IRQ, &ocs->irqid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (NULL == ocs->irq) {
		device_printf(ocs->dev, "could not allocate interrupt\n");
		return -1;
	}

	ocs->intr_ctx.vec = 0;
	ocs->intr_ctx.softc = ocs;
	snprintf(ocs->intr_ctx.name, sizeof(ocs->intr_ctx.name),
	    "%s_intr_%d",
	    device_get_nameunit(ocs->dev),
	    ocs->intr_ctx.vec);

	return 0;
}

/**
 * @brief Create and attach an interrupt handler
 *
 * @param ocs Pointer to the driver's context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_intr_setup(struct ocs_softc *ocs)
{
	driver_filter_t *filter = NULL;

	/* Only legacy INTx (no MSI/MSI-X vectors) needs the filter routine. */
	if (0 == ocs->n_vec) {
		filter = ocs_pci_intx_filter;
	}

	if (bus_setup_intr(ocs->dev, ocs->irq, INTR_MPSAFE | INTR_TYPE_CAM,
	    filter, ocs_pci_intr, &ocs->intr_ctx,
	    &ocs->tag)) {
		device_printf(ocs->dev, "could not initialize interrupt\n");
		return -1;
	}

	return 0;
}

/**
 * @brief Detach an interrupt handler
 *
 * @param ocs Pointer to the driver's context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_intr_teardown(struct ocs_softc *ocs)
{

	if (!ocs) {
		printf("%s: bad driver context?!?\n", __func__);
		return -1;
	}

	if (ocs->tag) {
		bus_teardown_intr(ocs->dev, ocs->irq, ocs->tag);
		ocs->tag = NULL;
	}

	return 0;
}
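
/*
 * Legacy INTx interrupts are handled in two stages: ocs_pci_intx_filter()
 * runs in filter (interrupt) context, confirms via PCIR_STATUS that this
 * device is asserting the line, masks further assertion with
 * PCIM_CMD_INTxDIS, and returns FILTER_SCHEDULE_THREAD so that
 * ocs_pci_intr() runs as the threaded handler.  With MSI or MSI-X
 * (ocs->n_vec != 0) no filter is installed and ocs_pci_intr() is simply
 * registered as the threaded handler for the single allocated vector.
 */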

/**
 * @brief PCI interrupt handler
 *
 * @param arg pointer to the driver's software context
 *
 * @return FILTER_SCHEDULE_THREAD if the interrupt belongs to this device,
 *         FILTER_STRAY otherwise
 */
static int
ocs_pci_intx_filter(void *arg)
{
	ocs_intr_ctx_t *intr = arg;
	struct ocs_softc *ocs = NULL;
	uint16_t val = 0;

	if (NULL == intr) {
		return FILTER_STRAY;
	}

	ocs = intr->softc;
#ifndef PCIM_STATUS_INTR
#define PCIM_STATUS_INTR	0x0008
#endif
	val = pci_read_config(ocs->dev, PCIR_STATUS, 2);
	if (0xffff == val) {
		device_printf(ocs->dev, "%s: pci_read_config(PCIR_STATUS) failed\n", __func__);
		return FILTER_STRAY;
	}
	if (0 == (val & PCIM_STATUS_INTR)) {
		return FILTER_STRAY;
	}

	val = pci_read_config(ocs->dev, PCIR_COMMAND, 2);
	val |= PCIM_CMD_INTxDIS;
	pci_write_config(ocs->dev, PCIR_COMMAND, val, 2);

	return FILTER_SCHEDULE_THREAD;
}

/**
 * @brief Interrupt handler
 *
 * @param context pointer to the interrupt context
 */
static void
ocs_pci_intr(void *context)
{
	ocs_intr_ctx_t *intr = context;
	struct ocs_softc *ocs = intr->softc;

	mtx_lock(&ocs->sim_lock);
	ocs_hw_process(&ocs->hw, intr->vec, OCS_OS_MAX_ISR_TIME_MSEC);
	mtx_unlock(&ocs->sim_lock);
}

/**
 * @brief Initialize DMA tag
 *
 * @param ocs the driver instance's software context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_init_dma_tag(struct ocs_softc *ocs)
{
	uint32_t max_sgl = 0;
	uint32_t max_sge = 0;

	/*
	 * IOs can't use the parent DMA tag and must create their
	 * own, based primarily on a restricted number of DMA segments.
	 * This is more of a BSD requirement than a SLI Port requirement.
	 */
	ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &max_sgl);
	ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGE, &max_sge);

	if (bus_dma_tag_create(ocs->dmat,
	    1,				/* byte alignment */
	    0,				/* no boundary restrictions */
	    BUS_SPACE_MAXADDR,		/* no minimum low address */
	    BUS_SPACE_MAXADDR,		/* no maximum high address */
	    NULL,			/* no filter function */
	    NULL,			/* or arguments */
	    BUS_SPACE_MAXSIZE,		/* max size covered by tag */
	    max_sgl,			/* segment count restrictions */
	    max_sge,			/* segment length restrictions */
	    0,				/* flags */
	    NULL,			/* no lock manipulation function */
	    NULL,			/* or arguments */
	    &ocs->buf_dmat)) {
		device_printf(ocs->dev, "%s: bad bus_dma_tag_create(buf_dmat)\n", __func__);
		return -1;
	}
	return 0;
}

int32_t
ocs_get_property(const char *prop_name, char *buffer, uint32_t buffer_len)
{
	return -1;
}

915 * 916 * @param index index to ocs_devices array 917 * 918 * @return ocs pointer 919 */ 920 921 ocs_t *ocs_get_instance(uint32_t index) 922 { 923 if (index < ARRAY_SIZE(ocs_devices)) { 924 return ocs_devices[index]; 925 } 926 return NULL; 927 } 928 929 /** 930 * @brief Return instance index of an opaque ocs structure 931 * 932 * Returns the ocs instance index 933 * 934 * @param os pointer to ocs instance 935 * 936 * @return pointer to ocs instance index 937 */ 938 uint32_t 939 ocs_instance(void *os) 940 { 941 ocs_t *ocs = os; 942 return ocs->instance_index; 943 } 944 945 static device_method_t ocs_methods[] = { 946 DEVMETHOD(device_probe, ocs_pci_probe), 947 DEVMETHOD(device_attach, ocs_pci_attach), 948 DEVMETHOD(device_detach, ocs_pci_detach), 949 DEVMETHOD(device_shutdown, ocs_pci_shutdown), 950 {0, 0} 951 }; 952 953 static driver_t ocs_driver = { 954 "ocs_fc", 955 ocs_methods, 956 sizeof(struct ocs_softc) 957 }; 958 959 static devclass_t ocs_devclass; 960 961 DRIVER_MODULE(ocs_fc, pci, ocs_driver, ocs_devclass, 0, 0); 962 MODULE_VERSION(ocs_fc, 1); 963 964