1 // SPDX-License-Identifier: GPL-2.0-only 2 // Copyright 2014 Cisco Systems, Inc. All rights reserved. 3 4 #include <linux/module.h> 5 #include <linux/mempool.h> 6 #include <linux/string.h> 7 #include <linux/slab.h> 8 #include <linux/errno.h> 9 #include <linux/init.h> 10 #include <linux/pci.h> 11 #include <linux/skbuff.h> 12 #include <linux/interrupt.h> 13 #include <linux/spinlock.h> 14 #include <linux/workqueue.h> 15 #include <scsi/scsi_host.h> 16 #include <scsi/scsi_tcq.h> 17 18 #include "snic.h" 19 #include "snic_fwint.h" 20 21 #define PCI_DEVICE_ID_CISCO_SNIC 0x0046 22 23 /* Supported devices by snic module */ 24 static struct pci_device_id snic_id_table[] = { 25 {PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) }, 26 { 0, } /* end of table */ 27 }; 28 29 unsigned int snic_log_level = 0x0; 30 module_param(snic_log_level, int, S_IRUGO|S_IWUSR); 31 MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels"); 32 33 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 34 unsigned int snic_trace_max_pages = 16; 35 module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR); 36 MODULE_PARM_DESC(snic_trace_max_pages, 37 "Total allocated memory pages for snic trace buffer"); 38 39 #endif 40 unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH; 41 module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR); 42 MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN"); 43 44 /* 45 * snic_slave_alloc : callback function to SCSI Mid Layer, called on 46 * scsi device initialization. 47 */ 48 static int 49 snic_slave_alloc(struct scsi_device *sdev) 50 { 51 struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev)); 52 53 if (!tgt || snic_tgt_chkready(tgt)) 54 return -ENXIO; 55 56 return 0; 57 } 58 59 /* 60 * snic_slave_configure : callback function to SCSI Mid Layer, called on 61 * scsi device initialization. 62 */ 63 static int 64 snic_slave_configure(struct scsi_device *sdev) 65 { 66 struct snic *snic = shost_priv(sdev->host); 67 u32 qdepth = 0, max_ios = 0; 68 int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ; 69 70 /* Set Queue Depth */ 71 max_ios = snic_max_qdepth; 72 qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH); 73 scsi_change_queue_depth(sdev, qdepth); 74 75 if (snic->fwinfo.io_tmo > 1) 76 tmo = snic->fwinfo.io_tmo * HZ; 77 78 /* FW requires extended timeouts */ 79 blk_queue_rq_timeout(sdev->request_queue, tmo); 80 81 return 0; 82 } 83 84 static int 85 snic_change_queue_depth(struct scsi_device *sdev, int qdepth) 86 { 87 struct snic *snic = shost_priv(sdev->host); 88 int qsz = 0; 89 90 qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH); 91 if (qsz < sdev->queue_depth) 92 atomic64_inc(&snic->s_stats.misc.qsz_rampdown); 93 else if (qsz > sdev->queue_depth) 94 atomic64_inc(&snic->s_stats.misc.qsz_rampup); 95 96 atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth); 97 98 scsi_change_queue_depth(sdev, qsz); 99 100 return sdev->queue_depth; 101 } 102 103 static const struct scsi_host_template snic_host_template = { 104 .module = THIS_MODULE, 105 .name = SNIC_DRV_NAME, 106 .queuecommand = snic_queuecommand, 107 .eh_abort_handler = snic_abort_cmd, 108 .eh_device_reset_handler = snic_device_reset, 109 .eh_host_reset_handler = snic_host_reset, 110 .slave_alloc = snic_slave_alloc, 111 .slave_configure = snic_slave_configure, 112 .change_queue_depth = snic_change_queue_depth, 113 .this_id = -1, 114 .cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH, 115 .can_queue = SNIC_MAX_IO_REQ, 116 .sg_tablesize = SNIC_MAX_SG_DESC_CNT, 117 .max_sectors = 0x800, 118 .shost_groups = snic_host_groups, 119 .track_queue_depth = 1, 120 .cmd_size = sizeof(struct snic_internal_io_state), 121 .proc_name = "snic_scsi", 122 }; 123 124 /* 125 * snic_handle_link_event : Handles link events such as link up/down/error 126 */ 127 void 128 snic_handle_link_event(struct snic *snic) 129 { 130 unsigned long flags; 131 132 spin_lock_irqsave(&snic->snic_lock, flags); 133 if (snic->stop_link_events) { 134 spin_unlock_irqrestore(&snic->snic_lock, flags); 135 136 return; 137 } 138 spin_unlock_irqrestore(&snic->snic_lock, flags); 139 140 queue_work(snic_glob->event_q, &snic->link_work); 141 } /* end of snic_handle_link_event */ 142 143 /* 144 * snic_notify_set : sets notification area 145 * This notification area is to receive events from fw 146 * Note: snic supports only MSIX interrupts, in which we can just call 147 * svnic_dev_notify_set directly 148 */ 149 static int 150 snic_notify_set(struct snic *snic) 151 { 152 int ret = 0; 153 enum vnic_dev_intr_mode intr_mode; 154 155 intr_mode = svnic_dev_get_intr_mode(snic->vdev); 156 157 if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) { 158 ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY); 159 } else { 160 SNIC_HOST_ERR(snic->shost, 161 "Interrupt mode should be setup before devcmd notify set %d\n", 162 intr_mode); 163 ret = -1; 164 } 165 166 return ret; 167 } /* end of snic_notify_set */ 168 169 /* 170 * snic_dev_wait : polls vnic open status. 171 */ 172 static int 173 snic_dev_wait(struct vnic_dev *vdev, 174 int (*start)(struct vnic_dev *, int), 175 int (*finished)(struct vnic_dev *, int *), 176 int arg) 177 { 178 unsigned long time; 179 int ret, done; 180 int retry_cnt = 0; 181 182 ret = start(vdev, arg); 183 if (ret) 184 return ret; 185 186 /* 187 * Wait for func to complete...2 seconds max. 188 * 189 * Sometimes schedule_timeout_uninterruptible take long time 190 * to wakeup, which results skipping retry. The retry counter 191 * ensures to retry at least two times. 192 */ 193 time = jiffies + (HZ * 2); 194 do { 195 ret = finished(vdev, &done); 196 if (ret) 197 return ret; 198 199 if (done) 200 return 0; 201 schedule_timeout_uninterruptible(HZ/10); 202 ++retry_cnt; 203 } while (time_after(time, jiffies) || (retry_cnt < 3)); 204 205 return -ETIMEDOUT; 206 } /* end of snic_dev_wait */ 207 208 /* 209 * snic_cleanup: called by snic_remove 210 * Stops the snic device, masks all interrupts, Completed CQ entries are 211 * drained. Posted WQ/RQ/Copy-WQ entries are cleanup 212 */ 213 static int 214 snic_cleanup(struct snic *snic) 215 { 216 unsigned int i; 217 int ret; 218 219 svnic_dev_disable(snic->vdev); 220 for (i = 0; i < snic->intr_count; i++) 221 svnic_intr_mask(&snic->intr[i]); 222 223 for (i = 0; i < snic->wq_count; i++) { 224 ret = svnic_wq_disable(&snic->wq[i]); 225 if (ret) 226 return ret; 227 } 228 229 /* Clean up completed IOs */ 230 snic_fwcq_cmpl_handler(snic, -1); 231 232 snic_wq_cmpl_handler(snic, -1); 233 234 /* Clean up the IOs that have not completed */ 235 for (i = 0; i < snic->wq_count; i++) 236 svnic_wq_clean(&snic->wq[i], snic_free_wq_buf); 237 238 for (i = 0; i < snic->cq_count; i++) 239 svnic_cq_clean(&snic->cq[i]); 240 241 for (i = 0; i < snic->intr_count; i++) 242 svnic_intr_clean(&snic->intr[i]); 243 244 /* Cleanup snic specific requests */ 245 snic_free_all_untagged_reqs(snic); 246 247 /* Cleanup Pending SCSI commands */ 248 snic_shutdown_scsi_cleanup(snic); 249 250 for (i = 0; i < SNIC_REQ_MAX_CACHES; i++) 251 mempool_destroy(snic->req_pool[i]); 252 253 return 0; 254 } /* end of snic_cleanup */ 255 256 257 static void 258 snic_iounmap(struct snic *snic) 259 { 260 if (snic->bar0.vaddr) 261 iounmap(snic->bar0.vaddr); 262 } 263 264 /* 265 * snic_vdev_open_done : polls for svnic_dev_open cmd completion. 266 */ 267 static int 268 snic_vdev_open_done(struct vnic_dev *vdev, int *done) 269 { 270 struct snic *snic = svnic_dev_priv(vdev); 271 int ret; 272 int nretries = 5; 273 274 do { 275 ret = svnic_dev_open_done(vdev, done); 276 if (ret == 0) 277 break; 278 279 SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN Timedout.\n"); 280 } while (nretries--); 281 282 return ret; 283 } /* end of snic_vdev_open_done */ 284 285 /* 286 * snic_add_host : registers scsi host with ML 287 */ 288 static int 289 snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev) 290 { 291 int ret = 0; 292 293 ret = scsi_add_host(shost, &pdev->dev); 294 if (ret) { 295 SNIC_HOST_ERR(shost, 296 "snic: scsi_add_host failed. %d\n", 297 ret); 298 299 return ret; 300 } 301 302 SNIC_BUG_ON(shost->work_q != NULL); 303 snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d", 304 shost->host_no); 305 shost->work_q = create_singlethread_workqueue(shost->work_q_name); 306 if (!shost->work_q) { 307 SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n"); 308 309 ret = -ENOMEM; 310 } 311 312 return ret; 313 } /* end of snic_add_host */ 314 315 static void 316 snic_del_host(struct Scsi_Host *shost) 317 { 318 if (!shost->work_q) 319 return; 320 321 destroy_workqueue(shost->work_q); 322 shost->work_q = NULL; 323 scsi_remove_host(shost); 324 } 325 326 int 327 snic_get_state(struct snic *snic) 328 { 329 return atomic_read(&snic->state); 330 } 331 332 void 333 snic_set_state(struct snic *snic, enum snic_state state) 334 { 335 SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n", 336 snic_state_to_str(snic_get_state(snic)), 337 snic_state_to_str(state)); 338 339 atomic_set(&snic->state, state); 340 } 341 342 /* 343 * snic_probe : Initialize the snic interface. 344 */ 345 static int 346 snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 347 { 348 struct Scsi_Host *shost; 349 struct snic *snic; 350 mempool_t *pool; 351 unsigned long flags; 352 u32 max_ios = 0; 353 int ret, i; 354 355 /* Device Information */ 356 SNIC_INFO("snic device %4x:%4x:%4x:%4x: ", 357 pdev->vendor, pdev->device, pdev->subsystem_vendor, 358 pdev->subsystem_device); 359 360 SNIC_INFO("snic device bus %x: slot %x: fn %x\n", 361 pdev->bus->number, PCI_SLOT(pdev->devfn), 362 PCI_FUNC(pdev->devfn)); 363 364 /* 365 * Allocate SCSI Host and setup association between host, and snic 366 */ 367 shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic)); 368 if (!shost) { 369 SNIC_ERR("Unable to alloc scsi_host\n"); 370 ret = -ENOMEM; 371 372 goto prob_end; 373 } 374 snic = shost_priv(shost); 375 snic->shost = shost; 376 377 snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME, 378 shost->host_no); 379 380 SNIC_HOST_INFO(shost, 381 "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n", 382 shost->host_no, snic, shost, pdev->bus->number, 383 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 384 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 385 /* Per snic debugfs init */ 386 snic_stats_debugfs_init(snic); 387 #endif 388 389 /* Setup PCI Resources */ 390 pci_set_drvdata(pdev, snic); 391 snic->pdev = pdev; 392 393 ret = pci_enable_device(pdev); 394 if (ret) { 395 SNIC_HOST_ERR(shost, 396 "Cannot enable PCI Resources, aborting : %d\n", 397 ret); 398 399 goto err_free_snic; 400 } 401 402 ret = pci_request_regions(pdev, SNIC_DRV_NAME); 403 if (ret) { 404 SNIC_HOST_ERR(shost, 405 "Cannot obtain PCI Resources, aborting : %d\n", 406 ret); 407 408 goto err_pci_disable; 409 } 410 411 pci_set_master(pdev); 412 413 /* 414 * Query PCI Controller on system for DMA addressing 415 * limitation for the device. Try 43-bit first, and 416 * fail to 32-bit. 417 */ 418 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43)); 419 if (ret) { 420 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 421 if (ret) { 422 SNIC_HOST_ERR(shost, 423 "No Usable DMA Configuration, aborting %d\n", 424 ret); 425 goto err_rel_regions; 426 } 427 } 428 429 /* Map vNIC resources from BAR0 */ 430 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 431 SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n"); 432 433 ret = -ENODEV; 434 goto err_rel_regions; 435 } 436 437 snic->bar0.vaddr = pci_iomap(pdev, 0, 0); 438 if (!snic->bar0.vaddr) { 439 SNIC_HOST_ERR(shost, 440 "Cannot memory map BAR0 res hdr aborting.\n"); 441 442 ret = -ENODEV; 443 goto err_rel_regions; 444 } 445 446 snic->bar0.bus_addr = pci_resource_start(pdev, 0); 447 snic->bar0.len = pci_resource_len(pdev, 0); 448 SNIC_BUG_ON(snic->bar0.bus_addr == 0); 449 450 /* Devcmd2 Resource Allocation and Initialization */ 451 snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1); 452 if (!snic->vdev) { 453 SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n"); 454 455 ret = -ENODEV; 456 goto err_iounmap; 457 } 458 459 ret = svnic_dev_cmd_init(snic->vdev, 0); 460 if (ret) { 461 SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret); 462 463 goto err_vnic_unreg; 464 } 465 466 ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0); 467 if (ret) { 468 SNIC_HOST_ERR(shost, 469 "vNIC dev open failed, aborting. %d\n", 470 ret); 471 472 goto err_vnic_unreg; 473 } 474 475 ret = svnic_dev_init(snic->vdev, 0); 476 if (ret) { 477 SNIC_HOST_ERR(shost, 478 "vNIC dev init failed. aborting. %d\n", 479 ret); 480 481 goto err_dev_close; 482 } 483 484 /* Get vNIC information */ 485 ret = snic_get_vnic_config(snic); 486 if (ret) { 487 SNIC_HOST_ERR(shost, 488 "Get vNIC configuration failed, aborting. %d\n", 489 ret); 490 491 goto err_dev_close; 492 } 493 494 /* Configure Maximum Outstanding IO reqs */ 495 max_ios = snic->config.io_throttle_count; 496 if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD) 497 shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ, 498 max_t(u32, SNIC_MIN_IO_REQ, max_ios)); 499 500 snic->max_tag_id = shost->can_queue; 501 502 shost->max_lun = snic->config.luns_per_tgt; 503 shost->max_id = SNIC_MAX_TARGET; 504 505 shost->max_cmd_len = MAX_COMMAND_SIZE; /*defined in scsi_cmnd.h*/ 506 507 snic_get_res_counts(snic); 508 509 /* 510 * Assumption: Only MSIx is supported 511 */ 512 ret = snic_set_intr_mode(snic); 513 if (ret) { 514 SNIC_HOST_ERR(shost, 515 "Failed to set intr mode aborting. %d\n", 516 ret); 517 518 goto err_dev_close; 519 } 520 521 ret = snic_alloc_vnic_res(snic); 522 if (ret) { 523 SNIC_HOST_ERR(shost, 524 "Failed to alloc vNIC resources aborting. %d\n", 525 ret); 526 527 goto err_clear_intr; 528 } 529 530 /* Initialize specific lists */ 531 INIT_LIST_HEAD(&snic->list); 532 533 /* 534 * spl_cmd_list for maintaining snic specific cmds 535 * such as EXCH_VER_REQ, REPORT_TARGETS etc 536 */ 537 INIT_LIST_HEAD(&snic->spl_cmd_list); 538 spin_lock_init(&snic->spl_cmd_lock); 539 540 /* initialize all snic locks */ 541 spin_lock_init(&snic->snic_lock); 542 543 for (i = 0; i < SNIC_WQ_MAX; i++) 544 spin_lock_init(&snic->wq_lock[i]); 545 546 for (i = 0; i < SNIC_IO_LOCKS; i++) 547 spin_lock_init(&snic->io_req_lock[i]); 548 549 pool = mempool_create_slab_pool(2, 550 snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]); 551 if (!pool) { 552 SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n"); 553 554 ret = -ENOMEM; 555 goto err_free_res; 556 } 557 558 snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool; 559 560 pool = mempool_create_slab_pool(2, 561 snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]); 562 if (!pool) { 563 SNIC_HOST_ERR(shost, "max sgl pool creation failed\n"); 564 565 ret = -ENOMEM; 566 goto err_free_dflt_sgl_pool; 567 } 568 569 snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool; 570 571 pool = mempool_create_slab_pool(2, 572 snic_glob->req_cache[SNIC_REQ_TM_CACHE]); 573 if (!pool) { 574 SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n"); 575 576 ret = -ENOMEM; 577 goto err_free_max_sgl_pool; 578 } 579 580 snic->req_pool[SNIC_REQ_TM_CACHE] = pool; 581 582 /* Initialize snic state */ 583 atomic_set(&snic->state, SNIC_INIT); 584 585 atomic_set(&snic->ios_inflight, 0); 586 587 /* Setup notification buffer area */ 588 ret = snic_notify_set(snic); 589 if (ret) { 590 SNIC_HOST_ERR(shost, 591 "Failed to alloc notify buffer aborting. %d\n", 592 ret); 593 594 goto err_free_tmreq_pool; 595 } 596 597 spin_lock_irqsave(&snic_glob->snic_list_lock, flags); 598 list_add_tail(&snic->list, &snic_glob->snic_list); 599 spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags); 600 601 snic_disc_init(&snic->disc); 602 INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc); 603 INIT_WORK(&snic->disc_work, snic_handle_disc); 604 INIT_WORK(&snic->link_work, snic_handle_link); 605 606 /* Enable all queues */ 607 for (i = 0; i < snic->wq_count; i++) 608 svnic_wq_enable(&snic->wq[i]); 609 610 ret = svnic_dev_enable_wait(snic->vdev); 611 if (ret) { 612 SNIC_HOST_ERR(shost, 613 "vNIC dev enable failed w/ error %d\n", 614 ret); 615 616 goto err_vdev_enable; 617 } 618 619 ret = snic_request_intr(snic); 620 if (ret) { 621 SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret); 622 623 goto err_req_intr; 624 } 625 626 for (i = 0; i < snic->intr_count; i++) 627 svnic_intr_unmask(&snic->intr[i]); 628 629 /* Get snic params */ 630 ret = snic_get_conf(snic); 631 if (ret) { 632 SNIC_HOST_ERR(shost, 633 "Failed to get snic io config from FW w err %d\n", 634 ret); 635 636 goto err_get_conf; 637 } 638 639 /* 640 * Initialization done with PCI system, hardware, firmware. 641 * Add shost to SCSI 642 */ 643 ret = snic_add_host(shost, pdev); 644 if (ret) { 645 SNIC_HOST_ERR(shost, 646 "Adding scsi host Failed ... exiting. %d\n", 647 ret); 648 649 goto err_get_conf; 650 } 651 652 snic_set_state(snic, SNIC_ONLINE); 653 654 ret = snic_disc_start(snic); 655 if (ret) { 656 SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n", 657 ret); 658 659 goto err_get_conf; 660 } 661 662 SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n"); 663 664 return 0; 665 666 err_get_conf: 667 snic_free_all_untagged_reqs(snic); 668 669 for (i = 0; i < snic->intr_count; i++) 670 svnic_intr_mask(&snic->intr[i]); 671 672 snic_free_intr(snic); 673 674 err_req_intr: 675 svnic_dev_disable(snic->vdev); 676 677 err_vdev_enable: 678 svnic_dev_notify_unset(snic->vdev); 679 680 for (i = 0; i < snic->wq_count; i++) { 681 int rc = 0; 682 683 rc = svnic_wq_disable(&snic->wq[i]); 684 if (rc) { 685 SNIC_HOST_ERR(shost, 686 "WQ Disable Failed w/ err = %d\n", rc); 687 688 break; 689 } 690 } 691 snic_del_host(snic->shost); 692 693 err_free_tmreq_pool: 694 mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]); 695 696 err_free_max_sgl_pool: 697 mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]); 698 699 err_free_dflt_sgl_pool: 700 mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]); 701 702 err_free_res: 703 snic_free_vnic_res(snic); 704 705 err_clear_intr: 706 snic_clear_intr_mode(snic); 707 708 err_dev_close: 709 svnic_dev_close(snic->vdev); 710 711 err_vnic_unreg: 712 svnic_dev_unregister(snic->vdev); 713 714 err_iounmap: 715 snic_iounmap(snic); 716 717 err_rel_regions: 718 pci_release_regions(pdev); 719 720 err_pci_disable: 721 pci_disable_device(pdev); 722 723 err_free_snic: 724 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 725 snic_stats_debugfs_remove(snic); 726 #endif 727 scsi_host_put(shost); 728 pci_set_drvdata(pdev, NULL); 729 730 prob_end: 731 SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n", 732 pdev->bus->number, PCI_SLOT(pdev->devfn), 733 PCI_FUNC(pdev->devfn)); 734 735 return ret; 736 } /* end of snic_probe */ 737 738 739 /* 740 * snic_remove : invoked on unbinding the interface to cleanup the 741 * resources allocated in snic_probe on initialization. 742 */ 743 static void 744 snic_remove(struct pci_dev *pdev) 745 { 746 struct snic *snic = pci_get_drvdata(pdev); 747 unsigned long flags; 748 749 if (!snic) { 750 SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n", 751 pdev->bus->number, PCI_SLOT(pdev->devfn), 752 PCI_FUNC(pdev->devfn)); 753 754 return; 755 } 756 757 /* 758 * Mark state so that the workqueue thread stops forwarding 759 * received frames and link events. ISR and other threads 760 * that can queue work items will also stop creating work 761 * items on the snic workqueue 762 */ 763 snic_set_state(snic, SNIC_OFFLINE); 764 spin_lock_irqsave(&snic->snic_lock, flags); 765 snic->stop_link_events = 1; 766 spin_unlock_irqrestore(&snic->snic_lock, flags); 767 768 flush_workqueue(snic_glob->event_q); 769 snic_disc_term(snic); 770 771 spin_lock_irqsave(&snic->snic_lock, flags); 772 snic->in_remove = 1; 773 spin_unlock_irqrestore(&snic->snic_lock, flags); 774 775 /* 776 * This stops the snic device, masks all interrupts, Completed 777 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are 778 * cleanup 779 */ 780 snic_cleanup(snic); 781 782 spin_lock_irqsave(&snic_glob->snic_list_lock, flags); 783 list_del(&snic->list); 784 spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags); 785 786 snic_tgt_del_all(snic); 787 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 788 snic_stats_debugfs_remove(snic); 789 #endif 790 snic_del_host(snic->shost); 791 792 svnic_dev_notify_unset(snic->vdev); 793 snic_free_intr(snic); 794 snic_free_vnic_res(snic); 795 snic_clear_intr_mode(snic); 796 svnic_dev_close(snic->vdev); 797 svnic_dev_unregister(snic->vdev); 798 snic_iounmap(snic); 799 pci_release_regions(pdev); 800 pci_disable_device(pdev); 801 pci_set_drvdata(pdev, NULL); 802 803 /* this frees Scsi_Host and snic memory (continuous chunk) */ 804 scsi_host_put(snic->shost); 805 } /* end of snic_remove */ 806 807 808 struct snic_global *snic_glob; 809 810 /* 811 * snic_global_data_init: Initialize SNIC Global Data 812 * Notes: All the global lists, variables should be part of global data 813 * this helps in debugging. 814 */ 815 static int 816 snic_global_data_init(void) 817 { 818 int ret = 0; 819 struct kmem_cache *cachep; 820 ssize_t len = 0; 821 822 snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL); 823 824 if (!snic_glob) { 825 SNIC_ERR("Failed to allocate Global Context.\n"); 826 827 ret = -ENOMEM; 828 goto gdi_end; 829 } 830 831 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 832 /* Debugfs related Initialization */ 833 /* Create debugfs entries for snic */ 834 snic_debugfs_init(); 835 836 /* Trace related Initialization */ 837 /* Allocate memory for trace buffer */ 838 ret = snic_trc_init(); 839 if (ret < 0) { 840 SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n"); 841 snic_trc_free(); 842 /* continue even if it fails */ 843 } 844 845 #endif 846 INIT_LIST_HEAD(&snic_glob->snic_list); 847 spin_lock_init(&snic_glob->snic_list_lock); 848 849 /* Create a cache for allocation of snic_host_req+default size ESGLs */ 850 len = sizeof(struct snic_req_info); 851 len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl); 852 cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN, 853 SLAB_HWCACHE_ALIGN, NULL); 854 if (!cachep) { 855 SNIC_ERR("Failed to create snic default sgl slab\n"); 856 ret = -ENOMEM; 857 858 goto err_dflt_req_slab; 859 } 860 snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep; 861 862 /* Create a cache for allocation of max size Extended SGLs */ 863 len = sizeof(struct snic_req_info); 864 len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl); 865 cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN, 866 SLAB_HWCACHE_ALIGN, NULL); 867 if (!cachep) { 868 SNIC_ERR("Failed to create snic max sgl slab\n"); 869 ret = -ENOMEM; 870 871 goto err_max_req_slab; 872 } 873 snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep; 874 875 len = sizeof(struct snic_host_req); 876 cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN, 877 SLAB_HWCACHE_ALIGN, NULL); 878 if (!cachep) { 879 SNIC_ERR("Failed to create snic tm req slab\n"); 880 ret = -ENOMEM; 881 882 goto err_tmreq_slab; 883 } 884 snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep; 885 886 /* snic_event queue */ 887 snic_glob->event_q = create_singlethread_workqueue("snic_event_wq"); 888 if (!snic_glob->event_q) { 889 SNIC_ERR("snic event queue create failed\n"); 890 ret = -ENOMEM; 891 892 goto err_eventq; 893 } 894 895 return ret; 896 897 err_eventq: 898 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]); 899 900 err_tmreq_slab: 901 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]); 902 903 err_max_req_slab: 904 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]); 905 906 err_dflt_req_slab: 907 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 908 snic_trc_free(); 909 snic_debugfs_term(); 910 #endif 911 kfree(snic_glob); 912 snic_glob = NULL; 913 914 gdi_end: 915 return ret; 916 } /* end of snic_glob_init */ 917 918 /* 919 * snic_global_data_cleanup : Frees SNIC Global Data 920 */ 921 static void 922 snic_global_data_cleanup(void) 923 { 924 SNIC_BUG_ON(snic_glob == NULL); 925 926 destroy_workqueue(snic_glob->event_q); 927 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]); 928 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]); 929 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]); 930 931 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 932 /* Freeing Trace Resources */ 933 snic_trc_free(); 934 935 /* Freeing Debugfs Resources */ 936 snic_debugfs_term(); 937 #endif 938 kfree(snic_glob); 939 snic_glob = NULL; 940 } /* end of snic_glob_cleanup */ 941 942 static struct pci_driver snic_driver = { 943 .name = SNIC_DRV_NAME, 944 .id_table = snic_id_table, 945 .probe = snic_probe, 946 .remove = snic_remove, 947 }; 948 949 static int __init 950 snic_init_module(void) 951 { 952 int ret = 0; 953 954 #ifndef __x86_64__ 955 SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n"); 956 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); 957 #endif 958 959 SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION); 960 961 ret = snic_global_data_init(); 962 if (ret) { 963 SNIC_ERR("Failed to Initialize Global Data.\n"); 964 965 return ret; 966 } 967 968 ret = pci_register_driver(&snic_driver); 969 if (ret < 0) { 970 SNIC_ERR("PCI driver register error\n"); 971 972 goto err_pci_reg; 973 } 974 975 return ret; 976 977 err_pci_reg: 978 snic_global_data_cleanup(); 979 980 return ret; 981 } 982 983 static void __exit 984 snic_cleanup_module(void) 985 { 986 pci_unregister_driver(&snic_driver); 987 snic_global_data_cleanup(); 988 } 989 990 module_init(snic_init_module); 991 module_exit(snic_cleanup_module); 992 993 MODULE_LICENSE("GPL v2"); 994 MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION); 995 MODULE_VERSION(SNIC_DRV_VERSION); 996 MODULE_DEVICE_TABLE(pci, snic_id_table); 997 MODULE_AUTHOR("Narsimhulu Musini <nmusini@cisco.com>, " 998 "Sesidhar Baddela <sebaddel@cisco.com>"); 999