1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2008 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 9 #include <linux/moduleparam.h> 10 #include <linux/vmalloc.h> 11 #include <linux/delay.h> 12 #include <linux/kthread.h> 13 #include <linux/mutex.h> 14 15 #include <scsi/scsi_tcq.h> 16 #include <scsi/scsicam.h> 17 #include <scsi/scsi_transport.h> 18 #include <scsi/scsi_transport_fc.h> 19 20 /* 21 * Driver version 22 */ 23 char qla2x00_version_str[40]; 24 25 /* 26 * SRB allocation cache 27 */ 28 static struct kmem_cache *srb_cachep; 29 30 int ql2xlogintimeout = 20; 31 module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR); 32 MODULE_PARM_DESC(ql2xlogintimeout, 33 "Login timeout value in seconds."); 34 35 int qlport_down_retry; 36 module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR); 37 MODULE_PARM_DESC(qlport_down_retry, 38 "Maximum number of command retries to a port that returns " 39 "a PORT-DOWN status."); 40 41 int ql2xplogiabsentdevice; 42 module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR); 43 MODULE_PARM_DESC(ql2xplogiabsentdevice, 44 "Option to enable PLOGI to devices that are not present after " 45 "a Fabric scan. This is needed for several broken switches. " 46 "Default is 0 - no PLOGI. 1 - perform PLOGI."); 47 48 int ql2xloginretrycount = 0; 49 module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR); 50 MODULE_PARM_DESC(ql2xloginretrycount, 51 "Specify an alternate value for the NVRAM login retry count."); 52 53 int ql2xallocfwdump = 1; 54 module_param(ql2xallocfwdump, int, S_IRUGO|S_IRUSR); 55 MODULE_PARM_DESC(ql2xallocfwdump, 56 "Option to enable allocation of memory for a firmware dump " 57 "during HBA initialization. Memory allocation requirements " 58 "vary by ISP type. Default is 1 - allocate memory."); 59 60 int ql2xextended_error_logging; 61 module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR); 62 MODULE_PARM_DESC(ql2xextended_error_logging, 63 "Option to enable extended error logging. " 64 "Default is 0 - no logging. 1 - log errors."); 65 66 static void qla2x00_free_device(scsi_qla_host_t *); 67 68 static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha); 69 70 int ql2xfdmienable=1; 71 module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR); 72 MODULE_PARM_DESC(ql2xfdmienable, 73 "Enables FDMI registrations. " 74 "Default is 1 - perform FDMI. 0 - no FDMI."); 75 76 #define MAX_Q_DEPTH 32 77 static int ql2xmaxqdepth = MAX_Q_DEPTH; 78 module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR); 79 MODULE_PARM_DESC(ql2xmaxqdepth, 80 "Maximum queue depth to report for target devices."); 81 82 int ql2xqfullrampup = 120; 83 module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR); 84 MODULE_PARM_DESC(ql2xqfullrampup, 85 "Number of seconds to wait to begin to ramp-up the queue " 86 "depth for a device after a queue-full condition has been " 87 "detected. Default is 120 seconds."); 88 89 int ql2xiidmaenable=1; 90 module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR); 91 MODULE_PARM_DESC(ql2xiidmaenable, 92 "Enables iIDMA settings. " 93 "Default is 1 - perform iIDMA. 
0 - no iIDMA."); 94 95 96 /* 97 * SCSI host template entry points 98 */ 99 static int qla2xxx_slave_configure(struct scsi_device * device); 100 static int qla2xxx_slave_alloc(struct scsi_device *); 101 static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time); 102 static void qla2xxx_scan_start(struct Scsi_Host *); 103 static void qla2xxx_slave_destroy(struct scsi_device *); 104 static int qla2x00_queuecommand(struct scsi_cmnd *cmd, 105 void (*fn)(struct scsi_cmnd *)); 106 static int qla24xx_queuecommand(struct scsi_cmnd *cmd, 107 void (*fn)(struct scsi_cmnd *)); 108 static int qla2xxx_eh_abort(struct scsi_cmnd *); 109 static int qla2xxx_eh_device_reset(struct scsi_cmnd *); 110 static int qla2xxx_eh_target_reset(struct scsi_cmnd *); 111 static int qla2xxx_eh_bus_reset(struct scsi_cmnd *); 112 static int qla2xxx_eh_host_reset(struct scsi_cmnd *); 113 114 static int qla2x00_change_queue_depth(struct scsi_device *, int); 115 static int qla2x00_change_queue_type(struct scsi_device *, int); 116 117 static struct scsi_host_template qla2x00_driver_template = { 118 .module = THIS_MODULE, 119 .name = QLA2XXX_DRIVER_NAME, 120 .queuecommand = qla2x00_queuecommand, 121 122 .eh_abort_handler = qla2xxx_eh_abort, 123 .eh_device_reset_handler = qla2xxx_eh_device_reset, 124 .eh_target_reset_handler = qla2xxx_eh_target_reset, 125 .eh_bus_reset_handler = qla2xxx_eh_bus_reset, 126 .eh_host_reset_handler = qla2xxx_eh_host_reset, 127 128 .slave_configure = qla2xxx_slave_configure, 129 130 .slave_alloc = qla2xxx_slave_alloc, 131 .slave_destroy = qla2xxx_slave_destroy, 132 .scan_finished = qla2xxx_scan_finished, 133 .scan_start = qla2xxx_scan_start, 134 .change_queue_depth = qla2x00_change_queue_depth, 135 .change_queue_type = qla2x00_change_queue_type, 136 .this_id = -1, 137 .cmd_per_lun = 3, 138 .use_clustering = ENABLE_CLUSTERING, 139 .sg_tablesize = SG_ALL, 140 141 /* 142 * The RISC allows for each command to transfer (2^32-1) bytes of data, 143 * which equates to 0x800000 sectors. 
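* (Roughly 2^32 bytes / 512 bytes per sector = 0x800000 sectors; the template nevertheless advertises a far smaller max_sectors of 0xFFFF.)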
144 */ 145 .max_sectors = 0xFFFF, 146 .shost_attrs = qla2x00_host_attrs, 147 }; 148 149 struct scsi_host_template qla24xx_driver_template = { 150 .module = THIS_MODULE, 151 .name = QLA2XXX_DRIVER_NAME, 152 .queuecommand = qla24xx_queuecommand, 153 154 .eh_abort_handler = qla2xxx_eh_abort, 155 .eh_device_reset_handler = qla2xxx_eh_device_reset, 156 .eh_target_reset_handler = qla2xxx_eh_target_reset, 157 .eh_bus_reset_handler = qla2xxx_eh_bus_reset, 158 .eh_host_reset_handler = qla2xxx_eh_host_reset, 159 160 .slave_configure = qla2xxx_slave_configure, 161 162 .slave_alloc = qla2xxx_slave_alloc, 163 .slave_destroy = qla2xxx_slave_destroy, 164 .scan_finished = qla2xxx_scan_finished, 165 .scan_start = qla2xxx_scan_start, 166 .change_queue_depth = qla2x00_change_queue_depth, 167 .change_queue_type = qla2x00_change_queue_type, 168 .this_id = -1, 169 .cmd_per_lun = 3, 170 .use_clustering = ENABLE_CLUSTERING, 171 .sg_tablesize = SG_ALL, 172 173 .max_sectors = 0xFFFF, 174 .shost_attrs = qla2x00_host_attrs, 175 }; 176 177 static struct scsi_transport_template *qla2xxx_transport_template = NULL; 178 struct scsi_transport_template *qla2xxx_transport_vport_template = NULL; 179 180 /* TODO Convert to inlines 181 * 182 * Timer routines 183 */ 184 185 __inline__ void 186 qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval) 187 { 188 init_timer(&ha->timer); 189 ha->timer.expires = jiffies + interval * HZ; 190 ha->timer.data = (unsigned long)ha; 191 ha->timer.function = (void (*)(unsigned long))func; 192 add_timer(&ha->timer); 193 ha->timer_active = 1; 194 } 195 196 static inline void 197 qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval) 198 { 199 mod_timer(&ha->timer, jiffies + interval * HZ); 200 } 201 202 static __inline__ void 203 qla2x00_stop_timer(scsi_qla_host_t *ha) 204 { 205 del_timer_sync(&ha->timer); 206 ha->timer_active = 0; 207 } 208 209 static int qla2x00_do_dpc(void *data); 210 211 static void qla2x00_rst_aen(scsi_qla_host_t *); 212 213 static int qla2x00_mem_alloc(scsi_qla_host_t *); 214 static void qla2x00_mem_free(scsi_qla_host_t *ha); 215 static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *); 216 217 /* -------------------------------------------------------------------------- */ 218 219 static char * 220 qla2x00_pci_info_str(struct scsi_qla_host *ha, char *str) 221 { 222 static char *pci_bus_modes[] = { 223 "33", "66", "100", "133", 224 }; 225 uint16_t pci_bus; 226 227 strcpy(str, "PCI"); 228 pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9; 229 if (pci_bus) { 230 strcat(str, "-X ("); 231 strcat(str, pci_bus_modes[pci_bus]); 232 } else { 233 pci_bus = (ha->pci_attr & BIT_8) >> 8; 234 strcat(str, " ("); 235 strcat(str, pci_bus_modes[pci_bus]); 236 } 237 strcat(str, " MHz)"); 238 239 return (str); 240 } 241 242 static char * 243 qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str) 244 { 245 static char *pci_bus_modes[] = { "33", "66", "100", "133", }; 246 uint32_t pci_bus; 247 int pcie_reg; 248 249 pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); 250 if (pcie_reg) { 251 char lwstr[6]; 252 uint16_t pcie_lstat, lspeed, lwidth; 253 254 pcie_reg += 0x12; 255 pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat); 256 lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3); 257 lwidth = (pcie_lstat & 258 (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4; 259 260 strcpy(str, "PCIe ("); 261 if (lspeed == 1) 262 strcat(str, "2.5GT/s "); 263 else if (lspeed == 2) 264 strcat(str, "5.0GT/s "); 265 else 266 strcat(str, "<unknown> "); 267 
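/* Negotiated link width is reported from PCIe Link Status bits [9:4]; lspeed above is the link speed field in bits [3:0]. */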
snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth); 268 strcat(str, lwstr); 269 270 return str; 271 } 272 273 strcpy(str, "PCI"); 274 pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8; 275 if (pci_bus == 0 || pci_bus == 8) { 276 strcat(str, " ("); 277 strcat(str, pci_bus_modes[pci_bus >> 3]); 278 } else { 279 strcat(str, "-X "); 280 if (pci_bus & BIT_2) 281 strcat(str, "Mode 2"); 282 else 283 strcat(str, "Mode 1"); 284 strcat(str, " ("); 285 strcat(str, pci_bus_modes[pci_bus & ~BIT_2]); 286 } 287 strcat(str, " MHz)"); 288 289 return str; 290 } 291 292 static char * 293 qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str) 294 { 295 char un_str[10]; 296 297 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, 298 ha->fw_minor_version, 299 ha->fw_subminor_version); 300 301 if (ha->fw_attributes & BIT_9) { 302 strcat(str, "FLX"); 303 return (str); 304 } 305 306 switch (ha->fw_attributes & 0xFF) { 307 case 0x7: 308 strcat(str, "EF"); 309 break; 310 case 0x17: 311 strcat(str, "TP"); 312 break; 313 case 0x37: 314 strcat(str, "IP"); 315 break; 316 case 0x77: 317 strcat(str, "VI"); 318 break; 319 default: 320 sprintf(un_str, "(%x)", ha->fw_attributes); 321 strcat(str, un_str); 322 break; 323 } 324 if (ha->fw_attributes & 0x100) 325 strcat(str, "X"); 326 327 return (str); 328 } 329 330 static char * 331 qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str) 332 { 333 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, 334 ha->fw_minor_version, 335 ha->fw_subminor_version); 336 337 if (ha->fw_attributes & BIT_0) 338 strcat(str, "[Class 2] "); 339 if (ha->fw_attributes & BIT_1) 340 strcat(str, "[IP] "); 341 if (ha->fw_attributes & BIT_2) 342 strcat(str, "[Multi-ID] "); 343 if (ha->fw_attributes & BIT_3) 344 strcat(str, "[SB-2] "); 345 if (ha->fw_attributes & BIT_4) 346 strcat(str, "[T10 CRC] "); 347 if (ha->fw_attributes & BIT_5) 348 strcat(str, "[VI] "); 349 if (ha->fw_attributes & BIT_10) 350 strcat(str, "[84XX] "); 351 if (ha->fw_attributes & BIT_13) 352 strcat(str, "[Experimental]"); 353 return str; 354 } 355 356 static inline srb_t * 357 qla2x00_get_new_sp(scsi_qla_host_t *ha, fc_port_t *fcport, 358 struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 359 { 360 srb_t *sp; 361 362 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 363 if (!sp) 364 return sp; 365 366 sp->ha = ha; 367 sp->fcport = fcport; 368 sp->cmd = cmd; 369 sp->flags = 0; 370 CMD_SP(cmd) = (void *)sp; 371 cmd->scsi_done = done; 372 373 return sp; 374 } 375 376 static int 377 qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 378 { 379 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 380 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 381 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 382 srb_t *sp; 383 int rval; 384 385 if (unlikely(pci_channel_offline(ha->pdev))) { 386 cmd->result = DID_REQUEUE << 16; 387 goto qc_fail_command; 388 } 389 390 rval = fc_remote_port_chkready(rport); 391 if (rval) { 392 cmd->result = rval; 393 goto qc_fail_command; 394 } 395 396 /* Close window on fcport/rport state-transitioning. 
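A departing rport is parked in fcport->drport while the FC transport tears it down; until then new commands are bounced back with a target-busy status.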
*/ 397 if (fcport->drport) 398 goto qc_target_busy; 399 400 if (atomic_read(&fcport->state) != FCS_ONLINE) { 401 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 402 atomic_read(&ha->loop_state) == LOOP_DEAD) { 403 cmd->result = DID_NO_CONNECT << 16; 404 goto qc_fail_command; 405 } 406 goto qc_target_busy; 407 } 408 409 spin_unlock_irq(ha->host->host_lock); 410 411 sp = qla2x00_get_new_sp(ha, fcport, cmd, done); 412 if (!sp) 413 goto qc_host_busy_lock; 414 415 rval = qla2x00_start_scsi(sp); 416 if (rval != QLA_SUCCESS) 417 goto qc_host_busy_free_sp; 418 419 spin_lock_irq(ha->host->host_lock); 420 421 return 0; 422 423 qc_host_busy_free_sp: 424 qla2x00_sp_free_dma(ha, sp); 425 mempool_free(sp, ha->srb_mempool); 426 427 qc_host_busy_lock: 428 spin_lock_irq(ha->host->host_lock); 429 return SCSI_MLQUEUE_HOST_BUSY; 430 431 qc_target_busy: 432 return SCSI_MLQUEUE_TARGET_BUSY; 433 434 qc_fail_command: 435 done(cmd); 436 437 return 0; 438 } 439 440 441 static int 442 qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 443 { 444 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 445 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 446 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 447 srb_t *sp; 448 int rval; 449 scsi_qla_host_t *pha = to_qla_parent(ha); 450 451 if (unlikely(pci_channel_offline(pha->pdev))) { 452 cmd->result = DID_REQUEUE << 16; 453 goto qc24_fail_command; 454 } 455 456 rval = fc_remote_port_chkready(rport); 457 if (rval) { 458 cmd->result = rval; 459 goto qc24_fail_command; 460 } 461 462 /* Close window on fcport/rport state-transitioning. */ 463 if (fcport->drport) 464 goto qc24_target_busy; 465 466 if (atomic_read(&fcport->state) != FCS_ONLINE) { 467 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 468 atomic_read(&pha->loop_state) == LOOP_DEAD) { 469 cmd->result = DID_NO_CONNECT << 16; 470 goto qc24_fail_command; 471 } 472 goto qc24_target_busy; 473 } 474 475 spin_unlock_irq(ha->host->host_lock); 476 477 sp = qla2x00_get_new_sp(pha, fcport, cmd, done); 478 if (!sp) 479 goto qc24_host_busy_lock; 480 481 rval = qla24xx_start_scsi(sp); 482 if (rval != QLA_SUCCESS) 483 goto qc24_host_busy_free_sp; 484 485 spin_lock_irq(ha->host->host_lock); 486 487 return 0; 488 489 qc24_host_busy_free_sp: 490 qla2x00_sp_free_dma(pha, sp); 491 mempool_free(sp, pha->srb_mempool); 492 493 qc24_host_busy_lock: 494 spin_lock_irq(ha->host->host_lock); 495 return SCSI_MLQUEUE_HOST_BUSY; 496 497 qc24_target_busy: 498 return SCSI_MLQUEUE_TARGET_BUSY; 499 500 qc24_fail_command: 501 done(cmd); 502 503 return 0; 504 } 505 506 507 /* 508 * qla2x00_eh_wait_on_command 509 * Waits for the command to be returned by the Firmware for some 510 * max time. 511 * 512 * Input: 513 * ha = actual ha whose done queue will contain the command 514 * returned by firmware. 515 * cmd = Scsi Command to wait on. 
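* The command is polled once every ABORT_POLLING_PERIOD milliseconds, for at most ABORT_WAIT_ITER iterations (roughly ten seconds).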
517 * 518 * Return: 519 * Not Found : 0 520 * Found : 1 521 */ 522 static int 523 qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd) 524 { 525 #define ABORT_POLLING_PERIOD 1000 526 #define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD)) 527 unsigned long wait_iter = ABORT_WAIT_ITER; 528 int ret = QLA_SUCCESS; 529 530 while (CMD_SP(cmd)) { 531 msleep(ABORT_POLLING_PERIOD); 532 533 if (!--wait_iter) 534 break; 535 } 536 if (CMD_SP(cmd)) 537 ret = QLA_FUNCTION_FAILED; 538 539 return ret; 540 } 541 542 /* 543 * qla2x00_wait_for_hba_online 544 * Wait until the HBA is online after going through 545 * <= MAX_RETRIES_OF_ISP_ABORT retries, or 546 * until the HBA is finally disabled, i.e. marked offline. 547 * 548 * Input: 549 * ha - pointer to host adapter structure 550 * 551 * Note: 552 * Does context switching - release any SPIN_LOCK 553 * held before calling this routine. 554 * 555 * Return: 556 * Success (Adapter is online) : 0 557 * Failed (Adapter is offline/disabled) : 1 558 */ 559 int 560 qla2x00_wait_for_hba_online(scsi_qla_host_t *ha) 561 { 562 int return_status; 563 unsigned long wait_online; 564 scsi_qla_host_t *pha = to_qla_parent(ha); 565 566 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); 567 while (((test_bit(ISP_ABORT_NEEDED, &pha->dpc_flags)) || 568 test_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags) || 569 test_bit(ISP_ABORT_RETRY, &pha->dpc_flags) || 570 pha->dpc_active) && time_before(jiffies, wait_online)) { 571 572 msleep(1000); 573 } 574 if (pha->flags.online) 575 return_status = QLA_SUCCESS; 576 else 577 return_status = QLA_FUNCTION_FAILED; 578 579 return (return_status); 580 } 581 582 /* 583 * qla2x00_wait_for_loop_ready 584 * Wait up to MAX_LOOP_TIMEOUT (5 min) for the loop 585 * to reach the LOOP_READY state. 586 * Input: 587 * ha - pointer to host adapter structure 588 * 589 * Note: 590 * Does context switching - release any SPIN_LOCK 591 * held before calling this routine. 
592 * 593 * 594 * Return: 595 * Success (LOOP_READY) : 0 596 * Failed (LOOP_NOT_READY) : 1 597 */ 598 static inline int 599 qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha) 600 { 601 int return_status = QLA_SUCCESS; 602 unsigned long loop_timeout; 603 scsi_qla_host_t *pha = to_qla_parent(ha); 604 605 /* Wait at most 5 minutes for the loop to become ready. */ 606 loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ); 607 608 while ((!atomic_read(&pha->loop_down_timer) && 609 atomic_read(&pha->loop_state) == LOOP_DOWN) || 610 atomic_read(&pha->loop_state) != LOOP_READY) { 611 if (atomic_read(&pha->loop_state) == LOOP_DEAD) { 612 return_status = QLA_FUNCTION_FAILED; 613 break; 614 } 615 msleep(1000); 616 if (time_after_eq(jiffies, loop_timeout)) { 617 return_status = QLA_FUNCTION_FAILED; 618 break; 619 } 620 } 621 return (return_status); 622 } 623 624 void 625 qla2x00_abort_fcport_cmds(fc_port_t *fcport) 626 { 627 int cnt; 628 unsigned long flags; 629 srb_t *sp; 630 scsi_qla_host_t *ha = fcport->ha; 631 scsi_qla_host_t *pha = to_qla_parent(ha); 632 633 spin_lock_irqsave(&pha->hardware_lock, flags); 634 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 635 sp = pha->outstanding_cmds[cnt]; 636 if (!sp) 637 continue; 638 if (sp->fcport != fcport) 639 continue; 640 641 spin_unlock_irqrestore(&pha->hardware_lock, flags); 642 if (ha->isp_ops->abort_command(ha, sp)) { 643 DEBUG2(qla_printk(KERN_WARNING, ha, 644 "Abort failed -- %lx\n", sp->cmd->serial_number)); 645 } else { 646 if (qla2x00_eh_wait_on_command(ha, sp->cmd) != 647 QLA_SUCCESS) 648 DEBUG2(qla_printk(KERN_WARNING, ha, 649 "Abort failed while waiting -- %lx\n", 650 sp->cmd->serial_number)); 651 652 } 653 spin_lock_irqsave(&pha->hardware_lock, flags); 654 } 655 spin_unlock_irqrestore(&pha->hardware_lock, flags); 656 } 657 658 static void 659 qla2x00_block_error_handler(struct scsi_cmnd *cmnd) 660 { 661 struct Scsi_Host *shost = cmnd->device->host; 662 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 663 unsigned long flags; 664 665 spin_lock_irqsave(shost->host_lock, flags); 666 while (rport->port_state == FC_PORTSTATE_BLOCKED) { 667 spin_unlock_irqrestore(shost->host_lock, flags); 668 msleep(1000); 669 spin_lock_irqsave(shost->host_lock, flags); 670 } 671 spin_unlock_irqrestore(shost->host_lock, flags); 672 return; 673 } 674 675 /************************************************************************** 676 * qla2xxx_eh_abort 677 * 678 * Description: 679 * The abort function will abort the specified command. 680 * 681 * Input: 682 * cmd = Linux SCSI command packet to be aborted. 683 * 684 * Returns: 685 * Either SUCCESS or FAILED. 686 * 687 * Note: 688 * Only return FAILED if command not returned by firmware. 689 **************************************************************************/ 690 static int 691 qla2xxx_eh_abort(struct scsi_cmnd *cmd) 692 { 693 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 694 srb_t *sp; 695 int ret, i; 696 unsigned int id, lun; 697 unsigned long serial; 698 unsigned long flags; 699 int wait = 0; 700 scsi_qla_host_t *pha = to_qla_parent(ha); 701 702 qla2x00_block_error_handler(cmd); 703 704 if (!CMD_SP(cmd)) 705 return SUCCESS; 706 707 ret = SUCCESS; 708 709 id = cmd->device->id; 710 lun = cmd->device->lun; 711 serial = cmd->serial_number; 712 713 /* Check the active list for the command. 
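The outstanding_cmds[] array is scanned under hardware_lock; the lock is dropped around the abort mailbox command and re-acquired before continuing.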
*/ 714 spin_lock_irqsave(&pha->hardware_lock, flags); 715 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) { 716 sp = pha->outstanding_cmds[i]; 717 718 if (sp == NULL) 719 continue; 720 721 if (sp->cmd != cmd) 722 continue; 723 724 DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n", 725 __func__, ha->host_no, sp, serial)); 726 727 spin_unlock_irqrestore(&pha->hardware_lock, flags); 728 if (ha->isp_ops->abort_command(ha, sp)) { 729 DEBUG2(printk("%s(%ld): abort_command " 730 "mbx failed.\n", __func__, ha->host_no)); 731 } else { 732 DEBUG3(printk("%s(%ld): abort_command " 733 "mbx success.\n", __func__, ha->host_no)); 734 wait = 1; 735 } 736 spin_lock_irqsave(&pha->hardware_lock, flags); 737 738 break; 739 } 740 spin_unlock_irqrestore(&pha->hardware_lock, flags); 741 742 /* Wait for the command to be returned. */ 743 if (wait) { 744 if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) { 745 qla_printk(KERN_ERR, ha, 746 "scsi(%ld:%d:%d): Abort handler timed out -- %lx " 747 "%x.\n", ha->host_no, id, lun, serial, ret); 748 ret = FAILED; 749 } 750 } 751 752 qla_printk(KERN_INFO, ha, 753 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n", 754 ha->host_no, id, lun, wait, serial, ret); 755 756 return ret; 757 } 758 759 enum nexus_wait_type { 760 WAIT_HOST = 0, 761 WAIT_TARGET, 762 WAIT_LUN, 763 }; 764 765 static int 766 qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t, 767 unsigned int l, enum nexus_wait_type type) 768 { 769 int cnt, match, status; 770 srb_t *sp; 771 unsigned long flags; 772 scsi_qla_host_t *pha = to_qla_parent(ha); 773 774 status = QLA_SUCCESS; 775 spin_lock_irqsave(&pha->hardware_lock, flags); 776 for (cnt = 1; status == QLA_SUCCESS && cnt < MAX_OUTSTANDING_COMMANDS; 777 cnt++) { 778 sp = pha->outstanding_cmds[cnt]; 779 if (!sp) 780 continue; 781 782 if (ha->vp_idx != sp->fcport->ha->vp_idx) 783 continue; 784 match = 0; 785 switch (type) { 786 case WAIT_HOST: 787 match = 1; 788 break; 789 case WAIT_TARGET: 790 match = sp->cmd->device->id == t; 791 break; 792 case WAIT_LUN: 793 match = (sp->cmd->device->id == t && 794 sp->cmd->device->lun == l); 795 break; 796 } 797 if (!match) 798 continue; 799 800 spin_unlock_irqrestore(&pha->hardware_lock, flags); 801 status = qla2x00_eh_wait_on_command(ha, sp->cmd); 802 spin_lock_irqsave(&pha->hardware_lock, flags); 803 } 804 spin_unlock_irqrestore(&pha->hardware_lock, flags); 805 806 return status; 807 } 808 809 static char *reset_errors[] = { 810 "HBA not online", 811 "HBA not ready", 812 "Task management failed", 813 "Waiting for command completions", 814 }; 815 816 static int 817 __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, 818 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int)) 819 { 820 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 821 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 822 int err; 823 824 qla2x00_block_error_handler(cmd); 825 826 if (!fcport) 827 return FAILED; 828 829 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET ISSUED.\n", 830 ha->host_no, cmd->device->id, cmd->device->lun, name); 831 832 err = 0; 833 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 834 goto eh_reset_failed; 835 err = 1; 836 if (qla2x00_wait_for_loop_ready(ha) != QLA_SUCCESS) 837 goto eh_reset_failed; 838 err = 2; 839 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS) 840 goto eh_reset_failed; 841 err = 3; 842 if (qla2x00_eh_wait_for_pending_commands(ha, cmd->device->id, 843 cmd->device->lun, type) != QLA_SUCCESS) 844 goto 
eh_reset_failed; 845 846 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n", 847 ha->host_no, cmd->device->id, cmd->device->lun, name); 848 849 return SUCCESS; 850 851 eh_reset_failed: 852 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n", 853 ha->host_no, cmd->device->id, cmd->device->lun, name, 854 reset_errors[err]); 855 return FAILED; 856 } 857 858 static int 859 qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) 860 { 861 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 862 863 return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd, 864 ha->isp_ops->lun_reset); 865 } 866 867 static int 868 qla2xxx_eh_target_reset(struct scsi_cmnd *cmd) 869 { 870 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 871 872 return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd, 873 ha->isp_ops->target_reset); 874 } 875 876 /************************************************************************** 877 * qla2xxx_eh_bus_reset 878 * 879 * Description: 880 * The bus reset function will reset the bus and abort any executing 881 * commands. 882 * 883 * Input: 884 * cmd = Linux SCSI command packet of the command that caused the 885 * bus reset. 886 * 887 * Returns: 888 * SUCCESS/FAILURE (defined as macro in scsi.h). 889 * 890 **************************************************************************/ 891 static int 892 qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) 893 { 894 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 895 scsi_qla_host_t *pha = to_qla_parent(ha); 896 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 897 int ret = FAILED; 898 unsigned int id, lun; 899 unsigned long serial; 900 901 qla2x00_block_error_handler(cmd); 902 903 id = cmd->device->id; 904 lun = cmd->device->lun; 905 serial = cmd->serial_number; 906 907 if (!fcport) 908 return ret; 909 910 qla_printk(KERN_INFO, ha, 911 "scsi(%ld:%d:%d): LOOP RESET ISSUED.\n", ha->host_no, id, lun); 912 913 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) { 914 DEBUG2(printk("%s failed: board disabled\n", __func__)); 915 goto eh_bus_reset_done; 916 } 917 918 if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) { 919 if (qla2x00_loop_reset(ha) == QLA_SUCCESS) 920 ret = SUCCESS; 921 } 922 if (ret == FAILED) 923 goto eh_bus_reset_done; 924 925 /* Flush outstanding commands. */ 926 if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) != 927 QLA_SUCCESS) 928 ret = FAILED; 929 930 eh_bus_reset_done: 931 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 932 (ret == FAILED) ? "failed" : "succeeded"); 933 934 return ret; 935 } 936 937 /************************************************************************** 938 * qla2xxx_eh_host_reset 939 * 940 * Description: 941 * The reset function will reset the Adapter. 942 * 943 * Input: 944 * cmd = Linux SCSI command packet of the command that caused the 945 * adapter reset. 946 * 947 * Returns: 948 * Either SUCCESS or FAILED. 
949 * 950 * Note: 951 **************************************************************************/ 952 static int 953 qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) 954 { 955 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 956 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 957 int ret = FAILED; 958 unsigned int id, lun; 959 unsigned long serial; 960 scsi_qla_host_t *pha = to_qla_parent(ha); 961 962 qla2x00_block_error_handler(cmd); 963 964 id = cmd->device->id; 965 lun = cmd->device->lun; 966 serial = cmd->serial_number; 967 968 if (!fcport) 969 return ret; 970 971 qla_printk(KERN_INFO, ha, 972 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no, id, lun); 973 974 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 975 goto eh_host_reset_lock; 976 977 /* 978 * FIXME: the dpc thread may be active and processing 979 * loop_resync, so wait a while for it to 980 * complete before issuing the big hammer. Otherwise 981 * the big hammer may cause I/O failures, since it marks 982 * devices as lost and kicks off the port_down_timer 983 * while dpc is stuck waiting for the mailbox to complete. 984 */ 985 qla2x00_wait_for_loop_ready(ha); 986 set_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags); 987 if (qla2x00_abort_isp(pha)) { 988 clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags); 989 /* Failed - schedule the dpc to retry. */ 990 set_bit(ISP_ABORT_NEEDED, &pha->dpc_flags); 991 992 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 993 goto eh_host_reset_lock; 994 } 995 clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags); 996 997 /* Wait for our commands to be returned to the OS. */ 998 if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) == 999 QLA_SUCCESS) 1000 ret = SUCCESS; 1001 1002 if (ha->parent) 1003 qla2x00_vp_abort_isp(ha); 1004 1005 eh_host_reset_lock: 1006 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 1007 (ret == FAILED) ? "failed" : "succeeded"); 1008 1009 return ret; 1010 } 1011 1012 /* 1013 * qla2x00_loop_reset 1014 * Issue loop reset. 1015 * 1016 * Input: 1017 * ha = adapter block pointer. 
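* Note: as selected by the enable_lip_full_login, enable_lip_reset and enable_target_reset flags, this performs a full-login LIP, a LIP reset and per-target resets, in that order.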
1018 * 1019 * Returns: 1020 * 0 = success 1021 */ 1022 int 1023 qla2x00_loop_reset(scsi_qla_host_t *ha) 1024 { 1025 int ret; 1026 struct fc_port *fcport; 1027 1028 if (ha->flags.enable_lip_full_login) { 1029 ret = qla2x00_full_login_lip(ha); 1030 if (ret != QLA_SUCCESS) { 1031 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1032 "full_login_lip=%d.\n", __func__, ha->host_no, 1033 ret)); 1034 } 1035 atomic_set(&ha->loop_state, LOOP_DOWN); 1036 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 1037 qla2x00_mark_all_devices_lost(ha, 0); 1038 qla2x00_wait_for_loop_ready(ha); 1039 } 1040 1041 if (ha->flags.enable_lip_reset) { 1042 ret = qla2x00_lip_reset(ha); 1043 if (ret != QLA_SUCCESS) { 1044 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1045 "lip_reset=%d.\n", __func__, ha->host_no, ret)); 1046 } 1047 qla2x00_wait_for_loop_ready(ha); 1048 } 1049 1050 if (ha->flags.enable_target_reset) { 1051 list_for_each_entry(fcport, &ha->fcports, list) { 1052 if (fcport->port_type != FCT_TARGET) 1053 continue; 1054 1055 ret = ha->isp_ops->target_reset(fcport, 0); 1056 if (ret != QLA_SUCCESS) { 1057 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1058 "target_reset=%d d_id=%x.\n", __func__, 1059 ha->host_no, ret, fcport->d_id.b24)); 1060 } 1061 } 1062 } 1063 1064 /* Issue marker command only when we are going to start the I/O */ 1065 ha->marker_needed = 1; 1066 1067 return QLA_SUCCESS; 1068 } 1069 1070 void 1071 qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res) 1072 { 1073 int cnt; 1074 unsigned long flags; 1075 srb_t *sp; 1076 1077 spin_lock_irqsave(&ha->hardware_lock, flags); 1078 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 1079 sp = ha->outstanding_cmds[cnt]; 1080 if (sp) { 1081 ha->outstanding_cmds[cnt] = NULL; 1082 sp->cmd->result = res; 1083 qla2x00_sp_compl(ha, sp); 1084 } 1085 } 1086 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1087 } 1088 1089 static int 1090 qla2xxx_slave_alloc(struct scsi_device *sdev) 1091 { 1092 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 1093 1094 if (!rport || fc_remote_port_chkready(rport)) 1095 return -ENXIO; 1096 1097 sdev->hostdata = *(fc_port_t **)rport->dd_data; 1098 1099 return 0; 1100 } 1101 1102 static int 1103 qla2xxx_slave_configure(struct scsi_device *sdev) 1104 { 1105 scsi_qla_host_t *ha = shost_priv(sdev->host); 1106 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1107 1108 if (sdev->tagged_supported) 1109 scsi_activate_tcq(sdev, ha->max_q_depth); 1110 else 1111 scsi_deactivate_tcq(sdev, ha->max_q_depth); 1112 1113 rport->dev_loss_tmo = ha->port_down_retry_count; 1114 1115 return 0; 1116 } 1117 1118 static void 1119 qla2xxx_slave_destroy(struct scsi_device *sdev) 1120 { 1121 sdev->hostdata = NULL; 1122 } 1123 1124 static int 1125 qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth) 1126 { 1127 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 1128 return sdev->queue_depth; 1129 } 1130 1131 static int 1132 qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type) 1133 { 1134 if (sdev->tagged_supported) { 1135 scsi_set_tag_type(sdev, tag_type); 1136 if (tag_type) 1137 scsi_activate_tcq(sdev, sdev->queue_depth); 1138 else 1139 scsi_deactivate_tcq(sdev, sdev->queue_depth); 1140 } else 1141 tag_type = 0; 1142 1143 return tag_type; 1144 } 1145 1146 /** 1147 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method. 1148 * @ha: HA context 1149 * 1150 * At exit, the @ha's flags.enable_64bit_addressing set to indicated 1151 * supported addressing method. 
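* A 64-bit mask is kept only when the platform actually requires upper-dword addressing and a matching consistent mask can be set; otherwise the driver falls back to 32-bit DMA masks.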
1152 */ 1153 static void 1154 qla2x00_config_dma_addressing(scsi_qla_host_t *ha) 1155 { 1156 /* Assume a 32bit DMA mask. */ 1157 ha->flags.enable_64bit_addressing = 0; 1158 1159 if (!dma_set_mask(&ha->pdev->dev, DMA_64BIT_MASK)) { 1160 /* Any upper-dword bits set? */ 1161 if (MSD(dma_get_required_mask(&ha->pdev->dev)) && 1162 !pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) { 1163 /* Ok, a 64bit DMA mask is applicable. */ 1164 ha->flags.enable_64bit_addressing = 1; 1165 ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64; 1166 ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64; 1167 return; 1168 } 1169 } 1170 1171 dma_set_mask(&ha->pdev->dev, DMA_32BIT_MASK); 1172 pci_set_consistent_dma_mask(ha->pdev, DMA_32BIT_MASK); 1173 } 1174 1175 static void 1176 qla2x00_enable_intrs(scsi_qla_host_t *ha) 1177 { 1178 unsigned long flags = 0; 1179 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1180 1181 spin_lock_irqsave(&ha->hardware_lock, flags); 1182 ha->interrupts_on = 1; 1183 /* enable risc and host interrupts */ 1184 WRT_REG_WORD(®->ictrl, ICR_EN_INT | ICR_EN_RISC); 1185 RD_REG_WORD(®->ictrl); 1186 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1187 1188 } 1189 1190 static void 1191 qla2x00_disable_intrs(scsi_qla_host_t *ha) 1192 { 1193 unsigned long flags = 0; 1194 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1195 1196 spin_lock_irqsave(&ha->hardware_lock, flags); 1197 ha->interrupts_on = 0; 1198 /* disable risc and host interrupts */ 1199 WRT_REG_WORD(®->ictrl, 0); 1200 RD_REG_WORD(®->ictrl); 1201 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1202 } 1203 1204 static void 1205 qla24xx_enable_intrs(scsi_qla_host_t *ha) 1206 { 1207 unsigned long flags = 0; 1208 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1209 1210 spin_lock_irqsave(&ha->hardware_lock, flags); 1211 ha->interrupts_on = 1; 1212 WRT_REG_DWORD(®->ictrl, ICRX_EN_RISC_INT); 1213 RD_REG_DWORD(®->ictrl); 1214 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1215 } 1216 1217 static void 1218 qla24xx_disable_intrs(scsi_qla_host_t *ha) 1219 { 1220 unsigned long flags = 0; 1221 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1222 1223 spin_lock_irqsave(&ha->hardware_lock, flags); 1224 ha->interrupts_on = 0; 1225 WRT_REG_DWORD(®->ictrl, 0); 1226 RD_REG_DWORD(®->ictrl); 1227 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1228 } 1229 1230 static struct isp_operations qla2100_isp_ops = { 1231 .pci_config = qla2100_pci_config, 1232 .reset_chip = qla2x00_reset_chip, 1233 .chip_diag = qla2x00_chip_diag, 1234 .config_rings = qla2x00_config_rings, 1235 .reset_adapter = qla2x00_reset_adapter, 1236 .nvram_config = qla2x00_nvram_config, 1237 .update_fw_options = qla2x00_update_fw_options, 1238 .load_risc = qla2x00_load_risc, 1239 .pci_info_str = qla2x00_pci_info_str, 1240 .fw_version_str = qla2x00_fw_version_str, 1241 .intr_handler = qla2100_intr_handler, 1242 .enable_intrs = qla2x00_enable_intrs, 1243 .disable_intrs = qla2x00_disable_intrs, 1244 .abort_command = qla2x00_abort_command, 1245 .target_reset = qla2x00_abort_target, 1246 .lun_reset = qla2x00_lun_reset, 1247 .fabric_login = qla2x00_login_fabric, 1248 .fabric_logout = qla2x00_fabric_logout, 1249 .calc_req_entries = qla2x00_calc_iocbs_32, 1250 .build_iocbs = qla2x00_build_scsi_iocbs_32, 1251 .prep_ms_iocb = qla2x00_prep_ms_iocb, 1252 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, 1253 .read_nvram = qla2x00_read_nvram_data, 1254 .write_nvram = qla2x00_write_nvram_data, 1255 .fw_dump = qla2100_fw_dump, 1256 .beacon_on = NULL, 
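/* ISP2100-class boards have no beacon (LED) support, hence the NULL handlers. */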
1257 .beacon_off = NULL, 1258 .beacon_blink = NULL, 1259 .read_optrom = qla2x00_read_optrom_data, 1260 .write_optrom = qla2x00_write_optrom_data, 1261 .get_flash_version = qla2x00_get_flash_version, 1262 }; 1263 1264 static struct isp_operations qla2300_isp_ops = { 1265 .pci_config = qla2300_pci_config, 1266 .reset_chip = qla2x00_reset_chip, 1267 .chip_diag = qla2x00_chip_diag, 1268 .config_rings = qla2x00_config_rings, 1269 .reset_adapter = qla2x00_reset_adapter, 1270 .nvram_config = qla2x00_nvram_config, 1271 .update_fw_options = qla2x00_update_fw_options, 1272 .load_risc = qla2x00_load_risc, 1273 .pci_info_str = qla2x00_pci_info_str, 1274 .fw_version_str = qla2x00_fw_version_str, 1275 .intr_handler = qla2300_intr_handler, 1276 .enable_intrs = qla2x00_enable_intrs, 1277 .disable_intrs = qla2x00_disable_intrs, 1278 .abort_command = qla2x00_abort_command, 1279 .target_reset = qla2x00_abort_target, 1280 .lun_reset = qla2x00_lun_reset, 1281 .fabric_login = qla2x00_login_fabric, 1282 .fabric_logout = qla2x00_fabric_logout, 1283 .calc_req_entries = qla2x00_calc_iocbs_32, 1284 .build_iocbs = qla2x00_build_scsi_iocbs_32, 1285 .prep_ms_iocb = qla2x00_prep_ms_iocb, 1286 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, 1287 .read_nvram = qla2x00_read_nvram_data, 1288 .write_nvram = qla2x00_write_nvram_data, 1289 .fw_dump = qla2300_fw_dump, 1290 .beacon_on = qla2x00_beacon_on, 1291 .beacon_off = qla2x00_beacon_off, 1292 .beacon_blink = qla2x00_beacon_blink, 1293 .read_optrom = qla2x00_read_optrom_data, 1294 .write_optrom = qla2x00_write_optrom_data, 1295 .get_flash_version = qla2x00_get_flash_version, 1296 }; 1297 1298 static struct isp_operations qla24xx_isp_ops = { 1299 .pci_config = qla24xx_pci_config, 1300 .reset_chip = qla24xx_reset_chip, 1301 .chip_diag = qla24xx_chip_diag, 1302 .config_rings = qla24xx_config_rings, 1303 .reset_adapter = qla24xx_reset_adapter, 1304 .nvram_config = qla24xx_nvram_config, 1305 .update_fw_options = qla24xx_update_fw_options, 1306 .load_risc = qla24xx_load_risc, 1307 .pci_info_str = qla24xx_pci_info_str, 1308 .fw_version_str = qla24xx_fw_version_str, 1309 .intr_handler = qla24xx_intr_handler, 1310 .enable_intrs = qla24xx_enable_intrs, 1311 .disable_intrs = qla24xx_disable_intrs, 1312 .abort_command = qla24xx_abort_command, 1313 .target_reset = qla24xx_abort_target, 1314 .lun_reset = qla24xx_lun_reset, 1315 .fabric_login = qla24xx_login_fabric, 1316 .fabric_logout = qla24xx_fabric_logout, 1317 .calc_req_entries = NULL, 1318 .build_iocbs = NULL, 1319 .prep_ms_iocb = qla24xx_prep_ms_iocb, 1320 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 1321 .read_nvram = qla24xx_read_nvram_data, 1322 .write_nvram = qla24xx_write_nvram_data, 1323 .fw_dump = qla24xx_fw_dump, 1324 .beacon_on = qla24xx_beacon_on, 1325 .beacon_off = qla24xx_beacon_off, 1326 .beacon_blink = qla24xx_beacon_blink, 1327 .read_optrom = qla24xx_read_optrom_data, 1328 .write_optrom = qla24xx_write_optrom_data, 1329 .get_flash_version = qla24xx_get_flash_version, 1330 }; 1331 1332 static struct isp_operations qla25xx_isp_ops = { 1333 .pci_config = qla25xx_pci_config, 1334 .reset_chip = qla24xx_reset_chip, 1335 .chip_diag = qla24xx_chip_diag, 1336 .config_rings = qla24xx_config_rings, 1337 .reset_adapter = qla24xx_reset_adapter, 1338 .nvram_config = qla24xx_nvram_config, 1339 .update_fw_options = qla24xx_update_fw_options, 1340 .load_risc = qla24xx_load_risc, 1341 .pci_info_str = qla24xx_pci_info_str, 1342 .fw_version_str = qla24xx_fw_version_str, 1343 .intr_handler = qla24xx_intr_handler, 1344 .enable_intrs = 
qla24xx_enable_intrs, 1345 .disable_intrs = qla24xx_disable_intrs, 1346 .abort_command = qla24xx_abort_command, 1347 .target_reset = qla24xx_abort_target, 1348 .lun_reset = qla24xx_lun_reset, 1349 .fabric_login = qla24xx_login_fabric, 1350 .fabric_logout = qla24xx_fabric_logout, 1351 .calc_req_entries = NULL, 1352 .build_iocbs = NULL, 1353 .prep_ms_iocb = qla24xx_prep_ms_iocb, 1354 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 1355 .read_nvram = qla25xx_read_nvram_data, 1356 .write_nvram = qla25xx_write_nvram_data, 1357 .fw_dump = qla25xx_fw_dump, 1358 .beacon_on = qla24xx_beacon_on, 1359 .beacon_off = qla24xx_beacon_off, 1360 .beacon_blink = qla24xx_beacon_blink, 1361 .read_optrom = qla25xx_read_optrom_data, 1362 .write_optrom = qla24xx_write_optrom_data, 1363 .get_flash_version = qla24xx_get_flash_version, 1364 }; 1365 1366 static inline void 1367 qla2x00_set_isp_flags(scsi_qla_host_t *ha) 1368 { 1369 ha->device_type = DT_EXTENDED_IDS; 1370 switch (ha->pdev->device) { 1371 case PCI_DEVICE_ID_QLOGIC_ISP2100: 1372 ha->device_type |= DT_ISP2100; 1373 ha->device_type &= ~DT_EXTENDED_IDS; 1374 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 1375 break; 1376 case PCI_DEVICE_ID_QLOGIC_ISP2200: 1377 ha->device_type |= DT_ISP2200; 1378 ha->device_type &= ~DT_EXTENDED_IDS; 1379 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 1380 break; 1381 case PCI_DEVICE_ID_QLOGIC_ISP2300: 1382 ha->device_type |= DT_ISP2300; 1383 ha->device_type |= DT_ZIO_SUPPORTED; 1384 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 1385 break; 1386 case PCI_DEVICE_ID_QLOGIC_ISP2312: 1387 ha->device_type |= DT_ISP2312; 1388 ha->device_type |= DT_ZIO_SUPPORTED; 1389 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 1390 break; 1391 case PCI_DEVICE_ID_QLOGIC_ISP2322: 1392 ha->device_type |= DT_ISP2322; 1393 ha->device_type |= DT_ZIO_SUPPORTED; 1394 if (ha->pdev->subsystem_vendor == 0x1028 && 1395 ha->pdev->subsystem_device == 0x0170) 1396 ha->device_type |= DT_OEM_001; 1397 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 1398 break; 1399 case PCI_DEVICE_ID_QLOGIC_ISP6312: 1400 ha->device_type |= DT_ISP6312; 1401 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 1402 break; 1403 case PCI_DEVICE_ID_QLOGIC_ISP6322: 1404 ha->device_type |= DT_ISP6322; 1405 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 1406 break; 1407 case PCI_DEVICE_ID_QLOGIC_ISP2422: 1408 ha->device_type |= DT_ISP2422; 1409 ha->device_type |= DT_ZIO_SUPPORTED; 1410 ha->device_type |= DT_FWI2; 1411 ha->device_type |= DT_IIDMA; 1412 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1413 break; 1414 case PCI_DEVICE_ID_QLOGIC_ISP2432: 1415 ha->device_type |= DT_ISP2432; 1416 ha->device_type |= DT_ZIO_SUPPORTED; 1417 ha->device_type |= DT_FWI2; 1418 ha->device_type |= DT_IIDMA; 1419 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1420 break; 1421 case PCI_DEVICE_ID_QLOGIC_ISP8432: 1422 ha->device_type |= DT_ISP8432; 1423 ha->device_type |= DT_ZIO_SUPPORTED; 1424 ha->device_type |= DT_FWI2; 1425 ha->device_type |= DT_IIDMA; 1426 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1427 break; 1428 case PCI_DEVICE_ID_QLOGIC_ISP5422: 1429 ha->device_type |= DT_ISP5422; 1430 ha->device_type |= DT_FWI2; 1431 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1432 break; 1433 case PCI_DEVICE_ID_QLOGIC_ISP5432: 1434 ha->device_type |= DT_ISP5432; 1435 ha->device_type |= DT_FWI2; 1436 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1437 break; 1438 case PCI_DEVICE_ID_QLOGIC_ISP2532: 1439 ha->device_type |= DT_ISP2532; 1440 ha->device_type |= DT_ZIO_SUPPORTED; 1441 ha->device_type |= 
DT_FWI2; 1442 ha->device_type |= DT_IIDMA; 1443 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1444 break; 1445 } 1446 } 1447 1448 static int 1449 qla2x00_iospace_config(scsi_qla_host_t *ha) 1450 { 1451 resource_size_t pio; 1452 1453 if (pci_request_selected_regions(ha->pdev, ha->bars, 1454 QLA2XXX_DRIVER_NAME)) { 1455 qla_printk(KERN_WARNING, ha, 1456 "Failed to reserve PIO/MMIO regions (%s)\n", 1457 pci_name(ha->pdev)); 1458 1459 goto iospace_error_exit; 1460 } 1461 if (!(ha->bars & 1)) 1462 goto skip_pio; 1463 1464 /* We only need PIO for Flash operations on ISP2312 v2 chips. */ 1465 pio = pci_resource_start(ha->pdev, 0); 1466 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { 1467 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { 1468 qla_printk(KERN_WARNING, ha, 1469 "Invalid PCI I/O region size (%s)...\n", 1470 pci_name(ha->pdev)); 1471 pio = 0; 1472 } 1473 } else { 1474 qla_printk(KERN_WARNING, ha, 1475 "region #0 not a PIO resource (%s)...\n", 1476 pci_name(ha->pdev)); 1477 pio = 0; 1478 } 1479 ha->pio_address = pio; 1480 1481 skip_pio: 1482 /* Use MMIO operations for all accesses. */ 1483 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) { 1484 qla_printk(KERN_ERR, ha, 1485 "region #1 not an MMIO resource (%s), aborting\n", 1486 pci_name(ha->pdev)); 1487 goto iospace_error_exit; 1488 } 1489 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) { 1490 qla_printk(KERN_ERR, ha, 1491 "Invalid PCI mem region size (%s), aborting\n", 1492 pci_name(ha->pdev)); 1493 goto iospace_error_exit; 1494 } 1495 1496 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN); 1497 if (!ha->iobase) { 1498 qla_printk(KERN_ERR, ha, 1499 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); 1500 1501 goto iospace_error_exit; 1502 } 1503 1504 return (0); 1505 1506 iospace_error_exit: 1507 return (-ENOMEM); 1508 } 1509 1510 static void 1511 qla2xxx_scan_start(struct Scsi_Host *shost) 1512 { 1513 scsi_qla_host_t *ha = shost_priv(shost); 1514 1515 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 1516 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 1517 set_bit(RSCN_UPDATE, &ha->dpc_flags); 1518 set_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags); 1519 } 1520 1521 static int 1522 qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) 1523 { 1524 scsi_qla_host_t *ha = shost_priv(shost); 1525 1526 if (!ha->host) 1527 return 1; 1528 if (time > ha->loop_reset_delay * HZ) 1529 return 1; 1530 1531 return atomic_read(&ha->loop_state) == LOOP_READY; 1532 } 1533 1534 /* 1535 * PCI driver interface 1536 */ 1537 static int __devinit 1538 qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 1539 { 1540 int ret = -ENODEV; 1541 struct Scsi_Host *host; 1542 scsi_qla_host_t *ha; 1543 char pci_info[30]; 1544 char fw_str[30]; 1545 struct scsi_host_template *sht; 1546 int bars, mem_only = 0; 1547 1548 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 1549 sht = &qla2x00_driver_template; 1550 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || 1551 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || 1552 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 || 1553 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || 1554 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || 1555 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532) { 1556 bars = pci_select_bars(pdev, IORESOURCE_MEM); 1557 sht = &qla24xx_driver_template; 1558 mem_only = 1; 1559 } 1560 1561 if (mem_only) { 1562 if (pci_enable_device_mem(pdev)) 1563 goto probe_out; 1564 } else { 1565 if (pci_enable_device(pdev)) 1566 goto probe_out; 1567 
} 1568 1569 /* This may fail but that's ok */ 1570 pci_enable_pcie_error_reporting(pdev); 1571 1572 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 1573 if (host == NULL) { 1574 printk(KERN_WARNING 1575 "qla2xxx: Couldn't allocate host from scsi layer!\n"); 1576 goto probe_disable_device; 1577 } 1578 1579 /* Clear our data area */ 1580 ha = shost_priv(host); 1581 memset(ha, 0, sizeof(scsi_qla_host_t)); 1582 1583 ha->pdev = pdev; 1584 ha->host = host; 1585 ha->host_no = host->host_no; 1586 sprintf(ha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, ha->host_no); 1587 ha->parent = NULL; 1588 ha->bars = bars; 1589 ha->mem_only = mem_only; 1590 spin_lock_init(&ha->hardware_lock); 1591 1592 /* Set ISP-type information. */ 1593 qla2x00_set_isp_flags(ha); 1594 1595 /* Configure PCI I/O space */ 1596 ret = qla2x00_iospace_config(ha); 1597 if (ret) 1598 goto probe_failed; 1599 1600 qla_printk(KERN_INFO, ha, 1601 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, 1602 ha->iobase); 1603 1604 ha->prev_topology = 0; 1605 ha->init_cb_size = sizeof(init_cb_t); 1606 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx; 1607 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1608 ha->optrom_size = OPTROM_SIZE_2300; 1609 1610 ha->max_q_depth = MAX_Q_DEPTH; 1611 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU) 1612 ha->max_q_depth = ql2xmaxqdepth; 1613 1614 /* Assign ISP specific operations. */ 1615 if (IS_QLA2100(ha)) { 1616 host->max_id = MAX_TARGETS_2100; 1617 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 1618 ha->request_q_length = REQUEST_ENTRY_CNT_2100; 1619 ha->response_q_length = RESPONSE_ENTRY_CNT_2100; 1620 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1621 host->sg_tablesize = 32; 1622 ha->gid_list_info_size = 4; 1623 ha->isp_ops = &qla2100_isp_ops; 1624 } else if (IS_QLA2200(ha)) { 1625 host->max_id = MAX_TARGETS_2200; 1626 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1627 ha->request_q_length = REQUEST_ENTRY_CNT_2200; 1628 ha->response_q_length = RESPONSE_ENTRY_CNT_2100; 1629 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1630 ha->gid_list_info_size = 4; 1631 ha->isp_ops = &qla2100_isp_ops; 1632 } else if (IS_QLA23XX(ha)) { 1633 host->max_id = MAX_TARGETS_2200; 1634 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1635 ha->request_q_length = REQUEST_ENTRY_CNT_2200; 1636 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1637 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1638 ha->gid_list_info_size = 6; 1639 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 1640 ha->optrom_size = OPTROM_SIZE_2322; 1641 ha->isp_ops = &qla2300_isp_ops; 1642 } else if (IS_QLA24XX_TYPE(ha)) { 1643 host->max_id = MAX_TARGETS_2200; 1644 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1645 ha->request_q_length = REQUEST_ENTRY_CNT_24XX; 1646 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1647 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1648 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1649 ha->mgmt_svr_loop_id = 10 + ha->vp_idx; 1650 ha->gid_list_info_size = 8; 1651 ha->optrom_size = OPTROM_SIZE_24XX; 1652 ha->isp_ops = &qla24xx_isp_ops; 1653 } else if (IS_QLA25XX(ha)) { 1654 host->max_id = MAX_TARGETS_2200; 1655 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1656 ha->request_q_length = REQUEST_ENTRY_CNT_24XX; 1657 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1658 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1659 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1660 ha->mgmt_svr_loop_id = 10 + ha->vp_idx; 1661 ha->gid_list_info_size = 8; 1662 ha->optrom_size = OPTROM_SIZE_25XX; 1663 ha->isp_ops = &qla25xx_isp_ops; 1664 } 1665 host->can_queue = 
ha->request_q_length + 128; 1666 1667 mutex_init(&ha->vport_lock); 1668 init_completion(&ha->mbx_cmd_comp); 1669 complete(&ha->mbx_cmd_comp); 1670 init_completion(&ha->mbx_intr_comp); 1671 1672 INIT_LIST_HEAD(&ha->list); 1673 INIT_LIST_HEAD(&ha->fcports); 1674 INIT_LIST_HEAD(&ha->vp_list); 1675 INIT_LIST_HEAD(&ha->work_list); 1676 1677 set_bit(0, (unsigned long *) ha->vp_idx_map); 1678 1679 qla2x00_config_dma_addressing(ha); 1680 if (qla2x00_mem_alloc(ha)) { 1681 qla_printk(KERN_WARNING, ha, 1682 "[ERROR] Failed to allocate memory for adapter\n"); 1683 1684 ret = -ENOMEM; 1685 goto probe_failed; 1686 } 1687 1688 if (qla2x00_initialize_adapter(ha)) { 1689 qla_printk(KERN_WARNING, ha, 1690 "Failed to initialize adapter\n"); 1691 1692 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - " 1693 "Adapter flags %x.\n", 1694 ha->host_no, ha->device_flags)); 1695 1696 ret = -ENODEV; 1697 goto probe_failed; 1698 } 1699 1700 /* 1701 * Startup the kernel thread for this host adapter 1702 */ 1703 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, 1704 "%s_dpc", ha->host_str); 1705 if (IS_ERR(ha->dpc_thread)) { 1706 qla_printk(KERN_WARNING, ha, 1707 "Unable to start DPC thread!\n"); 1708 ret = PTR_ERR(ha->dpc_thread); 1709 goto probe_failed; 1710 } 1711 1712 host->this_id = 255; 1713 host->cmd_per_lun = 3; 1714 host->unique_id = host->host_no; 1715 host->max_cmd_len = MAX_CMDSZ; 1716 host->max_channel = MAX_BUSES - 1; 1717 host->max_lun = MAX_LUNS; 1718 host->transportt = qla2xxx_transport_template; 1719 1720 ret = qla2x00_request_irqs(ha); 1721 if (ret) 1722 goto probe_failed; 1723 1724 /* Initialized the timer */ 1725 qla2x00_start_timer(ha, qla2x00_timer, WATCH_INTERVAL); 1726 1727 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 1728 ha->host_no, ha)); 1729 1730 pci_set_drvdata(pdev, ha); 1731 1732 ha->flags.init_done = 1; 1733 ha->flags.online = 1; 1734 1735 ret = scsi_add_host(host, &pdev->dev); 1736 if (ret) 1737 goto probe_failed; 1738 1739 ha->isp_ops->enable_intrs(ha); 1740 1741 scsi_scan_host(host); 1742 1743 qla2x00_alloc_sysfs_attr(ha); 1744 1745 qla2x00_init_host_attr(ha); 1746 1747 qla2x00_dfs_setup(ha); 1748 1749 qla_printk(KERN_INFO, ha, "\n" 1750 " QLogic Fibre Channel HBA Driver: %s\n" 1751 " QLogic %s - %s\n" 1752 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n", 1753 qla2x00_version_str, ha->model_number, 1754 ha->model_desc ? ha->model_desc: "", pdev->device, 1755 ha->isp_ops->pci_info_str(ha, pci_info), pci_name(pdev), 1756 ha->flags.enable_64bit_addressing ? 
'+': '-', ha->host_no, 1757 ha->isp_ops->fw_version_str(ha, fw_str)); 1758 1759 return 0; 1760 1761 probe_failed: 1762 qla2x00_free_device(ha); 1763 1764 scsi_host_put(host); 1765 1766 probe_disable_device: 1767 pci_disable_device(pdev); 1768 1769 probe_out: 1770 return ret; 1771 } 1772 1773 static void 1774 qla2x00_remove_one(struct pci_dev *pdev) 1775 { 1776 scsi_qla_host_t *ha, *vha, *temp; 1777 1778 ha = pci_get_drvdata(pdev); 1779 1780 list_for_each_entry_safe(vha, temp, &ha->vp_list, vp_list) 1781 fc_vport_terminate(vha->fc_vport); 1782 1783 set_bit(UNLOADING, &ha->dpc_flags); 1784 1785 qla2x00_dfs_remove(ha); 1786 1787 qla84xx_put_chip(ha); 1788 1789 qla2x00_free_sysfs_attr(ha); 1790 1791 fc_remove_host(ha->host); 1792 1793 scsi_remove_host(ha->host); 1794 1795 qla2x00_free_device(ha); 1796 1797 scsi_host_put(ha->host); 1798 1799 pci_disable_device(pdev); 1800 pci_set_drvdata(pdev, NULL); 1801 } 1802 1803 static void 1804 qla2x00_free_device(scsi_qla_host_t *ha) 1805 { 1806 qla2x00_abort_all_cmds(ha, DID_NO_CONNECT << 16); 1807 1808 /* Disable timer */ 1809 if (ha->timer_active) 1810 qla2x00_stop_timer(ha); 1811 1812 ha->flags.online = 0; 1813 1814 /* Kill the kernel thread for this host */ 1815 if (ha->dpc_thread) { 1816 struct task_struct *t = ha->dpc_thread; 1817 1818 /* 1819 * qla2xxx_wake_dpc checks for ->dpc_thread 1820 * so we need to zero it out. 1821 */ 1822 ha->dpc_thread = NULL; 1823 kthread_stop(t); 1824 } 1825 1826 if (ha->flags.fce_enabled) 1827 qla2x00_disable_fce_trace(ha, NULL, NULL); 1828 1829 if (ha->eft) 1830 qla2x00_disable_eft_trace(ha); 1831 1832 /* Stop currently executing firmware. */ 1833 qla2x00_try_to_stop_firmware(ha); 1834 1835 /* turn-off interrupts on the card */ 1836 if (ha->interrupts_on) 1837 ha->isp_ops->disable_intrs(ha); 1838 1839 qla2x00_mem_free(ha); 1840 1841 qla2x00_free_irqs(ha); 1842 1843 /* release io space registers */ 1844 if (ha->iobase) 1845 iounmap(ha->iobase); 1846 pci_release_selected_regions(ha->pdev, ha->bars); 1847 } 1848 1849 static inline void 1850 qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport, 1851 int defer) 1852 { 1853 struct fc_rport *rport; 1854 scsi_qla_host_t *pha = to_qla_parent(ha); 1855 1856 if (!fcport->rport) 1857 return; 1858 1859 rport = fcport->rport; 1860 if (defer) { 1861 spin_lock_irq(ha->host->host_lock); 1862 fcport->drport = rport; 1863 spin_unlock_irq(ha->host->host_lock); 1864 set_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags); 1865 qla2xxx_wake_dpc(pha); 1866 } else 1867 fc_remote_port_delete(rport); 1868 } 1869 1870 /* 1871 * qla2x00_mark_device_lost Updates fcport state when device goes offline. 1872 * 1873 * Input: ha = adapter block pointer. fcport = port structure pointer. 1874 * 1875 * Return: None. 1876 * 1877 * Context: 1878 */ 1879 void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport, 1880 int do_login, int defer) 1881 { 1882 if (atomic_read(&fcport->state) == FCS_ONLINE && 1883 ha->vp_idx == fcport->vp_idx) 1884 qla2x00_schedule_rport_del(ha, fcport, defer); 1885 1886 /* 1887 * We may need to retry the login, so don't change the state of the 1888 * port but do the retries. 
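* Only a port that is not already FCS_DEVICE_DEAD is moved to FCS_DEVICE_LOST below.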
1889 */ 1890 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD) 1891 atomic_set(&fcport->state, FCS_DEVICE_LOST); 1892 1893 if (!do_login) 1894 return; 1895 1896 if (fcport->login_retry == 0) { 1897 fcport->login_retry = ha->login_retry_count; 1898 set_bit(RELOGIN_NEEDED, &ha->dpc_flags); 1899 1900 DEBUG(printk("scsi(%ld): Port login retry: " 1901 "%02x%02x%02x%02x%02x%02x%02x%02x, " 1902 "id = 0x%04x retry cnt=%d\n", 1903 ha->host_no, 1904 fcport->port_name[0], 1905 fcport->port_name[1], 1906 fcport->port_name[2], 1907 fcport->port_name[3], 1908 fcport->port_name[4], 1909 fcport->port_name[5], 1910 fcport->port_name[6], 1911 fcport->port_name[7], 1912 fcport->loop_id, 1913 fcport->login_retry)); 1914 } 1915 } 1916 1917 /* 1918 * qla2x00_mark_all_devices_lost 1919 * Updates fcport state when device goes offline. 1920 * 1921 * Input: 1922 * ha = adapter block pointer. 1923 * fcport = port structure pointer. 1924 * 1925 * Return: 1926 * None. 1927 * 1928 * Context: 1929 */ 1930 void 1931 qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer) 1932 { 1933 fc_port_t *fcport; 1934 scsi_qla_host_t *pha = to_qla_parent(ha); 1935 1936 list_for_each_entry(fcport, &pha->fcports, list) { 1937 if (ha->vp_idx != fcport->vp_idx) 1938 continue; 1939 /* 1940 * No point in marking the device as lost, if the device is 1941 * already DEAD. 1942 */ 1943 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 1944 continue; 1945 if (atomic_read(&fcport->state) == FCS_ONLINE) 1946 qla2x00_schedule_rport_del(ha, fcport, defer); 1947 atomic_set(&fcport->state, FCS_DEVICE_LOST); 1948 } 1949 } 1950 1951 /* 1952 * qla2x00_mem_alloc 1953 * Allocates adapter memory. 1954 * 1955 * Returns: 1956 * 0 = success. 1957 * !0 = failure. 1958 */ 1959 static int 1960 qla2x00_mem_alloc(scsi_qla_host_t *ha) 1961 { 1962 char name[16]; 1963 1964 ha->request_ring = dma_alloc_coherent(&ha->pdev->dev, 1965 (ha->request_q_length + 1) * sizeof(request_t), &ha->request_dma, 1966 GFP_KERNEL); 1967 if (!ha->request_ring) 1968 goto fail; 1969 1970 ha->response_ring = dma_alloc_coherent(&ha->pdev->dev, 1971 (ha->response_q_length + 1) * sizeof(response_t), 1972 &ha->response_dma, GFP_KERNEL); 1973 if (!ha->response_ring) 1974 goto fail_free_request_ring; 1975 1976 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE, 1977 &ha->gid_list_dma, GFP_KERNEL); 1978 if (!ha->gid_list) 1979 goto fail_free_response_ring; 1980 1981 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, 1982 &ha->init_cb_dma, GFP_KERNEL); 1983 if (!ha->init_cb) 1984 goto fail_free_gid_list; 1985 1986 snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME, 1987 ha->host_no); 1988 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, 1989 DMA_POOL_SIZE, 8, 0); 1990 if (!ha->s_dma_pool) 1991 goto fail_free_init_cb; 1992 1993 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 1994 if (!ha->srb_mempool) 1995 goto fail_free_s_dma_pool; 1996 1997 /* Get memory for cached NVRAM */ 1998 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); 1999 if (!ha->nvram) 2000 goto fail_free_srb_mempool; 2001 2002 /* Allocate memory for SNS commands */ 2003 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 2004 /* Get consistent memory allocated for SNS commands */ 2005 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, 2006 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 2007 if (!ha->sns_cmd) 2008 goto fail_free_nvram; 2009 } else { 2010 /* Get consistent memory allocated for MS IOCB */ 2011 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, 
		    GFP_KERNEL, &ha->ms_iocb_dma);
		if (!ha->ms_iocb)
			goto fail_free_nvram;

		/* Get consistent memory allocated for CT SNS commands */
		ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
		    sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
		if (!ha->ct_sns)
			goto fail_free_ms_iocb;
	}

	return 0;

fail_free_ms_iocb:
	dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
	ha->ms_iocb = NULL;
	ha->ms_iocb_dma = 0;
fail_free_nvram:
	kfree(ha->nvram);
	ha->nvram = NULL;
fail_free_srb_mempool:
	mempool_destroy(ha->srb_mempool);
	ha->srb_mempool = NULL;
fail_free_s_dma_pool:
	dma_pool_destroy(ha->s_dma_pool);
	ha->s_dma_pool = NULL;
fail_free_init_cb:
	dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
	    ha->init_cb_dma);
	ha->init_cb = NULL;
	ha->init_cb_dma = 0;
fail_free_gid_list:
	dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
	    ha->gid_list_dma);
	ha->gid_list = NULL;
	ha->gid_list_dma = 0;
fail_free_response_ring:
	dma_free_coherent(&ha->pdev->dev, (ha->response_q_length + 1) *
	    sizeof(response_t), ha->response_ring, ha->response_dma);
	ha->response_ring = NULL;
	ha->response_dma = 0;
fail_free_request_ring:
	dma_free_coherent(&ha->pdev->dev, (ha->request_q_length + 1) *
	    sizeof(request_t), ha->request_ring, ha->request_dma);
	ha->request_ring = NULL;
	ha->request_dma = 0;
fail:
	return -ENOMEM;
}

/*
 * qla2x00_mem_free
 *	Frees all adapter allocated memory.
 *
 * Input:
 *	ha = adapter block pointer.
 */
static void
qla2x00_mem_free(scsi_qla_host_t *ha)
{
	struct list_head *fcpl, *fcptemp;
	fc_port_t *fcport;

	if (ha->srb_mempool)
		mempool_destroy(ha->srb_mempool);

	if (ha->fce)
		dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
		    ha->fce_dma);

	if (ha->fw_dump) {
		if (ha->eft)
			dma_free_coherent(&ha->pdev->dev,
			    ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
		vfree(ha->fw_dump);
	}

	if (ha->sns_cmd)
		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
		    ha->sns_cmd, ha->sns_cmd_dma);

	if (ha->ct_sns)
		dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
		    ha->ct_sns, ha->ct_sns_dma);

	if (ha->sfp_data)
		dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);

	if (ha->ms_iocb)
		dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);

	if (ha->s_dma_pool)
		dma_pool_destroy(ha->s_dma_pool);

	if (ha->init_cb)
		dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
		    ha->init_cb, ha->init_cb_dma);

	if (ha->gid_list)
		dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
		    ha->gid_list_dma);

	if (ha->response_ring)
		dma_free_coherent(&ha->pdev->dev,
		    (ha->response_q_length + 1) * sizeof(response_t),
		    ha->response_ring, ha->response_dma);

	if (ha->request_ring)
		dma_free_coherent(&ha->pdev->dev,
		    (ha->request_q_length + 1) * sizeof(request_t),
		    ha->request_ring, ha->request_dma);

	ha->srb_mempool = NULL;
	ha->eft = NULL;
	ha->eft_dma = 0;
	ha->sns_cmd = NULL;
	ha->sns_cmd_dma = 0;
	ha->ct_sns = NULL;
	ha->ct_sns_dma = 0;
	ha->ms_iocb = NULL;
	ha->ms_iocb_dma = 0;
	ha->init_cb = NULL;
	ha->init_cb_dma = 0;

	ha->s_dma_pool = NULL;

	ha->gid_list = NULL;
	ha->gid_list_dma = 0;

	ha->response_ring = NULL;
	ha->response_dma = 0;
	ha->request_ring = NULL;
	ha->request_dma = 0;

	list_for_each_safe(fcpl, fcptemp, &ha->fcports) {
		fcport = list_entry(fcpl, fc_port_t, list);

		/* fc ports */
		list_del_init(&fcport->list);
		kfree(fcport);
	}
	INIT_LIST_HEAD(&ha->fcports);

	ha->fw_dump = NULL;
	ha->fw_dumped = 0;
	ha->fw_dump_reading = 0;

	vfree(ha->optrom_buffer);
	kfree(ha->nvram);
}

static struct qla_work_evt *
qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
    int locked)
{
	struct qla_work_evt *e;

	e = kzalloc(sizeof(struct qla_work_evt), locked ? GFP_ATOMIC:
	    GFP_KERNEL);
	if (!e)
		return NULL;

	INIT_LIST_HEAD(&e->list);
	e->type = type;
	e->flags = QLA_EVT_FLAG_FREE;
	return e;
}

static int
qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked)
{
	unsigned long uninitialized_var(flags);
	scsi_qla_host_t *pha = to_qla_parent(ha);

	if (!locked)
		spin_lock_irqsave(&pha->hardware_lock, flags);
	list_add_tail(&e->list, &ha->work_list);
	qla2xxx_wake_dpc(ha);
	if (!locked)
		spin_unlock_irqrestore(&pha->hardware_lock, flags);
	return QLA_SUCCESS;
}

int
qla2x00_post_aen_work(struct scsi_qla_host *ha, enum fc_host_event_code code,
    u32 data)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(ha, QLA_EVT_AEN, 1);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.aen.code = code;
	e->u.aen.data = data;
	return qla2x00_post_work(ha, e, 1);
}

int
qla2x00_post_hwe_work(struct scsi_qla_host *ha, uint16_t code, uint16_t d1,
    uint16_t d2, uint16_t d3)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(ha, QLA_EVT_HWE_LOG, 1);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.hwe.code = code;
	e->u.hwe.d1 = d1;
	e->u.hwe.d2 = d2;
	e->u.hwe.d3 = d3;
	return qla2x00_post_work(ha, e, 1);
}

static void
qla2x00_do_work(struct scsi_qla_host *ha)
{
	struct qla_work_evt *e;
	scsi_qla_host_t *pha = to_qla_parent(ha);

	spin_lock_irq(&pha->hardware_lock);
	while (!list_empty(&ha->work_list)) {
		e = list_entry(ha->work_list.next, struct qla_work_evt, list);
		list_del_init(&e->list);
		spin_unlock_irq(&pha->hardware_lock);

		switch (e->type) {
		case QLA_EVT_AEN:
			fc_host_post_event(ha->host, fc_get_event_number(),
			    e->u.aen.code, e->u.aen.data);
			break;
		case QLA_EVT_HWE_LOG:
			qla2xxx_hw_event_log(ha, e->u.hwe.code, e->u.hwe.d1,
			    e->u.hwe.d2, e->u.hwe.d3);
			break;
		}
		if (e->flags & QLA_EVT_FLAG_FREE)
			kfree(e);
		spin_lock_irq(&pha->hardware_lock);
	}
	spin_unlock_irq(&pha->hardware_lock);
}

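/*
 * Deferred-work plumbing used above: qla2x00_alloc_work() builds an event
 * (GFP_ATOMIC when the caller already holds the hardware lock),
 * qla2x00_post_work() queues it on ha->work_list under the parent host's
 * hardware_lock and wakes the DPC thread, and qla2x00_do_work() drains the
 * list from DPC context, dropping the lock around each handler.
 */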
/**************************************************************************
* qla2x00_do_dpc
*   This kernel thread is a task that is scheduled by the interrupt handler
*   to perform the background processing for interrupts.
*
* Notes:
* This task always runs in the context of a kernel thread.  It
* is kicked off by the driver's detect code and starts up
* one per adapter. It immediately goes to sleep and waits for
* some fibre event.  When either the interrupt handler or
* the timer routine detects an event, it sets one of the task
* bits and then wakes us up.
**************************************************************************/
static int
qla2x00_do_dpc(void *data)
{
	int		rval;
	scsi_qla_host_t *ha;
	fc_port_t	*fcport;
	uint8_t		status;
	uint16_t	next_loopid;
	struct scsi_qla_host *vha;
	int		i;

	ha = (scsi_qla_host_t *)data;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		DEBUG3(printk("qla2x00: DPC handler sleeping\n"));

		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		__set_current_state(TASK_RUNNING);

		DEBUG3(printk("qla2x00: DPC handler waking up\n"));

		/* Initialization not yet finished. Don't do anything yet. */
		if (!ha->flags.init_done)
			continue;

		DEBUG3(printk("scsi(%ld): DPC handler\n", ha->host_no));

		ha->dpc_active = 1;

		if (ha->flags.mbox_busy) {
			ha->dpc_active = 0;
			continue;
		}

		qla2x00_do_work(ha);

		if (test_and_clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) {

			DEBUG(printk("scsi(%ld): dpc: sched "
			    "qla2x00_abort_isp ha = %p\n",
			    ha->host_no, ha));
			if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
			    &ha->dpc_flags))) {

				if (qla2x00_abort_isp(ha)) {
					/* failed. retry later */
					set_bit(ISP_ABORT_NEEDED,
					    &ha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
			}

			for_each_mapped_vp_idx(ha, i) {
				list_for_each_entry(vha, &ha->vp_list,
				    vp_list) {
					if (i == vha->vp_idx) {
						set_bit(ISP_ABORT_NEEDED,
						    &vha->dpc_flags);
						break;
					}
				}
			}

			DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
			    ha->host_no));
		}

		if (test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) {
			qla2x00_update_fcports(ha);
			clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
		}

		if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) &&
		    (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) {

			DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
			    ha->host_no));

			qla2x00_rst_aen(ha);
			clear_bit(RESET_ACTIVE, &ha->dpc_flags);
		}

		/* Retry each device up to login retry count */
		if ((test_and_clear_bit(RELOGIN_NEEDED, &ha->dpc_flags)) &&
		    !test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) &&
		    atomic_read(&ha->loop_state) != LOOP_DOWN) {

			DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
			    ha->host_no));

			next_loopid = 0;
			list_for_each_entry(fcport, &ha->fcports, list) {
				/*
				 * If the port is not ONLINE then try to login
				 * to it if we haven't run out of retries.
2368 */ 2369 if (atomic_read(&fcport->state) != FCS_ONLINE && 2370 fcport->login_retry) { 2371 2372 if (fcport->flags & FCF_FABRIC_DEVICE) { 2373 if (fcport->flags & 2374 FCF_TAPE_PRESENT) 2375 ha->isp_ops->fabric_logout( 2376 ha, fcport->loop_id, 2377 fcport->d_id.b.domain, 2378 fcport->d_id.b.area, 2379 fcport->d_id.b.al_pa); 2380 status = qla2x00_fabric_login( 2381 ha, fcport, &next_loopid); 2382 } else 2383 status = 2384 qla2x00_local_device_login( 2385 ha, fcport); 2386 2387 fcport->login_retry--; 2388 if (status == QLA_SUCCESS) { 2389 fcport->old_loop_id = fcport->loop_id; 2390 2391 DEBUG(printk("scsi(%ld): port login OK: logged in ID 0x%x\n", 2392 ha->host_no, fcport->loop_id)); 2393 2394 qla2x00_update_fcport(ha, 2395 fcport); 2396 } else if (status == 1) { 2397 set_bit(RELOGIN_NEEDED, &ha->dpc_flags); 2398 /* retry the login again */ 2399 DEBUG(printk("scsi(%ld): Retrying %d login again loop_id 0x%x\n", 2400 ha->host_no, 2401 fcport->login_retry, fcport->loop_id)); 2402 } else { 2403 fcport->login_retry = 0; 2404 } 2405 if (fcport->login_retry == 0 && status != QLA_SUCCESS) 2406 fcport->loop_id = FC_NO_LOOP_ID; 2407 } 2408 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) 2409 break; 2410 } 2411 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n", 2412 ha->host_no)); 2413 } 2414 2415 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2416 2417 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n", 2418 ha->host_no)); 2419 2420 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 2421 &ha->dpc_flags))) { 2422 2423 rval = qla2x00_loop_resync(ha); 2424 2425 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags); 2426 } 2427 2428 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n", 2429 ha->host_no)); 2430 } 2431 2432 if (test_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags) && 2433 atomic_read(&ha->loop_state) == LOOP_READY) { 2434 clear_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags); 2435 qla2xxx_flash_npiv_conf(ha); 2436 } 2437 2438 if (!ha->interrupts_on) 2439 ha->isp_ops->enable_intrs(ha); 2440 2441 if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags)) 2442 ha->isp_ops->beacon_blink(ha); 2443 2444 qla2x00_do_dpc_all_vps(ha); 2445 2446 ha->dpc_active = 0; 2447 } /* End of while(1) */ 2448 2449 DEBUG(printk("scsi(%ld): DPC handler exiting\n", ha->host_no)); 2450 2451 /* 2452 * Make sure that nobody tries to wake us up again. 2453 */ 2454 ha->dpc_active = 0; 2455 2456 return 0; 2457 } 2458 2459 void 2460 qla2xxx_wake_dpc(scsi_qla_host_t *ha) 2461 { 2462 struct task_struct *t = ha->dpc_thread; 2463 2464 if (!test_bit(UNLOADING, &ha->dpc_flags) && t) 2465 wake_up_process(t); 2466 } 2467 2468 /* 2469 * qla2x00_rst_aen 2470 * Processes asynchronous reset. 2471 * 2472 * Input: 2473 * ha = adapter block pointer. 2474 */ 2475 static void 2476 qla2x00_rst_aen(scsi_qla_host_t *ha) 2477 { 2478 if (ha->flags.online && !ha->flags.reset_active && 2479 !atomic_read(&ha->loop_down_timer) && 2480 !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) { 2481 do { 2482 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 2483 2484 /* 2485 * Issue marker command only when we are going to start 2486 * the I/O. 
2487 */ 2488 ha->marker_needed = 1; 2489 } while (!atomic_read(&ha->loop_down_timer) && 2490 (test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags))); 2491 } 2492 } 2493 2494 static void 2495 qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp) 2496 { 2497 struct scsi_cmnd *cmd = sp->cmd; 2498 2499 if (sp->flags & SRB_DMA_VALID) { 2500 scsi_dma_unmap(cmd); 2501 sp->flags &= ~SRB_DMA_VALID; 2502 } 2503 CMD_SP(cmd) = NULL; 2504 } 2505 2506 void 2507 qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp) 2508 { 2509 struct scsi_cmnd *cmd = sp->cmd; 2510 2511 qla2x00_sp_free_dma(ha, sp); 2512 2513 mempool_free(sp, ha->srb_mempool); 2514 2515 cmd->scsi_done(cmd); 2516 } 2517 2518 /************************************************************************** 2519 * qla2x00_timer 2520 * 2521 * Description: 2522 * One second timer 2523 * 2524 * Context: Interrupt 2525 ***************************************************************************/ 2526 void 2527 qla2x00_timer(scsi_qla_host_t *ha) 2528 { 2529 unsigned long cpu_flags = 0; 2530 fc_port_t *fcport; 2531 int start_dpc = 0; 2532 int index; 2533 srb_t *sp; 2534 int t; 2535 scsi_qla_host_t *pha = to_qla_parent(ha); 2536 2537 /* 2538 * Ports - Port down timer. 2539 * 2540 * Whenever, a port is in the LOST state we start decrementing its port 2541 * down timer every second until it reaches zero. Once it reaches zero 2542 * the port it marked DEAD. 2543 */ 2544 t = 0; 2545 list_for_each_entry(fcport, &ha->fcports, list) { 2546 if (fcport->port_type != FCT_TARGET) 2547 continue; 2548 2549 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { 2550 2551 if (atomic_read(&fcport->port_down_timer) == 0) 2552 continue; 2553 2554 if (atomic_dec_and_test(&fcport->port_down_timer) != 0) 2555 atomic_set(&fcport->state, FCS_DEVICE_DEAD); 2556 2557 DEBUG(printk("scsi(%ld): fcport-%d - port retry count: " 2558 "%d remaining\n", 2559 ha->host_no, 2560 t, atomic_read(&fcport->port_down_timer))); 2561 } 2562 t++; 2563 } /* End of for fcport */ 2564 2565 2566 /* Loop down handler. */ 2567 if (atomic_read(&ha->loop_down_timer) > 0 && 2568 !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) && ha->flags.online) { 2569 2570 if (atomic_read(&ha->loop_down_timer) == 2571 ha->loop_down_abort_time) { 2572 2573 DEBUG(printk("scsi(%ld): Loop Down - aborting the " 2574 "queues before time expire\n", 2575 ha->host_no)); 2576 2577 if (!IS_QLA2100(ha) && ha->link_down_timeout) 2578 atomic_set(&ha->loop_state, LOOP_DEAD); 2579 2580 /* Schedule an ISP abort to return any tape commands. 
			/* NPIV - scan physical port only */
			if (!ha->parent) {
				spin_lock_irqsave(&ha->hardware_lock,
				    cpu_flags);
				for (index = 1;
				    index < MAX_OUTSTANDING_COMMANDS;
				    index++) {
					fc_port_t *sfcp;

					sp = ha->outstanding_cmds[index];
					if (!sp)
						continue;
					sfcp = sp->fcport;
					if (!(sfcp->flags & FCF_TAPE_PRESENT))
						continue;

					set_bit(ISP_ABORT_NEEDED,
					    &ha->dpc_flags);
					break;
				}
				spin_unlock_irqrestore(&ha->hardware_lock,
				    cpu_flags);
			}
			set_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags);
			start_dpc++;
		}

		/* if the loop has been down for 4 minutes, reinit adapter */
		if (atomic_dec_and_test(&ha->loop_down_timer) != 0) {
			DEBUG(printk("scsi(%ld): Loop down exceed 4 mins - "
			    "restarting queues.\n",
			    ha->host_no));

			set_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags);
			start_dpc++;

			if (!(ha->device_flags & DFLG_NO_CABLE) &&
			    !ha->parent) {
				DEBUG(printk("scsi(%ld): Loop down - "
				    "aborting ISP.\n",
				    ha->host_no));
				qla_printk(KERN_WARNING, ha,
				    "Loop down - aborting ISP.\n");

				set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			}
		}
		DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
		    ha->host_no,
		    atomic_read(&ha->loop_down_timer)));
	}

	/* Check if beacon LED needs to be blinked */
	if (ha->beacon_blink_led == 1) {
		set_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags);
		start_dpc++;
	}

	/* Process any deferred work. */
	if (!list_empty(&ha->work_list))
		start_dpc++;

	/* Schedule the DPC routine if needed */
	if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
	    test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
	    test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags) ||
	    start_dpc ||
	    test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) ||
	    test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) ||
	    test_bit(VP_DPC_NEEDED, &ha->dpc_flags) ||
	    test_bit(RELOGIN_NEEDED, &ha->dpc_flags)))
		qla2xxx_wake_dpc(pha);

	qla2x00_restart_timer(ha, WATCH_INTERVAL);
}

/* Firmware interface routines. */

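/*
 * Firmware images are fetched on demand with request_firmware() and cached
 * in qla_fw_blobs[] under qla_fw_lock, so repeated probes reuse the same
 * blob; everything cached here is dropped again by
 * qla2x00_release_firmware() at module unload.
 */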

#define FW_BLOBS	6
#define FW_ISP21XX	0
#define FW_ISP22XX	1
#define FW_ISP2300	2
#define FW_ISP2322	3
#define FW_ISP24XX	4
#define FW_ISP25XX	5

#define FW_FILE_ISP21XX	"ql2100_fw.bin"
#define FW_FILE_ISP22XX	"ql2200_fw.bin"
#define FW_FILE_ISP2300	"ql2300_fw.bin"
#define FW_FILE_ISP2322	"ql2322_fw.bin"
#define FW_FILE_ISP24XX	"ql2400_fw.bin"
#define FW_FILE_ISP25XX	"ql2500_fw.bin"

static DEFINE_MUTEX(qla_fw_lock);

static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
	{ .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
	{ .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
	{ .name = FW_FILE_ISP24XX, },
	{ .name = FW_FILE_ISP25XX, },
};

struct fw_blob *
qla2x00_request_firmware(scsi_qla_host_t *ha)
{
	struct fw_blob *blob;

	blob = NULL;
	if (IS_QLA2100(ha)) {
		blob = &qla_fw_blobs[FW_ISP21XX];
	} else if (IS_QLA2200(ha)) {
		blob = &qla_fw_blobs[FW_ISP22XX];
	} else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
		blob = &qla_fw_blobs[FW_ISP2300];
	} else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
		blob = &qla_fw_blobs[FW_ISP2322];
	} else if (IS_QLA24XX_TYPE(ha)) {
		blob = &qla_fw_blobs[FW_ISP24XX];
	} else if (IS_QLA25XX(ha)) {
		blob = &qla_fw_blobs[FW_ISP25XX];
	}

	mutex_lock(&qla_fw_lock);
	if (blob->fw)
		goto out;

	if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
		DEBUG2(printk("scsi(%ld): Failed to load firmware image "
		    "(%s).\n", ha->host_no, blob->name));
		blob->fw = NULL;
		blob = NULL;
		goto out;
	}

out:
	mutex_unlock(&qla_fw_lock);
	return blob;
}

static void
qla2x00_release_firmware(void)
{
	int idx;

	mutex_lock(&qla_fw_lock);
	for (idx = 0; idx < FW_BLOBS; idx++)
		if (qla_fw_blobs[idx].fw)
			release_firmware(qla_fw_blobs[idx].fw);
	mutex_unlock(&qla_fw_lock);
}

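/*
 * PCI AER recovery callbacks: error_detected() decides whether the channel
 * can recover, mmio_enabled() dumps firmware state if the RISC is paused,
 * slot_reset() re-enables the device and re-initializes the ISP, and
 * resume() waits for the HBA to come back online. They are wired up through
 * qla2xxx_err_handler below.
 */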
static pci_ers_result_t
qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		qla2x00_remove_one(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t
qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
{
	int risc_paused = 0;
	uint32_t stat;
	unsigned long flags;
	scsi_qla_host_t *ha = pci_get_drvdata(pdev);
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		stat = RD_REG_DWORD(&reg->hccr);
		if (stat & HCCR_RISC_PAUSE)
			risc_paused = 1;
	} else if (IS_QLA23XX(ha)) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED)
			risc_paused = 1;
	} else if (IS_FWI2_CAPABLE(ha)) {
		stat = RD_REG_DWORD(&reg24->host_status);
		if (stat & HSRX_RISC_PAUSED)
			risc_paused = 1;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (risc_paused) {
		qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
		    "Dumping firmware!\n");
		ha->isp_ops->fw_dump(ha, 0);

		return PCI_ERS_RESULT_NEED_RESET;
	} else
		return PCI_ERS_RESULT_RECOVERED;
}

static pci_ers_result_t
qla2xxx_pci_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
	scsi_qla_host_t *ha = pci_get_drvdata(pdev);
	int rc;

	if (ha->mem_only)
		rc = pci_enable_device_mem(pdev);
	else
		rc = pci_enable_device(pdev);

	if (rc) {
		qla_printk(KERN_WARNING, ha,
		    "Can't re-enable PCI device after reset.\n");

		return ret;
	}
	pci_set_master(pdev);

	if (ha->isp_ops->pci_config(ha))
		return ret;

	set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
	if (qla2x00_abort_isp(ha) == QLA_SUCCESS)
		ret = PCI_ERS_RESULT_RECOVERED;
	clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);

	return ret;
}

static void
qla2xxx_pci_resume(struct pci_dev *pdev)
{
	scsi_qla_host_t *ha = pci_get_drvdata(pdev);
	int ret;

	ret = qla2x00_wait_for_hba_online(ha);
	if (ret != QLA_SUCCESS) {
		qla_printk(KERN_ERR, ha,
		    "the device failed to resume I/O "
		    "from slot/link_reset");
	}
	pci_cleanup_aer_uncorrect_error_status(pdev);
}

static struct pci_error_handlers qla2xxx_err_handler = {
	.error_detected = qla2xxx_pci_error_detected,
	.mmio_enabled = qla2xxx_pci_mmio_enabled,
	.slot_reset = qla2xxx_pci_slot_reset,
	.resume = qla2xxx_pci_resume,
};

static struct pci_device_id qla2xxx_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);

static struct pci_driver qla2xxx_pci_driver = {
	.name		= QLA2XXX_DRIVER_NAME,
	.driver		= {
		.owner		= THIS_MODULE,
	},
	.id_table	= qla2xxx_pci_tbl,
	.probe		= qla2x00_probe_one,
	.remove		= qla2x00_remove_one,
	.err_handler	= &qla2xxx_err_handler,
};

/**
 * qla2x00_module_init - Module initialization.
 **/
static int __init
qla2x00_module_init(void)
{
	int ret = 0;

	/* Allocate cache for SRBs. */
	srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
	    SLAB_HWCACHE_ALIGN, NULL);
	if (srb_cachep == NULL) {
		printk(KERN_ERR
		    "qla2xxx: Unable to allocate SRB cache...Failing load!\n");
		return -ENOMEM;
	}

	/* Derive version string. */
	strcpy(qla2x00_version_str, QLA2XXX_VERSION);
	if (ql2xextended_error_logging)
		strcat(qla2x00_version_str, "-debug");

	qla2xxx_transport_template =
	    fc_attach_transport(&qla2xxx_transport_functions);
	if (!qla2xxx_transport_template) {
		kmem_cache_destroy(srb_cachep);
		return -ENODEV;
	}
	qla2xxx_transport_vport_template =
	    fc_attach_transport(&qla2xxx_transport_vport_functions);
	if (!qla2xxx_transport_vport_template) {
		kmem_cache_destroy(srb_cachep);
		fc_release_transport(qla2xxx_transport_template);
		return -ENODEV;
	}

	printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n",
	    qla2x00_version_str);
	ret = pci_register_driver(&qla2xxx_pci_driver);
	if (ret) {
		kmem_cache_destroy(srb_cachep);
		fc_release_transport(qla2xxx_transport_template);
		fc_release_transport(qla2xxx_transport_vport_template);
	}
	return ret;
}

/**
 * qla2x00_module_exit - Module cleanup.
 **/
static void __exit
qla2x00_module_exit(void)
{
	pci_unregister_driver(&qla2xxx_pci_driver);
	qla2x00_release_firmware();
	kmem_cache_destroy(srb_cachep);
	fc_release_transport(qla2xxx_transport_template);
	fc_release_transport(qla2xxx_transport_vport_template);
}

module_init(qla2x00_module_init);
module_exit(qla2x00_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA2XXX_VERSION);
MODULE_FIRMWARE(FW_FILE_ISP21XX);
MODULE_FIRMWARE(FW_FILE_ISP22XX);
MODULE_FIRMWARE(FW_FILE_ISP2300);
MODULE_FIRMWARE(FW_FILE_ISP2322);
MODULE_FIRMWARE(FW_FILE_ISP24XX);
MODULE_FIRMWARE(FW_FILE_ISP25XX);