/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2005 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

/*
 * Driver version
 */
char qla2x00_version_str[40];

/*
 * SRB allocation cache
 */
static kmem_cache_t *srb_cachep;

/*
 * Ioctl related information.
 */
static int num_hosts;

int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xlogintimeout,
		"Login timeout value in seconds.");

int qlport_down_retry = 30;
module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(qlport_down_retry,
		"Maximum number of command retries to a port that returns "
		"a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
		"Option to enable PLOGI to devices that are not present after "
		"a Fabric scan.  This is needed for several broken switches. "
		"Default is 0 - no PLOGI.  1 - perform PLOGI.");

int ql2xloginretrycount = 0;
module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xloginretrycount,
		"Specify an alternate value for the NVRAM login retry count.");

int ql2xfwloadbin = 1;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
		"Load ISP2xxx firmware image via hotplug.");

static void qla2x00_free_device(scsi_qla_host_t *);

static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);

int ql2xfdmienable;
module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfdmienable,
		"Enables FDMI registrations. "
		"Default is 0 - no FDMI.  1 - perform FDMI.");
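/*
 * Illustrative usage note: the module parameters above can be overridden at
 * load time, for example:
 *
 *	modprobe qla2xxx ql2xlogintimeout=30 qlport_down_retry=10
 *
 * (Values shown are examples only, not recommended settings.)
 */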
/*
 * SCSI host template entry points
 */
static int qla2xxx_slave_configure(struct scsi_device * device);
static int qla2xxx_slave_alloc(struct scsi_device *);
static void qla2xxx_slave_destroy(struct scsi_device *);
static int qla2x00_queuecommand(struct scsi_cmnd *cmd,
		void (*fn)(struct scsi_cmnd *));
static int qla24xx_queuecommand(struct scsi_cmnd *cmd,
		void (*fn)(struct scsi_cmnd *));
static int qla2xxx_eh_abort(struct scsi_cmnd *);
static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
static int qla2x00_loop_reset(scsi_qla_host_t *ha);
static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *);

static int qla2x00_change_queue_depth(struct scsi_device *, int);
static int qla2x00_change_queue_type(struct scsi_device *, int);

static struct scsi_host_template qla2x00_driver_template = {
	.module = THIS_MODULE,
	.name = "qla2xxx",
	.queuecommand = qla2x00_queuecommand,

	.eh_abort_handler = qla2xxx_eh_abort,
	.eh_device_reset_handler = qla2xxx_eh_device_reset,
	.eh_bus_reset_handler = qla2xxx_eh_bus_reset,
	.eh_host_reset_handler = qla2xxx_eh_host_reset,

	.slave_configure = qla2xxx_slave_configure,

	.slave_alloc = qla2xxx_slave_alloc,
	.slave_destroy = qla2xxx_slave_destroy,
	.change_queue_depth = qla2x00_change_queue_depth,
	.change_queue_type = qla2x00_change_queue_type,
	.this_id = -1,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = SG_ALL,

	/*
	 * The RISC allows for each command to transfer (2^32-1) bytes of data,
	 * which equates to 0x800000 sectors.
	 */
	.max_sectors = 0xFFFF,
	.shost_attrs = qla2x00_host_attrs,
};

static struct scsi_host_template qla24xx_driver_template = {
	.module = THIS_MODULE,
	.name = "qla2xxx",
	.queuecommand = qla24xx_queuecommand,

	.eh_abort_handler = qla2xxx_eh_abort,
	.eh_device_reset_handler = qla2xxx_eh_device_reset,
	.eh_bus_reset_handler = qla2xxx_eh_bus_reset,
	.eh_host_reset_handler = qla2xxx_eh_host_reset,

	.slave_configure = qla2xxx_slave_configure,

	.slave_alloc = qla2xxx_slave_alloc,
	.slave_destroy = qla2xxx_slave_destroy,
	.change_queue_depth = qla2x00_change_queue_depth,
	.change_queue_type = qla2x00_change_queue_type,
	.this_id = -1,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = SG_ALL,

	.max_sectors = 0xFFFF,
	.shost_attrs = qla2x00_host_attrs,
};

static struct scsi_transport_template *qla2xxx_transport_template = NULL;

/* TODO Convert to inlines
 *
 * Timer routines
 */
#define WATCH_INTERVAL		1	/* number of seconds */

static void qla2x00_timer(scsi_qla_host_t *);

static __inline__ void qla2x00_start_timer(scsi_qla_host_t *,
    void *, unsigned long);
static __inline__ void qla2x00_restart_timer(scsi_qla_host_t *, unsigned long);
static __inline__ void qla2x00_stop_timer(scsi_qla_host_t *);

static inline void
qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval)
{
	init_timer(&ha->timer);
	ha->timer.expires = jiffies + interval * HZ;
	ha->timer.data = (unsigned long)ha;
	ha->timer.function = (void (*)(unsigned long))func;
	add_timer(&ha->timer);
	ha->timer_active = 1;
}
static inline void
qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval)
{
	mod_timer(&ha->timer, jiffies + interval * HZ);
}

static __inline__ void
qla2x00_stop_timer(scsi_qla_host_t *ha)
{
	del_timer_sync(&ha->timer);
	ha->timer_active = 0;
}

static int qla2x00_do_dpc(void *data);

static void qla2x00_rst_aen(scsi_qla_host_t *);

static uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
static void qla2x00_mem_free(scsi_qla_host_t *ha);
static int qla2x00_allocate_sp_pool( scsi_qla_host_t *ha);
static void qla2x00_free_sp_pool(scsi_qla_host_t *ha);
static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *);
void qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *);

/* -------------------------------------------------------------------------- */

static char *
qla2x00_pci_info_str(struct scsi_qla_host *ha, char *str)
{
	static char *pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	uint16_t pci_bus;

	strcpy(str, "PCI");
	pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
	if (pci_bus) {
		strcat(str, "-X (");
		strcat(str, pci_bus_modes[pci_bus]);
	} else {
		pci_bus = (ha->pci_attr & BIT_8) >> 8;
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus]);
	}
	strcat(str, " MHz)");

	return (str);
}

static char *
qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
{
	static char *pci_bus_modes[] = { "33", "66", "100", "133", };
	uint32_t pci_bus;
	int pcie_reg;

	pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
	if (pcie_reg) {
		char lwstr[6];
		uint16_t pcie_lstat, lspeed, lwidth;

		pcie_reg += 0x12;
		pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
		lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
		lwidth = (pcie_lstat &
		    (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;

		strcpy(str, "PCIe (");
		if (lspeed == 1)
			strcat(str, "2.5Gb/s ");
		else
			strcat(str, "<unknown> ");
		snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
		strcat(str, lwstr);

		return str;
	}

	strcpy(str, "PCI");
	pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
	if (pci_bus == 0 || pci_bus == 8) {
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus >> 3]);
	} else {
		strcat(str, "-X ");
		if (pci_bus & BIT_2)
			strcat(str, "Mode 2");
		else
			strcat(str, "Mode 1");
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
	}
	strcat(str, " MHz)");

	return str;
}

char *
qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
{
	char un_str[10];

	sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
	    ha->fw_minor_version,
	    ha->fw_subminor_version);

	if (ha->fw_attributes & BIT_9) {
		strcat(str, "FLX");
		return (str);
	}

	switch (ha->fw_attributes & 0xFF) {
	case 0x7:
		strcat(str, "EF");
		break;
	case 0x17:
		strcat(str, "TP");
		break;
	case 0x37:
		strcat(str, "IP");
		break;
	case 0x77:
		strcat(str, "VI");
		break;
	default:
		sprintf(un_str, "(%x)", ha->fw_attributes);
		strcat(str, un_str);
		break;
	}
	if (ha->fw_attributes & 0x100)
		strcat(str, "X");

	return (str);
}

char *
qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
{
	sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
	    ha->fw_minor_version,
	    ha->fw_subminor_version);

	if (ha->fw_attributes & BIT_0)
		strcat(str, "[Class 2] ");
	if (ha->fw_attributes & BIT_1)
		strcat(str, "[IP] ");
	if (ha->fw_attributes & BIT_2)
		strcat(str, "[Multi-ID] ");
	if (ha->fw_attributes & BIT_13)
		strcat(str, "[Experimental]");
	return str;
}
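/*
 * The helper below allocates an srb_t from the per-host SRB mempool and binds
 * it to the SCSI command.  Allocation uses GFP_ATOMIC since queuecommand()
 * may run in atomic context; a NULL return makes the caller report
 * SCSI_MLQUEUE_HOST_BUSY to the midlayer.
 */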
static inline srb_t *
qla2x00_get_new_sp(scsi_qla_host_t *ha, fc_port_t *fcport,
    struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	srb_t *sp;

	sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
	if (!sp)
		return sp;

	atomic_set(&sp->ref_count, 1);
	sp->ha = ha;
	sp->fcport = fcport;
	sp->cmd = cmd;
	sp->flags = 0;
	CMD_SP(cmd) = (void *)sp;
	cmd->scsi_done = done;

	return sp;
}

static int
qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	srb_t *sp;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		cmd->result = rval;
		goto qc_fail_command;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
		    atomic_read(&ha->loop_state) == LOOP_DEAD) {
			cmd->result = DID_NO_CONNECT << 16;
			goto qc_fail_command;
		}
		goto qc_host_busy;
	}

	spin_unlock_irq(ha->host->host_lock);

	sp = qla2x00_get_new_sp(ha, fcport, cmd, done);
	if (!sp)
		goto qc_host_busy_lock;

	rval = qla2x00_start_scsi(sp);
	if (rval != QLA_SUCCESS)
		goto qc_host_busy_free_sp;

	spin_lock_irq(ha->host->host_lock);

	return 0;

qc_host_busy_free_sp:
	qla2x00_sp_free_dma(ha, sp);
	mempool_free(sp, ha->srb_mempool);

qc_host_busy_lock:
	spin_lock_irq(ha->host->host_lock);

qc_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

qc_fail_command:
	done(cmd);

	return 0;
}


static int
qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	srb_t *sp;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		cmd->result = rval;
		goto qc24_fail_command;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
		    atomic_read(&ha->loop_state) == LOOP_DEAD) {
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_host_busy;
	}

	spin_unlock_irq(ha->host->host_lock);

	sp = qla2x00_get_new_sp(ha, fcport, cmd, done);
	if (!sp)
		goto qc24_host_busy_lock;

	rval = qla24xx_start_scsi(sp);
	if (rval != QLA_SUCCESS)
		goto qc24_host_busy_free_sp;

	spin_lock_irq(ha->host->host_lock);

	return 0;

qc24_host_busy_free_sp:
	qla2x00_sp_free_dma(ha, sp);
	mempool_free(sp, ha->srb_mempool);

qc24_host_busy_lock:
	spin_lock_irq(ha->host->host_lock);

qc24_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

qc24_fail_command:
	done(cmd);

	return 0;
}
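/*
 * Locking note: queuecommand() is entered with the Scsi_Host host_lock held.
 * Both routines above drop it around SRB allocation and ring submission
 * (qla2x00_start_scsi()/qla24xx_start_scsi()) and re-acquire it before
 * returning to the midlayer.
 */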
/*
 * qla2x00_eh_wait_on_command
 *    Waits for the command to be returned by the Firmware for some
 *    max time.
 *
 * Input:
 *    ha = actual ha whose done queue will contain the command
 *	    returned by firmware.
 *    cmd = Scsi Command to wait on.
 *    flag = Abort/Reset(Bus or Device Reset)
 *
 * Return:
 *    Not Found : 0
 *    Found : 1
 */
static int
qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
{
#define ABORT_POLLING_PERIOD	1000
#define ABORT_WAIT_ITER		((10 * 1000) / (ABORT_POLLING_PERIOD))
	unsigned long wait_iter = ABORT_WAIT_ITER;
	int ret = QLA_SUCCESS;

	while (CMD_SP(cmd)) {
		msleep(ABORT_POLLING_PERIOD);

		if (--wait_iter == 0)
			break;
	}
	if (CMD_SP(cmd))
		ret = QLA_FUNCTION_FAILED;

	return ret;
}

/*
 * qla2x00_wait_for_hba_online
 *    Wait till the HBA is online after going through
 *    <= MAX_RETRIES_OF_ISP_ABORT or
 *    finally the HBA is disabled, i.e. marked offline.
 *
 * Input:
 *    ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching - release the SPIN_LOCK
 *    (if any) before calling this routine.
 *
 * Return:
 *    Success (Adapter is online) : 0
 *    Failed  (Adapter is offline/disabled) : 1
 */
static int
qla2x00_wait_for_hba_online(scsi_qla_host_t *ha)
{
	int return_status;
	unsigned long wait_online;

	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &ha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_online)) {

		msleep(1000);
	}
	if (ha->flags.online)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	DEBUG2(printk("%s return_status=%d\n", __func__, return_status));

	return (return_status);
}

/*
 * qla2x00_wait_for_loop_ready
 *    Wait up to MAX_LOOP_TIMEOUT (5 min) for the loop
 *    to be in LOOP_READY state.
 * Input:
 *    ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching - release the SPIN_LOCK
 *    (if any) before calling this routine.
 *
 * Return:
 *    Success (LOOP_READY) : 0
 *    Failed  (LOOP_NOT_READY) : 1
 */
static inline int
qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
{
	int return_status = QLA_SUCCESS;
	unsigned long loop_timeout;

	/* wait for 5 min at the max for loop to be ready */
	loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);

	while ((!atomic_read(&ha->loop_down_timer) &&
	    atomic_read(&ha->loop_state) == LOOP_DOWN) ||
	    atomic_read(&ha->loop_state) != LOOP_READY) {
		msleep(1000);
		if (time_after_eq(jiffies, loop_timeout)) {
			return_status = QLA_FUNCTION_FAILED;
			break;
		}
	}
	return (return_status);
}
/**************************************************************************
* qla2xxx_eh_abort
*
* Description:
*    The abort function will abort the specified command.
*
* Input:
*    cmd = Linux SCSI command packet to be aborted.
*
* Returns:
*    Either SUCCESS or FAILED.
*
* Note:
**************************************************************************/
int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
	srb_t *sp;
	int ret, i;
	unsigned int id, lun;
	unsigned long serial;
	unsigned long flags;

	if (!CMD_SP(cmd))
		return FAILED;

	ret = FAILED;

	id = cmd->device->id;
	lun = cmd->device->lun;
	serial = cmd->serial_number;

	/* Check the active list for the command. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
		sp = ha->outstanding_cmds[i];

		if (sp == NULL)
			continue;

		if (sp->cmd != cmd)
			continue;

		DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld "
		    "sp->state=%x\n", __func__, ha->host_no, sp, serial,
		    sp->state));
		DEBUG3(qla2x00_print_scsi_cmd(cmd);)

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		if (ha->isp_ops.abort_command(ha, sp)) {
			DEBUG2(printk("%s(%ld): abort_command "
			    "mbx failed.\n", __func__, ha->host_no));
		} else {
			DEBUG3(printk("%s(%ld): abort_command "
			    "mbx success.\n", __func__, ha->host_no));
			ret = SUCCESS;
		}
		spin_lock_irqsave(&ha->hardware_lock, flags);

		break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Wait for the command to be returned. */
	if (ret == SUCCESS) {
		if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) {
			qla_printk(KERN_ERR, ha,
			    "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
			    "%x.\n", ha->host_no, id, lun, serial, ret);
		}
	}

	qla_printk(KERN_INFO, ha,
	    "scsi(%ld:%d:%d): Abort command issued -- %lx %x.\n", ha->host_no,
	    id, lun, serial, ret);

	return ret;
}

/**************************************************************************
* qla2x00_eh_wait_for_pending_target_commands
*
* Description:
*    Waits for all the commands to come back from the specified target.
*
* Input:
*    ha - pointer to scsi_qla_host structure.
*    t  - target
* Returns:
*    Either SUCCESS or FAILED.
*
* Note:
**************************************************************************/
static int
qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
{
	int cnt;
	int status;
	srb_t *sp;
	struct scsi_cmnd *cmd;
	unsigned long flags;

	status = 0;

	/*
	 * Waiting for all commands for the designated target in the active
	 * array
	 */
	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
		spin_lock_irqsave(&ha->hardware_lock, flags);
		sp = ha->outstanding_cmds[cnt];
		if (sp) {
			cmd = sp->cmd;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			if (cmd->device->id == t) {
				if (!qla2x00_eh_wait_on_command(ha, cmd)) {
					status = 1;
					break;
				}
			}
		} else {
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
		}
	}
	return (status);
}
/**************************************************************************
* qla2xxx_eh_device_reset
*
* Description:
*    The device reset function will reset the target and abort any
*    executing commands.
*
*    NOTE: The use of SP is undefined within this context.  Do *NOT*
*          attempt to use this value, even if you determine it is
*          non-null.
*
* Input:
*    cmd = Linux SCSI command packet of the command that caused the
*          bus device reset.
*
* Returns:
*    SUCCESS/FAILURE (defined as macro in scsi.h).
*
**************************************************************************/
int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	srb_t *sp;
	int ret;
	unsigned int id, lun;
	unsigned long serial;

	ret = FAILED;

	id = cmd->device->id;
	lun = cmd->device->lun;
	serial = cmd->serial_number;

	sp = (srb_t *) CMD_SP(cmd);
	if (!sp || !fcport)
		return ret;

	qla_printk(KERN_INFO, ha,
	    "scsi(%ld:%d:%d): DEVICE RESET ISSUED.\n", ha->host_no, id, lun);

	if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
		goto eh_dev_reset_done;

	if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) {
		if (qla2x00_device_reset(ha, fcport) == 0)
			ret = SUCCESS;

#if defined(LOGOUT_AFTER_DEVICE_RESET)
		if (ret == SUCCESS) {
			if (fcport->flags & FC_FABRIC_DEVICE) {
				ha->isp_ops.fabric_logout(ha, fcport->loop_id);
				qla2x00_mark_device_lost(ha, fcport);
			}
		}
#endif
	} else {
		DEBUG2(printk(KERN_INFO
		    "%s failed: loop not ready\n", __func__);)
	}

	if (ret == FAILED) {
		DEBUG3(printk("%s(%ld): device reset failed\n",
		    __func__, ha->host_no));
		qla_printk(KERN_INFO, ha, "%s: device reset failed\n",
		    __func__);

		goto eh_dev_reset_done;
	}

	/* Flush outstanding commands. */
	if (qla2x00_eh_wait_for_pending_target_commands(ha, id))
		ret = FAILED;
	if (ret == FAILED) {
		DEBUG3(printk("%s(%ld): failed while waiting for commands\n",
		    __func__, ha->host_no));
		qla_printk(KERN_INFO, ha,
		    "%s: failed while waiting for commands\n", __func__);
	} else
		qla_printk(KERN_INFO, ha,
		    "scsi(%ld:%d:%d): DEVICE RESET SUCCEEDED.\n", ha->host_no,
		    id, lun);
eh_dev_reset_done:
	return ret;
}

/**************************************************************************
* qla2x00_eh_wait_for_pending_commands
*
* Description:
*    Waits for all the commands to come back from the specified host.
*
* Input:
*    ha - pointer to scsi_qla_host structure.
*
* Returns:
*    1 : SUCCESS
*    0 : FAILED
*
* Note:
**************************************************************************/
static int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha)
{
	int cnt;
	int status;
	srb_t *sp;
	struct scsi_cmnd *cmd;
	unsigned long flags;

	status = 1;

	/*
	 * Waiting for all commands for the designated target in the active
	 * array
	 */
	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
		spin_lock_irqsave(&ha->hardware_lock, flags);
		sp = ha->outstanding_cmds[cnt];
		if (sp) {
			cmd = sp->cmd;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			status = qla2x00_eh_wait_on_command(ha, cmd);
			if (status == 0)
				break;
		} else {
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
		}
	}
	return (status);
}
/**************************************************************************
* qla2xxx_eh_bus_reset
*
* Description:
*    The bus reset function will reset the bus and abort any executing
*    commands.
*
* Input:
*    cmd = Linux SCSI command packet of the command that caused the
*          bus reset.
*
* Returns:
*    SUCCESS/FAILURE (defined as macro in scsi.h).
*
**************************************************************************/
int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	srb_t *sp;
	int ret;
	unsigned int id, lun;
	unsigned long serial;

	ret = FAILED;

	id = cmd->device->id;
	lun = cmd->device->lun;
	serial = cmd->serial_number;

	sp = (srb_t *) CMD_SP(cmd);
	if (!sp || !fcport)
		return ret;

	qla_printk(KERN_INFO, ha,
	    "scsi(%ld:%d:%d): LOOP RESET ISSUED.\n", ha->host_no, id, lun);

	if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) {
		DEBUG2(printk("%s failed: board disabled\n", __func__));
		goto eh_bus_reset_done;
	}

	if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) {
		if (qla2x00_loop_reset(ha) == QLA_SUCCESS)
			ret = SUCCESS;
	}
	if (ret == FAILED)
		goto eh_bus_reset_done;

	/* Flush outstanding commands. */
	if (!qla2x00_eh_wait_for_pending_commands(ha))
		ret = FAILED;

eh_bus_reset_done:
	qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
	    (ret == FAILED) ? "failed" : "succeeded");

	return ret;
}

/**************************************************************************
* qla2xxx_eh_host_reset
*
* Description:
*    The reset function will reset the Adapter.
*
* Input:
*    cmd = Linux SCSI command packet of the command that caused the
*          adapter reset.
*
* Returns:
*    Either SUCCESS or FAILED.
*
* Note:
**************************************************************************/
int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	srb_t *sp;
	int ret;
	unsigned int id, lun;
	unsigned long serial;

	ret = FAILED;

	id = cmd->device->id;
	lun = cmd->device->lun;
	serial = cmd->serial_number;

	sp = (srb_t *) CMD_SP(cmd);
	if (!sp || !fcport)
		return ret;

	qla_printk(KERN_INFO, ha,
	    "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no, id, lun);

	if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
		goto eh_host_reset_lock;

	/*
	 * Fixme: the dpc thread may be active and processing loop_resync,
	 * so wait a while for it to complete before issuing the big hammer.
	 * Otherwise it may cause I/O failures, as the big hammer marks the
	 * devices as lost, kicking off the port_down_timer, while the dpc
	 * thread is stuck waiting for the mailbox to complete.
	 */
	qla2x00_wait_for_loop_ready(ha);
	set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
	if (qla2x00_abort_isp(ha)) {
		clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
		/* failed. schedule dpc to try */
		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);

		if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
			goto eh_host_reset_lock;
	}
	clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);

	/* Waiting for our command in done_queue to be returned to OS. */
	if (qla2x00_eh_wait_for_pending_commands(ha))
		ret = SUCCESS;

eh_host_reset_lock:
	qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
	    (ret == FAILED) ? "failed" : "succeeded");

	return ret;
}

/*
 * qla2x00_loop_reset
 *	Issue loop reset.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
static int
qla2x00_loop_reset(scsi_qla_host_t *ha)
{
	int status = QLA_SUCCESS;
	struct fc_port *fcport;

	if (ha->flags.enable_lip_reset) {
		status = qla2x00_lip_reset(ha);
	}

	if (status == QLA_SUCCESS && ha->flags.enable_target_reset) {
		list_for_each_entry(fcport, &ha->fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;

			status = qla2x00_device_reset(ha, fcport);
			if (status != QLA_SUCCESS)
				break;
		}
	}

	if (status == QLA_SUCCESS &&
	    ((!ha->flags.enable_target_reset &&
	      !ha->flags.enable_lip_reset) ||
	     ha->flags.enable_lip_full_login)) {

		status = qla2x00_full_login_lip(ha);
	}

	/* Issue marker command only when we are going to start the I/O */
	ha->marker_needed = 1;

	if (status) {
		/* Empty */
		DEBUG2_3(printk("%s(%ld): **** FAILED ****\n",
		    __func__, ha->host_no);)
	} else {
		/* Empty */
		DEBUG3(printk("%s(%ld): exiting normally.\n",
		    __func__, ha->host_no);)
	}

	return (status);
}

/*
 * qla2x00_device_reset
 *	Issue bus device reset message to the target.
 *
 * Input:
 *	ha = adapter block pointer.
 *	t = SCSI ID.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_device_reset(scsi_qla_host_t *ha, fc_port_t *reset_fcport)
{
	/* Abort Target command will clear Reservation */
	return ha->isp_ops.abort_target(reset_fcport);
}

static int
qla2xxx_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = *(fc_port_t **)rport->dd_data;

	return 0;
}

static int
qla2xxx_slave_configure(struct scsi_device *sdev)
{
	scsi_qla_host_t *ha = to_qla_host(sdev->host);
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, 32);
	else
		scsi_deactivate_tcq(sdev, 32);

	rport->dev_loss_tmo = ha->port_down_retry_count + 5;

	return 0;
}

static void
qla2xxx_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}

static int
qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}

static int
qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}

/**
 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
 * @ha: HA context
 *
 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
 * supported addressing method.
 */
static void
qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
{
	/* Assume a 32bit DMA mask. */
	ha->flags.enable_64bit_addressing = 0;

	if (!dma_set_mask(&ha->pdev->dev, DMA_64BIT_MASK)) {
		/* Any upper-dword bits set? */
		if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
		    !pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
			/* Ok, a 64bit DMA mask is applicable. */
			ha->flags.enable_64bit_addressing = 1;
			ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_64;
			ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_64;
			return;
		}
	}

	dma_set_mask(&ha->pdev->dev, DMA_32BIT_MASK);
	pci_set_consistent_dma_mask(ha->pdev, DMA_32BIT_MASK);
}
static int
qla2x00_iospace_config(scsi_qla_host_t *ha)
{
	unsigned long pio, pio_len, pio_flags;
	unsigned long mmio, mmio_len, mmio_flags;

	/* We only need PIO for Flash operations on ISP2312 v2 chips. */
	pio = pci_resource_start(ha->pdev, 0);
	pio_len = pci_resource_len(ha->pdev, 0);
	pio_flags = pci_resource_flags(ha->pdev, 0);
	if (pio_flags & IORESOURCE_IO) {
		if (pio_len < MIN_IOBASE_LEN) {
			qla_printk(KERN_WARNING, ha,
			    "Invalid PCI I/O region size (%s)...\n",
			    pci_name(ha->pdev));
			pio = 0;
		}
	} else {
		qla_printk(KERN_WARNING, ha,
		    "region #0 not a PIO resource (%s)...\n",
		    pci_name(ha->pdev));
		pio = 0;
	}

	/* Use MMIO operations for all accesses. */
	mmio = pci_resource_start(ha->pdev, 1);
	mmio_len = pci_resource_len(ha->pdev, 1);
	mmio_flags = pci_resource_flags(ha->pdev, 1);

	if (!(mmio_flags & IORESOURCE_MEM)) {
		qla_printk(KERN_ERR, ha,
		    "region #1 not an MMIO resource (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (mmio_len < MIN_IOBASE_LEN) {
		qla_printk(KERN_ERR, ha,
		    "Invalid PCI mem region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	if (pci_request_regions(ha->pdev, ha->brd_info->drv_name)) {
		qla_printk(KERN_WARNING, ha,
		    "Failed to reserve PIO/MMIO regions (%s)\n",
		    pci_name(ha->pdev));

		goto iospace_error_exit;
	}

	ha->pio_address = pio;
	ha->pio_length = pio_len;
	ha->iobase = ioremap(mmio, MIN_IOBASE_LEN);
	if (!ha->iobase) {
		qla_printk(KERN_ERR, ha,
		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));

		goto iospace_error_exit;
	}

	return (0);

iospace_error_exit:
	return (-ENOMEM);
}

static void
qla2x00_enable_intrs(scsi_qla_host_t *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	/* enable risc and host interrupts */
	WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
	RD_REG_WORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla2x00_disable_intrs(scsi_qla_host_t *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	/* disable risc and host interrupts */
	WRT_REG_WORD(&reg->ictrl, 0);
	RD_REG_WORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
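/*
 * ISP24xx/25xx interrupt control differs from the older parts: a single RISC
 * interrupt enable bit (ICRX_EN_RISC_INT) in a 32-bit ictrl register, hence
 * the DWORD register accessors in the two routines below.
 */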
static void
qla24xx_enable_intrs(scsi_qla_host_t *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
	RD_REG_DWORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_disable_intrs(scsi_qla_host_t *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/*
 * PCI driver interface
 */
int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
{
	int ret = -ENODEV;
	device_reg_t __iomem *reg;
	struct Scsi_Host *host;
	scsi_qla_host_t *ha;
	unsigned long flags = 0;
	unsigned long wait_switch = 0;
	char pci_info[20];
	char fw_str[30];
	fc_port_t *fcport;

	if (pci_enable_device(pdev))
		goto probe_out;

	host = scsi_host_alloc(brd_info->sht ? brd_info->sht :
	    &qla2x00_driver_template, sizeof(scsi_qla_host_t));
	if (host == NULL) {
		printk(KERN_WARNING
		    "qla2xxx: Couldn't allocate host from scsi layer!\n");
		goto probe_disable_device;
	}

	/* Clear our data area */
	ha = (scsi_qla_host_t *)host->hostdata;
	memset(ha, 0, sizeof(scsi_qla_host_t));

	ha->pdev = pdev;
	ha->host = host;
	ha->host_no = host->host_no;
	ha->brd_info = brd_info;
	sprintf(ha->host_str, "%s_%ld", ha->brd_info->drv_name, ha->host_no);

	ha->dpc_pid = -1;

	/* Configure PCI I/O space */
	ret = qla2x00_iospace_config(ha);
	if (ret)
		goto probe_failed;

	qla_printk(KERN_INFO, ha,
	    "Found an %s, irq %d, iobase 0x%p\n", ha->brd_info->isp_name,
	    pdev->irq, ha->iobase);

	spin_lock_init(&ha->hardware_lock);

	ha->prev_topology = 0;
	ha->ports = MAX_BUSES;
	ha->init_cb_size = sizeof(init_cb_t);
	ha->mgmt_svr_loop_id = MANAGEMENT_SERVER;
	/* Assign ISP specific operations. */
	ha->isp_ops.pci_config = qla2100_pci_config;
	ha->isp_ops.reset_chip = qla2x00_reset_chip;
	ha->isp_ops.chip_diag = qla2x00_chip_diag;
	ha->isp_ops.config_rings = qla2x00_config_rings;
	ha->isp_ops.reset_adapter = qla2x00_reset_adapter;
	ha->isp_ops.nvram_config = qla2x00_nvram_config;
	ha->isp_ops.update_fw_options = qla2x00_update_fw_options;
	ha->isp_ops.load_risc = qla2x00_load_risc;
	ha->isp_ops.pci_info_str = qla2x00_pci_info_str;
	ha->isp_ops.fw_version_str = qla2x00_fw_version_str;
	ha->isp_ops.intr_handler = qla2100_intr_handler;
	ha->isp_ops.enable_intrs = qla2x00_enable_intrs;
	ha->isp_ops.disable_intrs = qla2x00_disable_intrs;
	ha->isp_ops.abort_command = qla2x00_abort_command;
	ha->isp_ops.abort_target = qla2x00_abort_target;
	ha->isp_ops.fabric_login = qla2x00_login_fabric;
	ha->isp_ops.fabric_logout = qla2x00_fabric_logout;
	ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_32;
	ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_32;
	ha->isp_ops.prep_ms_iocb = qla2x00_prep_ms_iocb;
	ha->isp_ops.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb;
	ha->isp_ops.read_nvram = qla2x00_read_nvram_data;
	ha->isp_ops.write_nvram = qla2x00_write_nvram_data;
	ha->isp_ops.fw_dump = qla2100_fw_dump;
	ha->isp_ops.ascii_fw_dump = qla2100_ascii_fw_dump;
	if (IS_QLA2100(ha)) {
		host->max_id = MAX_TARGETS_2100;
		ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
		ha->request_q_length = REQUEST_ENTRY_CNT_2100;
		ha->response_q_length = RESPONSE_ENTRY_CNT_2100;
		ha->last_loop_id = SNS_LAST_LOOP_ID_2100;
		host->sg_tablesize = 32;
		ha->gid_list_info_size = 4;
	} else if (IS_QLA2200(ha)) {
		host->max_id = MAX_TARGETS_2200;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		ha->request_q_length = REQUEST_ENTRY_CNT_2200;
		ha->response_q_length = RESPONSE_ENTRY_CNT_2100;
		ha->last_loop_id = SNS_LAST_LOOP_ID_2100;
		ha->gid_list_info_size = 4;
	} else if (IS_QLA23XX(ha)) {
		host->max_id = MAX_TARGETS_2200;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		ha->request_q_length = REQUEST_ENTRY_CNT_2200;
		ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
		ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->isp_ops.pci_config = qla2300_pci_config;
		ha->isp_ops.intr_handler = qla2300_intr_handler;
		ha->isp_ops.fw_dump = qla2300_fw_dump;
		ha->isp_ops.ascii_fw_dump = qla2300_ascii_fw_dump;
		ha->gid_list_info_size = 6;
	} else if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
		host->max_id = MAX_TARGETS_2200;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		ha->request_q_length = REQUEST_ENTRY_CNT_24XX;
		ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
		ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct init_cb_24xx);
		ha->mgmt_svr_loop_id = 10;
		ha->isp_ops.pci_config = qla24xx_pci_config;
		ha->isp_ops.reset_chip = qla24xx_reset_chip;
		ha->isp_ops.chip_diag = qla24xx_chip_diag;
		ha->isp_ops.config_rings = qla24xx_config_rings;
		ha->isp_ops.reset_adapter = qla24xx_reset_adapter;
		ha->isp_ops.nvram_config = qla24xx_nvram_config;
		ha->isp_ops.update_fw_options = qla24xx_update_fw_options;
		ha->isp_ops.load_risc = qla24xx_load_risc_flash;
		if (ql2xfwloadbin)
			ha->isp_ops.load_risc = qla24xx_load_risc_hotplug;
		ha->isp_ops.pci_info_str = qla24xx_pci_info_str;
		ha->isp_ops.fw_version_str = qla24xx_fw_version_str;
		ha->isp_ops.intr_handler = qla24xx_intr_handler;
		ha->isp_ops.enable_intrs = qla24xx_enable_intrs;
		ha->isp_ops.disable_intrs = qla24xx_disable_intrs;
		ha->isp_ops.abort_command = qla24xx_abort_command;
		ha->isp_ops.abort_target = qla24xx_abort_target;
		ha->isp_ops.fabric_login = qla24xx_login_fabric;
		ha->isp_ops.fabric_logout = qla24xx_fabric_logout;
		ha->isp_ops.prep_ms_iocb = qla24xx_prep_ms_iocb;
		ha->isp_ops.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb;
		ha->isp_ops.read_nvram = qla24xx_read_nvram_data;
		ha->isp_ops.write_nvram = qla24xx_write_nvram_data;
		ha->isp_ops.fw_dump = qla24xx_fw_dump;
		ha->isp_ops.ascii_fw_dump = qla24xx_ascii_fw_dump;
		ha->gid_list_info_size = 8;
	}
	host->can_queue = ha->request_q_length + 128;

	/* load the F/W, read parameters, and init the H/W */
	ha->instance = num_hosts;

	init_MUTEX(&ha->mbx_cmd_sem);
	init_MUTEX_LOCKED(&ha->mbx_intr_sem);

	INIT_LIST_HEAD(&ha->list);
	INIT_LIST_HEAD(&ha->fcports);
	INIT_LIST_HEAD(&ha->rscn_fcports);

	/*
	 * These locks are used to prevent more than one CPU
	 * from modifying the queue at the same time. The
	 * higher level "host_lock" will reduce most
	 * contention for these locks.
	 */
	spin_lock_init(&ha->mbx_reg_lock);

	init_completion(&ha->dpc_inited);
	init_completion(&ha->dpc_exited);

	qla2x00_config_dma_addressing(ha);
	if (qla2x00_mem_alloc(ha)) {
		qla_printk(KERN_WARNING, ha,
		    "[ERROR] Failed to allocate memory for adapter\n");

		ret = -ENOMEM;
		goto probe_failed;
	}

	if (qla2x00_initialize_adapter(ha) &&
	    !(ha->device_flags & DFLG_NO_CABLE)) {

		qla_printk(KERN_WARNING, ha,
		    "Failed to initialize adapter\n");

		DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
		    "Adapter flags %x.\n",
		    ha->host_no, ha->device_flags));

		ret = -ENODEV;
		goto probe_failed;
	}

	/*
	 * Startup the kernel thread for this host adapter
	 */
	ha->dpc_should_die = 0;
	ha->dpc_pid = kernel_thread(qla2x00_do_dpc, ha, 0);
	if (ha->dpc_pid < 0) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to start DPC thread!\n");

		ret = -ENODEV;
		goto probe_failed;
	}
	wait_for_completion(&ha->dpc_inited);

	host->this_id = 255;
	host->cmd_per_lun = 3;
	host->unique_id = ha->instance;
	host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = ha->ports - 1;
	host->max_lun = MAX_LUNS;
	host->transportt = qla2xxx_transport_template;

	ret = request_irq(pdev->irq, ha->isp_ops.intr_handler,
	    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
	if (ret) {
		qla_printk(KERN_WARNING, ha,
		    "Failed to reserve interrupt %d already in use.\n",
		    pdev->irq);
		goto probe_failed;
	}
	host->irq = pdev->irq;

	/* Initialize the timer */
	qla2x00_start_timer(ha, qla2x00_timer, WATCH_INTERVAL);

	DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
	    ha->host_no, ha));

	ha->isp_ops.disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	reg = ha->iobase;
	if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
	} else {
		WRT_REG_WORD(&reg->isp.semaphore, 0);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
		/* Enable proper parity */
		if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) {
			if (IS_QLA2300(ha))
				/* SRAM parity */
				WRT_REG_WORD(&reg->isp.hccr,
				    (HCCR_ENABLE_PARITY + 0x1));
			else
				/* SRAM, Instruction RAM and GP RAM parity */
				WRT_REG_WORD(&reg->isp.hccr,
				    (HCCR_ENABLE_PARITY + 0x7));
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ha->isp_ops.enable_intrs(ha);

	/* v2.19.5b6 */
	/*
	 * Wait around max loop_reset_delay secs for the devices to come
	 * on-line. We don't want Linux scanning before we are ready.
	 *
	 */
	for (wait_switch = jiffies + (ha->loop_reset_delay * HZ);
	    time_before(jiffies, wait_switch) &&
	    !(ha->device_flags & (DFLG_NO_CABLE | DFLG_FABRIC_DEVICES))
	    && (ha->device_flags & SWITCH_FOUND) ;) {

		qla2x00_check_fabric_devices(ha);

		msleep(10);
	}

	pci_set_drvdata(pdev, ha);
	ha->flags.init_done = 1;
	num_hosts++;

	ret = scsi_add_host(host, &pdev->dev);
	if (ret)
		goto probe_failed;

	qla2x00_alloc_sysfs_attr(ha);

	qla2x00_init_host_attr(ha);

	qla_printk(KERN_INFO, ha, "\n"
	    " QLogic Fibre Channel HBA Driver: %s\n"
	    " QLogic %s - %s\n"
	    " %s: %s @ %s hdma%c, host#=%ld, fw=%s\n", qla2x00_version_str,
	    ha->model_number, ha->model_desc ? ha->model_desc : "",
	    ha->brd_info->isp_name, ha->isp_ops.pci_info_str(ha, pci_info),
	    pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
	    ha->host_no, ha->isp_ops.fw_version_str(ha, fw_str));

	/* Go with fc_rport registration. */
	list_for_each_entry(fcport, &ha->fcports, list)
		qla2x00_reg_remote_port(ha, fcport);

	return 0;

probe_failed:
	qla2x00_free_device(ha);

	scsi_host_put(host);

probe_disable_device:
	pci_disable_device(pdev);

probe_out:
	return ret;
}
EXPORT_SYMBOL_GPL(qla2x00_probe_one);

void qla2x00_remove_one(struct pci_dev *pdev)
{
	scsi_qla_host_t *ha;

	ha = pci_get_drvdata(pdev);

	qla2x00_free_sysfs_attr(ha);

	fc_remove_host(ha->host);

	scsi_remove_host(ha->host);

	qla2x00_free_device(ha);

	scsi_host_put(ha->host);

	pci_set_drvdata(pdev, NULL);
}
EXPORT_SYMBOL_GPL(qla2x00_remove_one);
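/*
 * Tear down a host instance: stop the watchdog timer and DPC thread, stop
 * the firmware, disable interrupts, release adapter memory, free the IRQ
 * and unmap/release the PCI regions.  Used both from qla2x00_remove_one()
 * and from the probe error path above.
 */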
static void
qla2x00_free_device(scsi_qla_host_t *ha)
{
	int ret;

	/* Abort any outstanding IO descriptors. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		qla2x00_cancel_io_descriptors(ha);

	/* Disable timer */
	if (ha->timer_active)
		qla2x00_stop_timer(ha);

	/* Kill the kernel thread for this host */
	if (ha->dpc_pid >= 0) {
		ha->dpc_should_die = 1;
		wmb();
		ret = kill_proc(ha->dpc_pid, SIGHUP, 1);
		if (ret) {
			qla_printk(KERN_ERR, ha,
			    "Unable to signal DPC thread -- (%d)\n", ret);

			/* TODO: SOMETHING MORE??? */
		} else {
			wait_for_completion(&ha->dpc_exited);
		}
	}

	/* Stop currently executing firmware. */
	qla2x00_stop_firmware(ha);

	/* turn-off interrupts on the card */
	if (ha->interrupts_on)
		ha->isp_ops.disable_intrs(ha);

	qla2x00_mem_free(ha);

	ha->flags.online = 0;

	/* Detach interrupts */
	if (ha->pdev->irq)
		free_irq(ha->pdev->irq, ha);

	/* release io space registers */
	if (ha->iobase)
		iounmap(ha->iobase);
	pci_release_regions(ha->pdev);

	pci_disable_device(ha->pdev);
}

/*
 * qla2x00_mark_device_lost Updates fcport state when device goes offline.
 *
 * Input: ha = adapter block pointer.  fcport = port structure pointer.
 *
 * Return: None.
 *
 * Context:
 */
void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
    int do_login)
{
	if (atomic_read(&fcport->state) == FCS_ONLINE && fcport->rport)
		schedule_work(&fcport->rport_del_work);

	/*
	 * We may need to retry the login, so don't change the state of the
	 * port but do the retries.
	 */
	if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
		atomic_set(&fcport->state, FCS_DEVICE_LOST);

	if (!do_login)
		return;

	if (fcport->login_retry == 0) {
		fcport->login_retry = ha->login_retry_count;
		set_bit(RELOGIN_NEEDED, &ha->dpc_flags);

		DEBUG(printk("scsi(%ld): Port login retry: "
		    "%02x%02x%02x%02x%02x%02x%02x%02x, "
		    "id = 0x%04x retry cnt=%d\n",
		    ha->host_no,
		    fcport->port_name[0],
		    fcport->port_name[1],
		    fcport->port_name[2],
		    fcport->port_name[3],
		    fcport->port_name[4],
		    fcport->port_name[5],
		    fcport->port_name[6],
		    fcport->port_name[7],
		    fcport->loop_id,
		    fcport->login_retry));
	}
}

/*
 * qla2x00_mark_all_devices_lost
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
void
qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha)
{
	fc_port_t *fcport;

	list_for_each_entry(fcport, &ha->fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		/*
		 * No point in marking the device as lost, if the device is
		 * already DEAD.
		 */
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
			continue;
		if (atomic_read(&fcport->state) == FCS_ONLINE && fcport->rport)
			schedule_work(&fcport->rport_del_work);
		atomic_set(&fcport->state, FCS_DEVICE_LOST);
	}
}

/*
 * qla2x00_mem_alloc
 *	Allocates adapter memory.
 *
 * Returns:
 *	0 = success.
 *	1 = failure.
 */
static uint8_t
qla2x00_mem_alloc(scsi_qla_host_t *ha)
{
	char name[16];
	uint8_t status = 1;
	int retry = 10;

	do {
		/*
		 * This will loop only once if everything goes well, else some
		 * number of retries will be performed to get around a kernel
		 * bug where available mem is not allocated until after a
		 * little delay and a retry.
		 */
		ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
		    (ha->request_q_length + 1) * sizeof(request_t),
		    &ha->request_dma, GFP_KERNEL);
		if (ha->request_ring == NULL) {
			qla_printk(KERN_WARNING, ha,
			    "Memory Allocation failed - request_ring\n");

			qla2x00_mem_free(ha);
			msleep(100);

			continue;
		}

		ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
		    (ha->response_q_length + 1) * sizeof(response_t),
		    &ha->response_dma, GFP_KERNEL);
		if (ha->response_ring == NULL) {
			qla_printk(KERN_WARNING, ha,
			    "Memory Allocation failed - response_ring\n");

			qla2x00_mem_free(ha);
			msleep(100);

			continue;
		}

		ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
		    &ha->gid_list_dma, GFP_KERNEL);
		if (ha->gid_list == NULL) {
			qla_printk(KERN_WARNING, ha,
			    "Memory Allocation failed - gid_list\n");

			qla2x00_mem_free(ha);
			msleep(100);

			continue;
		}

		ha->rlc_rsp = dma_alloc_coherent(&ha->pdev->dev,
		    sizeof(rpt_lun_cmd_rsp_t), &ha->rlc_rsp_dma, GFP_KERNEL);
		if (ha->rlc_rsp == NULL) {
			qla_printk(KERN_WARNING, ha,
			    "Memory Allocation failed - rlc");

			qla2x00_mem_free(ha);
			msleep(100);

			continue;
		}

		snprintf(name, sizeof(name), "qla2xxx_%ld", ha->host_no);
		ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
		    DMA_POOL_SIZE, 8, 0);
		if (ha->s_dma_pool == NULL) {
			qla_printk(KERN_WARNING, ha,
			    "Memory Allocation failed - s_dma_pool\n");

			qla2x00_mem_free(ha);
			msleep(100);

			continue;
		}

		/* get consistent memory allocated for init control block */
		ha->init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
		    &ha->init_cb_dma);
		if (ha->init_cb == NULL) {
			qla_printk(KERN_WARNING, ha,
			    "Memory Allocation failed - init_cb\n");

			qla2x00_mem_free(ha);
			msleep(100);

			continue;
		}
		memset(ha->init_cb, 0, ha->init_cb_size);

		/* Get consistent memory allocated for Get Port Database cmd */
		ha->iodesc_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
		    &ha->iodesc_pd_dma);
		if (ha->iodesc_pd == NULL) {
			/* error */
			qla_printk(KERN_WARNING, ha,
			    "Memory Allocation failed - iodesc_pd\n");

			qla2x00_mem_free(ha);
			msleep(100);

			continue;
		}
		memset(ha->iodesc_pd, 0, PORT_DATABASE_SIZE);
		/* Allocate ioctl related memory. */
		if (qla2x00_alloc_ioctl_mem(ha)) {
			qla_printk(KERN_WARNING, ha,
			    "Memory Allocation failed - ioctl_mem\n");

			qla2x00_mem_free(ha);
			msleep(100);

			continue;
		}

		if (qla2x00_allocate_sp_pool(ha)) {
			qla_printk(KERN_WARNING, ha,
			    "Memory Allocation failed - "
			    "qla2x00_allocate_sp_pool()\n");

			qla2x00_mem_free(ha);
			msleep(100);

			continue;
		}

		/* Allocate memory for SNS commands */
		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
			/* Get consistent memory allocated for SNS commands */
			ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
			    sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma,
			    GFP_KERNEL);
			if (ha->sns_cmd == NULL) {
				/* error */
				qla_printk(KERN_WARNING, ha,
				    "Memory Allocation failed - sns_cmd\n");

				qla2x00_mem_free(ha);
				msleep(100);

				continue;
			}
			memset(ha->sns_cmd, 0, sizeof(struct sns_cmd_pkt));
		} else {
			/* Get consistent memory allocated for MS IOCB */
			ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
			    &ha->ms_iocb_dma);
			if (ha->ms_iocb == NULL) {
				/* error */
				qla_printk(KERN_WARNING, ha,
				    "Memory Allocation failed - ms_iocb\n");

				qla2x00_mem_free(ha);
				msleep(100);

				continue;
			}
			memset(ha->ms_iocb, 0, sizeof(ms_iocb_entry_t));

			/*
			 * Get consistent memory allocated for CT SNS
			 * commands
			 */
			ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
			    sizeof(struct ct_sns_pkt), &ha->ct_sns_dma,
			    GFP_KERNEL);
			if (ha->ct_sns == NULL) {
				/* error */
				qla_printk(KERN_WARNING, ha,
				    "Memory Allocation failed - ct_sns\n");

				qla2x00_mem_free(ha);
				msleep(100);

				continue;
			}
			memset(ha->ct_sns, 0, sizeof(struct ct_sns_pkt));
		}

		/* Done all allocations without any error. */
		status = 0;

	} while (retry-- && status != 0);

	if (status) {
		printk(KERN_WARNING
		    "%s(): **** FAILED ****\n", __func__);
	}

	return (status);
}
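/*
 * Note that qla2x00_mem_free() below also serves as the unwind path for a
 * partially completed qla2x00_mem_alloc() pass, which is why every pointer
 * is checked before being freed and cleared afterwards.
 */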
/*
 * qla2x00_mem_free
 *	Frees all adapter allocated memory.
 *
 * Input:
 *	ha = adapter block pointer.
 */
static void
qla2x00_mem_free(scsi_qla_host_t *ha)
{
	struct list_head *fcpl, *fcptemp;
	fc_port_t *fcport;
	unsigned int wtime;	/* max wait time if mbx cmd is busy. */

	if (ha == NULL) {
		/* error */
		DEBUG2(printk("%s(): ERROR invalid ha pointer.\n", __func__));
		return;
	}

	/* Make sure all other threads are stopped. */
	wtime = 60 * 1000;
	while (ha->dpc_wait && wtime)
		wtime = msleep_interruptible(wtime);

	/* free ioctl memory */
	qla2x00_free_ioctl_mem(ha);

	/* free sp pool */
	qla2x00_free_sp_pool(ha);

	if (ha->sns_cmd)
		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
		    ha->sns_cmd, ha->sns_cmd_dma);

	if (ha->ct_sns)
		dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
		    ha->ct_sns, ha->ct_sns_dma);

	if (ha->ms_iocb)
		dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);

	if (ha->iodesc_pd)
		dma_pool_free(ha->s_dma_pool, ha->iodesc_pd, ha->iodesc_pd_dma);

	if (ha->init_cb)
		dma_pool_free(ha->s_dma_pool, ha->init_cb, ha->init_cb_dma);

	if (ha->s_dma_pool)
		dma_pool_destroy(ha->s_dma_pool);

	if (ha->rlc_rsp)
		dma_free_coherent(&ha->pdev->dev,
		    sizeof(rpt_lun_cmd_rsp_t), ha->rlc_rsp,
		    ha->rlc_rsp_dma);

	if (ha->gid_list)
		dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
		    ha->gid_list_dma);

	if (ha->response_ring)
		dma_free_coherent(&ha->pdev->dev,
		    (ha->response_q_length + 1) * sizeof(response_t),
		    ha->response_ring, ha->response_dma);

	if (ha->request_ring)
		dma_free_coherent(&ha->pdev->dev,
		    (ha->request_q_length + 1) * sizeof(request_t),
		    ha->request_ring, ha->request_dma);

	ha->sns_cmd = NULL;
	ha->sns_cmd_dma = 0;
	ha->ct_sns = NULL;
	ha->ct_sns_dma = 0;
	ha->ms_iocb = NULL;
	ha->ms_iocb_dma = 0;
	ha->iodesc_pd = NULL;
	ha->iodesc_pd_dma = 0;
	ha->init_cb = NULL;
	ha->init_cb_dma = 0;

	ha->s_dma_pool = NULL;

	ha->rlc_rsp = NULL;
	ha->rlc_rsp_dma = 0;
	ha->gid_list = NULL;
	ha->gid_list_dma = 0;

	ha->response_ring = NULL;
	ha->response_dma = 0;
	ha->request_ring = NULL;
	ha->request_dma = 0;

	list_for_each_safe(fcpl, fcptemp, &ha->fcports) {
		fcport = list_entry(fcpl, fc_port_t, list);

		/* fc ports */
		list_del_init(&fcport->list);
		kfree(fcport);
	}
	INIT_LIST_HEAD(&ha->fcports);

	if (ha->fw_dump)
		free_pages((unsigned long)ha->fw_dump, ha->fw_dump_order);

	vfree(ha->fw_dump24);

	vfree(ha->fw_dump_buffer);

	ha->fw_dump = NULL;
	ha->fw_dump24 = NULL;
	ha->fw_dumped = 0;
	ha->fw_dump_reading = 0;
	ha->fw_dump_buffer = NULL;
}

/*
 * qla2x00_allocate_sp_pool
 *	This routine is called during initialization to allocate
 *	memory for local srb_t.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Context:
 *	Kernel context.
 *
 * Note: Sets the ref_count for non Null sp to one.
 */
static int
qla2x00_allocate_sp_pool(scsi_qla_host_t *ha)
{
	int rval;

	rval = QLA_SUCCESS;
	ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
	    mempool_free_slab, srb_cachep);
	if (ha->srb_mempool == NULL) {
		qla_printk(KERN_INFO, ha, "Unable to allocate SRB mempool.\n");
		rval = QLA_FUNCTION_FAILED;
	}
	return (rval);
}

/*
 * This routine frees the SRB mempool allocated by
 * qla2x00_allocate_sp_pool().
 */
static void
qla2x00_free_sp_pool(scsi_qla_host_t *ha)
{
	if (ha->srb_mempool) {
		mempool_destroy(ha->srb_mempool);
		ha->srb_mempool = NULL;
	}
}

/**************************************************************************
* qla2x00_do_dpc
*   This kernel thread is a task that is scheduled by the interrupt handler
*   to perform the background processing for interrupts.
*
* Notes:
* This task always runs in the context of a kernel thread.  It
* is kicked off by the driver's detect code, and one thread is started
* per adapter.  It immediately goes to sleep and waits for
* some fibre event.  When either the interrupt handler or
* the timer routine detects an event, it sets one of the task
* bits and then wakes us up.  (A simplified sketch of this wake-up
* pattern follows the function below.)
**************************************************************************/
static int
qla2x00_do_dpc(void *data)
{
	DECLARE_MUTEX_LOCKED(sem);
	scsi_qla_host_t *ha;
	fc_port_t	*fcport;
	uint8_t		status;
	uint16_t	next_loopid;

	ha = (scsi_qla_host_t *)data;

	lock_kernel();

	daemonize("%s_dpc", ha->host_str);
	allow_signal(SIGHUP);

	ha->dpc_wait = &sem;

	set_user_nice(current, -20);

	unlock_kernel();

	complete(&ha->dpc_inited);

	while (1) {
		DEBUG3(printk("qla2x00: DPC handler sleeping\n"));

		if (down_interruptible(&sem))
			break;

		if (ha->dpc_should_die)
			break;

		DEBUG3(printk("qla2x00: DPC handler waking up\n"));

		/* Initialization not yet finished. Don't do anything yet. */
		if (!ha->flags.init_done || ha->dpc_active)
			continue;

		DEBUG3(printk("scsi(%ld): DPC handler\n", ha->host_no));

		ha->dpc_active = 1;

		if (ha->flags.mbox_busy) {
			ha->dpc_active = 0;
			continue;
		}

		if (test_and_clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) {

			DEBUG(printk("scsi(%ld): dpc: sched "
			    "qla2x00_abort_isp ha = %p\n",
			    ha->host_no, ha));
			if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
			    &ha->dpc_flags))) {

				if (qla2x00_abort_isp(ha)) {
					/* failed. retry later */
					set_bit(ISP_ABORT_NEEDED,
					    &ha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
			}
			DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
			    ha->host_no));
		}

		if (test_and_clear_bit(LOOP_RESET_NEEDED, &ha->dpc_flags)) {
			DEBUG(printk("scsi(%ld): dpc: sched loop_reset()\n",
			    ha->host_no));
			qla2x00_loop_reset(ha);
		}

		if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) &&
		    (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) {

			DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
			    ha->host_no));

			qla2x00_rst_aen(ha);
			clear_bit(RESET_ACTIVE, &ha->dpc_flags);
		}

		/* Retry each device up to login retry count */
		if ((test_and_clear_bit(RELOGIN_NEEDED, &ha->dpc_flags)) &&
		    !test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) &&
		    atomic_read(&ha->loop_state) != LOOP_DOWN) {

			DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
			    ha->host_no));

			next_loopid = 0;
			list_for_each_entry(fcport, &ha->fcports, list) {
				if (fcport->port_type != FCT_TARGET)
					continue;

				/*
				 * If the port is not ONLINE then try to login
				 * to it if we haven't run out of retries.
				 */
				if (atomic_read(&fcport->state) != FCS_ONLINE &&
				    fcport->login_retry) {

					fcport->login_retry--;
					if (fcport->flags & FCF_FABRIC_DEVICE) {
						if (fcport->flags &
						    FCF_TAPE_PRESENT)
							ha->isp_ops.fabric_logout(
							    ha, fcport->loop_id,
							    fcport->d_id.b.domain,
							    fcport->d_id.b.area,
							    fcport->d_id.b.al_pa);
						status = qla2x00_fabric_login(
						    ha, fcport, &next_loopid);
					} else
						status =
						    qla2x00_local_device_login(
							ha, fcport->loop_id);

					if (status == QLA_SUCCESS) {
						fcport->old_loop_id = fcport->loop_id;

						DEBUG(printk("scsi(%ld): port login OK: logged in ID 0x%x\n",
						    ha->host_no, fcport->loop_id));

						fcport->port_login_retry_count =
						    ha->port_down_retry_count * PORT_RETRY_TIME;
						atomic_set(&fcport->state, FCS_ONLINE);
						atomic_set(&fcport->port_down_timer,
						    ha->port_down_retry_count * PORT_RETRY_TIME);

						fcport->login_retry = 0;
					} else if (status == 1) {
						set_bit(RELOGIN_NEEDED, &ha->dpc_flags);
						/* retry the login again */
						DEBUG(printk("scsi(%ld): Retrying %d login again loop_id 0x%x\n",
						    ha->host_no,
						    fcport->login_retry, fcport->loop_id));
					} else {
						fcport->login_retry = 0;
					}
				}
				if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
					break;
			}
			DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
			    ha->host_no));
		}

		if ((test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags)) &&
		    atomic_read(&ha->loop_state) != LOOP_DOWN) {

			clear_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags);
			DEBUG(printk("scsi(%ld): qla2x00_login_retry()\n",
			    ha->host_no));

			set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);

			DEBUG(printk("scsi(%ld): qla2x00_login_retry - end\n",
			    ha->host_no));
		}

		if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {

			DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
			    ha->host_no));

			if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
			    &ha->dpc_flags))) {

				qla2x00_loop_resync(ha);

				clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
			}

			DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
			    ha->host_no));
		}

		if (test_and_clear_bit(FCPORT_RESCAN_NEEDED, &ha->dpc_flags)) {

			DEBUG(printk("scsi(%ld): Rescan flagged fcports...\n",
			    ha->host_no));

			qla2x00_rescan_fcports(ha);

			DEBUG(printk("scsi(%ld): Rescan flagged fcports..."
			    "end.\n",
			    ha->host_no));
		}

		if (!ha->interrupts_on)
			ha->isp_ops.enable_intrs(ha);

		ha->dpc_active = 0;
	} /* End of while(1) */

	DEBUG(printk("scsi(%ld): DPC handler exiting\n", ha->host_no));

	/*
	 * Make sure that nobody tries to wake us up again.
	 */
	ha->dpc_wait = NULL;
	ha->dpc_active = 0;

	complete_and_exit(&ha->dpc_exited, 0);
}

/*
 * qla2x00_rst_aen
 *      Processes asynchronous reset.
 *
 * Input:
 *      ha = adapter block pointer.
 */
static void
qla2x00_rst_aen(scsi_qla_host_t *ha)
{
	if (ha->flags.online && !ha->flags.reset_active &&
	    !atomic_read(&ha->loop_down_timer) &&
	    !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
		do {
			clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);

			/*
			 * Issue marker command only when we are going to start
			 * the I/O.
			 */
			ha->marker_needed = 1;
		} while (!atomic_read(&ha->loop_down_timer) &&
		    (test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags)));
	}
}

static void
qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp)
{
	struct scsi_cmnd *cmd = sp->cmd;

	if (sp->flags & SRB_DMA_VALID) {
		if (cmd->use_sg) {
			dma_unmap_sg(&ha->pdev->dev, cmd->request_buffer,
			    cmd->use_sg, cmd->sc_data_direction);
		} else if (cmd->request_bufflen) {
			dma_unmap_single(&ha->pdev->dev, sp->dma_handle,
			    cmd->request_bufflen, cmd->sc_data_direction);
		}
		sp->flags &= ~SRB_DMA_VALID;
	}
	CMD_SP(cmd) = NULL;
}

void
qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp)
{
	struct scsi_cmnd *cmd = sp->cmd;

	qla2x00_sp_free_dma(ha, sp);

	mempool_free(sp, ha->srb_mempool);

	cmd->scsi_done(cmd);
}

/**************************************************************************
*   qla2x00_timer
*
* Description:
*   One second timer
*
* Context: Interrupt
***************************************************************************/
static void
qla2x00_timer(scsi_qla_host_t *ha)
{
	unsigned long	cpu_flags = 0;
	fc_port_t	*fcport;
	int		start_dpc = 0;
	int		index;
	srb_t		*sp;
	int		t;

	/*
	 * Ports - Port down timer.
	 *
	 * Whenever a port is in the LOST state we start decrementing its port
	 * down timer every second until it reaches zero. Once it reaches zero
	 * the port is marked DEAD.
	 */
	t = 0;
	list_for_each_entry(fcport, &ha->fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {

			if (atomic_read(&fcport->port_down_timer) == 0)
				continue;

			if (atomic_dec_and_test(&fcport->port_down_timer) != 0)
				atomic_set(&fcport->state, FCS_DEVICE_DEAD);

			DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
			    "%d remaining\n",
			    ha->host_no,
			    t, atomic_read(&fcport->port_down_timer)));
		}
		t++;
	} /* End of for fcport */


	/* Loop down handler. */
	if (atomic_read(&ha->loop_down_timer) > 0 &&
	    !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) && ha->flags.online) {

		if (atomic_read(&ha->loop_down_timer) ==
		    ha->loop_down_abort_time) {

			DEBUG(printk("scsi(%ld): Loop Down - aborting the "
			    "queues before time expire\n",
			    ha->host_no));

			if (!IS_QLA2100(ha) && ha->link_down_timeout)
				atomic_set(&ha->loop_state, LOOP_DEAD);

			/* Schedule an ISP abort to return any tape commands. */
			spin_lock_irqsave(&ha->hardware_lock, cpu_flags);
			for (index = 1; index < MAX_OUTSTANDING_COMMANDS;
			    index++) {
				fc_port_t *sfcp;

				sp = ha->outstanding_cmds[index];
				if (!sp)
					continue;
				sfcp = sp->fcport;
				if (!(sfcp->flags & FCF_TAPE_PRESENT))
					continue;

				set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
				break;
			}
			spin_unlock_irqrestore(&ha->hardware_lock, cpu_flags);

			set_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags);
			start_dpc++;
		}

		/* if the loop has been down for 4 minutes, reinit adapter */
		if (atomic_dec_and_test(&ha->loop_down_timer) != 0) {
			DEBUG(printk("scsi(%ld): Loop down exceed 4 mins - "
			    "restarting queues.\n",
			    ha->host_no));

			set_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags);
			start_dpc++;

			if (!(ha->device_flags & DFLG_NO_CABLE)) {
				DEBUG(printk("scsi(%ld): Loop down - "
				    "aborting ISP.\n",
				    ha->host_no));
				qla_printk(KERN_WARNING, ha,
				    "Loop down - aborting ISP.\n");

				set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			}
		}
		DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
		    ha->host_no,
		    atomic_read(&ha->loop_down_timer)));
	}

	/* Schedule the DPC routine if needed */
	if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
	    test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
	    test_bit(LOOP_RESET_NEEDED, &ha->dpc_flags) ||
	    start_dpc ||
	    test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) ||
	    test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) ||
	    test_bit(RELOGIN_NEEDED, &ha->dpc_flags)) &&
	    ha->dpc_wait && !ha->dpc_active) {

		up(ha->dpc_wait);
	}

	qla2x00_restart_timer(ha, WATCH_INTERVAL);
}

/* XXX(hch): crude hack to emulate a down_timeout() */
int
qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
{
	const unsigned int step = 100; /* msecs */
	int iterations = jiffies_to_msecs(timeout) / step;

	do {
		if (!down_trylock(sema))
			return 0;
		if (msleep_interruptible(step))
			break;
	} while (--iterations >= 0);

	return -ETIMEDOUT;
}
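
/*
 * Hypothetical usage sketch for qla2x00_down_timeout() (illustrative only;
 * some_sem and the 5 * HZ timeout are made-up values, not taken from the
 * driver):
 *
 *	struct semaphore some_sem;
 *
 *	sema_init(&some_sem, 0);
 *	...
 *	if (qla2x00_down_timeout(&some_sem, 5 * HZ) != 0)
 *		printk(KERN_WARNING "wait timed out\n");
 *
 * The helper simply polls down_trylock() every 100 milliseconds until the
 * semaphore can be taken or the timeout (converted from jiffies to
 * milliseconds) expires, hence the "crude hack" comment above.
 */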

static struct qla_board_info qla_board_tbl[] = {
	{
		.drv_name	= "qla2400",
		.isp_name	= "ISP2422",
		.fw_fname	= "ql2400_fw.bin",
		.sht		= &qla24xx_driver_template,
	},
	{
		.drv_name	= "qla2400",
		.isp_name	= "ISP2432",
		.fw_fname	= "ql2400_fw.bin",
		.sht		= &qla24xx_driver_template,
	},
};

static struct pci_device_id qla2xxx_pci_tbl[] = {
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP2422,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (unsigned long)&qla_board_tbl[0],
	},
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP2432,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (unsigned long)&qla_board_tbl[1],
	},
	{0, 0},
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);

static int __devinit
qla2xxx_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return qla2x00_probe_one(pdev,
	    (struct qla_board_info *)id->driver_data);
}

static void __devexit
qla2xxx_remove_one(struct pci_dev *pdev)
{
	qla2x00_remove_one(pdev);
}

static struct pci_driver qla2xxx_pci_driver = {
	.name		= "qla2xxx",
	.id_table	= qla2xxx_pci_tbl,
	.probe		= qla2xxx_probe_one,
	.remove		= __devexit_p(qla2xxx_remove_one),
};

/**
 * qla2x00_module_init - Module initialization.
 **/
static int __init
qla2x00_module_init(void)
{
	int ret = 0;

	/* Allocate cache for SRBs. */
	srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
	    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (srb_cachep == NULL) {
		printk(KERN_ERR
		    "qla2xxx: Unable to allocate SRB cache...Failing load!\n");
		return -ENOMEM;
	}

	/* Derive version string. */
	strcpy(qla2x00_version_str, QLA2XXX_VERSION);
#if DEBUG_QLA2100
	strcat(qla2x00_version_str, "-debug");
#endif
	qla2xxx_transport_template =
	    fc_attach_transport(&qla2xxx_transport_functions);
	if (!qla2xxx_transport_template) {
		kmem_cache_destroy(srb_cachep);
		return -ENODEV;
	}

	printk(KERN_INFO "QLogic Fibre Channel HBA Driver\n");
	ret = pci_module_init(&qla2xxx_pci_driver);
	if (ret) {
		kmem_cache_destroy(srb_cachep);
		fc_release_transport(qla2xxx_transport_template);
	}
	return ret;
}

/**
 * qla2x00_module_exit - Module cleanup.
 **/
static void __exit
qla2x00_module_exit(void)
{
	pci_unregister_driver(&qla2xxx_pci_driver);
	kmem_cache_destroy(srb_cachep);
	fc_release_transport(qla2xxx_transport_template);
}

module_init(qla2x00_module_init);
module_exit(qla2x00_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA2XXX_VERSION);
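
/*
 * The failure paths in qla2x00_module_init() unwind only the steps that
 * have already succeeded, in reverse order.  A generic sketch of the same
 * convention, written with gotos as is common elsewhere in the kernel
 * (setup_a/b/c and teardown_a/b are hypothetical):
 *
 *	static int __init example_init(void)
 *	{
 *		int ret;
 *
 *		ret = setup_a();
 *		if (ret)
 *			return ret;
 *
 *		ret = setup_b();
 *		if (ret)
 *			goto err_a;
 *
 *		ret = setup_c();
 *		if (ret)
 *			goto err_b;
 *
 *		return 0;
 *
 *	err_b:
 *		teardown_b();
 *	err_a:
 *		teardown_a();
 *		return ret;
 *	}
 */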