/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2012 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

#include "qla_target.h"

/*
 * Driver version
 */
char qla2x00_version_str[40];

static int apidev_major;

/*
 * SRB allocation cache
 */
static struct kmem_cache *srb_cachep;

/*
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;

/*
 * error level for logging
 */
int ql_errlev = ql_log_all;

static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
	"Specify if Class 2 operations are supported from the very "
	"beginning. Default is 0 - class 2 not supported.");

int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
	"Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO);
MODULE_PARM_DESC(qlport_down_retry,
	"Maximum number of command retries to a port that returns "
	"a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
	"Option to enable PLOGI to devices that are not present after "
	"a Fabric scan. This is needed for several broken switches. "
	"Default is 0 - no PLOGI. 1 - perform PLOGI.");

int ql2xloginretrycount = 0;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount,
	"Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO);
MODULE_PARM_DESC(ql2xallocfwdump,
	"Option to enable allocation of memory for a firmware dump "
	"during HBA initialization. Memory allocation requirements "
	"vary by ISP type. Default is 1 - allocate memory.");

int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
	"Option to enable extended error logging,\n"
	"\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
	"\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
	"\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
	"\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
	"\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
	"\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
	"\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
	"\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
	"\t\t0x00008000 - Verbose. 0x00004000 - Target.\n"
	"\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n"
	"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
	"\t\t0x1e400000 - Preferred value for capturing essential "
	"debug information (equivalent to old "
	"ql2xextended_error_logging=1).\n"
	"\t\tDo LOGICAL OR of the value to enable more than one level");

int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
	"Set to control shifting of command type processing "
	"based on total number of SG elements.");

static void qla2x00_free_device(scsi_qla_host_t *);

int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xfdmienable,
	"Enables FDMI registrations. "
	"0 - no FDMI. Default is 1 - perform FDMI.");

#define MAX_Q_DEPTH	32
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
	"Maximum queue depth to set for each LUN. "
	"Default is 32.");

int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
	" Enable T10-CRC-DIF:\n"
	" Default is 2.\n"
	"  0 -- No DIF Support\n"
	"  1 -- Enable DIF\n"
	"  2 -- Enable DIF for all types, except Type 0.\n");

int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
	" Enable T10-CRC-DIF Error isolation by HBA:\n"
	" Default is 2.\n"
	" 0 -- Error isolation disabled\n"
	" 1 -- Error isolation enabled only for DIX Type 0\n"
	" 2 -- Error isolation enabled for all Types\n");

int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
	"Enables iIDMA settings. "
	"Default is 1 - perform iIDMA. 0 - no iIDMA.");

int ql2xmaxqueues = 1;
module_param(ql2xmaxqueues, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmaxqueues,
	"Enables MQ settings. "
	"Default is 1 for single queue. Set it to number "
	"of queues in MQ mode.");

int ql2xmultique_tag;
module_param(ql2xmultique_tag, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmultique_tag,
	"Enables CPU affinity settings for the driver. "
	"Default is 0 for no affinity of request and response IO. "
	"Set it to 1 to turn on the CPU affinity.");

int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
	"Option to specify location from which to load ISP firmware:\n"
	" 2 -- load firmware via the request_firmware() (hotplug) "
	"interface.\n"
	" 1 -- load firmware from flash.\n"
	" 0 -- use default semantics.\n");

int ql2xetsenable;
module_param(ql2xetsenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xetsenable,
	"Enables firmware ETS burst. "
	"Default is 0 - skip ETS enablement.");

int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr,
	"Option to specify scheme for request queue posting.\n"
	" 0 -- Regular doorbell.\n"
	" 1 -- CAMRAM doorbell (faster).\n");

int ql2xtargetreset = 1;
module_param(ql2xtargetreset, int, S_IRUGO);
MODULE_PARM_DESC(ql2xtargetreset,
	"Enable target reset. "
	"Default is 1 - use hw defaults.");

int ql2xgffidenable;
module_param(ql2xgffidenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xgffidenable,
	"Enables GFF_ID checks of port type. "
	"Default is 0 - Do not use GFF_ID information.");

int ql2xasynctmfenable;
module_param(ql2xasynctmfenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xasynctmfenable,
	"Enables issue of TM IOCBs asynchronously via IOCB mechanism. "
	"Default is 0 - Issue TM IOCBs via mailbox mechanism.");

int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdontresethba,
	"Option to specify reset behaviour.\n"
	" 0 (Default) -- Reset on failure.\n"
	" 1 -- Do not reset on failure.\n");

uint ql2xmaxlun = MAX_LUNS;
module_param(ql2xmaxlun, uint, S_IRUGO);
MODULE_PARM_DESC(ql2xmaxlun,
	"Defines the maximum LU number to register with the SCSI "
	"midlayer. Default is 65535.");

int ql2xmdcapmask = 0x1F;
module_param(ql2xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdcapmask,
	"Set the Minidump driver capture mask level. "
	"Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");

int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
	"Enable/disable MiniDump. "
	"0 - MiniDump disabled. "
	"1 (Default) - MiniDump enabled.");

/*
 * SCSI host template entry points
 */
static int qla2xxx_slave_configure(struct scsi_device *device);
static int qla2xxx_slave_alloc(struct scsi_device *);
static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
static void qla2xxx_scan_start(struct Scsi_Host *);
static void qla2xxx_slave_destroy(struct scsi_device *);
static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int qla2xxx_eh_abort(struct scsi_cmnd *);
static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
static int qla2xxx_eh_host_reset(struct scsi_cmnd *);

static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
static int qla2x00_change_queue_type(struct scsi_device *, int);

struct scsi_host_template qla2xxx_driver_template = {
	.module			= THIS_MODULE,
	.name			= QLA2XXX_DRIVER_NAME,
	.queuecommand		= qla2xxx_queuecommand,

	.eh_abort_handler	= qla2xxx_eh_abort,
	.eh_device_reset_handler = qla2xxx_eh_device_reset,
	.eh_target_reset_handler = qla2xxx_eh_target_reset,
	.eh_bus_reset_handler	= qla2xxx_eh_bus_reset,
	.eh_host_reset_handler	= qla2xxx_eh_host_reset,

	.slave_configure	= qla2xxx_slave_configure,

	.slave_alloc		= qla2xxx_slave_alloc,
	.slave_destroy		= qla2xxx_slave_destroy,
	.scan_finished		= qla2xxx_scan_finished,
	.scan_start		= qla2xxx_scan_start,
	.change_queue_depth	= qla2x00_change_queue_depth,
	.change_queue_type	= qla2x00_change_queue_type,
	.this_id		= -1,
	.cmd_per_lun		= 3,
	.use_clustering		= ENABLE_CLUSTERING,
	.sg_tablesize		= SG_ALL,

	.max_sectors		= 0xFFFF,
	.shost_attrs		= qla2x00_host_attrs,

	.supported_mode		= MODE_INITIATOR,
};

static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;

/* TODO Convert to inlines
 *
 * Timer routines
 */

__inline__ void
qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
{
	init_timer(&vha->timer);
	vha->timer.expires = jiffies + interval * HZ;
	vha->timer.data = (unsigned long)vha;
	vha->timer.function = (void (*)(unsigned long))func;
	add_timer(&vha->timer);
	vha->timer_active = 1;
}

static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	/* Currently used for 82XX only. */
	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_dbg(ql_dbg_timer, vha, 0x600d,
		    "Device in a failed state, returning.\n");
		return;
	}

	mod_timer(&vha->timer, jiffies + interval * HZ);
}

static __inline__ void
qla2x00_stop_timer(scsi_qla_host_t *vha)
{
	del_timer_sync(&vha->timer);
	vha->timer_active = 0;
}

static int qla2x00_do_dpc(void *data);

static void qla2x00_rst_aen(scsi_qla_host_t *);

static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
	struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);

/* -------------------------------------------------------------------------- */
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
	struct rsp_que *rsp)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
	    GFP_KERNEL);
	if (!ha->req_q_map) {
		ql_log(ql_log_fatal, vha, 0x003b,
		    "Unable to allocate memory for request queue ptrs.\n");
		goto fail_req_map;
	}

	ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
	    GFP_KERNEL);
	if (!ha->rsp_q_map) {
		ql_log(ql_log_fatal, vha, 0x003c,
		    "Unable to allocate memory for response queue ptrs.\n");
		goto fail_rsp_map;
	}
	/*
	 * Make sure we record at least the request and response queue zero in
	 * case we need to free them if part of the probe fails.
	 */
	ha->rsp_q_map[0] = rsp;
	ha->req_q_map[0] = req;
	set_bit(0, ha->rsp_qid_map);
	set_bit(0, ha->req_qid_map);
	return 1;

fail_rsp_map:
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;
fail_req_map:
	return -ENOMEM;
}
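/*
 * Editorial note (sketch, not driver code): qla2x00_alloc_queues() returns
 * 1 on success but -ENOMEM on failure, so a caller must compare against the
 * exact success value instead of treating any non-zero return as an error.
 * A hypothetical caller would look like:
 *
 *	if (qla2x00_alloc_queues(ha, req, rsp) != 1)
 *		goto probe_failed;	// both q_map pointers are NULL here
 *	// On success, queue 0 is always recorded:
 *	//   ha->req_q_map[0] == req and ha->rsp_q_map[0] == rsp
 */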
static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
	if (req && req->ring)
		dma_free_coherent(&ha->pdev->dev,
		    (req->length + 1) * sizeof(request_t),
		    req->ring, req->dma);

	kfree(req);
	req = NULL;
}

static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	if (rsp && rsp->ring)
		dma_free_coherent(&ha->pdev->dev,
		    (rsp->length + 1) * sizeof(response_t),
		    rsp->ring, rsp->dma);

	kfree(rsp);
	rsp = NULL;
}

static void qla2x00_free_queues(struct qla_hw_data *ha)
{
	struct req_que *req;
	struct rsp_que *rsp;
	int cnt;

	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
		req = ha->req_q_map[cnt];
		qla2x00_free_req_que(ha, req);
	}
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;

	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
		rsp = ha->rsp_q_map[cnt];
		qla2x00_free_rsp_que(ha, rsp);
	}
	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
}

static int qla25xx_setup_mode(struct scsi_qla_host *vha)
{
	uint16_t options = 0;
	int ques, req, ret;
	struct qla_hw_data *ha = vha->hw;

	if (!(ha->fw_attributes & BIT_6)) {
		ql_log(ql_log_warn, vha, 0x00d8,
		    "Firmware is not multi-queue capable.\n");
		goto fail;
	}
	if (ql2xmultique_tag) {
		/* create a request queue for IO */
		options |= BIT_7;
		req = qla25xx_create_req_que(ha, options, 0, 0, -1,
		    QLA_DEFAULT_QUE_QOS);
		if (!req) {
			ql_log(ql_log_warn, vha, 0x00e0,
			    "Failed to create request queue.\n");
			goto fail;
		}
		ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
		vha->req = ha->req_q_map[req];
		options |= BIT_1;
		for (ques = 1; ques < ha->max_rsp_queues; ques++) {
			ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
			if (!ret) {
				ql_log(ql_log_warn, vha, 0x00e8,
				    "Failed to create response queue.\n");
				goto fail2;
			}
		}
		ha->flags.cpu_affinity_enabled = 1;
		ql_dbg(ql_dbg_multiq, vha, 0xc007,
		    "CPU affinity mode enabled, "
		    "no. of response queues:%d no. of request queues:%d.\n",
		    ha->max_rsp_queues, ha->max_req_queues);
		ql_dbg(ql_dbg_init, vha, 0x00e9,
		    "CPU affinity mode enabled, "
		    "no. of response queues:%d no. of request queues:%d.\n",
		    ha->max_rsp_queues, ha->max_req_queues);
	}
	return 0;

fail2:
	qla25xx_delete_queues(vha);
	destroy_workqueue(ha->wq);
	ha->wq = NULL;
	vha->req = ha->req_q_map[0];
fail:
	ha->mqenable = 0;
	kfree(ha->req_q_map);
	kfree(ha->rsp_q_map);
	ha->max_req_queues = ha->max_rsp_queues = 1;
	return 1;
}

static char *
qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	struct qla_hw_data *ha = vha->hw;
	static char *pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	uint16_t pci_bus;

	strcpy(str, "PCI");
	pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
	if (pci_bus) {
		strcat(str, "-X (");
		strcat(str, pci_bus_modes[pci_bus]);
	} else {
		pci_bus = (ha->pci_attr & BIT_8) >> 8;
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus]);
	}
	strcat(str, " MHz)");

	return (str);
}

static char *
qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	static char *pci_bus_modes[] = { "33", "66", "100", "133", };
	struct qla_hw_data *ha = vha->hw;
	uint32_t pci_bus;
	int pcie_reg;

	pcie_reg = pci_pcie_cap(ha->pdev);
	if (pcie_reg) {
		char lwstr[6];
		uint16_t pcie_lstat, lspeed, lwidth;

		pcie_reg += PCI_EXP_LNKCAP;
		pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
		lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
		lwidth = (pcie_lstat &
		    (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;

		strcpy(str, "PCIe (");
		switch (lspeed) {
		case 1:
			strcat(str, "2.5GT/s ");
			break;
		case 2:
			strcat(str, "5.0GT/s ");
			break;
		case 3:
			strcat(str, "8.0GT/s ");
			break;
		default:
			strcat(str, "<unknown> ");
			break;
		}
		snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
		strcat(str, lwstr);

		return str;
	}

	strcpy(str, "PCI");
	pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
	if (pci_bus == 0 || pci_bus == 8) {
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus >> 3]);
	} else {
		strcat(str, "-X ");
		if (pci_bus & BIT_2)
			strcat(str, "Mode 2");
		else
			strcat(str, "Mode 1");
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
	}
	strcat(str, " MHz)");

	return str;
}

static char *
qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
{
	char un_str[10];
	struct qla_hw_data *ha = vha->hw;

	sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version);

	if (ha->fw_attributes & BIT_9) {
		strcat(str, "FLX");
		return (str);
	}

	switch (ha->fw_attributes & 0xFF) {
	case 0x7:
		strcat(str, "EF");
		break;
	case 0x17:
		strcat(str, "TP");
		break;
	case 0x37:
		strcat(str, "IP");
		break;
	case 0x77:
		strcat(str, "VI");
		break;
	default:
		sprintf(un_str, "(%x)", ha->fw_attributes);
		strcat(str, un_str);
		break;
	}
	if (ha->fw_attributes & 0x100)
		strcat(str, "X");

	return (str);
}

static char *
qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
{
	struct qla_hw_data *ha = vha->hw;

	sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
	return str;
}

void
qla2x00_sp_free_dma(void *vha, void *ptr)
{
	srb_t *sp = (srb_t *)ptr;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *ctx = GET_CMD_CTX_SP(sp);

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* The list is guaranteed to have elements. */
		qla2x00_clean_dsd_pool(ha, sp);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		dma_pool_free(ha->dl_dma_pool, ctx,
		    ((struct crc_context *)ctx)->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;

		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
		mempool_free(ctx1, ha->ctx_mempool);
		ctx1 = NULL;
	}

	CMD_SP(cmd) = NULL;
	mempool_free(sp, ha->srb_mempool);
}

static void
qla2x00_sp_compl(void *data, void *ptr, int res)
{
	struct qla_hw_data *ha = (struct qla_hw_data *)data;
	srb_t *sp = (srb_t *)ptr;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	cmd->result = res;

	if (atomic_read(&sp->ref_count) == 0) {
		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
		    "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
		    sp, GET_CMD_SP(sp));
		if (ql2xextended_error_logging & ql_dbg_io)
			BUG();
		return;
	}
	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	qla2x00_sp_free_dma(ha, sp);
	cmd->scsi_done(cmd);
}
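/*
 * Editorial note (assumption drawn from the surrounding code): an srb's
 * ref_count acts as a simple completion guard.  qla2xxx_queuecommand()
 * below starts it at 1, sp_get() takes an extra reference while the abort
 * path touches the sp, and qla2x00_sp_compl() only tears down the DMA
 * resources and completes the scsi_cmnd once the last reference drops:
 *
 *	atomic_set(&sp->ref_count, 1);		// owner: in-flight command
 *	sp_get(sp);				// extra ref, e.g. eh_abort
 *	...
 *	if (!atomic_dec_and_test(&sp->ref_count))
 *		return;				// someone still holds it
 *	qla2x00_sp_free_dma(ha, sp);		// last ref: tear down
 */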
static int
qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;
	int rval;

	if (ha->flags.eeh_busy) {
		if (ha->flags.pci_channel_io_perm_failure) {
			ql_dbg(ql_dbg_aer, vha, 0x9010,
			    "PCI Channel IO permanent failure, exiting "
			    "cmd=%p.\n", cmd);
			cmd->result = DID_NO_CONNECT << 16;
		} else {
			ql_dbg(ql_dbg_aer, vha, 0x9011,
			    "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
			cmd->result = DID_REQUEUE << 16;
		}
		goto qc24_fail_command;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		cmd->result = rval;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
		    cmd, rval);
		goto qc24_fail_command;
	}

	if (!vha->flags.difdix_supported &&
	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		ql_dbg(ql_dbg_io, vha, 0x3004,
		    "DIF Cap not reg, fail DIF capable cmd's:%p.\n", cmd);
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (!fcport) {
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
		    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			ql_dbg(ql_dbg_io, vha, 0x3005,
			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
			    atomic_read(&fcport->state),
			    atomic_read(&base_vha->loop_state));
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	sp = qla2x00_get_sp(base_vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto qc24_host_busy;

	sp->u.scmd.cmd = cmd;
	sp->type = SRB_SCSI_CMD;
	atomic_set(&sp->ref_count, 1);
	CMD_SP(cmd) = (void *)sp;
	sp->free = qla2x00_sp_free_dma;
	sp->done = qla2x00_sp_compl;

	rval = ha->isp_ops->start_scsi(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
		goto qc24_host_busy_free_sp;
	}

	return 0;

qc24_host_busy_free_sp:
	qla2x00_sp_free_dma(ha, sp);

qc24_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
	cmd->scsi_done(cmd);

	return 0;
}

/*
 * qla2x00_eh_wait_on_command
 *    Waits for the command to be returned by the Firmware for some
 *    max time.
 *
 * Input:
 *    cmd = Scsi Command to wait on.
 *
 * Return:
 *    Not Found : 0
 *    Found : 1
 */
static int
qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
{
#define ABORT_POLLING_PERIOD	1000
#define ABORT_WAIT_ITER		((10 * 1000) / (ABORT_POLLING_PERIOD))
	unsigned long wait_iter = ABORT_WAIT_ITER;
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = QLA_SUCCESS;

	if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
		ql_dbg(ql_dbg_taskm, vha, 0x8005,
		    "Return:eh_wait.\n");
		return ret;
	}

	while (CMD_SP(cmd) && wait_iter--) {
		msleep(ABORT_POLLING_PERIOD);
	}
	if (CMD_SP(cmd))
		ret = QLA_FUNCTION_FAILED;

	return ret;
}

/*
 * qla2x00_wait_for_hba_online
 *    Wait till the HBA is online after going through
 *    <= MAX_RETRIES_OF_ISP_ABORT, or until the HBA
 *    is disabled, i.e. marked offline.
 *
 * Input:
 *    ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching; release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 * Return:
 *    Success (Adapter is online) : 0
 *    Failed  (Adapter is offline/disabled) : 1
 */
int
qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_online;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_online)) {

		msleep(1000);
	}
	if (base_vha->flags.online)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return (return_status);
}
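/*
 * Illustrative usage (editorial sketch, not driver code): the error
 * handlers below call qla2x00_wait_for_hba_online() before issuing a
 * reset, and treat a timeout (QLA_FUNCTION_FAILED after MAX_LOOP_TIMEOUT
 * seconds of polling) as a hard failure:
 *
 *	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
 *		ql_log(ql_log_warn, vha, 0x800a,
 *		    "Wait for hba online failed for cmd=%p.\n", cmd);
 *		return FAILED;		// give up on this eh step
 *	}
 */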
/*
 * qla2x00_wait_for_reset_ready
 *    Wait till the HBA is online after going through
 *    <= MAX_RETRIES_OF_ISP_ABORT, or until the HBA is
 *    disabled, i.e. marked offline, or flash operations
 *    are in progress.
 *
 * Input:
 *    ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching; release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 * Return:
 *    Success (Adapter is online/no flash ops) : 0
 *    Failed  (Adapter is offline/disabled/flash ops in progress) : 1
 */
static int
qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_online;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->optrom_state != QLA_SWAITING ||
	    ha->dpc_active) && time_before(jiffies, wait_online))
		msleep(1000);

	if (base_vha->flags.online && ha->optrom_state == QLA_SWAITING)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_taskm, vha, 0x8019,
	    "%s return status=%d.\n", __func__, return_status);

	return return_status;
}

int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_reset;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_reset)) {

		msleep(1000);

		if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
		    ha->flags.chip_reset_done)
			break;
	}
	if (ha->flags.chip_reset_done)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return return_status;
}

static void
sp_get(struct srb *sp)
{
	atomic_inc(&sp->ref_count);
}
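/*
 * Editorial note: sp_get() must be paired with a completion that drops the
 * reference.  qla2xxx_eh_abort() below takes a reference under
 * hardware_lock, drops the lock around the (sleeping) mailbox abort call,
 * and then invokes sp->done() -- which funnels into qla2x00_sp_compl() and
 * releases the reference -- once it is finished with the sp.
 */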
/**************************************************************************
* qla2xxx_eh_abort
*
* Description:
*    The abort function will abort the specified command.
*
* Input:
*    cmd = Linux SCSI command packet to be aborted.
*
* Returns:
*    Either SUCCESS or FAILED.
*
* Note:
*    Only return FAILED if command not returned by firmware.
**************************************************************************/
static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	srb_t *sp;
	int ret;
	unsigned int id, lun;
	unsigned long flags;
	int wait = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!CMD_SP(cmd))
		return SUCCESS;

	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		return ret;
	ret = SUCCESS;

	id = cmd->device->id;
	lun = cmd->device->lun;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	sp = (srb_t *) CMD_SP(cmd);
	if (!sp) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return SUCCESS;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x8002,
	    "Aborting from RISC nexus=%ld:%d:%d sp=%p cmd=%p\n",
	    vha->host_no, id, lun, sp, cmd);

	/* Get a reference to the sp and drop the lock. */
	sp_get(sp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (ha->isp_ops->abort_command(sp)) {
		ret = FAILED;
		ql_dbg(ql_dbg_taskm, vha, 0x8003,
		    "Abort command mbx failed cmd=%p.\n", cmd);
	} else {
		ql_dbg(ql_dbg_taskm, vha, 0x8004,
		    "Abort command mbx success cmd=%p.\n", cmd);
		wait = 1;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	sp->done(ha, sp, 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Did the command return during mailbox execution? */
	if (ret == FAILED && !CMD_SP(cmd))
		ret = SUCCESS;

	/* Wait for the command to be returned. */
	if (wait) {
		if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x8006,
			    "Abort handler timed out cmd=%p.\n", cmd);
			ret = FAILED;
		}
	}

	ql_log(ql_log_info, vha, 0x801c,
	    "Abort command issued nexus=%ld:%d:%d -- %d %x.\n",
	    vha->host_no, id, lun, wait, ret);

	return ret;
}

int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
	unsigned int l, enum nexus_wait_type type)
{
	int cnt, match, status;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	srb_t *sp;
	struct scsi_cmnd *cmd;

	status = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	req = vha->req;
	for (cnt = 1; status == QLA_SUCCESS &&
	    cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (!sp)
			continue;
		if (sp->type != SRB_SCSI_CMD)
			continue;
		if (vha->vp_idx != sp->fcport->vha->vp_idx)
			continue;
		match = 0;
		cmd = GET_CMD_SP(sp);
		switch (type) {
		case WAIT_HOST:
			match = 1;
			break;
		case WAIT_TARGET:
			match = cmd->device->id == t;
			break;
		case WAIT_LUN:
			match = (cmd->device->id == t &&
			    cmd->device->lun == l);
			break;
		}
		if (!match)
			continue;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		status = qla2x00_eh_wait_on_command(cmd);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return status;
}

static char *reset_errors[] = {
	"HBA not online",
	"HBA not ready",
	"Task management failed",
	"Waiting for command completions",
};

static int
__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
	struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int err;

	if (!fcport) {
		return FAILED;
	}

	err = fc_block_scsi_eh(cmd);
	if (err != 0)
		return err;

	ql_log(ql_log_info, vha, 0x8009,
	    "%s RESET ISSUED nexus=%ld:%d:%d cmd=%p.\n", name, vha->host_no,
	    cmd->device->id, cmd->device->lun, cmd);

	err = 0;
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800a,
		    "Wait for hba online failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 2;
	if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
	    != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800c,
		    "do_reset failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 3;
	if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
	    cmd->device->lun, type) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800d,
		    "wait for pending cmds failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}

	ql_log(ql_log_info, vha, 0x800e,
	    "%s RESET SUCCEEDED nexus:%ld:%d:%d cmd=%p.\n", name,
	    vha->host_no, cmd->device->id, cmd->device->lun, cmd);

	return SUCCESS;

eh_reset_failed:
	ql_log(ql_log_info, vha, 0x800f,
	    "%s RESET FAILED: %s nexus=%ld:%d:%d cmd=%p.\n", name,
	    reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
	    cmd);
	return FAILED;
}

static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
	    ha->isp_ops->lun_reset);
}

static int
qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
	    ha->isp_ops->target_reset);
}

/**************************************************************************
* qla2xxx_eh_bus_reset
*
* Description:
*    The bus reset function will reset the bus and abort any executing
*    commands.
*
* Input:
*    cmd = Linux SCSI command packet of the command that caused the
*          bus reset.
*
* Returns:
*    SUCCESS/FAILURE (defined as macro in scsi.h).
*
**************************************************************************/
static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int ret = FAILED;
	unsigned int id, lun;

	id = cmd->device->id;
	lun = cmd->device->lun;

	if (!fcport) {
		return ret;
	}

	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		return ret;
	ret = FAILED;

	ql_log(ql_log_info, vha, 0x8012,
	    "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_fatal, vha, 0x8013,
		    "Wait for hba online failed board disabled.\n");
		goto eh_bus_reset_done;
	}

	if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
		ret = SUCCESS;

	if (ret == FAILED)
		goto eh_bus_reset_done;

	/* Flush outstanding commands. */
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
	    QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8014,
		    "Wait for pending commands failed.\n");
		ret = FAILED;
	}

eh_bus_reset_done:
	ql_log(ql_log_warn, vha, 0x802b,
	    "BUS RESET %s nexus=%ld:%d:%d.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}

/**************************************************************************
* qla2xxx_eh_host_reset
*
* Description:
*    The reset function will reset the Adapter.
*
* Input:
*    cmd = Linux SCSI command packet of the command that caused the
*          adapter reset.
*
* Returns:
*    Either SUCCESS or FAILED.
*
* Note:
**************************************************************************/
static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = FAILED;
	unsigned int id, lun;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	id = cmd->device->id;
	lun = cmd->device->lun;

	ql_log(ql_log_info, vha, 0x8018,
	    "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);

	if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
		goto eh_host_reset_lock;

	if (vha != base_vha) {
		if (qla2x00_vp_abort_isp(vha))
			goto eh_host_reset_lock;
	} else {
		if (IS_QLA82XX(vha->hw)) {
			if (!qla82xx_fcoe_ctx_reset(vha)) {
				/* Ctx reset success */
				ret = SUCCESS;
				goto eh_host_reset_lock;
			}
			/* fall thru if ctx reset failed */
		}
		if (ha->wq)
			flush_workqueue(ha->wq);

		set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
		if (ha->isp_ops->abort_isp(base_vha)) {
			clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
			/* failed. schedule dpc to try */
			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);

			if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x802a,
				    "wait for hba online failed.\n");
				goto eh_host_reset_lock;
			}
		}
		clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	}

	/* Waiting for command to be returned to OS. */
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
	    QLA_SUCCESS)
		ret = SUCCESS;

eh_host_reset_lock:
	ql_log(ql_log_info, vha, 0x8017,
	    "ADAPTER RESET %s nexus=%ld:%d:%d.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}
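/*
 * Editorial summary of the escalation ladder wired into
 * qla2xxx_driver_template above; the SCSI midlayer tries each handler in
 * turn until one returns SUCCESS:
 *
 *	qla2xxx_eh_abort()		abort one command
 *	qla2xxx_eh_device_reset()	LUN reset    (WAIT_LUN)
 *	qla2xxx_eh_target_reset()	target reset (WAIT_TARGET)
 *	qla2xxx_eh_bus_reset()		loop reset   (WAIT_HOST),
 *					via qla2x00_loop_reset() below
 *	qla2xxx_eh_host_reset()		full adapter reset
 */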
/*
 * qla2x00_loop_reset
 *    Issue loop reset.
 *
 * Input:
 *    ha = adapter block pointer.
 *
 * Returns:
 *    0 = success
 */
int
qla2x00_loop_reset(scsi_qla_host_t *vha)
{
	int ret;
	struct fc_port *fcport;
	struct qla_hw_data *ha = vha->hw;

	if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;

			ret = ha->isp_ops->target_reset(fcport, 0, 0);
			if (ret != QLA_SUCCESS) {
				ql_dbg(ql_dbg_taskm, vha, 0x802c,
				    "Bus Reset failed: Target Reset=%d "
				    "d_id=%x.\n", ret, fcport->d_id.b24);
			}
		}
	}

	if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
		ret = qla2x00_full_login_lip(vha);
		if (ret != QLA_SUCCESS) {
			ql_dbg(ql_dbg_taskm, vha, 0x802d,
			    "full_login_lip=%d.\n", ret);
		}
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
		qla2x00_mark_all_devices_lost(vha, 0);
	}

	if (ha->flags.enable_lip_reset) {
		ret = qla2x00_lip_reset(vha);
		if (ret != QLA_SUCCESS)
			ql_dbg(ql_dbg_taskm, vha, 0x802e,
			    "lip_reset failed (%d).\n", ret);
	}

	/* Issue marker command only when we are going to start the I/O */
	vha->marker_needed = 1;

	return QLA_SUCCESS;
}

void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
	int que, cnt;
	unsigned long flags;
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;
		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				req->outstanding_cmds[cnt] = NULL;
				sp->done(vha, sp, res);
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static int
qla2xxx_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = *(fc_port_t **)rport->dd_data;

	return 0;
}

static int
qla2xxx_slave_configure(struct scsi_device *sdev)
{
	scsi_qla_host_t *vha = shost_priv(sdev->host);
	struct req_que *req = vha->req;

	if (IS_T10_PI_CAPABLE(vha->hw))
		blk_queue_update_dma_alignment(sdev->request_queue, 0x7);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, req->max_q_depth);
	else
		scsi_deactivate_tcq(sdev, req->max_q_depth);
	return 0;
}

static void
qla2xxx_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}

static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
{
	fc_port_t *fcport = (struct fc_port *) sdev->hostdata;

	if (!scsi_track_queue_full(sdev, qdepth))
		return;

	ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
	    "Queue depth adjusted-down to %d for nexus=%ld:%d:%d.\n",
	    sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
}

static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
{
	fc_port_t *fcport = sdev->hostdata;
	struct scsi_qla_host *vha = fcport->vha;
	struct req_que *req = NULL;

	req = vha->req;
	if (!req)
		return;

	if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
		return;

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);

	ql_dbg(ql_dbg_io, vha, 0x302a,
	    "Queue depth adjusted-up to %d for nexus=%ld:%d:%d.\n",
	    sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
}

static int
qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
		break;
	case SCSI_QDEPTH_QFULL:
		qla2x00_handle_queue_full(sdev, qdepth);
		break;
	case SCSI_QDEPTH_RAMP_UP:
		qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return sdev->queue_depth;
}

static int
qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}

/**
 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
 * @ha: HA context
 *
 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
 * supported addressing method.
 */
static void
qla2x00_config_dma_addressing(struct qla_hw_data *ha)
{
	/* Assume a 32bit DMA mask. */
	ha->flags.enable_64bit_addressing = 0;

	if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
		/* Any upper-dword bits set? */
		if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
		    !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
			/* Ok, a 64bit DMA mask is applicable. */
			ha->flags.enable_64bit_addressing = 1;
			ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
			ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
			return;
		}
	}

	dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
	pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
}

static void
qla2x00_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	/* enable risc and host interrupts */
	WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
	RD_REG_WORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla2x00_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	/* disable risc and host interrupts */
	WRT_REG_WORD(&reg->ictrl, 0);
	RD_REG_WORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
	RD_REG_DWORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (IS_NOPOLLING_TYPE(ha))
		return;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static int
qla2x00_iospace_config(struct qla_hw_data *ha)
{
	resource_size_t pio;
	uint16_t msix;
	int cpus;

	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (!(ha->bars & 1))
		goto skip_pio;

	/* We only need PIO for Flash operations on ISP2312 v2 chips. */
	pio = pci_resource_start(ha->pdev, 0);
	if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
		if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
			ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
			    "Invalid pci I/O region size (%s).\n",
			    pci_name(ha->pdev));
			pio = 0;
		}
	} else {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
		    "Region #0 not a PIO resource (%s).\n",
		    pci_name(ha->pdev));
		pio = 0;
	}
	ha->pio_address = pio;
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
	    "PIO address=%llu.\n",
	    (unsigned long long)ha->pio_address);

skip_pio:
	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
		    "Region #1 not an MMIO resource (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
		    "Invalid PCI mem region size (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
		    "Cannot remap MMIO (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Determine queue resources */
	ha->max_req_queues = ha->max_rsp_queues = 1;
	if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
	    (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
	    (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
		goto mqiobase_exit;

	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
	    pci_resource_len(ha->pdev, 3));
	if (ha->mqiobase) {
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
		    "MQIO Base=%p.\n", ha->mqiobase);
		/* Read MSIX vector size of the board */
		pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
		ha->msix_count = msix;
		/* Max queues are bounded by available msix vectors */
		/* queue 0 uses two msix vectors */
		if (ql2xmultique_tag) {
			cpus = num_online_cpus();
			ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
			    (cpus + 1) : (ha->msix_count - 1);
			ha->max_req_queues = 2;
		} else if (ql2xmaxqueues > 1) {
			ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
			    QLA_MQ_SIZE : ql2xmaxqueues;
			ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
			    "QoS mode set, max no of request queues:%d.\n",
			    ha->max_req_queues);
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
			    "QoS mode set, max no of request queues:%d.\n",
			    ha->max_req_queues);
		}
		ql_log_pci(ql_log_info, ha->pdev, 0x001a,
		    "MSI-X vector count: %d.\n", msix);
	} else
		ql_log_pci(ql_log_info, ha->pdev, 0x001b,
		    "BAR 3 not enabled.\n");

mqiobase_exit:
	ha->msix_count = ha->max_rsp_queues + 1;
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
	    "MSIX Count:%d.\n", ha->msix_count);
	return (0);

iospace_error_exit:
	return (-ENOMEM);
}
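/*
 * Worked example (editorial): with ql2xmultique_tag set on an 8-CPU system
 * and a board advertising 16 MSI-X vectors, the logic above computes
 *
 *	ha->max_rsp_queues = min(cpus + 1, msix - 1) = 9;
 *	ha->max_req_queues = 2;
 *	ha->msix_count     = ha->max_rsp_queues + 1  = 10;
 *
 * i.e. one vector per response queue plus one extra, since queue 0
 * consumes two vectors.  The same bounding applies in
 * qla83xx_iospace_config() below.
 */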
static int
qla83xx_iospace_config(struct qla_hw_data *ha)
{
	uint16_t msix;
	int cpus;

	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));

		goto iospace_error_exit;
	}

	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
		    "Region #0 not an MMIO resource (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
		    "Invalid PCI mem region size (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
		    "Cannot remap MMIO (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* 64bit PCI BAR - BAR2 will correspond to region 4 */
	/* 83XX 26XX always use MQ type access for queues
	 * - mbar 2, a.k.a region 4 */
	ha->max_req_queues = ha->max_rsp_queues = 1;
	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
	    pci_resource_len(ha->pdev, 4));

	if (!ha->mqiobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
		    "BAR2/region4 not enabled\n");
		goto mqiobase_exit;
	}

	ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
	    pci_resource_len(ha->pdev, 2));
	if (ha->msixbase) {
		/* Read MSIX vector size of the board */
		pci_read_config_word(ha->pdev,
		    QLA_83XX_PCI_MSIX_CONTROL, &msix);
		ha->msix_count = msix;
		/* Max queues are bounded by available msix vectors */
		/* queue 0 uses two msix vectors */
		if (ql2xmultique_tag) {
			cpus = num_online_cpus();
			ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
			    (cpus + 1) : (ha->msix_count - 1);
			ha->max_req_queues = 2;
		} else if (ql2xmaxqueues > 1) {
			ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
			    QLA_MQ_SIZE : ql2xmaxqueues;
			ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c,
			    "QoS mode set, max no of request queues:%d.\n",
			    ha->max_req_queues);
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
			    "QoS mode set, max no of request queues:%d.\n",
			    ha->max_req_queues);
		}
		ql_log_pci(ql_log_info, ha->pdev, 0x011c,
		    "MSI-X vector count: %d.\n", msix);
	} else
		ql_log_pci(ql_log_info, ha->pdev, 0x011e,
		    "BAR 1 not enabled.\n");

mqiobase_exit:
	ha->msix_count = ha->max_rsp_queues + 1;
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
	    "MSIX Count:%d.\n", ha->msix_count);
	return 0;

iospace_error_exit:
	return -ENOMEM;
}

static struct isp_operations qla2100_isp_ops = {
	.pci_config = qla2100_pci_config,
	.reset_chip = qla2x00_reset_chip,
	.chip_diag = qla2x00_chip_diag,
	.config_rings = qla2x00_config_rings,
	.reset_adapter = qla2x00_reset_adapter,
	.nvram_config = qla2x00_nvram_config,
	.update_fw_options = qla2x00_update_fw_options,
	.load_risc = qla2x00_load_risc,
	.pci_info_str = qla2x00_pci_info_str,
	.fw_version_str = qla2x00_fw_version_str,
	.intr_handler = qla2100_intr_handler,
	.enable_intrs = qla2x00_enable_intrs,
	.disable_intrs = qla2x00_disable_intrs,
	.abort_command = qla2x00_abort_command,
	.target_reset = qla2x00_abort_target,
	.lun_reset = qla2x00_lun_reset,
	.fabric_login = qla2x00_login_fabric,
	.fabric_logout = qla2x00_fabric_logout,
	.calc_req_entries = qla2x00_calc_iocbs_32,
	.build_iocbs = qla2x00_build_scsi_iocbs_32,
	.prep_ms_iocb = qla2x00_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
	.read_nvram = qla2x00_read_nvram_data,
	.write_nvram = qla2x00_write_nvram_data,
	.fw_dump = qla2100_fw_dump,
	.beacon_on = NULL,
	.beacon_off = NULL,
	.beacon_blink = NULL,
	.read_optrom = qla2x00_read_optrom_data,
	.write_optrom = qla2x00_write_optrom_data,
	.get_flash_version = qla2x00_get_flash_version,
	.start_scsi = qla2x00_start_scsi,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
};

static struct isp_operations qla2300_isp_ops = {
	.pci_config = qla2300_pci_config,
	.reset_chip = qla2x00_reset_chip,
	.chip_diag = qla2x00_chip_diag,
	.config_rings = qla2x00_config_rings,
	.reset_adapter = qla2x00_reset_adapter,
	.nvram_config = qla2x00_nvram_config,
	.update_fw_options = qla2x00_update_fw_options,
	.load_risc = qla2x00_load_risc,
	.pci_info_str = qla2x00_pci_info_str,
	.fw_version_str = qla2x00_fw_version_str,
	.intr_handler = qla2300_intr_handler,
	.enable_intrs = qla2x00_enable_intrs,
	.disable_intrs = qla2x00_disable_intrs,
	.abort_command = qla2x00_abort_command,
	.target_reset = qla2x00_abort_target,
	.lun_reset = qla2x00_lun_reset,
	.fabric_login = qla2x00_login_fabric,
	.fabric_logout = qla2x00_fabric_logout,
	.calc_req_entries = qla2x00_calc_iocbs_32,
	.build_iocbs = qla2x00_build_scsi_iocbs_32,
	.prep_ms_iocb = qla2x00_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
	.read_nvram = qla2x00_read_nvram_data,
	.write_nvram = qla2x00_write_nvram_data,
	.fw_dump = qla2300_fw_dump,
	.beacon_on = qla2x00_beacon_on,
	.beacon_off = qla2x00_beacon_off,
	.beacon_blink = qla2x00_beacon_blink,
	.read_optrom = qla2x00_read_optrom_data,
	.write_optrom = qla2x00_write_optrom_data,
	.get_flash_version = qla2x00_get_flash_version,
	.start_scsi = qla2x00_start_scsi,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
};

static struct isp_operations qla24xx_isp_ops = {
	.pci_config = qla24xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla24xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla24xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = qla24xx_read_nvram_data,
	.write_nvram = qla24xx_write_nvram_data,
	.fw_dump = qla24xx_fw_dump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla24xx_beacon_blink,
	.read_optrom = qla24xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_start_scsi,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
};

static struct isp_operations qla25xx_isp_ops = {
	.pci_config = qla25xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla24xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla24xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = qla25xx_read_nvram_data,
	.write_nvram = qla25xx_write_nvram_data,
	.fw_dump = qla25xx_fw_dump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla24xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
};

static struct isp_operations qla81xx_isp_ops = {
	.pci_config = qla25xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla81xx_update_fw_options,
	.load_risc = qla81xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla81xx_fw_dump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla83xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
};

static struct isp_operations qla82xx_isp_ops = {
	.pci_config = qla82xx_pci_config,
	.reset_chip = qla82xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla82xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla82xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla82xx_intr_handler,
	.enable_intrs = qla82xx_enable_intrs,
	.disable_intrs = qla82xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = qla24xx_read_nvram_data,
	.write_nvram = qla24xx_write_nvram_data,
	.fw_dump = qla24xx_fw_dump,
	.beacon_on = qla82xx_beacon_on,
	.beacon_off = qla82xx_beacon_off,
	.beacon_blink = NULL,
	.read_optrom = qla82xx_read_optrom_data,
	.write_optrom = qla82xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla82xx_start_scsi,
	.abort_isp = qla82xx_abort_isp,
	.iospace_config = qla82xx_iospace_config,
};
1985 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 1986 .read_nvram = NULL, 1987 .write_nvram = NULL, 1988 .fw_dump = qla83xx_fw_dump, 1989 .beacon_on = qla24xx_beacon_on, 1990 .beacon_off = qla24xx_beacon_off, 1991 .beacon_blink = qla83xx_beacon_blink, 1992 .read_optrom = qla25xx_read_optrom_data, 1993 .write_optrom = qla24xx_write_optrom_data, 1994 .get_flash_version = qla24xx_get_flash_version, 1995 .start_scsi = qla24xx_dif_start_scsi, 1996 .abort_isp = qla2x00_abort_isp, 1997 .iospace_config = qla83xx_iospace_config, 1998 }; 1999 2000 static inline void 2001 qla2x00_set_isp_flags(struct qla_hw_data *ha) 2002 { 2003 ha->device_type = DT_EXTENDED_IDS; 2004 switch (ha->pdev->device) { 2005 case PCI_DEVICE_ID_QLOGIC_ISP2100: 2006 ha->device_type |= DT_ISP2100; 2007 ha->device_type &= ~DT_EXTENDED_IDS; 2008 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 2009 break; 2010 case PCI_DEVICE_ID_QLOGIC_ISP2200: 2011 ha->device_type |= DT_ISP2200; 2012 ha->device_type &= ~DT_EXTENDED_IDS; 2013 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 2014 break; 2015 case PCI_DEVICE_ID_QLOGIC_ISP2300: 2016 ha->device_type |= DT_ISP2300; 2017 ha->device_type |= DT_ZIO_SUPPORTED; 2018 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2019 break; 2020 case PCI_DEVICE_ID_QLOGIC_ISP2312: 2021 ha->device_type |= DT_ISP2312; 2022 ha->device_type |= DT_ZIO_SUPPORTED; 2023 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2024 break; 2025 case PCI_DEVICE_ID_QLOGIC_ISP2322: 2026 ha->device_type |= DT_ISP2322; 2027 ha->device_type |= DT_ZIO_SUPPORTED; 2028 if (ha->pdev->subsystem_vendor == 0x1028 && 2029 ha->pdev->subsystem_device == 0x0170) 2030 ha->device_type |= DT_OEM_001; 2031 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2032 break; 2033 case PCI_DEVICE_ID_QLOGIC_ISP6312: 2034 ha->device_type |= DT_ISP6312; 2035 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2036 break; 2037 case PCI_DEVICE_ID_QLOGIC_ISP6322: 2038 ha->device_type |= DT_ISP6322; 2039 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2040 break; 2041 case PCI_DEVICE_ID_QLOGIC_ISP2422: 2042 ha->device_type |= DT_ISP2422; 2043 ha->device_type |= DT_ZIO_SUPPORTED; 2044 ha->device_type |= DT_FWI2; 2045 ha->device_type |= DT_IIDMA; 2046 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2047 break; 2048 case PCI_DEVICE_ID_QLOGIC_ISP2432: 2049 ha->device_type |= DT_ISP2432; 2050 ha->device_type |= DT_ZIO_SUPPORTED; 2051 ha->device_type |= DT_FWI2; 2052 ha->device_type |= DT_IIDMA; 2053 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2054 break; 2055 case PCI_DEVICE_ID_QLOGIC_ISP8432: 2056 ha->device_type |= DT_ISP8432; 2057 ha->device_type |= DT_ZIO_SUPPORTED; 2058 ha->device_type |= DT_FWI2; 2059 ha->device_type |= DT_IIDMA; 2060 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2061 break; 2062 case PCI_DEVICE_ID_QLOGIC_ISP5422: 2063 ha->device_type |= DT_ISP5422; 2064 ha->device_type |= DT_FWI2; 2065 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2066 break; 2067 case PCI_DEVICE_ID_QLOGIC_ISP5432: 2068 ha->device_type |= DT_ISP5432; 2069 ha->device_type |= DT_FWI2; 2070 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2071 break; 2072 case PCI_DEVICE_ID_QLOGIC_ISP2532: 2073 ha->device_type |= DT_ISP2532; 2074 ha->device_type |= DT_ZIO_SUPPORTED; 2075 ha->device_type |= DT_FWI2; 2076 ha->device_type |= DT_IIDMA; 2077 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2078 break; 2079 case PCI_DEVICE_ID_QLOGIC_ISP8001: 2080 ha->device_type |= DT_ISP8001; 2081 ha->device_type |= DT_ZIO_SUPPORTED; 2082 ha->device_type |= DT_FWI2; 2083 ha->device_type |= 
DT_IIDMA; 2084 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2085 break; 2086 case PCI_DEVICE_ID_QLOGIC_ISP8021: 2087 ha->device_type |= DT_ISP8021; 2088 ha->device_type |= DT_ZIO_SUPPORTED; 2089 ha->device_type |= DT_FWI2; 2090 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2091 /* Initialize 82XX ISP flags */ 2092 qla82xx_init_flags(ha); 2093 break; 2094 case PCI_DEVICE_ID_QLOGIC_ISP2031: 2095 ha->device_type |= DT_ISP2031; 2096 ha->device_type |= DT_ZIO_SUPPORTED; 2097 ha->device_type |= DT_FWI2; 2098 ha->device_type |= DT_IIDMA; 2099 ha->device_type |= DT_T10_PI; 2100 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2101 break; 2102 case PCI_DEVICE_ID_QLOGIC_ISP8031: 2103 ha->device_type |= DT_ISP8031; 2104 ha->device_type |= DT_ZIO_SUPPORTED; 2105 ha->device_type |= DT_FWI2; 2106 ha->device_type |= DT_IIDMA; 2107 ha->device_type |= DT_T10_PI; 2108 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2109 break; 2110 } 2111 2112 if (IS_QLA82XX(ha)) 2113 ha->port_no = !(ha->portnum & 1); 2114 else 2115 /* Get adapter physical port no from interrupt pin register. */ 2116 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no); 2117 2118 if (ha->port_no & 1) 2119 ha->flags.port0 = 1; 2120 else 2121 ha->flags.port0 = 0; 2122 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b, 2123 "device_type=0x%x port=%d fw_srisc_address=0x%x.\n", 2124 ha->device_type, ha->flags.port0, ha->fw_srisc_address); 2125 } 2126 2127 static void 2128 qla2xxx_scan_start(struct Scsi_Host *shost) 2129 { 2130 scsi_qla_host_t *vha = shost_priv(shost); 2131 2132 if (vha->hw->flags.running_gold_fw) 2133 return; 2134 2135 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 2136 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2137 set_bit(RSCN_UPDATE, &vha->dpc_flags); 2138 set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags); 2139 } 2140 2141 static int 2142 qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) 2143 { 2144 scsi_qla_host_t *vha = shost_priv(shost); 2145 2146 if (!vha->host) 2147 return 1; 2148 if (time > vha->hw->loop_reset_delay * HZ) 2149 return 1; 2150 2151 return atomic_read(&vha->loop_state) == LOOP_READY; 2152 } 2153 2154 /* 2155 * PCI driver interface 2156 */ 2157 static int __devinit 2158 qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 2159 { 2160 int ret = -ENODEV; 2161 struct Scsi_Host *host; 2162 scsi_qla_host_t *base_vha = NULL; 2163 struct qla_hw_data *ha; 2164 char pci_info[30]; 2165 char fw_str[30], wq_name[30]; 2166 struct scsi_host_template *sht; 2167 int bars, mem_only = 0; 2168 uint16_t req_length = 0, rsp_length = 0; 2169 struct req_que *req = NULL; 2170 struct rsp_que *rsp = NULL; 2171 2172 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 2173 sht = &qla2xxx_driver_template; 2174 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || 2175 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || 2176 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 || 2177 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || 2178 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || 2179 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 || 2180 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 || 2181 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 || 2182 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 || 2183 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031) { 2184 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2185 mem_only = 1; 2186 ql_dbg_pci(ql_dbg_init, pdev, 0x0007, 2187 "Mem only adapter.\n"); 2188 } 2189 ql_dbg_pci(ql_dbg_init, pdev, 0x0008, 2190 "Bars=%d.\n", bars); 2191 2192 if (mem_only) { 2193 if 
(pci_enable_device_mem(pdev)) 2194 goto probe_out; 2195 } else { 2196 if (pci_enable_device(pdev)) 2197 goto probe_out; 2198 } 2199 2200 /* This may fail but that's ok */ 2201 pci_enable_pcie_error_reporting(pdev); 2202 2203 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); 2204 if (!ha) { 2205 ql_log_pci(ql_log_fatal, pdev, 0x0009, 2206 "Unable to allocate memory for ha.\n"); 2207 goto probe_out; 2208 } 2209 ql_dbg_pci(ql_dbg_init, pdev, 0x000a, 2210 "Memory allocated for ha=%p.\n", ha); 2211 ha->pdev = pdev; 2212 ha->tgt.enable_class_2 = ql2xenableclass2; 2213 2214 /* Clear our data area */ 2215 ha->bars = bars; 2216 ha->mem_only = mem_only; 2217 spin_lock_init(&ha->hardware_lock); 2218 spin_lock_init(&ha->vport_slock); 2219 mutex_init(&ha->selflogin_lock); 2220 2221 /* Set ISP-type information. */ 2222 qla2x00_set_isp_flags(ha); 2223 2224 /* Set EEH reset type to fundamental if required by hba */ 2225 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) || 2226 IS_QLA83XX(ha)) 2227 pdev->needs_freset = 1; 2228 2229 ha->prev_topology = 0; 2230 ha->init_cb_size = sizeof(init_cb_t); 2231 ha->link_data_rate = PORT_SPEED_UNKNOWN; 2232 ha->optrom_size = OPTROM_SIZE_2300; 2233 2234 /* Assign ISP specific operations. */ 2235 if (IS_QLA2100(ha)) { 2236 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 2237 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 2238 req_length = REQUEST_ENTRY_CNT_2100; 2239 rsp_length = RESPONSE_ENTRY_CNT_2100; 2240 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 2241 ha->gid_list_info_size = 4; 2242 ha->flash_conf_off = ~0; 2243 ha->flash_data_off = ~0; 2244 ha->nvram_conf_off = ~0; 2245 ha->nvram_data_off = ~0; 2246 ha->isp_ops = &qla2100_isp_ops; 2247 } else if (IS_QLA2200(ha)) { 2248 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 2249 ha->mbx_count = MAILBOX_REGISTER_COUNT_2200; 2250 req_length = REQUEST_ENTRY_CNT_2200; 2251 rsp_length = RESPONSE_ENTRY_CNT_2100; 2252 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 2253 ha->gid_list_info_size = 4; 2254 ha->flash_conf_off = ~0; 2255 ha->flash_data_off = ~0; 2256 ha->nvram_conf_off = ~0; 2257 ha->nvram_data_off = ~0; 2258 ha->isp_ops = &qla2100_isp_ops; 2259 } else if (IS_QLA23XX(ha)) { 2260 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 2261 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2262 req_length = REQUEST_ENTRY_CNT_2200; 2263 rsp_length = RESPONSE_ENTRY_CNT_2300; 2264 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2265 ha->gid_list_info_size = 6; 2266 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 2267 ha->optrom_size = OPTROM_SIZE_2322; 2268 ha->flash_conf_off = ~0; 2269 ha->flash_data_off = ~0; 2270 ha->nvram_conf_off = ~0; 2271 ha->nvram_data_off = ~0; 2272 ha->isp_ops = &qla2300_isp_ops; 2273 } else if (IS_QLA24XX_TYPE(ha)) { 2274 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2275 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2276 req_length = REQUEST_ENTRY_CNT_24XX; 2277 rsp_length = RESPONSE_ENTRY_CNT_2300; 2278 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2279 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2280 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 2281 ha->gid_list_info_size = 8; 2282 ha->optrom_size = OPTROM_SIZE_24XX; 2283 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX; 2284 ha->isp_ops = &qla24xx_isp_ops; 2285 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 2286 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2287 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2288 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2289 } else if (IS_QLA25XX(ha)) { 2290 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2291 ha->mbx_count = 
MAILBOX_REGISTER_COUNT; 2292 req_length = REQUEST_ENTRY_CNT_24XX; 2293 rsp_length = RESPONSE_ENTRY_CNT_2300; 2294 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2295 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2296 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 2297 ha->gid_list_info_size = 8; 2298 ha->optrom_size = OPTROM_SIZE_25XX; 2299 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2300 ha->isp_ops = &qla25xx_isp_ops; 2301 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 2302 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2303 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2304 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2305 } else if (IS_QLA81XX(ha)) { 2306 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2307 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2308 req_length = REQUEST_ENTRY_CNT_24XX; 2309 rsp_length = RESPONSE_ENTRY_CNT_2300; 2310 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2311 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2312 ha->gid_list_info_size = 8; 2313 ha->optrom_size = OPTROM_SIZE_81XX; 2314 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2315 ha->isp_ops = &qla81xx_isp_ops; 2316 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 2317 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 2318 ha->nvram_conf_off = ~0; 2319 ha->nvram_data_off = ~0; 2320 } else if (IS_QLA82XX(ha)) { 2321 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2322 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2323 req_length = REQUEST_ENTRY_CNT_82XX; 2324 rsp_length = RESPONSE_ENTRY_CNT_82XX; 2325 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2326 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2327 ha->gid_list_info_size = 8; 2328 ha->optrom_size = OPTROM_SIZE_82XX; 2329 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2330 ha->isp_ops = &qla82xx_isp_ops; 2331 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 2332 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2333 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2334 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2335 } else if (IS_QLA83XX(ha)) { 2336 ha->portnum = PCI_FUNC(ha->pdev->devfn); 2337 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2338 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2339 req_length = REQUEST_ENTRY_CNT_24XX; 2340 rsp_length = RESPONSE_ENTRY_CNT_2300; 2341 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2342 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2343 ha->gid_list_info_size = 8; 2344 ha->optrom_size = OPTROM_SIZE_83XX; 2345 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2346 ha->isp_ops = &qla83xx_isp_ops; 2347 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 2348 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 2349 ha->nvram_conf_off = ~0; 2350 ha->nvram_data_off = ~0; 2351 } 2352 2353 ql_dbg_pci(ql_dbg_init, pdev, 0x001e, 2354 "mbx_count=%d, req_length=%d, " 2355 "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, " 2356 "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, " 2357 "max_fibre_devices=%d.\n", 2358 ha->mbx_count, req_length, rsp_length, ha->max_loop_id, 2359 ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size, 2360 ha->nvram_npiv_size, ha->max_fibre_devices); 2361 ql_dbg_pci(ql_dbg_init, pdev, 0x001f, 2362 "isp_ops=%p, flash_conf_off=%d, " 2363 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", 2364 ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, 2365 ha->nvram_conf_off, ha->nvram_data_off); 2366 2367 /* Configure PCI I/O space */ 2368 ret = ha->isp_ops->iospace_config(ha); 2369 if (ret) 2370 goto iospace_config_failed; 2371 2372 ql_log_pci(ql_log_info, pdev, 0x001d, 2373 "Found an ISP%04X irq %d iobase 
0x%p.\n", 2374 pdev->device, pdev->irq, ha->iobase); 2375 mutex_init(&ha->vport_lock); 2376 init_completion(&ha->mbx_cmd_comp); 2377 complete(&ha->mbx_cmd_comp); 2378 init_completion(&ha->mbx_intr_comp); 2379 init_completion(&ha->dcbx_comp); 2380 2381 set_bit(0, (unsigned long *) ha->vp_idx_map); 2382 2383 qla2x00_config_dma_addressing(ha); 2384 ql_dbg_pci(ql_dbg_init, pdev, 0x0020, 2385 "64 Bit addressing is %s.\n", 2386 ha->flags.enable_64bit_addressing ? "enable" : 2387 "disable"); 2388 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); 2389 if (!ret) { 2390 ql_log_pci(ql_log_fatal, pdev, 0x0031, 2391 "Failed to allocate memory for adapter, aborting.\n"); 2392 2393 goto probe_hw_failed; 2394 } 2395 2396 req->max_q_depth = MAX_Q_DEPTH; 2397 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU) 2398 req->max_q_depth = ql2xmaxqdepth; 2399 2400 2401 base_vha = qla2x00_create_host(sht, ha); 2402 if (!base_vha) { 2403 ret = -ENOMEM; 2404 qla2x00_mem_free(ha); 2405 qla2x00_free_req_que(ha, req); 2406 qla2x00_free_rsp_que(ha, rsp); 2407 goto probe_hw_failed; 2408 } 2409 2410 pci_set_drvdata(pdev, base_vha); 2411 2412 host = base_vha->host; 2413 base_vha->req = req; 2414 host->can_queue = req->length + 128; 2415 if (IS_QLA2XXX_MIDTYPE(ha)) 2416 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; 2417 else 2418 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + 2419 base_vha->vp_idx; 2420 2421 /* Set the SG table size based on ISP type */ 2422 if (!IS_FWI2_CAPABLE(ha)) { 2423 if (IS_QLA2100(ha)) 2424 host->sg_tablesize = 32; 2425 } else { 2426 if (!IS_QLA82XX(ha)) 2427 host->sg_tablesize = QLA_SG_ALL; 2428 } 2429 ql_dbg(ql_dbg_init, base_vha, 0x0032, 2430 "can_queue=%d, req=%p, " 2431 "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", 2432 host->can_queue, base_vha->req, 2433 base_vha->mgmt_svr_loop_id, host->sg_tablesize); 2434 host->max_id = ha->max_fibre_devices; 2435 host->cmd_per_lun = 3; 2436 host->unique_id = host->host_no; 2437 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) 2438 host->max_cmd_len = 32; 2439 else 2440 host->max_cmd_len = MAX_CMDSZ; 2441 host->max_channel = MAX_BUSES - 1; 2442 host->max_lun = ql2xmaxlun; 2443 host->transportt = qla2xxx_transport_template; 2444 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); 2445 2446 ql_dbg(ql_dbg_init, base_vha, 0x0033, 2447 "max_id=%d this_id=%d " 2448 "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d " 2449 "max_lun=%d transportt=%p, vendor_id=%llu.\n", host->max_id, 2450 host->this_id, host->cmd_per_lun, host->unique_id, 2451 host->max_cmd_len, host->max_channel, host->max_lun, 2452 host->transportt, sht->vendor_id); 2453 2454 que_init: 2455 /* Alloc arrays of request and response ring ptrs */ 2456 if (!qla2x00_alloc_queues(ha, req, rsp)) { 2457 ql_log(ql_log_fatal, base_vha, 0x003d, 2458 "Failed to allocate memory for queue pointers..." 2459 "aborting.\n"); 2460 goto probe_init_failed; 2461 } 2462 2463 qlt_probe_one_stage1(base_vha, ha); 2464 2465 /* Set up the irqs */ 2466 ret = qla2x00_request_irqs(ha, rsp); 2467 if (ret) 2468 goto probe_init_failed; 2469 2470 pci_save_state(pdev); 2471 2472 /* Assign back pointers */ 2473 rsp->req = req; 2474 req->rsp = rsp; 2475 2476 /* FWI2-capable only. 
	 */
	req->req_q_in = &ha->iobase->isp24.req_q_in;
	req->req_q_out = &ha->iobase->isp24.req_q_out;
	rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
	rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
	if (ha->mqenable || IS_QLA83XX(ha)) {
		req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
		req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
		rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
		rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
	}

	if (IS_QLA82XX(ha)) {
		req->req_q_out = &ha->iobase->isp82.req_q_out[0];
		rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
		rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
	}

	ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
	    "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
	    ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
	    "req->req_q_in=%p req->req_q_out=%p "
	    "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
	    req->req_q_in, req->req_q_out,
	    rsp->rsp_q_in, rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x003e,
	    "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
	    ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
	ql_dbg(ql_dbg_init, base_vha, 0x003f,
	    "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
	    req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);

	if (qla2x00_initialize_adapter(base_vha)) {
		ql_log(ql_log_fatal, base_vha, 0x00d6,
		    "Failed to initialize adapter - Adapter flags %x.\n",
		    base_vha->device_flags);

		if (IS_QLA82XX(ha)) {
			qla82xx_idc_lock(ha);
			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			    QLA8XXX_DEV_FAILED);
			qla82xx_idc_unlock(ha);
			ql_log(ql_log_fatal, base_vha, 0x00d7,
			    "HW State: FAILED.\n");
		}

		ret = -ENODEV;
		goto probe_failed;
	}

	if (ha->mqenable) {
		if (qla25xx_setup_mode(base_vha)) {
			ql_log(ql_log_warn, base_vha, 0x00ec,
			    "Failed to create queues, falling back to single queue mode.\n");
			goto que_init;
		}
	}

	if (ha->flags.running_gold_fw)
		goto skip_dpc;

	/*
	 * Startup the kernel thread for this host adapter
	 */
	ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
	    "%s_dpc", base_vha->host_str);
	if (IS_ERR(ha->dpc_thread)) {
		ql_log(ql_log_fatal, base_vha, 0x00ed,
		    "Failed to start DPC thread.\n");
		ret = PTR_ERR(ha->dpc_thread);
		goto probe_failed;
	}
	ql_dbg(ql_dbg_init, base_vha, 0x00ee,
	    "DPC thread started successfully.\n");

	/*
	 * If we're not coming up in initiator mode, we might sit for
	 * a while without waking up the dpc thread, which leads to a
	 * stuck process warning.  So just kick the dpc once here and
	 * let the kthread start (and go back to sleep in qla2x00_do_dpc).
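	 * (qla2xxx_wake_dpc() is safe to call at any point; it does
	 * nothing while ha->dpc_thread is still unset.)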
	 */
	qla2xxx_wake_dpc(base_vha);

	if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
		sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
		ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
		INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);

		sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
		ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
		INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
		INIT_WORK(&ha->idc_state_handler,
		    qla83xx_idc_state_handler_work);
		INIT_WORK(&ha->nic_core_unrecoverable,
		    qla83xx_nic_core_unrecoverable_work);
	}

skip_dpc:
	list_add_tail(&base_vha->list, &ha->vp_list);
	base_vha->host->irq = ha->pdev->irq;

	/* Initialize the timer */
	qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
	ql_dbg(ql_dbg_init, base_vha, 0x00ef,
	    "Started qla2x00_timer with interval=%d.\n", WATCH_INTERVAL);
	ql_dbg(ql_dbg_init, base_vha, 0x00f0,
	    "Detected hba at address=%p.\n", ha);

	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
		if (ha->fw_attributes & BIT_4) {
			int prot = 0, guard;
			base_vha->flags.difdix_supported = 1;
			ql_dbg(ql_dbg_init, base_vha, 0x00f1,
			    "Registering for DIF/DIX type 1 and 3 protection.\n");
			if (ql2xenabledif == 1)
				prot = SHOST_DIX_TYPE0_PROTECTION;
			scsi_host_set_prot(host,
			    prot | SHOST_DIF_TYPE1_PROTECTION
			    | SHOST_DIF_TYPE2_PROTECTION
			    | SHOST_DIF_TYPE3_PROTECTION
			    | SHOST_DIX_TYPE1_PROTECTION
			    | SHOST_DIX_TYPE2_PROTECTION
			    | SHOST_DIX_TYPE3_PROTECTION);

			guard = SHOST_DIX_GUARD_CRC;

			if (IS_PI_IPGUARD_CAPABLE(ha) &&
			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
				guard |= SHOST_DIX_GUARD_IP;

			scsi_host_set_guard(host, guard);
		} else
			base_vha->flags.difdix_supported = 0;
	}

	ha->isp_ops->enable_intrs(ha);

	ret = scsi_add_host(host, &pdev->dev);
	if (ret)
		goto probe_failed;

	base_vha->flags.init_done = 1;
	base_vha->flags.online = 1;

	ql_dbg(ql_dbg_init, base_vha, 0x00f2,
	    "Init done and hba is online.\n");

	if (qla_ini_mode_enabled(base_vha))
		scsi_scan_host(host);
	else
		ql_dbg(ql_dbg_init, base_vha, 0x0122,
		    "skipping scsi_scan_host() for non-initiator port\n");

	qla2x00_alloc_sysfs_attr(base_vha);

	qla2x00_init_host_attr(base_vha);

	qla2x00_dfs_setup(base_vha);

	ql_log(ql_log_info, base_vha, 0x00fb,
	    "QLogic %s - %s.\n",
	    ha->model_number, ha->model_desc ? ha->model_desc : "");
	ql_log(ql_log_info, base_vha, 0x00fc,
	    "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
	    pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
	    pci_name(pdev), ha->flags.enable_64bit_addressing ?
'+' : '-',
	    base_vha->host_no,
	    ha->isp_ops->fw_version_str(base_vha, fw_str));

	qlt_add_target(ha, base_vha);

	return 0;

probe_init_failed:
	qla2x00_free_req_que(ha, req);
	ha->req_q_map[0] = NULL;
	clear_bit(0, ha->req_qid_map);
	qla2x00_free_rsp_que(ha, rsp);
	ha->rsp_q_map[0] = NULL;
	clear_bit(0, ha->rsp_qid_map);
	ha->max_req_queues = ha->max_rsp_queues = 0;

probe_failed:
	if (base_vha->timer_active)
		qla2x00_stop_timer(base_vha);
	base_vha->flags.online = 0;
	if (ha->dpc_thread) {
		struct task_struct *t = ha->dpc_thread;

		ha->dpc_thread = NULL;
		kthread_stop(t);
	}

	qla2x00_free_device(base_vha);

	scsi_host_put(base_vha->host);

probe_hw_failed:
	if (IS_QLA82XX(ha)) {
		qla82xx_idc_lock(ha);
		qla82xx_clear_drv_active(ha);
		qla82xx_idc_unlock(ha);
	}
iospace_config_failed:
	if (IS_QLA82XX(ha)) {
		if (ha->nx_pcibase)
			iounmap((device_reg_t __iomem *)ha->nx_pcibase);
		if (!ql2xdbwr)
			iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
	} else {
		if (ha->iobase)
			iounmap(ha->iobase);
	}
	pci_release_selected_regions(ha->pdev, ha->bars);
	kfree(ha);
	ha = NULL;

probe_out:
	pci_disable_device(pdev);
	return ret;
}

static void
qla2x00_stop_dpc_thread(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct task_struct *t = ha->dpc_thread;

	if (ha->dpc_thread == NULL)
		return;
	/*
	 * qla2xxx_wake_dpc checks for ->dpc_thread
	 * so we need to zero it out.
	 */
	ha->dpc_thread = NULL;
	kthread_stop(t);
}

static void
qla2x00_shutdown(struct pci_dev *pdev)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;

	vha = pci_get_drvdata(pdev);
	ha = vha->hw;

	/* Turn off FCE trace */
	if (ha->flags.fce_enabled) {
		qla2x00_disable_fce_trace(vha, NULL, NULL);
		ha->flags.fce_enabled = 0;
	}

	/* Turn off EFT trace */
	if (ha->eft)
		qla2x00_disable_eft_trace(vha);

	/* Stop currently executing firmware. */
	qla2x00_try_to_stop_firmware(vha);

	/* Turn adapter off line */
	vha->flags.online = 0;

	/* Turn off interrupts on the card */
	if (ha->interrupts_on) {
		vha->flags.init_done = 0;
		ha->isp_ops->disable_intrs(ha);
	}

	qla2x00_free_irqs(vha);

	qla2x00_free_fw_dump(ha);
}

static void
qla2x00_remove_one(struct pci_dev *pdev)
{
	scsi_qla_host_t *base_vha, *vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	/*
	 * If the PCI device is disabled, that means that probe failed and
	 * any resources should have been cleaned up on probe exit.
2763 */ 2764 if (!atomic_read(&pdev->enable_cnt)) 2765 return; 2766 2767 base_vha = pci_get_drvdata(pdev); 2768 ha = base_vha->hw; 2769 2770 ha->flags.host_shutting_down = 1; 2771 2772 set_bit(UNLOADING, &base_vha->dpc_flags); 2773 mutex_lock(&ha->vport_lock); 2774 while (ha->cur_vport_count) { 2775 struct Scsi_Host *scsi_host; 2776 2777 spin_lock_irqsave(&ha->vport_slock, flags); 2778 2779 BUG_ON(base_vha->list.next == &ha->vp_list); 2780 /* This assumes first entry in ha->vp_list is always base vha */ 2781 vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list); 2782 scsi_host = scsi_host_get(vha->host); 2783 2784 spin_unlock_irqrestore(&ha->vport_slock, flags); 2785 mutex_unlock(&ha->vport_lock); 2786 2787 fc_vport_terminate(vha->fc_vport); 2788 scsi_host_put(vha->host); 2789 2790 mutex_lock(&ha->vport_lock); 2791 } 2792 mutex_unlock(&ha->vport_lock); 2793 2794 if (IS_QLA8031(ha)) { 2795 ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, 2796 "Clearing fcoe driver presence.\n"); 2797 if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) 2798 ql_dbg(ql_dbg_p3p, base_vha, 0xb079, 2799 "Error while clearing DRV-Presence.\n"); 2800 } 2801 2802 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 2803 2804 qla2x00_dfs_remove(base_vha); 2805 2806 qla84xx_put_chip(base_vha); 2807 2808 /* Disable timer */ 2809 if (base_vha->timer_active) 2810 qla2x00_stop_timer(base_vha); 2811 2812 base_vha->flags.online = 0; 2813 2814 /* Flush the work queue and remove it */ 2815 if (ha->wq) { 2816 flush_workqueue(ha->wq); 2817 destroy_workqueue(ha->wq); 2818 ha->wq = NULL; 2819 } 2820 2821 /* Cancel all work and destroy DPC workqueues */ 2822 if (ha->dpc_lp_wq) { 2823 cancel_work_sync(&ha->idc_aen); 2824 destroy_workqueue(ha->dpc_lp_wq); 2825 ha->dpc_lp_wq = NULL; 2826 } 2827 2828 if (ha->dpc_hp_wq) { 2829 cancel_work_sync(&ha->nic_core_reset); 2830 cancel_work_sync(&ha->idc_state_handler); 2831 cancel_work_sync(&ha->nic_core_unrecoverable); 2832 destroy_workqueue(ha->dpc_hp_wq); 2833 ha->dpc_hp_wq = NULL; 2834 } 2835 2836 /* Kill the kernel thread for this host */ 2837 if (ha->dpc_thread) { 2838 struct task_struct *t = ha->dpc_thread; 2839 2840 /* 2841 * qla2xxx_wake_dpc checks for ->dpc_thread 2842 * so we need to zero it out. 
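	 * Clearing the pointer before calling kthread_stop() closes the
	 * window in which a late qla2xxx_wake_dpc() could wake a thread
	 * that is already exiting.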
2843 */ 2844 ha->dpc_thread = NULL; 2845 kthread_stop(t); 2846 } 2847 qlt_remove_target(ha, base_vha); 2848 2849 qla2x00_free_sysfs_attr(base_vha); 2850 2851 fc_remove_host(base_vha->host); 2852 2853 scsi_remove_host(base_vha->host); 2854 2855 qla2x00_free_device(base_vha); 2856 2857 scsi_host_put(base_vha->host); 2858 2859 if (IS_QLA82XX(ha)) { 2860 qla82xx_idc_lock(ha); 2861 qla82xx_clear_drv_active(ha); 2862 qla82xx_idc_unlock(ha); 2863 2864 iounmap((device_reg_t __iomem *)ha->nx_pcibase); 2865 if (!ql2xdbwr) 2866 iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr); 2867 } else { 2868 if (ha->iobase) 2869 iounmap(ha->iobase); 2870 2871 if (ha->mqiobase) 2872 iounmap(ha->mqiobase); 2873 2874 if (IS_QLA83XX(ha) && ha->msixbase) 2875 iounmap(ha->msixbase); 2876 } 2877 2878 pci_release_selected_regions(ha->pdev, ha->bars); 2879 kfree(ha); 2880 ha = NULL; 2881 2882 pci_disable_pcie_error_reporting(pdev); 2883 2884 pci_disable_device(pdev); 2885 pci_set_drvdata(pdev, NULL); 2886 } 2887 2888 static void 2889 qla2x00_free_device(scsi_qla_host_t *vha) 2890 { 2891 struct qla_hw_data *ha = vha->hw; 2892 2893 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); 2894 2895 /* Disable timer */ 2896 if (vha->timer_active) 2897 qla2x00_stop_timer(vha); 2898 2899 qla2x00_stop_dpc_thread(vha); 2900 2901 qla25xx_delete_queues(vha); 2902 if (ha->flags.fce_enabled) 2903 qla2x00_disable_fce_trace(vha, NULL, NULL); 2904 2905 if (ha->eft) 2906 qla2x00_disable_eft_trace(vha); 2907 2908 /* Stop currently executing firmware. */ 2909 qla2x00_try_to_stop_firmware(vha); 2910 2911 vha->flags.online = 0; 2912 2913 /* turn-off interrupts on the card */ 2914 if (ha->interrupts_on) { 2915 vha->flags.init_done = 0; 2916 ha->isp_ops->disable_intrs(ha); 2917 } 2918 2919 qla2x00_free_irqs(vha); 2920 2921 qla2x00_free_fcports(vha); 2922 2923 qla2x00_mem_free(ha); 2924 2925 qla82xx_md_free(vha); 2926 2927 qla2x00_free_queues(ha); 2928 } 2929 2930 void qla2x00_free_fcports(struct scsi_qla_host *vha) 2931 { 2932 fc_port_t *fcport, *tfcport; 2933 2934 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) { 2935 list_del(&fcport->list); 2936 qla2x00_clear_loop_id(fcport); 2937 kfree(fcport); 2938 fcport = NULL; 2939 } 2940 } 2941 2942 static inline void 2943 qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, 2944 int defer) 2945 { 2946 struct fc_rport *rport; 2947 scsi_qla_host_t *base_vha; 2948 unsigned long flags; 2949 2950 if (!fcport->rport) 2951 return; 2952 2953 rport = fcport->rport; 2954 if (defer) { 2955 base_vha = pci_get_drvdata(vha->hw->pdev); 2956 spin_lock_irqsave(vha->host->host_lock, flags); 2957 fcport->drport = rport; 2958 spin_unlock_irqrestore(vha->host->host_lock, flags); 2959 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 2960 qla2xxx_wake_dpc(base_vha); 2961 } else { 2962 fc_remote_port_delete(rport); 2963 qlt_fc_port_deleted(vha, fcport); 2964 } 2965 } 2966 2967 /* 2968 * qla2x00_mark_device_lost Updates fcport state when device goes offline. 2969 * 2970 * Input: ha = adapter block pointer. fcport = port structure pointer. 2971 * 2972 * Return: None. 
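 *
 * Note: do_login re-arms the login retry counter and sets RELOGIN_NEEDED;
 *	defer hands the fc_rport teardown to the DPC thread via
 *	qla2x00_schedule_rport_del().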
 *
 * Context:
 */
void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
    int do_login, int defer)
{
	if (atomic_read(&fcport->state) == FCS_ONLINE &&
	    vha->vp_idx == fcport->vha->vp_idx) {
		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
		qla2x00_schedule_rport_del(vha, fcport, defer);
	}
	/*
	 * We may need to retry the login, so don't change the state of the
	 * port but do the retries.
	 */
	if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);

	if (!do_login)
		return;

	if (fcport->login_retry == 0) {
		fcport->login_retry = vha->hw->login_retry_count;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);

		ql_dbg(ql_dbg_disc, vha, 0x2067,
		    "Port login retry "
		    "%02x%02x%02x%02x%02x%02x%02x%02x, "
		    "id = 0x%04x retry cnt=%d.\n",
		    fcport->port_name[0], fcport->port_name[1],
		    fcport->port_name[2], fcport->port_name[3],
		    fcport->port_name[4], fcport->port_name[5],
		    fcport->port_name[6], fcport->port_name[7],
		    fcport->loop_id, fcport->login_retry);
	}
}

/*
 * qla2x00_mark_all_devices_lost
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = adapter block pointer.
 *	defer = defer the fc_rport removal to the DPC thread.
 *
 * Return:
 *	None.
 *
 * Context:
 */
void
qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
{
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
			continue;

		/*
		 * No point in marking the device as lost, if the device is
		 * already DEAD.
		 */
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
			continue;
		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
			if (defer)
				qla2x00_schedule_rport_del(vha, fcport, defer);
			else if (vha->vp_idx == fcport->vha->vp_idx)
				qla2x00_schedule_rport_del(vha, fcport, defer);
		}
	}
}

/*
 * qla2x00_mem_alloc
 *	Allocates adapter memory.
 *
 * Returns:
 *	1 = success.
 *	-ENOMEM = failure.
 */
static int
qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
	struct req_que **req, struct rsp_que **rsp)
{
	char	name[16];

	ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
		&ha->init_cb_dma, GFP_KERNEL);
	if (!ha->init_cb)
		goto fail;

	if (qlt_mem_alloc(ha) < 0)
		goto fail_free_init_cb;

	ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
		qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
	if (!ha->gid_list)
		goto fail_free_tgt_mem;

	ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
	if (!ha->srb_mempool)
		goto fail_free_gid_list;

	if (IS_QLA82XX(ha)) {
		/* Allocate cache for CT6 Ctx.
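		 * The cache (ctx_cachep) is created once, by the first
		 * ISP82xx function probed, and shared by all of them; each
		 * adapter then builds its private mempool on top of it.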
*/ 3081 if (!ctx_cachep) { 3082 ctx_cachep = kmem_cache_create("qla2xxx_ctx", 3083 sizeof(struct ct6_dsd), 0, 3084 SLAB_HWCACHE_ALIGN, NULL); 3085 if (!ctx_cachep) 3086 goto fail_free_gid_list; 3087 } 3088 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, 3089 ctx_cachep); 3090 if (!ha->ctx_mempool) 3091 goto fail_free_srb_mempool; 3092 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021, 3093 "ctx_cachep=%p ctx_mempool=%p.\n", 3094 ctx_cachep, ha->ctx_mempool); 3095 } 3096 3097 /* Get memory for cached NVRAM */ 3098 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); 3099 if (!ha->nvram) 3100 goto fail_free_ctx_mempool; 3101 3102 snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME, 3103 ha->pdev->device); 3104 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, 3105 DMA_POOL_SIZE, 8, 0); 3106 if (!ha->s_dma_pool) 3107 goto fail_free_nvram; 3108 3109 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022, 3110 "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n", 3111 ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); 3112 3113 if (IS_QLA82XX(ha) || ql2xenabledif) { 3114 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, 3115 DSD_LIST_DMA_POOL_SIZE, 8, 0); 3116 if (!ha->dl_dma_pool) { 3117 ql_log_pci(ql_log_fatal, ha->pdev, 0x0023, 3118 "Failed to allocate memory for dl_dma_pool.\n"); 3119 goto fail_s_dma_pool; 3120 } 3121 3122 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev, 3123 FCP_CMND_DMA_POOL_SIZE, 8, 0); 3124 if (!ha->fcp_cmnd_dma_pool) { 3125 ql_log_pci(ql_log_fatal, ha->pdev, 0x0024, 3126 "Failed to allocate memory for fcp_cmnd_dma_pool.\n"); 3127 goto fail_dl_dma_pool; 3128 } 3129 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025, 3130 "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n", 3131 ha->dl_dma_pool, ha->fcp_cmnd_dma_pool); 3132 } 3133 3134 /* Allocate memory for SNS commands */ 3135 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 3136 /* Get consistent memory allocated for SNS commands */ 3137 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, 3138 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 3139 if (!ha->sns_cmd) 3140 goto fail_dma_pool; 3141 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026, 3142 "sns_cmd: %p.\n", ha->sns_cmd); 3143 } else { 3144 /* Get consistent memory allocated for MS IOCB */ 3145 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 3146 &ha->ms_iocb_dma); 3147 if (!ha->ms_iocb) 3148 goto fail_dma_pool; 3149 /* Get consistent memory allocated for CT SNS commands */ 3150 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev, 3151 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 3152 if (!ha->ct_sns) 3153 goto fail_free_ms_iocb; 3154 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027, 3155 "ms_iocb=%p ct_sns=%p.\n", 3156 ha->ms_iocb, ha->ct_sns); 3157 } 3158 3159 /* Allocate memory for request ring */ 3160 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 3161 if (!*req) { 3162 ql_log_pci(ql_log_fatal, ha->pdev, 0x0028, 3163 "Failed to allocate memory for req.\n"); 3164 goto fail_req; 3165 } 3166 (*req)->length = req_len; 3167 (*req)->ring = dma_alloc_coherent(&ha->pdev->dev, 3168 ((*req)->length + 1) * sizeof(request_t), 3169 &(*req)->dma, GFP_KERNEL); 3170 if (!(*req)->ring) { 3171 ql_log_pci(ql_log_fatal, ha->pdev, 0x0029, 3172 "Failed to allocate memory for req_ring.\n"); 3173 goto fail_req_ring; 3174 } 3175 /* Allocate memory for response ring */ 3176 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 3177 if (!*rsp) { 3178 ql_log_pci(ql_log_fatal, ha->pdev, 0x002a, 3179 "Failed to allocate memory for rsp.\n"); 3180 goto fail_rsp; 3181 } 3182 
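	/*
	 * Hook the response queue to the hardware and size its ring; like
	 * the request ring above, it is a coherent DMA allocation of
	 * (length + 1) entries.
	 */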
(*rsp)->hw = ha; 3183 (*rsp)->length = rsp_len; 3184 (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev, 3185 ((*rsp)->length + 1) * sizeof(response_t), 3186 &(*rsp)->dma, GFP_KERNEL); 3187 if (!(*rsp)->ring) { 3188 ql_log_pci(ql_log_fatal, ha->pdev, 0x002b, 3189 "Failed to allocate memory for rsp_ring.\n"); 3190 goto fail_rsp_ring; 3191 } 3192 (*req)->rsp = *rsp; 3193 (*rsp)->req = *req; 3194 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c, 3195 "req=%p req->length=%d req->ring=%p rsp=%p " 3196 "rsp->length=%d rsp->ring=%p.\n", 3197 *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length, 3198 (*rsp)->ring); 3199 /* Allocate memory for NVRAM data for vports */ 3200 if (ha->nvram_npiv_size) { 3201 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) * 3202 ha->nvram_npiv_size, GFP_KERNEL); 3203 if (!ha->npiv_info) { 3204 ql_log_pci(ql_log_fatal, ha->pdev, 0x002d, 3205 "Failed to allocate memory for npiv_info.\n"); 3206 goto fail_npiv_info; 3207 } 3208 } else 3209 ha->npiv_info = NULL; 3210 3211 /* Get consistent memory allocated for EX-INIT-CB. */ 3212 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) { 3213 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 3214 &ha->ex_init_cb_dma); 3215 if (!ha->ex_init_cb) 3216 goto fail_ex_init_cb; 3217 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e, 3218 "ex_init_cb=%p.\n", ha->ex_init_cb); 3219 } 3220 3221 INIT_LIST_HEAD(&ha->gbl_dsd_list); 3222 3223 /* Get consistent memory allocated for Async Port-Database. */ 3224 if (!IS_FWI2_CAPABLE(ha)) { 3225 ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 3226 &ha->async_pd_dma); 3227 if (!ha->async_pd) 3228 goto fail_async_pd; 3229 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f, 3230 "async_pd=%p.\n", ha->async_pd); 3231 } 3232 3233 INIT_LIST_HEAD(&ha->vp_list); 3234 3235 /* Allocate memory for our loop_id bitmap */ 3236 ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long), 3237 GFP_KERNEL); 3238 if (!ha->loop_id_map) 3239 goto fail_async_pd; 3240 else { 3241 qla2x00_set_reserved_loop_ids(ha); 3242 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, 3243 "loop_id_map=%p. 
\n", ha->loop_id_map); 3244 } 3245 3246 return 1; 3247 3248 fail_async_pd: 3249 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); 3250 fail_ex_init_cb: 3251 kfree(ha->npiv_info); 3252 fail_npiv_info: 3253 dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) * 3254 sizeof(response_t), (*rsp)->ring, (*rsp)->dma); 3255 (*rsp)->ring = NULL; 3256 (*rsp)->dma = 0; 3257 fail_rsp_ring: 3258 kfree(*rsp); 3259 fail_rsp: 3260 dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) * 3261 sizeof(request_t), (*req)->ring, (*req)->dma); 3262 (*req)->ring = NULL; 3263 (*req)->dma = 0; 3264 fail_req_ring: 3265 kfree(*req); 3266 fail_req: 3267 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 3268 ha->ct_sns, ha->ct_sns_dma); 3269 ha->ct_sns = NULL; 3270 ha->ct_sns_dma = 0; 3271 fail_free_ms_iocb: 3272 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 3273 ha->ms_iocb = NULL; 3274 ha->ms_iocb_dma = 0; 3275 fail_dma_pool: 3276 if (IS_QLA82XX(ha) || ql2xenabledif) { 3277 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 3278 ha->fcp_cmnd_dma_pool = NULL; 3279 } 3280 fail_dl_dma_pool: 3281 if (IS_QLA82XX(ha) || ql2xenabledif) { 3282 dma_pool_destroy(ha->dl_dma_pool); 3283 ha->dl_dma_pool = NULL; 3284 } 3285 fail_s_dma_pool: 3286 dma_pool_destroy(ha->s_dma_pool); 3287 ha->s_dma_pool = NULL; 3288 fail_free_nvram: 3289 kfree(ha->nvram); 3290 ha->nvram = NULL; 3291 fail_free_ctx_mempool: 3292 mempool_destroy(ha->ctx_mempool); 3293 ha->ctx_mempool = NULL; 3294 fail_free_srb_mempool: 3295 mempool_destroy(ha->srb_mempool); 3296 ha->srb_mempool = NULL; 3297 fail_free_gid_list: 3298 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 3299 ha->gid_list, 3300 ha->gid_list_dma); 3301 ha->gid_list = NULL; 3302 ha->gid_list_dma = 0; 3303 fail_free_tgt_mem: 3304 qlt_mem_free(ha); 3305 fail_free_init_cb: 3306 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, 3307 ha->init_cb_dma); 3308 ha->init_cb = NULL; 3309 ha->init_cb_dma = 0; 3310 fail: 3311 ql_log(ql_log_fatal, NULL, 0x0030, 3312 "Memory allocation failure.\n"); 3313 return -ENOMEM; 3314 } 3315 3316 /* 3317 * qla2x00_free_fw_dump 3318 * Frees fw dump stuff. 3319 * 3320 * Input: 3321 * ha = adapter block pointer. 3322 */ 3323 static void 3324 qla2x00_free_fw_dump(struct qla_hw_data *ha) 3325 { 3326 if (ha->fce) 3327 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 3328 ha->fce_dma); 3329 3330 if (ha->fw_dump) { 3331 if (ha->eft) 3332 dma_free_coherent(&ha->pdev->dev, 3333 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma); 3334 vfree(ha->fw_dump); 3335 } 3336 ha->fce = NULL; 3337 ha->fce_dma = 0; 3338 ha->eft = NULL; 3339 ha->eft_dma = 0; 3340 ha->fw_dump = NULL; 3341 ha->fw_dumped = 0; 3342 ha->fw_dump_reading = 0; 3343 } 3344 3345 /* 3346 * qla2x00_mem_free 3347 * Frees all adapter allocated memory. 3348 * 3349 * Input: 3350 * ha = adapter block pointer. 
3351 */ 3352 static void 3353 qla2x00_mem_free(struct qla_hw_data *ha) 3354 { 3355 qla2x00_free_fw_dump(ha); 3356 3357 if (ha->mctp_dump) 3358 dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump, 3359 ha->mctp_dump_dma); 3360 3361 if (ha->srb_mempool) 3362 mempool_destroy(ha->srb_mempool); 3363 3364 if (ha->dcbx_tlv) 3365 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, 3366 ha->dcbx_tlv, ha->dcbx_tlv_dma); 3367 3368 if (ha->xgmac_data) 3369 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, 3370 ha->xgmac_data, ha->xgmac_data_dma); 3371 3372 if (ha->sns_cmd) 3373 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 3374 ha->sns_cmd, ha->sns_cmd_dma); 3375 3376 if (ha->ct_sns) 3377 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 3378 ha->ct_sns, ha->ct_sns_dma); 3379 3380 if (ha->sfp_data) 3381 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma); 3382 3383 if (ha->ms_iocb) 3384 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 3385 3386 if (ha->ex_init_cb) 3387 dma_pool_free(ha->s_dma_pool, 3388 ha->ex_init_cb, ha->ex_init_cb_dma); 3389 3390 if (ha->async_pd) 3391 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); 3392 3393 if (ha->s_dma_pool) 3394 dma_pool_destroy(ha->s_dma_pool); 3395 3396 if (ha->gid_list) 3397 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 3398 ha->gid_list, ha->gid_list_dma); 3399 3400 if (IS_QLA82XX(ha)) { 3401 if (!list_empty(&ha->gbl_dsd_list)) { 3402 struct dsd_dma *dsd_ptr, *tdsd_ptr; 3403 3404 /* clean up allocated prev pool */ 3405 list_for_each_entry_safe(dsd_ptr, 3406 tdsd_ptr, &ha->gbl_dsd_list, list) { 3407 dma_pool_free(ha->dl_dma_pool, 3408 dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma); 3409 list_del(&dsd_ptr->list); 3410 kfree(dsd_ptr); 3411 } 3412 } 3413 } 3414 3415 if (ha->dl_dma_pool) 3416 dma_pool_destroy(ha->dl_dma_pool); 3417 3418 if (ha->fcp_cmnd_dma_pool) 3419 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 3420 3421 if (ha->ctx_mempool) 3422 mempool_destroy(ha->ctx_mempool); 3423 3424 qlt_mem_free(ha); 3425 3426 if (ha->init_cb) 3427 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, 3428 ha->init_cb, ha->init_cb_dma); 3429 vfree(ha->optrom_buffer); 3430 kfree(ha->nvram); 3431 kfree(ha->npiv_info); 3432 kfree(ha->swl); 3433 kfree(ha->loop_id_map); 3434 3435 ha->srb_mempool = NULL; 3436 ha->ctx_mempool = NULL; 3437 ha->sns_cmd = NULL; 3438 ha->sns_cmd_dma = 0; 3439 ha->ct_sns = NULL; 3440 ha->ct_sns_dma = 0; 3441 ha->ms_iocb = NULL; 3442 ha->ms_iocb_dma = 0; 3443 ha->init_cb = NULL; 3444 ha->init_cb_dma = 0; 3445 ha->ex_init_cb = NULL; 3446 ha->ex_init_cb_dma = 0; 3447 ha->async_pd = NULL; 3448 ha->async_pd_dma = 0; 3449 3450 ha->s_dma_pool = NULL; 3451 ha->dl_dma_pool = NULL; 3452 ha->fcp_cmnd_dma_pool = NULL; 3453 3454 ha->gid_list = NULL; 3455 ha->gid_list_dma = 0; 3456 3457 ha->tgt.atio_ring = NULL; 3458 ha->tgt.atio_dma = 0; 3459 ha->tgt.tgt_vp_map = NULL; 3460 } 3461 3462 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, 3463 struct qla_hw_data *ha) 3464 { 3465 struct Scsi_Host *host; 3466 struct scsi_qla_host *vha = NULL; 3467 3468 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 3469 if (host == NULL) { 3470 ql_log_pci(ql_log_fatal, ha->pdev, 0x0107, 3471 "Failed to allocate host from the scsi layer, aborting.\n"); 3472 goto fail; 3473 } 3474 3475 /* Clear our data area */ 3476 vha = shost_priv(host); 3477 memset(vha, 0, sizeof(scsi_qla_host_t)); 3478 3479 vha->host = host; 3480 vha->host_no = host->host_no; 3481 vha->hw = ha; 3482 
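	/* Set up the per-host fcport list, deferred-work list and lock. */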
3483 INIT_LIST_HEAD(&vha->vp_fcports); 3484 INIT_LIST_HEAD(&vha->work_list); 3485 INIT_LIST_HEAD(&vha->list); 3486 3487 spin_lock_init(&vha->work_lock); 3488 3489 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 3490 ql_dbg(ql_dbg_init, vha, 0x0041, 3491 "Allocated the host=%p hw=%p vha=%p dev_name=%s", 3492 vha->host, vha->hw, vha, 3493 dev_name(&(ha->pdev->dev))); 3494 3495 return vha; 3496 3497 fail: 3498 return vha; 3499 } 3500 3501 static struct qla_work_evt * 3502 qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) 3503 { 3504 struct qla_work_evt *e; 3505 uint8_t bail; 3506 3507 QLA_VHA_MARK_BUSY(vha, bail); 3508 if (bail) 3509 return NULL; 3510 3511 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); 3512 if (!e) { 3513 QLA_VHA_MARK_NOT_BUSY(vha); 3514 return NULL; 3515 } 3516 3517 INIT_LIST_HEAD(&e->list); 3518 e->type = type; 3519 e->flags = QLA_EVT_FLAG_FREE; 3520 return e; 3521 } 3522 3523 static int 3524 qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) 3525 { 3526 unsigned long flags; 3527 3528 spin_lock_irqsave(&vha->work_lock, flags); 3529 list_add_tail(&e->list, &vha->work_list); 3530 spin_unlock_irqrestore(&vha->work_lock, flags); 3531 qla2xxx_wake_dpc(vha); 3532 3533 return QLA_SUCCESS; 3534 } 3535 3536 int 3537 qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code, 3538 u32 data) 3539 { 3540 struct qla_work_evt *e; 3541 3542 e = qla2x00_alloc_work(vha, QLA_EVT_AEN); 3543 if (!e) 3544 return QLA_FUNCTION_FAILED; 3545 3546 e->u.aen.code = code; 3547 e->u.aen.data = data; 3548 return qla2x00_post_work(vha, e); 3549 } 3550 3551 int 3552 qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb) 3553 { 3554 struct qla_work_evt *e; 3555 3556 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK); 3557 if (!e) 3558 return QLA_FUNCTION_FAILED; 3559 3560 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 3561 return qla2x00_post_work(vha, e); 3562 } 3563 3564 #define qla2x00_post_async_work(name, type) \ 3565 int qla2x00_post_async_##name##_work( \ 3566 struct scsi_qla_host *vha, \ 3567 fc_port_t *fcport, uint16_t *data) \ 3568 { \ 3569 struct qla_work_evt *e; \ 3570 \ 3571 e = qla2x00_alloc_work(vha, type); \ 3572 if (!e) \ 3573 return QLA_FUNCTION_FAILED; \ 3574 \ 3575 e->u.logio.fcport = fcport; \ 3576 if (data) { \ 3577 e->u.logio.data[0] = data[0]; \ 3578 e->u.logio.data[1] = data[1]; \ 3579 } \ 3580 return qla2x00_post_work(vha, e); \ 3581 } 3582 3583 qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN); 3584 qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE); 3585 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); 3586 qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE); 3587 qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC); 3588 qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE); 3589 3590 int 3591 qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code) 3592 { 3593 struct qla_work_evt *e; 3594 3595 e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT); 3596 if (!e) 3597 return QLA_FUNCTION_FAILED; 3598 3599 e->u.uevent.code = code; 3600 return qla2x00_post_work(vha, e); 3601 } 3602 3603 static void 3604 qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code) 3605 { 3606 char event_string[40]; 3607 char *envp[] = { event_string, NULL }; 3608 3609 switch (code) { 3610 case QLA_UEVENT_CODE_FW_DUMP: 3611 snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld", 3612 vha->host_no); 3613 break; 3614 default: 3615 /* do nothing */ 3616 break; 
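		/*
		 * Only QLA_UEVENT_CODE_FW_DUMP is handled above; any new
		 * code must fill in event_string itself, since it would
		 * otherwise reach kobject_uevent_env() uninitialized.
		 */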
3617 } 3618 kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp); 3619 } 3620 3621 void 3622 qla2x00_do_work(struct scsi_qla_host *vha) 3623 { 3624 struct qla_work_evt *e, *tmp; 3625 unsigned long flags; 3626 LIST_HEAD(work); 3627 3628 spin_lock_irqsave(&vha->work_lock, flags); 3629 list_splice_init(&vha->work_list, &work); 3630 spin_unlock_irqrestore(&vha->work_lock, flags); 3631 3632 list_for_each_entry_safe(e, tmp, &work, list) { 3633 list_del_init(&e->list); 3634 3635 switch (e->type) { 3636 case QLA_EVT_AEN: 3637 fc_host_post_event(vha->host, fc_get_event_number(), 3638 e->u.aen.code, e->u.aen.data); 3639 break; 3640 case QLA_EVT_IDC_ACK: 3641 qla81xx_idc_ack(vha, e->u.idc_ack.mb); 3642 break; 3643 case QLA_EVT_ASYNC_LOGIN: 3644 qla2x00_async_login(vha, e->u.logio.fcport, 3645 e->u.logio.data); 3646 break; 3647 case QLA_EVT_ASYNC_LOGIN_DONE: 3648 qla2x00_async_login_done(vha, e->u.logio.fcport, 3649 e->u.logio.data); 3650 break; 3651 case QLA_EVT_ASYNC_LOGOUT: 3652 qla2x00_async_logout(vha, e->u.logio.fcport); 3653 break; 3654 case QLA_EVT_ASYNC_LOGOUT_DONE: 3655 qla2x00_async_logout_done(vha, e->u.logio.fcport, 3656 e->u.logio.data); 3657 break; 3658 case QLA_EVT_ASYNC_ADISC: 3659 qla2x00_async_adisc(vha, e->u.logio.fcport, 3660 e->u.logio.data); 3661 break; 3662 case QLA_EVT_ASYNC_ADISC_DONE: 3663 qla2x00_async_adisc_done(vha, e->u.logio.fcport, 3664 e->u.logio.data); 3665 break; 3666 case QLA_EVT_UEVENT: 3667 qla2x00_uevent_emit(vha, e->u.uevent.code); 3668 break; 3669 } 3670 if (e->flags & QLA_EVT_FLAG_FREE) 3671 kfree(e); 3672 3673 /* For each work completed decrement vha ref count */ 3674 QLA_VHA_MARK_NOT_BUSY(vha); 3675 } 3676 } 3677 3678 /* Relogins all the fcports of a vport 3679 * Context: dpc thread 3680 */ 3681 void qla2x00_relogin(struct scsi_qla_host *vha) 3682 { 3683 fc_port_t *fcport; 3684 int status; 3685 uint16_t next_loopid = 0; 3686 struct qla_hw_data *ha = vha->hw; 3687 uint16_t data[2]; 3688 3689 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3690 /* 3691 * If the port is not ONLINE then try to login 3692 * to it if we haven't run out of retries. 3693 */ 3694 if (atomic_read(&fcport->state) != FCS_ONLINE && 3695 fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) { 3696 fcport->login_retry--; 3697 if (fcport->flags & FCF_FABRIC_DEVICE) { 3698 if (fcport->flags & FCF_FCP2_DEVICE) 3699 ha->isp_ops->fabric_logout(vha, 3700 fcport->loop_id, 3701 fcport->d_id.b.domain, 3702 fcport->d_id.b.area, 3703 fcport->d_id.b.al_pa); 3704 3705 if (fcport->loop_id == FC_NO_LOOP_ID) { 3706 fcport->loop_id = next_loopid = 3707 ha->min_external_loopid; 3708 status = qla2x00_find_new_loop_id( 3709 vha, fcport); 3710 if (status != QLA_SUCCESS) { 3711 /* Ran out of IDs to use */ 3712 break; 3713 } 3714 } 3715 3716 if (IS_ALOGIO_CAPABLE(ha)) { 3717 fcport->flags |= FCF_ASYNC_SENT; 3718 data[0] = 0; 3719 data[1] = QLA_LOGIO_LOGIN_RETRIED; 3720 status = qla2x00_post_async_login_work( 3721 vha, fcport, data); 3722 if (status == QLA_SUCCESS) 3723 continue; 3724 /* Attempt a retry. 
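					 * (status == 1 means "retry later":
					 * the code below sets RELOGIN_NEEDED
					 * so the DPC thread revisits this
					 * port on a subsequent pass.)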
*/ 3725 status = 1; 3726 } else { 3727 status = qla2x00_fabric_login(vha, 3728 fcport, &next_loopid); 3729 if (status == QLA_SUCCESS) { 3730 int status2; 3731 uint8_t opts; 3732 3733 opts = 0; 3734 if (fcport->flags & 3735 FCF_FCP2_DEVICE) 3736 opts |= BIT_1; 3737 status2 = 3738 qla2x00_get_port_database( 3739 vha, fcport, opts); 3740 if (status2 != QLA_SUCCESS) 3741 status = 1; 3742 } 3743 } 3744 } else 3745 status = qla2x00_local_device_login(vha, 3746 fcport); 3747 3748 if (status == QLA_SUCCESS) { 3749 fcport->old_loop_id = fcport->loop_id; 3750 3751 ql_dbg(ql_dbg_disc, vha, 0x2003, 3752 "Port login OK: logged in ID 0x%x.\n", 3753 fcport->loop_id); 3754 3755 qla2x00_update_fcport(vha, fcport); 3756 3757 } else if (status == 1) { 3758 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 3759 /* retry the login again */ 3760 ql_dbg(ql_dbg_disc, vha, 0x2007, 3761 "Retrying %d login again loop_id 0x%x.\n", 3762 fcport->login_retry, fcport->loop_id); 3763 } else { 3764 fcport->login_retry = 0; 3765 } 3766 3767 if (fcport->login_retry == 0 && status != QLA_SUCCESS) 3768 qla2x00_clear_loop_id(fcport); 3769 } 3770 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 3771 break; 3772 } 3773 } 3774 3775 /* Schedule work on any of the dpc-workqueues */ 3776 void 3777 qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code) 3778 { 3779 struct qla_hw_data *ha = base_vha->hw; 3780 3781 switch (work_code) { 3782 case MBA_IDC_AEN: /* 0x8200 */ 3783 if (ha->dpc_lp_wq) 3784 queue_work(ha->dpc_lp_wq, &ha->idc_aen); 3785 break; 3786 3787 case QLA83XX_NIC_CORE_RESET: /* 0x1 */ 3788 if (!ha->flags.nic_core_reset_hdlr_active) { 3789 if (ha->dpc_hp_wq) 3790 queue_work(ha->dpc_hp_wq, &ha->nic_core_reset); 3791 } else 3792 ql_dbg(ql_dbg_p3p, base_vha, 0xb05e, 3793 "NIC Core reset is already active. 
Skip " 3794 "scheduling it again.\n"); 3795 break; 3796 case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */ 3797 if (ha->dpc_hp_wq) 3798 queue_work(ha->dpc_hp_wq, &ha->idc_state_handler); 3799 break; 3800 case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */ 3801 if (ha->dpc_hp_wq) 3802 queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable); 3803 break; 3804 default: 3805 ql_log(ql_log_warn, base_vha, 0xb05f, 3806 "Unknown work-code=0x%x.\n", work_code); 3807 } 3808 3809 return; 3810 } 3811 3812 /* Work: Perform NIC Core Unrecoverable state handling */ 3813 void 3814 qla83xx_nic_core_unrecoverable_work(struct work_struct *work) 3815 { 3816 struct qla_hw_data *ha = 3817 container_of(work, struct qla_hw_data, nic_core_unrecoverable); 3818 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 3819 uint32_t dev_state = 0; 3820 3821 qla83xx_idc_lock(base_vha, 0); 3822 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 3823 qla83xx_reset_ownership(base_vha); 3824 if (ha->flags.nic_core_reset_owner) { 3825 ha->flags.nic_core_reset_owner = 0; 3826 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, 3827 QLA8XXX_DEV_FAILED); 3828 ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n"); 3829 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); 3830 } 3831 qla83xx_idc_unlock(base_vha, 0); 3832 } 3833 3834 /* Work: Execute IDC state handler */ 3835 void 3836 qla83xx_idc_state_handler_work(struct work_struct *work) 3837 { 3838 struct qla_hw_data *ha = 3839 container_of(work, struct qla_hw_data, idc_state_handler); 3840 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 3841 uint32_t dev_state = 0; 3842 3843 qla83xx_idc_lock(base_vha, 0); 3844 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 3845 if (dev_state == QLA8XXX_DEV_FAILED || 3846 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) 3847 qla83xx_idc_state_handler(base_vha); 3848 qla83xx_idc_unlock(base_vha, 0); 3849 } 3850 3851 static int 3852 qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha) 3853 { 3854 int rval = QLA_SUCCESS; 3855 unsigned long heart_beat_wait = jiffies + (1 * HZ); 3856 uint32_t heart_beat_counter1, heart_beat_counter2; 3857 3858 do { 3859 if (time_after(jiffies, heart_beat_wait)) { 3860 ql_dbg(ql_dbg_p3p, base_vha, 0xb07c, 3861 "Nic Core f/w is not alive.\n"); 3862 rval = QLA_FUNCTION_FAILED; 3863 break; 3864 } 3865 3866 qla83xx_idc_lock(base_vha, 0); 3867 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 3868 &heart_beat_counter1); 3869 qla83xx_idc_unlock(base_vha, 0); 3870 msleep(100); 3871 qla83xx_idc_lock(base_vha, 0); 3872 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 3873 &heart_beat_counter2); 3874 qla83xx_idc_unlock(base_vha, 0); 3875 } while (heart_beat_counter1 == heart_beat_counter2); 3876 3877 return rval; 3878 } 3879 3880 /* Work: Perform NIC Core Reset handling */ 3881 void 3882 qla83xx_nic_core_reset_work(struct work_struct *work) 3883 { 3884 struct qla_hw_data *ha = 3885 container_of(work, struct qla_hw_data, nic_core_reset); 3886 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 3887 uint32_t dev_state = 0; 3888 3889 if (IS_QLA2031(ha)) { 3890 if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS) 3891 ql_log(ql_log_warn, base_vha, 0xb081, 3892 "Failed to dump mctp\n"); 3893 return; 3894 } 3895 3896 if (!ha->flags.nic_core_reset_hdlr_active) { 3897 if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) { 3898 qla83xx_idc_lock(base_vha, 0); 3899 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, 3900 &dev_state); 3901 qla83xx_idc_unlock(base_vha, 0); 3902 if (dev_state != QLA8XXX_DEV_NEED_RESET)
{ 3903 ql_dbg(ql_dbg_p3p, base_vha, 0xb07a, 3904 "Nic Core f/w is alive.\n"); 3905 return; 3906 } 3907 } 3908 3909 ha->flags.nic_core_reset_hdlr_active = 1; 3910 if (qla83xx_nic_core_reset(base_vha)) { 3911 /* NIC Core reset failed. */ 3912 ql_dbg(ql_dbg_p3p, base_vha, 0xb061, 3913 "NIC Core reset failed.\n"); 3914 } 3915 ha->flags.nic_core_reset_hdlr_active = 0; 3916 } 3917 } 3918 3919 /* Work: Handle 8200 IDC aens */ 3920 void 3921 qla83xx_service_idc_aen(struct work_struct *work) 3922 { 3923 struct qla_hw_data *ha = 3924 container_of(work, struct qla_hw_data, idc_aen); 3925 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 3926 uint32_t dev_state, idc_control; 3927 3928 qla83xx_idc_lock(base_vha, 0); 3929 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 3930 qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control); 3931 qla83xx_idc_unlock(base_vha, 0); 3932 if (dev_state == QLA8XXX_DEV_NEED_RESET) { 3933 if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) { 3934 ql_dbg(ql_dbg_p3p, base_vha, 0xb062, 3935 "Application requested NIC Core Reset.\n"); 3936 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); 3937 } else if (qla83xx_check_nic_core_fw_alive(base_vha) == 3938 QLA_SUCCESS) { 3939 ql_dbg(ql_dbg_p3p, base_vha, 0xb07b, 3940 "Other protocol driver requested NIC Core Reset.\n"); 3941 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); 3942 } 3943 } else if (dev_state == QLA8XXX_DEV_FAILED || 3944 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { 3945 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); 3946 } 3947 } 3948 3949 static void 3950 qla83xx_wait_logic(void) 3951 { 3952 int i; 3953 3954 /* Yield CPU */ 3955 if (!in_interrupt()) { 3956 /* 3957 * Wait about 100ms before retrying. 3958 * This controls the number of retries for a single 3959 * lock operation.
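* msleep() may only be used in process context; the else-branch below
* busy-waits briefly with cpu_relax() when called in interrupt context.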
*/ 3961 msleep(100); 3962 schedule(); 3963 } else { 3964 for (i = 0; i < 20; i++) 3965 cpu_relax(); /* This is a nop instr on i386 */ 3966 } 3967 } 3968 3969 static int 3970 qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha) 3971 { 3972 int rval; 3973 uint32_t data; 3974 uint32_t idc_lck_rcvry_stage_mask = 0x3; 3975 uint32_t idc_lck_rcvry_owner_mask = 0x3c; 3976 struct qla_hw_data *ha = base_vha->hw; 3977 3978 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data); 3979 if (rval) 3980 return rval; 3981 3982 if ((data & idc_lck_rcvry_stage_mask) > 0) { 3983 return QLA_SUCCESS; 3984 } else { 3985 data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2); 3986 rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, 3987 data); 3988 if (rval) 3989 return rval; 3990 3991 msleep(200); 3992 3993 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, 3994 &data); 3995 if (rval) 3996 return rval; 3997 3998 if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) { 3999 data &= (IDC_LOCK_RECOVERY_STAGE2 | 4000 ~(idc_lck_rcvry_stage_mask)); 4001 rval = qla83xx_wr_reg(base_vha, 4002 QLA83XX_IDC_LOCK_RECOVERY, data); 4003 if (rval) 4004 return rval; 4005 4006 /* Forcefully perform IDC UnLock */ 4007 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, 4008 &data); 4009 if (rval) 4010 return rval; 4011 /* Clear lock-id by setting 0xff */ 4012 rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 4013 0xff); 4014 if (rval) 4015 return rval; 4016 /* Clear lock-recovery by setting 0x0 */ 4017 rval = qla83xx_wr_reg(base_vha, 4018 QLA83XX_IDC_LOCK_RECOVERY, 0x0); 4019 if (rval) 4020 return rval; 4021 } else 4022 return QLA_SUCCESS; 4023 } 4024 4025 return rval; 4026 } 4027 4028 static int 4029 qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha) 4030 { 4031 int rval = QLA_SUCCESS; 4032 uint32_t o_drv_lockid, n_drv_lockid; 4033 unsigned long lock_recovery_timeout; 4034 4035 lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT; 4036 retry_lockid: 4037 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid); 4038 if (rval) 4039 goto exit; 4040 4041 /* MAX wait time before forcing IDC Lock recovery = 2 secs */ 4042 if (time_after_eq(jiffies, lock_recovery_timeout)) { 4043 if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS) 4044 return QLA_SUCCESS; 4045 else 4046 return QLA_FUNCTION_FAILED; 4047 } 4048 4049 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid); 4050 if (rval) 4051 goto exit; 4052 4053 if (o_drv_lockid == n_drv_lockid) { 4054 qla83xx_wait_logic(); 4055 goto retry_lockid; 4056 } else 4057 return QLA_SUCCESS; 4058 4059 exit: 4060 return rval; 4061 } 4062 4063 void 4064 qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id) 4065 { 4066 uint16_t options = (requester_id << 15) | BIT_6; 4067 uint32_t data; 4068 struct qla_hw_data *ha = base_vha->hw; 4069 4070 /* IDC-lock implementation using driver-lock/lock-id remote registers */ 4071 retry_lock: 4072 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data) 4073 == QLA_SUCCESS) { 4074 if (data) { 4075 /* Setting lock-id to our function-number */ 4076 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 4077 ha->portnum); 4078 } else { 4079 ql_dbg(ql_dbg_p3p, base_vha, 0xb063, 4080 "Failed to acquire IDC lock.
retrying...\n"); 4081 4082 /* Retry/Perform IDC-Lock recovery */ 4083 if (qla83xx_idc_lock_recovery(base_vha) 4084 == QLA_SUCCESS) { 4085 qla83xx_wait_logic(); 4086 goto retry_lock; 4087 } else 4088 ql_log(ql_log_warn, base_vha, 0xb075, 4089 "IDC Lock recovery FAILED.\n"); 4090 } 4091 4092 } 4093 4094 return; 4095 4096 /* XXX: IDC-lock implementation using access-control mbx */ 4097 retry_lock2: 4098 if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) { 4099 ql_dbg(ql_dbg_p3p, base_vha, 0xb072, 4100 "Failed to acquire IDC lock. retrying...\n"); 4101 /* Retry/Perform IDC-Lock recovery */ 4102 if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) { 4103 qla83xx_wait_logic(); 4104 goto retry_lock2; 4105 } else 4106 ql_log(ql_log_warn, base_vha, 0xb076, 4107 "IDC Lock recovery FAILED.\n"); 4108 } 4109 4110 return; 4111 } 4112 4113 void 4114 qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id) 4115 { 4116 uint16_t options = (requester_id << 15) | BIT_7, retry; 4117 uint32_t data; 4118 struct qla_hw_data *ha = base_vha->hw; 4119 4120 /* IDC-unlock implementation using driver-unlock/lock-id 4121 * remote registers 4122 */ 4123 retry = 0; 4124 retry_unlock: 4125 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data) 4126 == QLA_SUCCESS) { 4127 if (data == ha->portnum) { 4128 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data); 4129 /* Clearing lock-id by setting 0xff */ 4130 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff); 4131 } else if (retry < 10) { 4132 /* SV: XXX: IDC unlock retrying needed here? */ 4133 4134 /* Retry for IDC-unlock */ 4135 qla83xx_wait_logic(); 4136 retry++; 4137 ql_dbg(ql_dbg_p3p, base_vha, 0xb064, 4138 "Failed to release IDC lock, retrying=%d\n", retry); 4139 goto retry_unlock; 4140 } 4141 } else if (retry < 10) { 4142 /* Retry for IDC-unlock */ 4143 qla83xx_wait_logic(); 4144 retry++; 4145 ql_dbg(ql_dbg_p3p, base_vha, 0xb065, 4146 "Failed to read drv-lockid, retrying=%d\n", retry); 4147 goto retry_unlock; 4148 } 4149 4150 return; 4151 4152 /* XXX: IDC-unlock implementation using access-control mbx */ 4153 retry = 0; 4154 retry_unlock2: 4155 if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) { 4156 if (retry < 10) { 4157 /* Retry for IDC-unlock */ 4158 qla83xx_wait_logic(); 4159 retry++; 4160 ql_dbg(ql_dbg_p3p, base_vha, 0xb066, 4161 "Failed to release IDC lock, retrying=%d\n", retry); 4162 goto retry_unlock2; 4163 } 4164 } 4165 4166 return; 4167 } 4168 4169 int 4170 __qla83xx_set_drv_presence(scsi_qla_host_t *vha) 4171 { 4172 int rval = QLA_SUCCESS; 4173 struct qla_hw_data *ha = vha->hw; 4174 uint32_t drv_presence; 4175 4176 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 4177 if (rval == QLA_SUCCESS) { 4178 drv_presence |= (1 << ha->portnum); 4179 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, 4180 drv_presence); 4181 } 4182 4183 return rval; 4184 } 4185 4186 int 4187 qla83xx_set_drv_presence(scsi_qla_host_t *vha) 4188 { 4189 int rval = QLA_SUCCESS; 4190 4191 qla83xx_idc_lock(vha, 0); 4192 rval = __qla83xx_set_drv_presence(vha); 4193 qla83xx_idc_unlock(vha, 0); 4194 4195 return rval; 4196 } 4197 4198 int 4199 __qla83xx_clear_drv_presence(scsi_qla_host_t *vha) 4200 { 4201 int rval = QLA_SUCCESS; 4202 struct qla_hw_data *ha = vha->hw; 4203 uint32_t drv_presence; 4204 4205 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 4206 if (rval == QLA_SUCCESS) { 4207 drv_presence &= ~(1 << ha->portnum); 4208 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, 4209 drv_presence); 4210 } 4211
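/* DRV-PRESENCE is a shared bitmap in which each function owns bit (1 << portnum); this mirrors __qla83xx_set_drv_presence() above. */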
4212 return rval; 4213 } 4214 4215 int 4216 qla83xx_clear_drv_presence(scsi_qla_host_t *vha) 4217 { 4218 int rval = QLA_SUCCESS; 4219 4220 qla83xx_idc_lock(vha, 0); 4221 rval = __qla83xx_clear_drv_presence(vha); 4222 qla83xx_idc_unlock(vha, 0); 4223 4224 return rval; 4225 } 4226 4227 static void 4228 qla83xx_need_reset_handler(scsi_qla_host_t *vha) 4229 { 4230 struct qla_hw_data *ha = vha->hw; 4231 uint32_t drv_ack, drv_presence; 4232 unsigned long ack_timeout; 4233 4234 /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */ 4235 ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); 4236 while (1) { 4237 qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); 4238 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 4239 if ((drv_ack & drv_presence) == drv_presence) 4240 break; 4241 4242 if (time_after_eq(jiffies, ack_timeout)) { 4243 ql_log(ql_log_warn, vha, 0xb067, 4244 "RESET ACK TIMEOUT! drv_presence=0x%x " 4245 "drv_ack=0x%x\n", drv_presence, drv_ack); 4246 /* 4247 * The function(s) which did not ack in time are forced 4248 * to withdraw any further participation in the IDC 4249 * reset. 4250 */ 4251 if (drv_ack != drv_presence) 4252 qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, 4253 drv_ack); 4254 break; 4255 } 4256 4257 qla83xx_idc_unlock(vha, 0); 4258 msleep(1000); 4259 qla83xx_idc_lock(vha, 0); 4260 } 4261 4262 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD); 4263 ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n"); 4264 } 4265 4266 static int 4267 qla83xx_device_bootstrap(scsi_qla_host_t *vha) 4268 { 4269 int rval = QLA_SUCCESS; 4270 uint32_t idc_control; 4271 4272 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING); 4273 ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n"); 4274 4275 /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */ 4276 __qla83xx_get_idc_control(vha, &idc_control); 4277 idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET; 4278 __qla83xx_set_idc_control(vha, idc_control); 4279 4280 qla83xx_idc_unlock(vha, 0); 4281 rval = qla83xx_restart_nic_firmware(vha); 4282 qla83xx_idc_lock(vha, 0); 4283 4284 if (rval != QLA_SUCCESS) { 4285 ql_log(ql_log_fatal, vha, 0xb06a, 4286 "Failed to restart NIC f/w.\n"); 4287 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED); 4288 ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n"); 4289 } else { 4290 ql_dbg(ql_dbg_p3p, vha, 0xb06c, 4291 "Success in restarting nic f/w.\n"); 4292 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY); 4293 ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n"); 4294 } 4295 4296 return rval; 4297 } 4298 4299 /* Assumes idc_lock always held on entry */ 4300 int 4301 qla83xx_idc_state_handler(scsi_qla_host_t *base_vha) 4302 { 4303 struct qla_hw_data *ha = base_vha->hw; 4304 int rval = QLA_SUCCESS; 4305 unsigned long dev_init_timeout; 4306 uint32_t dev_state; 4307 4308 /* Wait for MAX-INIT-TIMEOUT for the device to go ready */ 4309 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); 4310 4311 while (1) { 4312 4313 if (time_after_eq(jiffies, dev_init_timeout)) { 4314 ql_log(ql_log_warn, base_vha, 0xb06e, 4315 "Initialization TIMEOUT!\n"); 4316 /* Init timeout. Disable further NIC Core 4317 * communication.
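* The QLA8XXX_DEV_FAILED state written below signals this to the
* other participating functions.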
*/ 4319 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, 4320 QLA8XXX_DEV_FAILED); 4321 ql_log(ql_log_info, base_vha, 0xb06f, 4322 "HW State: FAILED.\n"); 4323 } 4324 4325 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 4326 switch (dev_state) { 4327 case QLA8XXX_DEV_READY: 4328 if (ha->flags.nic_core_reset_owner) 4329 qla83xx_idc_audit(base_vha, 4330 IDC_AUDIT_COMPLETION); 4331 ha->flags.nic_core_reset_owner = 0; 4332 ql_dbg(ql_dbg_p3p, base_vha, 0xb070, 4333 "Reset_owner reset by 0x%x.\n", 4334 ha->portnum); 4335 goto exit; 4336 case QLA8XXX_DEV_COLD: 4337 if (ha->flags.nic_core_reset_owner) 4338 rval = qla83xx_device_bootstrap(base_vha); 4339 else { 4340 /* Wait for AEN to change device-state */ 4341 qla83xx_idc_unlock(base_vha, 0); 4342 msleep(1000); 4343 qla83xx_idc_lock(base_vha, 0); 4344 } 4345 break; 4346 case QLA8XXX_DEV_INITIALIZING: 4347 /* Wait for AEN to change device-state */ 4348 qla83xx_idc_unlock(base_vha, 0); 4349 msleep(1000); 4350 qla83xx_idc_lock(base_vha, 0); 4351 break; 4352 case QLA8XXX_DEV_NEED_RESET: 4353 if (!ql2xdontresethba && ha->flags.nic_core_reset_owner) 4354 qla83xx_need_reset_handler(base_vha); 4355 else { 4356 /* Wait for AEN to change device-state */ 4357 qla83xx_idc_unlock(base_vha, 0); 4358 msleep(1000); 4359 qla83xx_idc_lock(base_vha, 0); 4360 } 4361 /* reset timeout value after need reset handler */ 4362 dev_init_timeout = jiffies + 4363 (ha->fcoe_dev_init_timeout * HZ); 4364 break; 4365 case QLA8XXX_DEV_NEED_QUIESCENT: 4366 /* XXX: DEBUG for now */ 4367 qla83xx_idc_unlock(base_vha, 0); 4368 msleep(1000); 4369 qla83xx_idc_lock(base_vha, 0); 4370 break; 4371 case QLA8XXX_DEV_QUIESCENT: 4372 /* XXX: DEBUG for now */ 4373 if (ha->flags.quiesce_owner) 4374 goto exit; 4375 4376 qla83xx_idc_unlock(base_vha, 0); 4377 msleep(1000); 4378 qla83xx_idc_lock(base_vha, 0); 4379 dev_init_timeout = jiffies + 4380 (ha->fcoe_dev_init_timeout * HZ); 4381 break; 4382 case QLA8XXX_DEV_FAILED: 4383 if (ha->flags.nic_core_reset_owner) 4384 qla83xx_idc_audit(base_vha, 4385 IDC_AUDIT_COMPLETION); 4386 ha->flags.nic_core_reset_owner = 0; 4387 __qla83xx_clear_drv_presence(base_vha); 4388 qla83xx_idc_unlock(base_vha, 0); 4389 qla8xxx_dev_failed_handler(base_vha); 4390 rval = QLA_FUNCTION_FAILED; 4391 qla83xx_idc_lock(base_vha, 0); 4392 goto exit; 4393 case QLA8XXX_BAD_VALUE: 4394 qla83xx_idc_unlock(base_vha, 0); 4395 msleep(1000); 4396 qla83xx_idc_lock(base_vha, 0); 4397 break; 4398 default: 4399 ql_log(ql_log_warn, base_vha, 0xb071, 4400 "Unknown Device State: %x.\n", dev_state); 4401 qla83xx_idc_unlock(base_vha, 0); 4402 qla8xxx_dev_failed_handler(base_vha); 4403 rval = QLA_FUNCTION_FAILED; 4404 qla83xx_idc_lock(base_vha, 0); 4405 goto exit; 4406 } 4407 } 4408 4409 exit: 4410 return rval; 4411 } 4412 4413 /************************************************************************** 4414 * qla2x00_do_dpc 4415 * This kernel thread is a task that is scheduled by the interrupt handler 4416 * to perform the background processing for interrupts. 4417 * 4418 * Notes: 4419 * This task always runs in the context of a kernel thread. It 4420 * is kicked off by the driver's detect code and starts up 4421 * one per adapter. It immediately goes to sleep and waits for 4422 * some fibre event. When either the interrupt handler or 4423 * the timer routine detects an event, it sets one of the task 4424 * bits and then wakes us up.
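* The wake-up side is qla2xxx_wake_dpc() below, which calls
* wake_up_process() on ha->dpc_thread unless UNLOADING is set.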
4425 **************************************************************************/ 4426 static int 4427 qla2x00_do_dpc(void *data) 4428 { 4429 int rval; 4430 scsi_qla_host_t *base_vha; 4431 struct qla_hw_data *ha; 4432 4433 ha = (struct qla_hw_data *)data; 4434 base_vha = pci_get_drvdata(ha->pdev); 4435 4436 set_user_nice(current, -20); 4437 4438 set_current_state(TASK_INTERRUPTIBLE); 4439 while (!kthread_should_stop()) { 4440 ql_dbg(ql_dbg_dpc, base_vha, 0x4000, 4441 "DPC handler sleeping.\n"); 4442 4443 schedule(); 4444 __set_current_state(TASK_RUNNING); 4445 4446 if (!base_vha->flags.init_done || ha->flags.mbox_busy) 4447 goto end_loop; 4448 4449 if (ha->flags.eeh_busy) { 4450 ql_dbg(ql_dbg_dpc, base_vha, 0x4003, 4451 "eeh_busy=%d.\n", ha->flags.eeh_busy); 4452 goto end_loop; 4453 } 4454 4455 ha->dpc_active = 1; 4456 4457 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001, 4458 "DPC handler waking up, dpc_flags=0x%lx.\n", 4459 base_vha->dpc_flags); 4460 4461 qla2x00_do_work(base_vha); 4462 4463 if (IS_QLA82XX(ha)) { 4464 if (test_and_clear_bit(ISP_UNRECOVERABLE, 4465 &base_vha->dpc_flags)) { 4466 qla82xx_idc_lock(ha); 4467 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4468 QLA8XXX_DEV_FAILED); 4469 qla82xx_idc_unlock(ha); 4470 ql_log(ql_log_info, base_vha, 0x4004, 4471 "HW State: FAILED.\n"); 4472 qla82xx_device_state_handler(base_vha); 4473 continue; 4474 } 4475 4476 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, 4477 &base_vha->dpc_flags)) { 4478 4479 ql_dbg(ql_dbg_dpc, base_vha, 0x4005, 4480 "FCoE context reset scheduled.\n"); 4481 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 4482 &base_vha->dpc_flags))) { 4483 if (qla82xx_fcoe_ctx_reset(base_vha)) { 4484 /* FCoE-ctx reset failed. 4485 * Escalate to chip-reset 4486 */ 4487 set_bit(ISP_ABORT_NEEDED, 4488 &base_vha->dpc_flags); 4489 } 4490 clear_bit(ABORT_ISP_ACTIVE, 4491 &base_vha->dpc_flags); 4492 } 4493 4494 ql_dbg(ql_dbg_dpc, base_vha, 0x4006, 4495 "FCoE context reset end.\n"); 4496 } 4497 } 4498 4499 if (test_and_clear_bit(ISP_ABORT_NEEDED, 4500 &base_vha->dpc_flags)) { 4501 4502 ql_dbg(ql_dbg_dpc, base_vha, 0x4007, 4503 "ISP abort scheduled.\n"); 4504 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 4505 &base_vha->dpc_flags))) { 4506 4507 if (ha->isp_ops->abort_isp(base_vha)) { 4508 /* failed. 
retry later */ 4509 set_bit(ISP_ABORT_NEEDED, 4510 &base_vha->dpc_flags); 4511 } 4512 clear_bit(ABORT_ISP_ACTIVE, 4513 &base_vha->dpc_flags); 4514 } 4515 4516 ql_dbg(ql_dbg_dpc, base_vha, 0x4008, 4517 "ISP abort end.\n"); 4518 } 4519 4520 if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, 4521 &base_vha->dpc_flags)) { 4522 qla2x00_update_fcports(base_vha); 4523 } 4524 4525 if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) { 4526 int ret; 4527 ret = qla2x00_send_change_request(base_vha, 0x3, 0); 4528 if (ret != QLA_SUCCESS) 4529 ql_log(ql_log_warn, base_vha, 0x121, 4530 "Failed to enable receiving of RSCN " 4531 "requests: 0x%x.\n", ret); 4532 clear_bit(SCR_PENDING, &base_vha->dpc_flags); 4533 } 4534 4535 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 4536 ql_dbg(ql_dbg_dpc, base_vha, 0x4009, 4537 "Quiescence mode scheduled.\n"); 4538 if (IS_QLA82XX(ha)) { 4539 qla82xx_device_state_handler(base_vha); 4540 clear_bit(ISP_QUIESCE_NEEDED, 4541 &base_vha->dpc_flags); 4542 if (!ha->flags.quiesce_owner) { 4543 qla2x00_perform_loop_resync(base_vha); 4544 4545 qla82xx_idc_lock(ha); 4546 qla82xx_clear_qsnt_ready(base_vha); 4547 qla82xx_idc_unlock(ha); 4548 } 4549 } else { 4550 clear_bit(ISP_QUIESCE_NEEDED, 4551 &base_vha->dpc_flags); 4552 qla2x00_quiesce_io(base_vha); 4553 } 4554 ql_dbg(ql_dbg_dpc, base_vha, 0x400a, 4555 "Quiescence mode end.\n"); 4556 } 4557 4558 if (test_and_clear_bit(RESET_MARKER_NEEDED, 4559 &base_vha->dpc_flags) && 4560 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { 4561 4562 ql_dbg(ql_dbg_dpc, base_vha, 0x400b, 4563 "Reset marker scheduled.\n"); 4564 qla2x00_rst_aen(base_vha); 4565 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags); 4566 ql_dbg(ql_dbg_dpc, base_vha, 0x400c, 4567 "Reset marker end.\n"); 4568 } 4569 4570 /* Retry each device up to login retry count */ 4571 if ((test_and_clear_bit(RELOGIN_NEEDED, 4572 &base_vha->dpc_flags)) && 4573 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && 4574 atomic_read(&base_vha->loop_state) != LOOP_DOWN) { 4575 4576 ql_dbg(ql_dbg_dpc, base_vha, 0x400d, 4577 "Relogin scheduled.\n"); 4578 qla2x00_relogin(base_vha); 4579 ql_dbg(ql_dbg_dpc, base_vha, 0x400e, 4580 "Relogin end.\n"); 4581 } 4582 4583 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, 4584 &base_vha->dpc_flags)) { 4585 4586 ql_dbg(ql_dbg_dpc, base_vha, 0x400f, 4587 "Loop resync scheduled.\n"); 4588 4589 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 4590 &base_vha->dpc_flags))) { 4591 4592 rval = qla2x00_loop_resync(base_vha); 4593 4594 clear_bit(LOOP_RESYNC_ACTIVE, 4595 &base_vha->dpc_flags); 4596 } 4597 4598 ql_dbg(ql_dbg_dpc, base_vha, 0x4010, 4599 "Loop resync end.\n"); 4600 } 4601 4602 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) && 4603 atomic_read(&base_vha->loop_state) == LOOP_READY) { 4604 clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags); 4605 qla2xxx_flash_npiv_conf(base_vha); 4606 } 4607 4608 if (!ha->interrupts_on) 4609 ha->isp_ops->enable_intrs(ha); 4610 4611 if (test_and_clear_bit(BEACON_BLINK_NEEDED, 4612 &base_vha->dpc_flags)) 4613 ha->isp_ops->beacon_blink(base_vha); 4614 4615 qla2x00_do_dpc_all_vps(base_vha); 4616 4617 ha->dpc_active = 0; 4618 end_loop: 4619 set_current_state(TASK_INTERRUPTIBLE); 4620 } /* End of while (!kthread_should_stop()) */ 4621 __set_current_state(TASK_RUNNING); 4622 4623 ql_dbg(ql_dbg_dpc, base_vha, 0x4011, 4624 "DPC handler exiting.\n"); 4625 4626 /* 4627 * Make sure that nobody tries to wake us up again. 4628 */ 4629 ha->dpc_active = 0; 4630 4631 /* Clean up any residual CTX SRBs.
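* Anything still outstanding is completed with DID_NO_CONNECT so the
* midlayer can release the commands.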
*/ 4632 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 4633 4634 return 0; 4635 } 4636 4637 void 4638 qla2xxx_wake_dpc(struct scsi_qla_host *vha) 4639 { 4640 struct qla_hw_data *ha = vha->hw; 4641 struct task_struct *t = ha->dpc_thread; 4642 4643 if (!test_bit(UNLOADING, &vha->dpc_flags) && t) 4644 wake_up_process(t); 4645 } 4646 4647 /* 4648 * qla2x00_rst_aen 4649 * Processes asynchronous reset. 4650 * 4651 * Input: 4652 * vha = adapter block pointer. 4653 */ 4654 static void 4655 qla2x00_rst_aen(scsi_qla_host_t *vha) 4656 { 4657 if (vha->flags.online && !vha->flags.reset_active && 4658 !atomic_read(&vha->loop_down_timer) && 4659 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) { 4660 do { 4661 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 4662 4663 /* 4664 * Issue marker command only when we are going to start 4665 * the I/O. 4666 */ 4667 vha->marker_needed = 1; 4668 } while (!atomic_read(&vha->loop_down_timer) && 4669 (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags))); 4670 } 4671 } 4672 4673 /************************************************************************** 4674 * qla2x00_timer 4675 * 4676 * Description: 4677 * One second timer 4678 * 4679 * Context: Interrupt 4680 ***************************************************************************/ 4681 void 4682 qla2x00_timer(scsi_qla_host_t *vha) 4683 { 4684 unsigned long cpu_flags = 0; 4685 int start_dpc = 0; 4686 int index; 4687 srb_t *sp; 4688 uint16_t w; 4689 struct qla_hw_data *ha = vha->hw; 4690 struct req_que *req; 4691 4692 if (ha->flags.eeh_busy) { 4693 ql_dbg(ql_dbg_timer, vha, 0x6000, 4694 "EEH = %d, restarting timer.\n", 4695 ha->flags.eeh_busy); 4696 qla2x00_restart_timer(vha, WATCH_INTERVAL); 4697 return; 4698 } 4699 4700 /* Hardware read to raise pending EEH errors during mailbox waits. */ 4701 if (!pci_channel_offline(ha->pdev)) 4702 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 4703 4704 /* Make sure qla82xx_watchdog is run only for physical port */ 4705 if (!vha->vp_idx && IS_QLA82XX(ha)) { 4706 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) 4707 start_dpc++; 4708 qla82xx_watchdog(vha); 4709 } 4710 4711 /* Loop down handler. */ 4712 if (atomic_read(&vha->loop_down_timer) > 0 && 4713 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && 4714 !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags)) 4715 && vha->flags.online) { 4716 4717 if (atomic_read(&vha->loop_down_timer) == 4718 vha->loop_down_abort_time) { 4719 4720 ql_log(ql_log_info, vha, 0x6008, 4721 "Loop down - aborting the queues before time expires.\n"); 4722 4723 if (!IS_QLA2100(ha) && vha->link_down_timeout) 4724 atomic_set(&vha->loop_state, LOOP_DEAD); 4725 4726 /* 4727 * Schedule an ISP abort to return any FCP2-device 4728 * commands.
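* Only SRB_SCSI_CMD entries on FCF_FCP2_DEVICE ports trigger this;
* ISP82xx schedules an FCoE context reset instead of a full ISP abort.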
4729 */ 4730 /* NPIV - scan physical port only */ 4731 if (!vha->vp_idx) { 4732 spin_lock_irqsave(&ha->hardware_lock, 4733 cpu_flags); 4734 req = ha->req_q_map[0]; 4735 for (index = 1; 4736 index < MAX_OUTSTANDING_COMMANDS; 4737 index++) { 4738 fc_port_t *sfcp; 4739 4740 sp = req->outstanding_cmds[index]; 4741 if (!sp) 4742 continue; 4743 if (sp->type != SRB_SCSI_CMD) 4744 continue; 4745 sfcp = sp->fcport; 4746 if (!(sfcp->flags & FCF_FCP2_DEVICE)) 4747 continue; 4748 4749 if (IS_QLA82XX(ha)) 4750 set_bit(FCOE_CTX_RESET_NEEDED, 4751 &vha->dpc_flags); 4752 else 4753 set_bit(ISP_ABORT_NEEDED, 4754 &vha->dpc_flags); 4755 break; 4756 } 4757 spin_unlock_irqrestore(&ha->hardware_lock, 4758 cpu_flags); 4759 } 4760 start_dpc++; 4761 } 4762 4763 /* if the loop has been down for 4 minutes, reinit adapter */ 4764 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { 4765 if (!(vha->device_flags & DFLG_NO_CABLE)) { 4766 ql_log(ql_log_warn, vha, 0x6009, 4767 "Loop down - aborting ISP.\n"); 4768 4769 if (IS_QLA82XX(ha)) 4770 set_bit(FCOE_CTX_RESET_NEEDED, 4771 &vha->dpc_flags); 4772 else 4773 set_bit(ISP_ABORT_NEEDED, 4774 &vha->dpc_flags); 4775 } 4776 } 4777 ql_dbg(ql_dbg_timer, vha, 0x600a, 4778 "Loop down - seconds remaining %d.\n", 4779 atomic_read(&vha->loop_down_timer)); 4780 } 4781 4782 /* Check if beacon LED needs to be blinked for physical host only */ 4783 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { 4784 /* There is no beacon_blink function for ISP82xx */ 4785 if (!IS_QLA82XX(ha)) { 4786 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); 4787 start_dpc++; 4788 } 4789 } 4790 4791 /* Process any deferred work. */ 4792 if (!list_empty(&vha->work_list)) 4793 start_dpc++; 4794 4795 /* Schedule the DPC routine if needed */ 4796 if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 4797 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) || 4798 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) || 4799 start_dpc || 4800 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) || 4801 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) || 4802 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || 4803 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || 4804 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || 4805 test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) { 4806 ql_dbg(ql_dbg_timer, vha, 0x600b, 4807 "isp_abort_needed=%d loop_resync_needed=%d " 4808 "fcport_update_needed=%d start_dpc=%d " 4809 "reset_marker_needed=%d", 4810 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags), 4811 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags), 4812 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags), 4813 start_dpc, 4814 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)); 4815 ql_dbg(ql_dbg_timer, vha, 0x600c, 4816 "beacon_blink_needed=%d isp_unrecoverable=%d " 4817 "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " 4818 "relogin_needed=%d.\n", 4819 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags), 4820 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags), 4821 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags), 4822 test_bit(VP_DPC_NEEDED, &vha->dpc_flags), 4823 test_bit(RELOGIN_NEEDED, &vha->dpc_flags)); 4824 qla2xxx_wake_dpc(vha); 4825 } 4826 4827 qla2x00_restart_timer(vha, WATCH_INTERVAL); 4828 } 4829 4830 /* Firmware interface routines. 
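* Each ISP type maps to one qla_fw_blobs[] entry below; the image is
* fetched once via request_firmware() under qla_fw_lock, cached in
* blob->fw, and released by qla2x00_release_firmware() at module exit.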
*/ 4831 4832 #define FW_BLOBS 10 4833 #define FW_ISP21XX 0 4834 #define FW_ISP22XX 1 4835 #define FW_ISP2300 2 4836 #define FW_ISP2322 3 4837 #define FW_ISP24XX 4 4838 #define FW_ISP25XX 5 4839 #define FW_ISP81XX 6 4840 #define FW_ISP82XX 7 4841 #define FW_ISP2031 8 4842 #define FW_ISP8031 9 4843 4844 #define FW_FILE_ISP21XX "ql2100_fw.bin" 4845 #define FW_FILE_ISP22XX "ql2200_fw.bin" 4846 #define FW_FILE_ISP2300 "ql2300_fw.bin" 4847 #define FW_FILE_ISP2322 "ql2322_fw.bin" 4848 #define FW_FILE_ISP24XX "ql2400_fw.bin" 4849 #define FW_FILE_ISP25XX "ql2500_fw.bin" 4850 #define FW_FILE_ISP81XX "ql8100_fw.bin" 4851 #define FW_FILE_ISP82XX "ql8200_fw.bin" 4852 #define FW_FILE_ISP2031 "ql2600_fw.bin" 4853 #define FW_FILE_ISP8031 "ql8300_fw.bin" 4854 4855 static DEFINE_MUTEX(qla_fw_lock); 4856 4857 static struct fw_blob qla_fw_blobs[FW_BLOBS] = { 4858 { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, }, 4859 { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, }, 4860 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, }, 4861 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, }, 4862 { .name = FW_FILE_ISP24XX, }, 4863 { .name = FW_FILE_ISP25XX, }, 4864 { .name = FW_FILE_ISP81XX, }, 4865 { .name = FW_FILE_ISP82XX, }, 4866 { .name = FW_FILE_ISP2031, }, 4867 { .name = FW_FILE_ISP8031, }, 4868 }; 4869 4870 struct fw_blob * 4871 qla2x00_request_firmware(scsi_qla_host_t *vha) 4872 { 4873 struct qla_hw_data *ha = vha->hw; 4874 struct fw_blob *blob; 4875 4876 if (IS_QLA2100(ha)) { 4877 blob = &qla_fw_blobs[FW_ISP21XX]; 4878 } else if (IS_QLA2200(ha)) { 4879 blob = &qla_fw_blobs[FW_ISP22XX]; 4880 } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { 4881 blob = &qla_fw_blobs[FW_ISP2300]; 4882 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 4883 blob = &qla_fw_blobs[FW_ISP2322]; 4884 } else if (IS_QLA24XX_TYPE(ha)) { 4885 blob = &qla_fw_blobs[FW_ISP24XX]; 4886 } else if (IS_QLA25XX(ha)) { 4887 blob = &qla_fw_blobs[FW_ISP25XX]; 4888 } else if (IS_QLA81XX(ha)) { 4889 blob = &qla_fw_blobs[FW_ISP81XX]; 4890 } else if (IS_QLA82XX(ha)) { 4891 blob = &qla_fw_blobs[FW_ISP82XX]; 4892 } else if (IS_QLA2031(ha)) { 4893 blob = &qla_fw_blobs[FW_ISP2031]; 4894 } else if (IS_QLA8031(ha)) { 4895 blob = &qla_fw_blobs[FW_ISP8031]; 4896 } else { 4897 return NULL; 4898 } 4899 4900 mutex_lock(&qla_fw_lock); 4901 if (blob->fw) 4902 goto out; 4903 4904 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { 4905 ql_log(ql_log_warn, vha, 0x0063, 4906 "Failed to load firmware image (%s).\n", blob->name); 4907 blob->fw = NULL; 4908 blob = NULL; 4909 goto out; 4910 } 4911 4912 out: 4913 mutex_unlock(&qla_fw_lock); 4914 return blob; 4915 } 4916 4917 static void 4918 qla2x00_release_firmware(void) 4919 { 4920 int idx; 4921 4922 mutex_lock(&qla_fw_lock); 4923 for (idx = 0; idx < FW_BLOBS; idx++) 4924 release_firmware(qla_fw_blobs[idx].fw); 4925 mutex_unlock(&qla_fw_lock); 4926 } 4927 4928 static pci_ers_result_t 4929 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 4930 { 4931 scsi_qla_host_t *vha = pci_get_drvdata(pdev); 4932 struct qla_hw_data *ha = vha->hw; 4933 4934 ql_dbg(ql_dbg_aer, vha, 0x9000, 4935 "PCI error detected, state %x.\n", state); 4936 4937 switch (state) { 4938 case pci_channel_io_normal: 4939 ha->flags.eeh_busy = 0; 4940 return PCI_ERS_RESULT_CAN_RECOVER; 4941 case pci_channel_io_frozen: 4942 ha->flags.eeh_busy = 1; 4943 /* For ISP82XX complete any pending mailbox cmd */ 4944 if (IS_QLA82XX(ha)) { 4945 ha->flags.isp82xx_fw_hung = 1; 4946 ql_dbg(ql_dbg_aer, vha, 
0x9001, "PCI channel I/O frozen\n"); 4947 qla82xx_clear_pending_mbx(vha); 4948 } 4949 qla2x00_free_irqs(vha); 4950 pci_disable_device(pdev); 4951 /* Return all outstanding IOs */ 4952 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 4953 return PCI_ERS_RESULT_NEED_RESET; 4954 case pci_channel_io_perm_failure: 4955 ha->flags.pci_channel_io_perm_failure = 1; 4956 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); 4957 return PCI_ERS_RESULT_DISCONNECT; 4958 } 4959 return PCI_ERS_RESULT_NEED_RESET; 4960 } 4961 4962 static pci_ers_result_t 4963 qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) 4964 { 4965 int risc_paused = 0; 4966 uint32_t stat; 4967 unsigned long flags; 4968 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 4969 struct qla_hw_data *ha = base_vha->hw; 4970 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 4971 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 4972 4973 if (IS_QLA82XX(ha)) 4974 return PCI_ERS_RESULT_RECOVERED; 4975 4976 spin_lock_irqsave(&ha->hardware_lock, flags); 4977 if (IS_QLA2100(ha) || IS_QLA2200(ha)){ 4978 stat = RD_REG_DWORD(&reg->hccr); 4979 if (stat & HCCR_RISC_PAUSE) 4980 risc_paused = 1; 4981 } else if (IS_QLA23XX(ha)) { 4982 stat = RD_REG_DWORD(&reg->u.isp2300.host_status); 4983 if (stat & HSR_RISC_PAUSED) 4984 risc_paused = 1; 4985 } else if (IS_FWI2_CAPABLE(ha)) { 4986 stat = RD_REG_DWORD(&reg24->host_status); 4987 if (stat & HSRX_RISC_PAUSED) 4988 risc_paused = 1; 4989 } 4990 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4991 4992 if (risc_paused) { 4993 ql_log(ql_log_info, base_vha, 0x9003, 4994 "RISC paused -- mmio_enabled, Dumping firmware.\n"); 4995 ha->isp_ops->fw_dump(base_vha, 0); 4996 4997 return PCI_ERS_RESULT_NEED_RESET; 4998 } else 4999 return PCI_ERS_RESULT_RECOVERED; 5000 } 5001 5002 static uint32_t 5003 qla82xx_error_recovery(scsi_qla_host_t *base_vha) 5004 { 5005 uint32_t rval = QLA_FUNCTION_FAILED; 5006 uint32_t drv_active = 0; 5007 struct qla_hw_data *ha = base_vha->hw; 5008 int fn; 5009 struct pci_dev *other_pdev = NULL; 5010 5011 ql_dbg(ql_dbg_aer, base_vha, 0x9006, 5012 "Entered %s.\n", __func__); 5013 5014 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 5015 5016 if (base_vha->flags.online) { 5017 /* Abort all outstanding commands, 5018 * so they can be requeued later */ 5019 qla2x00_abort_isp_cleanup(base_vha); 5020 } 5021 5022 5023 fn = PCI_FUNC(ha->pdev->devfn); 5024 while (fn > 0) { 5025 fn--; 5026 ql_dbg(ql_dbg_aer, base_vha, 0x9007, 5027 "Finding pci device at function = 0x%x.\n", fn); 5028 other_pdev = 5029 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), 5030 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 5031 fn)); 5032 5033 if (!other_pdev) 5034 continue; 5035 if (atomic_read(&other_pdev->enable_cnt)) { 5036 ql_dbg(ql_dbg_aer, base_vha, 0x9008, 5037 "Found PCI func available and enabled at 0x%x.\n", 5038 fn); 5039 pci_dev_put(other_pdev); 5040 break; 5041 } 5042 pci_dev_put(other_pdev); 5043 } 5044 5045 if (!fn) { 5046 /* Reset owner */ 5047 ql_dbg(ql_dbg_aer, base_vha, 0x9009, 5048 "This devfn is reset owner = 0x%x.\n", 5049 ha->pdev->devfn); 5050 qla82xx_idc_lock(ha); 5051 5052 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 5053 QLA8XXX_DEV_INITIALIZING); 5054 5055 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, 5056 QLA82XX_IDC_VERSION); 5057 5058 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 5059 ql_dbg(ql_dbg_aer, base_vha, 0x900a, 5060 "drv_active = 0x%x.\n", drv_active); 5061 5062 qla82xx_idc_unlock(ha); 5063 /* Reset if device is not already reset 5064 * drv_active would be 0 if a reset
has already been done 5065 */ 5066 if (drv_active) 5067 rval = qla82xx_start_firmware(base_vha); 5068 else 5069 rval = QLA_SUCCESS; 5070 qla82xx_idc_lock(ha); 5071 5072 if (rval != QLA_SUCCESS) { 5073 ql_log(ql_log_info, base_vha, 0x900b, 5074 "HW State: FAILED.\n"); 5075 qla82xx_clear_drv_active(ha); 5076 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 5077 QLA8XXX_DEV_FAILED); 5078 } else { 5079 ql_log(ql_log_info, base_vha, 0x900c, 5080 "HW State: READY.\n"); 5081 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 5082 QLA8XXX_DEV_READY); 5083 qla82xx_idc_unlock(ha); 5084 ha->flags.isp82xx_fw_hung = 0; 5085 rval = qla82xx_restart_isp(base_vha); 5086 qla82xx_idc_lock(ha); 5087 /* Clear driver state register */ 5088 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0); 5089 qla82xx_set_drv_active(base_vha); 5090 } 5091 qla82xx_idc_unlock(ha); 5092 } else { 5093 ql_dbg(ql_dbg_aer, base_vha, 0x900d, 5094 "This devfn is not reset owner = 0x%x.\n", 5095 ha->pdev->devfn); 5096 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == 5097 QLA8XXX_DEV_READY)) { 5098 ha->flags.isp82xx_fw_hung = 0; 5099 rval = qla82xx_restart_isp(base_vha); 5100 qla82xx_idc_lock(ha); 5101 qla82xx_set_drv_active(base_vha); 5102 qla82xx_idc_unlock(ha); 5103 } 5104 } 5105 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 5106 5107 return rval; 5108 } 5109 5110 static pci_ers_result_t 5111 qla2xxx_pci_slot_reset(struct pci_dev *pdev) 5112 { 5113 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 5114 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 5115 struct qla_hw_data *ha = base_vha->hw; 5116 struct rsp_que *rsp; 5117 int rc, retries = 10; 5118 5119 ql_dbg(ql_dbg_aer, base_vha, 0x9004, 5120 "Slot Reset.\n"); 5121 5122 /* Workaround: the qla2xxx driver, which accesses hardware early in 5123 * the recovery path, needs the error state to be pci_channel_io_normal. 5124 * Otherwise mailbox commands time out.
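* The error state is forced back to pci_channel_io_normal below before
* any config-space or mailbox access is attempted.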
*/ 5126 pdev->error_state = pci_channel_io_normal; 5127 5128 pci_restore_state(pdev); 5129 5130 /* pci_restore_state() clears the device's saved_state flag, 5131 * so save the state again to re-arm it for any later restore. 5132 */ 5133 pci_save_state(pdev); 5134 5135 if (ha->mem_only) 5136 rc = pci_enable_device_mem(pdev); 5137 else 5138 rc = pci_enable_device(pdev); 5139 5140 if (rc) { 5141 ql_log(ql_log_warn, base_vha, 0x9005, 5142 "Can't re-enable PCI device after reset.\n"); 5143 goto exit_slot_reset; 5144 } 5145 5146 rsp = ha->rsp_q_map[0]; 5147 if (qla2x00_request_irqs(ha, rsp)) 5148 goto exit_slot_reset; 5149 5150 if (ha->isp_ops->pci_config(base_vha)) 5151 goto exit_slot_reset; 5152 5153 if (IS_QLA82XX(ha)) { 5154 if (qla82xx_error_recovery(base_vha) == QLA_SUCCESS) { 5155 ret = PCI_ERS_RESULT_RECOVERED; 5156 goto exit_slot_reset; 5157 } else 5158 goto exit_slot_reset; 5159 } 5160 5161 while (ha->flags.mbox_busy && retries--) 5162 msleep(1000); 5163 5164 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 5165 if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS) 5166 ret = PCI_ERS_RESULT_RECOVERED; 5167 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 5168 5169 5170 exit_slot_reset: 5171 ql_dbg(ql_dbg_aer, base_vha, 0x900e, 5172 "slot_reset return %x.\n", ret); 5173 5174 return ret; 5175 } 5176 5177 static void 5178 qla2xxx_pci_resume(struct pci_dev *pdev) 5179 { 5180 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 5181 struct qla_hw_data *ha = base_vha->hw; 5182 int ret; 5183 5184 ql_dbg(ql_dbg_aer, base_vha, 0x900f, 5185 "pci_resume.\n"); 5186 5187 ret = qla2x00_wait_for_hba_online(base_vha); 5188 if (ret != QLA_SUCCESS) { 5189 ql_log(ql_log_fatal, base_vha, 0x9002, 5190 "The device failed to resume I/O from slot/link_reset.\n"); 5191 } 5192 5193 pci_cleanup_aer_uncorrect_error_status(pdev); 5194 5195 ha->flags.eeh_busy = 0; 5196 } 5197 5198 static const struct pci_error_handlers qla2xxx_err_handler = { 5199 .error_detected = qla2xxx_pci_error_detected, 5200 .mmio_enabled = qla2xxx_pci_mmio_enabled, 5201 .slot_reset = qla2xxx_pci_slot_reset, 5202 .resume = qla2xxx_pci_resume, 5203 }; 5204 5205 static struct pci_device_id qla2xxx_pci_tbl[] = { 5206 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) }, 5207 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) }, 5208 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) }, 5209 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) }, 5210 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) }, 5211 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) }, 5212 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) }, 5213 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) }, 5214 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) }, 5215 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) }, 5216 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, 5217 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, 5218 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, 5219 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) }, 5220 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) }, 5221 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, 5222 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) }, 5223 { 0 }, 5224 }; 5225 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 5226 5227 static struct pci_driver
qla2xxx_pci_driver = { 5228 .name = QLA2XXX_DRIVER_NAME, 5229 .driver = { 5230 .owner = THIS_MODULE, 5231 }, 5232 .id_table = qla2xxx_pci_tbl, 5233 .probe = qla2x00_probe_one, 5234 .remove = qla2x00_remove_one, 5235 .shutdown = qla2x00_shutdown, 5236 .err_handler = &qla2xxx_err_handler, 5237 }; 5238 5239 static struct file_operations apidev_fops = { 5240 .owner = THIS_MODULE, 5241 .llseek = noop_llseek, 5242 }; 5243 5244 /** 5245 * qla2x00_module_init - Module initialization. 5246 **/ 5247 static int __init 5248 qla2x00_module_init(void) 5249 { 5250 int ret = 0; 5251 5252 /* Allocate cache for SRBs. */ 5253 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, 5254 SLAB_HWCACHE_ALIGN, NULL); 5255 if (srb_cachep == NULL) { 5256 ql_log(ql_log_fatal, NULL, 0x0001, 5257 "Unable to allocate SRB cache...Failing load!.\n"); 5258 return -ENOMEM; 5259 } 5260 5261 /* Initialize target kmem_cache and mem_pools */ 5262 ret = qlt_init(); 5263 if (ret < 0) { 5264 kmem_cache_destroy(srb_cachep); 5265 return ret; 5266 } else if (ret > 0) { 5267 /* 5268 * If initiator mode is explicitly disabled by qlt_init(), 5269 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from 5270 * performing scsi_scan_target() during LOOP UP event. 5271 */ 5272 qla2xxx_transport_functions.disable_target_scan = 1; 5273 qla2xxx_transport_vport_functions.disable_target_scan = 1; 5274 } 5275 5276 /* Derive version string. */ 5277 strcpy(qla2x00_version_str, QLA2XXX_VERSION); 5278 if (ql2xextended_error_logging) 5279 strcat(qla2x00_version_str, "-debug"); 5280 5281 qla2xxx_transport_template = 5282 fc_attach_transport(&qla2xxx_transport_functions); 5283 if (!qla2xxx_transport_template) { 5284 kmem_cache_destroy(srb_cachep); 5285 ql_log(ql_log_fatal, NULL, 0x0002, 5286 "fc_attach_transport failed...Failing load!.\n"); 5287 qlt_exit(); 5288 return -ENODEV; 5289 } 5290 5291 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops); 5292 if (apidev_major < 0) { 5293 ql_log(ql_log_fatal, NULL, 0x0003, 5294 "Unable to register char device %s.\n", QLA2XXX_APIDEV); 5295 } 5296 5297 qla2xxx_transport_vport_template = 5298 fc_attach_transport(&qla2xxx_transport_vport_functions); 5299 if (!qla2xxx_transport_vport_template) { 5300 kmem_cache_destroy(srb_cachep); 5301 qlt_exit(); 5302 fc_release_transport(qla2xxx_transport_template); 5303 ql_log(ql_log_fatal, NULL, 0x0004, 5304 "fc_attach_transport vport failed...Failing load!.\n"); 5305 return -ENODEV; 5306 } 5307 ql_log(ql_log_info, NULL, 0x0005, 5308 "QLogic Fibre Channel HBA Driver: %s.\n", 5309 qla2x00_version_str); 5310 ret = pci_register_driver(&qla2xxx_pci_driver); 5311 if (ret) { 5312 kmem_cache_destroy(srb_cachep); 5313 qlt_exit(); 5314 fc_release_transport(qla2xxx_transport_template); 5315 fc_release_transport(qla2xxx_transport_vport_template); 5316 ql_log(ql_log_fatal, NULL, 0x0006, 5317 "pci_register_driver failed...ret=%d Failing load!.\n", 5318 ret); 5319 } 5320 return ret; 5321 } 5322 5323 /** 5324 * qla2x00_module_exit - Module cleanup.
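* Teardown mirrors qla2x00_module_init() in reverse: char device, PCI
* driver, cached firmware images, SRB cache, target mode, CTX cache,
* then the two FC transport templates.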
5325 **/ 5326 static void __exit 5327 qla2x00_module_exit(void) 5328 { 5329 unregister_chrdev(apidev_major, QLA2XXX_APIDEV); 5330 pci_unregister_driver(&qla2xxx_pci_driver); 5331 qla2x00_release_firmware(); 5332 kmem_cache_destroy(srb_cachep); 5333 qlt_exit(); 5334 if (ctx_cachep) 5335 kmem_cache_destroy(ctx_cachep); 5336 fc_release_transport(qla2xxx_transport_template); 5337 fc_release_transport(qla2xxx_transport_vport_template); 5338 } 5339 5340 module_init(qla2x00_module_init); 5341 module_exit(qla2x00_module_exit); 5342 5343 MODULE_AUTHOR("QLogic Corporation"); 5344 MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver"); 5345 MODULE_LICENSE("GPL"); 5346 MODULE_VERSION(QLA2XXX_VERSION); 5347 MODULE_FIRMWARE(FW_FILE_ISP21XX); 5348 MODULE_FIRMWARE(FW_FILE_ISP22XX); 5349 MODULE_FIRMWARE(FW_FILE_ISP2300); 5350 MODULE_FIRMWARE(FW_FILE_ISP2322); 5351 MODULE_FIRMWARE(FW_FILE_ISP24XX); 5352 MODULE_FIRMWARE(FW_FILE_ISP25XX); 5353