// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
#include <linux/crash_dump.h>
#include <linux/trace_events.h>
#include <linux/trace.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

#include "qla_target.h"

/*
 * Driver version
 */
char qla2x00_version_str[40];

static int apidev_major;

/*
 * SRB allocation cache
 */
struct kmem_cache *srb_cachep;

static struct trace_array *qla_trc_array;

int ql2xfulldump_on_mpifail;
module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
		 "Set this to take full dump on MPI hang.");

int ql2xenforce_iocb_limit = 1;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
		 "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");

/*
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;
/*
 * error level for logging
 */
uint ql_errlev = 0x8001;

int ql2xsecenable;
module_param(ql2xsecenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xsecenable,
	"Enable/disable security. 0(Default) - Security disabled. 1 - Security enabled.");

static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
		"Specify if Class 2 operations are supported from the very "
		"beginning. Default is 0 - class 2 not supported.");


int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
		"Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO);
MODULE_PARM_DESC(qlport_down_retry,
		"Maximum number of command retries to a port that returns "
		"a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
		"Option to enable PLOGI to devices that are not present after "
		"a Fabric scan.  This is needed for several broken switches. "
		"Default is 0 - no PLOGI. 1 - perform PLOGI.");

int ql2xloginretrycount;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount,
		"Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO);
MODULE_PARM_DESC(ql2xallocfwdump,
		"Option to enable allocation of memory for a firmware dump "
		"during HBA initialization.  Memory allocation requirements "
		"vary by ISP type.  Default is 1 - allocate memory.");
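
/*
 * Illustrative usage (assumes the kernel's standard module sysfs layout,
 * nothing defined in this file): parameters marked S_IWUSR can be changed
 * at runtime, e.g.
 *
 *   modprobe qla2xxx logging=0x1e400000
 *   echo 0x7fffffff > /sys/module/qla2xxx/parameters/ql2xextended_error_logging
 *
 * S_IRUGO-only parameters take effect at module load time.
 */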
int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
		"Option to enable extended error logging,\n"
		"\t\tDefault is 0 - no logging.  0x40000000 - Module Init & Probe.\n"
		"\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
		"\t\t0x08000000 - IO tracing.    0x04000000 - DPC Thread.\n"
		"\t\t0x02000000 - Async events.  0x01000000 - Timer routines.\n"
		"\t\t0x00800000 - User space.    0x00400000 - Task Management.\n"
		"\t\t0x00200000 - AER/EEH.       0x00100000 - Multi Q.\n"
		"\t\t0x00080000 - P3P Specific.  0x00040000 - Virtual Port.\n"
		"\t\t0x00020000 - Buffer Dump.   0x00010000 - Misc.\n"
		"\t\t0x00008000 - Verbose.       0x00004000 - Target.\n"
		"\t\t0x00002000 - Target Mgmt.   0x00001000 - Target TMF.\n"
		"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
		"\t\t0x1e400000 - Preferred value for capturing essential "
		"debug information (equivalent to old "
		"ql2xextended_error_logging=1).\n"
		"\t\tDo LOGICAL OR of the value to enable more than one level");

int ql2xextended_error_logging_ktrace = 1;
module_param(ql2xextended_error_logging_ktrace, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging_ktrace,
		"Same BIT definition as ql2xextended_error_logging, but used to control logging to kernel trace buffer (default=1).\n");

int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
		"Set to control shifting of command type processing "
		"based on total number of SG elements.");

int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
		"Enables FDMI registrations. "
		"0 - no FDMI registrations. "
		"1 - provide FDMI registrations (default).");

#define MAX_Q_DEPTH	64
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
		"Maximum queue depth to set for each LUN. "
		"Default is 64.");

int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
		" Enable T10-CRC-DIF:\n"
		" Default is 2.\n"
		"  0 -- No DIF Support\n"
		"  1 -- Enable DIF for all types\n"
		"  2 -- Enable DIF for all types, except Type 0.\n");

#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xnvmeenable = 1;
#else
int ql2xnvmeenable;
#endif
module_param(ql2xnvmeenable, int, 0644);
MODULE_PARM_DESC(ql2xnvmeenable,
		"Enables NVME support. "
		"0 - no NVMe. 1 - NVMe enabled "
		"(default is 1 when NVME_FC is configured, else 0).");

int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
	" Enable T10-CRC-DIF Error isolation by HBA:\n"
	" Default is 2.\n"
	"  0 -- Error isolation disabled\n"
	"  1 -- Error isolation enabled only for DIX Type 0\n"
	"  2 -- Error isolation enabled for all Types\n");

int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
		"Enables iIDMA settings "
		"Default is 1 - perform iIDMA. 0 - no iIDMA.");
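
/*
 * Note: ql2xmqsupport and ql2xnvmeenable together gate the allocation of
 * ha->queue_pair_map in qla2x00_alloc_queues() below; with both disabled,
 * I/O is funneled through the base queue pair only.
 */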
" 189 "Set it to 0 to turn off mq qpair support."); 190 191 int ql2xfwloadbin; 192 module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR); 193 module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR); 194 MODULE_PARM_DESC(ql2xfwloadbin, 195 "Option to specify location from which to load ISP firmware:.\n" 196 " 2 -- load firmware via the request_firmware() (hotplug).\n" 197 " interface.\n" 198 " 1 -- load firmware from flash.\n" 199 " 0 -- use default semantics.\n"); 200 201 int ql2xetsenable; 202 module_param(ql2xetsenable, int, S_IRUGO); 203 MODULE_PARM_DESC(ql2xetsenable, 204 "Enables firmware ETS burst." 205 "Default is 0 - skip ETS enablement."); 206 207 int ql2xdbwr = 1; 208 module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR); 209 MODULE_PARM_DESC(ql2xdbwr, 210 "Option to specify scheme for request queue posting.\n" 211 " 0 -- Regular doorbell.\n" 212 " 1 -- CAMRAM doorbell (faster).\n"); 213 214 int ql2xgffidenable; 215 module_param(ql2xgffidenable, int, S_IRUGO); 216 MODULE_PARM_DESC(ql2xgffidenable, 217 "Enables GFF_ID checks of port type. " 218 "Default is 0 - Do not use GFF_ID information."); 219 220 int ql2xasynctmfenable = 1; 221 module_param(ql2xasynctmfenable, int, S_IRUGO); 222 MODULE_PARM_DESC(ql2xasynctmfenable, 223 "Enables issue of TM IOCBs asynchronously via IOCB mechanism" 224 "Default is 1 - Issue TM IOCBs via mailbox mechanism."); 225 226 int ql2xdontresethba; 227 module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR); 228 MODULE_PARM_DESC(ql2xdontresethba, 229 "Option to specify reset behaviour.\n" 230 " 0 (Default) -- Reset on failure.\n" 231 " 1 -- Do not reset on failure.\n"); 232 233 uint64_t ql2xmaxlun = MAX_LUNS; 234 module_param(ql2xmaxlun, ullong, S_IRUGO); 235 MODULE_PARM_DESC(ql2xmaxlun, 236 "Defines the maximum LU number to register with the SCSI " 237 "midlayer. Default is 65535."); 238 239 int ql2xmdcapmask = 0x1F; 240 module_param(ql2xmdcapmask, int, S_IRUGO); 241 MODULE_PARM_DESC(ql2xmdcapmask, 242 "Set the Minidump driver capture mask level. " 243 "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F."); 244 245 int ql2xmdenable = 1; 246 module_param(ql2xmdenable, int, S_IRUGO); 247 MODULE_PARM_DESC(ql2xmdenable, 248 "Enable/disable MiniDump. " 249 "0 - MiniDump disabled. " 250 "1 (Default) - MiniDump enabled."); 251 252 int ql2xexlogins; 253 module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR); 254 MODULE_PARM_DESC(ql2xexlogins, 255 "Number of extended Logins. " 256 "0 (Default)- Disabled."); 257 258 int ql2xexchoffld = 1024; 259 module_param(ql2xexchoffld, uint, 0644); 260 MODULE_PARM_DESC(ql2xexchoffld, 261 "Number of target exchanges."); 262 263 int ql2xiniexchg = 1024; 264 module_param(ql2xiniexchg, uint, 0644); 265 MODULE_PARM_DESC(ql2xiniexchg, 266 "Number of initiator exchanges."); 267 268 int ql2xfwholdabts; 269 module_param(ql2xfwholdabts, int, S_IRUGO); 270 MODULE_PARM_DESC(ql2xfwholdabts, 271 "Allow FW to hold status IOCB until ABTS rsp received. " 272 "0 (Default) Do not set fw option. " 273 "1 - Set fw option to hold ABTS."); 274 275 int ql2xmvasynctoatio = 1; 276 module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR); 277 MODULE_PARM_DESC(ql2xmvasynctoatio, 278 "Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ" 279 "0 (Default). 
Do not move IOCBs" 280 "1 - Move IOCBs."); 281 282 int ql2xautodetectsfp = 1; 283 module_param(ql2xautodetectsfp, int, 0444); 284 MODULE_PARM_DESC(ql2xautodetectsfp, 285 "Detect SFP range and set appropriate distance.\n" 286 "1 (Default): Enable\n"); 287 288 int ql2xenablemsix = 1; 289 module_param(ql2xenablemsix, int, 0444); 290 MODULE_PARM_DESC(ql2xenablemsix, 291 "Set to enable MSI or MSI-X interrupt mechanism.\n" 292 " Default is 1, enable MSI-X interrupt mechanism.\n" 293 " 0 -- enable traditional pin-based mechanism.\n" 294 " 1 -- enable MSI-X interrupt mechanism.\n" 295 " 2 -- enable MSI interrupt mechanism.\n"); 296 297 int qla2xuseresexchforels; 298 module_param(qla2xuseresexchforels, int, 0444); 299 MODULE_PARM_DESC(qla2xuseresexchforels, 300 "Reserve 1/2 of emergency exchanges for ELS.\n" 301 " 0 (default): disabled"); 302 303 static int ql2xprotmask; 304 module_param(ql2xprotmask, int, 0644); 305 MODULE_PARM_DESC(ql2xprotmask, 306 "Override DIF/DIX protection capabilities mask\n" 307 "Default is 0 which sets protection mask based on " 308 "capabilities reported by HBA firmware.\n"); 309 310 static int ql2xprotguard; 311 module_param(ql2xprotguard, int, 0644); 312 MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n" 313 " 0 -- Let HBA firmware decide\n" 314 " 1 -- Force T10 CRC\n" 315 " 2 -- Force IP checksum\n"); 316 317 int ql2xdifbundlinginternalbuffers; 318 module_param(ql2xdifbundlinginternalbuffers, int, 0644); 319 MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers, 320 "Force using internal buffers for DIF information\n" 321 "0 (Default). Based on check.\n" 322 "1 Force using internal buffers\n"); 323 324 int ql2xsmartsan; 325 module_param(ql2xsmartsan, int, 0444); 326 module_param_named(smartsan, ql2xsmartsan, int, 0444); 327 MODULE_PARM_DESC(ql2xsmartsan, 328 "Send SmartSAN Management Attributes for FDMI Registration." 329 " Default is 0 - No SmartSAN registration," 330 " 1 - Register SmartSAN Management Attributes."); 331 332 int ql2xrdpenable; 333 module_param(ql2xrdpenable, int, 0444); 334 module_param_named(rdpenable, ql2xrdpenable, int, 0444); 335 MODULE_PARM_DESC(ql2xrdpenable, 336 "Enables RDP responses. " 337 "0 - no RDP responses (default). " 338 "1 - provide RDP responses."); 339 int ql2xabts_wait_nvme = 1; 340 module_param(ql2xabts_wait_nvme, int, 0444); 341 MODULE_PARM_DESC(ql2xabts_wait_nvme, 342 "To wait for ABTS response on I/O timeouts for NVMe. 
(default: 1)"); 343 344 345 static u32 ql2xdelay_before_pci_error_handling = 5; 346 module_param(ql2xdelay_before_pci_error_handling, uint, 0644); 347 MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling, 348 "Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n"); 349 350 static void qla2x00_clear_drv_active(struct qla_hw_data *); 351 static void qla2x00_free_device(scsi_qla_host_t *); 352 static void qla2xxx_map_queues(struct Scsi_Host *shost); 353 static void qla2x00_destroy_deferred_work(struct qla_hw_data *); 354 355 u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES; 356 module_param(ql2xnvme_queues, uint, S_IRUGO); 357 MODULE_PARM_DESC(ql2xnvme_queues, 358 "Number of NVMe Queues that can be configured.\n" 359 "Final value will be min(ql2xnvme_queues, num_cpus,num_chip_queues)\n" 360 "1 - Minimum number of queues supported\n" 361 "8 - Default value"); 362 363 static struct scsi_transport_template *qla2xxx_transport_template = NULL; 364 struct scsi_transport_template *qla2xxx_transport_vport_template = NULL; 365 366 /* TODO Convert to inlines 367 * 368 * Timer routines 369 */ 370 371 __inline__ void 372 qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval) 373 { 374 timer_setup(&vha->timer, qla2x00_timer, 0); 375 vha->timer.expires = jiffies + interval * HZ; 376 add_timer(&vha->timer); 377 vha->timer_active = 1; 378 } 379 380 static inline void 381 qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval) 382 { 383 /* Currently used for 82XX only. */ 384 if (vha->device_flags & DFLG_DEV_FAILED) { 385 ql_dbg(ql_dbg_timer, vha, 0x600d, 386 "Device in a failed state, returning.\n"); 387 return; 388 } 389 390 mod_timer(&vha->timer, jiffies + interval * HZ); 391 } 392 393 static __inline__ void 394 qla2x00_stop_timer(scsi_qla_host_t *vha) 395 { 396 del_timer_sync(&vha->timer); 397 vha->timer_active = 0; 398 } 399 400 static int qla2x00_do_dpc(void *data); 401 402 static void qla2x00_rst_aen(scsi_qla_host_t *); 403 404 static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t, 405 struct req_que **, struct rsp_que **); 406 static void qla2x00_free_fw_dump(struct qla_hw_data *); 407 static void qla2x00_mem_free(struct qla_hw_data *); 408 int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, 409 struct qla_qpair *qpair); 410 411 /* -------------------------------------------------------------------------- */ 412 static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req, 413 struct rsp_que *rsp) 414 { 415 struct qla_hw_data *ha = vha->hw; 416 417 rsp->qpair = ha->base_qpair; 418 rsp->req = req; 419 ha->base_qpair->hw = ha; 420 ha->base_qpair->req = req; 421 ha->base_qpair->rsp = rsp; 422 ha->base_qpair->vha = vha; 423 ha->base_qpair->qp_lock_ptr = &ha->hardware_lock; 424 ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0; 425 ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q]; 426 ha->base_qpair->srb_mempool = ha->srb_mempool; 427 INIT_LIST_HEAD(&ha->base_qpair->hints_list); 428 ha->base_qpair->enable_class_2 = ql2xenableclass2; 429 /* init qpair to this cpu. Will adjust at run time. 
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
				struct rsp_que *rsp)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
				GFP_KERNEL);
	if (!ha->req_q_map) {
		ql_log(ql_log_fatal, vha, 0x003b,
		    "Unable to allocate memory for request queue ptrs.\n");
		goto fail_req_map;
	}

	ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *),
				GFP_KERNEL);
	if (!ha->rsp_q_map) {
		ql_log(ql_log_fatal, vha, 0x003c,
		    "Unable to allocate memory for response queue ptrs.\n");
		goto fail_rsp_map;
	}

	ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
	if (ha->base_qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x00e0,
		    "Failed to allocate base queue pair memory.\n");
		goto fail_base_qpair;
	}

	qla_init_base_qpair(vha, req, rsp);

	if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) {
		ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
			GFP_KERNEL);
		if (!ha->queue_pair_map) {
			ql_log(ql_log_fatal, vha, 0x0180,
			    "Unable to allocate memory for queue pair ptrs.\n");
			goto fail_qpair_map;
		}
		if (qla_mapq_alloc_qp_cpu_map(ha) != 0) {
			kfree(ha->queue_pair_map);
			ha->queue_pair_map = NULL;
			goto fail_qpair_map;
		}
	}

	/*
	 * Make sure we record at least the request and response queue zero in
	 * case we need to free them if part of the probe fails.
	 */
	ha->rsp_q_map[0] = rsp;
	ha->req_q_map[0] = req;
	set_bit(0, ha->rsp_qid_map);
	set_bit(0, ha->req_qid_map);
	return 0;

fail_qpair_map:
	kfree(ha->base_qpair);
	ha->base_qpair = NULL;
fail_base_qpair:
	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
fail_rsp_map:
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;
fail_req_map:
	return -ENOMEM;
}

static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
	if (IS_QLAFX00(ha)) {
		if (req && req->ring_fx00)
			dma_free_coherent(&ha->pdev->dev,
			    (req->length_fx00 + 1) * sizeof(request_t),
			    req->ring_fx00, req->dma_fx00);
	} else if (req && req->ring)
		dma_free_coherent(&ha->pdev->dev,
		    (req->length + 1) * sizeof(request_t),
		    req->ring, req->dma);

	if (req)
		kfree(req->outstanding_cmds);

	kfree(req);
}

static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	if (IS_QLAFX00(ha)) {
		if (rsp && rsp->ring_fx00)
			dma_free_coherent(&ha->pdev->dev,
			    (rsp->length_fx00 + 1) * sizeof(request_t),
			    rsp->ring_fx00, rsp->dma_fx00);
	} else if (rsp && rsp->ring) {
		dma_free_coherent(&ha->pdev->dev,
		    (rsp->length + 1) * sizeof(response_t),
		    rsp->ring, rsp->dma);
	}
	kfree(rsp);
}
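
/*
 * Locking note: in the loops below, hardware_lock is dropped around each
 * qla2x00_free_req_que()/qla2x00_free_rsp_que() call because
 * dma_free_coherent() may sleep; the qid map entry is cleared under the
 * lock before the queue is handed off for freeing.
 */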
static void qla2x00_free_queues(struct qla_hw_data *ha)
{
	struct req_que *req;
	struct rsp_que *rsp;
	int cnt;
	unsigned long flags;

	if (ha->queue_pair_map) {
		kfree(ha->queue_pair_map);
		ha->queue_pair_map = NULL;
	}
	if (ha->base_qpair) {
		kfree(ha->base_qpair);
		ha->base_qpair = NULL;
	}

	qla_mapq_free_qp_cpu_map(ha);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
		if (!test_bit(cnt, ha->req_qid_map))
			continue;

		req = ha->req_q_map[cnt];
		clear_bit(cnt, ha->req_qid_map);
		ha->req_q_map[cnt] = NULL;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		qla2x00_free_req_que(ha, req);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(ha->req_q_map);
	ha->req_q_map = NULL;


	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
		if (!test_bit(cnt, ha->rsp_qid_map))
			continue;

		rsp = ha->rsp_q_map[cnt];
		clear_bit(cnt, ha->rsp_qid_map);
		ha->rsp_q_map[cnt] = NULL;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		qla2x00_free_rsp_que(ha, rsp);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
}

static char *
qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
	struct qla_hw_data *ha = vha->hw;
	static const char *const pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	uint16_t pci_bus;

	pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
	if (pci_bus) {
		snprintf(str, str_len, "PCI-X (%s MHz)",
		    pci_bus_modes[pci_bus]);
	} else {
		pci_bus = (ha->pci_attr & BIT_8) >> 8;
		snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]);
	}

	return str;
}

static char *
qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
	static const char *const pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	struct qla_hw_data *ha = vha->hw;
	uint32_t pci_bus;

	if (pci_is_pcie(ha->pdev)) {
		uint32_t lstat, lspeed, lwidth;
		const char *speed_str;

		pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
		lspeed = lstat & PCI_EXP_LNKCAP_SLS;
		lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;

		switch (lspeed) {
		case 1:
			speed_str = "2.5GT/s";
			break;
		case 2:
			speed_str = "5.0GT/s";
			break;
		case 3:
			speed_str = "8.0GT/s";
			break;
		case 4:
			speed_str = "16.0GT/s";
			break;
		default:
			speed_str = "<unknown>";
			break;
		}
		snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth);

		return str;
	}

	pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
	if (pci_bus == 0 || pci_bus == 8)
		snprintf(str, str_len, "PCI (%s MHz)",
		    pci_bus_modes[pci_bus >> 3]);
	else
		snprintf(str, str_len, "PCI-X Mode %d (%s MHz)",
		    pci_bus & 4 ? 2 : 1,
		    pci_bus_modes[pci_bus & 3]);

	return str;
}

static char *
qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	char un_str[10];
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version);

	if (ha->fw_attributes & BIT_9) {
		strcat(str, "FLX");
		return (str);
	}

	switch (ha->fw_attributes & 0xFF) {
	case 0x7:
		strcat(str, "EF");
		break;
	case 0x17:
		strcat(str, "TP");
		break;
	case 0x37:
		strcat(str, "IP");
		break;
	case 0x77:
		strcat(str, "VI");
		break;
	default:
		sprintf(un_str, "(%x)", ha->fw_attributes);
		strcat(str, un_str);
		break;
	}
	if (ha->fw_attributes & 0x100)
		strcat(str, "X");

	return (str);
}

static char *
qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
	return str;
}
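
/*
 * Each SRB_*_VALID flag tested below records a resource acquired while
 * building the command (scsi_dma_map(), protection SG mapping, CRC
 * context, FCP CMND DMA pool); the flag is cleared as the matching
 * resource is released, so teardown stays safe after a partial setup.
 */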
void qla2x00_sp_free_dma(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* List assured to be having elements */
		qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		struct crc_context *ctx0 = sp->u.scmd.crc_ctx;

		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx;

		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
	}

	if (sp->flags & SRB_GOT_BUF)
		qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
}

void qla2x00_sp_compl(srb_t *sp, int res)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct completion *comp = sp->comp;

	/* kref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	cmd->result = res;
	sp->type = 0;
	scsi_done(cmd);
	if (comp)
		complete(comp);
}

void qla2xxx_qpair_sp_free_dma(srb_t *sp)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* List assured to be having elements */
		qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
		struct crc_context *difctx = sp->u.scmd.crc_ctx;
		struct dsd_dma *dif_dsd, *nxt_dsd;

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dma_hndl_list, list) {
			list_del(&dif_dsd->list);
			dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr,
			    dif_dsd->dsd_list_dma);
			kfree(dif_dsd);
			difctx->no_dif_bundl--;
		}

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dsd_list, list) {
			list_del(&dif_dsd->list);
			dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr,
			    dif_dsd->dsd_list_dma);
			kfree(dif_dsd);
			difctx->no_ldif_dsd--;
		}

		if (difctx->no_ldif_dsd) {
			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
			    "%s: difctx->no_ldif_dsd=%x\n",
			    __func__, difctx->no_ldif_dsd);
		}

		if (difctx->no_dif_bundl) {
			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
			    "%s: difctx->no_dif_bundl=%x\n",
			    __func__, difctx->no_dif_bundl);
		}
		sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx;

		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
		sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		struct crc_context *ctx0 = sp->u.scmd.crc_ctx;

		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}

	if (sp->flags & SRB_GOT_BUF)
		qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
}

void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct completion *comp = sp->comp;

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	cmd->result = res;
	sp->type = 0;
	scsi_done(cmd);
	if (comp)
		complete(comp);
}

static int
qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;
	int rval;

	if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
	    WARN_ON_ONCE(!rport)) {
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

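	/*
	 * For MQ-capable adapters, route the command to the qpair that backs
	 * the block layer hardware queue it was issued on:
	 * blk_mq_unique_tag() encodes the hwq index in the upper bits of the
	 * tag and blk_mq_unique_tag_to_hwq() recovers it.
	 */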
	if (ha->mqenable) {
		uint32_t tag;
		uint16_t hwq;
		struct qla_qpair *qpair = NULL;

		tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
		hwq = blk_mq_unique_tag_to_hwq(tag);
		qpair = ha->queue_pair_map[hwq];

		if (qpair)
			return qla2xxx_mqueuecommand(host, cmd, qpair);
	}

	if (ha->flags.eeh_busy) {
		if (ha->flags.pci_channel_io_perm_failure) {
			ql_dbg(ql_dbg_aer, vha, 0x9010,
			    "PCI Channel IO permanent failure, exiting "
			    "cmd=%p.\n", cmd);
			cmd->result = DID_NO_CONNECT << 16;
		} else {
			ql_dbg(ql_dbg_aer, vha, 0x9011,
			    "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
			cmd->result = DID_REQUEUE << 16;
		}
		goto qc24_fail_command;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		cmd->result = rval;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
		    cmd, rval);
		goto qc24_fail_command;
	}

	if (!vha->flags.difdix_supported &&
	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		ql_dbg(ql_dbg_io, vha, 0x3004,
		    "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
		    cmd);
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (!fcport || fcport->deleted) {
		cmd->result = DID_IMM_RETRY << 16;
		goto qc24_fail_command;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			ql_dbg(ql_dbg_io, vha, 0x3005,
			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
			    atomic_read(&fcport->state),
			    atomic_read(&base_vha->loop_state));
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	/*
	 * Return target busy if we've received a non-zero retry_delay_timer
	 * in a FCP_RSP.
	 */
	if (fcport->retry_delay_timestamp == 0) {
		/* retry delay not set */
	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
		fcport->retry_delay_timestamp = 0;
	else
		goto qc24_target_busy;

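	/*
	 * From here on the command is wrapped in an srb: qla2xxx_init_sp()
	 * takes the initial reference (ref: INIT), dropped either in
	 * qla2x00_sp_compl() on completion or on the error path below.
	 */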
	sp = scsi_cmd_priv(cmd);
	/* ref: INIT */
	qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);

	sp->u.scmd.cmd = cmd;
	sp->type = SRB_SCSI_CMD;
	sp->free = qla2x00_sp_free_dma;
	sp->done = qla2x00_sp_compl;

	rval = ha->isp_ops->start_scsi(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
		goto qc24_host_busy_free_sp;
	}

	return 0;

qc24_host_busy_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
	scsi_done(cmd);

	return 0;
}

/* For MQ supported I/O */
int
qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
    struct qla_qpair *qpair)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;
	int rval;

	rval = rport ? fc_remote_port_chkready(rport) : (DID_NO_CONNECT << 16);
	if (rval) {
		cmd->result = rval;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
		    cmd, rval);
		goto qc24_fail_command;
	}

	if (!qpair->online) {
		ql_dbg(ql_dbg_io, vha, 0x3077,
		    "qpair not online. eeh_busy=%d.\n", ha->flags.eeh_busy);
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (!fcport || fcport->deleted) {
		cmd->result = DID_IMM_RETRY << 16;
		goto qc24_fail_command;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			ql_dbg(ql_dbg_io, vha, 0x3077,
			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
			    atomic_read(&fcport->state),
			    atomic_read(&base_vha->loop_state));
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	/*
	 * Return target busy if we've received a non-zero retry_delay_timer
	 * in a FCP_RSP.
	 */
	if (fcport->retry_delay_timestamp == 0) {
		/* retry delay not set */
	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
		fcport->retry_delay_timestamp = 0;
	else
		goto qc24_target_busy;

	sp = scsi_cmd_priv(cmd);
	/* ref: INIT */
	qla2xxx_init_sp(sp, vha, qpair, fcport);

	sp->u.scmd.cmd = cmd;
	sp->type = SRB_SCSI_CMD;
	sp->free = qla2xxx_qpair_sp_free_dma;
	sp->done = qla2xxx_qpair_sp_compl;

	rval = ha->isp_ops->start_scsi_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
		goto qc24_host_busy_free_sp;
	}

	return 0;

qc24_host_busy_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
	scsi_done(cmd);

	return 0;
}

/*
 * qla2x00_eh_wait_on_command
 *    Waits for the command to be returned by the Firmware for some
 *    max time.
 *
 * Input:
 *    cmd = Scsi Command to wait on.
 *
 * Return:
 *    Completed in time : QLA_SUCCESS
 *    Did not complete in time : QLA_FUNCTION_FAILED
 */
static int
qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
{
#define ABORT_POLLING_PERIOD	1000
#define ABORT_WAIT_ITER		((2 * 1000) / (ABORT_POLLING_PERIOD))
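	/*
	 * With a 1000 ms poll period against a 2000 ms budget,
	 * ABORT_WAIT_ITER evaluates to 2, so the loop below waits roughly
	 * two seconds in total.
	 */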
	unsigned long wait_iter = ABORT_WAIT_ITER;
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp = scsi_cmd_priv(cmd);
	int ret = QLA_SUCCESS;

	if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
		ql_dbg(ql_dbg_taskm, vha, 0x8005,
		    "Return:eh_wait.\n");
		return ret;
	}

	while (sp->type && wait_iter--)
		msleep(ABORT_POLLING_PERIOD);
	if (sp->type)
		ret = QLA_FUNCTION_FAILED;

	return ret;
}

/*
 * qla2x00_wait_for_hba_online
 *    Wait till the HBA is online after going through
 *    <= MAX_RETRIES_OF_ISP_ABORT or
 *    finally HBA is disabled ie marked offline
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching-Release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 * Return:
 *    Success (Adapter is online) : 0
 *    Failed  (Adapter is offline/disabled) : 1
 */
int
qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_online;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_online)) {

		msleep(1000);
	}
	if (base_vha->flags.online)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return (return_status);
}

static inline int test_fcport_count(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	int res;
	/* Return 0 = sleep, x = wake */

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_init, vha, 0x00ec,
	    "tgt %p, fcport_count=%d\n",
	    vha, vha->fcport_count);
	res = (vha->fcport_count == 0);
	if (res) {
		struct fc_port *fcport;

		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->deleted != QLA_SESS_DELETED) {
				/* session(s) may not be fully logged in
				 * (ie fcport_count=0), but session
				 * deletion thread(s) may be inflight.
				 */

				res = 0;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

/*
 * qla2x00_wait_for_sess_deletion can only be called from remove_one.
 * it has dependency on UNLOADING flag to stop device discovery
 */
void
qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
{
	u8 i;

	qla2x00_mark_all_devices_lost(vha);

	for (i = 0; i < 10; i++) {
		if (wait_event_timeout(vha->fcport_waitQ,
		    test_fcport_count(vha), HZ) > 0)
			break;
	}

	flush_workqueue(vha->hw->wq);
}

/*
 * qla2x00_wait_for_hba_ready
 *    Wait till the HBA is ready before doing driver unload
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching-Release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 */
static void
qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	while ((qla2x00_reset_active(vha) || ha->dpc_active ||
	    ha->flags.mbox_busy) ||
	    test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
	    test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
		if (test_bit(UNLOADING, &base_vha->dpc_flags))
			break;
		msleep(1000);
	}
}

int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_reset;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_reset)) {

		msleep(1000);

		if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
		    ha->flags.chip_reset_done)
			break;
	}
	if (ha->flags.chip_reset_done)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return return_status;
}
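
/*
 * The error-handler entry points that follow mirror the SCSI midlayer's
 * escalation order: abort one command, then LUN (device) reset, target
 * reset, bus (loop) reset, and finally a full adapter reset.
 */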

/**************************************************************************
 * qla2xxx_eh_abort
 *
 * Description:
 *    The abort function will abort the specified command.
 *
 * Input:
 *    cmd = Linux SCSI command packet to be aborted.
 *
 * Returns:
 *    Either SUCCESS or FAILED.
 *
 * Note:
 *    Only return FAILED if command not returned by firmware.
 **************************************************************************/
static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	DECLARE_COMPLETION_ONSTACK(comp);
	srb_t *sp;
	int ret;
	unsigned int id;
	uint64_t lun;
	int rval;
	struct qla_hw_data *ha = vha->hw;
	uint32_t ratov_j;
	struct qla_qpair *qpair;
	unsigned long flags;
	int fast_fail_status = SUCCESS;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8042,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}

	/* Save any FAST_IO_FAIL value to return later if abort succeeds */
	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		fast_fail_status = ret;

	sp = scsi_cmd_priv(cmd);
	qpair = sp->qpair;

	vha->cmd_timeout_cnt++;

	if ((sp->fcport && sp->fcport->deleted) || !qpair)
		return fast_fail_status != SUCCESS ? fast_fail_status : FAILED;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	sp->comp = &comp;
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);


	id = cmd->device->id;
	lun = cmd->device->lun;

	ql_dbg(ql_dbg_taskm, vha, 0x8002,
	    "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
	    vha->host_no, id, lun, sp, cmd, sp->handle);

	/*
	 * Abort will release the original Command/sp from FW. Let the
	 * original command call scsi_done. In return, it will wake up
	 * this sleeping thread.
	 */
	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_taskm, vha, 0x8003,
	    "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);

	/* Wait for the command completion. */
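	/*
	 * ha->r_a_tov appears to be kept in 100 ms units (the debug message
	 * below treats r_a_tov / 10 as seconds), so the product is the
	 * 4 * R_A_TOV wait in milliseconds before conversion to jiffies.
	 */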
	ratov_j = ha->r_a_tov / 10 * 4 * 1000;
	ratov_j = msecs_to_jiffies(ratov_j);
	switch (rval) {
	case QLA_SUCCESS:
		if (!wait_for_completion_timeout(&comp, ratov_j)) {
			ql_dbg(ql_dbg_taskm, vha, 0xffff,
			    "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
			    __func__, ha->r_a_tov / 10);
			ret = FAILED;
		} else {
			ret = fast_fail_status;
		}
		break;
	default:
		ret = FAILED;
		break;
	}

	sp->comp = NULL;

	ql_log(ql_log_info, vha, 0x801c,
	    "Abort command issued nexus=%ld:%d:%llu -- %x.\n",
	    vha->host_no, id, lun, ret);

	return ret;
}

/*
 * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
static int
__qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t,
	uint64_t l, enum nexus_wait_type type)
{
	int cnt, match, status;
	unsigned long flags;
	scsi_qla_host_t *vha = qpair->vha;
	struct req_que *req = qpair->req;
	srb_t *sp;
	struct scsi_cmnd *cmd;

	status = QLA_SUCCESS;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	for (cnt = 1; status == QLA_SUCCESS &&
	    cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (!sp)
			continue;
		if (sp->type != SRB_SCSI_CMD)
			continue;
		if (vha->vp_idx != sp->vha->vp_idx)
			continue;
		match = 0;
		cmd = GET_CMD_SP(sp);
		switch (type) {
		case WAIT_HOST:
			match = 1;
			break;
		case WAIT_TARGET:
			match = cmd->device->id == t;
			break;
		case WAIT_LUN:
			match = (cmd->device->id == t &&
				cmd->device->lun == l);
			break;
		}
		if (!match)
			continue;

		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		status = qla2x00_eh_wait_on_command(cmd);
		spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return status;
}

int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
	uint64_t l, enum nexus_wait_type type)
{
	struct qla_qpair *qpair;
	struct qla_hw_data *ha = vha->hw;
	int i, status = QLA_SUCCESS;

	status = __qla2x00_eh_wait_for_pending_commands(ha->base_qpair, t, l,
	    type);
	for (i = 0; status == QLA_SUCCESS && i < ha->max_qpairs; i++) {
		qpair = ha->queue_pair_map[i];
		if (!qpair)
			continue;
		status = __qla2x00_eh_wait_for_pending_commands(qpair, t, l,
		    type);
	}
	return status;
}
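
/*
 * reset_errors[] is indexed by the err counter in the reset handlers
 * below; each stage bumps err before it runs so the failure log names
 * the step that did not complete.
 */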
static char *reset_errors[] = {
	"HBA not online",
	"HBA not ready",
	"Task management failed",
	"Waiting for command completions",
};

static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	scsi_qla_host_t *vha = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
	struct qla_hw_data *ha = vha->hw;
	int err;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x803e,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}

	if (!fcport) {
		return FAILED;
	}

	err = fc_block_rport(rport);
	if (err != 0)
		return err;

	if (fcport->deleted)
		return FAILED;

	ql_log(ql_log_info, vha, 0x8009,
	    "DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no,
	    sdev->id, sdev->lun, cmd);

	err = 0;
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800a,
		    "Wait for hba online failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 2;
	if (ha->isp_ops->lun_reset(fcport, sdev->lun, 1)
		!= QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800c,
		    "do_reset failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 3;
	if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
	    sdev->lun, WAIT_LUN) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800d,
		    "wait for pending cmds failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}

	ql_log(ql_log_info, vha, 0x800e,
	    "DEVICE RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n",
	    vha->host_no, sdev->id, sdev->lun, cmd);

	return SUCCESS;

eh_reset_failed:
	ql_log(ql_log_info, vha, 0x800f,
	    "DEVICE RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
	    reset_errors[err], vha->host_no, sdev->id, sdev->lun,
	    cmd);
	vha->reset_cmd_err_cnt++;
	return FAILED;
}
static int
qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	scsi_qla_host_t *vha = shost_priv(rport_to_shost(rport));
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
	int err;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x803f,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}

	if (!fcport) {
		return FAILED;
	}

	err = fc_block_rport(rport);
	if (err != 0)
		return err;

	if (fcport->deleted)
		return FAILED;

	ql_log(ql_log_info, vha, 0x8009,
	    "TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no,
	    sdev->id, cmd);

	err = 0;
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800a,
		    "Wait for hba online failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 2;
	if (ha->isp_ops->target_reset(fcport, 0, 0) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800c,
		    "target_reset failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 3;
	if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
	    0, WAIT_TARGET) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800d,
		    "wait for pending cmds failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}

	ql_log(ql_log_info, vha, 0x800e,
	    "TARGET RESET SUCCEEDED nexus:%ld:%d cmd=%p.\n",
	    vha->host_no, sdev->id, cmd);

	return SUCCESS;

eh_reset_failed:
	ql_log(ql_log_info, vha, 0x800f,
	    "TARGET RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
	    reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
	    cmd);
	vha->reset_cmd_err_cnt++;
	return FAILED;
}

/**************************************************************************
 * qla2xxx_eh_bus_reset
 *
 * Description:
 *    The bus reset function will reset the bus and abort any executing
 *    commands.
 *
 * Input:
 *    cmd = Linux SCSI command packet of the command that caused the
 *          bus reset.
 *
 * Returns:
 *    SUCCESS/FAILURE (defined as macro in scsi.h).
 *
 **************************************************************************/
static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;
	struct qla_hw_data *ha = vha->hw;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8040,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}

	id = cmd->device->id;
	lun = cmd->device->lun;

	if (qla2x00_chip_is_down(vha))
		return ret;

	ql_log(ql_log_info, vha, 0x8012,
	    "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_fatal, vha, 0x8013,
		    "Wait for hba online failed board disabled.\n");
		goto eh_bus_reset_done;
	}

	if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
		ret = SUCCESS;

	if (ret == FAILED)
		goto eh_bus_reset_done;

	/* Flush outstanding commands. */
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
	    QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8014,
		    "Wait for pending commands failed.\n");
		ret = FAILED;
	}

eh_bus_reset_done:
	ql_log(ql_log_warn, vha, 0x802b,
	    "BUS RESET %s nexus=%ld:%d:%llu.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}

/**************************************************************************
 * qla2xxx_eh_host_reset
 *
 * Description:
 *    The reset function will reset the Adapter.
 *
 * Input:
 *      cmd = Linux SCSI command packet of the command that caused the
 *            adapter reset.
 *
 * Returns:
 *      Either SUCCESS or FAILED.
 *
 * Note:
 **************************************************************************/
static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8041,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return SUCCESS;
	}

	id = cmd->device->id;
	lun = cmd->device->lun;

	ql_log(ql_log_info, vha, 0x8018,
	    "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

	/*
	 * No point in issuing another reset if one is active.  Also do not
	 * attempt a reset if we are updating flash.
	 */
	if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
		goto eh_host_reset_lock;

	if (vha != base_vha) {
		if (qla2x00_vp_abort_isp(vha))
			goto eh_host_reset_lock;
	} else {
		if (IS_P3P_TYPE(vha->hw)) {
			if (!qla82xx_fcoe_ctx_reset(vha)) {
				/* Ctx reset success */
				ret = SUCCESS;
				goto eh_host_reset_lock;
			}
			/* fall thru if ctx reset failed */
		}
		if (ha->wq)
			flush_workqueue(ha->wq);

		set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
		if (ha->isp_ops->abort_isp(base_vha)) {
			clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
			/* failed. schedule dpc to try */
			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);

			if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x802a,
				    "wait for hba online failed.\n");
				goto eh_host_reset_lock;
			}
		}
		clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	}

	/* Waiting for command to be returned to OS. */
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
	    QLA_SUCCESS)
		ret = SUCCESS;

eh_host_reset_lock:
	ql_log(ql_log_info, vha, 0x8017,
	    "ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}

/*
 * qla2x00_loop_reset
 *      Issue loop reset.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_loop_reset(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(ha))
		return QLA_SUCCESS;

	if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
		qla2x00_mark_all_devices_lost(vha);
		ret = qla2x00_full_login_lip(vha);
		if (ret != QLA_SUCCESS) {
			ql_dbg(ql_dbg_taskm, vha, 0x802d,
			    "full_login_lip=%d.\n", ret);
		}
	}

	if (ha->flags.enable_lip_reset) {
		ret = qla2x00_lip_reset(vha);
		if (ret != QLA_SUCCESS)
			ql_dbg(ql_dbg_taskm, vha, 0x802e,
			    "lip_reset failed (%d).\n", ret);
	}

	/* Issue marker command only when we are going to start the I/O */
	vha->marker_needed = 1;

	return QLA_SUCCESS;
}

/*
 * The caller must ensure that no completion interrupts will happen
 * while this function is in progress.
 */
static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
			      unsigned long *flags)
	__releases(qp->qp_lock_ptr)
	__acquires(qp->qp_lock_ptr)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	scsi_qla_host_t *vha = qp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	int rval;
	bool ret_cmd;
	uint32_t ratov_j;

	lockdep_assert_held(qp->qp_lock_ptr);

	if (qla2x00_chip_is_down(vha)) {
		sp->done(sp, res);
		return;
	}

	if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
	    (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
	     !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
	     !qla2x00_isp_reg_stat(ha))) {
		if (sp->comp) {
			sp->done(sp, res);
			return;
		}

		sp->comp = &comp;
		spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);

		rval = ha->isp_ops->abort_command(sp);
		/* Wait for command completion. */
		ret_cmd = false;
		ratov_j = ha->r_a_tov / 10 * 4 * 1000;
		ratov_j = msecs_to_jiffies(ratov_j);
		switch (rval) {
		case QLA_SUCCESS:
			if (!wait_for_completion_timeout(&comp, ratov_j)) {
				ql_dbg(ql_dbg_taskm, vha, 0xffff,
				    "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
				    __func__, ha->r_a_tov / 10);
				ret_cmd = true;
			}
			/* else FW return SP to driver */
			break;
		default:
			ret_cmd = true;
			break;
		}

		spin_lock_irqsave(qp->qp_lock_ptr, *flags);
		if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
			sp->done(sp, res);
	} else {
		sp->done(sp, res);
	}
}

/*
 * The caller must ensure that no completion interrupts will happen
 * while this function is in progress.
 */
static void
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
	int cnt;
	unsigned long flags;
	srb_t *sp;
	scsi_qla_host_t *vha = qp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_cmd *cmd;

	if (!ha->req_q_map)
		return;
	spin_lock_irqsave(qp->qp_lock_ptr, flags);
	req = qp->req;
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (sp) {
			switch (sp->cmd_type) {
			case TYPE_SRB:
				qla2x00_abort_srb(qp, sp, res, &flags);
				break;
			case TYPE_TGT_CMD:
				if (!vha->hw->tgt.tgt_ops || !tgt ||
				    qla_ini_mode_enabled(vha)) {
					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
					    "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
					    vha->dpc_flags);
					continue;
				}
				cmd = (struct qla_tgt_cmd *)sp;
				cmd->aborted = 1;
				break;
			case TYPE_TGT_TMCMD:
				/* Skip task management functions. */
				break;
			default:
				break;
			}
			req->outstanding_cmds[cnt] = NULL;
		}
	}
	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
}

/*
 * The caller must ensure that no completion interrupts will happen
 * while this function is in progress.
 */
void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
	int que;
	struct qla_hw_data *ha = vha->hw;

	/* Continue only if initialization complete. */
	if (!ha->base_qpair)
		return;
	__qla2x00_abort_all_cmds(ha->base_qpair, res);

	if (!ha->queue_pair_map)
		return;
	for (que = 0; que < ha->max_qpairs; que++) {
		if (!ha->queue_pair_map[que])
			continue;

		__qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
	}
}

static int
qla2xxx_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = *(fc_port_t **)rport->dd_data;

	return 0;
}

static int
qla2xxx_slave_configure(struct scsi_device *sdev)
{
	scsi_qla_host_t *vha = shost_priv(sdev->host);
	struct req_que *req = vha->req;

	if (IS_T10_PI_CAPABLE(vha->hw))
		blk_queue_update_dma_alignment(sdev->request_queue, 0x7);

	scsi_change_queue_depth(sdev, req->max_q_depth);
	return 0;
}

static void
qla2xxx_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}

/**
 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
 * @ha: HA context
 *
 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
 * supported addressing method.
 */
static void
qla2x00_config_dma_addressing(struct qla_hw_data *ha)
{
	/* Assume a 32bit DMA mask. */
	ha->flags.enable_64bit_addressing = 0;

	if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
		/* Any upper-dword bits set? */
		if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
		    !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
			/* Ok, a 64bit DMA mask is applicable. */
			ha->flags.enable_64bit_addressing = 1;
			ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
			ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
			return;
		}
	}

	dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
	dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
}
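
/*
 * Aside (not used by this driver): the two-step negotiation above predates
 * dma_set_mask_and_coherent(), which combines the streaming and coherent
 * mask updates into a single call for the common case.
 */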
 * @ha: HA context
 *
 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
 * supported addressing method.
 */
static void
qla2x00_config_dma_addressing(struct qla_hw_data *ha)
{
	/* Assume a 32bit DMA mask. */
	ha->flags.enable_64bit_addressing = 0;

	if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
		/* Any upper-dword bits set? */
		if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
		    !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
			/* Ok, a 64bit DMA mask is applicable. */
			ha->flags.enable_64bit_addressing = 1;
			ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
			ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
			return;
		}
	}

	dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
	dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
}

static void
qla2x00_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	/* enable risc and host interrupts */
	wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
	rd_reg_word(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla2x00_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	/* disable risc and host interrupts */
	wrt_reg_word(&reg->ictrl, 0);
	rd_reg_word(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT);
	rd_reg_dword(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (IS_NOPOLLING_TYPE(ha))
		return;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	wrt_reg_dword(&reg->ictrl, 0);
	rd_reg_dword(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static int
qla2x00_iospace_config(struct qla_hw_data *ha)
{
	resource_size_t pio;
	uint16_t msix;

	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (!(ha->bars & 1))
		goto skip_pio;

	/* We only need PIO for Flash operations on ISP2312 v2 chips.
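	 *
	 * The checks below keep PIO only when BAR 0 really is an I/O-port
	 * resource of at least MIN_IOBASE_LEN bytes; otherwise
	 * ha->pio_address ends up 0 and all register access goes through
	 * MMIO. A condensed sketch of the test (same semantics as the
	 * code that follows):
	 *
	 *	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO) ||
	 *	    pci_resource_len(pdev, 0) < MIN_IOBASE_LEN)
	 *		pio = 0;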
	 */
	pio = pci_resource_start(ha->pdev, 0);
	if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
		if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
			ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
			    "Invalid pci I/O region size (%s).\n",
			    pci_name(ha->pdev));
			pio = 0;
		}
	} else {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
		    "Region #0 not a PIO resource (%s).\n",
		    pci_name(ha->pdev));
		pio = 0;
	}
	ha->pio_address = pio;
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
	    "PIO address=%llu.\n",
	    (unsigned long long)ha->pio_address);

skip_pio:
	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
		    "Region #1 not an MMIO resource (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
		    "Invalid PCI mem region size (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
		    "Cannot remap MMIO (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Determine queue resources */
	ha->max_req_queues = ha->max_rsp_queues = 1;
	ha->msix_count = QLA_BASE_VECTORS;

	/* Check if FW supports MQ or not */
	if (!(ha->fw_attributes & BIT_6))
		goto mqiobase_exit;

	if (!ql2xmqsupport || !ql2xnvmeenable ||
	    (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
		goto mqiobase_exit;

	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
	    pci_resource_len(ha->pdev, 3));
	if (ha->mqiobase) {
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
		    "MQIO Base=%p.\n", ha->mqiobase);
		/* Read MSIX vector size of the board */
		pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
		ha->msix_count = msix + 1;
		/* Max queues are bounded by available msix vectors */
		/* MB interrupt uses 1 vector */
		ha->max_req_queues = ha->msix_count - 1;
		ha->max_rsp_queues = ha->max_req_queues;
		/* Queue pairs is the max value minus the base queue pair */
		ha->max_qpairs = ha->max_rsp_queues - 1;
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
		    "Max no of queue pairs: %d.\n", ha->max_qpairs);

		ql_log_pci(ql_log_info, ha->pdev, 0x001a,
		    "MSI-X vector count: %d.\n", ha->msix_count);
	} else
		ql_log_pci(ql_log_info, ha->pdev, 0x001b,
		    "BAR 3 not enabled.\n");

mqiobase_exit:
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
	    "MSIX Count: %d.\n", ha->msix_count);
	return 0;

iospace_error_exit:
	return -ENOMEM;
}

static int
qla83xx_iospace_config(struct qla_hw_data *ha)
{
	uint16_t msix;

	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Use MMIO operations for all accesses.
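	 *
	 * As in qla2x00_iospace_config() above, the MSI-X vector count is
	 * derived from the PCI Message Control word, which encodes
	 * "table size - 1"; here the low bits are masked with
	 * PCI_MSIX_FLAGS_QSIZE before adding 1. One vector is then
	 * reserved for the mailbox/default interrupt, and in target mode
	 * the ATIO queue takes another, which is why the queue-pair math
	 * below subtracts before handing out request/response pairs.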
	 */
	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
		    "Region #0 not an MMIO resource (%s).\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
		    "Invalid PCI mem region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
		    "Cannot remap MMIO (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* 64bit PCI BAR - BAR2 will correspond to region 4 */
	/*
	 * 83XX/26XX always use MQ type access for queues:
	 * mbar 2, a.k.a region 4.
	 */
	ha->max_req_queues = ha->max_rsp_queues = 1;
	ha->msix_count = QLA_BASE_VECTORS;
	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
	    pci_resource_len(ha->pdev, 4));
	if (!ha->mqiobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
		    "BAR2/region4 not enabled\n");
		goto mqiobase_exit;
	}

	ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
	    pci_resource_len(ha->pdev, 2));
	if (ha->msixbase) {
		/* Read MSIX vector size of the board */
		pci_read_config_word(ha->pdev,
		    QLA_83XX_PCI_MSIX_CONTROL, &msix);
		ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1;
		/*
		 * By default, driver uses at least two msix vectors
		 * (default & rspq)
		 */
		if (ql2xmqsupport || ql2xnvmeenable) {
			/* MB interrupt uses 1 vector */
			ha->max_req_queues = ha->msix_count - 1;

			/* ATIOQ needs 1 vector. That's 1 less QPair */
			if (QLA_TGT_MODE_ENABLED())
				ha->max_req_queues--;

			ha->max_rsp_queues = ha->max_req_queues;

			/*
			 * Queue pairs is the max value minus
			 * the base queue pair.
			 */
			ha->max_qpairs = ha->max_req_queues - 1;
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3,
			    "Max no of queue pairs: %d.\n", ha->max_qpairs);
		}
		ql_log_pci(ql_log_info, ha->pdev, 0x011c,
		    "MSI-X vector count: %d.\n", ha->msix_count);
	} else
		ql_log_pci(ql_log_info, ha->pdev, 0x011e,
		    "BAR 1 not enabled.\n");

mqiobase_exit:
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
	    "MSIX Count: %d.\n", ha->msix_count);
	return 0;

iospace_error_exit:
	return -ENOMEM;
}

static struct isp_operations qla2100_isp_ops = {
	.pci_config = qla2100_pci_config,
	.reset_chip = qla2x00_reset_chip,
	.chip_diag = qla2x00_chip_diag,
	.config_rings = qla2x00_config_rings,
	.reset_adapter = qla2x00_reset_adapter,
	.nvram_config = qla2x00_nvram_config,
	.update_fw_options = qla2x00_update_fw_options,
	.load_risc = qla2x00_load_risc,
	.pci_info_str = qla2x00_pci_info_str,
	.fw_version_str = qla2x00_fw_version_str,
	.intr_handler = qla2100_intr_handler,
	.enable_intrs = qla2x00_enable_intrs,
	.disable_intrs = qla2x00_disable_intrs,
	.abort_command = qla2x00_abort_command,
	.target_reset = qla2x00_abort_target,
	.lun_reset = qla2x00_lun_reset,
	.fabric_login = qla2x00_login_fabric,
	.fabric_logout = qla2x00_fabric_logout,
	.calc_req_entries = qla2x00_calc_iocbs_32,
	.build_iocbs = qla2x00_build_scsi_iocbs_32,
	.prep_ms_iocb = qla2x00_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
	.read_nvram =
qla2x00_read_nvram_data, 2251 .write_nvram = qla2x00_write_nvram_data, 2252 .fw_dump = qla2100_fw_dump, 2253 .beacon_on = NULL, 2254 .beacon_off = NULL, 2255 .beacon_blink = NULL, 2256 .read_optrom = qla2x00_read_optrom_data, 2257 .write_optrom = qla2x00_write_optrom_data, 2258 .get_flash_version = qla2x00_get_flash_version, 2259 .start_scsi = qla2x00_start_scsi, 2260 .start_scsi_mq = NULL, 2261 .abort_isp = qla2x00_abort_isp, 2262 .iospace_config = qla2x00_iospace_config, 2263 .initialize_adapter = qla2x00_initialize_adapter, 2264 }; 2265 2266 static struct isp_operations qla2300_isp_ops = { 2267 .pci_config = qla2300_pci_config, 2268 .reset_chip = qla2x00_reset_chip, 2269 .chip_diag = qla2x00_chip_diag, 2270 .config_rings = qla2x00_config_rings, 2271 .reset_adapter = qla2x00_reset_adapter, 2272 .nvram_config = qla2x00_nvram_config, 2273 .update_fw_options = qla2x00_update_fw_options, 2274 .load_risc = qla2x00_load_risc, 2275 .pci_info_str = qla2x00_pci_info_str, 2276 .fw_version_str = qla2x00_fw_version_str, 2277 .intr_handler = qla2300_intr_handler, 2278 .enable_intrs = qla2x00_enable_intrs, 2279 .disable_intrs = qla2x00_disable_intrs, 2280 .abort_command = qla2x00_abort_command, 2281 .target_reset = qla2x00_abort_target, 2282 .lun_reset = qla2x00_lun_reset, 2283 .fabric_login = qla2x00_login_fabric, 2284 .fabric_logout = qla2x00_fabric_logout, 2285 .calc_req_entries = qla2x00_calc_iocbs_32, 2286 .build_iocbs = qla2x00_build_scsi_iocbs_32, 2287 .prep_ms_iocb = qla2x00_prep_ms_iocb, 2288 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, 2289 .read_nvram = qla2x00_read_nvram_data, 2290 .write_nvram = qla2x00_write_nvram_data, 2291 .fw_dump = qla2300_fw_dump, 2292 .beacon_on = qla2x00_beacon_on, 2293 .beacon_off = qla2x00_beacon_off, 2294 .beacon_blink = qla2x00_beacon_blink, 2295 .read_optrom = qla2x00_read_optrom_data, 2296 .write_optrom = qla2x00_write_optrom_data, 2297 .get_flash_version = qla2x00_get_flash_version, 2298 .start_scsi = qla2x00_start_scsi, 2299 .start_scsi_mq = NULL, 2300 .abort_isp = qla2x00_abort_isp, 2301 .iospace_config = qla2x00_iospace_config, 2302 .initialize_adapter = qla2x00_initialize_adapter, 2303 }; 2304 2305 static struct isp_operations qla24xx_isp_ops = { 2306 .pci_config = qla24xx_pci_config, 2307 .reset_chip = qla24xx_reset_chip, 2308 .chip_diag = qla24xx_chip_diag, 2309 .config_rings = qla24xx_config_rings, 2310 .reset_adapter = qla24xx_reset_adapter, 2311 .nvram_config = qla24xx_nvram_config, 2312 .update_fw_options = qla24xx_update_fw_options, 2313 .load_risc = qla24xx_load_risc, 2314 .pci_info_str = qla24xx_pci_info_str, 2315 .fw_version_str = qla24xx_fw_version_str, 2316 .intr_handler = qla24xx_intr_handler, 2317 .enable_intrs = qla24xx_enable_intrs, 2318 .disable_intrs = qla24xx_disable_intrs, 2319 .abort_command = qla24xx_abort_command, 2320 .target_reset = qla24xx_abort_target, 2321 .lun_reset = qla24xx_lun_reset, 2322 .fabric_login = qla24xx_login_fabric, 2323 .fabric_logout = qla24xx_fabric_logout, 2324 .calc_req_entries = NULL, 2325 .build_iocbs = NULL, 2326 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2327 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2328 .read_nvram = qla24xx_read_nvram_data, 2329 .write_nvram = qla24xx_write_nvram_data, 2330 .fw_dump = qla24xx_fw_dump, 2331 .beacon_on = qla24xx_beacon_on, 2332 .beacon_off = qla24xx_beacon_off, 2333 .beacon_blink = qla24xx_beacon_blink, 2334 .read_optrom = qla24xx_read_optrom_data, 2335 .write_optrom = qla24xx_write_optrom_data, 2336 .get_flash_version = qla24xx_get_flash_version, 2337 .start_scsi 
= qla24xx_start_scsi, 2338 .start_scsi_mq = NULL, 2339 .abort_isp = qla2x00_abort_isp, 2340 .iospace_config = qla2x00_iospace_config, 2341 .initialize_adapter = qla2x00_initialize_adapter, 2342 }; 2343 2344 static struct isp_operations qla25xx_isp_ops = { 2345 .pci_config = qla25xx_pci_config, 2346 .reset_chip = qla24xx_reset_chip, 2347 .chip_diag = qla24xx_chip_diag, 2348 .config_rings = qla24xx_config_rings, 2349 .reset_adapter = qla24xx_reset_adapter, 2350 .nvram_config = qla24xx_nvram_config, 2351 .update_fw_options = qla24xx_update_fw_options, 2352 .load_risc = qla24xx_load_risc, 2353 .pci_info_str = qla24xx_pci_info_str, 2354 .fw_version_str = qla24xx_fw_version_str, 2355 .intr_handler = qla24xx_intr_handler, 2356 .enable_intrs = qla24xx_enable_intrs, 2357 .disable_intrs = qla24xx_disable_intrs, 2358 .abort_command = qla24xx_abort_command, 2359 .target_reset = qla24xx_abort_target, 2360 .lun_reset = qla24xx_lun_reset, 2361 .fabric_login = qla24xx_login_fabric, 2362 .fabric_logout = qla24xx_fabric_logout, 2363 .calc_req_entries = NULL, 2364 .build_iocbs = NULL, 2365 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2366 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2367 .read_nvram = qla25xx_read_nvram_data, 2368 .write_nvram = qla25xx_write_nvram_data, 2369 .fw_dump = qla25xx_fw_dump, 2370 .beacon_on = qla24xx_beacon_on, 2371 .beacon_off = qla24xx_beacon_off, 2372 .beacon_blink = qla24xx_beacon_blink, 2373 .read_optrom = qla25xx_read_optrom_data, 2374 .write_optrom = qla24xx_write_optrom_data, 2375 .get_flash_version = qla24xx_get_flash_version, 2376 .start_scsi = qla24xx_dif_start_scsi, 2377 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2378 .abort_isp = qla2x00_abort_isp, 2379 .iospace_config = qla2x00_iospace_config, 2380 .initialize_adapter = qla2x00_initialize_adapter, 2381 }; 2382 2383 static struct isp_operations qla81xx_isp_ops = { 2384 .pci_config = qla25xx_pci_config, 2385 .reset_chip = qla24xx_reset_chip, 2386 .chip_diag = qla24xx_chip_diag, 2387 .config_rings = qla24xx_config_rings, 2388 .reset_adapter = qla24xx_reset_adapter, 2389 .nvram_config = qla81xx_nvram_config, 2390 .update_fw_options = qla24xx_update_fw_options, 2391 .load_risc = qla81xx_load_risc, 2392 .pci_info_str = qla24xx_pci_info_str, 2393 .fw_version_str = qla24xx_fw_version_str, 2394 .intr_handler = qla24xx_intr_handler, 2395 .enable_intrs = qla24xx_enable_intrs, 2396 .disable_intrs = qla24xx_disable_intrs, 2397 .abort_command = qla24xx_abort_command, 2398 .target_reset = qla24xx_abort_target, 2399 .lun_reset = qla24xx_lun_reset, 2400 .fabric_login = qla24xx_login_fabric, 2401 .fabric_logout = qla24xx_fabric_logout, 2402 .calc_req_entries = NULL, 2403 .build_iocbs = NULL, 2404 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2405 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2406 .read_nvram = NULL, 2407 .write_nvram = NULL, 2408 .fw_dump = qla81xx_fw_dump, 2409 .beacon_on = qla24xx_beacon_on, 2410 .beacon_off = qla24xx_beacon_off, 2411 .beacon_blink = qla83xx_beacon_blink, 2412 .read_optrom = qla25xx_read_optrom_data, 2413 .write_optrom = qla24xx_write_optrom_data, 2414 .get_flash_version = qla24xx_get_flash_version, 2415 .start_scsi = qla24xx_dif_start_scsi, 2416 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2417 .abort_isp = qla2x00_abort_isp, 2418 .iospace_config = qla2x00_iospace_config, 2419 .initialize_adapter = qla2x00_initialize_adapter, 2420 }; 2421 2422 static struct isp_operations qla82xx_isp_ops = { 2423 .pci_config = qla82xx_pci_config, 2424 .reset_chip = qla82xx_reset_chip, 2425 .chip_diag = qla24xx_chip_diag, 
2426 .config_rings = qla82xx_config_rings, 2427 .reset_adapter = qla24xx_reset_adapter, 2428 .nvram_config = qla81xx_nvram_config, 2429 .update_fw_options = qla24xx_update_fw_options, 2430 .load_risc = qla82xx_load_risc, 2431 .pci_info_str = qla24xx_pci_info_str, 2432 .fw_version_str = qla24xx_fw_version_str, 2433 .intr_handler = qla82xx_intr_handler, 2434 .enable_intrs = qla82xx_enable_intrs, 2435 .disable_intrs = qla82xx_disable_intrs, 2436 .abort_command = qla24xx_abort_command, 2437 .target_reset = qla24xx_abort_target, 2438 .lun_reset = qla24xx_lun_reset, 2439 .fabric_login = qla24xx_login_fabric, 2440 .fabric_logout = qla24xx_fabric_logout, 2441 .calc_req_entries = NULL, 2442 .build_iocbs = NULL, 2443 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2444 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2445 .read_nvram = qla24xx_read_nvram_data, 2446 .write_nvram = qla24xx_write_nvram_data, 2447 .fw_dump = qla82xx_fw_dump, 2448 .beacon_on = qla82xx_beacon_on, 2449 .beacon_off = qla82xx_beacon_off, 2450 .beacon_blink = NULL, 2451 .read_optrom = qla82xx_read_optrom_data, 2452 .write_optrom = qla82xx_write_optrom_data, 2453 .get_flash_version = qla82xx_get_flash_version, 2454 .start_scsi = qla82xx_start_scsi, 2455 .start_scsi_mq = NULL, 2456 .abort_isp = qla82xx_abort_isp, 2457 .iospace_config = qla82xx_iospace_config, 2458 .initialize_adapter = qla2x00_initialize_adapter, 2459 }; 2460 2461 static struct isp_operations qla8044_isp_ops = { 2462 .pci_config = qla82xx_pci_config, 2463 .reset_chip = qla82xx_reset_chip, 2464 .chip_diag = qla24xx_chip_diag, 2465 .config_rings = qla82xx_config_rings, 2466 .reset_adapter = qla24xx_reset_adapter, 2467 .nvram_config = qla81xx_nvram_config, 2468 .update_fw_options = qla24xx_update_fw_options, 2469 .load_risc = qla82xx_load_risc, 2470 .pci_info_str = qla24xx_pci_info_str, 2471 .fw_version_str = qla24xx_fw_version_str, 2472 .intr_handler = qla8044_intr_handler, 2473 .enable_intrs = qla82xx_enable_intrs, 2474 .disable_intrs = qla82xx_disable_intrs, 2475 .abort_command = qla24xx_abort_command, 2476 .target_reset = qla24xx_abort_target, 2477 .lun_reset = qla24xx_lun_reset, 2478 .fabric_login = qla24xx_login_fabric, 2479 .fabric_logout = qla24xx_fabric_logout, 2480 .calc_req_entries = NULL, 2481 .build_iocbs = NULL, 2482 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2483 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2484 .read_nvram = NULL, 2485 .write_nvram = NULL, 2486 .fw_dump = qla8044_fw_dump, 2487 .beacon_on = qla82xx_beacon_on, 2488 .beacon_off = qla82xx_beacon_off, 2489 .beacon_blink = NULL, 2490 .read_optrom = qla8044_read_optrom_data, 2491 .write_optrom = qla8044_write_optrom_data, 2492 .get_flash_version = qla82xx_get_flash_version, 2493 .start_scsi = qla82xx_start_scsi, 2494 .start_scsi_mq = NULL, 2495 .abort_isp = qla8044_abort_isp, 2496 .iospace_config = qla82xx_iospace_config, 2497 .initialize_adapter = qla2x00_initialize_adapter, 2498 }; 2499 2500 static struct isp_operations qla83xx_isp_ops = { 2501 .pci_config = qla25xx_pci_config, 2502 .reset_chip = qla24xx_reset_chip, 2503 .chip_diag = qla24xx_chip_diag, 2504 .config_rings = qla24xx_config_rings, 2505 .reset_adapter = qla24xx_reset_adapter, 2506 .nvram_config = qla81xx_nvram_config, 2507 .update_fw_options = qla24xx_update_fw_options, 2508 .load_risc = qla81xx_load_risc, 2509 .pci_info_str = qla24xx_pci_info_str, 2510 .fw_version_str = qla24xx_fw_version_str, 2511 .intr_handler = qla24xx_intr_handler, 2512 .enable_intrs = qla24xx_enable_intrs, 2513 .disable_intrs = qla24xx_disable_intrs, 2514 
.abort_command = qla24xx_abort_command, 2515 .target_reset = qla24xx_abort_target, 2516 .lun_reset = qla24xx_lun_reset, 2517 .fabric_login = qla24xx_login_fabric, 2518 .fabric_logout = qla24xx_fabric_logout, 2519 .calc_req_entries = NULL, 2520 .build_iocbs = NULL, 2521 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2522 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2523 .read_nvram = NULL, 2524 .write_nvram = NULL, 2525 .fw_dump = qla83xx_fw_dump, 2526 .beacon_on = qla24xx_beacon_on, 2527 .beacon_off = qla24xx_beacon_off, 2528 .beacon_blink = qla83xx_beacon_blink, 2529 .read_optrom = qla25xx_read_optrom_data, 2530 .write_optrom = qla24xx_write_optrom_data, 2531 .get_flash_version = qla24xx_get_flash_version, 2532 .start_scsi = qla24xx_dif_start_scsi, 2533 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2534 .abort_isp = qla2x00_abort_isp, 2535 .iospace_config = qla83xx_iospace_config, 2536 .initialize_adapter = qla2x00_initialize_adapter, 2537 }; 2538 2539 static struct isp_operations qlafx00_isp_ops = { 2540 .pci_config = qlafx00_pci_config, 2541 .reset_chip = qlafx00_soft_reset, 2542 .chip_diag = qlafx00_chip_diag, 2543 .config_rings = qlafx00_config_rings, 2544 .reset_adapter = qlafx00_soft_reset, 2545 .nvram_config = NULL, 2546 .update_fw_options = NULL, 2547 .load_risc = NULL, 2548 .pci_info_str = qlafx00_pci_info_str, 2549 .fw_version_str = qlafx00_fw_version_str, 2550 .intr_handler = qlafx00_intr_handler, 2551 .enable_intrs = qlafx00_enable_intrs, 2552 .disable_intrs = qlafx00_disable_intrs, 2553 .abort_command = qla24xx_async_abort_command, 2554 .target_reset = qlafx00_abort_target, 2555 .lun_reset = qlafx00_lun_reset, 2556 .fabric_login = NULL, 2557 .fabric_logout = NULL, 2558 .calc_req_entries = NULL, 2559 .build_iocbs = NULL, 2560 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2561 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2562 .read_nvram = qla24xx_read_nvram_data, 2563 .write_nvram = qla24xx_write_nvram_data, 2564 .fw_dump = NULL, 2565 .beacon_on = qla24xx_beacon_on, 2566 .beacon_off = qla24xx_beacon_off, 2567 .beacon_blink = NULL, 2568 .read_optrom = qla24xx_read_optrom_data, 2569 .write_optrom = qla24xx_write_optrom_data, 2570 .get_flash_version = qla24xx_get_flash_version, 2571 .start_scsi = qlafx00_start_scsi, 2572 .start_scsi_mq = NULL, 2573 .abort_isp = qlafx00_abort_isp, 2574 .iospace_config = qlafx00_iospace_config, 2575 .initialize_adapter = qlafx00_initialize_adapter, 2576 }; 2577 2578 static struct isp_operations qla27xx_isp_ops = { 2579 .pci_config = qla25xx_pci_config, 2580 .reset_chip = qla24xx_reset_chip, 2581 .chip_diag = qla24xx_chip_diag, 2582 .config_rings = qla24xx_config_rings, 2583 .reset_adapter = qla24xx_reset_adapter, 2584 .nvram_config = qla81xx_nvram_config, 2585 .update_fw_options = qla24xx_update_fw_options, 2586 .load_risc = qla81xx_load_risc, 2587 .pci_info_str = qla24xx_pci_info_str, 2588 .fw_version_str = qla24xx_fw_version_str, 2589 .intr_handler = qla24xx_intr_handler, 2590 .enable_intrs = qla24xx_enable_intrs, 2591 .disable_intrs = qla24xx_disable_intrs, 2592 .abort_command = qla24xx_abort_command, 2593 .target_reset = qla24xx_abort_target, 2594 .lun_reset = qla24xx_lun_reset, 2595 .fabric_login = qla24xx_login_fabric, 2596 .fabric_logout = qla24xx_fabric_logout, 2597 .calc_req_entries = NULL, 2598 .build_iocbs = NULL, 2599 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2600 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2601 .read_nvram = NULL, 2602 .write_nvram = NULL, 2603 .fw_dump = qla27xx_fwdump, 2604 .mpi_fw_dump = qla27xx_mpi_fwdump, 2605 .beacon_on = 
qla24xx_beacon_on, 2606 .beacon_off = qla24xx_beacon_off, 2607 .beacon_blink = qla83xx_beacon_blink, 2608 .read_optrom = qla25xx_read_optrom_data, 2609 .write_optrom = qla24xx_write_optrom_data, 2610 .get_flash_version = qla24xx_get_flash_version, 2611 .start_scsi = qla24xx_dif_start_scsi, 2612 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2613 .abort_isp = qla2x00_abort_isp, 2614 .iospace_config = qla83xx_iospace_config, 2615 .initialize_adapter = qla2x00_initialize_adapter, 2616 }; 2617 2618 static inline void 2619 qla2x00_set_isp_flags(struct qla_hw_data *ha) 2620 { 2621 ha->device_type = DT_EXTENDED_IDS; 2622 switch (ha->pdev->device) { 2623 case PCI_DEVICE_ID_QLOGIC_ISP2100: 2624 ha->isp_type |= DT_ISP2100; 2625 ha->device_type &= ~DT_EXTENDED_IDS; 2626 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 2627 break; 2628 case PCI_DEVICE_ID_QLOGIC_ISP2200: 2629 ha->isp_type |= DT_ISP2200; 2630 ha->device_type &= ~DT_EXTENDED_IDS; 2631 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 2632 break; 2633 case PCI_DEVICE_ID_QLOGIC_ISP2300: 2634 ha->isp_type |= DT_ISP2300; 2635 ha->device_type |= DT_ZIO_SUPPORTED; 2636 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2637 break; 2638 case PCI_DEVICE_ID_QLOGIC_ISP2312: 2639 ha->isp_type |= DT_ISP2312; 2640 ha->device_type |= DT_ZIO_SUPPORTED; 2641 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2642 break; 2643 case PCI_DEVICE_ID_QLOGIC_ISP2322: 2644 ha->isp_type |= DT_ISP2322; 2645 ha->device_type |= DT_ZIO_SUPPORTED; 2646 if (ha->pdev->subsystem_vendor == 0x1028 && 2647 ha->pdev->subsystem_device == 0x0170) 2648 ha->device_type |= DT_OEM_001; 2649 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2650 break; 2651 case PCI_DEVICE_ID_QLOGIC_ISP6312: 2652 ha->isp_type |= DT_ISP6312; 2653 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2654 break; 2655 case PCI_DEVICE_ID_QLOGIC_ISP6322: 2656 ha->isp_type |= DT_ISP6322; 2657 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2658 break; 2659 case PCI_DEVICE_ID_QLOGIC_ISP2422: 2660 ha->isp_type |= DT_ISP2422; 2661 ha->device_type |= DT_ZIO_SUPPORTED; 2662 ha->device_type |= DT_FWI2; 2663 ha->device_type |= DT_IIDMA; 2664 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2665 break; 2666 case PCI_DEVICE_ID_QLOGIC_ISP2432: 2667 ha->isp_type |= DT_ISP2432; 2668 ha->device_type |= DT_ZIO_SUPPORTED; 2669 ha->device_type |= DT_FWI2; 2670 ha->device_type |= DT_IIDMA; 2671 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2672 break; 2673 case PCI_DEVICE_ID_QLOGIC_ISP8432: 2674 ha->isp_type |= DT_ISP8432; 2675 ha->device_type |= DT_ZIO_SUPPORTED; 2676 ha->device_type |= DT_FWI2; 2677 ha->device_type |= DT_IIDMA; 2678 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2679 break; 2680 case PCI_DEVICE_ID_QLOGIC_ISP5422: 2681 ha->isp_type |= DT_ISP5422; 2682 ha->device_type |= DT_FWI2; 2683 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2684 break; 2685 case PCI_DEVICE_ID_QLOGIC_ISP5432: 2686 ha->isp_type |= DT_ISP5432; 2687 ha->device_type |= DT_FWI2; 2688 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2689 break; 2690 case PCI_DEVICE_ID_QLOGIC_ISP2532: 2691 ha->isp_type |= DT_ISP2532; 2692 ha->device_type |= DT_ZIO_SUPPORTED; 2693 ha->device_type |= DT_FWI2; 2694 ha->device_type |= DT_IIDMA; 2695 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2696 break; 2697 case PCI_DEVICE_ID_QLOGIC_ISP8001: 2698 ha->isp_type |= DT_ISP8001; 2699 ha->device_type |= DT_ZIO_SUPPORTED; 2700 ha->device_type |= DT_FWI2; 2701 ha->device_type |= DT_IIDMA; 2702 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2703 break; 2704 case 
PCI_DEVICE_ID_QLOGIC_ISP8021: 2705 ha->isp_type |= DT_ISP8021; 2706 ha->device_type |= DT_ZIO_SUPPORTED; 2707 ha->device_type |= DT_FWI2; 2708 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2709 /* Initialize 82XX ISP flags */ 2710 qla82xx_init_flags(ha); 2711 break; 2712 case PCI_DEVICE_ID_QLOGIC_ISP8044: 2713 ha->isp_type |= DT_ISP8044; 2714 ha->device_type |= DT_ZIO_SUPPORTED; 2715 ha->device_type |= DT_FWI2; 2716 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2717 /* Initialize 82XX ISP flags */ 2718 qla82xx_init_flags(ha); 2719 break; 2720 case PCI_DEVICE_ID_QLOGIC_ISP2031: 2721 ha->isp_type |= DT_ISP2031; 2722 ha->device_type |= DT_ZIO_SUPPORTED; 2723 ha->device_type |= DT_FWI2; 2724 ha->device_type |= DT_IIDMA; 2725 ha->device_type |= DT_T10_PI; 2726 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2727 break; 2728 case PCI_DEVICE_ID_QLOGIC_ISP8031: 2729 ha->isp_type |= DT_ISP8031; 2730 ha->device_type |= DT_ZIO_SUPPORTED; 2731 ha->device_type |= DT_FWI2; 2732 ha->device_type |= DT_IIDMA; 2733 ha->device_type |= DT_T10_PI; 2734 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2735 break; 2736 case PCI_DEVICE_ID_QLOGIC_ISPF001: 2737 ha->isp_type |= DT_ISPFX00; 2738 break; 2739 case PCI_DEVICE_ID_QLOGIC_ISP2071: 2740 ha->isp_type |= DT_ISP2071; 2741 ha->device_type |= DT_ZIO_SUPPORTED; 2742 ha->device_type |= DT_FWI2; 2743 ha->device_type |= DT_IIDMA; 2744 ha->device_type |= DT_T10_PI; 2745 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2746 break; 2747 case PCI_DEVICE_ID_QLOGIC_ISP2271: 2748 ha->isp_type |= DT_ISP2271; 2749 ha->device_type |= DT_ZIO_SUPPORTED; 2750 ha->device_type |= DT_FWI2; 2751 ha->device_type |= DT_IIDMA; 2752 ha->device_type |= DT_T10_PI; 2753 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2754 break; 2755 case PCI_DEVICE_ID_QLOGIC_ISP2261: 2756 ha->isp_type |= DT_ISP2261; 2757 ha->device_type |= DT_ZIO_SUPPORTED; 2758 ha->device_type |= DT_FWI2; 2759 ha->device_type |= DT_IIDMA; 2760 ha->device_type |= DT_T10_PI; 2761 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2762 break; 2763 case PCI_DEVICE_ID_QLOGIC_ISP2081: 2764 case PCI_DEVICE_ID_QLOGIC_ISP2089: 2765 ha->isp_type |= DT_ISP2081; 2766 ha->device_type |= DT_ZIO_SUPPORTED; 2767 ha->device_type |= DT_FWI2; 2768 ha->device_type |= DT_IIDMA; 2769 ha->device_type |= DT_T10_PI; 2770 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2771 break; 2772 case PCI_DEVICE_ID_QLOGIC_ISP2281: 2773 case PCI_DEVICE_ID_QLOGIC_ISP2289: 2774 ha->isp_type |= DT_ISP2281; 2775 ha->device_type |= DT_ZIO_SUPPORTED; 2776 ha->device_type |= DT_FWI2; 2777 ha->device_type |= DT_IIDMA; 2778 ha->device_type |= DT_T10_PI; 2779 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2780 break; 2781 } 2782 2783 if (IS_QLA82XX(ha)) 2784 ha->port_no = ha->portnum & 1; 2785 else { 2786 /* Get adapter physical port no from interrupt pin register. 
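		 * PCI_INTERRUPT_PIN reads back 1..4 (INTA..INTD), one pin
		 * per function. ISP25xx/2031/27xx/28xx use pin - 1 as the
		 * port number directly; older parts derive it from the
		 * pin's parity instead. E.g. a function wired to INTB reads
		 * 2, which both schemes map to port 1.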
*/ 2787 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no); 2788 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || 2789 IS_QLA27XX(ha) || IS_QLA28XX(ha)) 2790 ha->port_no--; 2791 else 2792 ha->port_no = !(ha->port_no & 1); 2793 } 2794 2795 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b, 2796 "device_type=0x%x port=%d fw_srisc_address=0x%x.\n", 2797 ha->device_type, ha->port_no, ha->fw_srisc_address); 2798 } 2799 2800 static void 2801 qla2xxx_scan_start(struct Scsi_Host *shost) 2802 { 2803 scsi_qla_host_t *vha = shost_priv(shost); 2804 2805 if (vha->hw->flags.running_gold_fw) 2806 return; 2807 2808 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 2809 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2810 set_bit(RSCN_UPDATE, &vha->dpc_flags); 2811 set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags); 2812 } 2813 2814 static int 2815 qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) 2816 { 2817 scsi_qla_host_t *vha = shost_priv(shost); 2818 2819 if (test_bit(UNLOADING, &vha->dpc_flags)) 2820 return 1; 2821 if (!vha->host) 2822 return 1; 2823 if (time > vha->hw->loop_reset_delay * HZ) 2824 return 1; 2825 2826 return atomic_read(&vha->loop_state) == LOOP_READY; 2827 } 2828 2829 static void qla_heartbeat_work_fn(struct work_struct *work) 2830 { 2831 struct qla_hw_data *ha = container_of(work, 2832 struct qla_hw_data, heartbeat_work); 2833 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2834 2835 if (!ha->flags.mbox_busy && base_vha->flags.init_done) 2836 qla_no_op_mb(base_vha); 2837 } 2838 2839 static void qla2x00_iocb_work_fn(struct work_struct *work) 2840 { 2841 struct scsi_qla_host *vha = container_of(work, 2842 struct scsi_qla_host, iocb_work); 2843 struct qla_hw_data *ha = vha->hw; 2844 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2845 int i = 2; 2846 unsigned long flags; 2847 2848 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 2849 return; 2850 2851 while (!list_empty(&vha->work_list) && i > 0) { 2852 qla2x00_do_work(vha); 2853 i--; 2854 } 2855 2856 spin_lock_irqsave(&vha->work_lock, flags); 2857 clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags); 2858 spin_unlock_irqrestore(&vha->work_lock, flags); 2859 } 2860 2861 static void 2862 qla_trace_init(void) 2863 { 2864 qla_trc_array = trace_array_get_by_name("qla2xxx"); 2865 if (!qla_trc_array) { 2866 ql_log(ql_log_fatal, NULL, 0x0001, 2867 "Unable to create qla2xxx trace instance, instance logging will be disabled.\n"); 2868 return; 2869 } 2870 2871 QLA_TRACE_ENABLE(qla_trc_array); 2872 } 2873 2874 static void 2875 qla_trace_uninit(void) 2876 { 2877 if (!qla_trc_array) 2878 return; 2879 trace_array_put(qla_trc_array); 2880 } 2881 2882 /* 2883 * PCI driver interface 2884 */ 2885 static int 2886 qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 2887 { 2888 int ret = -ENODEV; 2889 struct Scsi_Host *host; 2890 scsi_qla_host_t *base_vha = NULL; 2891 struct qla_hw_data *ha; 2892 char pci_info[30]; 2893 char fw_str[30], wq_name[30]; 2894 struct scsi_host_template *sht; 2895 int bars, mem_only = 0; 2896 uint16_t req_length = 0, rsp_length = 0; 2897 struct req_que *req = NULL; 2898 struct rsp_que *rsp = NULL; 2899 int i; 2900 2901 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 2902 sht = &qla2xxx_driver_template; 2903 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || 2904 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || 2905 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 || 2906 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || 2907 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || 2908 pdev->device 
== PCI_DEVICE_ID_QLOGIC_ISP2532 || 2909 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 || 2910 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 || 2911 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 || 2912 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 || 2913 pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || 2914 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 || 2915 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 || 2916 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 || 2917 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 || 2918 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 || 2919 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 || 2920 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 || 2921 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) { 2922 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2923 mem_only = 1; 2924 ql_dbg_pci(ql_dbg_init, pdev, 0x0007, 2925 "Mem only adapter.\n"); 2926 } 2927 ql_dbg_pci(ql_dbg_init, pdev, 0x0008, 2928 "Bars=%d.\n", bars); 2929 2930 if (mem_only) { 2931 if (pci_enable_device_mem(pdev)) 2932 return ret; 2933 } else { 2934 if (pci_enable_device(pdev)) 2935 return ret; 2936 } 2937 2938 if (is_kdump_kernel()) { 2939 ql2xmqsupport = 0; 2940 ql2xallocfwdump = 0; 2941 } 2942 2943 /* This may fail but that's ok */ 2944 pci_enable_pcie_error_reporting(pdev); 2945 2946 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); 2947 if (!ha) { 2948 ql_log_pci(ql_log_fatal, pdev, 0x0009, 2949 "Unable to allocate memory for ha.\n"); 2950 goto disable_device; 2951 } 2952 ql_dbg_pci(ql_dbg_init, pdev, 0x000a, 2953 "Memory allocated for ha=%p.\n", ha); 2954 ha->pdev = pdev; 2955 INIT_LIST_HEAD(&ha->tgt.q_full_list); 2956 spin_lock_init(&ha->tgt.q_full_lock); 2957 spin_lock_init(&ha->tgt.sess_lock); 2958 spin_lock_init(&ha->tgt.atio_lock); 2959 2960 spin_lock_init(&ha->sadb_lock); 2961 INIT_LIST_HEAD(&ha->sadb_tx_index_list); 2962 INIT_LIST_HEAD(&ha->sadb_rx_index_list); 2963 2964 spin_lock_init(&ha->sadb_fp_lock); 2965 2966 if (qla_edif_sadb_build_free_pool(ha)) { 2967 kfree(ha); 2968 goto disable_device; 2969 } 2970 2971 atomic_set(&ha->nvme_active_aen_cnt, 0); 2972 2973 /* Clear our data area */ 2974 ha->bars = bars; 2975 ha->mem_only = mem_only; 2976 spin_lock_init(&ha->hardware_lock); 2977 spin_lock_init(&ha->vport_slock); 2978 mutex_init(&ha->selflogin_lock); 2979 mutex_init(&ha->optrom_mutex); 2980 2981 /* Set ISP-type information. */ 2982 qla2x00_set_isp_flags(ha); 2983 2984 /* Set EEH reset type to fundamental if required by hba */ 2985 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) || 2986 IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 2987 pdev->needs_freset = 1; 2988 2989 ha->prev_topology = 0; 2990 ha->init_cb_size = sizeof(init_cb_t); 2991 ha->link_data_rate = PORT_SPEED_UNKNOWN; 2992 ha->optrom_size = OPTROM_SIZE_2300; 2993 ha->max_exchg = FW_MAX_EXCHANGES_CNT; 2994 atomic_set(&ha->num_pend_mbx_stage1, 0); 2995 atomic_set(&ha->num_pend_mbx_stage2, 0); 2996 atomic_set(&ha->num_pend_mbx_stage3, 0); 2997 atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD); 2998 ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD; 2999 3000 /* Assign ISP specific operations. 
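	 * Each IS_QLAxxxx() branch below pins three things per ISP family:
	 * the isp_ops jump table used for all hardware access from here
	 * on, the ring/mailbox sizing (req_length, rsp_length, mbx_count),
	 * and the flash/NVRAM layout offsets (~0 meaning "not reachable
	 * via FARX"). A hypothetical new family would follow the same
	 * recipe (illustrative only, not a real ISP):
	 *
	 *	} else if (IS_QLAXXXX(ha)) {
	 *		ha->isp_ops = &qlaXXxx_isp_ops;
	 *		req_length = REQUEST_ENTRY_CNT_83XX;
	 *		...
	 *	}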
*/ 3001 if (IS_QLA2100(ha)) { 3002 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 3003 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 3004 req_length = REQUEST_ENTRY_CNT_2100; 3005 rsp_length = RESPONSE_ENTRY_CNT_2100; 3006 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 3007 ha->gid_list_info_size = 4; 3008 ha->flash_conf_off = ~0; 3009 ha->flash_data_off = ~0; 3010 ha->nvram_conf_off = ~0; 3011 ha->nvram_data_off = ~0; 3012 ha->isp_ops = &qla2100_isp_ops; 3013 } else if (IS_QLA2200(ha)) { 3014 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 3015 ha->mbx_count = MAILBOX_REGISTER_COUNT_2200; 3016 req_length = REQUEST_ENTRY_CNT_2200; 3017 rsp_length = RESPONSE_ENTRY_CNT_2100; 3018 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 3019 ha->gid_list_info_size = 4; 3020 ha->flash_conf_off = ~0; 3021 ha->flash_data_off = ~0; 3022 ha->nvram_conf_off = ~0; 3023 ha->nvram_data_off = ~0; 3024 ha->isp_ops = &qla2100_isp_ops; 3025 } else if (IS_QLA23XX(ha)) { 3026 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 3027 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3028 req_length = REQUEST_ENTRY_CNT_2200; 3029 rsp_length = RESPONSE_ENTRY_CNT_2300; 3030 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3031 ha->gid_list_info_size = 6; 3032 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 3033 ha->optrom_size = OPTROM_SIZE_2322; 3034 ha->flash_conf_off = ~0; 3035 ha->flash_data_off = ~0; 3036 ha->nvram_conf_off = ~0; 3037 ha->nvram_data_off = ~0; 3038 ha->isp_ops = &qla2300_isp_ops; 3039 } else if (IS_QLA24XX_TYPE(ha)) { 3040 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3041 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3042 req_length = REQUEST_ENTRY_CNT_24XX; 3043 rsp_length = RESPONSE_ENTRY_CNT_2300; 3044 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3045 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3046 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 3047 ha->gid_list_info_size = 8; 3048 ha->optrom_size = OPTROM_SIZE_24XX; 3049 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX; 3050 ha->isp_ops = &qla24xx_isp_ops; 3051 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 3052 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 3053 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 3054 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 3055 } else if (IS_QLA25XX(ha)) { 3056 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3057 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3058 req_length = REQUEST_ENTRY_CNT_24XX; 3059 rsp_length = RESPONSE_ENTRY_CNT_2300; 3060 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3061 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3062 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 3063 ha->gid_list_info_size = 8; 3064 ha->optrom_size = OPTROM_SIZE_25XX; 3065 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3066 ha->isp_ops = &qla25xx_isp_ops; 3067 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 3068 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 3069 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 3070 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 3071 } else if (IS_QLA81XX(ha)) { 3072 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3073 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3074 req_length = REQUEST_ENTRY_CNT_24XX; 3075 rsp_length = RESPONSE_ENTRY_CNT_2300; 3076 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3077 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3078 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3079 ha->gid_list_info_size = 8; 3080 ha->optrom_size = OPTROM_SIZE_81XX; 3081 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3082 ha->isp_ops = &qla81xx_isp_ops; 3083 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 3084 ha->flash_data_off = 
FARX_ACCESS_FLASH_DATA_81XX; 3085 ha->nvram_conf_off = ~0; 3086 ha->nvram_data_off = ~0; 3087 } else if (IS_QLA82XX(ha)) { 3088 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3089 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3090 req_length = REQUEST_ENTRY_CNT_82XX; 3091 rsp_length = RESPONSE_ENTRY_CNT_82XX; 3092 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3093 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3094 ha->gid_list_info_size = 8; 3095 ha->optrom_size = OPTROM_SIZE_82XX; 3096 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3097 ha->isp_ops = &qla82xx_isp_ops; 3098 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 3099 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 3100 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 3101 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 3102 } else if (IS_QLA8044(ha)) { 3103 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3104 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3105 req_length = REQUEST_ENTRY_CNT_82XX; 3106 rsp_length = RESPONSE_ENTRY_CNT_82XX; 3107 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3108 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3109 ha->gid_list_info_size = 8; 3110 ha->optrom_size = OPTROM_SIZE_83XX; 3111 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3112 ha->isp_ops = &qla8044_isp_ops; 3113 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 3114 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 3115 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 3116 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 3117 } else if (IS_QLA83XX(ha)) { 3118 ha->portnum = PCI_FUNC(ha->pdev->devfn); 3119 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3120 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3121 req_length = REQUEST_ENTRY_CNT_83XX; 3122 rsp_length = RESPONSE_ENTRY_CNT_83XX; 3123 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3124 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3125 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3126 ha->gid_list_info_size = 8; 3127 ha->optrom_size = OPTROM_SIZE_83XX; 3128 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3129 ha->isp_ops = &qla83xx_isp_ops; 3130 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 3131 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 3132 ha->nvram_conf_off = ~0; 3133 ha->nvram_data_off = ~0; 3134 } else if (IS_QLAFX00(ha)) { 3135 ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00; 3136 ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00; 3137 ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00; 3138 req_length = REQUEST_ENTRY_CNT_FX00; 3139 rsp_length = RESPONSE_ENTRY_CNT_FX00; 3140 ha->isp_ops = &qlafx00_isp_ops; 3141 ha->port_down_retry_count = 30; /* default value */ 3142 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; 3143 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; 3144 ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL; 3145 ha->mr.fw_hbt_en = 1; 3146 ha->mr.host_info_resend = false; 3147 ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL; 3148 } else if (IS_QLA27XX(ha)) { 3149 ha->portnum = PCI_FUNC(ha->pdev->devfn); 3150 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3151 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3152 req_length = REQUEST_ENTRY_CNT_83XX; 3153 rsp_length = RESPONSE_ENTRY_CNT_83XX; 3154 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3155 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3156 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3157 ha->gid_list_info_size = 8; 3158 ha->optrom_size = OPTROM_SIZE_83XX; 3159 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3160 ha->isp_ops = &qla27xx_isp_ops; 3161 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 3162 ha->flash_data_off = 
FARX_ACCESS_FLASH_DATA_81XX; 3163 ha->nvram_conf_off = ~0; 3164 ha->nvram_data_off = ~0; 3165 } else if (IS_QLA28XX(ha)) { 3166 ha->portnum = PCI_FUNC(ha->pdev->devfn); 3167 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3168 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3169 req_length = REQUEST_ENTRY_CNT_83XX; 3170 rsp_length = RESPONSE_ENTRY_CNT_83XX; 3171 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3172 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3173 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3174 ha->gid_list_info_size = 8; 3175 ha->optrom_size = OPTROM_SIZE_28XX; 3176 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3177 ha->isp_ops = &qla27xx_isp_ops; 3178 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX; 3179 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX; 3180 ha->nvram_conf_off = ~0; 3181 ha->nvram_data_off = ~0; 3182 } 3183 3184 ql_dbg_pci(ql_dbg_init, pdev, 0x001e, 3185 "mbx_count=%d, req_length=%d, " 3186 "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, " 3187 "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, " 3188 "max_fibre_devices=%d.\n", 3189 ha->mbx_count, req_length, rsp_length, ha->max_loop_id, 3190 ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size, 3191 ha->nvram_npiv_size, ha->max_fibre_devices); 3192 ql_dbg_pci(ql_dbg_init, pdev, 0x001f, 3193 "isp_ops=%p, flash_conf_off=%d, " 3194 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", 3195 ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, 3196 ha->nvram_conf_off, ha->nvram_data_off); 3197 3198 /* Configure PCI I/O space */ 3199 ret = ha->isp_ops->iospace_config(ha); 3200 if (ret) 3201 goto iospace_config_failed; 3202 3203 ql_log_pci(ql_log_info, pdev, 0x001d, 3204 "Found an ISP%04X irq %d iobase 0x%p.\n", 3205 pdev->device, pdev->irq, ha->iobase); 3206 mutex_init(&ha->vport_lock); 3207 mutex_init(&ha->mq_lock); 3208 init_completion(&ha->mbx_cmd_comp); 3209 complete(&ha->mbx_cmd_comp); 3210 init_completion(&ha->mbx_intr_comp); 3211 init_completion(&ha->dcbx_comp); 3212 init_completion(&ha->lb_portup_comp); 3213 3214 set_bit(0, (unsigned long *) ha->vp_idx_map); 3215 3216 qla2x00_config_dma_addressing(ha); 3217 ql_dbg_pci(ql_dbg_init, pdev, 0x0020, 3218 "64 Bit addressing is %s.\n", 3219 ha->flags.enable_64bit_addressing ? "enable" : 3220 "disable"); 3221 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); 3222 if (ret) { 3223 ql_log_pci(ql_log_fatal, pdev, 0x0031, 3224 "Failed to allocate memory for adapter, aborting.\n"); 3225 3226 goto probe_hw_failed; 3227 } 3228 3229 req->max_q_depth = MAX_Q_DEPTH; 3230 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU) 3231 req->max_q_depth = ql2xmaxqdepth; 3232 3233 3234 base_vha = qla2x00_create_host(sht, ha); 3235 if (!base_vha) { 3236 ret = -ENOMEM; 3237 goto probe_hw_failed; 3238 } 3239 3240 pci_set_drvdata(pdev, base_vha); 3241 set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); 3242 3243 host = base_vha->host; 3244 base_vha->req = req; 3245 if (IS_QLA2XXX_MIDTYPE(ha)) 3246 base_vha->mgmt_svr_loop_id = 3247 qla2x00_reserve_mgmt_server_loop_id(base_vha); 3248 else 3249 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + 3250 base_vha->vp_idx; 3251 3252 /* Setup fcport template structure. 
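	 * ha->mr.fcport is not a discovered remote port but a template,
	 * used chiefly by the ISPFX00 discovery helpers (the
	 * qlafx00_fx_disc() calls later in probe); it stays FCT_UNKNOWN /
	 * FC_NO_LOOP_ID / FCS_UNCONFIGURED until the firmware reports
	 * real port attributes.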
	 */
	ha->mr.fcport.vha = base_vha;
	ha->mr.fcport.port_type = FCT_UNKNOWN;
	ha->mr.fcport.loop_id = FC_NO_LOOP_ID;
	qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED);
	ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
	ha->mr.fcport.scan_state = 1;

	qla2xxx_reset_stats(host, QLA2XX_HW_ERROR | QLA2XX_SHT_LNK_DWN |
			    QLA2XX_INT_ERR | QLA2XX_CMD_TIMEOUT |
			    QLA2XX_RESET_CMD_ERR | QLA2XX_TGT_SHT_LNK_DOWN);

	/* Set the SG table size based on ISP type */
	if (!IS_FWI2_CAPABLE(ha)) {
		if (IS_QLA2100(ha))
			host->sg_tablesize = 32;
	} else {
		if (!IS_QLA82XX(ha))
			host->sg_tablesize = QLA_SG_ALL;
	}
	host->max_id = ha->max_fibre_devices;
	host->cmd_per_lun = 3;
	host->unique_id = host->host_no;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	/* Older HBAs support only 16-bit LUNs */
	if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) &&
	    ql2xmaxlun > 0xffff)
		host->max_lun = 0xffff;
	else
		host->max_lun = ql2xmaxlun;
	host->transportt = qla2xxx_transport_template;
	sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);

	ql_dbg(ql_dbg_init, base_vha, 0x0033,
	    "max_id=%d this_id=%d "
	    "cmd_per_lun=%d unique_id=%d max_cmd_len=%d max_channel=%d "
	    "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id,
	    host->this_id, host->cmd_per_lun, host->unique_id,
	    host->max_cmd_len, host->max_channel, host->max_lun,
	    host->transportt, sht->vendor_id);

	INIT_WORK(&ha->heartbeat_work, qla_heartbeat_work_fn);

	/* Set up the irqs */
	ret = qla2x00_request_irqs(ha, rsp);
	if (ret)
		goto probe_failed;

	/* Alloc arrays of request and response ring ptrs */
	ret = qla2x00_alloc_queues(ha, req, rsp);
	if (ret) {
		ql_log(ql_log_fatal, base_vha, 0x003d,
		    "Failed to allocate memory for queue pointers, "
		    "aborting.\n");
		ret = -ENODEV;
		goto probe_failed;
	}

	if (ha->mqenable) {
		/* number of hardware queues supported by blk/scsi-mq */
		host->nr_hw_queues = ha->max_qpairs;

		ql_dbg(ql_dbg_init, base_vha, 0x0192,
		    "blk/scsi-mq enabled, HW queues = %d.\n",
		    host->nr_hw_queues);
	} else {
		if (ql2xnvmeenable) {
			host->nr_hw_queues = ha->max_qpairs;
			ql_dbg(ql_dbg_init, base_vha, 0x0194,
			    "FC-NVMe support is enabled, HW queues=%d\n",
			    host->nr_hw_queues);
		} else {
			ql_dbg(ql_dbg_init, base_vha, 0x0193,
			    "blk/scsi-mq disabled.\n");
		}
	}

	qlt_probe_one_stage1(base_vha, ha);

	pci_save_state(pdev);

	/* Assign back pointers */
	rsp->req = req;
	req->rsp = rsp;

	if (IS_QLAFX00(ha)) {
		ha->rsp_q_map[0] = rsp;
		ha->req_q_map[0] = req;
		set_bit(0, ha->req_qid_map);
		set_bit(0, ha->rsp_qid_map);
	}

	/* FWI2-capable only.
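	 * The in/out pointer registers are aimed at the FWI2 layout in the
	 * primary iobase first; the blocks below then retarget them at the
	 * multiqueue window (mqiobase) for MQ-capable parts, or at the
	 * ISPFX00/P3P register files for those families. The last matching
	 * block wins, so the ordering of these assignments matters.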
*/ 3348 req->req_q_in = &ha->iobase->isp24.req_q_in; 3349 req->req_q_out = &ha->iobase->isp24.req_q_out; 3350 rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in; 3351 rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out; 3352 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 3353 IS_QLA28XX(ha)) { 3354 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in; 3355 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out; 3356 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in; 3357 rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out; 3358 } 3359 3360 if (IS_QLAFX00(ha)) { 3361 req->req_q_in = &ha->iobase->ispfx00.req_q_in; 3362 req->req_q_out = &ha->iobase->ispfx00.req_q_out; 3363 rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in; 3364 rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out; 3365 } 3366 3367 if (IS_P3P_TYPE(ha)) { 3368 req->req_q_out = &ha->iobase->isp82.req_q_out[0]; 3369 rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0]; 3370 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; 3371 } 3372 3373 ql_dbg(ql_dbg_multiq, base_vha, 0xc009, 3374 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", 3375 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); 3376 ql_dbg(ql_dbg_multiq, base_vha, 0xc00a, 3377 "req->req_q_in=%p req->req_q_out=%p " 3378 "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", 3379 req->req_q_in, req->req_q_out, 3380 rsp->rsp_q_in, rsp->rsp_q_out); 3381 ql_dbg(ql_dbg_init, base_vha, 0x003e, 3382 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", 3383 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); 3384 ql_dbg(ql_dbg_init, base_vha, 0x003f, 3385 "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", 3386 req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); 3387 3388 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0); 3389 if (unlikely(!ha->wq)) { 3390 ret = -ENOMEM; 3391 goto probe_failed; 3392 } 3393 3394 if (ha->isp_ops->initialize_adapter(base_vha)) { 3395 ql_log(ql_log_fatal, base_vha, 0x00d6, 3396 "Failed to initialize adapter - Adapter flags %x.\n", 3397 base_vha->device_flags); 3398 3399 if (IS_QLA82XX(ha)) { 3400 qla82xx_idc_lock(ha); 3401 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3402 QLA8XXX_DEV_FAILED); 3403 qla82xx_idc_unlock(ha); 3404 ql_log(ql_log_fatal, base_vha, 0x00d7, 3405 "HW State: FAILED.\n"); 3406 } else if (IS_QLA8044(ha)) { 3407 qla8044_idc_lock(ha); 3408 qla8044_wr_direct(base_vha, 3409 QLA8044_CRB_DEV_STATE_INDEX, 3410 QLA8XXX_DEV_FAILED); 3411 qla8044_idc_unlock(ha); 3412 ql_log(ql_log_fatal, base_vha, 0x0150, 3413 "HW State: FAILED.\n"); 3414 } 3415 3416 ret = -ENODEV; 3417 goto probe_failed; 3418 } 3419 3420 if (IS_QLAFX00(ha)) 3421 host->can_queue = QLAFX00_MAX_CANQUEUE; 3422 else 3423 host->can_queue = req->num_outstanding_cmds - 10; 3424 3425 ql_dbg(ql_dbg_init, base_vha, 0x0032, 3426 "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", 3427 host->can_queue, base_vha->req, 3428 base_vha->mgmt_svr_loop_id, host->sg_tablesize); 3429 3430 /* Check if FW supports MQ or not for ISP25xx */ 3431 if (IS_QLA25XX(ha) && !(ha->fw_attributes & BIT_6)) 3432 ha->mqenable = 0; 3433 3434 if (ha->mqenable) { 3435 bool startit = false; 3436 3437 if (QLA_TGT_MODE_ENABLED()) 3438 startit = false; 3439 3440 if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) 3441 startit = true; 3442 3443 /* Create start of day qpairs for Block MQ */ 3444 for (i = 0; i < ha->max_qpairs; i++) 3445 qla2xxx_create_qpair(base_vha, 5, 0, startit); 3446 } 3447 qla_init_iocb_limit(base_vha); 3448 3449 if (ha->flags.running_gold_fw) 3450 goto skip_dpc; 3451 3452 /* 3453 * 
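	 * One kthread per HBA, qla2x00_do_dpc(), handles deferred work
	 * (ISP aborts, relogins, RSCN processing) signalled through
	 * dpc_flags bits; code that sets a flag is expected to follow up
	 * with qla2xxx_wake_dpc().
	 *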
	 * Startup the kernel thread for this host adapter.
	 */
	ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
	    "%s_dpc", base_vha->host_str);
	if (IS_ERR(ha->dpc_thread)) {
		ql_log(ql_log_fatal, base_vha, 0x00ed,
		    "Failed to start DPC thread.\n");
		ret = PTR_ERR(ha->dpc_thread);
		ha->dpc_thread = NULL;
		goto probe_failed;
	}
	ql_dbg(ql_dbg_init, base_vha, 0x00ee,
	    "DPC thread started successfully.\n");

	/*
	 * If we're not coming up in initiator mode, we might sit for
	 * a while without waking up the dpc thread, which leads to a
	 * stuck process warning. So just kick the dpc once here and
	 * let the kthread start (and go back to sleep in qla2x00_do_dpc).
	 */
	qla2xxx_wake_dpc(base_vha);

	INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);

	if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
		sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
		ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
		INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);

		sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
		ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
		INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
		INIT_WORK(&ha->idc_state_handler,
		    qla83xx_idc_state_handler_work);
		INIT_WORK(&ha->nic_core_unrecoverable,
		    qla83xx_nic_core_unrecoverable_work);
	}

skip_dpc:
	list_add_tail(&base_vha->list, &ha->vp_list);
	base_vha->host->irq = ha->pdev->irq;

	/* Initialize the timer. */
	qla2x00_start_timer(base_vha, WATCH_INTERVAL);
	ql_dbg(ql_dbg_init, base_vha, 0x00ef,
	    "Started qla2x00_timer with interval=%d.\n", WATCH_INTERVAL);
	ql_dbg(ql_dbg_init, base_vha, 0x00f0,
	    "Detected hba at address=%p.\n", ha);

	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
		if (ha->fw_attributes & BIT_4) {
			int prot = 0, guard;

			base_vha->flags.difdix_supported = 1;
			ql_dbg(ql_dbg_init, base_vha, 0x00f1,
			    "Registering for DIF/DIX type 1 and 3 protection.\n");
			if (ql2xenabledif == 1)
				prot = SHOST_DIX_TYPE0_PROTECTION;
			if (ql2xprotmask)
				scsi_host_set_prot(host, ql2xprotmask);
			else
				scsi_host_set_prot(host,
				    prot | SHOST_DIF_TYPE1_PROTECTION
				    | SHOST_DIF_TYPE2_PROTECTION
				    | SHOST_DIF_TYPE3_PROTECTION
				    | SHOST_DIX_TYPE1_PROTECTION
				    | SHOST_DIX_TYPE2_PROTECTION
				    | SHOST_DIX_TYPE3_PROTECTION);

			guard = SHOST_DIX_GUARD_CRC;

			if (IS_PI_IPGUARD_CAPABLE(ha) &&
			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
				guard |= SHOST_DIX_GUARD_IP;

			if (ql2xprotguard)
				scsi_host_set_guard(host, ql2xprotguard);
			else
				scsi_host_set_guard(host, guard);
		} else
			base_vha->flags.difdix_supported = 0;
	}

	ha->isp_ops->enable_intrs(ha);

	if (IS_QLAFX00(ha)) {
		ret = qlafx00_fx_disc(base_vha,
		    &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
		host->sg_tablesize = (ha->mr.extended_io_enabled) ?
3544 QLA_SG_ALL : 128; 3545 } 3546 3547 ret = scsi_add_host(host, &pdev->dev); 3548 if (ret) 3549 goto probe_failed; 3550 3551 base_vha->flags.init_done = 1; 3552 base_vha->flags.online = 1; 3553 ha->prev_minidump_failed = 0; 3554 3555 ql_dbg(ql_dbg_init, base_vha, 0x00f2, 3556 "Init done and hba is online.\n"); 3557 3558 if (qla_ini_mode_enabled(base_vha) || 3559 qla_dual_mode_enabled(base_vha)) 3560 scsi_scan_host(host); 3561 else 3562 ql_log(ql_log_info, base_vha, 0x0122, 3563 "skipping scsi_scan_host() for non-initiator port\n"); 3564 3565 qla2x00_alloc_sysfs_attr(base_vha); 3566 3567 if (IS_QLAFX00(ha)) { 3568 ret = qlafx00_fx_disc(base_vha, 3569 &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO); 3570 3571 /* Register system information */ 3572 ret = qlafx00_fx_disc(base_vha, 3573 &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO); 3574 } 3575 3576 qla2x00_init_host_attr(base_vha); 3577 3578 qla2x00_dfs_setup(base_vha); 3579 3580 ql_log(ql_log_info, base_vha, 0x00fb, 3581 "QLogic %s - %s.\n", ha->model_number, ha->model_desc); 3582 ql_log(ql_log_info, base_vha, 0x00fc, 3583 "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n", 3584 pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info, 3585 sizeof(pci_info)), 3586 pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-', 3587 base_vha->host_no, 3588 ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str))); 3589 3590 qlt_add_target(ha, base_vha); 3591 3592 clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); 3593 3594 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 3595 return -ENODEV; 3596 3597 return 0; 3598 3599 probe_failed: 3600 qla_enode_stop(base_vha); 3601 qla_edb_stop(base_vha); 3602 if (base_vha->gnl.l) { 3603 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, 3604 base_vha->gnl.l, base_vha->gnl.ldma); 3605 base_vha->gnl.l = NULL; 3606 } 3607 3608 if (base_vha->timer_active) 3609 qla2x00_stop_timer(base_vha); 3610 base_vha->flags.online = 0; 3611 if (ha->dpc_thread) { 3612 struct task_struct *t = ha->dpc_thread; 3613 3614 ha->dpc_thread = NULL; 3615 kthread_stop(t); 3616 } 3617 3618 qla2x00_free_device(base_vha); 3619 scsi_host_put(base_vha->host); 3620 /* 3621 * Need to NULL out local req/rsp after 3622 * qla2x00_free_device => qla2x00_free_queues frees 3623 * what these are pointing to. Or else we'll 3624 * fall over below in qla2x00_free_req/rsp_que. 
3625 */ 3626 req = NULL; 3627 rsp = NULL; 3628 3629 probe_hw_failed: 3630 qla2x00_mem_free(ha); 3631 qla2x00_free_req_que(ha, req); 3632 qla2x00_free_rsp_que(ha, rsp); 3633 qla2x00_clear_drv_active(ha); 3634 3635 iospace_config_failed: 3636 if (IS_P3P_TYPE(ha)) { 3637 if (ha->nx_pcibase) 3638 iounmap((device_reg_t *)ha->nx_pcibase); 3639 if (!ql2xdbwr) 3640 iounmap((device_reg_t *)ha->nxdb_wr_ptr); 3641 } else { 3642 if (ha->iobase) 3643 iounmap(ha->iobase); 3644 if (ha->cregbase) 3645 iounmap(ha->cregbase); 3646 } 3647 pci_release_selected_regions(ha->pdev, ha->bars); 3648 kfree(ha); 3649 3650 disable_device: 3651 pci_disable_device(pdev); 3652 return ret; 3653 } 3654 3655 static void __qla_set_remove_flag(scsi_qla_host_t *base_vha) 3656 { 3657 scsi_qla_host_t *vp; 3658 unsigned long flags; 3659 struct qla_hw_data *ha; 3660 3661 if (!base_vha) 3662 return; 3663 3664 ha = base_vha->hw; 3665 3666 spin_lock_irqsave(&ha->vport_slock, flags); 3667 list_for_each_entry(vp, &ha->vp_list, list) 3668 set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags); 3669 3670 /* 3671 * Indicate device removal to prevent future board_disable 3672 * and wait until any pending board_disable has completed. 3673 */ 3674 set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags); 3675 spin_unlock_irqrestore(&ha->vport_slock, flags); 3676 } 3677 3678 static void 3679 qla2x00_shutdown(struct pci_dev *pdev) 3680 { 3681 scsi_qla_host_t *vha; 3682 struct qla_hw_data *ha; 3683 3684 vha = pci_get_drvdata(pdev); 3685 ha = vha->hw; 3686 3687 ql_log(ql_log_info, vha, 0xfffa, 3688 "Adapter shutdown\n"); 3689 3690 /* 3691 * Prevent future board_disable and wait 3692 * until any pending board_disable has completed. 3693 */ 3694 __qla_set_remove_flag(vha); 3695 cancel_work_sync(&ha->board_disable); 3696 3697 if (!atomic_read(&pdev->enable_cnt)) 3698 return; 3699 3700 /* Notify ISPFX00 firmware */ 3701 if (IS_QLAFX00(ha)) 3702 qlafx00_driver_shutdown(vha, 20); 3703 3704 /* Turn-off FCE trace */ 3705 if (ha->flags.fce_enabled) { 3706 qla2x00_disable_fce_trace(vha, NULL, NULL); 3707 ha->flags.fce_enabled = 0; 3708 } 3709 3710 /* Turn-off EFT trace */ 3711 if (ha->eft) 3712 qla2x00_disable_eft_trace(vha); 3713 3714 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 3715 IS_QLA28XX(ha)) { 3716 if (ha->flags.fw_started) 3717 qla2x00_abort_isp_cleanup(vha); 3718 } else { 3719 /* Stop currently executing firmware.
*/ 3720 qla2x00_try_to_stop_firmware(vha); 3721 } 3722 3723 /* Disable timer */ 3724 if (vha->timer_active) 3725 qla2x00_stop_timer(vha); 3726 3727 /* Turn adapter off line */ 3728 vha->flags.online = 0; 3729 3730 /* turn-off interrupts on the card */ 3731 if (ha->interrupts_on) { 3732 vha->flags.init_done = 0; 3733 ha->isp_ops->disable_intrs(ha); 3734 } 3735 3736 qla2x00_free_irqs(vha); 3737 3738 qla2x00_free_fw_dump(ha); 3739 3740 pci_disable_device(pdev); 3741 ql_log(ql_log_info, vha, 0xfffe, 3742 "Adapter shutdown successfully.\n"); 3743 } 3744 3745 /* Deletes all the virtual ports for a given ha */ 3746 static void 3747 qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha) 3748 { 3749 scsi_qla_host_t *vha; 3750 unsigned long flags; 3751 3752 mutex_lock(&ha->vport_lock); 3753 while (ha->cur_vport_count) { 3754 spin_lock_irqsave(&ha->vport_slock, flags); 3755 3756 BUG_ON(base_vha->list.next == &ha->vp_list); 3757 /* This assumes first entry in ha->vp_list is always base vha */ 3758 vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list); 3759 scsi_host_get(vha->host); 3760 3761 spin_unlock_irqrestore(&ha->vport_slock, flags); 3762 mutex_unlock(&ha->vport_lock); 3763 3764 qla_nvme_delete(vha); 3765 3766 fc_vport_terminate(vha->fc_vport); 3767 scsi_host_put(vha->host); 3768 3769 mutex_lock(&ha->vport_lock); 3770 } 3771 mutex_unlock(&ha->vport_lock); 3772 } 3773 3774 /* Stops all deferred work threads */ 3775 static void 3776 qla2x00_destroy_deferred_work(struct qla_hw_data *ha) 3777 { 3778 /* Cancel all work and destroy DPC workqueues */ 3779 if (ha->dpc_lp_wq) { 3780 cancel_work_sync(&ha->idc_aen); 3781 destroy_workqueue(ha->dpc_lp_wq); 3782 ha->dpc_lp_wq = NULL; 3783 } 3784 3785 if (ha->dpc_hp_wq) { 3786 cancel_work_sync(&ha->nic_core_reset); 3787 cancel_work_sync(&ha->idc_state_handler); 3788 cancel_work_sync(&ha->nic_core_unrecoverable); 3789 destroy_workqueue(ha->dpc_hp_wq); 3790 ha->dpc_hp_wq = NULL; 3791 } 3792 3793 /* Kill the kernel thread for this host */ 3794 if (ha->dpc_thread) { 3795 struct task_struct *t = ha->dpc_thread; 3796 3797 /* 3798 * qla2xxx_wake_dpc checks for ->dpc_thread 3799 * so we need to zero it out. 
3800 */ 3801 ha->dpc_thread = NULL; 3802 kthread_stop(t); 3803 } 3804 } 3805 3806 static void 3807 qla2x00_unmap_iobases(struct qla_hw_data *ha) 3808 { 3809 if (IS_QLA82XX(ha)) { 3810 3811 iounmap((device_reg_t *)ha->nx_pcibase); 3812 if (!ql2xdbwr) 3813 iounmap((device_reg_t *)ha->nxdb_wr_ptr); 3814 } else { 3815 if (ha->iobase) 3816 iounmap(ha->iobase); 3817 3818 if (ha->cregbase) 3819 iounmap(ha->cregbase); 3820 3821 if (ha->mqiobase) 3822 iounmap(ha->mqiobase); 3823 3824 if (ha->msixbase) 3825 iounmap(ha->msixbase); 3826 } 3827 } 3828 3829 static void 3830 qla2x00_clear_drv_active(struct qla_hw_data *ha) 3831 { 3832 if (IS_QLA8044(ha)) { 3833 qla8044_idc_lock(ha); 3834 qla8044_clear_drv_active(ha); 3835 qla8044_idc_unlock(ha); 3836 } else if (IS_QLA82XX(ha)) { 3837 qla82xx_idc_lock(ha); 3838 qla82xx_clear_drv_active(ha); 3839 qla82xx_idc_unlock(ha); 3840 } 3841 } 3842 3843 static void 3844 qla2x00_remove_one(struct pci_dev *pdev) 3845 { 3846 scsi_qla_host_t *base_vha; 3847 struct qla_hw_data *ha; 3848 3849 base_vha = pci_get_drvdata(pdev); 3850 ha = base_vha->hw; 3851 ql_log(ql_log_info, base_vha, 0xb079, 3852 "Removing driver\n"); 3853 __qla_set_remove_flag(base_vha); 3854 cancel_work_sync(&ha->board_disable); 3855 3856 /* 3857 * If the PCI device is disabled then there was a PCI-disconnect and 3858 * qla2x00_disable_board_on_pci_error has taken care of most of the 3859 * resources. 3860 */ 3861 if (!atomic_read(&pdev->enable_cnt)) { 3862 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, 3863 base_vha->gnl.l, base_vha->gnl.ldma); 3864 base_vha->gnl.l = NULL; 3865 scsi_host_put(base_vha->host); 3866 kfree(ha); 3867 pci_set_drvdata(pdev, NULL); 3868 return; 3869 } 3870 qla2x00_wait_for_hba_ready(base_vha); 3871 3872 /* 3873 * if UNLOADING flag is already set, then continue unload, 3874 * where it was set first. 
3875 */ 3876 if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) 3877 return; 3878 3879 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 3880 IS_QLA28XX(ha)) { 3881 if (ha->flags.fw_started) 3882 qla2x00_abort_isp_cleanup(base_vha); 3883 } else if (!IS_QLAFX00(ha)) { 3884 if (IS_QLA8031(ha)) { 3885 ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, 3886 "Clearing fcoe driver presence.\n"); 3887 if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) 3888 ql_dbg(ql_dbg_p3p, base_vha, 0xb079, 3889 "Error while clearing DRV-Presence.\n"); 3890 } 3891 3892 qla2x00_try_to_stop_firmware(base_vha); 3893 } 3894 3895 qla2x00_wait_for_sess_deletion(base_vha); 3896 3897 qla_nvme_delete(base_vha); 3898 3899 dma_free_coherent(&ha->pdev->dev, 3900 base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); 3901 3902 base_vha->gnl.l = NULL; 3903 qla_enode_stop(base_vha); 3904 qla_edb_stop(base_vha); 3905 3906 vfree(base_vha->scan.l); 3907 3908 if (IS_QLAFX00(ha)) 3909 qlafx00_driver_shutdown(base_vha, 20); 3910 3911 qla2x00_delete_all_vps(ha, base_vha); 3912 3913 qla2x00_dfs_remove(base_vha); 3914 3915 qla84xx_put_chip(base_vha); 3916 3917 /* Disable timer */ 3918 if (base_vha->timer_active) 3919 qla2x00_stop_timer(base_vha); 3920 3921 base_vha->flags.online = 0; 3922 3923 /* free DMA memory */ 3924 if (ha->exlogin_buf) 3925 qla2x00_free_exlogin_buffer(ha); 3926 3927 /* free DMA memory */ 3928 if (ha->exchoffld_buf) 3929 qla2x00_free_exchoffld_buffer(ha); 3930 3931 qla2x00_destroy_deferred_work(ha); 3932 3933 qlt_remove_target(ha, base_vha); 3934 3935 qla2x00_free_sysfs_attr(base_vha, true); 3936 3937 fc_remove_host(base_vha->host); 3938 3939 scsi_remove_host(base_vha->host); 3940 3941 qla2x00_free_device(base_vha); 3942 3943 qla2x00_clear_drv_active(ha); 3944 3945 scsi_host_put(base_vha->host); 3946 3947 qla2x00_unmap_iobases(ha); 3948 3949 pci_release_selected_regions(ha->pdev, ha->bars); 3950 kfree(ha); 3951 3952 pci_disable_pcie_error_reporting(pdev); 3953 3954 pci_disable_device(pdev); 3955 } 3956 3957 static inline void 3958 qla24xx_free_purex_list(struct purex_list *list) 3959 { 3960 struct purex_item *item, *next; 3961 ulong flags; 3962 3963 spin_lock_irqsave(&list->lock, flags); 3964 list_for_each_entry_safe(item, next, &list->head, list) { 3965 list_del(&item->list); 3966 if (item == &item->vha->default_item) 3967 continue; 3968 kfree(item); 3969 } 3970 spin_unlock_irqrestore(&list->lock, flags); 3971 } 3972 3973 static void 3974 qla2x00_free_device(scsi_qla_host_t *vha) 3975 { 3976 struct qla_hw_data *ha = vha->hw; 3977 3978 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); 3979 3980 /* Disable timer */ 3981 if (vha->timer_active) 3982 qla2x00_stop_timer(vha); 3983 3984 qla25xx_delete_queues(vha); 3985 vha->flags.online = 0; 3986 3987 /* turn-off interrupts on the card */ 3988 if (ha->interrupts_on) { 3989 vha->flags.init_done = 0; 3990 ha->isp_ops->disable_intrs(ha); 3991 } 3992 3993 qla2x00_free_fcports(vha); 3994 3995 qla2x00_free_irqs(vha); 3996 3997 /* Flush the work queue and remove it */ 3998 if (ha->wq) { 3999 destroy_workqueue(ha->wq); 4000 ha->wq = NULL; 4001 } 4002 4003 4004 qla24xx_free_purex_list(&vha->purex_list); 4005 4006 qla2x00_mem_free(ha); 4007 4008 qla82xx_md_free(vha); 4009 4010 qla_edif_sadb_release_free_pool(ha); 4011 qla_edif_sadb_release(ha); 4012 4013 qla2x00_free_queues(ha); 4014 } 4015 4016 void qla2x00_free_fcports(struct scsi_qla_host *vha) 4017 { 4018 fc_port_t *fcport, *tfcport; 4019 4020 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) 4021 
qla2x00_free_fcport(fcport); 4022 } 4023 4024 static inline void 4025 qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport) 4026 { 4027 int now; 4028 4029 if (!fcport->rport) 4030 return; 4031 4033 ql_dbg(ql_dbg_disc, fcport->vha, 0x2109, 4034 "%s %8phN. rport %p roles %x\n", 4035 __func__, fcport->port_name, fcport->rport, 4036 fcport->rport->roles); 4037 fc_remote_port_delete(fcport->rport); 4039 qlt_do_generation_tick(vha, &now); 4040 } 4041 4042 /* 4043 * qla2x00_mark_device_lost Updates fcport state when device goes offline. 4044 * 4045 * Input: vha = adapter block pointer. fcport = port structure pointer. 4046 * 4047 * Return: None. 4048 * 4050 */ 4051 void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, 4052 int do_login) 4053 { 4054 if (IS_QLAFX00(vha->hw)) { 4055 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 4056 qla2x00_schedule_rport_del(vha, fcport); 4057 return; 4058 } 4059 4060 if (atomic_read(&fcport->state) == FCS_ONLINE && 4061 vha->vp_idx == fcport->vha->vp_idx) { 4062 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 4063 qla2x00_schedule_rport_del(vha, fcport); 4064 } 4065 4066 /* 4067 * We may need to retry the login, so don't change the state of the 4068 * port but do the retries. 4069 */ 4070 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD) 4071 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 4072 4073 if (!do_login) 4074 return; 4075 4076 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 4077 } 4078 4079 void 4080 qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha) 4081 { 4082 fc_port_t *fcport; 4083 4084 ql_dbg(ql_dbg_disc, vha, 0x20f1, 4085 "Mark all dev lost\n"); 4086 4087 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4088 if (fcport->loop_id != FC_NO_LOOP_ID && 4089 (fcport->flags & FCF_FCP2_DEVICE) && 4090 fcport->port_type == FCT_TARGET && 4091 !qla2x00_reset_active(vha)) { 4092 ql_dbg(ql_dbg_disc, vha, 0x211a, 4093 "Delaying session delete for FCP2 flags 0x%x port_type = 0x%x port_id=%06x %phC\n", 4094 fcport->flags, fcport->port_type, 4095 fcport->d_id.b24, fcport->port_name); 4096 continue; 4097 } 4098 fcport->scan_state = 0; 4099 qlt_schedule_sess_for_deletion(fcport); 4100 } 4101 } 4102 4103 static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha) 4104 { 4105 int i; 4106 4107 if (IS_FWI2_CAPABLE(ha)) 4108 return; 4109 4110 for (i = 0; i < SNS_FIRST_LOOP_ID; i++) 4111 set_bit(i, ha->loop_id_map); 4112 set_bit(MANAGEMENT_SERVER, ha->loop_id_map); 4113 set_bit(BROADCAST, ha->loop_id_map); 4114 } 4115 4116 /* 4117 * qla2x00_mem_alloc 4118 * Allocates adapter memory. 4119 * 4120 * Returns: 4121 * 0 = success. 4122 * !0 = failure.
4123 */ 4124 static int 4125 qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, 4126 struct req_que **req, struct rsp_que **rsp) 4127 { 4128 char name[16]; 4129 int rc; 4130 4131 if (QLA_TGT_MODE_ENABLED() || EDIF_CAP(ha)) { 4132 ha->vp_map = kcalloc(MAX_MULTI_ID_FABRIC, sizeof(struct qla_vp_map), GFP_KERNEL); 4133 if (!ha->vp_map) 4134 goto fail; 4135 } 4136 4137 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, 4138 &ha->init_cb_dma, GFP_KERNEL); 4139 if (!ha->init_cb) 4140 goto fail_free_vp_map; 4141 4142 rc = btree_init32(&ha->host_map); 4143 if (rc) 4144 goto fail_free_init_cb; 4145 4146 if (qlt_mem_alloc(ha) < 0) 4147 goto fail_free_btree; 4148 4149 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, 4150 qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL); 4151 if (!ha->gid_list) 4152 goto fail_free_tgt_mem; 4153 4154 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 4155 if (!ha->srb_mempool) 4156 goto fail_free_gid_list; 4157 4158 if (IS_P3P_TYPE(ha) || IS_QLA27XX(ha) || (ql2xsecenable && IS_QLA28XX(ha))) { 4159 /* Allocate cache for CT6 Ctx. */ 4160 if (!ctx_cachep) { 4161 ctx_cachep = kmem_cache_create("qla2xxx_ctx", 4162 sizeof(struct ct6_dsd), 0, 4163 SLAB_HWCACHE_ALIGN, NULL); 4164 if (!ctx_cachep) 4165 goto fail_free_srb_mempool; 4166 } 4167 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, 4168 ctx_cachep); 4169 if (!ha->ctx_mempool) 4170 goto fail_free_srb_mempool; 4171 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021, 4172 "ctx_cachep=%p ctx_mempool=%p.\n", 4173 ctx_cachep, ha->ctx_mempool); 4174 } 4175 4176 /* Get memory for cached NVRAM */ 4177 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); 4178 if (!ha->nvram) 4179 goto fail_free_ctx_mempool; 4180 4181 snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME, 4182 ha->pdev->device); 4183 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4184 DMA_POOL_SIZE, 8, 0); 4185 if (!ha->s_dma_pool) 4186 goto fail_free_nvram; 4187 4188 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022, 4189 "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n", 4190 ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); 4191 4192 if (IS_P3P_TYPE(ha) || ql2xenabledif || (IS_QLA28XX(ha) && ql2xsecenable)) { 4193 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4194 DSD_LIST_DMA_POOL_SIZE, 8, 0); 4195 if (!ha->dl_dma_pool) { 4196 ql_log_pci(ql_log_fatal, ha->pdev, 0x0023, 4197 "Failed to allocate memory for dl_dma_pool.\n"); 4198 goto fail_s_dma_pool; 4199 } 4200 4201 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4202 FCP_CMND_DMA_POOL_SIZE, 8, 0); 4203 if (!ha->fcp_cmnd_dma_pool) { 4204 ql_log_pci(ql_log_fatal, ha->pdev, 0x0024, 4205 "Failed to allocate memory for fcp_cmnd_dma_pool.\n"); 4206 goto fail_dl_dma_pool; 4207 } 4208 4209 if (ql2xenabledif) { 4210 u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE; 4211 struct dsd_dma *dsd, *nxt; 4212 uint i; 4213 /* Create a DMA pool of buffers for DIF bundling */ 4214 ha->dif_bundl_pool = dma_pool_create(name, 4215 &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0); 4216 if (!ha->dif_bundl_pool) { 4217 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024, 4218 "%s: failed create dif_bundl_pool\n", 4219 __func__); 4220 goto fail_dif_bundl_dma_pool; 4221 } 4222 4223 INIT_LIST_HEAD(&ha->pool.good.head); 4224 INIT_LIST_HEAD(&ha->pool.unusable.head); 4225 ha->pool.good.count = 0; 4226 ha->pool.unusable.count = 0; 4227 for (i = 0; i < 128; i++) { 4228 dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC); 4229 if (!dsd) { 4230 ql_dbg_pci(ql_dbg_init,
ha->pdev, 4231 0xe0ee, "%s: failed alloc dsd\n", 4232 __func__); 4233 return -ENOMEM; 4234 } 4235 ha->dif_bundle_kallocs++; 4236 4237 dsd->dsd_addr = dma_pool_alloc( 4238 ha->dif_bundl_pool, GFP_ATOMIC, 4239 &dsd->dsd_list_dma); 4240 if (!dsd->dsd_addr) { 4241 ql_dbg_pci(ql_dbg_init, ha->pdev, 4242 0xe0ee, 4243 "%s: failed alloc ->dsd_addr\n", 4244 __func__); 4245 kfree(dsd); 4246 ha->dif_bundle_kallocs--; 4247 continue; 4248 } 4249 ha->dif_bundle_dma_allocs++; 4250 4251 /* 4252 * if DMA buffer crosses 4G boundary, 4253 * put it on bad list 4254 */ 4255 if (MSD(dsd->dsd_list_dma) ^ 4256 MSD(dsd->dsd_list_dma + bufsize)) { 4257 list_add_tail(&dsd->list, 4258 &ha->pool.unusable.head); 4259 ha->pool.unusable.count++; 4260 } else { 4261 list_add_tail(&dsd->list, 4262 &ha->pool.good.head); 4263 ha->pool.good.count++; 4264 } 4265 } 4266 4267 /* return the good ones back to the pool */ 4268 list_for_each_entry_safe(dsd, nxt, 4269 &ha->pool.good.head, list) { 4270 list_del(&dsd->list); 4271 dma_pool_free(ha->dif_bundl_pool, 4272 dsd->dsd_addr, dsd->dsd_list_dma); 4273 ha->dif_bundle_dma_allocs--; 4274 kfree(dsd); 4275 ha->dif_bundle_kallocs--; 4276 } 4277 4278 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024, 4279 "%s: dif dma pool (good=%u unusable=%u)\n", 4280 __func__, ha->pool.good.count, 4281 ha->pool.unusable.count); 4282 } 4283 4284 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025, 4285 "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n", 4286 ha->dl_dma_pool, ha->fcp_cmnd_dma_pool, 4287 ha->dif_bundl_pool); 4288 } 4289 4290 /* Allocate memory for SNS commands */ 4291 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 4292 /* Get consistent memory allocated for SNS commands */ 4293 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, 4294 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 4295 if (!ha->sns_cmd) 4296 goto fail_dma_pool; 4297 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026, 4298 "sns_cmd: %p.\n", ha->sns_cmd); 4299 } else { 4300 /* Get consistent memory allocated for MS IOCB */ 4301 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 4302 &ha->ms_iocb_dma); 4303 if (!ha->ms_iocb) 4304 goto fail_dma_pool; 4305 /* Get consistent memory allocated for CT SNS commands */ 4306 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev, 4307 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 4308 if (!ha->ct_sns) 4309 goto fail_free_ms_iocb; 4310 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027, 4311 "ms_iocb=%p ct_sns=%p.\n", 4312 ha->ms_iocb, ha->ct_sns); 4313 } 4314 4315 /* Allocate memory for request ring */ 4316 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 4317 if (!*req) { 4318 ql_log_pci(ql_log_fatal, ha->pdev, 0x0028, 4319 "Failed to allocate memory for req.\n"); 4320 goto fail_req; 4321 } 4322 (*req)->length = req_len; 4323 (*req)->ring = dma_alloc_coherent(&ha->pdev->dev, 4324 ((*req)->length + 1) * sizeof(request_t), 4325 &(*req)->dma, GFP_KERNEL); 4326 if (!(*req)->ring) { 4327 ql_log_pci(ql_log_fatal, ha->pdev, 0x0029, 4328 "Failed to allocate memory for req_ring.\n"); 4329 goto fail_req_ring; 4330 } 4331 /* Allocate memory for response ring */ 4332 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 4333 if (!*rsp) { 4334 ql_log_pci(ql_log_fatal, ha->pdev, 0x002a, 4335 "Failed to allocate memory for rsp.\n"); 4336 goto fail_rsp; 4337 } 4338 (*rsp)->hw = ha; 4339 (*rsp)->length = rsp_len; 4340 (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev, 4341 ((*rsp)->length + 1) * sizeof(response_t), 4342 &(*rsp)->dma, GFP_KERNEL); 4343 if (!(*rsp)->ring) { 4344 ql_log_pci(ql_log_fatal, ha->pdev, 0x002b, 
4345 "Failed to allocate memory for rsp_ring.\n"); 4346 goto fail_rsp_ring; 4347 } 4348 (*req)->rsp = *rsp; 4349 (*rsp)->req = *req; 4350 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c, 4351 "req=%p req->length=%d req->ring=%p rsp=%p " 4352 "rsp->length=%d rsp->ring=%p.\n", 4353 *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length, 4354 (*rsp)->ring); 4355 /* Allocate memory for NVRAM data for vports */ 4356 if (ha->nvram_npiv_size) { 4357 ha->npiv_info = kcalloc(ha->nvram_npiv_size, 4358 sizeof(struct qla_npiv_entry), 4359 GFP_KERNEL); 4360 if (!ha->npiv_info) { 4361 ql_log_pci(ql_log_fatal, ha->pdev, 0x002d, 4362 "Failed to allocate memory for npiv_info.\n"); 4363 goto fail_npiv_info; 4364 } 4365 } else 4366 ha->npiv_info = NULL; 4367 4368 /* Get consistent memory allocated for EX-INIT-CB. */ 4369 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 4370 IS_QLA28XX(ha)) { 4371 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 4372 &ha->ex_init_cb_dma); 4373 if (!ha->ex_init_cb) 4374 goto fail_ex_init_cb; 4375 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e, 4376 "ex_init_cb=%p.\n", ha->ex_init_cb); 4377 } 4378 4379 /* Get consistent memory allocated for Special Features-CB. */ 4380 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4381 ha->sf_init_cb = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, 4382 &ha->sf_init_cb_dma); 4383 if (!ha->sf_init_cb) 4384 goto fail_sf_init_cb; 4385 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199, 4386 "sf_init_cb=%p.\n", ha->sf_init_cb); 4387 } 4388 4389 INIT_LIST_HEAD(&ha->gbl_dsd_list); 4390 4391 /* Get consistent memory allocated for Async Port-Database. */ 4392 if (!IS_FWI2_CAPABLE(ha)) { 4393 ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 4394 &ha->async_pd_dma); 4395 if (!ha->async_pd) 4396 goto fail_async_pd; 4397 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f, 4398 "async_pd=%p.\n", ha->async_pd); 4399 } 4400 4401 INIT_LIST_HEAD(&ha->vp_list); 4402 4403 /* Allocate memory for our loop_id bitmap */ 4404 ha->loop_id_map = kcalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE), 4405 sizeof(long), 4406 GFP_KERNEL); 4407 if (!ha->loop_id_map) 4408 goto fail_loop_id_map; 4409 else { 4410 qla2x00_set_reserved_loop_ids(ha); 4411 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, 4412 "loop_id_map=%p.\n", ha->loop_id_map); 4413 } 4414 4415 ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev, 4416 SFP_DEV_SIZE, &ha->sfp_data_dma, GFP_KERNEL); 4417 if (!ha->sfp_data) { 4418 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, 4419 "Unable to allocate memory for SFP read-data.\n"); 4420 goto fail_sfp_data; 4421 } 4422 4423 ha->flt = dma_alloc_coherent(&ha->pdev->dev, 4424 sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma, 4425 GFP_KERNEL); 4426 if (!ha->flt) { 4427 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, 4428 "Unable to allocate memory for FLT.\n"); 4429 goto fail_flt_buffer; 4430 } 4431 4432 /* allocate the purex dma pool */ 4433 ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4434 ELS_MAX_PAYLOAD, 8, 0); 4435 4436 if (!ha->purex_dma_pool) { 4437 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, 4438 "Unable to allocate purex_dma_pool.\n"); 4439 goto fail_flt; 4440 } 4441 4442 ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16; 4443 ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev, 4444 ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL); 4445 4446 if (!ha->elsrej.c) { 4447 ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff, 4448 "Alloc failed for els reject cmd.\n"); 4449 goto fail_elsrej; 4450 } 4451 ha->elsrej.c->er_cmd = ELS_LS_RJT; 4452 ha->elsrej.c->er_reason = ELS_RJT_LOGIC; 4453 
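/* Note: the assignments around this point pre-build a generic ELS LS_RJT payload (reason code "logical error", explanation "unable to supply requested data") in DMA-able memory, so the driver can reject an incoming ELS request without having to allocate in the response path. */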
ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA; 4454 return 0; 4455 4456 fail_elsrej: 4457 dma_pool_destroy(ha->purex_dma_pool); 4458 fail_flt: 4459 dma_free_coherent(&ha->pdev->dev, 4460 sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, ha->flt, ha->flt_dma); 4461 4462 fail_flt_buffer: 4463 dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, 4464 ha->sfp_data, ha->sfp_data_dma); 4465 fail_sfp_data: 4466 kfree(ha->loop_id_map); 4467 fail_loop_id_map: 4468 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); 4469 fail_async_pd: 4470 dma_pool_free(ha->s_dma_pool, ha->sf_init_cb, ha->sf_init_cb_dma); 4471 fail_sf_init_cb: 4472 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); 4473 fail_ex_init_cb: 4474 kfree(ha->npiv_info); 4475 fail_npiv_info: 4476 dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) * 4477 sizeof(response_t), (*rsp)->ring, (*rsp)->dma); 4478 (*rsp)->ring = NULL; 4479 (*rsp)->dma = 0; 4480 fail_rsp_ring: 4481 kfree(*rsp); 4482 *rsp = NULL; 4483 fail_rsp: 4484 dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) * 4485 sizeof(request_t), (*req)->ring, (*req)->dma); 4486 (*req)->ring = NULL; 4487 (*req)->dma = 0; 4488 fail_req_ring: 4489 kfree(*req); 4490 *req = NULL; 4491 fail_req: 4492 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 4493 ha->ct_sns, ha->ct_sns_dma); 4494 ha->ct_sns = NULL; 4495 ha->ct_sns_dma = 0; 4496 fail_free_ms_iocb: 4497 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 4498 ha->ms_iocb = NULL; 4499 ha->ms_iocb_dma = 0; 4500 4501 if (ha->sns_cmd) 4502 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 4503 ha->sns_cmd, ha->sns_cmd_dma); 4504 fail_dma_pool: 4505 if (ql2xenabledif) { 4506 struct dsd_dma *dsd, *nxt; 4507 4508 list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, 4509 list) { 4510 list_del(&dsd->list); 4511 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, 4512 dsd->dsd_list_dma); 4513 ha->dif_bundle_dma_allocs--; 4514 kfree(dsd); 4515 ha->dif_bundle_kallocs--; 4516 ha->pool.unusable.count--; 4517 } 4518 dma_pool_destroy(ha->dif_bundl_pool); 4519 ha->dif_bundl_pool = NULL; 4520 } 4521 4522 fail_dif_bundl_dma_pool: 4523 if (IS_QLA82XX(ha) || ql2xenabledif) { 4524 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 4525 ha->fcp_cmnd_dma_pool = NULL; 4526 } 4527 fail_dl_dma_pool: 4528 if (IS_QLA82XX(ha) || ql2xenabledif) { 4529 dma_pool_destroy(ha->dl_dma_pool); 4530 ha->dl_dma_pool = NULL; 4531 } 4532 fail_s_dma_pool: 4533 dma_pool_destroy(ha->s_dma_pool); 4534 ha->s_dma_pool = NULL; 4535 fail_free_nvram: 4536 kfree(ha->nvram); 4537 ha->nvram = NULL; 4538 fail_free_ctx_mempool: 4539 mempool_destroy(ha->ctx_mempool); 4540 ha->ctx_mempool = NULL; 4541 fail_free_srb_mempool: 4542 mempool_destroy(ha->srb_mempool); 4543 ha->srb_mempool = NULL; 4544 fail_free_gid_list: 4545 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 4546 ha->gid_list, 4547 ha->gid_list_dma); 4548 ha->gid_list = NULL; 4549 ha->gid_list_dma = 0; 4550 fail_free_tgt_mem: 4551 qlt_mem_free(ha); 4552 fail_free_btree: 4553 btree_destroy32(&ha->host_map); 4554 fail_free_init_cb: 4555 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, 4556 ha->init_cb_dma); 4557 ha->init_cb = NULL; 4558 ha->init_cb_dma = 0; 4559 fail_free_vp_map: 4560 kfree(ha->vp_map); 4561 fail: 4562 ql_log(ql_log_fatal, NULL, 0x0030, 4563 "Memory allocation failure.\n"); 4564 return -ENOMEM; 4565 } 4566 4567 int 4568 qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha) 4569 { 4570 int rval; 4571 uint16_t size, max_cnt; 4572 uint32_t temp; 4573 struct qla_hw_data *ha =
vha->hw; 4574 4575 /* Return if we don't need to allocate any extended logins */ 4576 if (ql2xexlogins <= MAX_FIBRE_DEVICES_2400) 4577 return QLA_SUCCESS; 4578 4579 if (!IS_EXLOGIN_OFFLD_CAPABLE(ha)) 4580 return QLA_SUCCESS; 4581 4582 ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins); 4583 max_cnt = 0; 4584 rval = qla_get_exlogin_status(vha, &size, &max_cnt); 4585 if (rval != QLA_SUCCESS) { 4586 ql_log_pci(ql_log_fatal, ha->pdev, 0xd029, 4587 "Failed to get exlogin status.\n"); 4588 return rval; 4589 } 4590 4591 temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins; 4592 temp *= size; 4593 4594 if (temp != ha->exlogin_size) { 4595 qla2x00_free_exlogin_buffer(ha); 4596 ha->exlogin_size = temp; 4597 4598 ql_log(ql_log_info, vha, 0xd024, 4599 "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n", 4600 max_cnt, size, temp); 4601 4602 ql_log(ql_log_info, vha, 0xd025, 4603 "EXLOGIN: requested size=0x%x\n", ha->exlogin_size); 4604 4605 /* Get consistent memory for extended logins */ 4606 ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev, 4607 ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL); 4608 if (!ha->exlogin_buf) { 4609 ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a, 4610 "Failed to allocate memory for exlogin_buf_dma.\n"); 4611 return -ENOMEM; 4612 } 4613 } 4614 4615 /* Now configure the dma buffer */ 4616 rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma); 4617 if (rval) { 4618 ql_log(ql_log_fatal, vha, 0xd033, 4619 "Setup extended login buffer ****FAILED****.\n"); 4620 qla2x00_free_exlogin_buffer(ha); 4621 } 4622 4623 return rval; 4624 } 4625 4626 /* 4627 * qla2x00_free_exlogin_buffer 4628 * 4629 * Input: 4630 * ha = adapter block pointer 4631 */ 4632 void 4633 qla2x00_free_exlogin_buffer(struct qla_hw_data *ha) 4634 { 4635 if (ha->exlogin_buf) { 4636 dma_free_coherent(&ha->pdev->dev, ha->exlogin_size, 4637 ha->exlogin_buf, ha->exlogin_buf_dma); 4638 ha->exlogin_buf = NULL; 4639 ha->exlogin_size = 0; 4640 } 4641 } 4642 4643 static void 4644 qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt) 4645 { 4646 u32 temp; 4647 struct init_cb_81xx *icb = (struct init_cb_81xx *)vha->hw->init_cb; 4648 *ret_cnt = FW_DEF_EXCHANGES_CNT; 4649 4650 if (max_cnt > vha->hw->max_exchg) 4651 max_cnt = vha->hw->max_exchg; 4652 4653 if (qla_ini_mode_enabled(vha)) { 4654 if (vha->ql2xiniexchg > max_cnt) 4655 vha->ql2xiniexchg = max_cnt; 4656 4657 if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT) 4658 *ret_cnt = vha->ql2xiniexchg; 4659 4660 } else if (qla_tgt_mode_enabled(vha)) { 4661 if (vha->ql2xexchoffld > max_cnt) { 4662 vha->ql2xexchoffld = max_cnt; 4663 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 4664 } 4665 4666 if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT) 4667 *ret_cnt = vha->ql2xexchoffld; 4668 } else if (qla_dual_mode_enabled(vha)) { 4669 temp = vha->ql2xiniexchg + vha->ql2xexchoffld; 4670 if (temp > max_cnt) { 4671 vha->ql2xiniexchg -= (temp - max_cnt)/2; 4672 vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1); 4673 temp = max_cnt; 4674 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 4675 } 4676 4677 if (temp > FW_DEF_EXCHANGES_CNT) 4678 *ret_cnt = temp; 4679 } 4680 } 4681 4682 int 4683 qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha) 4684 { 4685 int rval; 4686 u16 size, max_cnt; 4687 u32 actual_cnt, totsz; 4688 struct qla_hw_data *ha = vha->hw; 4689 4690 if (!ha->flags.exchoffld_enabled) 4691 return QLA_SUCCESS; 4692 4693 if (!IS_EXCHG_OFFLD_CAPABLE(ha)) 4694 return QLA_SUCCESS; 4695 4696 max_cnt = 0; 4697 rval =
qla_get_exchoffld_status(vha, &size, &max_cnt); 4698 if (rval != QLA_SUCCESS) { 4699 ql_log_pci(ql_log_fatal, ha->pdev, 0xd012, 4700 "Failed to get exchange offload status.\n"); 4701 return rval; 4702 } 4703 4704 qla2x00_number_of_exch(vha, &actual_cnt, max_cnt); 4705 ql_log(ql_log_info, vha, 0xd014, 4706 "Actual exchange offload count: %d.\n", actual_cnt); 4707 4708 totsz = actual_cnt * size; 4709 4710 if (totsz != ha->exchoffld_size) { 4711 qla2x00_free_exchoffld_buffer(ha); 4712 if (actual_cnt <= FW_DEF_EXCHANGES_CNT) { 4713 ha->exchoffld_size = 0; 4714 ha->flags.exchoffld_enabled = 0; 4715 return QLA_SUCCESS; 4716 } 4717 4718 ha->exchoffld_size = totsz; 4719 4720 ql_log(ql_log_info, vha, 0xd016, 4721 "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n", 4722 max_cnt, actual_cnt, size, totsz); 4723 4724 ql_log(ql_log_info, vha, 0xd017, 4725 "Exchange Buffers requested size = 0x%x\n", 4726 ha->exchoffld_size); 4727 4728 /* Get consistent memory for exchange offload */ 4729 ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev, 4730 ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL); 4731 if (!ha->exchoffld_buf) { 4732 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, 4733 "Failed to allocate memory for Exchange Offload.\n"); 4734 4735 if (ha->max_exchg > 4736 (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) { 4737 ha->max_exchg -= REDUCE_EXCHANGES_CNT; 4738 } else if (ha->max_exchg > 4739 (FW_DEF_EXCHANGES_CNT + 512)) { 4740 ha->max_exchg -= 512; 4741 } else { 4742 ha->flags.exchoffld_enabled = 0; 4743 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, 4744 "Disabling Exchange offload due to lack of memory\n"); 4745 } 4746 ha->exchoffld_size = 0; 4747 4748 return -ENOMEM; 4749 } 4750 } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) { 4751 /* pathological case */ 4752 qla2x00_free_exchoffld_buffer(ha); 4753 ha->exchoffld_size = 0; 4754 ha->flags.exchoffld_enabled = 0; 4755 ql_log(ql_log_info, vha, 0xd016, 4756 "Exchange offload not enabled: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n", 4757 ha->exchoffld_size, actual_cnt, size, totsz); 4758 return 0; 4759 } 4760 4761 /* Now configure the dma buffer */ 4762 rval = qla_set_exchoffld_mem_cfg(vha); 4763 if (rval) { 4764 ql_log(ql_log_fatal, vha, 0xd02e, 4765 "Setup exchange offload buffer ****FAILED****.\n"); 4766 qla2x00_free_exchoffld_buffer(ha); 4767 } else { 4768 /* re-adjust number of target exchanges */ 4769 struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb; 4770 4771 if (qla_ini_mode_enabled(vha)) 4772 icb->exchange_count = 0; 4773 else 4774 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 4775 } 4776 4777 return rval; 4778 } 4779 4780 /* 4781 * qla2x00_free_exchoffld_buffer 4782 * 4783 * Input: 4784 * ha = adapter block pointer 4785 */ 4786 void 4787 qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha) 4788 { 4789 if (ha->exchoffld_buf) { 4790 dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size, 4791 ha->exchoffld_buf, ha->exchoffld_buf_dma); 4792 ha->exchoffld_buf = NULL; 4793 ha->exchoffld_size = 0; 4794 } 4795 } 4796 4797 /* 4798 * qla2x00_free_fw_dump 4799 * Frees firmware dump resources.
4800 * 4801 * Input: 4802 * ha = adapter block pointer 4803 */ 4804 static void 4805 qla2x00_free_fw_dump(struct qla_hw_data *ha) 4806 { 4807 struct fwdt *fwdt = ha->fwdt; 4808 uint j; 4809 4810 if (ha->fce) 4811 dma_free_coherent(&ha->pdev->dev, 4812 FCE_SIZE, ha->fce, ha->fce_dma); 4813 4814 if (ha->eft) 4815 dma_free_coherent(&ha->pdev->dev, 4816 EFT_SIZE, ha->eft, ha->eft_dma); 4817 4818 vfree(ha->fw_dump); 4819 4820 ha->fce = NULL; 4821 ha->fce_dma = 0; 4822 ha->flags.fce_enabled = 0; 4823 ha->eft = NULL; 4824 ha->eft_dma = 0; 4825 ha->fw_dumped = false; 4826 ha->fw_dump_cap_flags = 0; 4827 ha->fw_dump_reading = 0; 4828 ha->fw_dump = NULL; 4829 ha->fw_dump_len = 0; 4830 4831 for (j = 0; j < 2; j++, fwdt++) { 4832 vfree(fwdt->template); 4833 fwdt->template = NULL; 4834 fwdt->length = 0; 4835 } 4836 } 4837 4838 /* 4839 * qla2x00_mem_free 4840 * Frees all adapter allocated memory. 4841 * 4842 * Input: 4843 * ha = adapter block pointer. 4844 */ 4845 static void 4846 qla2x00_mem_free(struct qla_hw_data *ha) 4847 { 4848 qla2x00_free_fw_dump(ha); 4849 4850 if (ha->mctp_dump) 4851 dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump, 4852 ha->mctp_dump_dma); 4853 ha->mctp_dump = NULL; 4854 4855 mempool_destroy(ha->srb_mempool); 4856 ha->srb_mempool = NULL; 4857 4858 if (ha->dcbx_tlv) 4859 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, 4860 ha->dcbx_tlv, ha->dcbx_tlv_dma); 4861 ha->dcbx_tlv = NULL; 4862 4863 if (ha->xgmac_data) 4864 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, 4865 ha->xgmac_data, ha->xgmac_data_dma); 4866 ha->xgmac_data = NULL; 4867 4868 if (ha->sns_cmd) 4869 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 4870 ha->sns_cmd, ha->sns_cmd_dma); 4871 ha->sns_cmd = NULL; 4872 ha->sns_cmd_dma = 0; 4873 4874 if (ha->ct_sns) 4875 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 4876 ha->ct_sns, ha->ct_sns_dma); 4877 ha->ct_sns = NULL; 4878 ha->ct_sns_dma = 0; 4879 4880 if (ha->sfp_data) 4881 dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data, 4882 ha->sfp_data_dma); 4883 ha->sfp_data = NULL; 4884 4885 if (ha->flt) 4886 dma_free_coherent(&ha->pdev->dev, 4887 sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, 4888 ha->flt, ha->flt_dma); 4889 ha->flt = NULL; 4890 ha->flt_dma = 0; 4891 4892 if (ha->ms_iocb) 4893 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 4894 ha->ms_iocb = NULL; 4895 ha->ms_iocb_dma = 0; 4896 4897 if (ha->sf_init_cb) 4898 dma_pool_free(ha->s_dma_pool, 4899 ha->sf_init_cb, ha->sf_init_cb_dma); 4900 4901 if (ha->ex_init_cb) 4902 dma_pool_free(ha->s_dma_pool, 4903 ha->ex_init_cb, ha->ex_init_cb_dma); 4904 ha->ex_init_cb = NULL; 4905 ha->ex_init_cb_dma = 0; 4906 4907 if (ha->async_pd) 4908 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); 4909 ha->async_pd = NULL; 4910 ha->async_pd_dma = 0; 4911 4912 dma_pool_destroy(ha->s_dma_pool); 4913 ha->s_dma_pool = NULL; 4914 4915 if (ha->gid_list) 4916 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 4917 ha->gid_list, ha->gid_list_dma); 4918 ha->gid_list = NULL; 4919 ha->gid_list_dma = 0; 4920 4921 if (IS_QLA82XX(ha)) { 4922 if (!list_empty(&ha->gbl_dsd_list)) { 4923 struct dsd_dma *dsd_ptr, *tdsd_ptr; 4924 4925 /* clean up allocated prev pool */ 4926 list_for_each_entry_safe(dsd_ptr, 4927 tdsd_ptr, &ha->gbl_dsd_list, list) { 4928 dma_pool_free(ha->dl_dma_pool, 4929 dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma); 4930 list_del(&dsd_ptr->list); 4931 kfree(dsd_ptr); 4932 } 4933 } 4934 } 4935 4936 dma_pool_destroy(ha->dl_dma_pool); 4937 
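/* Teardown note: dma_pool_destroy() and mempool_destroy() are no-ops when passed NULL, so the pool destruction in this function needs no guards even for pools that were never created; the pointers are cleared afterwards to keep qla2x00_mem_free() idempotent. */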
ha->dl_dma_pool = NULL; 4938 4939 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 4940 ha->fcp_cmnd_dma_pool = NULL; 4941 4942 mempool_destroy(ha->ctx_mempool); 4943 ha->ctx_mempool = NULL; 4944 4945 if (ql2xenabledif && ha->dif_bundl_pool) { 4946 struct dsd_dma *dsd, *nxt; 4947 4948 list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, 4949 list) { 4950 list_del(&dsd->list); 4951 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, 4952 dsd->dsd_list_dma); 4953 ha->dif_bundle_dma_allocs--; 4954 kfree(dsd); 4955 ha->dif_bundle_kallocs--; 4956 ha->pool.unusable.count--; 4957 } 4958 list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) { 4959 list_del(&dsd->list); 4960 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, 4961 dsd->dsd_list_dma); 4962 ha->dif_bundle_dma_allocs--; 4963 kfree(dsd); 4964 ha->dif_bundle_kallocs--; 4965 } 4966 } 4967 4968 dma_pool_destroy(ha->dif_bundl_pool); 4969 ha->dif_bundl_pool = NULL; 4970 4971 qlt_mem_free(ha); 4972 qla_remove_hostmap(ha); 4973 4974 if (ha->init_cb) 4975 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, 4976 ha->init_cb, ha->init_cb_dma); 4977 4978 dma_pool_destroy(ha->purex_dma_pool); 4979 ha->purex_dma_pool = NULL; 4980 4981 if (ha->elsrej.c) { 4982 dma_free_coherent(&ha->pdev->dev, ha->elsrej.size, 4983 ha->elsrej.c, ha->elsrej.cdma); 4984 ha->elsrej.c = NULL; 4985 } 4986 4987 ha->init_cb = NULL; 4988 ha->init_cb_dma = 0; 4989 4990 vfree(ha->optrom_buffer); 4991 ha->optrom_buffer = NULL; 4992 kfree(ha->nvram); 4993 ha->nvram = NULL; 4994 kfree(ha->npiv_info); 4995 ha->npiv_info = NULL; 4996 kfree(ha->swl); 4997 ha->swl = NULL; 4998 kfree(ha->loop_id_map); 4999 ha->sf_init_cb = NULL; 5000 ha->sf_init_cb_dma = 0; 5001 ha->loop_id_map = NULL; 5002 5003 kfree(ha->vp_map); 5004 ha->vp_map = NULL; 5005 } 5006 5007 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, 5008 struct qla_hw_data *ha) 5009 { 5010 struct Scsi_Host *host; 5011 struct scsi_qla_host *vha = NULL; 5012 5013 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 5014 if (!host) { 5015 ql_log_pci(ql_log_fatal, ha->pdev, 0x0107, 5016 "Failed to allocate host from the scsi layer, aborting.\n"); 5017 return NULL; 5018 } 5019 5020 /* Clear our data area */ 5021 vha = shost_priv(host); 5022 memset(vha, 0, sizeof(scsi_qla_host_t)); 5023 5024 vha->host = host; 5025 vha->host_no = host->host_no; 5026 vha->hw = ha; 5027 5028 vha->qlini_mode = ql2x_ini_mode; 5029 vha->ql2xexchoffld = ql2xexchoffld; 5030 vha->ql2xiniexchg = ql2xiniexchg; 5031 5032 INIT_LIST_HEAD(&vha->vp_fcports); 5033 INIT_LIST_HEAD(&vha->work_list); 5034 INIT_LIST_HEAD(&vha->list); 5035 INIT_LIST_HEAD(&vha->qla_cmd_list); 5036 INIT_LIST_HEAD(&vha->logo_list); 5037 INIT_LIST_HEAD(&vha->plogi_ack_list); 5038 INIT_LIST_HEAD(&vha->qp_list); 5039 INIT_LIST_HEAD(&vha->gnl.fcports); 5040 INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn); 5041 5042 INIT_LIST_HEAD(&vha->purex_list.head); 5043 spin_lock_init(&vha->purex_list.lock); 5044 5045 spin_lock_init(&vha->work_lock); 5046 spin_lock_init(&vha->cmd_list_lock); 5047 init_waitqueue_head(&vha->fcport_waitQ); 5048 init_waitqueue_head(&vha->vref_waitq); 5049 qla_enode_init(vha); 5050 qla_edb_init(vha); 5051 5052 5053 vha->gnl.size = sizeof(struct get_name_list_extended) * 5054 (ha->max_loop_id + 1); 5055 vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev, 5056 vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL); 5057 if (!vha->gnl.l) { 5058 ql_log(ql_log_fatal, vha, 0xd04a, 5059 "Alloc failed for name list.\n"); 5060 scsi_host_put(vha->host); 5061 return NULL; 5062 } 
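/* Allocation note: gnl.l must come from dma_alloc_coherent() because the firmware deposits the extended Get Name List directly into it, whereas the fabric scan database allocated below is only ever touched by the host, so plain vmalloc() memory suffices there. */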
5063 5064 /* todo: what about ext login? */ 5065 vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp); 5066 vha->scan.l = vmalloc(vha->scan.size); 5067 if (!vha->scan.l) { 5068 ql_log(ql_log_fatal, vha, 0xd04a, 5069 "Alloc failed for scan database.\n"); 5070 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, 5071 vha->gnl.l, vha->gnl.ldma); 5072 vha->gnl.l = NULL; 5073 scsi_host_put(vha->host); 5074 return NULL; 5075 } 5076 INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn); 5077 5078 sprintf(vha->host_str, "%s_%lu", QLA2XXX_DRIVER_NAME, vha->host_no); 5079 ql_dbg(ql_dbg_init, vha, 0x0041, 5080 "Allocated the host=%p hw=%p vha=%p dev_name=%s", 5081 vha->host, vha->hw, vha, 5082 dev_name(&(ha->pdev->dev))); 5083 5084 return vha; 5085 } 5086 5087 struct qla_work_evt * 5088 qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) 5089 { 5090 struct qla_work_evt *e; 5091 5092 if (test_bit(UNLOADING, &vha->dpc_flags)) 5093 return NULL; 5094 5095 if (qla_vha_mark_busy(vha)) 5096 return NULL; 5097 5098 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); 5099 if (!e) { 5100 QLA_VHA_MARK_NOT_BUSY(vha); 5101 return NULL; 5102 } 5103 5104 INIT_LIST_HEAD(&e->list); 5105 e->type = type; 5106 e->flags = QLA_EVT_FLAG_FREE; 5107 return e; 5108 } 5109 5110 int 5111 qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) 5112 { 5113 unsigned long flags; 5114 bool q = false; 5115 5116 spin_lock_irqsave(&vha->work_lock, flags); 5117 list_add_tail(&e->list, &vha->work_list); 5118 5119 if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags)) 5120 q = true; 5121 5122 spin_unlock_irqrestore(&vha->work_lock, flags); 5123 5124 if (q) 5125 queue_work(vha->hw->wq, &vha->iocb_work); 5126 5127 return QLA_SUCCESS; 5128 } 5129 5130 int 5131 qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code, 5132 u32 data) 5133 { 5134 struct qla_work_evt *e; 5135 5136 e = qla2x00_alloc_work(vha, QLA_EVT_AEN); 5137 if (!e) 5138 return QLA_FUNCTION_FAILED; 5139 5140 e->u.aen.code = code; 5141 e->u.aen.data = data; 5142 return qla2x00_post_work(vha, e); 5143 } 5144 5145 int 5146 qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb) 5147 { 5148 struct qla_work_evt *e; 5149 5150 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK); 5151 if (!e) 5152 return QLA_FUNCTION_FAILED; 5153 5154 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 5155 return qla2x00_post_work(vha, e); 5156 } 5157 5158 #define qla2x00_post_async_work(name, type) \ 5159 int qla2x00_post_async_##name##_work( \ 5160 struct scsi_qla_host *vha, \ 5161 fc_port_t *fcport, uint16_t *data) \ 5162 { \ 5163 struct qla_work_evt *e; \ 5164 \ 5165 e = qla2x00_alloc_work(vha, type); \ 5166 if (!e) \ 5167 return QLA_FUNCTION_FAILED; \ 5168 \ 5169 e->u.logio.fcport = fcport; \ 5170 if (data) { \ 5171 e->u.logio.data[0] = data[0]; \ 5172 e->u.logio.data[1] = data[1]; \ 5173 } \ 5174 fcport->flags |= FCF_ASYNC_ACTIVE; \ 5175 return qla2x00_post_work(vha, e); \ 5176 } 5177 5178 qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN); 5179 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); 5180 qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC); 5181 qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO); 5182 qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE); 5183 5184 int 5185 qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code) 5186 { 5187 struct qla_work_evt *e; 5188 5189 e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT); 5190 if (!e) 5191 return QLA_FUNCTION_FAILED; 5192 5193 
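/* A successful qla2x00_alloc_work() holds a busy reference on the vha; qla2x00_do_work() drops it via QLA_VHA_MARK_NOT_BUSY() once the event has been processed, so no explicit put is needed here. */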
e->u.uevent.code = code; 5194 return qla2x00_post_work(vha, e); 5195 } 5196 5197 static void 5198 qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code) 5199 { 5200 char event_string[40]; 5201 char *envp[] = { event_string, NULL }; 5202 5203 switch (code) { 5204 case QLA_UEVENT_CODE_FW_DUMP: 5205 snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu", 5206 vha->host_no); 5207 break; 5208 default: 5209 /* do nothing */ 5210 break; 5211 } 5212 kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp); 5213 } 5214 5215 int 5216 qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode, 5217 uint32_t *data, int cnt) 5218 { 5219 struct qla_work_evt *e; 5220 5221 e = qla2x00_alloc_work(vha, QLA_EVT_AENFX); 5222 if (!e) 5223 return QLA_FUNCTION_FAILED; 5224 5225 e->u.aenfx.evtcode = evtcode; 5226 e->u.aenfx.count = cnt; 5227 memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt); 5228 return qla2x00_post_work(vha, e); 5229 } 5230 5231 void qla24xx_sched_upd_fcport(fc_port_t *fcport) 5232 { 5233 unsigned long flags; 5234 5235 if (IS_SW_RESV_ADDR(fcport->d_id)) 5236 return; 5237 5238 spin_lock_irqsave(&fcport->vha->work_lock, flags); 5239 if (fcport->disc_state == DSC_UPD_FCPORT) { 5240 spin_unlock_irqrestore(&fcport->vha->work_lock, flags); 5241 return; 5242 } 5243 fcport->jiffies_at_registration = jiffies; 5244 fcport->sec_since_registration = 0; 5245 fcport->next_disc_state = DSC_DELETED; 5246 qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT); 5247 spin_unlock_irqrestore(&fcport->vha->work_lock, flags); 5248 5249 queue_work(system_unbound_wq, &fcport->reg_work); 5250 } 5251 5252 static 5253 void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) 5254 { 5255 unsigned long flags; 5256 fc_port_t *fcport = NULL, *tfcp; 5257 struct qlt_plogi_ack_t *pla = 5258 (struct qlt_plogi_ack_t *)e->u.new_sess.pla; 5259 uint8_t free_fcport = 0; 5260 5261 ql_dbg(ql_dbg_disc, vha, 0xffff, 5262 "%s %d %8phC enter\n", 5263 __func__, __LINE__, e->u.new_sess.port_name); 5264 5265 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5266 fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1); 5267 if (fcport) { 5268 fcport->d_id = e->u.new_sess.id; 5269 if (pla) { 5270 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 5271 memcpy(fcport->node_name, 5272 pla->iocb.u.isp24.u.plogi.node_name, 5273 WWN_SIZE); 5274 qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN); 5275 /* we took an extra ref_count to prevent PLOGI ACK when 5276 * fcport/sess has not been created. 
5277 */ 5278 pla->ref_count--; 5279 } 5280 } else { 5281 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5282 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 5283 if (fcport) { 5284 fcport->d_id = e->u.new_sess.id; 5285 fcport->flags |= FCF_FABRIC_DEVICE; 5286 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 5287 fcport->tgt_short_link_down_cnt = 0; 5288 5289 memcpy(fcport->port_name, e->u.new_sess.port_name, 5290 WWN_SIZE); 5291 5292 fcport->fc4_type = e->u.new_sess.fc4_type; 5293 if (NVME_PRIORITY(vha->hw, fcport)) 5294 fcport->do_prli_nvme = 1; 5295 else 5296 fcport->do_prli_nvme = 0; 5297 5298 if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) { 5299 fcport->dm_login_expire = jiffies + 5300 QLA_N2N_WAIT_TIME * HZ; 5301 fcport->fc4_type = FS_FC4TYPE_FCP; 5302 fcport->n2n_flag = 1; 5303 if (vha->flags.nvme_enabled) 5304 fcport->fc4_type |= FS_FC4TYPE_NVME; 5305 } 5306 5307 } else { 5308 ql_dbg(ql_dbg_disc, vha, 0xffff, 5309 "%s %8phC mem alloc fail.\n", 5310 __func__, e->u.new_sess.port_name); 5311 5312 if (pla) { 5313 list_del(&pla->list); 5314 kmem_cache_free(qla_tgt_plogi_cachep, pla); 5315 } 5316 return; 5317 } 5318 5319 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5320 /* search again to make sure no one else got ahead */ 5321 tfcp = qla2x00_find_fcport_by_wwpn(vha, 5322 e->u.new_sess.port_name, 1); 5323 if (tfcp) { 5324 /* should rarely happen */ 5325 ql_dbg(ql_dbg_disc, vha, 0xffff, 5326 "%s %8phC found existing fcport before add. DS %d LS %d\n", 5327 __func__, tfcp->port_name, tfcp->disc_state, 5328 tfcp->fw_login_state); 5329 5330 free_fcport = 1; 5331 } else { 5332 list_add_tail(&fcport->list, &vha->vp_fcports); 5333 5334 } 5335 if (pla) { 5336 qlt_plogi_ack_link(vha, pla, fcport, 5337 QLT_PLOGI_LINK_SAME_WWN); 5338 pla->ref_count--; 5339 } 5340 } 5341 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5342 5343 if (fcport) { 5344 fcport->id_changed = 1; 5345 fcport->scan_state = QLA_FCPORT_FOUND; 5346 fcport->chip_reset = vha->hw->base_qpair->chip_reset; 5347 memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE); 5348 5349 if (pla) { 5350 if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) { 5351 u16 wd3_lo; 5352 5353 fcport->fw_login_state = DSC_LS_PRLI_PEND; 5354 fcport->local = 0; 5355 fcport->loop_id = 5356 le16_to_cpu( 5357 pla->iocb.u.isp24.nport_handle); 5359 wd3_lo = 5360 le16_to_cpu( 5361 pla->iocb.u.isp24.u.prli.wd3_lo); 5362 5363 if (wd3_lo & BIT_7) 5364 fcport->conf_compl_supported = 1; 5365 5366 if ((wd3_lo & BIT_4) == 0) 5367 fcport->port_type = FCT_INITIATOR; 5368 else 5369 fcport->port_type = FCT_TARGET; 5370 } 5371 qlt_plogi_ack_unref(vha, pla); 5372 } else { 5373 fc_port_t *dfcp = NULL; 5374 5375 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5376 tfcp = qla2x00_find_fcport_by_nportid(vha, 5377 &e->u.new_sess.id, 1); 5378 if (tfcp && (tfcp != fcport)) { 5379 /* 5380 * We have a conflict fcport with same NportID. 5381 */ 5382 ql_dbg(ql_dbg_disc, vha, 0xffff, 5383 "%s %8phC found conflict before add.
DS %d LS %d\n", 5384 __func__, tfcp->port_name, tfcp->disc_state, 5385 tfcp->fw_login_state); 5386 5387 switch (tfcp->disc_state) { 5388 case DSC_DELETED: 5389 break; 5390 case DSC_DELETE_PEND: 5391 fcport->login_pause = 1; 5392 tfcp->conflict = fcport; 5393 break; 5394 default: 5395 fcport->login_pause = 1; 5396 tfcp->conflict = fcport; 5397 dfcp = tfcp; 5398 break; 5399 } 5400 } 5401 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5402 if (dfcp) 5403 qlt_schedule_sess_for_deletion(tfcp); 5404 5405 if (N2N_TOPO(vha->hw)) { 5406 fcport->flags &= ~FCF_FABRIC_DEVICE; 5407 fcport->keep_nport_handle = 1; 5408 if (vha->flags.nvme_enabled) { 5409 fcport->fc4_type = 5410 (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP); 5411 fcport->n2n_flag = 1; 5412 } 5413 fcport->fw_login_state = 0; 5414 5415 schedule_delayed_work(&vha->scan.scan_work, 5); 5416 } else { 5417 qla24xx_fcport_handle_login(vha, fcport); 5418 } 5419 } 5420 } 5421 5422 if (free_fcport) { 5423 qla2x00_free_fcport(fcport); 5424 if (pla) { 5425 list_del(&pla->list); 5426 kmem_cache_free(qla_tgt_plogi_cachep, pla); 5427 } 5428 } 5429 } 5430 5431 static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e) 5432 { 5433 struct srb *sp = e->u.iosb.sp; 5434 int rval; 5435 5436 rval = qla2x00_start_sp(sp); 5437 if (rval != QLA_SUCCESS) { 5438 ql_dbg(ql_dbg_disc, vha, 0x2043, 5439 "%s: %s: Re-issue IOCB failed (%d).\n", 5440 __func__, sp->name, rval); 5441 qla24xx_sp_unmap(vha, sp); 5442 } 5443 } 5444 5445 void 5446 qla2x00_do_work(struct scsi_qla_host *vha) 5447 { 5448 struct qla_work_evt *e, *tmp; 5449 unsigned long flags; 5450 LIST_HEAD(work); 5451 int rc; 5452 5453 spin_lock_irqsave(&vha->work_lock, flags); 5454 list_splice_init(&vha->work_list, &work); 5455 spin_unlock_irqrestore(&vha->work_lock, flags); 5456 5457 list_for_each_entry_safe(e, tmp, &work, list) { 5458 rc = QLA_SUCCESS; 5459 switch (e->type) { 5460 case QLA_EVT_AEN: 5461 fc_host_post_event(vha->host, fc_get_event_number(), 5462 e->u.aen.code, e->u.aen.data); 5463 break; 5464 case QLA_EVT_IDC_ACK: 5465 qla81xx_idc_ack(vha, e->u.idc_ack.mb); 5466 break; 5467 case QLA_EVT_ASYNC_LOGIN: 5468 qla2x00_async_login(vha, e->u.logio.fcport, 5469 e->u.logio.data); 5470 break; 5471 case QLA_EVT_ASYNC_LOGOUT: 5472 rc = qla2x00_async_logout(vha, e->u.logio.fcport); 5473 break; 5474 case QLA_EVT_ASYNC_ADISC: 5475 qla2x00_async_adisc(vha, e->u.logio.fcport, 5476 e->u.logio.data); 5477 break; 5478 case QLA_EVT_UEVENT: 5479 qla2x00_uevent_emit(vha, e->u.uevent.code); 5480 break; 5481 case QLA_EVT_AENFX: 5482 qlafx00_process_aen(vha, e); 5483 break; 5484 case QLA_EVT_UNMAP: 5485 qla24xx_sp_unmap(vha, e->u.iosb.sp); 5486 break; 5487 case QLA_EVT_RELOGIN: 5488 qla2x00_relogin(vha); 5489 break; 5490 case QLA_EVT_NEW_SESS: 5491 qla24xx_create_new_sess(vha, e); 5492 break; 5493 case QLA_EVT_GPDB: 5494 qla24xx_async_gpdb(vha, e->u.fcport.fcport, 5495 e->u.fcport.opt); 5496 break; 5497 case QLA_EVT_PRLI: 5498 qla24xx_async_prli(vha, e->u.fcport.fcport); 5499 break; 5500 case QLA_EVT_GPSC: 5501 qla24xx_async_gpsc(vha, e->u.fcport.fcport); 5502 break; 5503 case QLA_EVT_GNL: 5504 qla24xx_async_gnl(vha, e->u.fcport.fcport); 5505 break; 5506 case QLA_EVT_NACK: 5507 qla24xx_do_nack_work(vha, e); 5508 break; 5509 case QLA_EVT_ASYNC_PRLO: 5510 rc = qla2x00_async_prlo(vha, e->u.logio.fcport); 5511 break; 5512 case QLA_EVT_ASYNC_PRLO_DONE: 5513 qla2x00_async_prlo_done(vha, e->u.logio.fcport, 5514 e->u.logio.data); 5515 break; 5516 case QLA_EVT_GPNFT: 5517 qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type, 
5518 e->u.gpnft.sp); 5519 break; 5520 case QLA_EVT_GPNFT_DONE: 5521 qla24xx_async_gpnft_done(vha, e->u.iosb.sp); 5522 break; 5523 case QLA_EVT_GNNFT_DONE: 5524 qla24xx_async_gnnft_done(vha, e->u.iosb.sp); 5525 break; 5526 case QLA_EVT_GFPNID: 5527 qla24xx_async_gfpnid(vha, e->u.fcport.fcport); 5528 break; 5529 case QLA_EVT_SP_RETRY: 5530 qla_sp_retry(vha, e); 5531 break; 5532 case QLA_EVT_IIDMA: 5533 qla_do_iidma_work(vha, e->u.fcport.fcport); 5534 break; 5535 case QLA_EVT_ELS_PLOGI: 5536 qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, 5537 e->u.fcport.fcport, false); 5538 break; 5539 case QLA_EVT_SA_REPLACE: 5540 rc = qla24xx_issue_sa_replace_iocb(vha, e); 5541 break; 5542 } 5543 5544 if (rc == EAGAIN) { 5545 /* put 'work' at head of 'vha->work_list' */ 5546 spin_lock_irqsave(&vha->work_lock, flags); 5547 list_splice(&work, &vha->work_list); 5548 spin_unlock_irqrestore(&vha->work_lock, flags); 5549 break; 5550 } 5551 list_del_init(&e->list); 5552 if (e->flags & QLA_EVT_FLAG_FREE) 5553 kfree(e); 5554 5555 /* For each work completed decrement vha ref count */ 5556 QLA_VHA_MARK_NOT_BUSY(vha); 5557 } 5558 } 5559 5560 int qla24xx_post_relogin_work(struct scsi_qla_host *vha) 5561 { 5562 struct qla_work_evt *e; 5563 5564 e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN); 5565 5566 if (!e) { 5567 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 5568 return QLA_FUNCTION_FAILED; 5569 } 5570 5571 return qla2x00_post_work(vha, e); 5572 } 5573 5574 /* Relogins all the fcports of a vport 5575 * Context: dpc thread 5576 */ 5577 void qla2x00_relogin(struct scsi_qla_host *vha) 5578 { 5579 fc_port_t *fcport; 5580 int status, relogin_needed = 0; 5581 struct event_arg ea; 5582 5583 list_for_each_entry(fcport, &vha->vp_fcports, list) { 5584 /* 5585 * If the port is not ONLINE then try to login 5586 * to it if we haven't run out of retries. 
int qla24xx_post_relogin_work(struct scsi_qla_host *vha)
{
    struct qla_work_evt *e;

    e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN);

    if (!e) {
        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
        return QLA_FUNCTION_FAILED;
    }

    return qla2x00_post_work(vha, e);
}

/* Relogins all the fcports of a vport
 * Context: dpc thread
 */
void qla2x00_relogin(struct scsi_qla_host *vha)
{
    fc_port_t *fcport;
    int status, relogin_needed = 0;
    struct event_arg ea;

    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        /*
         * If the port is not ONLINE then try to login
         * to it if we haven't run out of retries.
         */
        if (atomic_read(&fcport->state) != FCS_ONLINE &&
            fcport->login_retry) {
            if (fcport->scan_state != QLA_FCPORT_FOUND ||
                fcport->disc_state == DSC_LOGIN_AUTH_PEND ||
                fcport->disc_state == DSC_LOGIN_COMPLETE)
                continue;

            if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) ||
                fcport->disc_state == DSC_DELETE_PEND) {
                relogin_needed = 1;
            } else {
                if (vha->hw->current_topology != ISP_CFG_NL) {
                    memset(&ea, 0, sizeof(ea));
                    ea.fcport = fcport;
                    qla24xx_handle_relogin_event(vha, &ea);
                } else if (vha->hw->current_topology ==
                    ISP_CFG_NL &&
                    IS_QLA2XXX_MIDTYPE(vha->hw)) {
                    (void)qla24xx_fcport_handle_login(vha,
                        fcport);
                } else if (vha->hw->current_topology ==
                    ISP_CFG_NL) {
                    fcport->login_retry--;
                    status =
                        qla2x00_local_device_login(vha,
                            fcport);
                    if (status == QLA_SUCCESS) {
                        fcport->old_loop_id =
                            fcport->loop_id;
                        ql_dbg(ql_dbg_disc, vha, 0x2003,
                            "Port login OK: logged in ID 0x%x.\n",
                            fcport->loop_id);
                        qla2x00_update_fcport
                            (vha, fcport);
                    } else if (status == 1) {
                        set_bit(RELOGIN_NEEDED,
                            &vha->dpc_flags);
                        /* retry the login again */
                        ql_dbg(ql_dbg_disc, vha, 0x2007,
                            "Retrying %d login again loop_id 0x%x.\n",
                            fcport->login_retry,
                            fcport->loop_id);
                    } else {
                        fcport->login_retry = 0;
                    }

                    if (fcport->login_retry == 0 &&
                        status != QLA_SUCCESS)
                        qla2x00_clear_loop_id(fcport);
                }
            }
        }
        if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
            break;
    }

    if (relogin_needed)
        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);

    ql_dbg(ql_dbg_disc, vha, 0x400e,
        "Relogin end.\n");
}
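/*
 * The qla83xx_* helpers below implement the IDC (Inter-Driver
 * Communication) protocol used on ISP83xx-based converged adapters:
 * scheduling IDC work on the high/low-priority DPC workqueues, acquiring
 * and releasing the IDC lock through remote driver-lock/lock-id
 * registers, and recovering the lock from a dead owner.
 */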
Skip " 5670 "scheduling it again.\n"); 5671 break; 5672 case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */ 5673 if (ha->dpc_hp_wq) 5674 queue_work(ha->dpc_hp_wq, &ha->idc_state_handler); 5675 break; 5676 case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */ 5677 if (ha->dpc_hp_wq) 5678 queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable); 5679 break; 5680 default: 5681 ql_log(ql_log_warn, base_vha, 0xb05f, 5682 "Unknown work-code=0x%x.\n", work_code); 5683 } 5684 5685 return; 5686 } 5687 5688 /* Work: Perform NIC Core Unrecoverable state handling */ 5689 void 5690 qla83xx_nic_core_unrecoverable_work(struct work_struct *work) 5691 { 5692 struct qla_hw_data *ha = 5693 container_of(work, struct qla_hw_data, nic_core_unrecoverable); 5694 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5695 uint32_t dev_state = 0; 5696 5697 qla83xx_idc_lock(base_vha, 0); 5698 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5699 qla83xx_reset_ownership(base_vha); 5700 if (ha->flags.nic_core_reset_owner) { 5701 ha->flags.nic_core_reset_owner = 0; 5702 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, 5703 QLA8XXX_DEV_FAILED); 5704 ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n"); 5705 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); 5706 } 5707 qla83xx_idc_unlock(base_vha, 0); 5708 } 5709 5710 /* Work: Execute IDC state handler */ 5711 void 5712 qla83xx_idc_state_handler_work(struct work_struct *work) 5713 { 5714 struct qla_hw_data *ha = 5715 container_of(work, struct qla_hw_data, idc_state_handler); 5716 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5717 uint32_t dev_state = 0; 5718 5719 qla83xx_idc_lock(base_vha, 0); 5720 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5721 if (dev_state == QLA8XXX_DEV_FAILED || 5722 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) 5723 qla83xx_idc_state_handler(base_vha); 5724 qla83xx_idc_unlock(base_vha, 0); 5725 } 5726 5727 static int 5728 qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha) 5729 { 5730 int rval = QLA_SUCCESS; 5731 unsigned long heart_beat_wait = jiffies + (1 * HZ); 5732 uint32_t heart_beat_counter1, heart_beat_counter2; 5733 5734 do { 5735 if (time_after(jiffies, heart_beat_wait)) { 5736 ql_dbg(ql_dbg_p3p, base_vha, 0xb07c, 5737 "Nic Core f/w is not alive.\n"); 5738 rval = QLA_FUNCTION_FAILED; 5739 break; 5740 } 5741 5742 qla83xx_idc_lock(base_vha, 0); 5743 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 5744 &heart_beat_counter1); 5745 qla83xx_idc_unlock(base_vha, 0); 5746 msleep(100); 5747 qla83xx_idc_lock(base_vha, 0); 5748 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 5749 &heart_beat_counter2); 5750 qla83xx_idc_unlock(base_vha, 0); 5751 } while (heart_beat_counter1 == heart_beat_counter2); 5752 5753 return rval; 5754 } 5755 5756 /* Work: Perform NIC Core Reset handling */ 5757 void 5758 qla83xx_nic_core_reset_work(struct work_struct *work) 5759 { 5760 struct qla_hw_data *ha = 5761 container_of(work, struct qla_hw_data, nic_core_reset); 5762 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5763 uint32_t dev_state = 0; 5764 5765 if (IS_QLA2031(ha)) { 5766 if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS) 5767 ql_log(ql_log_warn, base_vha, 0xb081, 5768 "Failed to dump mctp\n"); 5769 return; 5770 } 5771 5772 if (!ha->flags.nic_core_reset_hdlr_active) { 5773 if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) { 5774 qla83xx_idc_lock(base_vha, 0); 5775 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, 5776 &dev_state); 5777 qla83xx_idc_unlock(base_vha, 0); 5778 if (dev_state != 
static int
qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
{
    int rval = QLA_SUCCESS;
    unsigned long heart_beat_wait = jiffies + (1 * HZ);
    uint32_t heart_beat_counter1, heart_beat_counter2;

    do {
        if (time_after(jiffies, heart_beat_wait)) {
            ql_dbg(ql_dbg_p3p, base_vha, 0xb07c,
                "Nic Core f/w is not alive.\n");
            rval = QLA_FUNCTION_FAILED;
            break;
        }

        qla83xx_idc_lock(base_vha, 0);
        qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
            &heart_beat_counter1);
        qla83xx_idc_unlock(base_vha, 0);
        msleep(100);
        qla83xx_idc_lock(base_vha, 0);
        qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
            &heart_beat_counter2);
        qla83xx_idc_unlock(base_vha, 0);
    } while (heart_beat_counter1 == heart_beat_counter2);

    return rval;
}

/* Work: Perform NIC Core Reset handling */
void
qla83xx_nic_core_reset_work(struct work_struct *work)
{
    struct qla_hw_data *ha =
        container_of(work, struct qla_hw_data, nic_core_reset);
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
    uint32_t dev_state = 0;

    if (IS_QLA2031(ha)) {
        if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS)
            ql_log(ql_log_warn, base_vha, 0xb081,
                "Failed to dump mctp\n");
        return;
    }

    if (!ha->flags.nic_core_reset_hdlr_active) {
        if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) {
            qla83xx_idc_lock(base_vha, 0);
            qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE,
                &dev_state);
            qla83xx_idc_unlock(base_vha, 0);
            if (dev_state != QLA8XXX_DEV_NEED_RESET) {
                ql_dbg(ql_dbg_p3p, base_vha, 0xb07a,
                    "Nic Core f/w is alive.\n");
                return;
            }
        }

        ha->flags.nic_core_reset_hdlr_active = 1;
        if (qla83xx_nic_core_reset(base_vha)) {
            /* NIC Core reset failed. */
            ql_dbg(ql_dbg_p3p, base_vha, 0xb061,
                "NIC Core reset failed.\n");
        }
        ha->flags.nic_core_reset_hdlr_active = 0;
    }
}

/* Work: Handle 8200 IDC aens */
void
qla83xx_service_idc_aen(struct work_struct *work)
{
    struct qla_hw_data *ha =
        container_of(work, struct qla_hw_data, idc_aen);
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
    uint32_t dev_state, idc_control;

    qla83xx_idc_lock(base_vha, 0);
    qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
    qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control);
    qla83xx_idc_unlock(base_vha, 0);
    if (dev_state == QLA8XXX_DEV_NEED_RESET) {
        if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) {
            ql_dbg(ql_dbg_p3p, base_vha, 0xb062,
                "Application requested NIC Core Reset.\n");
            qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
        } else if (qla83xx_check_nic_core_fw_alive(base_vha) ==
            QLA_SUCCESS) {
            ql_dbg(ql_dbg_p3p, base_vha, 0xb07b,
                "Other protocol driver requested NIC Core Reset.\n");
            qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
        }
    } else if (dev_state == QLA8XXX_DEV_FAILED ||
        dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
        qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
    }
}

/*
 * Control the frequency of IDC lock retries
 */
#define QLA83XX_WAIT_LOGIC_MS 100

static int
qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
{
    int rval;
    uint32_t data;
    uint32_t idc_lck_rcvry_stage_mask = 0x3;
    uint32_t idc_lck_rcvry_owner_mask = 0x3c;
    struct qla_hw_data *ha = base_vha->hw;

    ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
        "Trying force recovery of the IDC lock.\n");

    rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
    if (rval)
        return rval;

    if ((data & idc_lck_rcvry_stage_mask) > 0) {
        return QLA_SUCCESS;
    } else {
        data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2);
        rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
            data);
        if (rval)
            return rval;

        msleep(200);

        rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
            &data);
        if (rval)
            return rval;

        if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) {
            data &= (IDC_LOCK_RECOVERY_STAGE2 |
                ~(idc_lck_rcvry_stage_mask));
            rval = qla83xx_wr_reg(base_vha,
                QLA83XX_IDC_LOCK_RECOVERY, data);
            if (rval)
                return rval;

            /* Forcefully perform IDC UnLock */
            rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK,
                &data);
            if (rval)
                return rval;
            /* Clear lock-id by setting 0xff */
            rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
                0xff);
            if (rval)
                return rval;
            /* Clear lock-recovery by setting 0x0 */
            rval = qla83xx_wr_reg(base_vha,
                QLA83XX_IDC_LOCK_RECOVERY, 0x0);
            if (rval)
                return rval;
        } else
            return QLA_SUCCESS;
    }

    return rval;
}
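/*
 * Watch QLA83XX_DRIVER_LOCKID for a change of owner. If the lock-id is
 * still unchanged after QLA83XX_MAX_LOCK_RECOVERY_WAIT, the current owner
 * is presumed dead and forced lock recovery is attempted.
 */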
static int
qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
{
    int rval = QLA_SUCCESS;
    uint32_t o_drv_lockid, n_drv_lockid;
    unsigned long lock_recovery_timeout;

    lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT;
retry_lockid:
    rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid);
    if (rval)
        goto exit;

    /* MAX wait time before forcing IDC Lock recovery = 2 secs */
    if (time_after_eq(jiffies, lock_recovery_timeout)) {
        if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS)
            return QLA_SUCCESS;
        else
            return QLA_FUNCTION_FAILED;
    }

    rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid);
    if (rval)
        goto exit;

    if (o_drv_lockid == n_drv_lockid) {
        msleep(QLA83XX_WAIT_LOGIC_MS);
        goto retry_lockid;
    } else
        return QLA_SUCCESS;

exit:
    return rval;
}

/*
 * Context: task, can sleep
 */
void
qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
    uint32_t data;
    uint32_t lock_owner;
    struct qla_hw_data *ha = base_vha->hw;

    might_sleep();

    /* IDC-lock implementation using driver-lock/lock-id remote registers */
retry_lock:
    if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
        == QLA_SUCCESS) {
        if (data) {
            /* Setting lock-id to our function-number */
            qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
                ha->portnum);
        } else {
            qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
                &lock_owner);
            ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
                "Failed to acquire IDC lock, acquired by %d, "
                "retrying...\n", lock_owner);

            /* Retry/Perform IDC-Lock recovery */
            if (qla83xx_idc_lock_recovery(base_vha)
                == QLA_SUCCESS) {
                msleep(QLA83XX_WAIT_LOGIC_MS);
                goto retry_lock;
            } else
                ql_log(ql_log_warn, base_vha, 0xb075,
                    "IDC Lock recovery FAILED.\n");
        }

    }

    return;
}
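/*
 * Decide whether the RDP response must be shrunk. The response is
 * truncated (optical element and product descriptors dropped) only when
 * the request came from the Domain Controller at 0xfffc01 and that port
 * is not logged in, and the firmware is older than a version known to
 * accept a response of up to 2048 bytes.
 */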
static bool
qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha,
    struct purex_entry_24xx *purex)
{
    char fwstr[16];
    u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0];
    struct port_database_24xx *pdb;

    /* Domain Controller is always logged-out. */
    /* if RDP request is not from Domain Controller: */
    if (sid != 0xfffc01)
        return false;

    ql_dbg(ql_dbg_init, vha, 0x0181, "%s: s_id=%#x\n", __func__, sid);

    pdb = kzalloc(sizeof(*pdb), GFP_KERNEL);
    if (!pdb) {
        ql_dbg(ql_dbg_init, vha, 0x0181,
            "%s: Failed allocate pdb\n", __func__);
    } else if (qla24xx_get_port_database(vha,
        le16_to_cpu(purex->nport_handle), pdb)) {
        ql_dbg(ql_dbg_init, vha, 0x0181,
            "%s: Failed get pdb sid=%x\n", __func__, sid);
    } else if (pdb->current_login_state != PDS_PLOGI_COMPLETE &&
        pdb->current_login_state != PDS_PRLI_COMPLETE) {
        ql_dbg(ql_dbg_init, vha, 0x0181,
            "%s: Port not logged in sid=%#x\n", __func__, sid);
    } else {
        /* RDP request is from logged in port */
        kfree(pdb);
        return false;
    }
    kfree(pdb);

    vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr));
    fwstr[strcspn(fwstr, " ")] = 0;
    /* if FW version allows RDP response length up to 2048 bytes: */
    if (strcmp(fwstr, "8.09.00") > 0 || strcmp(fwstr, "8.05.65") == 0)
        return false;

    ql_dbg(ql_dbg_init, vha, 0x0181, "%s: fw=%s\n", __func__, fwstr);

    /* RDP response length is to be reduced to maximum 256 bytes */
    return true;
}

/*
 * Function Name: qla24xx_process_purex_rdp
 *
 * Description:
 * Prepare an RDP response and send it to the Fabric switch
 *
 * PARAMETERS:
 * vha: SCSI qla host
 * purex: RDP request received by HBA
 */
void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
    struct purex_item *item)
{
    struct qla_hw_data *ha = vha->hw;
    struct purex_entry_24xx *purex =
        (struct purex_entry_24xx *)&item->iocb;
    dma_addr_t rsp_els_dma;
    dma_addr_t rsp_payload_dma;
    dma_addr_t stat_dma;
    dma_addr_t sfp_dma;
    struct els_entry_24xx *rsp_els = NULL;
    struct rdp_rsp_payload *rsp_payload = NULL;
    struct link_statistics *stat = NULL;
    uint8_t *sfp = NULL;
    uint16_t sfp_flags = 0;
    uint rsp_payload_length = sizeof(*rsp_payload);
    int rval;

    ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180,
        "%s: Enter\n", __func__);

    ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181,
        "-------- ELS REQ -------\n");
    ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182,
        purex, sizeof(*purex));

    if (qla25xx_rdp_rsp_reduce_size(vha, purex)) {
        rsp_payload_length =
            offsetof(typeof(*rsp_payload), optical_elmt_desc);
        ql_dbg(ql_dbg_init, vha, 0x0181,
            "Reducing RSP payload length to %u bytes...\n",
            rsp_payload_length);
    }

    rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els),
        &rsp_els_dma, GFP_KERNEL);
    if (!rsp_els) {
        ql_log(ql_log_warn, vha, 0x0183,
            "Failed allocate dma buffer ELS RSP.\n");
        goto dealloc;
    }

    rsp_payload = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
        &rsp_payload_dma, GFP_KERNEL);
    if (!rsp_payload) {
        ql_log(ql_log_warn, vha, 0x0184,
            "Failed allocate dma buffer ELS RSP payload.\n");
        goto dealloc;
    }

    sfp = dma_alloc_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
        &sfp_dma, GFP_KERNEL);

    stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat),
        &stat_dma, GFP_KERNEL);

    /* Prepare Response IOCB */
    rsp_els->entry_type = ELS_IOCB_TYPE;
    rsp_els->entry_count = 1;
    rsp_els->sys_define = 0;
    rsp_els->entry_status = 0;
    rsp_els->handle = 0;
    rsp_els->nport_handle = purex->nport_handle;
    rsp_els->tx_dsd_count = cpu_to_le16(1);
    rsp_els->vp_index = purex->vp_idx;
    rsp_els->sof_type = EST_SOFI3;
    rsp_els->rx_xchg_address = purex->rx_xchg_addr;
    rsp_els->rx_dsd_count = 0;
    rsp_els->opcode = purex->els_frame_payload[0];

    rsp_els->d_id[0] = purex->s_id[0];
    rsp_els->d_id[1] = purex->s_id[1];
    rsp_els->d_id[2] = purex->s_id[2];

    rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC);
    rsp_els->rx_byte_count = 0;
    rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length);

    put_unaligned_le64(rsp_payload_dma, &rsp_els->tx_address);
    rsp_els->tx_len = rsp_els->tx_byte_count;

    rsp_els->rx_address = 0;
    rsp_els->rx_len = 0;

    /* Prepare Response Payload */
    rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */
    rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) -
        sizeof(rsp_payload->hdr));

    /* Link service Request Info Descriptor */
    rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1);
    rsp_payload->ls_req_info_desc.desc_len =
        cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc));
    rsp_payload->ls_req_info_desc.req_payload_word_0 =
        cpu_to_be32p((uint32_t *)purex->els_frame_payload);

    /* Link service Request Info Descriptor 2 */
    rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1);
    rsp_payload->ls_req_info_desc2.desc_len =
        cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2));
    rsp_payload->ls_req_info_desc2.req_payload_word_0 =
        cpu_to_be32p((uint32_t *)purex->els_frame_payload);


    rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000);
    rsp_payload->sfp_diag_desc.desc_len =
        cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc));

    if (sfp) {
        /* SFP Flags */
        memset(sfp, 0, SFP_RTDI_LEN);
        rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0);
        if (!rval) {
            /* SFP Flags bits 3-0: Port Tx Laser Type */
            if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5))
                sfp_flags |= BIT_0; /* short wave */
            else if (sfp[0] & BIT_1)
                sfp_flags |= BIT_1; /* long wave 1310nm */
            else if (sfp[1] & BIT_4)
                sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */
        }

        /* SFP Type */
        memset(sfp, 0, SFP_RTDI_LEN);
        rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0);
        if (!rval) {
            sfp_flags |= BIT_4; /* optical */
            if (sfp[0] == 0x3)
                sfp_flags |= BIT_6; /* sfp+ */
        }

        rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags);

        /* SFP Diagnostics */
        memset(sfp, 0, SFP_RTDI_LEN);
        rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0);
        if (!rval) {
            __be16 *trx = (__force __be16 *)sfp; /* already be16 */
            rsp_payload->sfp_diag_desc.temperature = trx[0];
            rsp_payload->sfp_diag_desc.vcc = trx[1];
            rsp_payload->sfp_diag_desc.tx_bias = trx[2];
            rsp_payload->sfp_diag_desc.tx_power = trx[3];
            rsp_payload->sfp_diag_desc.rx_power = trx[4];
        }
    }

    /* Port Speed Descriptor */
    rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001);
    rsp_payload->port_speed_desc.desc_len =
        cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc));
    rsp_payload->port_speed_desc.speed_capab = cpu_to_be16(
        qla25xx_fdmi_port_speed_capability(ha));
    rsp_payload->port_speed_desc.operating_speed = cpu_to_be16(
        qla25xx_fdmi_port_speed_currently(ha));

    /* Link Error Status Descriptor */
    rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002);
    rsp_payload->ls_err_desc.desc_len =
        cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc));

    if (stat) {
        rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0);
        if (!rval) {
            rsp_payload->ls_err_desc.link_fail_cnt =
                cpu_to_be32(le32_to_cpu(stat->link_fail_cnt));
            rsp_payload->ls_err_desc.loss_sync_cnt =
                cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt));
            rsp_payload->ls_err_desc.loss_sig_cnt =
                cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt));
            rsp_payload->ls_err_desc.prim_seq_err_cnt =
                cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt));
            rsp_payload->ls_err_desc.inval_xmit_word_cnt =
                cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt));
            rsp_payload->ls_err_desc.inval_crc_cnt =
                cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt));
            rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6;
        }
    }

    /* Portname Descriptor */
    rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003);
    rsp_payload->port_name_diag_desc.desc_len =
        cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc));
    memcpy(rsp_payload->port_name_diag_desc.WWNN,
        vha->node_name,
        sizeof(rsp_payload->port_name_diag_desc.WWNN));
    memcpy(rsp_payload->port_name_diag_desc.WWPN,
        vha->port_name,
        sizeof(rsp_payload->port_name_diag_desc.WWPN));

    /* F-Port Portname Descriptor */
    rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003);
    rsp_payload->port_name_direct_desc.desc_len =
        cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc));
    memcpy(rsp_payload->port_name_direct_desc.WWNN,
        vha->fabric_node_name,
        sizeof(rsp_payload->port_name_direct_desc.WWNN));
    memcpy(rsp_payload->port_name_direct_desc.WWPN,
        vha->fabric_port_name,
        sizeof(rsp_payload->port_name_direct_desc.WWPN));

    /* Buffer Credit Descriptor */
    rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006);
    rsp_payload->buffer_credit_desc.desc_len =
        cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc));
    rsp_payload->buffer_credit_desc.fcport_b2b = 0;
    rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0);
    rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0);

    if (ha->flags.plogi_template_valid) {
        uint32_t tmp =
            be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred);
        rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp);
    }

    if (rsp_payload_length < sizeof(*rsp_payload))
        goto send;

    /* Optical Element Descriptor, Temperature */
    rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007);
    rsp_payload->optical_elmt_desc[0].desc_len =
        cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
    /* Optical Element Descriptor, Voltage */
    rsp_payload->optical_elmt_desc[1].desc_tag = cpu_to_be32(0x10007);
    rsp_payload->optical_elmt_desc[1].desc_len =
        cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
    /* Optical Element Descriptor, Tx Bias Current */
    rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007);
    rsp_payload->optical_elmt_desc[2].desc_len =
        cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
    /* Optical Element Descriptor, Tx Power */
    rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007);
    rsp_payload->optical_elmt_desc[3].desc_len =
        cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
    /* Optical Element Descriptor, Rx Power */
    rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007);
    rsp_payload->optical_elmt_desc[4].desc_len =
        cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));

    if (sfp) {
        memset(sfp, 0, SFP_RTDI_LEN);
        rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0);
        if (!rval) {
            __be16 *trx = (__force __be16 *)sfp; /* already be16 */

            /* Optical Element Descriptor, Temperature */
            rsp_payload->optical_elmt_desc[0].high_alarm = trx[0];
            rsp_payload->optical_elmt_desc[0].low_alarm = trx[1];
            rsp_payload->optical_elmt_desc[0].high_warn = trx[2];
            rsp_payload->optical_elmt_desc[0].low_warn = trx[3];
            rsp_payload->optical_elmt_desc[0].element_flags =
                cpu_to_be32(1 << 28);

            /* Optical Element Descriptor, Voltage */
            rsp_payload->optical_elmt_desc[1].high_alarm = trx[4];
            rsp_payload->optical_elmt_desc[1].low_alarm = trx[5];
            rsp_payload->optical_elmt_desc[1].high_warn = trx[6];
            rsp_payload->optical_elmt_desc[1].low_warn = trx[7];
            rsp_payload->optical_elmt_desc[1].element_flags =
                cpu_to_be32(2 << 28);

            /* Optical Element Descriptor, Tx Bias Current */
            rsp_payload->optical_elmt_desc[2].high_alarm = trx[8];
            rsp_payload->optical_elmt_desc[2].low_alarm = trx[9];
            rsp_payload->optical_elmt_desc[2].high_warn = trx[10];
            rsp_payload->optical_elmt_desc[2].low_warn = trx[11];
            rsp_payload->optical_elmt_desc[2].element_flags =
                cpu_to_be32(3 << 28);

            /* Optical Element Descriptor, Tx Power */
            rsp_payload->optical_elmt_desc[3].high_alarm = trx[12];
            rsp_payload->optical_elmt_desc[3].low_alarm = trx[13];
            rsp_payload->optical_elmt_desc[3].high_warn = trx[14];
            rsp_payload->optical_elmt_desc[3].low_warn = trx[15];
            rsp_payload->optical_elmt_desc[3].element_flags =
                cpu_to_be32(4 << 28);

            /* Optical Element Descriptor, Rx Power */
            rsp_payload->optical_elmt_desc[4].high_alarm = trx[16];
            rsp_payload->optical_elmt_desc[4].low_alarm = trx[17];
            rsp_payload->optical_elmt_desc[4].high_warn = trx[18];
            rsp_payload->optical_elmt_desc[4].low_warn = trx[19];
            rsp_payload->optical_elmt_desc[4].element_flags =
                cpu_to_be32(5 << 28);
        }

        memset(sfp, 0, SFP_RTDI_LEN);
        rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0);
        if (!rval) {
            /* Temperature high/low alarm/warning */
            rsp_payload->optical_elmt_desc[0].element_flags |=
                cpu_to_be32(
                    (sfp[0] >> 7 & 1) << 3 |
                    (sfp[0] >> 6 & 1) << 2 |
                    (sfp[4] >> 7 & 1) << 1 |
                    (sfp[4] >> 6 & 1) << 0);

            /* Voltage high/low alarm/warning */
            rsp_payload->optical_elmt_desc[1].element_flags |=
                cpu_to_be32(
                    (sfp[0] >> 5 & 1) << 3 |
                    (sfp[0] >> 4 & 1) << 2 |
                    (sfp[4] >> 5 & 1) << 1 |
                    (sfp[4] >> 4 & 1) << 0);

            /* Tx Bias Current high/low alarm/warning */
            rsp_payload->optical_elmt_desc[2].element_flags |=
                cpu_to_be32(
                    (sfp[0] >> 3 & 1) << 3 |
                    (sfp[0] >> 2 & 1) << 2 |
                    (sfp[4] >> 3 & 1) << 1 |
                    (sfp[4] >> 2 & 1) << 0);

            /* Tx Power high/low alarm/warning */
            rsp_payload->optical_elmt_desc[3].element_flags |=
                cpu_to_be32(
                    (sfp[0] >> 1 & 1) << 3 |
                    (sfp[0] >> 0 & 1) << 2 |
                    (sfp[4] >> 1 & 1) << 1 |
                    (sfp[4] >> 0 & 1) << 0);

            /* Rx Power high/low alarm/warning */
            rsp_payload->optical_elmt_desc[4].element_flags |=
                cpu_to_be32(
                    (sfp[1] >> 7 & 1) << 3 |
                    (sfp[1] >> 6 & 1) << 2 |
                    (sfp[5] >> 7 & 1) << 1 |
                    (sfp[5] >> 6 & 1) << 0);
        }
    }

    /* Optical Product Data Descriptor */
    rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008);
    rsp_payload->optical_prod_desc.desc_len =
        cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc));

    if (sfp) {
        memset(sfp, 0, SFP_RTDI_LEN);
        rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0);
        if (!rval) {
            memcpy(rsp_payload->optical_prod_desc.vendor_name,
                sfp + 0,
                sizeof(rsp_payload->optical_prod_desc.vendor_name));
            memcpy(rsp_payload->optical_prod_desc.part_number,
                sfp + 20,
                sizeof(rsp_payload->optical_prod_desc.part_number));
            memcpy(rsp_payload->optical_prod_desc.revision,
                sfp + 36,
                sizeof(rsp_payload->optical_prod_desc.revision));
            memcpy(rsp_payload->optical_prod_desc.serial_number,
                sfp + 48,
                sizeof(rsp_payload->optical_prod_desc.serial_number));
        }

        memset(sfp, 0, SFP_RTDI_LEN);
        rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0);
        if (!rval) {
            memcpy(rsp_payload->optical_prod_desc.date,
                sfp + 0,
                sizeof(rsp_payload->optical_prod_desc.date));
        }
    }

send:
    ql_dbg(ql_dbg_init, vha, 0x0183,
        "Sending ELS Response to RDP Request...\n");
    ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184,
        "-------- ELS RSP -------\n");
    ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185,
        rsp_els, sizeof(*rsp_els));
    ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186,
        "-------- ELS RSP PAYLOAD -------\n");
    ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187,
        rsp_payload, rsp_payload_length);

    rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x0188,
            "%s: iocb failed to execute -> %x\n", __func__, rval);
    } else if (rsp_els->comp_status) {
        ql_log(ql_log_warn, vha, 0x0189,
            "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
            __func__, rsp_els->comp_status,
            rsp_els->error_subcode_1, rsp_els->error_subcode_2);
    } else {
        ql_dbg(ql_dbg_init, vha, 0x018a, "%s: done.\n", __func__);
    }

dealloc:
    if (stat)
        dma_free_coherent(&ha->pdev->dev, sizeof(*stat),
            stat, stat_dma);
    if (sfp)
        dma_free_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
            sfp, sfp_dma);
    if (rsp_payload)
        dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
            rsp_payload, rsp_payload_dma);
    if (rsp_els)
        dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els),
            rsp_els, rsp_els_dma);
}

void
qla24xx_free_purex_item(struct purex_item *item)
{
    if (item == &item->vha->default_item)
        memset(&item->vha->default_item, 0, sizeof(struct purex_item));
    else
        kfree(item);
}
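/*
 * Drain the PUREX list: splice it onto a local list under list->lock,
 * then invoke each item's process_item() callback and free the item.
 */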
void qla24xx_process_purex_list(struct purex_list *list)
{
    struct list_head head = LIST_HEAD_INIT(head);
    struct purex_item *item, *next;
    ulong flags;

    spin_lock_irqsave(&list->lock, flags);
    list_splice_init(&list->head, &head);
    spin_unlock_irqrestore(&list->lock, flags);

    list_for_each_entry_safe(item, next, &head, list) {
        list_del(&item->list);
        item->process_item(item->vha, item);
        qla24xx_free_purex_item(item);
    }
}

/*
 * Context: task, can sleep
 */
void
qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
#if 0
    uint16_t options = (requester_id << 15) | BIT_7;
#endif
    uint16_t retry;
    uint32_t data;
    struct qla_hw_data *ha = base_vha->hw;

    might_sleep();

    /* IDC-unlock implementation using driver-unlock/lock-id
     * remote registers
     */
    retry = 0;
retry_unlock:
    if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
        == QLA_SUCCESS) {
        if (data == ha->portnum) {
            qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
            /* Clearing lock-id by setting 0xff */
            qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
        } else if (retry < 10) {
            /* SV: XXX: IDC unlock retrying needed here? */

            /* Retry for IDC-unlock */
            msleep(QLA83XX_WAIT_LOGIC_MS);
            retry++;
            ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
                "Failed to release IDC lock, retrying=%d\n", retry);
            goto retry_unlock;
        }
    } else if (retry < 10) {
        /* Retry for IDC-unlock */
        msleep(QLA83XX_WAIT_LOGIC_MS);
        retry++;
        ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
            "Failed to read drv-lockid, retrying=%d\n", retry);
        goto retry_unlock;
    }

    return;

#if 0
    /* XXX: IDC-unlock implementation using access-control mbx */
    retry = 0;
retry_unlock2:
    if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
        if (retry < 10) {
            /* Retry for IDC-unlock */
            msleep(QLA83XX_WAIT_LOGIC_MS);
            retry++;
            ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
                "Failed to release IDC lock, retrying=%d\n", retry);
            goto retry_unlock2;
        }
    }

    return;
#endif
}

int
__qla83xx_set_drv_presence(scsi_qla_host_t *vha)
{
    int rval = QLA_SUCCESS;
    struct qla_hw_data *ha = vha->hw;
    uint32_t drv_presence;

    rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
    if (rval == QLA_SUCCESS) {
        drv_presence |= (1 << ha->portnum);
        rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
            drv_presence);
    }

    return rval;
}

int
qla83xx_set_drv_presence(scsi_qla_host_t *vha)
{
    int rval = QLA_SUCCESS;

    qla83xx_idc_lock(vha, 0);
    rval = __qla83xx_set_drv_presence(vha);
    qla83xx_idc_unlock(vha, 0);

    return rval;
}

int
__qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
{
    int rval = QLA_SUCCESS;
    struct qla_hw_data *ha = vha->hw;
    uint32_t drv_presence;

    rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
    if (rval == QLA_SUCCESS) {
        drv_presence &= ~(1 << ha->portnum);
        rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
            drv_presence);
    }

    return rval;
}

int
qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
{
    int rval = QLA_SUCCESS;

    qla83xx_idc_lock(vha, 0);
    rval = __qla83xx_clear_drv_presence(vha);
    qla83xx_idc_unlock(vha, 0);

    return rval;
}
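/*
 * Wait up to ha->fcoe_reset_timeout seconds for every function present in
 * DRV-PRESENCE to acknowledge the reset in DRV-ACK. Functions that fail
 * to ack in time are withdrawn from DRV-PRESENCE, then the device state
 * is moved to COLD for re-initialization.
 */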
static void
qla83xx_need_reset_handler(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    uint32_t drv_ack, drv_presence;
    unsigned long ack_timeout;

    /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
    ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
    while (1) {
        qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
        qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
        if ((drv_ack & drv_presence) == drv_presence)
            break;

        if (time_after_eq(jiffies, ack_timeout)) {
            ql_log(ql_log_warn, vha, 0xb067,
                "RESET ACK TIMEOUT! drv_presence=0x%x "
                "drv_ack=0x%x\n", drv_presence, drv_ack);
            /*
             * The function(s) which did not ack in time are forced
             * to withdraw any further participation in the IDC
             * reset.
             */
            if (drv_ack != drv_presence)
                qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
                    drv_ack);
            break;
        }

        qla83xx_idc_unlock(vha, 0);
        msleep(1000);
        qla83xx_idc_lock(vha, 0);
    }

    qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
    ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
}

static int
qla83xx_device_bootstrap(scsi_qla_host_t *vha)
{
    int rval = QLA_SUCCESS;
    uint32_t idc_control;

    qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
    ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");

    /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
    __qla83xx_get_idc_control(vha, &idc_control);
    idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
    __qla83xx_set_idc_control(vha, 0);

    qla83xx_idc_unlock(vha, 0);
    rval = qla83xx_restart_nic_firmware(vha);
    qla83xx_idc_lock(vha, 0);

    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_fatal, vha, 0xb06a,
            "Failed to restart NIC f/w.\n");
        qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
        ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
    } else {
        ql_dbg(ql_dbg_p3p, vha, 0xb06c,
            "Success in restarting nic f/w.\n");
        qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
        ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
    }

    return rval;
}
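/*
 * Drive the IDC device-state machine until the device reaches READY or
 * FAILED, waiting up to ha->fcoe_dev_init_timeout seconds in each
 * transitional state. The IDC lock is dropped around every sleep and
 * re-acquired before the state register is sampled again.
 */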
/* Assumes idc_lock always held on entry */
int
qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
{
    struct qla_hw_data *ha = base_vha->hw;
    int rval = QLA_SUCCESS;
    unsigned long dev_init_timeout;
    uint32_t dev_state;

    /* Wait for MAX-INIT-TIMEOUT for the device to go ready */
    dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);

    while (1) {

        if (time_after_eq(jiffies, dev_init_timeout)) {
            ql_log(ql_log_warn, base_vha, 0xb06e,
                "Initialization TIMEOUT!\n");
            /* Init timeout. Disable further NIC Core
             * communication.
             */
            qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
                QLA8XXX_DEV_FAILED);
            ql_log(ql_log_info, base_vha, 0xb06f,
                "HW State: FAILED.\n");
        }

        qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
        switch (dev_state) {
        case QLA8XXX_DEV_READY:
            if (ha->flags.nic_core_reset_owner)
                qla83xx_idc_audit(base_vha,
                    IDC_AUDIT_COMPLETION);
            ha->flags.nic_core_reset_owner = 0;
            ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
                "Reset_owner reset by 0x%x.\n",
                ha->portnum);
            goto exit;
        case QLA8XXX_DEV_COLD:
            if (ha->flags.nic_core_reset_owner)
                rval = qla83xx_device_bootstrap(base_vha);
            else {
                /* Wait for AEN to change device-state */
                qla83xx_idc_unlock(base_vha, 0);
                msleep(1000);
                qla83xx_idc_lock(base_vha, 0);
            }
            break;
        case QLA8XXX_DEV_INITIALIZING:
            /* Wait for AEN to change device-state */
            qla83xx_idc_unlock(base_vha, 0);
            msleep(1000);
            qla83xx_idc_lock(base_vha, 0);
            break;
        case QLA8XXX_DEV_NEED_RESET:
            if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
                qla83xx_need_reset_handler(base_vha);
            else {
                /* Wait for AEN to change device-state */
                qla83xx_idc_unlock(base_vha, 0);
                msleep(1000);
                qla83xx_idc_lock(base_vha, 0);
            }
            /* reset timeout value after need reset handler */
            dev_init_timeout = jiffies +
                (ha->fcoe_dev_init_timeout * HZ);
            break;
        case QLA8XXX_DEV_NEED_QUIESCENT:
            /* XXX: DEBUG for now */
            qla83xx_idc_unlock(base_vha, 0);
            msleep(1000);
            qla83xx_idc_lock(base_vha, 0);
            break;
        case QLA8XXX_DEV_QUIESCENT:
            /* XXX: DEBUG for now */
            if (ha->flags.quiesce_owner)
                goto exit;

            qla83xx_idc_unlock(base_vha, 0);
            msleep(1000);
            qla83xx_idc_lock(base_vha, 0);
            dev_init_timeout = jiffies +
                (ha->fcoe_dev_init_timeout * HZ);
            break;
        case QLA8XXX_DEV_FAILED:
            if (ha->flags.nic_core_reset_owner)
                qla83xx_idc_audit(base_vha,
                    IDC_AUDIT_COMPLETION);
            ha->flags.nic_core_reset_owner = 0;
            __qla83xx_clear_drv_presence(base_vha);
            qla83xx_idc_unlock(base_vha, 0);
            qla8xxx_dev_failed_handler(base_vha);
            rval = QLA_FUNCTION_FAILED;
            qla83xx_idc_lock(base_vha, 0);
            goto exit;
        case QLA8XXX_BAD_VALUE:
            qla83xx_idc_unlock(base_vha, 0);
            msleep(1000);
            qla83xx_idc_lock(base_vha, 0);
            break;
        default:
            ql_log(ql_log_warn, base_vha, 0xb071,
                "Unknown Device State: %x.\n", dev_state);
            qla83xx_idc_unlock(base_vha, 0);
            qla8xxx_dev_failed_handler(base_vha);
            rval = QLA_FUNCTION_FAILED;
            qla83xx_idc_lock(base_vha, 0);
            goto exit;
        }
    }

exit:
    return rval;
}
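/*
 * Work item scheduled on an unrecoverable PCI error. Tears the adapter
 * down in place: aborts all commands, removes the SCSI host, frees
 * queues, IRQs and memory, and finally disables the PCI device. Final
 * qla_hw_data cleanup is left to qla2x00_remove_one() on device removal.
 */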
void
qla2x00_disable_board_on_pci_error(struct work_struct *work)
{
    struct qla_hw_data *ha = container_of(work, struct qla_hw_data,
        board_disable);
    struct pci_dev *pdev = ha->pdev;
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

    ql_log(ql_log_warn, base_vha, 0x015b,
        "Disabling adapter.\n");

    if (!atomic_read(&pdev->enable_cnt)) {
        ql_log(ql_log_info, base_vha, 0xfffc,
            "PCI device disabled, no action req for PCI error=%lx\n",
            base_vha->pci_flags);
        return;
    }

    /*
     * if UNLOADING flag is already set, then continue unload,
     * where it was set first.
     */
    if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
        return;

    qla2x00_wait_for_sess_deletion(base_vha);

    qla2x00_delete_all_vps(ha, base_vha);

    qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);

    qla2x00_dfs_remove(base_vha);

    qla84xx_put_chip(base_vha);

    if (base_vha->timer_active)
        qla2x00_stop_timer(base_vha);

    base_vha->flags.online = 0;

    qla2x00_destroy_deferred_work(ha);

    /*
     * Do not try to stop beacon blink as it will issue a mailbox
     * command.
     */
    qla2x00_free_sysfs_attr(base_vha, false);

    fc_remove_host(base_vha->host);

    scsi_remove_host(base_vha->host);

    base_vha->flags.init_done = 0;
    qla25xx_delete_queues(base_vha);
    qla2x00_free_fcports(base_vha);
    qla2x00_free_irqs(base_vha);
    qla2x00_mem_free(ha);
    qla82xx_md_free(base_vha);
    qla2x00_free_queues(ha);

    qla2x00_unmap_iobases(ha);

    pci_release_selected_regions(ha->pdev, ha->bars);
    pci_disable_pcie_error_reporting(pdev);
    pci_disable_device(pdev);

    /*
     * Let qla2x00_remove_one cleanup qla_hw_data on device removal.
     */
}

/**************************************************************************
* qla2x00_do_dpc
*   This kernel thread is a task that is scheduled by the interrupt
*   handler to perform the background processing for interrupts.
*
* Notes:
* This task always runs in the context of a kernel thread. It
* is kicked off by the driver's detect code and starts up
* one per adapter. It immediately goes to sleep and waits for
* some fibre event. When either the interrupt handler or
* the timer routine detects an event, it will set one of the task
* bits and then wake us up.
**************************************************************************/
static int
qla2x00_do_dpc(void *data)
{
    scsi_qla_host_t *base_vha;
    struct qla_hw_data *ha;
    uint32_t online;
    struct qla_qpair *qpair;

    ha = (struct qla_hw_data *)data;
    base_vha = pci_get_drvdata(ha->pdev);

    set_user_nice(current, MIN_NICE);

    set_current_state(TASK_INTERRUPTIBLE);
    while (!kthread_should_stop()) {
        ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
            "DPC handler sleeping.\n");

        schedule();

        if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags))
            qla_pci_set_eeh_busy(base_vha);

        if (!base_vha->flags.init_done || ha->flags.mbox_busy)
            goto end_loop;

        if (ha->flags.eeh_busy) {
            ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
                "eeh_busy=%d.\n", ha->flags.eeh_busy);
            goto end_loop;
        }

        ha->dpc_active = 1;

        ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
            "DPC handler waking up, dpc_flags=0x%lx.\n",
            base_vha->dpc_flags);

        if (test_bit(UNLOADING, &base_vha->dpc_flags))
            break;

        if (IS_P3P_TYPE(ha)) {
            if (IS_QLA8044(ha)) {
                if (test_and_clear_bit(ISP_UNRECOVERABLE,
                    &base_vha->dpc_flags)) {
                    qla8044_idc_lock(ha);
                    qla8044_wr_direct(base_vha,
                        QLA8044_CRB_DEV_STATE_INDEX,
                        QLA8XXX_DEV_FAILED);
                    qla8044_idc_unlock(ha);
                    ql_log(ql_log_info, base_vha, 0x4004,
                        "HW State: FAILED.\n");
                    qla8044_device_state_handler(base_vha);
                    continue;
                }

            } else {
                if (test_and_clear_bit(ISP_UNRECOVERABLE,
                    &base_vha->dpc_flags)) {
                    qla82xx_idc_lock(ha);
                    qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
                        QLA8XXX_DEV_FAILED);
                    qla82xx_idc_unlock(ha);
                    ql_log(ql_log_info, base_vha, 0x0151,
                        "HW State: FAILED.\n");
                    qla82xx_device_state_handler(base_vha);
                    continue;
                }
            }

            if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
                &base_vha->dpc_flags)) {

                ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
                    "FCoE context reset scheduled.\n");
                if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
                    &base_vha->dpc_flags))) {
                    if (qla82xx_fcoe_ctx_reset(base_vha)) {
                        /* FCoE-ctx reset failed.
                         * Escalate to chip-reset
                         */
                        set_bit(ISP_ABORT_NEEDED,
                            &base_vha->dpc_flags);
                    }
                    clear_bit(ABORT_ISP_ACTIVE,
                        &base_vha->dpc_flags);
                }

                ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
                    "FCoE context reset end.\n");
            }
        } else if (IS_QLAFX00(ha)) {
            if (test_and_clear_bit(ISP_UNRECOVERABLE,
                &base_vha->dpc_flags)) {
                ql_dbg(ql_dbg_dpc, base_vha, 0x4020,
                    "Firmware Reset Recovery\n");
                if (qlafx00_reset_initialize(base_vha)) {
                    /* Failed. Abort isp later. */
                    if (!test_bit(UNLOADING,
                        &base_vha->dpc_flags)) {
                        set_bit(ISP_UNRECOVERABLE,
                            &base_vha->dpc_flags);
                        ql_dbg(ql_dbg_dpc, base_vha,
                            0x4021,
                            "Reset Recovery Failed\n");
                    }
                }
            }

            if (test_and_clear_bit(FX00_TARGET_SCAN,
                &base_vha->dpc_flags)) {
                ql_dbg(ql_dbg_dpc, base_vha, 0x4022,
                    "ISPFx00 Target Scan scheduled\n");
                if (qlafx00_rescan_isp(base_vha)) {
                    if (!test_bit(UNLOADING,
                        &base_vha->dpc_flags))
                        set_bit(ISP_UNRECOVERABLE,
                            &base_vha->dpc_flags);
                    ql_dbg(ql_dbg_dpc, base_vha, 0x401e,
                        "ISPFx00 Target Scan Failed\n");
                }
                ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
                    "ISPFx00 Target Scan End\n");
            }
            if (test_and_clear_bit(FX00_HOST_INFO_RESEND,
                &base_vha->dpc_flags)) {
                ql_dbg(ql_dbg_dpc, base_vha, 0x4023,
                    "ISPFx00 Host Info resend scheduled\n");
                qlafx00_fx_disc(base_vha,
                    &base_vha->hw->mr.fcport,
                    FXDISC_REG_HOST_INFO);
            }
        }

        if (test_and_clear_bit(DETECT_SFP_CHANGE,
            &base_vha->dpc_flags)) {
            /* Semantic:
             *  - NO-OP -- await next ISP-ABORT. Preferred method
             *             to minimize disruptions that will occur
             *             when a forced chip-reset occurs.
             *  - Force -- ISP-ABORT scheduled.
             */
            /* set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); */
        }

        if (test_and_clear_bit
            (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
            !test_bit(UNLOADING, &base_vha->dpc_flags)) {
            bool do_reset = true;

            switch (base_vha->qlini_mode) {
            case QLA2XXX_INI_MODE_ENABLED:
                break;
            case QLA2XXX_INI_MODE_DISABLED:
                if (!qla_tgt_mode_enabled(base_vha) &&
                    !ha->flags.fw_started)
                    do_reset = false;
                break;
            case QLA2XXX_INI_MODE_DUAL:
                if (!qla_dual_mode_enabled(base_vha) &&
                    !ha->flags.fw_started)
                    do_reset = false;
                break;
            default:
                break;
            }

            if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
                &base_vha->dpc_flags))) {
                base_vha->flags.online = 1;
                ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
                    "ISP abort scheduled.\n");
                if (ha->isp_ops->abort_isp(base_vha)) {
                    /* failed. retry later */
                    set_bit(ISP_ABORT_NEEDED,
                        &base_vha->dpc_flags);
                }
                clear_bit(ABORT_ISP_ACTIVE,
                    &base_vha->dpc_flags);
                ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
                    "ISP abort end.\n");
            }
        }

        if (test_bit(PROCESS_PUREX_IOCB, &base_vha->dpc_flags)) {
            if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
                qla24xx_process_purex_list
                    (&base_vha->purex_list);
                clear_bit(PROCESS_PUREX_IOCB,
                    &base_vha->dpc_flags);
            }
        }

        if (IS_QLAFX00(ha))
            goto loop_resync_check;

        if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
            ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
                "Quiescence mode scheduled.\n");
            if (IS_P3P_TYPE(ha)) {
                if (IS_QLA82XX(ha))
                    qla82xx_device_state_handler(base_vha);
                if (IS_QLA8044(ha))
                    qla8044_device_state_handler(base_vha);
                clear_bit(ISP_QUIESCE_NEEDED,
                    &base_vha->dpc_flags);
                if (!ha->flags.quiesce_owner) {
                    qla2x00_perform_loop_resync(base_vha);
                    if (IS_QLA82XX(ha)) {
                        qla82xx_idc_lock(ha);
                        qla82xx_clear_qsnt_ready(
                            base_vha);
                        qla82xx_idc_unlock(ha);
                    } else if (IS_QLA8044(ha)) {
                        qla8044_idc_lock(ha);
                        qla8044_clear_qsnt_ready(
                            base_vha);
                        qla8044_idc_unlock(ha);
                    }
                }
            } else {
                clear_bit(ISP_QUIESCE_NEEDED,
                    &base_vha->dpc_flags);
                qla2x00_quiesce_io(base_vha);
            }
            ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
                "Quiescence mode end.\n");
        }

        if (test_and_clear_bit(RESET_MARKER_NEEDED,
            &base_vha->dpc_flags) &&
            (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {

            ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
                "Reset marker scheduled.\n");
            qla2x00_rst_aen(base_vha);
            clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
            ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
                "Reset marker end.\n");
        }

        /* Retry each device up to login retry count */
        if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
            !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
            atomic_read(&base_vha->loop_state) != LOOP_DOWN) {

            if (!base_vha->relogin_jif ||
                time_after_eq(jiffies, base_vha->relogin_jif)) {
                base_vha->relogin_jif = jiffies + HZ;
                clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);

                ql_dbg(ql_dbg_disc, base_vha, 0x400d,
                    "Relogin scheduled.\n");
                qla24xx_post_relogin_work(base_vha);
            }
        }
loop_resync_check:
        if (!qla2x00_reset_active(base_vha) &&
            test_and_clear_bit(LOOP_RESYNC_NEEDED,
            &base_vha->dpc_flags)) {
            /*
             * Allow abort_isp to complete before moving on to scanning.
             */
            ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
                "Loop resync scheduled.\n");

            if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
                &base_vha->dpc_flags))) {

                qla2x00_loop_resync(base_vha);

                clear_bit(LOOP_RESYNC_ACTIVE,
                    &base_vha->dpc_flags);
            }

            ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
                "Loop resync end.\n");
        }

        if (IS_QLAFX00(ha))
            goto intr_on_check;

        if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
            atomic_read(&base_vha->loop_state) == LOOP_READY) {
            clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
            qla2xxx_flash_npiv_conf(base_vha);
        }

intr_on_check:
        if (!ha->interrupts_on)
            ha->isp_ops->enable_intrs(ha);

        if (test_and_clear_bit(BEACON_BLINK_NEEDED,
            &base_vha->dpc_flags)) {
            if (ha->beacon_blink_led == 1)
                ha->isp_ops->beacon_blink(base_vha);
        }

        /* qpair online check */
        if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
            &base_vha->dpc_flags)) {
            if (ha->flags.eeh_busy ||
                ha->flags.pci_channel_io_perm_failure)
                online = 0;
            else
                online = 1;

            mutex_lock(&ha->mq_lock);
            list_for_each_entry(qpair, &base_vha->qp_list,
                qp_list_elem)
                qpair->online = online;
            mutex_unlock(&ha->mq_lock);
        }

        if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
            &base_vha->dpc_flags)) {
            u16 threshold = ha->nvme_last_rptd_aen + ha->last_zio_threshold;

            if (threshold > ha->orig_fw_xcb_count)
                threshold = ha->orig_fw_xcb_count;

            ql_log(ql_log_info, base_vha, 0xffffff,
                "SET ZIO Activity exchange threshold to %d.\n",
                threshold);
            if (qla27xx_set_zio_threshold(base_vha, threshold)) {
                ql_log(ql_log_info, base_vha, 0xffffff,
                    "Unable to SET ZIO Activity exchange threshold to %d.\n",
                    threshold);
            }
        }

        if (!IS_QLAFX00(ha))
            qla2x00_do_dpc_all_vps(base_vha);

        if (test_and_clear_bit(N2N_LINK_RESET,
            &base_vha->dpc_flags)) {
            qla2x00_lip_reset(base_vha);
        }

        ha->dpc_active = 0;
end_loop:
        set_current_state(TASK_INTERRUPTIBLE);
    } /* End of while(1) */
    __set_current_state(TASK_RUNNING);

    ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
        "DPC handler exiting.\n");

    /*
     * Make sure that nobody tries to wake us up again.
     */
    ha->dpc_active = 0;

    /* Cleanup any residual CTX SRBs. */
    qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);

    return 0;
}

void
qla2xxx_wake_dpc(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct task_struct *t = ha->dpc_thread;

    if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
        wake_up_process(t);
}

/*
 * qla2x00_rst_aen
 *      Processes asynchronous reset.
 *
 * Input:
 *      ha  = adapter block pointer.
 */
static void
qla2x00_rst_aen(scsi_qla_host_t *vha)
{
    if (vha->flags.online && !vha->flags.reset_active &&
        !atomic_read(&vha->loop_down_timer) &&
        !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
        do {
            clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

            /*
             * Issue marker command only when we are going to start
             * the I/O.
             */
            vha->marker_needed = 1;
        } while (!atomic_read(&vha->loop_down_timer) &&
            (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
    }
}
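/*
 * Decide whether a firmware heartbeat check is warranted: return true
 * when any queue pair shows no new completions since the last sample
 * while commands are still outstanding, i.e. the firmware may have
 * stopped responding.
 */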
static bool qla_do_heartbeat(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    u32 cmpl_cnt;
    u16 i;
    bool do_heartbeat = false;

    /*
     * Allow do_heartbeat only if we don't have any active interrupts,
     * but there are still IOs outstanding with firmware.
     */
    cmpl_cnt = ha->base_qpair->cmd_completion_cnt;
    if (cmpl_cnt == ha->base_qpair->prev_completion_cnt &&
        cmpl_cnt != ha->base_qpair->cmd_cnt) {
        do_heartbeat = true;
        goto skip;
    }
    ha->base_qpair->prev_completion_cnt = cmpl_cnt;

    for (i = 0; i < ha->max_qpairs; i++) {
        if (ha->queue_pair_map[i]) {
            cmpl_cnt = ha->queue_pair_map[i]->cmd_completion_cnt;
            if (cmpl_cnt == ha->queue_pair_map[i]->prev_completion_cnt &&
                cmpl_cnt != ha->queue_pair_map[i]->cmd_cnt) {
                do_heartbeat = true;
                break;
            }
            ha->queue_pair_map[i]->prev_completion_cnt = cmpl_cnt;
        }
    }

skip:
    return do_heartbeat;
}

static void qla_heart_beat(struct scsi_qla_host *vha, u16 dpc_started)
{
    struct qla_hw_data *ha = vha->hw;

    if (vha->vp_idx)
        return;

    if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha))
        return;

    /*
     * dpc thread cannot run if heartbeat is running at the same time.
     * We also do not want to starve heartbeat task. Therefore, do
     * heartbeat task at least once every 5 seconds.
     */
    if (dpc_started &&
        time_before(jiffies, ha->last_heartbeat_run_jiffies + 5 * HZ))
        return;

    if (qla_do_heartbeat(vha)) {
        ha->last_heartbeat_run_jiffies = jiffies;
        queue_work(ha->wq, &ha->heartbeat_work);
    }
}
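/*
 * Best-effort wind-down when an EEH error lingers without the PCI error
 * handlers running: after ql2xdelay_before_pci_error_handling seconds
 * the chip is reset and interrupts are disabled; five seconds later bus
 * mastering is cleared and all outstanding commands are aborted.
 */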
7317 */ 7318 if (time_after_eq(jiffies, ha->eeh_jif + ql2xdelay_before_pci_error_handling * HZ) && 7319 !ha->flags.eeh_flush) { 7320 ql_log(ql_log_info, vha, 0x9009, 7321 "PCI Error detected, attempting to reset hardware.\n"); 7322 7323 ha->isp_ops->reset_chip(vha); 7324 ha->isp_ops->disable_intrs(ha); 7325 7326 ha->flags.eeh_flush = EEH_FLUSH_RDY; 7327 ha->eeh_jif = jiffies; 7328 7329 } else if (ha->flags.eeh_flush == EEH_FLUSH_RDY && 7330 time_after_eq(jiffies, ha->eeh_jif + 5 * HZ)) { 7331 pci_clear_master(ha->pdev); 7332 7333 /* flush all command */ 7334 qla2x00_abort_isp_cleanup(vha); 7335 ha->flags.eeh_flush = EEH_FLUSH_DONE; 7336 7337 ql_log(ql_log_info, vha, 0x900a, 7338 "PCI Error handling complete, all IOs aborted.\n"); 7339 } 7340 } 7341 7342 /************************************************************************** 7343 * qla2x00_timer 7344 * 7345 * Description: 7346 * One second timer 7347 * 7348 * Context: Interrupt 7349 ***************************************************************************/ 7350 void 7351 qla2x00_timer(struct timer_list *t) 7352 { 7353 scsi_qla_host_t *vha = from_timer(vha, t, timer); 7354 unsigned long cpu_flags = 0; 7355 int start_dpc = 0; 7356 int index; 7357 srb_t *sp; 7358 uint16_t w; 7359 struct qla_hw_data *ha = vha->hw; 7360 struct req_que *req; 7361 unsigned long flags; 7362 fc_port_t *fcport = NULL; 7363 7364 if (ha->flags.eeh_busy) { 7365 qla_wind_down_chip(vha); 7366 7367 ql_dbg(ql_dbg_timer, vha, 0x6000, 7368 "EEH = %d, restarting timer.\n", 7369 ha->flags.eeh_busy); 7370 qla2x00_restart_timer(vha, WATCH_INTERVAL); 7371 return; 7372 } 7373 7374 /* 7375 * Hardware read to raise pending EEH errors during mailbox waits. If 7376 * the read returns -1 then disable the board. 7377 */ 7378 if (!pci_channel_offline(ha->pdev)) { 7379 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 7380 qla2x00_check_reg16_for_disconnect(vha, w); 7381 } 7382 7383 /* Make sure qla82xx_watchdog is run only for physical port */ 7384 if (!vha->vp_idx && IS_P3P_TYPE(ha)) { 7385 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) 7386 start_dpc++; 7387 if (IS_QLA82XX(ha)) 7388 qla82xx_watchdog(vha); 7389 else if (IS_QLA8044(ha)) 7390 qla8044_watchdog(vha); 7391 } 7392 7393 if (!vha->vp_idx && IS_QLAFX00(ha)) 7394 qlafx00_timer_routine(vha); 7395 7396 if (vha->link_down_time < QLA2XX_MAX_LINK_DOWN_TIME) 7397 vha->link_down_time++; 7398 7399 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 7400 list_for_each_entry(fcport, &vha->vp_fcports, list) { 7401 if (fcport->tgt_link_down_time < QLA2XX_MAX_LINK_DOWN_TIME) 7402 fcport->tgt_link_down_time++; 7403 } 7404 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 7405 7406 /* Loop down handler. */ 7407 if (atomic_read(&vha->loop_down_timer) > 0 && 7408 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && 7409 !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags)) 7410 && vha->flags.online) { 7411 7412 if (atomic_read(&vha->loop_down_timer) == 7413 vha->loop_down_abort_time) { 7414 7415 ql_log(ql_log_info, vha, 0x6008, 7416 "Loop down - aborting the queues before time expires.\n"); 7417 7418 if (!IS_QLA2100(ha) && vha->link_down_timeout) 7419 atomic_set(&vha->loop_state, LOOP_DEAD); 7420 7421 /* 7422 * Schedule an ISP abort to return any FCP2-device 7423 * commands. 
7424 */ 7425 /* NPIV - scan physical port only */ 7426 if (!vha->vp_idx) { 7427 spin_lock_irqsave(&ha->hardware_lock, 7428 cpu_flags); 7429 req = ha->req_q_map[0]; 7430 for (index = 1; 7431 index < req->num_outstanding_cmds; 7432 index++) { 7433 fc_port_t *sfcp; 7434 7435 sp = req->outstanding_cmds[index]; 7436 if (!sp) 7437 continue; 7438 if (sp->cmd_type != TYPE_SRB) 7439 continue; 7440 if (sp->type != SRB_SCSI_CMD) 7441 continue; 7442 sfcp = sp->fcport; 7443 if (!(sfcp->flags & FCF_FCP2_DEVICE)) 7444 continue; 7445 7446 if (IS_QLA82XX(ha)) 7447 set_bit(FCOE_CTX_RESET_NEEDED, 7448 &vha->dpc_flags); 7449 else 7450 set_bit(ISP_ABORT_NEEDED, 7451 &vha->dpc_flags); 7452 break; 7453 } 7454 spin_unlock_irqrestore(&ha->hardware_lock, 7455 cpu_flags); 7456 } 7457 start_dpc++; 7458 } 7459 7460 /* if the loop has been down for 4 minutes, reinit adapter */ 7461 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { 7462 if (!(vha->device_flags & DFLG_NO_CABLE) && !vha->vp_idx) { 7463 ql_log(ql_log_warn, vha, 0x6009, 7464 "Loop down - aborting ISP.\n"); 7465 7466 if (IS_QLA82XX(ha)) 7467 set_bit(FCOE_CTX_RESET_NEEDED, 7468 &vha->dpc_flags); 7469 else 7470 set_bit(ISP_ABORT_NEEDED, 7471 &vha->dpc_flags); 7472 } 7473 } 7474 ql_dbg(ql_dbg_timer, vha, 0x600a, 7475 "Loop down - seconds remaining %d.\n", 7476 atomic_read(&vha->loop_down_timer)); 7477 } 7478 /* Check if beacon LED needs to be blinked for physical host only */ 7479 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { 7480 /* There is no beacon_blink function for ISP82xx */ 7481 if (!IS_P3P_TYPE(ha)) { 7482 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); 7483 start_dpc++; 7484 } 7485 } 7486 7487 /* check if edif running */ 7488 if (vha->hw->flags.edif_enabled) 7489 qla_edif_timer(vha); 7490 7491 /* Process any deferred work. */ 7492 if (!list_empty(&vha->work_list)) { 7493 unsigned long flags; 7494 bool q = false; 7495 7496 spin_lock_irqsave(&vha->work_lock, flags); 7497 if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags)) 7498 q = true; 7499 spin_unlock_irqrestore(&vha->work_lock, flags); 7500 if (q) 7501 queue_work(vha->hw->wq, &vha->iocb_work); 7502 } 7503 7504 /* 7505 * FC-NVME 7506 * see if the active AEN count has changed from what was last reported. 
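 * If it has changed, and the adapter is running ZIO mode 6 and not
 * shutting down, schedule SET_ZIO_THRESHOLD_NEEDED on the physical port
 * so the DPC thread pushes the new exchange threshold to the firmware.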
7507 */ 7508 index = atomic_read(&ha->nvme_active_aen_cnt); 7509 if (!vha->vp_idx && 7510 (index != ha->nvme_last_rptd_aen) && 7511 ha->zio_mode == QLA_ZIO_MODE_6 && 7512 !ha->flags.host_shutting_down) { 7513 ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt); 7514 ql_log(ql_log_info, vha, 0x3002, 7515 "nvme: Sched: Set ZIO exchange threshold to %d.\n", 7516 ha->nvme_last_rptd_aen); 7517 set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); 7518 start_dpc++; 7519 } 7520 7521 if (!vha->vp_idx && 7522 atomic_read(&ha->zio_threshold) != ha->last_zio_threshold && 7523 IS_ZIO_THRESHOLD_CAPABLE(ha)) { 7524 ql_log(ql_log_info, vha, 0x3002, 7525 "Sched: Set ZIO exchange threshold to %d.\n", 7526 ha->last_zio_threshold); 7527 ha->last_zio_threshold = atomic_read(&ha->zio_threshold); 7528 set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); 7529 start_dpc++; 7530 } 7531 qla_adjust_buf(vha); 7532 7533 /* borrowing w to signify dpc will run */ 7534 w = 0; 7535 /* Schedule the DPC routine if needed */ 7536 if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 7537 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) || 7538 start_dpc || 7539 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) || 7540 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) || 7541 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || 7542 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || 7543 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || 7544 test_bit(RELOGIN_NEEDED, &vha->dpc_flags) || 7545 test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) { 7546 ql_dbg(ql_dbg_timer, vha, 0x600b, 7547 "isp_abort_needed=%d loop_resync_needed=%d " 7548 "start_dpc=%d reset_marker_needed=%d", 7549 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags), 7550 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags), 7551 start_dpc, test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)); 7552 ql_dbg(ql_dbg_timer, vha, 0x600c, 7553 "beacon_blink_needed=%d isp_unrecoverable=%d " 7554 "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " 7555 "relogin_needed=%d, Process_purex_iocb=%d.\n", 7556 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags), 7557 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags), 7558 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags), 7559 test_bit(VP_DPC_NEEDED, &vha->dpc_flags), 7560 test_bit(RELOGIN_NEEDED, &vha->dpc_flags), 7561 test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)); 7562 qla2xxx_wake_dpc(vha); 7563 w = 1; 7564 } 7565 7566 qla_heart_beat(vha, w); 7567 7568 qla2x00_restart_timer(vha, WATCH_INTERVAL); 7569 } 7570 7571 /* Firmware interface routines. 
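 * The qla_fw_blobs[] table below maps each ISP generation to its firmware
 * image. Images are fetched on first use via request_firmware(), cached
 * under qla_fw_lock, and dropped by qla2x00_release_firmware() at module
 * exit. A caller typically only needs the raw image, along the lines of
 * (illustrative):
 *
 *	blob = qla2x00_request_firmware(vha);
 *	if (blob)
 *		... use blob->fw->data and blob->fw->size ...
 *
 * where blob->fw is the struct firmware handle filled in by the firmware
 * loader.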
*/ 7572 7573 #define FW_ISP21XX 0 7574 #define FW_ISP22XX 1 7575 #define FW_ISP2300 2 7576 #define FW_ISP2322 3 7577 #define FW_ISP24XX 4 7578 #define FW_ISP25XX 5 7579 #define FW_ISP81XX 6 7580 #define FW_ISP82XX 7 7581 #define FW_ISP2031 8 7582 #define FW_ISP8031 9 7583 #define FW_ISP27XX 10 7584 #define FW_ISP28XX 11 7585 7586 #define FW_FILE_ISP21XX "ql2100_fw.bin" 7587 #define FW_FILE_ISP22XX "ql2200_fw.bin" 7588 #define FW_FILE_ISP2300 "ql2300_fw.bin" 7589 #define FW_FILE_ISP2322 "ql2322_fw.bin" 7590 #define FW_FILE_ISP24XX "ql2400_fw.bin" 7591 #define FW_FILE_ISP25XX "ql2500_fw.bin" 7592 #define FW_FILE_ISP81XX "ql8100_fw.bin" 7593 #define FW_FILE_ISP82XX "ql8200_fw.bin" 7594 #define FW_FILE_ISP2031 "ql2600_fw.bin" 7595 #define FW_FILE_ISP8031 "ql8300_fw.bin" 7596 #define FW_FILE_ISP27XX "ql2700_fw.bin" 7597 #define FW_FILE_ISP28XX "ql2800_fw.bin" 7598 7599 7600 static DEFINE_MUTEX(qla_fw_lock); 7601 7602 static struct fw_blob qla_fw_blobs[] = { 7603 { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, }, 7604 { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, }, 7605 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, }, 7606 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, }, 7607 { .name = FW_FILE_ISP24XX, }, 7608 { .name = FW_FILE_ISP25XX, }, 7609 { .name = FW_FILE_ISP81XX, }, 7610 { .name = FW_FILE_ISP82XX, }, 7611 { .name = FW_FILE_ISP2031, }, 7612 { .name = FW_FILE_ISP8031, }, 7613 { .name = FW_FILE_ISP27XX, }, 7614 { .name = FW_FILE_ISP28XX, }, 7615 { .name = NULL, }, 7616 }; 7617 7618 struct fw_blob * 7619 qla2x00_request_firmware(scsi_qla_host_t *vha) 7620 { 7621 struct qla_hw_data *ha = vha->hw; 7622 struct fw_blob *blob; 7623 7624 if (IS_QLA2100(ha)) { 7625 blob = &qla_fw_blobs[FW_ISP21XX]; 7626 } else if (IS_QLA2200(ha)) { 7627 blob = &qla_fw_blobs[FW_ISP22XX]; 7628 } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { 7629 blob = &qla_fw_blobs[FW_ISP2300]; 7630 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 7631 blob = &qla_fw_blobs[FW_ISP2322]; 7632 } else if (IS_QLA24XX_TYPE(ha)) { 7633 blob = &qla_fw_blobs[FW_ISP24XX]; 7634 } else if (IS_QLA25XX(ha)) { 7635 blob = &qla_fw_blobs[FW_ISP25XX]; 7636 } else if (IS_QLA81XX(ha)) { 7637 blob = &qla_fw_blobs[FW_ISP81XX]; 7638 } else if (IS_QLA82XX(ha)) { 7639 blob = &qla_fw_blobs[FW_ISP82XX]; 7640 } else if (IS_QLA2031(ha)) { 7641 blob = &qla_fw_blobs[FW_ISP2031]; 7642 } else if (IS_QLA8031(ha)) { 7643 blob = &qla_fw_blobs[FW_ISP8031]; 7644 } else if (IS_QLA27XX(ha)) { 7645 blob = &qla_fw_blobs[FW_ISP27XX]; 7646 } else if (IS_QLA28XX(ha)) { 7647 blob = &qla_fw_blobs[FW_ISP28XX]; 7648 } else { 7649 return NULL; 7650 } 7651 7652 if (!blob->name) 7653 return NULL; 7654 7655 mutex_lock(&qla_fw_lock); 7656 if (blob->fw) 7657 goto out; 7658 7659 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { 7660 ql_log(ql_log_warn, vha, 0x0063, 7661 "Failed to load firmware image (%s).\n", blob->name); 7662 blob->fw = NULL; 7663 blob = NULL; 7664 } 7665 7666 out: 7667 mutex_unlock(&qla_fw_lock); 7668 return blob; 7669 } 7670 7671 static void 7672 qla2x00_release_firmware(void) 7673 { 7674 struct fw_blob *blob; 7675 7676 mutex_lock(&qla_fw_lock); 7677 for (blob = qla_fw_blobs; blob->name; blob++) 7678 release_firmware(blob->fw); 7679 mutex_unlock(&qla_fw_lock); 7680 } 7681 7682 static void qla_pci_error_cleanup(scsi_qla_host_t *vha) 7683 { 7684 struct qla_hw_data *ha = vha->hw; 7685 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 7686 struct qla_qpair *qpair = NULL; 7687 struct scsi_qla_host *vp, *tvp; 
fc_port_t *fcport; 7689 int i; 7690 unsigned long flags; 7691 7692 ql_dbg(ql_dbg_aer, vha, 0x9000, 7693 "%s\n", __func__); 7694 ha->chip_reset++; 7695 7696 ha->base_qpair->chip_reset = ha->chip_reset; 7697 for (i = 0; i < ha->max_qpairs; i++) { 7698 if (ha->queue_pair_map[i]) 7699 ha->queue_pair_map[i]->chip_reset = 7700 ha->base_qpair->chip_reset; 7701 } 7702 7703 /* 7704 * purge mailbox might take a while. Slot Reset/chip reset 7705 * will take care of the purge 7706 */ 7707 7708 mutex_lock(&ha->mq_lock); 7709 ha->base_qpair->online = 0; 7710 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) 7711 qpair->online = 0; 7712 wmb(); 7713 mutex_unlock(&ha->mq_lock); 7714 7715 qla2x00_mark_all_devices_lost(vha); 7716 7717 spin_lock_irqsave(&ha->vport_slock, flags); 7718 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 7719 atomic_inc(&vp->vref_count); 7720 spin_unlock_irqrestore(&ha->vport_slock, flags); 7721 qla2x00_mark_all_devices_lost(vp); 7722 spin_lock_irqsave(&ha->vport_slock, flags); 7723 atomic_dec(&vp->vref_count); 7724 } 7725 spin_unlock_irqrestore(&ha->vport_slock, flags); 7726 7727 /* Clear all async request states across all VPs. */ 7728 list_for_each_entry(fcport, &vha->vp_fcports, list) 7729 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 7730 7731 spin_lock_irqsave(&ha->vport_slock, flags); 7732 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 7733 atomic_inc(&vp->vref_count); 7734 spin_unlock_irqrestore(&ha->vport_slock, flags); 7735 list_for_each_entry(fcport, &vp->vp_fcports, list) 7736 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 7737 spin_lock_irqsave(&ha->vport_slock, flags); 7738 atomic_dec(&vp->vref_count); 7739 } 7740 spin_unlock_irqrestore(&ha->vport_slock, flags); 7741 } 7742 7743 7744 static pci_ers_result_t 7745 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 7746 { 7747 scsi_qla_host_t *vha = pci_get_drvdata(pdev); 7748 struct qla_hw_data *ha = vha->hw; 7749 pci_ers_result_t ret = PCI_ERS_RESULT_NEED_RESET; 7750 7751 ql_log(ql_log_warn, vha, 0x9000, 7752 "PCI error detected, state %x.\n", state); 7753 ha->pci_error_state = QLA_PCI_ERR_DETECTED; 7754 7755 if (!atomic_read(&pdev->enable_cnt)) { 7756 ql_log(ql_log_info, vha, 0xffff, 7757 "PCI device is disabled, state %x\n", state); 7758 ret = PCI_ERS_RESULT_NEED_RESET; 7759 goto out; 7760 } 7761 7762 switch (state) { 7763 case pci_channel_io_normal: 7764 qla_pci_set_eeh_busy(vha); 7765 if (ql2xmqsupport || ql2xnvmeenable) { 7766 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); 7767 qla2xxx_wake_dpc(vha); 7768 } 7769 ret = PCI_ERS_RESULT_CAN_RECOVER; 7770 break; 7771 case pci_channel_io_frozen: 7772 qla_pci_set_eeh_busy(vha); 7773 ret = PCI_ERS_RESULT_NEED_RESET; 7774 break; 7775 case pci_channel_io_perm_failure: 7776 ha->flags.pci_channel_io_perm_failure = 1; 7777 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); 7778 if (ql2xmqsupport || ql2xnvmeenable) { 7779 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); 7780 qla2xxx_wake_dpc(vha); 7781 } 7782 ret = PCI_ERS_RESULT_DISCONNECT; 7783 } 7784 out: 7785 ql_dbg(ql_dbg_aer, vha, 0x600d, 7786 "PCI error detected returning [%x].\n", ret); 7787 return ret; 7788 } 7789 7790 static pci_ers_result_t 7791 qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) 7792 { 7793 int risc_paused = 0; 7794 uint32_t stat; 7795 unsigned long flags; 7796 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 7797 struct qla_hw_data *ha = base_vha->hw; 7798 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 7799
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 7800 7801 ql_log(ql_log_warn, base_vha, 0x9000, 7802 "mmio enabled\n"); 7803 7804 ha->pci_error_state = QLA_PCI_MMIO_ENABLED; 7805 7806 if (IS_QLA82XX(ha)) 7807 return PCI_ERS_RESULT_RECOVERED; 7808 7809 if (qla2x00_isp_reg_stat(ha)) { 7810 ql_log(ql_log_info, base_vha, 0x803f, 7811 "During mmio enabled, PCI/Register disconnect still detected.\n"); 7812 goto out; 7813 } 7814 7815 spin_lock_irqsave(&ha->hardware_lock, flags); 7816 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 7817 stat = rd_reg_word(&reg->hccr); 7818 if (stat & HCCR_RISC_PAUSE) 7819 risc_paused = 1; 7820 } else if (IS_QLA23XX(ha)) { 7821 stat = rd_reg_dword(&reg->u.isp2300.host_status); 7822 if (stat & HSR_RISC_PAUSED) 7823 risc_paused = 1; 7824 } else if (IS_FWI2_CAPABLE(ha)) { 7825 stat = rd_reg_dword(&reg24->host_status); 7826 if (stat & HSRX_RISC_PAUSED) 7827 risc_paused = 1; 7828 } 7829 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7830 7831 if (risc_paused) { 7832 ql_log(ql_log_info, base_vha, 0x9003, 7833 "RISC paused -- mmio_enabled, Dumping firmware.\n"); 7834 qla2xxx_dump_fw(base_vha); 7835 } 7836 out: 7837 /* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */ 7838 ql_dbg(ql_dbg_aer, base_vha, 0x600d, 7839 "mmio enabled returning.\n"); 7840 return PCI_ERS_RESULT_NEED_RESET; 7841 } 7842 7843 static pci_ers_result_t 7844 qla2xxx_pci_slot_reset(struct pci_dev *pdev) 7845 { 7846 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 7847 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 7848 struct qla_hw_data *ha = base_vha->hw; 7849 int rc; 7850 struct qla_qpair *qpair = NULL; 7851 7852 ql_log(ql_log_warn, base_vha, 0x9004, 7853 "Slot Reset.\n"); 7854 7855 ha->pci_error_state = QLA_PCI_SLOT_RESET; 7856 /* Workaround: the qla2xxx driver, which accesses hardware early in 7857 * recovery, needs the error state to be pci_channel_io_normal. 7858 * Otherwise mailbox commands time out.
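 * (pci_channel_offline() reports the device offline while error_state is
 * frozen, which makes the mailbox paths bail out early; forcing the
 * state back to pci_channel_io_normal lets the reset code talk to the
 * chip again.)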
*/ 7860 pdev->error_state = pci_channel_io_normal; 7861 7862 pci_restore_state(pdev); 7863 7864 /* pci_restore_state() clears the saved_state flag of the device, 7865 * so save the state again to keep a valid saved state for a later reset. 7866 */ 7867 pci_save_state(pdev); 7868 7869 if (ha->mem_only) 7870 rc = pci_enable_device_mem(pdev); 7871 else 7872 rc = pci_enable_device(pdev); 7873 7874 if (rc) { 7875 ql_log(ql_log_warn, base_vha, 0x9005, 7876 "Can't re-enable PCI device after reset.\n"); 7877 goto exit_slot_reset; 7878 } 7879 7880 7881 if (ha->isp_ops->pci_config(base_vha)) 7882 goto exit_slot_reset; 7883 7884 mutex_lock(&ha->mq_lock); 7885 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) 7886 qpair->online = 1; 7887 mutex_unlock(&ha->mq_lock); 7888 7889 ha->flags.eeh_busy = 0; 7890 base_vha->flags.online = 1; 7891 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 7892 ha->isp_ops->abort_isp(base_vha); 7893 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 7894 7895 if (qla2x00_isp_reg_stat(ha)) { 7896 ha->flags.eeh_busy = 1; 7897 qla_pci_error_cleanup(base_vha); 7898 ql_log(ql_log_warn, base_vha, 0x9005, 7899 "Device unable to recover from PCI error.\n"); 7900 } else { 7901 ret = PCI_ERS_RESULT_RECOVERED; 7902 } 7903 7904 exit_slot_reset: 7905 ql_dbg(ql_dbg_aer, base_vha, 0x900e, 7906 "Slot Reset returning %x.\n", ret); 7907 7908 return ret; 7909 } 7910 7911 static void 7912 qla2xxx_pci_resume(struct pci_dev *pdev) 7913 { 7914 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 7915 struct qla_hw_data *ha = base_vha->hw; 7916 int ret; 7917 7918 ql_log(ql_log_warn, base_vha, 0x900f, 7919 "Pci Resume.\n"); 7920 7921 7922 ret = qla2x00_wait_for_hba_online(base_vha); 7923 if (ret != QLA_SUCCESS) { 7924 ql_log(ql_log_fatal, base_vha, 0x9002, 7925 "The device failed to resume I/O from slot/link_reset.\n"); 7926 } 7927 ha->pci_error_state = QLA_PCI_RESUME; 7928 ql_dbg(ql_dbg_aer, base_vha, 0x600d, 7929 "Pci Resume returning.\n"); 7930 } 7931 7932 void qla_pci_set_eeh_busy(struct scsi_qla_host *vha) 7933 { 7934 struct qla_hw_data *ha = vha->hw; 7935 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 7936 bool do_cleanup = false; 7937 unsigned long flags; 7938 7939 if (ha->flags.eeh_busy) 7940 return; 7941 7942 spin_lock_irqsave(&base_vha->work_lock, flags); 7943 if (!ha->flags.eeh_busy) { 7944 ha->eeh_jif = jiffies; 7945 ha->flags.eeh_flush = 0; 7946 7947 ha->flags.eeh_busy = 1; 7948 do_cleanup = true; 7949 } 7950 spin_unlock_irqrestore(&base_vha->work_lock, flags); 7951 7952 if (do_cleanup) 7953 qla_pci_error_cleanup(base_vha); 7954 } 7955 7956 /* 7957 * This routine schedules a task to pause IO from interrupt context 7958 * when the caller sees a PCIe error event (register reads return all 0xf's). 7959 */ 7960 void qla_schedule_eeh_work(struct scsi_qla_host *vha) 7961 { 7962 struct qla_hw_data *ha = vha->hw; 7963 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 7964 7965 if (ha->flags.eeh_busy) 7966 return; 7967 7968 set_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags); 7969 qla2xxx_wake_dpc(base_vha); 7970 } 7971 7972 static void 7973 qla_pci_reset_prepare(struct pci_dev *pdev) 7974 { 7975 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 7976 struct qla_hw_data *ha = base_vha->hw; 7977 struct qla_qpair *qpair; 7978 7979 ql_log(ql_log_warn, base_vha, 0xffff, 7980 "%s.\n", __func__); 7981 7982 /* 7983 * PCI FLR/function reset is about to reset the 7984 * slot. Stop the chip to stop all DMA access.
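 * DMA must be quiesced before the FLR so the device cannot write into
 * memory the kernel may reuse while the function is being reset.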
7985 * It is assumed that pci_reset_done will be called 7986 * after FLR to resume Chip operation. 7987 */ 7988 ha->flags.eeh_busy = 1; 7989 mutex_lock(&ha->mq_lock); 7990 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) 7991 qpair->online = 0; 7992 mutex_unlock(&ha->mq_lock); 7993 7994 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 7995 qla2x00_abort_isp_cleanup(base_vha); 7996 qla2x00_abort_all_cmds(base_vha, DID_RESET << 16); 7997 } 7998 7999 static void 8000 qla_pci_reset_done(struct pci_dev *pdev) 8001 { 8002 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 8003 struct qla_hw_data *ha = base_vha->hw; 8004 struct qla_qpair *qpair; 8005 8006 ql_log(ql_log_warn, base_vha, 0xffff, 8007 "%s.\n", __func__); 8008 8009 /* 8010 * FLR just completed by PCI layer. Resume adapter 8011 */ 8012 ha->flags.eeh_busy = 0; 8013 mutex_lock(&ha->mq_lock); 8014 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) 8015 qpair->online = 1; 8016 mutex_unlock(&ha->mq_lock); 8017 8018 base_vha->flags.online = 1; 8019 ha->isp_ops->abort_isp(base_vha); 8020 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 8021 } 8022 8023 static void qla2xxx_map_queues(struct Scsi_Host *shost) 8024 { 8025 scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata; 8026 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 8027 8028 if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase) 8029 blk_mq_map_queues(qmap); 8030 else 8031 blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset); 8032 } 8033 8034 struct scsi_host_template qla2xxx_driver_template = { 8035 .module = THIS_MODULE, 8036 .name = QLA2XXX_DRIVER_NAME, 8037 .queuecommand = qla2xxx_queuecommand, 8038 8039 .eh_timed_out = fc_eh_timed_out, 8040 .eh_abort_handler = qla2xxx_eh_abort, 8041 .eh_should_retry_cmd = fc_eh_should_retry_cmd, 8042 .eh_device_reset_handler = qla2xxx_eh_device_reset, 8043 .eh_target_reset_handler = qla2xxx_eh_target_reset, 8044 .eh_bus_reset_handler = qla2xxx_eh_bus_reset, 8045 .eh_host_reset_handler = qla2xxx_eh_host_reset, 8046 8047 .slave_configure = qla2xxx_slave_configure, 8048 8049 .slave_alloc = qla2xxx_slave_alloc, 8050 .slave_destroy = qla2xxx_slave_destroy, 8051 .scan_finished = qla2xxx_scan_finished, 8052 .scan_start = qla2xxx_scan_start, 8053 .change_queue_depth = scsi_change_queue_depth, 8054 .map_queues = qla2xxx_map_queues, 8055 .this_id = -1, 8056 .cmd_per_lun = 3, 8057 .sg_tablesize = SG_ALL, 8058 8059 .max_sectors = 0xFFFF, 8060 .shost_groups = qla2x00_host_groups, 8061 8062 .supported_mode = MODE_INITIATOR, 8063 .track_queue_depth = 1, 8064 .cmd_size = sizeof(srb_t), 8065 }; 8066 8067 static const struct pci_error_handlers qla2xxx_err_handler = { 8068 .error_detected = qla2xxx_pci_error_detected, 8069 .mmio_enabled = qla2xxx_pci_mmio_enabled, 8070 .slot_reset = qla2xxx_pci_slot_reset, 8071 .resume = qla2xxx_pci_resume, 8072 .reset_prepare = qla_pci_reset_prepare, 8073 .reset_done = qla_pci_reset_done, 8074 }; 8075 8076 static struct pci_device_id qla2xxx_pci_tbl[] = { 8077 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) }, 8078 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) }, 8079 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) }, 8080 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) }, 8081 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) }, 8082 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) }, 8083 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) }, 8084 { 
PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) }, 8085 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) }, 8086 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) }, 8087 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, 8088 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, 8089 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, 8090 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) }, 8091 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) }, 8092 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, 8093 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) }, 8094 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) }, 8095 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, 8096 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) }, 8097 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) }, 8098 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) }, 8099 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) }, 8100 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) }, 8101 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) }, 8102 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) }, 8103 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) }, 8104 { 0 }, 8105 }; 8106 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 8107 8108 static struct pci_driver qla2xxx_pci_driver = { 8109 .name = QLA2XXX_DRIVER_NAME, 8110 .driver = { 8111 .owner = THIS_MODULE, 8112 }, 8113 .id_table = qla2xxx_pci_tbl, 8114 .probe = qla2x00_probe_one, 8115 .remove = qla2x00_remove_one, 8116 .shutdown = qla2x00_shutdown, 8117 .err_handler = &qla2xxx_err_handler, 8118 }; 8119 8120 static const struct file_operations apidev_fops = { 8121 .owner = THIS_MODULE, 8122 .llseek = noop_llseek, 8123 }; 8124 8125 /** 8126 * qla2x00_module_init - Module initialization. 
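 *
 * Sets up, in order: compile-time size checks of the firmware interface
 * structures, kernel tracing, the SRB slab cache, target-mode state
 * (qlt_init()), the FC transport templates and the apidev char device,
 * and finally the PCI driver registration. Failures unwind in reverse
 * order.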
8127 **/ 8128 static int __init 8129 qla2x00_module_init(void) 8130 { 8131 int ret = 0; 8132 8133 BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64); 8134 BUILD_BUG_ON(sizeof(cmd_entry_t) != 64); 8135 BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64); 8136 BUILD_BUG_ON(sizeof(cont_entry_t) != 64); 8137 BUILD_BUG_ON(sizeof(init_cb_t) != 96); 8138 BUILD_BUG_ON(sizeof(mrk_entry_t) != 64); 8139 BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64); 8140 BUILD_BUG_ON(sizeof(request_t) != 64); 8141 BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64); 8142 BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64); 8143 BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64); 8144 BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64); 8145 BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64); 8146 BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64); 8147 BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64); 8148 BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64); 8149 BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64); 8150 BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64); 8151 BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64); 8152 BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64); 8153 BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2604); 8154 BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424); 8155 BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164); 8156 BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260); 8157 BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260); 8158 BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16); 8159 BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64); 8160 BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256); 8161 BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24); 8162 BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256); 8163 BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288); 8164 BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216); 8165 BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64); 8166 BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64); 8167 BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64); 8168 BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64); 8169 BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128); 8170 BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128); 8171 BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64); 8172 BUILD_BUG_ON(sizeof(struct mbx_entry) != 64); 8173 BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252); 8174 BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64); 8175 BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512); 8176 BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512); 8177 BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64); 8178 BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64); 8179 BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64); 8180 BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634); 8181 BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100); 8182 BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976); 8183 BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228); 8184 BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52); 8185 BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172); 8186 BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524); 8187 BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8); 8188 BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12); 8189 BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24); 8190 BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420); 8191 BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28); 8192 BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32); 8193 BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 
51196); 8194 BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE); 8195 BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128); 8196 BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8); 8197 BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16); 8198 BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24); 8199 BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16); 8200 BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336); 8201 BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064); 8202 BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64); 8203 BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64); 8204 BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64); 8205 BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64); 8206 BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52); 8207 BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56); 8208 BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64); 8209 BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64); 8210 BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64); 8211 BUILD_BUG_ON(sizeof(sts21_entry_t) != 64); 8212 BUILD_BUG_ON(sizeof(sts22_entry_t) != 64); 8213 BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64); 8214 BUILD_BUG_ON(sizeof(sts_entry_t) != 64); 8215 BUILD_BUG_ON(sizeof(sw_info_t) != 32); 8216 BUILD_BUG_ON(sizeof(target_id_t) != 2); 8217 8218 qla_trace_init(); 8219 8220 /* Allocate cache for SRBs. */ 8221 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, 8222 SLAB_HWCACHE_ALIGN, NULL); 8223 if (srb_cachep == NULL) { 8224 ql_log(ql_log_fatal, NULL, 0x0001, 8225 "Unable to allocate SRB cache...Failing load!.\n"); 8226 return -ENOMEM; 8227 } 8228 8229 /* Initialize target kmem_cache and mem_pools */ 8230 ret = qlt_init(); 8231 if (ret < 0) { 8232 goto destroy_cache; 8233 } else if (ret > 0) { 8234 /* 8235 * If initiator mode is explicitly disabled by qlt_init(), 8236 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from 8237 * performing scsi_scan_target() during a LOOP UP event. 8238 */ 8239 qla2xxx_transport_functions.disable_target_scan = 1; 8240 qla2xxx_transport_vport_functions.disable_target_scan = 1; 8241 } 8242 8243 /* Derive version string.
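 * QLA2XXX_VERSION plus a "-debug" suffix when extended error logging is
 * enabled; a hypothetical "10.02.09.200-k" would then be reported as
 * "10.02.09.200-k-debug".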
*/ 8244 strcpy(qla2x00_version_str, QLA2XXX_VERSION); 8245 if (ql2xextended_error_logging) 8246 strcat(qla2x00_version_str, "-debug"); 8247 if (ql2xextended_error_logging == 1) 8248 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; 8249 8250 qla2xxx_transport_template = 8251 fc_attach_transport(&qla2xxx_transport_functions); 8252 if (!qla2xxx_transport_template) { 8253 ql_log(ql_log_fatal, NULL, 0x0002, 8254 "fc_attach_transport failed...Failing load!.\n"); 8255 ret = -ENODEV; 8256 goto qlt_exit; 8257 } 8258 8259 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops); 8260 if (apidev_major < 0) { 8261 ql_log(ql_log_fatal, NULL, 0x0003, 8262 "Unable to register char device %s.\n", QLA2XXX_APIDEV); 8263 } 8264 8265 qla2xxx_transport_vport_template = 8266 fc_attach_transport(&qla2xxx_transport_vport_functions); 8267 if (!qla2xxx_transport_vport_template) { 8268 ql_log(ql_log_fatal, NULL, 0x0004, 8269 "fc_attach_transport vport failed...Failing load!.\n"); 8270 ret = -ENODEV; 8271 goto unreg_chrdev; 8272 } 8273 ql_log(ql_log_info, NULL, 0x0005, 8274 "QLogic Fibre Channel HBA Driver: %s.\n", 8275 qla2x00_version_str); 8276 ret = pci_register_driver(&qla2xxx_pci_driver); 8277 if (ret) { 8278 ql_log(ql_log_fatal, NULL, 0x0006, 8279 "pci_register_driver failed...ret=%d Failing load!.\n", 8280 ret); 8281 goto release_vport_transport; 8282 } 8283 return ret; 8284 8285 release_vport_transport: 8286 fc_release_transport(qla2xxx_transport_vport_template); 8287 8288 unreg_chrdev: 8289 if (apidev_major >= 0) 8290 unregister_chrdev(apidev_major, QLA2XXX_APIDEV); 8291 fc_release_transport(qla2xxx_transport_template); 8292 8293 qlt_exit: 8294 qlt_exit(); 8295 8296 destroy_cache: 8297 kmem_cache_destroy(srb_cachep); 8298 8299 qla_trace_uninit(); 8300 return ret; 8301 } 8302 8303 /** 8304 * qla2x00_module_exit - Module cleanup. 8305 **/ 8306 static void __exit 8307 qla2x00_module_exit(void) 8308 { 8309 pci_unregister_driver(&qla2xxx_pci_driver); 8310 qla2x00_release_firmware(); 8311 kmem_cache_destroy(ctx_cachep); 8312 fc_release_transport(qla2xxx_transport_vport_template); 8313 if (apidev_major >= 0) 8314 unregister_chrdev(apidev_major, QLA2XXX_APIDEV); 8315 fc_release_transport(qla2xxx_transport_template); 8316 qlt_exit(); 8317 kmem_cache_destroy(srb_cachep); 8318 qla_trace_uninit(); 8319 } 8320 8321 module_init(qla2x00_module_init); 8322 module_exit(qla2x00_module_exit); 8323 8324 MODULE_AUTHOR("QLogic Corporation"); 8325 MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver"); 8326 MODULE_LICENSE("GPL"); 8327 MODULE_FIRMWARE(FW_FILE_ISP21XX); 8328 MODULE_FIRMWARE(FW_FILE_ISP22XX); 8329 MODULE_FIRMWARE(FW_FILE_ISP2300); 8330 MODULE_FIRMWARE(FW_FILE_ISP2322); 8331 MODULE_FIRMWARE(FW_FILE_ISP24XX); 8332 MODULE_FIRMWARE(FW_FILE_ISP25XX); 8333
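
/*
 * Usage note (illustrative): as the init code above shows, loading the
 * module with ql2xextended_error_logging=1 expands the value to
 * QL_DBG_DEFAULT1_MASK and tags the reported version with "-debug":
 *
 *	modprobe qla2xxx ql2xextended_error_logging=1
 *
 * Writable parameters can also be changed on a running system through
 * /sys/module/qla2xxx/parameters/<name>.
 */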