1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx 4 * 5 * based on qla2x00t.c code: 6 * 7 * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net> 8 * Copyright (C) 2004 - 2005 Leonid Stoljar 9 * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us> 10 * Copyright (C) 2006 - 2010 ID7 Ltd. 11 * 12 * Forward port and refactoring to modern qla2xxx and target/configfs 13 * 14 * Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org> 15 */ 16 17 #include <linux/module.h> 18 #include <linux/init.h> 19 #include <linux/types.h> 20 #include <linux/blkdev.h> 21 #include <linux/interrupt.h> 22 #include <linux/pci.h> 23 #include <linux/delay.h> 24 #include <linux/list.h> 25 #include <linux/workqueue.h> 26 #include <asm/unaligned.h> 27 #include <scsi/scsi.h> 28 #include <scsi/scsi_host.h> 29 #include <scsi/scsi_tcq.h> 30 #include <target/target_core_base.h> 31 #include <target/target_core_fabric.h> 32 33 #include "qla_def.h" 34 #include "qla_target.h" 35 36 static int ql2xtgt_tape_enable; 37 module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR); 38 MODULE_PARM_DESC(ql2xtgt_tape_enable, 39 "Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER."); 40 41 static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED; 42 module_param(qlini_mode, charp, S_IRUGO); 43 MODULE_PARM_DESC(qlini_mode, 44 "Determines when initiator mode will be enabled. Possible values: " 45 "\"exclusive\" - initiator mode will be enabled on load, " 46 "disabled on enabling target mode and then on disabling target mode " 47 "enabled back; " 48 "\"disabled\" - initiator mode will never be enabled; " 49 "\"dual\" - Initiator Modes will be enabled. Target Mode can be activated " 50 "when ready " 51 "\"enabled\" (default) - initiator mode will always stay enabled."); 52 53 static int ql_dm_tgt_ex_pct = 0; 54 module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR); 55 MODULE_PARM_DESC(ql_dm_tgt_ex_pct, 56 "For Dual Mode (qlini_mode=dual), this parameter determines " 57 "the percentage of exchanges/cmds FW will allocate resources " 58 "for Target mode."); 59 60 int ql2xuctrlirq = 1; 61 module_param(ql2xuctrlirq, int, 0644); 62 MODULE_PARM_DESC(ql2xuctrlirq, 63 "User to control IRQ placement via smp_affinity." 64 "Valid with qlini_mode=disabled." 65 "1(default): enable"); 66 67 int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; 68 69 static int qla_sam_status = SAM_STAT_BUSY; 70 static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */ 71 72 /* 73 * From scsi/fc/fc_fcp.h 74 */ 75 enum fcp_resp_rsp_codes { 76 FCP_TMF_CMPL = 0, 77 FCP_DATA_LEN_INVALID = 1, 78 FCP_CMND_FIELDS_INVALID = 2, 79 FCP_DATA_PARAM_MISMATCH = 3, 80 FCP_TMF_REJECTED = 4, 81 FCP_TMF_FAILED = 5, 82 FCP_TMF_INVALID_LUN = 9, 83 }; 84 85 /* 86 * fc_pri_ta from scsi/fc/fc_fcp.h 87 */ 88 #define FCP_PTA_SIMPLE 0 /* simple task attribute */ 89 #define FCP_PTA_HEADQ 1 /* head of queue task attribute */ 90 #define FCP_PTA_ORDERED 2 /* ordered task attribute */ 91 #define FCP_PTA_ACA 4 /* auto. contingent allegiance */ 92 #define FCP_PTA_MASK 7 /* mask for task attribute field */ 93 #define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */ 94 #define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */ 95 96 /* 97 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which 98 * must be called under HW lock and could unlock/lock it inside. 99 * It isn't an issue, since in the current implementation on the time when 100 * those functions are called: 101 * 102 * - Either context is IRQ and only IRQ handler can modify HW data, 103 * including rings related fields, 104 * 105 * - Or access to target mode variables from struct qla_tgt doesn't 106 * cross those functions boundaries, except tgt_stop, which 107 * additionally protected by irq_cmd_count. 108 */ 109 /* Predefs for callbacks handed to qla2xxx LLD */ 110 static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha, 111 struct atio_from_isp *pkt, uint8_t); 112 static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp, 113 response_t *pkt); 114 static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, 115 int fn, void *iocb, int flags); 116 static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd 117 *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort); 118 static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, 119 struct atio_from_isp *atio, uint16_t status, int qfull); 120 static void qlt_disable_vha(struct scsi_qla_host *vha); 121 static void qlt_clear_tgt_db(struct qla_tgt *tgt); 122 static void qlt_send_notify_ack(struct qla_qpair *qpair, 123 struct imm_ntfy_from_isp *ntfy, 124 uint32_t add_flags, uint16_t resp_code, int resp_code_valid, 125 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan); 126 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, 127 struct imm_ntfy_from_isp *imm, int ha_locked); 128 static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha, 129 fc_port_t *fcport, bool local); 130 void qlt_unreg_sess(struct fc_port *sess); 131 static void qlt_24xx_handle_abts(struct scsi_qla_host *, 132 struct abts_recv_from_24xx *); 133 static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *, 134 uint16_t); 135 static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t); 136 static inline uint32_t qlt_make_handle(struct qla_qpair *); 137 138 /* 139 * Global Variables 140 */ 141 static struct kmem_cache *qla_tgt_mgmt_cmd_cachep; 142 struct kmem_cache *qla_tgt_plogi_cachep; 143 static mempool_t *qla_tgt_mgmt_cmd_mempool; 144 static struct workqueue_struct *qla_tgt_wq; 145 static DEFINE_MUTEX(qla_tgt_mutex); 146 static LIST_HEAD(qla_tgt_glist); 147 148 static const char *prot_op_str(u32 prot_op) 149 { 150 switch (prot_op) { 151 case TARGET_PROT_NORMAL: return "NORMAL"; 152 case TARGET_PROT_DIN_INSERT: return "DIN_INSERT"; 153 case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT"; 154 case TARGET_PROT_DIN_STRIP: return "DIN_STRIP"; 155 case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP"; 156 case TARGET_PROT_DIN_PASS: return "DIN_PASS"; 157 case TARGET_PROT_DOUT_PASS: return "DOUT_PASS"; 158 default: return "UNKNOWN"; 159 } 160 } 161 162 /* This API intentionally takes dest as a parameter, rather than returning 163 * int value to avoid caller forgetting to issue wmb() after the store */ 164 void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest) 165 { 166 scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev); 167 *dest = atomic_inc_return(&base_vha->generation_tick); 168 /* memory barrier */ 169 wmb(); 170 } 171 172 /* Might release hw lock, then reaquire!! */ 173 static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked) 174 { 175 /* Send marker if required */ 176 if (unlikely(vha->marker_needed != 0)) { 177 int rc = qla2x00_issue_marker(vha, vha_locked); 178 179 if (rc != QLA_SUCCESS) { 180 ql_dbg(ql_dbg_tgt, vha, 0xe03d, 181 "qla_target(%d): issue_marker() failed\n", 182 vha->vp_idx); 183 } 184 return rc; 185 } 186 return QLA_SUCCESS; 187 } 188 189 static inline 190 struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, 191 uint8_t *d_id) 192 { 193 struct scsi_qla_host *host; 194 uint32_t key = 0; 195 196 if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) && 197 (vha->d_id.b.al_pa == d_id[2])) 198 return vha; 199 200 key = (uint32_t)d_id[0] << 16; 201 key |= (uint32_t)d_id[1] << 8; 202 key |= (uint32_t)d_id[2]; 203 204 host = btree_lookup32(&vha->hw->tgt.host_map, key); 205 if (!host) 206 ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005, 207 "Unable to find host %06x\n", key); 208 209 return host; 210 } 211 212 static inline 213 struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha, 214 uint16_t vp_idx) 215 { 216 struct qla_hw_data *ha = vha->hw; 217 218 if (vha->vp_idx == vp_idx) 219 return vha; 220 221 BUG_ON(ha->tgt.tgt_vp_map == NULL); 222 if (likely(test_bit(vp_idx, ha->vp_idx_map))) 223 return ha->tgt.tgt_vp_map[vp_idx].vha; 224 225 return NULL; 226 } 227 228 static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha) 229 { 230 unsigned long flags; 231 232 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 233 234 vha->hw->tgt.num_pend_cmds++; 235 if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds) 236 vha->qla_stats.stat_max_pend_cmds = 237 vha->hw->tgt.num_pend_cmds; 238 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 239 } 240 static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha) 241 { 242 unsigned long flags; 243 244 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 245 vha->hw->tgt.num_pend_cmds--; 246 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 247 } 248 249 250 static void qlt_queue_unknown_atio(scsi_qla_host_t *vha, 251 struct atio_from_isp *atio, uint8_t ha_locked) 252 { 253 struct qla_tgt_sess_op *u; 254 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 255 unsigned long flags; 256 257 if (tgt->tgt_stop) { 258 ql_dbg(ql_dbg_async, vha, 0x502c, 259 "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped", 260 vha->vp_idx); 261 goto out_term; 262 } 263 264 u = kzalloc(sizeof(*u), GFP_ATOMIC); 265 if (u == NULL) 266 goto out_term; 267 268 u->vha = vha; 269 memcpy(&u->atio, atio, sizeof(*atio)); 270 INIT_LIST_HEAD(&u->cmd_list); 271 272 spin_lock_irqsave(&vha->cmd_list_lock, flags); 273 list_add_tail(&u->cmd_list, &vha->unknown_atio_list); 274 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 275 276 schedule_delayed_work(&vha->unknown_atio_work, 1); 277 278 out: 279 return; 280 281 out_term: 282 qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0); 283 goto out; 284 } 285 286 static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha, 287 uint8_t ha_locked) 288 { 289 struct qla_tgt_sess_op *u, *t; 290 scsi_qla_host_t *host; 291 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 292 unsigned long flags; 293 uint8_t queued = 0; 294 295 list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) { 296 if (u->aborted) { 297 ql_dbg(ql_dbg_async, vha, 0x502e, 298 "Freeing unknown %s %p, because of Abort\n", 299 "ATIO_TYPE7", u); 300 qlt_send_term_exchange(vha->hw->base_qpair, NULL, 301 &u->atio, ha_locked, 0); 302 goto abort; 303 } 304 305 host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id); 306 if (host != NULL) { 307 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f, 308 "Requeuing unknown ATIO_TYPE7 %p\n", u); 309 qlt_24xx_atio_pkt(host, &u->atio, ha_locked); 310 } else if (tgt->tgt_stop) { 311 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a, 312 "Freeing unknown %s %p, because tgt is being stopped\n", 313 "ATIO_TYPE7", u); 314 qlt_send_term_exchange(vha->hw->base_qpair, NULL, 315 &u->atio, ha_locked, 0); 316 } else { 317 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d, 318 "Reschedule u %p, vha %p, host %p\n", u, vha, host); 319 if (!queued) { 320 queued = 1; 321 schedule_delayed_work(&vha->unknown_atio_work, 322 1); 323 } 324 continue; 325 } 326 327 abort: 328 spin_lock_irqsave(&vha->cmd_list_lock, flags); 329 list_del(&u->cmd_list); 330 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 331 kfree(u); 332 } 333 } 334 335 void qlt_unknown_atio_work_fn(struct work_struct *work) 336 { 337 struct scsi_qla_host *vha = container_of(to_delayed_work(work), 338 struct scsi_qla_host, unknown_atio_work); 339 340 qlt_try_to_dequeue_unknown_atios(vha, 0); 341 } 342 343 static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, 344 struct atio_from_isp *atio, uint8_t ha_locked) 345 { 346 ql_dbg(ql_dbg_tgt, vha, 0xe072, 347 "%s: qla_target(%d): type %x ox_id %04x\n", 348 __func__, vha->vp_idx, atio->u.raw.entry_type, 349 be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); 350 351 switch (atio->u.raw.entry_type) { 352 case ATIO_TYPE7: 353 { 354 struct scsi_qla_host *host = qlt_find_host_by_d_id(vha, 355 atio->u.isp24.fcp_hdr.d_id); 356 if (unlikely(NULL == host)) { 357 ql_dbg(ql_dbg_tgt, vha, 0xe03e, 358 "qla_target(%d): Received ATIO_TYPE7 " 359 "with unknown d_id %x:%x:%x\n", vha->vp_idx, 360 atio->u.isp24.fcp_hdr.d_id[0], 361 atio->u.isp24.fcp_hdr.d_id[1], 362 atio->u.isp24.fcp_hdr.d_id[2]); 363 364 365 qlt_queue_unknown_atio(vha, atio, ha_locked); 366 break; 367 } 368 if (unlikely(!list_empty(&vha->unknown_atio_list))) 369 qlt_try_to_dequeue_unknown_atios(vha, ha_locked); 370 371 qlt_24xx_atio_pkt(host, atio, ha_locked); 372 break; 373 } 374 375 case IMMED_NOTIFY_TYPE: 376 { 377 struct scsi_qla_host *host = vha; 378 struct imm_ntfy_from_isp *entry = 379 (struct imm_ntfy_from_isp *)atio; 380 381 qlt_issue_marker(vha, ha_locked); 382 383 if ((entry->u.isp24.vp_index != 0xFF) && 384 (entry->u.isp24.nport_handle != 0xFFFF)) { 385 host = qlt_find_host_by_vp_idx(vha, 386 entry->u.isp24.vp_index); 387 if (unlikely(!host)) { 388 ql_dbg(ql_dbg_tgt, vha, 0xe03f, 389 "qla_target(%d): Received " 390 "ATIO (IMMED_NOTIFY_TYPE) " 391 "with unknown vp_index %d\n", 392 vha->vp_idx, entry->u.isp24.vp_index); 393 break; 394 } 395 } 396 qlt_24xx_atio_pkt(host, atio, ha_locked); 397 break; 398 } 399 400 case VP_RPT_ID_IOCB_TYPE: 401 qla24xx_report_id_acquisition(vha, 402 (struct vp_rpt_id_entry_24xx *)atio); 403 break; 404 405 case ABTS_RECV_24XX: 406 { 407 struct abts_recv_from_24xx *entry = 408 (struct abts_recv_from_24xx *)atio; 409 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, 410 entry->vp_index); 411 unsigned long flags; 412 413 if (unlikely(!host)) { 414 ql_dbg(ql_dbg_tgt, vha, 0xe00a, 415 "qla_target(%d): Response pkt (ABTS_RECV_24XX) " 416 "received, with unknown vp_index %d\n", 417 vha->vp_idx, entry->vp_index); 418 break; 419 } 420 if (!ha_locked) 421 spin_lock_irqsave(&host->hw->hardware_lock, flags); 422 qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio); 423 if (!ha_locked) 424 spin_unlock_irqrestore(&host->hw->hardware_lock, flags); 425 break; 426 } 427 428 /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */ 429 430 default: 431 ql_dbg(ql_dbg_tgt, vha, 0xe040, 432 "qla_target(%d): Received unknown ATIO atio " 433 "type %x\n", vha->vp_idx, atio->u.raw.entry_type); 434 break; 435 } 436 437 return false; 438 } 439 440 void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, 441 struct rsp_que *rsp, response_t *pkt) 442 { 443 switch (pkt->entry_type) { 444 case CTIO_CRC2: 445 ql_dbg(ql_dbg_tgt, vha, 0xe073, 446 "qla_target(%d):%s: CRC2 Response pkt\n", 447 vha->vp_idx, __func__); 448 /* fall through */ 449 case CTIO_TYPE7: 450 { 451 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 452 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, 453 entry->vp_index); 454 if (unlikely(!host)) { 455 ql_dbg(ql_dbg_tgt, vha, 0xe041, 456 "qla_target(%d): Response pkt (CTIO_TYPE7) " 457 "received, with unknown vp_index %d\n", 458 vha->vp_idx, entry->vp_index); 459 break; 460 } 461 qlt_response_pkt(host, rsp, pkt); 462 break; 463 } 464 465 case IMMED_NOTIFY_TYPE: 466 { 467 struct scsi_qla_host *host = vha; 468 struct imm_ntfy_from_isp *entry = 469 (struct imm_ntfy_from_isp *)pkt; 470 471 host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index); 472 if (unlikely(!host)) { 473 ql_dbg(ql_dbg_tgt, vha, 0xe042, 474 "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) " 475 "received, with unknown vp_index %d\n", 476 vha->vp_idx, entry->u.isp24.vp_index); 477 break; 478 } 479 qlt_response_pkt(host, rsp, pkt); 480 break; 481 } 482 483 case NOTIFY_ACK_TYPE: 484 { 485 struct scsi_qla_host *host = vha; 486 struct nack_to_isp *entry = (struct nack_to_isp *)pkt; 487 488 if (0xFF != entry->u.isp24.vp_index) { 489 host = qlt_find_host_by_vp_idx(vha, 490 entry->u.isp24.vp_index); 491 if (unlikely(!host)) { 492 ql_dbg(ql_dbg_tgt, vha, 0xe043, 493 "qla_target(%d): Response " 494 "pkt (NOTIFY_ACK_TYPE) " 495 "received, with unknown " 496 "vp_index %d\n", vha->vp_idx, 497 entry->u.isp24.vp_index); 498 break; 499 } 500 } 501 qlt_response_pkt(host, rsp, pkt); 502 break; 503 } 504 505 case ABTS_RECV_24XX: 506 { 507 struct abts_recv_from_24xx *entry = 508 (struct abts_recv_from_24xx *)pkt; 509 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, 510 entry->vp_index); 511 if (unlikely(!host)) { 512 ql_dbg(ql_dbg_tgt, vha, 0xe044, 513 "qla_target(%d): Response pkt " 514 "(ABTS_RECV_24XX) received, with unknown " 515 "vp_index %d\n", vha->vp_idx, entry->vp_index); 516 break; 517 } 518 qlt_response_pkt(host, rsp, pkt); 519 break; 520 } 521 522 case ABTS_RESP_24XX: 523 { 524 struct abts_resp_to_24xx *entry = 525 (struct abts_resp_to_24xx *)pkt; 526 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, 527 entry->vp_index); 528 if (unlikely(!host)) { 529 ql_dbg(ql_dbg_tgt, vha, 0xe045, 530 "qla_target(%d): Response pkt " 531 "(ABTS_RECV_24XX) received, with unknown " 532 "vp_index %d\n", vha->vp_idx, entry->vp_index); 533 break; 534 } 535 qlt_response_pkt(host, rsp, pkt); 536 break; 537 } 538 default: 539 qlt_response_pkt(vha, rsp, pkt); 540 break; 541 } 542 543 } 544 545 /* 546 * All qlt_plogi_ack_t operations are protected by hardware_lock 547 */ 548 static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport, 549 struct imm_ntfy_from_isp *ntfy, int type) 550 { 551 struct qla_work_evt *e; 552 553 e = qla2x00_alloc_work(vha, QLA_EVT_NACK); 554 if (!e) 555 return QLA_FUNCTION_FAILED; 556 557 e->u.nack.fcport = fcport; 558 e->u.nack.type = type; 559 memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp)); 560 return qla2x00_post_work(vha, e); 561 } 562 563 static 564 void qla2x00_async_nack_sp_done(void *s, int res) 565 { 566 struct srb *sp = (struct srb *)s; 567 struct scsi_qla_host *vha = sp->vha; 568 unsigned long flags; 569 570 ql_dbg(ql_dbg_disc, vha, 0x20f2, 571 "Async done-%s res %x %8phC type %d\n", 572 sp->name, res, sp->fcport->port_name, sp->type); 573 574 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 575 sp->fcport->flags &= ~FCF_ASYNC_SENT; 576 sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset; 577 578 switch (sp->type) { 579 case SRB_NACK_PLOGI: 580 sp->fcport->login_gen++; 581 sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP; 582 sp->fcport->logout_on_delete = 1; 583 sp->fcport->plogi_nack_done_deadline = jiffies + HZ; 584 sp->fcport->send_els_logo = 0; 585 break; 586 587 case SRB_NACK_PRLI: 588 sp->fcport->fw_login_state = DSC_LS_PRLI_COMP; 589 sp->fcport->deleted = 0; 590 sp->fcport->send_els_logo = 0; 591 592 if (!sp->fcport->login_succ && 593 !IS_SW_RESV_ADDR(sp->fcport->d_id)) { 594 sp->fcport->login_succ = 1; 595 596 vha->fcport_count++; 597 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 598 qla24xx_sched_upd_fcport(sp->fcport); 599 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 600 } else { 601 sp->fcport->login_retry = 0; 602 sp->fcport->disc_state = DSC_LOGIN_COMPLETE; 603 sp->fcport->deleted = 0; 604 sp->fcport->logout_on_delete = 1; 605 } 606 break; 607 608 case SRB_NACK_LOGO: 609 sp->fcport->login_gen++; 610 sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; 611 qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE); 612 break; 613 } 614 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 615 616 sp->free(sp); 617 } 618 619 int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, 620 struct imm_ntfy_from_isp *ntfy, int type) 621 { 622 int rval = QLA_FUNCTION_FAILED; 623 srb_t *sp; 624 char *c = NULL; 625 626 fcport->flags |= FCF_ASYNC_SENT; 627 switch (type) { 628 case SRB_NACK_PLOGI: 629 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 630 c = "PLOGI"; 631 break; 632 case SRB_NACK_PRLI: 633 fcport->fw_login_state = DSC_LS_PRLI_PEND; 634 fcport->deleted = 0; 635 c = "PRLI"; 636 break; 637 case SRB_NACK_LOGO: 638 fcport->fw_login_state = DSC_LS_LOGO_PEND; 639 c = "LOGO"; 640 break; 641 } 642 643 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); 644 if (!sp) 645 goto done; 646 647 sp->type = type; 648 sp->name = "nack"; 649 650 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; 651 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2); 652 653 sp->u.iocb_cmd.u.nack.ntfy = ntfy; 654 sp->done = qla2x00_async_nack_sp_done; 655 656 ql_dbg(ql_dbg_disc, vha, 0x20f4, 657 "Async-%s %8phC hndl %x %s\n", 658 sp->name, fcport->port_name, sp->handle, c); 659 660 rval = qla2x00_start_sp(sp); 661 if (rval != QLA_SUCCESS) 662 goto done_free_sp; 663 664 return rval; 665 666 done_free_sp: 667 sp->free(sp); 668 done: 669 fcport->flags &= ~FCF_ASYNC_SENT; 670 return rval; 671 } 672 673 void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e) 674 { 675 fc_port_t *t; 676 677 switch (e->u.nack.type) { 678 case SRB_NACK_PRLI: 679 t = e->u.nack.fcport; 680 flush_work(&t->del_work); 681 flush_work(&t->free_work); 682 mutex_lock(&vha->vha_tgt.tgt_mutex); 683 t = qlt_create_sess(vha, e->u.nack.fcport, 0); 684 mutex_unlock(&vha->vha_tgt.tgt_mutex); 685 if (t) { 686 ql_log(ql_log_info, vha, 0xd034, 687 "%s create sess success %p", __func__, t); 688 /* create sess has an extra kref */ 689 vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport); 690 } 691 break; 692 } 693 qla24xx_async_notify_ack(vha, e->u.nack.fcport, 694 (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type); 695 } 696 697 void qla24xx_delete_sess_fn(struct work_struct *work) 698 { 699 fc_port_t *fcport = container_of(work, struct fc_port, del_work); 700 struct qla_hw_data *ha = fcport->vha->hw; 701 702 if (fcport->se_sess) { 703 ha->tgt.tgt_ops->shutdown_sess(fcport); 704 ha->tgt.tgt_ops->put_sess(fcport); 705 } else { 706 qlt_unreg_sess(fcport); 707 } 708 } 709 710 /* 711 * Called from qla2x00_reg_remote_port() 712 */ 713 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) 714 { 715 struct qla_hw_data *ha = vha->hw; 716 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 717 struct fc_port *sess = fcport; 718 unsigned long flags; 719 720 if (!vha->hw->tgt.tgt_ops) 721 return; 722 723 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 724 if (tgt->tgt_stop) { 725 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 726 return; 727 } 728 729 if (fcport->disc_state == DSC_DELETE_PEND) { 730 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 731 return; 732 } 733 734 if (!sess->se_sess) { 735 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 736 737 mutex_lock(&vha->vha_tgt.tgt_mutex); 738 sess = qlt_create_sess(vha, fcport, false); 739 mutex_unlock(&vha->vha_tgt.tgt_mutex); 740 741 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 742 } else { 743 if (fcport->fw_login_state == DSC_LS_PRLI_COMP) { 744 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 745 return; 746 } 747 748 if (!kref_get_unless_zero(&sess->sess_kref)) { 749 ql_dbg(ql_dbg_disc, vha, 0x2107, 750 "%s: kref_get fail sess %8phC \n", 751 __func__, sess->port_name); 752 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 753 return; 754 } 755 756 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, 757 "qla_target(%u): %ssession for port %8phC " 758 "(loop ID %d) reappeared\n", vha->vp_idx, 759 sess->local ? "local " : "", sess->port_name, sess->loop_id); 760 761 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, 762 "Reappeared sess %p\n", sess); 763 764 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, 765 fcport->loop_id, 766 (fcport->flags & FCF_CONF_COMP_SUPPORTED)); 767 } 768 769 if (sess && sess->local) { 770 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, 771 "qla_target(%u): local session for " 772 "port %8phC (loop ID %d) became global\n", vha->vp_idx, 773 fcport->port_name, sess->loop_id); 774 sess->local = 0; 775 } 776 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 777 778 ha->tgt.tgt_ops->put_sess(sess); 779 } 780 781 /* 782 * This is a zero-base ref-counting solution, since hardware_lock 783 * guarantees that ref_count is not modified concurrently. 784 * Upon successful return content of iocb is undefined 785 */ 786 static struct qlt_plogi_ack_t * 787 qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id, 788 struct imm_ntfy_from_isp *iocb) 789 { 790 struct qlt_plogi_ack_t *pla; 791 792 list_for_each_entry(pla, &vha->plogi_ack_list, list) { 793 if (pla->id.b24 == id->b24) { 794 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d, 795 "%s %d %8phC Term INOT due to new INOT", 796 __func__, __LINE__, 797 pla->iocb.u.isp24.port_name); 798 qlt_send_term_imm_notif(vha, &pla->iocb, 1); 799 memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); 800 return pla; 801 } 802 } 803 804 pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC); 805 if (!pla) { 806 ql_dbg(ql_dbg_async, vha, 0x5088, 807 "qla_target(%d): Allocation of plogi_ack failed\n", 808 vha->vp_idx); 809 return NULL; 810 } 811 812 memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); 813 pla->id = *id; 814 list_add_tail(&pla->list, &vha->plogi_ack_list); 815 816 return pla; 817 } 818 819 void qlt_plogi_ack_unref(struct scsi_qla_host *vha, 820 struct qlt_plogi_ack_t *pla) 821 { 822 struct imm_ntfy_from_isp *iocb = &pla->iocb; 823 port_id_t port_id; 824 uint16_t loop_id; 825 fc_port_t *fcport = pla->fcport; 826 827 BUG_ON(!pla->ref_count); 828 pla->ref_count--; 829 830 if (pla->ref_count) 831 return; 832 833 ql_dbg(ql_dbg_disc, vha, 0x5089, 834 "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x" 835 " exch %#x ox_id %#x\n", iocb->u.isp24.port_name, 836 iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1], 837 iocb->u.isp24.port_id[0], 838 le16_to_cpu(iocb->u.isp24.nport_handle), 839 iocb->u.isp24.exchange_address, iocb->ox_id); 840 841 port_id.b.domain = iocb->u.isp24.port_id[2]; 842 port_id.b.area = iocb->u.isp24.port_id[1]; 843 port_id.b.al_pa = iocb->u.isp24.port_id[0]; 844 port_id.b.rsvd_1 = 0; 845 846 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); 847 848 fcport->loop_id = loop_id; 849 fcport->d_id = port_id; 850 if (iocb->u.isp24.status_subcode == ELS_PLOGI) 851 qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI); 852 else 853 qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI); 854 855 list_for_each_entry(fcport, &vha->vp_fcports, list) { 856 if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla) 857 fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL; 858 if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla) 859 fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL; 860 } 861 862 list_del(&pla->list); 863 kmem_cache_free(qla_tgt_plogi_cachep, pla); 864 } 865 866 void 867 qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla, 868 struct fc_port *sess, enum qlt_plogi_link_t link) 869 { 870 struct imm_ntfy_from_isp *iocb = &pla->iocb; 871 /* Inc ref_count first because link might already be pointing at pla */ 872 pla->ref_count++; 873 874 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097, 875 "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC" 876 " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n", 877 sess, link, sess->port_name, 878 iocb->u.isp24.port_name, iocb->u.isp24.port_id[2], 879 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], 880 pla->ref_count, pla, link); 881 882 if (link == QLT_PLOGI_LINK_CONFLICT) { 883 switch (sess->disc_state) { 884 case DSC_DELETED: 885 case DSC_DELETE_PEND: 886 pla->ref_count--; 887 return; 888 default: 889 break; 890 } 891 } 892 893 if (sess->plogi_link[link]) 894 qlt_plogi_ack_unref(vha, sess->plogi_link[link]); 895 896 if (link == QLT_PLOGI_LINK_SAME_WWN) 897 pla->fcport = sess; 898 899 sess->plogi_link[link] = pla; 900 } 901 902 typedef struct { 903 /* These fields must be initialized by the caller */ 904 port_id_t id; 905 /* 906 * number of cmds dropped while we were waiting for 907 * initiator to ack LOGO initialize to 1 if LOGO is 908 * triggered by a command, otherwise, to 0 909 */ 910 int cmd_count; 911 912 /* These fields are used by callee */ 913 struct list_head list; 914 } qlt_port_logo_t; 915 916 static void 917 qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo) 918 { 919 qlt_port_logo_t *tmp; 920 int res; 921 922 mutex_lock(&vha->vha_tgt.tgt_mutex); 923 924 list_for_each_entry(tmp, &vha->logo_list, list) { 925 if (tmp->id.b24 == logo->id.b24) { 926 tmp->cmd_count += logo->cmd_count; 927 mutex_unlock(&vha->vha_tgt.tgt_mutex); 928 return; 929 } 930 } 931 932 list_add_tail(&logo->list, &vha->logo_list); 933 934 mutex_unlock(&vha->vha_tgt.tgt_mutex); 935 936 res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id); 937 938 mutex_lock(&vha->vha_tgt.tgt_mutex); 939 list_del(&logo->list); 940 mutex_unlock(&vha->vha_tgt.tgt_mutex); 941 942 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098, 943 "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n", 944 logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa, 945 logo->cmd_count, res); 946 } 947 948 void qlt_free_session_done(struct work_struct *work) 949 { 950 struct fc_port *sess = container_of(work, struct fc_port, 951 free_work); 952 struct qla_tgt *tgt = sess->tgt; 953 struct scsi_qla_host *vha = sess->vha; 954 struct qla_hw_data *ha = vha->hw; 955 unsigned long flags; 956 bool logout_started = false; 957 scsi_qla_host_t *base_vha; 958 struct qlt_plogi_ack_t *own = 959 sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; 960 961 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084, 962 "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" 963 " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n", 964 __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, 965 sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa, 966 sess->logout_on_delete, sess->keep_nport_handle, 967 sess->send_els_logo); 968 969 if (!IS_SW_RESV_ADDR(sess->d_id)) { 970 qla2x00_mark_device_lost(vha, sess, 0, 0); 971 972 if (sess->send_els_logo) { 973 qlt_port_logo_t logo; 974 975 logo.id = sess->d_id; 976 logo.cmd_count = 0; 977 if (!own) 978 qlt_send_first_logo(vha, &logo); 979 sess->send_els_logo = 0; 980 } 981 982 if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) { 983 int rc; 984 985 if (!own || 986 (own && 987 (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) { 988 rc = qla2x00_post_async_logout_work(vha, sess, 989 NULL); 990 if (rc != QLA_SUCCESS) 991 ql_log(ql_log_warn, vha, 0xf085, 992 "Schedule logo failed sess %p rc %d\n", 993 sess, rc); 994 else 995 logout_started = true; 996 } else if (own && (own->iocb.u.isp24.status_subcode == 997 ELS_PRLI) && ha->flags.rida_fmt2) { 998 rc = qla2x00_post_async_prlo_work(vha, sess, 999 NULL); 1000 if (rc != QLA_SUCCESS) 1001 ql_log(ql_log_warn, vha, 0xf085, 1002 "Schedule PRLO failed sess %p rc %d\n", 1003 sess, rc); 1004 else 1005 logout_started = true; 1006 } 1007 } /* if sess->logout_on_delete */ 1008 1009 if (sess->nvme_flag & NVME_FLAG_REGISTERED && 1010 !(sess->nvme_flag & NVME_FLAG_DELETING)) { 1011 sess->nvme_flag |= NVME_FLAG_DELETING; 1012 qla_nvme_unregister_remote_port(sess); 1013 } 1014 } 1015 1016 /* 1017 * Release the target session for FC Nexus from fabric module code. 1018 */ 1019 if (sess->se_sess != NULL) 1020 ha->tgt.tgt_ops->free_session(sess); 1021 1022 if (logout_started) { 1023 bool traced = false; 1024 1025 while (!READ_ONCE(sess->logout_completed)) { 1026 if (!traced) { 1027 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, 1028 "%s: waiting for sess %p logout\n", 1029 __func__, sess); 1030 traced = true; 1031 } 1032 msleep(100); 1033 } 1034 1035 ql_dbg(ql_dbg_disc, vha, 0xf087, 1036 "%s: sess %p logout completed\n", __func__, sess); 1037 } 1038 1039 if (sess->logo_ack_needed) { 1040 sess->logo_ack_needed = 0; 1041 qla24xx_async_notify_ack(vha, sess, 1042 (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO); 1043 } 1044 1045 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1046 if (sess->se_sess) { 1047 sess->se_sess = NULL; 1048 if (tgt && !IS_SW_RESV_ADDR(sess->d_id)) 1049 tgt->sess_count--; 1050 } 1051 1052 sess->disc_state = DSC_DELETED; 1053 sess->fw_login_state = DSC_LS_PORT_UNAVAIL; 1054 sess->deleted = QLA_SESS_DELETED; 1055 1056 if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) { 1057 vha->fcport_count--; 1058 sess->login_succ = 0; 1059 } 1060 1061 qla2x00_clear_loop_id(sess); 1062 1063 if (sess->conflict) { 1064 sess->conflict->login_pause = 0; 1065 sess->conflict = NULL; 1066 if (!test_bit(UNLOADING, &vha->dpc_flags)) 1067 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 1068 } 1069 1070 { 1071 struct qlt_plogi_ack_t *con = 1072 sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]; 1073 struct imm_ntfy_from_isp *iocb; 1074 1075 own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; 1076 1077 if (con) { 1078 iocb = &con->iocb; 1079 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099, 1080 "se_sess %p / sess %p port %8phC is gone," 1081 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n", 1082 sess->se_sess, sess, sess->port_name, 1083 own ? "releasing own PLOGI" : "no own PLOGI pending", 1084 own ? own->ref_count : -1, 1085 iocb->u.isp24.port_name, con->ref_count); 1086 qlt_plogi_ack_unref(vha, con); 1087 sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL; 1088 } else { 1089 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a, 1090 "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n", 1091 sess->se_sess, sess, sess->port_name, 1092 own ? "releasing own PLOGI" : 1093 "no own PLOGI pending", 1094 own ? own->ref_count : -1); 1095 } 1096 1097 if (own) { 1098 sess->fw_login_state = DSC_LS_PLOGI_PEND; 1099 qlt_plogi_ack_unref(vha, own); 1100 sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL; 1101 } 1102 } 1103 1104 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1105 1106 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, 1107 "Unregistration of sess %p %8phC finished fcp_cnt %d\n", 1108 sess, sess->port_name, vha->fcport_count); 1109 1110 if (tgt && (tgt->sess_count == 0)) 1111 wake_up_all(&tgt->waitQ); 1112 1113 if (vha->fcport_count == 0) 1114 wake_up_all(&vha->fcport_waitQ); 1115 1116 base_vha = pci_get_drvdata(ha->pdev); 1117 1118 sess->free_pending = 0; 1119 1120 if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags)) 1121 return; 1122 1123 if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) { 1124 switch (vha->host->active_mode) { 1125 case MODE_INITIATOR: 1126 case MODE_DUAL: 1127 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 1128 qla2xxx_wake_dpc(vha); 1129 break; 1130 case MODE_TARGET: 1131 default: 1132 /* no-op */ 1133 break; 1134 } 1135 } 1136 } 1137 1138 /* ha->tgt.sess_lock supposed to be held on entry */ 1139 void qlt_unreg_sess(struct fc_port *sess) 1140 { 1141 struct scsi_qla_host *vha = sess->vha; 1142 unsigned long flags; 1143 1144 ql_dbg(ql_dbg_disc, sess->vha, 0x210a, 1145 "%s sess %p for deletion %8phC\n", 1146 __func__, sess, sess->port_name); 1147 1148 spin_lock_irqsave(&sess->vha->work_lock, flags); 1149 if (sess->free_pending) { 1150 spin_unlock_irqrestore(&sess->vha->work_lock, flags); 1151 return; 1152 } 1153 sess->free_pending = 1; 1154 spin_unlock_irqrestore(&sess->vha->work_lock, flags); 1155 1156 if (sess->se_sess) 1157 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); 1158 1159 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; 1160 sess->disc_state = DSC_DELETE_PEND; 1161 sess->last_rscn_gen = sess->rscn_gen; 1162 sess->last_login_gen = sess->login_gen; 1163 1164 INIT_WORK(&sess->free_work, qlt_free_session_done); 1165 schedule_work(&sess->free_work); 1166 } 1167 EXPORT_SYMBOL(qlt_unreg_sess); 1168 1169 static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) 1170 { 1171 struct qla_hw_data *ha = vha->hw; 1172 struct fc_port *sess = NULL; 1173 uint16_t loop_id; 1174 int res = 0; 1175 struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; 1176 unsigned long flags; 1177 1178 loop_id = le16_to_cpu(n->u.isp24.nport_handle); 1179 if (loop_id == 0xFFFF) { 1180 /* Global event */ 1181 atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); 1182 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1183 qlt_clear_tgt_db(vha->vha_tgt.qla_tgt); 1184 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1185 } else { 1186 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1187 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); 1188 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1189 } 1190 1191 ql_dbg(ql_dbg_tgt, vha, 0xe000, 1192 "Using sess for qla_tgt_reset: %p\n", sess); 1193 if (!sess) { 1194 res = -ESRCH; 1195 return res; 1196 } 1197 1198 ql_dbg(ql_dbg_tgt, vha, 0xe047, 1199 "scsi(%ld): resetting (session %p from port %8phC mcmd %x, " 1200 "loop_id %d)\n", vha->host_no, sess, sess->port_name, 1201 mcmd, loop_id); 1202 1203 return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK); 1204 } 1205 1206 static void qla24xx_chk_fcp_state(struct fc_port *sess) 1207 { 1208 if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) { 1209 sess->logout_on_delete = 0; 1210 sess->logo_ack_needed = 0; 1211 sess->fw_login_state = DSC_LS_PORT_UNAVAIL; 1212 sess->scan_state = 0; 1213 } 1214 } 1215 1216 void qlt_schedule_sess_for_deletion(struct fc_port *sess) 1217 { 1218 struct qla_tgt *tgt = sess->tgt; 1219 unsigned long flags; 1220 u16 sec; 1221 1222 switch (sess->disc_state) { 1223 case DSC_DELETE_PEND: 1224 return; 1225 case DSC_DELETED: 1226 if (tgt && tgt->tgt_stop && (tgt->sess_count == 0)) 1227 wake_up_all(&tgt->waitQ); 1228 if (sess->vha->fcport_count == 0) 1229 wake_up_all(&sess->vha->fcport_waitQ); 1230 1231 if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] && 1232 !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) 1233 return; 1234 break; 1235 case DSC_UPD_FCPORT: 1236 /* 1237 * This port is not done reporting to upper layer. 1238 * let it finish 1239 */ 1240 sess->next_disc_state = DSC_DELETE_PEND; 1241 sec = jiffies_to_msecs(jiffies - 1242 sess->jiffies_at_registration)/1000; 1243 if (sess->sec_since_registration < sec && sec && !(sec % 5)) { 1244 sess->sec_since_registration = sec; 1245 ql_dbg(ql_dbg_disc, sess->vha, 0xffff, 1246 "%s %8phC : Slow Rport registration(%d Sec)\n", 1247 __func__, sess->port_name, sec); 1248 } 1249 return; 1250 default: 1251 break; 1252 } 1253 1254 spin_lock_irqsave(&sess->vha->work_lock, flags); 1255 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { 1256 spin_unlock_irqrestore(&sess->vha->work_lock, flags); 1257 return; 1258 } 1259 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; 1260 spin_unlock_irqrestore(&sess->vha->work_lock, flags); 1261 1262 sess->disc_state = DSC_DELETE_PEND; 1263 1264 qla24xx_chk_fcp_state(sess); 1265 1266 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, 1267 "Scheduling sess %p for deletion %8phC\n", 1268 sess, sess->port_name); 1269 1270 INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); 1271 WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); 1272 } 1273 1274 static void qlt_clear_tgt_db(struct qla_tgt *tgt) 1275 { 1276 struct fc_port *sess; 1277 scsi_qla_host_t *vha = tgt->vha; 1278 1279 list_for_each_entry(sess, &vha->vp_fcports, list) { 1280 if (sess->se_sess) 1281 qlt_schedule_sess_for_deletion(sess); 1282 } 1283 1284 /* At this point tgt could be already dead */ 1285 } 1286 1287 static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id, 1288 uint16_t *loop_id) 1289 { 1290 struct qla_hw_data *ha = vha->hw; 1291 dma_addr_t gid_list_dma; 1292 struct gid_list_info *gid_list; 1293 char *id_iter; 1294 int res, rc, i; 1295 uint16_t entries; 1296 1297 gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 1298 &gid_list_dma, GFP_KERNEL); 1299 if (!gid_list) { 1300 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044, 1301 "qla_target(%d): DMA Alloc failed of %u\n", 1302 vha->vp_idx, qla2x00_gid_list_size(ha)); 1303 return -ENOMEM; 1304 } 1305 1306 /* Get list of logged in devices */ 1307 rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries); 1308 if (rc != QLA_SUCCESS) { 1309 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045, 1310 "qla_target(%d): get_id_list() failed: %x\n", 1311 vha->vp_idx, rc); 1312 res = -EBUSY; 1313 goto out_free_id_list; 1314 } 1315 1316 id_iter = (char *)gid_list; 1317 res = -ENOENT; 1318 for (i = 0; i < entries; i++) { 1319 struct gid_list_info *gid = (struct gid_list_info *)id_iter; 1320 1321 if ((gid->al_pa == s_id[2]) && 1322 (gid->area == s_id[1]) && 1323 (gid->domain == s_id[0])) { 1324 *loop_id = le16_to_cpu(gid->loop_id); 1325 res = 0; 1326 break; 1327 } 1328 id_iter += ha->gid_list_info_size; 1329 } 1330 1331 out_free_id_list: 1332 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 1333 gid_list, gid_list_dma); 1334 return res; 1335 } 1336 1337 /* 1338 * Adds an extra ref to allow to drop hw lock after adding sess to the list. 1339 * Caller must put it. 1340 */ 1341 static struct fc_port *qlt_create_sess( 1342 struct scsi_qla_host *vha, 1343 fc_port_t *fcport, 1344 bool local) 1345 { 1346 struct qla_hw_data *ha = vha->hw; 1347 struct fc_port *sess = fcport; 1348 unsigned long flags; 1349 1350 if (vha->vha_tgt.qla_tgt->tgt_stop) 1351 return NULL; 1352 1353 if (fcport->se_sess) { 1354 if (!kref_get_unless_zero(&sess->sess_kref)) { 1355 ql_dbg(ql_dbg_disc, vha, 0x20f6, 1356 "%s: kref_get_unless_zero failed for %8phC\n", 1357 __func__, sess->port_name); 1358 return NULL; 1359 } 1360 return fcport; 1361 } 1362 sess->tgt = vha->vha_tgt.qla_tgt; 1363 sess->local = local; 1364 1365 /* 1366 * Under normal circumstances we want to logout from firmware when 1367 * session eventually ends and release corresponding nport handle. 1368 * In the exception cases (e.g. when new PLOGI is waiting) corresponding 1369 * code will adjust these flags as necessary. 1370 */ 1371 sess->logout_on_delete = 1; 1372 sess->keep_nport_handle = 0; 1373 sess->logout_completed = 0; 1374 1375 if (ha->tgt.tgt_ops->check_initiator_node_acl(vha, 1376 &fcport->port_name[0], sess) < 0) { 1377 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015, 1378 "(%d) %8phC check_initiator_node_acl failed\n", 1379 vha->vp_idx, fcport->port_name); 1380 return NULL; 1381 } else { 1382 kref_init(&fcport->sess_kref); 1383 /* 1384 * Take an extra reference to ->sess_kref here to handle 1385 * fc_port access across ->tgt.sess_lock reaquire. 1386 */ 1387 if (!kref_get_unless_zero(&sess->sess_kref)) { 1388 ql_dbg(ql_dbg_disc, vha, 0x20f7, 1389 "%s: kref_get_unless_zero failed for %8phC\n", 1390 __func__, sess->port_name); 1391 return NULL; 1392 } 1393 1394 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1395 if (!IS_SW_RESV_ADDR(sess->d_id)) 1396 vha->vha_tgt.qla_tgt->sess_count++; 1397 1398 qlt_do_generation_tick(vha, &sess->generation); 1399 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1400 } 1401 1402 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, 1403 "Adding sess %p se_sess %p to tgt %p sess_count %d\n", 1404 sess, sess->se_sess, vha->vha_tgt.qla_tgt, 1405 vha->vha_tgt.qla_tgt->sess_count); 1406 1407 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, 1408 "qla_target(%d): %ssession for wwn %8phC (loop_id %d, " 1409 "s_id %x:%x:%x, confirmed completion %ssupported) added\n", 1410 vha->vp_idx, local ? "local " : "", fcport->port_name, 1411 fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area, 1412 sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not "); 1413 1414 return sess; 1415 } 1416 1417 /* 1418 * max_gen - specifies maximum session generation 1419 * at which this deletion requestion is still valid 1420 */ 1421 void 1422 qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) 1423 { 1424 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 1425 struct fc_port *sess = fcport; 1426 unsigned long flags; 1427 1428 if (!vha->hw->tgt.tgt_ops) 1429 return; 1430 1431 if (!tgt) 1432 return; 1433 1434 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 1435 if (tgt->tgt_stop) { 1436 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1437 return; 1438 } 1439 if (!sess->se_sess) { 1440 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1441 return; 1442 } 1443 1444 if (max_gen - sess->generation < 0) { 1445 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1446 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092, 1447 "Ignoring stale deletion request for se_sess %p / sess %p" 1448 " for port %8phC, req_gen %d, sess_gen %d\n", 1449 sess->se_sess, sess, sess->port_name, max_gen, 1450 sess->generation); 1451 return; 1452 } 1453 1454 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); 1455 1456 sess->local = 1; 1457 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1458 qlt_schedule_sess_for_deletion(sess); 1459 } 1460 1461 static inline int test_tgt_sess_count(struct qla_tgt *tgt) 1462 { 1463 struct qla_hw_data *ha = tgt->ha; 1464 unsigned long flags; 1465 int res; 1466 /* 1467 * We need to protect against race, when tgt is freed before or 1468 * inside wake_up() 1469 */ 1470 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1471 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002, 1472 "tgt %p, sess_count=%d\n", 1473 tgt, tgt->sess_count); 1474 res = (tgt->sess_count == 0); 1475 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1476 1477 return res; 1478 } 1479 1480 /* Called by tcm_qla2xxx configfs code */ 1481 int qlt_stop_phase1(struct qla_tgt *tgt) 1482 { 1483 struct scsi_qla_host *vha = tgt->vha; 1484 struct qla_hw_data *ha = tgt->ha; 1485 unsigned long flags; 1486 1487 mutex_lock(&ha->optrom_mutex); 1488 mutex_lock(&qla_tgt_mutex); 1489 1490 if (tgt->tgt_stop || tgt->tgt_stopped) { 1491 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e, 1492 "Already in tgt->tgt_stop or tgt_stopped state\n"); 1493 mutex_unlock(&qla_tgt_mutex); 1494 mutex_unlock(&ha->optrom_mutex); 1495 return -EPERM; 1496 } 1497 1498 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n", 1499 vha->host_no, vha); 1500 /* 1501 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted]. 1502 * Lock is needed, because we still can get an incoming packet. 1503 */ 1504 mutex_lock(&vha->vha_tgt.tgt_mutex); 1505 tgt->tgt_stop = 1; 1506 qlt_clear_tgt_db(tgt); 1507 mutex_unlock(&vha->vha_tgt.tgt_mutex); 1508 mutex_unlock(&qla_tgt_mutex); 1509 1510 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009, 1511 "Waiting for sess works (tgt %p)", tgt); 1512 spin_lock_irqsave(&tgt->sess_work_lock, flags); 1513 while (!list_empty(&tgt->sess_works_list)) { 1514 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 1515 flush_scheduled_work(); 1516 spin_lock_irqsave(&tgt->sess_work_lock, flags); 1517 } 1518 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 1519 1520 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a, 1521 "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count); 1522 1523 wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ); 1524 1525 /* Big hammer */ 1526 if (!ha->flags.host_shutting_down && 1527 (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))) 1528 qlt_disable_vha(vha); 1529 1530 /* Wait for sessions to clear out (just in case) */ 1531 wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ); 1532 mutex_unlock(&ha->optrom_mutex); 1533 1534 return 0; 1535 } 1536 EXPORT_SYMBOL(qlt_stop_phase1); 1537 1538 /* Called by tcm_qla2xxx configfs code */ 1539 void qlt_stop_phase2(struct qla_tgt *tgt) 1540 { 1541 scsi_qla_host_t *vha = tgt->vha; 1542 1543 if (tgt->tgt_stopped) { 1544 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f, 1545 "Already in tgt->tgt_stopped state\n"); 1546 dump_stack(); 1547 return; 1548 } 1549 if (!tgt->tgt_stop) { 1550 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b, 1551 "%s: phase1 stop is not completed\n", __func__); 1552 dump_stack(); 1553 return; 1554 } 1555 1556 mutex_lock(&vha->vha_tgt.tgt_mutex); 1557 tgt->tgt_stop = 0; 1558 tgt->tgt_stopped = 1; 1559 mutex_unlock(&vha->vha_tgt.tgt_mutex); 1560 1561 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n", 1562 tgt); 1563 1564 switch (vha->qlini_mode) { 1565 case QLA2XXX_INI_MODE_EXCLUSIVE: 1566 vha->flags.online = 1; 1567 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1568 break; 1569 default: 1570 break; 1571 } 1572 } 1573 EXPORT_SYMBOL(qlt_stop_phase2); 1574 1575 /* Called from qlt_remove_target() -> qla2x00_remove_one() */ 1576 static void qlt_release(struct qla_tgt *tgt) 1577 { 1578 scsi_qla_host_t *vha = tgt->vha; 1579 void *node; 1580 u64 key = 0; 1581 u16 i; 1582 struct qla_qpair_hint *h; 1583 struct qla_hw_data *ha = vha->hw; 1584 1585 if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop && 1586 !tgt->tgt_stopped) 1587 qlt_stop_phase1(tgt); 1588 1589 if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped) 1590 qlt_stop_phase2(tgt); 1591 1592 for (i = 0; i < vha->hw->max_qpairs + 1; i++) { 1593 unsigned long flags; 1594 1595 h = &tgt->qphints[i]; 1596 if (h->qpair) { 1597 spin_lock_irqsave(h->qpair->qp_lock_ptr, flags); 1598 list_del(&h->hint_elem); 1599 spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags); 1600 h->qpair = NULL; 1601 } 1602 } 1603 kfree(tgt->qphints); 1604 mutex_lock(&qla_tgt_mutex); 1605 list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry); 1606 mutex_unlock(&qla_tgt_mutex); 1607 1608 btree_for_each_safe64(&tgt->lun_qpair_map, key, node) 1609 btree_remove64(&tgt->lun_qpair_map, key); 1610 1611 btree_destroy64(&tgt->lun_qpair_map); 1612 1613 if (vha->vp_idx) 1614 if (ha->tgt.tgt_ops && 1615 ha->tgt.tgt_ops->remove_target && 1616 vha->vha_tgt.target_lport_ptr) 1617 ha->tgt.tgt_ops->remove_target(vha); 1618 1619 vha->vha_tgt.qla_tgt = NULL; 1620 1621 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d, 1622 "Release of tgt %p finished\n", tgt); 1623 1624 kfree(tgt); 1625 } 1626 1627 /* ha->hardware_lock supposed to be held on entry */ 1628 static int qlt_sched_sess_work(struct qla_tgt *tgt, int type, 1629 const void *param, unsigned int param_size) 1630 { 1631 struct qla_tgt_sess_work_param *prm; 1632 unsigned long flags; 1633 1634 prm = kzalloc(sizeof(*prm), GFP_ATOMIC); 1635 if (!prm) { 1636 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050, 1637 "qla_target(%d): Unable to create session " 1638 "work, command will be refused", 0); 1639 return -ENOMEM; 1640 } 1641 1642 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e, 1643 "Scheduling work (type %d, prm %p)" 1644 " to find session for param %p (size %d, tgt %p)\n", 1645 type, prm, param, param_size, tgt); 1646 1647 prm->type = type; 1648 memcpy(&prm->tm_iocb, param, param_size); 1649 1650 spin_lock_irqsave(&tgt->sess_work_lock, flags); 1651 list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list); 1652 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 1653 1654 schedule_work(&tgt->sess_work); 1655 1656 return 0; 1657 } 1658 1659 /* 1660 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 1661 */ 1662 static void qlt_send_notify_ack(struct qla_qpair *qpair, 1663 struct imm_ntfy_from_isp *ntfy, 1664 uint32_t add_flags, uint16_t resp_code, int resp_code_valid, 1665 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan) 1666 { 1667 struct scsi_qla_host *vha = qpair->vha; 1668 struct qla_hw_data *ha = vha->hw; 1669 request_t *pkt; 1670 struct nack_to_isp *nack; 1671 1672 if (!ha->flags.fw_started) 1673 return; 1674 1675 ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha); 1676 1677 pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL); 1678 if (!pkt) { 1679 ql_dbg(ql_dbg_tgt, vha, 0xe049, 1680 "qla_target(%d): %s failed: unable to allocate " 1681 "request packet\n", vha->vp_idx, __func__); 1682 return; 1683 } 1684 1685 if (vha->vha_tgt.qla_tgt != NULL) 1686 vha->vha_tgt.qla_tgt->notify_ack_expected++; 1687 1688 pkt->entry_type = NOTIFY_ACK_TYPE; 1689 pkt->entry_count = 1; 1690 1691 nack = (struct nack_to_isp *)pkt; 1692 nack->ox_id = ntfy->ox_id; 1693 1694 nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE; 1695 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; 1696 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { 1697 nack->u.isp24.flags = ntfy->u.isp24.flags & 1698 cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); 1699 } 1700 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; 1701 nack->u.isp24.status = ntfy->u.isp24.status; 1702 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; 1703 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; 1704 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; 1705 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; 1706 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; 1707 nack->u.isp24.srr_flags = cpu_to_le16(srr_flags); 1708 nack->u.isp24.srr_reject_code = srr_reject_code; 1709 nack->u.isp24.srr_reject_code_expl = srr_explan; 1710 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; 1711 1712 ql_dbg(ql_dbg_tgt, vha, 0xe005, 1713 "qla_target(%d): Sending 24xx Notify Ack %d\n", 1714 vha->vp_idx, nack->u.isp24.status); 1715 1716 /* Memory Barrier */ 1717 wmb(); 1718 qla2x00_start_iocbs(vha, qpair->req); 1719 } 1720 1721 static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd) 1722 { 1723 struct scsi_qla_host *vha = mcmd->vha; 1724 struct qla_hw_data *ha = vha->hw; 1725 struct abts_resp_to_24xx *resp; 1726 uint32_t f_ctl, h; 1727 uint8_t *p; 1728 int rc; 1729 struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts; 1730 struct qla_qpair *qpair = mcmd->qpair; 1731 1732 ql_dbg(ql_dbg_tgt, vha, 0xe006, 1733 "Sending task mgmt ABTS response (ha=%p, status=%x)\n", 1734 ha, mcmd->fc_tm_rsp); 1735 1736 rc = qlt_check_reserve_free_req(qpair, 1); 1737 if (rc) { 1738 ql_dbg(ql_dbg_tgt, vha, 0xe04a, 1739 "qla_target(%d): %s failed: unable to allocate request packet\n", 1740 vha->vp_idx, __func__); 1741 return -EAGAIN; 1742 } 1743 1744 resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr; 1745 memset(resp, 0, sizeof(*resp)); 1746 1747 h = qlt_make_handle(qpair); 1748 if (unlikely(h == QLA_TGT_NULL_HANDLE)) { 1749 /* 1750 * CTIO type 7 from the firmware doesn't provide a way to 1751 * know the initiator's LOOP ID, hence we can't find 1752 * the session and, so, the command. 1753 */ 1754 return -EAGAIN; 1755 } else { 1756 qpair->req->outstanding_cmds[h] = (srb_t *)mcmd; 1757 } 1758 1759 resp->handle = MAKE_HANDLE(qpair->req->id, h); 1760 resp->entry_type = ABTS_RESP_24XX; 1761 resp->entry_count = 1; 1762 resp->nport_handle = abts->nport_handle; 1763 resp->vp_index = vha->vp_idx; 1764 resp->sof_type = abts->sof_type; 1765 resp->exchange_address = abts->exchange_address; 1766 resp->fcp_hdr_le = abts->fcp_hdr_le; 1767 f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP | 1768 F_CTL_LAST_SEQ | F_CTL_END_SEQ | 1769 F_CTL_SEQ_INITIATIVE); 1770 p = (uint8_t *)&f_ctl; 1771 resp->fcp_hdr_le.f_ctl[0] = *p++; 1772 resp->fcp_hdr_le.f_ctl[1] = *p++; 1773 resp->fcp_hdr_le.f_ctl[2] = *p; 1774 1775 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0]; 1776 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1]; 1777 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2]; 1778 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0]; 1779 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1]; 1780 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2]; 1781 1782 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort; 1783 if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) { 1784 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC; 1785 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID; 1786 resp->payload.ba_acct.low_seq_cnt = 0x0000; 1787 resp->payload.ba_acct.high_seq_cnt = 0xFFFF; 1788 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id; 1789 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id; 1790 } else { 1791 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT; 1792 resp->payload.ba_rjt.reason_code = 1793 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM; 1794 /* Other bytes are zero */ 1795 } 1796 1797 vha->vha_tgt.qla_tgt->abts_resp_expected++; 1798 1799 /* Memory Barrier */ 1800 wmb(); 1801 if (qpair->reqq_start_iocbs) 1802 qpair->reqq_start_iocbs(qpair); 1803 else 1804 qla2x00_start_iocbs(vha, qpair->req); 1805 1806 return rc; 1807 } 1808 1809 /* 1810 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 1811 */ 1812 static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair, 1813 struct abts_recv_from_24xx *abts, uint32_t status, 1814 bool ids_reversed) 1815 { 1816 struct scsi_qla_host *vha = qpair->vha; 1817 struct qla_hw_data *ha = vha->hw; 1818 struct abts_resp_to_24xx *resp; 1819 uint32_t f_ctl; 1820 uint8_t *p; 1821 1822 ql_dbg(ql_dbg_tgt, vha, 0xe006, 1823 "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n", 1824 ha, abts, status); 1825 1826 resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, 1827 NULL); 1828 if (!resp) { 1829 ql_dbg(ql_dbg_tgt, vha, 0xe04a, 1830 "qla_target(%d): %s failed: unable to allocate " 1831 "request packet", vha->vp_idx, __func__); 1832 return; 1833 } 1834 1835 resp->entry_type = ABTS_RESP_24XX; 1836 resp->handle = QLA_TGT_SKIP_HANDLE; 1837 resp->entry_count = 1; 1838 resp->nport_handle = abts->nport_handle; 1839 resp->vp_index = vha->vp_idx; 1840 resp->sof_type = abts->sof_type; 1841 resp->exchange_address = abts->exchange_address; 1842 resp->fcp_hdr_le = abts->fcp_hdr_le; 1843 f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP | 1844 F_CTL_LAST_SEQ | F_CTL_END_SEQ | 1845 F_CTL_SEQ_INITIATIVE); 1846 p = (uint8_t *)&f_ctl; 1847 resp->fcp_hdr_le.f_ctl[0] = *p++; 1848 resp->fcp_hdr_le.f_ctl[1] = *p++; 1849 resp->fcp_hdr_le.f_ctl[2] = *p; 1850 if (ids_reversed) { 1851 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0]; 1852 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1]; 1853 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2]; 1854 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0]; 1855 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1]; 1856 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2]; 1857 } else { 1858 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0]; 1859 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1]; 1860 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2]; 1861 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0]; 1862 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1]; 1863 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2]; 1864 } 1865 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort; 1866 if (status == FCP_TMF_CMPL) { 1867 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC; 1868 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID; 1869 resp->payload.ba_acct.low_seq_cnt = 0x0000; 1870 resp->payload.ba_acct.high_seq_cnt = 0xFFFF; 1871 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id; 1872 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id; 1873 } else { 1874 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT; 1875 resp->payload.ba_rjt.reason_code = 1876 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM; 1877 /* Other bytes are zero */ 1878 } 1879 1880 vha->vha_tgt.qla_tgt->abts_resp_expected++; 1881 1882 /* Memory Barrier */ 1883 wmb(); 1884 if (qpair->reqq_start_iocbs) 1885 qpair->reqq_start_iocbs(qpair); 1886 else 1887 qla2x00_start_iocbs(vha, qpair->req); 1888 } 1889 1890 /* 1891 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 1892 */ 1893 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, 1894 struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd) 1895 { 1896 struct ctio7_to_24xx *ctio; 1897 u16 tmp; 1898 struct abts_recv_from_24xx *entry; 1899 1900 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL); 1901 if (ctio == NULL) { 1902 ql_dbg(ql_dbg_tgt, vha, 0xe04b, 1903 "qla_target(%d): %s failed: unable to allocate " 1904 "request packet\n", vha->vp_idx, __func__); 1905 return; 1906 } 1907 1908 if (mcmd) 1909 /* abts from remote port */ 1910 entry = &mcmd->orig_iocb.abts; 1911 else 1912 /* abts from this driver. */ 1913 entry = (struct abts_recv_from_24xx *)pkt; 1914 1915 /* 1916 * We've got on entrance firmware's response on by us generated 1917 * ABTS response. So, in it ID fields are reversed. 1918 */ 1919 1920 ctio->entry_type = CTIO_TYPE7; 1921 ctio->entry_count = 1; 1922 ctio->nport_handle = entry->nport_handle; 1923 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 1924 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 1925 ctio->vp_index = vha->vp_idx; 1926 ctio->exchange_addr = entry->exchange_addr_to_abort; 1927 tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE); 1928 1929 if (mcmd) { 1930 ctio->initiator_id[0] = entry->fcp_hdr_le.s_id[0]; 1931 ctio->initiator_id[1] = entry->fcp_hdr_le.s_id[1]; 1932 ctio->initiator_id[2] = entry->fcp_hdr_le.s_id[2]; 1933 1934 if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) 1935 tmp |= (mcmd->abort_io_attr << 9); 1936 else if (qpair->retry_term_cnt & 1) 1937 tmp |= (0x4 << 9); 1938 } else { 1939 ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0]; 1940 ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1]; 1941 ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2]; 1942 1943 if (qpair->retry_term_cnt & 1) 1944 tmp |= (0x4 << 9); 1945 } 1946 ctio->u.status1.flags = cpu_to_le16(tmp); 1947 ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id; 1948 1949 ql_dbg(ql_dbg_tgt, vha, 0xe007, 1950 "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n", 1951 le16_to_cpu(ctio->u.status1.flags), 1952 le16_to_cpu(ctio->u.status1.ox_id), 1953 (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0); 1954 1955 /* Memory Barrier */ 1956 wmb(); 1957 if (qpair->reqq_start_iocbs) 1958 qpair->reqq_start_iocbs(qpair); 1959 else 1960 qla2x00_start_iocbs(vha, qpair->req); 1961 1962 if (mcmd) 1963 qlt_build_abts_resp_iocb(mcmd); 1964 else 1965 qlt_24xx_send_abts_resp(qpair, 1966 (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true); 1967 1968 } 1969 1970 /* drop cmds for the given lun 1971 * XXX only looks for cmds on the port through which lun reset was recieved 1972 * XXX does not go through the list of other port (which may have cmds 1973 * for the same lun) 1974 */ 1975 static void abort_cmds_for_lun(struct scsi_qla_host *vha, 1976 u64 lun, uint8_t *s_id) 1977 { 1978 struct qla_tgt_sess_op *op; 1979 struct qla_tgt_cmd *cmd; 1980 uint32_t key; 1981 unsigned long flags; 1982 1983 key = sid_to_key(s_id); 1984 spin_lock_irqsave(&vha->cmd_list_lock, flags); 1985 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { 1986 uint32_t op_key; 1987 u64 op_lun; 1988 1989 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 1990 op_lun = scsilun_to_int( 1991 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun); 1992 if (op_key == key && op_lun == lun) 1993 op->aborted = true; 1994 } 1995 1996 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { 1997 uint32_t op_key; 1998 u64 op_lun; 1999 2000 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 2001 op_lun = scsilun_to_int( 2002 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun); 2003 if (op_key == key && op_lun == lun) 2004 op->aborted = true; 2005 } 2006 2007 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { 2008 uint32_t cmd_key; 2009 u64 cmd_lun; 2010 2011 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); 2012 cmd_lun = scsilun_to_int( 2013 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun); 2014 if (cmd_key == key && cmd_lun == lun) 2015 cmd->aborted = 1; 2016 } 2017 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 2018 } 2019 2020 static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha, 2021 uint64_t unpacked_lun) 2022 { 2023 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 2024 struct qla_qpair_hint *h = NULL; 2025 2026 if (vha->flags.qpairs_available) { 2027 h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun); 2028 if (!h) 2029 h = &tgt->qphints[0]; 2030 } else { 2031 h = &tgt->qphints[0]; 2032 } 2033 2034 return h; 2035 } 2036 2037 static void qlt_do_tmr_work(struct work_struct *work) 2038 { 2039 struct qla_tgt_mgmt_cmd *mcmd = 2040 container_of(work, struct qla_tgt_mgmt_cmd, work); 2041 struct qla_hw_data *ha = mcmd->vha->hw; 2042 int rc = EIO; 2043 uint32_t tag; 2044 unsigned long flags; 2045 2046 switch (mcmd->tmr_func) { 2047 case QLA_TGT_ABTS: 2048 tag = mcmd->orig_iocb.abts.exchange_addr_to_abort; 2049 break; 2050 default: 2051 tag = 0; 2052 break; 2053 } 2054 2055 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun, 2056 mcmd->tmr_func, tag); 2057 2058 if (rc != 0) { 2059 spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags); 2060 switch (mcmd->tmr_func) { 2061 case QLA_TGT_ABTS: 2062 mcmd->fc_tm_rsp = FCP_TMF_REJECTED; 2063 qlt_build_abts_resp_iocb(mcmd); 2064 break; 2065 case QLA_TGT_LUN_RESET: 2066 case QLA_TGT_CLEAR_TS: 2067 case QLA_TGT_ABORT_TS: 2068 case QLA_TGT_CLEAR_ACA: 2069 case QLA_TGT_TARGET_RESET: 2070 qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio, 2071 qla_sam_status); 2072 break; 2073 2074 case QLA_TGT_ABORT_ALL: 2075 case QLA_TGT_NEXUS_LOSS_SESS: 2076 case QLA_TGT_NEXUS_LOSS: 2077 qlt_send_notify_ack(mcmd->qpair, 2078 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); 2079 break; 2080 } 2081 spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags); 2082 2083 ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052, 2084 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", 2085 mcmd->vha->vp_idx, rc); 2086 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 2087 } 2088 } 2089 2090 /* ha->hardware_lock supposed to be held on entry */ 2091 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, 2092 struct abts_recv_from_24xx *abts, struct fc_port *sess) 2093 { 2094 struct qla_hw_data *ha = vha->hw; 2095 struct qla_tgt_mgmt_cmd *mcmd; 2096 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0]; 2097 2098 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, 2099 "qla_target(%d): task abort (tag=%d)\n", 2100 vha->vp_idx, abts->exchange_addr_to_abort); 2101 2102 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 2103 if (mcmd == NULL) { 2104 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051, 2105 "qla_target(%d): %s: Allocation of ABORT cmd failed", 2106 vha->vp_idx, __func__); 2107 return -ENOMEM; 2108 } 2109 memset(mcmd, 0, sizeof(*mcmd)); 2110 mcmd->cmd_type = TYPE_TGT_TMCMD; 2111 mcmd->sess = sess; 2112 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); 2113 mcmd->reset_count = ha->base_qpair->chip_reset; 2114 mcmd->tmr_func = QLA_TGT_ABTS; 2115 mcmd->qpair = h->qpair; 2116 mcmd->vha = vha; 2117 2118 /* 2119 * LUN is looked up by target-core internally based on the passed 2120 * abts->exchange_addr_to_abort tag. 2121 */ 2122 mcmd->se_cmd.cpuid = h->cpuid; 2123 2124 if (ha->tgt.tgt_ops->find_cmd_by_tag) { 2125 struct qla_tgt_cmd *abort_cmd; 2126 2127 abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess, 2128 abts->exchange_addr_to_abort); 2129 if (abort_cmd && abort_cmd->qpair) { 2130 mcmd->qpair = abort_cmd->qpair; 2131 mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid; 2132 mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr; 2133 mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID; 2134 } 2135 } 2136 2137 INIT_WORK(&mcmd->work, qlt_do_tmr_work); 2138 queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work); 2139 2140 return 0; 2141 } 2142 2143 /* 2144 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 2145 */ 2146 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, 2147 struct abts_recv_from_24xx *abts) 2148 { 2149 struct qla_hw_data *ha = vha->hw; 2150 struct fc_port *sess; 2151 uint32_t tag = abts->exchange_addr_to_abort; 2152 uint8_t s_id[3]; 2153 int rc; 2154 unsigned long flags; 2155 2156 if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) { 2157 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053, 2158 "qla_target(%d): ABTS: Abort Sequence not " 2159 "supported\n", vha->vp_idx); 2160 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2161 false); 2162 return; 2163 } 2164 2165 if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) { 2166 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010, 2167 "qla_target(%d): ABTS: Unknown Exchange " 2168 "Address received\n", vha->vp_idx); 2169 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2170 false); 2171 return; 2172 } 2173 2174 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011, 2175 "qla_target(%d): task abort (s_id=%x:%x:%x, " 2176 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2], 2177 abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag, 2178 le32_to_cpu(abts->fcp_hdr_le.parameter)); 2179 2180 s_id[0] = abts->fcp_hdr_le.s_id[2]; 2181 s_id[1] = abts->fcp_hdr_le.s_id[1]; 2182 s_id[2] = abts->fcp_hdr_le.s_id[0]; 2183 2184 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 2185 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 2186 if (!sess) { 2187 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012, 2188 "qla_target(%d): task abort for non-existent session\n", 2189 vha->vp_idx); 2190 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 2191 2192 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2193 false); 2194 return; 2195 } 2196 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 2197 2198 2199 if (sess->deleted) { 2200 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2201 false); 2202 return; 2203 } 2204 2205 rc = __qlt_24xx_handle_abts(vha, abts, sess); 2206 if (rc != 0) { 2207 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, 2208 "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n", 2209 vha->vp_idx, rc); 2210 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2211 false); 2212 return; 2213 } 2214 } 2215 2216 /* 2217 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 2218 */ 2219 static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair, 2220 struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code) 2221 { 2222 struct scsi_qla_host *ha = mcmd->vha; 2223 struct atio_from_isp *atio = &mcmd->orig_iocb.atio; 2224 struct ctio7_to_24xx *ctio; 2225 uint16_t temp; 2226 2227 ql_dbg(ql_dbg_tgt, ha, 0xe008, 2228 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", 2229 ha, atio, resp_code); 2230 2231 2232 ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL); 2233 if (ctio == NULL) { 2234 ql_dbg(ql_dbg_tgt, ha, 0xe04c, 2235 "qla_target(%d): %s failed: unable to allocate " 2236 "request packet\n", ha->vp_idx, __func__); 2237 return; 2238 } 2239 2240 ctio->entry_type = CTIO_TYPE7; 2241 ctio->entry_count = 1; 2242 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 2243 ctio->nport_handle = mcmd->sess->loop_id; 2244 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2245 ctio->vp_index = ha->vp_idx; 2246 ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2247 ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2248 ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2249 ctio->exchange_addr = atio->u.isp24.exchange_addr; 2250 temp = (atio->u.isp24.attr << 9)| 2251 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; 2252 ctio->u.status1.flags = cpu_to_le16(temp); 2253 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2254 ctio->u.status1.ox_id = cpu_to_le16(temp); 2255 ctio->u.status1.scsi_status = 2256 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); 2257 ctio->u.status1.response_len = cpu_to_le16(8); 2258 ctio->u.status1.sense_data[0] = resp_code; 2259 2260 /* Memory Barrier */ 2261 wmb(); 2262 if (qpair->reqq_start_iocbs) 2263 qpair->reqq_start_iocbs(qpair); 2264 else 2265 qla2x00_start_iocbs(ha, qpair->req); 2266 } 2267 2268 void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) 2269 { 2270 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 2271 } 2272 EXPORT_SYMBOL(qlt_free_mcmd); 2273 2274 /* 2275 * ha->hardware_lock supposed to be held on entry. Might drop it, then 2276 * reacquire 2277 */ 2278 void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, 2279 uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq) 2280 { 2281 struct atio_from_isp *atio = &cmd->atio; 2282 struct ctio7_to_24xx *ctio; 2283 uint16_t temp; 2284 struct scsi_qla_host *vha = cmd->vha; 2285 2286 ql_dbg(ql_dbg_tgt_dif, vha, 0x3066, 2287 "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, " 2288 "sense_key=%02x, asc=%02x, ascq=%02x", 2289 vha, atio, scsi_status, sense_key, asc, ascq); 2290 2291 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); 2292 if (!ctio) { 2293 ql_dbg(ql_dbg_async, vha, 0x3067, 2294 "qla2x00t(%ld): %s failed: unable to allocate request packet", 2295 vha->host_no, __func__); 2296 goto out; 2297 } 2298 2299 ctio->entry_type = CTIO_TYPE7; 2300 ctio->entry_count = 1; 2301 ctio->handle = QLA_TGT_SKIP_HANDLE; 2302 ctio->nport_handle = cmd->sess->loop_id; 2303 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2304 ctio->vp_index = vha->vp_idx; 2305 ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2306 ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2307 ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2308 ctio->exchange_addr = atio->u.isp24.exchange_addr; 2309 temp = (atio->u.isp24.attr << 9) | 2310 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; 2311 ctio->u.status1.flags = cpu_to_le16(temp); 2312 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2313 ctio->u.status1.ox_id = cpu_to_le16(temp); 2314 ctio->u.status1.scsi_status = 2315 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status); 2316 ctio->u.status1.response_len = cpu_to_le16(18); 2317 ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); 2318 2319 if (ctio->u.status1.residual != 0) 2320 ctio->u.status1.scsi_status |= 2321 cpu_to_le16(SS_RESIDUAL_UNDER); 2322 2323 /* Fixed format sense data. */ 2324 ctio->u.status1.sense_data[0] = 0x70; 2325 ctio->u.status1.sense_data[2] = sense_key; 2326 /* Additional sense length */ 2327 ctio->u.status1.sense_data[7] = 0xa; 2328 /* ASC and ASCQ */ 2329 ctio->u.status1.sense_data[12] = asc; 2330 ctio->u.status1.sense_data[13] = ascq; 2331 2332 /* Memory Barrier */ 2333 wmb(); 2334 2335 if (qpair->reqq_start_iocbs) 2336 qpair->reqq_start_iocbs(qpair); 2337 else 2338 qla2x00_start_iocbs(vha, qpair->req); 2339 2340 out: 2341 return; 2342 } 2343 2344 /* callback from target fabric module code */ 2345 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) 2346 { 2347 struct scsi_qla_host *vha = mcmd->sess->vha; 2348 struct qla_hw_data *ha = vha->hw; 2349 unsigned long flags; 2350 struct qla_qpair *qpair = mcmd->qpair; 2351 bool free_mcmd = true; 2352 2353 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013, 2354 "TM response mcmd (%p) status %#x state %#x", 2355 mcmd, mcmd->fc_tm_rsp, mcmd->flags); 2356 2357 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 2358 2359 if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) { 2360 /* 2361 * Either the port is not online or this request was from 2362 * previous life, just abort the processing. 2363 */ 2364 ql_dbg(ql_dbg_async, vha, 0xe100, 2365 "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n", 2366 vha->flags.online, qla2x00_reset_active(vha), 2367 mcmd->reset_count, qpair->chip_reset); 2368 ha->tgt.tgt_ops->free_mcmd(mcmd); 2369 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 2370 return; 2371 } 2372 2373 if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) { 2374 switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) { 2375 case ELS_LOGO: 2376 case ELS_PRLO: 2377 case ELS_TPRLO: 2378 ql_dbg(ql_dbg_disc, vha, 0x2106, 2379 "TM response logo %8phC status %#x state %#x", 2380 mcmd->sess->port_name, mcmd->fc_tm_rsp, 2381 mcmd->flags); 2382 qlt_schedule_sess_for_deletion(mcmd->sess); 2383 break; 2384 default: 2385 qlt_send_notify_ack(vha->hw->base_qpair, 2386 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); 2387 break; 2388 } 2389 } else { 2390 if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) { 2391 qlt_build_abts_resp_iocb(mcmd); 2392 free_mcmd = false; 2393 } else 2394 qlt_24xx_send_task_mgmt_ctio(qpair, mcmd, 2395 mcmd->fc_tm_rsp); 2396 } 2397 /* 2398 * Make the callback for ->free_mcmd() to queue_work() and invoke 2399 * target_put_sess_cmd() to drop cmd_kref to 1. The final 2400 * target_put_sess_cmd() call will be made from TFO->check_stop_free() 2401 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd 2402 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() -> 2403 * qlt_xmit_tm_rsp() returns here.. 2404 */ 2405 if (free_mcmd) 2406 ha->tgt.tgt_ops->free_mcmd(mcmd); 2407 2408 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 2409 } 2410 EXPORT_SYMBOL(qlt_xmit_tm_rsp); 2411 2412 /* No locks */ 2413 static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) 2414 { 2415 struct qla_tgt_cmd *cmd = prm->cmd; 2416 2417 BUG_ON(cmd->sg_cnt == 0); 2418 2419 prm->sg = (struct scatterlist *)cmd->sg; 2420 prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg, 2421 cmd->sg_cnt, cmd->dma_data_direction); 2422 if (unlikely(prm->seg_cnt == 0)) 2423 goto out_err; 2424 2425 prm->cmd->sg_mapped = 1; 2426 2427 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) { 2428 /* 2429 * If greater than four sg entries then we need to allocate 2430 * the continuation entries 2431 */ 2432 if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX) 2433 prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - 2434 QLA_TGT_DATASEGS_PER_CMD_24XX, 2435 QLA_TGT_DATASEGS_PER_CONT_24XX); 2436 } else { 2437 /* DIF */ 2438 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || 2439 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { 2440 prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz); 2441 prm->tot_dsds = prm->seg_cnt; 2442 } else 2443 prm->tot_dsds = prm->seg_cnt; 2444 2445 if (cmd->prot_sg_cnt) { 2446 prm->prot_sg = cmd->prot_sg; 2447 prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, 2448 cmd->prot_sg, cmd->prot_sg_cnt, 2449 cmd->dma_data_direction); 2450 if (unlikely(prm->prot_seg_cnt == 0)) 2451 goto out_err; 2452 2453 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || 2454 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { 2455 /* Dif Bundling not support here */ 2456 prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen, 2457 cmd->blk_sz); 2458 prm->tot_dsds += prm->prot_seg_cnt; 2459 } else 2460 prm->tot_dsds += prm->prot_seg_cnt; 2461 } 2462 } 2463 2464 return 0; 2465 2466 out_err: 2467 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d, 2468 "qla_target(%d): PCI mapping failed: sg_cnt=%d", 2469 0, prm->cmd->sg_cnt); 2470 return -1; 2471 } 2472 2473 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) 2474 { 2475 struct qla_hw_data *ha; 2476 struct qla_qpair *qpair; 2477 2478 if (!cmd->sg_mapped) 2479 return; 2480 2481 qpair = cmd->qpair; 2482 2483 dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt, 2484 cmd->dma_data_direction); 2485 cmd->sg_mapped = 0; 2486 2487 if (cmd->prot_sg_cnt) 2488 dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt, 2489 cmd->dma_data_direction); 2490 2491 if (!cmd->ctx) 2492 return; 2493 ha = vha->hw; 2494 if (cmd->ctx_dsd_alloced) 2495 qla2x00_clean_dsd_pool(ha, cmd->ctx); 2496 2497 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); 2498 } 2499 2500 static int qlt_check_reserve_free_req(struct qla_qpair *qpair, 2501 uint32_t req_cnt) 2502 { 2503 uint32_t cnt; 2504 struct req_que *req = qpair->req; 2505 2506 if (req->cnt < (req_cnt + 2)) { 2507 cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr : 2508 RD_REG_DWORD_RELAXED(req->req_q_out)); 2509 2510 if (req->ring_index < cnt) 2511 req->cnt = cnt - req->ring_index; 2512 else 2513 req->cnt = req->length - (req->ring_index - cnt); 2514 2515 if (unlikely(req->cnt < (req_cnt + 2))) 2516 return -EAGAIN; 2517 } 2518 2519 req->cnt -= req_cnt; 2520 2521 return 0; 2522 } 2523 2524 /* 2525 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 2526 */ 2527 static inline void *qlt_get_req_pkt(struct req_que *req) 2528 { 2529 /* Adjust ring index. */ 2530 req->ring_index++; 2531 if (req->ring_index == req->length) { 2532 req->ring_index = 0; 2533 req->ring_ptr = req->ring; 2534 } else { 2535 req->ring_ptr++; 2536 } 2537 return (cont_entry_t *)req->ring_ptr; 2538 } 2539 2540 /* ha->hardware_lock supposed to be held on entry */ 2541 static inline uint32_t qlt_make_handle(struct qla_qpair *qpair) 2542 { 2543 uint32_t h; 2544 int index; 2545 uint8_t found = 0; 2546 struct req_que *req = qpair->req; 2547 2548 h = req->current_outstanding_cmd; 2549 2550 for (index = 1; index < req->num_outstanding_cmds; index++) { 2551 h++; 2552 if (h == req->num_outstanding_cmds) 2553 h = 1; 2554 2555 if (h == QLA_TGT_SKIP_HANDLE) 2556 continue; 2557 2558 if (!req->outstanding_cmds[h]) { 2559 found = 1; 2560 break; 2561 } 2562 } 2563 2564 if (found) { 2565 req->current_outstanding_cmd = h; 2566 } else { 2567 ql_dbg(ql_dbg_io, qpair->vha, 0x305b, 2568 "qla_target(%d): Ran out of empty cmd slots\n", 2569 qpair->vha->vp_idx); 2570 h = QLA_TGT_NULL_HANDLE; 2571 } 2572 2573 return h; 2574 } 2575 2576 /* ha->hardware_lock supposed to be held on entry */ 2577 static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair, 2578 struct qla_tgt_prm *prm) 2579 { 2580 uint32_t h; 2581 struct ctio7_to_24xx *pkt; 2582 struct atio_from_isp *atio = &prm->cmd->atio; 2583 uint16_t temp; 2584 2585 pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr; 2586 prm->pkt = pkt; 2587 memset(pkt, 0, sizeof(*pkt)); 2588 2589 pkt->entry_type = CTIO_TYPE7; 2590 pkt->entry_count = (uint8_t)prm->req_cnt; 2591 pkt->vp_index = prm->cmd->vp_idx; 2592 2593 h = qlt_make_handle(qpair); 2594 if (unlikely(h == QLA_TGT_NULL_HANDLE)) { 2595 /* 2596 * CTIO type 7 from the firmware doesn't provide a way to 2597 * know the initiator's LOOP ID, hence we can't find 2598 * the session and, so, the command. 2599 */ 2600 return -EAGAIN; 2601 } else 2602 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; 2603 2604 pkt->handle = MAKE_HANDLE(qpair->req->id, h); 2605 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; 2606 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); 2607 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2608 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2609 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2610 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2611 pkt->exchange_addr = atio->u.isp24.exchange_addr; 2612 temp = atio->u.isp24.attr << 9; 2613 pkt->u.status0.flags |= cpu_to_le16(temp); 2614 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2615 pkt->u.status0.ox_id = cpu_to_le16(temp); 2616 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); 2617 2618 return 0; 2619 } 2620 2621 /* 2622 * ha->hardware_lock supposed to be held on entry. We have already made sure 2623 * that there is sufficient amount of request entries to not drop it. 2624 */ 2625 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm) 2626 { 2627 int cnt; 2628 struct dsd64 *cur_dsd; 2629 2630 /* Build continuation packets */ 2631 while (prm->seg_cnt > 0) { 2632 cont_a64_entry_t *cont_pkt64 = 2633 (cont_a64_entry_t *)qlt_get_req_pkt( 2634 prm->cmd->qpair->req); 2635 2636 /* 2637 * Make sure that from cont_pkt64 none of 2638 * 64-bit specific fields used for 32-bit 2639 * addressing. Cast to (cont_entry_t *) for 2640 * that. 2641 */ 2642 2643 memset(cont_pkt64, 0, sizeof(*cont_pkt64)); 2644 2645 cont_pkt64->entry_count = 1; 2646 cont_pkt64->sys_define = 0; 2647 2648 cont_pkt64->entry_type = CONTINUE_A64_TYPE; 2649 cur_dsd = cont_pkt64->dsd; 2650 2651 /* Load continuation entry data segments */ 2652 for (cnt = 0; 2653 cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt; 2654 cnt++, prm->seg_cnt--) { 2655 append_dsd64(&cur_dsd, prm->sg); 2656 prm->sg = sg_next(prm->sg); 2657 } 2658 } 2659 } 2660 2661 /* 2662 * ha->hardware_lock supposed to be held on entry. We have already made sure 2663 * that there is sufficient amount of request entries to not drop it. 2664 */ 2665 static void qlt_load_data_segments(struct qla_tgt_prm *prm) 2666 { 2667 int cnt; 2668 struct dsd64 *cur_dsd; 2669 struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt; 2670 2671 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen); 2672 2673 /* Setup packet address segment pointer */ 2674 cur_dsd = &pkt24->u.status0.dsd; 2675 2676 /* Set total data segment count */ 2677 if (prm->seg_cnt) 2678 pkt24->dseg_count = cpu_to_le16(prm->seg_cnt); 2679 2680 if (prm->seg_cnt == 0) { 2681 /* No data transfer */ 2682 cur_dsd->address = 0; 2683 cur_dsd->length = 0; 2684 return; 2685 } 2686 2687 /* If scatter gather */ 2688 2689 /* Load command entry data segments */ 2690 for (cnt = 0; 2691 (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt; 2692 cnt++, prm->seg_cnt--) { 2693 append_dsd64(&cur_dsd, prm->sg); 2694 prm->sg = sg_next(prm->sg); 2695 } 2696 2697 qlt_load_cont_data_segments(prm); 2698 } 2699 2700 static inline int qlt_has_data(struct qla_tgt_cmd *cmd) 2701 { 2702 return cmd->bufflen > 0; 2703 } 2704 2705 static void qlt_print_dif_err(struct qla_tgt_prm *prm) 2706 { 2707 struct qla_tgt_cmd *cmd; 2708 struct scsi_qla_host *vha; 2709 2710 /* asc 0x10=dif error */ 2711 if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) { 2712 cmd = prm->cmd; 2713 vha = cmd->vha; 2714 /* ASCQ */ 2715 switch (prm->sense_buffer[13]) { 2716 case 1: 2717 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b, 2718 "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2719 "se_cmd=%p tag[%x]", 2720 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2721 cmd->atio.u.isp24.exchange_addr); 2722 break; 2723 case 2: 2724 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c, 2725 "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2726 "se_cmd=%p tag[%x]", 2727 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2728 cmd->atio.u.isp24.exchange_addr); 2729 break; 2730 case 3: 2731 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f, 2732 "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2733 "se_cmd=%p tag[%x]", 2734 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2735 cmd->atio.u.isp24.exchange_addr); 2736 break; 2737 default: 2738 ql_dbg(ql_dbg_tgt_dif, vha, 0xe010, 2739 "BE detected Dif ERR: lba[%llx|%lld] len[%x] " 2740 "se_cmd=%p tag[%x]", 2741 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2742 cmd->atio.u.isp24.exchange_addr); 2743 break; 2744 } 2745 ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16); 2746 } 2747 } 2748 2749 /* 2750 * Called without ha->hardware_lock held 2751 */ 2752 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, 2753 struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status, 2754 uint32_t *full_req_cnt) 2755 { 2756 struct se_cmd *se_cmd = &cmd->se_cmd; 2757 struct qla_qpair *qpair = cmd->qpair; 2758 2759 prm->cmd = cmd; 2760 prm->tgt = cmd->tgt; 2761 prm->pkt = NULL; 2762 prm->rq_result = scsi_status; 2763 prm->sense_buffer = &cmd->sense_buffer[0]; 2764 prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER; 2765 prm->sg = NULL; 2766 prm->seg_cnt = -1; 2767 prm->req_cnt = 1; 2768 prm->residual = 0; 2769 prm->add_status_pkt = 0; 2770 prm->prot_sg = NULL; 2771 prm->prot_seg_cnt = 0; 2772 prm->tot_dsds = 0; 2773 2774 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) { 2775 if (qlt_pci_map_calc_cnt(prm) != 0) 2776 return -EAGAIN; 2777 } 2778 2779 *full_req_cnt = prm->req_cnt; 2780 2781 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 2782 prm->residual = se_cmd->residual_count; 2783 ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c, 2784 "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", 2785 prm->residual, se_cmd->tag, 2786 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, 2787 cmd->bufflen, prm->rq_result); 2788 prm->rq_result |= SS_RESIDUAL_UNDER; 2789 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 2790 prm->residual = se_cmd->residual_count; 2791 ql_dbg_qp(ql_dbg_io, qpair, 0x305d, 2792 "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", 2793 prm->residual, se_cmd->tag, se_cmd->t_task_cdb ? 2794 se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result); 2795 prm->rq_result |= SS_RESIDUAL_OVER; 2796 } 2797 2798 if (xmit_type & QLA_TGT_XMIT_STATUS) { 2799 /* 2800 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be 2801 * ignored in *xmit_response() below 2802 */ 2803 if (qlt_has_data(cmd)) { 2804 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) || 2805 (IS_FWI2_CAPABLE(cmd->vha->hw) && 2806 (prm->rq_result != 0))) { 2807 prm->add_status_pkt = 1; 2808 (*full_req_cnt)++; 2809 } 2810 } 2811 } 2812 2813 return 0; 2814 } 2815 2816 static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd, 2817 int sending_sense) 2818 { 2819 if (cmd->qpair->enable_class_2) 2820 return 0; 2821 2822 if (sending_sense) 2823 return cmd->conf_compl_supported; 2824 else 2825 return cmd->qpair->enable_explicit_conf && 2826 cmd->conf_compl_supported; 2827 } 2828 2829 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, 2830 struct qla_tgt_prm *prm) 2831 { 2832 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len, 2833 (uint32_t)sizeof(ctio->u.status1.sense_data)); 2834 ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS); 2835 if (qlt_need_explicit_conf(prm->cmd, 0)) { 2836 ctio->u.status0.flags |= cpu_to_le16( 2837 CTIO7_FLAGS_EXPLICIT_CONFORM | 2838 CTIO7_FLAGS_CONFORM_REQ); 2839 } 2840 ctio->u.status0.residual = cpu_to_le32(prm->residual); 2841 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result); 2842 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) { 2843 int i; 2844 2845 if (qlt_need_explicit_conf(prm->cmd, 1)) { 2846 if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) { 2847 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017, 2848 "Skipping EXPLICIT_CONFORM and " 2849 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ " 2850 "non GOOD status\n"); 2851 goto skip_explict_conf; 2852 } 2853 ctio->u.status1.flags |= cpu_to_le16( 2854 CTIO7_FLAGS_EXPLICIT_CONFORM | 2855 CTIO7_FLAGS_CONFORM_REQ); 2856 } 2857 skip_explict_conf: 2858 ctio->u.status1.flags &= 2859 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 2860 ctio->u.status1.flags |= 2861 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); 2862 ctio->u.status1.scsi_status |= 2863 cpu_to_le16(SS_SENSE_LEN_VALID); 2864 ctio->u.status1.sense_length = 2865 cpu_to_le16(prm->sense_buffer_len); 2866 for (i = 0; i < prm->sense_buffer_len/4; i++) 2867 ((uint32_t *)ctio->u.status1.sense_data)[i] = 2868 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); 2869 2870 qlt_print_dif_err(prm); 2871 2872 } else { 2873 ctio->u.status1.flags &= 2874 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 2875 ctio->u.status1.flags |= 2876 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); 2877 ctio->u.status1.sense_length = 0; 2878 memset(ctio->u.status1.sense_data, 0, 2879 sizeof(ctio->u.status1.sense_data)); 2880 } 2881 2882 /* Sense with len > 24, is it possible ??? */ 2883 } 2884 2885 static inline int 2886 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) 2887 { 2888 switch (se_cmd->prot_op) { 2889 case TARGET_PROT_DOUT_INSERT: 2890 case TARGET_PROT_DIN_STRIP: 2891 if (ql2xenablehba_err_chk >= 1) 2892 return 1; 2893 break; 2894 case TARGET_PROT_DOUT_PASS: 2895 case TARGET_PROT_DIN_PASS: 2896 if (ql2xenablehba_err_chk >= 2) 2897 return 1; 2898 break; 2899 case TARGET_PROT_DIN_INSERT: 2900 case TARGET_PROT_DOUT_STRIP: 2901 return 1; 2902 default: 2903 break; 2904 } 2905 return 0; 2906 } 2907 2908 static inline int 2909 qla_tgt_ref_mask_check(struct se_cmd *se_cmd) 2910 { 2911 switch (se_cmd->prot_op) { 2912 case TARGET_PROT_DIN_INSERT: 2913 case TARGET_PROT_DOUT_INSERT: 2914 case TARGET_PROT_DIN_STRIP: 2915 case TARGET_PROT_DOUT_STRIP: 2916 case TARGET_PROT_DIN_PASS: 2917 case TARGET_PROT_DOUT_PASS: 2918 return 1; 2919 default: 2920 return 0; 2921 } 2922 return 0; 2923 } 2924 2925 /* 2926 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command 2927 */ 2928 static void 2929 qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx, 2930 uint16_t *pfw_prot_opts) 2931 { 2932 struct se_cmd *se_cmd = &cmd->se_cmd; 2933 uint32_t lba = 0xffffffff & se_cmd->t_task_lba; 2934 scsi_qla_host_t *vha = cmd->tgt->vha; 2935 struct qla_hw_data *ha = vha->hw; 2936 uint32_t t32 = 0; 2937 2938 /* 2939 * wait till Mode Sense/Select cmd, modepage Ah, subpage 2 2940 * have been immplemented by TCM, before AppTag is avail. 2941 * Look for modesense_handlers[] 2942 */ 2943 ctx->app_tag = 0; 2944 ctx->app_tag_mask[0] = 0x0; 2945 ctx->app_tag_mask[1] = 0x0; 2946 2947 if (IS_PI_UNINIT_CAPABLE(ha)) { 2948 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || 2949 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) 2950 *pfw_prot_opts |= PO_DIS_VALD_APP_ESC; 2951 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) 2952 *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; 2953 } 2954 2955 t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts); 2956 2957 switch (se_cmd->prot_type) { 2958 case TARGET_DIF_TYPE0_PROT: 2959 /* 2960 * No check for ql2xenablehba_err_chk, as it 2961 * would be an I/O error if hba tag generation 2962 * is not done. 2963 */ 2964 ctx->ref_tag = cpu_to_le32(lba); 2965 /* enable ALL bytes of the ref tag */ 2966 ctx->ref_tag_mask[0] = 0xff; 2967 ctx->ref_tag_mask[1] = 0xff; 2968 ctx->ref_tag_mask[2] = 0xff; 2969 ctx->ref_tag_mask[3] = 0xff; 2970 break; 2971 case TARGET_DIF_TYPE1_PROT: 2972 /* 2973 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit 2974 * REF tag, and 16 bit app tag. 2975 */ 2976 ctx->ref_tag = cpu_to_le32(lba); 2977 if (!qla_tgt_ref_mask_check(se_cmd) || 2978 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { 2979 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2980 break; 2981 } 2982 /* enable ALL bytes of the ref tag */ 2983 ctx->ref_tag_mask[0] = 0xff; 2984 ctx->ref_tag_mask[1] = 0xff; 2985 ctx->ref_tag_mask[2] = 0xff; 2986 ctx->ref_tag_mask[3] = 0xff; 2987 break; 2988 case TARGET_DIF_TYPE2_PROT: 2989 /* 2990 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF 2991 * tag has to match LBA in CDB + N 2992 */ 2993 ctx->ref_tag = cpu_to_le32(lba); 2994 if (!qla_tgt_ref_mask_check(se_cmd) || 2995 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { 2996 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2997 break; 2998 } 2999 /* enable ALL bytes of the ref tag */ 3000 ctx->ref_tag_mask[0] = 0xff; 3001 ctx->ref_tag_mask[1] = 0xff; 3002 ctx->ref_tag_mask[2] = 0xff; 3003 ctx->ref_tag_mask[3] = 0xff; 3004 break; 3005 case TARGET_DIF_TYPE3_PROT: 3006 /* For TYPE 3 protection: 16 bit GUARD only */ 3007 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 3008 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = 3009 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; 3010 break; 3011 } 3012 } 3013 3014 static inline int 3015 qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) 3016 { 3017 struct dsd64 *cur_dsd; 3018 uint32_t transfer_length = 0; 3019 uint32_t data_bytes; 3020 uint32_t dif_bytes; 3021 uint8_t bundling = 1; 3022 struct crc_context *crc_ctx_pkt = NULL; 3023 struct qla_hw_data *ha; 3024 struct ctio_crc2_to_fw *pkt; 3025 dma_addr_t crc_ctx_dma; 3026 uint16_t fw_prot_opts = 0; 3027 struct qla_tgt_cmd *cmd = prm->cmd; 3028 struct se_cmd *se_cmd = &cmd->se_cmd; 3029 uint32_t h; 3030 struct atio_from_isp *atio = &prm->cmd->atio; 3031 struct qla_tc_param tc; 3032 uint16_t t16; 3033 scsi_qla_host_t *vha = cmd->vha; 3034 3035 ha = vha->hw; 3036 3037 pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr; 3038 prm->pkt = pkt; 3039 memset(pkt, 0, sizeof(*pkt)); 3040 3041 ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071, 3042 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n", 3043 cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op, 3044 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba); 3045 3046 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) || 3047 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP)) 3048 bundling = 0; 3049 3050 /* Compute dif len and adjust data len to incude protection */ 3051 data_bytes = cmd->bufflen; 3052 dif_bytes = (data_bytes / cmd->blk_sz) * 8; 3053 3054 switch (se_cmd->prot_op) { 3055 case TARGET_PROT_DIN_INSERT: 3056 case TARGET_PROT_DOUT_STRIP: 3057 transfer_length = data_bytes; 3058 if (cmd->prot_sg_cnt) 3059 data_bytes += dif_bytes; 3060 break; 3061 case TARGET_PROT_DIN_STRIP: 3062 case TARGET_PROT_DOUT_INSERT: 3063 case TARGET_PROT_DIN_PASS: 3064 case TARGET_PROT_DOUT_PASS: 3065 transfer_length = data_bytes + dif_bytes; 3066 break; 3067 default: 3068 BUG(); 3069 break; 3070 } 3071 3072 if (!qlt_hba_err_chk_enabled(se_cmd)) 3073 fw_prot_opts |= 0x10; /* Disable Guard tag checking */ 3074 /* HBA error checking enabled */ 3075 else if (IS_PI_UNINIT_CAPABLE(ha)) { 3076 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || 3077 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) 3078 fw_prot_opts |= PO_DIS_VALD_APP_ESC; 3079 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) 3080 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; 3081 } 3082 3083 switch (se_cmd->prot_op) { 3084 case TARGET_PROT_DIN_INSERT: 3085 case TARGET_PROT_DOUT_INSERT: 3086 fw_prot_opts |= PO_MODE_DIF_INSERT; 3087 break; 3088 case TARGET_PROT_DIN_STRIP: 3089 case TARGET_PROT_DOUT_STRIP: 3090 fw_prot_opts |= PO_MODE_DIF_REMOVE; 3091 break; 3092 case TARGET_PROT_DIN_PASS: 3093 case TARGET_PROT_DOUT_PASS: 3094 fw_prot_opts |= PO_MODE_DIF_PASS; 3095 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */ 3096 break; 3097 default:/* Normal Request */ 3098 fw_prot_opts |= PO_MODE_DIF_PASS; 3099 break; 3100 } 3101 3102 /* ---- PKT ---- */ 3103 /* Update entry type to indicate Command Type CRC_2 IOCB */ 3104 pkt->entry_type = CTIO_CRC2; 3105 pkt->entry_count = 1; 3106 pkt->vp_index = cmd->vp_idx; 3107 3108 h = qlt_make_handle(qpair); 3109 if (unlikely(h == QLA_TGT_NULL_HANDLE)) { 3110 /* 3111 * CTIO type 7 from the firmware doesn't provide a way to 3112 * know the initiator's LOOP ID, hence we can't find 3113 * the session and, so, the command. 3114 */ 3115 return -EAGAIN; 3116 } else 3117 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; 3118 3119 pkt->handle = MAKE_HANDLE(qpair->req->id, h); 3120 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; 3121 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); 3122 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 3123 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 3124 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 3125 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 3126 pkt->exchange_addr = atio->u.isp24.exchange_addr; 3127 3128 /* silence compile warning */ 3129 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 3130 pkt->ox_id = cpu_to_le16(t16); 3131 3132 t16 = (atio->u.isp24.attr << 9); 3133 pkt->flags |= cpu_to_le16(t16); 3134 pkt->relative_offset = cpu_to_le32(prm->cmd->offset); 3135 3136 /* Set transfer direction */ 3137 if (cmd->dma_data_direction == DMA_TO_DEVICE) 3138 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN); 3139 else if (cmd->dma_data_direction == DMA_FROM_DEVICE) 3140 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); 3141 3142 pkt->dseg_count = prm->tot_dsds; 3143 /* Fibre channel byte count */ 3144 pkt->transfer_length = cpu_to_le32(transfer_length); 3145 3146 /* ----- CRC context -------- */ 3147 3148 /* Allocate CRC context from global pool */ 3149 crc_ctx_pkt = cmd->ctx = 3150 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); 3151 3152 if (!crc_ctx_pkt) 3153 goto crc_queuing_error; 3154 3155 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; 3156 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); 3157 3158 /* Set handle */ 3159 crc_ctx_pkt->handle = pkt->handle; 3160 3161 qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts); 3162 3163 put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address); 3164 pkt->crc_context_len = CRC_CONTEXT_LEN_FW; 3165 3166 if (!bundling) { 3167 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd; 3168 } else { 3169 /* 3170 * Configure Bundling if we need to fetch interlaving 3171 * protection PCI accesses 3172 */ 3173 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; 3174 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); 3175 crc_ctx_pkt->u.bundling.dseg_count = 3176 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt); 3177 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd; 3178 } 3179 3180 /* Finish the common fields of CRC pkt */ 3181 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz); 3182 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); 3183 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); 3184 crc_ctx_pkt->guard_seed = cpu_to_le16(0); 3185 3186 memset((uint8_t *)&tc, 0 , sizeof(tc)); 3187 tc.vha = vha; 3188 tc.blk_sz = cmd->blk_sz; 3189 tc.bufflen = cmd->bufflen; 3190 tc.sg = cmd->sg; 3191 tc.prot_sg = cmd->prot_sg; 3192 tc.ctx = crc_ctx_pkt; 3193 tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced; 3194 3195 /* Walks data segments */ 3196 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR); 3197 3198 if (!bundling && prm->prot_seg_cnt) { 3199 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, 3200 prm->tot_dsds, &tc)) 3201 goto crc_queuing_error; 3202 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, 3203 (prm->tot_dsds - prm->prot_seg_cnt), &tc)) 3204 goto crc_queuing_error; 3205 3206 if (bundling && prm->prot_seg_cnt) { 3207 /* Walks dif segments */ 3208 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA; 3209 3210 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd; 3211 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, 3212 prm->prot_seg_cnt, cmd)) 3213 goto crc_queuing_error; 3214 } 3215 return QLA_SUCCESS; 3216 3217 crc_queuing_error: 3218 /* Cleanup will be performed by the caller */ 3219 qpair->req->outstanding_cmds[h] = NULL; 3220 3221 return QLA_FUNCTION_FAILED; 3222 } 3223 3224 /* 3225 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * 3226 * QLA_TGT_XMIT_STATUS for >= 24xx silicon 3227 */ 3228 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, 3229 uint8_t scsi_status) 3230 { 3231 struct scsi_qla_host *vha = cmd->vha; 3232 struct qla_qpair *qpair = cmd->qpair; 3233 struct ctio7_to_24xx *pkt; 3234 struct qla_tgt_prm prm; 3235 uint32_t full_req_cnt = 0; 3236 unsigned long flags = 0; 3237 int res; 3238 3239 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || 3240 (cmd->sess && cmd->sess->deleted)) { 3241 cmd->state = QLA_TGT_STATE_PROCESSED; 3242 return 0; 3243 } 3244 3245 ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018, 3246 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n", 3247 (xmit_type & QLA_TGT_XMIT_STATUS) ? 3248 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction, 3249 &cmd->se_cmd, qpair->id); 3250 3251 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, 3252 &full_req_cnt); 3253 if (unlikely(res != 0)) { 3254 return res; 3255 } 3256 3257 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3258 3259 if (xmit_type == QLA_TGT_XMIT_STATUS) 3260 qpair->tgt_counters.core_qla_snd_status++; 3261 else 3262 qpair->tgt_counters.core_qla_que_buf++; 3263 3264 if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) { 3265 /* 3266 * Either the port is not online or this request was from 3267 * previous life, just abort the processing. 3268 */ 3269 cmd->state = QLA_TGT_STATE_PROCESSED; 3270 ql_dbg_qp(ql_dbg_async, qpair, 0xe101, 3271 "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", 3272 vha->flags.online, qla2x00_reset_active(vha), 3273 cmd->reset_count, qpair->chip_reset); 3274 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3275 return 0; 3276 } 3277 3278 /* Does F/W have an IOCBs for this request */ 3279 res = qlt_check_reserve_free_req(qpair, full_req_cnt); 3280 if (unlikely(res)) 3281 goto out_unmap_unlock; 3282 3283 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA)) 3284 res = qlt_build_ctio_crc2_pkt(qpair, &prm); 3285 else 3286 res = qlt_24xx_build_ctio_pkt(qpair, &prm); 3287 if (unlikely(res != 0)) { 3288 qpair->req->cnt += full_req_cnt; 3289 goto out_unmap_unlock; 3290 } 3291 3292 pkt = (struct ctio7_to_24xx *)prm.pkt; 3293 3294 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) { 3295 pkt->u.status0.flags |= 3296 cpu_to_le16(CTIO7_FLAGS_DATA_IN | 3297 CTIO7_FLAGS_STATUS_MODE_0); 3298 3299 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) 3300 qlt_load_data_segments(&prm); 3301 3302 if (prm.add_status_pkt == 0) { 3303 if (xmit_type & QLA_TGT_XMIT_STATUS) { 3304 pkt->u.status0.scsi_status = 3305 cpu_to_le16(prm.rq_result); 3306 pkt->u.status0.residual = 3307 cpu_to_le32(prm.residual); 3308 pkt->u.status0.flags |= cpu_to_le16( 3309 CTIO7_FLAGS_SEND_STATUS); 3310 if (qlt_need_explicit_conf(cmd, 0)) { 3311 pkt->u.status0.flags |= 3312 cpu_to_le16( 3313 CTIO7_FLAGS_EXPLICIT_CONFORM | 3314 CTIO7_FLAGS_CONFORM_REQ); 3315 } 3316 } 3317 3318 } else { 3319 /* 3320 * We have already made sure that there is sufficient 3321 * amount of request entries to not drop HW lock in 3322 * req_pkt(). 3323 */ 3324 struct ctio7_to_24xx *ctio = 3325 (struct ctio7_to_24xx *)qlt_get_req_pkt( 3326 qpair->req); 3327 3328 ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e, 3329 "Building additional status packet 0x%p.\n", 3330 ctio); 3331 3332 /* 3333 * T10Dif: ctio_crc2_to_fw overlay ontop of 3334 * ctio7_to_24xx 3335 */ 3336 memcpy(ctio, pkt, sizeof(*ctio)); 3337 /* reset back to CTIO7 */ 3338 ctio->entry_count = 1; 3339 ctio->entry_type = CTIO_TYPE7; 3340 ctio->dseg_count = 0; 3341 ctio->u.status1.flags &= ~cpu_to_le16( 3342 CTIO7_FLAGS_DATA_IN); 3343 3344 /* Real finish is ctio_m1's finish */ 3345 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; 3346 pkt->u.status0.flags |= cpu_to_le16( 3347 CTIO7_FLAGS_DONT_RET_CTIO); 3348 3349 /* qlt_24xx_init_ctio_to_isp will correct 3350 * all neccessary fields that's part of CTIO7. 3351 * There should be no residual of CTIO-CRC2 data. 3352 */ 3353 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, 3354 &prm); 3355 } 3356 } else 3357 qlt_24xx_init_ctio_to_isp(pkt, &prm); 3358 3359 3360 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ 3361 cmd->cmd_sent_to_fw = 1; 3362 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags); 3363 3364 /* Memory Barrier */ 3365 wmb(); 3366 if (qpair->reqq_start_iocbs) 3367 qpair->reqq_start_iocbs(qpair); 3368 else 3369 qla2x00_start_iocbs(vha, qpair->req); 3370 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3371 3372 return 0; 3373 3374 out_unmap_unlock: 3375 qlt_unmap_sg(vha, cmd); 3376 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3377 3378 return res; 3379 } 3380 EXPORT_SYMBOL(qlt_xmit_response); 3381 3382 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) 3383 { 3384 struct ctio7_to_24xx *pkt; 3385 struct scsi_qla_host *vha = cmd->vha; 3386 struct qla_tgt *tgt = cmd->tgt; 3387 struct qla_tgt_prm prm; 3388 unsigned long flags = 0; 3389 int res = 0; 3390 struct qla_qpair *qpair = cmd->qpair; 3391 3392 memset(&prm, 0, sizeof(prm)); 3393 prm.cmd = cmd; 3394 prm.tgt = tgt; 3395 prm.sg = NULL; 3396 prm.req_cnt = 1; 3397 3398 /* Calculate number of entries and segments required */ 3399 if (qlt_pci_map_calc_cnt(&prm) != 0) 3400 return -EAGAIN; 3401 3402 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || 3403 (cmd->sess && cmd->sess->deleted)) { 3404 /* 3405 * Either the port is not online or this request was from 3406 * previous life, just abort the processing. 3407 */ 3408 cmd->aborted = 1; 3409 cmd->write_data_transferred = 0; 3410 cmd->state = QLA_TGT_STATE_DATA_IN; 3411 vha->hw->tgt.tgt_ops->handle_data(cmd); 3412 ql_dbg_qp(ql_dbg_async, qpair, 0xe102, 3413 "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n", 3414 vha->flags.online, qla2x00_reset_active(vha), 3415 cmd->reset_count, qpair->chip_reset); 3416 return 0; 3417 } 3418 3419 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3420 /* Does F/W have an IOCBs for this request */ 3421 res = qlt_check_reserve_free_req(qpair, prm.req_cnt); 3422 if (res != 0) 3423 goto out_unlock_free_unmap; 3424 if (cmd->se_cmd.prot_op) 3425 res = qlt_build_ctio_crc2_pkt(qpair, &prm); 3426 else 3427 res = qlt_24xx_build_ctio_pkt(qpair, &prm); 3428 3429 if (unlikely(res != 0)) { 3430 qpair->req->cnt += prm.req_cnt; 3431 goto out_unlock_free_unmap; 3432 } 3433 3434 pkt = (struct ctio7_to_24xx *)prm.pkt; 3435 pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 3436 CTIO7_FLAGS_STATUS_MODE_0); 3437 3438 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) 3439 qlt_load_data_segments(&prm); 3440 3441 cmd->state = QLA_TGT_STATE_NEED_DATA; 3442 cmd->cmd_sent_to_fw = 1; 3443 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags); 3444 3445 /* Memory Barrier */ 3446 wmb(); 3447 if (qpair->reqq_start_iocbs) 3448 qpair->reqq_start_iocbs(qpair); 3449 else 3450 qla2x00_start_iocbs(vha, qpair->req); 3451 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3452 3453 return res; 3454 3455 out_unlock_free_unmap: 3456 qlt_unmap_sg(vha, cmd); 3457 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3458 3459 return res; 3460 } 3461 EXPORT_SYMBOL(qlt_rdy_to_xfer); 3462 3463 3464 /* 3465 * it is assumed either hardware_lock or qpair lock is held. 3466 */ 3467 static void 3468 qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, 3469 struct ctio_crc_from_fw *sts) 3470 { 3471 uint8_t *ap = &sts->actual_dif[0]; 3472 uint8_t *ep = &sts->expected_dif[0]; 3473 uint64_t lba = cmd->se_cmd.t_task_lba; 3474 uint8_t scsi_status, sense_key, asc, ascq; 3475 unsigned long flags; 3476 struct scsi_qla_host *vha = cmd->vha; 3477 3478 cmd->trc_flags |= TRC_DIF_ERR; 3479 3480 cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); 3481 cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); 3482 cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); 3483 3484 cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); 3485 cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); 3486 cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); 3487 3488 ql_dbg(ql_dbg_tgt_dif, vha, 0xf075, 3489 "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state); 3490 3491 scsi_status = sense_key = asc = ascq = 0; 3492 3493 /* check appl tag */ 3494 if (cmd->e_app_tag != cmd->a_app_tag) { 3495 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d, 3496 "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", 3497 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3498 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, 3499 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, 3500 cmd->atio.u.isp24.fcp_hdr.ox_id); 3501 3502 cmd->dif_err_code = DIF_ERR_APP; 3503 scsi_status = SAM_STAT_CHECK_CONDITION; 3504 sense_key = ABORTED_COMMAND; 3505 asc = 0x10; 3506 ascq = 0x2; 3507 } 3508 3509 /* check ref tag */ 3510 if (cmd->e_ref_tag != cmd->a_ref_tag) { 3511 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e, 3512 "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ", 3513 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3514 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, 3515 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, 3516 cmd->atio.u.isp24.fcp_hdr.ox_id); 3517 3518 cmd->dif_err_code = DIF_ERR_REF; 3519 scsi_status = SAM_STAT_CHECK_CONDITION; 3520 sense_key = ABORTED_COMMAND; 3521 asc = 0x10; 3522 ascq = 0x3; 3523 goto out; 3524 } 3525 3526 /* check guard */ 3527 if (cmd->e_guard != cmd->a_guard) { 3528 ql_dbg(ql_dbg_tgt_dif, vha, 0xe012, 3529 "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", 3530 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3531 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, 3532 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, 3533 cmd->atio.u.isp24.fcp_hdr.ox_id); 3534 3535 cmd->dif_err_code = DIF_ERR_GRD; 3536 scsi_status = SAM_STAT_CHECK_CONDITION; 3537 sense_key = ABORTED_COMMAND; 3538 asc = 0x10; 3539 ascq = 0x1; 3540 } 3541 out: 3542 switch (cmd->state) { 3543 case QLA_TGT_STATE_NEED_DATA: 3544 /* handle_data will load DIF error code */ 3545 cmd->state = QLA_TGT_STATE_DATA_IN; 3546 vha->hw->tgt.tgt_ops->handle_data(cmd); 3547 break; 3548 default: 3549 spin_lock_irqsave(&cmd->cmd_lock, flags); 3550 if (cmd->aborted) { 3551 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3552 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3553 break; 3554 } 3555 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3556 3557 qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc, 3558 ascq); 3559 /* assume scsi status gets out on the wire. 3560 * Will not wait for completion. 3561 */ 3562 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3563 break; 3564 } 3565 } 3566 3567 /* If hardware_lock held on entry, might drop it, then reaquire */ 3568 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 3569 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, 3570 struct imm_ntfy_from_isp *ntfy) 3571 { 3572 struct nack_to_isp *nack; 3573 struct qla_hw_data *ha = vha->hw; 3574 request_t *pkt; 3575 int ret = 0; 3576 3577 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, 3578 "Sending TERM ELS CTIO (ha=%p)\n", ha); 3579 3580 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 3581 if (pkt == NULL) { 3582 ql_dbg(ql_dbg_tgt, vha, 0xe080, 3583 "qla_target(%d): %s failed: unable to allocate " 3584 "request packet\n", vha->vp_idx, __func__); 3585 return -ENOMEM; 3586 } 3587 3588 pkt->entry_type = NOTIFY_ACK_TYPE; 3589 pkt->entry_count = 1; 3590 pkt->handle = QLA_TGT_SKIP_HANDLE; 3591 3592 nack = (struct nack_to_isp *)pkt; 3593 nack->ox_id = ntfy->ox_id; 3594 3595 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; 3596 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { 3597 nack->u.isp24.flags = ntfy->u.isp24.flags & 3598 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); 3599 } 3600 3601 /* terminate */ 3602 nack->u.isp24.flags |= 3603 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE); 3604 3605 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; 3606 nack->u.isp24.status = ntfy->u.isp24.status; 3607 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; 3608 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; 3609 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; 3610 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; 3611 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; 3612 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; 3613 3614 qla2x00_start_iocbs(vha, vha->req); 3615 return ret; 3616 } 3617 3618 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, 3619 struct imm_ntfy_from_isp *imm, int ha_locked) 3620 { 3621 int rc; 3622 3623 WARN_ON_ONCE(!ha_locked); 3624 rc = __qlt_send_term_imm_notif(vha, imm); 3625 pr_debug("rc = %d\n", rc); 3626 } 3627 3628 /* 3629 * If hardware_lock held on entry, might drop it, then reaquire 3630 * This function sends the appropriate CTIO to ISP 2xxx or 24xx 3631 */ 3632 static int __qlt_send_term_exchange(struct qla_qpair *qpair, 3633 struct qla_tgt_cmd *cmd, 3634 struct atio_from_isp *atio) 3635 { 3636 struct scsi_qla_host *vha = qpair->vha; 3637 struct ctio7_to_24xx *ctio24; 3638 struct qla_hw_data *ha = vha->hw; 3639 request_t *pkt; 3640 int ret = 0; 3641 uint16_t temp; 3642 3643 ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha); 3644 3645 if (cmd) 3646 vha = cmd->vha; 3647 3648 pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL); 3649 if (pkt == NULL) { 3650 ql_dbg(ql_dbg_tgt, vha, 0xe050, 3651 "qla_target(%d): %s failed: unable to allocate " 3652 "request packet\n", vha->vp_idx, __func__); 3653 return -ENOMEM; 3654 } 3655 3656 if (cmd != NULL) { 3657 if (cmd->state < QLA_TGT_STATE_PROCESSED) { 3658 ql_dbg(ql_dbg_tgt, vha, 0xe051, 3659 "qla_target(%d): Terminating cmd %p with " 3660 "incorrect state %d\n", vha->vp_idx, cmd, 3661 cmd->state); 3662 } else 3663 ret = 1; 3664 } 3665 3666 qpair->tgt_counters.num_term_xchg_sent++; 3667 pkt->entry_count = 1; 3668 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 3669 3670 ctio24 = (struct ctio7_to_24xx *)pkt; 3671 ctio24->entry_type = CTIO_TYPE7; 3672 ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED; 3673 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 3674 ctio24->vp_index = vha->vp_idx; 3675 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 3676 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 3677 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 3678 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 3679 temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 | 3680 CTIO7_FLAGS_TERMINATE; 3681 ctio24->u.status1.flags = cpu_to_le16(temp); 3682 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 3683 ctio24->u.status1.ox_id = cpu_to_le16(temp); 3684 3685 /* Memory Barrier */ 3686 wmb(); 3687 if (qpair->reqq_start_iocbs) 3688 qpair->reqq_start_iocbs(qpair); 3689 else 3690 qla2x00_start_iocbs(vha, qpair->req); 3691 return ret; 3692 } 3693 3694 static void qlt_send_term_exchange(struct qla_qpair *qpair, 3695 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked, 3696 int ul_abort) 3697 { 3698 struct scsi_qla_host *vha; 3699 unsigned long flags = 0; 3700 int rc; 3701 3702 /* why use different vha? NPIV */ 3703 if (cmd) 3704 vha = cmd->vha; 3705 else 3706 vha = qpair->vha; 3707 3708 if (ha_locked) { 3709 rc = __qlt_send_term_exchange(qpair, cmd, atio); 3710 if (rc == -ENOMEM) 3711 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3712 goto done; 3713 } 3714 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3715 rc = __qlt_send_term_exchange(qpair, cmd, atio); 3716 if (rc == -ENOMEM) 3717 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3718 3719 done: 3720 if (cmd && !ul_abort && !cmd->aborted) { 3721 if (cmd->sg_mapped) 3722 qlt_unmap_sg(vha, cmd); 3723 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3724 } 3725 3726 if (!ha_locked) 3727 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3728 3729 return; 3730 } 3731 3732 static void qlt_init_term_exchange(struct scsi_qla_host *vha) 3733 { 3734 struct list_head free_list; 3735 struct qla_tgt_cmd *cmd, *tcmd; 3736 3737 vha->hw->tgt.leak_exchg_thresh_hold = 3738 (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT; 3739 3740 cmd = tcmd = NULL; 3741 if (!list_empty(&vha->hw->tgt.q_full_list)) { 3742 INIT_LIST_HEAD(&free_list); 3743 list_splice_init(&vha->hw->tgt.q_full_list, &free_list); 3744 3745 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { 3746 list_del(&cmd->cmd_list); 3747 /* This cmd was never sent to TCM. There is no need 3748 * to schedule free or call free_cmd 3749 */ 3750 qlt_free_cmd(cmd); 3751 vha->hw->tgt.num_qfull_cmds_alloc--; 3752 } 3753 } 3754 vha->hw->tgt.num_qfull_cmds_dropped = 0; 3755 } 3756 3757 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) 3758 { 3759 uint32_t total_leaked; 3760 3761 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped; 3762 3763 if (vha->hw->tgt.leak_exchg_thresh_hold && 3764 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) { 3765 3766 ql_dbg(ql_dbg_tgt, vha, 0xe079, 3767 "Chip reset due to exchange starvation: %d/%d.\n", 3768 total_leaked, vha->hw->cur_fw_xcb_count); 3769 3770 if (IS_P3P_TYPE(vha->hw)) 3771 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 3772 else 3773 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3774 qla2xxx_wake_dpc(vha); 3775 } 3776 3777 } 3778 3779 int qlt_abort_cmd(struct qla_tgt_cmd *cmd) 3780 { 3781 struct qla_tgt *tgt = cmd->tgt; 3782 struct scsi_qla_host *vha = tgt->vha; 3783 struct se_cmd *se_cmd = &cmd->se_cmd; 3784 unsigned long flags; 3785 3786 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, 3787 "qla_target(%d): terminating exchange for aborted cmd=%p " 3788 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, 3789 se_cmd->tag); 3790 3791 spin_lock_irqsave(&cmd->cmd_lock, flags); 3792 if (cmd->aborted) { 3793 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3794 /* 3795 * It's normal to see 2 calls in this path: 3796 * 1) XFER Rdy completion + CMD_T_ABORT 3797 * 2) TCM TMR - drain_state_list 3798 */ 3799 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016, 3800 "multiple abort. %p transport_state %x, t_state %x, " 3801 "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state, 3802 cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); 3803 return EIO; 3804 } 3805 cmd->aborted = 1; 3806 cmd->trc_flags |= TRC_ABORT; 3807 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3808 3809 qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1); 3810 return 0; 3811 } 3812 EXPORT_SYMBOL(qlt_abort_cmd); 3813 3814 void qlt_free_cmd(struct qla_tgt_cmd *cmd) 3815 { 3816 struct fc_port *sess = cmd->sess; 3817 3818 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, 3819 "%s: se_cmd[%p] ox_id %04x\n", 3820 __func__, &cmd->se_cmd, 3821 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 3822 3823 BUG_ON(cmd->cmd_in_wq); 3824 3825 if (cmd->sg_mapped) 3826 qlt_unmap_sg(cmd->vha, cmd); 3827 3828 if (!cmd->q_full) 3829 qlt_decr_num_pend_cmds(cmd->vha); 3830 3831 BUG_ON(cmd->sg_mapped); 3832 cmd->jiffies_at_free = get_jiffies_64(); 3833 if (unlikely(cmd->free_sg)) 3834 kfree(cmd->sg); 3835 3836 if (!sess || !sess->se_sess) { 3837 WARN_ON(1); 3838 return; 3839 } 3840 cmd->jiffies_at_free = get_jiffies_64(); 3841 target_free_tag(sess->se_sess, &cmd->se_cmd); 3842 } 3843 EXPORT_SYMBOL(qlt_free_cmd); 3844 3845 /* 3846 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 3847 */ 3848 static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio, 3849 struct qla_tgt_cmd *cmd, uint32_t status) 3850 { 3851 int term = 0; 3852 struct scsi_qla_host *vha = qpair->vha; 3853 3854 if (cmd->se_cmd.prot_op) 3855 ql_dbg(ql_dbg_tgt_dif, vha, 0xe013, 3856 "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] " 3857 "se_cmd=%p tag[%x] op %#x/%s", 3858 cmd->lba, cmd->lba, 3859 cmd->num_blks, &cmd->se_cmd, 3860 cmd->atio.u.isp24.exchange_addr, 3861 cmd->se_cmd.prot_op, 3862 prot_op_str(cmd->se_cmd.prot_op)); 3863 3864 if (ctio != NULL) { 3865 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; 3866 3867 term = !(c->flags & 3868 cpu_to_le16(OF_TERM_EXCH)); 3869 } else 3870 term = 1; 3871 3872 if (term) 3873 qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0); 3874 3875 return term; 3876 } 3877 3878 3879 /* ha->hardware_lock supposed to be held on entry */ 3880 static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha, 3881 struct rsp_que *rsp, uint32_t handle, void *ctio) 3882 { 3883 void *cmd = NULL; 3884 struct req_que *req; 3885 int qid = GET_QID(handle); 3886 uint32_t h = handle & ~QLA_TGT_HANDLE_MASK; 3887 3888 if (unlikely(h == QLA_TGT_SKIP_HANDLE)) 3889 return NULL; 3890 3891 if (qid == rsp->req->id) { 3892 req = rsp->req; 3893 } else if (vha->hw->req_q_map[qid]) { 3894 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a, 3895 "qla_target(%d): CTIO completion with different QID %d handle %x\n", 3896 vha->vp_idx, rsp->id, handle); 3897 req = vha->hw->req_q_map[qid]; 3898 } else { 3899 return NULL; 3900 } 3901 3902 h &= QLA_CMD_HANDLE_MASK; 3903 3904 if (h != QLA_TGT_NULL_HANDLE) { 3905 if (unlikely(h >= req->num_outstanding_cmds)) { 3906 ql_dbg(ql_dbg_tgt, vha, 0xe052, 3907 "qla_target(%d): Wrong handle %x received\n", 3908 vha->vp_idx, handle); 3909 return NULL; 3910 } 3911 3912 cmd = (void *) req->outstanding_cmds[h]; 3913 if (unlikely(cmd == NULL)) { 3914 ql_dbg(ql_dbg_async, vha, 0xe053, 3915 "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n", 3916 vha->vp_idx, handle, req->id, rsp->id); 3917 return NULL; 3918 } 3919 req->outstanding_cmds[h] = NULL; 3920 } else if (ctio != NULL) { 3921 /* We can't get loop ID from CTIO7 */ 3922 ql_dbg(ql_dbg_tgt, vha, 0xe054, 3923 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't " 3924 "support NULL handles\n", vha->vp_idx); 3925 return NULL; 3926 } 3927 3928 return cmd; 3929 } 3930 3931 /* 3932 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 3933 */ 3934 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, 3935 struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio) 3936 { 3937 struct qla_hw_data *ha = vha->hw; 3938 struct se_cmd *se_cmd; 3939 struct qla_tgt_cmd *cmd; 3940 struct qla_qpair *qpair = rsp->qpair; 3941 3942 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { 3943 /* That could happen only in case of an error/reset/abort */ 3944 if (status != CTIO_SUCCESS) { 3945 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d, 3946 "Intermediate CTIO received" 3947 " (status %x)\n", status); 3948 } 3949 return; 3950 } 3951 3952 cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio); 3953 if (cmd == NULL) 3954 return; 3955 3956 se_cmd = &cmd->se_cmd; 3957 cmd->cmd_sent_to_fw = 0; 3958 3959 qlt_unmap_sg(vha, cmd); 3960 3961 if (unlikely(status != CTIO_SUCCESS)) { 3962 switch (status & 0xFFFF) { 3963 case CTIO_INVALID_RX_ID: 3964 if (printk_ratelimit()) 3965 dev_info(&vha->hw->pdev->dev, 3966 "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n", 3967 vha->vp_idx, cmd->atio.u.isp24.attr, 3968 ((cmd->ctio_flags >> 9) & 0xf), 3969 cmd->ctio_flags); 3970 3971 break; 3972 case CTIO_LIP_RESET: 3973 case CTIO_TARGET_RESET: 3974 case CTIO_ABORTED: 3975 /* driver request abort via Terminate exchange */ 3976 case CTIO_TIMEOUT: 3977 /* They are OK */ 3978 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, 3979 "qla_target(%d): CTIO with " 3980 "status %#x received, state %x, se_cmd %p, " 3981 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, " 3982 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx, 3983 status, cmd->state, se_cmd); 3984 break; 3985 3986 case CTIO_PORT_LOGGED_OUT: 3987 case CTIO_PORT_UNAVAILABLE: 3988 { 3989 int logged_out = 3990 (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT; 3991 3992 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, 3993 "qla_target(%d): CTIO with %s status %x " 3994 "received (state %x, se_cmd %p)\n", vha->vp_idx, 3995 logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE", 3996 status, cmd->state, se_cmd); 3997 3998 if (logged_out && cmd->sess) { 3999 /* 4000 * Session is already logged out, but we need 4001 * to notify initiator, who's not aware of this 4002 */ 4003 cmd->sess->send_els_logo = 1; 4004 ql_dbg(ql_dbg_disc, vha, 0x20f8, 4005 "%s %d %8phC post del sess\n", 4006 __func__, __LINE__, cmd->sess->port_name); 4007 4008 qlt_schedule_sess_for_deletion(cmd->sess); 4009 } 4010 break; 4011 } 4012 case CTIO_DIF_ERROR: { 4013 struct ctio_crc_from_fw *crc = 4014 (struct ctio_crc_from_fw *)ctio; 4015 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, 4016 "qla_target(%d): CTIO with DIF_ERROR status %x " 4017 "received (state %x, ulp_cmd %p) actual_dif[0x%llx] " 4018 "expect_dif[0x%llx]\n", 4019 vha->vp_idx, status, cmd->state, se_cmd, 4020 *((u64 *)&crc->actual_dif[0]), 4021 *((u64 *)&crc->expected_dif[0])); 4022 4023 qlt_handle_dif_error(qpair, cmd, ctio); 4024 return; 4025 } 4026 default: 4027 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, 4028 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n", 4029 vha->vp_idx, status, cmd->state, se_cmd); 4030 break; 4031 } 4032 4033 4034 /* "cmd->aborted" means 4035 * cmd is already aborted/terminated, we don't 4036 * need to terminate again. The exchange is already 4037 * cleaned up/freed at FW level. Just cleanup at driver 4038 * level. 4039 */ 4040 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && 4041 (!cmd->aborted)) { 4042 cmd->trc_flags |= TRC_CTIO_ERR; 4043 if (qlt_term_ctio_exchange(qpair, ctio, cmd, status)) 4044 return; 4045 } 4046 } 4047 4048 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 4049 cmd->trc_flags |= TRC_CTIO_DONE; 4050 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 4051 cmd->state = QLA_TGT_STATE_DATA_IN; 4052 4053 if (status == CTIO_SUCCESS) 4054 cmd->write_data_transferred = 1; 4055 4056 ha->tgt.tgt_ops->handle_data(cmd); 4057 return; 4058 } else if (cmd->aborted) { 4059 cmd->trc_flags |= TRC_CTIO_ABORTED; 4060 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, 4061 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); 4062 } else { 4063 cmd->trc_flags |= TRC_CTIO_STRANGE; 4064 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, 4065 "qla_target(%d): A command in state (%d) should " 4066 "not return a CTIO complete\n", vha->vp_idx, cmd->state); 4067 } 4068 4069 if (unlikely(status != CTIO_SUCCESS) && 4070 !cmd->aborted) { 4071 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); 4072 dump_stack(); 4073 } 4074 4075 ha->tgt.tgt_ops->free_cmd(cmd); 4076 } 4077 4078 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, 4079 uint8_t task_codes) 4080 { 4081 int fcp_task_attr; 4082 4083 switch (task_codes) { 4084 case ATIO_SIMPLE_QUEUE: 4085 fcp_task_attr = TCM_SIMPLE_TAG; 4086 break; 4087 case ATIO_HEAD_OF_QUEUE: 4088 fcp_task_attr = TCM_HEAD_TAG; 4089 break; 4090 case ATIO_ORDERED_QUEUE: 4091 fcp_task_attr = TCM_ORDERED_TAG; 4092 break; 4093 case ATIO_ACA_QUEUE: 4094 fcp_task_attr = TCM_ACA_TAG; 4095 break; 4096 case ATIO_UNTAGGED: 4097 fcp_task_attr = TCM_SIMPLE_TAG; 4098 break; 4099 default: 4100 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d, 4101 "qla_target: unknown task code %x, use ORDERED instead\n", 4102 task_codes); 4103 fcp_task_attr = TCM_ORDERED_TAG; 4104 break; 4105 } 4106 4107 return fcp_task_attr; 4108 } 4109 4110 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *, 4111 uint8_t *); 4112 /* 4113 * Process context for I/O path into tcm_qla2xxx code 4114 */ 4115 static void __qlt_do_work(struct qla_tgt_cmd *cmd) 4116 { 4117 scsi_qla_host_t *vha = cmd->vha; 4118 struct qla_hw_data *ha = vha->hw; 4119 struct fc_port *sess = cmd->sess; 4120 struct atio_from_isp *atio = &cmd->atio; 4121 unsigned char *cdb; 4122 unsigned long flags; 4123 uint32_t data_length; 4124 int ret, fcp_task_attr, data_dir, bidi = 0; 4125 struct qla_qpair *qpair = cmd->qpair; 4126 4127 cmd->cmd_in_wq = 0; 4128 cmd->trc_flags |= TRC_DO_WORK; 4129 4130 if (cmd->aborted) { 4131 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082, 4132 "cmd with tag %u is aborted\n", 4133 cmd->atio.u.isp24.exchange_addr); 4134 goto out_term; 4135 } 4136 4137 spin_lock_init(&cmd->cmd_lock); 4138 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 4139 cmd->se_cmd.tag = atio->u.isp24.exchange_addr; 4140 4141 if (atio->u.isp24.fcp_cmnd.rddata && 4142 atio->u.isp24.fcp_cmnd.wrdata) { 4143 bidi = 1; 4144 data_dir = DMA_TO_DEVICE; 4145 } else if (atio->u.isp24.fcp_cmnd.rddata) 4146 data_dir = DMA_FROM_DEVICE; 4147 else if (atio->u.isp24.fcp_cmnd.wrdata) 4148 data_dir = DMA_TO_DEVICE; 4149 else 4150 data_dir = DMA_NONE; 4151 4152 fcp_task_attr = qlt_get_fcp_task_attr(vha, 4153 atio->u.isp24.fcp_cmnd.task_attr); 4154 data_length = get_datalen_for_atio(atio); 4155 4156 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, 4157 fcp_task_attr, data_dir, bidi); 4158 if (ret != 0) 4159 goto out_term; 4160 /* 4161 * Drop extra session reference from qlt_handle_cmd_for_atio(). 4162 */ 4163 ha->tgt.tgt_ops->put_sess(sess); 4164 return; 4165 4166 out_term: 4167 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd); 4168 /* 4169 * cmd has not sent to target yet, so pass NULL as the second 4170 * argument to qlt_send_term_exchange() and free the memory here. 4171 */ 4172 cmd->trc_flags |= TRC_DO_WORK_ERR; 4173 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 4174 qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0); 4175 4176 qlt_decr_num_pend_cmds(vha); 4177 target_free_tag(sess->se_sess, &cmd->se_cmd); 4178 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 4179 4180 ha->tgt.tgt_ops->put_sess(sess); 4181 } 4182 4183 static void qlt_do_work(struct work_struct *work) 4184 { 4185 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 4186 scsi_qla_host_t *vha = cmd->vha; 4187 unsigned long flags; 4188 4189 spin_lock_irqsave(&vha->cmd_list_lock, flags); 4190 list_del(&cmd->cmd_list); 4191 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 4192 4193 __qlt_do_work(cmd); 4194 } 4195 4196 void qlt_clr_qp_table(struct scsi_qla_host *vha) 4197 { 4198 unsigned long flags; 4199 struct qla_hw_data *ha = vha->hw; 4200 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4201 void *node; 4202 u64 key = 0; 4203 4204 ql_log(ql_log_info, vha, 0x706c, 4205 "User update Number of Active Qpairs %d\n", 4206 ha->tgt.num_act_qpairs); 4207 4208 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 4209 4210 btree_for_each_safe64(&tgt->lun_qpair_map, key, node) 4211 btree_remove64(&tgt->lun_qpair_map, key); 4212 4213 ha->base_qpair->lun_cnt = 0; 4214 for (key = 0; key < ha->max_qpairs; key++) 4215 if (ha->queue_pair_map[key]) 4216 ha->queue_pair_map[key]->lun_cnt = 0; 4217 4218 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 4219 } 4220 4221 static void qlt_assign_qpair(struct scsi_qla_host *vha, 4222 struct qla_tgt_cmd *cmd) 4223 { 4224 struct qla_qpair *qpair, *qp; 4225 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4226 struct qla_qpair_hint *h; 4227 4228 if (vha->flags.qpairs_available) { 4229 h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun); 4230 if (unlikely(!h)) { 4231 /* spread lun to qpair ratio evently */ 4232 int lcnt = 0, rc; 4233 struct scsi_qla_host *base_vha = 4234 pci_get_drvdata(vha->hw->pdev); 4235 4236 qpair = vha->hw->base_qpair; 4237 if (qpair->lun_cnt == 0) { 4238 qpair->lun_cnt++; 4239 h = qla_qpair_to_hint(tgt, qpair); 4240 BUG_ON(!h); 4241 rc = btree_insert64(&tgt->lun_qpair_map, 4242 cmd->unpacked_lun, h, GFP_ATOMIC); 4243 if (rc) { 4244 qpair->lun_cnt--; 4245 ql_log(ql_log_info, vha, 0xd037, 4246 "Unable to insert lun %llx into lun_qpair_map\n", 4247 cmd->unpacked_lun); 4248 } 4249 goto out; 4250 } else { 4251 lcnt = qpair->lun_cnt; 4252 } 4253 4254 h = NULL; 4255 list_for_each_entry(qp, &base_vha->qp_list, 4256 qp_list_elem) { 4257 if (qp->lun_cnt == 0) { 4258 qp->lun_cnt++; 4259 h = qla_qpair_to_hint(tgt, qp); 4260 BUG_ON(!h); 4261 rc = btree_insert64(&tgt->lun_qpair_map, 4262 cmd->unpacked_lun, h, GFP_ATOMIC); 4263 if (rc) { 4264 qp->lun_cnt--; 4265 ql_log(ql_log_info, vha, 0xd038, 4266 "Unable to insert lun %llx into lun_qpair_map\n", 4267 cmd->unpacked_lun); 4268 } 4269 qpair = qp; 4270 goto out; 4271 } else { 4272 if (qp->lun_cnt < lcnt) { 4273 lcnt = qp->lun_cnt; 4274 qpair = qp; 4275 continue; 4276 } 4277 } 4278 } 4279 BUG_ON(!qpair); 4280 qpair->lun_cnt++; 4281 h = qla_qpair_to_hint(tgt, qpair); 4282 BUG_ON(!h); 4283 rc = btree_insert64(&tgt->lun_qpair_map, 4284 cmd->unpacked_lun, h, GFP_ATOMIC); 4285 if (rc) { 4286 qpair->lun_cnt--; 4287 ql_log(ql_log_info, vha, 0xd039, 4288 "Unable to insert lun %llx into lun_qpair_map\n", 4289 cmd->unpacked_lun); 4290 } 4291 } 4292 } else { 4293 h = &tgt->qphints[0]; 4294 } 4295 out: 4296 cmd->qpair = h->qpair; 4297 cmd->se_cmd.cpuid = h->cpuid; 4298 } 4299 4300 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, 4301 struct fc_port *sess, 4302 struct atio_from_isp *atio) 4303 { 4304 struct se_session *se_sess = sess->se_sess; 4305 struct qla_tgt_cmd *cmd; 4306 int tag, cpu; 4307 4308 tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); 4309 if (tag < 0) 4310 return NULL; 4311 4312 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; 4313 memset(cmd, 0, sizeof(struct qla_tgt_cmd)); 4314 cmd->cmd_type = TYPE_TGT_CMD; 4315 memcpy(&cmd->atio, atio, sizeof(*atio)); 4316 cmd->state = QLA_TGT_STATE_NEW; 4317 cmd->tgt = vha->vha_tgt.qla_tgt; 4318 qlt_incr_num_pend_cmds(vha); 4319 cmd->vha = vha; 4320 cmd->se_cmd.map_tag = tag; 4321 cmd->se_cmd.map_cpu = cpu; 4322 cmd->sess = sess; 4323 cmd->loop_id = sess->loop_id; 4324 cmd->conf_compl_supported = sess->conf_compl_supported; 4325 4326 cmd->trc_flags = 0; 4327 cmd->jiffies_at_alloc = get_jiffies_64(); 4328 4329 cmd->unpacked_lun = scsilun_to_int( 4330 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); 4331 qlt_assign_qpair(vha, cmd); 4332 cmd->reset_count = vha->hw->base_qpair->chip_reset; 4333 cmd->vp_idx = vha->vp_idx; 4334 4335 return cmd; 4336 } 4337 4338 /* ha->hardware_lock supposed to be held on entry */ 4339 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, 4340 struct atio_from_isp *atio) 4341 { 4342 struct qla_hw_data *ha = vha->hw; 4343 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4344 struct fc_port *sess; 4345 struct qla_tgt_cmd *cmd; 4346 unsigned long flags; 4347 port_id_t id; 4348 4349 if (unlikely(tgt->tgt_stop)) { 4350 ql_dbg(ql_dbg_io, vha, 0x3061, 4351 "New command while device %p is shutting down\n", tgt); 4352 return -ENODEV; 4353 } 4354 4355 id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2]; 4356 id.b.area = atio->u.isp24.fcp_hdr.s_id[1]; 4357 id.b.domain = atio->u.isp24.fcp_hdr.s_id[0]; 4358 if (IS_SW_RESV_ADDR(id)) 4359 return -EBUSY; 4360 4361 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); 4362 if (unlikely(!sess)) 4363 return -EFAULT; 4364 4365 /* Another WWN used to have our s_id. Our PLOGI scheduled its 4366 * session deletion, but it's still in sess_del_work wq */ 4367 if (sess->deleted) { 4368 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002, 4369 "New command while old session %p is being deleted\n", 4370 sess); 4371 return -EFAULT; 4372 } 4373 4374 /* 4375 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 4376 */ 4377 if (!kref_get_unless_zero(&sess->sess_kref)) { 4378 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, 4379 "%s: kref_get fail, %8phC oxid %x \n", 4380 __func__, sess->port_name, 4381 be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); 4382 return -EFAULT; 4383 } 4384 4385 cmd = qlt_get_tag(vha, sess, atio); 4386 if (!cmd) { 4387 ql_dbg(ql_dbg_io, vha, 0x3062, 4388 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); 4389 ha->tgt.tgt_ops->put_sess(sess); 4390 return -EBUSY; 4391 } 4392 4393 cmd->cmd_in_wq = 1; 4394 cmd->trc_flags |= TRC_NEW_CMD; 4395 4396 spin_lock_irqsave(&vha->cmd_list_lock, flags); 4397 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); 4398 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 4399 4400 INIT_WORK(&cmd->work, qlt_do_work); 4401 if (vha->flags.qpairs_available) { 4402 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work); 4403 } else if (ha->msix_count) { 4404 if (cmd->atio.u.isp24.fcp_cmnd.rddata) 4405 queue_work_on(smp_processor_id(), qla_tgt_wq, 4406 &cmd->work); 4407 else 4408 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, 4409 &cmd->work); 4410 } else { 4411 queue_work(qla_tgt_wq, &cmd->work); 4412 } 4413 4414 return 0; 4415 } 4416 4417 /* ha->hardware_lock supposed to be held on entry */ 4418 static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, 4419 int fn, void *iocb, int flags) 4420 { 4421 struct scsi_qla_host *vha = sess->vha; 4422 struct qla_hw_data *ha = vha->hw; 4423 struct qla_tgt_mgmt_cmd *mcmd; 4424 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4425 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0]; 4426 4427 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 4428 if (!mcmd) { 4429 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009, 4430 "qla_target(%d): Allocation of management " 4431 "command failed, some commands and their data could " 4432 "leak\n", vha->vp_idx); 4433 return -ENOMEM; 4434 } 4435 memset(mcmd, 0, sizeof(*mcmd)); 4436 mcmd->sess = sess; 4437 4438 if (iocb) { 4439 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 4440 sizeof(mcmd->orig_iocb.imm_ntfy)); 4441 } 4442 mcmd->tmr_func = fn; 4443 mcmd->flags = flags; 4444 mcmd->reset_count = ha->base_qpair->chip_reset; 4445 mcmd->qpair = h->qpair; 4446 mcmd->vha = vha; 4447 mcmd->se_cmd.cpuid = h->cpuid; 4448 mcmd->unpacked_lun = lun; 4449 4450 switch (fn) { 4451 case QLA_TGT_LUN_RESET: 4452 case QLA_TGT_CLEAR_TS: 4453 case QLA_TGT_ABORT_TS: 4454 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); 4455 /* fall through */ 4456 case QLA_TGT_CLEAR_ACA: 4457 h = qlt_find_qphint(vha, mcmd->unpacked_lun); 4458 mcmd->qpair = h->qpair; 4459 mcmd->se_cmd.cpuid = h->cpuid; 4460 break; 4461 4462 case QLA_TGT_TARGET_RESET: 4463 case QLA_TGT_NEXUS_LOSS_SESS: 4464 case QLA_TGT_NEXUS_LOSS: 4465 case QLA_TGT_ABORT_ALL: 4466 default: 4467 /* no-op */ 4468 break; 4469 } 4470 4471 INIT_WORK(&mcmd->work, qlt_do_tmr_work); 4472 queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, 4473 &mcmd->work); 4474 4475 return 0; 4476 } 4477 4478 /* ha->hardware_lock supposed to be held on entry */ 4479 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) 4480 { 4481 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4482 struct qla_hw_data *ha = vha->hw; 4483 struct fc_port *sess; 4484 u64 unpacked_lun; 4485 int fn; 4486 unsigned long flags; 4487 4488 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 4489 4490 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4491 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 4492 a->u.isp24.fcp_hdr.s_id); 4493 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4494 4495 unpacked_lun = 4496 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); 4497 4498 if (sess == NULL || sess->deleted) 4499 return -EFAULT; 4500 4501 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 4502 } 4503 4504 /* ha->hardware_lock supposed to be held on entry */ 4505 static int __qlt_abort_task(struct scsi_qla_host *vha, 4506 struct imm_ntfy_from_isp *iocb, struct fc_port *sess) 4507 { 4508 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4509 struct qla_hw_data *ha = vha->hw; 4510 struct qla_tgt_mgmt_cmd *mcmd; 4511 u64 unpacked_lun; 4512 int rc; 4513 4514 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 4515 if (mcmd == NULL) { 4516 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f, 4517 "qla_target(%d): %s: Allocation of ABORT cmd failed\n", 4518 vha->vp_idx, __func__); 4519 return -ENOMEM; 4520 } 4521 memset(mcmd, 0, sizeof(*mcmd)); 4522 4523 mcmd->sess = sess; 4524 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 4525 sizeof(mcmd->orig_iocb.imm_ntfy)); 4526 4527 unpacked_lun = 4528 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); 4529 mcmd->reset_count = ha->base_qpair->chip_reset; 4530 mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK; 4531 mcmd->qpair = ha->base_qpair; 4532 4533 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func, 4534 le16_to_cpu(iocb->u.isp2x.seq_id)); 4535 if (rc != 0) { 4536 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060, 4537 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", 4538 vha->vp_idx, rc); 4539 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 4540 return -EFAULT; 4541 } 4542 4543 return 0; 4544 } 4545 4546 /* ha->hardware_lock supposed to be held on entry */ 4547 static int qlt_abort_task(struct scsi_qla_host *vha, 4548 struct imm_ntfy_from_isp *iocb) 4549 { 4550 struct qla_hw_data *ha = vha->hw; 4551 struct fc_port *sess; 4552 int loop_id; 4553 unsigned long flags; 4554 4555 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); 4556 4557 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4558 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); 4559 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4560 4561 if (sess == NULL) { 4562 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, 4563 "qla_target(%d): task abort for unexisting " 4564 "session\n", vha->vp_idx); 4565 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt, 4566 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); 4567 } 4568 4569 return __qlt_abort_task(vha, iocb, sess); 4570 } 4571 4572 void qlt_logo_completion_handler(fc_port_t *fcport, int rc) 4573 { 4574 if (rc != MBS_COMMAND_COMPLETE) { 4575 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, 4576 "%s: se_sess %p / sess %p from" 4577 " port %8phC loop_id %#04x s_id %02x:%02x:%02x" 4578 " LOGO failed: %#x\n", 4579 __func__, 4580 fcport->se_sess, 4581 fcport, 4582 fcport->port_name, fcport->loop_id, 4583 fcport->d_id.b.domain, fcport->d_id.b.area, 4584 fcport->d_id.b.al_pa, rc); 4585 } 4586 4587 fcport->logout_completed = 1; 4588 } 4589 4590 /* 4591 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) 4592 * 4593 * Schedules sessions with matching port_id/loop_id but different wwn for 4594 * deletion. Returns existing session with matching wwn if present. 4595 * Null otherwise. 4596 */ 4597 struct fc_port * 4598 qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, 4599 port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess) 4600 { 4601 struct fc_port *sess = NULL, *other_sess; 4602 uint64_t other_wwn; 4603 4604 *conflict_sess = NULL; 4605 4606 list_for_each_entry(other_sess, &vha->vp_fcports, list) { 4607 4608 other_wwn = wwn_to_u64(other_sess->port_name); 4609 4610 if (wwn == other_wwn) { 4611 WARN_ON(sess); 4612 sess = other_sess; 4613 continue; 4614 } 4615 4616 /* find other sess with nport_id collision */ 4617 if (port_id.b24 == other_sess->d_id.b24) { 4618 if (loop_id != other_sess->loop_id) { 4619 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c, 4620 "Invalidating sess %p loop_id %d wwn %llx.\n", 4621 other_sess, other_sess->loop_id, other_wwn); 4622 4623 /* 4624 * logout_on_delete is set by default, but another 4625 * session that has the same s_id/loop_id combo 4626 * might have cleared it when requested this session 4627 * deletion, so don't touch it 4628 */ 4629 qlt_schedule_sess_for_deletion(other_sess); 4630 } else { 4631 /* 4632 * Another wwn used to have our s_id/loop_id 4633 * kill the session, but don't free the loop_id 4634 */ 4635 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b, 4636 "Invalidating sess %p loop_id %d wwn %llx.\n", 4637 other_sess, other_sess->loop_id, other_wwn); 4638 4639 other_sess->keep_nport_handle = 1; 4640 if (other_sess->disc_state != DSC_DELETED) 4641 *conflict_sess = other_sess; 4642 qlt_schedule_sess_for_deletion(other_sess); 4643 } 4644 continue; 4645 } 4646 4647 /* find other sess with nport handle collision */ 4648 if ((loop_id == other_sess->loop_id) && 4649 (loop_id != FC_NO_LOOP_ID)) { 4650 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d, 4651 "Invalidating sess %p loop_id %d wwn %llx.\n", 4652 other_sess, other_sess->loop_id, other_wwn); 4653 4654 /* Same loop_id but different s_id 4655 * Ok to kill and logout */ 4656 qlt_schedule_sess_for_deletion(other_sess); 4657 } 4658 } 4659 4660 return sess; 4661 } 4662 4663 /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */ 4664 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id) 4665 { 4666 struct qla_tgt_sess_op *op; 4667 struct qla_tgt_cmd *cmd; 4668 uint32_t key; 4669 int count = 0; 4670 unsigned long flags; 4671 4672 key = (((u32)s_id->b.domain << 16) | 4673 ((u32)s_id->b.area << 8) | 4674 ((u32)s_id->b.al_pa)); 4675 4676 spin_lock_irqsave(&vha->cmd_list_lock, flags); 4677 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { 4678 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 4679 4680 if (op_key == key) { 4681 op->aborted = true; 4682 count++; 4683 } 4684 } 4685 4686 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { 4687 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 4688 4689 if (op_key == key) { 4690 op->aborted = true; 4691 count++; 4692 } 4693 } 4694 4695 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { 4696 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); 4697 4698 if (cmd_key == key) { 4699 cmd->aborted = 1; 4700 count++; 4701 } 4702 } 4703 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 4704 4705 return count; 4706 } 4707 4708 static int qlt_handle_login(struct scsi_qla_host *vha, 4709 struct imm_ntfy_from_isp *iocb) 4710 { 4711 struct fc_port *sess = NULL, *conflict_sess = NULL; 4712 uint64_t wwn; 4713 port_id_t port_id; 4714 uint16_t loop_id, wd3_lo; 4715 int res = 0; 4716 struct qlt_plogi_ack_t *pla; 4717 unsigned long flags; 4718 4719 wwn = wwn_to_u64(iocb->u.isp24.port_name); 4720 4721 port_id.b.domain = iocb->u.isp24.port_id[2]; 4722 port_id.b.area = iocb->u.isp24.port_id[1]; 4723 port_id.b.al_pa = iocb->u.isp24.port_id[0]; 4724 port_id.b.rsvd_1 = 0; 4725 4726 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); 4727 4728 /* Mark all stale commands sitting in qla_tgt_wq for deletion */ 4729 abort_cmds_for_s_id(vha, &port_id); 4730 4731 if (wwn) { 4732 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 4733 sess = qlt_find_sess_invalidate_other(vha, wwn, 4734 port_id, loop_id, &conflict_sess); 4735 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 4736 } else { 4737 ql_dbg(ql_dbg_disc, vha, 0xffff, 4738 "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ", 4739 __func__, __LINE__, loop_id, port_id.b24); 4740 qlt_send_term_imm_notif(vha, iocb, 1); 4741 goto out; 4742 } 4743 4744 if (IS_SW_RESV_ADDR(port_id)) { 4745 res = 1; 4746 goto out; 4747 } 4748 4749 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb); 4750 if (!pla) { 4751 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, 4752 "%s %d %8phC Term INOT due to mem alloc fail", 4753 __func__, __LINE__, 4754 iocb->u.isp24.port_name); 4755 qlt_send_term_imm_notif(vha, iocb, 1); 4756 goto out; 4757 } 4758 4759 if (conflict_sess) { 4760 conflict_sess->login_gen++; 4761 qlt_plogi_ack_link(vha, pla, conflict_sess, 4762 QLT_PLOGI_LINK_CONFLICT); 4763 } 4764 4765 if (!sess) { 4766 pla->ref_count++; 4767 ql_dbg(ql_dbg_disc, vha, 0xffff, 4768 "%s %d %8phC post new sess\n", 4769 __func__, __LINE__, iocb->u.isp24.port_name); 4770 if (iocb->u.isp24.status_subcode == ELS_PLOGI) 4771 qla24xx_post_newsess_work(vha, &port_id, 4772 iocb->u.isp24.port_name, 4773 iocb->u.isp24.u.plogi.node_name, 4774 pla, FC4_TYPE_UNKNOWN); 4775 else 4776 qla24xx_post_newsess_work(vha, &port_id, 4777 iocb->u.isp24.port_name, NULL, 4778 pla, FC4_TYPE_UNKNOWN); 4779 4780 goto out; 4781 } 4782 4783 if (sess->disc_state == DSC_UPD_FCPORT) { 4784 u16 sec; 4785 4786 /* 4787 * Remote port registration is still going on from 4788 * previous login. Allow it to finish before we 4789 * accept the new login. 4790 */ 4791 sess->next_disc_state = DSC_DELETE_PEND; 4792 sec = jiffies_to_msecs(jiffies - 4793 sess->jiffies_at_registration) / 1000; 4794 if (sess->sec_since_registration < sec && sec && 4795 !(sec % 5)) { 4796 sess->sec_since_registration = sec; 4797 ql_dbg(ql_dbg_disc, vha, 0xffff, 4798 "%s %8phC - Slow Rport registration (%d Sec)\n", 4799 __func__, sess->port_name, sec); 4800 } 4801 4802 if (!conflict_sess) 4803 kmem_cache_free(qla_tgt_plogi_cachep, pla); 4804 4805 qlt_send_term_imm_notif(vha, iocb, 1); 4806 goto out; 4807 } 4808 4809 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); 4810 sess->d_id = port_id; 4811 sess->login_gen++; 4812 4813 if (iocb->u.isp24.status_subcode == ELS_PRLI) { 4814 sess->fw_login_state = DSC_LS_PRLI_PEND; 4815 sess->local = 0; 4816 sess->loop_id = loop_id; 4817 sess->d_id = port_id; 4818 sess->fw_login_state = DSC_LS_PRLI_PEND; 4819 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); 4820 4821 if (wd3_lo & BIT_7) 4822 sess->conf_compl_supported = 1; 4823 4824 if ((wd3_lo & BIT_4) == 0) 4825 sess->port_type = FCT_INITIATOR; 4826 else 4827 sess->port_type = FCT_TARGET; 4828 4829 } else 4830 sess->fw_login_state = DSC_LS_PLOGI_PEND; 4831 4832 4833 ql_dbg(ql_dbg_disc, vha, 0x20f9, 4834 "%s %d %8phC DS %d\n", 4835 __func__, __LINE__, sess->port_name, sess->disc_state); 4836 4837 switch (sess->disc_state) { 4838 case DSC_DELETED: 4839 qlt_plogi_ack_unref(vha, pla); 4840 break; 4841 4842 default: 4843 /* 4844 * Under normal circumstances we want to release nport handle 4845 * during LOGO process to avoid nport handle leaks inside FW. 4846 * The exception is when LOGO is done while another PLOGI with 4847 * the same nport handle is waiting as might be the case here. 4848 * Note: there is always a possibily of a race where session 4849 * deletion has already started for other reasons (e.g. ACL 4850 * removal) and now PLOGI arrives: 4851 * 1. if PLOGI arrived in FW after nport handle has been freed, 4852 * FW must have assigned this PLOGI a new/same handle and we 4853 * can proceed ACK'ing it as usual when session deletion 4854 * completes. 4855 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT 4856 * bit reached it, the handle has now been released. We'll 4857 * get an error when we ACK this PLOGI. Nothing will be sent 4858 * back to initiator. Initiator should eventually retry 4859 * PLOGI and situation will correct itself. 4860 */ 4861 sess->keep_nport_handle = ((sess->loop_id == loop_id) && 4862 (sess->d_id.b24 == port_id.b24)); 4863 4864 ql_dbg(ql_dbg_disc, vha, 0x20f9, 4865 "%s %d %8phC post del sess\n", 4866 __func__, __LINE__, sess->port_name); 4867 4868 4869 qlt_schedule_sess_for_deletion(sess); 4870 break; 4871 } 4872 out: 4873 return res; 4874 } 4875 4876 /* 4877 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 4878 */ 4879 static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 4880 struct imm_ntfy_from_isp *iocb) 4881 { 4882 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4883 struct qla_hw_data *ha = vha->hw; 4884 struct fc_port *sess = NULL, *conflict_sess = NULL; 4885 uint64_t wwn; 4886 port_id_t port_id; 4887 uint16_t loop_id; 4888 uint16_t wd3_lo; 4889 int res = 0; 4890 unsigned long flags; 4891 4892 wwn = wwn_to_u64(iocb->u.isp24.port_name); 4893 4894 port_id.b.domain = iocb->u.isp24.port_id[2]; 4895 port_id.b.area = iocb->u.isp24.port_id[1]; 4896 port_id.b.al_pa = iocb->u.isp24.port_id[0]; 4897 port_id.b.rsvd_1 = 0; 4898 4899 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); 4900 4901 ql_dbg(ql_dbg_disc, vha, 0xf026, 4902 "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n", 4903 vha->vp_idx, iocb->u.isp24.port_id[2], 4904 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], 4905 iocb->u.isp24.status_subcode, loop_id, 4906 iocb->u.isp24.port_name); 4907 4908 /* res = 1 means ack at the end of thread 4909 * res = 0 means ack async/later. 4910 */ 4911 switch (iocb->u.isp24.status_subcode) { 4912 case ELS_PLOGI: 4913 res = qlt_handle_login(vha, iocb); 4914 break; 4915 4916 case ELS_PRLI: 4917 if (N2N_TOPO(ha)) { 4918 sess = qla2x00_find_fcport_by_wwpn(vha, 4919 iocb->u.isp24.port_name, 1); 4920 4921 if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) { 4922 ql_dbg(ql_dbg_disc, vha, 0xffff, 4923 "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n", 4924 __func__, __LINE__, 4925 iocb->u.isp24.port_name); 4926 qlt_send_term_imm_notif(vha, iocb, 1); 4927 break; 4928 } 4929 4930 res = qlt_handle_login(vha, iocb); 4931 break; 4932 } 4933 4934 if (IS_SW_RESV_ADDR(port_id)) { 4935 res = 1; 4936 break; 4937 } 4938 4939 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); 4940 4941 if (wwn) { 4942 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); 4943 sess = qlt_find_sess_invalidate_other(vha, wwn, port_id, 4944 loop_id, &conflict_sess); 4945 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); 4946 } 4947 4948 if (conflict_sess) { 4949 switch (conflict_sess->disc_state) { 4950 case DSC_DELETED: 4951 case DSC_DELETE_PEND: 4952 break; 4953 default: 4954 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b, 4955 "PRLI with conflicting sess %p port %8phC\n", 4956 conflict_sess, conflict_sess->port_name); 4957 conflict_sess->fw_login_state = 4958 DSC_LS_PORT_UNAVAIL; 4959 qlt_send_term_imm_notif(vha, iocb, 1); 4960 res = 0; 4961 break; 4962 } 4963 } 4964 4965 if (sess != NULL) { 4966 bool delete = false; 4967 int sec; 4968 4969 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); 4970 switch (sess->fw_login_state) { 4971 case DSC_LS_PLOGI_PEND: 4972 case DSC_LS_PLOGI_COMP: 4973 case DSC_LS_PRLI_COMP: 4974 break; 4975 default: 4976 delete = true; 4977 break; 4978 } 4979 4980 switch (sess->disc_state) { 4981 case DSC_UPD_FCPORT: 4982 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, 4983 flags); 4984 4985 sec = jiffies_to_msecs(jiffies - 4986 sess->jiffies_at_registration)/1000; 4987 if (sess->sec_since_registration < sec && sec && 4988 !(sec % 5)) { 4989 sess->sec_since_registration = sec; 4990 ql_dbg(ql_dbg_disc, sess->vha, 0xffff, 4991 "%s %8phC : Slow Rport registration(%d Sec)\n", 4992 __func__, sess->port_name, sec); 4993 } 4994 qlt_send_term_imm_notif(vha, iocb, 1); 4995 return 0; 4996 4997 case DSC_LOGIN_PEND: 4998 case DSC_GPDB: 4999 case DSC_LOGIN_COMPLETE: 5000 case DSC_ADISC: 5001 delete = false; 5002 break; 5003 default: 5004 break; 5005 } 5006 5007 if (delete) { 5008 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, 5009 flags); 5010 /* 5011 * Impatient initiator sent PRLI before last 5012 * PLOGI could finish. Will force him to re-try, 5013 * while last one finishes. 5014 */ 5015 ql_log(ql_log_warn, sess->vha, 0xf095, 5016 "sess %p PRLI received, before plogi ack.\n", 5017 sess); 5018 qlt_send_term_imm_notif(vha, iocb, 1); 5019 res = 0; 5020 break; 5021 } 5022 5023 /* 5024 * This shouldn't happen under normal circumstances, 5025 * since we have deleted the old session during PLOGI 5026 */ 5027 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096, 5028 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n", 5029 sess->loop_id, sess, iocb->u.isp24.nport_handle); 5030 5031 sess->local = 0; 5032 sess->loop_id = loop_id; 5033 sess->d_id = port_id; 5034 sess->fw_login_state = DSC_LS_PRLI_PEND; 5035 5036 if (wd3_lo & BIT_7) 5037 sess->conf_compl_supported = 1; 5038 5039 if ((wd3_lo & BIT_4) == 0) 5040 sess->port_type = FCT_INITIATOR; 5041 else 5042 sess->port_type = FCT_TARGET; 5043 5044 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); 5045 } 5046 res = 1; /* send notify ack */ 5047 5048 /* Make session global (not used in fabric mode) */ 5049 if (ha->current_topology != ISP_CFG_F) { 5050 if (sess) { 5051 ql_dbg(ql_dbg_disc, vha, 0x20fa, 5052 "%s %d %8phC post nack\n", 5053 __func__, __LINE__, sess->port_name); 5054 qla24xx_post_nack_work(vha, sess, iocb, 5055 SRB_NACK_PRLI); 5056 res = 0; 5057 } else { 5058 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 5059 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 5060 qla2xxx_wake_dpc(vha); 5061 } 5062 } else { 5063 if (sess) { 5064 ql_dbg(ql_dbg_disc, vha, 0x20fb, 5065 "%s %d %8phC post nack\n", 5066 __func__, __LINE__, sess->port_name); 5067 qla24xx_post_nack_work(vha, sess, iocb, 5068 SRB_NACK_PRLI); 5069 res = 0; 5070 } 5071 } 5072 break; 5073 5074 case ELS_TPRLO: 5075 if (le16_to_cpu(iocb->u.isp24.flags) & 5076 NOTIFY24XX_FLAGS_GLOBAL_TPRLO) { 5077 loop_id = 0xFFFF; 5078 qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS); 5079 res = 1; 5080 break; 5081 } 5082 /* fall through */ 5083 case ELS_LOGO: 5084 case ELS_PRLO: 5085 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 5086 sess = qla2x00_find_fcport_by_loopid(vha, loop_id); 5087 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5088 5089 if (sess) { 5090 sess->login_gen++; 5091 sess->fw_login_state = DSC_LS_LOGO_PEND; 5092 sess->logo_ack_needed = 1; 5093 memcpy(sess->iocb, iocb, IOCB_SIZE); 5094 } 5095 5096 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 5097 5098 ql_dbg(ql_dbg_disc, vha, 0x20fc, 5099 "%s: logo %llx res %d sess %p ", 5100 __func__, wwn, res, sess); 5101 if (res == 0) { 5102 /* 5103 * cmd went upper layer, look for qlt_xmit_tm_rsp() 5104 * for LOGO_ACK & sess delete 5105 */ 5106 BUG_ON(!sess); 5107 res = 0; 5108 } else { 5109 /* cmd did not go to upper layer. */ 5110 if (sess) { 5111 qlt_schedule_sess_for_deletion(sess); 5112 res = 0; 5113 } 5114 /* else logo will be ack */ 5115 } 5116 break; 5117 case ELS_PDISC: 5118 case ELS_ADISC: 5119 { 5120 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5121 5122 if (tgt->link_reinit_iocb_pending) { 5123 qlt_send_notify_ack(ha->base_qpair, 5124 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); 5125 tgt->link_reinit_iocb_pending = 0; 5126 } 5127 5128 sess = qla2x00_find_fcport_by_wwpn(vha, 5129 iocb->u.isp24.port_name, 1); 5130 if (sess) { 5131 ql_dbg(ql_dbg_disc, vha, 0x20fd, 5132 "sess %p lid %d|%d DS %d LS %d\n", 5133 sess, sess->loop_id, loop_id, 5134 sess->disc_state, sess->fw_login_state); 5135 } 5136 5137 res = 1; /* send notify ack */ 5138 break; 5139 } 5140 5141 case ELS_FLOGI: /* should never happen */ 5142 default: 5143 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, 5144 "qla_target(%d): Unsupported ELS command %x " 5145 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode); 5146 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 5147 break; 5148 } 5149 5150 ql_dbg(ql_dbg_disc, vha, 0xf026, 5151 "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n", 5152 vha->vp_idx, iocb->u.isp24.status_subcode, res); 5153 5154 return res; 5155 } 5156 5157 /* 5158 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 5159 */ 5160 static void qlt_handle_imm_notify(struct scsi_qla_host *vha, 5161 struct imm_ntfy_from_isp *iocb) 5162 { 5163 struct qla_hw_data *ha = vha->hw; 5164 uint32_t add_flags = 0; 5165 int send_notify_ack = 1; 5166 uint16_t status; 5167 5168 status = le16_to_cpu(iocb->u.isp2x.status); 5169 switch (status) { 5170 case IMM_NTFY_LIP_RESET: 5171 { 5172 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032, 5173 "qla_target(%d): LIP reset (loop %#x), subcode %x\n", 5174 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle), 5175 iocb->u.isp24.status_subcode); 5176 5177 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 5178 send_notify_ack = 0; 5179 break; 5180 } 5181 5182 case IMM_NTFY_LIP_LINK_REINIT: 5183 { 5184 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5185 5186 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, 5187 "qla_target(%d): LINK REINIT (loop %#x, " 5188 "subcode %x)\n", vha->vp_idx, 5189 le16_to_cpu(iocb->u.isp24.nport_handle), 5190 iocb->u.isp24.status_subcode); 5191 if (tgt->link_reinit_iocb_pending) { 5192 qlt_send_notify_ack(ha->base_qpair, 5193 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); 5194 } 5195 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb)); 5196 tgt->link_reinit_iocb_pending = 1; 5197 /* 5198 * QLogic requires to wait after LINK REINIT for possible 5199 * PDISC or ADISC ELS commands 5200 */ 5201 send_notify_ack = 0; 5202 break; 5203 } 5204 5205 case IMM_NTFY_PORT_LOGOUT: 5206 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034, 5207 "qla_target(%d): Port logout (loop " 5208 "%#x, subcode %x)\n", vha->vp_idx, 5209 le16_to_cpu(iocb->u.isp24.nport_handle), 5210 iocb->u.isp24.status_subcode); 5211 5212 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0) 5213 send_notify_ack = 0; 5214 /* The sessions will be cleared in the callback, if needed */ 5215 break; 5216 5217 case IMM_NTFY_GLBL_TPRLO: 5218 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035, 5219 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status); 5220 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 5221 send_notify_ack = 0; 5222 /* The sessions will be cleared in the callback, if needed */ 5223 break; 5224 5225 case IMM_NTFY_PORT_CONFIG: 5226 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036, 5227 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx, 5228 status); 5229 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 5230 send_notify_ack = 0; 5231 /* The sessions will be cleared in the callback, if needed */ 5232 break; 5233 5234 case IMM_NTFY_GLBL_LOGO: 5235 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a, 5236 "qla_target(%d): Link failure detected\n", 5237 vha->vp_idx); 5238 /* I_T nexus loss */ 5239 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 5240 send_notify_ack = 0; 5241 break; 5242 5243 case IMM_NTFY_IOCB_OVERFLOW: 5244 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b, 5245 "qla_target(%d): Cannot provide requested " 5246 "capability (IOCB overflowed the immediate notify " 5247 "resource count)\n", vha->vp_idx); 5248 break; 5249 5250 case IMM_NTFY_ABORT_TASK: 5251 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037, 5252 "qla_target(%d): Abort Task (S %08x I %#x -> " 5253 "L %#x)\n", vha->vp_idx, 5254 le16_to_cpu(iocb->u.isp2x.seq_id), 5255 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb), 5256 le16_to_cpu(iocb->u.isp2x.lun)); 5257 if (qlt_abort_task(vha, iocb) == 0) 5258 send_notify_ack = 0; 5259 break; 5260 5261 case IMM_NTFY_RESOURCE: 5262 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c, 5263 "qla_target(%d): Out of resources, host %ld\n", 5264 vha->vp_idx, vha->host_no); 5265 break; 5266 5267 case IMM_NTFY_MSG_RX: 5268 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038, 5269 "qla_target(%d): Immediate notify task %x\n", 5270 vha->vp_idx, iocb->u.isp2x.task_flags); 5271 break; 5272 5273 case IMM_NTFY_ELS: 5274 if (qlt_24xx_handle_els(vha, iocb) == 0) 5275 send_notify_ack = 0; 5276 break; 5277 default: 5278 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d, 5279 "qla_target(%d): Received unknown immediate " 5280 "notify status %x\n", vha->vp_idx, status); 5281 break; 5282 } 5283 5284 if (send_notify_ack) 5285 qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0, 5286 0, 0); 5287 } 5288 5289 /* 5290 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 5291 * This function sends busy to ISP 2xxx or 24xx. 5292 */ 5293 static int __qlt_send_busy(struct qla_qpair *qpair, 5294 struct atio_from_isp *atio, uint16_t status) 5295 { 5296 struct scsi_qla_host *vha = qpair->vha; 5297 struct ctio7_to_24xx *ctio24; 5298 struct qla_hw_data *ha = vha->hw; 5299 request_t *pkt; 5300 struct fc_port *sess = NULL; 5301 unsigned long flags; 5302 u16 temp; 5303 port_id_t id; 5304 5305 id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2]; 5306 id.b.area = atio->u.isp24.fcp_hdr.s_id[1]; 5307 id.b.domain = atio->u.isp24.fcp_hdr.s_id[0]; 5308 id.b.rsvd_1 = 0; 5309 5310 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 5311 sess = qla2x00_find_fcport_by_nportid(vha, &id, 1); 5312 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5313 if (!sess) { 5314 qlt_send_term_exchange(qpair, NULL, atio, 1, 0); 5315 return 0; 5316 } 5317 /* Sending marker isn't necessary, since we called from ISR */ 5318 5319 pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL); 5320 if (!pkt) { 5321 ql_dbg(ql_dbg_io, vha, 0x3063, 5322 "qla_target(%d): %s failed: unable to allocate " 5323 "request packet", vha->vp_idx, __func__); 5324 return -ENOMEM; 5325 } 5326 5327 qpair->tgt_counters.num_q_full_sent++; 5328 pkt->entry_count = 1; 5329 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 5330 5331 ctio24 = (struct ctio7_to_24xx *)pkt; 5332 ctio24->entry_type = CTIO_TYPE7; 5333 ctio24->nport_handle = sess->loop_id; 5334 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 5335 ctio24->vp_index = vha->vp_idx; 5336 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 5337 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 5338 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 5339 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 5340 temp = (atio->u.isp24.attr << 9) | 5341 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS | 5342 CTIO7_FLAGS_DONT_RET_CTIO; 5343 ctio24->u.status1.flags = cpu_to_le16(temp); 5344 /* 5345 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it, 5346 * if the explicit conformation is used. 5347 */ 5348 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 5349 ctio24->u.status1.scsi_status = cpu_to_le16(status); 5350 5351 ctio24->u.status1.residual = get_datalen_for_atio(atio); 5352 5353 if (ctio24->u.status1.residual != 0) 5354 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; 5355 5356 /* Memory Barrier */ 5357 wmb(); 5358 if (qpair->reqq_start_iocbs) 5359 qpair->reqq_start_iocbs(qpair); 5360 else 5361 qla2x00_start_iocbs(vha, qpair->req); 5362 return 0; 5363 } 5364 5365 /* 5366 * This routine is used to allocate a command for either a QFull condition 5367 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go 5368 * out previously. 5369 */ 5370 static void 5371 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, 5372 struct atio_from_isp *atio, uint16_t status, int qfull) 5373 { 5374 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5375 struct qla_hw_data *ha = vha->hw; 5376 struct fc_port *sess; 5377 struct se_session *se_sess; 5378 struct qla_tgt_cmd *cmd; 5379 int tag, cpu; 5380 unsigned long flags; 5381 5382 if (unlikely(tgt->tgt_stop)) { 5383 ql_dbg(ql_dbg_io, vha, 0x300a, 5384 "New command while device %p is shutting down\n", tgt); 5385 return; 5386 } 5387 5388 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) { 5389 vha->hw->tgt.num_qfull_cmds_dropped++; 5390 if (vha->hw->tgt.num_qfull_cmds_dropped > 5391 vha->qla_stats.stat_max_qfull_cmds_dropped) 5392 vha->qla_stats.stat_max_qfull_cmds_dropped = 5393 vha->hw->tgt.num_qfull_cmds_dropped; 5394 5395 ql_dbg(ql_dbg_io, vha, 0x3068, 5396 "qla_target(%d): %s: QFull CMD dropped[%d]\n", 5397 vha->vp_idx, __func__, 5398 vha->hw->tgt.num_qfull_cmds_dropped); 5399 5400 qlt_chk_exch_leak_thresh_hold(vha); 5401 return; 5402 } 5403 5404 sess = ha->tgt.tgt_ops->find_sess_by_s_id 5405 (vha, atio->u.isp24.fcp_hdr.s_id); 5406 if (!sess) 5407 return; 5408 5409 se_sess = sess->se_sess; 5410 5411 tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); 5412 if (tag < 0) { 5413 ql_dbg(ql_dbg_io, vha, 0x3009, 5414 "qla_target(%d): %s: Allocation of cmd failed\n", 5415 vha->vp_idx, __func__); 5416 5417 vha->hw->tgt.num_qfull_cmds_dropped++; 5418 if (vha->hw->tgt.num_qfull_cmds_dropped > 5419 vha->qla_stats.stat_max_qfull_cmds_dropped) 5420 vha->qla_stats.stat_max_qfull_cmds_dropped = 5421 vha->hw->tgt.num_qfull_cmds_dropped; 5422 5423 qlt_chk_exch_leak_thresh_hold(vha); 5424 return; 5425 } 5426 5427 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; 5428 memset(cmd, 0, sizeof(struct qla_tgt_cmd)); 5429 5430 qlt_incr_num_pend_cmds(vha); 5431 INIT_LIST_HEAD(&cmd->cmd_list); 5432 memcpy(&cmd->atio, atio, sizeof(*atio)); 5433 5434 cmd->tgt = vha->vha_tgt.qla_tgt; 5435 cmd->vha = vha; 5436 cmd->reset_count = ha->base_qpair->chip_reset; 5437 cmd->q_full = 1; 5438 cmd->qpair = ha->base_qpair; 5439 cmd->se_cmd.map_cpu = cpu; 5440 5441 if (qfull) { 5442 cmd->q_full = 1; 5443 /* NOTE: borrowing the state field to carry the status */ 5444 cmd->state = status; 5445 } else 5446 cmd->term_exchg = 1; 5447 5448 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5449 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list); 5450 5451 vha->hw->tgt.num_qfull_cmds_alloc++; 5452 if (vha->hw->tgt.num_qfull_cmds_alloc > 5453 vha->qla_stats.stat_max_qfull_cmds_alloc) 5454 vha->qla_stats.stat_max_qfull_cmds_alloc = 5455 vha->hw->tgt.num_qfull_cmds_alloc; 5456 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5457 } 5458 5459 int 5460 qlt_free_qfull_cmds(struct qla_qpair *qpair) 5461 { 5462 struct scsi_qla_host *vha = qpair->vha; 5463 struct qla_hw_data *ha = vha->hw; 5464 unsigned long flags; 5465 struct qla_tgt_cmd *cmd, *tcmd; 5466 struct list_head free_list, q_full_list; 5467 int rc = 0; 5468 5469 if (list_empty(&ha->tgt.q_full_list)) 5470 return 0; 5471 5472 INIT_LIST_HEAD(&free_list); 5473 INIT_LIST_HEAD(&q_full_list); 5474 5475 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5476 if (list_empty(&ha->tgt.q_full_list)) { 5477 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5478 return 0; 5479 } 5480 5481 list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list); 5482 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5483 5484 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 5485 list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) { 5486 if (cmd->q_full) 5487 /* cmd->state is a borrowed field to hold status */ 5488 rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state); 5489 else if (cmd->term_exchg) 5490 rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio); 5491 5492 if (rc == -ENOMEM) 5493 break; 5494 5495 if (cmd->q_full) 5496 ql_dbg(ql_dbg_io, vha, 0x3006, 5497 "%s: busy sent for ox_id[%04x]\n", __func__, 5498 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5499 else if (cmd->term_exchg) 5500 ql_dbg(ql_dbg_io, vha, 0x3007, 5501 "%s: Term exchg sent for ox_id[%04x]\n", __func__, 5502 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5503 else 5504 ql_dbg(ql_dbg_io, vha, 0x3008, 5505 "%s: Unexpected cmd in QFull list %p\n", __func__, 5506 cmd); 5507 5508 list_del(&cmd->cmd_list); 5509 list_add_tail(&cmd->cmd_list, &free_list); 5510 5511 /* piggy back on hardware_lock for protection */ 5512 vha->hw->tgt.num_qfull_cmds_alloc--; 5513 } 5514 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 5515 5516 cmd = NULL; 5517 5518 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { 5519 list_del(&cmd->cmd_list); 5520 /* This cmd was never sent to TCM. There is no need 5521 * to schedule free or call free_cmd 5522 */ 5523 qlt_free_cmd(cmd); 5524 } 5525 5526 if (!list_empty(&q_full_list)) { 5527 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5528 list_splice(&q_full_list, &vha->hw->tgt.q_full_list); 5529 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5530 } 5531 5532 return rc; 5533 } 5534 5535 static void 5536 qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio, 5537 uint16_t status) 5538 { 5539 int rc = 0; 5540 struct scsi_qla_host *vha = qpair->vha; 5541 5542 rc = __qlt_send_busy(qpair, atio, status); 5543 if (rc == -ENOMEM) 5544 qlt_alloc_qfull_cmd(vha, atio, status, 1); 5545 } 5546 5547 static int 5548 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair, 5549 struct atio_from_isp *atio, uint8_t ha_locked) 5550 { 5551 struct qla_hw_data *ha = vha->hw; 5552 unsigned long flags; 5553 5554 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) 5555 return 0; 5556 5557 if (!ha_locked) 5558 spin_lock_irqsave(&ha->hardware_lock, flags); 5559 qlt_send_busy(qpair, atio, qla_sam_status); 5560 if (!ha_locked) 5561 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5562 5563 return 1; 5564 } 5565 5566 /* ha->hardware_lock supposed to be held on entry */ 5567 /* called via callback from qla2xxx */ 5568 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, 5569 struct atio_from_isp *atio, uint8_t ha_locked) 5570 { 5571 struct qla_hw_data *ha = vha->hw; 5572 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5573 int rc; 5574 unsigned long flags = 0; 5575 5576 if (unlikely(tgt == NULL)) { 5577 ql_dbg(ql_dbg_tgt, vha, 0x3064, 5578 "ATIO pkt, but no tgt (ha %p)", ha); 5579 return; 5580 } 5581 /* 5582 * In tgt_stop mode we also should allow all requests to pass. 5583 * Otherwise, some commands can stuck. 5584 */ 5585 5586 tgt->atio_irq_cmd_count++; 5587 5588 switch (atio->u.raw.entry_type) { 5589 case ATIO_TYPE7: 5590 if (unlikely(atio->u.isp24.exchange_addr == 5591 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) { 5592 ql_dbg(ql_dbg_io, vha, 0x3065, 5593 "qla_target(%d): ATIO_TYPE7 " 5594 "received with UNKNOWN exchange address, " 5595 "sending QUEUE_FULL\n", vha->vp_idx); 5596 if (!ha_locked) 5597 spin_lock_irqsave(&ha->hardware_lock, flags); 5598 qlt_send_busy(ha->base_qpair, atio, qla_sam_status); 5599 if (!ha_locked) 5600 spin_unlock_irqrestore(&ha->hardware_lock, 5601 flags); 5602 break; 5603 } 5604 5605 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { 5606 rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair, 5607 atio, ha_locked); 5608 if (rc != 0) { 5609 tgt->atio_irq_cmd_count--; 5610 return; 5611 } 5612 rc = qlt_handle_cmd_for_atio(vha, atio); 5613 } else { 5614 rc = qlt_handle_task_mgmt(vha, atio); 5615 } 5616 if (unlikely(rc != 0)) { 5617 if (!ha_locked) 5618 spin_lock_irqsave(&ha->hardware_lock, flags); 5619 switch (rc) { 5620 case -ENODEV: 5621 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5622 "qla_target: Unable to send command to target\n"); 5623 break; 5624 case -EBADF: 5625 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5626 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n"); 5627 qlt_send_term_exchange(ha->base_qpair, NULL, 5628 atio, 1, 0); 5629 break; 5630 case -EBUSY: 5631 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5632 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5633 vha->vp_idx); 5634 qlt_send_busy(ha->base_qpair, atio, 5635 tc_sam_status); 5636 break; 5637 default: 5638 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5639 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5640 vha->vp_idx); 5641 qlt_send_busy(ha->base_qpair, atio, 5642 qla_sam_status); 5643 break; 5644 } 5645 if (!ha_locked) 5646 spin_unlock_irqrestore(&ha->hardware_lock, 5647 flags); 5648 } 5649 break; 5650 5651 case IMMED_NOTIFY_TYPE: 5652 { 5653 if (unlikely(atio->u.isp2x.entry_status != 0)) { 5654 ql_dbg(ql_dbg_tgt, vha, 0xe05b, 5655 "qla_target(%d): Received ATIO packet %x " 5656 "with error status %x\n", vha->vp_idx, 5657 atio->u.raw.entry_type, 5658 atio->u.isp2x.entry_status); 5659 break; 5660 } 5661 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); 5662 5663 if (!ha_locked) 5664 spin_lock_irqsave(&ha->hardware_lock, flags); 5665 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); 5666 if (!ha_locked) 5667 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5668 break; 5669 } 5670 5671 default: 5672 ql_dbg(ql_dbg_tgt, vha, 0xe05c, 5673 "qla_target(%d): Received unknown ATIO atio " 5674 "type %x\n", vha->vp_idx, atio->u.raw.entry_type); 5675 break; 5676 } 5677 5678 tgt->atio_irq_cmd_count--; 5679 } 5680 5681 /* 5682 * qpair lock is assume to be held 5683 * rc = 0 : send terminate & abts respond 5684 * rc != 0: do not send term & abts respond 5685 */ 5686 static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha, 5687 struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry) 5688 { 5689 struct qla_hw_data *ha = vha->hw; 5690 int rc = 0; 5691 5692 /* 5693 * Detect unresolved exchange. If the same ABTS is unable 5694 * to terminate an existing command and the same ABTS loops 5695 * between FW & Driver, then force FW dump. Under 1 jiff, 5696 * we should see multiple loops. 5697 */ 5698 if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort && 5699 qpair->retry_term_jiff == jiffies) { 5700 /* found existing exchange */ 5701 qpair->retry_term_cnt++; 5702 if (qpair->retry_term_cnt >= 5) { 5703 rc = EIO; 5704 qpair->retry_term_cnt = 0; 5705 ql_log(ql_log_warn, vha, 0xffff, 5706 "Unable to send ABTS Respond. Dumping firmware.\n"); 5707 ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer, 5708 vha, 0xffff, (uint8_t *)entry, sizeof(*entry)); 5709 5710 if (qpair == ha->base_qpair) 5711 ha->isp_ops->fw_dump(vha, 1); 5712 else 5713 ha->isp_ops->fw_dump(vha, 0); 5714 5715 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 5716 qla2xxx_wake_dpc(vha); 5717 } 5718 } else if (qpair->retry_term_jiff != jiffies) { 5719 qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort; 5720 qpair->retry_term_cnt = 0; 5721 qpair->retry_term_jiff = jiffies; 5722 } 5723 5724 return rc; 5725 } 5726 5727 5728 static void qlt_handle_abts_completion(struct scsi_qla_host *vha, 5729 struct rsp_que *rsp, response_t *pkt) 5730 { 5731 struct abts_resp_from_24xx_fw *entry = 5732 (struct abts_resp_from_24xx_fw *)pkt; 5733 u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK; 5734 struct qla_tgt_mgmt_cmd *mcmd; 5735 struct qla_hw_data *ha = vha->hw; 5736 5737 mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt); 5738 if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) { 5739 ql_dbg(ql_dbg_async, vha, 0xe064, 5740 "qla_target(%d): ABTS Comp without mcmd\n", 5741 vha->vp_idx); 5742 return; 5743 } 5744 5745 if (mcmd) 5746 vha = mcmd->vha; 5747 vha->vha_tgt.qla_tgt->abts_resp_expected--; 5748 5749 ql_dbg(ql_dbg_tgt, vha, 0xe038, 5750 "ABTS_RESP_24XX: compl_status %x\n", 5751 entry->compl_status); 5752 5753 if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) { 5754 if ((entry->error_subcode1 == 0x1E) && 5755 (entry->error_subcode2 == 0)) { 5756 if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) { 5757 ha->tgt.tgt_ops->free_mcmd(mcmd); 5758 return; 5759 } 5760 qlt_24xx_retry_term_exchange(vha, rsp->qpair, 5761 pkt, mcmd); 5762 } else { 5763 ql_dbg(ql_dbg_tgt, vha, 0xe063, 5764 "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)", 5765 vha->vp_idx, entry->compl_status, 5766 entry->error_subcode1, 5767 entry->error_subcode2); 5768 ha->tgt.tgt_ops->free_mcmd(mcmd); 5769 } 5770 } else { 5771 ha->tgt.tgt_ops->free_mcmd(mcmd); 5772 } 5773 } 5774 5775 /* ha->hardware_lock supposed to be held on entry */ 5776 /* called via callback from qla2xxx */ 5777 static void qlt_response_pkt(struct scsi_qla_host *vha, 5778 struct rsp_que *rsp, response_t *pkt) 5779 { 5780 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5781 5782 if (unlikely(tgt == NULL)) { 5783 ql_dbg(ql_dbg_tgt, vha, 0xe05d, 5784 "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n", 5785 vha->vp_idx, pkt->entry_type, vha->hw); 5786 return; 5787 } 5788 5789 /* 5790 * In tgt_stop mode we also should allow all requests to pass. 5791 * Otherwise, some commands can stuck. 5792 */ 5793 5794 switch (pkt->entry_type) { 5795 case CTIO_CRC2: 5796 case CTIO_TYPE7: 5797 { 5798 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 5799 5800 qlt_do_ctio_completion(vha, rsp, entry->handle, 5801 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5802 entry); 5803 break; 5804 } 5805 5806 case ACCEPT_TGT_IO_TYPE: 5807 { 5808 struct atio_from_isp *atio = (struct atio_from_isp *)pkt; 5809 int rc; 5810 5811 if (atio->u.isp2x.status != 5812 cpu_to_le16(ATIO_CDB_VALID)) { 5813 ql_dbg(ql_dbg_tgt, vha, 0xe05e, 5814 "qla_target(%d): ATIO with error " 5815 "status %x received\n", vha->vp_idx, 5816 le16_to_cpu(atio->u.isp2x.status)); 5817 break; 5818 } 5819 5820 rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1); 5821 if (rc != 0) 5822 return; 5823 5824 rc = qlt_handle_cmd_for_atio(vha, atio); 5825 if (unlikely(rc != 0)) { 5826 switch (rc) { 5827 case -ENODEV: 5828 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5829 "qla_target: Unable to send command to target\n"); 5830 break; 5831 case -EBADF: 5832 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5833 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n"); 5834 qlt_send_term_exchange(rsp->qpair, NULL, 5835 atio, 1, 0); 5836 break; 5837 case -EBUSY: 5838 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5839 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5840 vha->vp_idx); 5841 qlt_send_busy(rsp->qpair, atio, 5842 tc_sam_status); 5843 break; 5844 default: 5845 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5846 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5847 vha->vp_idx); 5848 qlt_send_busy(rsp->qpair, atio, 5849 qla_sam_status); 5850 break; 5851 } 5852 } 5853 } 5854 break; 5855 5856 case CONTINUE_TGT_IO_TYPE: 5857 { 5858 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 5859 5860 qlt_do_ctio_completion(vha, rsp, entry->handle, 5861 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5862 entry); 5863 break; 5864 } 5865 5866 case CTIO_A64_TYPE: 5867 { 5868 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 5869 5870 qlt_do_ctio_completion(vha, rsp, entry->handle, 5871 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5872 entry); 5873 break; 5874 } 5875 5876 case IMMED_NOTIFY_TYPE: 5877 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n"); 5878 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); 5879 break; 5880 5881 case NOTIFY_ACK_TYPE: 5882 if (tgt->notify_ack_expected > 0) { 5883 struct nack_to_isp *entry = (struct nack_to_isp *)pkt; 5884 5885 ql_dbg(ql_dbg_tgt, vha, 0xe036, 5886 "NOTIFY_ACK seq %08x status %x\n", 5887 le16_to_cpu(entry->u.isp2x.seq_id), 5888 le16_to_cpu(entry->u.isp2x.status)); 5889 tgt->notify_ack_expected--; 5890 if (entry->u.isp2x.status != 5891 cpu_to_le16(NOTIFY_ACK_SUCCESS)) { 5892 ql_dbg(ql_dbg_tgt, vha, 0xe061, 5893 "qla_target(%d): NOTIFY_ACK " 5894 "failed %x\n", vha->vp_idx, 5895 le16_to_cpu(entry->u.isp2x.status)); 5896 } 5897 } else { 5898 ql_dbg(ql_dbg_tgt, vha, 0xe062, 5899 "qla_target(%d): Unexpected NOTIFY_ACK received\n", 5900 vha->vp_idx); 5901 } 5902 break; 5903 5904 case ABTS_RECV_24XX: 5905 ql_dbg(ql_dbg_tgt, vha, 0xe037, 5906 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx); 5907 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt); 5908 break; 5909 5910 case ABTS_RESP_24XX: 5911 if (tgt->abts_resp_expected > 0) { 5912 qlt_handle_abts_completion(vha, rsp, pkt); 5913 } else { 5914 ql_dbg(ql_dbg_tgt, vha, 0xe064, 5915 "qla_target(%d): Unexpected ABTS_RESP_24XX " 5916 "received\n", vha->vp_idx); 5917 } 5918 break; 5919 5920 default: 5921 ql_dbg(ql_dbg_tgt, vha, 0xe065, 5922 "qla_target(%d): Received unknown response pkt " 5923 "type %x\n", vha->vp_idx, pkt->entry_type); 5924 break; 5925 } 5926 5927 } 5928 5929 /* 5930 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 5931 */ 5932 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, 5933 uint16_t *mailbox) 5934 { 5935 struct qla_hw_data *ha = vha->hw; 5936 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5937 int login_code; 5938 5939 if (!tgt || tgt->tgt_stop || tgt->tgt_stopped) 5940 return; 5941 5942 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) && 5943 IS_QLA2100(ha)) 5944 return; 5945 /* 5946 * In tgt_stop mode we also should allow all requests to pass. 5947 * Otherwise, some commands can stuck. 5948 */ 5949 5950 5951 switch (code) { 5952 case MBA_RESET: /* Reset */ 5953 case MBA_SYSTEM_ERR: /* System Error */ 5954 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 5955 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 5956 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a, 5957 "qla_target(%d): System error async event %#x " 5958 "occurred", vha->vp_idx, code); 5959 break; 5960 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */ 5961 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 5962 break; 5963 5964 case MBA_LOOP_UP: 5965 { 5966 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b, 5967 "qla_target(%d): Async LOOP_UP occurred " 5968 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, 5969 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5970 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5971 if (tgt->link_reinit_iocb_pending) { 5972 qlt_send_notify_ack(ha->base_qpair, 5973 (void *)&tgt->link_reinit_iocb, 5974 0, 0, 0, 0, 0, 0); 5975 tgt->link_reinit_iocb_pending = 0; 5976 } 5977 break; 5978 } 5979 5980 case MBA_LIP_OCCURRED: 5981 case MBA_LOOP_DOWN: 5982 case MBA_LIP_RESET: 5983 case MBA_RSCN_UPDATE: 5984 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c, 5985 "qla_target(%d): Async event %#x occurred " 5986 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code, 5987 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5988 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5989 break; 5990 5991 case MBA_REJECTED_FCP_CMD: 5992 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017, 5993 "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", 5994 vha->vp_idx, 5995 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5996 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5997 5998 if (le16_to_cpu(mailbox[3]) == 1) { 5999 /* exchange starvation. */ 6000 vha->hw->exch_starvation++; 6001 if (vha->hw->exch_starvation > 5) { 6002 ql_log(ql_log_warn, vha, 0xd03a, 6003 "Exchange starvation-. Resetting RISC\n"); 6004 6005 vha->hw->exch_starvation = 0; 6006 if (IS_P3P_TYPE(vha->hw)) 6007 set_bit(FCOE_CTX_RESET_NEEDED, 6008 &vha->dpc_flags); 6009 else 6010 set_bit(ISP_ABORT_NEEDED, 6011 &vha->dpc_flags); 6012 qla2xxx_wake_dpc(vha); 6013 } 6014 } 6015 break; 6016 6017 case MBA_PORT_UPDATE: 6018 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, 6019 "qla_target(%d): Port update async event %#x " 6020 "occurred: updating the ports database (m[0]=%x, m[1]=%x, " 6021 "m[2]=%x, m[3]=%x)", vha->vp_idx, code, 6022 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 6023 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 6024 6025 login_code = le16_to_cpu(mailbox[2]); 6026 if (login_code == 0x4) { 6027 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, 6028 "Async MB 2: Got PLOGI Complete\n"); 6029 vha->hw->exch_starvation = 0; 6030 } else if (login_code == 0x7) 6031 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, 6032 "Async MB 2: Port Logged Out\n"); 6033 break; 6034 default: 6035 break; 6036 } 6037 6038 } 6039 6040 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, 6041 uint16_t loop_id) 6042 { 6043 fc_port_t *fcport, *tfcp, *del; 6044 int rc; 6045 unsigned long flags; 6046 u8 newfcport = 0; 6047 6048 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 6049 if (!fcport) { 6050 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, 6051 "qla_target(%d): Allocation of tmp FC port failed", 6052 vha->vp_idx); 6053 return NULL; 6054 } 6055 6056 fcport->loop_id = loop_id; 6057 6058 rc = qla24xx_gpdb_wait(vha, fcport, 0); 6059 if (rc != QLA_SUCCESS) { 6060 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, 6061 "qla_target(%d): Failed to retrieve fcport " 6062 "information -- get_port_database() returned %x " 6063 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); 6064 kfree(fcport); 6065 return NULL; 6066 } 6067 6068 del = NULL; 6069 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 6070 tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1); 6071 6072 if (tfcp) { 6073 tfcp->d_id = fcport->d_id; 6074 tfcp->port_type = fcport->port_type; 6075 tfcp->supported_classes = fcport->supported_classes; 6076 tfcp->flags |= fcport->flags; 6077 tfcp->scan_state = QLA_FCPORT_FOUND; 6078 6079 del = fcport; 6080 fcport = tfcp; 6081 } else { 6082 if (vha->hw->current_topology == ISP_CFG_F) 6083 fcport->flags |= FCF_FABRIC_DEVICE; 6084 6085 list_add_tail(&fcport->list, &vha->vp_fcports); 6086 if (!IS_SW_RESV_ADDR(fcport->d_id)) 6087 vha->fcport_count++; 6088 fcport->login_gen++; 6089 fcport->disc_state = DSC_LOGIN_COMPLETE; 6090 fcport->login_succ = 1; 6091 newfcport = 1; 6092 } 6093 6094 fcport->deleted = 0; 6095 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 6096 6097 switch (vha->host->active_mode) { 6098 case MODE_INITIATOR: 6099 case MODE_DUAL: 6100 if (newfcport) { 6101 if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) { 6102 qla24xx_sched_upd_fcport(fcport); 6103 } else { 6104 ql_dbg(ql_dbg_disc, vha, 0x20ff, 6105 "%s %d %8phC post gpsc fcp_cnt %d\n", 6106 __func__, __LINE__, fcport->port_name, vha->fcport_count); 6107 qla24xx_post_gpsc_work(vha, fcport); 6108 } 6109 } 6110 break; 6111 6112 case MODE_TARGET: 6113 default: 6114 break; 6115 } 6116 if (del) 6117 qla2x00_free_fcport(del); 6118 6119 return fcport; 6120 } 6121 6122 /* Must be called under tgt_mutex */ 6123 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha, 6124 uint8_t *s_id) 6125 { 6126 struct fc_port *sess = NULL; 6127 fc_port_t *fcport = NULL; 6128 int rc, global_resets; 6129 uint16_t loop_id = 0; 6130 6131 if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) { 6132 /* 6133 * This is Domain Controller, so it should be 6134 * OK to drop SCSI commands from it. 6135 */ 6136 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, 6137 "Unable to find initiator with S_ID %x:%x:%x", 6138 s_id[0], s_id[1], s_id[2]); 6139 return NULL; 6140 } 6141 6142 mutex_lock(&vha->vha_tgt.tgt_mutex); 6143 6144 retry: 6145 global_resets = 6146 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); 6147 6148 rc = qla24xx_get_loop_id(vha, s_id, &loop_id); 6149 if (rc != 0) { 6150 mutex_unlock(&vha->vha_tgt.tgt_mutex); 6151 6152 ql_log(ql_log_info, vha, 0xf071, 6153 "qla_target(%d): Unable to find " 6154 "initiator with S_ID %x:%x:%x", 6155 vha->vp_idx, s_id[0], s_id[1], 6156 s_id[2]); 6157 6158 if (rc == -ENOENT) { 6159 qlt_port_logo_t logo; 6160 6161 sid_to_portid(s_id, &logo.id); 6162 logo.cmd_count = 1; 6163 qlt_send_first_logo(vha, &logo); 6164 } 6165 6166 return NULL; 6167 } 6168 6169 fcport = qlt_get_port_database(vha, loop_id); 6170 if (!fcport) { 6171 mutex_unlock(&vha->vha_tgt.tgt_mutex); 6172 return NULL; 6173 } 6174 6175 if (global_resets != 6176 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { 6177 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, 6178 "qla_target(%d): global reset during session discovery " 6179 "(counter was %d, new %d), retrying", vha->vp_idx, 6180 global_resets, 6181 atomic_read(&vha->vha_tgt. 6182 qla_tgt->tgt_global_resets_count)); 6183 goto retry; 6184 } 6185 6186 sess = qlt_create_sess(vha, fcport, true); 6187 6188 mutex_unlock(&vha->vha_tgt.tgt_mutex); 6189 6190 return sess; 6191 } 6192 6193 static void qlt_abort_work(struct qla_tgt *tgt, 6194 struct qla_tgt_sess_work_param *prm) 6195 { 6196 struct scsi_qla_host *vha = tgt->vha; 6197 struct qla_hw_data *ha = vha->hw; 6198 struct fc_port *sess = NULL; 6199 unsigned long flags = 0, flags2 = 0; 6200 uint32_t be_s_id; 6201 uint8_t s_id[3]; 6202 int rc; 6203 6204 spin_lock_irqsave(&ha->tgt.sess_lock, flags2); 6205 6206 if (tgt->tgt_stop) 6207 goto out_term2; 6208 6209 s_id[0] = prm->abts.fcp_hdr_le.s_id[2]; 6210 s_id[1] = prm->abts.fcp_hdr_le.s_id[1]; 6211 s_id[2] = prm->abts.fcp_hdr_le.s_id[0]; 6212 6213 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 6214 (unsigned char *)&be_s_id); 6215 if (!sess) { 6216 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 6217 6218 sess = qlt_make_local_sess(vha, s_id); 6219 /* sess has got an extra creation ref */ 6220 6221 spin_lock_irqsave(&ha->tgt.sess_lock, flags2); 6222 if (!sess) 6223 goto out_term2; 6224 } else { 6225 if (sess->deleted) { 6226 sess = NULL; 6227 goto out_term2; 6228 } 6229 6230 if (!kref_get_unless_zero(&sess->sess_kref)) { 6231 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c, 6232 "%s: kref_get fail %8phC \n", 6233 __func__, sess->port_name); 6234 sess = NULL; 6235 goto out_term2; 6236 } 6237 } 6238 6239 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); 6240 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 6241 6242 ha->tgt.tgt_ops->put_sess(sess); 6243 6244 if (rc != 0) 6245 goto out_term; 6246 return; 6247 6248 out_term2: 6249 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 6250 6251 if (sess) 6252 ha->tgt.tgt_ops->put_sess(sess); 6253 6254 out_term: 6255 spin_lock_irqsave(&ha->hardware_lock, flags); 6256 qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts, 6257 FCP_TMF_REJECTED, false); 6258 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6259 } 6260 6261 static void qlt_tmr_work(struct qla_tgt *tgt, 6262 struct qla_tgt_sess_work_param *prm) 6263 { 6264 struct atio_from_isp *a = &prm->tm_iocb2; 6265 struct scsi_qla_host *vha = tgt->vha; 6266 struct qla_hw_data *ha = vha->hw; 6267 struct fc_port *sess; 6268 unsigned long flags; 6269 uint8_t *s_id = NULL; /* to hide compiler warnings */ 6270 int rc; 6271 u64 unpacked_lun; 6272 int fn; 6273 void *iocb; 6274 6275 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 6276 6277 if (tgt->tgt_stop) 6278 goto out_term2; 6279 6280 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id; 6281 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 6282 if (!sess) { 6283 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 6284 6285 sess = qlt_make_local_sess(vha, s_id); 6286 /* sess has got an extra creation ref */ 6287 6288 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 6289 if (!sess) 6290 goto out_term2; 6291 } else { 6292 if (sess->deleted) { 6293 goto out_term2; 6294 } 6295 6296 if (!kref_get_unless_zero(&sess->sess_kref)) { 6297 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020, 6298 "%s: kref_get fail %8phC\n", 6299 __func__, sess->port_name); 6300 goto out_term2; 6301 } 6302 } 6303 6304 iocb = a; 6305 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 6306 unpacked_lun = 6307 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); 6308 6309 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 6310 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 6311 6312 ha->tgt.tgt_ops->put_sess(sess); 6313 6314 if (rc != 0) 6315 goto out_term; 6316 return; 6317 6318 out_term2: 6319 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 6320 out_term: 6321 qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0); 6322 } 6323 6324 static void qlt_sess_work_fn(struct work_struct *work) 6325 { 6326 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work); 6327 struct scsi_qla_host *vha = tgt->vha; 6328 unsigned long flags; 6329 6330 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt); 6331 6332 spin_lock_irqsave(&tgt->sess_work_lock, flags); 6333 while (!list_empty(&tgt->sess_works_list)) { 6334 struct qla_tgt_sess_work_param *prm = list_entry( 6335 tgt->sess_works_list.next, typeof(*prm), 6336 sess_works_list_entry); 6337 6338 /* 6339 * This work can be scheduled on several CPUs at time, so we 6340 * must delete the entry to eliminate double processing 6341 */ 6342 list_del(&prm->sess_works_list_entry); 6343 6344 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 6345 6346 switch (prm->type) { 6347 case QLA_TGT_SESS_WORK_ABORT: 6348 qlt_abort_work(tgt, prm); 6349 break; 6350 case QLA_TGT_SESS_WORK_TM: 6351 qlt_tmr_work(tgt, prm); 6352 break; 6353 default: 6354 BUG_ON(1); 6355 break; 6356 } 6357 6358 spin_lock_irqsave(&tgt->sess_work_lock, flags); 6359 6360 kfree(prm); 6361 } 6362 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 6363 } 6364 6365 /* Must be called under tgt_host_action_mutex */ 6366 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) 6367 { 6368 struct qla_tgt *tgt; 6369 int rc, i; 6370 struct qla_qpair_hint *h; 6371 6372 if (!QLA_TGT_MODE_ENABLED()) 6373 return 0; 6374 6375 if (!IS_TGT_MODE_CAPABLE(ha)) { 6376 ql_log(ql_log_warn, base_vha, 0xe070, 6377 "This adapter does not support target mode.\n"); 6378 return 0; 6379 } 6380 6381 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, 6382 "Registering target for host %ld(%p).\n", base_vha->host_no, ha); 6383 6384 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL); 6385 6386 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL); 6387 if (!tgt) { 6388 ql_dbg(ql_dbg_tgt, base_vha, 0xe066, 6389 "Unable to allocate struct qla_tgt\n"); 6390 return -ENOMEM; 6391 } 6392 6393 tgt->qphints = kcalloc(ha->max_qpairs + 1, 6394 sizeof(struct qla_qpair_hint), 6395 GFP_KERNEL); 6396 if (!tgt->qphints) { 6397 kfree(tgt); 6398 ql_log(ql_log_warn, base_vha, 0x0197, 6399 "Unable to allocate qpair hints.\n"); 6400 return -ENOMEM; 6401 } 6402 6403 if (!(base_vha->host->hostt->supported_mode & MODE_TARGET)) 6404 base_vha->host->hostt->supported_mode |= MODE_TARGET; 6405 6406 rc = btree_init64(&tgt->lun_qpair_map); 6407 if (rc) { 6408 kfree(tgt->qphints); 6409 kfree(tgt); 6410 ql_log(ql_log_info, base_vha, 0x0198, 6411 "Unable to initialize lun_qpair_map btree\n"); 6412 return -EIO; 6413 } 6414 h = &tgt->qphints[0]; 6415 h->qpair = ha->base_qpair; 6416 INIT_LIST_HEAD(&h->hint_elem); 6417 h->cpuid = ha->base_qpair->cpuid; 6418 list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list); 6419 6420 for (i = 0; i < ha->max_qpairs; i++) { 6421 unsigned long flags; 6422 6423 struct qla_qpair *qpair = ha->queue_pair_map[i]; 6424 6425 h = &tgt->qphints[i + 1]; 6426 INIT_LIST_HEAD(&h->hint_elem); 6427 if (qpair) { 6428 h->qpair = qpair; 6429 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 6430 list_add_tail(&h->hint_elem, &qpair->hints_list); 6431 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 6432 h->cpuid = qpair->cpuid; 6433 } 6434 } 6435 6436 tgt->ha = ha; 6437 tgt->vha = base_vha; 6438 init_waitqueue_head(&tgt->waitQ); 6439 INIT_LIST_HEAD(&tgt->del_sess_list); 6440 spin_lock_init(&tgt->sess_work_lock); 6441 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn); 6442 INIT_LIST_HEAD(&tgt->sess_works_list); 6443 atomic_set(&tgt->tgt_global_resets_count, 0); 6444 6445 base_vha->vha_tgt.qla_tgt = tgt; 6446 6447 ql_dbg(ql_dbg_tgt, base_vha, 0xe067, 6448 "qla_target(%d): using 64 Bit PCI addressing", 6449 base_vha->vp_idx); 6450 /* 3 is reserved */ 6451 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3); 6452 6453 mutex_lock(&qla_tgt_mutex); 6454 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); 6455 mutex_unlock(&qla_tgt_mutex); 6456 6457 if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target) 6458 ha->tgt.tgt_ops->add_target(base_vha); 6459 6460 return 0; 6461 } 6462 6463 /* Must be called under tgt_host_action_mutex */ 6464 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) 6465 { 6466 if (!vha->vha_tgt.qla_tgt) 6467 return 0; 6468 6469 if (vha->fc_vport) { 6470 qlt_release(vha->vha_tgt.qla_tgt); 6471 return 0; 6472 } 6473 6474 /* free left over qfull cmds */ 6475 qlt_init_term_exchange(vha); 6476 6477 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)", 6478 vha->host_no, ha); 6479 qlt_release(vha->vha_tgt.qla_tgt); 6480 6481 return 0; 6482 } 6483 6484 void qlt_remove_target_resources(struct qla_hw_data *ha) 6485 { 6486 struct scsi_qla_host *node; 6487 u32 key = 0; 6488 6489 btree_for_each_safe32(&ha->tgt.host_map, key, node) 6490 btree_remove32(&ha->tgt.host_map, key); 6491 6492 btree_destroy32(&ha->tgt.host_map); 6493 } 6494 6495 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, 6496 unsigned char *b) 6497 { 6498 int i; 6499 6500 pr_debug("qla2xxx HW vha->node_name: "); 6501 for (i = 0; i < WWN_SIZE; i++) 6502 pr_debug("%02x ", vha->node_name[i]); 6503 pr_debug("\n"); 6504 pr_debug("qla2xxx HW vha->port_name: "); 6505 for (i = 0; i < WWN_SIZE; i++) 6506 pr_debug("%02x ", vha->port_name[i]); 6507 pr_debug("\n"); 6508 6509 pr_debug("qla2xxx passed configfs WWPN: "); 6510 put_unaligned_be64(wwpn, b); 6511 for (i = 0; i < WWN_SIZE; i++) 6512 pr_debug("%02x ", b[i]); 6513 pr_debug("\n"); 6514 } 6515 6516 /** 6517 * qla_tgt_lport_register - register lport with external module 6518 * 6519 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data 6520 * @phys_wwpn: physical port WWPN 6521 * @npiv_wwpn: NPIV WWPN 6522 * @npiv_wwnn: NPIV WWNN 6523 * @callback: lport initialization callback for tcm_qla2xxx code 6524 */ 6525 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn, 6526 u64 npiv_wwpn, u64 npiv_wwnn, 6527 int (*callback)(struct scsi_qla_host *, void *, u64, u64)) 6528 { 6529 struct qla_tgt *tgt; 6530 struct scsi_qla_host *vha; 6531 struct qla_hw_data *ha; 6532 struct Scsi_Host *host; 6533 unsigned long flags; 6534 int rc; 6535 u8 b[WWN_SIZE]; 6536 6537 mutex_lock(&qla_tgt_mutex); 6538 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) { 6539 vha = tgt->vha; 6540 ha = vha->hw; 6541 6542 host = vha->host; 6543 if (!host) 6544 continue; 6545 6546 if (!(host->hostt->supported_mode & MODE_TARGET)) 6547 continue; 6548 6549 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED) 6550 continue; 6551 6552 spin_lock_irqsave(&ha->hardware_lock, flags); 6553 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) { 6554 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n", 6555 host->host_no); 6556 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6557 continue; 6558 } 6559 if (tgt->tgt_stop) { 6560 pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n", 6561 host->host_no); 6562 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6563 continue; 6564 } 6565 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6566 6567 if (!scsi_host_get(host)) { 6568 ql_dbg(ql_dbg_tgt, vha, 0xe068, 6569 "Unable to scsi_host_get() for" 6570 " qla2xxx scsi_host\n"); 6571 continue; 6572 } 6573 qlt_lport_dump(vha, phys_wwpn, b); 6574 6575 if (memcmp(vha->port_name, b, WWN_SIZE)) { 6576 scsi_host_put(host); 6577 continue; 6578 } 6579 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn); 6580 if (rc != 0) 6581 scsi_host_put(host); 6582 6583 mutex_unlock(&qla_tgt_mutex); 6584 return rc; 6585 } 6586 mutex_unlock(&qla_tgt_mutex); 6587 6588 return -ENODEV; 6589 } 6590 EXPORT_SYMBOL(qlt_lport_register); 6591 6592 /** 6593 * qla_tgt_lport_deregister - Degister lport 6594 * 6595 * @vha: Registered scsi_qla_host pointer 6596 */ 6597 void qlt_lport_deregister(struct scsi_qla_host *vha) 6598 { 6599 struct qla_hw_data *ha = vha->hw; 6600 struct Scsi_Host *sh = vha->host; 6601 /* 6602 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data 6603 */ 6604 vha->vha_tgt.target_lport_ptr = NULL; 6605 ha->tgt.tgt_ops = NULL; 6606 /* 6607 * Release the Scsi_Host reference for the underlying qla2xxx host 6608 */ 6609 scsi_host_put(sh); 6610 } 6611 EXPORT_SYMBOL(qlt_lport_deregister); 6612 6613 /* Must be called under HW lock */ 6614 void qlt_set_mode(struct scsi_qla_host *vha) 6615 { 6616 switch (vha->qlini_mode) { 6617 case QLA2XXX_INI_MODE_DISABLED: 6618 case QLA2XXX_INI_MODE_EXCLUSIVE: 6619 vha->host->active_mode = MODE_TARGET; 6620 break; 6621 case QLA2XXX_INI_MODE_ENABLED: 6622 vha->host->active_mode = MODE_INITIATOR; 6623 break; 6624 case QLA2XXX_INI_MODE_DUAL: 6625 vha->host->active_mode = MODE_DUAL; 6626 break; 6627 default: 6628 break; 6629 } 6630 } 6631 6632 /* Must be called under HW lock */ 6633 static void qlt_clear_mode(struct scsi_qla_host *vha) 6634 { 6635 switch (vha->qlini_mode) { 6636 case QLA2XXX_INI_MODE_DISABLED: 6637 vha->host->active_mode = MODE_UNKNOWN; 6638 break; 6639 case QLA2XXX_INI_MODE_EXCLUSIVE: 6640 vha->host->active_mode = MODE_INITIATOR; 6641 break; 6642 case QLA2XXX_INI_MODE_ENABLED: 6643 case QLA2XXX_INI_MODE_DUAL: 6644 vha->host->active_mode = MODE_INITIATOR; 6645 break; 6646 default: 6647 break; 6648 } 6649 } 6650 6651 /* 6652 * qla_tgt_enable_vha - NO LOCK HELD 6653 * 6654 * host_reset, bring up w/ Target Mode Enabled 6655 */ 6656 void 6657 qlt_enable_vha(struct scsi_qla_host *vha) 6658 { 6659 struct qla_hw_data *ha = vha->hw; 6660 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 6661 unsigned long flags; 6662 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 6663 6664 if (!tgt) { 6665 ql_dbg(ql_dbg_tgt, vha, 0xe069, 6666 "Unable to locate qla_tgt pointer from" 6667 " struct qla_hw_data\n"); 6668 dump_stack(); 6669 return; 6670 } 6671 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED) 6672 return; 6673 6674 spin_lock_irqsave(&ha->hardware_lock, flags); 6675 tgt->tgt_stopped = 0; 6676 qlt_set_mode(vha); 6677 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6678 6679 mutex_lock(&ha->optrom_mutex); 6680 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021, 6681 "%s.\n", __func__); 6682 if (vha->vp_idx) { 6683 qla24xx_disable_vp(vha); 6684 qla24xx_enable_vp(vha); 6685 } else { 6686 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 6687 qla2xxx_wake_dpc(base_vha); 6688 qla2x00_wait_for_hba_online(base_vha); 6689 } 6690 mutex_unlock(&ha->optrom_mutex); 6691 } 6692 EXPORT_SYMBOL(qlt_enable_vha); 6693 6694 /* 6695 * qla_tgt_disable_vha - NO LOCK HELD 6696 * 6697 * Disable Target Mode and reset the adapter 6698 */ 6699 static void qlt_disable_vha(struct scsi_qla_host *vha) 6700 { 6701 struct qla_hw_data *ha = vha->hw; 6702 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 6703 unsigned long flags; 6704 6705 if (!tgt) { 6706 ql_dbg(ql_dbg_tgt, vha, 0xe06a, 6707 "Unable to locate qla_tgt pointer from" 6708 " struct qla_hw_data\n"); 6709 dump_stack(); 6710 return; 6711 } 6712 6713 spin_lock_irqsave(&ha->hardware_lock, flags); 6714 qlt_clear_mode(vha); 6715 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6716 6717 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 6718 qla2xxx_wake_dpc(vha); 6719 qla2x00_wait_for_hba_online(vha); 6720 } 6721 6722 /* 6723 * Called from qla_init.c:qla24xx_vport_create() contex to setup 6724 * the target mode specific struct scsi_qla_host and struct qla_hw_data 6725 * members. 6726 */ 6727 void 6728 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) 6729 { 6730 vha->vha_tgt.qla_tgt = NULL; 6731 6732 mutex_init(&vha->vha_tgt.tgt_mutex); 6733 mutex_init(&vha->vha_tgt.tgt_host_action_mutex); 6734 6735 qlt_clear_mode(vha); 6736 6737 /* 6738 * NOTE: Currently the value is kept the same for <24xx and 6739 * >=24xx ISPs. If it is necessary to change it, 6740 * the check should be added for specific ISPs, 6741 * assigning the value appropriately. 6742 */ 6743 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 6744 6745 qlt_add_target(ha, vha); 6746 } 6747 6748 u8 6749 qlt_rff_id(struct scsi_qla_host *vha) 6750 { 6751 u8 fc4_feature = 0; 6752 /* 6753 * FC-4 Feature bit 0 indicates target functionality to the name server. 6754 */ 6755 if (qla_tgt_mode_enabled(vha)) { 6756 fc4_feature = BIT_0; 6757 } else if (qla_ini_mode_enabled(vha)) { 6758 fc4_feature = BIT_1; 6759 } else if (qla_dual_mode_enabled(vha)) 6760 fc4_feature = BIT_0 | BIT_1; 6761 6762 return fc4_feature; 6763 } 6764 6765 /* 6766 * qlt_init_atio_q_entries() - Initializes ATIO queue entries. 6767 * @ha: HA context 6768 * 6769 * Beginning of ATIO ring has initialization control block already built 6770 * by nvram config routine. 6771 * 6772 * Returns 0 on success. 6773 */ 6774 void 6775 qlt_init_atio_q_entries(struct scsi_qla_host *vha) 6776 { 6777 struct qla_hw_data *ha = vha->hw; 6778 uint16_t cnt; 6779 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring; 6780 6781 if (qla_ini_mode_enabled(vha)) 6782 return; 6783 6784 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { 6785 pkt->u.raw.signature = ATIO_PROCESSED; 6786 pkt++; 6787 } 6788 6789 } 6790 6791 /* 6792 * qlt_24xx_process_atio_queue() - Process ATIO queue entries. 6793 * @ha: SCSI driver HA context 6794 */ 6795 void 6796 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) 6797 { 6798 struct qla_hw_data *ha = vha->hw; 6799 struct atio_from_isp *pkt; 6800 int cnt, i; 6801 6802 if (!ha->flags.fw_started) 6803 return; 6804 6805 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || 6806 fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) { 6807 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 6808 cnt = pkt->u.raw.entry_count; 6809 6810 if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) { 6811 /* 6812 * This packet is corrupted. The header + payload 6813 * can not be trusted. There is no point in passing 6814 * it further up. 6815 */ 6816 ql_log(ql_log_warn, vha, 0xd03c, 6817 "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", 6818 pkt->u.isp24.fcp_hdr.s_id, 6819 be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), 6820 le32_to_cpu(pkt->u.isp24.exchange_addr), pkt); 6821 6822 adjust_corrupted_atio(pkt); 6823 qlt_send_term_exchange(ha->base_qpair, NULL, pkt, 6824 ha_locked, 0); 6825 } else { 6826 qlt_24xx_atio_pkt_all_vps(vha, 6827 (struct atio_from_isp *)pkt, ha_locked); 6828 } 6829 6830 for (i = 0; i < cnt; i++) { 6831 ha->tgt.atio_ring_index++; 6832 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) { 6833 ha->tgt.atio_ring_index = 0; 6834 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; 6835 } else 6836 ha->tgt.atio_ring_ptr++; 6837 6838 pkt->u.raw.signature = ATIO_PROCESSED; 6839 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 6840 } 6841 wmb(); 6842 } 6843 6844 /* Adjust ring index */ 6845 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); 6846 } 6847 6848 void 6849 qlt_24xx_config_rings(struct scsi_qla_host *vha) 6850 { 6851 struct qla_hw_data *ha = vha->hw; 6852 struct qla_msix_entry *msix = &ha->msix_entries[2]; 6853 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb; 6854 6855 if (!QLA_TGT_MODE_ENABLED()) 6856 return; 6857 6858 WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0); 6859 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0); 6860 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha)); 6861 6862 if (ha->flags.msix_enabled) { 6863 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 6864 if (IS_QLA2071(ha)) { 6865 /* 4 ports Baker: Enable Interrupt Handshake */ 6866 icb->msix_atio = 0; 6867 icb->firmware_options_2 |= BIT_26; 6868 } else { 6869 icb->msix_atio = cpu_to_le16(msix->entry); 6870 icb->firmware_options_2 &= ~BIT_26; 6871 } 6872 ql_dbg(ql_dbg_init, vha, 0xf072, 6873 "Registering ICB vector 0x%x for atio que.\n", 6874 msix->entry); 6875 } 6876 } else { 6877 /* INTx|MSI */ 6878 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 6879 icb->msix_atio = 0; 6880 icb->firmware_options_2 |= BIT_26; 6881 ql_dbg(ql_dbg_init, vha, 0xf072, 6882 "%s: Use INTx for ATIOQ.\n", __func__); 6883 } 6884 } 6885 } 6886 6887 void 6888 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) 6889 { 6890 struct qla_hw_data *ha = vha->hw; 6891 u32 tmp; 6892 6893 if (!QLA_TGT_MODE_ENABLED()) 6894 return; 6895 6896 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { 6897 if (!ha->tgt.saved_set) { 6898 /* We save only once */ 6899 ha->tgt.saved_exchange_count = nv->exchange_count; 6900 ha->tgt.saved_firmware_options_1 = 6901 nv->firmware_options_1; 6902 ha->tgt.saved_firmware_options_2 = 6903 nv->firmware_options_2; 6904 ha->tgt.saved_firmware_options_3 = 6905 nv->firmware_options_3; 6906 ha->tgt.saved_set = 1; 6907 } 6908 6909 if (qla_tgt_mode_enabled(vha)) 6910 nv->exchange_count = cpu_to_le16(0xFFFF); 6911 else /* dual */ 6912 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 6913 6914 /* Enable target mode */ 6915 nv->firmware_options_1 |= cpu_to_le32(BIT_4); 6916 6917 /* Disable ini mode, if requested */ 6918 if (qla_tgt_mode_enabled(vha)) 6919 nv->firmware_options_1 |= cpu_to_le32(BIT_5); 6920 6921 /* Disable Full Login after LIP */ 6922 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 6923 /* Enable initial LIP */ 6924 nv->firmware_options_1 &= cpu_to_le32(~BIT_9); 6925 if (ql2xtgt_tape_enable) 6926 /* Enable FC Tape support */ 6927 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 6928 else 6929 /* Disable FC Tape support */ 6930 nv->firmware_options_2 &= cpu_to_le32(~BIT_12); 6931 6932 /* Disable Full Login after LIP */ 6933 nv->host_p &= cpu_to_le32(~BIT_10); 6934 6935 /* 6936 * clear BIT 15 explicitly as we have seen at least 6937 * a couple of instances where this was set and this 6938 * was causing the firmware to not be initialized. 6939 */ 6940 nv->firmware_options_1 &= cpu_to_le32(~BIT_15); 6941 /* Enable target PRLI control */ 6942 nv->firmware_options_2 |= cpu_to_le32(BIT_14); 6943 6944 if (IS_QLA25XX(ha)) { 6945 /* Change Loop-prefer to Pt-Pt */ 6946 tmp = ~(BIT_4|BIT_5|BIT_6); 6947 nv->firmware_options_2 &= cpu_to_le32(tmp); 6948 tmp = P2P << 4; 6949 nv->firmware_options_2 |= cpu_to_le32(tmp); 6950 } 6951 } else { 6952 if (ha->tgt.saved_set) { 6953 nv->exchange_count = ha->tgt.saved_exchange_count; 6954 nv->firmware_options_1 = 6955 ha->tgt.saved_firmware_options_1; 6956 nv->firmware_options_2 = 6957 ha->tgt.saved_firmware_options_2; 6958 nv->firmware_options_3 = 6959 ha->tgt.saved_firmware_options_3; 6960 } 6961 return; 6962 } 6963 6964 if (ha->base_qpair->enable_class_2) { 6965 if (vha->flags.init_done) 6966 fc_host_supported_classes(vha->host) = 6967 FC_COS_CLASS2 | FC_COS_CLASS3; 6968 6969 nv->firmware_options_2 |= cpu_to_le32(BIT_8); 6970 } else { 6971 if (vha->flags.init_done) 6972 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 6973 6974 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); 6975 } 6976 } 6977 6978 void 6979 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha, 6980 struct init_cb_24xx *icb) 6981 { 6982 struct qla_hw_data *ha = vha->hw; 6983 6984 if (!QLA_TGT_MODE_ENABLED()) 6985 return; 6986 6987 if (ha->tgt.node_name_set) { 6988 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 6989 icb->firmware_options_1 |= cpu_to_le32(BIT_14); 6990 } 6991 } 6992 6993 void 6994 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) 6995 { 6996 struct qla_hw_data *ha = vha->hw; 6997 u32 tmp; 6998 6999 if (!QLA_TGT_MODE_ENABLED()) 7000 return; 7001 7002 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { 7003 if (!ha->tgt.saved_set) { 7004 /* We save only once */ 7005 ha->tgt.saved_exchange_count = nv->exchange_count; 7006 ha->tgt.saved_firmware_options_1 = 7007 nv->firmware_options_1; 7008 ha->tgt.saved_firmware_options_2 = 7009 nv->firmware_options_2; 7010 ha->tgt.saved_firmware_options_3 = 7011 nv->firmware_options_3; 7012 ha->tgt.saved_set = 1; 7013 } 7014 7015 if (qla_tgt_mode_enabled(vha)) 7016 nv->exchange_count = cpu_to_le16(0xFFFF); 7017 else /* dual */ 7018 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 7019 7020 /* Enable target mode */ 7021 nv->firmware_options_1 |= cpu_to_le32(BIT_4); 7022 7023 /* Disable ini mode, if requested */ 7024 if (qla_tgt_mode_enabled(vha)) 7025 nv->firmware_options_1 |= cpu_to_le32(BIT_5); 7026 /* Disable Full Login after LIP */ 7027 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 7028 /* Enable initial LIP */ 7029 nv->firmware_options_1 &= cpu_to_le32(~BIT_9); 7030 /* 7031 * clear BIT 15 explicitly as we have seen at 7032 * least a couple of instances where this was set 7033 * and this was causing the firmware to not be 7034 * initialized. 7035 */ 7036 nv->firmware_options_1 &= cpu_to_le32(~BIT_15); 7037 if (ql2xtgt_tape_enable) 7038 /* Enable FC tape support */ 7039 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 7040 else 7041 /* Disable FC tape support */ 7042 nv->firmware_options_2 &= cpu_to_le32(~BIT_12); 7043 7044 /* Disable Full Login after LIP */ 7045 nv->host_p &= cpu_to_le32(~BIT_10); 7046 /* Enable target PRLI control */ 7047 nv->firmware_options_2 |= cpu_to_le32(BIT_14); 7048 7049 /* Change Loop-prefer to Pt-Pt */ 7050 tmp = ~(BIT_4|BIT_5|BIT_6); 7051 nv->firmware_options_2 &= cpu_to_le32(tmp); 7052 tmp = P2P << 4; 7053 nv->firmware_options_2 |= cpu_to_le32(tmp); 7054 } else { 7055 if (ha->tgt.saved_set) { 7056 nv->exchange_count = ha->tgt.saved_exchange_count; 7057 nv->firmware_options_1 = 7058 ha->tgt.saved_firmware_options_1; 7059 nv->firmware_options_2 = 7060 ha->tgt.saved_firmware_options_2; 7061 nv->firmware_options_3 = 7062 ha->tgt.saved_firmware_options_3; 7063 } 7064 return; 7065 } 7066 7067 if (ha->base_qpair->enable_class_2) { 7068 if (vha->flags.init_done) 7069 fc_host_supported_classes(vha->host) = 7070 FC_COS_CLASS2 | FC_COS_CLASS3; 7071 7072 nv->firmware_options_2 |= cpu_to_le32(BIT_8); 7073 } else { 7074 if (vha->flags.init_done) 7075 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 7076 7077 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); 7078 } 7079 } 7080 7081 void 7082 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha, 7083 struct init_cb_81xx *icb) 7084 { 7085 struct qla_hw_data *ha = vha->hw; 7086 7087 if (!QLA_TGT_MODE_ENABLED()) 7088 return; 7089 7090 if (ha->tgt.node_name_set) { 7091 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 7092 icb->firmware_options_1 |= cpu_to_le32(BIT_14); 7093 } 7094 } 7095 7096 void 7097 qlt_83xx_iospace_config(struct qla_hw_data *ha) 7098 { 7099 if (!QLA_TGT_MODE_ENABLED()) 7100 return; 7101 7102 ha->msix_count += 1; /* For ATIO Q */ 7103 } 7104 7105 7106 void 7107 qlt_modify_vp_config(struct scsi_qla_host *vha, 7108 struct vp_config_entry_24xx *vpmod) 7109 { 7110 /* enable target mode. Bit5 = 1 => disable */ 7111 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) 7112 vpmod->options_idx1 &= ~BIT_5; 7113 7114 /* Disable ini mode, if requested. bit4 = 1 => disable */ 7115 if (qla_tgt_mode_enabled(vha)) 7116 vpmod->options_idx1 &= ~BIT_4; 7117 } 7118 7119 void 7120 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) 7121 { 7122 int rc; 7123 7124 if (!QLA_TGT_MODE_ENABLED()) 7125 return; 7126 7127 if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 7128 IS_QLA28XX(ha)) { 7129 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; 7130 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; 7131 } else { 7132 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in; 7133 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; 7134 } 7135 7136 mutex_init(&base_vha->vha_tgt.tgt_mutex); 7137 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); 7138 7139 INIT_LIST_HEAD(&base_vha->unknown_atio_list); 7140 INIT_DELAYED_WORK(&base_vha->unknown_atio_work, 7141 qlt_unknown_atio_work_fn); 7142 7143 qlt_clear_mode(base_vha); 7144 7145 rc = btree_init32(&ha->tgt.host_map); 7146 if (rc) 7147 ql_log(ql_log_info, base_vha, 0xd03d, 7148 "Unable to initialize ha->host_map btree\n"); 7149 7150 qlt_update_vp_map(base_vha, SET_VP_IDX); 7151 } 7152 7153 irqreturn_t 7154 qla83xx_msix_atio_q(int irq, void *dev_id) 7155 { 7156 struct rsp_que *rsp; 7157 scsi_qla_host_t *vha; 7158 struct qla_hw_data *ha; 7159 unsigned long flags; 7160 7161 rsp = (struct rsp_que *) dev_id; 7162 ha = rsp->hw; 7163 vha = pci_get_drvdata(ha->pdev); 7164 7165 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 7166 7167 qlt_24xx_process_atio_queue(vha, 0); 7168 7169 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 7170 7171 return IRQ_HANDLED; 7172 } 7173 7174 static void 7175 qlt_handle_abts_recv_work(struct work_struct *work) 7176 { 7177 struct qla_tgt_sess_op *op = container_of(work, 7178 struct qla_tgt_sess_op, work); 7179 scsi_qla_host_t *vha = op->vha; 7180 struct qla_hw_data *ha = vha->hw; 7181 unsigned long flags; 7182 7183 if (qla2x00_reset_active(vha) || 7184 (op->chip_reset != ha->base_qpair->chip_reset)) 7185 return; 7186 7187 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 7188 qlt_24xx_process_atio_queue(vha, 0); 7189 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 7190 7191 spin_lock_irqsave(&ha->hardware_lock, flags); 7192 qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio); 7193 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7194 7195 kfree(op); 7196 } 7197 7198 void 7199 qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp, 7200 response_t *pkt) 7201 { 7202 struct qla_tgt_sess_op *op; 7203 7204 op = kzalloc(sizeof(*op), GFP_ATOMIC); 7205 7206 if (!op) { 7207 /* do not reach for ATIO queue here. This is best effort err 7208 * recovery at this point. 7209 */ 7210 qlt_response_pkt_all_vps(vha, rsp, pkt); 7211 return; 7212 } 7213 7214 memcpy(&op->atio, pkt, sizeof(*pkt)); 7215 op->vha = vha; 7216 op->chip_reset = vha->hw->base_qpair->chip_reset; 7217 op->rsp = rsp; 7218 INIT_WORK(&op->work, qlt_handle_abts_recv_work); 7219 queue_work(qla_tgt_wq, &op->work); 7220 return; 7221 } 7222 7223 int 7224 qlt_mem_alloc(struct qla_hw_data *ha) 7225 { 7226 if (!QLA_TGT_MODE_ENABLED()) 7227 return 0; 7228 7229 ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC, 7230 sizeof(struct qla_tgt_vp_map), 7231 GFP_KERNEL); 7232 if (!ha->tgt.tgt_vp_map) 7233 return -ENOMEM; 7234 7235 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev, 7236 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), 7237 &ha->tgt.atio_dma, GFP_KERNEL); 7238 if (!ha->tgt.atio_ring) { 7239 kfree(ha->tgt.tgt_vp_map); 7240 return -ENOMEM; 7241 } 7242 return 0; 7243 } 7244 7245 void 7246 qlt_mem_free(struct qla_hw_data *ha) 7247 { 7248 if (!QLA_TGT_MODE_ENABLED()) 7249 return; 7250 7251 if (ha->tgt.atio_ring) { 7252 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * 7253 sizeof(struct atio_from_isp), ha->tgt.atio_ring, 7254 ha->tgt.atio_dma); 7255 } 7256 ha->tgt.atio_ring = NULL; 7257 ha->tgt.atio_dma = 0; 7258 kfree(ha->tgt.tgt_vp_map); 7259 ha->tgt.tgt_vp_map = NULL; 7260 } 7261 7262 /* vport_slock to be held by the caller */ 7263 void 7264 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) 7265 { 7266 void *slot; 7267 u32 key; 7268 int rc; 7269 7270 if (!QLA_TGT_MODE_ENABLED()) 7271 return; 7272 7273 key = vha->d_id.b24; 7274 7275 switch (cmd) { 7276 case SET_VP_IDX: 7277 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; 7278 break; 7279 case SET_AL_PA: 7280 slot = btree_lookup32(&vha->hw->tgt.host_map, key); 7281 if (!slot) { 7282 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018, 7283 "Save vha in host_map %p %06x\n", vha, key); 7284 rc = btree_insert32(&vha->hw->tgt.host_map, 7285 key, vha, GFP_ATOMIC); 7286 if (rc) 7287 ql_log(ql_log_info, vha, 0xd03e, 7288 "Unable to insert s_id into host_map: %06x\n", 7289 key); 7290 return; 7291 } 7292 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, 7293 "replace existing vha in host_map %p %06x\n", vha, key); 7294 btree_update32(&vha->hw->tgt.host_map, key, vha); 7295 break; 7296 case RESET_VP_IDX: 7297 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; 7298 break; 7299 case RESET_AL_PA: 7300 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, 7301 "clear vha in host_map %p %06x\n", vha, key); 7302 slot = btree_lookup32(&vha->hw->tgt.host_map, key); 7303 if (slot) 7304 btree_remove32(&vha->hw->tgt.host_map, key); 7305 vha->d_id.b24 = 0; 7306 break; 7307 } 7308 } 7309 7310 void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id) 7311 { 7312 7313 if (!vha->d_id.b24) { 7314 vha->d_id = id; 7315 qlt_update_vp_map(vha, SET_AL_PA); 7316 } else if (vha->d_id.b24 != id.b24) { 7317 qlt_update_vp_map(vha, RESET_AL_PA); 7318 vha->d_id = id; 7319 qlt_update_vp_map(vha, SET_AL_PA); 7320 } 7321 } 7322 7323 static int __init qlt_parse_ini_mode(void) 7324 { 7325 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) 7326 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; 7327 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0) 7328 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED; 7329 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0) 7330 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED; 7331 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0) 7332 ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL; 7333 else 7334 return false; 7335 7336 return true; 7337 } 7338 7339 int __init qlt_init(void) 7340 { 7341 int ret; 7342 7343 BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64); 7344 BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64); 7345 7346 if (!qlt_parse_ini_mode()) { 7347 ql_log(ql_log_fatal, NULL, 0xe06b, 7348 "qlt_parse_ini_mode() failed\n"); 7349 return -EINVAL; 7350 } 7351 7352 if (!QLA_TGT_MODE_ENABLED()) 7353 return 0; 7354 7355 qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", 7356 sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct 7357 qla_tgt_mgmt_cmd), 0, NULL); 7358 if (!qla_tgt_mgmt_cmd_cachep) { 7359 ql_log(ql_log_fatal, NULL, 0xd04b, 7360 "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); 7361 return -ENOMEM; 7362 } 7363 7364 qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep", 7365 sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t), 7366 0, NULL); 7367 7368 if (!qla_tgt_plogi_cachep) { 7369 ql_log(ql_log_fatal, NULL, 0xe06d, 7370 "kmem_cache_create for qla_tgt_plogi_cachep failed\n"); 7371 ret = -ENOMEM; 7372 goto out_mgmt_cmd_cachep; 7373 } 7374 7375 qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, 7376 mempool_free_slab, qla_tgt_mgmt_cmd_cachep); 7377 if (!qla_tgt_mgmt_cmd_mempool) { 7378 ql_log(ql_log_fatal, NULL, 0xe06e, 7379 "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n"); 7380 ret = -ENOMEM; 7381 goto out_plogi_cachep; 7382 } 7383 7384 qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0); 7385 if (!qla_tgt_wq) { 7386 ql_log(ql_log_fatal, NULL, 0xe06f, 7387 "alloc_workqueue for qla_tgt_wq failed\n"); 7388 ret = -ENOMEM; 7389 goto out_cmd_mempool; 7390 } 7391 /* 7392 * Return 1 to signal that initiator-mode is being disabled 7393 */ 7394 return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0; 7395 7396 out_cmd_mempool: 7397 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 7398 out_plogi_cachep: 7399 kmem_cache_destroy(qla_tgt_plogi_cachep); 7400 out_mgmt_cmd_cachep: 7401 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 7402 return ret; 7403 } 7404 7405 void qlt_exit(void) 7406 { 7407 if (!QLA_TGT_MODE_ENABLED()) 7408 return; 7409 7410 destroy_workqueue(qla_tgt_wq); 7411 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 7412 kmem_cache_destroy(qla_tgt_plogi_cachep); 7413 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 7414 } 7415